path: root/drivers
author    Paul Mackerras <paulus@samba.org>  2007-04-29 22:38:01 -0400
committer Paul Mackerras <paulus@samba.org>  2007-04-29 22:38:01 -0400
commit    49e1900d4cc2e7bcecb681fe60f0990bec2dcce8 (patch)
tree      253801ebf57e0a23856a2c7be129c2c178f62fdf /drivers
parent    34f6d749c0a328817d5e36274e53121c1db734dc (diff)
parent    b9099ff63c75216d6ca10bce5a1abcd9293c27e6 (diff)

Merge branch 'linux-2.6' into for-2.6.22
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/thermal.c3
-rw-r--r--drivers/amba/bus.c13
-rw-r--r--drivers/ata/pata_sis.c10
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c4
-rw-r--r--drivers/atm/eni.h2
-rw-r--r--drivers/atm/fore200e.c20
-rw-r--r--drivers/atm/fore200e.h2
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/idt77252.c28
-rw-r--r--drivers/atm/nicstar.c14
-rw-r--r--drivers/base/attribute_container.c26
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/bus.c112
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/core.c293
-rw-r--r--drivers/base/dd.c66
-rw-r--r--drivers/base/dmapool.c14
-rw-r--r--drivers/base/driver.c20
-rw-r--r--drivers/base/firmware_class.c10
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/resume.c13
-rw-r--r--drivers/base/power/shutdown.c2
-rw-r--r--drivers/base/power/suspend.c12
-rw-r--r--drivers/block/aoe/aoe.h9
-rw-r--r--drivers/block/aoe/aoecmd.c17
-rw-r--r--drivers/block/aoe/aoenet.c2
-rw-r--r--drivers/block/cciss.c35
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/pktcdvd.c3
-rw-r--r--drivers/block/ub.c11
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bluecard_cs.c6
-rw-r--r--drivers/bluetooth/bpa10x.c4
-rw-r--r--drivers/bluetooth/bt3c_cs.c6
-rw-r--r--drivers/bluetooth/btuart_cs.c6
-rw-r--r--drivers/bluetooth/dtl1_cs.c2
-rw-r--r--drivers/bluetooth/hci_h4.c6
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/mxser.c48
-rw-r--r--drivers/char/mxser_new.c45
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/random.c38
-rw-r--r--drivers/connector/connector.c4
-rw-r--r--drivers/hwmon/w83627ehf.c20
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c6
-rw-r--r--drivers/ide/Kconfig1
-rw-r--r--drivers/ide/ide-proc.c4
-rw-r--r--drivers/ide/pci/delkin_cb.c1
-rw-r--r--drivers/ide/pci/hpt366.c5
-rw-r--r--drivers/ieee1394/eth1394.c4
-rw-r--r--drivers/ieee1394/eth1394.h2
-rw-r--r--drivers/ieee1394/nodemgr.c22
-rw-r--r--drivers/infiniband/core/mad.c34
-rw-r--r--drivers/infiniband/core/sa_query.c24
-rw-r--r--drivers/infiniband/core/smi.c86
-rw-r--r--drivers/infiniband/core/smi.h34
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/core/ucm.c23
-rw-r--r--drivers/infiniband/core/ucma.c22
-rw-r--r--drivers/infiniband/core/user_mad.c20
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c29
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c55
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c1
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c24
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_common.h23
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c38
-rw-r--r--drivers/infiniband/hw/ipath/ipath_debug.h1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c123
-rw-r--r--drivers/infiniband/hw/ipath/ipath_eeprom.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c287
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c152
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c73
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c86
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c100
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h10
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c14
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c12
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c133
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c920
-rw-r--r--drivers/infiniband/hw/ipath/ipath_registers.h22
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c63
-rw-r--r--drivers/infiniband/hw/ipath/ipath_stats.c16
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c15
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h57
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c66
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c12
-rw-r--r--drivers/input/gameport/gameport.c39
-rw-r--r--drivers/input/serio/serio.c41
-rw-r--r--drivers/isdn/act2000/module.c2
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c2
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c3
-rw-r--r--drivers/isdn/hardware/avm/c4.c3
-rw-r--r--drivers/isdn/hisax/elsa_ser.c6
-rw-r--r--drivers/isdn/hisax/isdnl2.c3
-rw-r--r--drivers/isdn/hysdn/hycapi.c5
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_sched.c5
-rw-r--r--drivers/isdn/i4l/isdn_common.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c11
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c3
-rw-r--r--drivers/isdn/pcbit/capi.c12
-rw-r--r--drivers/kvm/mmu.c1
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/media/common/ir-keymaps.c18
-rw-r--r--drivers/media/common/saa7146_video.c6
-rw-r--r--drivers/media/dvb/b2c2/Kconfig1
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c3
-rw-r--r--drivers/media/dvb/b2c2/flexcop-pci.c9
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig2
-rw-r--r--drivers/media/dvb/bt8xx/bt878.c4
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c3
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.h2
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c56
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.h2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c20
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c48
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.h1
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h1
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig12
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile4
-rw-r--r--drivers/media/dvb/dvb-usb/au6610.c6
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/dvb/dvb-usb/gl861.c8
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c235
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.h46
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c581
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.h9
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c7
-rw-r--r--drivers/media/dvb/frontends/Kconfig28
-rw-r--r--drivers/media/dvb/frontends/Makefile3
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c340
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.h16
-rw-r--r--drivers/media/dvb/frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb/frontends/lgh06xf.c134
-rw-r--r--drivers/media/dvb/frontends/lgh06xf.h35
-rw-r--r--drivers/media/dvb/frontends/or51132.c305
-rw-r--r--drivers/media/dvb/frontends/tda10021.c47
-rw-r--r--drivers/media/dvb/frontends/tda10023.c540
-rw-r--r--drivers/media/dvb/frontends/tda1002x.h (renamed from drivers/media/dvb/frontends/tda10021.h)33
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c98
-rw-r--r--drivers/media/dvb/frontends/tda1004x.h54
-rw-r--r--drivers/media/dvb/frontends/tda827x.c512
-rw-r--r--drivers/media/dvb/frontends/tda827x.h62
-rw-r--r--drivers/media/dvb/pluto2/Kconfig1
-rw-r--r--drivers/media/dvb/ttpci/Kconfig6
-rw-r--r--drivers/media/dvb/ttpci/av7110.c17
-rw-r--r--drivers/media/dvb/ttpci/av7110.h28
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c24
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.h10
-rw-r--r--drivers/media/dvb/ttpci/av7110_ir.c365
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c147
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c96
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c58
-rw-r--r--drivers/media/dvb/ttpci/budget.h3
-rw-r--r--drivers/media/dvb/ttusb-budget/Kconfig1
-rw-r--r--drivers/media/radio/radio-aimslab.c240
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c253
-rw-r--r--drivers/media/radio/radio-gemtek.c260
-rw-r--r--drivers/media/radio/radio-maestro.c266
-rw-r--r--drivers/media/radio/radio-rtrack2.c255
-rw-r--r--drivers/media/radio/radio-sf16fmi.c262
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c350
-rw-r--r--drivers/media/radio/radio-terratec.c247
-rw-r--r--drivers/media/radio/radio-trust.c256
-rw-r--r--drivers/media/radio/radio-typhoon.c239
-rw-r--r--drivers/media/radio/radio-zoltrix.c256
-rw-r--r--drivers/media/video/Kconfig14
-rw-r--r--drivers/media/video/Makefile2
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c53
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c24
-rw-r--r--drivers/media/video/bt8xx/bttv-gpio.c5
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-if.c48
-rw-r--r--drivers/media/video/bt8xx/bttv.h25
-rw-r--r--drivers/media/video/bt8xx/bttvp.h3
-rw-r--r--drivers/media/video/cafe_ccic.c79
-rw-r--r--drivers/media/video/cpia_pp.c49
-rw-r--r--drivers/media/video/cs53l32a.c4
-rw-r--r--drivers/media/video/cx2341x.c72
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c9
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h3
-rw-r--r--drivers/media/video/cx25840/cx25840-firmware.c1
-rw-r--r--drivers/media/video/cx88/Kconfig1
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c9
-rw-r--r--drivers/media/video/cx88/cx88-cards.c37
-rw-r--r--drivers/media/video/cx88/cx88-core.c8
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c31
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c3
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c29
-rw-r--r--drivers/media/video/cx88/cx88-video.c5
-rw-r--r--drivers/media/video/cx88/cx88.h4
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c2
-rw-r--r--drivers/media/video/ivtv/Kconfig26
-rw-r--r--drivers/media/video/ivtv/Makefile7
-rw-r--r--drivers/media/video/ivtv/ivtv-audio.c74
-rw-r--r--drivers/media/video/ivtv/ivtv-audio.h23
-rw-r--r--drivers/media/video/ivtv/ivtv-cards.c964
-rw-r--r--drivers/media/video/ivtv/ivtv-cards.h207
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.c303
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.h21
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c1374
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h868
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c921
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.h44
-rw-r--r--drivers/media/video/ivtv/ivtv-firmware.c272
-rw-r--r--drivers/media/video/ivtv/ivtv-firmware.h25
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.c307
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.h25
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c748
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.h36
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c1567
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h28
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c838
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.h26
-rw-r--r--drivers/media/video/ivtv/ivtv-mailbox.c360
-rw-r--r--drivers/media/video/ivtv/ivtv-mailbox.h25
-rw-r--r--drivers/media/video/ivtv/ivtv-queue.c262
-rw-r--r--drivers/media/video/ivtv/ivtv-queue.h64
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c977
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.h31
-rw-r--r--drivers/media/video/ivtv/ivtv-udma.c200
-rw-r--r--drivers/media/video/ivtv/ivtv-udma.h43
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c538
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.h26
-rw-r--r--drivers/media/video/ivtv/ivtv-version.h26
-rw-r--r--drivers/media/video/ivtv/ivtv-video.c142
-rw-r--r--drivers/media/video/ivtv/ivtv-video.h24
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.c1129
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.h24
-rw-r--r--drivers/media/video/msp3400-driver.c5
-rw-r--r--drivers/media/video/msp3400-driver.h1
-rw-r--r--drivers/media/video/ov7670.c40
-rw-r--r--drivers/media/video/planb.c6
-rw-r--r--drivers/media/video/planb.h1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c16
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c30
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c61
-rw-r--r--drivers/media/video/pwc/pwc-if.c16
-rw-r--r--drivers/media/video/pwc/pwc-ioctl.h36
-rw-r--r--drivers/media/video/pwc/pwc-kiara.c2
-rw-r--r--drivers/media/video/pwc/pwc-kiara.h5
-rw-r--r--drivers/media/video/pwc/pwc-timon.c4
-rw-r--r--drivers/media/video/pwc/pwc-timon.h6
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c60
-rw-r--r--drivers/media/video/pwc/pwc.h5
-rw-r--r--drivers/media/video/saa7115.c10
-rw-r--r--drivers/media/video/saa7127.c12
-rw-r--r--drivers/media/video/saa7134/Kconfig1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c204
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c125
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c1027
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c85
-rw-r--r--drivers/media/video/saa7134/saa7134.h10
-rw-r--r--drivers/media/video/se401.c36
-rw-r--r--drivers/media/video/sn9c102/Kconfig2
-rw-r--r--drivers/media/video/sn9c102/Makefile17
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h17
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c274
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h14
-rw-r--r--drivers/media/video/sn9c102/sn9c102_hv7131d.c25
-rw-r--r--drivers/media/video/sn9c102/sn9c102_hv7131r.c366
-rw-r--r--drivers/media/video/sn9c102/sn9c102_mi0343.c130
-rw-r--r--drivers/media/video/sn9c102/sn9c102_mi0360.c338
-rw-r--r--drivers/media/video/sn9c102/sn9c102_ov7630.c121
-rw-r--r--drivers/media/video/sn9c102/sn9c102_ov7660.c234
-rw-r--r--drivers/media/video/sn9c102/sn9c102_pas106b.c23
-rw-r--r--drivers/media/video/sn9c102/sn9c102_pas202bcb.c77
-rw-r--r--drivers/media/video/sn9c102/sn9c102_sensor.h12
-rw-r--r--drivers/media/video/sn9c102/sn9c102_tas5110c1b.c18
-rw-r--r--drivers/media/video/sn9c102/sn9c102_tas5110d.c118
-rw-r--r--drivers/media/video/sn9c102/sn9c102_tas5130d1b.c19
-rw-r--r--drivers/media/video/tda7432.c1
-rw-r--r--drivers/media/video/tda8290.c144
-rw-r--r--drivers/media/video/tda9875.c1
-rw-r--r--drivers/media/video/tuner-core.c28
-rw-r--r--drivers/media/video/tvaudio.c5
-rw-r--r--drivers/media/video/tveeprom.c45
-rw-r--r--drivers/media/video/upd64031a.c4
-rw-r--r--drivers/media/video/upd64083.c5
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c18
-rw-r--r--drivers/media/video/usbvision/usbvision-cards.c1165
-rw-r--r--drivers/media/video/usbvision/usbvision-cards.h66
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c10
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c104
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c173
-rw-r--r--drivers/media/video/usbvision/usbvision.h39
-rw-r--r--drivers/media/video/v4l2-common.c41
-rw-r--r--drivers/media/video/videocodec.c3
-rw-r--r--drivers/media/video/videodev.c40
-rw-r--r--drivers/media/video/wm8739.c4
-rw-r--r--drivers/media/video/wm8775.c4
-rw-r--r--drivers/media/video/zr364xx.c929
-rw-r--r--drivers/message/fusion/mptlan.c36
-rw-r--r--drivers/mmc/mmc_sysfs.c27
-rw-r--r--drivers/mtd/Kconfig30
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/chips/Kconfig12
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c93
-rw-r--r--drivers/mtd/chips/fwh_lock.h3
-rw-r--r--drivers/mtd/devices/Kconfig26
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/at91_dataflash26.c485
-rw-r--r--drivers/mtd/devices/block2mtd.c67
-rw-r--r--drivers/mtd/maps/Kconfig47
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/alchemy-flash.c9
-rw-r--r--drivers/mtd/maps/ck804xrom.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c3
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c184
-rw-r--r--drivers/mtd/maps/pmcmsp-ramroot.c105
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c43
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/nand/Kconfig63
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/cafe.c25
-rw-r--r--drivers/mtd/nand/cmx270_nand.c267
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c518
-rw-r--r--drivers/mtd/onenand/Kconfig15
-rw-r--r--drivers/mtd/onenand/onenand_base.c21
-rw-r--r--drivers/mtd/ubi/Kconfig58
-rw-r--r--drivers/mtd/ubi/Kconfig.debug104
-rw-r--r--drivers/mtd/ubi/Makefile7
-rw-r--r--drivers/mtd/ubi/build.c848
-rw-r--r--drivers/mtd/ubi/cdev.c722
-rw-r--r--drivers/mtd/ubi/debug.c224
-rw-r--r--drivers/mtd/ubi/debug.h161
-rw-r--r--drivers/mtd/ubi/eba.c1241
-rw-r--r--drivers/mtd/ubi/gluebi.c323
-rw-r--r--drivers/mtd/ubi/io.c1259
-rw-r--r--drivers/mtd/ubi/kapi.c575
-rw-r--r--drivers/mtd/ubi/misc.c105
-rw-r--r--drivers/mtd/ubi/scan.c1368
-rw-r--r--drivers/mtd/ubi/scan.h167
-rw-r--r--drivers/mtd/ubi/ubi.h535
-rw-r--r--drivers/mtd/ubi/upd.c348
-rw-r--r--drivers/mtd/ubi/vmt.c809
-rw-r--r--drivers/mtd/ubi/vtbl.c809
-rw-r--r--drivers/mtd/ubi/wl.c1671
-rw-r--r--drivers/net/3c501.c1
-rw-r--r--drivers/net/3c505.c3
-rw-r--r--drivers/net/3c507.c1
-rw-r--r--drivers/net/3c509.c1
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/3c523.c3
-rw-r--r--drivers/net/3c527.c1
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/7990.c3
-rw-r--r--drivers/net/8139cp.c6
-rw-r--r--drivers/net/8139too.c7
-rw-r--r--drivers/net/82596.c1
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/a2065.c3
-rw-r--r--drivers/net/acenic.c1
-rw-r--r--drivers/net/amd8111e.c4
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/appletalk/ltpc.c15
-rw-r--r--drivers/net/arcnet/arc-rawmode.c2
-rw-r--r--drivers/net/arcnet/arcnet.c17
-rw-r--r--drivers/net/arcnet/capmode.c14
-rw-r--r--drivers/net/arcnet/rfc1051.c2
-rw-r--r--drivers/net/arcnet/rfc1201.c2
-rw-r--r--drivers/net/ariadne.c1
-rw-r--r--drivers/net/arm/am79c961a.c1
-rw-r--r--drivers/net/arm/at91_ether.c1
-rw-r--r--drivers/net/arm/ep93xx_eth.c1
-rw-r--r--drivers/net/arm/ether1.c1
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/at1700.c1
-rw-r--r--drivers/net/atari_bionet.c7
-rw-r--r--drivers/net/atari_pamsnet.c6
-rw-r--r--drivers/net/atarilance.c1
-rw-r--r--drivers/net/atl1/atl1_main.c33
-rw-r--r--drivers/net/atp.c1
-rw-r--r--drivers/net/au1000_eth.c3
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bmac.c1
-rw-r--r--drivers/net/bnx2.c44
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bonding/bond_3ad.c8
-rw-r--r--drivers/net/bonding/bond_alb.c36
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/cassini.c11
-rw-r--r--drivers/net/chelsio/sge.c39
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_defs.h5
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c71
-rw-r--r--drivers/net/cxgb3/sge.c39
-rw-r--r--drivers/net/cxgb3/t3_hw.c18
-rw-r--r--drivers/net/de600.c1
-rw-r--r--drivers/net/de620.c1
-rw-r--r--drivers/net/declance.c1
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c4
-rw-r--r--drivers/net/dgrs.c3
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/dm9000.c1
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000_main.c176
-rw-r--r--drivers/net/eepro.c1
-rw-r--r--drivers/net/eepro100.c6
-rw-r--r--drivers/net/eexpress.c1
-rw-r--r--drivers/net/ehea/ehea_main.c39
-rw-r--r--drivers/net/epic100.c3
-rw-r--r--drivers/net/eth16i.c1
-rw-r--r--drivers/net/ewrk3.c1
-rw-r--r--drivers/net/fealnx.c1
-rw-r--r--drivers/net/fec.c1
-rw-r--r--drivers/net/fec_8xx/fec_main.c5
-rw-r--r--drivers/net/forcedeth.c30
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c9
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/hamachi.c1
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c6
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hp100.c1
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c3
-rw-r--r--drivers/net/ibmlana.c1
-rw-r--r--drivers/net/ibmveth.c1
-rw-r--r--drivers/net/ioc3-eth.c13
-rw-r--r--drivers/net/irda/ali-ircc.c9
-rw-r--r--drivers/net/irda/au1k_ir.c6
-rw-r--r--drivers/net/irda/donauboe.c8
-rw-r--r--drivers/net/irda/irda-usb.c6
-rw-r--r--drivers/net/irda/mcs7780.c38
-rw-r--r--drivers/net/irda/nsc-ircc.c15
-rw-r--r--drivers/net/irda/pxaficp_ir.c6
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c5
-rw-r--r--drivers/net/irda/stir4200.c5
-rw-r--r--drivers/net/irda/via-ircc.c18
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/irda/w83977af_ir.c12
-rw-r--r--drivers/net/iseries_veth.c1
-rw-r--r--drivers/net/ixgb/ixgb_main.c36
-rw-r--r--drivers/net/ixp2000/ixpdev.c3
-rw-r--r--drivers/net/lance.c3
-rw-r--r--drivers/net/lasi_82596.c1
-rw-r--r--drivers/net/lib8390.c1
-rw-r--r--drivers/net/loopback.c24
-rw-r--r--drivers/net/lp486e.c1
-rw-r--r--drivers/net/mac89x0.c1
-rw-r--r--drivers/net/macb.c11
-rw-r--r--drivers/net/mace.c1
-rw-r--r--drivers/net/macmace.c4
-rw-r--r--drivers/net/meth.c11
-rw-r--r--drivers/net/mipsnet.c1
-rw-r--r--drivers/net/mv643xx_eth.c9
-rw-r--r--drivers/net/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/myri_sbus.c4
-rw-r--r--drivers/net/natsemi.c1
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c15
-rw-r--r--drivers/net/netxen/netxen_nic_init.c1
-rw-r--r--drivers/net/netxen/netxen_nic_main.c12
-rw-r--r--drivers/net/ni5010.c1
-rw-r--r--drivers/net/ni52.c3
-rw-r--r--drivers/net/ni65.c7
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/pasemi_mac.c14
-rw-r--r--drivers/net/pci-skeleton.c3
-rw-r--r--drivers/net/pcmcia/3c574_cs.c1
-rw-r--r--drivers/net/pcmcia/3c589_cs.c1
-rw-r--r--drivers/net/pcmcia/axnet_cs.c3
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c1
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c1
-rw-r--r--drivers/net/pcnet32.c1
-rw-r--r--drivers/net/phy/fixed.c6
-rw-r--r--drivers/net/phy/phy_device.c9
-rw-r--r--drivers/net/plip.c2
-rw-r--r--drivers/net/ppp_async.c4
-rw-r--r--drivers/net/ppp_generic.c6
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/pppoe.c156
-rw-r--r--drivers/net/pppox.c2
-rwxr-xr-xdrivers/net/qla3xxx.c5
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/rionet.c1
-rw-r--r--drivers/net/rrunner.c3
-rw-r--r--drivers/net/s2io.c4
-rw-r--r--drivers/net/saa9730.c1
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/sb1250-mac.c3
-rw-r--r--drivers/net/sc92031.c1
-rw-r--r--drivers/net/seeq8005.c1
-rw-r--r--drivers/net/sgiseeq.c3
-rw-r--r--drivers/net/sis190.c1
-rw-r--r--drivers/net/sis900.c56
-rw-r--r--drivers/net/sk98lin/skge.c12
-rw-r--r--drivers/net/skfp/skfddi.c3
-rw-r--r--drivers/net/skge.c6
-rw-r--r--drivers/net/sky2.c187
-rw-r--r--drivers/net/sky2.h11
-rw-r--r--drivers/net/slip.c2
-rw-r--r--drivers/net/smc911x.c2
-rw-r--r--drivers/net/smc9194.c1
-rw-r--r--drivers/net/smc91x.c1
-rw-r--r--drivers/net/sonic.c2
-rw-r--r--drivers/net/spider_net.c5
-rw-r--r--drivers/net/starfire.c1
-rw-r--r--drivers/net/sun3_82586.c3
-rw-r--r--drivers/net/sun3lance.c5
-rw-r--r--drivers/net/sunbmac.c1
-rw-r--r--drivers/net/sundance.c1
-rw-r--r--drivers/net/sungem.c46
-rw-r--r--drivers/net/sungem.h2
-rw-r--r--drivers/net/sunhme.c35
-rw-r--r--drivers/net/sunlance.c8
-rw-r--r--drivers/net/sunqe.c7
-rw-r--r--drivers/net/tc35815.c1
-rw-r--r--drivers/net/tg3.c82
-rw-r--r--drivers/net/tlan.c4
-rw-r--r--drivers/net/tokenring/3c359.c11
-rw-r--r--drivers/net/tokenring/ibmtr.c1
-rw-r--r--drivers/net/tokenring/lanstreamer.c7
-rw-r--r--drivers/net/tokenring/olympic.c18
-rw-r--r--drivers/net/tokenring/smctr.c6
-rw-r--r--drivers/net/tokenring/tms380tr.c6
-rw-r--r--drivers/net/tsi108_eth.c1
-rw-r--r--drivers/net/tulip/de2104x.c7
-rw-r--r--drivers/net/tulip/de4x5.c12
-rw-r--r--drivers/net/tulip/dmfe.c12
-rw-r--r--drivers/net/tulip/interrupt.c2
-rw-r--r--drivers/net/tulip/tulip_core.c30
-rw-r--r--drivers/net/tulip/uli526x.c22
-rw-r--r--drivers/net/tulip/winbond-840.c5
-rw-r--r--drivers/net/tulip/xircom_cb.c7
-rw-r--r--drivers/net/tulip/xircom_tulip_cb.c7
-rw-r--r--drivers/net/tun.c46
-rw-r--r--drivers/net/typhoon.c1
-rw-r--r--drivers/net/via-rhine.c1
-rw-r--r--drivers/net/via-velocity.c12
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wan/cycx_x25.c2
-rw-r--r--drivers/net/wan/dlci.c2
-rw-r--r--drivers/net/wan/dscc4.c3
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c2
-rw-r--r--drivers/net/wan/hdlc_fr.c5
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c16
-rw-r--r--drivers/net/wan/pc300_drv.c6
-rw-r--r--drivers/net/wan/pc300_tty.c6
-rw-r--r--drivers/net/wan/sbni.c5
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wan/syncppp.c2
-rw-r--r--drivers/net/wan/z85230.c4
-rw-r--r--drivers/net/wireless/Kconfig120
-rw-r--r--drivers/net/wireless/airo.c11
-rw-r--r--drivers/net/wireless/arlan-main.c1
-rw-r--r--drivers/net/wireless/atmel.c6
-rw-r--r--drivers/net/wireless/bcm43xx/Kconfig3
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_dma.c3
-rw-r--r--drivers/net/wireless/hostap/Kconfig3
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c23
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c25
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c7
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c7
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c17
-rw-r--r--drivers/net/wireless/ipw2100.c5
-rw-r--r--drivers/net/wireless/ipw2200.c4
-rw-r--r--drivers/net/wireless/netwave_cs.c1
-rw-r--r--drivers/net/wireless/orinoco.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c23
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/strip.c2
-rw-r--r--drivers/net/wireless/wavelan.c9
-rw-r--r--drivers/net/wireless/wavelan_cs.c6
-rw-r--r--drivers/net/wireless/zd1201.c6
-rw-r--r--drivers/net/wireless/zd1211rw/Kconfig3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/yellowfin.c1
-rw-r--r--drivers/net/znet.c1
-rw-r--r--drivers/parisc/led.c4
-rw-r--r--drivers/parport/parport_sunbpp.c10
-rw-r--r--drivers/pci/pci-driver.c6
-rw-r--r--drivers/pci/pci.c58
-rw-r--r--drivers/pci/probe.c45
-rw-r--r--drivers/pnp/card.c6
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_devmap.c58
-rw-r--r--drivers/s390/char/Makefile5
-rw-r--r--drivers/s390/char/con3215.c7
-rw-r--r--drivers/s390/char/con3270.c7
-rw-r--r--drivers/s390/char/sclp.c10
-rw-r--r--drivers/s390/char/sclp.h72
-rw-r--r--drivers/s390/char/sclp_chp.c196
-rw-r--r--drivers/s390/char/sclp_config.c75
-rw-r--r--drivers/s390/char/sclp_cpi.c4
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/sclp_rw.c16
-rw-r--r--drivers/s390/char/sclp_sdias.c255
-rw-r--r--drivers/s390/char/sclp_tty.c6
-rw-r--r--drivers/s390/char/sclp_vt220.c8
-rw-r--r--drivers/s390/char/vmlogrdr.c9
-rw-r--r--drivers/s390/char/zcore.c651
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/ccwgroup.c33
-rw-r--r--drivers/s390/cio/chp.c683
-rw-r--r--drivers/s390/cio/chp.h53
-rw-r--r--drivers/s390/cio/chsc.c1024
-rw-r--r--drivers/s390/cio/chsc.h42
-rw-r--r--drivers/s390/cio/cio.c52
-rw-r--r--drivers/s390/cio/cio.h17
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/cio/css.c201
-rw-r--r--drivers/s390/cio/css.h16
-rw-r--r--drivers/s390/cio/device.c252
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c7
-rw-r--r--drivers/s390/cio/idset.c112
-rw-r--r--drivers/s390/cio/idset.h25
-rw-r--r--drivers/s390/cio/ioasm.h5
-rw-r--r--drivers/s390/crypto/ap_bus.c28
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcmain.c51
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/netiucv.c21
-rw-r--r--drivers/s390/net/qeth_eddp.c30
-rw-r--r--drivers/s390/net/qeth_main.c45
-rw-r--r--drivers/s390/net/qeth_proc.c2
-rw-r--r--drivers/s390/net/qeth_tso.h14
-rw-r--r--drivers/s390/s390mach.c25
-rw-r--r--drivers/s390/sysinfo.c18
-rw-r--r--drivers/sbus/char/envctrl.c8
-rw-r--r--drivers/sbus/char/flash.c2
-rw-r--r--drivers/sbus/char/openprom.c25
-rw-r--r--drivers/sbus/char/vfc_dev.c3
-rw-r--r--drivers/sbus/sbus.c4
-rw-r--r--drivers/scsi/3w-xxxx.c11
-rw-r--r--drivers/scsi/Kconfig6
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/esp.c4394
-rw-r--r--drivers/scsi/esp.h406
-rw-r--r--drivers/scsi/esp_scsi.c2711
-rw-r--r--drivers/scsi/esp_scsi.h560
-rw-r--r--drivers/scsi/hosts.c4
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/scsi_netlink.c5
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sun_esp.c634
-rw-r--r--drivers/serial/8250.c8
-rw-r--r--drivers/serial/icom.c9
-rw-r--r--drivers/serial/icom.h1
-rw-r--r--drivers/serial/sunsu.c4
-rw-r--r--drivers/spi/spi_s3c24xx.c4
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c411
-rw-r--r--drivers/usb/atm/usbatm.c27
-rw-r--r--drivers/usb/class/cdc-acm.c81
-rw-r--r--drivers/usb/class/cdc-acm.h3
-rw-r--r--drivers/usb/core/Kconfig25
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c109
-rw-r--r--drivers/usb/core/driver.c267
-rw-r--r--drivers/usb/core/hcd.c34
-rw-r--r--drivers/usb/core/hcd.h3
-rw-r--r--drivers/usb/core/hub.c29
-rw-r--r--drivers/usb/core/inode.c2
-rw-r--r--drivers/usb/core/message.c83
-rw-r--r--drivers/usb/core/quirks.c2
-rw-r--r--drivers/usb/core/sysfs.c102
-rw-r--r--drivers/usb/core/usb.c46
-rw-r--r--drivers/usb/core/usb.h26
-rw-r--r--drivers/usb/gadget/Kconfig22
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/ether.c7
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c2500
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h579
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c98
-rw-r--r--drivers/usb/gadget/rndis.h2
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-fsl.h4
-rw-r--r--drivers/usb/host/ehci-hub.c4
-rw-r--r--drivers/usb/host/hc_crisv10.c4550
-rw-r--r--drivers/usb/host/hc_crisv10.h289
-rw-r--r--drivers/usb/host/ohci-hcd.c6
-rw-r--r--drivers/usb/host/ohci-pci.c32
-rw-r--r--drivers/usb/host/uhci-q.c16
-rw-r--r--drivers/usb/input/ati_remote2.c89
-rw-r--r--drivers/usb/input/gtco.c5
-rw-r--r--drivers/usb/misc/adutux.c48
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c4
-rw-r--r--drivers/usb/misc/ftdi-elan.c19
-rw-r--r--drivers/usb/misc/iowarrior.c20
-rw-r--r--drivers/usb/misc/ldusb.c3
-rw-r--r--drivers/usb/misc/usblcd.c7
-rw-r--r--drivers/usb/mon/mon_bin.c14
-rw-r--r--drivers/usb/mon/mon_main.c158
-rw-r--r--drivers/usb/mon/mon_text.c315
-rw-r--r--drivers/usb/mon/usb_mon.h6
-rw-r--r--drivers/usb/net/asix.c8
-rw-r--r--drivers/usb/net/catc.c30
-rw-r--r--drivers/usb/net/dm9601.c5
-rw-r--r--drivers/usb/net/gl620a.c2
-rw-r--r--drivers/usb/net/kaweth.c2
-rw-r--r--drivers/usb/net/net1080.c2
-rw-r--r--drivers/usb/net/pegasus.c14
-rw-r--r--drivers/usb/net/rndis_host.c114
-rw-r--r--drivers/usb/net/rtl8150.c1
-rw-r--r--drivers/usb/net/usbnet.c9
-rw-r--r--drivers/usb/net/usbnet.h1
-rw-r--r--drivers/usb/serial/Kconfig6
-rw-r--r--drivers/usb/serial/aircable.c7
-rw-r--r--drivers/usb/serial/ark3116.c3
-rw-r--r--drivers/usb/serial/cp2101.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c23
-rw-r--r--drivers/usb/serial/ftdi_sio.h1
-rw-r--r--drivers/usb/serial/io_edgeport.c139
-rw-r--r--drivers/usb/serial/io_edgeport.h6
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/kl5kusb105.c28
-rw-r--r--drivers/usb/serial/mct_u232.c12
-rw-r--r--drivers/usb/serial/mos7720.c34
-rw-r--r--drivers/usb/serial/mos7840.c233
-rw-r--r--drivers/usb/serial/omninet.c40
-rw-r--r--drivers/usb/serial/option.c23
-rw-r--r--drivers/usb/serial/sierra.c25
-rw-r--r--drivers/usb/serial/visor.c22
-rw-r--r--drivers/usb/serial/whiteheat.c8
-rw-r--r--drivers/usb/serial/whiteheat.h4
-rw-r--r--drivers/usb/storage/libusual.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/usb-skeleton.c41
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/aty/atyfb_base.c6
-rw-r--r--drivers/video/aty/radeon_base.c12
-rw-r--r--drivers/video/aty/radeon_monitor.c16
-rw-r--r--drivers/video/aty/radeonfb.h4
-rw-r--r--drivers/video/cg3.c2
-rw-r--r--drivers/video/igafb.c24
772 files changed, 51059 insertions, 19177 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 3a718f51350e..920c975bb6d4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_MMC) += mmc/
 obj-$(CONFIG_NEW_LEDS) += leds/
 obj-$(CONFIG_INFINIBAND) += infiniband/
-obj-$(CONFIG_IPATH_CORE) += infiniband/
 obj-$(CONFIG_SGI_SN) += sn/
 obj-y += firmware/
 obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0ae8b9310cbf..589b98b7b216 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -758,7 +758,8 @@ static void acpi_thermal_check(void *data)
 del_timer(&(tz->timer));
 } else {
 if (timer_pending(&(tz->timer)))
-mod_timer(&(tz->timer), (HZ * sleep_time) / 1000);
+mod_timer(&(tz->timer),
+jiffies + (HZ * sleep_time) / 1000);
 else {
 tz->timer.data = (unsigned long)tz;
 tz->timer.function = acpi_thermal_run;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index fd5475071acc..268e301775fc 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -47,14 +47,13 @@ static int amba_match(struct device *dev, struct device_driver *drv)
 static int amba_uevent(struct device *dev, char **envp, int nr_env, char *buf, int bufsz)
 {
 struct amba_device *pcdev = to_amba_device(dev);
+int retval = 0, i = 0, len = 0;
 
-if (nr_env < 2)
-return -ENOMEM;
-
-snprintf(buf, bufsz, "AMBA_ID=%08x", pcdev->periphid);
-*envp++ = buf;
-*envp++ = NULL;
-return 0;
+retval = add_uevent_var(envp, nr_env, &i,
+buf, bufsz, &len,
+"AMBA_ID=%08x", pcdev->periphid);
+envp[i] = NULL;
+return retval;
 }
 #else
 #define amba_uevent NULL
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index f48207865930..8dc3bc4f5863 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -878,6 +878,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 struct ata_port_info *port;
 struct pci_dev *host = NULL;
 struct sis_chipset *chipset = NULL;
+struct sis_chipset *sets;
 
 static struct sis_chipset sis_chipsets[] = {
 
@@ -932,10 +933,11 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 /* We have to find the bridge first */
 
-for (chipset = &sis_chipsets[0]; chipset->device; chipset++) {
-host = pci_get_device(PCI_VENDOR_ID_SI, chipset->device, NULL);
+for (sets = &sis_chipsets[0]; sets->device; sets++) {
+host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
 if (host != NULL) {
-if (chipset->device == 0x630) { /* SIS630 */
+chipset = sets; /* Match found */
+if (sets->device == 0x630) { /* SIS630 */
 u8 host_rev;
 pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
 if (host_rev >= 0x30) /* 630 ET */
@@ -946,7 +948,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 }
 
 /* Look for concealed bridges */
-if (host == NULL) {
+if (chipset == NULL) {
 /* Second check */
 u32 idemisc;
 u16 trueid;
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 3c372e08f77d..59651abfa4f8 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -821,7 +821,7 @@ static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
 }
 // cast needed as there is no %? for pointer differences
 PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-skb, skb->head, (long) (skb->end - skb->head));
+skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
 rx.handle = virt_to_bus (skb);
 rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
 if (rx_give (dev, &rx, pool))
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index fc518d85543d..02ad83d6b562 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -221,7 +221,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
 hdr->vpi = htons(vcc->vpi);
 hdr->vci = htons(vcc->vci);
 hdr->length = htonl(skb->len);
-memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb(skb);
 out_vcc->push(out_vcc,new_skb);
@@ -310,7 +310,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 goto done;
 }
 __net_timestamp(new_skb);
-memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
+skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 out_vcc->push(out_vcc,new_skb);
 atomic_inc(&vcc->stats->tx);
 atomic_inc(&out_vcc->stats->rx);
@@ -352,7 +352,7 @@ static struct atm_dev atmtcp_control_dev = {
 .ops = &atmtcp_c_dev_ops,
 .type = "atmtcp",
 .number = 999,
-.lock = SPIN_LOCK_UNLOCKED
+.lock = __SPIN_LOCK_UNLOCKED(atmtcp_control_dev.lock)
 };
 
 
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 8fccf018f165..0d3a38b1cb0b 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -536,7 +536,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 return 0;
 }
 skb_put(skb,length);
-skb_set_timestamp(skb, &eni_vcc->timestamp);
+skb->tstamp = eni_vcc->timestamp;
 DPRINTK("got len %ld\n",length);
 if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
 eni_vcc->rxing++;
@@ -701,7 +701,7 @@ static void get_service(struct atm_dev *dev)
 DPRINTK("Grr, servicing VCC %ld twice\n",vci);
 continue;
 }
-do_gettimeofday(&ENI_VCC(vcc)->timestamp);
+ENI_VCC(vcc)->timestamp = ktime_get_real();
 ENI_VCC(vcc)->next = NULL;
 if (vcc->qos.rxtp.traffic_class == ATM_CBR) {
 if (eni_dev->fast)
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index 385090c2a580..d04fefb0841f 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -59,7 +59,7 @@ struct eni_vcc {
 int rxing; /* number of pending PDUs */
 int servicing; /* number of waiting VCs (0 or 1) */
 int txing; /* number of pending TX bytes */
-struct timeval timestamp; /* for RX timing */
+ktime_t timestamp; /* for RX timing */
 struct atm_vcc *next; /* next pending RX */
 struct sk_buff *last; /* last PDU being DMAed (used to carry
 discard information) */
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index a7c0ed3107e3..405ee5e09221 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1,6 +1,4 @@
 /*
-$Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
-
 A FORE Systems 200E-series driver for ATM on Linux.
 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
 
@@ -1502,9 +1500,9 @@ fore200e_open(struct atm_vcc *vcc)
 /* pseudo-CBR bandwidth requested? */
 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-down(&fore200e->rate_sf);
+mutex_lock(&fore200e->rate_mtx);
 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 
 kfree(fore200e_vcc);
 vc_map->vcc = NULL;
@@ -1513,7 +1511,7 @@ fore200e_open(struct atm_vcc *vcc)
 
 /* reserve bandwidth */
 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 }
 
 vcc->itf = vcc->dev->number;
@@ -1599,9 +1597,9 @@ fore200e_close(struct atm_vcc* vcc)
 /* release reserved bandwidth, if any */
 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 
-down(&fore200e->rate_sf);
+mutex_lock(&fore200e->rate_mtx);
 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 
 clear_bit(ATM_VF_HASQOS, &vcc->flags);
 }
@@ -2064,16 +2062,16 @@ fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
 
 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
 
-down(&fore200e->rate_sf);
+mutex_lock(&fore200e->rate_mtx);
 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 return -EAGAIN;
 }
 
 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
 fore200e->available_cell_rate -= qos->txtp.max_pcr;
 
-up(&fore200e->rate_sf);
+mutex_unlock(&fore200e->rate_mtx);
 
 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
 
@@ -2459,7 +2457,7 @@ fore200e_initialize(struct fore200e* fore200e)
 
 DPRINTK(2, "device %s being initialized\n", fore200e->name);
 
-init_MUTEX(&fore200e->rate_sf);
+mutex_init(&fore200e->rate_mtx);
 spin_lock_init(&fore200e->q_lock);
 
 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index f9abfdac33e4..b85a54613dea 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -869,7 +869,7 @@ typedef struct fore200e {
 
 struct stats* stats; /* last snapshot of the stats */
 
-struct semaphore rate_sf; /* protects rate reservation ops */
+struct mutex rate_mtx; /* protects rate reservation ops */
 spinlock_t q_lock; /* protects queue ops */
 #ifdef FORE200E_USE_TASKLET
 struct tasklet_struct tx_tasklet; /* performs tx interrupt work */
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8510026b690a..d33aba6864c2 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1901,13 +1901,13 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 case ATM_AAL0:
 /* 2.10.1.5 raw cell receive */
 skb->len = ATM_AAL0_SDU;
-skb->tail = skb->data + skb->len;
+skb_set_tail_pointer(skb, skb->len);
 break;
 case ATM_AAL5:
 /* 2.10.1.2 aal5 receive */
 
 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
-skb->tail = skb->data + skb->len;
+skb_set_tail_pointer(skb, skb->len);
 #ifdef USE_CHECKSUM_HW
 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
 skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index b4b80140c398..057efbc55d38 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1065,7 +1065,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 vcc = vc->rx_vcc;
 
 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 
 if ((vcc->qos.aal == ATM_AAL0) ||
 (vcc->qos.aal == ATM_AAL34)) {
@@ -1194,7 +1195,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 }
 
 pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 sb_pool_remove(card, skb);
 
 skb_trim(skb, len);
@@ -1267,7 +1269,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 tail = readl(SAR_REG_RAWCT);
 
 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-queue->end - queue->head - 16,
+skb_end_pointer(queue) - queue->head - 16,
 PCI_DMA_FROMDEVICE);
 
 while (head != tail) {
@@ -1363,7 +1365,8 @@ drop:
 queue = card->raw_cell_head;
 pci_dma_sync_single_for_cpu(card->pcidev,
 IDT77252_PRV_PADDR(queue),
-queue->end - queue->data,
+(skb_end_pointer(queue) -
+queue->data),
 PCI_DMA_FROMDEVICE);
 } else {
 card->raw_cell_head = NULL;
@@ -1816,7 +1819,8 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
 u32 handle;
 u32 addr;
 
-skb->data = skb->tail = skb->head;
+skb->data = skb->head;
+skb_reset_tail_pointer(skb);
 skb->len = 0;
 
 skb_reserve(skb, 16);
@@ -1835,7 +1839,6 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
 skb_put(skb, SAR_FB_SIZE_3);
 break;
 default:
-dev_kfree_skb(skb);
 return -1;
 }
 
@@ -1874,7 +1877,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 }
 
 paddr = pci_map_single(card->pcidev, skb->data,
-skb->end - skb->data,
+skb_end_pointer(skb) - skb->data,
 PCI_DMA_FROMDEVICE);
 IDT77252_PRV_PADDR(skb) = paddr;
 
@@ -1888,7 +1891,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
 
 outunmap:
 pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
 
 handle = IDT77252_PRV_POOL(skb);
 card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1905,12 +1908,14 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
 int err;
 
 pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 
 err = push_rx_skb(card, skb, POOL_QUEUE(handle));
 if (err) {
 pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-skb->end - skb->data, PCI_DMA_FROMDEVICE);
+skb_end_pointer(skb) - skb->data,
+PCI_DMA_FROMDEVICE);
 sb_pool_remove(card, skb);
 dev_kfree_skb(skb);
 }
@@ -3122,7 +3127,8 @@ deinit_card(struct idt77252_dev *card)
 if (skb) {
 pci_unmap_single(card->pcidev,
 IDT77252_PRV_PADDR(skb),
-skb->end - skb->data,
+(skb_end_pointer(skb) -
+skb->data),
 PCI_DMA_FROMDEVICE);
 card->sbpool[i].skb[j] = NULL;
 dev_kfree_skb(skb);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index aab9b3733d52..14ced85b3f54 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -2208,7 +2208,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 if (i == 1 && ns_rsqe_eopdu(rsqe))
 *((u32 *) sb->data) |= 0x00000002;
 skb_put(sb, NS_AAL0_HEADER);
-memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
+memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
 skb_put(sb, ATM_CELL_PAYLOAD);
 ATM_SKB(sb)->vcc = vcc;
 __net_timestamp(sb);
@@ -2252,7 +2252,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 vc->rx_iov = iovb;
 NS_SKB(iovb)->iovcnt = 0;
 iovb->len = 0;
-iovb->tail = iovb->data = iovb->head;
+iovb->data = iovb->head;
+skb_reset_tail_pointer(iovb);
 NS_SKB(iovb)->vcc = vcc;
 /* IMPORTANT: a pointer to the sk_buff containing the small or large
 buffer is stored as iovec base, NOT a pointer to the
@@ -2265,7 +2266,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
 NS_SKB(iovb)->iovcnt = 0;
 iovb->len = 0;
-iovb->tail = iovb->data = iovb->head;
+iovb->data = iovb->head;
+skb_reset_tail_pointer(iovb);
 NS_SKB(iovb)->vcc = vcc;
 }
 iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
@@ -2393,7 +2395,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 skb->destructor = ns_lb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 skb_push(skb, NS_SMBUFSIZE);
-memcpy(skb->data, sb->data, NS_SMBUFSIZE);
+skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
 skb_put(skb, len - NS_SMBUFSIZE);
 ATM_SKB(skb)->vcc = vcc;
 __net_timestamp(skb);
@@ -2477,7 +2479,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 /* Copy the small buffer to the huge buffer */
 sb = (struct sk_buff *) iov->iov_base;
-memcpy(hb->data, sb->data, iov->iov_len);
+skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
 skb_put(hb, iov->iov_len);
 remaining = len - iov->iov_len;
 iov++;
@@ -2489,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 lb = (struct sk_buff *) iov->iov_base;
 tocopy = min_t(int, remaining, iov->iov_len);
-memcpy(hb->tail, lb->data, tocopy);
+skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
 skb_put(hb, tocopy);
 iov++;
 remaining -= tocopy;
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 22220733f76f..1ec0654665cf 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
 
 static struct list_head attribute_container_list;
 
-static DECLARE_MUTEX(attribute_container_mutex);
+static DEFINE_MUTEX(attribute_container_mutex);
 
 /**
 * attribute_container_register - register an attribute container
@@ -77,9 +77,9 @@ attribute_container_register(struct attribute_container *cont)
 klist_init(&cont->containers,internal_container_klist_get,
 internal_container_klist_put);
 
-down(&attribute_container_mutex);
+mutex_lock(&attribute_container_mutex);
 list_add_tail(&cont->node, &attribute_container_list);
-up(&attribute_container_mutex);
+mutex_unlock(&attribute_container_mutex);
 
 return 0;
 }
@@ -94,7 +94,7 @@ int
94attribute_container_unregister(struct attribute_container *cont) 94attribute_container_unregister(struct attribute_container *cont)
95{ 95{
96 int retval = -EBUSY; 96 int retval = -EBUSY;
97 down(&attribute_container_mutex); 97 mutex_lock(&attribute_container_mutex);
98 spin_lock(&cont->containers.k_lock); 98 spin_lock(&cont->containers.k_lock);
99 if (!list_empty(&cont->containers.k_list)) 99 if (!list_empty(&cont->containers.k_list))
100 goto out; 100 goto out;
@@ -102,7 +102,7 @@ attribute_container_unregister(struct attribute_container *cont)
102 list_del(&cont->node); 102 list_del(&cont->node);
103 out: 103 out:
104 spin_unlock(&cont->containers.k_lock); 104 spin_unlock(&cont->containers.k_lock);
105 up(&attribute_container_mutex); 105 mutex_unlock(&attribute_container_mutex);
106 return retval; 106 return retval;
107 107
108} 108}
@@ -145,7 +145,7 @@ attribute_container_add_device(struct device *dev,
145{ 145{
146 struct attribute_container *cont; 146 struct attribute_container *cont;
147 147
148 down(&attribute_container_mutex); 148 mutex_lock(&attribute_container_mutex);
149 list_for_each_entry(cont, &attribute_container_list, node) { 149 list_for_each_entry(cont, &attribute_container_list, node) {
150 struct internal_container *ic; 150 struct internal_container *ic;
151 151
@@ -173,7 +173,7 @@ attribute_container_add_device(struct device *dev,
173 attribute_container_add_class_device(&ic->classdev); 173 attribute_container_add_class_device(&ic->classdev);
174 klist_add_tail(&ic->node, &cont->containers); 174 klist_add_tail(&ic->node, &cont->containers);
175 } 175 }
176 up(&attribute_container_mutex); 176 mutex_unlock(&attribute_container_mutex);
177} 177}
178 178
179/* FIXME: can't break out of this unless klist_iter_exit is also 179/* FIXME: can't break out of this unless klist_iter_exit is also
@@ -211,7 +211,7 @@ attribute_container_remove_device(struct device *dev,
211{ 211{
212 struct attribute_container *cont; 212 struct attribute_container *cont;
213 213
214 down(&attribute_container_mutex); 214 mutex_lock(&attribute_container_mutex);
215 list_for_each_entry(cont, &attribute_container_list, node) { 215 list_for_each_entry(cont, &attribute_container_list, node) {
216 struct internal_container *ic; 216 struct internal_container *ic;
217 struct klist_iter iter; 217 struct klist_iter iter;
@@ -234,7 +234,7 @@ attribute_container_remove_device(struct device *dev,
234 } 234 }
235 } 235 }
236 } 236 }
237 up(&attribute_container_mutex); 237 mutex_unlock(&attribute_container_mutex);
238} 238}
239 239
240/** 240/**
@@ -255,7 +255,7 @@ attribute_container_device_trigger(struct device *dev,
255{ 255{
256 struct attribute_container *cont; 256 struct attribute_container *cont;
257 257
258 down(&attribute_container_mutex); 258 mutex_lock(&attribute_container_mutex);
259 list_for_each_entry(cont, &attribute_container_list, node) { 259 list_for_each_entry(cont, &attribute_container_list, node) {
260 struct internal_container *ic; 260 struct internal_container *ic;
261 struct klist_iter iter; 261 struct klist_iter iter;
@@ -273,7 +273,7 @@ attribute_container_device_trigger(struct device *dev,
273 fn(cont, dev, &ic->classdev); 273 fn(cont, dev, &ic->classdev);
274 } 274 }
275 } 275 }
276 up(&attribute_container_mutex); 276 mutex_unlock(&attribute_container_mutex);
277} 277}
278 278
279/** 279/**
@@ -295,12 +295,12 @@ attribute_container_trigger(struct device *dev,
295{ 295{
296 struct attribute_container *cont; 296 struct attribute_container *cont;
297 297
298 down(&attribute_container_mutex); 298 mutex_lock(&attribute_container_mutex);
299 list_for_each_entry(cont, &attribute_container_list, node) { 299 list_for_each_entry(cont, &attribute_container_list, node) {
300 if (cont->match(cont, dev)) 300 if (cont->match(cont, dev))
301 fn(cont, dev); 301 fn(cont, dev);
302 } 302 }
303 up(&attribute_container_mutex); 303 mutex_unlock(&attribute_container_mutex);
304} 304}
305 305
306/** 306/**
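[editor's note] The attribute_container changes swap a semaphore used as a mutex for a real struct mutex. A minimal sketch of the resulting pattern, with a hypothetical list and lock name:

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(example_list);		/* hypothetical list */
static DEFINE_MUTEX(example_mutex);	/* replaces DECLARE_MUTEX + down/up */

static void example_add(struct list_head *node)
{
	mutex_lock(&example_mutex);	/* may sleep: process context only */
	list_add_tail(node, &example_list);
	mutex_unlock(&example_mutex);
}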
diff --git a/drivers/base/base.h b/drivers/base/base.h
index de7e1442ce60..d597f2659b23 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -16,7 +16,7 @@ extern int cpu_dev_init(void);
16extern int attribute_container_init(void); 16extern int attribute_container_init(void);
17 17
18extern int bus_add_device(struct device * dev); 18extern int bus_add_device(struct device * dev);
19extern int bus_attach_device(struct device * dev); 19extern void bus_attach_device(struct device * dev);
20extern void bus_remove_device(struct device * dev); 20extern void bus_remove_device(struct device * dev);
21extern struct bus_type *get_bus(struct bus_type * bus); 21extern struct bus_type *get_bus(struct bus_type * bus);
22extern void put_bus(struct bus_type * bus); 22extern void put_bus(struct bus_type * bus);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 253868e03c70..1d76e2349654 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -27,6 +27,9 @@
27#define to_driver(obj) container_of(obj, struct device_driver, kobj) 27#define to_driver(obj) container_of(obj, struct device_driver, kobj)
28 28
29 29
30static int __must_check bus_rescan_devices_helper(struct device *dev,
31 void *data);
32
30static ssize_t 33static ssize_t
31drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf) 34drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
32{ 35{
@@ -60,8 +63,19 @@ static struct sysfs_ops driver_sysfs_ops = {
60 63
61static void driver_release(struct kobject * kobj) 64static void driver_release(struct kobject * kobj)
62{ 65{
63 struct device_driver * drv = to_driver(kobj); 66 /*
64 complete(&drv->unloaded); 67 * Yes this is an empty release function, it is this way because struct
68 * device is always a static object, not a dynamic one. Yes, this is
69 * not nice and bad, but remember, drivers are code, reference counted
70 * by the module count, not a device, which is really data. And yes,
71 * in the future I do want to have all drivers be created dynamically,
72 * and am working toward that goal, but it will take a bit longer...
73 *
74 * But do not let this example give _anyone_ the idea that they can
75 * create a release function without any code in it at all, to do that
76 * is almost always wrong. If you have any questions about this,
77 * please send an email to <greg@kroah.com>
78 */
65} 79}
66 80
67static struct kobj_type ktype_driver = { 81static struct kobj_type ktype_driver = {
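[editor's note] The emptied driver_release() above is explicitly flagged as a special case: drivers are static objects pinned by the module count. For a dynamically allocated kobject container the release callback is where the memory actually goes away. A minimal sketch of what a non-empty release normally looks like, using a hypothetical container type:

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical dynamically allocated object */
	struct kobject kobj;
	int value;
};

static void my_obj_release(struct kobject *kobj)
{
	struct my_obj *obj = container_of(kobj, struct my_obj, kobj);

	kfree(obj);			/* last reference dropped: free the container */
}

static struct kobj_type my_ktype = {
	.release = my_obj_release,
};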
@@ -133,7 +147,6 @@ static decl_subsys(bus, &ktype_bus, NULL);
133 147
134 148
135#ifdef CONFIG_HOTPLUG 149#ifdef CONFIG_HOTPLUG
136
137/* Manually detach a device from its associated driver. */ 150/* Manually detach a device from its associated driver. */
138static int driver_helper(struct device *dev, void *data) 151static int driver_helper(struct device *dev, void *data)
139{ 152{
@@ -199,6 +212,33 @@ static ssize_t driver_bind(struct device_driver *drv,
199} 212}
200static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind); 213static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
201 214
215static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
216{
217 return sprintf(buf, "%d\n", bus->drivers_autoprobe);
218}
219
220static ssize_t store_drivers_autoprobe(struct bus_type *bus,
221 const char *buf, size_t count)
222{
223 if (buf[0] == '0')
224 bus->drivers_autoprobe = 0;
225 else
226 bus->drivers_autoprobe = 1;
227 return count;
228}
229
230static ssize_t store_drivers_probe(struct bus_type *bus,
231 const char *buf, size_t count)
232{
233 struct device *dev;
234
235 dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
236 if (!dev)
237 return -ENODEV;
238 if (bus_rescan_devices_helper(dev, NULL) != 0)
239 return -EINVAL;
240 return count;
241}
202#endif 242#endif
203 243
204static struct device * next_device(struct klist_iter * i) 244static struct device * next_device(struct klist_iter * i)
@@ -418,21 +458,21 @@ out_put:
418 * - Add device to bus's list of devices. 458 * - Add device to bus's list of devices.
419 * - Try to attach to driver. 459 * - Try to attach to driver.
420 */ 460 */
421int bus_attach_device(struct device * dev) 461void bus_attach_device(struct device * dev)
422{ 462{
423 struct bus_type *bus = dev->bus; 463 struct bus_type *bus = dev->bus;
424 int ret = 0; 464 int ret = 0;
425 465
426 if (bus) { 466 if (bus) {
427 dev->is_registered = 1; 467 dev->is_registered = 1;
428 ret = device_attach(dev); 468 if (bus->drivers_autoprobe)
429 if (ret >= 0) { 469 ret = device_attach(dev);
470 WARN_ON(ret < 0);
471 if (ret >= 0)
430 klist_add_tail(&dev->knode_bus, &bus->klist_devices); 472 klist_add_tail(&dev->knode_bus, &bus->klist_devices);
431 ret = 0; 473 else
432 } else
433 dev->is_registered = 0; 474 dev->is_registered = 0;
434 } 475 }
435 return ret;
436} 476}
437 477
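[editor's note] Together, the new drivers_autoprobe/drivers_probe bus attributes and the autoprobe check in bus_attach_device() let userspace defer binding and trigger it explicitly. A minimal userspace sketch; the "foo" bus and "foo-device.0" id are hypothetical, the attribute file names come from the hunk above:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* disable automatic probing on the (hypothetical) "foo" bus */
	int fd = open("/sys/bus/foo/drivers_autoprobe", O_WRONLY);
	if (fd >= 0) {
		write(fd, "0", 1);
		close(fd);
	}

	/* ... configure the device from userspace ... */

	/* then ask the bus to probe one device by its bus id */
	fd = open("/sys/bus/foo/drivers_probe", O_WRONLY);
	if (fd >= 0) {
		write(fd, "foo-device.0", 12);
		close(fd);
	}
	return 0;
}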
438/** 478/**
@@ -515,9 +555,41 @@ static void remove_bind_files(struct device_driver *drv)
515 driver_remove_file(drv, &driver_attr_bind); 555 driver_remove_file(drv, &driver_attr_bind);
516 driver_remove_file(drv, &driver_attr_unbind); 556 driver_remove_file(drv, &driver_attr_unbind);
517} 557}
558
559static int add_probe_files(struct bus_type *bus)
560{
561 int retval;
562
563 bus->drivers_probe_attr.attr.name = "drivers_probe";
564 bus->drivers_probe_attr.attr.mode = S_IWUSR;
565 bus->drivers_probe_attr.attr.owner = bus->owner;
566 bus->drivers_probe_attr.store = store_drivers_probe;
567 retval = bus_create_file(bus, &bus->drivers_probe_attr);
568 if (retval)
569 goto out;
570
571 bus->drivers_autoprobe_attr.attr.name = "drivers_autoprobe";
572 bus->drivers_autoprobe_attr.attr.mode = S_IWUSR | S_IRUGO;
573 bus->drivers_autoprobe_attr.attr.owner = bus->owner;
574 bus->drivers_autoprobe_attr.show = show_drivers_autoprobe;
575 bus->drivers_autoprobe_attr.store = store_drivers_autoprobe;
576 retval = bus_create_file(bus, &bus->drivers_autoprobe_attr);
577 if (retval)
578 bus_remove_file(bus, &bus->drivers_probe_attr);
579out:
580 return retval;
581}
582
583static void remove_probe_files(struct bus_type *bus)
584{
585 bus_remove_file(bus, &bus->drivers_autoprobe_attr);
586 bus_remove_file(bus, &bus->drivers_probe_attr);
587}
518#else 588#else
519static inline int add_bind_files(struct device_driver *drv) { return 0; } 589static inline int add_bind_files(struct device_driver *drv) { return 0; }
520static inline void remove_bind_files(struct device_driver *drv) {} 590static inline void remove_bind_files(struct device_driver *drv) {}
591static inline int add_probe_files(struct bus_type *bus) { return 0; }
592static inline void remove_probe_files(struct bus_type *bus) {}
521#endif 593#endif
522 594
523/** 595/**
@@ -531,7 +603,7 @@ int bus_add_driver(struct device_driver *drv)
531 int error = 0; 603 int error = 0;
532 604
533 if (!bus) 605 if (!bus)
534 return 0; 606 return -EINVAL;
535 607
536 pr_debug("bus %s: add driver %s\n", bus->name, drv->name); 608 pr_debug("bus %s: add driver %s\n", bus->name, drv->name);
537 error = kobject_set_name(&drv->kobj, "%s", drv->name); 609 error = kobject_set_name(&drv->kobj, "%s", drv->name);
@@ -541,9 +613,11 @@ int bus_add_driver(struct device_driver *drv)
541 if ((error = kobject_register(&drv->kobj))) 613 if ((error = kobject_register(&drv->kobj)))
542 goto out_put_bus; 614 goto out_put_bus;
543 615
544 error = driver_attach(drv); 616 if (drv->bus->drivers_autoprobe) {
545 if (error) 617 error = driver_attach(drv);
546 goto out_unregister; 618 if (error)
619 goto out_unregister;
620 }
547 klist_add_tail(&drv->knode_bus, &bus->klist_drivers); 621 klist_add_tail(&drv->knode_bus, &bus->klist_drivers);
548 module_add_driver(drv->owner, drv); 622 module_add_driver(drv->owner, drv);
549 623
@@ -605,8 +679,6 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
605 ret = device_attach(dev); 679 ret = device_attach(dev);
606 if (dev->parent) 680 if (dev->parent)
607 up(&dev->parent->sem); 681 up(&dev->parent->sem);
608 if (ret > 0)
609 ret = 0;
610 } 682 }
611 return ret < 0 ? ret : 0; 683 return ret < 0 ? ret : 0;
612} 684}
@@ -762,6 +834,12 @@ int bus_register(struct bus_type * bus)
762 834
763 klist_init(&bus->klist_devices, klist_devices_get, klist_devices_put); 835 klist_init(&bus->klist_devices, klist_devices_get, klist_devices_put);
764 klist_init(&bus->klist_drivers, NULL, NULL); 836 klist_init(&bus->klist_drivers, NULL, NULL);
837
838 bus->drivers_autoprobe = 1;
839 retval = add_probe_files(bus);
840 if (retval)
841 goto bus_probe_files_fail;
842
765 retval = bus_add_attrs(bus); 843 retval = bus_add_attrs(bus);
766 if (retval) 844 if (retval)
767 goto bus_attrs_fail; 845 goto bus_attrs_fail;
@@ -770,6 +848,8 @@ int bus_register(struct bus_type * bus)
770 return 0; 848 return 0;
771 849
772bus_attrs_fail: 850bus_attrs_fail:
851 remove_probe_files(bus);
852bus_probe_files_fail:
773 kset_unregister(&bus->drivers); 853 kset_unregister(&bus->drivers);
774bus_drivers_fail: 854bus_drivers_fail:
775 kset_unregister(&bus->devices); 855 kset_unregister(&bus->devices);
@@ -779,7 +859,6 @@ out:
779 return retval; 859 return retval;
780} 860}
781 861
782
783/** 862/**
784 * bus_unregister - remove a bus from the system 863 * bus_unregister - remove a bus from the system
785 * @bus: bus. 864 * @bus: bus.
@@ -791,6 +870,7 @@ void bus_unregister(struct bus_type * bus)
791{ 870{
792 pr_debug("bus %s: unregistering\n", bus->name); 871 pr_debug("bus %s: unregistering\n", bus->name);
793 bus_remove_attrs(bus); 872 bus_remove_attrs(bus);
873 remove_probe_files(bus);
794 kset_unregister(&bus->drivers); 874 kset_unregister(&bus->drivers);
795 kset_unregister(&bus->devices); 875 kset_unregister(&bus->devices);
796 subsystem_unregister(&bus->subsys); 876 subsystem_unregister(&bus->subsys);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d5968128be2b..80bbb2074636 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -145,6 +145,7 @@ int class_register(struct class * cls)
145 INIT_LIST_HEAD(&cls->children); 145 INIT_LIST_HEAD(&cls->children);
146 INIT_LIST_HEAD(&cls->devices); 146 INIT_LIST_HEAD(&cls->devices);
147 INIT_LIST_HEAD(&cls->interfaces); 147 INIT_LIST_HEAD(&cls->interfaces);
148 kset_init(&cls->class_dirs);
148 init_MUTEX(&cls->sem); 149 init_MUTEX(&cls->sem);
149 error = kobject_set_name(&cls->subsys.kset.kobj, "%s", cls->name); 150 error = kobject_set_name(&cls->subsys.kset.kobj, "%s", cls->name);
150 if (error) 151 if (error)
@@ -163,7 +164,6 @@ int class_register(struct class * cls)
163void class_unregister(struct class * cls) 164void class_unregister(struct class * cls)
164{ 165{
165 pr_debug("device class '%s': unregistering\n", cls->name); 166 pr_debug("device class '%s': unregistering\n", cls->name);
166 kobject_unregister(cls->virtual_dir);
167 remove_class_attrs(cls); 167 remove_class_attrs(cls);
168 subsystem_unregister(&cls->subsys); 168 subsystem_unregister(&cls->subsys);
169} 169}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d7fcf823a42a..8aa090da1cd7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -43,7 +43,8 @@ int (*platform_notify_remove)(struct device * dev) = NULL;
43const char *dev_driver_string(struct device *dev) 43const char *dev_driver_string(struct device *dev)
44{ 44{
45 return dev->driver ? dev->driver->name : 45 return dev->driver ? dev->driver->name :
46 (dev->bus ? dev->bus->name : ""); 46 (dev->bus ? dev->bus->name :
47 (dev->class ? dev->class->name : ""));
47} 48}
48EXPORT_SYMBOL(dev_driver_string); 49EXPORT_SYMBOL(dev_driver_string);
49 50
@@ -119,6 +120,8 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
119 120
120 if (ktype == &ktype_device) { 121 if (ktype == &ktype_device) {
121 struct device *dev = to_dev(kobj); 122 struct device *dev = to_dev(kobj);
123 if (dev->uevent_suppress)
124 return 0;
122 if (dev->bus) 125 if (dev->bus)
123 return 1; 126 return 1;
124 if (dev->class) 127 if (dev->class)
@@ -156,6 +159,11 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp,
156 "MINOR=%u", MINOR(dev->devt)); 159 "MINOR=%u", MINOR(dev->devt));
157 } 160 }
158 161
162 if (dev->type && dev->type->name)
163 add_uevent_var(envp, num_envp, &i,
164 buffer, buffer_size, &length,
165 "DEVTYPE=%s", dev->type->name);
166
159 if (dev->driver) 167 if (dev->driver)
160 add_uevent_var(envp, num_envp, &i, 168 add_uevent_var(envp, num_envp, &i,
161 buffer, buffer_size, &length, 169 buffer, buffer_size, &length,
@@ -238,71 +246,152 @@ static struct kset_uevent_ops device_uevent_ops = {
238 .uevent = dev_uevent, 246 .uevent = dev_uevent,
239}; 247};
240 248
249static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
250 char *buf)
251{
252 struct kobject *top_kobj;
253 struct kset *kset;
254 char *envp[32];
255 char data[PAGE_SIZE];
256 char *pos;
257 int i;
258 size_t count = 0;
259 int retval;
260
261 /* search the kset, the device belongs to */
262 top_kobj = &dev->kobj;
263 if (!top_kobj->kset && top_kobj->parent) {
264 do {
265 top_kobj = top_kobj->parent;
266 } while (!top_kobj->kset && top_kobj->parent);
267 }
268 if (!top_kobj->kset)
269 goto out;
270 kset = top_kobj->kset;
271 if (!kset->uevent_ops || !kset->uevent_ops->uevent)
272 goto out;
273
274 /* respect filter */
275 if (kset->uevent_ops && kset->uevent_ops->filter)
276 if (!kset->uevent_ops->filter(kset, &dev->kobj))
277 goto out;
278
279 /* let the kset specific function add its keys */
280 pos = data;
281 retval = kset->uevent_ops->uevent(kset, &dev->kobj,
282 envp, ARRAY_SIZE(envp),
283 pos, PAGE_SIZE);
284 if (retval)
285 goto out;
286
287 /* copy keys to file */
288 for (i = 0; envp[i]; i++) {
289 pos = &buf[count];
290 count += sprintf(pos, "%s\n", envp[i]);
291 }
292out:
293 return count;
294}
295
241static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, 296static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
242 const char *buf, size_t count) 297 const char *buf, size_t count)
243{ 298{
299 if (memcmp(buf, "add", 3) != 0)
300 dev_err(dev, "uevent: unsupported action-string; this will "
301 "be ignored in a future kernel version");
244 kobject_uevent(&dev->kobj, KOBJ_ADD); 302 kobject_uevent(&dev->kobj, KOBJ_ADD);
245 return count; 303 return count;
246} 304}
247 305
248static int device_add_groups(struct device *dev) 306static int device_add_attributes(struct device *dev,
307 struct device_attribute *attrs)
308{
309 int error = 0;
310 int i;
311
312 if (attrs) {
313 for (i = 0; attr_name(attrs[i]); i++) {
314 error = device_create_file(dev, &attrs[i]);
315 if (error)
316 break;
317 }
318 if (error)
319 while (--i >= 0)
320 device_remove_file(dev, &attrs[i]);
321 }
322 return error;
323}
324
325static void device_remove_attributes(struct device *dev,
326 struct device_attribute *attrs)
249{ 327{
250 int i; 328 int i;
329
330 if (attrs)
331 for (i = 0; attr_name(attrs[i]); i++)
332 device_remove_file(dev, &attrs[i]);
333}
334
335static int device_add_groups(struct device *dev,
336 struct attribute_group **groups)
337{
251 int error = 0; 338 int error = 0;
339 int i;
252 340
253 if (dev->groups) { 341 if (groups) {
254 for (i = 0; dev->groups[i]; i++) { 342 for (i = 0; groups[i]; i++) {
255 error = sysfs_create_group(&dev->kobj, dev->groups[i]); 343 error = sysfs_create_group(&dev->kobj, groups[i]);
256 if (error) { 344 if (error) {
257 while (--i >= 0) 345 while (--i >= 0)
258 sysfs_remove_group(&dev->kobj, dev->groups[i]); 346 sysfs_remove_group(&dev->kobj, groups[i]);
259 goto out; 347 break;
260 } 348 }
261 } 349 }
262 } 350 }
263out:
264 return error; 351 return error;
265} 352}
266 353
267static void device_remove_groups(struct device *dev) 354static void device_remove_groups(struct device *dev,
355 struct attribute_group **groups)
268{ 356{
269 int i; 357 int i;
270 if (dev->groups) { 358
271 for (i = 0; dev->groups[i]; i++) { 359 if (groups)
272 sysfs_remove_group(&dev->kobj, dev->groups[i]); 360 for (i = 0; groups[i]; i++)
273 } 361 sysfs_remove_group(&dev->kobj, groups[i]);
274 }
275} 362}
276 363
277static int device_add_attrs(struct device *dev) 364static int device_add_attrs(struct device *dev)
278{ 365{
279 struct class *class = dev->class; 366 struct class *class = dev->class;
280 struct device_type *type = dev->type; 367 struct device_type *type = dev->type;
281 int error = 0; 368 int error;
282 int i;
283 369
284 if (class && class->dev_attrs) { 370 if (class) {
285 for (i = 0; attr_name(class->dev_attrs[i]); i++) { 371 error = device_add_attributes(dev, class->dev_attrs);
286 error = device_create_file(dev, &class->dev_attrs[i]);
287 if (error)
288 break;
289 }
290 if (error) 372 if (error)
291 while (--i >= 0) 373 return error;
292 device_remove_file(dev, &class->dev_attrs[i]);
293 } 374 }
294 375
295 if (type && type->attrs) { 376 if (type) {
296 for (i = 0; attr_name(type->attrs[i]); i++) { 377 error = device_add_groups(dev, type->groups);
297 error = device_create_file(dev, &type->attrs[i]);
298 if (error)
299 break;
300 }
301 if (error) 378 if (error)
302 while (--i >= 0) 379 goto err_remove_class_attrs;
303 device_remove_file(dev, &type->attrs[i]);
304 } 380 }
305 381
382 error = device_add_groups(dev, dev->groups);
383 if (error)
384 goto err_remove_type_groups;
385
386 return 0;
387
388 err_remove_type_groups:
389 if (type)
390 device_remove_groups(dev, type->groups);
391 err_remove_class_attrs:
392 if (class)
393 device_remove_attributes(dev, class->dev_attrs);
394
306 return error; 395 return error;
307} 396}
308 397
@@ -310,17 +399,14 @@ static void device_remove_attrs(struct device *dev)
310{ 399{
311 struct class *class = dev->class; 400 struct class *class = dev->class;
312 struct device_type *type = dev->type; 401 struct device_type *type = dev->type;
313 int i;
314 402
315 if (class && class->dev_attrs) { 403 device_remove_groups(dev, dev->groups);
316 for (i = 0; attr_name(class->dev_attrs[i]); i++)
317 device_remove_file(dev, &class->dev_attrs[i]);
318 }
319 404
320 if (type && type->attrs) { 405 if (type)
321 for (i = 0; attr_name(type->attrs[i]); i++) 406 device_remove_groups(dev, type->groups);
322 device_remove_file(dev, &type->attrs[i]); 407
323 } 408 if (class)
409 device_remove_attributes(dev, class->dev_attrs);
324} 410}
325 411
326 412
@@ -394,9 +480,10 @@ void device_remove_bin_file(struct device *dev, struct bin_attribute *attr)
394EXPORT_SYMBOL_GPL(device_remove_bin_file); 480EXPORT_SYMBOL_GPL(device_remove_bin_file);
395 481
396/** 482/**
397 * device_schedule_callback - helper to schedule a callback for a device 483 * device_schedule_callback_owner - helper to schedule a callback for a device
398 * @dev: device. 484 * @dev: device.
399 * @func: callback function to invoke later. 485 * @func: callback function to invoke later.
486 * @owner: module owning the callback routine
400 * 487 *
401 * Attribute methods must not unregister themselves or their parent device 488 * Attribute methods must not unregister themselves or their parent device
402 * (which would amount to the same thing). Attempts to do so will deadlock, 489 * (which would amount to the same thing). Attempts to do so will deadlock,
@@ -407,20 +494,23 @@ EXPORT_SYMBOL_GPL(device_remove_bin_file);
407 * argument in the workqueue's process context. @dev will be pinned until 494 * argument in the workqueue's process context. @dev will be pinned until
408 * @func returns. 495 * @func returns.
409 * 496 *
497 * This routine is usually called via the inline device_schedule_callback(),
498 * which automatically sets @owner to THIS_MODULE.
499 *
410 * Returns 0 if the request was submitted, -ENOMEM if storage could not 500 * Returns 0 if the request was submitted, -ENOMEM if storage could not
411 * be allocated. 501 * be allocated, -ENODEV if a reference to @owner isn't available.
412 * 502 *
413 * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an 503 * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an
414 * underlying sysfs routine (since it is intended for use by attribute 504 * underlying sysfs routine (since it is intended for use by attribute
415 * methods), and if sysfs isn't available you'll get nothing but -ENOSYS. 505 * methods), and if sysfs isn't available you'll get nothing but -ENOSYS.
416 */ 506 */
417int device_schedule_callback(struct device *dev, 507int device_schedule_callback_owner(struct device *dev,
418 void (*func)(struct device *)) 508 void (*func)(struct device *), struct module *owner)
419{ 509{
420 return sysfs_schedule_callback(&dev->kobj, 510 return sysfs_schedule_callback(&dev->kobj,
421 (void (*)(void *)) func, dev); 511 (void (*)(void *)) func, dev, owner);
422} 512}
423EXPORT_SYMBOL_GPL(device_schedule_callback); 513EXPORT_SYMBOL_GPL(device_schedule_callback_owner);
424 514
425static void klist_children_get(struct klist_node *n) 515static void klist_children_get(struct klist_node *n)
426{ 516{
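[editor's note] The kernel-doc above explains why an attribute method must not unregister its own device directly; device_schedule_callback() defers the work to a workqueue while pinning the device, and with this change the owning module as well. A minimal sketch of how a store method might use it; the teardown function is hypothetical:

#include <linux/device.h>

static void example_delete(struct device *dev)	/* runs in workqueue context */
{
	device_unregister(dev);			/* safe here, not inside store() */
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int ret;

	/* the inline wrapper passes THIS_MODULE as @owner */
	ret = device_schedule_callback(dev, example_delete);
	return ret ? ret : count;
}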
@@ -477,34 +567,58 @@ static struct kobject * get_device_parent(struct device *dev,
477 return NULL; 567 return NULL;
478} 568}
479#else 569#else
480static struct kobject * virtual_device_parent(struct device *dev) 570static struct kobject *virtual_device_parent(struct device *dev)
481{ 571{
482 if (!dev->class) 572 static struct kobject *virtual_dir = NULL;
483 return ERR_PTR(-ENODEV);
484 573
485 if (!dev->class->virtual_dir) { 574 if (!virtual_dir)
486 static struct kobject *virtual_dir = NULL; 575 virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual");
487 576
488 if (!virtual_dir) 577 return virtual_dir;
489 virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual");
490 dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name);
491 }
492
493 return dev->class->virtual_dir;
494} 578}
495 579
496static struct kobject * get_device_parent(struct device *dev, 580static struct kobject * get_device_parent(struct device *dev,
497 struct device *parent) 581 struct device *parent)
498{ 582{
499 /* if this is a class device, and has no parent, create one */ 583 if (dev->class) {
500 if ((dev->class) && (parent == NULL)) { 584 struct kobject *kobj = NULL;
501 return virtual_device_parent(dev); 585 struct kobject *parent_kobj;
502 } else if (parent) 586 struct kobject *k;
587
588 /*
589 * If we have no parent, we live in "virtual".
590 * Class-devices with a bus-device as parent, live
591 * in a class-directory to prevent namespace collisions.
592 */
593 if (parent == NULL)
594 parent_kobj = virtual_device_parent(dev);
595 else if (parent->class)
596 return &parent->kobj;
597 else
598 parent_kobj = &parent->kobj;
599
600 /* find our class-directory at the parent and reference it */
601 spin_lock(&dev->class->class_dirs.list_lock);
602 list_for_each_entry(k, &dev->class->class_dirs.list, entry)
603 if (k->parent == parent_kobj) {
604 kobj = kobject_get(k);
605 break;
606 }
607 spin_unlock(&dev->class->class_dirs.list_lock);
608 if (kobj)
609 return kobj;
610
611 /* or create a new class-directory at the parent device */
612 return kobject_kset_add_dir(&dev->class->class_dirs,
613 parent_kobj, dev->class->name);
614 }
615
616 if (parent)
503 return &parent->kobj; 617 return &parent->kobj;
504 return NULL; 618 return NULL;
505} 619}
506
507#endif 620#endif
621
508static int setup_parent(struct device *dev, struct device *parent) 622static int setup_parent(struct device *dev, struct device *parent)
509{ 623{
510 struct kobject *kobj; 624 struct kobject *kobj;
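[editor's note] The rewritten get_device_parent() puts parentless class devices under /sys/devices/virtual/<class>/ and groups class devices that hang off a bus device into a per-class directory tracked by class_dirs. A minimal registration sketch, assuming a hypothetical class and the device_create() signature of this kernel generation:

#include <linux/device.h>

/* 'example_class' and 'parent' are hypothetical; parent may be NULL */
static struct device *add_example_dev(struct class *example_class,
				      struct device *parent, dev_t devt)
{
	/*
	 * parent == NULL       -> shows up under /sys/devices/virtual/<class>/
	 * parent on a bus      -> shows up in a <class>-named directory
	 *                         below the parent device
	 */
	return device_create(example_class, parent, devt, "example0");
}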
@@ -541,7 +655,6 @@ int device_add(struct device *dev)
541 pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id); 655 pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id);
542 656
543 parent = get_device(dev->parent); 657 parent = get_device(dev->parent);
544
545 error = setup_parent(dev, parent); 658 error = setup_parent(dev, parent);
546 if (error) 659 if (error)
547 goto Error; 660 goto Error;
@@ -562,10 +675,11 @@ int device_add(struct device *dev)
562 BUS_NOTIFY_ADD_DEVICE, dev); 675 BUS_NOTIFY_ADD_DEVICE, dev);
563 676
564 dev->uevent_attr.attr.name = "uevent"; 677 dev->uevent_attr.attr.name = "uevent";
565 dev->uevent_attr.attr.mode = S_IWUSR; 678 dev->uevent_attr.attr.mode = S_IRUGO | S_IWUSR;
566 if (dev->driver) 679 if (dev->driver)
567 dev->uevent_attr.attr.owner = dev->driver->owner; 680 dev->uevent_attr.attr.owner = dev->driver->owner;
568 dev->uevent_attr.store = store_uevent; 681 dev->uevent_attr.store = store_uevent;
682 dev->uevent_attr.show = show_uevent;
569 error = device_create_file(dev, &dev->uevent_attr); 683 error = device_create_file(dev, &dev->uevent_attr);
570 if (error) 684 if (error)
571 goto attrError; 685 goto attrError;
@@ -614,16 +728,12 @@ int device_add(struct device *dev)
614 728
615 if ((error = device_add_attrs(dev))) 729 if ((error = device_add_attrs(dev)))
616 goto AttrsError; 730 goto AttrsError;
617 if ((error = device_add_groups(dev)))
618 goto GroupError;
619 if ((error = device_pm_add(dev))) 731 if ((error = device_pm_add(dev)))
620 goto PMError; 732 goto PMError;
621 if ((error = bus_add_device(dev))) 733 if ((error = bus_add_device(dev)))
622 goto BusError; 734 goto BusError;
623 if (!dev->uevent_suppress) 735 kobject_uevent(&dev->kobj, KOBJ_ADD);
624 kobject_uevent(&dev->kobj, KOBJ_ADD); 736 bus_attach_device(dev);
625 if ((error = bus_attach_device(dev)))
626 goto AttachError;
627 if (parent) 737 if (parent)
628 klist_add_tail(&dev->knode_parent, &parent->klist_children); 738 klist_add_tail(&dev->knode_parent, &parent->klist_children);
629 739
@@ -639,19 +749,15 @@ int device_add(struct device *dev)
639 up(&dev->class->sem); 749 up(&dev->class->sem);
640 } 750 }
641 Done: 751 Done:
642 kfree(class_name); 752 kfree(class_name);
643 put_device(dev); 753 put_device(dev);
644 return error; 754 return error;
645 AttachError:
646 bus_remove_device(dev);
647 BusError: 755 BusError:
648 device_pm_remove(dev); 756 device_pm_remove(dev);
649 PMError: 757 PMError:
650 if (dev->bus) 758 if (dev->bus)
651 blocking_notifier_call_chain(&dev->bus->bus_notifier, 759 blocking_notifier_call_chain(&dev->bus->bus_notifier,
652 BUS_NOTIFY_DEL_DEVICE, dev); 760 BUS_NOTIFY_DEL_DEVICE, dev);
653 device_remove_groups(dev);
654 GroupError:
655 device_remove_attrs(dev); 761 device_remove_attrs(dev);
656 AttrsError: 762 AttrsError:
657 if (dev->devt_attr) { 763 if (dev->devt_attr) {
@@ -677,15 +783,6 @@ int device_add(struct device *dev)
677#endif 783#endif
678 sysfs_remove_link(&dev->kobj, "device"); 784 sysfs_remove_link(&dev->kobj, "device");
679 } 785 }
680
681 down(&dev->class->sem);
682 /* notify any interfaces that the device is now gone */
683 list_for_each_entry(class_intf, &dev->class->interfaces, node)
684 if (class_intf->remove_dev)
685 class_intf->remove_dev(dev, class_intf);
686 /* remove the device from the class list */
687 list_del_init(&dev->node);
688 up(&dev->class->sem);
689 } 786 }
690 ueventattrError: 787 ueventattrError:
691 device_remove_file(dev, &dev->uevent_attr); 788 device_remove_file(dev, &dev->uevent_attr);
@@ -796,9 +893,33 @@ void device_del(struct device * dev)
796 /* remove the device from the class list */ 893 /* remove the device from the class list */
797 list_del_init(&dev->node); 894 list_del_init(&dev->node);
798 up(&dev->class->sem); 895 up(&dev->class->sem);
896
897 /* If we live in a parent class-directory, unreference it */
898 if (dev->kobj.parent->kset == &dev->class->class_dirs) {
899 struct device *d;
900 int other = 0;
901
902 /*
903 * if we are the last child of our class, delete
904 * our class-directory at this parent
905 */
906 down(&dev->class->sem);
907 list_for_each_entry(d, &dev->class->devices, node) {
908 if (d == dev)
909 continue;
910 if (d->kobj.parent == dev->kobj.parent) {
911 other = 1;
912 break;
913 }
914 }
915 if (!other)
916 kobject_del(dev->kobj.parent);
917
918 kobject_put(dev->kobj.parent);
919 up(&dev->class->sem);
920 }
799 } 921 }
800 device_remove_file(dev, &dev->uevent_attr); 922 device_remove_file(dev, &dev->uevent_attr);
801 device_remove_groups(dev);
802 device_remove_attrs(dev); 923 device_remove_attrs(dev);
803 bus_remove_device(dev); 924 bus_remove_device(dev);
804 925
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6a48824e43ff..18dba8e78da7 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -94,19 +94,11 @@ int device_bind_driver(struct device *dev)
94 return ret; 94 return ret;
95} 95}
96 96
97struct stupid_thread_structure {
98 struct device_driver *drv;
99 struct device *dev;
100};
101
102static atomic_t probe_count = ATOMIC_INIT(0); 97static atomic_t probe_count = ATOMIC_INIT(0);
103static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); 98static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
104 99
105static int really_probe(void *void_data) 100static int really_probe(struct device *dev, struct device_driver *drv)
106{ 101{
107 struct stupid_thread_structure *data = void_data;
108 struct device_driver *drv = data->drv;
109 struct device *dev = data->dev;
110 int ret = 0; 102 int ret = 0;
111 103
112 atomic_inc(&probe_count); 104 atomic_inc(&probe_count);
@@ -154,7 +146,6 @@ probe_failed:
154 */ 146 */
155 ret = 0; 147 ret = 0;
156done: 148done:
157 kfree(data);
158 atomic_dec(&probe_count); 149 atomic_dec(&probe_count);
159 wake_up(&probe_waitqueue); 150 wake_up(&probe_waitqueue);
160 return ret; 151 return ret;
@@ -186,16 +177,14 @@ int driver_probe_done(void)
186 * format of the ID structures, nor what is to be considered a match and 177 * format of the ID structures, nor what is to be considered a match and
187 * what is not. 178 * what is not.
188 * 179 *
189 * This function returns 1 if a match is found, an error if one occurs 180 * This function returns 1 if a match is found, -ENODEV if the device is
190 * (that is not -ENODEV or -ENXIO), and 0 otherwise. 181 * not registered, and 0 otherwise.
191 * 182 *
192 * This function must be called with @dev->sem held. When called for a 183 * This function must be called with @dev->sem held. When called for a
193 * USB interface, @dev->parent->sem must be held as well. 184 * USB interface, @dev->parent->sem must be held as well.
194 */ 185 */
195int driver_probe_device(struct device_driver * drv, struct device * dev) 186int driver_probe_device(struct device_driver * drv, struct device * dev)
196{ 187{
197 struct stupid_thread_structure *data;
198 struct task_struct *probe_task;
199 int ret = 0; 188 int ret = 0;
200 189
201 if (!device_is_registered(dev)) 190 if (!device_is_registered(dev))
@@ -206,19 +195,7 @@ int driver_probe_device(struct device_driver * drv, struct device * dev)
206 pr_debug("%s: Matched Device %s with Driver %s\n", 195 pr_debug("%s: Matched Device %s with Driver %s\n",
207 drv->bus->name, dev->bus_id, drv->name); 196 drv->bus->name, dev->bus_id, drv->name);
208 197
209 data = kmalloc(sizeof(*data), GFP_KERNEL); 198 ret = really_probe(dev, drv);
210 if (!data)
211 return -ENOMEM;
212 data->drv = drv;
213 data->dev = dev;
214
215 if (drv->multithread_probe) {
216 probe_task = kthread_run(really_probe, data,
217 "probe-%s", dev->bus_id);
218 if (IS_ERR(probe_task))
219 ret = really_probe(data);
220 } else
221 ret = really_probe(data);
222 199
223done: 200done:
224 return ret; 201 return ret;
@@ -230,30 +207,57 @@ static int __device_attach(struct device_driver * drv, void * data)
230 return driver_probe_device(drv, dev); 207 return driver_probe_device(drv, dev);
231} 208}
232 209
210static int device_probe_drivers(void *data)
211{
212 struct device *dev = data;
213 int ret = 0;
214
215 if (dev->bus) {
216 down(&dev->sem);
217 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
218 up(&dev->sem);
219 }
220 return ret;
221}
222
233/** 223/**
234 * device_attach - try to attach device to a driver. 224 * device_attach - try to attach device to a driver.
235 * @dev: device. 225 * @dev: device.
236 * 226 *
237 * Walk the list of drivers that the bus has and call 227 * Walk the list of drivers that the bus has and call
238 * driver_probe_device() for each pair. If a compatible 228 * driver_probe_device() for each pair. If a compatible
239 * pair is found, break out and return. 229 * pair is found, break out and return. If the bus specifies
230 * multithreaded probing, walking the list of drivers is done
231 * on a probing thread.
240 * 232 *
241 * Returns 1 if the device was bound to a driver; 233 * Returns 1 if the device was bound to a driver;
242 * 0 if no matching device was found; error code otherwise. 234 * 0 if no matching device was found or multithreaded probing is done;
235 * -ENODEV if the device is not registered.
243 * 236 *
244 * When called for a USB interface, @dev->parent->sem must be held. 237 * When called for a USB interface, @dev->parent->sem must be held.
245 */ 238 */
246int device_attach(struct device * dev) 239int device_attach(struct device * dev)
247{ 240{
248 int ret = 0; 241 int ret = 0;
242 struct task_struct *probe_task = ERR_PTR(-ENOMEM);
249 243
250 down(&dev->sem); 244 down(&dev->sem);
251 if (dev->driver) { 245 if (dev->driver) {
252 ret = device_bind_driver(dev); 246 ret = device_bind_driver(dev);
253 if (ret == 0) 247 if (ret == 0)
254 ret = 1; 248 ret = 1;
255 } else 249 else {
256 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); 250 dev->driver = NULL;
251 ret = 0;
252 }
253 } else {
254 if (dev->bus->multithread_probe)
255 probe_task = kthread_run(device_probe_drivers, dev,
256 "probe-%s", dev->bus_id);
257 if(IS_ERR(probe_task))
258 ret = bus_for_each_drv(dev->bus, NULL, dev,
259 __device_attach);
260 }
257 up(&dev->sem); 261 up(&dev->sem);
258 return ret; 262 return ret;
259} 263}
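[editor's note] With the per-probe thread structure gone, really_probe() runs synchronously and only bus-level multithreaded probing remains; device_attach() keeps the 1 / 0 / negative return convention described in its comment. A minimal sketch of a caller interpreting that; the device pointer is hypothetical:

#include <linux/device.h>

static void try_bind(struct device *dev)	/* dev is hypothetical */
{
	int ret = device_attach(dev);

	if (ret > 0)
		dev_info(dev, "bound to %s\n", dev->driver->name);
	else if (ret == 0)
		dev_info(dev, "no matching driver (or probing deferred to a thread)\n");
	else
		dev_err(dev, "attach failed: %d\n", ret);
}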
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index cd467c9f33b3..9406259754ad 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -37,7 +37,7 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
37 37
38#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) 38#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
39 39
40static DECLARE_MUTEX (pools_lock); 40static DEFINE_MUTEX (pools_lock);
41 41
42static ssize_t 42static ssize_t
43show_pools (struct device *dev, struct device_attribute *attr, char *buf) 43show_pools (struct device *dev, struct device_attribute *attr, char *buf)
@@ -55,7 +55,7 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf)
55 size -= temp; 55 size -= temp;
56 next += temp; 56 next += temp;
57 57
58 down (&pools_lock); 58 mutex_lock(&pools_lock);
59 list_for_each_entry(pool, &dev->dma_pools, pools) { 59 list_for_each_entry(pool, &dev->dma_pools, pools) {
60 unsigned pages = 0; 60 unsigned pages = 0;
61 unsigned blocks = 0; 61 unsigned blocks = 0;
@@ -73,7 +73,7 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf)
73 size -= temp; 73 size -= temp;
74 next += temp; 74 next += temp;
75 } 75 }
76 up (&pools_lock); 76 mutex_unlock(&pools_lock);
77 77
78 return PAGE_SIZE - size; 78 return PAGE_SIZE - size;
79} 79}
@@ -143,7 +143,7 @@ dma_pool_create (const char *name, struct device *dev,
143 if (dev) { 143 if (dev) {
144 int ret; 144 int ret;
145 145
146 down (&pools_lock); 146 mutex_lock(&pools_lock);
147 if (list_empty (&dev->dma_pools)) 147 if (list_empty (&dev->dma_pools))
148 ret = device_create_file (dev, &dev_attr_pools); 148 ret = device_create_file (dev, &dev_attr_pools);
149 else 149 else
@@ -155,7 +155,7 @@ dma_pool_create (const char *name, struct device *dev,
155 kfree(retval); 155 kfree(retval);
156 retval = NULL; 156 retval = NULL;
157 } 157 }
158 up (&pools_lock); 158 mutex_unlock(&pools_lock);
159 } else 159 } else
160 INIT_LIST_HEAD (&retval->pools); 160 INIT_LIST_HEAD (&retval->pools);
161 161
@@ -231,11 +231,11 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page)
231void 231void
232dma_pool_destroy (struct dma_pool *pool) 232dma_pool_destroy (struct dma_pool *pool)
233{ 233{
234 down (&pools_lock); 234 mutex_lock(&pools_lock);
235 list_del (&pool->pools); 235 list_del (&pool->pools);
236 if (pool->dev && list_empty (&pool->dev->dma_pools)) 236 if (pool->dev && list_empty (&pool->dev->dma_pools))
237 device_remove_file (pool->dev, &dev_attr_pools); 237 device_remove_file (pool->dev, &dev_attr_pools);
238 up (&pools_lock); 238 mutex_unlock(&pools_lock);
239 239
240 while (!list_empty (&pool->page_list)) { 240 while (!list_empty (&pool->page_list)) {
241 struct dma_page *page; 241 struct dma_page *page;
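[editor's note] The dmapool hunks only change the locking, but the surrounding API is worth a usage sketch; names and block sizes below are hypothetical:

#include <linux/dmapool.h>

static int example_use_pool(struct device *dev)	/* dev is hypothetical */
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	pool = dma_pool_create("example", dev, 64, 8, 0);  /* 64-byte blocks */
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'handle' to the hardware, touch 'vaddr' from the CPU ... */

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);
	return 0;
}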
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 082bfded3854..eb11475293ed 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -149,10 +149,6 @@ void put_driver(struct device_driver * drv)
149 * We pass off most of the work to the bus_add_driver() call, 149 * We pass off most of the work to the bus_add_driver() call,
150 * since most of the things we have to do deal with the bus 150 * since most of the things we have to do deal with the bus
151 * structures. 151 * structures.
152 *
153 * The one interesting aspect is that we setup @drv->unloaded
154 * as a completion that gets complete when the driver reference
155 * count reaches 0.
156 */ 152 */
157int driver_register(struct device_driver * drv) 153int driver_register(struct device_driver * drv)
158{ 154{
@@ -162,35 +158,19 @@ int driver_register(struct device_driver * drv)
162 printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name); 158 printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name);
163 } 159 }
164 klist_init(&drv->klist_devices, NULL, NULL); 160 klist_init(&drv->klist_devices, NULL, NULL);
165 init_completion(&drv->unloaded);
166 return bus_add_driver(drv); 161 return bus_add_driver(drv);
167} 162}
168 163
169
170/** 164/**
171 * driver_unregister - remove driver from system. 165 * driver_unregister - remove driver from system.
172 * @drv: driver. 166 * @drv: driver.
173 * 167 *
174 * Again, we pass off most of the work to the bus-level call. 168 * Again, we pass off most of the work to the bus-level call.
175 *
176 * Though, once that is done, we wait until @drv->unloaded is completed.
177 * This will block until the driver refcount reaches 0, and it is
178 * released. Only modular drivers will call this function, and we
179 * have to guarantee that it won't complete, letting the driver
180 * unload until all references are gone.
181 */ 169 */
182 170
183void driver_unregister(struct device_driver * drv) 171void driver_unregister(struct device_driver * drv)
184{ 172{
185 bus_remove_driver(drv); 173 bus_remove_driver(drv);
186 /*
187 * If the driver is a module, we are probably in
188 * the module unload path, and we want to wait
189 * for everything to unload before we can actually
190 * finish the unload.
191 */
192 if (drv->owner)
193 wait_for_completion(&drv->unloaded);
194} 174}
195 175
196/** 176/**
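[editor's note] The removed kernel-doc promised that driver_unregister() would block on drv->unloaded until the reference count hit zero; after this change the lifetime guarantee rests on the module reference count alone. A minimal sketch of that underlying idea; the helper and its callers are hypothetical:

#include <linux/module.h>

/* Pin the module that owns a callback before invoking it (hypothetical). */
static int call_owned(struct module *owner, void (*fn)(void))
{
	if (!try_module_get(owner))	/* owner may already be unloading */
		return -ENODEV;
	fn();
	module_put(owner);
	return 0;
}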
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index c0a979a5074b..97ab5bd1c4d6 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -31,8 +31,6 @@ enum {
31 FW_STATUS_LOADING, 31 FW_STATUS_LOADING,
32 FW_STATUS_DONE, 32 FW_STATUS_DONE,
33 FW_STATUS_ABORT, 33 FW_STATUS_ABORT,
34 FW_STATUS_READY,
35 FW_STATUS_READY_NOHOTPLUG,
36}; 34};
37 35
38static int loading_timeout = 60; /* In seconds */ 36static int loading_timeout = 60; /* In seconds */
@@ -96,9 +94,6 @@ static int firmware_uevent(struct device *dev, char **envp, int num_envp,
96 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 94 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
97 int i = 0, len = 0; 95 int i = 0, len = 0;
98 96
99 if (!test_bit(FW_STATUS_READY, &fw_priv->status))
100 return -ENODEV;
101
102 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 97 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
103 "FIRMWARE=%s", fw_priv->fw_id)) 98 "FIRMWARE=%s", fw_priv->fw_id))
104 return -ENOMEM; 99 return -ENOMEM;
@@ -333,6 +328,7 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
333 f_dev->parent = device; 328 f_dev->parent = device;
334 f_dev->class = &firmware_class; 329 f_dev->class = &firmware_class;
335 dev_set_drvdata(f_dev, fw_priv); 330 dev_set_drvdata(f_dev, fw_priv);
331 f_dev->uevent_suppress = 1;
336 retval = device_register(f_dev); 332 retval = device_register(f_dev);
337 if (retval) { 333 if (retval) {
338 printk(KERN_ERR "%s: device_register failed\n", 334 printk(KERN_ERR "%s: device_register failed\n",
@@ -382,9 +378,7 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p,
382 } 378 }
383 379
384 if (uevent) 380 if (uevent)
385 set_bit(FW_STATUS_READY, &fw_priv->status); 381 f_dev->uevent_suppress = 0;
386 else
387 set_bit(FW_STATUS_READY_NOHOTPLUG, &fw_priv->status);
388 *dev_p = f_dev; 382 *dev_p = f_dev;
389 goto out; 383 goto out;
390 384
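[editor's note] The firmware class now suppresses the initial uevent through dev->uevent_suppress instead of the FW_STATUS_READY bits; from a driver's point of view the interface is unchanged. A minimal consumer sketch; the firmware file name is hypothetical:

#include <linux/firmware.h>

static int example_load_fw(struct device *dev)	/* dev is hypothetical */
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example.bin", dev);  /* may raise a uevent */
	if (ret)
		return ret;

	/* ... copy fw->data (fw->size bytes) to the hardware ... */

	release_firmware(fw);
	return 0;
}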
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index bbbb973a9d3c..05dc8764e765 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,9 @@ LIST_HEAD(dpm_off_irq);
29DECLARE_MUTEX(dpm_sem); 29DECLARE_MUTEX(dpm_sem);
30DECLARE_MUTEX(dpm_list_sem); 30DECLARE_MUTEX(dpm_list_sem);
31 31
32int (*platform_enable_wakeup)(struct device *dev, int is_on);
33
34
32/** 35/**
33 * device_pm_set_parent - Specify power dependency. 36 * device_pm_set_parent - Specify power dependency.
34 * @dev: Device who needs power. 37 * @dev: Device who needs power.
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
index 020be36705a6..a2c64188d713 100644
--- a/drivers/base/power/resume.c
+++ b/drivers/base/power/resume.c
@@ -26,7 +26,9 @@ int resume_device(struct device * dev)
26 26
27 TRACE_DEVICE(dev); 27 TRACE_DEVICE(dev);
28 TRACE_RESUME(0); 28 TRACE_RESUME(0);
29
29 down(&dev->sem); 30 down(&dev->sem);
31
30 if (dev->power.pm_parent 32 if (dev->power.pm_parent
31 && dev->power.pm_parent->power.power_state.event) { 33 && dev->power.pm_parent->power.power_state.event) {
32 dev_err(dev, "PM: resume from %d, parent %s still %d\n", 34 dev_err(dev, "PM: resume from %d, parent %s still %d\n",
@@ -34,15 +36,24 @@ int resume_device(struct device * dev)
34 dev->power.pm_parent->bus_id, 36 dev->power.pm_parent->bus_id,
35 dev->power.pm_parent->power.power_state.event); 37 dev->power.pm_parent->power.power_state.event);
36 } 38 }
39
37 if (dev->bus && dev->bus->resume) { 40 if (dev->bus && dev->bus->resume) {
38 dev_dbg(dev,"resuming\n"); 41 dev_dbg(dev,"resuming\n");
39 error = dev->bus->resume(dev); 42 error = dev->bus->resume(dev);
40 } 43 }
41 if (dev->class && dev->class->resume) { 44
45 if (!error && dev->type && dev->type->resume) {
46 dev_dbg(dev,"resuming\n");
47 error = dev->type->resume(dev);
48 }
49
50 if (!error && dev->class && dev->class->resume) {
42 dev_dbg(dev,"class resume\n"); 51 dev_dbg(dev,"class resume\n");
43 error = dev->class->resume(dev); 52 error = dev->class->resume(dev);
44 } 53 }
54
45 up(&dev->sem); 55 up(&dev->sem);
56
46 TRACE_RESUME(error); 57 TRACE_RESUME(error);
47 return error; 58 return error;
48} 59}
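[editor's note] resume_device() here, and suspend_device() in the next file, now call dev->type->resume/suspend between the bus and class callbacks, so a device_type can carry PM hooks shared by all devices of that type. A minimal sketch with hypothetical handlers, assuming the device_type fields this series introduces:

#include <linux/device.h>

static int example_type_suspend(struct device *dev, pm_message_t state)
{
	/* quiesce hardware common to this device type (hypothetical) */
	return 0;
}

static int example_type_resume(struct device *dev)
{
	/* restore that state on resume (hypothetical) */
	return 0;
}

static struct device_type example_type = {
	.name    = "example",
	.suspend = example_type_suspend,
	.resume  = example_type_resume,
};

/* before device_register():  dev->type = &example_type; */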
diff --git a/drivers/base/power/shutdown.c b/drivers/base/power/shutdown.c
index 3483ae4d57f5..58b6f77a1b34 100644
--- a/drivers/base/power/shutdown.c
+++ b/drivers/base/power/shutdown.c
@@ -36,7 +36,6 @@ void device_shutdown(void)
36{ 36{
37 struct device * dev, *devn; 37 struct device * dev, *devn;
38 38
39 down_write(&devices_subsys.rwsem);
40 list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.kset.list, 39 list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.kset.list,
41 kobj.entry) { 40 kobj.entry) {
42 if (dev->bus && dev->bus->shutdown) { 41 if (dev->bus && dev->bus->shutdown) {
@@ -47,7 +46,6 @@ void device_shutdown(void)
47 dev->driver->shutdown(dev); 46 dev->driver->shutdown(dev);
48 } 47 }
49 } 48 }
50 up_write(&devices_subsys.rwsem);
51 49
52 sysdev_shutdown(); 50 sysdev_shutdown();
53} 51}
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index ece136bf97e3..42d2b86ba765 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -78,6 +78,18 @@ int suspend_device(struct device * dev, pm_message_t state)
78 suspend_report_result(dev->class->suspend, error); 78 suspend_report_result(dev->class->suspend, error);
79 } 79 }
80 80
81 if (!error && dev->type && dev->type->suspend && !dev->power.power_state.event) {
82 dev_dbg(dev, "%s%s\n",
83 suspend_verb(state.event),
84 ((state.event == PM_EVENT_SUSPEND)
85 && device_may_wakeup(dev))
86 ? ", may wakeup"
87 : ""
88 );
89 error = dev->type->suspend(dev, state);
90 suspend_report_result(dev->type->suspend, error);
91 }
92
81 if (!error && dev->bus && dev->bus->suspend && !dev->power.power_state.event) { 93 if (!error && dev->bus && dev->bus->suspend && !dev->power.power_state.event) {
82 dev_dbg(dev, "%s%s\n", 94 dev_dbg(dev, "%s%s\n",
83 suspend_verb(state.event), 95 suspend_verb(state.event),
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 2308e83e5f33..1d8466817943 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -48,6 +48,15 @@ struct aoe_hdr {
48 __be32 tag; 48 __be32 tag;
49}; 49};
50 50
51#ifdef __KERNEL__
52#include <linux/skbuff.h>
53
54static inline struct aoe_hdr *aoe_hdr(const struct sk_buff *skb)
55{
56 return (struct aoe_hdr *)skb_mac_header(skb);
57}
58#endif
59
51struct aoe_atahdr { 60struct aoe_atahdr {
52 unsigned char aflags; 61 unsigned char aflags;
53 unsigned char errfeat; 62 unsigned char errfeat;
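[editor's note] aoe_hdr() wraps skb_mac_header() so the driver no longer dereferences skb->mac.raw, whose representation is about to change. A minimal sketch of the reset/accessor pairing with a hypothetical on-wire header:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

struct example_hdr {			/* hypothetical on-wire header */
	unsigned char dst[ETH_ALEN];
	unsigned char src[ETH_ALEN];
};

static inline struct example_hdr *example_hdr(const struct sk_buff *skb)
{
	return (struct example_hdr *)skb_mac_header(skb);
}

static struct example_hdr *start_frame(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);	/* mac header = current data pointer */
	return example_hdr(skb);
}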
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8d17d8df3662..1a6aeac5a1c3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -27,7 +27,8 @@ new_skb(ulong len)
27 27
28 skb = alloc_skb(len, GFP_ATOMIC); 28 skb = alloc_skb(len, GFP_ATOMIC);
29 if (skb) { 29 if (skb) {
30 skb->nh.raw = skb->mac.raw = skb->data; 30 skb_reset_mac_header(skb);
31 skb_reset_network_header(skb);
31 skb->protocol = __constant_htons(ETH_P_AOE); 32 skb->protocol = __constant_htons(ETH_P_AOE);
32 skb->priority = 0; 33 skb->priority = 0;
33 skb->next = skb->prev = NULL; 34 skb->next = skb->prev = NULL;
@@ -118,7 +119,7 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f)
118 119
119 /* initialize the headers & frame */ 120 /* initialize the headers & frame */
120 skb = f->skb; 121 skb = f->skb;
121 h = (struct aoe_hdr *) skb->mac.raw; 122 h = aoe_hdr(skb);
122 ah = (struct aoe_atahdr *) (h+1); 123 ah = (struct aoe_atahdr *) (h+1);
123 skb_put(skb, sizeof *h + sizeof *ah); 124 skb_put(skb, sizeof *h + sizeof *ah);
124 memset(h, 0, skb->len); 125 memset(h, 0, skb->len);
@@ -207,7 +208,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
207 skb->dev = ifp; 208 skb->dev = ifp;
208 if (sl_tail == NULL) 209 if (sl_tail == NULL)
209 sl_tail = skb; 210 sl_tail = skb;
210 h = (struct aoe_hdr *) skb->mac.raw; 211 h = aoe_hdr(skb);
211 memset(h, 0, sizeof *h + sizeof *ch); 212 memset(h, 0, sizeof *h + sizeof *ch);
212 213
213 memset(h->dst, 0xff, sizeof h->dst); 214 memset(h->dst, 0xff, sizeof h->dst);
@@ -300,7 +301,7 @@ rexmit(struct aoedev *d, struct frame *f)
300 aoechr_error(buf); 301 aoechr_error(buf);
301 302
302 skb = f->skb; 303 skb = f->skb;
303 h = (struct aoe_hdr *) skb->mac.raw; 304 h = aoe_hdr(skb);
304 ah = (struct aoe_atahdr *) (h+1); 305 ah = (struct aoe_atahdr *) (h+1);
305 f->tag = n; 306 f->tag = n;
306 h->tag = cpu_to_be32(n); 307 h->tag = cpu_to_be32(n);
@@ -529,7 +530,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
529 char ebuf[128]; 530 char ebuf[128];
530 u16 aoemajor; 531 u16 aoemajor;
531 532
532 hin = (struct aoe_hdr *) skb->mac.raw; 533 hin = aoe_hdr(skb);
533 aoemajor = be16_to_cpu(get_unaligned(&hin->major)); 534 aoemajor = be16_to_cpu(get_unaligned(&hin->major));
534 d = aoedev_by_aoeaddr(aoemajor, hin->minor); 535 d = aoedev_by_aoeaddr(aoemajor, hin->minor);
535 if (d == NULL) { 536 if (d == NULL) {
@@ -561,7 +562,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
561 calc_rttavg(d, tsince(f->tag)); 562 calc_rttavg(d, tsince(f->tag));
562 563
563 ahin = (struct aoe_atahdr *) (hin+1); 564 ahin = (struct aoe_atahdr *) (hin+1);
564 hout = (struct aoe_hdr *) f->skb->mac.raw; 565 hout = aoe_hdr(f->skb);
565 ahout = (struct aoe_atahdr *) (hout+1); 566 ahout = (struct aoe_atahdr *) (hout+1);
566 buf = f->buf; 567 buf = f->buf;
567 568
@@ -695,7 +696,7 @@ aoecmd_ata_id(struct aoedev *d)
695 696
696 /* initialize the headers & frame */ 697 /* initialize the headers & frame */
697 skb = f->skb; 698 skb = f->skb;
698 h = (struct aoe_hdr *) skb->mac.raw; 699 h = aoe_hdr(skb);
699 ah = (struct aoe_atahdr *) (h+1); 700 ah = (struct aoe_atahdr *) (h+1);
700 skb_put(skb, sizeof *h + sizeof *ah); 701 skb_put(skb, sizeof *h + sizeof *ah);
701 memset(h, 0, skb->len); 702 memset(h, 0, skb->len);
@@ -726,7 +727,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
726 enum { MAXFRAMES = 16 }; 727 enum { MAXFRAMES = 16 };
727 u16 n; 728 u16 n;
728 729
729 h = (struct aoe_hdr *) skb->mac.raw; 730 h = aoe_hdr(skb);
730 ch = (struct aoe_cfghdr *) (h+1); 731 ch = (struct aoe_cfghdr *) (h+1);
731 732
732 /* 733 /*
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index aab6d91a2c22..f9ddfda4d9cb 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -123,7 +123,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
123 goto exit; 123 goto exit;
124 skb_push(skb, ETH_HLEN); /* (1) */ 124 skb_push(skb, ETH_HLEN); /* (1) */
125 125
126 h = (struct aoe_hdr *) skb->mac.raw; 126 h = aoe_hdr(skb);
127 n = be32_to_cpu(get_unaligned(&h->tag)); 127 n = be32_to_cpu(get_unaligned(&h->tag));
128 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) 128 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
129 goto exit; 129 goto exit;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 14d780666c0a..65a725cd3422 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3423,6 +3423,25 @@ static void cciss_remove_one(struct pci_dev *pdev)
3423 "already be removed \n"); 3423 "already be removed \n");
3424 return; 3424 return;
3425 } 3425 }
3426
3427 remove_proc_entry(hba[i]->devname, proc_cciss);
3428 unregister_blkdev(hba[i]->major, hba[i]->devname);
3429
3430 /* remove it from the disk list */
3431 for (j = 0; j < CISS_MAX_LUN; j++) {
3432 struct gendisk *disk = hba[i]->gendisk[j];
3433 if (disk) {
3434 request_queue_t *q = disk->queue;
3435
3436 if (disk->flags & GENHD_FL_UP)
3437 del_gendisk(disk);
3438 if (q)
3439 blk_cleanup_queue(q);
3440 }
3441 }
3442
3443 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3444
3426 /* Turn board interrupts off and send the flush cache command */ 3445 /* Turn board interrupts off and send the flush cache command */
3427 /* sendcmd will turn off interrupt, and send the flush... 3446 /* sendcmd will turn off interrupt, and send the flush...
3428 * To write all data in the battery backed cache to disks */ 3447 * To write all data in the battery backed cache to disks */
@@ -3444,22 +3463,6 @@ static void cciss_remove_one(struct pci_dev *pdev)
3444#endif /* CONFIG_PCI_MSI */ 3463#endif /* CONFIG_PCI_MSI */
3445 3464
3446 iounmap(hba[i]->vaddr); 3465 iounmap(hba[i]->vaddr);
3447 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3448 unregister_blkdev(hba[i]->major, hba[i]->devname);
3449 remove_proc_entry(hba[i]->devname, proc_cciss);
3450
3451 /* remove it from the disk list */
3452 for (j = 0; j < CISS_MAX_LUN; j++) {
3453 struct gendisk *disk = hba[i]->gendisk[j];
3454 if (disk) {
3455 request_queue_t *q = disk->queue;
3456
3457 if (disk->flags & GENHD_FL_UP)
3458 del_gendisk(disk);
3459 if (q)
3460 blk_cleanup_queue(q);
3461 }
3462 }
3463 3466
3464 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), 3467 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3465 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); 3468 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index c852eed91e4b..1eeb8f2cde71 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -140,7 +140,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
140#include <linux/blkdev.h> 140#include <linux/blkdev.h>
141#include <asm/uaccess.h> 141#include <asm/uaccess.h>
142 142
143static spinlock_t pcd_lock; 143static DEFINE_SPINLOCK(pcd_lock);
144 144
145module_param(verbose, bool, 0644); 145module_param(verbose, bool, 0644);
146module_param(major, int, 0); 146module_param(major, int, 0);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 7cdaa1951260..5826508f6731 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -154,7 +154,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
154#include <linux/blkpg.h> 154#include <linux/blkpg.h>
155#include <asm/uaccess.h> 155#include <asm/uaccess.h>
156 156
157static spinlock_t pf_spin_lock; 157static DEFINE_SPINLOCK(pf_spin_lock);
158 158
159module_param(verbose, bool, 0644); 159module_param(verbose, bool, 0644);
160module_param(major, int, 0); 160module_param(major, int, 0);
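The two paride hunks above swap a bare "static spinlock_t", which is never initialized, for DEFINE_SPINLOCK(), which yields a lock that is valid from the moment the module is loaded. The kernel-style sketch below (it builds only against kernel headers; all names are hypothetical, not taken from the patch) shows the two standard ways to obtain an initialized spinlock: static definition versus spin_lock_init() on an embedded lock.

#include <linux/spinlock.h>

/* Statically allocated lock: initialized at compile time, safe to take
 * from the very first code path that runs. */
static DEFINE_SPINLOCK(demo_lock);

/* A lock embedded in a dynamically allocated object must be initialized
 * explicitly before first use. */
struct demo_dev {
	spinlock_t lock;
	unsigned int count;
};

static void demo_dev_init(struct demo_dev *dev)
{
	spin_lock_init(&dev->lock);
	dev->count = 0;
}

static void demo_count_global(void)
{
	static unsigned int global_count;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* statically initialized lock */
	global_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}
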
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a4fb70383188..f1b9dd7d47d6 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -777,7 +777,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
777 rq->cmd_flags |= REQ_QUIET; 777 rq->cmd_flags |= REQ_QUIET;
778 778
779 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); 779 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
780 ret = rq->errors; 780 if (rq->errors)
781 ret = -EIO;
781out: 782out:
782 blk_put_request(rq); 783 blk_put_request(rq);
783 return ret; 784 return ret;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 2098eff91e14..746a118a9b52 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2132,10 +2132,13 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2132 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 2132 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2133 == USB_ENDPOINT_XFER_BULK) { 2133 == USB_ENDPOINT_XFER_BULK) {
2134 /* BULK in or out? */ 2134 /* BULK in or out? */
2135 if (ep->bEndpointAddress & USB_DIR_IN) 2135 if (ep->bEndpointAddress & USB_DIR_IN) {
2136 ep_in = ep; 2136 if (ep_in == NULL)
2137 else 2137 ep_in = ep;
2138 ep_out = ep; 2138 } else {
2139 if (ep_out == NULL)
2140 ep_out = ep;
2141 }
2139 } 2142 }
2140 } 2143 }
2141 2144
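The ub.c hunk above stops overwriting the remembered endpoints when a device exposes more than one bulk-in or bulk-out endpoint: only the first of each direction is kept. A standalone sketch of that first-match scan follows; the descriptor struct and constants are invented stand-ins, not the USB core's types.

#include <stdio.h>

#define DEMO_DIR_IN		0x80
#define DEMO_XFERTYPE_MASK	0x03
#define DEMO_XFER_BULK		0x02

struct demo_ep {
	unsigned char addr;	/* bEndpointAddress stand-in */
	unsigned char attr;	/* bmAttributes stand-in */
};

int main(void)
{
	struct demo_ep eps[] = {
		{ 0x01, DEMO_XFER_BULK },	/* first bulk OUT */
		{ 0x81, DEMO_XFER_BULK },	/* first bulk IN */
		{ 0x82, DEMO_XFER_BULK },	/* extra bulk IN -- must be ignored */
	};
	const struct demo_ep *ep_in = NULL, *ep_out = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(eps) / sizeof(eps[0]); i++) {
		if ((eps[i].attr & DEMO_XFERTYPE_MASK) != DEMO_XFER_BULK)
			continue;
		if (eps[i].addr & DEMO_DIR_IN) {
			if (ep_in == NULL)	/* keep only the first IN */
				ep_in = &eps[i];
		} else {
			if (ep_out == NULL)	/* keep only the first OUT */
				ep_out = &eps[i];
		}
	}

	printf("in=0x%02x out=0x%02x\n", ep_in->addr, ep_out->addr);
	return 0;
}
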
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 4c766f36d884..b990805806af 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -527,7 +527,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
527 buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size; 527 buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;
528 528
529 memcpy(skb_put(nskb, 3), buf, 3); 529 memcpy(skb_put(nskb, 3), buf, 3);
530 memcpy(skb_put(nskb, size), skb->data + sent, size); 530 skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
531 531
532 sent += size; 532 sent += size;
533 count -= size; 533 count -= size;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index acfb6a430dcc..851de4d5b7de 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -461,20 +461,20 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
461 switch (info->rx_state) { 461 switch (info->rx_state) {
462 462
463 case RECV_WAIT_EVENT_HEADER: 463 case RECV_WAIT_EVENT_HEADER:
464 eh = (struct hci_event_hdr *)(info->rx_skb->data); 464 eh = hci_event_hdr(info->rx_skb);
465 info->rx_state = RECV_WAIT_DATA; 465 info->rx_state = RECV_WAIT_DATA;
466 info->rx_count = eh->plen; 466 info->rx_count = eh->plen;
467 break; 467 break;
468 468
469 case RECV_WAIT_ACL_HEADER: 469 case RECV_WAIT_ACL_HEADER:
470 ah = (struct hci_acl_hdr *)(info->rx_skb->data); 470 ah = hci_acl_hdr(info->rx_skb);
471 dlen = __le16_to_cpu(ah->dlen); 471 dlen = __le16_to_cpu(ah->dlen);
472 info->rx_state = RECV_WAIT_DATA; 472 info->rx_state = RECV_WAIT_DATA;
473 info->rx_count = dlen; 473 info->rx_count = dlen;
474 break; 474 break;
475 475
476 case RECV_WAIT_SCO_HEADER: 476 case RECV_WAIT_SCO_HEADER:
477 sh = (struct hci_sco_hdr *)(info->rx_skb->data); 477 sh = hci_sco_hdr(info->rx_skb);
478 info->rx_state = RECV_WAIT_DATA; 478 info->rx_state = RECV_WAIT_DATA;
479 info->rx_count = sh->dlen; 479 info->rx_count = sh->dlen;
480 break; 480 break;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 9fca6513562d..e8ebd5d3de86 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -231,7 +231,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
231 cr = (struct usb_ctrlrequest *) urb->setup_packet; 231 cr = (struct usb_ctrlrequest *) urb->setup_packet;
232 cr->wLength = __cpu_to_le16(skb->len); 232 cr->wLength = __cpu_to_le16(skb->len);
233 233
234 memcpy(urb->transfer_buffer, skb->data, skb->len); 234 skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
235 urb->transfer_buffer_length = skb->len; 235 urb->transfer_buffer_length = skb->len;
236 236
237 err = usb_submit_urb(urb, GFP_ATOMIC); 237 err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -250,7 +250,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
250 skb = skb_dequeue(&data->tx_queue); 250 skb = skb_dequeue(&data->tx_queue);
251 251
252 if (skb) { 252 if (skb) {
253 memcpy(urb->transfer_buffer, skb->data, skb->len); 253 skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
254 urb->transfer_buffer_length = skb->len; 254 urb->transfer_buffer_length = skb->len;
255 255
256 err = usb_submit_urb(urb, GFP_ATOMIC); 256 err = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 18b0f3992c5b..39516074636b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -303,20 +303,20 @@ static void bt3c_receive(bt3c_info_t *info)
303 switch (info->rx_state) { 303 switch (info->rx_state) {
304 304
305 case RECV_WAIT_EVENT_HEADER: 305 case RECV_WAIT_EVENT_HEADER:
306 eh = (struct hci_event_hdr *)(info->rx_skb->data); 306 eh = hci_event_hdr(info->rx_skb);
307 info->rx_state = RECV_WAIT_DATA; 307 info->rx_state = RECV_WAIT_DATA;
308 info->rx_count = eh->plen; 308 info->rx_count = eh->plen;
309 break; 309 break;
310 310
311 case RECV_WAIT_ACL_HEADER: 311 case RECV_WAIT_ACL_HEADER:
312 ah = (struct hci_acl_hdr *)(info->rx_skb->data); 312 ah = hci_acl_hdr(info->rx_skb);
313 dlen = __le16_to_cpu(ah->dlen); 313 dlen = __le16_to_cpu(ah->dlen);
314 info->rx_state = RECV_WAIT_DATA; 314 info->rx_state = RECV_WAIT_DATA;
315 info->rx_count = dlen; 315 info->rx_count = dlen;
316 break; 316 break;
317 317
318 case RECV_WAIT_SCO_HEADER: 318 case RECV_WAIT_SCO_HEADER:
319 sh = (struct hci_sco_hdr *)(info->rx_skb->data); 319 sh = hci_sco_hdr(info->rx_skb);
320 info->rx_state = RECV_WAIT_DATA; 320 info->rx_state = RECV_WAIT_DATA;
321 info->rx_count = sh->dlen; 321 info->rx_count = sh->dlen;
322 break; 322 break;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c1bce75148fe..d7d2ea0d86a1 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -250,20 +250,20 @@ static void btuart_receive(btuart_info_t *info)
250 switch (info->rx_state) { 250 switch (info->rx_state) {
251 251
252 case RECV_WAIT_EVENT_HEADER: 252 case RECV_WAIT_EVENT_HEADER:
253 eh = (struct hci_event_hdr *)(info->rx_skb->data); 253 eh = hci_event_hdr(info->rx_skb);
254 info->rx_state = RECV_WAIT_DATA; 254 info->rx_state = RECV_WAIT_DATA;
255 info->rx_count = eh->plen; 255 info->rx_count = eh->plen;
256 break; 256 break;
257 257
258 case RECV_WAIT_ACL_HEADER: 258 case RECV_WAIT_ACL_HEADER:
259 ah = (struct hci_acl_hdr *)(info->rx_skb->data); 259 ah = hci_acl_hdr(info->rx_skb);
260 dlen = __le16_to_cpu(ah->dlen); 260 dlen = __le16_to_cpu(ah->dlen);
261 info->rx_state = RECV_WAIT_DATA; 261 info->rx_state = RECV_WAIT_DATA;
262 info->rx_count = dlen; 262 info->rx_count = dlen;
263 break; 263 break;
264 264
265 case RECV_WAIT_SCO_HEADER: 265 case RECV_WAIT_SCO_HEADER:
266 sh = (struct hci_sco_hdr *)(info->rx_skb->data); 266 sh = hci_sco_hdr(info->rx_skb);
267 info->rx_state = RECV_WAIT_DATA; 267 info->rx_state = RECV_WAIT_DATA;
268 info->rx_count = sh->dlen; 268 info->rx_count = sh->dlen;
269 break; 269 break;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 459aa97937ab..7f9c54b9964a 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -425,7 +425,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
425 return -ENOMEM; 425 return -ENOMEM;
426 426
427 skb_reserve(s, NSHL); 427 skb_reserve(s, NSHL);
428 memcpy(skb_put(s, skb->len), skb->data, skb->len); 428 skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
429 if (skb->len & 0x0001) 429 if (skb->len & 0x0001)
430 *skb_put(s, 1) = 0; /* PAD */ 430 *skb_put(s, 1) = 0; /* PAD */
431 431
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 34f0afc42407..bfbae14cf93d 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -188,7 +188,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
188 continue; 188 continue;
189 189
190 case H4_W4_EVENT_HDR: 190 case H4_W4_EVENT_HDR:
191 eh = (struct hci_event_hdr *) h4->rx_skb->data; 191 eh = hci_event_hdr(h4->rx_skb);
192 192
193 BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen); 193 BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
194 194
@@ -196,7 +196,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
196 continue; 196 continue;
197 197
198 case H4_W4_ACL_HDR: 198 case H4_W4_ACL_HDR:
199 ah = (struct hci_acl_hdr *) h4->rx_skb->data; 199 ah = hci_acl_hdr(h4->rx_skb);
200 dlen = __le16_to_cpu(ah->dlen); 200 dlen = __le16_to_cpu(ah->dlen);
201 201
202 BT_DBG("ACL header: dlen %d", dlen); 202 BT_DBG("ACL header: dlen %d", dlen);
@@ -205,7 +205,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
205 continue; 205 continue;
206 206
207 case H4_W4_SCO_HDR: 207 case H4_W4_SCO_HDR:
208 sh = (struct hci_sco_hdr *) h4->rx_skb->data; 208 sh = hci_sco_hdr(h4->rx_skb);
209 209
210 BT_DBG("SCO header: dlen %d", sh->dlen); 210 BT_DBG("SCO header: dlen %d", sh->dlen);
211 211
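The Bluetooth hunks above replace open-coded casts of skb->data with typed helpers such as hci_event_hdr(), so every call site reads the same way and the cast lives in one place. A standalone sketch of that accessor pattern follows, using an invented buffer type rather than struct sk_buff.

#include <stdio.h>
#include <stdint.h>

struct demo_buf {
	unsigned char *data;	/* stand-in for skb->data */
};

struct demo_event_hdr {
	uint8_t evt;
	uint8_t plen;
};

/* One inline accessor replaces an open-coded cast at every call site. */
static inline struct demo_event_hdr *demo_event_hdr(const struct demo_buf *b)
{
	return (struct demo_event_hdr *)b->data;
}

int main(void)
{
	unsigned char raw[2] = { 0x0e, 0x04 };
	struct demo_buf b = { .data = raw };
	struct demo_event_hdr *eh = demo_event_hdr(&b);

	printf("evt=0x%02x plen=%u\n", eh->evt, eh->plen);
	return 0;
}
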
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f5c160caf9f4..5f066963f171 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -248,7 +248,7 @@ static unsigned long get_unmapped_area_mem(struct file *file,
248{ 248{
249 if (!valid_mmap_phys_addr_range(pgoff, len)) 249 if (!valid_mmap_phys_addr_range(pgoff, len))
250 return (unsigned long) -EINVAL; 250 return (unsigned long) -EINVAL;
251 return pgoff; 251 return pgoff << PAGE_SHIFT;
252} 252}
253 253
254/* can't do an in-place private mapping if there's no MMU */ 254/* can't do an in-place private mapping if there's no MMU */
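The mem.c hunk above is a unit fix: get_unmapped_area() must return a byte address, while pgoff counts pages, so the value has to be shifted left by PAGE_SHIFT. A trivial standalone illustration (PAGE_SHIFT assumed to be 12, i.e. 4 KiB pages, for the example only):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assumed 4 KiB pages, illustration only */

int main(void)
{
	unsigned long pgoff = 0x10;				/* page index */
	unsigned long addr  = pgoff << DEMO_PAGE_SHIFT;		/* byte address */

	printf("page offset 0x%lx -> address 0x%lx\n", pgoff, addr);
	return 0;
}
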
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index a61fb6da5d03..80a01150b86c 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1338,43 +1338,23 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1338 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) 1338 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
1339 * Caller should use TIOCGICOUNT to see which one it was 1339 * Caller should use TIOCGICOUNT to see which one it was
1340 */ 1340 */
1341 case TIOCMIWAIT: { 1341 case TIOCMIWAIT:
1342 DECLARE_WAITQUEUE(wait, current); 1342 spin_lock_irqsave(&info->slock, flags);
1343 int ret; 1343 cnow = info->icount; /* note the counters on entry */
1344 spin_unlock_irqrestore(&info->slock, flags);
1345
1346 wait_event_interruptible(info->delta_msr_wait, ({
1347 cprev = cnow;
1344 spin_lock_irqsave(&info->slock, flags); 1348 spin_lock_irqsave(&info->slock, flags);
1345 cprev = info->icount; /* note the counters on entry */ 1349 cnow = info->icount; /* atomic copy */
1346 spin_unlock_irqrestore(&info->slock, flags); 1350 spin_unlock_irqrestore(&info->slock, flags);
1347 1351
1348 add_wait_queue(&info->delta_msr_wait, &wait); 1352 ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
1349 while (1) { 1353 ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
1350 spin_lock_irqsave(&info->slock, flags); 1354 ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
1351 cnow = info->icount; /* atomic copy */ 1355 ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
1352 spin_unlock_irqrestore(&info->slock, flags); 1356 }));
1353 1357 break;
1354 set_current_state(TASK_INTERRUPTIBLE);
1355 if (((arg & TIOCM_RNG) &&
1356 (cnow.rng != cprev.rng)) ||
1357 ((arg & TIOCM_DSR) &&
1358 (cnow.dsr != cprev.dsr)) ||
1359 ((arg & TIOCM_CD) &&
1360 (cnow.dcd != cprev.dcd)) ||
1361 ((arg & TIOCM_CTS) &&
1362 (cnow.cts != cprev.cts))) {
1363 ret = 0;
1364 break;
1365 }
1366 /* see if a signal did it */
1367 if (signal_pending(current)) {
1368 ret = -ERESTARTSYS;
1369 break;
1370 }
1371 cprev = cnow;
1372 }
1373 current->state = TASK_RUNNING;
1374 remove_wait_queue(&info->delta_msr_wait, &wait);
1375 break;
1376 }
1377 /* NOTREACHED */
1378 /* 1358 /*
1379 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) 1359 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1380 * Return: write counters to the user passed counter struct 1360 * Return: write counters to the user passed counter struct
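The mxser.c hunk above collapses a hand-rolled wait loop (add_wait_queue, set_current_state, signal_pending, remove_wait_queue) into a single wait_event_interruptible() whose condition re-samples the counters under the spinlock. A kernel-style sketch of that pattern follows; it builds only in-kernel, and the structure, field and function names are hypothetical.

#include <linux/wait.h>
#include <linux/spinlock.h>

struct demo_port {
	spinlock_t lock;
	wait_queue_head_t delta_wait;
	unsigned int events;		/* bumped by the interrupt handler */
};

static void demo_port_init(struct demo_port *p)
{
	spin_lock_init(&p->lock);
	init_waitqueue_head(&p->delta_wait);
	p->events = 0;
}

/* Called from the interrupt handler when a status change arrives. */
static void demo_report_event(struct demo_port *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	p->events++;
	spin_unlock_irqrestore(&p->lock, flags);
	wake_up_interruptible(&p->delta_wait);
}

/* Sleep until the counter moves past the value the caller last saw.
 * wait_event_interruptible() re-checks the condition on every wakeup and
 * returns -ERESTARTSYS if a signal arrives, so no open-coded
 * add_wait_queue()/signal_pending() loop is needed.
 */
static int demo_wait_for_change(struct demo_port *p, unsigned int seen)
{
	unsigned int now;
	unsigned long flags;

	return wait_event_interruptible(p->delta_wait, ({
		spin_lock_irqsave(&p->lock, flags);
		now = p->events;		/* atomic snapshot */
		spin_unlock_irqrestore(&p->lock, flags);
		now != seen;
	}));
}
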
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
index 9af07e4999d5..f7603b6aeb87 100644
--- a/drivers/char/mxser_new.c
+++ b/drivers/char/mxser_new.c
@@ -1758,43 +1758,23 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1758 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) 1758 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
1759 * Caller should use TIOCGICOUNT to see which one it was 1759 * Caller should use TIOCGICOUNT to see which one it was
1760 */ 1760 */
1761 case TIOCMIWAIT: { 1761 case TIOCMIWAIT:
1762 DECLARE_WAITQUEUE(wait, current);
1763 int ret;
1764 spin_lock_irqsave(&info->slock, flags); 1762 spin_lock_irqsave(&info->slock, flags);
1765 cprev = info->icount; /* note the counters on entry */ 1763 cnow = info->icount; /* note the counters on entry */
1766 spin_unlock_irqrestore(&info->slock, flags); 1764 spin_unlock_irqrestore(&info->slock, flags);
1767 1765
1768 add_wait_queue(&info->delta_msr_wait, &wait); 1766 wait_event_interruptible(info->delta_msr_wait, ({
1769 while (1) { 1767 cprev = cnow;
1770 spin_lock_irqsave(&info->slock, flags); 1768 spin_lock_irqsave(&info->slock, flags);
1771 cnow = info->icount; /* atomic copy */ 1769 cnow = info->icount; /* atomic copy */
1772 spin_unlock_irqrestore(&info->slock, flags); 1770 spin_unlock_irqrestore(&info->slock, flags);
1773 1771
1774 set_current_state(TASK_INTERRUPTIBLE); 1772 ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
1775 if (((arg & TIOCM_RNG) && 1773 ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
1776 (cnow.rng != cprev.rng)) || 1774 ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
1777 ((arg & TIOCM_DSR) && 1775 ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
1778 (cnow.dsr != cprev.dsr)) || 1776 }));
1779 ((arg & TIOCM_CD) &&
1780 (cnow.dcd != cprev.dcd)) ||
1781 ((arg & TIOCM_CTS) &&
1782 (cnow.cts != cprev.cts))) {
1783 ret = 0;
1784 break;
1785 }
1786 /* see if a signal did it */
1787 if (signal_pending(current)) {
1788 ret = -ERESTARTSYS;
1789 break;
1790 }
1791 cprev = cnow;
1792 }
1793 current->state = TASK_RUNNING;
1794 remove_wait_queue(&info->delta_msr_wait, &wait);
1795 break; 1777 break;
1796 }
1797 /* NOTREACHED */
1798 /* 1778 /*
1799 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) 1779 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
1800 * Return: write counters to the user passed counter struct 1780 * Return: write counters to the user passed counter struct
@@ -2230,7 +2210,14 @@ end_intr:
2230 port->mon_data.rxcnt += cnt; 2210 port->mon_data.rxcnt += cnt;
2231 port->mon_data.up_rxcnt += cnt; 2211 port->mon_data.up_rxcnt += cnt;
2232 2212
2213 /*
2214 * We are called from an interrupt context with &port->slock
2215 * being held. Drop it temporarily in order to prevent
2216 * recursive locking.
2217 */
2218 spin_unlock(&port->slock);
2233 tty_flip_buffer_push(tty); 2219 tty_flip_buffer_push(tty);
2220 spin_lock(&port->slock);
2234} 2221}
2235 2222
2236static void mxser_transmit_chars(struct mxser_port *port) 2223static void mxser_transmit_chars(struct mxser_port *port)
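The last mxser_new.c hunk drops port->slock around tty_flip_buffer_push() because the callee can re-enter code that takes the same lock. The kernel-style sketch below (hypothetical names, in-kernel only) shows the general shape: release the lock across a callback that may acquire it, then re-take it.

#include <linux/spinlock.h>

struct demo_port {
	spinlock_t lock;
	/* ... receive buffers, counters ... */
};

/*
 * deliver() may call back into code that also takes port->lock, so the
 * lock is released across the call and re-acquired afterwards -- the same
 * shape as dropping slock around tty_flip_buffer_push() above.
 */
static void demo_push_received_data(struct demo_port *port,
				    void (*deliver)(struct demo_port *))
{
	spin_lock(&port->lock);

	/* ... stage received characters while holding the lock ... */

	spin_unlock(&port->lock);
	deliver(port);
	spin_lock(&port->lock);

	/* ... update counters, then return with the lock dropped ... */

	spin_unlock(&port->lock);
}
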
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8d025e9b5bce..157b1d09ab55 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4169,7 +4169,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
4169 netif_stop_queue(dev); 4169 netif_stop_queue(dev);
4170 4170
4171 /* copy data to device buffers */ 4171 /* copy data to device buffers */
4172 memcpy(info->tx_buf, skb->data, skb->len); 4172 skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
4173 info->tx_get = 0; 4173 info->tx_get = 0;
4174 info->tx_put = info->tx_count = skb->len; 4174 info->tx_put = info->tx_count = skb->len;
4175 4175
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b9dc7aa1dfb3..46c1b97748b6 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -881,15 +881,15 @@ EXPORT_SYMBOL(get_random_bytes);
881 */ 881 */
882static void init_std_data(struct entropy_store *r) 882static void init_std_data(struct entropy_store *r)
883{ 883{
884 struct timeval tv; 884 ktime_t now;
885 unsigned long flags; 885 unsigned long flags;
886 886
887 spin_lock_irqsave(&r->lock, flags); 887 spin_lock_irqsave(&r->lock, flags);
888 r->entropy_count = 0; 888 r->entropy_count = 0;
889 spin_unlock_irqrestore(&r->lock, flags); 889 spin_unlock_irqrestore(&r->lock, flags);
890 890
891 do_gettimeofday(&tv); 891 now = ktime_get_real();
892 add_entropy_words(r, (__u32 *)&tv, sizeof(tv)/4); 892 add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
893 add_entropy_words(r, (__u32 *)utsname(), 893 add_entropy_words(r, (__u32 *)utsname(),
894 sizeof(*(utsname()))/4); 894 sizeof(*(utsname()))/4);
895} 895}
@@ -911,14 +911,12 @@ void rand_initialize_irq(int irq)
911 return; 911 return;
912 912
913 /* 913 /*
914 * If kmalloc returns null, we just won't use that entropy 914 * If kzalloc returns null, we just won't use that entropy
915 * source. 915 * source.
916 */ 916 */
917 state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL); 917 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
918 if (state) { 918 if (state)
919 memset(state, 0, sizeof(struct timer_rand_state));
920 irq_timer_state[irq] = state; 919 irq_timer_state[irq] = state;
921 }
922} 920}
923 921
924#ifdef CONFIG_BLOCK 922#ifdef CONFIG_BLOCK
@@ -927,14 +925,12 @@ void rand_initialize_disk(struct gendisk *disk)
927 struct timer_rand_state *state; 925 struct timer_rand_state *state;
928 926
929 /* 927 /*
930 * If kmalloc returns null, we just won't use that entropy 928 * If kzalloc returns null, we just won't use that entropy
931 * source. 929 * source.
932 */ 930 */
933 state = kmalloc(sizeof(struct timer_rand_state), GFP_KERNEL); 931 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
934 if (state) { 932 if (state)
935 memset(state, 0, sizeof(struct timer_rand_state));
936 disk->random = state; 933 disk->random = state;
937 }
938} 934}
939#endif 935#endif
940 936
@@ -1469,7 +1465,6 @@ late_initcall(seqgen_init);
1469__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, 1465__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
1470 __be16 sport, __be16 dport) 1466 __be16 sport, __be16 dport)
1471{ 1467{
1472 struct timeval tv;
1473 __u32 seq; 1468 __u32 seq;
1474 __u32 hash[12]; 1469 __u32 hash[12];
1475 struct keydata *keyptr = get_keyptr(); 1470 struct keydata *keyptr = get_keyptr();
@@ -1485,8 +1480,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
1485 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; 1480 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
1486 seq += keyptr->count; 1481 seq += keyptr->count;
1487 1482
1488 do_gettimeofday(&tv); 1483 seq += ktime_get_real().tv64;
1489 seq += tv.tv_usec + tv.tv_sec * 1000000;
1490 1484
1491 return seq; 1485 return seq;
1492} 1486}
@@ -1521,7 +1515,6 @@ __u32 secure_ip_id(__be32 daddr)
1521__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 1515__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1522 __be16 sport, __be16 dport) 1516 __be16 sport, __be16 dport)
1523{ 1517{
1524 struct timeval tv;
1525 __u32 seq; 1518 __u32 seq;
1526 __u32 hash[4]; 1519 __u32 hash[4];
1527 struct keydata *keyptr = get_keyptr(); 1520 struct keydata *keyptr = get_keyptr();
@@ -1543,12 +1536,11 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1543 * As close as possible to RFC 793, which 1536 * As close as possible to RFC 793, which
1544 * suggests using a 250 kHz clock. 1537 * suggests using a 250 kHz clock.
1545 * Further reading shows this assumes 2 Mb/s networks. 1538 * Further reading shows this assumes 2 Mb/s networks.
1546 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. 1539 * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
1547 * That's funny, Linux has one built in! Use it! 1540 * That's funny, Linux has one built in! Use it!
1548 * (Networks are faster now - should this be increased?) 1541 * (Networks are faster now - should this be increased?)
1549 */ 1542 */
1550 do_gettimeofday(&tv); 1543 seq += ktime_get_real().tv64;
1551 seq += tv.tv_usec + tv.tv_sec * 1000000;
1552#if 0 1544#if 0
1553 printk("init_seq(%lx, %lx, %d, %d) = %d\n", 1545 printk("init_seq(%lx, %lx, %d, %d) = %d\n",
1554 saddr, daddr, sport, dport, seq); 1546 saddr, daddr, sport, dport, seq);
@@ -1556,8 +1548,6 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1556 return seq; 1548 return seq;
1557} 1549}
1558 1550
1559EXPORT_SYMBOL(secure_tcp_sequence_number);
1560
1561/* Generate secure starting point for ephemeral IPV4 transport port search */ 1551/* Generate secure starting point for ephemeral IPV4 transport port search */
1562u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) 1552u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
1563{ 1553{
@@ -1598,7 +1588,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16
1598u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, 1588u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
1599 __be16 sport, __be16 dport) 1589 __be16 sport, __be16 dport)
1600{ 1590{
1601 struct timeval tv;
1602 u64 seq; 1591 u64 seq;
1603 __u32 hash[4]; 1592 __u32 hash[4];
1604 struct keydata *keyptr = get_keyptr(); 1593 struct keydata *keyptr = get_keyptr();
@@ -1611,8 +1600,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
1611 seq = half_md4_transform(hash, keyptr->secret); 1600 seq = half_md4_transform(hash, keyptr->secret);
1612 seq |= ((u64)keyptr->count) << (32 - HASH_BITS); 1601 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
1613 1602
1614 do_gettimeofday(&tv); 1603 seq += ktime_get_real().tv64;
1615 seq += tv.tv_usec + tv.tv_sec * 1000000;
1616 seq &= (1ull << 48) - 1; 1604 seq &= (1ull << 48) - 1;
1617#if 0 1605#if 0
1618 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n", 1606 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
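Two of the random.c hunks above replace kmalloc() followed by memset() with kzalloc(), which returns already-zeroed memory and removes a brace level at the call site. A minimal kernel-style sketch, with a hypothetical structure name:

#include <linux/slab.h>

struct demo_rand_state {
	unsigned long last_time;
	unsigned int dont_count_entropy;
};

static struct demo_rand_state *demo_alloc_state(void)
{
	/* kzalloc() == kmalloc() + memset(.., 0, ..): the allocation comes
	 * back zeroed, so the explicit memset() and the extra braces around
	 * it go away.
	 */
	return kzalloc(sizeof(struct demo_rand_state), GFP_KERNEL);
}
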
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a905f7820331..a7b9e9bb3e8d 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -212,7 +212,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
212 skb = skb_get(__skb); 212 skb = skb_get(__skb);
213 213
214 if (skb->len >= NLMSG_SPACE(0)) { 214 if (skb->len >= NLMSG_SPACE(0)) {
215 nlh = (struct nlmsghdr *)skb->data; 215 nlh = nlmsg_hdr(skb);
216 216
217 if (nlh->nlmsg_len < sizeof(struct cn_msg) || 217 if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
218 skb->len < nlh->nlmsg_len || 218 skb->len < nlh->nlmsg_len ||
@@ -448,7 +448,7 @@ static int __devinit cn_init(void)
448 448
449 dev->nls = netlink_kernel_create(NETLINK_CONNECTOR, 449 dev->nls = netlink_kernel_create(NETLINK_CONNECTOR,
450 CN_NETLINK_USERS + 0xf, 450 CN_NETLINK_USERS + 0xf,
451 dev->input, THIS_MODULE); 451 dev->input, NULL, THIS_MODULE);
452 if (!dev->nls) 452 if (!dev->nls)
453 return -EIO; 453 return -EIO;
454 454
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index da5828f2dfc2..30a76404f0af 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -121,9 +121,9 @@ superio_exit(void)
121 * ISA constants 121 * ISA constants
122 */ 122 */
123 123
124#define REGION_ALIGNMENT ~7 124#define IOREGION_ALIGNMENT ~7
125#define REGION_OFFSET 5 125#define IOREGION_OFFSET 5
126#define REGION_LENGTH 2 126#define IOREGION_LENGTH 2
127#define ADDR_REG_OFFSET 5 127#define ADDR_REG_OFFSET 5
128#define DATA_REG_OFFSET 6 128#define DATA_REG_OFFSET 6
129 129
@@ -407,7 +407,7 @@ static void w83627ehf_write_fan_div(struct i2c_client *client, int nr)
407 break; 407 break;
408 case 4: 408 case 4:
409 reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73) 409 reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73)
410 | ((data->fan_div[4] & 0x03) << 3) 410 | ((data->fan_div[4] & 0x03) << 2)
411 | ((data->fan_div[4] & 0x04) << 5); 411 | ((data->fan_div[4] & 0x04) << 5);
412 w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg); 412 w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg);
413 break; 413 break;
@@ -471,9 +471,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
471 time */ 471 time */
472 if (data->fan[i] == 0xff 472 if (data->fan[i] == 0xff
473 && data->fan_div[i] < 0x07) { 473 && data->fan_div[i] < 0x07) {
474 dev_dbg(&client->dev, "Increasing fan %d " 474 dev_dbg(&client->dev, "Increasing fan%d "
475 "clock divider from %u to %u\n", 475 "clock divider from %u to %u\n",
476 i, div_from_reg(data->fan_div[i]), 476 i + 1, div_from_reg(data->fan_div[i]),
477 div_from_reg(data->fan_div[i] + 1)); 477 div_from_reg(data->fan_div[i] + 1));
478 data->fan_div[i]++; 478 data->fan_div[i]++;
479 w83627ehf_write_fan_div(client, i); 479 w83627ehf_write_fan_div(client, i);
@@ -1194,7 +1194,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
1194 u8 fan4pin, fan5pin; 1194 u8 fan4pin, fan5pin;
1195 int i, err = 0; 1195 int i, err = 0;
1196 1196
1197 if (!request_region(address + REGION_OFFSET, REGION_LENGTH, 1197 if (!request_region(address + IOREGION_OFFSET, IOREGION_LENGTH,
1198 w83627ehf_driver.driver.name)) { 1198 w83627ehf_driver.driver.name)) {
1199 err = -EBUSY; 1199 err = -EBUSY;
1200 goto exit; 1200 goto exit;
@@ -1322,7 +1322,7 @@ exit_remove:
1322exit_free: 1322exit_free:
1323 kfree(data); 1323 kfree(data);
1324exit_release: 1324exit_release:
1325 release_region(address + REGION_OFFSET, REGION_LENGTH); 1325 release_region(address + IOREGION_OFFSET, IOREGION_LENGTH);
1326exit: 1326exit:
1327 return err; 1327 return err;
1328} 1328}
@@ -1337,7 +1337,7 @@ static int w83627ehf_detach_client(struct i2c_client *client)
1337 1337
1338 if ((err = i2c_detach_client(client))) 1338 if ((err = i2c_detach_client(client)))
1339 return err; 1339 return err;
1340 release_region(client->addr + REGION_OFFSET, REGION_LENGTH); 1340 release_region(client->addr + IOREGION_OFFSET, IOREGION_LENGTH);
1341 kfree(data); 1341 kfree(data);
1342 1342
1343 return 0; 1343 return 0;
@@ -1380,7 +1380,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr)
1380 superio_select(W83627EHF_LD_HWM); 1380 superio_select(W83627EHF_LD_HWM);
1381 val = (superio_inb(SIO_REG_ADDR) << 8) 1381 val = (superio_inb(SIO_REG_ADDR) << 8)
1382 | superio_inb(SIO_REG_ADDR + 1); 1382 | superio_inb(SIO_REG_ADDR + 1);
1383 *addr = val & REGION_ALIGNMENT; 1383 *addr = val & IOREGION_ALIGNMENT;
1384 if (*addr == 0) { 1384 if (*addr == 0) {
1385 superio_exit(); 1385 superio_exit();
1386 return -ENODEV; 1386 return -ENODEV;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index fb19dbb31e42..ece31d2c6c64 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -344,8 +344,7 @@ config I2C_PARPORT_LIGHT
344 344
345config I2C_PASEMI 345config I2C_PASEMI
346 tristate "PA Semi SMBus interface" 346 tristate "PA Semi SMBus interface"
347# depends on PPC_PASEMI && I2C && PCI 347 depends on PPC_PASEMI && I2C && PCI
348 depends on I2C && PCI
349 help 348 help
350 Supports the PA Semi PWRficient on-chip SMBus interfaces. 349 Supports the PA Semi PWRficient on-chip SMBus interfaces.
351 350
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index f54fb5d65cc4..bf89eeef74e9 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -141,7 +141,7 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
141 for (i = 0; i < msg->len - 1; i++) 141 for (i = 0; i < msg->len - 1; i++)
142 TXFIFO_WR(smbus, msg->buf[i]); 142 TXFIFO_WR(smbus, msg->buf[i]);
143 143
144 TXFIFO_WR(smbus, msg->buf[msg->len] | 144 TXFIFO_WR(smbus, msg->buf[msg->len-1] |
145 (stop ? MTXFIFO_STOP : 0)); 145 (stop ? MTXFIFO_STOP : 0));
146 } 146 }
147 147
@@ -226,7 +226,7 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
226 rd = RXFIFO_RD(smbus); 226 rd = RXFIFO_RD(smbus);
227 len = min_t(u8, (rd & MRXFIFO_DATA_M), 227 len = min_t(u8, (rd & MRXFIFO_DATA_M),
228 I2C_SMBUS_BLOCK_MAX); 228 I2C_SMBUS_BLOCK_MAX);
229 TXFIFO_WR(smbus, (len + 1) | MTXFIFO_READ | 229 TXFIFO_WR(smbus, len | MTXFIFO_READ |
230 MTXFIFO_STOP); 230 MTXFIFO_STOP);
231 } else { 231 } else {
232 len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX); 232 len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX);
@@ -258,7 +258,7 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
258 rd = RXFIFO_RD(smbus); 258 rd = RXFIFO_RD(smbus);
259 len = min_t(u8, (rd & MRXFIFO_DATA_M), 259 len = min_t(u8, (rd & MRXFIFO_DATA_M),
260 I2C_SMBUS_BLOCK_MAX - len); 260 I2C_SMBUS_BLOCK_MAX - len);
261 TXFIFO_WR(smbus, (len + 1) | MTXFIFO_READ | MTXFIFO_STOP); 261 TXFIFO_WR(smbus, len | MTXFIFO_READ | MTXFIFO_STOP);
262 break; 262 break;
263 263
264 default: 264 default:
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index ca2e4f830c39..5bdf64b77913 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -57,6 +57,7 @@ if IDE
57config IDE_MAX_HWIFS 57config IDE_MAX_HWIFS
58 int "Max IDE interfaces" 58 int "Max IDE interfaces"
59 depends on ALPHA || SUPERH || IA64 || EMBEDDED 59 depends on ALPHA || SUPERH || IA64 || EMBEDDED
60 range 1 10
60 default 4 61 default 4
61 help 62 help
62 This is the maximum number of IDE hardware interfaces that will 63 This is the maximum number of IDE hardware interfaces that will
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index afb71c66b6f3..a9e0b30fb1f2 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -310,14 +310,12 @@ static int proc_ide_read_driver
310 ide_driver_t *ide_drv; 310 ide_driver_t *ide_drv;
311 int len; 311 int len;
312 312
313 down_read(&dev->bus->subsys.rwsem);
314 if (dev->driver) { 313 if (dev->driver) {
315 ide_drv = container_of(dev->driver, ide_driver_t, gen_driver); 314 ide_drv = container_of(dev->driver, ide_driver_t, gen_driver);
316 len = sprintf(page, "%s version %s\n", 315 len = sprintf(page, "%s version %s\n",
317 dev->driver->name, ide_drv->version); 316 dev->driver->name, ide_drv->version);
318 } else 317 } else
319 len = sprintf(page, "ide-default version 0.9.newide\n"); 318 len = sprintf(page, "ide-default version 0.9.newide\n");
320 up_read(&dev->bus->subsys.rwsem);
321 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 319 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
322} 320}
323 321
@@ -327,7 +325,6 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
327 int ret = 1; 325 int ret = 1;
328 int err; 326 int err;
329 327
330 down_write(&dev->bus->subsys.rwsem);
331 device_release_driver(dev); 328 device_release_driver(dev);
332 /* FIXME: device can still be in use by previous driver */ 329 /* FIXME: device can still be in use by previous driver */
333 strlcpy(drive->driver_req, driver, sizeof(drive->driver_req)); 330 strlcpy(drive->driver_req, driver, sizeof(drive->driver_req));
@@ -345,7 +342,6 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
345 } 342 }
346 if (dev->driver && !strcmp(dev->driver->name, driver)) 343 if (dev->driver && !strcmp(dev->driver->name, driver))
347 ret = 0; 344 ret = 0;
348 up_write(&dev->bus->subsys.rwsem);
349 345
350 return ret; 346 return ret;
351} 347}
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index d4b753e70119..dd7ec37fdeab 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -108,6 +108,7 @@ delkin_cb_remove (struct pci_dev *dev)
108 108
109static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = { 109static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
110 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 110 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
111 { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
111 { 0, }, 112 { 0, },
112}; 113};
113MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl); 114MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 60ecdc258c7c..ab6fa271aeb3 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * linux/drivers/ide/pci/hpt366.c Version 1.01 Dec 23, 2006 2 * linux/drivers/ide/pci/hpt366.c Version 1.02 Apr 18, 2007
3 * 3 *
4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
5 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 5 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
6 * Portions Copyright (C) 2003 Red Hat Inc 6 * Portions Copyright (C) 2003 Red Hat Inc
7 * Portions Copyright (C) 2005-2006 MontaVista Software, Inc. 7 * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
8 * 8 *
9 * Thanks to HighPoint Technologies for their assistance, and hardware. 9 * Thanks to HighPoint Technologies for their assistance, and hardware.
10 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his 10 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
@@ -494,6 +494,7 @@ static struct hpt_info hpt302n __devinitdata = {
494 .chip_type = HPT302N, 494 .chip_type = HPT302N,
495 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3, 495 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3,
496 .dpll_clk = 77, 496 .dpll_clk = 77,
497 .settings = hpt37x_settings
497}; 498};
498 499
499static struct hpt_info hpt371n __devinitdata = { 500static struct hpt_info hpt371n __devinitdata = {
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 03e44b337eb0..a364003ba47f 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -834,7 +834,7 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb,
834 struct eth1394hdr *eth; 834 struct eth1394hdr *eth;
835 unsigned char *rawp; 835 unsigned char *rawp;
836 836
837 skb->mac.raw = skb->data; 837 skb_reset_mac_header(skb);
838 skb_pull (skb, ETH1394_HLEN); 838 skb_pull (skb, ETH1394_HLEN);
839 eth = eth1394_hdr(skb); 839 eth = eth1394_hdr(skb);
840 840
@@ -1668,7 +1668,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1668 if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 || 1668 if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
1669 proto == htons(ETH_P_ARP) || 1669 proto == htons(ETH_P_ARP) ||
1670 (proto == htons(ETH_P_IP) && 1670 (proto == htons(ETH_P_IP) &&
1671 IN_MULTICAST(ntohl(skb->nh.iph->daddr)))) { 1671 IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
1672 tx_type = ETH1394_GASP; 1672 tx_type = ETH1394_GASP;
1673 dest_node = LOCAL_BUS | ALL_NODES; 1673 dest_node = LOCAL_BUS | ALL_NODES;
1674 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD; 1674 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index c45cbff9138d..1e8356535149 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -90,7 +90,7 @@ struct eth1394hdr {
90 90
91static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb) 91static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
92{ 92{
93 return (struct eth1394hdr *)skb->mac.raw; 93 return (struct eth1394hdr *)skb_mac_header(skb);
94} 94}
95#endif 95#endif
96 96
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index c5ace190bfe6..dbeba45a031e 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -370,9 +370,7 @@ static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute
370 370
371 if (state == 1) { 371 if (state == 1) {
372 ud->ignore_driver = 1; 372 ud->ignore_driver = 1;
373 down_write(&ieee1394_bus_type.subsys.rwsem);
374 device_release_driver(dev); 373 device_release_driver(dev);
375 up_write(&ieee1394_bus_type.subsys.rwsem);
376 } else if (state == 0) 374 } else if (state == 0)
377 ud->ignore_driver = 0; 375 ud->ignore_driver = 0;
378 376
@@ -1163,6 +1161,7 @@ static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
1163 struct unit_directory *ud; 1161 struct unit_directory *ud;
1164 int i = 0; 1162 int i = 0;
1165 int length = 0; 1163 int length = 0;
1164 int retval = 0;
1166 /* ieee1394:venNmoNspNverN */ 1165 /* ieee1394:venNmoNspNverN */
1167 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1]; 1166 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
1168 1167
@@ -1176,14 +1175,11 @@ static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp,
1176 1175
1177#define PUT_ENVP(fmt,val) \ 1176#define PUT_ENVP(fmt,val) \
1178do { \ 1177do { \
1179 int printed; \ 1178 retval = add_uevent_var(envp, num_envp, &i, \
1180 envp[i++] = buffer; \ 1179 buffer, buffer_size, &length, \
1181 printed = snprintf(buffer, buffer_size - length, \ 1180 fmt, val); \
1182 fmt, val); \ 1181 if (retval) \
1183 if ((buffer_size - (length+printed) <= 0) || (i >= num_envp)) \ 1182 return retval; \
1184 return -ENOMEM; \
1185 length += printed+1; \
1186 buffer += printed+1; \
1187} while (0) 1183} while (0)
1188 1184
1189 PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id); 1185 PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id);
@@ -1393,12 +1389,10 @@ static void nodemgr_suspend_ne(struct node_entry *ne)
1393 if (ud->ne != ne) 1389 if (ud->ne != ne)
1394 continue; 1390 continue;
1395 1391
1396 down_write(&ieee1394_bus_type.subsys.rwsem);
1397 if (ud->device.driver && 1392 if (ud->device.driver &&
1398 (!ud->device.driver->suspend || 1393 (!ud->device.driver->suspend ||
1399 ud->device.driver->suspend(&ud->device, PMSG_SUSPEND))) 1394 ud->device.driver->suspend(&ud->device, PMSG_SUSPEND)))
1400 device_release_driver(&ud->device); 1395 device_release_driver(&ud->device);
1401 up_write(&ieee1394_bus_type.subsys.rwsem);
1402 } 1396 }
1403 up(&nodemgr_ud_class.sem); 1397 up(&nodemgr_ud_class.sem);
1404} 1398}
@@ -1418,10 +1412,8 @@ static void nodemgr_resume_ne(struct node_entry *ne)
1418 if (ud->ne != ne) 1412 if (ud->ne != ne)
1419 continue; 1413 continue;
1420 1414
1421 down_read(&ieee1394_bus_type.subsys.rwsem);
1422 if (ud->device.driver && ud->device.driver->resume) 1415 if (ud->device.driver && ud->device.driver->resume)
1423 ud->device.driver->resume(&ud->device); 1416 ud->device.driver->resume(&ud->device);
1424 up_read(&ieee1394_bus_type.subsys.rwsem);
1425 } 1417 }
1426 up(&nodemgr_ud_class.sem); 1418 up(&nodemgr_ud_class.sem);
1427 1419
@@ -1442,7 +1434,6 @@ static void nodemgr_update_pdrv(struct node_entry *ne)
1442 if (ud->ne != ne) 1434 if (ud->ne != ne)
1443 continue; 1435 continue;
1444 1436
1445 down_write(&ieee1394_bus_type.subsys.rwsem);
1446 if (ud->device.driver) { 1437 if (ud->device.driver) {
1447 pdrv = container_of(ud->device.driver, 1438 pdrv = container_of(ud->device.driver,
1448 struct hpsb_protocol_driver, 1439 struct hpsb_protocol_driver,
@@ -1450,7 +1441,6 @@ static void nodemgr_update_pdrv(struct node_entry *ne)
1450 if (pdrv->update && pdrv->update(ud)) 1441 if (pdrv->update && pdrv->update(ud))
1451 device_release_driver(&ud->device); 1442 device_release_driver(&ud->device);
1452 } 1443 }
1453 up_write(&ieee1394_bus_type.subsys.rwsem);
1454 } 1444 }
1455 up(&nodemgr_ud_class.sem); 1445 up(&nodemgr_ud_class.sem);
1456} 1446}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 13efd4170349..6edfecf1be72 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * 5 *
@@ -31,7 +31,6 @@
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE. 32 * SOFTWARE.
33 * 33 *
34 * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
35 */ 34 */
36#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
37#include <rdma/ib_cache.h> 36#include <rdma/ib_cache.h>
@@ -668,7 +667,7 @@ static void build_smp_wc(struct ib_qp *qp,
668static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, 667static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
669 struct ib_mad_send_wr_private *mad_send_wr) 668 struct ib_mad_send_wr_private *mad_send_wr)
670{ 669{
671 int ret; 670 int ret = 0;
672 struct ib_smp *smp = mad_send_wr->send_buf.mad; 671 struct ib_smp *smp = mad_send_wr->send_buf.mad;
673 unsigned long flags; 672 unsigned long flags;
674 struct ib_mad_local_private *local; 673 struct ib_mad_local_private *local;
@@ -688,14 +687,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
688 */ 687 */
689 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == 688 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
690 IB_LID_PERMISSIVE && 689 IB_LID_PERMISSIVE &&
691 !smi_handle_dr_smp_send(smp, device->node_type, port_num)) { 690 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
691 IB_SMI_DISCARD) {
692 ret = -EINVAL; 692 ret = -EINVAL;
693 printk(KERN_ERR PFX "Invalid directed route\n"); 693 printk(KERN_ERR PFX "Invalid directed route\n");
694 goto out; 694 goto out;
695 } 695 }
696
696 /* Check to post send on QP or process locally */ 697 /* Check to post send on QP or process locally */
697 ret = smi_check_local_smp(smp, device); 698 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
698 if (!ret)
699 goto out; 699 goto out;
700 700
701 local = kmalloc(sizeof *local, GFP_ATOMIC); 701 local = kmalloc(sizeof *local, GFP_ATOMIC);
@@ -1874,18 +1874,22 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1874 1874
1875 if (recv->mad.mad.mad_hdr.mgmt_class == 1875 if (recv->mad.mad.mad_hdr.mgmt_class ==
1876 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 1876 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1877 if (!smi_handle_dr_smp_recv(&recv->mad.smp, 1877 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1878 port_priv->device->node_type, 1878 port_priv->device->node_type,
1879 port_priv->port_num, 1879 port_priv->port_num,
1880 port_priv->device->phys_port_cnt)) 1880 port_priv->device->phys_port_cnt) ==
1881 IB_SMI_DISCARD)
1881 goto out; 1882 goto out;
1882 if (!smi_check_forward_dr_smp(&recv->mad.smp)) 1883
1884 if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL)
1883 goto local; 1885 goto local;
1884 if (!smi_handle_dr_smp_send(&recv->mad.smp, 1886
1885 port_priv->device->node_type, 1887 if (smi_handle_dr_smp_send(&recv->mad.smp,
1886 port_priv->port_num)) 1888 port_priv->device->node_type,
1889 port_priv->port_num) == IB_SMI_DISCARD)
1887 goto out; 1890 goto out;
1888 if (!smi_check_local_smp(&recv->mad.smp, port_priv->device)) 1891
1892 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1889 goto out; 1893 goto out;
1890 } 1894 }
1891 1895
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 68db633711c5..9a7eaadb1688 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -57,6 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL");
57struct ib_sa_sm_ah { 57struct ib_sa_sm_ah {
58 struct ib_ah *ah; 58 struct ib_ah *ah;
59 struct kref ref; 59 struct kref ref;
60 u8 src_path_mask;
60}; 61};
61 62
62struct ib_sa_port { 63struct ib_sa_port {
@@ -380,6 +381,7 @@ static void update_sm_ah(struct work_struct *work)
380 } 381 }
381 382
382 kref_init(&new_ah->ref); 383 kref_init(&new_ah->ref);
384 new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
383 385
384 memset(&ah_attr, 0, sizeof ah_attr); 386 memset(&ah_attr, 0, sizeof ah_attr);
385 ah_attr.dlid = port_attr.sm_lid; 387 ah_attr.dlid = port_attr.sm_lid;
@@ -460,6 +462,25 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
460} 462}
461EXPORT_SYMBOL(ib_sa_cancel_query); 463EXPORT_SYMBOL(ib_sa_cancel_query);
462 464
465static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
466{
467 struct ib_sa_device *sa_dev;
468 struct ib_sa_port *port;
469 unsigned long flags;
470 u8 src_path_mask;
471
472 sa_dev = ib_get_client_data(device, &sa_client);
473 if (!sa_dev)
474 return 0x7f;
475
476 port = &sa_dev->port[port_num - sa_dev->start_port];
477 spin_lock_irqsave(&port->ah_lock, flags);
478 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
479 spin_unlock_irqrestore(&port->ah_lock, flags);
480
481 return src_path_mask;
482}
483
463int ib_init_ah_from_path(struct ib_device *device, u8 port_num, 484int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
464 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr) 485 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
465{ 486{
@@ -469,7 +490,8 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
469 memset(ah_attr, 0, sizeof *ah_attr); 490 memset(ah_attr, 0, sizeof *ah_attr);
470 ah_attr->dlid = be16_to_cpu(rec->dlid); 491 ah_attr->dlid = be16_to_cpu(rec->dlid);
471 ah_attr->sl = rec->sl; 492 ah_attr->sl = rec->sl;
472 ah_attr->src_path_bits = be16_to_cpu(rec->slid) & 0x7f; 493 ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
494 get_src_path_mask(device, port_num);
473 ah_attr->port_num = port_num; 495 ah_attr->port_num = port_num;
474 ah_attr->static_rate = rec->rate; 496 ah_attr->static_rate = rec->rate;
475 497
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 54b81e17ad50..2bca753eb622 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -3,7 +3,7 @@
3 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. 3 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * 8 *
9 * This software is available to you under a choice of one of two 9 * This software is available to you under a choice of one of two
@@ -34,7 +34,6 @@
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE. 35 * SOFTWARE.
36 * 36 *
37 * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
38 */ 37 */
39 38
40#include <rdma/ib_smi.h> 39#include <rdma/ib_smi.h>
@@ -44,9 +43,8 @@
44 * Fixup a directed route SMP for sending 43 * Fixup a directed route SMP for sending
45 * Return 0 if the SMP should be discarded 44 * Return 0 if the SMP should be discarded
46 */ 45 */
47int smi_handle_dr_smp_send(struct ib_smp *smp, 46enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
48 u8 node_type, 47 u8 node_type, int port_num)
49 int port_num)
50{ 48{
51 u8 hop_ptr, hop_cnt; 49 u8 hop_ptr, hop_cnt;
52 50
@@ -59,18 +57,18 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
59 if (hop_cnt && hop_ptr == 0) { 57 if (hop_cnt && hop_ptr == 0) {
60 smp->hop_ptr++; 58 smp->hop_ptr++;
61 return (smp->initial_path[smp->hop_ptr] == 59 return (smp->initial_path[smp->hop_ptr] ==
62 port_num); 60 port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
63 } 61 }
64 62
65 /* C14-9:2 */ 63 /* C14-9:2 */
66 if (hop_ptr && hop_ptr < hop_cnt) { 64 if (hop_ptr && hop_ptr < hop_cnt) {
67 if (node_type != RDMA_NODE_IB_SWITCH) 65 if (node_type != RDMA_NODE_IB_SWITCH)
68 return 0; 66 return IB_SMI_DISCARD;
69 67
70 /* smp->return_path set when received */ 68 /* smp->return_path set when received */
71 smp->hop_ptr++; 69 smp->hop_ptr++;
72 return (smp->initial_path[smp->hop_ptr] == 70 return (smp->initial_path[smp->hop_ptr] ==
73 port_num); 71 port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
74 } 72 }
75 73
76 /* C14-9:3 -- We're at the end of the DR segment of path */ 74 /* C14-9:3 -- We're at the end of the DR segment of path */
@@ -78,29 +76,30 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
78 /* smp->return_path set when received */ 76 /* smp->return_path set when received */
79 smp->hop_ptr++; 77 smp->hop_ptr++;
80 return (node_type == RDMA_NODE_IB_SWITCH || 78 return (node_type == RDMA_NODE_IB_SWITCH ||
81 smp->dr_dlid == IB_LID_PERMISSIVE); 79 smp->dr_dlid == IB_LID_PERMISSIVE ?
80 IB_SMI_HANDLE : IB_SMI_DISCARD);
82 } 81 }
83 82
84 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ 83 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
85 /* C14-9:5 -- Fail unreasonable hop pointer */ 84 /* C14-9:5 -- Fail unreasonable hop pointer */
86 return (hop_ptr == hop_cnt + 1); 85 return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
87 86
88 } else { 87 } else {
89 /* C14-13:1 */ 88 /* C14-13:1 */
90 if (hop_cnt && hop_ptr == hop_cnt + 1) { 89 if (hop_cnt && hop_ptr == hop_cnt + 1) {
91 smp->hop_ptr--; 90 smp->hop_ptr--;
92 return (smp->return_path[smp->hop_ptr] == 91 return (smp->return_path[smp->hop_ptr] ==
93 port_num); 92 port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
94 } 93 }
95 94
96 /* C14-13:2 */ 95 /* C14-13:2 */
97 if (2 <= hop_ptr && hop_ptr <= hop_cnt) { 96 if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
98 if (node_type != RDMA_NODE_IB_SWITCH) 97 if (node_type != RDMA_NODE_IB_SWITCH)
99 return 0; 98 return IB_SMI_DISCARD;
100 99
101 smp->hop_ptr--; 100 smp->hop_ptr--;
102 return (smp->return_path[smp->hop_ptr] == 101 return (smp->return_path[smp->hop_ptr] ==
103 port_num); 102 port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
104 } 103 }
105 104
106 /* C14-13:3 -- at the end of the DR segment of path */ 105 /* C14-13:3 -- at the end of the DR segment of path */
@@ -108,15 +107,16 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
108 smp->hop_ptr--; 107 smp->hop_ptr--;
109 /* C14-13:3 -- SMPs destined for SM shouldn't be here */ 108 /* C14-13:3 -- SMPs destined for SM shouldn't be here */
110 return (node_type == RDMA_NODE_IB_SWITCH || 109 return (node_type == RDMA_NODE_IB_SWITCH ||
111 smp->dr_slid == IB_LID_PERMISSIVE); 110 smp->dr_slid == IB_LID_PERMISSIVE ?
111 IB_SMI_HANDLE : IB_SMI_DISCARD);
112 } 112 }
113 113
114 /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */ 114 /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */
115 if (hop_ptr == 0) 115 if (hop_ptr == 0)
116 return 1; 116 return IB_SMI_HANDLE;
117 117
118 /* C14-13:5 -- Check for unreasonable hop pointer */ 118 /* C14-13:5 -- Check for unreasonable hop pointer */
119 return 0; 119 return IB_SMI_DISCARD;
120 } 120 }
121} 121}
122 122
@@ -124,10 +124,8 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
124 * Adjust information for a received SMP 124 * Adjust information for a received SMP
125 * Return 0 if the SMP should be dropped 125 * Return 0 if the SMP should be dropped
126 */ 126 */
127int smi_handle_dr_smp_recv(struct ib_smp *smp, 127enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
128 u8 node_type, 128 int port_num, int phys_port_cnt)
129 int port_num,
130 int phys_port_cnt)
131{ 129{
132 u8 hop_ptr, hop_cnt; 130 u8 hop_ptr, hop_cnt;
133 131
@@ -138,16 +136,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
138 if (!ib_get_smp_direction(smp)) { 136 if (!ib_get_smp_direction(smp)) {
139 /* C14-9:1 -- sender should have incremented hop_ptr */ 137 /* C14-9:1 -- sender should have incremented hop_ptr */
140 if (hop_cnt && hop_ptr == 0) 138 if (hop_cnt && hop_ptr == 0)
141 return 0; 139 return IB_SMI_DISCARD;
142 140
143 /* C14-9:2 -- intermediate hop */ 141 /* C14-9:2 -- intermediate hop */
144 if (hop_ptr && hop_ptr < hop_cnt) { 142 if (hop_ptr && hop_ptr < hop_cnt) {
145 if (node_type != RDMA_NODE_IB_SWITCH) 143 if (node_type != RDMA_NODE_IB_SWITCH)
146 return 0; 144 return IB_SMI_DISCARD;
147 145
148 smp->return_path[hop_ptr] = port_num; 146 smp->return_path[hop_ptr] = port_num;
149 /* smp->hop_ptr updated when sending */ 147 /* smp->hop_ptr updated when sending */
150 return (smp->initial_path[hop_ptr+1] <= phys_port_cnt); 148 return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ?
149 IB_SMI_HANDLE : IB_SMI_DISCARD);
151 } 150 }
152 151
153 /* C14-9:3 -- We're at the end of the DR segment of path */ 152 /* C14-9:3 -- We're at the end of the DR segment of path */
@@ -157,12 +156,13 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
157 /* smp->hop_ptr updated when sending */ 156 /* smp->hop_ptr updated when sending */
158 157
159 return (node_type == RDMA_NODE_IB_SWITCH || 158 return (node_type == RDMA_NODE_IB_SWITCH ||
160 smp->dr_dlid == IB_LID_PERMISSIVE); 159 smp->dr_dlid == IB_LID_PERMISSIVE ?
160 IB_SMI_HANDLE : IB_SMI_DISCARD);
161 } 161 }
162 162
163 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ 163 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
164 /* C14-9:5 -- fail unreasonable hop pointer */ 164 /* C14-9:5 -- fail unreasonable hop pointer */
165 return (hop_ptr == hop_cnt + 1); 165 return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
166 166
167 } else { 167 } else {
168 168
@@ -170,16 +170,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
170 if (hop_cnt && hop_ptr == hop_cnt + 1) { 170 if (hop_cnt && hop_ptr == hop_cnt + 1) {
171 smp->hop_ptr--; 171 smp->hop_ptr--;
172 return (smp->return_path[smp->hop_ptr] == 172 return (smp->return_path[smp->hop_ptr] ==
173 port_num); 173 port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
174 } 174 }
175 175
176 /* C14-13:2 */ 176 /* C14-13:2 */
177 if (2 <= hop_ptr && hop_ptr <= hop_cnt) { 177 if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
178 if (node_type != RDMA_NODE_IB_SWITCH) 178 if (node_type != RDMA_NODE_IB_SWITCH)
179 return 0; 179 return IB_SMI_DISCARD;
180 180
181 /* smp->hop_ptr updated when sending */ 181 /* smp->hop_ptr updated when sending */
182 return (smp->return_path[hop_ptr-1] <= phys_port_cnt); 182 return (smp->return_path[hop_ptr-1] <= phys_port_cnt ?
183 IB_SMI_HANDLE : IB_SMI_DISCARD);
183 } 184 }
184 185
185 /* C14-13:3 -- We're at the end of the DR segment of path */ 186 /* C14-13:3 -- We're at the end of the DR segment of path */
@@ -187,23 +188,20 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
187 if (smp->dr_slid == IB_LID_PERMISSIVE) { 188 if (smp->dr_slid == IB_LID_PERMISSIVE) {
188 /* giving SMP to SM - update hop_ptr */ 189 /* giving SMP to SM - update hop_ptr */
189 smp->hop_ptr--; 190 smp->hop_ptr--;
190 return 1; 191 return IB_SMI_HANDLE;
191 } 192 }
192 /* smp->hop_ptr updated when sending */ 193 /* smp->hop_ptr updated when sending */
193 return (node_type == RDMA_NODE_IB_SWITCH); 194 return (node_type == RDMA_NODE_IB_SWITCH ?
195 IB_SMI_HANDLE: IB_SMI_DISCARD);
194 } 196 }
195 197
196 /* C14-13:4 -- hop_ptr = 0 -> give to SM */ 198 /* C14-13:4 -- hop_ptr = 0 -> give to SM */
197 /* C14-13:5 -- Check for unreasonable hop pointer */ 199 /* C14-13:5 -- Check for unreasonable hop pointer */
198 return (hop_ptr == 0); 200 return (hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
199 } 201 }
200} 202}
201 203
202/* 204enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
203 * Return 1 if the received DR SMP should be forwarded to the send queue
204 * Return 0 if the SMP should be completed up the stack
205 */
206int smi_check_forward_dr_smp(struct ib_smp *smp)
207{ 205{
208 u8 hop_ptr, hop_cnt; 206 u8 hop_ptr, hop_cnt;
209 207
@@ -213,23 +211,25 @@ int smi_check_forward_dr_smp(struct ib_smp *smp)
213 if (!ib_get_smp_direction(smp)) { 211 if (!ib_get_smp_direction(smp)) {
214 /* C14-9:2 -- intermediate hop */ 212 /* C14-9:2 -- intermediate hop */
215 if (hop_ptr && hop_ptr < hop_cnt) 213 if (hop_ptr && hop_ptr < hop_cnt)
216 return 1; 214 return IB_SMI_SEND;
217 215
218 /* C14-9:3 -- at the end of the DR segment of path */ 216 /* C14-9:3 -- at the end of the DR segment of path */
219 if (hop_ptr == hop_cnt) 217 if (hop_ptr == hop_cnt)
220 return (smp->dr_dlid == IB_LID_PERMISSIVE); 218 return (smp->dr_dlid == IB_LID_PERMISSIVE ?
219 IB_SMI_SEND : IB_SMI_LOCAL);
221 220
222 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ 221 /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
223 if (hop_ptr == hop_cnt + 1) 222 if (hop_ptr == hop_cnt + 1)
224 return 1; 223 return IB_SMI_SEND;
225 } else { 224 } else {
226 /* C14-13:2 */ 225 /* C14-13:2 -- intermediate hop */
227 if (2 <= hop_ptr && hop_ptr <= hop_cnt) 226 if (2 <= hop_ptr && hop_ptr <= hop_cnt)
228 return 1; 227 return IB_SMI_SEND;
229 228
230 /* C14-13:3 -- at the end of the DR segment of path */ 229 /* C14-13:3 -- at the end of the DR segment of path */
231 if (hop_ptr == 1) 230 if (hop_ptr == 1)
232 return (smp->dr_slid != IB_LID_PERMISSIVE); 231 return (smp->dr_slid != IB_LID_PERMISSIVE ?
232 IB_SMI_SEND : IB_SMI_LOCAL);
233 } 233 }
234 return 0; 234 return IB_SMI_LOCAL;
235} 235}
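
The smi.c hunks above convert bare 0/1 return values into the named enums introduced in smi.h just below (IB_SMI_HANDLE/IB_SMI_DISCARD for smi_handle_dr_smp_recv(), IB_SMI_SEND/IB_SMI_LOCAL for smi_check_forward_dr_smp()). A minimal standalone sketch of the same refactoring pattern; the check function here is hypothetical, not the kernel code:

#include <stdio.h>

enum smi_action { IB_SMI_DISCARD, IB_SMI_HANDLE };

/* Before: "return (cond);" hands the caller a bare 0/1 to interpret. */
static int check_old(int hop_ptr, int hop_cnt)
{
	return (hop_ptr == hop_cnt + 1);
}

/* After: the same condition, but the result is self-describing. */
static enum smi_action check_new(int hop_ptr, int hop_cnt)
{
	return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}

int main(void)
{
	printf("old: %d, new: %s\n", check_old(3, 2),
	       check_new(3, 2) == IB_SMI_HANDLE ? "IB_SMI_HANDLE"
						: "IB_SMI_DISCARD");
	return 0;
}

Keeping IB_SMI_DISCARD as the first (zero) enumerator preserves the old boolean sense for any caller that still tests the result for truth.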
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 3011bfd86dc5..9a4b349efc30 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -3,7 +3,7 @@
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
7 * 7 *
8 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -33,7 +33,6 @@
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE. 34 * SOFTWARE.
35 * 35 *
36 * $Id: smi.h 1389 2004-12-27 22:56:47Z roland $
37 */ 36 */
38 37
39#ifndef __SMI_H_ 38#ifndef __SMI_H_
@@ -41,26 +40,33 @@
41 40
42#include <rdma/ib_smi.h> 41#include <rdma/ib_smi.h>
43 42
44int smi_handle_dr_smp_recv(struct ib_smp *smp, 43enum smi_action {
45 u8 node_type, 44 IB_SMI_DISCARD,
46 int port_num, 45 IB_SMI_HANDLE
47 int phys_port_cnt); 46};
48extern int smi_check_forward_dr_smp(struct ib_smp *smp); 47
49extern int smi_handle_dr_smp_send(struct ib_smp *smp, 48enum smi_forward_action {
50 u8 node_type, 49 IB_SMI_LOCAL, /* SMP should be completed up the stack */
51 int port_num); 50 IB_SMI_SEND, /* received DR SMP should be forwarded to the send queue */
51};
52
53enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
54 int port_num, int phys_port_cnt);
55extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
56extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
57 u8 node_type, int port_num);
52 58
53/* 59/*
54 * Return 1 if the SMP should be handled by the local SMA/SM via process_mad 60 * Return 1 if the SMP should be handled by the local SMA/SM via process_mad
55 */ 61 */
56static inline int smi_check_local_smp(struct ib_smp *smp, 62static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
57 struct ib_device *device) 63 struct ib_device *device)
58{ 64{
59 /* C14-9:3 -- We're at the end of the DR segment of path */ 65 /* C14-9:3 -- We're at the end of the DR segment of path */
60 /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */ 66 /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
61 return ((device->process_mad && 67 return ((device->process_mad &&
62 !ib_get_smp_direction(smp) && 68 !ib_get_smp_direction(smp) &&
63 (smp->hop_ptr == smp->hop_cnt + 1))); 69 (smp->hop_ptr == smp->hop_cnt + 1)) ?
70 IB_SMI_HANDLE : IB_SMI_DISCARD);
64} 71}
65
66#endif /* __SMI_H_ */ 72#endif /* __SMI_H_ */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 000c086bf2e9..08c299ebf4a8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -683,6 +683,7 @@ int ib_device_register_sysfs(struct ib_device *device)
683 683
684 class_dev->class = &ib_class; 684 class_dev->class = &ib_class;
685 class_dev->class_data = device; 685 class_dev->class_data = device;
686 class_dev->dev = device->dma_device;
686 strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE); 687 strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE);
687 688
688 INIT_LIST_HEAD(&device->port_list); 689 INIT_LIST_HEAD(&device->port_list);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index ee51d79a7ad5..2586a3ee8eba 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -407,29 +407,18 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
407 407
408 mutex_lock(&file->file_mutex); 408 mutex_lock(&file->file_mutex);
409 while (list_empty(&file->events)) { 409 while (list_empty(&file->events)) {
410 mutex_unlock(&file->file_mutex);
410 411
411 if (file->filp->f_flags & O_NONBLOCK) { 412 if (file->filp->f_flags & O_NONBLOCK)
412 result = -EAGAIN; 413 return -EAGAIN;
413 break;
414 }
415 414
416 if (signal_pending(current)) { 415 if (wait_event_interruptible(file->poll_wait,
417 result = -ERESTARTSYS; 416 !list_empty(&file->events)))
418 break; 417 return -ERESTARTSYS;
419 }
420 418
421 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
422
423 mutex_unlock(&file->file_mutex);
424 schedule();
425 mutex_lock(&file->file_mutex); 419 mutex_lock(&file->file_mutex);
426
427 finish_wait(&file->poll_wait, &wait);
428 } 420 }
429 421
430 if (result)
431 goto done;
432
433 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); 422 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
434 423
435 if (ib_ucm_new_cm_id(uevent->resp.event)) { 424 if (ib_ucm_new_cm_id(uevent->resp.event)) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index c859134c1daa..53b4c94a7eb5 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -306,26 +306,18 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
306 306
307 mutex_lock(&file->mut); 307 mutex_lock(&file->mut);
308 while (list_empty(&file->event_list)) { 308 while (list_empty(&file->event_list)) {
309 if (file->filp->f_flags & O_NONBLOCK) { 309 mutex_unlock(&file->mut);
310 ret = -EAGAIN;
311 break;
312 }
313 310
314 if (signal_pending(current)) { 311 if (file->filp->f_flags & O_NONBLOCK)
315 ret = -ERESTARTSYS; 312 return -EAGAIN;
316 break; 313
317 } 314 if (wait_event_interruptible(file->poll_wait,
315 !list_empty(&file->event_list)))
316 return -ERESTARTSYS;
318 317
319 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
320 mutex_unlock(&file->mut);
321 schedule();
322 mutex_lock(&file->mut); 318 mutex_lock(&file->mut);
323 finish_wait(&file->poll_wait, &wait);
324 } 319 }
325 320
326 if (ret)
327 goto done;
328
329 uevent = list_entry(file->event_list.next, struct ucma_event, list); 321 uevent = list_entry(file->event_list.next, struct ucma_event, list);
330 322
331 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { 323 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
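
Both ib_ucm_event() and ucma_get_event() above replace an open-coded prepare_to_wait()/schedule()/finish_wait() loop with wait_event_interruptible(), dropping the file mutex across the sleep and returning directly on O_NONBLOCK or a pending signal. A sketch of the resulting shape for a hypothetical character device; struct my_file and its fields are illustrative, not part of this patch:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/errno.h>

struct my_file {
	struct file		*filp;
	struct mutex		mut;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

static int my_wait_for_event(struct my_file *f)
{
	mutex_lock(&f->mut);
	while (list_empty(&f->event_list)) {
		/* never sleep while holding the mutex */
		mutex_unlock(&f->mut);

		if (f->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* returns -ERESTARTSYS if interrupted by a signal */
		if (wait_event_interruptible(f->poll_wait,
					     !list_empty(&f->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&f->mut);
	}
	/* success: caller dequeues the first event, then unlocks f->mut */
	return 0;
}

As in the hunks above, the mutex is held on return only in the success case; the error paths return after the unlock, which is why the old "if (result) goto done;" check disappears.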
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c069ebeba8e3..8199b83052a9 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -135,7 +135,7 @@ static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
135 135
136static DEFINE_SPINLOCK(port_lock); 136static DEFINE_SPINLOCK(port_lock);
137static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS]; 137static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
138static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2); 138static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
139 139
140static void ib_umad_add_one(struct ib_device *device); 140static void ib_umad_add_one(struct ib_device *device);
141static void ib_umad_remove_one(struct ib_device *device); 141static void ib_umad_remove_one(struct ib_device *device);
@@ -231,12 +231,17 @@ static void recv_handler(struct ib_mad_agent *agent,
231 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; 231 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
232 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); 232 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
233 if (packet->mad.hdr.grh_present) { 233 if (packet->mad.hdr.grh_present) {
234 /* XXX parse GRH */ 234 struct ib_ah_attr ah_attr;
235 packet->mad.hdr.gid_index = 0; 235
236 packet->mad.hdr.hop_limit = 0; 236 ib_init_ah_from_wc(agent->device, agent->port_num,
237 packet->mad.hdr.traffic_class = 0; 237 mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
238 memset(packet->mad.hdr.gid, 0, 16); 238 &ah_attr);
239 packet->mad.hdr.flow_label = 0; 239
240 packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
241 packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
242 packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
243 memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
244 packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
240 } 245 }
241 246
242 if (queue_packet(file, agent, packet)) 247 if (queue_packet(file, agent, packet))
@@ -473,6 +478,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
473 if (packet->mad.hdr.grh_present) { 478 if (packet->mad.hdr.grh_present) {
474 ah_attr.ah_flags = IB_AH_GRH; 479 ah_attr.ah_flags = IB_AH_GRH;
475 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); 480 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
481 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
476 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); 482 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
477 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; 483 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
478 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 484 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 59243d9aedd6..58bc272bd407 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -439,7 +439,8 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
439 } 439 }
440 440
441 /* Setup the skb for reuse since we're dropping this pkt */ 441 /* Setup the skb for reuse since we're dropping this pkt */
442 elem->skb->tail = elem->skb->data = elem->skb->head; 442 elem->skb->data = elem->skb->head;
443 skb_reset_tail_pointer(elem->skb);
443 444
444 /* Zero out the rxp hdr in the sk_buff */ 445 /* Zero out the rxp hdr in the sk_buff */
445 memset(elem->skb->data, 0, sizeof(*rxp_hdr)); 446 memset(elem->skb->data, 0, sizeof(*rxp_hdr));
@@ -521,9 +522,8 @@ static void c2_rx_interrupt(struct net_device *netdev)
521 * "sizeof(struct c2_rxp_hdr)". 522 * "sizeof(struct c2_rxp_hdr)".
522 */ 523 */
523 skb->data += sizeof(*rxp_hdr); 524 skb->data += sizeof(*rxp_hdr);
524 skb->tail = skb->data + buflen; 525 skb_set_tail_pointer(skb, buflen);
525 skb->len = buflen; 526 skb->len = buflen;
526 skb->dev = netdev;
527 skb->protocol = eth_type_trans(skb, netdev); 527 skb->protocol = eth_type_trans(skb, netdev);
528 528
529 netif_rx(skb); 529 netif_rx(skb);
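
The amso1100 hunks above stop assigning skb->tail directly and use the tail-pointer helpers that 2.6.22 introduces; the helpers keep the code correct whether skb->tail is stored as a pointer or as an offset from skb->head. A hedged sketch of the two idioms in a hypothetical receive path (my_recycle_rx_skb() is not from this patch):

#include <linux/skbuff.h>

/* Rewind a receive buffer for reuse, then carve out header and payload. */
static void my_recycle_rx_skb(struct sk_buff *skb, unsigned int hdr_room,
			      unsigned int payload_len)
{
	skb->data = skb->head;
	skb_reset_tail_pointer(skb);		/* tail = data */
	skb->len = 0;

	skb_reserve(skb, hdr_room);		/* data and tail move forward */
	skb_set_tail_pointer(skb, payload_len);	/* tail = data + payload_len */
	skb->len = payload_len;
}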
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index fef972752912..607c09bf764c 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -796,7 +796,6 @@ int c2_register_device(struct c2_dev *dev)
796 memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); 796 memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
797 dev->ibdev.phys_port_cnt = 1; 797 dev->ibdev.phys_port_cnt = 1;
798 dev->ibdev.dma_device = &dev->pcidev->dev; 798 dev->ibdev.dma_device = &dev->pcidev->dev;
799 dev->ibdev.class_dev.dev = &dev->pcidev->dev;
800 dev->ibdev.query_device = c2_query_device; 799 dev->ibdev.query_device = c2_query_device;
801 dev->ibdev.query_port = c2_query_port; 800 dev->ibdev.query_port = c2_query_port;
802 dev->ibdev.modify_port = c2_modify_port; 801 dev->ibdev.modify_port = c2_modify_port;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d0ed1d35ca3e..3b4b0acd707f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -477,7 +477,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
477 BUG_ON(skb_cloned(skb)); 477 BUG_ON(skb_cloned(skb));
478 478
479 mpalen = sizeof(*mpa) + ep->plen; 479 mpalen = sizeof(*mpa) + ep->plen;
480 if (skb->data + mpalen + sizeof(*req) > skb->end) { 480 if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
481 kfree_skb(skb); 481 kfree_skb(skb);
482 skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); 482 skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
483 if (!skb) { 483 if (!skb) {
@@ -507,7 +507,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
507 */ 507 */
508 skb_get(skb); 508 skb_get(skb);
509 set_arp_failure_handler(skb, arp_failure_discard); 509 set_arp_failure_handler(skb, arp_failure_discard);
510 skb->h.raw = skb->data; 510 skb_reset_transport_header(skb);
511 len = skb->len; 511 len = skb->len;
512 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 512 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
513 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 513 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
@@ -559,7 +559,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
559 skb_get(skb); 559 skb_get(skb);
560 skb->priority = CPL_PRIORITY_DATA; 560 skb->priority = CPL_PRIORITY_DATA;
561 set_arp_failure_handler(skb, arp_failure_discard); 561 set_arp_failure_handler(skb, arp_failure_discard);
562 skb->h.raw = skb->data; 562 skb_reset_transport_header(skb);
563 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 563 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
564 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 564 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
565 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 565 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
@@ -610,7 +610,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
610 */ 610 */
611 skb_get(skb); 611 skb_get(skb);
612 set_arp_failure_handler(skb, arp_failure_discard); 612 set_arp_failure_handler(skb, arp_failure_discard);
613 skb->h.raw = skb->data; 613 skb_reset_transport_header(skb);
614 len = skb->len; 614 len = skb->len;
615 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 615 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
616 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 616 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
@@ -821,7 +821,8 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
821 /* 821 /*
822 * copy the new data into our accumulation buffer. 822 * copy the new data into our accumulation buffer.
823 */ 823 */
824 memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); 824 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
825 skb->len);
825 ep->mpa_pkt_len += skb->len; 826 ep->mpa_pkt_len += skb->len;
826 827
827 /* 828 /*
@@ -940,7 +941,8 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
940 /* 941 /*
941 * Copy the new data into our accumulation buffer. 942 * Copy the new data into our accumulation buffer.
942 */ 943 */
943 memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); 944 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
945 skb->len);
944 ep->mpa_pkt_len += skb->len; 946 ep->mpa_pkt_len += skb->len;
945 947
946 /* 948 /*
@@ -1619,7 +1621,8 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1619 PDBG("%s ep %p\n", __FUNCTION__, ep); 1621 PDBG("%s ep %p\n", __FUNCTION__, ep);
1620 skb_pull(skb, sizeof(struct cpl_rdma_terminate)); 1622 skb_pull(skb, sizeof(struct cpl_rdma_terminate));
1621 PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); 1623 PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
1622 memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len); 1624 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1625 skb->len);
1623 ep->com.qp->attr.terminate_msg_len = skb->len; 1626 ep->com.qp->attr.terminate_msg_len = skb->len;
1624 ep->com.qp->attr.is_terminate_local = 0; 1627 ep->com.qp->attr.is_terminate_local = 0;
1625 return CPL_RET_BUF_DONE; 1628 return CPL_RET_BUF_DONE;
@@ -2026,6 +2029,17 @@ static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2026 return 0; 2029 return 0;
2027} 2030}
2028 2031
2032static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2033{
2034 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2035
2036 if (rpl->status != CPL_ERR_NONE) {
2037 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2038 "for tid %u\n", rpl->status, GET_TID(rpl));
2039 }
2040 return CPL_RET_BUF_DONE;
2041}
2042
2029int __init iwch_cm_init(void) 2043int __init iwch_cm_init(void)
2030{ 2044{
2031 skb_queue_head_init(&rxq); 2045 skb_queue_head_init(&rxq);
@@ -2053,6 +2067,7 @@ int __init iwch_cm_init(void)
2053 t3c_handlers[CPL_ABORT_REQ_RSS] = sched; 2067 t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
2054 t3c_handlers[CPL_RDMA_TERMINATE] = sched; 2068 t3c_handlers[CPL_RDMA_TERMINATE] = sched;
2055 t3c_handlers[CPL_RDMA_EC_STATUS] = sched; 2069 t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
2070 t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
2056 2071
2057 /* 2072 /*
2058 * These are the real handlers that are called from a 2073 * These are the real handlers that are called from a
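
In the iwch_cm.c hunks above, skb_copy_from_linear_data() makes explicit that only the skb's linear area is read, and skb_reset_transport_header() replaces the open-coded skb->h.raw = skb->data. A small sketch of the accumulation pattern those hunks follow; struct my_reasm is hypothetical:

#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_reasm {
	u8	buf[512];
	u32	len;
};

static int my_accumulate(struct my_reasm *r, struct sk_buff *skb)
{
	if (r->len + skb->len > sizeof(r->buf))
		return -EOVERFLOW;

	/* equivalent to memcpy(&r->buf[r->len], skb->data, skb->len) */
	skb_copy_from_linear_data(skb, &r->buf[r->len], skb->len);
	r->len += skb->len;
	return 0;
}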
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 24e0df04f7db..af28a317016d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1108,7 +1108,6 @@ int iwch_register_device(struct iwch_dev *dev)
1108 memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); 1108 memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
1109 dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; 1109 dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
1110 dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); 1110 dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
1111 dev->ibdev.class_dev.dev = &(dev->rdev.rnic_info.pdev->dev);
1112 dev->ibdev.query_device = iwch_query_device; 1111 dev->ibdev.query_device = iwch_query_device;
1113 dev->ibdev.query_port = iwch_query_port; 1112 dev->ibdev.query_port = iwch_query_port;
1114 dev->ibdev.modify_port = iwch_modify_port; 1113 dev->ibdev.modify_port = iwch_modify_port;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 82ded44c6cee..10fb8fbafa0c 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -106,6 +106,7 @@ struct ehca_shca {
106 struct ehca_mr *maxmr; 106 struct ehca_mr *maxmr;
107 struct ehca_pd *pd; 107 struct ehca_pd *pd;
108 struct h_galpas galpas; 108 struct h_galpas galpas;
109 struct mutex modify_mutex;
109}; 110};
110 111
111struct ehca_pd { 112struct ehca_pd {
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 30eb45df9f0b..32b55a4f0e5b 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -147,6 +147,7 @@ int ehca_query_port(struct ib_device *ibdev,
147 break; 147 break;
148 } 148 }
149 149
150 props->port_cap_flags = rblock->capability_mask;
150 props->gid_tbl_len = rblock->gid_tbl_len; 151 props->gid_tbl_len = rblock->gid_tbl_len;
151 props->max_msg_sz = rblock->max_msg_sz; 152 props->max_msg_sz = rblock->max_msg_sz;
152 props->bad_pkey_cntr = rblock->bad_pkey_cntr; 153 props->bad_pkey_cntr = rblock->bad_pkey_cntr;
@@ -236,10 +237,60 @@ query_gid1:
236 return ret; 237 return ret;
237} 238}
238 239
240const u32 allowed_port_caps = (
241 IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
242 IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
243 IB_PORT_VENDOR_CLASS_SUP);
244
239int ehca_modify_port(struct ib_device *ibdev, 245int ehca_modify_port(struct ib_device *ibdev,
240 u8 port, int port_modify_mask, 246 u8 port, int port_modify_mask,
241 struct ib_port_modify *props) 247 struct ib_port_modify *props)
242{ 248{
243 /* Not implemented yet */ 249 int ret = 0;
244 return -EFAULT; 250 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
251 struct hipz_query_port *rblock;
252 u32 cap;
253 u64 hret;
254
255 if ((props->set_port_cap_mask | props->clr_port_cap_mask)
256 & ~allowed_port_caps) {
257 ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
258 "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
259 props->clr_port_cap_mask, allowed_port_caps);
260 return -EINVAL;
261 }
262
263 if (mutex_lock_interruptible(&shca->modify_mutex))
264 return -ERESTARTSYS;
265
266 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
267 if (!rblock) {
268 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
269 ret = -ENOMEM;
270 goto modify_port1;
271 }
272
273 if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
274 ehca_err(&shca->ib_device, "Can't query port properties");
275 ret = -EINVAL;
276 goto modify_port2;
277 }
278
279 cap = (rblock->capability_mask | props->set_port_cap_mask)
280 & ~props->clr_port_cap_mask;
281
282 hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
283 cap, props->init_type, port_modify_mask);
284 if (hret != H_SUCCESS) {
285 ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
286 ret = -EINVAL;
287 }
288
289modify_port2:
290 ehca_free_fw_ctrlblock(rblock);
291
292modify_port1:
293 mutex_unlock(&shca->modify_mutex);
294
295 return ret;
245} 296}
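
The new ehca_modify_port() above applies the usual port-capability update rule: reject any requested bit outside the allowed set, then compute the new mask as (current | set) & ~clear. A standalone arithmetic sketch of that rule; the bit values below are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define CAP_SM		0x0002u
#define CAP_CM		0x0010u
#define CAP_SNMP	0x0100u
#define ALLOWED		(CAP_SM | CAP_CM | CAP_SNMP)

static int update_caps(uint32_t cur, uint32_t set, uint32_t clr, uint32_t *out)
{
	if ((set | clr) & ~ALLOWED)
		return -1;			/* non-changeable bit requested */
	*out = (cur | set) & ~clr;
	return 0;
}

int main(void)
{
	uint32_t cap;

	if (!update_caps(CAP_SM | CAP_SNMP, CAP_CM, CAP_SNMP, &cap))
		printf("new capability mask: 0x%04x\n", cap);	/* 0x0012 */
	return 0;
}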
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index cee66b79b30d..4700085ba834 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -587,6 +587,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
587 ehca_gen_err("Cannot allocate shca memory."); 587 ehca_gen_err("Cannot allocate shca memory.");
588 return -ENOMEM; 588 return -ENOMEM;
589 } 589 }
590 mutex_init(&shca->modify_mutex);
590 591
591 shca->ibmebus_dev = dev; 592 shca->ibmebus_dev = dev;
592 shca->ipz_hca_handle.handle = *handle; 593 shca->ipz_hca_handle.handle = *handle;
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 3fb46e67df87..b564fcd3b282 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -70,6 +70,10 @@
70#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31) 70#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
71#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63) 71#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
72 72
73#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
74#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
75#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
76
73/* direct access qp controls */ 77/* direct access qp controls */
74#define DAQP_CTRL_ENABLE 0x01 78#define DAQP_CTRL_ENABLE 0x01
75#define DAQP_CTRL_SEND_COMP 0x20 79#define DAQP_CTRL_SEND_COMP 0x20
@@ -364,6 +368,26 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
364 return ret; 368 return ret;
365} 369}
366 370
371u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
372 const u8 port_id, const u32 port_cap,
373 const u8 init_type, const int modify_mask)
374{
375 u64 port_attributes = port_cap;
376
377 if (modify_mask & IB_PORT_SHUTDOWN)
378 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
379 if (modify_mask & IB_PORT_INIT_TYPE)
380 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
381 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
382 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
383
384 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
385 adapter_handle.handle, /* r4 */
386 port_id, /* r5 */
387 port_attributes, /* r6 */
388 0, 0, 0, 0);
389}
390
367u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, 391u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
368 struct hipz_query_hca *query_hca_rblock) 392 struct hipz_query_hca *query_hca_rblock)
369{ 393{
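
hipz_h_modify_port() above packs the capability mask together with the shutdown, init-type and reset-QKey-counter controls into one attribute word before issuing H_MODIFY_PORT. A simplified standalone sketch of that packing; the flag values and bit positions here are stand-ins, not the real hcall or ib_verbs encoding:

#include <stdio.h>
#include <stdint.h>

/* stand-in modify_mask flags (the real ones live in ib_verbs.h) */
#define PORT_SHUTDOWN		0x1
#define PORT_INIT_TYPE		0x2
#define PORT_RESET_QKEY_CNTR	0x4

/* stand-in attribute-word layout */
#define ATTR_SHUTDOWN		(1ull << 32)
#define ATTR_RESET_QKEY_CTR	(1ull << 33)
#define ATTR_INIT_TYPE(v)	((uint64_t)((v) & 0xf) << 34)

static uint64_t pack_port_attributes(uint32_t port_cap, uint8_t init_type,
				     int modify_mask)
{
	uint64_t attr = port_cap;

	if (modify_mask & PORT_SHUTDOWN)
		attr |= ATTR_SHUTDOWN;
	if (modify_mask & PORT_INIT_TYPE)
		attr |= ATTR_INIT_TYPE(init_type);
	if (modify_mask & PORT_RESET_QKEY_CNTR)
		attr |= ATTR_RESET_QKEY_CTR;
	return attr;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)
	       pack_port_attributes(0x12, 0, PORT_SHUTDOWN));
	return 0;
}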
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 587ebd470959..2869f7dd6196 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -85,6 +85,10 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
85 const u8 port_id, 85 const u8 port_id,
86 struct hipz_query_port *query_port_response_block); 86 struct hipz_query_port *query_port_response_block);
87 87
88u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
89 const u8 port_id, const u32 port_cap,
90 const u8 init_type, const int modify_mask);
91
88u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, 92u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
89 struct hipz_query_hca *query_hca_rblock); 93 struct hipz_query_hca *query_hca_rblock);
90 94
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 54139d398181..10c008f22ba6 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -78,6 +78,8 @@
78#define IPATH_IB_LINKINIT 3 78#define IPATH_IB_LINKINIT 3
79#define IPATH_IB_LINKDOWN_SLEEP 4 79#define IPATH_IB_LINKDOWN_SLEEP 4
80#define IPATH_IB_LINKDOWN_DISABLE 5 80#define IPATH_IB_LINKDOWN_DISABLE 5
81#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
81 83
82/* 84/*
83 * stats maintained by the driver. For now, at least, this is global 85 * stats maintained by the driver. For now, at least, this is global
@@ -316,11 +318,17 @@ struct ipath_base_info {
316 /* address of readonly memory copy of the rcvhdrq tail register. */ 318 /* address of readonly memory copy of the rcvhdrq tail register. */
317 __u64 spi_rcvhdr_tailaddr; 319 __u64 spi_rcvhdr_tailaddr;
318 320
319 /* shared memory pages for subports if IPATH_RUNTIME_MASTER is set */ 321 /* shared memory pages for subports if port is shared */
320 __u64 spi_subport_uregbase; 322 __u64 spi_subport_uregbase;
321 __u64 spi_subport_rcvegrbuf; 323 __u64 spi_subport_rcvegrbuf;
322 __u64 spi_subport_rcvhdr_base; 324 __u64 spi_subport_rcvhdr_base;
323 325
326 /* shared memory page for hardware port if it is shared */
327 __u64 spi_port_uregbase;
328 __u64 spi_port_rcvegrbuf;
329 __u64 spi_port_rcvhdr_base;
330 __u64 spi_port_rcvhdr_tailaddr;
331
324} __attribute__ ((aligned(8))); 332} __attribute__ ((aligned(8)));
325 333
326 334
@@ -344,7 +352,7 @@ struct ipath_base_info {
344 * may not be implemented; the user code must deal with this if it 352 * may not be implemented; the user code must deal with this if it
345 * cares, or it must abort after initialization reports the difference. 353 * cares, or it must abort after initialization reports the difference.
346 */ 354 */
347#define IPATH_USER_SWMINOR 3 355#define IPATH_USER_SWMINOR 5
348 356
349#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR) 357#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
350 358
@@ -418,11 +426,14 @@ struct ipath_user_info {
418#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */ 426#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
419#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */ 427#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
420#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */ 428#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
421#define IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes */ 429#define __IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes (for old user code) */
422#define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */ 430#define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */
423#define IPATH_CMD_USER_INIT 24 /* set up userspace */ 431#define IPATH_CMD_USER_INIT 24 /* set up userspace */
432#define IPATH_CMD_UNUSED_1 25
433#define IPATH_CMD_UNUSED_2 26
434#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
424 435
425#define IPATH_CMD_MAX 24 436#define IPATH_CMD_MAX 27
426 437
427struct ipath_port_info { 438struct ipath_port_info {
428 __u32 num_active; /* number of active units */ 439 __u32 num_active; /* number of active units */
@@ -430,7 +441,7 @@ struct ipath_port_info {
430 __u16 port; /* port on unit assigned to caller */ 441 __u16 port; /* port on unit assigned to caller */
431 __u16 subport; /* subport on unit assigned to caller */ 442 __u16 subport; /* subport on unit assigned to caller */
432 __u16 num_ports; /* number of ports available on unit */ 443 __u16 num_ports; /* number of ports available on unit */
433 __u16 num_subports; /* number of subport slaves opened on port */ 444 __u16 num_subports; /* number of subports opened on port */
434}; 445};
435 446
436struct ipath_tid_info { 447struct ipath_tid_info {
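
ipath_common.h above appends four spi_port_* fields to the end of struct ipath_base_info and bumps IPATH_USER_SWMINOR; ipath_file_ops.c (further down) then copies out only as many bytes as the caller's structure allows, so older userspace keeps working. A standalone sketch of that size-negotiation pattern with hypothetical struct names:

#include <stdio.h>
#include <string.h>

struct base_info_v1 { unsigned long long a, b, c; };
struct base_info_v2 { unsigned long long a, b, c, d_new, e_new; };

/* Copy out at most what the (possibly older) caller asked for. */
static size_t copy_base_info(void *ubuf, size_t ubuf_size)
{
	struct base_info_v2 kinfo = { 1, 2, 3, 4, 5 };
	size_t sz = ubuf_size < sizeof(kinfo) ? ubuf_size : sizeof(kinfo);

	memcpy(ubuf, &kinfo, sz);	/* the kernel would use copy_to_user() */
	return sz;
}

int main(void)
{
	struct base_info_v1 old = { 0 };

	printf("copied %zu bytes, a=%llu\n",
	       copy_base_info(&old, sizeof(old)), old.a);
	return 0;
}

The scheme only works because the new fields are appended at the end, leaving the offsets of the existing fields untouched.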
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 87462e0cb4d2..ea78e6dddc90 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -76,7 +76,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
76 } 76 }
77 return; 77 return;
78 } 78 }
79 wc->queue[head] = *entry; 79 wc->queue[head].wr_id = entry->wr_id;
80 wc->queue[head].status = entry->status;
81 wc->queue[head].opcode = entry->opcode;
82 wc->queue[head].vendor_err = entry->vendor_err;
83 wc->queue[head].byte_len = entry->byte_len;
84 wc->queue[head].imm_data = (__u32 __force)entry->imm_data;
85 wc->queue[head].qp_num = entry->qp->qp_num;
86 wc->queue[head].src_qp = entry->src_qp;
87 wc->queue[head].wc_flags = entry->wc_flags;
88 wc->queue[head].pkey_index = entry->pkey_index;
89 wc->queue[head].slid = entry->slid;
90 wc->queue[head].sl = entry->sl;
91 wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
92 wc->queue[head].port_num = entry->port_num;
80 wc->head = next; 93 wc->head = next;
81 94
82 if (cq->notify == IB_CQ_NEXT_COMP || 95 if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -122,9 +135,30 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
122 if (tail > (u32) cq->ibcq.cqe) 135 if (tail > (u32) cq->ibcq.cqe)
123 tail = (u32) cq->ibcq.cqe; 136 tail = (u32) cq->ibcq.cqe;
124 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { 137 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
138 struct ipath_qp *qp;
139
125 if (tail == wc->head) 140 if (tail == wc->head)
126 break; 141 break;
127 *entry = wc->queue[tail]; 142
143 qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
144 wc->queue[tail].qp_num);
145 entry->qp = &qp->ibqp;
146 if (atomic_dec_and_test(&qp->refcount))
147 wake_up(&qp->wait);
148
149 entry->wr_id = wc->queue[tail].wr_id;
150 entry->status = wc->queue[tail].status;
151 entry->opcode = wc->queue[tail].opcode;
152 entry->vendor_err = wc->queue[tail].vendor_err;
153 entry->byte_len = wc->queue[tail].byte_len;
154 entry->imm_data = wc->queue[tail].imm_data;
155 entry->src_qp = wc->queue[tail].src_qp;
156 entry->wc_flags = wc->queue[tail].wc_flags;
157 entry->pkey_index = wc->queue[tail].pkey_index;
158 entry->slid = wc->queue[tail].slid;
159 entry->sl = wc->queue[tail].sl;
160 entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
161 entry->port_num = wc->queue[tail].port_num;
128 if (tail >= cq->ibcq.cqe) 162 if (tail >= cq->ibcq.cqe)
129 tail = 0; 163 tail = 0;
130 else 164 else
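
The ipath_cq.c hunks stop structure-assigning struct ib_wc into the driver's completion ring: the in-kernel ib_wc now carries a struct ib_qp pointer while the ring entry keeps a plain qp_num and its own fixed layout, so each field is copied explicitly in both directions. A standalone sketch of translating between an internal record holding a pointer and a fixed-layout exported record; all types here are hypothetical:

#include <stdio.h>
#include <stdint.h>

struct qp          { uint32_t qp_num; };
struct kernel_wc   { uint64_t wr_id; struct qp *qp; uint32_t byte_len; };
struct exported_wc { uint64_t wr_id; uint32_t qp_num; uint32_t byte_len; };

static void export_wc(struct exported_wc *dst, const struct kernel_wc *src)
{
	/* no struct assignment: layouts differ, and the pointer must not leak */
	dst->wr_id    = src->wr_id;
	dst->qp_num   = src->qp->qp_num;
	dst->byte_len = src->byte_len;
}

int main(void)
{
	struct qp qp = { 42 };
	struct kernel_wc kwc = { 7, &qp, 128 };
	struct exported_wc uwc;

	export_wc(&uwc, &kwc);
	printf("wr_id=%llu qp_num=%u len=%u\n",
	       (unsigned long long)uwc.wr_id, uwc.qp_num, uwc.byte_len);
	return 0;
}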
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index df69f0d80b8b..42bfbdb0d3e6 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -57,6 +57,7 @@
57#define __IPATH_PROCDBG 0x100 57#define __IPATH_PROCDBG 0x100
58/* print mmap/nopage stuff, not using VDBG any more */ 58/* print mmap/nopage stuff, not using VDBG any more */
59#define __IPATH_MMDBG 0x200 59#define __IPATH_MMDBG 0x200
60#define __IPATH_ERRPKTDBG 0x400
60#define __IPATH_USER_SEND 0x1000 /* use user mode send */ 61#define __IPATH_USER_SEND 0x1000 /* use user mode send */
61#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ 62#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
62#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ 63#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 0f13a2182cc7..63e8368b0e95 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -296,7 +296,7 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
296 } 296 }
297 297
298 fp->private_data = dd; 298 fp->private_data = dd;
299 ipath_diag_inuse = 1; 299 ipath_diag_inuse = -2;
300 diag_set_link = 0; 300 diag_set_link = 0;
301 ret = 0; 301 ret = 0;
302 302
@@ -461,6 +461,8 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data,
461 else if ((count % 4) || (*off % 4)) 461 else if ((count % 4) || (*off % 4))
462 /* address or length is not 32-bit aligned, hence invalid */ 462 /* address or length is not 32-bit aligned, hence invalid */
463 ret = -EINVAL; 463 ret = -EINVAL;
464 else if (ipath_diag_inuse < 1 && (*off || count != 8))
465 ret = -EINVAL; /* prevent cat /dev/ipath_diag* */
464 else if ((count % 8) || (*off % 8)) 466 else if ((count % 8) || (*off % 8))
465 /* address or length not 64-bit aligned; do 32-bit reads */ 467 /* address or length not 64-bit aligned; do 32-bit reads */
466 ret = ipath_read_umem32(dd, data, kreg_base + *off, count); 468 ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
@@ -470,6 +472,8 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data,
470 if (ret >= 0) { 472 if (ret >= 0) {
471 *off += count; 473 *off += count;
472 ret = count; 474 ret = count;
475 if (ipath_diag_inuse == -2)
476 ipath_diag_inuse++;
473 } 477 }
474 478
475 return ret; 479 return ret;
@@ -489,6 +493,9 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
489 else if ((count % 4) || (*off % 4)) 493 else if ((count % 4) || (*off % 4))
490 /* address or length is not 32-bit aligned, hence invalid */ 494 /* address or length is not 32-bit aligned, hence invalid */
491 ret = -EINVAL; 495 ret = -EINVAL;
496 else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
497 ipath_diag_inuse == -2) /* read qw off 0, write qw off 0 */
498 ret = -EINVAL; /* before any other write allowed */
492 else if ((count % 8) || (*off % 8)) 499 else if ((count % 8) || (*off % 8))
493 /* address or length not 64-bit aligned; do 32-bit writes */ 500 /* address or length not 64-bit aligned; do 32-bit writes */
494 ret = ipath_write_umem32(dd, kreg_base + *off, data, count); 501 ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
@@ -498,6 +505,8 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
498 if (ret >= 0) { 505 if (ret >= 0) {
499 *off += count; 506 *off += count;
500 ret = count; 507 ret = count;
508 if (ipath_diag_inuse == -1)
509 ipath_diag_inuse = 1; /* all read/write OK now */
501 } 510 }
502 511
503 return ret; 512 return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index ae7f21a0cdc0..e3a223209710 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -390,15 +390,23 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
390 390
391 /* setup the chip-specific functions, as early as possible. */ 391 /* setup the chip-specific functions, as early as possible. */
392 switch (ent->device) { 392 switch (ent->device) {
393#ifdef CONFIG_HT_IRQ
394 case PCI_DEVICE_ID_INFINIPATH_HT: 393 case PCI_DEVICE_ID_INFINIPATH_HT:
394#ifdef CONFIG_HT_IRQ
395 ipath_init_iba6110_funcs(dd); 395 ipath_init_iba6110_funcs(dd);
396 break; 396 break;
397#else
398 ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
399 "CONFIG_HT_IRQ is not enabled\n", ent->device);
400 return -ENODEV;
397#endif 401#endif
398#ifdef CONFIG_PCI_MSI
399 case PCI_DEVICE_ID_INFINIPATH_PE800: 402 case PCI_DEVICE_ID_INFINIPATH_PE800:
403#ifdef CONFIG_PCI_MSI
400 ipath_init_iba6120_funcs(dd); 404 ipath_init_iba6120_funcs(dd);
401 break; 405 break;
406#else
407 ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
408 "CONFIG_PCI_MSI is not enabled\n", ent->device);
409 return -ENODEV;
402#endif 410#endif
403 default: 411 default:
404 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " 412 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
@@ -486,7 +494,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
486 494
487 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */ 495 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
488 if (ret) 496 if (ret)
489 goto bail_iounmap; 497 goto bail_irqsetup;
490 498
491 ret = ipath_enable_wc(dd); 499 ret = ipath_enable_wc(dd);
492 500
@@ -505,6 +513,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
505 513
506 goto bail; 514 goto bail;
507 515
516bail_irqsetup:
517 if (pdev->irq) free_irq(pdev->irq, dd);
518
508bail_iounmap: 519bail_iounmap:
509 iounmap((volatile void __iomem *) dd->ipath_kregbase); 520 iounmap((volatile void __iomem *) dd->ipath_kregbase);
510 521
@@ -525,8 +536,6 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
525{ 536{
526 int port; 537 int port;
527 538
528 ipath_shutdown_device(dd);
529
530 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { 539 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
531 /* can't do anything more with chip; needs re-init */ 540 /* can't do anything more with chip; needs re-init */
532 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT; 541 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
@@ -594,8 +603,9 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
594 603
595 ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n", 604 ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
596 dd->ipath_pageshadow); 605 dd->ipath_pageshadow);
597 vfree(dd->ipath_pageshadow); 606 tmpp = dd->ipath_pageshadow;
598 dd->ipath_pageshadow = NULL; 607 dd->ipath_pageshadow = NULL;
608 vfree(tmpp);
599 } 609 }
600 610
601 /* 611 /*
@@ -622,6 +632,12 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
622 632
623 ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd); 633 ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
624 634
635 /*
636 * disable the IB link early, to be sure no new packets arrive, which
637 * complicates the shutdown process
638 */
639 ipath_shutdown_device(dd);
640
625 if (dd->verbs_dev) 641 if (dd->verbs_dev)
626 ipath_unregister_ib_device(dd->verbs_dev); 642 ipath_unregister_ib_device(dd->verbs_dev);
627 643
@@ -754,9 +770,42 @@ static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
754 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT; 770 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
755} 771}
756 772
757void ipath_decode_err(char *buf, size_t blen, ipath_err_t err) 773/*
774 * Decode the error status into strings, deciding whether to always
775 * print * it or not depending on "normal packet errors" vs everything
776 * else. Return 1 if "real" errors, otherwise 0 if only packet
777 * errors, so caller can decide what to print with the string.
778 */
779int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
758{ 780{
781 int iserr = 1;
759 *buf = '\0'; 782 *buf = '\0';
783 if (err & INFINIPATH_E_PKTERRS) {
784 if (!(err & ~INFINIPATH_E_PKTERRS))
785 iserr = 0; // if only packet errors.
786 if (ipath_debug & __IPATH_ERRPKTDBG) {
787 if (err & INFINIPATH_E_REBP)
788 strlcat(buf, "EBP ", blen);
789 if (err & INFINIPATH_E_RVCRC)
790 strlcat(buf, "VCRC ", blen);
791 if (err & INFINIPATH_E_RICRC) {
792 strlcat(buf, "CRC ", blen);
793 // clear for check below, so only once
794 err &= INFINIPATH_E_RICRC;
795 }
796 if (err & INFINIPATH_E_RSHORTPKTLEN)
797 strlcat(buf, "rshortpktlen ", blen);
798 if (err & INFINIPATH_E_SDROPPEDDATAPKT)
799 strlcat(buf, "sdroppeddatapkt ", blen);
800 if (err & INFINIPATH_E_SPKTLEN)
801 strlcat(buf, "spktlen ", blen);
802 }
803 if ((err & INFINIPATH_E_RICRC) &&
804 !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
805 strlcat(buf, "CRC ", blen);
806 if (!iserr)
807 goto done;
808 }
760 if (err & INFINIPATH_E_RHDRLEN) 809 if (err & INFINIPATH_E_RHDRLEN)
761 strlcat(buf, "rhdrlen ", blen); 810 strlcat(buf, "rhdrlen ", blen);
762 if (err & INFINIPATH_E_RBADTID) 811 if (err & INFINIPATH_E_RBADTID)
@@ -767,12 +816,12 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
767 strlcat(buf, "rhdr ", blen); 816 strlcat(buf, "rhdr ", blen);
768 if (err & INFINIPATH_E_RLONGPKTLEN) 817 if (err & INFINIPATH_E_RLONGPKTLEN)
769 strlcat(buf, "rlongpktlen ", blen); 818 strlcat(buf, "rlongpktlen ", blen);
770 if (err & INFINIPATH_E_RSHORTPKTLEN)
771 strlcat(buf, "rshortpktlen ", blen);
772 if (err & INFINIPATH_E_RMAXPKTLEN) 819 if (err & INFINIPATH_E_RMAXPKTLEN)
773 strlcat(buf, "rmaxpktlen ", blen); 820 strlcat(buf, "rmaxpktlen ", blen);
774 if (err & INFINIPATH_E_RMINPKTLEN) 821 if (err & INFINIPATH_E_RMINPKTLEN)
775 strlcat(buf, "rminpktlen ", blen); 822 strlcat(buf, "rminpktlen ", blen);
823 if (err & INFINIPATH_E_SMINPKTLEN)
824 strlcat(buf, "sminpktlen ", blen);
776 if (err & INFINIPATH_E_RFORMATERR) 825 if (err & INFINIPATH_E_RFORMATERR)
777 strlcat(buf, "rformaterr ", blen); 826 strlcat(buf, "rformaterr ", blen);
778 if (err & INFINIPATH_E_RUNSUPVL) 827 if (err & INFINIPATH_E_RUNSUPVL)
@@ -781,32 +830,20 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
781 strlcat(buf, "runexpchar ", blen); 830 strlcat(buf, "runexpchar ", blen);
782 if (err & INFINIPATH_E_RIBFLOW) 831 if (err & INFINIPATH_E_RIBFLOW)
783 strlcat(buf, "ribflow ", blen); 832 strlcat(buf, "ribflow ", blen);
784 if (err & INFINIPATH_E_REBP)
785 strlcat(buf, "EBP ", blen);
786 if (err & INFINIPATH_E_SUNDERRUN) 833 if (err & INFINIPATH_E_SUNDERRUN)
787 strlcat(buf, "sunderrun ", blen); 834 strlcat(buf, "sunderrun ", blen);
788 if (err & INFINIPATH_E_SPIOARMLAUNCH) 835 if (err & INFINIPATH_E_SPIOARMLAUNCH)
789 strlcat(buf, "spioarmlaunch ", blen); 836 strlcat(buf, "spioarmlaunch ", blen);
790 if (err & INFINIPATH_E_SUNEXPERRPKTNUM) 837 if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
791 strlcat(buf, "sunexperrpktnum ", blen); 838 strlcat(buf, "sunexperrpktnum ", blen);
792 if (err & INFINIPATH_E_SDROPPEDDATAPKT)
793 strlcat(buf, "sdroppeddatapkt ", blen);
794 if (err & INFINIPATH_E_SDROPPEDSMPPKT) 839 if (err & INFINIPATH_E_SDROPPEDSMPPKT)
795 strlcat(buf, "sdroppedsmppkt ", blen); 840 strlcat(buf, "sdroppedsmppkt ", blen);
796 if (err & INFINIPATH_E_SMAXPKTLEN) 841 if (err & INFINIPATH_E_SMAXPKTLEN)
797 strlcat(buf, "smaxpktlen ", blen); 842 strlcat(buf, "smaxpktlen ", blen);
798 if (err & INFINIPATH_E_SMINPKTLEN)
799 strlcat(buf, "sminpktlen ", blen);
800 if (err & INFINIPATH_E_SUNSUPVL) 843 if (err & INFINIPATH_E_SUNSUPVL)
801 strlcat(buf, "sunsupVL ", blen); 844 strlcat(buf, "sunsupVL ", blen);
802 if (err & INFINIPATH_E_SPKTLEN)
803 strlcat(buf, "spktlen ", blen);
804 if (err & INFINIPATH_E_INVALIDADDR) 845 if (err & INFINIPATH_E_INVALIDADDR)
805 strlcat(buf, "invalidaddr ", blen); 846 strlcat(buf, "invalidaddr ", blen);
806 if (err & INFINIPATH_E_RICRC)
807 strlcat(buf, "CRC ", blen);
808 if (err & INFINIPATH_E_RVCRC)
809 strlcat(buf, "VCRC ", blen);
810 if (err & INFINIPATH_E_RRCVEGRFULL) 847 if (err & INFINIPATH_E_RRCVEGRFULL)
811 strlcat(buf, "rcvegrfull ", blen); 848 strlcat(buf, "rcvegrfull ", blen);
812 if (err & INFINIPATH_E_RRCVHDRFULL) 849 if (err & INFINIPATH_E_RRCVHDRFULL)
@@ -819,6 +856,8 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
819 strlcat(buf, "hardware ", blen); 856 strlcat(buf, "hardware ", blen);
820 if (err & INFINIPATH_E_RESET) 857 if (err & INFINIPATH_E_RESET)
821 strlcat(buf, "reset ", blen); 858 strlcat(buf, "reset ", blen);
859done:
860 return iserr;
822} 861}
823 862
824/** 863/**
@@ -1662,6 +1701,22 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1662 lstate = IPATH_LINKACTIVE; 1701 lstate = IPATH_LINKACTIVE;
1663 break; 1702 break;
1664 1703
1704 case IPATH_IB_LINK_LOOPBACK:
1705 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
1706 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
1707 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1708 dd->ipath_ibcctrl);
1709 ret = 0;
1710 goto bail; // no state change to wait for
1711
1712 case IPATH_IB_LINK_EXTERNAL:
1713 dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
1714 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
1715 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1716 dd->ipath_ibcctrl);
1717 ret = 0;
1718 goto bail; // no state change to wait for
1719
1665 default: 1720 default:
1666 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); 1721 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
1667 ret = -EINVAL; 1722 ret = -EINVAL;
@@ -1765,29 +1820,6 @@ int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
1765 return 0; 1820 return 0;
1766} 1821}
1767 1822
1768/**
1769 * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
1770 * @dd: the infinipath device
1771 * @regno: the register number to read
1772 * @port: the port containing the register
1773 *
1774 * Registers that vary with the chip implementation constants (port)
1775 * use this routine.
1776 */
1777u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno,
1778 unsigned port)
1779{
1780 u16 where;
1781
1782 if (port < dd->ipath_portcnt &&
1783 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1784 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1785 where = regno + port;
1786 else
1787 where = -1;
1788
1789 return ipath_read_kreg64(dd, where);
1790}
1791 1823
1792/** 1824/**
1793 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register 1825 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
@@ -1973,7 +2005,8 @@ static int __init infinipath_init(void)
1973{ 2005{
1974 int ret; 2006 int ret;
1975 2007
1976 ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version); 2008 if (ipath_debug & __IPATH_DBG)
2009 printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
1977 2010
1978 /* 2011 /*
1979 * These must be called before the driver is registered with 2012 * These must be called before the driver is registered with
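
ipath_decode_err() above now reports whether any "real" (non-packet) error bits were set and appends most of the per-packet error strings only when __IPATH_ERRPKTDBG is enabled, so callers can suppress routine CRC/short-packet noise. A standalone sketch of that decode-flags-to-string shape; the error bits and names below are illustrative (the kernel uses strlcat, strncat is used here for portability):

#include <stdio.h>
#include <string.h>

#define E_VCRC       0x01u	/* "packet" errors */
#define E_ICRC       0x02u
#define E_PKTERRS    (E_VCRC | E_ICRC)
#define E_HARDWARE   0x10u	/* "real" errors   */
#define E_RESET      0x20u

static int decode_err(char *buf, size_t blen, unsigned int err, int verbose)
{
	int iserr = 1;

	buf[0] = '\0';
	if ((err & E_PKTERRS) && !(err & ~E_PKTERRS))
		iserr = 0;			/* only packet errors set */
	if (verbose) {
		if (err & E_VCRC)
			strncat(buf, "VCRC ", blen - strlen(buf) - 1);
		if (err & E_ICRC)
			strncat(buf, "CRC ", blen - strlen(buf) - 1);
	}
	if (!iserr)
		return 0;
	if (err & E_HARDWARE)
		strncat(buf, "hardware ", blen - strlen(buf) - 1);
	if (err & E_RESET)
		strncat(buf, "reset ", blen - strlen(buf) - 1);
	return 1;
}

int main(void)
{
	char buf[64];
	int iserr = decode_err(buf, sizeof(buf), E_VCRC | E_RESET, 1);

	printf("iserr=%d msg=\"%s\"\n", iserr, buf);
	return 0;
}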
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index a4019a6b7560..030185f90ee2 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -626,6 +626,10 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
626 } else 626 } else
627 memcpy(dd->ipath_serial, ifp->if_serial, 627 memcpy(dd->ipath_serial, ifp->if_serial,
628 sizeof ifp->if_serial); 628 sizeof ifp->if_serial);
629 if (!strstr(ifp->if_comment, "Tested successfully"))
630 ipath_dev_err(dd, "Board SN %s did not pass functional "
631 "test: %s\n", dd->ipath_serial,
632 ifp->if_comment);
629 633
630 ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n", 634 ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
631 (unsigned long long) be64_to_cpu(dd->ipath_guid)); 635 (unsigned long long) be64_to_cpu(dd->ipath_guid));
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 5d64ff875297..1272aaf2a785 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -41,12 +41,6 @@
41#include "ipath_kernel.h" 41#include "ipath_kernel.h"
42#include "ipath_common.h" 42#include "ipath_common.h"
43 43
44/*
45 * mmap64 doesn't allow all 64 bits for 32-bit applications
46 * so only use the low 43 bits.
47 */
48#define MMAP64_MASK 0x7FFFFFFFFFFUL
49
50static int ipath_open(struct inode *, struct file *); 44static int ipath_open(struct inode *, struct file *);
51static int ipath_close(struct inode *, struct file *); 45static int ipath_close(struct inode *, struct file *);
52static ssize_t ipath_write(struct file *, const char __user *, size_t, 46static ssize_t ipath_write(struct file *, const char __user *, size_t,
@@ -63,6 +57,24 @@ static const struct file_operations ipath_file_ops = {
63 .mmap = ipath_mmap 57 .mmap = ipath_mmap
64}; 58};
65 59
60/*
61 * Convert kernel virtual addresses to physical addresses so they don't
62 * potentially conflict with the chip addresses used as mmap offsets.
63 * It doesn't really matter what mmap offset we use as long as we can
64 * interpret it correctly.
65 */
66static u64 cvt_kvaddr(void *p)
67{
68 struct page *page;
69 u64 paddr = 0;
70
71 page = vmalloc_to_page(p);
72 if (page)
73 paddr = page_to_pfn(page) << PAGE_SHIFT;
74
75 return paddr;
76}
77
66static int ipath_get_base_info(struct file *fp, 78static int ipath_get_base_info(struct file *fp,
67 void __user *ubase, size_t ubase_size) 79 void __user *ubase, size_t ubase_size)
68{ 80{
@@ -87,7 +99,7 @@ static int ipath_get_base_info(struct file *fp,
87 sz = sizeof(*kinfo); 99 sz = sizeof(*kinfo);
88 /* If port sharing is not requested, allow the old size structure */ 100 /* If port sharing is not requested, allow the old size structure */
89 if (!shared) 101 if (!shared)
90 sz -= 3 * sizeof(u64); 102 sz -= 7 * sizeof(u64);
91 if (ubase_size < sz) { 103 if (ubase_size < sz) {
92 ipath_cdbg(PROC, 104 ipath_cdbg(PROC,
93 "Base size %zu, need %zu (version mismatch?)\n", 105 "Base size %zu, need %zu (version mismatch?)\n",
@@ -165,24 +177,41 @@ static int ipath_get_base_info(struct file *fp,
165 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 177 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
166 dd->ipath_palign * 178 dd->ipath_palign *
167 (dd->ipath_pbufsport - kinfo->spi_piocnt); 179 (dd->ipath_pbufsport - kinfo->spi_piocnt);
168 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
169 dd->ipath_palign * pd->port_port;
170 } else { 180 } else {
171 unsigned slave = subport_fp(fp) - 1; 181 unsigned slave = subport_fp(fp) - 1;
172 182
173 kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; 183 kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
174 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 184 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
175 dd->ipath_palign * kinfo->spi_piocnt * slave; 185 dd->ipath_palign * kinfo->spi_piocnt * slave;
176 kinfo->__spi_uregbase = ((u64) pd->subport_uregbase + 186 }
177 PAGE_SIZE * slave) & MMAP64_MASK; 187 if (shared) {
188 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
189 dd->ipath_palign * pd->port_port;
190 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
191 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
192 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
178 193
179 kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base + 194 kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
180 pd->port_rcvhdrq_size * slave) & MMAP64_MASK; 195 PAGE_SIZE * subport_fp(fp));
181 kinfo->spi_rcvhdr_tailaddr = 196
182 (u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK; 197 kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
183 kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf + 198 pd->port_rcvhdrq_size * subport_fp(fp));
184 dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) & 199 kinfo->spi_rcvhdr_tailaddr = 0;
185 MMAP64_MASK; 200 kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
201 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
202 subport_fp(fp));
203
204 kinfo->spi_subport_uregbase =
205 cvt_kvaddr(pd->subport_uregbase);
206 kinfo->spi_subport_rcvegrbuf =
207 cvt_kvaddr(pd->subport_rcvegrbuf);
208 kinfo->spi_subport_rcvhdr_base =
209 cvt_kvaddr(pd->subport_rcvhdr_base);
210 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
211 kinfo->spi_port, kinfo->spi_runtime_flags,
212 (unsigned long long) kinfo->spi_subport_uregbase,
213 (unsigned long long) kinfo->spi_subport_rcvegrbuf,
214 (unsigned long long) kinfo->spi_subport_rcvhdr_base);
186 } 215 }
187 216
188 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / 217 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
@@ -199,20 +228,10 @@ static int ipath_get_base_info(struct file *fp,
199 228
200 if (master) { 229 if (master) {
201 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; 230 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
202 kinfo->spi_subport_uregbase =
203 (u64) pd->subport_uregbase & MMAP64_MASK;
204 kinfo->spi_subport_rcvegrbuf =
205 (u64) pd->subport_rcvegrbuf & MMAP64_MASK;
206 kinfo->spi_subport_rcvhdr_base =
207 (u64) pd->subport_rcvhdr_base & MMAP64_MASK;
208 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
209 kinfo->spi_port, kinfo->spi_runtime_flags,
210 (unsigned long long) kinfo->spi_subport_uregbase,
211 (unsigned long long) kinfo->spi_subport_rcvegrbuf,
212 (unsigned long long) kinfo->spi_subport_rcvhdr_base);
213 } 231 }
214 232
215 if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) 233 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
234 if (copy_to_user(ubase, kinfo, sz))
216 ret = -EFAULT; 235 ret = -EFAULT;
217 236
218bail: 237bail:
@@ -1132,67 +1151,55 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
1132 struct ipath_devdata *dd; 1151 struct ipath_devdata *dd;
1133 void *addr; 1152 void *addr;
1134 size_t size; 1153 size_t size;
1135 int ret; 1154 int ret = 0;
1136 1155
1137 /* If the port is not shared, all addresses should be physical */ 1156 /* If the port is not shared, all addresses should be physical */
1138 if (!pd->port_subport_cnt) { 1157 if (!pd->port_subport_cnt)
1139 ret = -EINVAL;
1140 goto bail; 1158 goto bail;
1141 }
1142 1159
1143 dd = pd->port_dd; 1160 dd = pd->port_dd;
1144 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; 1161 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
1145 1162
1146 /* 1163 /*
1147 * Master has all the slave uregbase, rcvhdrq, and 1164 * Each process has all the subport uregbase, rcvhdrq, and
1148 * rcvegrbufs mmapped. 1165 * rcvegrbufs mmapped - as an array for all the processes,
1166 * and also separately for this process.
1149 */ 1167 */
1150 if (subport == 0) { 1168 if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
1151 unsigned num_slaves = pd->port_subport_cnt - 1; 1169 addr = pd->subport_uregbase;
1152 1170 size = PAGE_SIZE * pd->port_subport_cnt;
1153 if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) { 1171 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
1154 addr = pd->subport_uregbase; 1172 addr = pd->subport_rcvhdr_base;
1155 size = PAGE_SIZE * num_slaves; 1173 size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
1156 } else if (pgaddr == ((u64) pd->subport_rcvhdr_base & 1174 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
1157 MMAP64_MASK)) { 1175 addr = pd->subport_rcvegrbuf;
1158 addr = pd->subport_rcvhdr_base; 1176 size *= pd->port_subport_cnt;
1159 size = pd->port_rcvhdrq_size * num_slaves; 1177 } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
1160 } else if (pgaddr == ((u64) pd->subport_rcvegrbuf & 1178 PAGE_SIZE * subport)) {
1161 MMAP64_MASK)) { 1179 addr = pd->subport_uregbase + PAGE_SIZE * subport;
1162 addr = pd->subport_rcvegrbuf; 1180 size = PAGE_SIZE;
1163 size *= num_slaves; 1181 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
1164 } else { 1182 pd->port_rcvhdrq_size * subport)) {
1165 ret = -EINVAL; 1183 addr = pd->subport_rcvhdr_base +
1166 goto bail; 1184 pd->port_rcvhdrq_size * subport;
1167 } 1185 size = pd->port_rcvhdrq_size;
1168 } else if (pgaddr == (((u64) pd->subport_uregbase + 1186 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
1169 PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) { 1187 size * subport)) {
1170 addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1); 1188 addr = pd->subport_rcvegrbuf + size * subport;
1171 size = PAGE_SIZE; 1189 /* rcvegrbufs are read-only on the slave */
1172 } else if (pgaddr == (((u64) pd->subport_rcvhdr_base + 1190 if (vma->vm_flags & VM_WRITE) {
1173 pd->port_rcvhdrq_size * (subport - 1)) & 1191 dev_info(&dd->pcidev->dev,
1174 MMAP64_MASK)) { 1192 "Can't map eager buffers as "
1175 addr = pd->subport_rcvhdr_base + 1193 "writable (flags=%lx)\n", vma->vm_flags);
1176 pd->port_rcvhdrq_size * (subport - 1); 1194 ret = -EPERM;
1177 size = pd->port_rcvhdrq_size; 1195 goto bail;
1178 } else if (pgaddr == (((u64) pd->subport_rcvegrbuf + 1196 }
1179 size * (subport - 1)) & MMAP64_MASK)) { 1197 /*
1180 addr = pd->subport_rcvegrbuf + size * (subport - 1); 1198 * Don't allow permission to later change to writeable
1181 /* rcvegrbufs are read-only on the slave */ 1199 * with mprotect.
1182 if (vma->vm_flags & VM_WRITE) { 1200 */
1183 dev_info(&dd->pcidev->dev, 1201 vma->vm_flags &= ~VM_MAYWRITE;
1184 "Can't map eager buffers as "
1185 "writable (flags=%lx)\n", vma->vm_flags);
1186 ret = -EPERM;
1187 goto bail;
1188 }
1189 /*
1190 * Don't allow permission to later change to writeable
1191 * with mprotect.
1192 */
1193 vma->vm_flags &= ~VM_MAYWRITE;
1194 } else { 1202 } else {
1195 ret = -EINVAL;
1196 goto bail; 1203 goto bail;
1197 } 1204 }
1198 len = vma->vm_end - vma->vm_start; 1205 len = vma->vm_end - vma->vm_start;
@@ -1205,7 +1212,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
1205 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; 1212 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
1206 vma->vm_ops = &ipath_file_vm_ops; 1213 vma->vm_ops = &ipath_file_vm_ops;
1207 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 1214 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
1208 ret = 0; 1215 ret = 1;
1209 1216
1210bail: 1217bail:
1211 return ret; 1218 return ret;
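
For shared ports, mmap_kvaddr() now matches the requested page address against both the whole per-port arrays and this subport's own slice of each array. A hedged sketch of the offset arithmetic implied by those comparisons; the per-subport sizes below are illustrative assumptions, not the driver's real values:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned subport_cnt = 4;                    /* master + 3 slaves */
            unsigned long rcvhdrq_size = 64 * 1024;      /* per-subport rcvhdrq bytes */
            unsigned long rcvegrbuf_bytes = 256 * 1024;  /* per-subport eager-buffer bytes */
            unsigned s;

            /* each subport's slice starts at a fixed offset into the shared arrays */
            for (s = 0; s < subport_cnt; s++)
                    printf("subport %u: uregbase +0x%lx  rcvhdrq +0x%lx  rcvegrbuf +0x%lx\n",
                           s, PAGE_SIZE * s, rcvhdrq_size * s, rcvegrbuf_bytes * s);
            return 0;
    }

Note that the eager-buffer slice is additionally forced read-only for slaves by rejecting VM_WRITE and clearing VM_MAYWRITE, as the hunk above shows.
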
@@ -1265,19 +1272,20 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1265 * Check for kernel virtual addresses first, anything else must 1272 * Check for kernel virtual addresses first, anything else must
1266 * match a HW or memory address. 1273 * match a HW or memory address.
1267 */ 1274 */
1268 if (pgaddr >= (1ULL<<40)) { 1275 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
1269 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); 1276 if (ret) {
1277 if (ret > 0)
1278 ret = 0;
1270 goto bail; 1279 goto bail;
1271 } 1280 }
1272 1281
1282 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1273 if (!pd->port_subport_cnt) { 1283 if (!pd->port_subport_cnt) {
1274 /* port is not shared */ 1284 /* port is not shared */
1275 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1276 piocnt = dd->ipath_pbufsport; 1285 piocnt = dd->ipath_pbufsport;
1277 piobufs = pd->port_piobufs; 1286 piobufs = pd->port_piobufs;
1278 } else if (!subport_fp(fp)) { 1287 } else if (!subport_fp(fp)) {
1279 /* caller is the master */ 1288 /* caller is the master */
1280 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1281 piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + 1289 piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
1282 (dd->ipath_pbufsport % pd->port_subport_cnt); 1290 (dd->ipath_pbufsport % pd->port_subport_cnt);
1283 piobufs = pd->port_piobufs + 1291 piobufs = pd->port_piobufs +
@@ -1286,7 +1294,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1286 unsigned slave = subport_fp(fp) - 1; 1294 unsigned slave = subport_fp(fp) - 1;
1287 1295
1288 /* caller is a slave */ 1296 /* caller is a slave */
1289 ureg = 0;
1290 piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; 1297 piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
1291 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; 1298 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
1292 } 1299 }
@@ -1300,9 +1307,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1300 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, 1307 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
1301 (void *) dd->ipath_pioavailregs_dma, 1308 (void *) dd->ipath_pioavailregs_dma,
1302 "pioavail registers"); 1309 "pioavail registers");
1303 else if (subport_fp(fp))
1304 /* Subports don't mmap the physical receive buffers */
1305 ret = -EINVAL;
1306 else if (pgaddr == pd->port_rcvegr_phys) 1310 else if (pgaddr == pd->port_rcvegr_phys)
1307 ret = mmap_rcvegrbufs(vma, pd); 1311 ret = mmap_rcvegrbufs(vma, pd);
1308 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) 1312 else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
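
In ipath_mmap(), the per-port PIO buffers are now split so that the master keeps the division remainder and each slave gets an equal share; ureg is also computed once for every caller. A small sketch of that split with made-up counts:

    #include <stdio.h>

    int main(void)
    {
            unsigned pbufsport = 18, subport_cnt = 4;   /* illustrative counts */
            unsigned master = pbufsport / subport_cnt + pbufsport % subport_cnt;
            unsigned slave  = pbufsport / subport_cnt;

            printf("master: %u PIO buffers\n", master);
            printf("each of %u slaves: %u PIO buffers\n", subport_cnt - 1, slave);
            printf("total: %u of %u\n", master + (subport_cnt - 1) * slave, pbufsport);
            return 0;
    }
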
@@ -1400,32 +1404,41 @@ static int init_subports(struct ipath_devdata *dd,
1400 const struct ipath_user_info *uinfo) 1404 const struct ipath_user_info *uinfo)
1401{ 1405{
1402 int ret = 0; 1406 int ret = 0;
1403 unsigned num_slaves; 1407 unsigned num_subports;
1404 size_t size; 1408 size_t size;
1405 1409
1406 /* Old user binaries don't know about subports */
1407 if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
1408 goto bail;
1409 /* 1410 /*
1410 * If the user is requesting zero or one port, 1411 * If the user is requesting zero or one port,
1411 * skip the subport allocation. 1412 * skip the subport allocation.
1412 */ 1413 */
1413 if (uinfo->spu_subport_cnt <= 1) 1414 if (uinfo->spu_subport_cnt <= 1)
1414 goto bail; 1415 goto bail;
1415 if (uinfo->spu_subport_cnt > 4) { 1416
1417 /* Old user binaries don't know about new subport implementation */
1418 if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) {
1419 dev_info(&dd->pcidev->dev,
1420 "Mismatched user minor version (%d) and driver "
1421 "minor version (%d) while port sharing. Ensure "
1422 "that driver and library are from the same "
1423 "release.\n",
1424 (int) (uinfo->spu_userversion & 0xffff),
1425 IPATH_USER_SWMINOR);
1426 goto bail;
1427 }
1428 if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
1416 ret = -EINVAL; 1429 ret = -EINVAL;
1417 goto bail; 1430 goto bail;
1418 } 1431 }
1419 1432
1420 num_slaves = uinfo->spu_subport_cnt - 1; 1433 num_subports = uinfo->spu_subport_cnt;
1421 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves); 1434 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
1422 if (!pd->subport_uregbase) { 1435 if (!pd->subport_uregbase) {
1423 ret = -ENOMEM; 1436 ret = -ENOMEM;
1424 goto bail; 1437 goto bail;
1425 } 1438 }
1426 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ 1439 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
1427 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * 1440 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1428 sizeof(u32), PAGE_SIZE) * num_slaves; 1441 sizeof(u32), PAGE_SIZE) * num_subports;
1429 pd->subport_rcvhdr_base = vmalloc(size); 1442 pd->subport_rcvhdr_base = vmalloc(size);
1430 if (!pd->subport_rcvhdr_base) { 1443 if (!pd->subport_rcvhdr_base) {
1431 ret = -ENOMEM; 1444 ret = -ENOMEM;
@@ -1434,7 +1447,7 @@ static int init_subports(struct ipath_devdata *dd,
1434 1447
1435 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * 1448 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
1436 pd->port_rcvegrbuf_size * 1449 pd->port_rcvegrbuf_size *
1437 num_slaves); 1450 num_subports);
1438 if (!pd->subport_rcvegrbuf) { 1451 if (!pd->subport_rcvegrbuf) {
1439 ret = -ENOMEM; 1452 ret = -ENOMEM;
1440 goto bail_rhdr; 1453 goto bail_rhdr;
@@ -1443,6 +1456,12 @@ static int init_subports(struct ipath_devdata *dd,
1443 pd->port_subport_cnt = uinfo->spu_subport_cnt; 1456 pd->port_subport_cnt = uinfo->spu_subport_cnt;
1444 pd->port_subport_id = uinfo->spu_subport_id; 1457 pd->port_subport_id = uinfo->spu_subport_id;
1445 pd->active_slaves = 1; 1458 pd->active_slaves = 1;
1459 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
1460 memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
1461 memset(pd->subport_rcvhdr_base, 0, size);
1462 memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
1463 pd->port_rcvegrbuf_size *
1464 num_subports);
1446 goto bail; 1465 goto bail;
1447 1466
1448bail_rhdr: 1467bail_rhdr:
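
init_subports() now sizes each shared area for all subports (master included) rather than only the slaves, zeroes the memory, and flags the port as master-uninitialized so slaves can wait on it. A sketch of the rcvhdrq sizing with illustrative register counts:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            unsigned long rcvhdrcnt = 64, rcvhdrentsize = 16;   /* made-up chip values */
            unsigned num_subports = 4;                          /* master + 3 slaves */
            unsigned long per_port =
                    ALIGN_UP(rcvhdrcnt * rcvhdrentsize * sizeof(unsigned int), PAGE_SIZE);

            printf("per-subport rcvhdrq %lu bytes, shared vmalloc %lu bytes\n",
                   per_port, per_port * num_subports);
            return 0;
    }
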
@@ -1573,18 +1592,19 @@ static int find_best_unit(struct file *fp,
1573 */ 1592 */
1574 if (!cpus_empty(current->cpus_allowed) && 1593 if (!cpus_empty(current->cpus_allowed) &&
1575 !cpus_full(current->cpus_allowed)) { 1594 !cpus_full(current->cpus_allowed)) {
1576 int ncpus = num_online_cpus(), curcpu = -1; 1595 int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
1577 for (i = 0; i < ncpus; i++) 1596 for (i = 0; i < ncpus; i++)
1578 if (cpu_isset(i, current->cpus_allowed)) { 1597 if (cpu_isset(i, current->cpus_allowed)) {
1579 ipath_cdbg(PROC, "%s[%u] affinity set for " 1598 ipath_cdbg(PROC, "%s[%u] affinity set for "
1580 "cpu %d\n", current->comm, 1599 "cpu %d/%d\n", current->comm,
1581 current->pid, i); 1600 current->pid, i, ncpus);
1582 curcpu = i; 1601 curcpu = i;
1602 nset++;
1583 } 1603 }
1584 if (curcpu != -1) { 1604 if (curcpu != -1 && nset != ncpus) {
1585 if (npresent) { 1605 if (npresent) {
1586 prefunit = curcpu / (ncpus / npresent); 1606 prefunit = curcpu / (ncpus / npresent);
1587 ipath_dbg("%s[%u] %d chips, %d cpus, " 1607 ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
1588 "%d cpus/chip, select unit %d\n", 1608 "%d cpus/chip, select unit %d\n",
1589 current->comm, current->pid, 1609 current->comm, current->pid,
1590 npresent, ncpus, ncpus / npresent, 1610 npresent, ncpus, ncpus / npresent,
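
find_best_unit() now only uses CPU affinity to pick a unit when the allowed mask is a strict subset of the online CPUs (nset != ncpus), and it maps the highest allowed CPU onto a chip by simple division. A worked example of that mapping with assumed counts:

    #include <stdio.h>

    int main(void)
    {
            int ncpus = 8, npresent = 2;     /* illustrative: 8 CPUs, 2 HCAs */
            int curcpu = 5;                  /* highest CPU in the task's mask */
            int prefunit = curcpu / (ncpus / npresent);

            printf("cpu %d of %d -> prefer unit %d of %d\n",
                   curcpu, ncpus, prefunit, npresent);
            return 0;
    }
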
@@ -1764,11 +1784,17 @@ static int ipath_do_user_init(struct file *fp,
1764 const struct ipath_user_info *uinfo) 1784 const struct ipath_user_info *uinfo)
1765{ 1785{
1766 int ret; 1786 int ret;
1767 struct ipath_portdata *pd; 1787 struct ipath_portdata *pd = port_fp(fp);
1768 struct ipath_devdata *dd; 1788 struct ipath_devdata *dd;
1769 u32 head32; 1789 u32 head32;
1770 1790
1771 pd = port_fp(fp); 1791 /* Subports don't need to initialize anything since master did it. */
1792 if (subport_fp(fp)) {
1793 ret = wait_event_interruptible(pd->port_wait,
1794 !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
1795 goto done;
1796 }
1797
1772 dd = pd->port_dd; 1798 dd = pd->port_dd;
1773 1799
1774 if (uinfo->spu_rcvhdrsize) { 1800 if (uinfo->spu_rcvhdrsize) {
@@ -1826,6 +1852,11 @@ static int ipath_do_user_init(struct file *fp,
1826 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); 1852 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
1827 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1853 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1828 dd->ipath_rcvctrl); 1854 dd->ipath_rcvctrl);
1855 /* Notify any waiting slaves */
1856 if (pd->port_subport_cnt) {
1857 clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
1858 wake_up(&pd->port_wait);
1859 }
1829done: 1860done:
1830 return ret; 1861 return ret;
1831} 1862}
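
The new handshake lets slave subports skip port initialization entirely: they sleep until the master clears IPATH_PORT_MASTER_UNINIT and wakes port_wait. A hedged user-space model of the same handshake, with pthreads standing in for the kernel wait queue (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int master_uninit = 1;            /* IPATH_PORT_MASTER_UNINIT analogue */

    static void *slave(void *arg)
    {
            pthread_mutex_lock(&lock);
            while (master_uninit)             /* wait_event_interruptible(...) */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            printf("slave %ld: port ready, skipping init\n", (long)arg);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[3];
            long i;

            for (i = 0; i < 3; i++)
                    pthread_create(&t[i], NULL, slave, (void *)i);

            /* the master does the real port initialization here ... */

            pthread_mutex_lock(&lock);
            master_uninit = 0;                /* clear_bit(IPATH_PORT_MASTER_UNINIT, ...) */
            pthread_cond_broadcast(&cond);    /* wake_up(&pd->port_wait) */
            pthread_mutex_unlock(&lock);

            for (i = 0; i < 3; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }
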
@@ -2017,6 +2048,17 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
2017 return ret; 2048 return ret;
2018} 2049}
2019 2050
2051static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
2052{
2053 u64 reg = dd->ipath_sendctrl;
2054
2055 clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
2056 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
2057 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
2058
2059 return 0;
2060}
2061
2020static ssize_t ipath_write(struct file *fp, const char __user *data, 2062static ssize_t ipath_write(struct file *fp, const char __user *data,
2021 size_t count, loff_t *off) 2063 size_t count, loff_t *off)
2022{ 2064{
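
ipath_force_pio_avail_update() writes kr_sendctrl once with the avail-update bit cleared and then restores the cached value, presumably so the 0-to-1 transition makes the chip refresh its PIO-availability shadow. A user-space model of that toggle; the bit position and register contents are assumptions for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    #define S_PIOBUFAVAILUPD (1ULL << 4)      /* assumed bit position */

    static uint64_t chip_sendctrl;            /* stands in for the kreg */

    static void write_kreg_sendctrl(uint64_t val)
    {
            chip_sendctrl = val;
            printf("sendctrl <- 0x%llx (availupd=%d)\n",
                   (unsigned long long)val, !!(val & S_PIOBUFAVAILUPD));
    }

    int main(void)
    {
            uint64_t shadow = S_PIOBUFAVAILUPD | 0x3;     /* driver's cached value */

            write_kreg_sendctrl(shadow & ~S_PIOBUFAVAILUPD);  /* clear the bit ... */
            write_kreg_sendctrl(shadow);                      /* ... then restore it */
            return 0;
    }
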
@@ -2071,27 +2113,35 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2071 dest = &cmd.cmd.part_key; 2113 dest = &cmd.cmd.part_key;
2072 src = &ucmd->cmd.part_key; 2114 src = &ucmd->cmd.part_key;
2073 break; 2115 break;
2074 case IPATH_CMD_SLAVE_INFO: 2116 case __IPATH_CMD_SLAVE_INFO:
2075 copy = sizeof(cmd.cmd.slave_mask_addr); 2117 copy = sizeof(cmd.cmd.slave_mask_addr);
2076 dest = &cmd.cmd.slave_mask_addr; 2118 dest = &cmd.cmd.slave_mask_addr;
2077 src = &ucmd->cmd.slave_mask_addr; 2119 src = &ucmd->cmd.slave_mask_addr;
2078 break; 2120 break;
2121 case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg
2122 copy = 0;
2123 src = NULL;
2124 dest = NULL;
2125 break;
2079 default: 2126 default:
2080 ret = -EINVAL; 2127 ret = -EINVAL;
2081 goto bail; 2128 goto bail;
2082 } 2129 }
2083 2130
2084 if ((count - consumed) < copy) { 2131 if (copy) {
2085 ret = -EINVAL; 2132 if ((count - consumed) < copy) {
2086 goto bail; 2133 ret = -EINVAL;
2087 } 2134 goto bail;
2135 }
2088 2136
2089 if (copy_from_user(dest, src, copy)) { 2137 if (copy_from_user(dest, src, copy)) {
2090 ret = -EFAULT; 2138 ret = -EFAULT;
2091 goto bail; 2139 goto bail;
2140 }
2141
2142 consumed += copy;
2092 } 2143 }
2093 2144
2094 consumed += copy;
2095 pd = port_fp(fp); 2145 pd = port_fp(fp);
2096 if (!pd && cmd.type != __IPATH_CMD_USER_INIT && 2146 if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
2097 cmd.type != IPATH_CMD_ASSIGN_PORT) { 2147 cmd.type != IPATH_CMD_ASSIGN_PORT) {
@@ -2137,11 +2187,14 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2137 case IPATH_CMD_SET_PART_KEY: 2187 case IPATH_CMD_SET_PART_KEY:
2138 ret = ipath_set_part_key(pd, cmd.cmd.part_key); 2188 ret = ipath_set_part_key(pd, cmd.cmd.part_key);
2139 break; 2189 break;
2140 case IPATH_CMD_SLAVE_INFO: 2190 case __IPATH_CMD_SLAVE_INFO:
2141 ret = ipath_get_slave_info(pd, 2191 ret = ipath_get_slave_info(pd,
2142 (void __user *) (unsigned long) 2192 (void __user *) (unsigned long)
2143 cmd.cmd.slave_mask_addr); 2193 cmd.cmd.slave_mask_addr);
2144 break; 2194 break;
2195 case IPATH_CMD_PIOAVAILUPD:
2196 ret = ipath_force_pio_avail_update(pd->port_dd);
2197 break;
2145 } 2198 }
2146 2199
2147 if (ret >= 0) 2200 if (ret >= 0)
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 993482545021..4171198fc202 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -43,6 +43,9 @@
43#include "ipath_kernel.h" 43#include "ipath_kernel.h"
44#include "ipath_registers.h" 44#include "ipath_registers.h"
45 45
46static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
47
48
46/* 49/*
47 * This lists the InfiniPath registers, in the actual chip layout. 50 * This lists the InfiniPath registers, in the actual chip layout.
48 * This structure should never be directly accessed. 51 * This structure should never be directly accessed.
@@ -208,8 +211,8 @@ static const struct ipath_kregs ipath_ht_kregs = {
208 .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus), 211 .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
209 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), 212 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
210 /* 213 /*
211 * These should not be used directly via ipath_read_kreg64(), 214 * These should not be used directly via ipath_write_kreg64(),
212 * use them with ipath_read_kreg64_port(), 215 * use them with ipath_write_kreg64_port(),
213 */ 216 */
214 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), 217 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
215 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0) 218 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
@@ -284,6 +287,14 @@ static const struct ipath_cregs ipath_ht_cregs = {
284#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 287#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
285#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000 288#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000
286 289
290
291/* TID entries (memory), HT-only */
292#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
293#define INFINIPATH_RT_VALID 0x8000000000000000ULL
294#define INFINIPATH_RT_ADDR_SHIFT 0
295#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
296#define INFINIPATH_RT_BUFSIZE_SHIFT 48
297
287/* 298/*
288 * masks and bits that are different in different chips, or present only 299 * masks and bits that are different in different chips, or present only
289 * in one 300 * in one
@@ -402,6 +413,14 @@ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
402 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), 413 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
403}; 414};
404 415
416#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
417 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
418 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
419#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
420 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
421
422static int ipath_ht_txe_recover(struct ipath_devdata *);
423
405/** 424/**
406 * ipath_ht_handle_hwerrors - display hardware errors. 425 * ipath_ht_handle_hwerrors - display hardware errors.
407 * @dd: the infinipath device 426 * @dd: the infinipath device
@@ -450,13 +469,12 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
450 469
451 /* 470 /*
452 * make sure we get this much out, unless told to be quiet, 471 * make sure we get this much out, unless told to be quiet,
472 * it's a parity error we may recover from,
453 * or it's occurred within the last 5 seconds 473 * or it's occurred within the last 5 seconds
454 */ 474 */
455 if ((hwerrs & ~(dd->ipath_lasthwerror | 475 if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
456 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 476 RXE_EAGER_PARITY)) ||
457 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) 477 (ipath_debug & __IPATH_VERBDBG))
458 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
459 (ipath_debug & __IPATH_VERBDBG))
460 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " 478 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
461 "(cleared)\n", (unsigned long long) hwerrs); 479 "(cleared)\n", (unsigned long long) hwerrs);
462 dd->ipath_lasthwerror |= hwerrs; 480 dd->ipath_lasthwerror |= hwerrs;
@@ -467,7 +485,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
467 (hwerrs & ~dd->ipath_hwe_bitsextant)); 485 (hwerrs & ~dd->ipath_hwe_bitsextant));
468 486
469 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); 487 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
470 if (ctrl & INFINIPATH_C_FREEZEMODE) { 488 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
471 /* 489 /*
472 * parity errors in send memory are recoverable, 490 * parity errors in send memory are recoverable,
473 * just cancel the send (if indicated in * sendbuffererror), 491 * just cancel the send (if indicated in * sendbuffererror),
@@ -476,50 +494,14 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
476 * occur if a processor speculative read is done to the PIO 494 * occur if a processor speculative read is done to the PIO
477 * buffer while we are sending a packet, for example. 495 * buffer while we are sending a packet, for example.
478 */ 496 */
479 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 497 if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd))
480 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) 498 hwerrs &= ~TXE_PIO_PARITY;
481 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { 499 if (hwerrs & RXE_EAGER_PARITY)
482 ipath_stats.sps_txeparity++; 500 ipath_dev_err(dd, "RXE parity, Eager TID error is not "
483 ipath_dbg("Recovering from TXE parity error (%llu), " 501 "recoverable\n");
484 "hwerrstatus=%llx\n", 502 if (!hwerrs) {
485 (unsigned long long) ipath_stats.sps_txeparity, 503 ipath_dbg("Clearing freezemode on ignored or "
486 (unsigned long long) hwerrs); 504 "recovered hardware error\n");
487 ipath_disarm_senderrbufs(dd);
488 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
489 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
490 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
491 if (!hwerrs) { /* else leave in freeze mode */
492 ipath_write_kreg(dd,
493 dd->ipath_kregs->kr_control,
494 dd->ipath_control);
495 return;
496 }
497 }
498 if (hwerrs) {
499 /*
500 * if any set that we aren't ignoring; only
501 * make the complaint once, in case it's stuck
502 * or recurring, and we get here multiple
503 * times.
504 */
505 if (dd->ipath_flags & IPATH_INITTED) {
506 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
507 "mode), no longer usable, SN %.16s\n",
508 dd->ipath_serial);
509 isfatal = 1;
510 }
511 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
512 /* mark as having had error */
513 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
514 /*
515 * mark as not usable, at a minimum until driver
516 * is reloaded, probably until reboot, since no
517 * other reset is possible.
518 */
519 dd->ipath_flags &= ~IPATH_INITTED;
520 } else {
521 ipath_dbg("Clearing freezemode on ignored hardware "
522 "error\n");
523 ctrl &= ~INFINIPATH_C_FREEZEMODE; 505 ctrl &= ~INFINIPATH_C_FREEZEMODE;
524 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 506 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
525 ctrl); 507 ctrl);
@@ -587,7 +569,39 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
587 dd->ipath_hwerrmask); 569 dd->ipath_hwerrmask);
588 } 570 }
589 571
590 ipath_dev_err(dd, "%s hardware error\n", msg); 572 if (hwerrs) {
573 /*
574 * if any set that we aren't ignoring; only
575 * make the complaint once, in case it's stuck
576 * or recurring, and we get here multiple
577 * times.
578 * force link down, so switch knows, and
579 * LEDs are turned off
580 */
581 if (dd->ipath_flags & IPATH_INITTED) {
582 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
583 ipath_setup_ht_setextled(dd,
584 INFINIPATH_IBCS_L_STATE_DOWN,
585 INFINIPATH_IBCS_LT_STATE_DISABLED);
586 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
587 "mode), no longer usable, SN %.16s\n",
588 dd->ipath_serial);
589 isfatal = 1;
590 }
591 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
592 /* mark as having had error */
593 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
594 /*
595 * mark as not usable, at a minimum until driver
596 * is reloaded, probably until reboot, since no
597 * other reset is possible.
598 */
599 dd->ipath_flags &= ~IPATH_INITTED;
600 }
601 else
602 *msg = 0; /* recovered from all of them */
603 if (*msg)
604 ipath_dev_err(dd, "%s hardware error\n", msg);
591 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) 605 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
592 /* 606 /*
593 * for status file; if no trailing brace is copied, 607 * for status file; if no trailing brace is copied,
@@ -658,7 +672,8 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
658 if (n) 672 if (n)
659 snprintf(name, namelen, "%s", n); 673 snprintf(name, namelen, "%s", n);
660 674
661 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { 675 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
676 dd->ipath_minrev > 3)) {
662 /* 677 /*
663 * This version of the driver only supports Rev 3.2 and 3.3 678 * This version of the driver only supports Rev 3.2 and 3.3
664 */ 679 */
@@ -1163,6 +1178,8 @@ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
1163 1178
1164 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) 1179 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
1165 ipath_dev_err(dd, "MemBIST did not complete!\n"); 1180 ipath_dev_err(dd, "MemBIST did not complete!\n");
1181 if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
1182 ipath_dbg("MemBIST corrected\n");
1166 1183
1167 ipath_check_htlink(dd); 1184 ipath_check_htlink(dd);
1168 1185
@@ -1366,6 +1383,9 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
1366 u64 __iomem *tidptr, u32 type, 1383 u64 __iomem *tidptr, u32 type,
1367 unsigned long pa) 1384 unsigned long pa)
1368{ 1385{
1386 if (!dd->ipath_kregbase)
1387 return;
1388
1369 if (pa != dd->ipath_tidinvalid) { 1389 if (pa != dd->ipath_tidinvalid) {
1370 if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) { 1390 if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
1371 dev_info(&dd->pcidev->dev, 1391 dev_info(&dd->pcidev->dev,
@@ -1382,10 +1402,10 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
1382 pa |= lenvalid | INFINIPATH_RT_VALID; 1402 pa |= lenvalid | INFINIPATH_RT_VALID;
1383 } 1403 }
1384 } 1404 }
1385 if (dd->ipath_kregbase) 1405 writeq(pa, tidptr);
1386 writeq(pa, tidptr);
1387} 1406}
1388 1407
1408
1389/** 1409/**
1390 * ipath_ht_clear_tid - clear all TID entries for a port, expected and eager 1410 * ipath_ht_clear_tid - clear all TID entries for a port, expected and eager
1391 * @dd: the infinipath device 1411 * @dd: the infinipath device
@@ -1515,7 +1535,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1515 INFINIPATH_S_ABORT); 1535 INFINIPATH_S_ABORT);
1516 1536
1517 ipath_get_eeprom_info(dd); 1537 ipath_get_eeprom_info(dd);
1518 if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && 1538 if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
1519 dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { 1539 dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
1520 /* 1540 /*
1521 * Later production QHT7040 has same changes as QHT7140, so 1541 * Later production QHT7040 has same changes as QHT7140, so
@@ -1528,6 +1548,24 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1528 return 0; 1548 return 0;
1529} 1549}
1530 1550
1551
1552static int ipath_ht_txe_recover(struct ipath_devdata *dd)
1553{
1554 int cnt = ++ipath_stats.sps_txeparity;
1555 if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
1556 if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
1557 ipath_dev_err(dd,
1558 "Too many attempts to recover from "
1559 "TXE parity, giving up\n");
1560 return 0;
1561 }
1562 dev_info(&dd->pcidev->dev,
1563 "Recovering from TXE PIO parity error\n");
1564 ipath_disarm_senderrbufs(dd, 1);
1565 return 1;
1566}
1567
1568
1531/** 1569/**
1532 * ipath_init_ht_get_base_info - set chip-specific flags for user code 1570 * ipath_init_ht_get_base_info - set chip-specific flags for user code
1533 * @dd: the infinipath device 1571 * @dd: the infinipath device
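
The new ipath_ht_txe_recover() bounds how many times the driver will recover from TXE PIO parity errors, complaining exactly once when the limit is reached and treating further errors as unrecoverable. A sketch of that counting pattern; the limit mirrors IPATH_MAX_PARITY_ATTEMPTS and the recovery step is elided:

    #include <stdio.h>

    #define MAX_ATTEMPTS 10000               /* IPATH_MAX_PARITY_ATTEMPTS analogue */

    static unsigned long txeparity;

    static int txe_recover(void)
    {
            unsigned long cnt = ++txeparity;

            if (cnt >= MAX_ATTEMPTS) {
                    if (cnt == MAX_ATTEMPTS)  /* warn only once */
                            fprintf(stderr, "too many TXE parity recoveries, giving up\n");
                    return 0;                 /* leave the error for the fatal path */
            }
            /* rewrite and disarm the affected send buffers here */
            return 1;                         /* recovered */
    }

    int main(void)
    {
            printf("first attempt: %s\n", txe_recover() ? "recovered" : "fatal");
            return 0;
    }
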
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 05918e1e7c36..1b9c30857754 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -43,6 +43,8 @@
43#include "ipath_kernel.h" 43#include "ipath_kernel.h"
44#include "ipath_registers.h" 44#include "ipath_registers.h"
45 45
46static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
47
46/* 48/*
47 * This file contains all the chip-specific register information and 49 * This file contains all the chip-specific register information and
48 * access functions for the QLogic InfiniPath PCI-Express chip. 50 * access functions for the QLogic InfiniPath PCI-Express chip.
@@ -207,8 +209,8 @@ static const struct ipath_kregs ipath_pe_kregs = {
207 .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg), 209 .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
208 210
209 /* 211 /*
210 * These should not be used directly via ipath_read_kreg64(), 212 * These should not be used directly via ipath_write_kreg64(),
211 * use them with ipath_read_kreg64_port() 213 * use them with ipath_write_kreg64_port(),
212 */ 214 */
213 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), 215 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
214 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), 216 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
@@ -321,6 +323,12 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
321 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), 323 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
322}; 324};
323 325
326#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
327 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
328 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
329
330static int ipath_pe_txe_recover(struct ipath_devdata *);
331
324/** 332/**
325 * ipath_pe_handle_hwerrors - display hardware errors. 333 * ipath_pe_handle_hwerrors - display hardware errors.
326 * @dd: the infinipath device 334 * @dd: the infinipath device
@@ -394,32 +402,21 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
394 * occur if a processor speculative read is done to the PIO 402 * occur if a processor speculative read is done to the PIO
395 * buffer while we are sending a packet, for example. 403 * buffer while we are sending a packet, for example.
396 */ 404 */
397 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 405 if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd))
398 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) 406 hwerrs &= ~TXE_PIO_PARITY;
399 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
400 ipath_stats.sps_txeparity++;
401 ipath_dbg("Recovering from TXE parity error (%llu), "
402 "hwerrstatus=%llx\n",
403 (unsigned long long) ipath_stats.sps_txeparity,
404 (unsigned long long) hwerrs);
405 ipath_disarm_senderrbufs(dd);
406 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
407 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
408 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
409 if (!hwerrs) { /* else leave in freeze mode */
410 ipath_write_kreg(dd,
411 dd->ipath_kregs->kr_control,
412 dd->ipath_control);
413 return;
414 }
415 }
416 if (hwerrs) { 407 if (hwerrs) {
417 /* 408 /*
418 * if any set that we aren't ignoring only make the 409 * if any set that we aren't ignoring only make the
419 * complaint once, in case it's stuck or recurring, 410 * complaint once, in case it's stuck or recurring,
420 * and we get here multiple times 411 * and we get here multiple times
412 * Force link down, so switch knows, and
413 * LEDs are turned off
421 */ 414 */
422 if (dd->ipath_flags & IPATH_INITTED) { 415 if (dd->ipath_flags & IPATH_INITTED) {
416 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
417 ipath_setup_pe_setextled(dd,
418 INFINIPATH_IBCS_L_STATE_DOWN,
419 INFINIPATH_IBCS_LT_STATE_DISABLED);
423 ipath_dev_err(dd, "Fatal Hardware Error (freeze " 420 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
424 "mode), no longer usable, SN %.16s\n", 421 "mode), no longer usable, SN %.16s\n",
425 dd->ipath_serial); 422 dd->ipath_serial);
@@ -493,7 +490,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
493 dd->ipath_hwerrmask); 490 dd->ipath_hwerrmask);
494 } 491 }
495 492
496 ipath_dev_err(dd, "%s hardware error\n", msg); 493 if (*msg)
494 ipath_dev_err(dd, "%s hardware error\n", msg);
497 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { 495 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
498 /* 496 /*
499 * for /sys status file ; if no trailing } is copied, we'll 497 * for /sys status file ; if no trailing } is copied, we'll
@@ -581,6 +579,8 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
581 579
582 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) 580 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
583 ipath_dev_err(dd, "MemBIST did not complete!\n"); 581 ipath_dev_err(dd, "MemBIST did not complete!\n");
582 if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
583 ipath_dbg("MemBIST corrected\n");
584 584
585 val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ 585 val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
586 586
@@ -1330,6 +1330,35 @@ static void ipath_pe_free_irq(struct ipath_devdata *dd)
1330 dd->ipath_irq = 0; 1330 dd->ipath_irq = 0;
1331} 1331}
1332 1332
1333/*
1334 * On platforms using this chip, and not having ordered WC stores, we
1335 * can get TXE parity errors due to speculative reads to the PIO buffers,
1336 * and this, due to a chip bug can result in (many) false parity error
1337 * reports. So it's a debug print on those, and an info print on systems
1338 * where the speculative reads don't occur.
1339 * Because we can get lots of false errors, we have no upper limit
1340 * on recovery attempts on those platforms.
1341 */
1342static int ipath_pe_txe_recover(struct ipath_devdata *dd)
1343{
1344 if (ipath_unordered_wc())
1345 ipath_dbg("Recovering from TXE PIO parity error\n");
1346 else {
1347 int cnt = ++ipath_stats.sps_txeparity;
1348 if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
1349 if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
1350 ipath_dev_err(dd,
1351 "Too many attempts to recover from "
1352 "TXE parity, giving up\n");
1353 return 0;
1354 }
1355 dev_info(&dd->pcidev->dev,
1356 "Recovering from TXE PIO parity error\n");
1357 }
1358 ipath_disarm_senderrbufs(dd, 1);
1359 return 1;
1360}
1361
1333/** 1362/**
1334 * ipath_init_iba6120_funcs - set up the chip-specific function pointers 1363 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
1335 * @dd: the infinipath device 1364 * @dd: the infinipath device
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index d4f6b5239ef8..7045ba689494 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -216,6 +216,20 @@ static int bringup_link(struct ipath_devdata *dd)
216 return ret; 216 return ret;
217} 217}
218 218
219static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
220{
221 struct ipath_portdata *pd = NULL;
222
223 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
224 if (pd) {
225 pd->port_dd = dd;
226 pd->port_cnt = 1;
227 /* The port 0 pkey table is used by the layer interface. */
228 pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
229 }
230 return pd;
231}
232
219static int init_chip_first(struct ipath_devdata *dd, 233static int init_chip_first(struct ipath_devdata *dd,
220 struct ipath_portdata **pdp) 234 struct ipath_portdata **pdp)
221{ 235{
@@ -271,20 +285,16 @@ static int init_chip_first(struct ipath_devdata *dd,
271 goto done; 285 goto done;
272 } 286 }
273 287
274 dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL); 288 pd = create_portdata0(dd);
275 289
276 if (!dd->ipath_pd[0]) { 290 if (!pd) {
277 ipath_dev_err(dd, "Unable to allocate portdata for port " 291 ipath_dev_err(dd, "Unable to allocate portdata for port "
278 "0, failing\n"); 292 "0, failing\n");
279 ret = -ENOMEM; 293 ret = -ENOMEM;
280 goto done; 294 goto done;
281 } 295 }
282 pd = dd->ipath_pd[0]; 296 dd->ipath_pd[0] = pd;
283 pd->port_dd = dd; 297
284 pd->port_port = 0;
285 pd->port_cnt = 1;
286 /* The port 0 pkey table is used by the layer interface. */
287 pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
288 dd->ipath_rcvtidcnt = 298 dd->ipath_rcvtidcnt =
289 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); 299 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
290 dd->ipath_rcvtidbase = 300 dd->ipath_rcvtidbase =
@@ -590,6 +600,10 @@ static int init_housekeeping(struct ipath_devdata *dd,
590 goto done; 600 goto done;
591 } 601 }
592 602
603
604 /* clear diagctrl register, in case diags were running and crashed */
605 ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
606
593 /* clear the initial reset flag, in case first driver load */ 607 /* clear the initial reset flag, in case first driver load */
594 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 608 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
595 INFINIPATH_E_RESET); 609 INFINIPATH_E_RESET);
@@ -668,6 +682,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
668{ 682{
669 int ret = 0, i; 683 int ret = 0, i;
670 u32 val32, kpiobufs; 684 u32 val32, kpiobufs;
685 u32 piobufs, uports;
671 u64 val; 686 u64 val;
672 struct ipath_portdata *pd = NULL; /* keep gcc4 happy */ 687 struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
673 gfp_t gfp_flags = GFP_USER | __GFP_COMP; 688 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
@@ -702,16 +717,17 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
702 * the in memory DMA'ed copies of the registers. This has to 717 * the in memory DMA'ed copies of the registers. This has to
703 * be done early, before we calculate lastport, etc. 718 * be done early, before we calculate lastport, etc.
704 */ 719 */
705 val = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; 720 piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
706 /* 721 /*
707 * calc number of pioavail registers, and save it; we have 2 722 * calc number of pioavail registers, and save it; we have 2
708 * bits per buffer. 723 * bits per buffer.
709 */ 724 */
710 dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) 725 dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
711 / (sizeof(u64) * BITS_PER_BYTE / 2); 726 / (sizeof(u64) * BITS_PER_BYTE / 2);
727 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
712 if (ipath_kpiobufs == 0) { 728 if (ipath_kpiobufs == 0) {
713 /* not set by user (this is default) */ 729 /* not set by user (this is default) */
714 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) 730 if (piobufs >= (uports * IPATH_MIN_USER_PORT_BUFCNT) + 32)
715 kpiobufs = 32; 731 kpiobufs = 32;
716 else 732 else
717 kpiobufs = 16; 733 kpiobufs = 16;
@@ -719,31 +735,25 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
719 else 735 else
720 kpiobufs = ipath_kpiobufs; 736 kpiobufs = ipath_kpiobufs;
721 737
722 if (kpiobufs > 738 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
723 (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - 739 i = (int) piobufs -
724 (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) { 740 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
725 i = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
726 (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT);
727 if (i < 0) 741 if (i < 0)
728 i = 0; 742 i = 0;
729 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs for " 743 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
730 "kernel leaves too few for %d user ports " 744 "%d for kernel leaves too few for %d user ports "
731 "(%d each); using %u\n", kpiobufs, 745 "(%d each); using %u\n", kpiobufs,
732 dd->ipath_cfgports - 1, 746 piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
733 IPATH_MIN_USER_PORT_BUFCNT, i);
734 /* 747 /*
735 * shouldn't change ipath_kpiobufs, because could be 748 * shouldn't change ipath_kpiobufs, because could be
736 * different for different devices... 749 * different for different devices...
737 */ 750 */
738 kpiobufs = i; 751 kpiobufs = i;
739 } 752 }
740 dd->ipath_lastport_piobuf = 753 dd->ipath_lastport_piobuf = piobufs - kpiobufs;
741 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - kpiobufs; 754 dd->ipath_pbufsport =
742 dd->ipath_pbufsport = dd->ipath_cfgports > 1 755 uports ? dd->ipath_lastport_piobuf / uports : 0;
743 ? dd->ipath_lastport_piobuf / (dd->ipath_cfgports - 1) 756 val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports);
744 : 0;
745 val32 = dd->ipath_lastport_piobuf -
746 (dd->ipath_pbufsport * (dd->ipath_cfgports - 1));
747 if (val32 > 0) { 757 if (val32 > 0) {
748 ipath_dbg("allocating %u pbufs/port leaves %u unused, " 758 ipath_dbg("allocating %u pbufs/port leaves %u unused, "
749 "add to kernel\n", dd->ipath_pbufsport, val32); 759 "add to kernel\n", dd->ipath_pbufsport, val32);
@@ -754,8 +764,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
754 dd->ipath_lastpioindex = dd->ipath_lastport_piobuf; 764 dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
755 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " 765 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
756 "each for %u user ports\n", kpiobufs, 766 "each for %u user ports\n", kpiobufs,
757 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k, 767 piobufs, dd->ipath_pbufsport, uports);
758 dd->ipath_pbufsport, dd->ipath_cfgports - 1);
759 768
760 dd->ipath_f_early_init(dd); 769 dd->ipath_f_early_init(dd);
761 770
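
The reworked accounting computes the kernel/user split from the total PIO buffer count and clamps the kernel share so every user port keeps at least IPATH_MIN_USER_PORT_BUFCNT buffers. A worked example with made-up counts; the real minimum is defined elsewhere in the driver:

    #include <stdio.h>

    #define MIN_USER_PORT_BUFCNT 8           /* assumed IPATH_MIN_USER_PORT_BUFCNT */

    int main(void)
    {
            unsigned piobufs = 100;           /* piobcnt2k + piobcnt4k, made up */
            unsigned uports = 3;              /* cfgports - 1 */
            unsigned kpiobufs = 32;           /* requested kernel share */

            /* clamp the kernel share if it would starve the user ports */
            if (kpiobufs + uports * MIN_USER_PORT_BUFCNT > piobufs) {
                    int i = (int)piobufs - (int)(uports * MIN_USER_PORT_BUFCNT);
                    kpiobufs = i < 0 ? 0 : (unsigned)i;
            }

            unsigned lastport_piobuf = piobufs - kpiobufs;
            unsigned pbufsport = uports ? lastport_piobuf / uports : 0;
            unsigned leftover = lastport_piobuf - pbufsport * uports;

            printf("kernel %u, per-user-port %u, unused %u\n",
                   kpiobufs, pbufsport, leftover);
            return 0;
    }
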
@@ -839,11 +848,24 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
839 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing 848 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
840 * re-init, the simplest way to handle this is to free 849 * re-init, the simplest way to handle this is to free
841 * existing, and re-allocate. 850 * existing, and re-allocate.
851 * Need to re-create rest of port 0 portdata as well.
842 */ 852 */
843 if (reinit) { 853 if (reinit) {
844 struct ipath_portdata *pd = dd->ipath_pd[0]; 854 /* Alloc and init new ipath_portdata for port0,
845 dd->ipath_pd[0] = NULL; 855 * Then free old pd. Could lead to fragmentation, but also
846 ipath_free_pddata(dd, pd); 856 * makes later support for hot-swap easier.
857 */
858 struct ipath_portdata *npd;
859 npd = create_portdata0(dd);
860 if (npd) {
861 ipath_free_pddata(dd, pd);
862 dd->ipath_pd[0] = pd = npd;
863 } else {
864 ipath_dev_err(dd, "Unable to allocate portdata for"
865 " port 0, failing\n");
866 ret = -ENOMEM;
867 goto done;
868 }
847 } 869 }
848 dd->ipath_f_tidtemplate(dd); 870 dd->ipath_f_tidtemplate(dd);
849 ret = ipath_create_rcvhdrq(dd, pd); 871 ret = ipath_create_rcvhdrq(dd, pd);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 72b9e279d19d..45d033169c6e 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -38,10 +38,39 @@
38#include "ipath_common.h" 38#include "ipath_common.h"
39 39
40/* 40/*
41 * clear (write) a pio buffer, to clear a parity error. This routine
42 * should only be called when in freeze mode, and the buffer should be
43 * canceled afterwards.
44 */
45static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
46{
47 u32 __iomem *pbuf;
48 u32 dwcnt; /* dword count to write */
49 if (pnum < dd->ipath_piobcnt2k) {
50 pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum *
51 dd->ipath_palign);
52 dwcnt = dd->ipath_piosize2k >> 2;
53 }
54 else {
55 pbuf = (u32 __iomem *) (dd->ipath_pio4kbase +
56 (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
57 dwcnt = dd->ipath_piosize4k >> 2;
58 }
59 dev_info(&dd->pcidev->dev,
60 "Rewrite PIO buffer %u, to recover from parity error\n",
61 pnum);
62 *pbuf = dwcnt+1; /* no flush required, since already in freeze */
63 while(--dwcnt)
64 *pbuf++ = 0;
65}
66
67/*
41 * Called when we might have an error that is specific to a particular 68 * Called when we might have an error that is specific to a particular
42 * PIO buffer, and may need to cancel that buffer, so it can be re-used. 69 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
70 * If rewrite is true, and bits are set in the sendbufferror registers,
71 * we'll write to the buffer, for error recovery on parity errors.
43 */ 72 */
44void ipath_disarm_senderrbufs(struct ipath_devdata *dd) 73void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
45{ 74{
46 u32 piobcnt; 75 u32 piobcnt;
47 unsigned long sbuf[4]; 76 unsigned long sbuf[4];
@@ -74,8 +103,11 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
74 } 103 }
75 104
76 for (i = 0; i < piobcnt; i++) 105 for (i = 0; i < piobcnt; i++)
77 if (test_bit(i, sbuf)) 106 if (test_bit(i, sbuf)) {
107 if (rewrite)
108 ipath_clrpiobuf(dd, i);
78 ipath_disarm_piobufs(dd, i, 1); 109 ipath_disarm_piobufs(dd, i, 1);
110 }
79 dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */ 111 dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */
80 } 112 }
81} 113}
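
ipath_disarm_senderrbufs() now optionally rewrites each flagged buffer (via the new ipath_clrpiobuf()) before disarming it, which is how the parity-recovery paths regenerate good parity while the chip is frozen. A user-space sketch of the bitmap walk; the bitmap, callbacks, and buffer count are stand-ins, not driver code:

    #include <stdio.h>

    #define NBUFS 64

    static unsigned long long sbuf = (1ULL << 3) | (1ULL << 17);  /* flagged buffers */

    static void clrpiobuf(unsigned i)     { printf("rewrite PIO buffer %u\n", i); }
    static void disarm_piobuf(unsigned i) { printf("disarm PIO buffer %u\n", i); }

    static void disarm_senderrbufs(int rewrite)
    {
            unsigned i;

            for (i = 0; i < NBUFS; i++)
                    if (sbuf & (1ULL << i)) {
                            if (rewrite)          /* only on parity recovery */
                                    clrpiobuf(i);
                            disarm_piobuf(i);
                    }
    }

    int main(void)
    {
            disarm_senderrbufs(1);    /* the recovery paths pass rewrite=1 */
            return 0;
    }
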
@@ -114,7 +146,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
114{ 146{
115 u64 ignore_this_time = 0; 147 u64 ignore_this_time = 0;
116 148
117 ipath_disarm_senderrbufs(dd); 149 ipath_disarm_senderrbufs(dd, 0);
118 if ((errs & E_SUM_LINK_PKTERRS) && 150 if ((errs & E_SUM_LINK_PKTERRS) &&
119 !(dd->ipath_flags & IPATH_LINKACTIVE)) { 151 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
120 /* 152 /*
@@ -403,10 +435,13 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
403 * happens so often we never want to count it. 435 * happens so often we never want to count it.
404 */ 436 */
405 if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) { 437 if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
406 ipath_decode_err(msg, sizeof msg, dd->ipath_lasterror & 438 int iserr;
407 ~INFINIPATH_E_IBSTATUSCHANGED); 439 iserr = ipath_decode_err(msg, sizeof msg,
440 dd->ipath_lasterror &
441 ~INFINIPATH_E_IBSTATUSCHANGED);
408 if (dd->ipath_lasterror & 442 if (dd->ipath_lasterror &
409 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL)) 443 ~(INFINIPATH_E_RRCVEGRFULL |
444 INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
410 ipath_dev_err(dd, "Suppressed %u messages for " 445 ipath_dev_err(dd, "Suppressed %u messages for "
411 "fast-repeating errors (%s) (%llx)\n", 446 "fast-repeating errors (%s) (%llx)\n",
412 supp_msgs, msg, 447 supp_msgs, msg,
@@ -420,8 +455,13 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
420 * them. So only complain about these at debug 455 * them. So only complain about these at debug
421 * level. 456 * level.
422 */ 457 */
423 ipath_dbg("Suppressed %u messages for %s\n", 458 if (iserr)
424 supp_msgs, msg); 459 ipath_dbg("Suppressed %u messages for %s\n",
460 supp_msgs, msg);
461 else
462 ipath_cdbg(ERRPKT,
463 "Suppressed %u messages for %s\n",
464 supp_msgs, msg);
425 } 465 }
426 } 466 }
427} 467}
@@ -462,7 +502,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
462{ 502{
463 char msg[512]; 503 char msg[512];
464 u64 ignore_this_time = 0; 504 u64 ignore_this_time = 0;
465 int i; 505 int i, iserr = 0;
466 int chkerrpkts = 0, noprint = 0; 506 int chkerrpkts = 0, noprint = 0;
467 unsigned supp_msgs; 507 unsigned supp_msgs;
468 508
@@ -502,6 +542,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
502 } 542 }
503 543
504 if (supp_msgs == 250000) { 544 if (supp_msgs == 250000) {
545 int s_iserr;
505 /* 546 /*
506 * It's not entirely reasonable assuming that the errors set 547 * It's not entirely reasonable assuming that the errors set
507 * in the last clear period are all responsible for the 548 * in the last clear period are all responsible for the
@@ -511,17 +552,17 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
511 dd->ipath_maskederrs |= dd->ipath_lasterror | errs; 552 dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
512 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 553 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
513 ~dd->ipath_maskederrs); 554 ~dd->ipath_maskederrs);
514 ipath_decode_err(msg, sizeof msg, 555 s_iserr = ipath_decode_err(msg, sizeof msg,
515 (dd->ipath_maskederrs & ~dd-> 556 (dd->ipath_maskederrs & ~dd->
516 ipath_ignorederrs)); 557 ipath_ignorederrs));
517 558
518 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) & 559 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
519 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL)) 560 ~(INFINIPATH_E_RRCVEGRFULL |
520 ipath_dev_err(dd, "Disabling error(s) %llx because " 561 INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
521 "occurring too frequently (%s)\n", 562 ipath_dev_err(dd, "Temporarily disabling "
522 (unsigned long long) 563 "error(s) %llx reporting; too frequent (%s)\n",
523 (dd->ipath_maskederrs & 564 (unsigned long long) (dd->ipath_maskederrs &
524 ~dd->ipath_ignorederrs), msg); 565 ~dd->ipath_ignorederrs), msg);
525 else { 566 else {
526 /* 567 /*
527 * rcvegrfull and rcvhdrqfull are "normal", 568 * rcvegrfull and rcvhdrqfull are "normal",
@@ -530,8 +571,15 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
530 * processing them. So only complain about 571 * processing them. So only complain about
531 * these at debug level. 572 * these at debug level.
532 */ 573 */
533 ipath_dbg("Disabling frequent queue full errors " 574 if (s_iserr)
534 "(%s)\n", msg); 575 ipath_dbg("Temporarily disabling reporting "
576 "too frequent queue full errors (%s)\n",
577 msg);
578 else
579 ipath_cdbg(ERRPKT,
580 "Temporarily disabling reporting too"
581 " frequent packet errors (%s)\n",
582 msg);
535 } 583 }
536 584
537 /* 585 /*
@@ -589,6 +637,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
589 ipath_stats.sps_crcerrs++; 637 ipath_stats.sps_crcerrs++;
590 chkerrpkts = 1; 638 chkerrpkts = 1;
591 } 639 }
640 iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
641
592 642
593 /* 643 /*
594 * We don't want to print these two as they happen, or we can make 644 * We don't want to print these two as they happen, or we can make
@@ -677,8 +727,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
677 *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF; 727 *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
678 } 728 }
679 729
680 if (!noprint && *msg) 730 if (!noprint && *msg) {
681 ipath_dev_err(dd, "%s error\n", msg); 731 if (iserr)
732 ipath_dev_err(dd, "%s error\n", msg);
733 else
734 dev_info(&dd->pcidev->dev, "%s packet problems\n",
735 msg);
736 }
682 if (dd->ipath_state_wanted & dd->ipath_flags) { 737 if (dd->ipath_state_wanted & dd->ipath_flags) {
683 ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, " 738 ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
684 "waking\n", dd->ipath_state_wanted, 739 "waking\n", dd->ipath_state_wanted,
@@ -819,11 +874,10 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
819 struct ipath_portdata *pd = dd->ipath_pd[i]; 874 struct ipath_portdata *pd = dd->ipath_pd[i];
820 if (portr & (1 << i) && pd && pd->port_cnt && 875 if (portr & (1 << i) && pd && pd->port_cnt &&
821 test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { 876 test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
822 int rcbit;
823 clear_bit(IPATH_PORT_WAITING_RCV, 877 clear_bit(IPATH_PORT_WAITING_RCV,
824 &pd->port_flag); 878 &pd->port_flag);
825 rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT; 879 clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
826 clear_bit(1UL << rcbit, &dd->ipath_rcvctrl); 880 &dd->ipath_rcvctrl);
827 wake_up_interruptible(&pd->port_wait); 881 wake_up_interruptible(&pd->port_wait);
828 rcvdint = 1; 882 rcvdint = 1;
829 } 883 }
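
The handle_urcv() change fixes a bit-number versus bit-mask mix-up: clear_bit() expects a bit index, but the old code passed 1UL << rcbit as the index, so with the INTRAVAIL shift added the "index" could point far past the intended bit. A small model of the difference, using a deliberately small illustrative bit number and a single-word stand-in for the kernel helper:

    #include <stdio.h>

    /* minimal single-word model of the kernel's clear_bit(nr, addr) */
    static void clear_bit(unsigned nr, unsigned long *word)
    {
            *word &= ~(1UL << nr);
    }

    int main(void)
    {
            unsigned rcbit = 4;                  /* illustrative bit number */
            unsigned long rcvctrl = ~0UL;

            clear_bit(1UL << rcbit, &rcvctrl);   /* old, buggy: clears bit 16, not bit 4 */
            printf("buggy:  0x%lx\n", rcvctrl);

            rcvctrl = ~0UL;
            clear_bit(rcbit, &rcvctrl);          /* fixed: clears bit 4 */
            printf("fixed:  0x%lx\n", rcvctrl);
            return 0;
    }
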
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 6d8d05fb5999..e900c2593f44 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -590,7 +590,6 @@ int ipath_enable_wc(struct ipath_devdata *dd);
590void ipath_disable_wc(struct ipath_devdata *dd); 590void ipath_disable_wc(struct ipath_devdata *dd);
591int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); 591int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
592void ipath_shutdown_device(struct ipath_devdata *); 592void ipath_shutdown_device(struct ipath_devdata *);
593void ipath_disarm_senderrbufs(struct ipath_devdata *);
594 593
595struct file_operations; 594struct file_operations;
596int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, 595int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
@@ -611,7 +610,7 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
611extern int ipath_diag_inuse; 610extern int ipath_diag_inuse;
612 611
613irqreturn_t ipath_intr(int irq, void *devid); 612irqreturn_t ipath_intr(int irq, void *devid);
614void ipath_decode_err(char *buf, size_t blen, ipath_err_t err); 613int ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
615#if __IPATH_INFO || __IPATH_DBG 614#if __IPATH_INFO || __IPATH_DBG
616extern const char *ipath_ibcstatus_str[]; 615extern const char *ipath_ibcstatus_str[];
617#endif 616#endif
@@ -701,6 +700,8 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
701#define IPATH_PORT_WAITING_RCV 2 700#define IPATH_PORT_WAITING_RCV 2
702 /* waiting for a PIO buffer to be available */ 701 /* waiting for a PIO buffer to be available */
703#define IPATH_PORT_WAITING_PIO 3 702#define IPATH_PORT_WAITING_PIO 3
703 /* master has not finished initializing */
704#define IPATH_PORT_MASTER_UNINIT 4
704 705
705/* free up any allocated data at closes */ 706/* free up any allocated data at closes */
706void ipath_free_data(struct ipath_portdata *dd); 707void ipath_free_data(struct ipath_portdata *dd);
@@ -711,6 +712,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *);
711void ipath_init_iba6110_funcs(struct ipath_devdata *); 712void ipath_init_iba6110_funcs(struct ipath_devdata *);
712void ipath_get_eeprom_info(struct ipath_devdata *); 713void ipath_get_eeprom_info(struct ipath_devdata *);
713u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 714u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
715void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
714 716
715/* 717/*
716 * number of words used for protocol header if not set by ipath_userinit(); 718 * number of words used for protocol header if not set by ipath_userinit();
@@ -754,8 +756,6 @@ int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
754/* these are used for the registers that vary with port */ 756/* these are used for the registers that vary with port */
755void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg, 757void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
756 unsigned, u64); 758 unsigned, u64);
757u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
758 unsigned);
759 759
760/* 760/*
761 * We could have a single register get/put routine, that takes a group type, 761 * We could have a single register get/put routine, that takes a group type,
@@ -897,6 +897,8 @@ dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
897 897
898extern unsigned ipath_debug; /* debugging bit mask */ 898extern unsigned ipath_debug; /* debugging bit mask */
899 899
900#define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */
901
900const char *ipath_get_unit_name(int unit); 902const char *ipath_get_unit_name(int unit);
901 903
902extern struct mutex ipath_mutex; 904extern struct mutex ipath_mutex;
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 851763d7d2db..dd487c100f5b 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -61,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
61 r = (r + 1) & (rkt->max - 1); 61 r = (r + 1) & (rkt->max - 1);
62 if (r == n) { 62 if (r == n) {
63 spin_unlock_irqrestore(&rkt->lock, flags); 63 spin_unlock_irqrestore(&rkt->lock, flags);
64 ipath_dbg(KERN_INFO "LKEY table full\n"); 64 ipath_dbg("LKEY table full\n");
65 ret = 0; 65 ret = 0;
66 goto bail; 66 goto bail;
67 } 67 }
@@ -133,6 +133,12 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
133 * being reversible by calling bus_to_virt(). 133 * being reversible by calling bus_to_virt().
134 */ 134 */
135 if (sge->lkey == 0) { 135 if (sge->lkey == 0) {
136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
137
138 if (pd->user) {
139 ret = 0;
140 goto bail;
141 }
136 isge->mr = NULL; 142 isge->mr = NULL;
137 isge->vaddr = (void *) sge->addr; 143 isge->vaddr = (void *) sge->addr;
138 isge->length = sge->length; 144 isge->length = sge->length;
@@ -206,6 +212,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
206 * (see ipath_get_dma_mr and ipath_dma.c). 212 * (see ipath_get_dma_mr and ipath_dma.c).
207 */ 213 */
208 if (rkey == 0) { 214 if (rkey == 0) {
215 struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
216
217 if (pd->user) {
218 ret = 0;
219 goto bail;
220 }
209 sge->mr = NULL; 221 sge->mr = NULL;
210 sge->vaddr = (void *) vaddr; 222 sge->vaddr = (void *) vaddr;
211 sge->length = len; 223 sge->length = len;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 8cc8598d6c69..31e70732e369 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -210,9 +210,15 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
210 m = 0; 210 m = 0;
211 n = 0; 211 n = 0;
212 list_for_each_entry(chunk, &region->chunk_list, list) { 212 list_for_each_entry(chunk, &region->chunk_list, list) {
213 for (i = 0; i < chunk->nmap; i++) { 213 for (i = 0; i < chunk->nents; i++) {
214 mr->mr.map[m]->segs[n].vaddr = 214 void *vaddr;
215 page_address(chunk->page_list[i].page); 215
216 vaddr = page_address(chunk->page_list[i].page);
217 if (!vaddr) {
218 ret = ERR_PTR(-EINVAL);
219 goto bail;
220 }
221 mr->mr.map[m]->segs[n].vaddr = vaddr;
216 mr->mr.map[m]->segs[n].length = region->page_size; 222 mr->mr.map[m]->segs[n].length = region->page_size;
217 n++; 223 n++;
218 if (n == IPATH_SEGSZ) { 224 if (n == IPATH_SEGSZ) {
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 64f07b19349f..16db9ac0b402 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -81,11 +81,51 @@ static u32 credit_table[31] = {
81 32768 /* 1E */ 81 32768 /* 1E */
82}; 82};
83 83
84static u32 alloc_qpn(struct ipath_qp_table *qpt) 84
85static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
86{
87 unsigned long page = get_zeroed_page(GFP_KERNEL);
88 unsigned long flags;
89
90 /*
91 * Free the page if someone raced with us installing it.
92 */
93
94 spin_lock_irqsave(&qpt->lock, flags);
95 if (map->page)
96 free_page(page);
97 else
98 map->page = (void *)page;
99 spin_unlock_irqrestore(&qpt->lock, flags);
100}
101
102
103static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
85{ 104{
86 u32 i, offset, max_scan, qpn; 105 u32 i, offset, max_scan, qpn;
87 struct qpn_map *map; 106 struct qpn_map *map;
88 u32 ret; 107 u32 ret = -1;
108
109 if (type == IB_QPT_SMI)
110 ret = 0;
111 else if (type == IB_QPT_GSI)
112 ret = 1;
113
114 if (ret != -1) {
115 map = &qpt->map[0];
116 if (unlikely(!map->page)) {
117 get_map_page(qpt, map);
118 if (unlikely(!map->page)) {
119 ret = -ENOMEM;
120 goto bail;
121 }
122 }
123 if (!test_and_set_bit(ret, map->page))
124 atomic_dec(&map->n_free);
125 else
126 ret = -EBUSY;
127 goto bail;
128 }
89 129
90 qpn = qpt->last + 1; 130 qpn = qpt->last + 1;
91 if (qpn >= QPN_MAX) 131 if (qpn >= QPN_MAX)
@@ -95,19 +135,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt)
95 max_scan = qpt->nmaps - !offset; 135 max_scan = qpt->nmaps - !offset;
96 for (i = 0;;) { 136 for (i = 0;;) {
97 if (unlikely(!map->page)) { 137 if (unlikely(!map->page)) {
98 unsigned long page = get_zeroed_page(GFP_KERNEL); 138 get_map_page(qpt, map);
99 unsigned long flags;
100
101 /*
102 * Free the page if someone raced with us
103 * installing it:
104 */
105 spin_lock_irqsave(&qpt->lock, flags);
106 if (map->page)
107 free_page(page);
108 else
109 map->page = (void *)page;
110 spin_unlock_irqrestore(&qpt->lock, flags);
111 if (unlikely(!map->page)) 139 if (unlikely(!map->page))
112 break; 140 break;
113 } 141 }
@@ -151,7 +179,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt)
151 qpn = mk_qpn(qpt, map, offset); 179 qpn = mk_qpn(qpt, map, offset);
152 } 180 }
153 181
154 ret = 0; 182 ret = -ENOMEM;
155 183
156bail: 184bail:
157 return ret; 185 return ret;
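
[Editor's note] Two things happen in the alloc_qpn() rework above: the new get_map_page() helper allocates a zeroed bitmap page outside the lock and keeps it only if no other CPU installed one first (freeing the duplicate otherwise), and QPNs 0 and 1 (SMI/GSI) become fixed reservations in map[0] with -ENOMEM/-EBUSY error returns rather than the old "0 means failure" convention. A pthread-based sketch of the install-or-discard idiom, using made-up names (lazy_slot, install_page) and calloc in place of get_zeroed_page():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lazy_slot {
        pthread_mutex_t lock;
        void *page;                     /* lazily allocated bitmap page */
};

/* Allocate outside the lock; keep ours only if nobody beat us to it. */
static void install_page(struct lazy_slot *slot, size_t size)
{
        void *page = calloc(1, size);

        pthread_mutex_lock(&slot->lock);
        if (slot->page)
                free(page);             /* lost the race: discard the duplicate */
        else
                slot->page = page;
        pthread_mutex_unlock(&slot->lock);
}

int main(void)
{
        struct lazy_slot slot = { PTHREAD_MUTEX_INITIALIZER, NULL };

        install_page(&slot, 4096);
        install_page(&slot, 4096);      /* second caller discards its copy */
        printf("page installed: %s\n", slot.page ? "yes" : "no");
        free(slot.page);
        return 0;
}

(Link with -pthread.)
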
@@ -180,29 +208,19 @@ static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
180 enum ib_qp_type type) 208 enum ib_qp_type type)
181{ 209{
182 unsigned long flags; 210 unsigned long flags;
183 u32 qpn;
184 int ret; 211 int ret;
185 212
186 if (type == IB_QPT_SMI) 213 ret = alloc_qpn(qpt, type);
187 qpn = 0; 214 if (ret < 0)
188 else if (type == IB_QPT_GSI) 215 goto bail;
189 qpn = 1; 216 qp->ibqp.qp_num = ret;
190 else {
191 /* Allocate the next available QPN */
192 qpn = alloc_qpn(qpt);
193 if (qpn == 0) {
194 ret = -ENOMEM;
195 goto bail;
196 }
197 }
198 qp->ibqp.qp_num = qpn;
199 217
200 /* Add the QP to the hash table. */ 218 /* Add the QP to the hash table. */
201 spin_lock_irqsave(&qpt->lock, flags); 219 spin_lock_irqsave(&qpt->lock, flags);
202 220
203 qpn %= qpt->max; 221 ret %= qpt->max;
204 qp->next = qpt->table[qpn]; 222 qp->next = qpt->table[ret];
205 qpt->table[qpn] = qp; 223 qpt->table[ret] = qp;
206 atomic_inc(&qp->refcount); 224 atomic_inc(&qp->refcount);
207 225
208 spin_unlock_irqrestore(&qpt->lock, flags); 226 spin_unlock_irqrestore(&qpt->lock, flags);
@@ -245,9 +263,7 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
245 if (!fnd) 263 if (!fnd)
246 return; 264 return;
247 265
248 /* If QPN is not reserved, mark QPN free in the bitmap. */ 266 free_qpn(qpt, qp->ibqp.qp_num);
249 if (qp->ibqp.qp_num > 1)
250 free_qpn(qpt, qp->ibqp.qp_num);
251 267
252 wait_event(qp->wait, !atomic_read(&qp->refcount)); 268 wait_event(qp->wait, !atomic_read(&qp->refcount));
253} 269}
@@ -270,11 +286,10 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
270 286
271 while (qp) { 287 while (qp) {
272 nqp = qp->next; 288 nqp = qp->next;
273 if (qp->ibqp.qp_num > 1) 289 free_qpn(qpt, qp->ibqp.qp_num);
274 free_qpn(qpt, qp->ibqp.qp_num);
275 if (!atomic_dec_and_test(&qp->refcount) || 290 if (!atomic_dec_and_test(&qp->refcount) ||
276 !ipath_destroy_qp(&qp->ibqp)) 291 !ipath_destroy_qp(&qp->ibqp))
277 ipath_dbg(KERN_INFO "QP memory leak!\n"); 292 ipath_dbg("QP memory leak!\n");
278 qp = nqp; 293 qp = nqp;
279 } 294 }
280 } 295 }
@@ -320,7 +335,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
320 qp->remote_qpn = 0; 335 qp->remote_qpn = 0;
321 qp->qkey = 0; 336 qp->qkey = 0;
322 qp->qp_access_flags = 0; 337 qp->qp_access_flags = 0;
323 clear_bit(IPATH_S_BUSY, &qp->s_flags); 338 qp->s_busy = 0;
339 qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
324 qp->s_hdrwords = 0; 340 qp->s_hdrwords = 0;
325 qp->s_psn = 0; 341 qp->s_psn = 0;
326 qp->r_psn = 0; 342 qp->r_psn = 0;
@@ -333,7 +349,6 @@ static void ipath_reset_qp(struct ipath_qp *qp)
333 qp->r_state = IB_OPCODE_UC_SEND_LAST; 349 qp->r_state = IB_OPCODE_UC_SEND_LAST;
334 } 350 }
335 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; 351 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
336 qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
337 qp->r_nak_state = 0; 352 qp->r_nak_state = 0;
338 qp->r_wrid_valid = 0; 353 qp->r_wrid_valid = 0;
339 qp->s_rnr_timeout = 0; 354 qp->s_rnr_timeout = 0;
@@ -344,6 +359,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
344 qp->s_ssn = 1; 359 qp->s_ssn = 1;
345 qp->s_lsn = 0; 360 qp->s_lsn = 0;
346 qp->s_wait_credit = 0; 361 qp->s_wait_credit = 0;
362 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
363 qp->r_head_ack_queue = 0;
364 qp->s_tail_ack_queue = 0;
365 qp->s_num_rd_atomic = 0;
347 if (qp->r_rq.wq) { 366 if (qp->r_rq.wq) {
348 qp->r_rq.wq->head = 0; 367 qp->r_rq.wq->head = 0;
349 qp->r_rq.wq->tail = 0; 368 qp->r_rq.wq->tail = 0;
@@ -357,7 +376,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
357 * @err: the receive completion error to signal if a RWQE is active 376 * @err: the receive completion error to signal if a RWQE is active
358 * 377 *
359 * Flushes both send and receive work queues. 378 * Flushes both send and receive work queues.
360 * QP s_lock should be held and interrupts disabled. 379 * The QP s_lock should be held and interrupts disabled.
361 */ 380 */
362 381
363void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) 382void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -365,7 +384,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
365 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 384 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
366 struct ib_wc wc; 385 struct ib_wc wc;
367 386
368 ipath_dbg(KERN_INFO "QP%d/%d in error state\n", 387 ipath_dbg("QP%d/%d in error state\n",
369 qp->ibqp.qp_num, qp->remote_qpn); 388 qp->ibqp.qp_num, qp->remote_qpn);
370 389
371 spin_lock(&dev->pending_lock); 390 spin_lock(&dev->pending_lock);
@@ -389,6 +408,8 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
389 wc.port_num = 0; 408 wc.port_num = 0;
390 if (qp->r_wrid_valid) { 409 if (qp->r_wrid_valid) {
391 qp->r_wrid_valid = 0; 410 qp->r_wrid_valid = 0;
411 wc.wr_id = qp->r_wr_id;
412 wc.opcode = IB_WC_RECV;
392 wc.status = err; 413 wc.status = err;
393 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); 414 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
394 } 415 }
@@ -503,13 +524,17 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
503 attr->path_mig_state != IB_MIG_REARM) 524 attr->path_mig_state != IB_MIG_REARM)
504 goto inval; 525 goto inval;
505 526
527 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
528 if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
529 goto inval;
530
506 switch (new_state) { 531 switch (new_state) {
507 case IB_QPS_RESET: 532 case IB_QPS_RESET:
508 ipath_reset_qp(qp); 533 ipath_reset_qp(qp);
509 break; 534 break;
510 535
511 case IB_QPS_ERR: 536 case IB_QPS_ERR:
512 ipath_error_qp(qp, IB_WC_GENERAL_ERR); 537 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
513 break; 538 break;
514 539
515 default: 540 default:
@@ -559,6 +584,12 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
559 if (attr_mask & IB_QP_QKEY) 584 if (attr_mask & IB_QP_QKEY)
560 qp->qkey = attr->qkey; 585 qp->qkey = attr->qkey;
561 586
587 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
588 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
589
590 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
591 qp->s_max_rd_atomic = attr->max_rd_atomic;
592
562 qp->state = new_state; 593 qp->state = new_state;
563 spin_unlock_irqrestore(&qp->s_lock, flags); 594 spin_unlock_irqrestore(&qp->s_lock, flags);
564 595
@@ -598,8 +629,8 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
598 attr->alt_pkey_index = 0; 629 attr->alt_pkey_index = 0;
599 attr->en_sqd_async_notify = 0; 630 attr->en_sqd_async_notify = 0;
600 attr->sq_draining = 0; 631 attr->sq_draining = 0;
601 attr->max_rd_atomic = 1; 632 attr->max_rd_atomic = qp->s_max_rd_atomic;
602 attr->max_dest_rd_atomic = 1; 633 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
603 attr->min_rnr_timer = qp->r_min_rnr_timer; 634 attr->min_rnr_timer = qp->r_min_rnr_timer;
604 attr->port_num = 1; 635 attr->port_num = 1;
605 attr->timeout = qp->timeout; 636 attr->timeout = qp->timeout;
@@ -614,7 +645,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
614 init_attr->recv_cq = qp->ibqp.recv_cq; 645 init_attr->recv_cq = qp->ibqp.recv_cq;
615 init_attr->srq = qp->ibqp.srq; 646 init_attr->srq = qp->ibqp.srq;
616 init_attr->cap = attr->cap; 647 init_attr->cap = attr->cap;
617 if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) 648 if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
618 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; 649 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
619 else 650 else
620 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 651 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -786,7 +817,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
786 qp->s_size = init_attr->cap.max_send_wr + 1; 817 qp->s_size = init_attr->cap.max_send_wr + 1;
787 qp->s_max_sge = init_attr->cap.max_send_sge; 818 qp->s_max_sge = init_attr->cap.max_send_sge;
788 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) 819 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
789 qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR; 820 qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
790 else 821 else
791 qp->s_flags = 0; 822 qp->s_flags = 0;
792 dev = to_idev(ibpd->device); 823 dev = to_idev(ibpd->device);
@@ -958,7 +989,7 @@ bail:
958 * @wc: the WC responsible for putting the QP in this state 989 * @wc: the WC responsible for putting the QP in this state
959 * 990 *
960 * Flushes the send work queue. 991 * Flushes the send work queue.
961 * The QP s_lock should be held. 992 * The QP s_lock should be held and interrupts disabled.
962 */ 993 */
963 994
964void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) 995void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
@@ -966,7 +997,7 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
966 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 997 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
967 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); 998 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
968 999
969 ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n", 1000 ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
970 qp->ibqp.qp_num, qp->remote_qpn, wc->status); 1001 qp->ibqp.qp_num, qp->remote_qpn, wc->status);
971 1002
972 spin_lock(&dev->pending_lock); 1003 spin_lock(&dev->pending_lock);
@@ -984,12 +1015,12 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
984 wc->status = IB_WC_WR_FLUSH_ERR; 1015 wc->status = IB_WC_WR_FLUSH_ERR;
985 1016
986 while (qp->s_last != qp->s_head) { 1017 while (qp->s_last != qp->s_head) {
1018 wqe = get_swqe_ptr(qp, qp->s_last);
987 wc->wr_id = wqe->wr.wr_id; 1019 wc->wr_id = wqe->wr.wr_id;
988 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 1020 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
989 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); 1021 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
990 if (++qp->s_last >= qp->s_size) 1022 if (++qp->s_last >= qp->s_size)
991 qp->s_last = 0; 1023 qp->s_last = 0;
992 wqe = get_swqe_ptr(qp, qp->s_last);
993 } 1024 }
994 qp->s_cur = qp->s_tail = qp->s_head; 1025 qp->s_cur = qp->s_tail = qp->s_head;
995 qp->state = IB_QPS_SQE; 1026 qp->state = IB_QPS_SQE;
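
[Editor's note] The last ipath_qp.c hunk moves the get_swqe_ptr() call to the top of the flush loop in ipath_sqerror_qp(), so each pass completes the entry currently at s_last before advancing, instead of fetching the next entry and flushing it one iteration late. The ring walk itself reduces to the sketch below (illustrative entry type and size, not the driver's swqe):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct entry { uint64_t wr_id; };

/* Flush every pending entry from 'last' up to (but not including) 'head'. */
static void flush_pending(struct entry *ring, uint32_t *last, uint32_t head)
{
        while (*last != head) {
                struct entry *e = &ring[*last];         /* entry being flushed */

                printf("flushing wr_id %llu\n", (unsigned long long)e->wr_id);
                if (++*last >= RING_SIZE)
                        *last = 0;                      /* wrap the index */
        }
}

int main(void)
{
        struct entry ring[RING_SIZE] = { {1}, {2}, {3}, {4} };
        uint32_t last = 0, head = 4;    /* four pending entries: slots 0..3 */

        flush_pending(ring, &last, head);
        return 0;
}
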
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 5ff20cb04494..b4b88d0b53f5 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -37,6 +37,19 @@
37/* cut down ridiculously long IB macro names */ 37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_RC_##x 38#define OP(x) IB_OPCODE_RC_##x
39 39
40static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
41 u32 psn, u32 pmtu)
42{
43 u32 len;
44
45 len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
46 ss->sge = wqe->sg_list[0];
47 ss->sg_list = wqe->sg_list + 1;
48 ss->num_sge = wqe->wr.num_sge;
49 ipath_skip_sge(ss, len);
50 return wqe->length - len;
51}
52
40/** 53/**
41 * ipath_init_restart- initialize the qp->s_sge after a restart 54 * ipath_init_restart- initialize the qp->s_sge after a restart
42 * @qp: the QP who's SGE we're restarting 55 * @qp: the QP who's SGE we're restarting
@@ -47,15 +60,9 @@
47static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) 60static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
48{ 61{
49 struct ipath_ibdev *dev; 62 struct ipath_ibdev *dev;
50 u32 len;
51 63
52 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * 64 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
53 ib_mtu_enum_to_int(qp->path_mtu); 65 ib_mtu_enum_to_int(qp->path_mtu));
54 qp->s_sge.sge = wqe->sg_list[0];
55 qp->s_sge.sg_list = wqe->sg_list + 1;
56 qp->s_sge.num_sge = wqe->wr.num_sge;
57 ipath_skip_sge(&qp->s_sge, len);
58 qp->s_len = wqe->length - len;
59 dev = to_idev(qp->ibqp.device); 66 dev = to_idev(qp->ibqp.device);
60 spin_lock(&dev->pending_lock); 67 spin_lock(&dev->pending_lock);
61 if (list_empty(&qp->timerwait)) 68 if (list_empty(&qp->timerwait))
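
[Editor's note] The new restart_sge() helper above turns a PSN delta into a byte offset: the retry point is ((psn - wqe->psn) & IPATH_PSN_MASK) full MTUs into the original message, so the SGE state is rebuilt from the WQE and advanced past the bytes the peer already acknowledged. A flat-buffer sketch of that arithmetic (one buffer instead of the driver's SGE list; names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PSN_MASK 0xFFFFFF       /* PSNs are 24-bit sequence numbers */

struct xfer {
        uint32_t first_psn;     /* PSN of the first packet of the message */
        uint32_t length;        /* total message length in bytes */
};

/* Return how many bytes remain when resuming at 'psn'; set *offset. */
static uint32_t restart_offset(const struct xfer *x, uint32_t psn,
                               uint32_t pmtu, uint32_t *offset)
{
        *offset = ((psn - x->first_psn) & PSN_MASK) * pmtu;
        return x->length - *offset;
}

int main(void)
{
        struct xfer x = { .first_psn = 0xFFFFFE, .length = 10240 };
        uint32_t off, left;

        /* Resume at PSN 2: four packets past 0xFFFFFE (the delta wraps through 0). */
        left = restart_offset(&x, 2, 2048, &off);
        printf("offset %u, %u bytes left\n", off, left);        /* 8192, 2048 */
        return 0;
}
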
@@ -70,107 +77,123 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
70 * @ohdr: a pointer to the IB header being constructed 77 * @ohdr: a pointer to the IB header being constructed
71 * @pmtu: the path MTU 78 * @pmtu: the path MTU
72 * 79 *
73 * Return bth0 if constructed; otherwise, return 0. 80 * Return 1 if constructed; otherwise, return 0.
81 * Note that we are in the responder's side of the QP context.
74 * Note the QP s_lock must be held. 82 * Note the QP s_lock must be held.
75 */ 83 */
76u32 ipath_make_rc_ack(struct ipath_qp *qp, 84static int ipath_make_rc_ack(struct ipath_qp *qp,
77 struct ipath_other_headers *ohdr, 85 struct ipath_other_headers *ohdr,
78 u32 pmtu) 86 u32 pmtu, u32 *bth0p, u32 *bth2p)
79{ 87{
88 struct ipath_ack_entry *e;
80 u32 hwords; 89 u32 hwords;
81 u32 len; 90 u32 len;
82 u32 bth0; 91 u32 bth0;
92 u32 bth2;
83 93
84 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 94 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
85 hwords = 5; 95 hwords = 5;
86 96
87 /*
88 * Send a response. Note that we are in the responder's
89 * side of the QP context.
90 */
91 switch (qp->s_ack_state) { 97 switch (qp->s_ack_state) {
92 case OP(RDMA_READ_REQUEST): 98 case OP(RDMA_READ_RESPONSE_LAST):
93 qp->s_cur_sge = &qp->s_rdma_sge; 99 case OP(RDMA_READ_RESPONSE_ONLY):
94 len = qp->s_rdma_len; 100 case OP(ATOMIC_ACKNOWLEDGE):
95 if (len > pmtu) { 101 qp->s_ack_state = OP(ACKNOWLEDGE);
96 len = pmtu; 102 /* FALLTHROUGH */
97 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); 103 case OP(ACKNOWLEDGE):
98 } else 104 /* Check for no next entry in the queue. */
99 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); 105 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
100 qp->s_rdma_len -= len; 106 if (qp->s_flags & IPATH_S_ACK_PENDING)
107 goto normal;
108 goto bail;
109 }
110
111 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
112 if (e->opcode == OP(RDMA_READ_REQUEST)) {
113 /* Copy SGE state in case we need to resend */
114 qp->s_ack_rdma_sge = e->rdma_sge;
115 qp->s_cur_sge = &qp->s_ack_rdma_sge;
116 len = e->rdma_sge.sge.sge_length;
117 if (len > pmtu) {
118 len = pmtu;
119 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
120 } else {
121 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
122 if (++qp->s_tail_ack_queue >
123 IPATH_MAX_RDMA_ATOMIC)
124 qp->s_tail_ack_queue = 0;
125 }
126 ohdr->u.aeth = ipath_compute_aeth(qp);
127 hwords++;
128 qp->s_ack_rdma_psn = e->psn;
129 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
130 } else {
131 /* COMPARE_SWAP or FETCH_ADD */
132 qp->s_cur_sge = NULL;
133 len = 0;
134 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
135 ohdr->u.at.aeth = ipath_compute_aeth(qp);
136 ohdr->u.at.atomic_ack_eth[0] =
137 cpu_to_be32(e->atomic_data >> 32);
138 ohdr->u.at.atomic_ack_eth[1] =
139 cpu_to_be32(e->atomic_data);
140 hwords += sizeof(ohdr->u.at) / sizeof(u32);
141 bth2 = e->psn;
142 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
143 qp->s_tail_ack_queue = 0;
144 }
101 bth0 = qp->s_ack_state << 24; 145 bth0 = qp->s_ack_state << 24;
102 ohdr->u.aeth = ipath_compute_aeth(qp);
103 hwords++;
104 break; 146 break;
105 147
106 case OP(RDMA_READ_RESPONSE_FIRST): 148 case OP(RDMA_READ_RESPONSE_FIRST):
107 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); 149 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
108 /* FALLTHROUGH */ 150 /* FALLTHROUGH */
109 case OP(RDMA_READ_RESPONSE_MIDDLE): 151 case OP(RDMA_READ_RESPONSE_MIDDLE):
110 qp->s_cur_sge = &qp->s_rdma_sge; 152 len = qp->s_ack_rdma_sge.sge.sge_length;
111 len = qp->s_rdma_len;
112 if (len > pmtu) 153 if (len > pmtu)
113 len = pmtu; 154 len = pmtu;
114 else { 155 else {
115 ohdr->u.aeth = ipath_compute_aeth(qp); 156 ohdr->u.aeth = ipath_compute_aeth(qp);
116 hwords++; 157 hwords++;
117 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); 158 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
159 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
160 qp->s_tail_ack_queue = 0;
118 } 161 }
119 qp->s_rdma_len -= len;
120 bth0 = qp->s_ack_state << 24; 162 bth0 = qp->s_ack_state << 24;
121 break; 163 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
122
123 case OP(RDMA_READ_RESPONSE_LAST):
124 case OP(RDMA_READ_RESPONSE_ONLY):
125 /*
126 * We have to prevent new requests from changing
127 * the r_sge state while a ipath_verbs_send()
128 * is in progress.
129 */
130 qp->s_ack_state = OP(ACKNOWLEDGE);
131 bth0 = 0;
132 goto bail;
133
134 case OP(COMPARE_SWAP):
135 case OP(FETCH_ADD):
136 qp->s_cur_sge = NULL;
137 len = 0;
138 /*
139 * Set the s_ack_state so the receive interrupt handler
140 * won't try to send an ACK (out of order) until this one
141 * is actually sent.
142 */
143 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
144 bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
145 ohdr->u.at.aeth = ipath_compute_aeth(qp);
146 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
147 hwords += sizeof(ohdr->u.at) / 4;
148 break; 164 break;
149 165
150 default: 166 default:
151 /* Send a regular ACK. */ 167 normal:
152 qp->s_cur_sge = NULL;
153 len = 0;
154 /* 168 /*
155 * Set the s_ack_state so the receive interrupt handler 169 * Send a regular ACK.
156 * won't try to send an ACK (out of order) until this one 170 * Set the s_ack_state so we wait until after sending
157 * is actually sent. 171 * the ACK before setting s_ack_state to ACKNOWLEDGE
172 * (see above).
158 */ 173 */
159 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); 174 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
160 bth0 = OP(ACKNOWLEDGE) << 24; 175 qp->s_flags &= ~IPATH_S_ACK_PENDING;
176 qp->s_cur_sge = NULL;
161 if (qp->s_nak_state) 177 if (qp->s_nak_state)
162 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 178 ohdr->u.aeth =
163 (qp->s_nak_state << 179 cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
164 IPATH_AETH_CREDIT_SHIFT)); 180 (qp->s_nak_state <<
181 IPATH_AETH_CREDIT_SHIFT));
165 else 182 else
166 ohdr->u.aeth = ipath_compute_aeth(qp); 183 ohdr->u.aeth = ipath_compute_aeth(qp);
167 hwords++; 184 hwords++;
185 len = 0;
186 bth0 = OP(ACKNOWLEDGE) << 24;
187 bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
168 } 188 }
169 qp->s_hdrwords = hwords; 189 qp->s_hdrwords = hwords;
170 qp->s_cur_size = len; 190 qp->s_cur_size = len;
191 *bth0p = bth0;
192 *bth2p = bth2;
193 return 1;
171 194
172bail: 195bail:
173 return bth0; 196 return 0;
174} 197}
175 198
176/** 199/**
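
[Editor's note] The reworked ipath_make_rc_ack() above drains a small per-QP queue of pending RDMA read / atomic responses: the receive path adds entries at r_head_ack_queue, the send side consumes them at s_tail_ack_queue, and both indices wrap after IPATH_MAX_RDMA_ATOMIC, so the array holds IPATH_MAX_RDMA_ATOMIC + 1 slots. A stripped-down model of that index pair follows; the names, the depth of 4, and the explicit "queue full" return are illustrative only (the driver bounds the queue through the negotiated rd_atomic limits instead).

#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING 4                       /* stand-in for IPATH_MAX_RDMA_ATOMIC */

struct ack_queue {
        uint32_t entries[MAX_OUTSTANDING + 1];  /* queued response PSNs */
        uint8_t head;                           /* next free slot (receive side) */
        uint8_t tail;                           /* next entry to send (send side) */
};

static int queue_response(struct ack_queue *q, uint32_t psn)
{
        uint8_t next = q->head + 1;

        if (next > MAX_OUTSTANDING)
                next = 0;                       /* wrap forwards */
        if (next == q->tail)
                return -1;                      /* full: caller must stall or NAK */
        q->entries[q->head] = psn;
        q->head = next;
        return 0;
}

static int send_response(struct ack_queue *q, uint32_t *psn)
{
        if (q->tail == q->head)
                return -1;                      /* nothing pending */
        *psn = q->entries[q->tail];
        if (++q->tail > MAX_OUTSTANDING)
                q->tail = 0;
        return 0;
}

int main(void)
{
        struct ack_queue q = { {0}, 0, 0 };
        uint32_t psn;

        queue_response(&q, 100);
        queue_response(&q, 101);
        while (send_response(&q, &psn) == 0)
                printf("responding to PSN %u\n", psn);
        return 0;
}
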
@@ -197,9 +220,16 @@ int ipath_make_rc_req(struct ipath_qp *qp,
197 u32 bth2; 220 u32 bth2;
198 char newreq; 221 char newreq;
199 222
223 /* Sending responses has higher priority over sending requests. */
224 if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
225 (qp->s_flags & IPATH_S_ACK_PENDING) ||
226 qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
227 ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
228 goto done;
229
200 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) || 230 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
201 qp->s_rnr_timeout) 231 qp->s_rnr_timeout)
202 goto done; 232 goto bail;
203 233
204 /* Limit the number of packets sent without an ACK. */ 234 /* Limit the number of packets sent without an ACK. */
205 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { 235 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
@@ -210,7 +240,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
210 list_add_tail(&qp->timerwait, 240 list_add_tail(&qp->timerwait,
211 &dev->pending[dev->pending_index]); 241 &dev->pending[dev->pending_index]);
212 spin_unlock(&dev->pending_lock); 242 spin_unlock(&dev->pending_lock);
213 goto done; 243 goto bail;
214 } 244 }
215 245
216 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 246 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
@@ -232,7 +262,16 @@ int ipath_make_rc_req(struct ipath_qp *qp,
232 if (qp->s_cur == qp->s_tail) { 262 if (qp->s_cur == qp->s_tail) {
233 /* Check if send work queue is empty. */ 263 /* Check if send work queue is empty. */
234 if (qp->s_tail == qp->s_head) 264 if (qp->s_tail == qp->s_head)
235 goto done; 265 goto bail;
266 /*
267 * If a fence is requested, wait for previous
268 * RDMA read and atomic operations to finish.
269 */
270 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
271 qp->s_num_rd_atomic) {
272 qp->s_flags |= IPATH_S_FENCE_PENDING;
273 goto bail;
274 }
236 wqe->psn = qp->s_next_psn; 275 wqe->psn = qp->s_next_psn;
237 newreq = 1; 276 newreq = 1;
238 } 277 }
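
[Editor's note] The hunk above wires up IB_SEND_FENCE on the send side: a new request carrying the fence flag is held back (IPATH_S_FENCE_PENDING) until s_num_rd_atomic, the count of outstanding RDMA reads and atomics, drains to zero; the ACK path later clears the flag and reschedules the send tasklet. A small sketch of the same gate, with stand-in names for the flag and counter:

#include <stdbool.h>
#include <stdio.h>

struct sender {
        unsigned int outstanding_rd_atomic;     /* in-flight reads/atomics */
        bool fence_pending;                     /* a fenced WR is waiting */
};

/* Return true if a work request with 'fence_requested' may start now. */
static bool fence_ok_to_send(struct sender *s, bool fence_requested)
{
        if (fence_requested && s->outstanding_rd_atomic) {
                s->fence_pending = true;        /* retry when the count drains */
                return false;
        }
        return true;
}

/* Called when a read/atomic completes; true means reschedule the sender. */
static bool rd_atomic_done(struct sender *s)
{
        if (--s->outstanding_rd_atomic == 0 && s->fence_pending) {
                s->fence_pending = false;
                return true;
        }
        return false;
}

int main(void)
{
        struct sender s = { .outstanding_rd_atomic = 1, .fence_pending = false };

        printf("fenced send allowed: %d\n", fence_ok_to_send(&s, true));  /* 0 */
        printf("reschedule on completion: %d\n", rd_atomic_done(&s));     /* 1 */
        return 0;
}
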
@@ -250,7 +289,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
250 /* If no credit, return. */ 289 /* If no credit, return. */
251 if (qp->s_lsn != (u32) -1 && 290 if (qp->s_lsn != (u32) -1 &&
252 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) 291 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
253 goto done; 292 goto bail;
254 wqe->lpsn = wqe->psn; 293 wqe->lpsn = wqe->psn;
255 if (len > pmtu) { 294 if (len > pmtu) {
256 wqe->lpsn += (len - 1) / pmtu; 295 wqe->lpsn += (len - 1) / pmtu;
@@ -281,13 +320,13 @@ int ipath_make_rc_req(struct ipath_qp *qp,
281 /* If no credit, return. */ 320 /* If no credit, return. */
282 if (qp->s_lsn != (u32) -1 && 321 if (qp->s_lsn != (u32) -1 &&
283 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) 322 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
284 goto done; 323 goto bail;
285 ohdr->u.rc.reth.vaddr = 324 ohdr->u.rc.reth.vaddr =
286 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 325 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
287 ohdr->u.rc.reth.rkey = 326 ohdr->u.rc.reth.rkey =
288 cpu_to_be32(wqe->wr.wr.rdma.rkey); 327 cpu_to_be32(wqe->wr.wr.rdma.rkey);
289 ohdr->u.rc.reth.length = cpu_to_be32(len); 328 ohdr->u.rc.reth.length = cpu_to_be32(len);
290 hwords += sizeof(struct ib_reth) / 4; 329 hwords += sizeof(struct ib_reth) / sizeof(u32);
291 wqe->lpsn = wqe->psn; 330 wqe->lpsn = wqe->psn;
292 if (len > pmtu) { 331 if (len > pmtu) {
293 wqe->lpsn += (len - 1) / pmtu; 332 wqe->lpsn += (len - 1) / pmtu;
@@ -312,14 +351,17 @@ int ipath_make_rc_req(struct ipath_qp *qp,
312 break; 351 break;
313 352
314 case IB_WR_RDMA_READ: 353 case IB_WR_RDMA_READ:
315 ohdr->u.rc.reth.vaddr = 354 /*
316 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 355 * Don't allow more operations to be started
317 ohdr->u.rc.reth.rkey = 356 * than the QP limits allow.
318 cpu_to_be32(wqe->wr.wr.rdma.rkey); 357 */
319 ohdr->u.rc.reth.length = cpu_to_be32(len);
320 qp->s_state = OP(RDMA_READ_REQUEST);
321 hwords += sizeof(ohdr->u.rc.reth) / 4;
322 if (newreq) { 358 if (newreq) {
359 if (qp->s_num_rd_atomic >=
360 qp->s_max_rd_atomic) {
361 qp->s_flags |= IPATH_S_RDMAR_PENDING;
362 goto bail;
363 }
364 qp->s_num_rd_atomic++;
323 if (qp->s_lsn != (u32) -1) 365 if (qp->s_lsn != (u32) -1)
324 qp->s_lsn++; 366 qp->s_lsn++;
325 /* 367 /*
@@ -330,6 +372,13 @@ int ipath_make_rc_req(struct ipath_qp *qp,
330 qp->s_next_psn += (len - 1) / pmtu; 372 qp->s_next_psn += (len - 1) / pmtu;
331 wqe->lpsn = qp->s_next_psn++; 373 wqe->lpsn = qp->s_next_psn++;
332 } 374 }
375 ohdr->u.rc.reth.vaddr =
376 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
377 ohdr->u.rc.reth.rkey =
378 cpu_to_be32(wqe->wr.wr.rdma.rkey);
379 ohdr->u.rc.reth.length = cpu_to_be32(len);
380 qp->s_state = OP(RDMA_READ_REQUEST);
381 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
333 ss = NULL; 382 ss = NULL;
334 len = 0; 383 len = 0;
335 if (++qp->s_cur == qp->s_size) 384 if (++qp->s_cur == qp->s_size)
@@ -338,32 +387,48 @@ int ipath_make_rc_req(struct ipath_qp *qp,
338 387
339 case IB_WR_ATOMIC_CMP_AND_SWP: 388 case IB_WR_ATOMIC_CMP_AND_SWP:
340 case IB_WR_ATOMIC_FETCH_AND_ADD: 389 case IB_WR_ATOMIC_FETCH_AND_ADD:
341 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) 390 /*
342 qp->s_state = OP(COMPARE_SWAP); 391 * Don't allow more operations to be started
343 else 392 * than the QP limits allow.
344 qp->s_state = OP(FETCH_ADD); 393 */
345 ohdr->u.atomic_eth.vaddr = cpu_to_be64(
346 wqe->wr.wr.atomic.remote_addr);
347 ohdr->u.atomic_eth.rkey = cpu_to_be32(
348 wqe->wr.wr.atomic.rkey);
349 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
350 wqe->wr.wr.atomic.swap);
351 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
352 wqe->wr.wr.atomic.compare_add);
353 hwords += sizeof(struct ib_atomic_eth) / 4;
354 if (newreq) { 394 if (newreq) {
395 if (qp->s_num_rd_atomic >=
396 qp->s_max_rd_atomic) {
397 qp->s_flags |= IPATH_S_RDMAR_PENDING;
398 goto bail;
399 }
400 qp->s_num_rd_atomic++;
355 if (qp->s_lsn != (u32) -1) 401 if (qp->s_lsn != (u32) -1)
356 qp->s_lsn++; 402 qp->s_lsn++;
357 wqe->lpsn = wqe->psn; 403 wqe->lpsn = wqe->psn;
358 } 404 }
359 if (++qp->s_cur == qp->s_size) 405 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
360 qp->s_cur = 0; 406 qp->s_state = OP(COMPARE_SWAP);
407 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
408 wqe->wr.wr.atomic.swap);
409 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
410 wqe->wr.wr.atomic.compare_add);
411 } else {
412 qp->s_state = OP(FETCH_ADD);
413 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
414 wqe->wr.wr.atomic.compare_add);
415 ohdr->u.atomic_eth.compare_data = 0;
416 }
417 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
418 wqe->wr.wr.atomic.remote_addr >> 32);
419 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
420 wqe->wr.wr.atomic.remote_addr);
421 ohdr->u.atomic_eth.rkey = cpu_to_be32(
422 wqe->wr.wr.atomic.rkey);
423 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
361 ss = NULL; 424 ss = NULL;
362 len = 0; 425 len = 0;
426 if (++qp->s_cur == qp->s_size)
427 qp->s_cur = 0;
363 break; 428 break;
364 429
365 default: 430 default:
366 goto done; 431 goto bail;
367 } 432 }
368 qp->s_sge.sge = wqe->sg_list[0]; 433 qp->s_sge.sge = wqe->sg_list[0];
369 qp->s_sge.sg_list = wqe->sg_list + 1; 434 qp->s_sge.sg_list = wqe->sg_list + 1;
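
[Editor's note] The atomic-request hunk above now emits the 64-bit remote address as two big-endian 32-bit words (vaddr[0] carries the upper half, vaddr[1] the lower) and fills swap_data/compare_data differently for COMPARE_SWAP versus FETCH_ADD. The address encoding is just this split; a self-contained version using htonl()/ntohl() in place of cpu_to_be32()/be32_to_cpu():

#include <arpa/inet.h>          /* htonl(), ntohl() */
#include <stdint.h>
#include <stdio.h>

/* Encode a 64-bit address as two big-endian 32-bit words, high word first. */
static void encode_vaddr(uint64_t vaddr, uint32_t out[2])
{
        out[0] = htonl((uint32_t)(vaddr >> 32));
        out[1] = htonl((uint32_t)vaddr);
}

static uint64_t decode_vaddr(const uint32_t in[2])
{
        return ((uint64_t)ntohl(in[0]) << 32) | ntohl(in[1]);
}

int main(void)
{
        uint32_t wire[2];
        uint64_t addr = 0x123456789abcdef0ULL;

        encode_vaddr(addr, wire);
        printf("round trip ok: %d\n", decode_vaddr(wire) == addr);      /* 1 */
        return 0;
}
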
@@ -379,7 +444,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
379 qp->s_psn = wqe->lpsn + 1; 444 qp->s_psn = wqe->lpsn + 1;
380 else { 445 else {
381 qp->s_psn++; 446 qp->s_psn++;
382 if ((int)(qp->s_psn - qp->s_next_psn) > 0) 447 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
383 qp->s_next_psn = qp->s_psn; 448 qp->s_next_psn = qp->s_psn;
384 } 449 }
385 /* 450 /*
@@ -406,7 +471,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
406 /* FALLTHROUGH */ 471 /* FALLTHROUGH */
407 case OP(SEND_MIDDLE): 472 case OP(SEND_MIDDLE):
408 bth2 = qp->s_psn++ & IPATH_PSN_MASK; 473 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
409 if ((int)(qp->s_psn - qp->s_next_psn) > 0) 474 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
410 qp->s_next_psn = qp->s_psn; 475 qp->s_next_psn = qp->s_psn;
411 ss = &qp->s_sge; 476 ss = &qp->s_sge;
412 len = qp->s_len; 477 len = qp->s_len;
@@ -442,7 +507,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
442 /* FALLTHROUGH */ 507 /* FALLTHROUGH */
443 case OP(RDMA_WRITE_MIDDLE): 508 case OP(RDMA_WRITE_MIDDLE):
444 bth2 = qp->s_psn++ & IPATH_PSN_MASK; 509 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
445 if ((int)(qp->s_psn - qp->s_next_psn) > 0) 510 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
446 qp->s_next_psn = qp->s_psn; 511 qp->s_next_psn = qp->s_psn;
447 ss = &qp->s_sge; 512 ss = &qp->s_sge;
448 len = qp->s_len; 513 len = qp->s_len;
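
[Editor's note] Several hunks in this file replace the plain signed test (int)(qp->s_psn - qp->s_next_psn) > 0 with ipath_cmp24(), which compares two 24-bit PSNs modulo 2^24 so the ordering stays correct across wraparound. The usual idiom is to sign-extend the 24-bit difference through a shift; the sketch below shows that idiom, not necessarily the driver's exact body, and relies on arithmetic right shift as mainstream compilers provide.

#include <stdint.h>
#include <stdio.h>

/* Compare 24-bit sequence numbers a and b modulo 2^24.
 * Negative: a is before b; zero: equal; positive: a is after b. */
static int cmp24(uint32_t a, uint32_t b)
{
        return ((int32_t)((a - b) << 8)) >> 8;  /* sign-extend the 24-bit delta */
}

int main(void)
{
        printf("%d\n", cmp24(5, 3) > 0);                /* 1: 5 is after 3 */
        printf("%d\n", cmp24(0x000002, 0xFFFFFE) > 0);  /* 1: 2 is after 0xFFFFFE (wrapped) */
        printf("%d\n", cmp24(0xFFFFFE, 0x000002) < 0);  /* 1: and the reverse is before */
        return 0;
}
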
@@ -479,9 +544,9 @@ int ipath_make_rc_req(struct ipath_qp *qp,
479 cpu_to_be32(wqe->wr.wr.rdma.rkey); 544 cpu_to_be32(wqe->wr.wr.rdma.rkey);
480 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); 545 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
481 qp->s_state = OP(RDMA_READ_REQUEST); 546 qp->s_state = OP(RDMA_READ_REQUEST);
482 hwords += sizeof(ohdr->u.rc.reth) / 4; 547 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
483 bth2 = qp->s_psn++ & IPATH_PSN_MASK; 548 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
484 if ((int)(qp->s_psn - qp->s_next_psn) > 0) 549 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
485 qp->s_next_psn = qp->s_psn; 550 qp->s_next_psn = qp->s_psn;
486 ss = NULL; 551 ss = NULL;
487 len = 0; 552 len = 0;
@@ -489,20 +554,6 @@ int ipath_make_rc_req(struct ipath_qp *qp,
489 if (qp->s_cur == qp->s_size) 554 if (qp->s_cur == qp->s_size)
490 qp->s_cur = 0; 555 qp->s_cur = 0;
491 break; 556 break;
492
493 case OP(RDMA_READ_REQUEST):
494 case OP(COMPARE_SWAP):
495 case OP(FETCH_ADD):
496 /*
497 * We shouldn't start anything new until this request is
498 * finished. The ACK will handle rescheduling us. XXX The
499 * number of outstanding ones is negotiated at connection
500 * setup time (see pg. 258,289)? XXX Also, if we support
501 * multiple outstanding requests, we need to check the WQE
502 * IB_SEND_FENCE flag and not send a new request if a RDMA
503 * read or atomic is pending.
504 */
505 goto done;
506 } 557 }
507 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0) 558 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
508 bth2 |= 1 << 31; /* Request ACK. */ 559 bth2 |= 1 << 31; /* Request ACK. */
@@ -512,9 +563,10 @@ int ipath_make_rc_req(struct ipath_qp *qp,
512 qp->s_cur_size = len; 563 qp->s_cur_size = len;
513 *bth0p = bth0 | (qp->s_state << 24); 564 *bth0p = bth0 | (qp->s_state << 24);
514 *bth2p = bth2; 565 *bth2p = bth2;
566done:
515 return 1; 567 return 1;
516 568
517done: 569bail:
518 return 0; 570 return 0;
519} 571}
520 572
@@ -524,7 +576,8 @@ done:
524 * 576 *
525 * This is called from ipath_rc_rcv() and only uses the receive 577 * This is called from ipath_rc_rcv() and only uses the receive
526 * side QP state. 578 * side QP state.
527 * Note that RDMA reads are handled in the send side QP state and tasklet. 579 * Note that RDMA reads and atomics are handled in the
580 * send side QP state and tasklet.
528 */ 581 */
529static void send_rc_ack(struct ipath_qp *qp) 582static void send_rc_ack(struct ipath_qp *qp)
530{ 583{
@@ -535,6 +588,10 @@ static void send_rc_ack(struct ipath_qp *qp)
535 struct ipath_ib_header hdr; 588 struct ipath_ib_header hdr;
536 struct ipath_other_headers *ohdr; 589 struct ipath_other_headers *ohdr;
537 590
591 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
592 if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
593 goto queue_ack;
594
538 /* Construct the header. */ 595 /* Construct the header. */
539 ohdr = &hdr.u.oth; 596 ohdr = &hdr.u.oth;
540 lrh0 = IPATH_LRH_BTH; 597 lrh0 = IPATH_LRH_BTH;
@@ -548,19 +605,14 @@ static void send_rc_ack(struct ipath_qp *qp)
548 lrh0 = IPATH_LRH_GRH; 605 lrh0 = IPATH_LRH_GRH;
549 } 606 }
550 /* read pkey_index w/o lock (its atomic) */ 607 /* read pkey_index w/o lock (its atomic) */
551 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index); 608 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
609 OP(ACKNOWLEDGE) << 24;
552 if (qp->r_nak_state) 610 if (qp->r_nak_state)
553 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 611 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
554 (qp->r_nak_state << 612 (qp->r_nak_state <<
555 IPATH_AETH_CREDIT_SHIFT)); 613 IPATH_AETH_CREDIT_SHIFT));
556 else 614 else
557 ohdr->u.aeth = ipath_compute_aeth(qp); 615 ohdr->u.aeth = ipath_compute_aeth(qp);
558 if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
559 bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
560 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
561 hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
562 } else
563 bth0 |= OP(ACKNOWLEDGE) << 24;
564 lrh0 |= qp->remote_ah_attr.sl << 4; 616 lrh0 |= qp->remote_ah_attr.sl << 4;
565 hdr.lrh[0] = cpu_to_be16(lrh0); 617 hdr.lrh[0] = cpu_to_be16(lrh0);
566 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 618 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
@@ -574,31 +626,31 @@ static void send_rc_ack(struct ipath_qp *qp)
574 * If we can send the ACK, clear the ACK state. 626 * If we can send the ACK, clear the ACK state.
575 */ 627 */
576 if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) { 628 if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
577 qp->r_ack_state = OP(ACKNOWLEDGE);
578 dev->n_unicast_xmit++; 629 dev->n_unicast_xmit++;
579 } else { 630 goto done;
580 /*
581 * We are out of PIO buffers at the moment.
582 * Pass responsibility for sending the ACK to the
583 * send tasklet so that when a PIO buffer becomes
584 * available, the ACK is sent ahead of other outgoing
585 * packets.
586 */
587 dev->n_rc_qacks++;
588 spin_lock_irq(&qp->s_lock);
589 /* Don't coalesce if a RDMA read or atomic is pending. */
590 if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
591 qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
592 qp->s_ack_state = qp->r_ack_state;
593 qp->s_nak_state = qp->r_nak_state;
594 qp->s_ack_psn = qp->r_ack_psn;
595 qp->r_ack_state = OP(ACKNOWLEDGE);
596 }
597 spin_unlock_irq(&qp->s_lock);
598
599 /* Call ipath_do_rc_send() in another thread. */
600 tasklet_hi_schedule(&qp->s_task);
601 } 631 }
632
633 /*
634 * We are out of PIO buffers at the moment.
635 * Pass responsibility for sending the ACK to the
636 * send tasklet so that when a PIO buffer becomes
637 * available, the ACK is sent ahead of other outgoing
638 * packets.
639 */
640 dev->n_rc_qacks++;
641
642queue_ack:
643 spin_lock_irq(&qp->s_lock);
644 qp->s_flags |= IPATH_S_ACK_PENDING;
645 qp->s_nak_state = qp->r_nak_state;
646 qp->s_ack_psn = qp->r_ack_psn;
647 spin_unlock_irq(&qp->s_lock);
648
649 /* Call ipath_do_rc_send() in another thread. */
650 tasklet_hi_schedule(&qp->s_task);
651
652done:
653 return;
602} 654}
603 655
604/** 656/**
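
[Editor's note] send_rc_ack() above now has a single fallback path: if a read/atomic response is already queued, or no PIO buffer is free, it records the ACK/NAK state, sets IPATH_S_ACK_PENDING under s_lock, and leaves the transmission to the send tasklet. A sketch of that "send now or mark pending and defer" shape, with a pthread mutex standing in for s_lock and the tasklet reduced to a return value; all names here are stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ack_state {
        pthread_mutex_t lock;
        bool ack_pending;       /* deferred ACK for the sender thread */
        unsigned int nak_state;
        unsigned int ack_psn;
};

/* Pretend transmit; returns false when no buffer is available. */
static bool try_send_now(unsigned int psn)
{
        (void)psn;
        return false;           /* force the deferred path for the demo */
}

/* Returns true if sent immediately; false means "schedule the sender". */
static bool send_or_queue_ack(struct ack_state *s, unsigned int nak, unsigned int psn)
{
        if (try_send_now(psn))
                return true;

        pthread_mutex_lock(&s->lock);
        s->ack_pending = true;          /* sender emits it ahead of other packets */
        s->nak_state = nak;
        s->ack_psn = psn;
        pthread_mutex_unlock(&s->lock);
        return false;
}

int main(void)
{
        struct ack_state s = { PTHREAD_MUTEX_INITIALIZER, false, 0, 0 };

        if (!send_or_queue_ack(&s, 0, 42))
                printf("ACK for PSN %u queued for the send path\n", s.ack_psn);
        return 0;
}

(Link with -pthread.)
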
@@ -727,7 +779,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
727 if (wqe->wr.opcode == IB_WR_RDMA_READ) 779 if (wqe->wr.opcode == IB_WR_RDMA_READ)
728 dev->n_rc_resends++; 780 dev->n_rc_resends++;
729 else 781 else
730 dev->n_rc_resends += (int)qp->s_psn - (int)psn; 782 dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
731 783
732 reset_psn(qp, psn); 784 reset_psn(qp, psn);
733 tasklet_hi_schedule(&qp->s_task); 785 tasklet_hi_schedule(&qp->s_task);
@@ -775,10 +827,6 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
775 list_del_init(&qp->timerwait); 827 list_del_init(&qp->timerwait);
776 spin_unlock(&dev->pending_lock); 828 spin_unlock(&dev->pending_lock);
777 829
778 /* Nothing is pending to ACK/NAK. */
779 if (unlikely(qp->s_last == qp->s_tail))
780 goto bail;
781
782 /* 830 /*
783 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 831 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
784 * requests and implicitly NAK RDMA read and atomic requests issued 832 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -806,7 +854,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
806 */ 854 */
807 if ((wqe->wr.opcode == IB_WR_RDMA_READ && 855 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
808 (opcode != OP(RDMA_READ_RESPONSE_LAST) || 856 (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
809 ipath_cmp24(ack_psn, wqe->lpsn) != 0)) || 857 ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
810 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 858 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
811 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && 859 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
812 (opcode != OP(ATOMIC_ACKNOWLEDGE) || 860 (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
@@ -824,20 +872,33 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
824 */ 872 */
825 goto bail; 873 goto bail;
826 } 874 }
827 if (wqe->wr.opcode == IB_WR_RDMA_READ || 875 if (qp->s_num_rd_atomic &&
828 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 876 (wqe->wr.opcode == IB_WR_RDMA_READ ||
829 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) 877 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
830 tasklet_hi_schedule(&qp->s_task); 878 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
879 qp->s_num_rd_atomic--;
880 /* Restart sending task if fence is complete */
881 if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
882 !qp->s_num_rd_atomic) {
883 qp->s_flags &= ~IPATH_S_FENCE_PENDING;
884 tasklet_hi_schedule(&qp->s_task);
885 } else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
886 qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
887 tasklet_hi_schedule(&qp->s_task);
888 }
889 }
831 /* Post a send completion queue entry if requested. */ 890 /* Post a send completion queue entry if requested. */
832 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) || 891 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
833 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 892 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
834 wc.wr_id = wqe->wr.wr_id; 893 wc.wr_id = wqe->wr.wr_id;
835 wc.status = IB_WC_SUCCESS; 894 wc.status = IB_WC_SUCCESS;
836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 895 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
837 wc.vendor_err = 0; 896 wc.vendor_err = 0;
838 wc.byte_len = wqe->length; 897 wc.byte_len = wqe->length;
898 wc.imm_data = 0;
839 wc.qp = &qp->ibqp; 899 wc.qp = &qp->ibqp;
840 wc.src_qp = qp->remote_qpn; 900 wc.src_qp = qp->remote_qpn;
901 wc.wc_flags = 0;
841 wc.pkey_index = 0; 902 wc.pkey_index = 0;
842 wc.slid = qp->remote_ah_attr.dlid; 903 wc.slid = qp->remote_ah_attr.dlid;
843 wc.sl = qp->remote_ah_attr.sl; 904 wc.sl = qp->remote_ah_attr.sl;
@@ -854,15 +915,19 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
854 if (qp->s_last == qp->s_cur) { 915 if (qp->s_last == qp->s_cur) {
855 if (++qp->s_cur >= qp->s_size) 916 if (++qp->s_cur >= qp->s_size)
856 qp->s_cur = 0; 917 qp->s_cur = 0;
918 qp->s_last = qp->s_cur;
919 if (qp->s_last == qp->s_tail)
920 break;
857 wqe = get_swqe_ptr(qp, qp->s_cur); 921 wqe = get_swqe_ptr(qp, qp->s_cur);
858 qp->s_state = OP(SEND_LAST); 922 qp->s_state = OP(SEND_LAST);
859 qp->s_psn = wqe->psn; 923 qp->s_psn = wqe->psn;
924 } else {
925 if (++qp->s_last >= qp->s_size)
926 qp->s_last = 0;
927 if (qp->s_last == qp->s_tail)
928 break;
929 wqe = get_swqe_ptr(qp, qp->s_last);
860 } 930 }
861 if (++qp->s_last >= qp->s_size)
862 qp->s_last = 0;
863 wqe = get_swqe_ptr(qp, qp->s_last);
864 if (qp->s_last == qp->s_tail)
865 break;
866 } 931 }
867 932
868 switch (aeth >> 29) { 933 switch (aeth >> 29) {
@@ -874,6 +939,18 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
874 list_add_tail(&qp->timerwait, 939 list_add_tail(&qp->timerwait,
875 &dev->pending[dev->pending_index]); 940 &dev->pending[dev->pending_index]);
876 spin_unlock(&dev->pending_lock); 941 spin_unlock(&dev->pending_lock);
942 /*
943 * If we get a partial ACK for a resent operation,
944 * we can stop resending the earlier packets and
945 * continue with the next packet the receiver wants.
946 */
947 if (ipath_cmp24(qp->s_psn, psn) <= 0) {
948 reset_psn(qp, psn + 1);
949 tasklet_hi_schedule(&qp->s_task);
950 }
951 } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
952 qp->s_state = OP(SEND_LAST);
953 qp->s_psn = psn + 1;
877 } 954 }
878 ipath_get_credit(qp, aeth); 955 ipath_get_credit(qp, aeth);
879 qp->s_rnr_retry = qp->s_rnr_retry_cnt; 956 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
@@ -884,22 +961,23 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
884 961
885 case 1: /* RNR NAK */ 962 case 1: /* RNR NAK */
886 dev->n_rnr_naks++; 963 dev->n_rnr_naks++;
964 if (qp->s_last == qp->s_tail)
965 goto bail;
887 if (qp->s_rnr_retry == 0) { 966 if (qp->s_rnr_retry == 0) {
888 if (qp->s_last == qp->s_tail)
889 goto bail;
890
891 wc.status = IB_WC_RNR_RETRY_EXC_ERR; 967 wc.status = IB_WC_RNR_RETRY_EXC_ERR;
892 goto class_b; 968 goto class_b;
893 } 969 }
894 if (qp->s_rnr_retry_cnt < 7) 970 if (qp->s_rnr_retry_cnt < 7)
895 qp->s_rnr_retry--; 971 qp->s_rnr_retry--;
896 if (qp->s_last == qp->s_tail)
897 goto bail;
898 972
899 /* The last valid PSN is the previous PSN. */ 973 /* The last valid PSN is the previous PSN. */
900 update_last_psn(qp, psn - 1); 974 update_last_psn(qp, psn - 1);
901 975
902 dev->n_rc_resends += (int)qp->s_psn - (int)psn; 976 if (wqe->wr.opcode == IB_WR_RDMA_READ)
977 dev->n_rc_resends++;
978 else
979 dev->n_rc_resends +=
980 (qp->s_psn - psn) & IPATH_PSN_MASK;
903 981
904 reset_psn(qp, psn); 982 reset_psn(qp, psn);
905 983
@@ -910,26 +988,20 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
910 goto bail; 988 goto bail;
911 989
912 case 3: /* NAK */ 990 case 3: /* NAK */
913 /* The last valid PSN seen is the previous request's. */ 991 if (qp->s_last == qp->s_tail)
914 if (qp->s_last != qp->s_tail) 992 goto bail;
915 update_last_psn(qp, wqe->psn - 1); 993 /* The last valid PSN is the previous PSN. */
994 update_last_psn(qp, psn - 1);
916 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) & 995 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
917 IPATH_AETH_CREDIT_MASK) { 996 IPATH_AETH_CREDIT_MASK) {
918 case 0: /* PSN sequence error */ 997 case 0: /* PSN sequence error */
919 dev->n_seq_naks++; 998 dev->n_seq_naks++;
920 /* 999 /*
921 * Back up to the responder's expected PSN. XXX 1000 * Back up to the responder's expected PSN.
922 * Note that we might get a NAK in the middle of an 1001 * Note that we might get a NAK in the middle of an
923 * RDMA READ response which terminates the RDMA 1002 * RDMA READ response which terminates the RDMA
924 * READ. 1003 * READ.
925 */ 1004 */
926 if (qp->s_last == qp->s_tail)
927 break;
928
929 if (ipath_cmp24(psn, wqe->psn) < 0)
930 break;
931
932 /* Retry the request. */
933 ipath_restart_rc(qp, psn, &wc); 1005 ipath_restart_rc(qp, psn, &wc);
934 break; 1006 break;
935 1007
@@ -1003,6 +1075,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1003 u32 psn, u32 hdrsize, u32 pmtu, 1075 u32 psn, u32 hdrsize, u32 pmtu,
1004 int header_in_data) 1076 int header_in_data)
1005{ 1077{
1078 struct ipath_swqe *wqe;
1006 unsigned long flags; 1079 unsigned long flags;
1007 struct ib_wc wc; 1080 struct ib_wc wc;
1008 int diff; 1081 int diff;
@@ -1032,6 +1105,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1032 goto ack_done; 1105 goto ack_done;
1033 } 1106 }
1034 1107
1108 if (unlikely(qp->s_last == qp->s_tail))
1109 goto ack_done;
1110 wqe = get_swqe_ptr(qp, qp->s_last);
1111
1035 switch (opcode) { 1112 switch (opcode) {
1036 case OP(ACKNOWLEDGE): 1113 case OP(ACKNOWLEDGE):
1037 case OP(ATOMIC_ACKNOWLEDGE): 1114 case OP(ATOMIC_ACKNOWLEDGE):
@@ -1042,38 +1119,49 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1042 aeth = be32_to_cpu(((__be32 *) data)[0]); 1119 aeth = be32_to_cpu(((__be32 *) data)[0]);
1043 data += sizeof(__be32); 1120 data += sizeof(__be32);
1044 } 1121 }
1045 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) 1122 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1046 *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data; 1123 u64 val;
1124
1125 if (!header_in_data) {
1126 __be32 *p = ohdr->u.at.atomic_ack_eth;
1127
1128 val = ((u64) be32_to_cpu(p[0]) << 32) |
1129 be32_to_cpu(p[1]);
1130 } else
1131 val = be64_to_cpu(((__be64 *) data)[0]);
1132 *(u64 *) wqe->sg_list[0].vaddr = val;
1133 }
1047 if (!do_rc_ack(qp, aeth, psn, opcode) || 1134 if (!do_rc_ack(qp, aeth, psn, opcode) ||
1048 opcode != OP(RDMA_READ_RESPONSE_FIRST)) 1135 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1049 goto ack_done; 1136 goto ack_done;
1050 hdrsize += 4; 1137 hdrsize += 4;
1138 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1139 goto ack_op_err;
1051 /* 1140 /*
1052 * do_rc_ack() has already checked the PSN so skip 1141 * If this is a response to a resent RDMA read, we
1053 * the sequence check. 1142 * have to be careful to copy the data to the right
1143 * location.
1054 */ 1144 */
1055 goto rdma_read; 1145 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1146 wqe, psn, pmtu);
1147 goto read_middle;
1056 1148
1057 case OP(RDMA_READ_RESPONSE_MIDDLE): 1149 case OP(RDMA_READ_RESPONSE_MIDDLE):
1058 /* no AETH, no ACK */ 1150 /* no AETH, no ACK */
1059 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { 1151 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1060 dev->n_rdma_seq++; 1152 dev->n_rdma_seq++;
1061 if (qp->s_last != qp->s_tail) 1153 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1062 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1063 goto ack_done; 1154 goto ack_done;
1064 } 1155 }
1065 rdma_read: 1156 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1066 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST))) 1157 goto ack_op_err;
1067 goto ack_done; 1158 read_middle:
1068 if (unlikely(tlen != (hdrsize + pmtu + 4))) 1159 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1069 goto ack_done; 1160 goto ack_len_err;
1070 if (unlikely(pmtu >= qp->s_len)) 1161 if (unlikely(pmtu >= qp->s_rdma_read_len))
1071 goto ack_done; 1162 goto ack_len_err;
1163
1072 /* We got a response so update the timeout. */ 1164 /* We got a response so update the timeout. */
1073 if (unlikely(qp->s_last == qp->s_tail ||
1074 get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
1075 IB_WR_RDMA_READ))
1076 goto ack_done;
1077 spin_lock(&dev->pending_lock); 1165 spin_lock(&dev->pending_lock);
1078 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) 1166 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
1079 list_move_tail(&qp->timerwait, 1167 list_move_tail(&qp->timerwait,
@@ -1082,67 +1170,97 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1082 /* 1170 /*
1083 * Update the RDMA receive state but do the copy w/o 1171 * Update the RDMA receive state but do the copy w/o
1084 * holding the locks and blocking interrupts. 1172 * holding the locks and blocking interrupts.
1085 * XXX Yet another place that affects relaxed RDMA order
1086 * since we don't want s_sge modified.
1087 */ 1173 */
1088 qp->s_len -= pmtu; 1174 qp->s_rdma_read_len -= pmtu;
1089 update_last_psn(qp, psn); 1175 update_last_psn(qp, psn);
1090 spin_unlock_irqrestore(&qp->s_lock, flags); 1176 spin_unlock_irqrestore(&qp->s_lock, flags);
1091 ipath_copy_sge(&qp->s_sge, data, pmtu); 1177 ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
1092 goto bail; 1178 goto bail;
1093 1179
1094 case OP(RDMA_READ_RESPONSE_LAST): 1180 case OP(RDMA_READ_RESPONSE_ONLY):
1095 /* ACKs READ req. */
1096 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { 1181 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1097 dev->n_rdma_seq++; 1182 dev->n_rdma_seq++;
1098 if (qp->s_last != qp->s_tail) 1183 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1099 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1100 goto ack_done; 1184 goto ack_done;
1101 } 1185 }
1102 /* FALLTHROUGH */ 1186 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1103 case OP(RDMA_READ_RESPONSE_ONLY): 1187 goto ack_op_err;
1104 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST))) 1188 /* Get the number of bytes the message was padded by. */
1105 goto ack_done; 1189 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1190 /*
1191 * Check that the data size is >= 0 && <= pmtu.
1192 * Remember to account for the AETH header (4) and
1193 * ICRC (4).
1194 */
1195 if (unlikely(tlen < (hdrsize + pad + 8)))
1196 goto ack_len_err;
1106 /* 1197 /*
1107 * Get the number of bytes the message was padded by. 1198 * If this is a response to a resent RDMA read, we
1199 * have to be careful to copy the data to the right
1200 * location.
1108 */ 1201 */
1202 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1203 wqe, psn, pmtu);
1204 goto read_last;
1205
1206 case OP(RDMA_READ_RESPONSE_LAST):
1207 /* ACKs READ req. */
1208 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1209 dev->n_rdma_seq++;
1210 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1211 goto ack_done;
1212 }
1213 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1214 goto ack_op_err;
1215 /* Get the number of bytes the message was padded by. */
1109 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; 1216 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1110 /* 1217 /*
1111 * Check that the data size is >= 1 && <= pmtu. 1218 * Check that the data size is >= 1 && <= pmtu.
1112 * Remember to account for the AETH header (4) and 1219 * Remember to account for the AETH header (4) and
1113 * ICRC (4). 1220 * ICRC (4).
1114 */ 1221 */
1115 if (unlikely(tlen <= (hdrsize + pad + 8))) { 1222 if (unlikely(tlen <= (hdrsize + pad + 8)))
1116 /* XXX Need to generate an error CQ entry. */ 1223 goto ack_len_err;
1117 goto ack_done; 1224 read_last:
1118 }
1119 tlen -= hdrsize + pad + 8; 1225 tlen -= hdrsize + pad + 8;
1120 if (unlikely(tlen != qp->s_len)) { 1226 if (unlikely(tlen != qp->s_rdma_read_len))
1121 /* XXX Need to generate an error CQ entry. */ 1227 goto ack_len_err;
1122 goto ack_done;
1123 }
1124 if (!header_in_data) 1228 if (!header_in_data)
1125 aeth = be32_to_cpu(ohdr->u.aeth); 1229 aeth = be32_to_cpu(ohdr->u.aeth);
1126 else { 1230 else {
1127 aeth = be32_to_cpu(((__be32 *) data)[0]); 1231 aeth = be32_to_cpu(((__be32 *) data)[0]);
1128 data += sizeof(__be32); 1232 data += sizeof(__be32);
1129 } 1233 }
1130 ipath_copy_sge(&qp->s_sge, data, tlen); 1234 ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
1131 if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) { 1235 (void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST));
1132 /*
1133 * Change the state so we contimue
1134 * processing new requests and wake up the
1135 * tasklet if there are posted sends.
1136 */
1137 qp->s_state = OP(SEND_LAST);
1138 if (qp->s_tail != qp->s_head)
1139 tasklet_hi_schedule(&qp->s_task);
1140 }
1141 goto ack_done; 1236 goto ack_done;
1142 } 1237 }
1143 1238
1144ack_done: 1239ack_done:
1145 spin_unlock_irqrestore(&qp->s_lock, flags); 1240 spin_unlock_irqrestore(&qp->s_lock, flags);
1241 goto bail;
1242
1243ack_op_err:
1244 wc.status = IB_WC_LOC_QP_OP_ERR;
1245 goto ack_err;
1246
1247ack_len_err:
1248 wc.status = IB_WC_LOC_LEN_ERR;
1249ack_err:
1250 wc.wr_id = wqe->wr.wr_id;
1251 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
1252 wc.vendor_err = 0;
1253 wc.byte_len = 0;
1254 wc.imm_data = 0;
1255 wc.qp = &qp->ibqp;
1256 wc.src_qp = qp->remote_qpn;
1257 wc.wc_flags = 0;
1258 wc.pkey_index = 0;
1259 wc.slid = qp->remote_ah_attr.dlid;
1260 wc.sl = qp->remote_ah_attr.sl;
1261 wc.dlid_path_bits = 0;
1262 wc.port_num = 0;
1263 ipath_sqerror_qp(qp, &wc);
1146bail: 1264bail:
1147 return; 1265 return;
1148} 1266}
@@ -1162,7 +1280,7 @@ bail:
1162 * incoming RC packet for the given QP. 1280 * incoming RC packet for the given QP.
1163 * Called at interrupt level. 1281 * Called at interrupt level.
1164 * Return 1 if no more processing is needed; otherwise return 0 to 1282 * Return 1 if no more processing is needed; otherwise return 0 to
1165 * schedule a response to be sent and the s_lock unlocked. 1283 * schedule a response to be sent.
1166 */ 1284 */
1167static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, 1285static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1168 struct ipath_other_headers *ohdr, 1286 struct ipath_other_headers *ohdr,
@@ -1173,25 +1291,23 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1173 int diff, 1291 int diff,
1174 int header_in_data) 1292 int header_in_data)
1175{ 1293{
1176 struct ib_reth *reth; 1294 struct ipath_ack_entry *e;
1295 u8 i, prev;
1296 int old_req;
1177 1297
1178 if (diff > 0) { 1298 if (diff > 0) {
1179 /* 1299 /*
1180 * Packet sequence error. 1300 * Packet sequence error.
1181 * A NAK will ACK earlier sends and RDMA writes. 1301 * A NAK will ACK earlier sends and RDMA writes.
1182 * Don't queue the NAK if a RDMA read, atomic, or 1302 * Don't queue the NAK if we already sent one.
1183 * NAK is pending though.
1184 */ 1303 */
1185 if (qp->s_ack_state != OP(ACKNOWLEDGE) || 1304 if (!qp->r_nak_state) {
1186 qp->r_nak_state != 0)
1187 goto done;
1188 if (qp->r_ack_state < OP(COMPARE_SWAP)) {
1189 qp->r_ack_state = OP(SEND_ONLY);
1190 qp->r_nak_state = IB_NAK_PSN_ERROR; 1305 qp->r_nak_state = IB_NAK_PSN_ERROR;
1191 /* Use the expected PSN. */ 1306 /* Use the expected PSN. */
1192 qp->r_ack_psn = qp->r_psn; 1307 qp->r_ack_psn = qp->r_psn;
1308 goto send_ack;
1193 } 1309 }
1194 goto send_ack; 1310 goto done;
1195 } 1311 }
1196 1312
1197 /* 1313 /*
@@ -1204,8 +1320,46 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1204 * can coalesce an outstanding duplicate ACK. We have to 1320 * can coalesce an outstanding duplicate ACK. We have to
1205 * send the earliest so that RDMA reads can be restarted at 1321 * send the earliest so that RDMA reads can be restarted at
1206 * the requester's expected PSN. 1322 * the requester's expected PSN.
1323 *
1324 * First, find where this duplicate PSN falls within the
1325 * ACKs previously sent.
1207 */ 1326 */
1208 if (opcode == OP(RDMA_READ_REQUEST)) { 1327 psn &= IPATH_PSN_MASK;
1328 e = NULL;
1329 old_req = 1;
1330 spin_lock_irq(&qp->s_lock);
1331 for (i = qp->r_head_ack_queue; ; i = prev) {
1332 if (i == qp->s_tail_ack_queue)
1333 old_req = 0;
1334 if (i)
1335 prev = i - 1;
1336 else
1337 prev = IPATH_MAX_RDMA_ATOMIC;
1338 if (prev == qp->r_head_ack_queue) {
1339 e = NULL;
1340 break;
1341 }
1342 e = &qp->s_ack_queue[prev];
1343 if (!e->opcode) {
1344 e = NULL;
1345 break;
1346 }
1347 if (ipath_cmp24(psn, e->psn) >= 0)
1348 break;
1349 }
1350 switch (opcode) {
1351 case OP(RDMA_READ_REQUEST): {
1352 struct ib_reth *reth;
1353 u32 offset;
1354 u32 len;
1355
1356 /*
1357 * If we didn't find the RDMA read request in the ack queue,
1358 * or the send tasklet is already backed up to send an
1359 * earlier entry, we can ignore this request.
1360 */
1361 if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
1362 goto unlock_done;
1209 /* RETH comes after BTH */ 1363 /* RETH comes after BTH */
1210 if (!header_in_data) 1364 if (!header_in_data)
1211 reth = &ohdr->u.rc.reth; 1365 reth = &ohdr->u.rc.reth;
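
[Editor's note] The duplicate-request handler above walks the ack queue backwards from r_head_ack_queue, wrapping through IPATH_MAX_RDMA_ATOMIC, until it finds the previously queued response whose starting PSN covers the duplicate (old_req records whether the send side has already moved past it). A compact model of that backwards ring scan; sizes and names are illustrative and the 24-bit PSN comparison is reduced to plain integers.

#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING 4

struct resp_entry { int valid; uint32_t psn; };

/*
 * Scan backwards from 'head' (exclusive) for the newest queued response
 * whose PSN is <= 'psn'.  Returns the slot index, or -1 if none covers it.
 */
static int find_covering_entry(const struct resp_entry *q, int head, uint32_t psn)
{
        int i, prev;

        for (i = head; ; i = prev) {
                prev = i ? i - 1 : MAX_OUTSTANDING;     /* wrap backwards */
                if (prev == head)
                        return -1;                      /* scanned the whole ring */
                if (!q[prev].valid)
                        return -1;                      /* ran off the queued entries */
                if (psn >= q[prev].psn)
                        return prev;
        }
}

int main(void)
{
        struct resp_entry q[MAX_OUTSTANDING + 1] = {
                { 1, 100 }, { 1, 110 }, { 1, 120 }, { 0, 0 }, { 0, 0 }
        };

        /* head == 3: slots 0..2 hold queued responses. */
        printf("duplicate PSN 112 covered by slot %d\n",
               find_covering_entry(q, 3, 112));         /* slot 1 */
        return 0;
}
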
@@ -1214,88 +1368,87 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1214 data += sizeof(*reth); 1368 data += sizeof(*reth);
1215 } 1369 }
1216 /* 1370 /*
1217 * If we receive a duplicate RDMA request, it means the 1371 * Address range must be a subset of the original
1218 * requester saw a sequence error and needs to restart 1372 * request and start on pmtu boundaries.
1219 * from an earlier point. We can abort the current 1373 * We reuse the old ack_queue slot since the requester
1220 * RDMA read send in that case. 1374 * should not back up and request an earlier PSN for the
1375 * same request.
1221 */ 1376 */
1222 spin_lock_irq(&qp->s_lock); 1377 offset = ((psn - e->psn) & IPATH_PSN_MASK) *
1223 if (qp->s_ack_state != OP(ACKNOWLEDGE) && 1378 ib_mtu_enum_to_int(qp->path_mtu);
1224 (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) { 1379 len = be32_to_cpu(reth->length);
1225 /* 1380 if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
1226 * We are already sending earlier requested data. 1381 goto unlock_done;
1227 * Don't abort it to send later out of sequence data. 1382 if (len != 0) {
1228 */
1229 spin_unlock_irq(&qp->s_lock);
1230 goto done;
1231 }
1232 qp->s_rdma_len = be32_to_cpu(reth->length);
1233 if (qp->s_rdma_len != 0) {
1234 u32 rkey = be32_to_cpu(reth->rkey); 1383 u32 rkey = be32_to_cpu(reth->rkey);
1235 u64 vaddr = be64_to_cpu(reth->vaddr); 1384 u64 vaddr = be64_to_cpu(reth->vaddr);
1236 int ok; 1385 int ok;
1237 1386
1238 /* 1387 ok = ipath_rkey_ok(qp, &e->rdma_sge,
1239 * Address range must be a subset of the original 1388 len, vaddr, rkey,
1240 * request and start on pmtu boundaries.
1241 */
1242 ok = ipath_rkey_ok(qp, &qp->s_rdma_sge,
1243 qp->s_rdma_len, vaddr, rkey,
1244 IB_ACCESS_REMOTE_READ); 1389 IB_ACCESS_REMOTE_READ);
1245 if (unlikely(!ok)) { 1390 if (unlikely(!ok))
1246 spin_unlock_irq(&qp->s_lock); 1391 goto unlock_done;
1247 goto done;
1248 }
1249 } else { 1392 } else {
1250 qp->s_rdma_sge.sg_list = NULL; 1393 e->rdma_sge.sg_list = NULL;
1251 qp->s_rdma_sge.num_sge = 0; 1394 e->rdma_sge.num_sge = 0;
1252 qp->s_rdma_sge.sge.mr = NULL; 1395 e->rdma_sge.sge.mr = NULL;
1253 qp->s_rdma_sge.sge.vaddr = NULL; 1396 e->rdma_sge.sge.vaddr = NULL;
1254 qp->s_rdma_sge.sge.length = 0; 1397 e->rdma_sge.sge.length = 0;
1255 qp->s_rdma_sge.sge.sge_length = 0; 1398 e->rdma_sge.sge.sge_length = 0;
1256 } 1399 }
1257 qp->s_ack_state = opcode; 1400 e->psn = psn;
1258 qp->s_ack_psn = psn; 1401 qp->s_ack_state = OP(ACKNOWLEDGE);
1259 spin_unlock_irq(&qp->s_lock); 1402 qp->s_tail_ack_queue = prev;
1260 tasklet_hi_schedule(&qp->s_task); 1403 break;
1261 goto send_ack;
1262 } 1404 }
1263 1405
1264 /*
1265 * A pending RDMA read will ACK anything before it so
1266 * ignore earlier duplicate requests.
1267 */
1268 if (qp->s_ack_state != OP(ACKNOWLEDGE))
1269 goto done;
1270
1271 /*
1272 * If an ACK is pending, don't replace the pending ACK
1273 * with an earlier one since the later one will ACK the earlier.
1274 * Also, if we already have a pending atomic, send it.
1275 */
1276 if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
1277 (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
1278 qp->r_ack_state >= OP(COMPARE_SWAP)))
1279 goto send_ack;
1280 switch (opcode) {
1281 case OP(COMPARE_SWAP): 1406 case OP(COMPARE_SWAP):
1282 case OP(FETCH_ADD): 1407 case OP(FETCH_ADD): {
1283 /* 1408 /*
1284 * Check for the PSN of the last atomic operation 1409 * If we didn't find the atomic request in the ack queue
1285 * performed and resend the result if found. 1410 * or the send tasklet is already backed up to send an
1411 * earlier entry, we can ignore this request.
1286 */ 1412 */
1287 if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn) 1413 if (!e || e->opcode != (u8) opcode || old_req)
1288 goto done; 1414 goto unlock_done;
1415 qp->s_ack_state = OP(ACKNOWLEDGE);
1416 qp->s_tail_ack_queue = prev;
1417 break;
1418 }
1419
1420 default:
1421 if (old_req)
1422 goto unlock_done;
1423 /*
1424 * Resend the most recent ACK if this request is
1425 * after all the previous RDMA reads and atomics.
1426 */
1427 if (i == qp->r_head_ack_queue) {
1428 spin_unlock_irq(&qp->s_lock);
1429 qp->r_nak_state = 0;
1430 qp->r_ack_psn = qp->r_psn - 1;
1431 goto send_ack;
1432 }
1433 /*
1434 * Resend the RDMA read or atomic op which
1435 * ACKs this duplicate request.
1436 */
1437 qp->s_ack_state = OP(ACKNOWLEDGE);
1438 qp->s_tail_ack_queue = i;
1289 break; 1439 break;
1290 } 1440 }
1291 qp->r_ack_state = opcode;
1292 qp->r_nak_state = 0; 1441 qp->r_nak_state = 0;
1293 qp->r_ack_psn = psn; 1442 spin_unlock_irq(&qp->s_lock);
1294send_ack: 1443 tasklet_hi_schedule(&qp->s_task);
1295 return 0;
1296 1444
1445unlock_done:
1446 spin_unlock_irq(&qp->s_lock);
1297done: 1447done:
1298 return 1; 1448 return 1;
1449
1450send_ack:
1451 return 0;
1299} 1452}
1300 1453
1301static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) 1454static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
@@ -1391,15 +1544,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1391 opcode == OP(SEND_LAST_WITH_IMMEDIATE)) 1544 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1392 break; 1545 break;
1393 nack_inv: 1546 nack_inv:
1394 /*
1395 * A NAK will ACK earlier sends and RDMA writes.
1396 * Don't queue the NAK if a RDMA read, atomic, or NAK
1397 * is pending though.
1398 */
1399 if (qp->r_ack_state >= OP(COMPARE_SWAP))
1400 goto send_ack;
1401 ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR); 1547 ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
1402 qp->r_ack_state = OP(SEND_ONLY);
1403 qp->r_nak_state = IB_NAK_INVALID_REQUEST; 1548 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
1404 qp->r_ack_psn = qp->r_psn; 1549 qp->r_ack_psn = qp->r_psn;
1405 goto send_ack; 1550 goto send_ack;
@@ -1441,9 +1586,8 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1441 * Don't queue the NAK if a RDMA read or atomic 1586 * Don't queue the NAK if a RDMA read or atomic
1442 * is pending though. 1587 * is pending though.
1443 */ 1588 */
1444 if (qp->r_ack_state >= OP(COMPARE_SWAP)) 1589 if (qp->r_nak_state)
1445 goto send_ack; 1590 goto done;
1446 qp->r_ack_state = OP(SEND_ONLY);
1447 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; 1591 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
1448 qp->r_ack_psn = qp->r_psn; 1592 qp->r_ack_psn = qp->r_psn;
1449 goto send_ack; 1593 goto send_ack;
@@ -1567,7 +1711,19 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1567 goto rnr_nak; 1711 goto rnr_nak;
1568 goto send_last_imm; 1712 goto send_last_imm;
1569 1713
1570 case OP(RDMA_READ_REQUEST): 1714 case OP(RDMA_READ_REQUEST): {
1715 struct ipath_ack_entry *e;
1716 u32 len;
1717 u8 next;
1718
1719 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
1720 goto nack_acc;
1721 next = qp->r_head_ack_queue + 1;
1722 if (next > IPATH_MAX_RDMA_ATOMIC)
1723 next = 0;
1724 if (unlikely(next == qp->s_tail_ack_queue))
1725 goto nack_inv;
1726 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1571 /* RETH comes after BTH */ 1727 /* RETH comes after BTH */
1572 if (!header_in_data) 1728 if (!header_in_data)
1573 reth = &ohdr->u.rc.reth; 1729 reth = &ohdr->u.rc.reth;
@@ -1575,72 +1731,75 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1575 reth = (struct ib_reth *)data; 1731 reth = (struct ib_reth *)data;
1576 data += sizeof(*reth); 1732 data += sizeof(*reth);
1577 } 1733 }
1578 if (unlikely(!(qp->qp_access_flags & 1734 len = be32_to_cpu(reth->length);
1579 IB_ACCESS_REMOTE_READ))) 1735 if (len) {
1580 goto nack_acc;
1581 spin_lock_irq(&qp->s_lock);
1582 qp->s_rdma_len = be32_to_cpu(reth->length);
1583 if (qp->s_rdma_len != 0) {
1584 u32 rkey = be32_to_cpu(reth->rkey); 1736 u32 rkey = be32_to_cpu(reth->rkey);
1585 u64 vaddr = be64_to_cpu(reth->vaddr); 1737 u64 vaddr = be64_to_cpu(reth->vaddr);
1586 int ok; 1738 int ok;
1587 1739
1588 /* Check rkey & NAK */ 1740 /* Check rkey & NAK */
1589 ok = ipath_rkey_ok(qp, &qp->s_rdma_sge, 1741 ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
1590 qp->s_rdma_len, vaddr, rkey, 1742 rkey, IB_ACCESS_REMOTE_READ);
1591 IB_ACCESS_REMOTE_READ); 1743 if (unlikely(!ok))
1592 if (unlikely(!ok)) {
1593 spin_unlock_irq(&qp->s_lock);
1594 goto nack_acc; 1744 goto nack_acc;
1595 }
1596 /* 1745 /*
1597 * Update the next expected PSN. We add 1 later 1746 * Update the next expected PSN. We add 1 later
1598 * below, so only add the remainder here. 1747 * below, so only add the remainder here.
1599 */ 1748 */
1600 if (qp->s_rdma_len > pmtu) 1749 if (len > pmtu)
1601 qp->r_psn += (qp->s_rdma_len - 1) / pmtu; 1750 qp->r_psn += (len - 1) / pmtu;
1602 } else { 1751 } else {
1603 qp->s_rdma_sge.sg_list = NULL; 1752 e->rdma_sge.sg_list = NULL;
1604 qp->s_rdma_sge.num_sge = 0; 1753 e->rdma_sge.num_sge = 0;
1605 qp->s_rdma_sge.sge.mr = NULL; 1754 e->rdma_sge.sge.mr = NULL;
1606 qp->s_rdma_sge.sge.vaddr = NULL; 1755 e->rdma_sge.sge.vaddr = NULL;
1607 qp->s_rdma_sge.sge.length = 0; 1756 e->rdma_sge.sge.length = 0;
1608 qp->s_rdma_sge.sge.sge_length = 0; 1757 e->rdma_sge.sge.sge_length = 0;
1609 } 1758 }
1759 e->opcode = opcode;
1760 e->psn = psn;
1610 /* 1761 /*
1611 * We need to increment the MSN here instead of when we 1762 * We need to increment the MSN here instead of when we
1612 * finish sending the result since a duplicate request would 1763 * finish sending the result since a duplicate request would
1613 * increment it more than once. 1764 * increment it more than once.
1614 */ 1765 */
1615 qp->r_msn++; 1766 qp->r_msn++;
1616
1617 qp->s_ack_state = opcode;
1618 qp->s_ack_psn = psn;
1619 spin_unlock_irq(&qp->s_lock);
1620
1621 qp->r_psn++; 1767 qp->r_psn++;
1622 qp->r_state = opcode; 1768 qp->r_state = opcode;
1623 qp->r_nak_state = 0; 1769 qp->r_nak_state = 0;
1770 barrier();
1771 qp->r_head_ack_queue = next;
1624 1772
1625 /* Call ipath_do_rc_send() in another thread. */ 1773 /* Call ipath_do_rc_send() in another thread. */
1626 tasklet_hi_schedule(&qp->s_task); 1774 tasklet_hi_schedule(&qp->s_task);
1627 1775
1628 goto done; 1776 goto done;
1777 }
1629 1778
1630 case OP(COMPARE_SWAP): 1779 case OP(COMPARE_SWAP):
1631 case OP(FETCH_ADD): { 1780 case OP(FETCH_ADD): {
1632 struct ib_atomic_eth *ateth; 1781 struct ib_atomic_eth *ateth;
1782 struct ipath_ack_entry *e;
1633 u64 vaddr; 1783 u64 vaddr;
1784 atomic64_t *maddr;
1634 u64 sdata; 1785 u64 sdata;
1635 u32 rkey; 1786 u32 rkey;
1787 u8 next;
1636 1788
1789 if (unlikely(!(qp->qp_access_flags &
1790 IB_ACCESS_REMOTE_ATOMIC)))
1791 goto nack_acc;
1792 next = qp->r_head_ack_queue + 1;
1793 if (next > IPATH_MAX_RDMA_ATOMIC)
1794 next = 0;
1795 if (unlikely(next == qp->s_tail_ack_queue))
1796 goto nack_inv;
1637 if (!header_in_data) 1797 if (!header_in_data)
1638 ateth = &ohdr->u.atomic_eth; 1798 ateth = &ohdr->u.atomic_eth;
1639 else { 1799 else
1640 ateth = (struct ib_atomic_eth *)data; 1800 ateth = (struct ib_atomic_eth *)data;
1641 data += sizeof(*ateth); 1801 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
1642 } 1802 be32_to_cpu(ateth->vaddr[1]);
1643 vaddr = be64_to_cpu(ateth->vaddr);
1644 if (unlikely(vaddr & (sizeof(u64) - 1))) 1803 if (unlikely(vaddr & (sizeof(u64) - 1)))
1645 goto nack_inv; 1804 goto nack_inv;
1646 rkey = be32_to_cpu(ateth->rkey); 1805 rkey = be32_to_cpu(ateth->rkey);
@@ -1649,63 +1808,50 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1649 sizeof(u64), vaddr, rkey, 1808 sizeof(u64), vaddr, rkey,
1650 IB_ACCESS_REMOTE_ATOMIC))) 1809 IB_ACCESS_REMOTE_ATOMIC)))
1651 goto nack_acc; 1810 goto nack_acc;
1652 if (unlikely(!(qp->qp_access_flags &
1653 IB_ACCESS_REMOTE_ATOMIC)))
1654 goto nack_acc;
1655 /* Perform atomic OP and save result. */ 1811 /* Perform atomic OP and save result. */
1812 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
1656 sdata = be64_to_cpu(ateth->swap_data); 1813 sdata = be64_to_cpu(ateth->swap_data);
1657 spin_lock_irq(&dev->pending_lock); 1814 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1658 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr; 1815 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
1659 if (opcode == OP(FETCH_ADD)) 1816 (u64) atomic64_add_return(sdata, maddr) - sdata :
1660 *(u64 *) qp->r_sge.sge.vaddr = 1817 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
1661 qp->r_atomic_data + sdata; 1818 be64_to_cpu(ateth->compare_data),
1662 else if (qp->r_atomic_data == 1819 sdata);
1663 be64_to_cpu(ateth->compare_data)) 1820 e->opcode = opcode;
1664 *(u64 *) qp->r_sge.sge.vaddr = sdata; 1821 e->psn = psn & IPATH_PSN_MASK;
1665 spin_unlock_irq(&dev->pending_lock);
1666 qp->r_msn++; 1822 qp->r_msn++;
1667 qp->r_atomic_psn = psn & IPATH_PSN_MASK; 1823 qp->r_psn++;
1668 psn |= 1 << 31; 1824 qp->r_state = opcode;
1669 break; 1825 qp->r_nak_state = 0;
1826 barrier();
1827 qp->r_head_ack_queue = next;
1828
1829 /* Call ipath_do_rc_send() in another thread. */
1830 tasklet_hi_schedule(&qp->s_task);
1831
1832 goto done;
1670 } 1833 }
1671 1834
1672 default: 1835 default:
1673 /* Drop packet for unknown opcodes. */ 1836 /* NAK unknown opcodes. */
1674 goto done; 1837 goto nack_inv;
1675 } 1838 }
1676 qp->r_psn++; 1839 qp->r_psn++;
1677 qp->r_state = opcode; 1840 qp->r_state = opcode;
1841 qp->r_ack_psn = psn;
1678 qp->r_nak_state = 0; 1842 qp->r_nak_state = 0;
1679 /* Send an ACK if requested or required. */ 1843 /* Send an ACK if requested or required. */
1680 if (psn & (1 << 31)) { 1844 if (psn & (1 << 31))
1681 /*
1682 * Coalesce ACKs unless there is a RDMA READ or
1683 * ATOMIC pending.
1684 */
1685 if (qp->r_ack_state < OP(COMPARE_SWAP)) {
1686 qp->r_ack_state = opcode;
1687 qp->r_ack_psn = psn;
1688 }
1689 goto send_ack; 1845 goto send_ack;
1690 }
1691 goto done; 1846 goto done;
1692 1847
1693nack_acc: 1848nack_acc:
1694 /* 1849 ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
1695 * A NAK will ACK earlier sends and RDMA writes. 1850 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
1696 * Don't queue the NAK if a RDMA read, atomic, or NAK 1851 qp->r_ack_psn = qp->r_psn;
1697 * is pending though. 1852
1698 */
1699 if (qp->r_ack_state < OP(COMPARE_SWAP)) {
1700 ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
1701 qp->r_ack_state = OP(RDMA_WRITE_ONLY);
1702 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
1703 qp->r_ack_psn = qp->r_psn;
1704 }
1705send_ack: 1853send_ack:
1706 /* Send ACK right away unless the send tasklet has a pending ACK. */ 1854 send_rc_ack(qp);
1707 if (qp->s_ack_state == OP(ACKNOWLEDGE))
1708 send_rc_ack(qp);
1709 1855
1710done: 1856done:
1711 return; 1857 return;
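The hunks above replace the single-slot RDMA-read/atomic state (s_rdma_sge, r_atomic_data) with a small ring of pending responses, s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1], indexed by r_head_ack_queue on the receive side and s_tail_ack_queue in the send tasklet. A minimal standalone sketch of that ring discipline follows; MAX_PENDING, struct ack_ring and ring_push() are illustrative stand-ins, not driver code.

#include <stdbool.h>

#define MAX_PENDING 4                     /* stands in for IPATH_MAX_RDMA_ATOMIC */

struct ack_ring {
        unsigned head;                    /* next slot the receive path fills */
        unsigned tail;                    /* next slot the send tasklet drains */
        int entry[MAX_PENDING + 1];       /* one spare slot distinguishes full from empty */
};

/* Returns false when the ring is full, i.e. the point where the receive path
 * above NAKs the request instead of queueing another response. */
static bool ring_push(struct ack_ring *r, int value)
{
        unsigned next = r->head + 1;

        if (next > MAX_PENDING)           /* wrap, same test the driver uses */
                next = 0;
        if (next == r->tail)              /* responder resources exhausted */
                return false;
        r->entry[r->head] = value;        /* fill the slot before publishing it */
        r->head = next;                   /* publish; the driver adds barrier() here */
        return true;
}

The spare slot is what lets head == tail mean "empty" while "next head == tail" means "full", so no separate element count is needed.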
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index dffc76016d3c..c182bcd62098 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -126,9 +126,18 @@
126#define INFINIPATH_E_RESET 0x0004000000000000ULL 126#define INFINIPATH_E_RESET 0x0004000000000000ULL
127#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL 127#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
128 128
129/*
130 * this is used to print "common" packet errors only when the
131 * __IPATH_ERRPKTDBG bit is set in ipath_debug.
132 */
133#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
134 | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
135 | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
136 | INFINIPATH_E_REBP )
137
129/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 138/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
130/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo 139/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
131 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: eagerTID, 3: expTID 140 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
132 * bit 4: flag buffer, 5: datainfo, 6: header info */ 141 * bit 4: flag buffer, 5: datainfo, 6: header info */
133#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL 142#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
134#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40 143#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
@@ -143,8 +152,8 @@
143/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */ 152/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
144#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL 153#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL
145#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL 154#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL
146#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x04ULL 155#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x04ULL
147#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x08ULL 156#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
148#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL 157#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL
149#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL 158#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
150#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL 159#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
@@ -299,13 +308,6 @@
299#define INFINIPATH_XGXS_RX_POL_SHIFT 19 308#define INFINIPATH_XGXS_RX_POL_SHIFT 19
300#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL 309#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
301 310
302#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
303
304/* TID entries (memory), HT-only */
305#define INFINIPATH_RT_VALID 0x8000000000000000ULL
306#define INFINIPATH_RT_ADDR_SHIFT 0
307#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
308#define INFINIPATH_RT_BUFSIZE_SHIFT 48
309 311
310/* 312/*
311 * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our 313 * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index e86cb171872e..d9c2a9b15d86 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -202,6 +202,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
202 wq->tail = tail; 202 wq->tail = tail;
203 203
204 ret = 1; 204 ret = 1;
205 qp->r_wrid_valid = 1;
205 if (handler) { 206 if (handler) {
206 u32 n; 207 u32 n;
207 208
@@ -229,7 +230,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
229 } 230 }
230 } 231 }
231 spin_unlock_irqrestore(&rq->lock, flags); 232 spin_unlock_irqrestore(&rq->lock, flags);
232 qp->r_wrid_valid = 1;
233 233
234bail: 234bail:
235 return ret; 235 return ret;
@@ -255,6 +255,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
255 unsigned long flags; 255 unsigned long flags;
256 struct ib_wc wc; 256 struct ib_wc wc;
257 u64 sdata; 257 u64 sdata;
258 atomic64_t *maddr;
258 259
259 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); 260 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
260 if (!qp) { 261 if (!qp) {
@@ -265,7 +266,8 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
265again: 266again:
266 spin_lock_irqsave(&sqp->s_lock, flags); 267 spin_lock_irqsave(&sqp->s_lock, flags);
267 268
268 if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) { 269 if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
270 qp->s_rnr_timeout) {
269 spin_unlock_irqrestore(&sqp->s_lock, flags); 271 spin_unlock_irqrestore(&sqp->s_lock, flags);
270 goto done; 272 goto done;
271 } 273 }
@@ -310,7 +312,7 @@ again:
310 sqp->s_rnr_retry--; 312 sqp->s_rnr_retry--;
311 dev->n_rnr_naks++; 313 dev->n_rnr_naks++;
312 sqp->s_rnr_timeout = 314 sqp->s_rnr_timeout =
313 ib_ipath_rnr_table[sqp->r_min_rnr_timer]; 315 ib_ipath_rnr_table[qp->r_min_rnr_timer];
314 ipath_insert_rnr_queue(sqp); 316 ipath_insert_rnr_queue(sqp);
315 goto done; 317 goto done;
316 } 318 }
@@ -343,20 +345,22 @@ again:
343 wc.sl = sqp->remote_ah_attr.sl; 345 wc.sl = sqp->remote_ah_attr.sl;
344 wc.dlid_path_bits = 0; 346 wc.dlid_path_bits = 0;
345 wc.port_num = 0; 347 wc.port_num = 0;
348 spin_lock_irqsave(&sqp->s_lock, flags);
346 ipath_sqerror_qp(sqp, &wc); 349 ipath_sqerror_qp(sqp, &wc);
350 spin_unlock_irqrestore(&sqp->s_lock, flags);
347 goto done; 351 goto done;
348 } 352 }
349 break; 353 break;
350 354
351 case IB_WR_RDMA_READ: 355 case IB_WR_RDMA_READ:
356 if (unlikely(!(qp->qp_access_flags &
357 IB_ACCESS_REMOTE_READ)))
358 goto acc_err;
352 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, 359 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
353 wqe->wr.wr.rdma.remote_addr, 360 wqe->wr.wr.rdma.remote_addr,
354 wqe->wr.wr.rdma.rkey, 361 wqe->wr.wr.rdma.rkey,
355 IB_ACCESS_REMOTE_READ))) 362 IB_ACCESS_REMOTE_READ)))
356 goto acc_err; 363 goto acc_err;
357 if (unlikely(!(qp->qp_access_flags &
358 IB_ACCESS_REMOTE_READ)))
359 goto acc_err;
360 qp->r_sge.sge = wqe->sg_list[0]; 364 qp->r_sge.sge = wqe->sg_list[0];
361 qp->r_sge.sg_list = wqe->sg_list + 1; 365 qp->r_sge.sg_list = wqe->sg_list + 1;
362 qp->r_sge.num_sge = wqe->wr.num_sge; 366 qp->r_sge.num_sge = wqe->wr.num_sge;
@@ -364,22 +368,22 @@ again:
364 368
365 case IB_WR_ATOMIC_CMP_AND_SWP: 369 case IB_WR_ATOMIC_CMP_AND_SWP:
366 case IB_WR_ATOMIC_FETCH_AND_ADD: 370 case IB_WR_ATOMIC_FETCH_AND_ADD:
371 if (unlikely(!(qp->qp_access_flags &
372 IB_ACCESS_REMOTE_ATOMIC)))
373 goto acc_err;
367 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), 374 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
368 wqe->wr.wr.rdma.remote_addr, 375 wqe->wr.wr.atomic.remote_addr,
369 wqe->wr.wr.rdma.rkey, 376 wqe->wr.wr.atomic.rkey,
370 IB_ACCESS_REMOTE_ATOMIC))) 377 IB_ACCESS_REMOTE_ATOMIC)))
371 goto acc_err; 378 goto acc_err;
372 /* Perform atomic OP and save result. */ 379 /* Perform atomic OP and save result. */
373 sdata = wqe->wr.wr.atomic.swap; 380 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
374 spin_lock_irqsave(&dev->pending_lock, flags); 381 sdata = wqe->wr.wr.atomic.compare_add;
375 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr; 382 *(u64 *) sqp->s_sge.sge.vaddr =
376 if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) 383 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
377 *(u64 *) qp->r_sge.sge.vaddr = 384 (u64) atomic64_add_return(sdata, maddr) - sdata :
378 qp->r_atomic_data + sdata; 385 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
379 else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add) 386 sdata, wqe->wr.wr.atomic.swap);
380 *(u64 *) qp->r_sge.sge.vaddr = sdata;
381 spin_unlock_irqrestore(&dev->pending_lock, flags);
382 *(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
383 goto send_comp; 387 goto send_comp;
384 388
385 default: 389 default:
@@ -440,7 +444,7 @@ again:
440send_comp: 444send_comp:
441 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; 445 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
442 446
443 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) || 447 if (!(sqp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
444 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 448 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
445 wc.wr_id = wqe->wr.wr_id; 449 wc.wr_id = wqe->wr.wr_id;
446 wc.status = IB_WC_SUCCESS; 450 wc.status = IB_WC_SUCCESS;
@@ -502,7 +506,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
502 * We clear the tasklet flag now since we are committing to return 506 * We clear the tasklet flag now since we are committing to return
503 * from the tasklet function. 507 * from the tasklet function.
504 */ 508 */
505 clear_bit(IPATH_S_BUSY, &qp->s_flags); 509 clear_bit(IPATH_S_BUSY, &qp->s_busy);
506 tasklet_unlock(&qp->s_task); 510 tasklet_unlock(&qp->s_task);
507 want_buffer(dev->dd); 511 want_buffer(dev->dd);
508 dev->n_piowait++; 512 dev->n_piowait++;
@@ -541,6 +545,9 @@ int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
541 wr->sg_list[0].addr & (sizeof(u64) - 1))) { 545 wr->sg_list[0].addr & (sizeof(u64) - 1))) {
542 ret = -EINVAL; 546 ret = -EINVAL;
543 goto bail; 547 goto bail;
548 } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
549 ret = -EINVAL;
550 goto bail;
544 } 551 }
545 /* IB spec says that num_sge == 0 is OK. */ 552 /* IB spec says that num_sge == 0 is OK. */
546 if (wr->num_sge > qp->s_max_sge) { 553 if (wr->num_sge > qp->s_max_sge) {
@@ -647,7 +654,7 @@ void ipath_do_ruc_send(unsigned long data)
647 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); 654 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
648 struct ipath_other_headers *ohdr; 655 struct ipath_other_headers *ohdr;
649 656
650 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) 657 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
651 goto bail; 658 goto bail;
652 659
653 if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) { 660 if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
@@ -683,19 +690,15 @@ again:
683 */ 690 */
684 spin_lock_irqsave(&qp->s_lock, flags); 691 spin_lock_irqsave(&qp->s_lock, flags);
685 692
686 /* Sending responses has higher priority over sending requests. */ 693 if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
687 if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE && 694 ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
688 (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0) 695 ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
689 bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
690 else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
691 ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
692 ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
693 /* 696 /*
694 * Clear the busy bit before unlocking to avoid races with 697 * Clear the busy bit before unlocking to avoid races with
695 * adding new work queue items and then failing to process 698 * adding new work queue items and then failing to process
696 * them. 699 * them.
697 */ 700 */
698 clear_bit(IPATH_S_BUSY, &qp->s_flags); 701 clear_bit(IPATH_S_BUSY, &qp->s_busy);
699 spin_unlock_irqrestore(&qp->s_lock, flags); 702 spin_unlock_irqrestore(&qp->s_lock, flags);
700 goto bail; 703 goto bail;
701 } 704 }
@@ -728,7 +731,7 @@ again:
728 goto again; 731 goto again;
729 732
730clear: 733clear:
731 clear_bit(IPATH_S_BUSY, &qp->s_flags); 734 clear_bit(IPATH_S_BUSY, &qp->s_busy);
732bail: 735bail:
733 return; 736 return;
734} 737}
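Both this file and ipath_rc.c above switch the atomic handlers from a spinlock-protected read/modify/write to a lock-free atomic64_add_return()/cmpxchg() pair. A hedged sketch of that pattern follows; ib_atomic_op() is a hypothetical helper name, and the header locations vary by architecture and kernel version.

#include <linux/types.h>
#include <asm/atomic.h>           /* atomic64_t, atomic64_add_return() on 64-bit arches */
#include <asm/system.h>           /* cmpxchg() lived here in kernels of this vintage */

/* Perform an IB atomic on the target buffer and hand back the original value,
 * which is what gets returned to the requester in the atomic ACK. */
static u64 ib_atomic_op(u64 *target, bool fetch_add, u64 compare_add, u64 swap)
{
        atomic64_t *maddr = (atomic64_t *) target;

        if (fetch_add)
                /* atomic64_add_return() yields the new value; subtracting the
                 * addend recovers the value that was there before. */
                return (u64) atomic64_add_return(compare_add, maddr) - compare_add;

        /* cmpxchg() returns the prior contents whether or not it swapped. */
        return (u64) cmpxchg(target, compare_add, swap);
}

Doing the operation with CPU atomics is presumably also why ipath_query_device() in ipath_verbs.c can now advertise IB_ATOMIC_GLOB: the result is coherent with concurrent CPU access to the same memory.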
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index 30a825928fcf..9307f7187ca5 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -207,7 +207,7 @@ void ipath_get_faststats(unsigned long opaque)
207 * don't access the chip while running diags, or memory diags can 207 * don't access the chip while running diags, or memory diags can
208 * fail 208 * fail
209 */ 209 */
210 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) || 210 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
211 ipath_diag_inuse) 211 ipath_diag_inuse)
212 /* but re-arm the timer, for diags case; won't hurt other */ 212 /* but re-arm the timer, for diags case; won't hurt other */
213 goto done; 213 goto done;
@@ -237,11 +237,13 @@ void ipath_get_faststats(unsigned long opaque)
237 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) 237 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
238 && time_after(jiffies, dd->ipath_unmasktime)) { 238 && time_after(jiffies, dd->ipath_unmasktime)) {
239 char ebuf[256]; 239 char ebuf[256];
240 ipath_decode_err(ebuf, sizeof ebuf, 240 int iserr;
241 iserr = ipath_decode_err(ebuf, sizeof ebuf,
241 (dd->ipath_maskederrs & ~dd-> 242 (dd->ipath_maskederrs & ~dd->
242 ipath_ignorederrs)); 243 ipath_ignorederrs));
243 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) & 244 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
244 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL)) 245 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
246 INFINIPATH_E_PKTERRS ))
245 ipath_dev_err(dd, "Re-enabling masked errors " 247 ipath_dev_err(dd, "Re-enabling masked errors "
246 "(%s)\n", ebuf); 248 "(%s)\n", ebuf);
247 else { 249 else {
@@ -252,8 +254,12 @@ void ipath_get_faststats(unsigned long opaque)
252 * them. So only complain about these at debug 254 * them. So only complain about these at debug
253 * level. 255 * level.
254 */ 256 */
255 ipath_dbg("Disabling frequent queue full errors " 257 if (iserr)
256 "(%s)\n", ebuf); 258 ipath_dbg("Re-enabling queue full errors (%s)\n",
259 ebuf);
260 else
261 ipath_cdbg(ERRPKT, "Re-enabling packet"
262 " problem interrupt (%s)\n", ebuf);
257 } 263 }
258 dd->ipath_maskederrs = dd->ipath_ignorederrs; 264 dd->ipath_maskederrs = dd->ipath_ignorederrs;
259 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 265 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
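The re-enable path above now splits the temporarily masked errors three ways: anything beyond the expected queue-full and per-packet bits is reported as a device error, queue-full conditions go to the normal debug level, and pure packet errors only appear under the ERRPKT debug class. A rough sketch of that classification; the bit values are stand-ins for the INFINIPATH_E_* constants, and the queue-full test here approximates the iserr flag the driver gets back from ipath_decode_err().

#include <stdint.h>

enum err_class { ERRS_SERIOUS, ERRS_QUEUE_FULL, ERRS_PKT_ONLY };

#define ERR_RCVEGRFULL  (1ULL << 0)                 /* eager receive queue overflowed  */
#define ERR_RCVHDRFULL  (1ULL << 1)                 /* header receive queue overflowed */
#define ERR_PKTERRS     ((1ULL << 2) | (1ULL << 3)) /* per-packet CRC/length errors    */

static enum err_class classify_masked_errors(uint64_t masked, uint64_t ignored)
{
        uint64_t errs = masked & ~ignored;

        if (errs & ~(ERR_RCVEGRFULL | ERR_RCVHDRFULL | ERR_PKTERRS))
                return ERRS_SERIOUS;                /* -> ipath_dev_err()         */
        if (errs & (ERR_RCVEGRFULL | ERR_RCVHDRFULL))
                return ERRS_QUEUE_FULL;             /* -> ipath_dbg()             */
        return ERRS_PKT_ONLY;                       /* -> ipath_cdbg(ERRPKT, ...) */
}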
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 325d6634ff53..1c2b03c2ef5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -42,7 +42,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
42{ 42{
43 if (++qp->s_last == qp->s_size) 43 if (++qp->s_last == qp->s_size)
44 qp->s_last = 0; 44 qp->s_last = 0;
45 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) || 45 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
46 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 46 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
47 wc->wr_id = wqe->wr.wr_id; 47 wc->wr_id = wqe->wr.wr_id;
48 wc->status = IB_WC_SUCCESS; 48 wc->status = IB_WC_SUCCESS;
@@ -344,13 +344,13 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
344 send_first: 344 send_first:
345 if (qp->r_reuse_sge) { 345 if (qp->r_reuse_sge) {
346 qp->r_reuse_sge = 0; 346 qp->r_reuse_sge = 0;
347 qp->r_sge = qp->s_rdma_sge; 347 qp->r_sge = qp->s_rdma_read_sge;
348 } else if (!ipath_get_rwqe(qp, 0)) { 348 } else if (!ipath_get_rwqe(qp, 0)) {
349 dev->n_pkt_drops++; 349 dev->n_pkt_drops++;
350 goto done; 350 goto done;
351 } 351 }
352 /* Save the WQE so we can reuse it in case of an error. */ 352 /* Save the WQE so we can reuse it in case of an error. */
353 qp->s_rdma_sge = qp->r_sge; 353 qp->s_rdma_read_sge = qp->r_sge;
354 qp->r_rcv_len = 0; 354 qp->r_rcv_len = 0;
355 if (opcode == OP(SEND_ONLY)) 355 if (opcode == OP(SEND_ONLY))
356 goto send_last; 356 goto send_last;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 9a3e54664ee4..a518f7c8fa83 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -308,6 +308,11 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
308 goto bail; 308 goto bail;
309 } 309 }
310 310
311 if (wr->wr.ud.ah->pd != qp->ibqp.pd) {
312 ret = -EPERM;
313 goto bail;
314 }
315
311 /* IB spec says that num_sge == 0 is OK. */ 316 /* IB spec says that num_sge == 0 is OK. */
312 if (wr->num_sge > qp->s_max_sge) { 317 if (wr->num_sge > qp->s_max_sge) {
313 ret = -EINVAL; 318 ret = -EINVAL;
@@ -467,7 +472,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
467 472
468done: 473done:
469 /* Queue the completion status entry. */ 474 /* Queue the completion status entry. */
470 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) || 475 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
471 (wr->send_flags & IB_SEND_SIGNALED)) { 476 (wr->send_flags & IB_SEND_SIGNALED)) {
472 wc.wr_id = wr->wr_id; 477 wc.wr_id = wr->wr_id;
473 wc.status = IB_WC_SUCCESS; 478 wc.status = IB_WC_SUCCESS;
@@ -647,6 +652,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
647 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); 652 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
648 ipath_copy_sge(&qp->r_sge, data, 653 ipath_copy_sge(&qp->r_sge, data,
649 wc.byte_len - sizeof(struct ib_grh)); 654 wc.byte_len - sizeof(struct ib_grh));
655 qp->r_wrid_valid = 0;
650 wc.wr_id = qp->r_wr_id; 656 wc.wr_id = qp->r_wr_id;
651 wc.status = IB_WC_SUCCESS; 657 wc.status = IB_WC_SUCCESS;
652 wc.opcode = IB_WC_RECV; 658 wc.opcode = IB_WC_RECV;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 2aaacdb7e52a..18c6df2052c2 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -438,6 +438,10 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
438 struct ipath_mcast *mcast; 438 struct ipath_mcast *mcast;
439 struct ipath_mcast_qp *p; 439 struct ipath_mcast_qp *p;
440 440
441 if (lnh != IPATH_LRH_GRH) {
442 dev->n_pkt_drops++;
443 goto bail;
444 }
441 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid); 445 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
442 if (mcast == NULL) { 446 if (mcast == NULL) {
443 dev->n_pkt_drops++; 447 dev->n_pkt_drops++;
@@ -445,8 +449,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
445 } 449 }
446 dev->n_multicast_rcv++; 450 dev->n_multicast_rcv++;
447 list_for_each_entry_rcu(p, &mcast->qp_list, list) 451 list_for_each_entry_rcu(p, &mcast->qp_list, list)
448 ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data, 452 ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
449 tlen, p->qp);
450 /* 453 /*
451 * Notify ipath_multicast_detach() if it is waiting for us 454 * Notify ipath_multicast_detach() if it is waiting for us
452 * to finish. 455 * to finish.
@@ -773,7 +776,6 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
773 /* +1 is for the qword padding of pbc */ 776 /* +1 is for the qword padding of pbc */
774 plen = hdrwords + ((len + 3) >> 2) + 1; 777 plen = hdrwords + ((len + 3) >> 2) + 1;
775 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { 778 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
776 ipath_dbg("packet len 0x%x too long, failing\n", plen);
777 ret = -EINVAL; 779 ret = -EINVAL;
778 goto bail; 780 goto bail;
779 } 781 }
@@ -980,14 +982,14 @@ static int ipath_query_device(struct ib_device *ibdev,
980 props->max_cqe = ib_ipath_max_cqes; 982 props->max_cqe = ib_ipath_max_cqes;
981 props->max_mr = dev->lk_table.max; 983 props->max_mr = dev->lk_table.max;
982 props->max_pd = ib_ipath_max_pds; 984 props->max_pd = ib_ipath_max_pds;
983 props->max_qp_rd_atom = 1; 985 props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
984 props->max_qp_init_rd_atom = 1; 986 props->max_qp_init_rd_atom = 255;
985 /* props->max_res_rd_atom */ 987 /* props->max_res_rd_atom */
986 props->max_srq = ib_ipath_max_srqs; 988 props->max_srq = ib_ipath_max_srqs;
987 props->max_srq_wr = ib_ipath_max_srq_wrs; 989 props->max_srq_wr = ib_ipath_max_srq_wrs;
988 props->max_srq_sge = ib_ipath_max_srq_sges; 990 props->max_srq_sge = ib_ipath_max_srq_sges;
989 /* props->local_ca_ack_delay */ 991 /* props->local_ca_ack_delay */
990 props->atomic_cap = IB_ATOMIC_HCA; 992 props->atomic_cap = IB_ATOMIC_GLOB;
991 props->max_pkeys = ipath_get_npkeys(dev->dd); 993 props->max_pkeys = ipath_get_npkeys(dev->dd);
992 props->max_mcast_grp = ib_ipath_max_mcast_grps; 994 props->max_mcast_grp = ib_ipath_max_mcast_grps;
993 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; 995 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
@@ -1557,7 +1559,6 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1557 dev->node_type = RDMA_NODE_IB_CA; 1559 dev->node_type = RDMA_NODE_IB_CA;
1558 dev->phys_port_cnt = 1; 1560 dev->phys_port_cnt = 1;
1559 dev->dma_device = &dd->pcidev->dev; 1561 dev->dma_device = &dd->pcidev->dev;
1560 dev->class_dev.dev = dev->dma_device;
1561 dev->query_device = ipath_query_device; 1562 dev->query_device = ipath_query_device;
1562 dev->modify_device = ipath_modify_device; 1563 dev->modify_device = ipath_modify_device;
1563 dev->query_port = ipath_query_port; 1564 dev->query_port = ipath_query_port;
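With the switch to IB_ATOMIC_GLOB and the larger rd_atomic limits advertised above, a consumer would typically probe these capabilities through the standard verbs query. A minimal sketch using the stock ib_query_device() API of this kernel generation; check_atomic_support() is an illustrative name, not an existing helper.

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int check_atomic_support(struct ib_device *ibdev)
{
        struct ib_device_attr attr;
        int ret = ib_query_device(ibdev, &attr);

        if (ret)
                return ret;
        /* IB_ATOMIC_GLOB: atomics are coherent with CPU accesses as well,
         * which is what the atomic64/cmpxchg implementation provides. */
        if (attr.atomic_cap != IB_ATOMIC_GLOB)
                return -ENOSYS;
        /* Upper bound on RDMA reads/atomics a responder QP will buffer. */
        return attr.max_qp_rd_atom;
}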
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index c0c8d5b24a7d..7c4929f1cb5b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -40,9 +40,12 @@
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/kref.h> 41#include <linux/kref.h>
42#include <rdma/ib_pack.h> 42#include <rdma/ib_pack.h>
43#include <rdma/ib_user_verbs.h>
43 44
44#include "ipath_layer.h" 45#include "ipath_layer.h"
45 46
47#define IPATH_MAX_RDMA_ATOMIC 4
48
46#define QPN_MAX (1 << 24) 49#define QPN_MAX (1 << 24)
47#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) 50#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
48 51
@@ -89,7 +92,7 @@ struct ib_reth {
89} __attribute__ ((packed)); 92} __attribute__ ((packed));
90 93
91struct ib_atomic_eth { 94struct ib_atomic_eth {
92 __be64 vaddr; 95 __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
93 __be32 rkey; 96 __be32 rkey;
94 __be64 swap_data; 97 __be64 swap_data;
95 __be64 compare_data; 98 __be64 compare_data;
@@ -108,7 +111,7 @@ struct ipath_other_headers {
108 } rc; 111 } rc;
109 struct { 112 struct {
110 __be32 aeth; 113 __be32 aeth;
111 __be64 atomic_ack_eth; 114 __be32 atomic_ack_eth[2];
112 } at; 115 } at;
113 __be32 imm_data; 116 __be32 imm_data;
114 __be32 aeth; 117 __be32 aeth;
@@ -186,7 +189,7 @@ struct ipath_mmap_info {
186struct ipath_cq_wc { 189struct ipath_cq_wc {
187 u32 head; /* index of next entry to fill */ 190 u32 head; /* index of next entry to fill */
188 u32 tail; /* index of next ib_poll_cq() entry */ 191 u32 tail; /* index of next ib_poll_cq() entry */
189 struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */ 192 struct ib_uverbs_wc queue[1]; /* this is actually size ibcq.cqe + 1 */
190}; 193};
191 194
192/* 195/*
@@ -312,6 +315,19 @@ struct ipath_sge_state {
312}; 315};
313 316
314/* 317/*
318 * This structure holds the information that the send tasklet needs
319 * to send a RDMA read response or atomic operation.
320 */
321struct ipath_ack_entry {
322 u8 opcode;
323 u32 psn;
324 union {
325 struct ipath_sge_state rdma_sge;
326 u64 atomic_data;
327 };
328};
329
330/*
315 * Variables prefixed with s_ are for the requester (sender). 331 * Variables prefixed with s_ are for the requester (sender).
316 * Variables prefixed with r_ are for the responder (receiver). 332 * Variables prefixed with r_ are for the responder (receiver).
317 * Variables prefixed with ack_ are for responder replies. 333 * Variables prefixed with ack_ are for responder replies.
@@ -333,24 +349,24 @@ struct ipath_qp {
333 struct ipath_mmap_info *ip; 349 struct ipath_mmap_info *ip;
334 struct ipath_sge_state *s_cur_sge; 350 struct ipath_sge_state *s_cur_sge;
335 struct ipath_sge_state s_sge; /* current send request data */ 351 struct ipath_sge_state s_sge; /* current send request data */
336 /* current RDMA read send data */ 352 struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
337 struct ipath_sge_state s_rdma_sge; 353 struct ipath_sge_state s_ack_rdma_sge;
354 struct ipath_sge_state s_rdma_read_sge;
338 struct ipath_sge_state r_sge; /* current receive data */ 355 struct ipath_sge_state r_sge; /* current receive data */
339 spinlock_t s_lock; 356 spinlock_t s_lock;
340 unsigned long s_flags; 357 unsigned long s_busy;
341 u32 s_hdrwords; /* size of s_hdr in 32 bit words */ 358 u32 s_hdrwords; /* size of s_hdr in 32 bit words */
342 u32 s_cur_size; /* size of send packet in bytes */ 359 u32 s_cur_size; /* size of send packet in bytes */
343 u32 s_len; /* total length of s_sge */ 360 u32 s_len; /* total length of s_sge */
344 u32 s_rdma_len; /* total length of s_rdma_sge */ 361 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
345 u32 s_next_psn; /* PSN for next request */ 362 u32 s_next_psn; /* PSN for next request */
346 u32 s_last_psn; /* last response PSN processed */ 363 u32 s_last_psn; /* last response PSN processed */
347 u32 s_psn; /* current packet sequence number */ 364 u32 s_psn; /* current packet sequence number */
348 u32 s_ack_psn; /* PSN for RDMA_READ */ 365 u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
366 u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
349 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ 367 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
350 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ 368 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
351 u64 r_wr_id; /* ID for current receive WQE */ 369 u64 r_wr_id; /* ID for current receive WQE */
352 u64 r_atomic_data; /* data for last atomic op */
353 u32 r_atomic_psn; /* PSN of last atomic op */
354 u32 r_len; /* total length of r_sge */ 370 u32 r_len; /* total length of r_sge */
355 u32 r_rcv_len; /* receive data len processed */ 371 u32 r_rcv_len; /* receive data len processed */
356 u32 r_psn; /* expected rcv packet sequence number */ 372 u32 r_psn; /* expected rcv packet sequence number */
@@ -360,12 +376,13 @@ struct ipath_qp {
360 u8 s_ack_state; /* opcode of packet to ACK */ 376 u8 s_ack_state; /* opcode of packet to ACK */
361 u8 s_nak_state; /* non-zero if NAK is pending */ 377 u8 s_nak_state; /* non-zero if NAK is pending */
362 u8 r_state; /* opcode of last packet received */ 378 u8 r_state; /* opcode of last packet received */
363 u8 r_ack_state; /* opcode of packet to ACK */
364 u8 r_nak_state; /* non-zero if NAK is pending */ 379 u8 r_nak_state; /* non-zero if NAK is pending */
365 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ 380 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
366 u8 r_reuse_sge; /* for UC receive errors */ 381 u8 r_reuse_sge; /* for UC receive errors */
367 u8 r_sge_inx; /* current index into sg_list */ 382 u8 r_sge_inx; /* current index into sg_list */
368 u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */ 383 u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */
384 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
385 u8 r_head_ack_queue; /* index into s_ack_queue[] */
369 u8 qp_access_flags; 386 u8 qp_access_flags;
370 u8 s_max_sge; /* size of s_wq->sg_list */ 387 u8 s_max_sge; /* size of s_wq->sg_list */
371 u8 s_retry_cnt; /* number of times to retry */ 388 u8 s_retry_cnt; /* number of times to retry */
@@ -374,6 +391,10 @@ struct ipath_qp {
374 u8 s_rnr_retry; /* requester RNR retry counter */ 391 u8 s_rnr_retry; /* requester RNR retry counter */
375 u8 s_wait_credit; /* limit number of unacked packets sent */ 392 u8 s_wait_credit; /* limit number of unacked packets sent */
376 u8 s_pkey_index; /* PKEY index to use */ 393 u8 s_pkey_index; /* PKEY index to use */
394 u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
395 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
396 u8 s_tail_ack_queue; /* index into s_ack_queue[] */
397 u8 s_flags;
377 u8 timeout; /* Timeout for this QP */ 398 u8 timeout; /* Timeout for this QP */
378 enum ib_mtu path_mtu; 399 enum ib_mtu path_mtu;
379 u32 remote_qpn; 400 u32 remote_qpn;
@@ -390,11 +411,16 @@ struct ipath_qp {
390 struct ipath_sge r_sg_list[0]; /* verified SGEs */ 411 struct ipath_sge r_sg_list[0]; /* verified SGEs */
391}; 412};
392 413
414/* Bit definition for s_busy. */
415#define IPATH_S_BUSY 0
416
393/* 417/*
394 * Bit definitions for s_flags. 418 * Bit definitions for s_flags.
395 */ 419 */
396#define IPATH_S_BUSY 0 420#define IPATH_S_SIGNAL_REQ_WR 0x01
397#define IPATH_S_SIGNAL_REQ_WR 1 421#define IPATH_S_FENCE_PENDING 0x02
422#define IPATH_S_RDMAR_PENDING 0x04
423#define IPATH_S_ACK_PENDING 0x08
398 424
399#define IPATH_PSN_CREDIT 2048 425#define IPATH_PSN_CREDIT 2048
400 426
@@ -706,8 +732,6 @@ int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
706 732
707int ipath_destroy_srq(struct ib_srq *ibsrq); 733int ipath_destroy_srq(struct ib_srq *ibsrq);
708 734
709void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
710
711int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); 735int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
712 736
713struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, 737struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
@@ -757,9 +781,6 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
757 781
758void ipath_do_ruc_send(unsigned long data); 782void ipath_do_ruc_send(unsigned long data);
759 783
760u32 ipath_make_rc_ack(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
761 u32 pmtu);
762
763int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, 784int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
764 u32 pmtu, u32 *bth0p, u32 *bth2p); 785 u32 pmtu, u32 *bth0p, u32 *bth2p);
765 786
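The header changes above split the old s_flags into an atomic s_busy word, claimed with test_and_set_bit() by whichever context wins the right to build a packet, and plain s_flags bits that are only read and written under qp->s_lock. A hedged sketch of that split; my_qp, run_send() and the MY_S_* names are illustrative.

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#define MY_S_BUSY           0        /* bit number, used with atomic bitops   */
#define MY_S_SIGNAL_REQ_WR  0x01     /* plain mask, only touched under s_lock */

struct my_qp {
        unsigned long s_busy;        /* atomic bitops require an unsigned long */
        u8 s_flags;                  /* cheap non-atomic flags                 */
        spinlock_t s_lock;
};

static int run_send(struct my_qp *qp)
{
        int signal_all;

        if (test_and_set_bit(MY_S_BUSY, &qp->s_busy))
                return 0;            /* another context is already sending */

        spin_lock_irq(&qp->s_lock);
        signal_all = !(qp->s_flags & MY_S_SIGNAL_REQ_WR);  /* plain read, lock held */
        spin_unlock_irq(&qp->s_lock);

        /* ... build and post the packet here ... */

        clear_bit(MY_S_BUSY, &qp->s_busy);
        return signal_all;           /* nonzero: complete every WR, not just signaled ones */
}

Keeping the busy bit in its own word is what lets s_flags shrink to a u8 and drop the test_bit()/set_bit() overhead in the hot path.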
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 0d9b7d06bbc2..773145e29947 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1013,14 +1013,14 @@ static struct {
1013 u64 latest_fw; 1013 u64 latest_fw;
1014 u32 flags; 1014 u32 flags;
1015} mthca_hca_table[] = { 1015} mthca_hca_table[] = {
1016 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), 1016 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
1017 .flags = 0 }, 1017 .flags = 0 },
1018 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600), 1018 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
1019 .flags = MTHCA_FLAG_PCIE }, 1019 .flags = MTHCA_FLAG_PCIE },
1020 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400), 1020 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 2, 0),
1021 .flags = MTHCA_FLAG_MEMFREE | 1021 .flags = MTHCA_FLAG_MEMFREE |
1022 MTHCA_FLAG_PCIE }, 1022 MTHCA_FLAG_PCIE },
1023 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0), 1023 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
1024 .flags = MTHCA_FLAG_MEMFREE | 1024 .flags = MTHCA_FLAG_MEMFREE |
1025 MTHCA_FLAG_PCIE | 1025 MTHCA_FLAG_PCIE |
1026 MTHCA_FLAG_SINAI_OPT } 1026 MTHCA_FLAG_SINAI_OPT }
@@ -1135,7 +1135,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1135 goto err_cmd; 1135 goto err_cmd;
1136 1136
1137 if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { 1137 if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
1138 mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n", 1138 mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n",
1139 (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, 1139 (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
1140 (int) (mdev->fw_ver & 0xffff), 1140 (int) (mdev->fw_ver & 0xffff),
1141 (int) (mthca_hca_table[hca_type].latest_fw >> 32), 1141 (int) (mthca_hca_table[hca_type].latest_fw >> 32),
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index fdb576dcfaa8..aa6c70a6a36f 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -297,7 +297,8 @@ out:
297 297
298int mthca_write_mtt_size(struct mthca_dev *dev) 298int mthca_write_mtt_size(struct mthca_dev *dev)
299{ 299{
300 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy) 300 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
301 !(dev->mthca_flags & MTHCA_FLAG_FMR))
301 /* 302 /*
302 * Be friendly to WRITE_MTT command 303 * Be friendly to WRITE_MTT command
303 * and leave two empty slots for the 304 * and leave two empty slots for the
@@ -355,7 +356,8 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
355 int size = mthca_write_mtt_size(dev); 356 int size = mthca_write_mtt_size(dev);
356 int chunk; 357 int chunk;
357 358
358 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy) 359 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
360 !(dev->mthca_flags & MTHCA_FLAG_FMR))
359 return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len); 361 return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);
360 362
361 while (list_len > 0) { 363 while (list_len > 0) {
@@ -835,6 +837,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
835 837
836 key = arbel_key_to_hw_index(fmr->ibmr.lkey); 838 key = arbel_key_to_hw_index(fmr->ibmr.lkey);
837 key &= dev->limits.num_mpts - 1; 839 key &= dev->limits.num_mpts - 1;
840 key = adjust_key(dev, key);
838 fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key); 841 fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
839 842
840 fmr->maps = 0; 843 fmr->maps = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0725ad7ad9bf..47e6fd46d9c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1293,7 +1293,6 @@ int mthca_register_device(struct mthca_dev *dev)
1293 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 1293 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
1294 dev->ib_dev.phys_port_cnt = dev->limits.num_ports; 1294 dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
1295 dev->ib_dev.dma_device = &dev->pdev->dev; 1295 dev->ib_dev.dma_device = &dev->pdev->dev;
1296 dev->ib_dev.class_dev.dev = &dev->pdev->dev;
1297 dev->ib_dev.query_device = mthca_query_device; 1296 dev->ib_dev.query_device = mthca_query_device;
1298 dev->ib_dev.query_port = mthca_query_port; 1297 dev->ib_dev.query_port = mthca_query_port;
1299 dev->ib_dev.modify_device = mthca_modify_device; 1298 dev->ib_dev.modify_device = mthca_modify_device;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 1c6b63aca268..8fe6fee7a97a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1419,11 +1419,10 @@ void mthca_free_qp(struct mthca_dev *dev,
1419 * unref the mem-free tables and free the QPN in our table. 1419 * unref the mem-free tables and free the QPN in our table.
1420 */ 1420 */
1421 if (!qp->ibqp.uobject) { 1421 if (!qp->ibqp.uobject) {
1422 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, 1422 mthca_cq_clean(dev, recv_cq, qp->qpn,
1423 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1423 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1424 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1424 if (send_cq != recv_cq)
1425 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 1425 mthca_cq_clean(dev, send_cq, qp->qpn, NULL);
1426 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1427 1426
1428 mthca_free_memfree(dev, qp); 1427 mthca_free_memfree(dev, qp);
1429 mthca_free_wqe_buf(dev, qp); 1428 mthca_free_wqe_buf(dev, qp);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2b242a4823f8..0c4e59b906cd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -228,7 +228,6 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
228 struct net_device *dev = cm_id->context; 228 struct net_device *dev = cm_id->context;
229 struct ipoib_dev_priv *priv = netdev_priv(dev); 229 struct ipoib_dev_priv *priv = netdev_priv(dev);
230 struct ipoib_cm_rx *p; 230 struct ipoib_cm_rx *p;
231 unsigned long flags;
232 unsigned psn; 231 unsigned psn;
233 int ret; 232 int ret;
234 233
@@ -257,9 +256,9 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
257 256
258 cm_id->context = p; 257 cm_id->context = p;
259 p->jiffies = jiffies; 258 p->jiffies = jiffies;
260 spin_lock_irqsave(&priv->lock, flags); 259 spin_lock_irq(&priv->lock);
261 list_add(&p->list, &priv->cm.passive_ids); 260 list_add(&p->list, &priv->cm.passive_ids);
262 spin_unlock_irqrestore(&priv->lock, flags); 261 spin_unlock_irq(&priv->lock);
263 queue_delayed_work(ipoib_workqueue, 262 queue_delayed_work(ipoib_workqueue,
264 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); 263 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
265 return 0; 264 return 0;
@@ -277,7 +276,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
277{ 276{
278 struct ipoib_cm_rx *p; 277 struct ipoib_cm_rx *p;
279 struct ipoib_dev_priv *priv; 278 struct ipoib_dev_priv *priv;
280 unsigned long flags;
281 int ret; 279 int ret;
282 280
283 switch (event->event) { 281 switch (event->event) {
@@ -290,14 +288,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
290 case IB_CM_REJ_RECEIVED: 288 case IB_CM_REJ_RECEIVED:
291 p = cm_id->context; 289 p = cm_id->context;
292 priv = netdev_priv(p->dev); 290 priv = netdev_priv(p->dev);
293 spin_lock_irqsave(&priv->lock, flags); 291 spin_lock_irq(&priv->lock);
294 if (list_empty(&p->list)) 292 if (list_empty(&p->list))
295 ret = 0; /* Connection is going away already. */ 293 ret = 0; /* Connection is going away already. */
296 else { 294 else {
297 list_del_init(&p->list); 295 list_del_init(&p->list);
298 ret = -ECONNRESET; 296 ret = -ECONNRESET;
299 } 297 }
300 spin_unlock_irqrestore(&priv->lock, flags); 298 spin_unlock_irq(&priv->lock);
301 if (ret) { 299 if (ret) {
302 ib_destroy_qp(p->qp); 300 ib_destroy_qp(p->qp);
303 kfree(p); 301 kfree(p);
@@ -351,8 +349,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
351 u64 mapping[IPOIB_CM_RX_SG]; 349 u64 mapping[IPOIB_CM_RX_SG];
352 int frags; 350 int frags;
353 351
354 ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n", 352 ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
355 wr_id, wc->opcode, wc->status); 353 wr_id, wc->status);
356 354
357 if (unlikely(wr_id >= ipoib_recvq_size)) { 355 if (unlikely(wr_id >= ipoib_recvq_size)) {
358 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", 356 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -408,7 +406,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
408 skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); 406 skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
409 407
410 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 408 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
411 skb->mac.raw = skb->data; 409 skb_reset_mac_header(skb);
412 skb_pull(skb, IPOIB_ENCAP_LEN); 410 skb_pull(skb, IPOIB_ENCAP_LEN);
413 411
414 dev->last_rx = jiffies; 412 dev->last_rx = jiffies;
@@ -504,8 +502,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
504 struct ipoib_tx_buf *tx_req; 502 struct ipoib_tx_buf *tx_req;
505 unsigned long flags; 503 unsigned long flags;
506 504
507 ipoib_dbg_data(priv, "cm send completion: id %d, op %d, status: %d\n", 505 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
508 wr_id, wc->opcode, wc->status); 506 wr_id, wc->status);
509 507
510 if (unlikely(wr_id >= ipoib_sendq_size)) { 508 if (unlikely(wr_id >= ipoib_sendq_size)) {
511 ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", 509 ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
@@ -612,23 +610,22 @@ void ipoib_cm_dev_stop(struct net_device *dev)
612{ 610{
613 struct ipoib_dev_priv *priv = netdev_priv(dev); 611 struct ipoib_dev_priv *priv = netdev_priv(dev);
614 struct ipoib_cm_rx *p; 612 struct ipoib_cm_rx *p;
615 unsigned long flags;
616 613
617 if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) 614 if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
618 return; 615 return;
619 616
620 ib_destroy_cm_id(priv->cm.id); 617 ib_destroy_cm_id(priv->cm.id);
621 spin_lock_irqsave(&priv->lock, flags); 618 spin_lock_irq(&priv->lock);
622 while (!list_empty(&priv->cm.passive_ids)) { 619 while (!list_empty(&priv->cm.passive_ids)) {
623 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); 620 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
624 list_del_init(&p->list); 621 list_del_init(&p->list);
625 spin_unlock_irqrestore(&priv->lock, flags); 622 spin_unlock_irq(&priv->lock);
626 ib_destroy_cm_id(p->id); 623 ib_destroy_cm_id(p->id);
627 ib_destroy_qp(p->qp); 624 ib_destroy_qp(p->qp);
628 kfree(p); 625 kfree(p);
629 spin_lock_irqsave(&priv->lock, flags); 626 spin_lock_irq(&priv->lock);
630 } 627 }
631 spin_unlock_irqrestore(&priv->lock, flags); 628 spin_unlock_irq(&priv->lock);
632 629
633 cancel_delayed_work(&priv->cm.stale_task); 630 cancel_delayed_work(&priv->cm.stale_task);
634} 631}
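The ipoib_cm locking hunks in this file all follow one pattern: callers known to run in process or workqueue context, where interrupts are enabled on entry, drop the irqsave/irqrestore pair and its on-stack flags word in favour of spin_lock_irq(). A small sketch of the two forms; demo_lock and the function names are illustrative.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Usable from any context: the current IRQ state is saved and restored. */
static void from_unknown_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* Only correct when IRQs are known to be enabled on entry (process or
 * workqueue context): unlock unconditionally re-enables interrupts. */
static void from_process_context(void)
{
        spin_lock_irq(&demo_lock);
        /* ... critical section ... */
        spin_unlock_irq(&demo_lock);
}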
@@ -642,7 +639,6 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
642 struct ib_qp_attr qp_attr; 639 struct ib_qp_attr qp_attr;
643 int qp_attr_mask, ret; 640 int qp_attr_mask, ret;
644 struct sk_buff *skb; 641 struct sk_buff *skb;
645 unsigned long flags;
646 642
647 p->mtu = be32_to_cpu(data->mtu); 643 p->mtu = be32_to_cpu(data->mtu);
648 644
@@ -680,12 +676,12 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
680 676
681 skb_queue_head_init(&skqueue); 677 skb_queue_head_init(&skqueue);
682 678
683 spin_lock_irqsave(&priv->lock, flags); 679 spin_lock_irq(&priv->lock);
684 set_bit(IPOIB_FLAG_OPER_UP, &p->flags); 680 set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
685 if (p->neigh) 681 if (p->neigh)
686 while ((skb = __skb_dequeue(&p->neigh->queue))) 682 while ((skb = __skb_dequeue(&p->neigh->queue)))
687 __skb_queue_tail(&skqueue, skb); 683 __skb_queue_tail(&skqueue, skb);
688 spin_unlock_irqrestore(&priv->lock, flags); 684 spin_unlock_irq(&priv->lock);
689 685
690 while ((skb = __skb_dequeue(&skqueue))) { 686 while ((skb = __skb_dequeue(&skqueue))) {
691 skb->dev = p->dev; 687 skb->dev = p->dev;
@@ -895,7 +891,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
895 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); 891 struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
896 struct net_device *dev = priv->dev; 892 struct net_device *dev = priv->dev;
897 struct ipoib_neigh *neigh; 893 struct ipoib_neigh *neigh;
898 unsigned long flags;
899 int ret; 894 int ret;
900 895
901 switch (event->event) { 896 switch (event->event) {
@@ -914,7 +909,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
914 case IB_CM_REJ_RECEIVED: 909 case IB_CM_REJ_RECEIVED:
915 case IB_CM_TIMEWAIT_EXIT: 910 case IB_CM_TIMEWAIT_EXIT:
916 ipoib_dbg(priv, "CM error %d.\n", event->event); 911 ipoib_dbg(priv, "CM error %d.\n", event->event);
917 spin_lock_irqsave(&priv->tx_lock, flags); 912 spin_lock_irq(&priv->tx_lock);
918 spin_lock(&priv->lock); 913 spin_lock(&priv->lock);
919 neigh = tx->neigh; 914 neigh = tx->neigh;
920 915
@@ -934,7 +929,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
934 } 929 }
935 930
936 spin_unlock(&priv->lock); 931 spin_unlock(&priv->lock);
937 spin_unlock_irqrestore(&priv->tx_lock, flags); 932 spin_unlock_irq(&priv->tx_lock);
938 break; 933 break;
939 default: 934 default:
940 break; 935 break;
@@ -1023,21 +1018,20 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
1023 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1018 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1024 cm.reap_task); 1019 cm.reap_task);
1025 struct ipoib_cm_tx *p; 1020 struct ipoib_cm_tx *p;
1026 unsigned long flags;
1027 1021
1028 spin_lock_irqsave(&priv->tx_lock, flags); 1022 spin_lock_irq(&priv->tx_lock);
1029 spin_lock(&priv->lock); 1023 spin_lock(&priv->lock);
1030 while (!list_empty(&priv->cm.reap_list)) { 1024 while (!list_empty(&priv->cm.reap_list)) {
1031 p = list_entry(priv->cm.reap_list.next, typeof(*p), list); 1025 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1032 list_del(&p->list); 1026 list_del(&p->list);
1033 spin_unlock(&priv->lock); 1027 spin_unlock(&priv->lock);
1034 spin_unlock_irqrestore(&priv->tx_lock, flags); 1028 spin_unlock_irq(&priv->tx_lock);
1035 ipoib_cm_tx_destroy(p); 1029 ipoib_cm_tx_destroy(p);
1036 spin_lock_irqsave(&priv->tx_lock, flags); 1030 spin_lock_irq(&priv->tx_lock);
1037 spin_lock(&priv->lock); 1031 spin_lock(&priv->lock);
1038 } 1032 }
1039 spin_unlock(&priv->lock); 1033 spin_unlock(&priv->lock);
1040 spin_unlock_irqrestore(&priv->tx_lock, flags); 1034 spin_unlock_irq(&priv->tx_lock);
1041} 1035}
1042 1036
1043static void ipoib_cm_skb_reap(struct work_struct *work) 1037static void ipoib_cm_skb_reap(struct work_struct *work)
@@ -1046,15 +1040,14 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
1046 cm.skb_task); 1040 cm.skb_task);
1047 struct net_device *dev = priv->dev; 1041 struct net_device *dev = priv->dev;
1048 struct sk_buff *skb; 1042 struct sk_buff *skb;
1049 unsigned long flags;
1050 1043
1051 unsigned mtu = priv->mcast_mtu; 1044 unsigned mtu = priv->mcast_mtu;
1052 1045
1053 spin_lock_irqsave(&priv->tx_lock, flags); 1046 spin_lock_irq(&priv->tx_lock);
1054 spin_lock(&priv->lock); 1047 spin_lock(&priv->lock);
1055 while ((skb = skb_dequeue(&priv->cm.skb_queue))) { 1048 while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
1056 spin_unlock(&priv->lock); 1049 spin_unlock(&priv->lock);
1057 spin_unlock_irqrestore(&priv->tx_lock, flags); 1050 spin_unlock_irq(&priv->tx_lock);
1058 if (skb->protocol == htons(ETH_P_IP)) 1051 if (skb->protocol == htons(ETH_P_IP))
1059 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 1052 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1060#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1053#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1062,11 +1055,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
1062 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 1055 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
1063#endif 1056#endif
1064 dev_kfree_skb_any(skb); 1057 dev_kfree_skb_any(skb);
1065 spin_lock_irqsave(&priv->tx_lock, flags); 1058 spin_lock_irq(&priv->tx_lock);
1066 spin_lock(&priv->lock); 1059 spin_lock(&priv->lock);
1067 } 1060 }
1068 spin_unlock(&priv->lock); 1061 spin_unlock(&priv->lock);
1069 spin_unlock_irqrestore(&priv->tx_lock, flags); 1062 spin_unlock_irq(&priv->tx_lock);
1070} 1063}
1071 1064
1072void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb, 1065void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
@@ -1088,9 +1081,8 @@ static void ipoib_cm_stale_task(struct work_struct *work)
1088 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1081 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1089 cm.stale_task.work); 1082 cm.stale_task.work);
1090 struct ipoib_cm_rx *p; 1083 struct ipoib_cm_rx *p;
1091 unsigned long flags;
1092 1084
1093 spin_lock_irqsave(&priv->lock, flags); 1085 spin_lock_irq(&priv->lock);
1094 while (!list_empty(&priv->cm.passive_ids)) { 1086 while (!list_empty(&priv->cm.passive_ids)) {
1095 /* List is sorted by LRU, start from tail, 1087 /* List is sorted by LRU, start from tail,
1096 * stop when we see a recently used entry */ 1088 * stop when we see a recently used entry */
@@ -1098,13 +1090,13 @@ static void ipoib_cm_stale_task(struct work_struct *work)
1098 if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) 1090 if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
1099 break; 1091 break;
1100 list_del_init(&p->list); 1092 list_del_init(&p->list);
1101 spin_unlock_irqrestore(&priv->lock, flags); 1093 spin_unlock_irq(&priv->lock);
1102 ib_destroy_cm_id(p->id); 1094 ib_destroy_cm_id(p->id);
1103 ib_destroy_qp(p->qp); 1095 ib_destroy_qp(p->qp);
1104 kfree(p); 1096 kfree(p);
1105 spin_lock_irqsave(&priv->lock, flags); 1097 spin_lock_irq(&priv->lock);
1106 } 1098 }
1107 spin_unlock_irqrestore(&priv->lock, flags); 1099 spin_unlock_irq(&priv->lock);
1108} 1100}
1109 1101
1110 1102
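The ipoib_cm.c hunks above convert spin_lock_irqsave()/spin_unlock_irqrestore() pairs to plain spin_lock_irq()/spin_unlock_irq() in paths that run in process context (work-queue items and CM event handlers), where saving and restoring the interrupt flags buys nothing. A minimal sketch of the two idioms, using a hypothetical lock rather than the patched driver's:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Caller's IRQ state is unknown: save the flags and restore them on unlock. */
static void locked_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Caller is known to run with IRQs enabled (process context, e.g. a work item):
 * the plain _irq variants suffice and drop the flags bookkeeping. */
static void locked_from_process_context(void)
{
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);
}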
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ba0ee5cf2ad7..1bdb9101911a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -172,8 +172,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
172 struct sk_buff *skb; 172 struct sk_buff *skb;
173 u64 addr; 173 u64 addr;
174 174
175 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", 175 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
176 wr_id, wc->opcode, wc->status); 176 wr_id, wc->status);
177 177
178 if (unlikely(wr_id >= ipoib_recvq_size)) { 178 if (unlikely(wr_id >= ipoib_recvq_size)) {
179 ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", 179 ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
@@ -216,7 +216,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
216 if (wc->slid != priv->local_lid || 216 if (wc->slid != priv->local_lid ||
217 wc->src_qp != priv->qp->qp_num) { 217 wc->src_qp != priv->qp->qp_num) {
218 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 218 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
219 skb->mac.raw = skb->data; 219 skb_reset_mac_header(skb);
220 skb_pull(skb, IPOIB_ENCAP_LEN); 220 skb_pull(skb, IPOIB_ENCAP_LEN);
221 221
222 dev->last_rx = jiffies; 222 dev->last_rx = jiffies;
@@ -245,8 +245,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
245 struct ipoib_tx_buf *tx_req; 245 struct ipoib_tx_buf *tx_req;
246 unsigned long flags; 246 unsigned long flags;
247 247
248 ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n", 248 ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
249 wr_id, wc->opcode, wc->status); 249 wr_id, wc->status);
250 250
251 if (unlikely(wr_id >= ipoib_sendq_size)) { 251 if (unlikely(wr_id >= ipoib_sendq_size)) {
252 ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", 252 ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
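The ipoib_ib.c hunks drop wc->opcode from the completion debug messages (the opcode field is only meaningful for successful completions) and replace the direct skb->mac.raw assignment with the skb_reset_mac_header() helper added in this kernel series. A hedged sketch of the receive-path idiom, with a made-up encapsulation length:

#include <linux/skbuff.h>

/* Illustrative only: mark the current data pointer as the MAC header,
 * then pull the link-layer encapsulation before handing the skb up. */
static void example_rx_fixup(struct sk_buff *skb, unsigned int encap_len)
{
	skb_reset_mac_header(skb);	/* was: skb->mac.raw = skb->data; */
	skb_pull(skb, encap_len);
}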
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f2a40ae8e7d0..b4c380c5a3ba 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -395,14 +395,10 @@ static void path_rec_completion(int status,
395 skb_queue_head_init(&skqueue); 395 skb_queue_head_init(&skqueue);
396 396
397 if (!status) { 397 if (!status) {
398 struct ib_ah_attr av = { 398 struct ib_ah_attr av;
399 .dlid = be16_to_cpu(pathrec->dlid), 399
400 .sl = pathrec->sl, 400 if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
401 .port_num = priv->port, 401 ah = ipoib_create_ah(dev, priv->pd, &av);
402 .static_rate = pathrec->rate
403 };
404
405 ah = ipoib_create_ah(dev, priv->pd, &av);
406 } 402 }
407 403
408 spin_lock_irqsave(&priv->lock, flags); 404 spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a00fe470a829..bd686a2a517d 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -190,16 +190,14 @@ static void gameport_run_poll_handler(unsigned long d)
190 * Basic gameport -> driver core mappings 190 * Basic gameport -> driver core mappings
191 */ 191 */
192 192
193static void gameport_bind_driver(struct gameport *gameport, struct gameport_driver *drv) 193static int gameport_bind_driver(struct gameport *gameport, struct gameport_driver *drv)
194{ 194{
195 int error; 195 int error;
196 196
197 down_write(&gameport_bus.subsys.rwsem);
198
199 gameport->dev.driver = &drv->driver; 197 gameport->dev.driver = &drv->driver;
200 if (drv->connect(gameport, drv)) { 198 if (drv->connect(gameport, drv)) {
201 gameport->dev.driver = NULL; 199 gameport->dev.driver = NULL;
202 goto out; 200 return -ENODEV;
203 } 201 }
204 202
205 error = device_bind_driver(&gameport->dev); 203 error = device_bind_driver(&gameport->dev);
@@ -211,31 +209,21 @@ static void gameport_bind_driver(struct gameport *gameport, struct gameport_driv
211 drv->description, error); 209 drv->description, error);
212 drv->disconnect(gameport); 210 drv->disconnect(gameport);
213 gameport->dev.driver = NULL; 211 gameport->dev.driver = NULL;
214 goto out; 212 return error;
215 } 213 }
216 214
217 out: 215 return 0;
218 up_write(&gameport_bus.subsys.rwsem);
219}
220
221static void gameport_release_driver(struct gameport *gameport)
222{
223 down_write(&gameport_bus.subsys.rwsem);
224 device_release_driver(&gameport->dev);
225 up_write(&gameport_bus.subsys.rwsem);
226} 216}
227 217
228static void gameport_find_driver(struct gameport *gameport) 218static void gameport_find_driver(struct gameport *gameport)
229{ 219{
230 int error; 220 int error;
231 221
232 down_write(&gameport_bus.subsys.rwsem);
233 error = device_attach(&gameport->dev); 222 error = device_attach(&gameport->dev);
234 if (error < 0) 223 if (error < 0)
235 printk(KERN_WARNING 224 printk(KERN_WARNING
236 "gameport: device_attach() failed for %s (%s), error: %d\n", 225 "gameport: device_attach() failed for %s (%s), error: %d\n",
237 gameport->phys, gameport->name, error); 226 gameport->phys, gameport->name, error);
238 up_write(&gameport_bus.subsys.rwsem);
239} 227}
240 228
241 229
@@ -483,13 +471,12 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
483{ 471{
484 struct gameport *gameport = to_gameport_port(dev); 472 struct gameport *gameport = to_gameport_port(dev);
485 struct device_driver *drv; 473 struct device_driver *drv;
486 int retval; 474 int error;
487 475
488 retval = mutex_lock_interruptible(&gameport_mutex); 476 error = mutex_lock_interruptible(&gameport_mutex);
489 if (retval) 477 if (error)
490 return retval; 478 return error;
491 479
492 retval = count;
493 if (!strncmp(buf, "none", count)) { 480 if (!strncmp(buf, "none", count)) {
494 gameport_disconnect_port(gameport); 481 gameport_disconnect_port(gameport);
495 } else if (!strncmp(buf, "reconnect", count)) { 482 } else if (!strncmp(buf, "reconnect", count)) {
@@ -499,15 +486,15 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
499 gameport_find_driver(gameport); 486 gameport_find_driver(gameport);
500 } else if ((drv = driver_find(buf, &gameport_bus)) != NULL) { 487 } else if ((drv = driver_find(buf, &gameport_bus)) != NULL) {
501 gameport_disconnect_port(gameport); 488 gameport_disconnect_port(gameport);
502 gameport_bind_driver(gameport, to_gameport_driver(drv)); 489 error = gameport_bind_driver(gameport, to_gameport_driver(drv));
503 put_driver(drv); 490 put_driver(drv);
504 } else { 491 } else {
505 retval = -EINVAL; 492 error = -EINVAL;
506 } 493 }
507 494
508 mutex_unlock(&gameport_mutex); 495 mutex_unlock(&gameport_mutex);
509 496
510 return retval; 497 return error ? error : count;
511} 498}
512 499
513static struct device_attribute gameport_device_attrs[] = { 500static struct device_attribute gameport_device_attrs[] = {
@@ -655,7 +642,7 @@ static void gameport_disconnect_port(struct gameport *gameport)
655 do { 642 do {
656 parent = s->parent; 643 parent = s->parent;
657 644
658 gameport_release_driver(s); 645 device_release_driver(&s->dev);
659 gameport_destroy_port(s); 646 gameport_destroy_port(s);
660 } while ((s = parent) != gameport); 647 } while ((s = parent) != gameport);
661 } 648 }
@@ -663,7 +650,7 @@ static void gameport_disconnect_port(struct gameport *gameport)
663 /* 650 /*
664 * Ok, no children left, now disconnect this port 651 * Ok, no children left, now disconnect this port
665 */ 652 */
666 gameport_release_driver(gameport); 653 device_release_driver(&gameport->dev);
667} 654}
668 655
669void gameport_rescan(struct gameport *gameport) 656void gameport_rescan(struct gameport *gameport)
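With gameport_bind_driver() now returning an error code, the rebind_driver store method can report a real failure instead of always claiming success. The sysfs convention it follows is sketched below; example_do_rebind() is a hypothetical helper, not part of the gameport API:

#include <linux/device.h>

/* Hypothetical worker that performs the actual rebind. */
static int example_do_rebind(struct device *dev, const char *buf, size_t count);

/* Illustrative sysfs store handler: return a negative errno on failure,
 * otherwise the number of bytes consumed so user space does not retry. */
static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int error = example_do_rebind(dev, buf, count);

	return error ? error : count;
}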
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a15e531ec755..5895202b972c 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -115,18 +115,18 @@ static int serio_match_port(const struct serio_device_id *ids, struct serio *ser
115 * Basic serio -> driver core mappings 115 * Basic serio -> driver core mappings
116 */ 116 */
117 117
118static void serio_bind_driver(struct serio *serio, struct serio_driver *drv) 118static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
119{ 119{
120 int error; 120 int error;
121 121
122 down_write(&serio_bus.subsys.rwsem);
123
124 if (serio_match_port(drv->id_table, serio)) { 122 if (serio_match_port(drv->id_table, serio)) {
123
125 serio->dev.driver = &drv->driver; 124 serio->dev.driver = &drv->driver;
126 if (serio_connect_driver(serio, drv)) { 125 if (serio_connect_driver(serio, drv)) {
127 serio->dev.driver = NULL; 126 serio->dev.driver = NULL;
128 goto out; 127 return -ENODEV;
129 } 128 }
129
130 error = device_bind_driver(&serio->dev); 130 error = device_bind_driver(&serio->dev);
131 if (error) { 131 if (error) {
132 printk(KERN_WARNING 132 printk(KERN_WARNING
@@ -136,31 +136,21 @@ static void serio_bind_driver(struct serio *serio, struct serio_driver *drv)
136 drv->description, error); 136 drv->description, error);
137 serio_disconnect_driver(serio); 137 serio_disconnect_driver(serio);
138 serio->dev.driver = NULL; 138 serio->dev.driver = NULL;
139 goto out; 139 return error;
140 } 140 }
141 } 141 }
142 out: 142 return 0;
143 up_write(&serio_bus.subsys.rwsem);
144}
145
146static void serio_release_driver(struct serio *serio)
147{
148 down_write(&serio_bus.subsys.rwsem);
149 device_release_driver(&serio->dev);
150 up_write(&serio_bus.subsys.rwsem);
151} 143}
152 144
153static void serio_find_driver(struct serio *serio) 145static void serio_find_driver(struct serio *serio)
154{ 146{
155 int error; 147 int error;
156 148
157 down_write(&serio_bus.subsys.rwsem);
158 error = device_attach(&serio->dev); 149 error = device_attach(&serio->dev);
159 if (error < 0) 150 if (error < 0)
160 printk(KERN_WARNING 151 printk(KERN_WARNING
161 "serio: device_attach() failed for %s (%s), error: %d\n", 152 "serio: device_attach() failed for %s (%s), error: %d\n",
162 serio->phys, serio->name, error); 153 serio->phys, serio->name, error);
163 up_write(&serio_bus.subsys.rwsem);
164} 154}
165 155
166 156
@@ -470,13 +460,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
470{ 460{
471 struct serio *serio = to_serio_port(dev); 461 struct serio *serio = to_serio_port(dev);
472 struct device_driver *drv; 462 struct device_driver *drv;
473 int retval; 463 int error;
474 464
475 retval = mutex_lock_interruptible(&serio_mutex); 465 error = mutex_lock_interruptible(&serio_mutex);
476 if (retval) 466 if (error)
477 return retval; 467 return error;
478 468
479 retval = count;
480 if (!strncmp(buf, "none", count)) { 469 if (!strncmp(buf, "none", count)) {
481 serio_disconnect_port(serio); 470 serio_disconnect_port(serio);
482 } else if (!strncmp(buf, "reconnect", count)) { 471 } else if (!strncmp(buf, "reconnect", count)) {
@@ -486,15 +475,15 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
486 serio_find_driver(serio); 475 serio_find_driver(serio);
487 } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { 476 } else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
488 serio_disconnect_port(serio); 477 serio_disconnect_port(serio);
489 serio_bind_driver(serio, to_serio_driver(drv)); 478 error = serio_bind_driver(serio, to_serio_driver(drv));
490 put_driver(drv); 479 put_driver(drv);
491 } else { 480 } else {
492 retval = -EINVAL; 481 error = -EINVAL;
493 } 482 }
494 483
495 mutex_unlock(&serio_mutex); 484 mutex_unlock(&serio_mutex);
496 485
497 return retval; 486 return error ? error : count;
498} 487}
499 488
500static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf) 489static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
@@ -665,7 +654,7 @@ static void serio_disconnect_port(struct serio *serio)
665 do { 654 do {
666 parent = s->parent; 655 parent = s->parent;
667 656
668 serio_release_driver(s); 657 device_release_driver(&s->dev);
669 serio_destroy_port(s); 658 serio_destroy_port(s);
670 } while ((s = parent) != serio); 659 } while ((s = parent) != serio);
671 } 660 }
@@ -673,7 +662,7 @@ static void serio_disconnect_port(struct serio *serio)
673 /* 662 /*
674 * Ok, no children left, now disconnect this port 663 * Ok, no children left, now disconnect this port
675 */ 664 */
676 serio_release_driver(serio); 665 device_release_driver(&serio->dev);
677} 666}
678 667
679void serio_rescan(struct serio *serio) 668void serio_rescan(struct serio *serio)
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index e3e5c1399076..ee2b0b9f8f46 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -442,7 +442,7 @@ act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb)
442 return 0; 442 return 0;
443 } 443 }
444 skb_reserve(xmit_skb, 19); 444 skb_reserve(xmit_skb, 19);
445 memcpy(skb_put(xmit_skb, len), skb->data, len); 445 skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len);
446 } else { 446 } else {
447 xmit_skb = skb_clone(skb, GFP_ATOMIC); 447 xmit_skb = skb_clone(skb, GFP_ATOMIC);
448 if (!xmit_skb) { 448 if (!xmit_skb) {
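This hunk, and the ISDN hunks that follow, replace open-coded memcpy() calls on skb->data with the skb_copy_from_linear_data*() helpers introduced in this kernel series; the helpers make explicit that only the linear part of the skb is read. The mapping, roughly:

#include <linux/skbuff.h>

/* was: memcpy(dst, skb->data, len); */
static void example_copy(const struct sk_buff *skb, void *dst, unsigned int len)
{
	skb_copy_from_linear_data(skb, dst, len);
}

/* was: memcpy(dst, skb->data + 2, len); */
static void example_copy_at_offset(const struct sk_buff *skb, void *dst, unsigned int len)
{
	skb_copy_from_linear_data_offset(skb, 2, dst, len);
}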
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 2baef349c12d..c8e1c357cec8 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -652,7 +652,7 @@ static int write_modem(struct cardstate *cs)
652 * transmit data 652 * transmit data
653 */ 653 */
654 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); 654 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
655 memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count); 655 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
656 skb_pull(bcs->tx_skb, count); 656 skb_pull(bcs->tx_skb, count);
657 atomic_set(&ucs->busy, 1); 657 atomic_set(&ucs->busy, 1);
658 gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); 658 gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 1e2d38e3d68c..428872b653e9 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -404,7 +404,8 @@ static void b1dma_dispatch_tx(avmcard *card)
404 printk(KERN_DEBUG "tx: put 0x%x len=%d\n", 404 printk(KERN_DEBUG "tx: put 0x%x len=%d\n",
405 skb->data[2], txlen); 405 skb->data[2], txlen);
406#endif 406#endif
407 memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); 407 skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
408 skb->len - 2);
408 } 409 }
409 txlen = (txlen + 3) & ~3; 410 txlen = (txlen + 3) & ~3;
410 411
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 6f5efa8d78cb..d58f927e766a 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -457,7 +457,8 @@ static void c4_dispatch_tx(avmcard *card)
457 printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n", 457 printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
458 card->name, skb->data[2], txlen); 458 card->name, skb->data[2], txlen);
459#endif 459#endif
460 memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); 460 skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
461 skb->len - 2);
461 } 462 }
462 txlen = (txlen + 3) & ~3; 463 txlen = (txlen + 3) & ~3;
463 464
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index ae377e812775..1642dca988a1 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -254,14 +254,16 @@ write_modem(struct BCState *bcs) {
254 count = len; 254 count = len;
255 if (count > MAX_MODEM_BUF - fp) { 255 if (count > MAX_MODEM_BUF - fp) {
256 count = MAX_MODEM_BUF - fp; 256 count = MAX_MODEM_BUF - fp;
257 memcpy(cs->hw.elsa.transbuf + fp, bcs->tx_skb->data, count); 257 skb_copy_from_linear_data(bcs->tx_skb,
258 cs->hw.elsa.transbuf + fp, count);
258 skb_pull(bcs->tx_skb, count); 259 skb_pull(bcs->tx_skb, count);
259 cs->hw.elsa.transcnt += count; 260 cs->hw.elsa.transcnt += count;
260 ret = count; 261 ret = count;
261 count = len - count; 262 count = len - count;
262 fp = 0; 263 fp = 0;
263 } 264 }
264 memcpy((cs->hw.elsa.transbuf + fp), bcs->tx_skb->data, count); 265 skb_copy_from_linear_data(bcs->tx_skb,
266 cs->hw.elsa.transbuf + fp, count);
265 skb_pull(bcs->tx_skb, count); 267 skb_pull(bcs->tx_skb, count);
266 cs->hw.elsa.transcnt += count; 268 cs->hw.elsa.transcnt += count;
267 ret += count; 269 ret += count;
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index cd3b5ad53491..3446f249d675 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1293,7 +1293,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1293 oskb = skb; 1293 oskb = skb;
1294 skb = alloc_skb(oskb->len + i, GFP_ATOMIC); 1294 skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
1295 memcpy(skb_put(skb, i), header, i); 1295 memcpy(skb_put(skb, i), header, i);
1296 memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len); 1296 skb_copy_from_linear_data(oskb,
1297 skb_put(skb, oskb->len), oskb->len);
1297 dev_kfree_skb(oskb); 1298 dev_kfree_skb(oskb);
1298 } 1299 }
1299 st->l2.l2l1(st, PH_PULL | INDICATION, skb); 1300 st->l2.l2l1(st, PH_PULL | INDICATION, skb);
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index b2ae4ec1e49e..f85450146bdc 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -398,8 +398,9 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
398 _len = CAPIMSG_LEN(skb->data); 398 _len = CAPIMSG_LEN(skb->data);
399 if (_len > 22) { 399 if (_len > 22) {
400 _len2 = _len - 22; 400 _len2 = _len - 22;
401 memcpy(msghead, skb->data, 22); 401 skb_copy_from_linear_data(skb, msghead, 22);
402 memcpy(skb->data + _len2, msghead, 22); 402 skb_copy_to_linear_data_offset(skb, _len2,
403 msghead, 22);
403 skb_pull(skb, _len2); 404 skb_pull(skb, _len2);
404 CAPIMSG_SETLEN(skb->data, 22); 405 CAPIMSG_SETLEN(skb->data, 22);
405 retval = capilib_data_b3_req(&cinfo->ncci_head, 406 retval = capilib_data_b3_req(&cinfo->ncci_head,
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index 557d96c78a62..cfa8fa5e44ab 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -214,8 +214,6 @@ hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len)
214 lp->stats.rx_dropped++; 214 lp->stats.rx_dropped++;
215 return; 215 return;
216 } 216 }
217 skb->dev = &lp->netdev;
218
219 /* copy the data */ 217 /* copy the data */
220 memcpy(skb_put(skb, len), buf, len); 218 memcpy(skb_put(skb, len), buf, len);
221 219
diff --git a/drivers/isdn/hysdn/hysdn_sched.c b/drivers/isdn/hysdn/hysdn_sched.c
index b7b5aa4748a0..81db4a190d41 100644
--- a/drivers/isdn/hysdn/hysdn_sched.c
+++ b/drivers/isdn/hysdn/hysdn_sched.c
@@ -113,7 +113,8 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
113 (skb = hysdn_tx_netget(card)) != NULL) 113 (skb = hysdn_tx_netget(card)) != NULL)
114 { 114 {
115 if (skb->len <= maxlen) { 115 if (skb->len <= maxlen) {
116 memcpy(buf, skb->data, skb->len); /* copy the packet to the buffer */ 116 /* copy the packet to the buffer */
117 skb_copy_from_linear_data(skb, buf, skb->len);
117 *len = skb->len; 118 *len = skb->len;
118 *chan = CHAN_NDIS_DATA; 119 *chan = CHAN_NDIS_DATA;
119 card->net_tx_busy = 1; /* we are busy sending network data */ 120 card->net_tx_busy = 1; /* we are busy sending network data */
@@ -126,7 +127,7 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
126 ((skb = hycapi_tx_capiget(card)) != NULL) ) 127 ((skb = hycapi_tx_capiget(card)) != NULL) )
127 { 128 {
128 if (skb->len <= maxlen) { 129 if (skb->len <= maxlen) {
129 memcpy(buf, skb->data, skb->len); 130 skb_copy_from_linear_data(skb, buf, skb->len);
130 *len = skb->len; 131 *len = skb->len;
131 *chan = CHAN_CAPI; 132 *chan = CHAN_CAPI;
132 hycapi_tx_capiack(card); 133 hycapi_tx_capiack(card);
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9c926e41b114..c97330b19877 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -829,7 +829,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que
829 dflag = 0; 829 dflag = 0;
830 } 830 }
831 count_put = count_pull; 831 count_put = count_pull;
832 memcpy(cp, skb->data, count_put); 832 skb_copy_from_linear_data(skb, cp, count_put);
833 cp += count_put; 833 cp += count_put;
834 len -= count_put; 834 len -= count_put;
835#ifdef CONFIG_ISDN_AUDIO 835#ifdef CONFIG_ISDN_AUDIO
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 838b3734e2b6..aa83277aba74 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -872,7 +872,8 @@ typedef struct {
872static void 872static void
873isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) 873isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
874{ 874{
875 u_char *p = skb->nh.raw; /* hopefully, this was set correctly */ 875 /* hopefully, this was set correctly */
876 const u_char *p = skb_network_header(skb);
876 unsigned short proto = ntohs(skb->protocol); 877 unsigned short proto = ntohs(skb->protocol);
877 int data_ofs; 878 int data_ofs;
878 ip_ports *ipp; 879 ip_ports *ipp;
@@ -880,7 +881,7 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
880 881
881 addinfo[0] = '\0'; 882 addinfo[0] = '\0';
882 /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ 883 /* This check stolen from 2.1.72 dev_queue_xmit_nit() */
883 if (skb->nh.raw < skb->data || skb->nh.raw >= skb->tail) { 884 if (p < skb->data || skb->network_header >= skb->tail) {
884 /* fall back to old isdn_net_log_packet method() */ 885 /* fall back to old isdn_net_log_packet method() */
885 char * buf = skb->data; 886 char * buf = skb->data;
886 887
@@ -1121,7 +1122,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
1121 if (!skb) 1122 if (!skb)
1122 return; 1123 return;
1123 if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { 1124 if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
1124 int pullsize = (ulong)skb->nh.raw - (ulong)skb->data - ETH_HLEN; 1125 const int pullsize = skb_network_offset(skb) - ETH_HLEN;
1125 if (pullsize > 0) { 1126 if (pullsize > 0) {
1126 printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize); 1127 printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize);
1127 skb_pull(skb, pullsize); 1128 skb_pull(skb, pullsize);
@@ -1366,7 +1367,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
1366 struct ethhdr *eth; 1367 struct ethhdr *eth;
1367 unsigned char *rawp; 1368 unsigned char *rawp;
1368 1369
1369 skb->mac.raw = skb->data; 1370 skb_reset_mac_header(skb);
1370 skb_pull(skb, ETH_HLEN); 1371 skb_pull(skb, ETH_HLEN);
1371 eth = eth_hdr(skb); 1372 eth = eth_hdr(skb);
1372 1373
@@ -1786,7 +1787,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1786 } 1787 }
1787 skb->dev = ndev; 1788 skb->dev = ndev;
1788 skb->pkt_type = PACKET_HOST; 1789 skb->pkt_type = PACKET_HOST;
1789 skb->mac.raw = skb->data; 1790 skb_reset_mac_header(skb);
1790#ifdef ISDN_DEBUG_NET_DUMP 1791#ifdef ISDN_DEBUG_NET_DUMP
1791 isdn_dumppkt("R:", skb->data, skb->len, 40); 1792 isdn_dumppkt("R:", skb->data, skb->len, 40);
1792#endif 1793#endif
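The isdn_net.c hunks make the same kind of conversion for header pointers: skb_reset_mac_header() replaces skb->mac.raw = skb->data, and skb_network_header()/skb_network_offset() replace arithmetic on skb->nh.raw. A compact sketch of the offset calculation used for the Ethernet-encapsulation case:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Illustrative: bytes of junk sitting in front of the Ethernet payload.
 * skb_network_offset() stands in for the old
 * (ulong)skb->nh.raw - (ulong)skb->data expression. */
static int example_pullsize(const struct sk_buff *skb)
{
	return skb_network_offset(skb) - ETH_HLEN;
}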
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 1b2df80c3bce..387392cb3d68 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1100,7 +1100,8 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1100 goto drop_packet; 1100 goto drop_packet;
1101 } 1101 }
1102 skb_put(skb, skb_old->len + 128); 1102 skb_put(skb, skb_old->len + 128);
1103 memcpy(skb->data, skb_old->data, skb_old->len); 1103 skb_copy_from_linear_data(skb_old, skb->data,
1104 skb_old->len);
1104 if (net_dev->local->ppp_slot < 0) { 1105 if (net_dev->local->ppp_slot < 0) {
1105 printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", 1106 printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
1106 __FUNCTION__, net_dev->local->ppp_slot); 1107 __FUNCTION__, net_dev->local->ppp_slot);
@@ -1167,7 +1168,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1167 mlp->huptimer = 0; 1168 mlp->huptimer = 0;
1168#endif /* CONFIG_IPPP_FILTER */ 1169#endif /* CONFIG_IPPP_FILTER */
1169 skb->dev = dev; 1170 skb->dev = dev;
1170 skb->mac.raw = skb->data; 1171 skb_reset_mac_header(skb);
1171 netif_rx(skb); 1172 netif_rx(skb);
1172 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ 1173 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
1173 return; 1174 return;
@@ -1902,7 +1903,9 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
1902 while( from != to ) { 1903 while( from != to ) {
1903 unsigned int len = from->len - MP_HEADER_LEN; 1904 unsigned int len = from->len - MP_HEADER_LEN;
1904 1905
1905 memcpy(skb_put(skb,len), from->data+MP_HEADER_LEN, len); 1906 skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
1907 skb_put(skb,len),
1908 len);
1906 frag = from->next; 1909 frag = from->next;
1907 isdn_ppp_mp_free_skb(mp, from); 1910 isdn_ppp_mp_free_skb(mp, from);
1908 from = frag; 1911 from = frag;
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index e3add27dd0e1..e93ad59f60bf 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -415,7 +415,8 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
415 spin_lock_irqsave(&card->isdnloop_lock, flags); 415 spin_lock_irqsave(&card->isdnloop_lock, flags);
416 nskb = dev_alloc_skb(skb->len); 416 nskb = dev_alloc_skb(skb->len);
417 if (nskb) { 417 if (nskb) {
418 memcpy(skb_put(nskb, len), skb->data, len); 418 skb_copy_from_linear_data(skb,
419 skb_put(nskb, len), len);
419 skb_queue_tail(&card->bqueue[channel], nskb); 420 skb_queue_tail(&card->bqueue[channel], nskb);
420 dev_kfree_skb(skb); 421 dev_kfree_skb(skb);
421 } else 422 } else
diff --git a/drivers/isdn/pcbit/capi.c b/drivers/isdn/pcbit/capi.c
index 47c59e95898d..7b55e151f1b0 100644
--- a/drivers/isdn/pcbit/capi.c
+++ b/drivers/isdn/pcbit/capi.c
@@ -429,8 +429,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan,
429 if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC))) 429 if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC)))
430 return -1; 430 return -1;
431 431
432 memcpy(info->data.setup.CallingPN, skb->data + count + 1, 432 skb_copy_from_linear_data_offset(skb, count + 1,
433 len - count); 433 info->data.setup.CallingPN,
434 len - count);
434 info->data.setup.CallingPN[len - count] = 0; 435 info->data.setup.CallingPN[len - count] = 0;
435 436
436 } 437 }
@@ -457,8 +458,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan,
457 if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC))) 458 if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC)))
458 return -1; 459 return -1;
459 460
460 memcpy(info->data.setup.CalledPN, skb->data + count + 1, 461 skb_copy_from_linear_data_offset(skb, count + 1,
461 len - count); 462 info->data.setup.CalledPN,
463 len - count);
462 info->data.setup.CalledPN[len - count] = 0; 464 info->data.setup.CalledPN[len - count] = 0;
463 465
464 } 466 }
@@ -539,7 +541,7 @@ int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb)
539 541
540#ifdef DEBUG 542#ifdef DEBUG
541 if (len > 1 && len < 31) { 543 if (len > 1 && len < 31) {
542 memcpy(str, skb->data + 2, len - 1); 544 skb_copy_from_linear_data_offset(skb, 2, str, len - 1);
543 str[len] = 0; 545 str[len] = 0;
544 printk(KERN_DEBUG "Connected Party Number: %s\n", str); 546 printk(KERN_DEBUG "Connected Party Number: %s\n", str);
545 } 547 }
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e85b4c7c36f7..cab26f301eab 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1171,6 +1171,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1171 * and zap two pdes instead of one. 1171 * and zap two pdes instead of one.
1172 */ 1172 */
1173 if (level == PT32_ROOT_LEVEL) { 1173 if (level == PT32_ROOT_LEVEL) {
1174 page_offset &= ~7; /* kill rounding error */
1174 page_offset <<= 1; 1175 page_offset <<= 1;
1175 npte = 2; 1176 npte = 2;
1176 } 1177 }
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 135f22eb1ad4..a98a328b1cfc 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1259,9 +1259,9 @@ static int smu_release(struct inode *inode, struct file *file)
1259 set_current_state(TASK_UNINTERRUPTIBLE); 1259 set_current_state(TASK_UNINTERRUPTIBLE);
1260 if (pp->cmd.status != 1) 1260 if (pp->cmd.status != 1)
1261 break; 1261 break;
1262 spin_lock_irqsave(&pp->lock, flags);
1263 schedule();
1264 spin_unlock_irqrestore(&pp->lock, flags); 1262 spin_unlock_irqrestore(&pp->lock, flags);
1263 schedule();
1264 spin_lock_irqsave(&pp->lock, flags);
1265 } 1265 }
1266 set_current_state(TASK_RUNNING); 1266 set_current_state(TASK_RUNNING);
1267 remove_wait_queue(&pp->wait, &wait); 1267 remove_wait_queue(&pp->wait, &wait);
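The smu.c hunk fixes an inverted sleep: the old loop held pp->lock across schedule(), i.e. it slept with a spinlock held; the fix releases the lock before scheduling and retakes it afterwards. The corrected wait-loop shape, sketched with hypothetical names:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Illustrative wait loop: never call schedule() while holding a spinlock. */
static void example_wait_for_done(spinlock_t *lock, wait_queue_head_t *wq,
				  const int *done)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(wq, &wait);
	spin_lock_irqsave(lock, flags);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (*done)
			break;
		spin_unlock_irqrestore(lock, flags);	/* drop the lock ... */
		schedule();				/* ... sleep ... */
		spin_lock_irqsave(lock, flags);		/* ... retake it */
	}
	set_current_state(TASK_RUNNING);
	spin_unlock_irqrestore(lock, flags);
	remove_wait_queue(wq, &wait);
}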
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5554adaa58f9..e61e0efe9ec7 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -863,9 +863,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
863 863
864 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ 864 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
865 bitmap->filemap_attr = kzalloc( 865 bitmap->filemap_attr = kzalloc(
866 (((num_pages*4/8)+sizeof(unsigned long)-1) 866 roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
867 /sizeof(unsigned long))
868 *sizeof(unsigned long),
869 GFP_KERNEL); 867 GFP_KERNEL);
870 if (!bitmap->filemap_attr) 868 if (!bitmap->filemap_attr)
871 goto out; 869 goto out;
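The bitmap.c change is arithmetic clean-up: four bits per page means num_pages*4 bits, DIV_ROUND_UP(num_pages*4, 8) converts that to whole bytes, and roundup(..., sizeof(unsigned long)) pads the allocation to a whole number of longs. For example, with num_pages = 5 on a 64-bit machine, DIV_ROUND_UP(20, 8) = 3 bytes and roundup(3, 8) = 8 bytes allocated. Note that DIV_ROUND_UP also rounds the bit-to-byte conversion up, which the old num_pages*4/8 integer division silently truncated.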
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index 03b47a262f27..cbd1184b5219 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -667,7 +667,7 @@ IR_KEYTAB_TYPE ir_codes_pinnacle_grey[IR_KEYTAB_SIZE] = {
667 [ 0x1f ] = KEY_L, 667 [ 0x1f ] = KEY_L,
668 [ 0x2b ] = KEY_I, 668 [ 0x2b ] = KEY_I,
669 669
670 [ 0x2d ] = KEY_ZOOM, 670 [ 0x2d ] = KEY_SCREEN,
671 [ 0x1e ] = KEY_ZOOM, 671 [ 0x1e ] = KEY_ZOOM,
672 [ 0x1b ] = KEY_VOLUMEUP, 672 [ 0x1b ] = KEY_VOLUMEUP,
673 [ 0x0f ] = KEY_VOLUMEDOWN, 673 [ 0x0f ] = KEY_VOLUMEDOWN,
@@ -682,12 +682,12 @@ IR_KEYTAB_TYPE ir_codes_pinnacle_grey[IR_KEYTAB_SIZE] = {
682 682
683 [ 0x3f ] = KEY_UP, 683 [ 0x3f ] = KEY_UP,
684 [ 0x3e ] = KEY_DOWN, 684 [ 0x3e ] = KEY_DOWN,
685 [ 0x1a ] = KEY_PAUSE, 685 [ 0x1a ] = KEY_ENTER,
686 686
687 [ 0x1d ] = KEY_MENU, 687 [ 0x1d ] = KEY_MENU,
688 [ 0x19 ] = KEY_PLAY, 688 [ 0x19 ] = KEY_AGAIN,
689 [ 0x16 ] = KEY_REWIND, 689 [ 0x16 ] = KEY_PREVIOUSSONG,
690 [ 0x13 ] = KEY_FORWARD, 690 [ 0x13 ] = KEY_NEXTSONG,
691 [ 0x15 ] = KEY_PAUSE, 691 [ 0x15 ] = KEY_PAUSE,
692 [ 0x0e ] = KEY_REWIND, 692 [ 0x0e ] = KEY_REWIND,
693 [ 0x0d ] = KEY_PLAY, 693 [ 0x0d ] = KEY_PLAY,
@@ -1739,7 +1739,7 @@ IR_KEYTAB_TYPE ir_codes_encore_enltv[IR_KEYTAB_SIZE] = {
1739 1739
1740EXPORT_SYMBOL_GPL(ir_codes_encore_enltv); 1740EXPORT_SYMBOL_GPL(ir_codes_encore_enltv);
1741 1741
1742/* for the Technotrend 1500 bundled remote: */ 1742/* for the Technotrend 1500 bundled remotes (grey and black): */
1743IR_KEYTAB_TYPE ir_codes_tt_1500[IR_KEYTAB_SIZE] = { 1743IR_KEYTAB_TYPE ir_codes_tt_1500[IR_KEYTAB_SIZE] = {
1744 [ 0x01 ] = KEY_POWER, 1744 [ 0x01 ] = KEY_POWER,
1745 [ 0x02 ] = KEY_SHUFFLE, /* ? double-arrow key */ 1745 [ 0x02 ] = KEY_SHUFFLE, /* ? double-arrow key */
@@ -1774,6 +1774,12 @@ IR_KEYTAB_TYPE ir_codes_tt_1500[IR_KEYTAB_SIZE] = {
1774 [ 0x25 ] = KEY_VOLUMEUP, 1774 [ 0x25 ] = KEY_VOLUMEUP,
1775 [ 0x26 ] = KEY_VOLUMEDOWN, 1775 [ 0x26 ] = KEY_VOLUMEDOWN,
1776 [ 0x27 ] = KEY_SETUP, 1776 [ 0x27 ] = KEY_SETUP,
1777 [ 0x3a ] = KEY_RECORD, /* these keys are only in the black remote */
1778 [ 0x3b ] = KEY_PLAY,
1779 [ 0x3c ] = KEY_STOP,
1780 [ 0x3d ] = KEY_REWIND,
1781 [ 0x3e ] = KEY_PAUSE,
1782 [ 0x3f ] = KEY_FORWARD,
1777}; 1783};
1778 1784
1779EXPORT_SYMBOL_GPL(ir_codes_tt_1500); 1785EXPORT_SYMBOL_GPL(ir_codes_tt_1500);
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 7e0cedc557df..e3d04a4cef4d 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1428,6 +1428,7 @@ static void video_close(struct saa7146_dev *dev, struct file *file)
1428{ 1428{
1429 struct saa7146_fh *fh = (struct saa7146_fh *)file->private_data; 1429 struct saa7146_fh *fh = (struct saa7146_fh *)file->private_data;
1430 struct saa7146_vv *vv = dev->vv_data; 1430 struct saa7146_vv *vv = dev->vv_data;
1431 struct videobuf_queue *q = &fh->video_q;
1431 int err; 1432 int err;
1432 1433
1433 if (IS_CAPTURE_ACTIVE(fh) != 0) { 1434 if (IS_CAPTURE_ACTIVE(fh) != 0) {
@@ -1436,6 +1437,11 @@ static void video_close(struct saa7146_dev *dev, struct file *file)
1436 err = saa7146_stop_preview(fh); 1437 err = saa7146_stop_preview(fh);
1437 } 1438 }
1438 1439
1440 // release all capture buffers
1441 mutex_lock(&q->lock);
1442 videobuf_read_stop(q);
1443 mutex_unlock(&q->lock);
1444
1439 /* hmm, why is this function declared void? */ 1445 /* hmm, why is this function declared void? */
1440 /* return err */ 1446 /* return err */
1441} 1447}
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index 79875958930e..a0dcd59da76e 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -9,7 +9,6 @@ config DVB_B2C2_FLEXCOP
9 select DVB_STV0297 if !DVB_FE_CUSTOMISE 9 select DVB_STV0297 if !DVB_FE_CUSTOMISE
10 select DVB_BCM3510 if !DVB_FE_CUSTOMISE 10 select DVB_BCM3510 if !DVB_FE_CUSTOMISE
11 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 11 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
12 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
13 help 12 help
14 Support for the digital TV receiver chip made by B2C2 Inc. included in 13 Support for the digital TV receiver chip made by B2C2 Inc. included in
15 Technisats PCI cards and USB boxes. 14 Technisats PCI cards and USB boxes.
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index 752cf79c532f..b02c2fd65baa 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -14,7 +14,6 @@
14#include "stv0297.h" 14#include "stv0297.h"
15#include "mt312.h" 15#include "mt312.h"
16#include "lgdt330x.h" 16#include "lgdt330x.h"
17#include "lgh06xf.h"
18#include "dvb-pll.h" 17#include "dvb-pll.h"
19 18
20/* lnb control */ 19/* lnb control */
@@ -507,7 +506,7 @@ int flexcop_frontend_init(struct flexcop_device *fc)
507 /* try the air atsc 3rd generation (lgdt3303) */ 506 /* try the air atsc 3rd generation (lgdt3303) */
508 if ((fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, &fc->i2c_adap)) != NULL) { 507 if ((fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, &fc->i2c_adap)) != NULL) {
509 fc->dev_type = FC_AIR_ATSC3; 508 fc->dev_type = FC_AIR_ATSC3;
510 dvb_attach(lgh06xf_attach, fc->fe, &fc->i2c_adap); 509 dvb_attach(dvb_pll_attach, fc->fe, 0x61, &fc->i2c_adap, &dvb_pll_lg_tdvs_h06xf);
511 info("found the lgdt3303 at i2c address: 0x%02x",air2pc_atsc_hd5000_config.demod_address); 510 info("found the lgdt3303 at i2c address: 0x%02x",air2pc_atsc_hd5000_config.demod_address);
512 } else 511 } else
513 /* try the air atsc 1st generation (bcm3510)/panasonic ct10s */ 512 /* try the air atsc 1st generation (bcm3510)/panasonic ct10s */
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 6e166801505d..01af4d237eb1 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -127,10 +127,11 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
127{ 127{
128 struct flexcop_pci *fc_pci = dev_id; 128 struct flexcop_pci *fc_pci = dev_id;
129 struct flexcop_device *fc = fc_pci->fc_dev; 129 struct flexcop_device *fc = fc_pci->fc_dev;
130 unsigned long flags;
130 flexcop_ibi_value v; 131 flexcop_ibi_value v;
131 irqreturn_t ret = IRQ_HANDLED; 132 irqreturn_t ret = IRQ_HANDLED;
132 133
133 spin_lock_irq(&fc_pci->irq_lock); 134 spin_lock_irqsave(&fc_pci->irq_lock,flags);
134 135
135 v = fc->read_ibi_reg(fc,irq_20c); 136 v = fc->read_ibi_reg(fc,irq_20c);
136 137
@@ -194,7 +195,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
194 ret = IRQ_NONE; 195 ret = IRQ_NONE;
195 } 196 }
196 197
197 spin_unlock_irq(&fc_pci->irq_lock); 198 spin_unlock_irqrestore(&fc_pci->irq_lock,flags);
198 199
199 return ret; 200 return ret;
200} 201}
@@ -293,12 +294,12 @@ static int flexcop_pci_init(struct flexcop_pci *fc_pci)
293 } 294 }
294 295
295 pci_set_drvdata(fc_pci->pdev, fc_pci); 296 pci_set_drvdata(fc_pci->pdev, fc_pci);
296 297 spin_lock_init(&fc_pci->irq_lock);
297 if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr, 298 if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
298 IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0) 299 IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
299 goto err_pci_iounmap; 300 goto err_pci_iounmap;
300 301
301 spin_lock_init(&fc_pci->irq_lock); 302
302 303
303 fc_pci->init_state |= FC_PCI_INIT; 304 fc_pci->init_state |= FC_PCI_INIT;
304 return ret; 305 return ret;
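The flexcop-pci.c hunks go the other way from the ipoib changes above: a shared interrupt handler cannot assume anything about the IRQ state it was entered with, and spin_unlock_irq() would unconditionally re-enable interrupts, so the ISR is moved to the irqsave/irqrestore pair; the hunk also moves spin_lock_init() ahead of request_irq() so the lock exists before the first interrupt can fire. A sketch of that ordering, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t irq_lock;
};

/* Illustrative ISR: preserve whatever IRQ state we were entered with. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_dev *d = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&d->irq_lock, flags);
	/* ... read and acknowledge the device ... */
	spin_unlock_irqrestore(&d->irq_lock, flags);

	return IRQ_HANDLED;
}

static int example_init(struct example_dev *d, int irq)
{
	spin_lock_init(&d->irq_lock);	/* initialise before the IRQ goes live */
	return request_irq(irq, example_isr, IRQF_SHARED, "example", d);
}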
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index dd66b60fbc98..cfd6fb729a61 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -7,7 +7,7 @@ config DVB_BT8XX
7 select DVB_CX24110 if !DVB_FE_CUSTOMISE 7 select DVB_CX24110 if !DVB_FE_CUSTOMISE
8 select DVB_OR51211 if !DVB_FE_CUSTOMISE 8 select DVB_OR51211 if !DVB_FE_CUSTOMISE
9 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 9 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
10 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE 10 select DVB_PLL
11 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 11 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
12 select FW_LOADER 12 select FW_LOADER
13 help 13 help
diff --git a/drivers/media/dvb/bt8xx/bt878.c b/drivers/media/dvb/bt8xx/bt878.c
index 83b090ef2445..df72b4b8ee10 100644
--- a/drivers/media/dvb/bt8xx/bt878.c
+++ b/drivers/media/dvb/bt8xx/bt878.c
@@ -393,9 +393,7 @@ static struct cards card_list[] __devinitdata = {
393 { 0xdb1118ac, BTTV_BOARD_DVICO_DVBT_LITE, "Ultraview DVB-T Lite" }, 393 { 0xdb1118ac, BTTV_BOARD_DVICO_DVBT_LITE, "Ultraview DVB-T Lite" },
394 { 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" }, 394 { 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" },
395 { 0x20007063, BTTV_BOARD_PC_HDTV, "pcHDTV HD-2000 TV" }, 395 { 0x20007063, BTTV_BOARD_PC_HDTV, "pcHDTV HD-2000 TV" },
396 { 0x00261822, BTTV_BOARD_TWINHAN_DST, "DNTV Live! Mini" }, 396 { 0x00261822, BTTV_BOARD_TWINHAN_DST, "DNTV Live! Mini" }
397
398 { 0, -1, NULL }
399}; 397};
400 398
401 399
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 58f69f6ae391..4f1c09bee538 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -610,7 +610,8 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
610 lgdt330x_reset(card); 610 lgdt330x_reset(card);
611 card->fe = dvb_attach(lgdt330x_attach, &tdvs_tua6034_config, card->i2c_adapter); 611 card->fe = dvb_attach(lgdt330x_attach, &tdvs_tua6034_config, card->i2c_adapter);
612 if (card->fe != NULL) { 612 if (card->fe != NULL) {
613 dvb_attach(lgh06xf_attach, card->fe, card->i2c_adapter); 613 dvb_attach(dvb_pll_attach, card->fe, 0x61,
614 card->i2c_adapter, &dvb_pll_lg_tdvs_h06xf);
614 dprintk ("dvb_bt8xx: lgdt330x detected\n"); 615 dprintk ("dvb_bt8xx: lgdt330x detected\n");
615 } 616 }
616 break; 617 break;
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.h b/drivers/media/dvb/bt8xx/dvb-bt8xx.h
index e75f4173c059..436880e68672 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.h
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.h
@@ -37,8 +37,8 @@
37#include "cx24110.h" 37#include "cx24110.h"
38#include "or51211.h" 38#include "or51211.h"
39#include "lgdt330x.h" 39#include "lgdt330x.h"
40#include "lgh06xf.h"
41#include "zl10353.h" 40#include "zl10353.h"
41#include "dvb-pll.h"
42 42
43struct dvb_bt8xx_card { 43struct dvb_bt8xx_card {
44 struct mutex lock; 44 struct mutex lock;
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index a5c0e1a3e6d1..275df65fde99 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -132,6 +132,11 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
132 if (mutex_lock_interruptible(&dmxdev->mutex)) 132 if (mutex_lock_interruptible(&dmxdev->mutex))
133 return -ERESTARTSYS; 133 return -ERESTARTSYS;
134 134
135 if (dmxdev->exit) {
136 mutex_unlock(&dmxdev->mutex);
137 return -ENODEV;
138 }
139
135 if ((file->f_flags & O_ACCMODE) == O_RDWR) { 140 if ((file->f_flags & O_ACCMODE) == O_RDWR) {
136 if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { 141 if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
137 mutex_unlock(&dmxdev->mutex); 142 mutex_unlock(&dmxdev->mutex);
@@ -171,6 +176,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
171 dmxdev->demux->disconnect_frontend(dmxdev->demux); 176 dmxdev->demux->disconnect_frontend(dmxdev->demux);
172 dmxdev->demux->connect_frontend(dmxdev->demux, front); 177 dmxdev->demux->connect_frontend(dmxdev->demux, front);
173 } 178 }
179 dvbdev->users++;
174 mutex_unlock(&dmxdev->mutex); 180 mutex_unlock(&dmxdev->mutex);
175 return 0; 181 return 0;
176} 182}
@@ -198,7 +204,16 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
198 vfree(mem); 204 vfree(mem);
199 } 205 }
200 } 206 }
201 mutex_unlock(&dmxdev->mutex); 207 /* TODO */
208 dvbdev->users--;
209 if(dvbdev->users==-1 && dmxdev->exit==1) {
210 fops_put(file->f_op);
211 file->f_op = NULL;
212 mutex_unlock(&dmxdev->mutex);
213 wake_up(&dvbdev->wait_queue);
214 } else
215 mutex_unlock(&dmxdev->mutex);
216
202 return 0; 217 return 0;
203} 218}
204 219
@@ -215,6 +230,11 @@ static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
215 return -EINVAL; 230 return -EINVAL;
216 if (mutex_lock_interruptible(&dmxdev->mutex)) 231 if (mutex_lock_interruptible(&dmxdev->mutex))
217 return -ERESTARTSYS; 232 return -ERESTARTSYS;
233
234 if (dmxdev->exit) {
235 mutex_unlock(&dmxdev->mutex);
236 return -ENODEV;
237 }
218 ret = dmxdev->demux->write(dmxdev->demux, buf, count); 238 ret = dmxdev->demux->write(dmxdev->demux, buf, count);
219 mutex_unlock(&dmxdev->mutex); 239 mutex_unlock(&dmxdev->mutex);
220 return ret; 240 return ret;
@@ -227,6 +247,11 @@ static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
227 struct dmxdev *dmxdev = dvbdev->priv; 247 struct dmxdev *dmxdev = dvbdev->priv;
228 int ret; 248 int ret;
229 249
250 if (dmxdev->exit) {
251 mutex_unlock(&dmxdev->mutex);
252 return -ENODEV;
253 }
254
230 //mutex_lock(&dmxdev->mutex); 255 //mutex_lock(&dmxdev->mutex);
231 ret = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer, 256 ret = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
232 file->f_flags & O_NONBLOCK, 257 file->f_flags & O_NONBLOCK,
@@ -665,6 +690,8 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
665 dmxdevfilter->feed.ts = NULL; 690 dmxdevfilter->feed.ts = NULL;
666 init_timer(&dmxdevfilter->timer); 691 init_timer(&dmxdevfilter->timer);
667 692
693 dvbdev->users++;
694
668 mutex_unlock(&dmxdev->mutex); 695 mutex_unlock(&dmxdev->mutex);
669 return 0; 696 return 0;
670} 697}
@@ -943,7 +970,21 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
943 struct dmxdev_filter *dmxdevfilter = file->private_data; 970 struct dmxdev_filter *dmxdevfilter = file->private_data;
944 struct dmxdev *dmxdev = dmxdevfilter->dev; 971 struct dmxdev *dmxdev = dmxdevfilter->dev;
945 972
946 return dvb_dmxdev_filter_free(dmxdev, dmxdevfilter); 973 int ret;
974
975 ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
976
977 mutex_lock(&dmxdev->mutex);
978 dmxdev->dvbdev->users--;
979 if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
980 fops_put(file->f_op);
981 file->f_op = NULL;
982 mutex_unlock(&dmxdev->mutex);
983 wake_up(&dmxdev->dvbdev->wait_queue);
984 } else
985 mutex_unlock(&dmxdev->mutex);
986
987 return ret;
947} 988}
948 989
949static struct file_operations dvb_demux_fops = { 990static struct file_operations dvb_demux_fops = {
@@ -1027,6 +1068,7 @@ static struct file_operations dvb_dvr_fops = {
1027static struct dvb_device dvbdev_dvr = { 1068static struct dvb_device dvbdev_dvr = {
1028 .priv = NULL, 1069 .priv = NULL,
1029 .readers = 1, 1070 .readers = 1,
1071 .users = 1,
1030 .fops = &dvb_dvr_fops 1072 .fops = &dvb_dvr_fops
1031}; 1073};
1032 1074
@@ -1064,6 +1106,16 @@ EXPORT_SYMBOL(dvb_dmxdev_init);
1064 1106
1065void dvb_dmxdev_release(struct dmxdev *dmxdev) 1107void dvb_dmxdev_release(struct dmxdev *dmxdev)
1066{ 1108{
1109 dmxdev->exit=1;
1110 if (dmxdev->dvbdev->users > 1) {
1111 wait_event(dmxdev->dvbdev->wait_queue,
1112 dmxdev->dvbdev->users==1);
1113 }
1114 if (dmxdev->dvr_dvbdev->users > 1) {
1115 wait_event(dmxdev->dvr_dvbdev->wait_queue,
1116 dmxdev->dvr_dvbdev->users==1);
1117 }
1118
1067 dvb_unregister_device(dmxdev->dvbdev); 1119 dvb_unregister_device(dmxdev->dvbdev);
1068 dvb_unregister_device(dmxdev->dvr_dvbdev); 1120 dvb_unregister_device(dmxdev->dvr_dvbdev);
1069 1121
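The dmxdev.c hunks introduce the open-count/exit-flag/wait-queue handshake used throughout this series: open() bumps dvbdev->users, release() drops it and wakes the wait queue once teardown has started, and dvb_dmxdev_release() sets the exit flag and sleeps until the last user is gone before unregistering. The demux node counts users from 1 (and the dvr/frontend nodes from -1), but the shape is easier to see with a plain zero-based count; the sketch below is a simplification with hypothetical names, not the dvb-core API:

#include <linux/sched.h>
#include <linux/wait.h>

struct example_node {
	int users;			/* open file handles (locking omitted here) */
	unsigned int exit:1;		/* set once teardown has started */
	wait_queue_head_t wait_queue;
};

/* Teardown side: flag the exit, then wait for the last open handle. */
static void example_release_node(struct example_node *n)
{
	n->exit = 1;
	wait_event(n->wait_queue, n->users == 0);
	/* safe to unregister and free the node here */
}

/* File-release side: drop our reference and wake a sleeping teardown. */
static void example_file_release(struct example_node *n)
{
	n->users--;
	if (n->users == 0 && n->exit)
		wake_up(&n->wait_queue);
}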
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index d2bee9ffe43c..29746e70d325 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -91,6 +91,8 @@ struct dmxdev {
91 91
92 int filternum; 92 int filternum;
93 int capabilities; 93 int capabilities;
94
95 unsigned int exit:1;
94#define DMXDEV_CAP_DUPLEX 1 96#define DMXDEV_CAP_DUPLEX 1
95 struct dmx_frontend *dvr_orig_fe; 97 struct dmx_frontend *dvr_orig_fe;
96 98
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index a21a894d3f98..f4e4ca2dcade 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -606,6 +606,7 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
606 return; 606 return;
607 607
608 kthread_stop(fepriv->thread); 608 kthread_stop(fepriv->thread);
609
609 init_MUTEX (&fepriv->sem); 610 init_MUTEX (&fepriv->sem);
610 fepriv->state = FESTATE_IDLE; 611 fepriv->state = FESTATE_IDLE;
611 612
@@ -1023,6 +1024,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
1023 struct dvb_device *dvbdev = file->private_data; 1024 struct dvb_device *dvbdev = file->private_data;
1024 struct dvb_frontend *fe = dvbdev->priv; 1025 struct dvb_frontend *fe = dvbdev->priv;
1025 struct dvb_frontend_private *fepriv = fe->frontend_priv; 1026 struct dvb_frontend_private *fepriv = fe->frontend_priv;
1027 int ret;
1026 1028
1027 dprintk ("%s\n", __FUNCTION__); 1029 dprintk ("%s\n", __FUNCTION__);
1028 1030
@@ -1032,7 +1034,14 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
1032 if (fe->ops.ts_bus_ctrl) 1034 if (fe->ops.ts_bus_ctrl)
1033 fe->ops.ts_bus_ctrl (fe, 0); 1035 fe->ops.ts_bus_ctrl (fe, 0);
1034 1036
1035 return dvb_generic_release (inode, file); 1037 ret = dvb_generic_release (inode, file);
1038
1039 if (dvbdev->users==-1 && fepriv->exit==1) {
1040 fops_put(file->f_op);
1041 file->f_op = NULL;
1042 wake_up(&dvbdev->wait_queue);
1043 }
1044 return ret;
1036} 1045}
1037 1046
1038static struct file_operations dvb_frontend_fops = { 1047static struct file_operations dvb_frontend_fops = {
@@ -1092,8 +1101,15 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
1092 dprintk ("%s\n", __FUNCTION__); 1101 dprintk ("%s\n", __FUNCTION__);
1093 1102
1094 mutex_lock(&frontend_mutex); 1103 mutex_lock(&frontend_mutex);
1095 dvb_unregister_device (fepriv->dvbdev);
1096 dvb_frontend_stop (fe); 1104 dvb_frontend_stop (fe);
1105 mutex_unlock(&frontend_mutex);
1106
1107 if (fepriv->dvbdev->users < -1)
1108 wait_event(fepriv->dvbdev->wait_queue,
1109 fepriv->dvbdev->users==-1);
1110
1111 mutex_lock(&frontend_mutex);
1112 dvb_unregister_device (fepriv->dvbdev);
1097 1113
1098 /* fe is invalid now */ 1114 /* fe is invalid now */
1099 kfree(fepriv); 1115 kfree(fepriv);
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 76e9c36597eb..4ebf33a5ffa2 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -174,7 +174,7 @@ static unsigned short dvb_net_eth_type_trans(struct sk_buff *skb,
174 struct ethhdr *eth; 174 struct ethhdr *eth;
175 unsigned char *rawp; 175 unsigned char *rawp;
176 176
177 skb->mac.raw=skb->data; 177 skb_reset_mac_header(skb);
178 skb_pull(skb,dev->hard_header_len); 178 skb_pull(skb,dev->hard_header_len);
179 eth = eth_hdr(skb); 179 eth = eth_hdr(skb);
180 180
@@ -600,6 +600,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
600 /* Check CRC32, we've got it in our skb already. */ 600 /* Check CRC32, we've got it in our skb already. */
601 unsigned short ulen = htons(priv->ule_sndu_len); 601 unsigned short ulen = htons(priv->ule_sndu_len);
602 unsigned short utype = htons(priv->ule_sndu_type); 602 unsigned short utype = htons(priv->ule_sndu_type);
603 const u8 *tail;
603 struct kvec iov[3] = { 604 struct kvec iov[3] = {
604 { &ulen, sizeof ulen }, 605 { &ulen, sizeof ulen },
605 { &utype, sizeof utype }, 606 { &utype, sizeof utype },
@@ -613,10 +614,11 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
613 } 614 }
614 615
615 ule_crc = iov_crc32(ule_crc, iov, 3); 616 ule_crc = iov_crc32(ule_crc, iov, 3);
616 expected_crc = *((u8 *)priv->ule_skb->tail - 4) << 24 | 617 tail = skb_tail_pointer(priv->ule_skb);
617 *((u8 *)priv->ule_skb->tail - 3) << 16 | 618 expected_crc = *(tail - 4) << 24 |
618 *((u8 *)priv->ule_skb->tail - 2) << 8 | 619 *(tail - 3) << 16 |
619 *((u8 *)priv->ule_skb->tail - 1); 620 *(tail - 2) << 8 |
621 *(tail - 1);
620 if (ule_crc != expected_crc) { 622 if (ule_crc != expected_crc) {
621 printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", 623 printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
622 priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0); 624 priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);
@@ -695,7 +697,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
695 } 697 }
696 else 698 else
697 { 699 {
698 memcpy(dest_addr, priv->ule_skb->data, ETH_ALEN); 700 skb_copy_from_linear_data(priv->ule_skb,
701 dest_addr,
702 ETH_ALEN);
699 skb_pull(priv->ule_skb, ETH_ALEN); 703 skb_pull(priv->ule_skb, ETH_ALEN);
700 } 704 }
701 } 705 }
@@ -1435,11 +1439,36 @@ static int dvb_net_ioctl(struct inode *inode, struct file *file,
1435 return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl); 1439 return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl);
1436} 1440}
1437 1441
1442static int dvb_net_close(struct inode *inode, struct file *file)
1443{
1444 struct dvb_device *dvbdev = file->private_data;
1445 struct dvb_net *dvbnet = dvbdev->priv;
1446
1447 if (!dvbdev)
1448 return -ENODEV;
1449
1450 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
1451 dvbdev->readers++;
1452 } else {
1453 dvbdev->writers++;
1454 }
1455
1456 dvbdev->users++;
1457
1458 if(dvbdev->users == 1 && dvbnet->exit==1) {
1459 fops_put(file->f_op);
1460 file->f_op = NULL;
1461 wake_up(&dvbdev->wait_queue);
1462 }
1463 return 0;
1464}
1465
1466
1438static struct file_operations dvb_net_fops = { 1467static struct file_operations dvb_net_fops = {
1439 .owner = THIS_MODULE, 1468 .owner = THIS_MODULE,
1440 .ioctl = dvb_net_ioctl, 1469 .ioctl = dvb_net_ioctl,
1441 .open = dvb_generic_open, 1470 .open = dvb_generic_open,
1442 .release = dvb_generic_release, 1471 .release = dvb_net_close,
1443}; 1472};
1444 1473
1445static struct dvb_device dvbdev_net = { 1474static struct dvb_device dvbdev_net = {
@@ -1454,6 +1483,11 @@ void dvb_net_release (struct dvb_net *dvbnet)
1454{ 1483{
1455 int i; 1484 int i;
1456 1485
1486 dvbnet->exit = 1;
1487 if (dvbnet->dvbdev->users < 1)
1488 wait_event(dvbnet->dvbdev->wait_queue,
1489 dvbnet->dvbdev->users==1);
1490
1457 dvb_unregister_device(dvbnet->dvbdev); 1491 dvb_unregister_device(dvbnet->dvbdev);
1458 1492
1459 for (i=0; i<DVB_NET_DEVICES_MAX; i++) { 1493 for (i=0; i<DVB_NET_DEVICES_MAX; i++) {
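The dvb_net.c hunks above add a shutdown handshake between the file operations and dvb_net_release(): release marks the device as exiting and sleeps on the new wait_queue until users climbs back to 1 (its idle value, each open decrementing it as dvb_generic_open() does), while the new dvb_net_close() increments users and wakes the waiter once the last handle is gone. Below is a minimal userspace sketch of that handshake, using a pthread mutex/condvar as a stand-in for wait_event()/wake_up(); the fake_* names and the main() harness are illustrative only, not dvb-core code.

#include <pthread.h>
#include <stdio.h>

struct fake_dvbdev {
	pthread_mutex_t lock;
	pthread_cond_t  wait_queue;   /* plays the role of dvbdev->wait_queue */
	int users;                    /* 1 = idle; each open decrements       */
	int exiting;                  /* plays the role of dvbnet->exit       */
};

static void fake_open(struct fake_dvbdev *d)
{
	pthread_mutex_lock(&d->lock);
	d->users--;                              /* like dvb_generic_open()   */
	pthread_mutex_unlock(&d->lock);
}

static void fake_close(struct fake_dvbdev *d)
{
	pthread_mutex_lock(&d->lock);
	d->users++;                              /* like dvb_net_close()      */
	if (d->users == 1 && d->exiting)
		pthread_cond_signal(&d->wait_queue); /* wake_up()                 */
	pthread_mutex_unlock(&d->lock);
}

static void fake_release(struct fake_dvbdev *d)
{
	pthread_mutex_lock(&d->lock);
	d->exiting = 1;                          /* dvbnet->exit = 1          */
	while (d->users < 1)                     /* wait_event(..., users==1) */
		pthread_cond_wait(&d->wait_queue, &d->lock);
	pthread_mutex_unlock(&d->lock);
	printf("all users gone, safe to unregister\n");
}

int main(void)
{
	struct fake_dvbdev d = {
		.lock       = PTHREAD_MUTEX_INITIALIZER,
		.wait_queue = PTHREAD_COND_INITIALIZER,
		.users      = 1,
		.exiting    = 0,
	};

	fake_open(&d);
	fake_close(&d);    /* normally happens from another thread        */
	fake_release(&d);  /* returns at once here, since users == 1 again */
	return 0;
}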
diff --git a/drivers/media/dvb/dvb-core/dvb_net.h b/drivers/media/dvb/dvb-core/dvb_net.h
index f14e4ca38570..3a3126cae03b 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.h
+++ b/drivers/media/dvb/dvb-core/dvb_net.h
@@ -36,6 +36,7 @@ struct dvb_net {
36 struct dvb_device *dvbdev; 36 struct dvb_device *dvbdev;
37 struct net_device *device[DVB_NET_DEVICES_MAX]; 37 struct net_device *device[DVB_NET_DEVICES_MAX];
38 int state[DVB_NET_DEVICES_MAX]; 38 int state[DVB_NET_DEVICES_MAX];
39 unsigned int exit:1;
39 struct dmx_demux *demux; 40 struct dmx_demux *demux;
40}; 41};
41 42
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 14a372a0fe8b..e23d8a0ea1d3 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -233,6 +233,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
233 dvbdev->adapter = adap; 233 dvbdev->adapter = adap;
234 dvbdev->priv = priv; 234 dvbdev->priv = priv;
235 dvbdev->fops = dvbdevfops; 235 dvbdev->fops = dvbdevfops;
236 init_waitqueue_head (&dvbdev->wait_queue);
236 237
237 memcpy(dvbdev->fops, template->fops, sizeof(struct file_operations)); 238 memcpy(dvbdev->fops, template->fops, sizeof(struct file_operations));
238 dvbdev->fops->owner = adap->module; 239 dvbdev->fops->owner = adap->module;
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 620e7887b3d3..6dff10ebf470 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -69,6 +69,7 @@ struct dvb_device {
69 int writers; 69 int writers;
70 int users; 70 int users;
71 71
72 wait_queue_head_t wait_queue;
72 /* don't really need those !? -- FIXME: use video_usercopy */ 73 /* don't really need those !? -- FIXME: use video_usercopy */
73 int (*kernel_ioctl)(struct inode *inode, struct file *file, 74 int (*kernel_ioctl)(struct inode *inode, struct file *file,
74 unsigned int cmd, void *arg); 75 unsigned int cmd, void *arg);
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 80f67a51b908..54488737a08f 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -33,6 +33,7 @@ config DVB_USB_A800
33config DVB_USB_DIBUSB_MB 33config DVB_USB_DIBUSB_MB
34 tristate "DiBcom USB DVB-T devices (based on the DiB3000M-B) (see help for device list)" 34 tristate "DiBcom USB DVB-T devices (based on the DiB3000M-B) (see help for device list)"
35 depends on DVB_USB 35 depends on DVB_USB
36 select DVB_PLL
36 select DVB_DIB3000MB 37 select DVB_DIB3000MB
37 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 38 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
38 help 39 help
@@ -88,6 +89,7 @@ config DVB_USB_DIB0700
88config DVB_USB_UMT_010 89config DVB_USB_UMT_010
89 tristate "HanfTek UMT-010 DVB-T USB2.0 support" 90 tristate "HanfTek UMT-010 DVB-T USB2.0 support"
90 depends on DVB_USB 91 depends on DVB_USB
92 select DVB_PLL
91 select DVB_DIB3000MC 93 select DVB_DIB3000MC
92 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 94 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
93 help 95 help
@@ -96,9 +98,9 @@ config DVB_USB_UMT_010
96config DVB_USB_CXUSB 98config DVB_USB_CXUSB
97 tristate "Conexant USB2.0 hybrid reference design support" 99 tristate "Conexant USB2.0 hybrid reference design support"
98 depends on DVB_USB 100 depends on DVB_USB
101 select DVB_PLL
99 select DVB_CX22702 if !DVB_FE_CUSTOMISE 102 select DVB_CX22702 if !DVB_FE_CUSTOMISE
100 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 103 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
101 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
102 select DVB_MT352 if !DVB_FE_CUSTOMISE 104 select DVB_MT352 if !DVB_FE_CUSTOMISE
103 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 105 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
104 help 106 help
@@ -140,6 +142,7 @@ config DVB_USB_AU6610
140config DVB_USB_DIGITV 142config DVB_USB_DIGITV
141 tristate "Nebula Electronics uDigiTV DVB-T USB2.0 support" 143 tristate "Nebula Electronics uDigiTV DVB-T USB2.0 support"
142 depends on DVB_USB 144 depends on DVB_USB
145 select DVB_PLL
143 select DVB_NXT6000 if !DVB_FE_CUSTOMISE 146 select DVB_NXT6000 if !DVB_FE_CUSTOMISE
144 select DVB_MT352 if !DVB_FE_CUSTOMISE 147 select DVB_MT352 if !DVB_FE_CUSTOMISE
145 help 148 help
@@ -208,3 +211,10 @@ config DVB_USB_DTT200U
208 The receivers are also known as DTT200U (Yakumo) and UB300 (Yuan). 211 The receivers are also known as DTT200U (Yakumo) and UB300 (Yuan).
209 212
210 The WT-220U and its clones are pen-sized. 213 The WT-220U and its clones are pen-sized.
214
215config DVB_USB_OPERA1
216 tristate "Opera1 DVB-S USB2.0 receiver"
217 depends on DVB_USB
218 select DVB_STV0299 if !DVB_FE_CUSTOMISE
219 help
220 Say Y here to support the Opera DVB-S USB2.0 receiver.
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 40f28f559b54..976f840cc904 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -51,4 +51,8 @@ obj-$(CONFIG_DVB_USB_TTUSB2) += dvb-usb-ttusb2.o
51dvb-usb-dib0700-objs = dib0700_core.o dib0700_devices.o 51dvb-usb-dib0700-objs = dib0700_core.o dib0700_devices.o
52obj-$(CONFIG_DVB_USB_DIB0700) += dvb-usb-dib0700.o 52obj-$(CONFIG_DVB_USB_DIB0700) += dvb-usb-dib0700.o
53 53
54dvb-usb-opera-objs = opera1.o
55obj-$(CONFIG_DVB_USB_OPERA1) += dvb-usb-opera.o
56
57
54EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ 58EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/dvb-usb/au6610.c b/drivers/media/dvb/dvb-usb/au6610.c
index 0dc66a8d2baf..18e0b16fb2a9 100644
--- a/drivers/media/dvb/dvb-usb/au6610.c
+++ b/drivers/media/dvb/dvb-usb/au6610.c
@@ -40,7 +40,7 @@ static int au6610_usb_msg(struct dvb_usb_device *d, u8 operation, u8 addr,
40 } 40 }
41 41
42 ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation, 42 ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation,
43 USB_TYPE_VENDOR|USB_DIR_IN, addr, index, usb_buf, 43 USB_TYPE_VENDOR|USB_DIR_IN, addr << 1, index, usb_buf,
44 sizeof(usb_buf), AU6610_USB_TIMEOUT); 44 sizeof(usb_buf), AU6610_USB_TIMEOUT);
45 45
46 if (ret < 0) 46 if (ret < 0)
@@ -124,7 +124,7 @@ static int au6610_identify_state(struct usb_device *udev,
124} 124}
125 125
126static struct zl10353_config au6610_zl10353_config = { 126static struct zl10353_config au6610_zl10353_config = {
127 .demod_address = 0x1e, 127 .demod_address = 0x0f,
128 .no_tuner = 1, 128 .no_tuner = 1,
129 .parallel_ts = 1, 129 .parallel_ts = 1,
130}; 130};
@@ -140,7 +140,7 @@ static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap)
140} 140}
141 141
142static struct qt1010_config au6610_qt1010_config = { 142static struct qt1010_config au6610_qt1010_config = {
143 .i2c_address = 0xc4 143 .i2c_address = 0x62
144}; 144};
145 145
146static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap) 146static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 127a94b9a1b5..bac2ae3b4a1f 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -27,7 +27,6 @@
27 27
28#include "cx22702.h" 28#include "cx22702.h"
29#include "lgdt330x.h" 29#include "lgdt330x.h"
30#include "lgh06xf.h"
31#include "mt352.h" 30#include "mt352.h"
32#include "mt352_priv.h" 31#include "mt352_priv.h"
33#include "zl10353.h" 32#include "zl10353.h"
@@ -388,7 +387,8 @@ static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap)
388 387
389static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap) 388static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap)
390{ 389{
391 dvb_attach(lgh06xf_attach, adap->fe, &adap->dev->i2c_adap); 390 dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
391 &dvb_pll_lg_tdvs_h06xf);
392 return 0; 392 return 0;
393} 393}
394 394
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 6a4d150784a6..dddf164f269a 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -56,10 +56,6 @@ static int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u
56 if (txlen > 3) 56 if (txlen > 3)
57 index |= tx[3]; 57 index |= tx[3];
58 58
59 /* think about swapping here */
60 value = le16_to_cpu(value);
61 index = le16_to_cpu(index);
62
63 status = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), tx[0], 59 status = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), tx[0],
64 USB_TYPE_VENDOR | USB_DIR_IN, value, index, rx, rxlen, 60 USB_TYPE_VENDOR | USB_DIR_IN, value, index, rx, rxlen,
65 USB_CTRL_GET_TIMEOUT); 61 USB_CTRL_GET_TIMEOUT);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 148386aba275..97715f7514d6 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -13,6 +13,7 @@
13#define USB_VID_ADSTECH 0x06e1 13#define USB_VID_ADSTECH 0x06e1
14#define USB_VID_ALCOR_MICRO 0x058f 14#define USB_VID_ALCOR_MICRO 0x058f
15#define USB_VID_ANCHOR 0x0547 15#define USB_VID_ANCHOR 0x0547
16#define USB_VID_ANUBIS_ELECTRONIC 0x10fd
16#define USB_VID_AVERMEDIA 0x07ca 17#define USB_VID_AVERMEDIA 0x07ca
17#define USB_VID_COMPRO 0x185b 18#define USB_VID_COMPRO 0x185b
18#define USB_VID_COMPRO_UNK 0x145f 19#define USB_VID_COMPRO_UNK 0x145f
@@ -31,6 +32,7 @@
31#define USB_VID_LITEON 0x04ca 32#define USB_VID_LITEON 0x04ca
32#define USB_VID_MEDION 0x1660 33#define USB_VID_MEDION 0x1660
33#define USB_VID_MSI 0x0db0 34#define USB_VID_MSI 0x0db0
35#define USB_VID_OPERA1 0x695c
34#define USB_VID_PINNACLE 0x2304 36#define USB_VID_PINNACLE 0x2304
35#define USB_VID_VISIONPLUS 0x13d3 37#define USB_VID_VISIONPLUS 0x13d3
36#define USB_VID_TWINHAN 0x1822 38#define USB_VID_TWINHAN 0x1822
@@ -127,6 +129,7 @@
127#define USB_PID_KYE_DVB_T_WARM 0x701f 129#define USB_PID_KYE_DVB_T_WARM 0x701f
128#define USB_PID_PCTV_200E 0x020e 130#define USB_PID_PCTV_200E 0x020e
129#define USB_PID_PCTV_400E 0x020f 131#define USB_PID_PCTV_400E 0x020f
132#define USB_PID_PCTV_450E 0x0222
130#define USB_PID_LITEON_DVB_T_COLD 0xf000 133#define USB_PID_LITEON_DVB_T_COLD 0xf000
131#define USB_PID_LITEON_DVB_T_WARM 0xf001 134#define USB_PID_LITEON_DVB_T_WARM 0xf001
132#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360 135#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360
@@ -139,6 +142,9 @@
139#define USB_PID_GENPIX_8PSK_COLD 0x0200 142#define USB_PID_GENPIX_8PSK_COLD 0x0200
140#define USB_PID_GENPIX_8PSK_WARM 0x0201 143#define USB_PID_GENPIX_8PSK_WARM 0x0201
141#define USB_PID_SIGMATEK_DVB_110 0x6610 144#define USB_PID_SIGMATEK_DVB_110 0x6610
145#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
146#define USB_PID_OPERA1_COLD 0x2830
147#define USB_PID_OPERA1_WARM 0x3829
142 148
143 149
144#endif 150#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 9511a31c8f50..68ed3a788083 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -107,8 +107,6 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
107 return -ENOMEM; 107 return -ENOMEM;
108 108
109 input_dev->evbit[0] = BIT(EV_KEY); 109 input_dev->evbit[0] = BIT(EV_KEY);
110 input_dev->keycodesize = sizeof(unsigned char);
111 input_dev->keycodemax = KEY_MAX;
112 input_dev->name = "IR-receiver inside an USB DVB receiver"; 110 input_dev->name = "IR-receiver inside an USB DVB receiver";
113 input_dev->phys = d->rc_phys; 111 input_dev->phys = d->rc_phys;
114 usb_to_input_id(d->udev, &input_dev->id); 112 usb_to_input_id(d->udev, &input_dev->id);
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index c9f38a5e70d3..e0587e663591 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -12,7 +12,7 @@
12#include "qt1010.h" 12#include "qt1010.h"
13 13
14/* debug */ 14/* debug */
15int dvb_usb_gl861_debug; 15static int dvb_usb_gl861_debug;
16module_param_named(debug,dvb_usb_gl861_debug, int, 0644); 16module_param_named(debug,dvb_usb_gl861_debug, int, 0644);
17MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS); 17MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS);
18 18
@@ -20,7 +20,7 @@ static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
20 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) 20 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
21{ 21{
22 u16 index; 22 u16 index;
23 u16 value = addr << 8; 23 u16 value = addr << (8 + 1);
24 int wo = (rbuf == NULL || rlen == 0); /* write-only */ 24 int wo = (rbuf == NULL || rlen == 0); /* write-only */
25 u8 req, type; 25 u8 req, type;
26 26
@@ -101,7 +101,7 @@ static int gl861_identify_state(struct usb_device *udev,
101} 101}
102 102
103static struct zl10353_config gl861_zl10353_config = { 103static struct zl10353_config gl861_zl10353_config = {
104 .demod_address = 0x1e, 104 .demod_address = 0x0f,
105 .no_tuner = 1, 105 .no_tuner = 1,
106 .parallel_ts = 1, 106 .parallel_ts = 1,
107}; 107};
@@ -117,7 +117,7 @@ static int gl861_frontend_attach(struct dvb_usb_adapter *adap)
117} 117}
118 118
119static struct qt1010_config gl861_qt1010_config = { 119static struct qt1010_config gl861_qt1010_config = {
120 .i2c_address = 0xc4 120 .i2c_address = 0x62
121}; 121};
122 122
123static int gl861_tuner_attach(struct dvb_usb_adapter *adap) 123static int gl861_tuner_attach(struct dvb_usb_adapter *adap)
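A pattern worth noting in the au6610.c and gl861.c hunks above (and again in the megasky/m920x changes below): the demodulator and tuner addresses change from 8-bit bus values to their 7-bit equivalents, with the drivers now shifting the address left themselves when building the USB request (addr << 1, addr << (8 + 1)). The arithmetic behind the new constants, as a purely illustrative check:

/* 8-bit -> 7-bit I2C address conversion used by the hunks above. */
#include <stdio.h>

int main(void)
{
	printf("0x1e >> 1 = 0x%02x\n", 0x1e >> 1);  /* 0x0f, zl10353/mt352 demod */
	printf("0xc4 >> 1 = 0x%02x\n", 0xc4 >> 1);  /* 0x62, qt1010 tuner        */
	return 0;
}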
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index d48b24d9abf4..45d7bc214c18 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -14,6 +14,8 @@
14#include "mt352.h" 14#include "mt352.h"
15#include "mt352_priv.h" 15#include "mt352_priv.h"
16#include "qt1010.h" 16#include "qt1010.h"
17#include "tda1004x.h"
18#include "tda827x.h"
17 19
18/* debug */ 20/* debug */
19static int dvb_usb_m920x_debug; 21static int dvb_usb_m920x_debug;
@@ -47,11 +49,15 @@ static inline int m9206_read(struct usb_device *udev, u8 request, u16 value,\
47 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 49 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
48 request, USB_TYPE_VENDOR | USB_DIR_IN, 50 request, USB_TYPE_VENDOR | USB_DIR_IN,
49 value, index, data, size, 2000); 51 value, index, data, size, 2000);
50 if (ret < 0) 52 if (ret < 0) {
53 printk(KERN_INFO "m920x_read = error: %d\n", ret);
51 return ret; 54 return ret;
55 }
52 56
53 if (ret != size) 57 if (ret != size) {
58 deb_rc("m920x_read = no data\n");
54 return -EIO; 59 return -EIO;
60 }
55 61
56 return 0; 62 return 0;
57} 63}
@@ -64,19 +70,22 @@ static inline int m9206_write(struct usb_device *udev, u8 request,
64 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 70 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
65 request, USB_TYPE_VENDOR | USB_DIR_OUT, 71 request, USB_TYPE_VENDOR | USB_DIR_OUT,
66 value, index, NULL, 0, 2000); 72 value, index, NULL, 0, 2000);
73
67 return ret; 74 return ret;
68} 75}
69 76
70static int m9206_rc_init(struct usb_device *udev) 77static int m9206_init(struct dvb_usb_device *d)
71{ 78{
72 int ret = 0; 79 int ret = 0;
73 80
74 /* Remote controller init. */ 81 /* Remote controller init. */
75 if ((ret = m9206_write(udev, M9206_CORE, 0xa8, M9206_RC_INIT2)) != 0) 82 if (d->props.rc_query) {
76 return ret; 83 if ((ret = m9206_write(d->udev, M9206_CORE, 0xa8, M9206_RC_INIT2)) != 0)
84 return ret;
77 85
78 if ((ret = m9206_write(udev, M9206_CORE, 0x51, M9206_RC_INIT1)) != 0) 86 if ((ret = m9206_write(d->udev, M9206_CORE, 0x51, M9206_RC_INIT1)) != 0)
79 return ret; 87 return ret;
88 }
80 89
81 return ret; 90 return ret;
82} 91}
@@ -87,16 +96,15 @@ static int m9206_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
87 int i, ret = 0; 96 int i, ret = 0;
88 u8 rc_state[2]; 97 u8 rc_state[2];
89 98
90
91 if ((ret = m9206_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1)) != 0) 99 if ((ret = m9206_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1)) != 0)
92 goto unlock; 100 goto unlock;
93 101
94 if ((ret = m9206_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1)) != 0) 102 if ((ret = m9206_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1)) != 0)
95 goto unlock; 103 goto unlock;
96 104
97 for (i = 0; i < ARRAY_SIZE(megasky_rc_keys); i++) 105 for (i = 0; i < d->props.rc_key_map_size; i++)
98 if (megasky_rc_keys[i].data == rc_state[1]) { 106 if (d->props.rc_key_map[i].data == rc_state[1]) {
99 *event = megasky_rc_keys[i].event; 107 *event = d->props.rc_key_map[i].event;
100 108
101 switch(rc_state[0]) { 109 switch(rc_state[0]) {
102 case 0x80: 110 case 0x80:
@@ -137,53 +145,51 @@ static int m9206_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
137 int num) 145 int num)
138{ 146{
139 struct dvb_usb_device *d = i2c_get_adapdata(adap); 147 struct dvb_usb_device *d = i2c_get_adapdata(adap);
140 struct m9206_state *m = d->priv; 148 int i, j;
141 int i;
142 int ret = 0; 149 int ret = 0;
143 150
151 if (!num)
152 return -EINVAL;
153
144 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 154 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
145 return -EAGAIN; 155 return -EAGAIN;
146 156
147 if (num > 2)
148 return -EINVAL;
149
150 for (i = 0; i < num; i++) { 157 for (i = 0; i < num; i++) {
151 if ((ret = m9206_write(d->udev, M9206_I2C, msg[i].addr, 0x80)) != 0) 158 if (msg[i].flags & (I2C_M_NO_RD_ACK|I2C_M_IGNORE_NAK|I2C_M_TEN) ||
152 goto unlock; 159 msg[i].len == 0) {
153 160 /* For a 0 byte message, I think sending the address to index 0x80|0x40
154 if ((ret = m9206_write(d->udev, M9206_I2C, msg[i].buf[0], 0x0)) != 0) 161 * would be the correct thing to do. However, zero byte messages are
162 * only used for probing, and since we don't know how to get the slave's
163 * ack, we can't probe. */
164 ret = -ENOTSUPP;
155 goto unlock; 165 goto unlock;
156 166 }
157 if (i + 1 < num && msg[i + 1].flags & I2C_M_RD) { 167 /* Send START & address/RW bit */
158 int i2c_i; 168 if (!(msg[i].flags & I2C_M_NOSTART)) {
159 169 if ((ret = m9206_write(d->udev, M9206_I2C, (msg[i].addr<<1)|(msg[i].flags&I2C_M_RD?0x01:0), 0x80)) != 0)
160 for (i2c_i = 0; i2c_i < M9206_I2C_MAX; i2c_i++)
161 if (msg[i].addr == m->i2c_r[i2c_i].addr)
162 break;
163
164 if (i2c_i >= M9206_I2C_MAX) {
165 deb_rc("No magic for i2c addr!\n");
166 ret = -EINVAL;
167 goto unlock; 170 goto unlock;
171 /* Should check for ack here, if we knew how. */
172 }
173 if (msg[i].flags & I2C_M_RD) {
174 for (j = 0; j < msg[i].len; j++) {
175 /* Last byte of transaction? Send STOP, otherwise send ACK. */
176 int stop = (i+1 == num && j+1 == msg[i].len)?0x40:0x01;
177 if ((ret = m9206_read(d->udev, M9206_I2C, 0x0, 0x20|stop, &msg[i].buf[j], 1)) != 0)
178 goto unlock;
168 } 179 }
169
170 if ((ret = m9206_write(d->udev, M9206_I2C, m->i2c_r[i2c_i].magic, 0x80)) != 0)
171 goto unlock;
172
173 if ((ret = m9206_read(d->udev, M9206_I2C, 0x0, 0x60, msg[i + 1].buf, msg[i + 1].len)) != 0)
174 goto unlock;
175
176 i++;
177 } else { 180 } else {
178 if (msg[i].len != 2) 181 for (j = 0; j < msg[i].len; j++) {
179 return -EINVAL; 182 /* Last byte of transaction? Then send STOP. */
180 183 int stop = (i+1 == num && j+1 == msg[i].len)?0x40:0x00;
181 if ((ret = m9206_write(d->udev, M9206_I2C, msg[i].buf[1], 0x40)) != 0) 184 if ((ret = m9206_write(d->udev, M9206_I2C, msg[i].buf[j], stop)) != 0)
182 goto unlock; 185 goto unlock;
186 /* Should check for ack here too. */
187 }
183 } 188 }
184 } 189 }
185 ret = i; 190 ret = num;
186 unlock: 191
192unlock:
187 mutex_unlock(&d->i2c_mutex); 193 mutex_unlock(&d->i2c_mutex);
188 194
189 return ret; 195 return ret;
@@ -324,6 +330,7 @@ static int m9206_firmware_download(struct usb_device *udev,
324 i += size; 330 i += size;
325 } 331 }
326 if (i != fw->size) { 332 if (i != fw->size) {
333 deb_rc("bad firmware file!\n");
327 ret = -EINVAL; 334 ret = -EINVAL;
328 goto done; 335 goto done;
329 } 336 }
@@ -342,10 +349,10 @@ static int m9206_firmware_download(struct usb_device *udev,
342} 349}
343 350
344/* Callbacks for DVB USB */ 351/* Callbacks for DVB USB */
345static int megasky_identify_state(struct usb_device *udev, 352static int m920x_identify_state(struct usb_device *udev,
346 struct dvb_usb_device_properties *props, 353 struct dvb_usb_device_properties *props,
347 struct dvb_usb_device_description **desc, 354 struct dvb_usb_device_description **desc,
348 int *cold) 355 int *cold)
349{ 356{
350 struct usb_host_interface *alt; 357 struct usb_host_interface *alt;
351 358
@@ -381,20 +388,15 @@ static int megasky_mt352_demod_init(struct dvb_frontend *fe)
381} 388}
382 389
383static struct mt352_config megasky_mt352_config = { 390static struct mt352_config megasky_mt352_config = {
384 .demod_address = 0x1e, 391 .demod_address = 0x0f,
385 .no_tuner = 1, 392 .no_tuner = 1,
386 .demod_init = megasky_mt352_demod_init, 393 .demod_init = megasky_mt352_demod_init,
387}; 394};
388 395
389static int megasky_mt352_frontend_attach(struct dvb_usb_adapter *adap) 396static int megasky_mt352_frontend_attach(struct dvb_usb_adapter *adap)
390{ 397{
391 struct m9206_state *m = adap->dev->priv;
392
393 deb_rc("megasky_frontend_attach!\n"); 398 deb_rc("megasky_frontend_attach!\n");
394 399
395 m->i2c_r[M9206_I2C_DEMOD].addr = megasky_mt352_config.demod_address;
396 m->i2c_r[M9206_I2C_DEMOD].magic = 0x1f;
397
398 if ((adap->fe = dvb_attach(mt352_attach, &megasky_mt352_config, &adap->dev->i2c_adap)) == NULL) 400 if ((adap->fe = dvb_attach(mt352_attach, &megasky_mt352_config, &adap->dev->i2c_adap)) == NULL)
399 return -EIO; 401 return -EIO;
400 402
@@ -402,16 +404,11 @@ static int megasky_mt352_frontend_attach(struct dvb_usb_adapter *adap)
402} 404}
403 405
404static struct qt1010_config megasky_qt1010_config = { 406static struct qt1010_config megasky_qt1010_config = {
405 .i2c_address = 0xc4 407 .i2c_address = 0x62
406}; 408};
407 409
408static int megasky_qt1010_tuner_attach(struct dvb_usb_adapter *adap) 410static int megasky_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
409{ 411{
410 struct m9206_state *m = adap->dev->priv;
411
412 m->i2c_r[M9206_I2C_TUNER].addr = megasky_qt1010_config.i2c_address;
413 m->i2c_r[M9206_I2C_TUNER].magic = 0xc5;
414
415 if (dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap, 412 if (dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap,
416 &megasky_qt1010_config) == NULL) 413 &megasky_qt1010_config) == NULL)
417 return -ENODEV; 414 return -ENODEV;
@@ -419,8 +416,40 @@ static int megasky_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
419 return 0; 416 return 0;
420} 417}
421 418
419static struct tda1004x_config digivox_tda10046_config = {
420 .demod_address = 0x08,
421 .invert = 0,
422 .invert_oclk = 0,
423 .ts_mode = TDA10046_TS_SERIAL,
424 .xtal_freq = TDA10046_XTAL_16M,
425 .if_freq = TDA10046_FREQ_045,
426 .agc_config = TDA10046_AGC_TDA827X,
427 .gpio_config = TDA10046_GPTRI,
428 .request_firmware = NULL,
429};
430
431static int digivox_tda10046_frontend_attach(struct dvb_usb_adapter *adap)
432{
433 deb_rc("digivox_tda10046_frontend_attach!\n");
434
435 if ((adap->fe = dvb_attach(tda10046_attach, &digivox_tda10046_config,
436 &adap->dev->i2c_adap)) == NULL)
437 return -EIO;
438
439 return 0;
440}
441
442static int digivox_tda8275_tuner_attach(struct dvb_usb_adapter *adap)
443{
444 if (dvb_attach(tda827x_attach, adap->fe, 0x60, &adap->dev->i2c_adap,
445 NULL) == NULL)
446 return -ENODEV;
447 return 0;
448}
449
422/* DVB USB Driver stuff */ 450/* DVB USB Driver stuff */
423static struct dvb_usb_device_properties megasky_properties; 451static struct dvb_usb_device_properties megasky_properties;
452static struct dvb_usb_device_properties digivox_mini_ii_properties;
424 453
425static int m920x_probe(struct usb_interface *intf, 454static int m920x_probe(struct usb_interface *intf,
426 const struct usb_device_id *id) 455 const struct usb_device_id *id)
@@ -429,30 +458,36 @@ static int m920x_probe(struct usb_interface *intf,
429 struct usb_host_interface *alt; 458 struct usb_host_interface *alt;
430 int ret; 459 int ret;
431 460
432 if ((ret = dvb_usb_device_init(intf, &megasky_properties, THIS_MODULE, &d)) == 0) { 461 deb_rc("Probed!\n");
433 deb_rc("probed!\n");
434 462
435 alt = usb_altnum_to_altsetting(intf, 1); 463 if (((ret = dvb_usb_device_init(intf, &megasky_properties, THIS_MODULE, &d)) == 0) ||
436 if (alt == NULL) { 464 ((ret = dvb_usb_device_init(intf, &digivox_mini_ii_properties, THIS_MODULE, &d)) == 0))
437 deb_rc("not alt found!\n"); 465 goto found;
438 return -ENODEV;
439 }
440 466
441 ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber, 467 return ret;
442 alt->desc.bAlternateSetting);
443 if (ret < 0)
444 return ret;
445
446 deb_rc("Changed to alternate setting!\n");
447 468
448 if ((ret = m9206_rc_init(d->udev)) != 0) 469found:
449 return ret; 470 alt = usb_altnum_to_altsetting(intf, 1);
471 if (alt == NULL) {
472 deb_rc("No alt found!\n");
473 return -ENODEV;
450 } 474 }
475
476 ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
477 alt->desc.bAlternateSetting);
478 if (ret < 0)
479 return ret;
480
481 if ((ret = m9206_init(d)) != 0)
482 return ret;
483
451 return ret; 484 return ret;
452} 485}
453 486
454static struct usb_device_id m920x_table [] = { 487static struct usb_device_id m920x_table [] = {
455 { USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580) }, 488 { USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580) },
489 { USB_DEVICE(USB_VID_ANUBIS_ELECTRONIC,
490 USB_PID_MSI_DIGI_VOX_MINI_II) },
456 { } /* Terminating entry */ 491 { } /* Terminating entry */
457}; 492};
458MODULE_DEVICE_TABLE (usb, m920x_table); 493MODULE_DEVICE_TABLE (usb, m920x_table);
@@ -471,7 +506,7 @@ static struct dvb_usb_device_properties megasky_properties = {
471 506
472 .size_of_priv = sizeof(struct m9206_state), 507 .size_of_priv = sizeof(struct m9206_state),
473 508
474 .identify_state = megasky_identify_state, 509 .identify_state = m920x_identify_state,
475 .num_adapters = 1, 510 .num_adapters = 1,
476 .adapter = {{ 511 .adapter = {{
477 .caps = DVB_USB_ADAP_HAS_PID_FILTER | 512 .caps = DVB_USB_ADAP_HAS_PID_FILTER |
@@ -502,6 +537,50 @@ static struct dvb_usb_device_properties megasky_properties = {
502 { "MSI Mega Sky 580 DVB-T USB2.0", 537 { "MSI Mega Sky 580 DVB-T USB2.0",
503 { &m920x_table[0], NULL }, 538 { &m920x_table[0], NULL },
504 { NULL }, 539 { NULL },
540 }
541 }
542};
543
544static struct dvb_usb_device_properties digivox_mini_ii_properties = {
545 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
546
547 .usb_ctrl = DEVICE_SPECIFIC,
548 .firmware = "dvb-usb-digivox-02.fw",
549 .download_firmware = m9206_firmware_download,
550
551 .size_of_priv = sizeof(struct m9206_state),
552
553 .identify_state = m920x_identify_state,
554 .num_adapters = 1,
555 .adapter = {{
556 .caps = DVB_USB_ADAP_HAS_PID_FILTER |
557 DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
558
559 .pid_filter_count = 8,
560 .pid_filter = m9206_pid_filter,
561 .pid_filter_ctrl = m9206_pid_filter_ctrl,
562
563 .frontend_attach = digivox_tda10046_frontend_attach,
564 .tuner_attach = digivox_tda8275_tuner_attach,
565
566 .stream = {
567 .type = USB_BULK,
568 .count = 8,
569 .endpoint = 0x81,
570 .u = {
571 .bulk = {
572 .buffersize = 0x4000,
573 }
574 }
575 },
576 }},
577 .i2c_algo = &m9206_i2c_algo,
578
579 .num_device_descs = 1,
580 .devices = {
581 { "MSI DIGI VOX mini II DVB-T USB2.0",
582 { &m920x_table[1], NULL },
583 { NULL },
505 }, 584 },
506 } 585 }
507}; 586};
diff --git a/drivers/media/dvb/dvb-usb/m920x.h b/drivers/media/dvb/dvb-usb/m920x.h
index c354196ffe5d..7dd3db65c80e 100644
--- a/drivers/media/dvb/dvb-usb/m920x.h
+++ b/drivers/media/dvb/dvb-usb/m920x.h
@@ -19,17 +19,49 @@
19 19
20#define M9206_MAX_FILTERS 8 20#define M9206_MAX_FILTERS 8
21 21
22#define M9206_I2C_TUNER 0 22/*
23#define M9206_I2C_DEMOD 1 23sequences found in logs:
24#define M9206_I2C_MAX 2 24[index value]
250x80 write addr
26(0x00 out byte)*
270x40 out byte
28
290x80 write addr
30(0x00 out byte)*
310x80 read addr
32(0x21 in byte)*
330x60 in byte
34
35this sequence works:
360x80 read addr
37(0x21 in byte)*
380x60 in byte
39
40Guess at API of the I2C function:
41I2C operation is done one byte at a time with USB control messages. The
42index the messages is sent to is made up of a set of flags that control
43the I2C bus state:
440x80: Send START condition. After a START condition, one would normally
45 always send the 7-bit slave I2C address as the 7 MSB, followed by
46 the read/write bit as the LSB.
470x40: Send STOP condition. This should be set on the last byte of an
48 I2C transaction.
490x20: Read a byte from the slave. As opposed to writing a byte to the
50 slave. The slave will normally not produce any data unless you
51 set the R/W bit to 1 when sending the slave's address after the
52 START condition.
530x01: Respond with ACK, as opposed to a NACK. For a multi-byte read,
54 the master should send an ACK, that is pull SDA low during the 9th
55 clock cycle, after every byte but the last. This flag only makes
56 sense when bit 0x20 is set, indicating a read.
57
58What any other bits might mean, or how to get the slave's ACK/NACK
59response to a write, is unknown.
60*/
25 61
26struct m9206_state { 62struct m9206_state {
27 u16 filters[M9206_MAX_FILTERS]; 63 u16 filters[M9206_MAX_FILTERS];
28 int filtering_enabled; 64 int filtering_enabled;
29 int rep_count; 65 int rep_count;
30 struct {
31 unsigned char addr;
32 unsigned char magic;
33 }i2c_r[M9206_I2C_MAX];
34}; 66};
35#endif 67#endif
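The protocol notes added to m920x.h above, together with the rewritten m9206_i2c_xfer() in m920x.c, say that each USB control message carries one I2C data byte in value and the bus-state flags in index. The standalone fragment below (host-side only, not driver code) just prints the value/index pairs such a transfer would use for a typical one-register read; the 0x0f demod address and the 0x7f register number are made-up examples.

#include <stdio.h>
#include <stdint.h>

#define M9206_FLAG_START 0x80  /* send START + address/RW byte           */
#define M9206_FLAG_STOP  0x40  /* send STOP after this byte              */
#define M9206_FLAG_READ  0x20  /* clock a byte in from the slave         */
#define M9206_FLAG_ACK   0x01  /* master ACKs the byte read (multi-byte) */

static void show(const char *step, uint8_t value, uint8_t index)
{
	printf("%-22s value=0x%02x index=0x%02x\n", step, value, index);
}

int main(void)
{
	uint8_t addr = 0x0f;   /* 7-bit slave address, e.g. the MT352 demod */
	uint8_t reg  = 0x7f;   /* hypothetical register to read             */

	/* START, slave address with write bit */
	show("start + addr|W", (uint8_t)(addr << 1), M9206_FLAG_START);
	/* register number, no STOP yet */
	show("write register byte", reg, 0x00);
	/* repeated START, slave address with read bit */
	show("start + addr|R", (uint8_t)((addr << 1) | 0x01), M9206_FLAG_START);
	/* single-byte read: last byte, so STOP rather than ACK */
	show("read byte + stop", 0x00, M9206_FLAG_READ | M9206_FLAG_STOP);
	return 0;
}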
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
new file mode 100644
index 000000000000..518d7ad217df
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -0,0 +1,581 @@
1/* DVB USB framework compliant Linux driver for the Opera1 DVB-S Card
2*
3* Copyright (C) 2006 Mario Hlawitschka (dh1pa@amsat.org)
4* Copyright (C) 2006 Marco Gittler (g.marco@freenet.de)
5*
6* This program is free software; you can redistribute it and/or modify it
7* under the terms of the GNU General Public License as published by the Free
8* Software Foundation, version 2.
9*
10* see Documentation/dvb/README.dvb-usb for more information
11*/
12
13#include "opera1.h"
14#include "stv0299.h"
15
16#define OPERA_READ_MSG 0
17#define OPERA_WRITE_MSG 1
18#define OPERA_I2C_TUNER 0xd1
19
20#define READ_FX2_REG_REQ 0xba
21#define READ_MAC_ADDR 0x08
22#define OPERA_WRITE_FX2 0xbb
23#define OPERA_TUNER_REQ 0xb1
24#define REG_1F_SYMBOLRATE_BYTE0 0x1f
25#define REG_20_SYMBOLRATE_BYTE1 0x20
26#define REG_21_SYMBOLRATE_BYTE2 0x21
27
28#define ADDR_B600_VOLTAGE_13V (0x02)
29#define ADDR_B601_VOLTAGE_18V (0x03)
30#define ADDR_B1A6_STREAM_CTRL (0x04)
31#define ADDR_B880_READ_REMOTE (0x05)
32
33struct opera1_state {
34 u32 last_key_pressed;
35};
36struct opera_rc_keys {
37 u32 keycode;
38 u32 event;
39};
40
41int dvb_usb_opera1_debug;
42module_param_named(debug, dvb_usb_opera1_debug, int, 0644);
43MODULE_PARM_DESC(debug,
44 "set debugging level (1=info,xfer=2,pll=4,ts=8,err=16,rc=32,fw=64 (or-able))."
45 DVB_USB_DEBUG_STATUS);
46
47static int opera1_xilinx_rw(struct usb_device *dev, u8 request, u16 value,
48 u8 * data, u16 len, int flags)
49{
50 int ret;
51 u8 r;
52 u8 u8buf[len];
53
54 unsigned int pipe = (flags == OPERA_READ_MSG) ?
55 usb_rcvctrlpipe(dev,0) : usb_sndctrlpipe(dev, 0);
56 u8 request_type = (flags == OPERA_READ_MSG) ? USB_DIR_IN : USB_DIR_OUT;
57
58 if (flags == OPERA_WRITE_MSG)
59 memcpy(u8buf, data, len);
60 ret =
61 usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR,
62 value, 0x0, u8buf, len, 2000);
63
64 if (request == OPERA_TUNER_REQ) {
65 if (usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
66 OPERA_TUNER_REQ, USB_DIR_IN | USB_TYPE_VENDOR,
67 0x01, 0x0, &r, 1, 2000)<1 || r!=0x08)
68 return 0;
69 }
70 if (flags == OPERA_READ_MSG)
71 memcpy(data, u8buf, len);
72 return ret;
73}
74
75/* I2C */
76
77static int opera1_usb_i2c_msgxfer(struct dvb_usb_device *dev, u16 addr,
78 u8 * buf, u16 len)
79{
80 int ret = 0;
81 u8 request;
82 u16 value;
83
84 if (!dev) {
85 info("no usb_device");
86 return -EINVAL;
87 }
88 if (mutex_lock_interruptible(&dev->usb_mutex) < 0)
89 return -EAGAIN;
90
91 switch (addr>>1){
92 case ADDR_B600_VOLTAGE_13V:
93 request=0xb6;
94 value=0x00;
95 break;
96 case ADDR_B601_VOLTAGE_18V:
97 request=0xb6;
98 value=0x01;
99 break;
100 case ADDR_B1A6_STREAM_CTRL:
101 request=0xb1;
102 value=0xa6;
103 break;
104 case ADDR_B880_READ_REMOTE:
105 request=0xb8;
106 value=0x80;
107 break;
108 default:
109 request=0xb1;
110 value=addr;
111 }
112 ret = opera1_xilinx_rw(dev->udev, request,
113 value, buf, len,
114 addr&0x01?OPERA_READ_MSG:OPERA_WRITE_MSG);
115
116 mutex_unlock(&dev->usb_mutex);
117 return ret;
118}
119
120static int opera1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
121 int num)
122{
123 struct dvb_usb_device *d = i2c_get_adapdata(adap);
124 int i = 0, tmp = 0;
125
126 if (!d)
127 return -ENODEV;
128 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
129 return -EAGAIN;
130
131 for (i = 0; i < num; i++) {
132 if ((tmp = opera1_usb_i2c_msgxfer(d,
133 (msg[i].addr<<1)|(msg[i].flags&I2C_M_RD?0x01:0),
134 msg[i].buf,
135 msg[i].len
136 )!= msg[i].len)) {
137 break;
138 }
139 if (dvb_usb_opera1_debug & 0x10)
140 info("sending i2c mesage %d %d", tmp, msg[i].len);
141 }
142 mutex_unlock(&d->i2c_mutex);
143 return num;
144}
145
146static u32 opera1_i2c_func(struct i2c_adapter *adapter)
147{
148 return I2C_FUNC_I2C;
149}
150
151static struct i2c_algorithm opera1_i2c_algo = {
152 .master_xfer = opera1_i2c_xfer,
153 .functionality = opera1_i2c_func,
154};
155
156static int opera1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
157{
158 static u8 command_13v[1]={0x00};
159 static u8 command_18v[1]={0x01};
160 struct i2c_msg msg[] = {
161 {.addr = ADDR_B600_VOLTAGE_13V,.flags = 0,.buf = command_13v,.len = 1},
162 };
163 struct dvb_usb_adapter *udev_adap =
164 (struct dvb_usb_adapter *)(fe->dvb->priv);
165 if (voltage == SEC_VOLTAGE_18) {
166 msg[0].addr = ADDR_B601_VOLTAGE_18V;
167 msg[0].buf = command_18v;
168 }
169 i2c_transfer(&udev_adap->dev->i2c_adap, msg, 1);
170 return 0;
171}
172
173static int opera1_stv0299_set_symbol_rate(struct dvb_frontend *fe, u32 srate,
174 u32 ratio)
175{
176 stv0299_writereg(fe, 0x13, 0x98);
177 stv0299_writereg(fe, 0x14, 0x95);
178 stv0299_writereg(fe, REG_1F_SYMBOLRATE_BYTE0, (ratio >> 16) & 0xff);
179 stv0299_writereg(fe, REG_20_SYMBOLRATE_BYTE1, (ratio >> 8) & 0xff);
180 stv0299_writereg(fe, REG_21_SYMBOLRATE_BYTE2, (ratio) & 0xf0);
181 return 0;
182
183}
184static u8 opera1_inittab[] = {
185 0x00, 0xa1,
186 0x01, 0x15,
187 0x02, 0x00,
188 0x03, 0x00,
189 0x04, 0x7d,
190 0x05, 0x05,
191 0x06, 0x02,
192 0x07, 0x00,
193 0x0b, 0x00,
194 0x0c, 0x01,
195 0x0d, 0x81,
196 0x0e, 0x44,
197 0x0f, 0x19,
198 0x10, 0x3f,
199 0x11, 0x84,
200 0x12, 0xda,
201 0x13, 0x98,
202 0x14, 0x95,
203 0x15, 0xc9,
204 0x16, 0xeb,
205 0x17, 0x00,
206 0x18, 0x19,
207 0x19, 0x8b,
208 0x1a, 0x00,
209 0x1b, 0x82,
210 0x1c, 0x7f,
211 0x1d, 0x00,
212 0x1e, 0x00,
213 REG_1F_SYMBOLRATE_BYTE0, 0x06,
214 REG_20_SYMBOLRATE_BYTE1, 0x50,
215 REG_21_SYMBOLRATE_BYTE2, 0x10,
216 0x22, 0x00,
217 0x23, 0x00,
218 0x24, 0x37,
219 0x25, 0xbc,
220 0x26, 0x00,
221 0x27, 0x00,
222 0x28, 0x00,
223 0x29, 0x1e,
224 0x2a, 0x14,
225 0x2b, 0x1f,
226 0x2c, 0x09,
227 0x2d, 0x0a,
228 0x2e, 0x00,
229 0x2f, 0x00,
230 0x30, 0x00,
231 0x31, 0x1f,
232 0x32, 0x19,
233 0x33, 0xfc,
234 0x34, 0x13,
235 0xff, 0xff,
236};
237
238static struct stv0299_config opera1_stv0299_config = {
239 .demod_address = 0xd0>>1,
240 .min_delay_ms = 100,
241 .mclk = 88000000UL,
242 .invert = 1,
243 .skip_reinit = 0,
244 .lock_output = STV0229_LOCKOUTPUT_0,
245 .volt13_op0_op1 = STV0299_VOLT13_OP0,
246 .inittab = opera1_inittab,
247 .set_symbol_rate = opera1_stv0299_set_symbol_rate,
248};
249
250static int opera1_frontend_attach(struct dvb_usb_adapter *d)
251{
252 if ((d->fe =
253 dvb_attach(stv0299_attach, &opera1_stv0299_config,
254 &d->dev->i2c_adap)) != NULL) {
255 d->fe->ops.set_voltage = opera1_set_voltage;
256 return 0;
257 }
258 info("not attached stv0299");
259 return -EIO;
260}
261
262static int opera1_tuner_attach(struct dvb_usb_adapter *adap)
263{
264 dvb_attach(
265 dvb_pll_attach, adap->fe, 0xc0>>1,
266 &adap->dev->i2c_adap, &dvb_pll_opera1
267 );
268 return 0;
269}
270
271static int opera1_power_ctrl(struct dvb_usb_device *d, int onoff)
272{
273 u8 val = onoff ? 0x01 : 0x00;
274
275 if (dvb_usb_opera1_debug)
276 info("power %s", onoff ? "on" : "off");
277 return opera1_xilinx_rw(d->udev, 0xb7, val,
278 &val, 1, OPERA_WRITE_MSG);
279}
280
281static int opera1_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
282{
283 static u8 buf_start[2] = { 0xff, 0x03 };
284 static u8 buf_stop[2] = { 0xff, 0x00 };
285 struct i2c_msg start_tuner[] = {
286 {.addr = ADDR_B1A6_STREAM_CTRL,.buf = onoff ? buf_start : buf_stop,.len = 2},
287 };
288 if (dvb_usb_opera1_debug)
289 info("streaming %s", onoff ? "on" : "off");
290 i2c_transfer(&adap->dev->i2c_adap, start_tuner, 1);
291 return 0;
292}
293
294static int opera1_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
295 int onoff)
296{
297 u8 b_pid[3];
298 struct i2c_msg msg[] = {
299 {.addr = ADDR_B1A6_STREAM_CTRL,.buf = b_pid,.len = 3},
300 };
301 if (dvb_usb_opera1_debug)
302 info("pidfilter index: %d pid: %d %s", index, pid,
303 onoff ? "on" : "off");
304 b_pid[0] = (2 * index) + 4;
305 b_pid[1] = onoff ? (pid & 0xff) : (0x00);
306 b_pid[2] = onoff ? ((pid >> 8) & 0xff) : (0x00);
307 i2c_transfer(&adap->dev->i2c_adap, msg, 1);
308 return 0;
309}
310
311static int opera1_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
312{
313 int u = 0x04;
314 u8 b_pid[3];
315 struct i2c_msg msg[] = {
316 {.addr = ADDR_B1A6_STREAM_CTRL,.buf = b_pid,.len = 3},
317 };
318 if (dvb_usb_opera1_debug)
319 info("%s hw-pidfilter", onoff ? "enable" : "disable");
320 for (; u < 0x7e; u += 2) {
321 b_pid[0] = u;
322 b_pid[1] = 0;
323 b_pid[2] = 0x80;
324 i2c_transfer(&adap->dev->i2c_adap, msg, 1);
325 }
326 return 0;
327}
328
329static struct dvb_usb_rc_key opera1_rc_keys[] = {
330 {0x5f, 0xa0, KEY_1},
331 {0x51, 0xaf, KEY_2},
332 {0x5d, 0xa2, KEY_3},
333 {0x41, 0xbe, KEY_4},
334 {0x0b, 0xf5, KEY_5},
335 {0x43, 0xbd, KEY_6},
336 {0x47, 0xb8, KEY_7},
337 {0x49, 0xb6, KEY_8},
338 {0x05, 0xfa, KEY_9},
339 {0x45, 0xba, KEY_0},
340 {0x09, 0xf6, KEY_UP}, /*chanup */
341 {0x1b, 0xe5, KEY_DOWN}, /*chandown */
342 {0x5d, 0xa3, KEY_LEFT}, /*voldown */
343 {0x5f, 0xa1, KEY_RIGHT}, /*volup */
344 {0x07, 0xf8, KEY_SPACE}, /*tab */
345 {0x1f, 0xe1, KEY_ENTER}, /*play ok */
346 {0x1b, 0xe4, KEY_Z}, /*zoom */
347 {0x59, 0xa6, KEY_M}, /*mute */
348 {0x5b, 0xa5, KEY_F}, /*tv/f */
349 {0x19, 0xe7, KEY_R}, /*rec */
350 {0x01, 0xfe, KEY_S}, /*Stop */
351 {0x03, 0xfd, KEY_P}, /*pause */
352 {0x03, 0xfc, KEY_W}, /*<- -> */
353 {0x07, 0xf9, KEY_C}, /*capture */
354 {0x47, 0xb9, KEY_Q}, /*exit */
355 {0x43, 0xbc, KEY_O}, /*power */
356
357};
358
359static int opera1_rc_query(struct dvb_usb_device *dev, u32 * event, int *state)
360{
361 struct opera1_state *opst = dev->priv;
362 u8 rcbuffer[32];
363 const u16 startmarker1 = 0x10ed;
364 const u16 startmarker2 = 0x11ec;
365 struct i2c_msg read_remote[] = {
366 {.addr = ADDR_B880_READ_REMOTE,.buf = rcbuffer,.flags = I2C_M_RD,.len = 32},
367 };
368 int i = 0;
369 u32 send_key = 0;
370
371 if (i2c_transfer(&dev->i2c_adap, read_remote, 1) == 1) {
372 for (i = 0; i < 32; i++) {
373 if (rcbuffer[i])
374 send_key |= 1;
375 if (i < 31)
376 send_key = send_key << 1;
377 }
378 if (send_key & 0x8000)
379 send_key = (send_key << 1) | (send_key >> 15 & 0x01);
380
381 if (send_key == 0xffff && opst->last_key_pressed != 0) {
382 *state = REMOTE_KEY_REPEAT;
383 *event = opst->last_key_pressed;
384 return 0;
385 }
386 for (; send_key != 0;) {
387 if (send_key >> 16 == startmarker2) {
388 break;
389 } else if (send_key >> 16 == startmarker1) {
390 send_key =
391 (send_key & 0xfffeffff) | (startmarker1 << 16);
392 break;
393 } else
394 send_key >>= 1;
395 }
396
397 if (send_key == 0)
398 return 0;
399
400 send_key = (send_key & 0xffff) | 0x0100;
401
402 for (i = 0; i < ARRAY_SIZE(opera1_rc_keys); i++) {
403 if ((opera1_rc_keys[i].custom * 256 +
404 opera1_rc_keys[i].data) == (send_key & 0xffff)) {
405 *state = REMOTE_KEY_PRESSED;
406 *event = opera1_rc_keys[i].event;
407 opst->last_key_pressed =
408 opera1_rc_keys[i].event;
409 break;
410 }
411 opst->last_key_pressed = 0;
412 }
413 } else
414 *state = REMOTE_NO_KEY_PRESSED;
415 return 0;
416}
417
418static struct usb_device_id opera1_table[] = {
419 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_OPERA1_COLD)},
420 {USB_DEVICE(USB_VID_OPERA1, USB_PID_OPERA1_WARM)},
421 {}
422};
423
424MODULE_DEVICE_TABLE(usb, opera1_table);
425
426static int opera1_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
427{
428 u8 command[] = { READ_MAC_ADDR };
429 opera1_xilinx_rw(d->udev, 0xb1, 0xa0, command, 1, OPERA_WRITE_MSG);
430 opera1_xilinx_rw(d->udev, 0xb1, 0xa1, mac, 6, OPERA_READ_MSG);
431 return 0;
432}
433static int opera1_xilinx_load_firmware(struct usb_device *dev,
434 const char *filename)
435{
436 const struct firmware *fw = NULL;
437 u8 *b, *p;
438 int ret = 0, i;
439 u8 testval;
440 info("start downloading fpga firmware");
441
442 if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
443 err("did not find the firmware file. (%s) "
444 "Please see linux/Documentation/dvb/ for more details on firmware-problems.",
445 filename);
446 return ret;
447 } else {
448 p = kmalloc(fw->size, GFP_KERNEL);
449 opera1_xilinx_rw(dev, 0xbc, 0x00, &testval, 1, OPERA_READ_MSG);
450 if (p != NULL && testval != 0x67) {
451
452 u8 reset = 0, fpga_command = 0;
453 memcpy(p, fw->data, fw->size);
454 /* clear fpga ? */
455 opera1_xilinx_rw(dev, 0xbc, 0xaa, &fpga_command, 1,
456 OPERA_WRITE_MSG);
457 for (i = 0; i < fw->size && p[i] != 0;) {
458 b = (u8 *) p + i;
459 if (opera1_xilinx_rw
460 (dev, OPERA_WRITE_FX2, 0x0, b + 1, b[0],
461 OPERA_WRITE_MSG) != b[0]
462 ) {
463 err("error while transferring firmware");
464 ret = -EINVAL;
465 break;
466 }
467 i = i + 1 + b[0];
468 }
469 /* restart the CPU */
470 if (ret || opera1_xilinx_rw
471 (dev, 0xa0, 0xe600, &reset, 1,
472 OPERA_WRITE_MSG) != 1) {
473 err("could not restart the USB controller CPU.");
474 ret = -EINVAL;
475 }
476 kfree(p);
477 }
478 }
479 if (fw) {
480 release_firmware(fw);
481 }
482 return ret;
483}
484
485static struct dvb_usb_device_properties opera1_properties = {
486 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
487 .usb_ctrl = CYPRESS_FX2,
488 .firmware = "dvb-usb-opera-01.fw",
489 .size_of_priv = sizeof(struct opera1_state),
490
491 .power_ctrl = opera1_power_ctrl,
492 .i2c_algo = &opera1_i2c_algo,
493
494 .rc_key_map = opera1_rc_keys,
495 .rc_key_map_size = ARRAY_SIZE(opera1_rc_keys),
496 .rc_interval = 200,
497 .rc_query = opera1_rc_query,
498 .read_mac_address = opera1_read_mac_address,
499 .generic_bulk_ctrl_endpoint = 0x00,
500 /* parameter for the MPEG2-data transfer */
501 .num_adapters = 1,
502 .adapter = {
503 {
504 .frontend_attach = opera1_frontend_attach,
505 .streaming_ctrl = opera1_streaming_ctrl,
506 .tuner_attach = opera1_tuner_attach,
507 .caps =
508 DVB_USB_ADAP_HAS_PID_FILTER |
509 DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
510 .pid_filter = opera1_pid_filter,
511 .pid_filter_ctrl = opera1_pid_filter_control,
512 .pid_filter_count = 252,
513 .stream = {
514 .type = USB_BULK,
515 .count = 10,
516 .endpoint = 0x82,
517 .u = {
518 .bulk = {
519 .buffersize = 4096,
520 }
521 }
522 },
523 }
524 },
525 .num_device_descs = 1,
526 .devices = {
527 {"Opera1 DVB-S USB2.0",
528 {&opera1_table[0], NULL},
529 {&opera1_table[1], NULL},
530 },
531 }
532};
533
534static int opera1_probe(struct usb_interface *intf,
535 const struct usb_device_id *id)
536{
537 struct dvb_usb_device *d = NULL;
538 struct usb_device *udev = interface_to_usbdev(intf);
539
540 if (udev->descriptor.idProduct == USB_PID_OPERA1_WARM &&
541 udev->descriptor.idVendor == USB_VID_OPERA1 &&
542 (d == NULL
543 || opera1_xilinx_load_firmware(udev, "dvb-usb-opera1-fpga.fw") != 0)
544 ) {
545 return -EINVAL;
546 }
547
548 if (dvb_usb_device_init(intf, &opera1_properties, THIS_MODULE, &d) != 0)
549 return -EINVAL;
550 return 0;
551}
552
553static struct usb_driver opera1_driver = {
554 .name = "opera1",
555 .probe = opera1_probe,
556 .disconnect = dvb_usb_device_exit,
557 .id_table = opera1_table,
558};
559
560static int __init opera1_module_init(void)
561{
562 int result = 0;
563 if ((result = usb_register(&opera1_driver))) {
564 err("usb_register failed. Error number %d", result);
565 }
566 return result;
567}
568
569static void __exit opera1_module_exit(void)
570{
571 usb_deregister(&opera1_driver);
572}
573
574module_init(opera1_module_init);
575module_exit(opera1_module_exit);
576
577MODULE_AUTHOR("Mario Hlawitschka (c) dh1pa@amsat.org");
578MODULE_AUTHOR("Marco Gittler (c) g.marco@freenet.de");
579MODULE_DESCRIPTION("Driver for Opera1 DVB-S device");
580MODULE_VERSION("0.1");
581MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/opera1.h b/drivers/media/dvb/dvb-usb/opera1.h
new file mode 100644
index 000000000000..53174427902d
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/opera1.h
@@ -0,0 +1,9 @@
1#ifndef _OPERA1_H_
2#define _OPERA1_H_
3
4#define DVB_USB_LOG_PREFIX "opera"
5#include "dvb-usb.h"
6
7extern int dvb_usb_opera1_debug;
8#define deb_xfer(args...) dprintk(dvb_usb_opera1_debug,0x02,args)
9#endif
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index 95d29976ed78..88dc4367a2e3 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -184,6 +184,7 @@ static int ttusb2_probe(struct usb_interface *intf,
184 184
185static struct usb_device_id ttusb2_table [] = { 185static struct usb_device_id ttusb2_table [] = {
186 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_400E) }, 186 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_400E) },
187 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_450E) },
187 {} /* Terminating entry */ 188 {} /* Terminating entry */
188}; 189};
189MODULE_DEVICE_TABLE (usb, ttusb2_table); 190MODULE_DEVICE_TABLE (usb, ttusb2_table);
@@ -227,12 +228,16 @@ static struct dvb_usb_device_properties ttusb2_properties = {
227 228
228 .generic_bulk_ctrl_endpoint = 0x01, 229 .generic_bulk_ctrl_endpoint = 0x01,
229 230
230 .num_device_descs = 1, 231 .num_device_descs = 2,
231 .devices = { 232 .devices = {
232 { "Pinnacle 400e DVB-S USB2.0", 233 { "Pinnacle 400e DVB-S USB2.0",
233 { &ttusb2_table[0], NULL }, 234 { &ttusb2_table[0], NULL },
234 { NULL }, 235 { NULL },
235 }, 236 },
237 { "Pinnacle 450e DVB-S USB2.0",
238 { &ttusb2_table[1], NULL },
239 { NULL },
240 },
236 } 241 }
237}; 242};
238 243
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 22c2cf2cea98..ff448761dcef 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -205,6 +205,13 @@ config DVB_TDA10021
205 help 205 help
206 A DVB-C tuner module. Say Y when you want to support this frontend. 206 A DVB-C tuner module. Say Y when you want to support this frontend.
207 207
208config DVB_TDA10023
209 tristate "Philips TDA10023 based"
210 depends on DVB_CORE && I2C
211 default m if DVB_FE_CUSTOMISE
212 help
213 A DVB-C tuner module. Say Y when you want to support this frontend.
214
208config DVB_STV0297 215config DVB_STV0297
209 tristate "ST STV0297 based" 216 tristate "ST STV0297 based"
210 depends on DVB_CORE && I2C 217 depends on DVB_CORE && I2C
@@ -280,8 +287,12 @@ comment "Tuners/PLL support"
280 depends on DVB_CORE 287 depends on DVB_CORE
281 288
282config DVB_PLL 289config DVB_PLL
283 tristate 290 tristate "Generic I2C PLL based tuners"
284 depends on DVB_CORE && I2C 291 depends on DVB_CORE && I2C
292 default m if DVB_FE_CUSTOMISE
293 help
294 This module drives a number of tuners based on PLL chips with a
295 common I2C interface. Say Y when you want to support these tuners.
285 296
286config DVB_TDA826X 297config DVB_TDA826X
287 tristate "Philips TDA826X silicon tuner" 298 tristate "Philips TDA826X silicon tuner"
@@ -290,6 +301,13 @@ config DVB_TDA826X
290 help 301 help
291 A DVB-S silicon tuner module. Say Y when you want to support this tuner. 302 A DVB-S silicon tuner module. Say Y when you want to support this tuner.
292 303
304config DVB_TDA827X
305 tristate "Philips TDA827X silicon tuner"
306 depends on DVB_CORE && I2C
307 default m if DVB_FE_CUSTOMISE
308 help
309 A DVB-T silicon tuner module. Say Y when you want to support this tuner.
310
293config DVB_TUNER_QT1010 311config DVB_TUNER_QT1010
294 tristate "Quantek QT1010 silicon tuner" 312 tristate "Quantek QT1010 silicon tuner"
295 depends on DVB_CORE && I2C 313 depends on DVB_CORE && I2C
@@ -304,14 +322,6 @@ config DVB_TUNER_MT2060
304 help 322 help
305 A driver for the silicon IF tuner MT2060 from Microtune. 323 A driver for the silicon IF tuner MT2060 from Microtune.
306 324
307config DVB_TUNER_LGH06XF
308 tristate "LG TDVS-H06xF ATSC tuner"
309 depends on DVB_CORE && I2C
310 select DVB_PLL
311 default m if DVB_FE_CUSTOMISE
312 help
313 A driver for the LG TDVS-H06xF ATSC tuner family.
314
315comment "Miscellaneous devices" 325comment "Miscellaneous devices"
316 depends on DVB_CORE 326 depends on DVB_CORE
317 327
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index a646d9969b71..27f386585d43 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_DVB_MT352) += mt352.o
25obj-$(CONFIG_DVB_ZL10353) += zl10353.o 25obj-$(CONFIG_DVB_ZL10353) += zl10353.o
26obj-$(CONFIG_DVB_CX22702) += cx22702.o 26obj-$(CONFIG_DVB_CX22702) += cx22702.o
27obj-$(CONFIG_DVB_TDA10021) += tda10021.o 27obj-$(CONFIG_DVB_TDA10021) += tda10021.o
28obj-$(CONFIG_DVB_TDA10023) += tda10023.o
28obj-$(CONFIG_DVB_STV0297) += stv0297.o 29obj-$(CONFIG_DVB_STV0297) += stv0297.o
29obj-$(CONFIG_DVB_NXT200X) += nxt200x.o 30obj-$(CONFIG_DVB_NXT200X) += nxt200x.o
30obj-$(CONFIG_DVB_OR51211) += or51211.o 31obj-$(CONFIG_DVB_OR51211) += or51211.o
@@ -37,7 +38,7 @@ obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
37obj-$(CONFIG_DVB_ISL6421) += isl6421.o 38obj-$(CONFIG_DVB_ISL6421) += isl6421.o
38obj-$(CONFIG_DVB_TDA10086) += tda10086.o 39obj-$(CONFIG_DVB_TDA10086) += tda10086.o
39obj-$(CONFIG_DVB_TDA826X) += tda826x.o 40obj-$(CONFIG_DVB_TDA826X) += tda826x.o
41obj-$(CONFIG_DVB_TDA827X) += tda827x.o
40obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o 42obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o
41obj-$(CONFIG_DVB_TUNER_QT1010) += qt1010.o 43obj-$(CONFIG_DVB_TUNER_QT1010) += qt1010.o
42obj-$(CONFIG_DVB_TUA6100) += tua6100.o 44obj-$(CONFIG_DVB_TUA6100) += tua6100.o
43obj-$(CONFIG_DVB_TUNER_LGH06XF) += lgh06xf.o
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index 62de760c844f..5f96ffda91ad 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -27,17 +27,29 @@
27/* ----------------------------------------------------------- */ 27/* ----------------------------------------------------------- */
28/* descriptions */ 28/* descriptions */
29 29
30/* Set AGC TOP value to 103 dBuV:
31 0x80 = Control Byte
32 0x40 = 250 uA charge pump (irrelevant)
33 0x18 = Aux Byte to follow
34 0x06 = 64.5 kHz divider (irrelevant)
35 0x01 = Disable Vt (aka sleep)
36
37 0x00 = AGC Time constant 2s Iagc = 300 nA (vs 0x80 = 9 nA)
38 0x50 = AGC Take over point = 103 dBuV */
39static u8 tua603x_agc103[] = { 2, 0x80|0x40|0x18|0x06|0x01, 0x00|0x50 };
40
30struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { 41struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
31 .name = "Thomson dtt7579", 42 .name = "Thomson dtt7579",
32 .min = 177000000, 43 .min = 177000000,
33 .max = 858000000, 44 .max = 858000000,
34 .count = 5, 45 .iffreq= 36166667,
46 .sleepdata = (u8[]){ 2, 0xb4, 0x03 },
47 .count = 4,
35 .entries = { 48 .entries = {
36 { 0, 36166667, 166666, 0xb4, 0x03 }, /* go sleep */ 49 { 443250000, 166667, 0xb4, 0x02 },
37 { 443250000, 36166667, 166666, 0xb4, 0x02 }, 50 { 542000000, 166667, 0xb4, 0x08 },
38 { 542000000, 36166667, 166666, 0xb4, 0x08 }, 51 { 771000000, 166667, 0xbc, 0x08 },
39 { 771000000, 36166667, 166666, 0xbc, 0x08 }, 52 { 999999999, 166667, 0xf4, 0x08 },
40 { 999999999, 36166667, 166666, 0xf4, 0x08 },
41 }, 53 },
42}; 54};
43EXPORT_SYMBOL(dvb_pll_thomson_dtt7579); 55EXPORT_SYMBOL(dvb_pll_thomson_dtt7579);
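The tua603x_agc103 initializer near the top of the hunk above is easier to read with the OR expressions from its comment evaluated: the control byte is 0x80|0x40|0x18|0x06|0x01 = 0xdf and the aux byte is 0x00|0x50 = 0x50, so the array is equivalent to { 2, 0xdf, 0x50 } (the leading 2 presumably being the number of bytes that follow, matching the sleepdata entries). A trivial, purely illustrative check:

#include <stdio.h>

int main(void)
{
	printf("control 0x%02x, aux 0x%02x\n",
	       0x80 | 0x40 | 0x18 | 0x06 | 0x01, 0x00 | 0x50);
	/* prints: control 0xdf, aux 0x50 */
	return 0;
}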
@@ -46,11 +58,12 @@ struct dvb_pll_desc dvb_pll_thomson_dtt7610 = {
46 .name = "Thomson dtt7610", 58 .name = "Thomson dtt7610",
47 .min = 44000000, 59 .min = 44000000,
48 .max = 958000000, 60 .max = 958000000,
61 .iffreq= 44000000,
49 .count = 3, 62 .count = 3,
50 .entries = { 63 .entries = {
51 { 157250000, 44000000, 62500, 0x8e, 0x39 }, 64 { 157250000, 62500, 0x8e, 0x39 },
52 { 454000000, 44000000, 62500, 0x8e, 0x3a }, 65 { 454000000, 62500, 0x8e, 0x3a },
53 { 999999999, 44000000, 62500, 0x8e, 0x3c }, 66 { 999999999, 62500, 0x8e, 0x3c },
54 }, 67 },
55}; 68};
56EXPORT_SYMBOL(dvb_pll_thomson_dtt7610); 69EXPORT_SYMBOL(dvb_pll_thomson_dtt7610);
@@ -66,14 +79,15 @@ struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
66 .min = 177000000, 79 .min = 177000000,
67 .max = 896000000, 80 .max = 896000000,
68 .setbw = thomson_dtt759x_bw, 81 .setbw = thomson_dtt759x_bw,
69 .count = 6, 82 .iffreq= 36166667,
83 .sleepdata = (u8[]){ 2, 0x84, 0x03 },
84 .count = 5,
70 .entries = { 85 .entries = {
71 { 0, 36166667, 166666, 0x84, 0x03 }, 86 { 264000000, 166667, 0xb4, 0x02 },
72 { 264000000, 36166667, 166666, 0xb4, 0x02 }, 87 { 470000000, 166667, 0xbc, 0x02 },
73 { 470000000, 36166667, 166666, 0xbc, 0x02 }, 88 { 735000000, 166667, 0xbc, 0x08 },
74 { 735000000, 36166667, 166666, 0xbc, 0x08 }, 89 { 835000000, 166667, 0xf4, 0x08 },
75 { 835000000, 36166667, 166666, 0xf4, 0x08 }, 90 { 999999999, 166667, 0xfc, 0x08 },
76 { 999999999, 36166667, 166666, 0xfc, 0x08 },
77 }, 91 },
78}; 92};
79EXPORT_SYMBOL(dvb_pll_thomson_dtt759x); 93EXPORT_SYMBOL(dvb_pll_thomson_dtt759x);
@@ -82,14 +96,15 @@ struct dvb_pll_desc dvb_pll_lg_z201 = {
82 .name = "LG z201", 96 .name = "LG z201",
83 .min = 174000000, 97 .min = 174000000,
84 .max = 862000000, 98 .max = 862000000,
85 .count = 6, 99 .iffreq= 36166667,
100 .sleepdata = (u8[]){ 2, 0xbc, 0x03 },
101 .count = 5,
86 .entries = { 102 .entries = {
87 { 0, 36166667, 166666, 0xbc, 0x03 }, 103 { 157500000, 166667, 0xbc, 0x01 },
88 { 157500000, 36166667, 166666, 0xbc, 0x01 }, 104 { 443250000, 166667, 0xbc, 0x02 },
89 { 443250000, 36166667, 166666, 0xbc, 0x02 }, 105 { 542000000, 166667, 0xbc, 0x04 },
90 { 542000000, 36166667, 166666, 0xbc, 0x04 }, 106 { 830000000, 166667, 0xf4, 0x04 },
91 { 830000000, 36166667, 166666, 0xf4, 0x04 }, 107 { 999999999, 166667, 0xfc, 0x04 },
92 { 999999999, 36166667, 166666, 0xfc, 0x04 },
93 }, 108 },
94}; 109};
95EXPORT_SYMBOL(dvb_pll_lg_z201); 110EXPORT_SYMBOL(dvb_pll_lg_z201);
@@ -98,11 +113,12 @@ struct dvb_pll_desc dvb_pll_microtune_4042 = {
98 .name = "Microtune 4042 FI5", 113 .name = "Microtune 4042 FI5",
99 .min = 57000000, 114 .min = 57000000,
100 .max = 858000000, 115 .max = 858000000,
116 .iffreq= 44000000,
101 .count = 3, 117 .count = 3,
102 .entries = { 118 .entries = {
103 { 162000000, 44000000, 62500, 0x8e, 0xa1 }, 119 { 162000000, 62500, 0x8e, 0xa1 },
104 { 457000000, 44000000, 62500, 0x8e, 0x91 }, 120 { 457000000, 62500, 0x8e, 0x91 },
105 { 999999999, 44000000, 62500, 0x8e, 0x31 }, 121 { 999999999, 62500, 0x8e, 0x31 },
106 }, 122 },
107}; 123};
108EXPORT_SYMBOL(dvb_pll_microtune_4042); 124EXPORT_SYMBOL(dvb_pll_microtune_4042);
@@ -112,11 +128,13 @@ struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
112 .name = "Thomson dtt761x", 128 .name = "Thomson dtt761x",
113 .min = 57000000, 129 .min = 57000000,
114 .max = 863000000, 130 .max = 863000000,
131 .iffreq= 44000000,
115 .count = 3, 132 .count = 3,
133 .initdata = tua603x_agc103,
116 .entries = { 134 .entries = {
117 { 147000000, 44000000, 62500, 0x8e, 0x39 }, 135 { 147000000, 62500, 0x8e, 0x39 },
118 { 417000000, 44000000, 62500, 0x8e, 0x3a }, 136 { 417000000, 62500, 0x8e, 0x3a },
119 { 999999999, 44000000, 62500, 0x8e, 0x3c }, 137 { 999999999, 62500, 0x8e, 0x3c },
120 }, 138 },
121}; 139};
122EXPORT_SYMBOL(dvb_pll_thomson_dtt761x); 140EXPORT_SYMBOL(dvb_pll_thomson_dtt761x);
@@ -125,17 +143,18 @@ struct dvb_pll_desc dvb_pll_unknown_1 = {
125 .name = "unknown 1", /* used by dntv live dvb-t */ 143 .name = "unknown 1", /* used by dntv live dvb-t */
126 .min = 174000000, 144 .min = 174000000,
127 .max = 862000000, 145 .max = 862000000,
146 .iffreq= 36166667,
128 .count = 9, 147 .count = 9,
129 .entries = { 148 .entries = {
130 { 150000000, 36166667, 166666, 0xb4, 0x01 }, 149 { 150000000, 166667, 0xb4, 0x01 },
131 { 173000000, 36166667, 166666, 0xbc, 0x01 }, 150 { 173000000, 166667, 0xbc, 0x01 },
132 { 250000000, 36166667, 166666, 0xb4, 0x02 }, 151 { 250000000, 166667, 0xb4, 0x02 },
133 { 400000000, 36166667, 166666, 0xbc, 0x02 }, 152 { 400000000, 166667, 0xbc, 0x02 },
134 { 420000000, 36166667, 166666, 0xf4, 0x02 }, 153 { 420000000, 166667, 0xf4, 0x02 },
135 { 470000000, 36166667, 166666, 0xfc, 0x02 }, 154 { 470000000, 166667, 0xfc, 0x02 },
136 { 600000000, 36166667, 166666, 0xbc, 0x08 }, 155 { 600000000, 166667, 0xbc, 0x08 },
137 { 730000000, 36166667, 166666, 0xf4, 0x08 }, 156 { 730000000, 166667, 0xf4, 0x08 },
138 { 999999999, 36166667, 166666, 0xfc, 0x08 }, 157 { 999999999, 166667, 0xfc, 0x08 },
139 }, 158 },
140}; 159};
141EXPORT_SYMBOL(dvb_pll_unknown_1); 160EXPORT_SYMBOL(dvb_pll_unknown_1);
@@ -147,11 +166,12 @@ struct dvb_pll_desc dvb_pll_tua6010xs = {
147 .name = "Infineon TUA6010XS", 166 .name = "Infineon TUA6010XS",
148 .min = 44250000, 167 .min = 44250000,
149 .max = 858000000, 168 .max = 858000000,
169 .iffreq= 36125000,
150 .count = 3, 170 .count = 3,
151 .entries = { 171 .entries = {
152 { 115750000, 36125000, 62500, 0x8e, 0x03 }, 172 { 115750000, 62500, 0x8e, 0x03 },
153 { 403250000, 36125000, 62500, 0x8e, 0x06 }, 173 { 403250000, 62500, 0x8e, 0x06 },
154 { 999999999, 36125000, 62500, 0x8e, 0x85 }, 174 { 999999999, 62500, 0x8e, 0x85 },
155 }, 175 },
156}; 176};
157EXPORT_SYMBOL(dvb_pll_tua6010xs); 177EXPORT_SYMBOL(dvb_pll_tua6010xs);
@@ -161,12 +181,13 @@ struct dvb_pll_desc dvb_pll_env57h1xd5 = {
161 .name = "Panasonic ENV57H1XD5", 181 .name = "Panasonic ENV57H1XD5",
162 .min = 44250000, 182 .min = 44250000,
163 .max = 858000000, 183 .max = 858000000,
184 .iffreq= 36125000,
164 .count = 4, 185 .count = 4,
165 .entries = { 186 .entries = {
166 { 153000000, 36291666, 166666, 0xc2, 0x41 }, 187 { 153000000, 166667, 0xc2, 0x41 },
167 { 470000000, 36291666, 166666, 0xc2, 0x42 }, 188 { 470000000, 166667, 0xc2, 0x42 },
168 { 526000000, 36291666, 166666, 0xc2, 0x84 }, 189 { 526000000, 166667, 0xc2, 0x84 },
169 { 999999999, 36291666, 166666, 0xc2, 0xa4 }, 190 { 999999999, 166667, 0xc2, 0xa4 },
170 }, 191 },
171}; 192};
172EXPORT_SYMBOL(dvb_pll_env57h1xd5); 193EXPORT_SYMBOL(dvb_pll_env57h1xd5);
@@ -185,20 +206,21 @@ struct dvb_pll_desc dvb_pll_tda665x = {
185 .min = 44250000, 206 .min = 44250000,
186 .max = 858000000, 207 .max = 858000000,
187 .setbw = tda665x_bw, 208 .setbw = tda665x_bw,
209 .iffreq= 36166667,
188 .count = 12, 210 .count = 12,
189 .entries = { 211 .entries = {
190 { 93834000, 36249333, 166667, 0xca, 0x61 /* 011 0 0 0 01 */ }, 212 { 93834000, 166667, 0xca, 0x61 /* 011 0 0 0 01 */ },
191 { 123834000, 36249333, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, 213 { 123834000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ },
192 { 161000000, 36249333, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, 214 { 161000000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ },
193 { 163834000, 36249333, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, 215 { 163834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ },
194 { 253834000, 36249333, 166667, 0xca, 0x62 /* 011 0 0 0 10 */ }, 216 { 253834000, 166667, 0xca, 0x62 /* 011 0 0 0 10 */ },
195 { 383834000, 36249333, 166667, 0xca, 0xa2 /* 101 0 0 0 10 */ }, 217 { 383834000, 166667, 0xca, 0xa2 /* 101 0 0 0 10 */ },
196 { 443834000, 36249333, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, 218 { 443834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ },
197 { 444000000, 36249333, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, 219 { 444000000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ },
198 { 583834000, 36249333, 166667, 0xca, 0x64 /* 011 0 0 1 00 */ }, 220 { 583834000, 166667, 0xca, 0x64 /* 011 0 0 1 00 */ },
199 { 793834000, 36249333, 166667, 0xca, 0xa4 /* 101 0 0 1 00 */ }, 221 { 793834000, 166667, 0xca, 0xa4 /* 101 0 0 1 00 */ },
200 { 444834000, 36249333, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, 222 { 444834000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ },
201 { 861000000, 36249333, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ }, 223 { 861000000, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ },
202 } 224 }
203}; 225};
204EXPORT_SYMBOL(dvb_pll_tda665x); 226EXPORT_SYMBOL(dvb_pll_tda665x);
@@ -216,12 +238,13 @@ struct dvb_pll_desc dvb_pll_tua6034 = {
216 .name = "Infineon TUA6034", 238 .name = "Infineon TUA6034",
217 .min = 44250000, 239 .min = 44250000,
218 .max = 858000000, 240 .max = 858000000,
241 .iffreq= 36166667,
219 .count = 3, 242 .count = 3,
220 .setbw = tua6034_bw, 243 .setbw = tua6034_bw,
221 .entries = { 244 .entries = {
222 { 174500000, 36166667, 62500, 0xce, 0x01 }, 245 { 174500000, 62500, 0xce, 0x01 },
223 { 230000000, 36166667, 62500, 0xce, 0x02 }, 246 { 230000000, 62500, 0xce, 0x02 },
224 { 999999999, 36166667, 62500, 0xce, 0x04 }, 247 { 999999999, 62500, 0xce, 0x04 },
225 }, 248 },
226}; 249};
227EXPORT_SYMBOL(dvb_pll_tua6034); 250EXPORT_SYMBOL(dvb_pll_tua6034);
@@ -233,11 +256,13 @@ struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf = {
233 .name = "LG TDVS-H06xF", 256 .name = "LG TDVS-H06xF",
234 .min = 54000000, 257 .min = 54000000,
235 .max = 863000000, 258 .max = 863000000,
259 .iffreq= 44000000,
260 .initdata = tua603x_agc103,
236 .count = 3, 261 .count = 3,
237 .entries = { 262 .entries = {
238 { 165000000, 44000000, 62500, 0xce, 0x01 }, 263 { 165000000, 62500, 0xce, 0x01 },
239 { 450000000, 44000000, 62500, 0xce, 0x02 }, 264 { 450000000, 62500, 0xce, 0x02 },
240 { 999999999, 44000000, 62500, 0xce, 0x04 }, 265 { 999999999, 62500, 0xce, 0x04 },
241 }, 266 },
242}; 267};
243EXPORT_SYMBOL(dvb_pll_lg_tdvs_h06xf); 268EXPORT_SYMBOL(dvb_pll_lg_tdvs_h06xf);
@@ -255,16 +280,17 @@ struct dvb_pll_desc dvb_pll_fmd1216me = {
255 .name = "Philips FMD1216ME", 280 .name = "Philips FMD1216ME",
256 .min = 50870000, 281 .min = 50870000,
257 .max = 858000000, 282 .max = 858000000,
283 .iffreq= 36125000,
258 .setbw = fmd1216me_bw, 284 .setbw = fmd1216me_bw,
259 .count = 7, 285 .count = 7,
260 .entries = { 286 .entries = {
261 { 143870000, 36213333, 166667, 0xbc, 0x41 }, 287 { 143870000, 166667, 0xbc, 0x41 },
262 { 158870000, 36213333, 166667, 0xf4, 0x41 }, 288 { 158870000, 166667, 0xf4, 0x41 },
263 { 329870000, 36213333, 166667, 0xbc, 0x42 }, 289 { 329870000, 166667, 0xbc, 0x42 },
264 { 441870000, 36213333, 166667, 0xf4, 0x42 }, 290 { 441870000, 166667, 0xf4, 0x42 },
265 { 625870000, 36213333, 166667, 0xbc, 0x44 }, 291 { 625870000, 166667, 0xbc, 0x44 },
266 { 803870000, 36213333, 166667, 0xf4, 0x44 }, 292 { 803870000, 166667, 0xf4, 0x44 },
267 { 999999999, 36213333, 166667, 0xfc, 0x44 }, 293 { 999999999, 166667, 0xfc, 0x44 },
268 } 294 }
269}; 295};
270EXPORT_SYMBOL(dvb_pll_fmd1216me); 296EXPORT_SYMBOL(dvb_pll_fmd1216me);
@@ -282,13 +308,14 @@ struct dvb_pll_desc dvb_pll_tded4 = {
282 .name = "ALPS TDED4", 308 .name = "ALPS TDED4",
283 .min = 47000000, 309 .min = 47000000,
284 .max = 863000000, 310 .max = 863000000,
311 .iffreq= 36166667,
285 .setbw = tded4_bw, 312 .setbw = tded4_bw,
286 .count = 4, 313 .count = 4,
287 .entries = { 314 .entries = {
288 { 153000000, 36166667, 166667, 0x85, 0x01 }, 315 { 153000000, 166667, 0x85, 0x01 },
289 { 470000000, 36166667, 166667, 0x85, 0x02 }, 316 { 470000000, 166667, 0x85, 0x02 },
290 { 823000000, 36166667, 166667, 0x85, 0x08 }, 317 { 823000000, 166667, 0x85, 0x08 },
291 { 999999999, 36166667, 166667, 0x85, 0x88 }, 318 { 999999999, 166667, 0x85, 0x88 },
292 } 319 }
293}; 320};
294EXPORT_SYMBOL(dvb_pll_tded4); 321EXPORT_SYMBOL(dvb_pll_tded4);
@@ -300,12 +327,13 @@ struct dvb_pll_desc dvb_pll_tdhu2 = {
300 .name = "ALPS TDHU2", 327 .name = "ALPS TDHU2",
301 .min = 54000000, 328 .min = 54000000,
302 .max = 864000000, 329 .max = 864000000,
330 .iffreq= 44000000,
303 .count = 4, 331 .count = 4,
304 .entries = { 332 .entries = {
305 { 162000000, 44000000, 62500, 0x85, 0x01 }, 333 { 162000000, 62500, 0x85, 0x01 },
306 { 426000000, 44000000, 62500, 0x85, 0x02 }, 334 { 426000000, 62500, 0x85, 0x02 },
307 { 782000000, 44000000, 62500, 0x85, 0x08 }, 335 { 782000000, 62500, 0x85, 0x08 },
308 { 999999999, 44000000, 62500, 0x85, 0x88 }, 336 { 999999999, 62500, 0x85, 0x88 },
309 } 337 }
310}; 338};
311EXPORT_SYMBOL(dvb_pll_tdhu2); 339EXPORT_SYMBOL(dvb_pll_tdhu2);
@@ -317,11 +345,12 @@ struct dvb_pll_desc dvb_pll_tuv1236d = {
317 .name = "Philips TUV1236D", 345 .name = "Philips TUV1236D",
318 .min = 54000000, 346 .min = 54000000,
319 .max = 864000000, 347 .max = 864000000,
348 .iffreq= 44000000,
320 .count = 3, 349 .count = 3,
321 .entries = { 350 .entries = {
322 { 157250000, 44000000, 62500, 0xc6, 0x41 }, 351 { 157250000, 62500, 0xc6, 0x41 },
323 { 454000000, 44000000, 62500, 0xc6, 0x42 }, 352 { 454000000, 62500, 0xc6, 0x42 },
324 { 999999999, 44000000, 62500, 0xc6, 0x44 }, 353 { 999999999, 62500, 0xc6, 0x44 },
325 }, 354 },
326}; 355};
327EXPORT_SYMBOL(dvb_pll_tuv1236d); 356EXPORT_SYMBOL(dvb_pll_tuv1236d);
@@ -333,14 +362,15 @@ struct dvb_pll_desc dvb_pll_samsung_tbmv = {
333 .name = "Samsung TBMV30111IN / TBMV30712IN1", 362 .name = "Samsung TBMV30111IN / TBMV30712IN1",
334 .min = 54000000, 363 .min = 54000000,
335 .max = 860000000, 364 .max = 860000000,
365 .iffreq= 44000000,
336 .count = 6, 366 .count = 6,
337 .entries = { 367 .entries = {
338 { 172000000, 44000000, 166666, 0xb4, 0x01 }, 368 { 172000000, 166667, 0xb4, 0x01 },
339 { 214000000, 44000000, 166666, 0xb4, 0x02 }, 369 { 214000000, 166667, 0xb4, 0x02 },
340 { 467000000, 44000000, 166666, 0xbc, 0x02 }, 370 { 467000000, 166667, 0xbc, 0x02 },
341 { 721000000, 44000000, 166666, 0xbc, 0x08 }, 371 { 721000000, 166667, 0xbc, 0x08 },
342 { 841000000, 44000000, 166666, 0xf4, 0x08 }, 372 { 841000000, 166667, 0xf4, 0x08 },
343 { 999999999, 44000000, 166666, 0xfc, 0x02 }, 373 { 999999999, 166667, 0xfc, 0x02 },
344 } 374 }
345}; 375};
346EXPORT_SYMBOL(dvb_pll_samsung_tbmv); 376EXPORT_SYMBOL(dvb_pll_samsung_tbmv);
@@ -352,12 +382,13 @@ struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
352 .name = "Philips SD1878", 382 .name = "Philips SD1878",
353 .min = 950000, 383 .min = 950000,
354 .max = 2150000, 384 .max = 2150000,
385 .iffreq= 249, /* zero-IF, offset 249 is to round up */
355 .count = 4, 386 .count = 4,
356 .entries = { 387 .entries = {
357 { 1250000, 499, 500, 0xc4, 0x00}, 388 { 1250000, 500, 0xc4, 0x00},
358 { 1550000, 499, 500, 0xc4, 0x40}, 389 { 1550000, 500, 0xc4, 0x40},
359 { 2050000, 499, 500, 0xc4, 0x80}, 390 { 2050000, 500, 0xc4, 0x80},
360 { 2150000, 499, 500, 0xc4, 0xc0}, 391 { 2150000, 500, 0xc4, 0xc0},
361 }, 392 },
362}; 393};
363EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261); 394EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261);
@@ -388,18 +419,19 @@ struct dvb_pll_desc dvb_pll_philips_td1316 = {
388 .name = "Philips TD1316", 419 .name = "Philips TD1316",
389 .min = 87000000, 420 .min = 87000000,
390 .max = 895000000, 421 .max = 895000000,
422 .iffreq= 36166667,
391 .setbw = td1316_bw, 423 .setbw = td1316_bw,
392 .count = 9, 424 .count = 9,
393 .entries = { 425 .entries = {
394 { 93834000, 36166000, 166666, 0xca, 0x60}, 426 { 93834000, 166667, 0xca, 0x60},
395 { 123834000, 36166000, 166666, 0xca, 0xa0}, 427 { 123834000, 166667, 0xca, 0xa0},
396 { 163834000, 36166000, 166666, 0xca, 0xc0}, 428 { 163834000, 166667, 0xca, 0xc0},
397 { 253834000, 36166000, 166666, 0xca, 0x60}, 429 { 253834000, 166667, 0xca, 0x60},
398 { 383834000, 36166000, 166666, 0xca, 0xa0}, 430 { 383834000, 166667, 0xca, 0xa0},
399 { 443834000, 36166000, 166666, 0xca, 0xc0}, 431 { 443834000, 166667, 0xca, 0xc0},
400 { 583834000, 36166000, 166666, 0xca, 0x60}, 432 { 583834000, 166667, 0xca, 0x60},
401 { 793834000, 36166000, 166666, 0xca, 0xa0}, 433 { 793834000, 166667, 0xca, 0xa0},
402 { 858834000, 36166000, 166666, 0xca, 0xe0}, 434 { 858834000, 166667, 0xca, 0xe0},
403 }, 435 },
404}; 436};
405EXPORT_SYMBOL(dvb_pll_philips_td1316); 437EXPORT_SYMBOL(dvb_pll_philips_td1316);
@@ -409,15 +441,41 @@ struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
409 .name = "Thomson FE6600", 441 .name = "Thomson FE6600",
410 .min = 44250000, 442 .min = 44250000,
411 .max = 858000000, 443 .max = 858000000,
444 .iffreq= 36125000,
412 .count = 4, 445 .count = 4,
413 .entries = { 446 .entries = {
414 { 250000000, 36213333, 166667, 0xb4, 0x12 }, 447 { 250000000, 166667, 0xb4, 0x12 },
415 { 455000000, 36213333, 166667, 0xfe, 0x11 }, 448 { 455000000, 166667, 0xfe, 0x11 },
416 { 775500000, 36213333, 166667, 0xbc, 0x18 }, 449 { 775500000, 166667, 0xbc, 0x18 },
417 { 999999999, 36213333, 166667, 0xf4, 0x18 }, 450 { 999999999, 166667, 0xf4, 0x18 },
418 } 451 }
419}; 452};
420EXPORT_SYMBOL(dvb_pll_thomson_fe6600); 453EXPORT_SYMBOL(dvb_pll_thomson_fe6600);
454static void opera1_bw(u8 *buf, u32 freq, int bandwidth)
455{
456 if (bandwidth == BANDWIDTH_8_MHZ)
457 buf[2] |= 0x08;
458}
459
460struct dvb_pll_desc dvb_pll_opera1 = {
461 .name = "Opera Tuner",
462 .min = 900000,
463 .max = 2250000,
464 .iffreq= 0,
465 .setbw = opera1_bw,
466 .count = 8,
467 .entries = {
468 { 1064000, 500, 0xe5, 0xc6 },
469 { 1169000, 500, 0xe5, 0xe6 },
470 { 1299000, 500, 0xe5, 0x24 },
471 { 1444000, 500, 0xe5, 0x44 },
472 { 1606000, 500, 0xe5, 0x64 },
473 { 1777000, 500, 0xe5, 0x84 },
474 { 1941000, 500, 0xe5, 0xa4 },
475 { 2250000, 500, 0xe5, 0xc4 },
476 }
477};
478EXPORT_SYMBOL(dvb_pll_opera1);
421 479
422struct dvb_pll_priv { 480struct dvb_pll_priv {
423 /* i2c details */ 481 /* i2c details */
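The opera1_bw() helper a few lines up shows the per-descriptor bandwidth hook: dvb_pll_configure() fills buf[] from the frequency table and then, if .setbw is set, lets the descriptor patch the bytes for the requested bandwidth. A minimal sketch of such a callback for a hypothetical tuner follows; the register bit is invented, and buf[3] is taken to be the cb byte only by analogy with the debug print in the next hunk.

/* hypothetical example only: select 7 MHz bandwidth in the cb byte */
static void example_tuner_bw(u8 *buf, u32 freq, int bandwidth)
{
	if (bandwidth == BANDWIDTH_7_MHZ)
		buf[3] |= 0x10;		/* assumed bandwidth-select bit */
}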
@@ -459,7 +517,8 @@ int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
459 if (i == desc->count) 517 if (i == desc->count)
460 return -EINVAL; 518 return -EINVAL;
461 519
462 div = (freq + desc->entries[i].offset) / desc->entries[i].stepsize; 520 div = (freq + desc->iffreq + desc->entries[i].stepsize/2) /
521 desc->entries[i].stepsize;
463 buf[0] = div >> 8; 522 buf[0] = div >> 8;
464 buf[1] = div & 0xff; 523 buf[1] = div & 0xff;
465 buf[2] = desc->entries[i].config; 524 buf[2] = desc->entries[i].config;
@@ -473,7 +532,7 @@ int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
473 desc->name, div, buf[0], buf[1], buf[2], buf[3]); 532 desc->name, div, buf[0], buf[1], buf[2], buf[3]);
474 533
475 // calculate the frequency we set it to 534 // calculate the frequency we set it to
476 return (div * desc->entries[i].stepsize) - desc->entries[i].offset; 535 return (div * desc->entries[i].stepsize) - desc->iffreq;
477} 536}
478EXPORT_SYMBOL(dvb_pll_configure); 537EXPORT_SYMBOL(dvb_pll_configure);
479 538
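With the per-entry IF folded into desc->iffreq, the divider is also rounded to the nearest step instead of truncated. A standalone sketch of the same arithmetic, using the Thomson dtt761x numbers from the table above (44 MHz IF, 62.5 kHz step) for a 575 MHz request:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq   = 575000000;	/* requested RF, Hz */
	uint32_t iffreq =  44000000;	/* desc->iffreq for the dtt761x */
	uint32_t step   =     62500;	/* entry stepsize */

	/* same rounding as dvb_pll_configure(): add half a step first */
	uint32_t div   = (freq + iffreq + step / 2) / step;	/* 9904 */
	uint32_t tuned = div * step - iffreq;			/* 575000000 */

	printf("div=%u buf0=0x%02x buf1=0x%02x tuned=%u\n",
	       div, (div >> 8) & 0xff, div & 0xff, tuned);
	return 0;
}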
@@ -487,35 +546,27 @@ static int dvb_pll_release(struct dvb_frontend *fe)
487static int dvb_pll_sleep(struct dvb_frontend *fe) 546static int dvb_pll_sleep(struct dvb_frontend *fe)
488{ 547{
489 struct dvb_pll_priv *priv = fe->tuner_priv; 548 struct dvb_pll_priv *priv = fe->tuner_priv;
490 u8 buf[4];
491 struct i2c_msg msg =
492 { .addr = priv->pll_i2c_address, .flags = 0,
493 .buf = buf, .len = sizeof(buf) };
494 int i;
495 int result;
496 549
497 if (priv->i2c == NULL) 550 if (priv->i2c == NULL)
498 return -EINVAL; 551 return -EINVAL;
499 552
500 for (i = 0; i < priv->pll_desc->count; i++) { 553 if (priv->pll_desc->sleepdata) {
501 if (priv->pll_desc->entries[i].limit == 0) 554 struct i2c_msg msg = { .flags = 0,
502 break; 555 .addr = priv->pll_i2c_address,
503 } 556 .buf = priv->pll_desc->sleepdata + 1,
504 if (i == priv->pll_desc->count) 557 .len = priv->pll_desc->sleepdata[0] };
505 return 0;
506 558
507 buf[0] = 0; 559 int result;
508 buf[1] = 0;
509 buf[2] = priv->pll_desc->entries[i].config;
510 buf[3] = priv->pll_desc->entries[i].cb;
511 560
512 if (fe->ops.i2c_gate_ctrl) 561 if (fe->ops.i2c_gate_ctrl)
513 fe->ops.i2c_gate_ctrl(fe, 1); 562 fe->ops.i2c_gate_ctrl(fe, 1);
514 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) { 563 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
515 return result; 564 return result;
565 }
566 return 0;
516 } 567 }
517 568 /* Shouldn't be called when initdata is NULL, maybe BUG()? */
518 return 0; 569 return -EINVAL;
519} 570}
520 571
521static int dvb_pll_set_params(struct dvb_frontend *fe, 572static int dvb_pll_set_params(struct dvb_frontend *fe,
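The new sleepdata (and, in the next hunks, initdata) arrays are length-prefixed I2C payloads: byte 0 is the number of bytes to send, the remaining bytes go on the wire unchanged, which is why the table entry formerly reserved for the power-down state can go away. For the dtt759x above, (u8[]){ 2, 0x84, 0x03 } means "write 0x84 0x03 to the PLL". A sketch of the framing, in driver context rather than as standalone code:

/* length-prefixed payload, as used by .sleepdata/.initdata */
static u8 example_sleepdata[] = { 2, 0x84, 0x03 };	/* send 0x84, 0x03 */

struct i2c_msg msg = {
	.addr  = priv->pll_i2c_address,		/* as in dvb_pll_sleep() */
	.flags = 0,
	.buf   = example_sleepdata + 1,		/* skip the length byte */
	.len   = example_sleepdata[0],		/* 2 bytes follow */
};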
@@ -599,9 +650,35 @@ static int dvb_pll_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
599 return 0; 650 return 0;
600} 651}
601 652
653static int dvb_pll_init(struct dvb_frontend *fe)
654{
655 struct dvb_pll_priv *priv = fe->tuner_priv;
656
657 if (priv->i2c == NULL)
658 return -EINVAL;
659
660 if (priv->pll_desc->initdata) {
661 struct i2c_msg msg = { .flags = 0,
662 .addr = priv->pll_i2c_address,
663 .buf = priv->pll_desc->initdata + 1,
664 .len = priv->pll_desc->initdata[0] };
665
666 int result;
667 if (fe->ops.i2c_gate_ctrl)
668 fe->ops.i2c_gate_ctrl(fe, 1);
669 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
670 return result;
671 }
672 return 0;
673 }
674 /* Shouldn't be called when initdata is NULL, maybe BUG()? */
675 return -EINVAL;
676}
677
602static struct dvb_tuner_ops dvb_pll_tuner_ops = { 678static struct dvb_tuner_ops dvb_pll_tuner_ops = {
603 .release = dvb_pll_release, 679 .release = dvb_pll_release,
604 .sleep = dvb_pll_sleep, 680 .sleep = dvb_pll_sleep,
681 .init = dvb_pll_init,
605 .set_params = dvb_pll_set_params, 682 .set_params = dvb_pll_set_params,
606 .calc_regs = dvb_pll_calc_regs, 683 .calc_regs = dvb_pll_calc_regs,
607 .get_frequency = dvb_pll_get_frequency, 684 .get_frequency = dvb_pll_get_frequency,
@@ -640,9 +717,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
640 memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops, 717 memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
641 sizeof(struct dvb_tuner_ops)); 718 sizeof(struct dvb_tuner_ops));
642 719
643 strncpy(fe->ops.tuner_ops.info.name, desc->name, 128); 720 strncpy(fe->ops.tuner_ops.info.name, desc->name,
721 sizeof(fe->ops.tuner_ops.info.name));
644 fe->ops.tuner_ops.info.frequency_min = desc->min; 722 fe->ops.tuner_ops.info.frequency_min = desc->min;
645 fe->ops.tuner_ops.info.frequency_min = desc->max; 723 fe->ops.tuner_ops.info.frequency_min = desc->max;
724 if (!desc->initdata)
725 fe->ops.tuner_ops.init = NULL;
726 if (!desc->sleepdata)
727 fe->ops.tuner_ops.sleep = NULL;
646 728
647 fe->tuner_priv = priv; 729 fe->tuner_priv = priv;
648 return fe; 730 return fe;
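Clearing .init/.sleep when a descriptor carries no initdata/sleepdata relies on callers testing the ops pointer before use; presumably the frontend core (and any card driver driving the tuner directly) guards the call roughly as below, so tuners without power-up/power-down sequences are simply left alone. This is a hedged sketch, not taken from the core itself.

/* assumed caller-side pattern, not from dvb_frontend core */
if (fe->ops.tuner_ops.init)
	fe->ops.tuner_ops.init(fe);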
diff --git a/drivers/media/dvb/frontends/dvb-pll.h b/drivers/media/dvb/frontends/dvb-pll.h
index 681186a5e5eb..5209f46f0893 100644
--- a/drivers/media/dvb/frontends/dvb-pll.h
+++ b/drivers/media/dvb/frontends/dvb-pll.h
@@ -12,11 +12,13 @@ struct dvb_pll_desc {
12 char *name; 12 char *name;
13 u32 min; 13 u32 min;
14 u32 max; 14 u32 max;
15 u32 iffreq;
15 void (*setbw)(u8 *buf, u32 freq, int bandwidth); 16 void (*setbw)(u8 *buf, u32 freq, int bandwidth);
17 u8 *initdata;
18 u8 *sleepdata;
16 int count; 19 int count;
17 struct { 20 struct {
18 u32 limit; 21 u32 limit;
19 u32 offset;
20 u32 stepsize; 22 u32 stepsize;
21 u8 config; 23 u8 config;
22 u8 cb; 24 u8 cb;
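After this change a descriptor carries one IF for the whole tuner plus optional init/sleep byte strings, and each table entry is down to limit/stepsize/config/cb. A hypothetical descriptor using the new layout, with all values invented purely for illustration:

static struct dvb_pll_desc dvb_pll_example = {	/* hypothetical */
	.name      = "Example tuner",
	.min       =  54000000,
	.max       = 858000000,
	.iffreq    =  36125000,			/* one IF per tuner */
	.initdata  = (u8[]){ 2, 0x9c, 0x60 },	/* length-prefixed, invented */
	.sleepdata = (u8[]){ 2, 0x84, 0x03 },
	.count     = 2,
	.entries   = {
		{ 470000000, 62500, 0x8e, 0x01 },  /* limit, step, config, cb */
		{ 999999999, 62500, 0x8e, 0x04 },
	},
};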
@@ -46,6 +48,7 @@ extern struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261;
46extern struct dvb_pll_desc dvb_pll_philips_td1316; 48extern struct dvb_pll_desc dvb_pll_philips_td1316;
47 49
48extern struct dvb_pll_desc dvb_pll_thomson_fe6600; 50extern struct dvb_pll_desc dvb_pll_thomson_fe6600;
51extern struct dvb_pll_desc dvb_pll_opera1;
49 52
50extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf, 53extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
51 u32 freq, int bandwidth); 54 u32 freq, int bandwidth);
@@ -59,9 +62,20 @@ extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
59 * @param desc dvb_pll_desc to use. 62 * @param desc dvb_pll_desc to use.
60 * @return Frontend pointer on success, NULL on failure 63 * @return Frontend pointer on success, NULL on failure
61 */ 64 */
65#if defined(CONFIG_DVB_PLL) || (defined(CONFIG_DVB_PLL_MODULE) && defined(MODULE))
62extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, 66extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
63 int pll_addr, 67 int pll_addr,
64 struct i2c_adapter *i2c, 68 struct i2c_adapter *i2c,
65 struct dvb_pll_desc *desc); 69 struct dvb_pll_desc *desc);
70#else
71static inline struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
72 int pll_addr,
73 struct i2c_adapter *i2c,
74 struct dvb_pll_desc *desc)
75{
76 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
77 return NULL;
78}
79#endif
66 80
67#endif 81#endif
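The #else stub keeps card drivers linking when CONFIG_DVB_PLL is off: they call dvb_pll_attach() unconditionally and only need to cope with a NULL return, the same pattern used by the lgh06xf.h header removed below. A minimal caller sketch; the PLL address and the adapter/frontend variable names are assumptions, not from any particular card driver.

/* hedged sketch of a card driver hooking up the PLL */
if (!dvb_pll_attach(fe, 0x61, &dev->i2c_adap, &dvb_pll_thomson_dtt761x)) {
	printk(KERN_WARNING "example: no PLL driver, tuning disabled\n");
	/* keep the frontend registered; only tuning is unavailable */
}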
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index 68aad0f6519f..e25286e2d431 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -475,7 +475,7 @@ static int lgdt3302_read_status(struct dvb_frontend* fe, fe_status_t* status)
475 *status |= FE_HAS_CARRIER; 475 *status |= FE_HAS_CARRIER;
476 break; 476 break;
477 default: 477 default:
478 printk("KERN_WARNING lgdt330x: %s: Modulation set to unsupported value\n", __FUNCTION__); 478 printk(KERN_WARNING "lgdt330x: %s: Modulation set to unsupported value\n", __FUNCTION__);
479 } 479 }
480 480
481 return 0; 481 return 0;
@@ -534,7 +534,7 @@ static int lgdt3303_read_status(struct dvb_frontend* fe, fe_status_t* status)
534 } 534 }
535 break; 535 break;
536 default: 536 default:
537 printk("KERN_WARNING lgdt330x: %s: Modulation set to unsupported value\n", __FUNCTION__); 537 printk(KERN_WARNING "lgdt330x: %s: Modulation set to unsupported value\n", __FUNCTION__);
538 } 538 }
539 return 0; 539 return 0;
540} 540}
diff --git a/drivers/media/dvb/frontends/lgh06xf.c b/drivers/media/dvb/frontends/lgh06xf.c
deleted file mode 100644
index 2202d0cc878b..000000000000
--- a/drivers/media/dvb/frontends/lgh06xf.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * lgh06xf.c - ATSC Tuner support for LG TDVS-H06xF
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#include "dvb-pll.h"
20#include "lgh06xf.h"
21
22#define LG_H06XF_PLL_I2C_ADDR 0x61
23
24struct lgh06xf_priv {
25 struct i2c_adapter *i2c;
26 u32 frequency;
27};
28
29static int lgh06xf_release(struct dvb_frontend *fe)
30{
31 kfree(fe->tuner_priv);
32 fe->tuner_priv = NULL;
33 return 0;
34}
35
36static int lgh06xf_set_params(struct dvb_frontend* fe,
37 struct dvb_frontend_parameters* params)
38{
39 struct lgh06xf_priv *priv = fe->tuner_priv;
40 u8 buf[4];
41 struct i2c_msg msg = { .addr = LG_H06XF_PLL_I2C_ADDR, .flags = 0,
42 .buf = buf, .len = sizeof(buf) };
43 u32 frequency;
44 int result;
45
46 if ((result = dvb_pll_configure(&dvb_pll_lg_tdvs_h06xf, buf,
47 params->frequency, 0)) < 0)
48 return result;
49 else
50 frequency = result;
51
52 if (fe->ops.i2c_gate_ctrl)
53 fe->ops.i2c_gate_ctrl(fe, 1);
54 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
55 printk(KERN_WARNING "lgh06xf: %s error "
56 "(addr %02x <- %02x, result = %i)\n",
57 __FUNCTION__, buf[0], buf[1], result);
58 if (result < 0)
59 return result;
60 else
61 return -EREMOTEIO;
62 }
63
64 /* Set the Auxiliary Byte. */
65 buf[0] = buf[2];
66 buf[0] &= ~0x20;
67 buf[0] |= 0x18;
68 buf[1] = 0x50;
69 msg.len = 2;
70 if (fe->ops.i2c_gate_ctrl)
71 fe->ops.i2c_gate_ctrl(fe, 1);
72 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
73 printk(KERN_WARNING "lgh06xf: %s error "
74 "(addr %02x <- %02x, result = %i)\n",
75 __FUNCTION__, buf[0], buf[1], result);
76 if (result < 0)
77 return result;
78 else
79 return -EREMOTEIO;
80 }
81
82 priv->frequency = frequency;
83
84 return 0;
85}
86
87static int lgh06xf_get_frequency(struct dvb_frontend *fe, u32 *frequency)
88{
89 struct lgh06xf_priv *priv = fe->tuner_priv;
90 *frequency = priv->frequency;
91 return 0;
92}
93
94static struct dvb_tuner_ops lgh06xf_tuner_ops = {
95 .release = lgh06xf_release,
96 .set_params = lgh06xf_set_params,
97 .get_frequency = lgh06xf_get_frequency,
98};
99
100struct dvb_frontend* lgh06xf_attach(struct dvb_frontend *fe,
101 struct i2c_adapter *i2c)
102{
103 struct lgh06xf_priv *priv = NULL;
104
105 priv = kzalloc(sizeof(struct lgh06xf_priv), GFP_KERNEL);
106 if (priv == NULL)
107 return NULL;
108
109 priv->i2c = i2c;
110
111 memcpy(&fe->ops.tuner_ops, &lgh06xf_tuner_ops,
112 sizeof(struct dvb_tuner_ops));
113
114 strlcpy(fe->ops.tuner_ops.info.name, dvb_pll_lg_tdvs_h06xf.name,
115 sizeof(fe->ops.tuner_ops.info.name));
116
117 fe->ops.tuner_ops.info.frequency_min = dvb_pll_lg_tdvs_h06xf.min;
118 fe->ops.tuner_ops.info.frequency_max = dvb_pll_lg_tdvs_h06xf.max;
119
120 fe->tuner_priv = priv;
121 return fe;
122}
123
124EXPORT_SYMBOL(lgh06xf_attach);
125
126MODULE_DESCRIPTION("LG TDVS-H06xF ATSC Tuner support");
127MODULE_AUTHOR("Michael Krufky");
128MODULE_LICENSE("GPL");
129
130/*
131 * Local variables:
132 * c-basic-offset: 8
133 * End:
134 */
diff --git a/drivers/media/dvb/frontends/lgh06xf.h b/drivers/media/dvb/frontends/lgh06xf.h
deleted file mode 100644
index 510b4bedfb24..000000000000
--- a/drivers/media/dvb/frontends/lgh06xf.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * lgh06xf.h - ATSC Tuner support for LG TDVS-H06xF
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LGH06XF_H_
20#define _LGH06XF_H_
21#include "dvb_frontend.h"
22
23#if defined(CONFIG_DVB_TUNER_LGH06XF) || (defined(CONFIG_DVB_TUNER_LGH06XF_MODULE) && defined(MODULE))
24extern struct dvb_frontend* lgh06xf_attach(struct dvb_frontend* fe,
25 struct i2c_adapter *i2c);
26#else
27static inline struct dvb_frontend* lgh06xf_attach(struct dvb_frontend* fe,
28 struct i2c_adapter *i2c)
29{
30 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
31 return NULL;
32}
33#endif /* CONFIG_DVB_TUNER_LGH06XF */
34
35#endif /* _LGH06XF_H_ */
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 5a3a6e53cda2..4e0aca7c67aa 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -1,6 +1,9 @@
1/* 1/*
2 * Support for OR51132 (pcHDTV HD-3000) - VSB/QAM 2 * Support for OR51132 (pcHDTV HD-3000) - VSB/QAM
3 * 3 *
4 *
5 * Copyright (C) 2007 Trent Piepho <xyzzy@speakeasy.org>
6 *
4 * Copyright (C) 2005 Kirk Lapray <kirk_lapray@bigfoot.com> 7 * Copyright (C) 2005 Kirk Lapray <kirk_lapray@bigfoot.com>
5 * 8 *
6 * Based on code from Jack Kelliher (kelliher@xmission.com) 9 * Based on code from Jack Kelliher (kelliher@xmission.com)
@@ -69,46 +72,70 @@ struct or51132_state
69 u32 current_frequency; 72 u32 current_frequency;
70}; 73};
71 74
72static int i2c_writebytes (struct or51132_state* state, u8 reg, u8 *buf, int len) 75
76/* Write buffer to demod */
77static int or51132_writebuf(struct or51132_state *state, const u8 *buf, int len)
73{ 78{
74 int err; 79 int err;
75 struct i2c_msg msg; 80 struct i2c_msg msg = { .addr = state->config->demod_address,
76 msg.addr = reg; 81 .flags = 0, .buf = (u8*)buf, .len = len };
77 msg.flags = 0;
78 msg.len = len;
79 msg.buf = buf;
80 82
83 /* msleep(20); */ /* doesn't appear to be necessary */
81 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) { 84 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
82 printk(KERN_WARNING "or51132: i2c_writebytes error (addr %02x, err == %i)\n", reg, err); 85 printk(KERN_WARNING "or51132: I2C write (addr 0x%02x len %d) error: %d\n",
86 msg.addr, msg.len, err);
83 return -EREMOTEIO; 87 return -EREMOTEIO;
84 } 88 }
85
86 return 0; 89 return 0;
87} 90}
88 91
89static u8 i2c_readbytes (struct or51132_state* state, u8 reg, u8* buf, int len) 92/* Write constant bytes, e.g. or51132_writebytes(state, 0x04, 0x42, 0x00);
93 Less code and more efficient that loading a buffer on the stack with
94 the bytes to send and then calling or51132_writebuf() on that. */
95#define or51132_writebytes(state, data...) \
96 ({ const static u8 _data[] = {data}; \
97 or51132_writebuf(state, _data, sizeof(_data)); })
98
99/* Read data from demod into buffer. Returns 0 on success. */
100static int or51132_readbuf(struct or51132_state *state, u8 *buf, int len)
90{ 101{
91 int err; 102 int err;
92 struct i2c_msg msg; 103 struct i2c_msg msg = { .addr = state->config->demod_address,
93 msg.addr = reg; 104 .flags = I2C_M_RD, .buf = buf, .len = len };
94 msg.flags = I2C_M_RD;
95 msg.len = len;
96 msg.buf = buf;
97 105
106 /* msleep(20); */ /* doesn't appear to be necessary */
98 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) { 107 if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
99 printk(KERN_WARNING "or51132: i2c_readbytes error (addr %02x, err == %i)\n", reg, err); 108 printk(KERN_WARNING "or51132: I2C read (addr 0x%02x len %d) error: %d\n",
109 msg.addr, msg.len, err);
100 return -EREMOTEIO; 110 return -EREMOTEIO;
101 } 111 }
102
103 return 0; 112 return 0;
104} 113}
105 114
115/* Reads a 16-bit demod register. Returns <0 on error. */
116static int or51132_readreg(struct or51132_state *state, u8 reg)
117{
118 u8 buf[2] = { 0x04, reg };
119 struct i2c_msg msg[2] = {
120 {.addr = state->config->demod_address, .flags = 0,
121 .buf = buf, .len = 2 },
122 {.addr = state->config->demod_address, .flags = I2C_M_RD,
123 .buf = buf, .len = 2 }};
124 int err;
125
126 if ((err = i2c_transfer(state->i2c, msg, 2)) != 2) {
127 printk(KERN_WARNING "or51132: I2C error reading register %d: %d\n",
128 reg, err);
129 return -EREMOTEIO;
130 }
131 return le16_to_cpup((u16*)buf);
132}
133
106static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware *fw) 134static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware *fw)
107{ 135{
108 struct or51132_state* state = fe->demodulator_priv; 136 struct or51132_state* state = fe->demodulator_priv;
109 static u8 run_buf[] = {0x7F,0x01}; 137 const static u8 run_buf[] = {0x7F,0x01};
110 u8 rec_buf[8]; 138 u8 rec_buf[8];
111 u8 cmd_buf[3];
112 u32 firmwareAsize, firmwareBsize; 139 u32 firmwareAsize, firmwareBsize;
113 int i,ret; 140 int i,ret;
114 141
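or51132_writebytes() is a GCC statement-expression macro: the literal bytes become a static const array in .rodata and a single or51132_writebuf() call, so short command sequences no longer need a temporary buffer assembled on the stack. Roughly, an invocation expands as shown below (illustration only):

/* or51132_writebytes(state, 0x10, 0x10, 0x00) expands to roughly: */
({
	const static u8 _data[] = { 0x10, 0x10, 0x00 };
	or51132_writebuf(state, _data, sizeof(_data));	/* 3 bytes */
})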
@@ -121,30 +148,21 @@ static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware
121 dprintk("FirmwareB is %i bytes\n",firmwareBsize); 148 dprintk("FirmwareB is %i bytes\n",firmwareBsize);
122 149
123 /* Upload firmware */ 150 /* Upload firmware */
124 if ((ret = i2c_writebytes(state,state->config->demod_address, 151 if ((ret = or51132_writebuf(state, &fw->data[8], firmwareAsize))) {
125 &fw->data[8],firmwareAsize))) {
126 printk(KERN_WARNING "or51132: load_firmware error 1\n"); 152 printk(KERN_WARNING "or51132: load_firmware error 1\n");
127 return ret; 153 return ret;
128 } 154 }
129 msleep(1); /* 1ms */ 155 if ((ret = or51132_writebuf(state, &fw->data[8+firmwareAsize],
130 if ((ret = i2c_writebytes(state,state->config->demod_address, 156 firmwareBsize))) {
131 &fw->data[8+firmwareAsize],firmwareBsize))) {
132 printk(KERN_WARNING "or51132: load_firmware error 2\n"); 157 printk(KERN_WARNING "or51132: load_firmware error 2\n");
133 return ret; 158 return ret;
134 } 159 }
135 msleep(1); /* 1ms */
136 160
137 if ((ret = i2c_writebytes(state,state->config->demod_address, 161 if ((ret = or51132_writebuf(state, run_buf, 2))) {
138 run_buf,2))) {
139 printk(KERN_WARNING "or51132: load_firmware error 3\n"); 162 printk(KERN_WARNING "or51132: load_firmware error 3\n");
140 return ret; 163 return ret;
141 } 164 }
142 165 if ((ret = or51132_writebuf(state, run_buf, 2))) {
143 /* Wait at least 5 msec */
144 msleep(20); /* 10ms */
145
146 if ((ret = i2c_writebytes(state,state->config->demod_address,
147 run_buf,2))) {
148 printk(KERN_WARNING "or51132: load_firmware error 4\n"); 166 printk(KERN_WARNING "or51132: load_firmware error 4\n");
149 return ret; 167 return ret;
150 } 168 }
@@ -154,43 +172,25 @@ static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware
154 172
155 /* Read back ucode version to besure we loaded correctly and are really up and running */ 173 /* Read back ucode version to besure we loaded correctly and are really up and running */
156 /* Get uCode version */ 174 /* Get uCode version */
157 cmd_buf[0] = 0x10; 175 if ((ret = or51132_writebytes(state, 0x10, 0x10, 0x00))) {
158 cmd_buf[1] = 0x10;
159 cmd_buf[2] = 0x00;
160 msleep(20); /* 20ms */
161 if ((ret = i2c_writebytes(state,state->config->demod_address,
162 cmd_buf,3))) {
163 printk(KERN_WARNING "or51132: load_firmware error a\n"); 176 printk(KERN_WARNING "or51132: load_firmware error a\n");
164 return ret; 177 return ret;
165 } 178 }
166 179 if ((ret = or51132_writebytes(state, 0x04, 0x17))) {
167 cmd_buf[0] = 0x04;
168 cmd_buf[1] = 0x17;
169 msleep(20); /* 20ms */
170 if ((ret = i2c_writebytes(state,state->config->demod_address,
171 cmd_buf,2))) {
172 printk(KERN_WARNING "or51132: load_firmware error b\n"); 180 printk(KERN_WARNING "or51132: load_firmware error b\n");
173 return ret; 181 return ret;
174 } 182 }
175 183 if ((ret = or51132_writebytes(state, 0x00, 0x00))) {
176 cmd_buf[0] = 0x00;
177 cmd_buf[1] = 0x00;
178 msleep(20); /* 20ms */
179 if ((ret = i2c_writebytes(state,state->config->demod_address,
180 cmd_buf,2))) {
181 printk(KERN_WARNING "or51132: load_firmware error c\n"); 184 printk(KERN_WARNING "or51132: load_firmware error c\n");
182 return ret; 185 return ret;
183 } 186 }
184 187 for (i=0;i<4;i++) {
185 for(i=0;i<4;i++) {
186 msleep(20); /* 20ms */
187 /* Once upon a time, this command might have had something 188 /* Once upon a time, this command might have had something
188 to do with getting the firmware version, but it's 189 to do with getting the firmware version, but it's
189 not used anymore: 190 not used anymore:
190 {0x04,0x00,0x30,0x00,i+1} */ 191 {0x04,0x00,0x30,0x00,i+1} */
191 /* Read 8 bytes, two bytes at a time */ 192 /* Read 8 bytes, two bytes at a time */
192 if ((ret = i2c_readbytes(state,state->config->demod_address, 193 if ((ret = or51132_readbuf(state, &rec_buf[i*2], 2))) {
193 &rec_buf[i*2],2))) {
194 printk(KERN_WARNING 194 printk(KERN_WARNING
195 "or51132: load_firmware error d - %d\n",i); 195 "or51132: load_firmware error d - %d\n",i);
196 return ret; 196 return ret;
@@ -204,12 +204,7 @@ static int or51132_load_firmware (struct dvb_frontend* fe, const struct firmware
204 rec_buf[3],rec_buf[2]>>4,rec_buf[2]&0x0f, 204 rec_buf[3],rec_buf[2]>>4,rec_buf[2]&0x0f,
205 rec_buf[5],rec_buf[4]>>4,rec_buf[4]&0x0f); 205 rec_buf[5],rec_buf[4]>>4,rec_buf[4]&0x0f);
206 206
207 cmd_buf[0] = 0x10; 207 if ((ret = or51132_writebytes(state, 0x10, 0x00, 0x00))) {
208 cmd_buf[1] = 0x00;
209 cmd_buf[2] = 0x00;
210 msleep(20); /* 20ms */
211 if ((ret = i2c_writebytes(state,state->config->demod_address,
212 cmd_buf,3))) {
213 printk(KERN_WARNING "or51132: load_firmware error e\n"); 208 printk(KERN_WARNING "or51132: load_firmware error e\n");
214 return ret; 209 return ret;
215 } 210 }
@@ -241,70 +236,55 @@ static int or51132_sleep(struct dvb_frontend* fe)
241static int or51132_setmode(struct dvb_frontend* fe) 236static int or51132_setmode(struct dvb_frontend* fe)
242{ 237{
243 struct or51132_state* state = fe->demodulator_priv; 238 struct or51132_state* state = fe->demodulator_priv;
244 unsigned char cmd_buf[3]; 239 u8 cmd_buf1[3] = {0x04, 0x01, 0x5f};
240 u8 cmd_buf2[3] = {0x1c, 0x00, 0 };
245 241
246 dprintk("setmode %d\n",(int)state->current_modulation); 242 dprintk("setmode %d\n",(int)state->current_modulation);
247 /* set operation mode in Receiver 1 register; */ 243
248 cmd_buf[0] = 0x04;
249 cmd_buf[1] = 0x01;
250 switch (state->current_modulation) { 244 switch (state->current_modulation) {
251 case QAM_256:
252 case QAM_64:
253 case QAM_AUTO:
254 /* Auto-deinterleave; MPEG ser, MPEG2tr, phase noise-high*/
255 cmd_buf[2] = 0x5F;
256 break;
257 case VSB_8: 245 case VSB_8:
258 /* Auto CH, Auto NTSC rej, MPEGser, MPEG2tr, phase noise-high*/ 246 /* Auto CH, Auto NTSC rej, MPEGser, MPEG2tr, phase noise-high */
259 cmd_buf[2] = 0x50; 247 cmd_buf1[2] = 0x50;
248 /* REC MODE inv IF spectrum, Normal */
249 cmd_buf2[1] = 0x03;
250 /* Channel MODE ATSC/VSB8 */
251 cmd_buf2[2] = 0x06;
260 break; 252 break;
261 default: 253 /* All QAM modes are:
262 printk("setmode:Modulation set to unsupported value\n"); 254 Auto-deinterleave; MPEGser, MPEG2tr, phase noise-high
263 }; 255 REC MODE Normal Carrier Lock */
264 if (i2c_writebytes(state,state->config->demod_address,
265 cmd_buf,3)) {
266 printk(KERN_WARNING "or51132: set_mode error 1\n");
267 return -1;
268 }
269 dprintk("or51132: set #1 to %02x\n", cmd_buf[2]);
270
271 /* Set operation mode in Receiver 6 register */
272 cmd_buf[0] = 0x1C;
273 switch (state->current_modulation) {
274 case QAM_AUTO: 256 case QAM_AUTO:
275 /* REC MODE Normal Carrier Lock */
276 cmd_buf[1] = 0x00;
277 /* Channel MODE Auto QAM64/256 */ 257 /* Channel MODE Auto QAM64/256 */
278 cmd_buf[2] = 0x4f; 258 cmd_buf2[2] = 0x4f;
279 break; 259 break;
280 case QAM_256: 260 case QAM_256:
281 /* REC MODE Normal Carrier Lock */
282 cmd_buf[1] = 0x00;
283 /* Channel MODE QAM256 */ 261 /* Channel MODE QAM256 */
284 cmd_buf[2] = 0x45; 262 cmd_buf2[2] = 0x45;
285 break; 263 break;
286 case QAM_64: 264 case QAM_64:
287 /* REC MODE Normal Carrier Lock */
288 cmd_buf[1] = 0x00;
289 /* Channel MODE QAM64 */ 265 /* Channel MODE QAM64 */
290 cmd_buf[2] = 0x43; 266 cmd_buf2[2] = 0x43;
291 break;
292 case VSB_8:
293 /* REC MODE inv IF spectrum, Normal */
294 cmd_buf[1] = 0x03;
295 /* Channel MODE ATSC/VSB8 */
296 cmd_buf[2] = 0x06;
297 break; 267 break;
298 default: 268 default:
299 printk("setmode: Modulation set to unsupported value\n"); 269 printk(KERN_WARNING
300 }; 270 "or51132: setmode: Modulation set to unsupported value (%d)\n",
301 msleep(20); /* 20ms */ 271 state->current_modulation);
302 if (i2c_writebytes(state,state->config->demod_address, 272 return -EINVAL;
303 cmd_buf,3)) { 273 }
274
275 /* Set Receiver 1 register */
276 if (or51132_writebuf(state, cmd_buf1, 3)) {
277 printk(KERN_WARNING "or51132: set_mode error 1\n");
278 return -EREMOTEIO;
279 }
280 dprintk("set #1 to %02x\n", cmd_buf1[2]);
281
282 /* Set operation mode in Receiver 6 register */
283 if (or51132_writebuf(state, cmd_buf2, 3)) {
304 printk(KERN_WARNING "or51132: set_mode error 2\n"); 284 printk(KERN_WARNING "or51132: set_mode error 2\n");
305 return -1; 285 return -EREMOTEIO;
306 } 286 }
307 dprintk("or51132: set #6 to 0x%02x%02x\n", cmd_buf[1], cmd_buf[2]); 287 dprintk("set #6 to 0x%02x%02x\n", cmd_buf2[1], cmd_buf2[2]);
308 288
309 return 0; 289 return 0;
310} 290}
@@ -401,28 +381,23 @@ static int or51132_get_parameters(struct dvb_frontend* fe,
401 struct dvb_frontend_parameters *param) 381 struct dvb_frontend_parameters *param)
402{ 382{
403 struct or51132_state* state = fe->demodulator_priv; 383 struct or51132_state* state = fe->demodulator_priv;
404 u8 buf[2]; 384 int status;
385 int retry = 1;
405 386
387start:
406 /* Receiver Status */ 388 /* Receiver Status */
407 buf[0]=0x04; 389 if ((status = or51132_readreg(state, 0x00)) < 0) {
408 buf[1]=0x00; 390 printk(KERN_WARNING "or51132: get_parameters: error reading receiver status\n");
409 msleep(30); /* 30ms */
410 if (i2c_writebytes(state,state->config->demod_address,buf,2)) {
411 printk(KERN_WARNING "or51132: get_parameters write error\n");
412 return -EREMOTEIO;
413 }
414 msleep(30); /* 30ms */
415 if (i2c_readbytes(state,state->config->demod_address,buf,2)) {
416 printk(KERN_WARNING "or51132: get_parameters read error\n");
417 return -EREMOTEIO; 391 return -EREMOTEIO;
418 } 392 }
419 switch(buf[0]) { 393 switch(status&0xff) {
420 case 0x06: param->u.vsb.modulation = VSB_8; break; 394 case 0x06: param->u.vsb.modulation = VSB_8; break;
421 case 0x43: param->u.vsb.modulation = QAM_64; break; 395 case 0x43: param->u.vsb.modulation = QAM_64; break;
422 case 0x45: param->u.vsb.modulation = QAM_256; break; 396 case 0x45: param->u.vsb.modulation = QAM_256; break;
423 default: 397 default:
398 if (retry--) goto start;
424 printk(KERN_WARNING "or51132: unknown status 0x%02x\n", 399 printk(KERN_WARNING "or51132: unknown status 0x%02x\n",
425 buf[0]); 400 status&0xff);
426 return -EREMOTEIO; 401 return -EREMOTEIO;
427 } 402 }
428 403
@@ -438,32 +413,21 @@ static int or51132_get_parameters(struct dvb_frontend* fe,
438static int or51132_read_status(struct dvb_frontend* fe, fe_status_t* status) 413static int or51132_read_status(struct dvb_frontend* fe, fe_status_t* status)
439{ 414{
440 struct or51132_state* state = fe->demodulator_priv; 415 struct or51132_state* state = fe->demodulator_priv;
441 unsigned char rec_buf[2]; 416 int reg;
442 unsigned char snd_buf[2];
443 *status = 0;
444 417
445 /* Receiver Status */ 418 /* Receiver Status */
446 snd_buf[0]=0x04; 419 if ((reg = or51132_readreg(state, 0x00)) < 0) {
447 snd_buf[1]=0x00; 420 printk(KERN_WARNING "or51132: read_status: error reading receiver status: %d\n", reg);
448 msleep(30); /* 30ms */ 421 *status = 0;
449 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) { 422 return -EREMOTEIO;
450 printk(KERN_WARNING "or51132: read_status write error\n");
451 return -1;
452 }
453 msleep(30); /* 30ms */
454 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
455 printk(KERN_WARNING "or51132: read_status read error\n");
456 return -1;
457 }
458 dprintk("read_status %x %x\n",rec_buf[0],rec_buf[1]);
459
460 if (rec_buf[1] & 0x01) { /* Receiver Lock */
461 *status |= FE_HAS_SIGNAL;
462 *status |= FE_HAS_CARRIER;
463 *status |= FE_HAS_VITERBI;
464 *status |= FE_HAS_SYNC;
465 *status |= FE_HAS_LOCK;
466 } 423 }
424 dprintk("%s: read_status %04x\n", __FUNCTION__, reg);
425
426 if (reg & 0x0100) /* Receiver Lock */
427 *status = FE_HAS_SIGNAL|FE_HAS_CARRIER|FE_HAS_VITERBI|
428 FE_HAS_SYNC|FE_HAS_LOCK;
429 else
430 *status = 0;
467 return 0; 431 return 0;
468} 432}
469 433
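or51132_readreg() returns the two status bytes as one little-endian 16-bit value (le16_to_cpup), so the old test on the second byte, rec_buf[1] & 0x01, becomes reg & 0x0100. A standalone check of that equivalence:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rec_buf[2] = { 0x06, 0x01 };	/* example: VSB_8, receiver locked */
	/* little-endian: byte 0 is the low byte, byte 1 the high byte */
	uint16_t reg = rec_buf[0] | (rec_buf[1] << 8);

	printf("old test %d, new test %d\n",
	       !!(rec_buf[1] & 0x01), !!(reg & 0x0100));	/* both print 1 */
	return 0;
}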
@@ -506,47 +470,30 @@ static u32 calculate_snr(u32 mse, u32 c)
506static int or51132_read_snr(struct dvb_frontend* fe, u16* snr) 470static int or51132_read_snr(struct dvb_frontend* fe, u16* snr)
507{ 471{
508 struct or51132_state* state = fe->demodulator_priv; 472 struct or51132_state* state = fe->demodulator_priv;
509 u8 rec_buf[2]; 473 int noise, reg;
510 u8 snd_buf[2]; 474 u32 c, usK = 0;
511 u32 noise; 475 int retry = 1;
512 u32 c; 476
513 u32 usK; 477start:
514 478 /* SNR after Equalizer */
515 /* Register is same for VSB or QAM firmware */ 479 noise = or51132_readreg(state, 0x02);
516 snd_buf[0]=0x04; 480 if (noise < 0) {
517 snd_buf[1]=0x02; /* SNR after Equalizer */ 481 printk(KERN_WARNING "or51132: read_snr: error reading equalizer\n");
518 msleep(30); /* 30ms */
519 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) {
520 printk(KERN_WARNING "or51132: snr write error\n");
521 return -EREMOTEIO;
522 }
523 msleep(30); /* 30ms */
524 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
525 printk(KERN_WARNING "or51132: snr read error\n");
526 return -EREMOTEIO; 482 return -EREMOTEIO;
527 } 483 }
528 noise = rec_buf[0] | (rec_buf[1] << 8); 484 dprintk("read_snr noise (%d)\n", noise);
529 dprintk("read_snr noise %x %x (%i)\n",rec_buf[0],rec_buf[1],noise);
530 485
531 /* Read status, contains modulation type for QAM_AUTO and 486 /* Read status, contains modulation type for QAM_AUTO and
532 NTSC filter for VSB */ 487 NTSC filter for VSB */
533 snd_buf[0]=0x04; 488 reg = or51132_readreg(state, 0x00);
534 snd_buf[1]=0x00; /* Status register */ 489 if (reg < 0) {
535 msleep(30); /* 30ms */ 490 printk(KERN_WARNING "or51132: read_snr: error reading receiver status\n");
536 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) {
537 printk(KERN_WARNING "or51132: status write error\n");
538 return -EREMOTEIO;
539 }
540 msleep(30); /* 30ms */
541 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
542 printk(KERN_WARNING "or51132: status read error\n");
543 return -EREMOTEIO; 491 return -EREMOTEIO;
544 } 492 }
545 493
546 usK = 0; 494 switch (reg&0xff) {
547 switch (rec_buf[0]) {
548 case 0x06: 495 case 0x06:
549 usK = (rec_buf[1] & 0x10) ? 0x03000000 : 0; 496 if (reg & 0x1000) usK = 3 << 24;
550 /* Fall through to QAM64 case */ 497 /* Fall through to QAM64 case */
551 case 0x43: 498 case 0x43:
552 c = 150204167; 499 c = 150204167;
@@ -555,11 +502,12 @@ static int or51132_read_snr(struct dvb_frontend* fe, u16* snr)
555 c = 150290396; 502 c = 150290396;
556 break; 503 break;
557 default: 504 default:
558 printk(KERN_ERR "or51132: unknown status 0x%02x\n", rec_buf[0]); 505 printk(KERN_WARNING "or51132: unknown status 0x%02x\n", reg&0xff);
506 if (retry--) goto start;
559 return -EREMOTEIO; 507 return -EREMOTEIO;
560 } 508 }
561 dprintk("%s: modulation %02x, NTSC rej O%s\n", __FUNCTION__, 509 dprintk("%s: modulation %02x, NTSC rej O%s\n", __FUNCTION__,
562 rec_buf[0], rec_buf[1]&0x10?"n":"ff"); 510 reg&0xff, reg&0x1000?"n":"ff");
563 511
564 /* Calculate SNR using noise, c, and NTSC rejection correction */ 512 /* Calculate SNR using noise, c, and NTSC rejection correction */
565 state->snr = calculate_snr(noise, c) - usK; 513 state->snr = calculate_snr(noise, c) - usK;
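The NTSC-rejection correction is unchanged in value: the old 0x03000000 is exactly 3 << 24, and rec_buf[1] & 0x10 is the same bit as reg & 0x1000 once the two bytes are packed into a 16-bit word. The shift by 24 suggests calculate_snr() works in 8.24 fixed point, making the correction 3 dB, though that helper lies outside this hunk. A quick standalone equivalence check:

#include <assert.h>

int main(void)
{
	assert(0x03000000 == (3 << 24));	/* same correction constant */
	assert(0x1000 == (0x10 << 8));		/* old rec_buf[1] & 0x10, repacked */
	return 0;
}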
@@ -671,6 +619,7 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
671 619
672MODULE_DESCRIPTION("OR51132 ATSC [pcHDTV HD-3000] (8VSB & ITU J83 AnnexB FEC QAM64/256) Demodulator Driver"); 620MODULE_DESCRIPTION("OR51132 ATSC [pcHDTV HD-3000] (8VSB & ITU J83 AnnexB FEC QAM64/256) Demodulator Driver");
673MODULE_AUTHOR("Kirk Lapray"); 621MODULE_AUTHOR("Kirk Lapray");
622MODULE_AUTHOR("Trent Piepho");
674MODULE_LICENSE("GPL"); 623MODULE_LICENSE("GPL");
675 624
676EXPORT_SYMBOL(or51132_attach); 625EXPORT_SYMBOL(or51132_attach);
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 5b9c5bb29b23..110536843e8e 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -30,13 +30,13 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include "dvb_frontend.h" 32#include "dvb_frontend.h"
33#include "tda10021.h" 33#include "tda1002x.h"
34 34
35 35
36struct tda10021_state { 36struct tda10021_state {
37 struct i2c_adapter* i2c; 37 struct i2c_adapter* i2c;
38 /* configuration settings */ 38 /* configuration settings */
39 const struct tda10021_config* config; 39 const struct tda1002x_config* config;
40 struct dvb_frontend frontend; 40 struct dvb_frontend frontend;
41 41
42 u8 pwm; 42 u8 pwm;
@@ -53,9 +53,6 @@ struct tda10021_state {
53static int verbose; 53static int verbose;
54 54
55#define XIN 57840000UL 55#define XIN 57840000UL
56#define DISABLE_INVERSION(reg0) do { reg0 |= 0x20; } while (0)
57#define ENABLE_INVERSION(reg0) do { reg0 &= ~0x20; } while (0)
58#define HAS_INVERSION(reg0) (!(reg0 & 0x20))
59 56
60#define FIN (XIN >> 4) 57#define FIN (XIN >> 4)
61 58
@@ -64,7 +61,7 @@ static u8 tda10021_inittab[0x40]=
64{ 61{
65 0x73, 0x6a, 0x23, 0x0a, 0x02, 0x37, 0x77, 0x1a, 62 0x73, 0x6a, 0x23, 0x0a, 0x02, 0x37, 0x77, 0x1a,
66 0x37, 0x6a, 0x17, 0x8a, 0x1e, 0x86, 0x43, 0x40, 63 0x37, 0x6a, 0x17, 0x8a, 0x1e, 0x86, 0x43, 0x40,
67 0xb8, 0x3f, 0xa0, 0x00, 0xcd, 0x01, 0x00, 0xff, 64 0xb8, 0x3f, 0xa1, 0x00, 0xcd, 0x01, 0x00, 0xff,
68 0x11, 0x00, 0x7c, 0x31, 0x30, 0x20, 0x00, 0x00, 65 0x11, 0x00, 0x7c, 0x31, 0x30, 0x20, 0x00, 0x00,
69 0x02, 0x00, 0x00, 0x7d, 0x00, 0x00, 0x00, 0x00, 66 0x02, 0x00, 0x00, 0x7d, 0x00, 0x00, 0x00, 0x00,
70 0x07, 0x00, 0x33, 0x11, 0x0d, 0x95, 0x08, 0x58, 67 0x07, 0x00, 0x33, 0x11, 0x0d, 0x95, 0x08, 0x58,
@@ -97,7 +94,8 @@ static u8 tda10021_readreg (struct tda10021_state* state, u8 reg)
97 int ret; 94 int ret;
98 95
99 ret = i2c_transfer (state->i2c, msg, 2); 96 ret = i2c_transfer (state->i2c, msg, 2);
100 if (ret != 2) 97 // Don't print an error message if the id is read.
98 if (ret != 2 && reg != 0x1a)
101 printk("DVB: TDA10021: %s: readreg error (ret == %i)\n", 99 printk("DVB: TDA10021: %s: readreg error (ret == %i)\n",
102 __FUNCTION__, ret); 100 __FUNCTION__, ret);
103 return b1[0]; 101 return b1[0];
@@ -136,10 +134,10 @@ static int tda10021_setup_reg0 (struct tda10021_state* state, u8 reg0,
136{ 134{
137 reg0 |= state->reg0 & 0x63; 135 reg0 |= state->reg0 & 0x63;
138 136
139 if (INVERSION_ON == inversion) 137 if ((INVERSION_ON == inversion) ^ (state->config->invert == 0))
140 ENABLE_INVERSION(reg0); 138 reg0 &= ~0x20;
141 else if (INVERSION_OFF == inversion) 139 else
142 DISABLE_INVERSION(reg0); 140 reg0 |= 0x20;
143 141
144 _tda10021_writereg (state, 0x00, reg0 & 0xfe); 142 _tda10021_writereg (state, 0x00, reg0 & 0xfe);
145 _tda10021_writereg (state, 0x00, reg0 | 0x01); 143 _tda10021_writereg (state, 0x00, reg0 | 0x01);
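With the helper macros gone, the state of bit 0x20 now depends on both the requested inversion and the new config->invert flag; writing the condition out as a table makes the XOR easier to audit (a cleared bit corresponds to what the old ENABLE_INVERSION() did):

  requested       config->invert   (ON==inversion) ^ (invert==0)   reg0 bit 0x20
  INVERSION_ON    0                1 ^ 1 = 0                        set
  INVERSION_ON    non-zero         1 ^ 0 = 1                        cleared
  INVERSION_OFF   0                0 ^ 1 = 1                        cleared
  INVERSION_OFF   non-zero         0 ^ 0 = 0                        set

The tda10021_get_frontend() hunk further down applies the same XOR in reverse, so the reported inversion stays consistent with what was programmed.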
@@ -201,16 +199,6 @@ static int tda10021_set_symbolrate (struct tda10021_state* state, u32 symbolrate
201 return 0; 199 return 0;
202} 200}
203 201
204static int tda10021_write(struct dvb_frontend* fe, u8 *buf, int len)
205{
206 struct tda10021_state* state = fe->demodulator_priv;
207
208 if (len != 2)
209 return -EINVAL;
210
211 return _tda10021_writereg(state, buf[0], buf[1]);
212}
213
214static int tda10021_init (struct dvb_frontend *fe) 202static int tda10021_init (struct dvb_frontend *fe)
215{ 203{
216 struct tda10021_state* state = fe->demodulator_priv; 204 struct tda10021_state* state = fe->demodulator_priv;
@@ -258,6 +246,9 @@ static int tda10021_set_parameters (struct dvb_frontend *fe,
258 if (qam < 0 || qam > 5) 246 if (qam < 0 || qam > 5)
259 return -EINVAL; 247 return -EINVAL;
260 248
249 if (p->inversion != INVERSION_ON && p->inversion != INVERSION_OFF)
250 return -EINVAL;
251
261 //printk("tda10021: set frequency to %d qam=%d symrate=%d\n", p->frequency,qam,p->u.qam.symbol_rate); 252 //printk("tda10021: set frequency to %d qam=%d symrate=%d\n", p->frequency,qam,p->u.qam.symbol_rate);
262 253
263 if (fe->ops.tuner_ops.set_params) { 254 if (fe->ops.tuner_ops.set_params) {
@@ -366,7 +357,7 @@ static int tda10021_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_pa
366 -((s32)p->u.qam.symbol_rate * afc) >> 10); 357 -((s32)p->u.qam.symbol_rate * afc) >> 10);
367 } 358 }
368 359
369 p->inversion = HAS_INVERSION(state->reg0) ? INVERSION_ON : INVERSION_OFF; 360 p->inversion = ((state->reg0 & 0x20) == 0x20) ^ (state->config->invert != 0) ? INVERSION_ON : INVERSION_OFF;
370 p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16; 361 p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16;
371 362
372 p->u.qam.fec_inner = FEC_NONE; 363 p->u.qam.fec_inner = FEC_NONE;
@@ -408,11 +399,12 @@ static void tda10021_release(struct dvb_frontend* fe)
408 399
409static struct dvb_frontend_ops tda10021_ops; 400static struct dvb_frontend_ops tda10021_ops;
410 401
411struct dvb_frontend* tda10021_attach(const struct tda10021_config* config, 402struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
412 struct i2c_adapter* i2c, 403 struct i2c_adapter* i2c,
413 u8 pwm) 404 u8 pwm)
414{ 405{
415 struct tda10021_state* state = NULL; 406 struct tda10021_state* state = NULL;
407 u8 id;
416 408
417 /* allocate memory for the internal state */ 409 /* allocate memory for the internal state */
418 state = kmalloc(sizeof(struct tda10021_state), GFP_KERNEL); 410 state = kmalloc(sizeof(struct tda10021_state), GFP_KERNEL);
@@ -425,7 +417,11 @@ struct dvb_frontend* tda10021_attach(const struct tda10021_config* config,
425 state->reg0 = tda10021_inittab[0]; 417 state->reg0 = tda10021_inittab[0];
426 418
427 /* check if the demod is there */ 419 /* check if the demod is there */
428 if ((tda10021_readreg(state, 0x1a) & 0xf0) != 0x70) goto error; 420 id = tda10021_readreg(state, 0x1a);
421 if ((id & 0xf0) != 0x70) goto error;
422
423 printk("TDA10021: i2c-addr = 0x%02x, id = 0x%02x\n",
424 state->config->demod_address, id);
429 425
430 /* create dvb_frontend */ 426 /* create dvb_frontend */
431 memcpy(&state->frontend.ops, &tda10021_ops, sizeof(struct dvb_frontend_ops)); 427 memcpy(&state->frontend.ops, &tda10021_ops, sizeof(struct dvb_frontend_ops));
@@ -447,7 +443,7 @@ static struct dvb_frontend_ops tda10021_ops = {
447 .frequency_max = 858000000, 443 .frequency_max = 858000000,
448 .symbol_rate_min = (XIN/2)/64, /* SACLK/64 == (XIN/2)/64 */ 444 .symbol_rate_min = (XIN/2)/64, /* SACLK/64 == (XIN/2)/64 */
449 .symbol_rate_max = (XIN/2)/4, /* SACLK/4 */ 445 .symbol_rate_max = (XIN/2)/4, /* SACLK/4 */
450#if 0 446 #if 0
451 .frequency_tolerance = ???, 447 .frequency_tolerance = ???,
452 .symbol_rate_tolerance = ???, /* ppm */ /* == 8% (spec p. 5) */ 448 .symbol_rate_tolerance = ???, /* ppm */ /* == 8% (spec p. 5) */
453 #endif 449 #endif
@@ -461,7 +457,6 @@ static struct dvb_frontend_ops tda10021_ops = {
461 457
462 .init = tda10021_init, 458 .init = tda10021_init,
463 .sleep = tda10021_sleep, 459 .sleep = tda10021_sleep,
464 .write = tda10021_write,
465 .i2c_gate_ctrl = tda10021_i2c_gate_ctrl, 460 .i2c_gate_ctrl = tda10021_i2c_gate_ctrl,
466 461
467 .set_frontend = tda10021_set_parameters, 462 .set_frontend = tda10021_set_parameters,
diff --git a/drivers/media/dvb/frontends/tda10023.c b/drivers/media/dvb/frontends/tda10023.c
new file mode 100644
index 000000000000..da796e784be3
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda10023.c
@@ -0,0 +1,540 @@
1/*
2 TDA10023 - DVB-C decoder
3 (as used in Philips CU1216-3 NIM and the Reelbox DVB-C tuner card)
4
5 Copyright (C) 2005 Georg Acher, BayCom GmbH (acher at baycom dot de)
6 Copyright (c) 2006 Hartmut Birr (e9hack at gmail dot com)
7
8 Remotely based on tda10021.c
9 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
10 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
11 Support for TDA10021
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26*/
27
28#include <linux/delay.h>
29#include <linux/errno.h>
30#include <linux/init.h>
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/string.h>
34#include <linux/slab.h>
35
36#include <asm/div64.h>
37
38#include "dvb_frontend.h"
39#include "tda1002x.h"
40
41
42struct tda10023_state {
43 struct i2c_adapter* i2c;
44 /* configuration settings */
45 const struct tda1002x_config* config;
46 struct dvb_frontend frontend;
47
48 u8 pwm;
49 u8 reg0;
50};
51
52
53#define dprintk(x...)
54
55static int verbose;
56
57#define XTAL 28920000UL
58#define PLL_M 8UL
59#define PLL_P 4UL
60#define PLL_N 1UL
61#define SYSCLK (XTAL*PLL_M/(PLL_N*PLL_P)) // -> 57840000
62
63static u8 tda10023_inittab[]={
64 // reg mask val
65 0x2a,0xff,0x02, // PLL3, Bypass, Power Down
66 0xff,0x64,0x00, // Sleep 100ms
67 0x2a,0xff,0x03, // PLL3, Bypass, Power Down
68 0xff,0x64,0x00, // Sleep 100ms
69 0x28,0xff,PLL_M-1, // PLL1 M=8
70 0x29,0xff,((PLL_P-1)<<6)|(PLL_N-1), // PLL2
71 0x00,0xff,0x23, // GPR FSAMPLING=1
72 0x2a,0xff,0x08, // PLL3 PSACLK=1
73 0xff,0x64,0x00, // Sleep 100ms
74 0x1f,0xff,0x00, // RESET
75 0xff,0x64,0x00, // Sleep 100ms
76 0xe6,0x0c,0x04, // RSCFG_IND
77 0x10,0xc0,0x80, // DECDVBCFG1 PBER=1
78
79 0x0e,0xff,0x82, // GAIN1
80 0x03,0x08,0x08, // CLKCONF DYN=1
81 0x2e,0xbf,0x30, // AGCCONF2 TRIAGC=0,POSAGC=ENAGCIF=1 PPWMTUN=0 PPWMIF=0
82 0x01,0xff,0x30, // AGCREF
83 0x1e,0x84,0x84, // CONTROL SACLK_ON=1
84 0x1b,0xff,0xc8, // ADC TWOS=1
85 0x3b,0xff,0xff, // IFMAX
86 0x3c,0xff,0x00, // IFMIN
87 0x34,0xff,0x00, // PWMREF
88 0x35,0xff,0xff, // TUNMAX
89 0x36,0xff,0x00, // TUNMIN
90 0x06,0xff,0x7f, // EQCONF1 POSI=7 ENADAPT=ENEQUAL=DFE=1 // 0x77
91 0x1c,0x30,0x30, // EQCONF2 STEPALGO=SGNALGO=1
92 0x37,0xff,0xf6, // DELTAF_LSB
93 0x38,0xff,0xff, // DELTAF_MSB
94 0x02,0xff,0x93, // AGCCONF1 IFS=1 KAGCIF=2 KAGCTUN=3
95 0x2d,0xff,0xf6, // SWEEP SWPOS=1 SWDYN=7 SWSTEP=1 SWLEN=2
96 0x04,0x10,0x00, // SWRAMP=1
97 0x12,0xff,0xa1, // INTP1 POCLKP=1 FEL=1 MFS=0
98 0x2b,0x01,0xa1, // INTS1
99 0x20,0xff,0x04, // INTP2 SWAPP=? MSBFIRSTP=? INTPSEL=?
100 0x2c,0xff,0x0d, // INTP/S TRIP=0 TRIS=0
101 0xc4,0xff,0x00,
102 0xc3,0x30,0x00,
103 0xb5,0xff,0x19, // ERAGC_THD
104 0x00,0x03,0x01, // GPR, CLBS soft reset
105 0x00,0x03,0x03, // GPR, CLBS soft reset
106 0xff,0x64,0x00, // Sleep 100ms
107 0xff,0xff,0xff
108};
109
110static u8 tda10023_readreg (struct tda10023_state* state, u8 reg)
111{
112 u8 b0 [] = { reg };
113 u8 b1 [] = { 0 };
114 struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
115 { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
116 int ret;
117
118 ret = i2c_transfer (state->i2c, msg, 2);
119 if (ret != 2)
120 printk("DVB: TDA10023: %s: readreg error (ret == %i)\n",
121 __FUNCTION__, ret);
122 return b1[0];
123}
124
125static int tda10023_writereg (struct tda10023_state* state, u8 reg, u8 data)
126{
127 u8 buf[] = { reg, data };
128 struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
129 int ret;
130
131 ret = i2c_transfer (state->i2c, &msg, 1);
132 if (ret != 1)
133 printk("DVB: TDA10023(%d): %s, writereg error "
134 "(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
135 state->frontend.dvb->num, __FUNCTION__, reg, data, ret);
136
137 return (ret != 1) ? -EREMOTEIO : 0;
138}
139
140
141static int tda10023_writebit (struct tda10023_state* state, u8 reg, u8 mask,u8 data)
142{
143 if (mask==0xff)
144 return tda10023_writereg(state, reg, data);
145 else {
146 u8 val;
147 val=tda10023_readreg(state,reg);
148 val&=~mask;
149 val|=(data&mask);
150 return tda10023_writereg(state, reg, val);
151 }
152}
153
154static void tda10023_writetab(struct tda10023_state* state, u8* tab)
155{
156 u8 r,m,v;
157 while (1) {
158 r=*tab++;
159 m=*tab++;
160 v=*tab++;
161 if (r==0xff) {
162 if (m==0xff)
163 break;
164 else
165 msleep(m);
166 }
167 else
168 tda10023_writebit(state,r,m,v);
169 }
170}
171
172//get access to tuner
173static int lock_tuner(struct tda10023_state* state)
174{
175 u8 buf[2] = { 0x0f, 0xc0 };
176 struct i2c_msg msg = {.addr=state->config->demod_address, .flags=0, .buf=buf, .len=2};
177
178 if(i2c_transfer(state->i2c, &msg, 1) != 1)
179 {
180 printk("tda10023: lock tuner fails\n");
181 return -EREMOTEIO;
182 }
183 return 0;
184}
185
186//release access from tuner
187static int unlock_tuner(struct tda10023_state* state)
188{
189 u8 buf[2] = { 0x0f, 0x40 };
190 struct i2c_msg msg_post={.addr=state->config->demod_address, .flags=0, .buf=buf, .len=2};
191
192 if(i2c_transfer(state->i2c, &msg_post, 1) != 1)
193 {
194 printk("tda10023: unlock tuner fails\n");
195 return -EREMOTEIO;
196 }
197 return 0;
198}
199
200static int tda10023_setup_reg0 (struct tda10023_state* state, u8 reg0)
201{
202 reg0 |= state->reg0 & 0x63;
203
204 tda10023_writereg (state, 0x00, reg0 & 0xfe);
205 tda10023_writereg (state, 0x00, reg0 | 0x01);
206
207 state->reg0 = reg0;
208 return 0;
209}
210
211static int tda10023_set_symbolrate (struct tda10023_state* state, u32 sr)
212{
213 s32 BDR;
214 s32 BDRI;
215 s16 SFIL=0;
216 u16 NDEC = 0;
217
218 if (sr > (SYSCLK/(2*4)))
219 sr=SYSCLK/(2*4);
220
221 if (sr<870000)
222 sr=870000;
223
224 if (sr < (u32)(SYSCLK/98.40)) {
225 NDEC=3;
226 SFIL=1;
227 } else if (sr<(u32)(SYSCLK/64.0)) {
228 NDEC=3;
229 SFIL=0;
230 } else if (sr<(u32)(SYSCLK/49.2)) {
231 NDEC=2;
232 SFIL=1;
233 } else if (sr<(u32)(SYSCLK/32.0)) {
234 NDEC=2;
235 SFIL=0;
236 } else if (sr<(u32)(SYSCLK/24.6)) {
237 NDEC=1;
238 SFIL=1;
239 } else if (sr<(u32)(SYSCLK/16.0)) {
240 NDEC=1;
241 SFIL=0;
242 } else if (sr<(u32)(SYSCLK/12.3)) {
243 NDEC=0;
244 SFIL=1;
245 }
246
247 BDRI=SYSCLK*16;
248 BDRI>>=NDEC;
249 BDRI +=sr/2;
250 BDRI /=sr;
251
252 if (BDRI>255)
253 BDRI=255;
254
255 {
256 u64 BDRX;
257
258 BDRX=1<<(24+NDEC);
259 BDRX*=sr;
260 do_div(BDRX,SYSCLK); // BDRX/=SYSCLK;
261
262 BDR=(s32)BDRX;
263 }
264// printk("Symbolrate %i, BDR %i BDRI %i, NDEC %i\n",sr,BDR,BDRI,NDEC);
265 tda10023_writebit (state, 0x03, 0xc0, NDEC<<6);
266 tda10023_writereg (state, 0x0a, BDR&255);
267 tda10023_writereg (state, 0x0b, (BDR>>8)&255);
268 tda10023_writereg (state, 0x0c, (BDR>>16)&31);
269 tda10023_writereg (state, 0x0d, BDRI);
270 tda10023_writereg (state, 0x3d, (SFIL<<7));
271 return 0;
272}
273
274static int tda10023_init (struct dvb_frontend *fe)
275{
276 struct tda10023_state* state = fe->demodulator_priv;
277
278 dprintk("DVB: TDA10023(%d): init chip\n", fe->adapter->num);
279
280 tda10023_writetab(state, tda10023_inittab);
281
282 return 0;
283}
284
285static int tda10023_set_parameters (struct dvb_frontend *fe,
286 struct dvb_frontend_parameters *p)
287{
288 struct tda10023_state* state = fe->demodulator_priv;
289
290 static int qamvals[6][6] = {
291 // QAM LOCKTHR MSETH AREF AGCREFNYQ ERAGCNYQ_THD
292 { (5<<2), 0x78, 0x8c, 0x96, 0x78, 0x4c }, // 4 QAM
293 { (0<<2), 0x87, 0xa2, 0x91, 0x8c, 0x57 }, // 16 QAM
294 { (1<<2), 0x64, 0x74, 0x96, 0x8c, 0x57 }, // 32 QAM
295 { (2<<2), 0x46, 0x43, 0x6a, 0x6a, 0x44 }, // 64 QAM
296 { (3<<2), 0x36, 0x34, 0x7e, 0x78, 0x4c }, // 128 QAM
297 { (4<<2), 0x26, 0x23, 0x6c, 0x5c, 0x3c }, // 256 QAM
298 };
299
300 int qam = p->u.qam.modulation;
301
302 if (qam < 0 || qam > 5)
303 return -EINVAL;
304
305 if (fe->ops.tuner_ops.set_params) {
306 fe->ops.tuner_ops.set_params(fe, p);
307 if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
308 }
309
310 tda10023_set_symbolrate (state, p->u.qam.symbol_rate);
311 tda10023_writereg (state, 0x05, qamvals[qam][1]);
312 tda10023_writereg (state, 0x08, qamvals[qam][2]);
313 tda10023_writereg (state, 0x09, qamvals[qam][3]);
314 tda10023_writereg (state, 0xb4, qamvals[qam][4]);
315 tda10023_writereg (state, 0xb6, qamvals[qam][5]);
316
317// tda10023_writereg (state, 0x04, (p->inversion?0x12:0x32));
318// tda10023_writebit (state, 0x04, 0x60, (p->inversion?0:0x20));
319 tda10023_writebit (state, 0x04, 0x40, 0x40);
320 tda10023_setup_reg0 (state, qamvals[qam][0]);
321
322 return 0;
323}
324
325static int tda10023_read_status(struct dvb_frontend* fe, fe_status_t* status)
326{
327 struct tda10023_state* state = fe->demodulator_priv;
328 int sync;
329
330 *status = 0;
331
332 //0x11[1] == CARLOCK -> Carrier locked
333 //0x11[2] == FSYNC -> Frame synchronisation
334 //0x11[3] == FEL -> Front End locked
335 //0x11[6] == NODVB -> DVB Mode Information
336 sync = tda10023_readreg (state, 0x11);
337
338 if (sync & 2)
339 *status |= FE_HAS_SIGNAL|FE_HAS_CARRIER;
340
341 if (sync & 4)
342 *status |= FE_HAS_SYNC|FE_HAS_VITERBI;
343
344 if (sync & 8)
345 *status |= FE_HAS_LOCK;
346
347 return 0;
348}
349
350static int tda10023_read_ber(struct dvb_frontend* fe, u32* ber)
351{
352 struct tda10023_state* state = fe->demodulator_priv;
353 u8 a,b,c;
354 a=tda10023_readreg(state, 0x14);
355 b=tda10023_readreg(state, 0x15);
356 c=tda10023_readreg(state, 0x16)&0xf;
357 tda10023_writebit (state, 0x10, 0xc0, 0x00);
358
359 *ber = a | (b<<8)| (c<<16);
360 return 0;
361}
362
363static int tda10023_read_signal_strength(struct dvb_frontend* fe, u16* strength)
364{
365 struct tda10023_state* state = fe->demodulator_priv;
366 u8 ifgain=tda10023_readreg(state, 0x2f);
367
368 u16 gain = ((255-tda10023_readreg(state, 0x17))) + (255-ifgain)/16;
369 // Max raw value is about 0xb0 -> Normalize to >0xf0 after 0x90
370 if (gain>0x90)
371 gain=gain+2*(gain-0x90);
372 if (gain>255)
373 gain=255;
374
375 *strength = (gain<<8)|gain;
376 return 0;
377}
378
379static int tda10023_read_snr(struct dvb_frontend* fe, u16* snr)
380{
381 struct tda10023_state* state = fe->demodulator_priv;
382
383 u8 quality = ~tda10023_readreg(state, 0x18);
384 *snr = (quality << 8) | quality;
385 return 0;
386}
387
388static int tda10023_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
389{
390 struct tda10023_state* state = fe->demodulator_priv;
391 u8 a,b,c,d;
392 a= tda10023_readreg (state, 0x74);
393 b= tda10023_readreg (state, 0x75);
394 c= tda10023_readreg (state, 0x76);
395 d= tda10023_readreg (state, 0x77);
396 *ucblocks = a | (b<<8)|(c<<16)|(d<<24);
397
398 tda10023_writebit (state, 0x10, 0x20,0x00);
399 tda10023_writebit (state, 0x10, 0x20,0x20);
400 tda10023_writebit (state, 0x13, 0x01, 0x00);
401
402 return 0;
403}
404
405static int tda10023_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
406{
407 struct tda10023_state* state = fe->demodulator_priv;
408 int sync,inv;
409 s8 afc = 0;
410
411 sync = tda10023_readreg(state, 0x11);
412 afc = tda10023_readreg(state, 0x19);
413 inv = tda10023_readreg(state, 0x04);
414
415 if (verbose) {
416 /* AFC only valid when carrier has been recovered */
417 printk(sync & 2 ? "DVB: TDA10023(%d): AFC (%d) %dHz\n" :
418 "DVB: TDA10023(%d): [AFC (%d) %dHz]\n",
419 state->frontend.dvb->num, afc,
420 -((s32)p->u.qam.symbol_rate * afc) >> 10);
421 }
422
423 p->inversion = (inv&0x20?0:1);
424 p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16;
425
426 p->u.qam.fec_inner = FEC_NONE;
427 p->frequency = ((p->frequency + 31250) / 62500) * 62500;
428
429 if (sync & 2)
430 p->frequency -= ((s32)p->u.qam.symbol_rate * afc) >> 10;
431
432 return 0;
433}
434
435static int tda10023_sleep(struct dvb_frontend* fe)
436{
437 struct tda10023_state* state = fe->demodulator_priv;
438
439 tda10023_writereg (state, 0x1b, 0x02); /* pdown ADC */
440 tda10023_writereg (state, 0x00, 0x80); /* standby */
441
442 return 0;
443}
444
445static int tda10023_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
446{
447 struct tda10023_state* state = fe->demodulator_priv;
448
449 if (enable) {
450 lock_tuner(state);
451 } else {
452 unlock_tuner(state);
453 }
454 return 0;
455}
456
457static void tda10023_release(struct dvb_frontend* fe)
458{
459 struct tda10023_state* state = fe->demodulator_priv;
460 kfree(state);
461}
462
463static struct dvb_frontend_ops tda10023_ops;
464
465struct dvb_frontend* tda10023_attach(const struct tda1002x_config* config,
466 struct i2c_adapter* i2c,
467 u8 pwm)
468{
469 struct tda10023_state* state = NULL;
470 int i;
471
472 /* allocate memory for the internal state */
473 state = kmalloc(sizeof(struct tda10023_state), GFP_KERNEL);
474 if (state == NULL) goto error;
475
476 /* setup the state */
477 state->config = config;
478 state->i2c = i2c;
479 memcpy(&state->frontend.ops, &tda10023_ops, sizeof(struct dvb_frontend_ops));
480 state->pwm = pwm;
481 for (i=0; i < sizeof(tda10023_inittab)/sizeof(*tda10023_inittab);i+=3) {
482 if (tda10023_inittab[i] == 0x00) {
483 state->reg0 = tda10023_inittab[i+2];
484 break;
485 }
486 }
487
488 // Wakeup if in standby
489 tda10023_writereg (state, 0x00, 0x33);
490 /* check if the demod is there */
491 if ((tda10023_readreg(state, 0x1a) & 0xf0) != 0x70) goto error;
492
493 /* create dvb_frontend */
494 memcpy(&state->frontend.ops, &tda10023_ops, sizeof(struct dvb_frontend_ops));
495 state->frontend.demodulator_priv = state;
496 return &state->frontend;
497
498error:
499 kfree(state);
500 return NULL;
501}
502
503static struct dvb_frontend_ops tda10023_ops = {
504
505 .info = {
506 .name = "Philips TDA10023 DVB-C",
507 .type = FE_QAM,
508 .frequency_stepsize = 62500,
509 .frequency_min = 51000000,
510 .frequency_max = 858000000,
511 .symbol_rate_min = (SYSCLK/2)/64, /* SACLK/64 == (SYSCLK/2)/64 */
512 .symbol_rate_max = (SYSCLK/2)/4, /* SACLK/4 */
513 .caps = 0x400 | //FE_CAN_QAM_4
514 FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
515 FE_CAN_QAM_128 | FE_CAN_QAM_256 |
516 FE_CAN_FEC_AUTO
517 },
518
519 .release = tda10023_release,
520
521 .init = tda10023_init,
522 .sleep = tda10023_sleep,
523 .i2c_gate_ctrl = tda10023_i2c_gate_ctrl,
524
525 .set_frontend = tda10023_set_parameters,
526 .get_frontend = tda10023_get_frontend,
527
528 .read_status = tda10023_read_status,
529 .read_ber = tda10023_read_ber,
530 .read_signal_strength = tda10023_read_signal_strength,
531 .read_snr = tda10023_read_snr,
532 .read_ucblocks = tda10023_read_ucblocks,
533};
534
535
536MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver");
537MODULE_AUTHOR("Georg Acher, Hartmut Birr");
538MODULE_LICENSE("GPL");
539
540EXPORT_SYMBOL(tda10023_attach);
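For reference, the register values programmed by tda10023_set_symbolrate() in the new file above can be reproduced in user space. The sketch below mirrors the driver's arithmetic (SYSCLK derivation, NDEC/SFIL selection, BDRI rounding, 64-bit BDR division) for an assumed example symbol rate of 6.9 MS/s; the chosen symbol rate and the standalone harness are illustrative only and not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* constants copied from tda10023.c above */
#define XTAL   28920000UL
#define PLL_M  8UL
#define PLL_P  4UL
#define PLL_N  1UL
#define SYSCLK (XTAL*PLL_M/(PLL_N*PLL_P))   /* 57840000 */

int main(void)
{
        uint32_t sr = 6900000;          /* example symbol rate (assumption) */
        int32_t BDR, BDRI;
        int16_t SFIL = 0;
        uint16_t NDEC = 0;
        uint64_t BDRX;

        /* same clamping and decimation/filter selection as the driver */
        if (sr > SYSCLK / (2 * 4)) sr = SYSCLK / (2 * 4);
        if (sr < 870000)           sr = 870000;
        if      (sr < (uint32_t)(SYSCLK / 98.40)) { NDEC = 3; SFIL = 1; }
        else if (sr < (uint32_t)(SYSCLK / 64.0))  { NDEC = 3; SFIL = 0; }
        else if (sr < (uint32_t)(SYSCLK / 49.2))  { NDEC = 2; SFIL = 1; }
        else if (sr < (uint32_t)(SYSCLK / 32.0))  { NDEC = 2; SFIL = 0; }
        else if (sr < (uint32_t)(SYSCLK / 24.6))  { NDEC = 1; SFIL = 1; }
        else if (sr < (uint32_t)(SYSCLK / 16.0))  { NDEC = 1; SFIL = 0; }
        else if (sr < (uint32_t)(SYSCLK / 12.3))  { NDEC = 0; SFIL = 1; }

        /* SYSCLK*16/sr ratio, rounded to nearest (register 0x0d) */
        BDRI = SYSCLK * 16;
        BDRI >>= NDEC;
        BDRI += sr / 2;
        BDRI /= sr;
        if (BDRI > 255) BDRI = 255;

        /* baud rate value split across registers 0x0a..0x0c;
           do_div() replaced by plain 64-bit division here */
        BDRX = (uint64_t)1 << (24 + NDEC);
        BDRX *= sr;
        BDRX /= SYSCLK;
        BDR = (int32_t)BDRX;

        /* for sr = 6900000 this prints NDEC=0 SFIL=0 BDRI=134 BDR=0x1e8a17 */
        printf("NDEC=%u SFIL=%d BDRI=%d BDR=0x%06x\n",
               (unsigned)NDEC, (int)SFIL, (int)BDRI, (unsigned)BDR);
        return 0;
}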
diff --git a/drivers/media/dvb/frontends/tda10021.h b/drivers/media/dvb/frontends/tda1002x.h
index e3da780108f6..e9094d8123f6 100644
--- a/drivers/media/dvb/frontends/tda10021.h
+++ b/drivers/media/dvb/frontends/tda1002x.h
@@ -1,6 +1,6 @@
1/* 1/*
2 TDA10021 - Single Chip Cable Channel Receiver driver module 2 TDA10021/TDA10023 - Single Chip Cable Channel Receiver driver module
 3 used on the Siemens DVB-C cards 3 used on the Siemens DVB-C cards
4 4
5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de> 5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de> 6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
@@ -21,22 +21,23 @@
21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*/ 22*/
23 23
24#ifndef TDA10021_H 24#ifndef TDA1002x_H
25#define TDA10021_H 25#define TDA1002x_H
26 26
27#include <linux/dvb/frontend.h> 27#include <linux/dvb/frontend.h>
28 28
29struct tda10021_config 29struct tda1002x_config
30{ 30{
31 /* the demodulator's i2c address */ 31 /* the demodulator's i2c address */
32 u8 demod_address; 32 u8 demod_address;
33 u8 invert;
33}; 34};
34 35
35#if defined(CONFIG_DVB_TDA10021) || (defined(CONFIG_DVB_TDA10021_MODULE) && defined(MODULE)) 36#if defined(CONFIG_DVB_TDA10021) || (defined(CONFIG_DVB_TDA10021_MODULE) && defined(MODULE))
36extern struct dvb_frontend* tda10021_attach(const struct tda10021_config* config, 37extern struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
37 struct i2c_adapter* i2c, u8 pwm); 38 struct i2c_adapter* i2c, u8 pwm);
38#else 39#else
39static inline struct dvb_frontend* tda10021_attach(const struct tda10021_config* config, 40static inline struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
40 struct i2c_adapter* i2c, u8 pwm) 41 struct i2c_adapter* i2c, u8 pwm)
41{ 42{
42 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__); 43 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
@@ -44,12 +45,16 @@ static inline struct dvb_frontend* tda10021_attach(const struct tda10021_config*
44} 45}
45#endif // CONFIG_DVB_TDA10021 46#endif // CONFIG_DVB_TDA10021
46 47
47static inline int tda10021_writereg(struct dvb_frontend *fe, u8 reg, u8 val) { 48#if defined(CONFIG_DVB_TDA10023) || (defined(CONFIG_DVB_TDA10023_MODULE) && defined(MODULE))
48 int r = 0; 49extern struct dvb_frontend* tda10023_attach(const struct tda1002x_config* config,
49 u8 buf[] = {reg, val}; 50 struct i2c_adapter* i2c, u8 pwm);
50 if (fe->ops.write) 51#else
51 r = fe->ops.write(fe, buf, 2); 52static inline struct dvb_frontend* tda10023_attach(const struct tda1002x_config* config,
52 return r; 53 struct i2c_adapter* i2c, u8 pwm)
54{
55 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
56 return NULL;
53} 57}
58#endif // CONFIG_DVB_TDA10023
54 59
55#endif // TDA10021_H 60#endif // TDA1002x_H
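The renamed header keeps the existing pattern of compiling the attach calls down to a warning stub when the corresponding frontend driver is disabled in Kconfig, so card drivers can call them unconditionally and simply check for NULL. A minimal, hypothetical caller is sketched below; the demod address, the PWM value of 0 and the fallback order are assumptions for illustration, not taken from a specific board driver.

#include <linux/errno.h>
#include <linux/i2c.h>
#include "tda1002x.h"

/* hypothetical board glue: try the TDA10023 first, fall back to the TDA10021 */
static struct tda1002x_config example_cu1216_config = {
        .demod_address = 0x0c,  /* example address, board specific */
        .invert        = 0,     /* new field added by this patch */
};

struct example_card {           /* hypothetical container */
        struct i2c_adapter *i2c;
        struct dvb_frontend *fe;
};

static int example_frontend_attach(struct example_card *card)
{
        card->fe = tda10023_attach(&example_cu1216_config, card->i2c, 0);
        if (!card->fe)
                card->fe = tda10021_attach(&example_cu1216_config, card->i2c, 0);
        if (!card->fe)
                return -ENODEV; /* driver disabled by Kconfig or demod not found */
        return 0;
}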
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index f4a9cf9d26d0..33a84372c9e6 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -40,20 +40,6 @@
40#include "dvb_frontend.h" 40#include "dvb_frontend.h"
41#include "tda1004x.h" 41#include "tda1004x.h"
42 42
43enum tda1004x_demod {
44 TDA1004X_DEMOD_TDA10045,
45 TDA1004X_DEMOD_TDA10046,
46};
47
48struct tda1004x_state {
49 struct i2c_adapter* i2c;
50 const struct tda1004x_config* config;
51 struct dvb_frontend frontend;
52
53 /* private demod data */
54 enum tda1004x_demod demod_type;
55};
56
57static int debug; 43static int debug;
58#define dprintk(args...) \ 44#define dprintk(args...) \
59 do { \ 45 do { \
@@ -507,35 +493,51 @@ static int tda10046_fwupload(struct dvb_frontend* fe)
507 tda1004x_write_byteI(state, TDA1004X_CONFC4, 0x80); 493 tda1004x_write_byteI(state, TDA1004X_CONFC4, 0x80);
508 } 494 }
509 tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0); 495 tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0);
496 /* set GPIO 1 and 3 */
497 if (state->config->gpio_config != TDA10046_GPTRI) {
498 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE2, 0x33);
499 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x0f, state->config->gpio_config &0x0f);
500 }
510 /* let the clocks recover from sleep */ 501 /* let the clocks recover from sleep */
511 msleep(5); 502 msleep(10);
512 503
513 /* The PLLs need to be reprogrammed after sleep */ 504 /* The PLLs need to be reprogrammed after sleep */
514 tda10046_init_plls(fe); 505 tda10046_init_plls(fe);
506 tda1004x_write_mask(state, TDA1004X_CONFADC2, 0xc0, 0);
515 507
516 /* don't re-upload unless necessary */ 508 /* don't re-upload unless necessary */
517 if (tda1004x_check_upload_ok(state) == 0) 509 if (tda1004x_check_upload_ok(state) == 0)
518 return 0; 510 return 0;
519 511
512 printk(KERN_INFO "tda1004x: trying to boot from eeprom\n");
513 tda1004x_write_mask(state, TDA1004X_CONFC4, 4, 4);
514 msleep(300);
515 /* don't re-upload unless necessary */
516 if (tda1004x_check_upload_ok(state) == 0)
517 return 0;
518
520 if (state->config->request_firmware != NULL) { 519 if (state->config->request_firmware != NULL) {
521 /* request the firmware, this will block until someone uploads it */ 520 /* request the firmware, this will block until someone uploads it */
522 printk(KERN_INFO "tda1004x: waiting for firmware upload...\n"); 521 printk(KERN_INFO "tda1004x: waiting for firmware upload...\n");
523 ret = state->config->request_firmware(fe, &fw, TDA10046_DEFAULT_FIRMWARE); 522 ret = state->config->request_firmware(fe, &fw, TDA10046_DEFAULT_FIRMWARE);
524 if (ret) { 523 if (ret) {
525 printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n"); 524 /* remain compatible to old bug: try to load with tda10045 image name */
526 return ret; 525 /* remain compatible with old bug: try to load with tda10045 image name */
526 if (ret) {
527 printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n");
528 return ret;
529 } else {
530 printk(KERN_INFO "tda1004x: please rename the firmware file to %s\n",
531 TDA10046_DEFAULT_FIRMWARE);
532 }
527 } 533 }
528 tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); // going to boot from HOST
529 ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10046H_CODE_CPT, TDA10046H_CODE_IN);
530 release_firmware(fw);
531 if (ret)
532 return ret;
533 } else { 534 } else {
534 /* boot from firmware eeprom */ 535 printk(KERN_ERR "tda1004x: no request function defined, can't upload from file\n");
535 printk(KERN_INFO "tda1004x: booting from eeprom\n"); 536 return -EIO;
536 tda1004x_write_mask(state, TDA1004X_CONFC4, 4, 4);
537 msleep(300);
538 } 537 }
538 tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); // going to boot from HOST
539 ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10046H_CODE_CPT, TDA10046H_CODE_IN);
540 release_firmware(fw);
539 return tda1004x_check_upload_ok(state); 541 return tda1004x_check_upload_ok(state);
540} 542}
541 543
@@ -638,37 +640,33 @@ static int tda10046_init(struct dvb_frontend* fe)
638 switch (state->config->agc_config) { 640 switch (state->config->agc_config) {
639 case TDA10046_AGC_DEFAULT: 641 case TDA10046_AGC_DEFAULT:
640 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x00); // AGC setup 642 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x00); // AGC setup
641 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities 643 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities
642 break; 644 break;
643 case TDA10046_AGC_IFO_AUTO_NEG: 645 case TDA10046_AGC_IFO_AUTO_NEG:
644 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup 646 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup
645 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities 647 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities
646 break; 648 break;
647 case TDA10046_AGC_IFO_AUTO_POS: 649 case TDA10046_AGC_IFO_AUTO_POS:
648 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup 650 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup
649 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x00); // set AGC polarities 651 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x00); // set AGC polarities
650 break;
651 case TDA10046_AGC_TDA827X_GP11:
652 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
653 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
654 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
655 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities
656 break;
657 case TDA10046_AGC_TDA827X_GP00:
658 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
659 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
660 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
661 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities
662 break; 652 break;
663 case TDA10046_AGC_TDA827X_GP01: 653 case TDA10046_AGC_TDA827X:
664 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup 654 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
665 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold 655 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
666 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize 656 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
667 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x62); // set AGC polarities 657 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities
668 break; 658 break;
669 } 659 }
660 if (state->config->ts_mode == 0) {
661 tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0xc0, 0x40);
662 tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
663 } else {
664 tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0xc0, 0x80);
665 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x10,
666 state->config->invert_oclk << 4);
667 }
670 tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38); 668 tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38);
671 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on 669 tda1004x_write_mask (state, TDA10046H_CONF_TRISTATE1, 0x3e, 0x38); // Turn IF AGC output on
672 tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MIN, 0); // } 670 tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MIN, 0); // }
673 tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MAX, 0xff); // } AGC min/max values 671 tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MAX, 0xff); // } AGC min/max values
674 tda1004x_write_byteI(state, TDA10046H_AGC_IF_MIN, 0); // } 672 tda1004x_write_byteI(state, TDA10046H_AGC_IF_MIN, 0); // }
@@ -678,7 +676,6 @@ static int tda10046_init(struct dvb_frontend* fe)
678 tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config 676 tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config
679 tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config 677 tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config
680 // tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes 678 // tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes
681 tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
682 679
683 return 0; 680 return 0;
684} 681}
@@ -705,7 +702,8 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
705 // set frequency 702 // set frequency
706 if (fe->ops.tuner_ops.set_params) { 703 if (fe->ops.tuner_ops.set_params) {
707 fe->ops.tuner_ops.set_params(fe, fe_params); 704 fe->ops.tuner_ops.set_params(fe, fe_params);
708 if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); 705 if (fe->ops.i2c_gate_ctrl)
706 fe->ops.i2c_gate_ctrl(fe, 0);
709 } 707 }
710 708
711 // Hardcoded to use auto as much as possible on the TDA10045 as it 709 // Hardcoded to use auto as much as possible on the TDA10045 as it
@@ -1165,6 +1163,7 @@ static int tda1004x_read_ber(struct dvb_frontend* fe, u32* ber)
1165static int tda1004x_sleep(struct dvb_frontend* fe) 1163static int tda1004x_sleep(struct dvb_frontend* fe)
1166{ 1164{
1167 struct tda1004x_state* state = fe->demodulator_priv; 1165 struct tda1004x_state* state = fe->demodulator_priv;
1166 int gpio_conf;
1168 1167
1169 switch (state->demod_type) { 1168 switch (state->demod_type) {
1170 case TDA1004X_DEMOD_TDA10045: 1169 case TDA1004X_DEMOD_TDA10045:
@@ -1174,6 +1173,13 @@ static int tda1004x_sleep(struct dvb_frontend* fe)
1174 case TDA1004X_DEMOD_TDA10046: 1173 case TDA1004X_DEMOD_TDA10046:
1175 /* set outputs to tristate */ 1174 /* set outputs to tristate */
1176 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0xff); 1175 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0xff);
1176 /* invert GPIO 1 and 3 if desired */
1177 gpio_conf = state->config->gpio_config;
1178 if (gpio_conf >= TDA10046_GP00_I)
1179 tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x0f,
1180 (gpio_conf & 0x0f) ^ 0x0a);
1181
1182 tda1004x_write_mask(state, TDA1004X_CONFADC2, 0xc0, 0xc0);
1177 tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1); 1183 tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1);
1178 break; 1184 break;
1179 } 1185 }
diff --git a/drivers/media/dvb/frontends/tda1004x.h b/drivers/media/dvb/frontends/tda1004x.h
index ec502d71b83c..abae84350142 100644
--- a/drivers/media/dvb/frontends/tda1004x.h
+++ b/drivers/media/dvb/frontends/tda1004x.h
@@ -35,9 +35,23 @@ enum tda10046_agc {
35 TDA10046_AGC_DEFAULT, /* original configuration */ 35 TDA10046_AGC_DEFAULT, /* original configuration */
36 TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negative */ 36 TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negative */
37 TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */ 37 TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */
38 TDA10046_AGC_TDA827X_GP11, /* IF AGC only, special setup for tda827x */ 38 TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */
39 TDA10046_AGC_TDA827X_GP00, /* same as above, but GPIOs 0 */ 39};
40 TDA10046_AGC_TDA827X_GP01, /* same as above, but GPIO3=0 GPIO1=1*/ 40
41/* Many (hybrid) boards use GPIO 1 and 3
42 GPIO1 analog - dvb switch
43 GPIO3 firmware eeprom address switch
44*/
45enum tda10046_gpio {
46 TDA10046_GPTRI = 0x00, /* All GPIOs tristate */
47 TDA10046_GP00 = 0x40, /* GPIO3=0, GPIO1=0 */
48 TDA10046_GP01 = 0x42, /* GPIO3=0, GPIO1=1 */
49 TDA10046_GP10 = 0x48, /* GPIO3=1, GPIO1=0 */
50 TDA10046_GP11 = 0x4a, /* GPIO3=1, GPIO1=1 */
 51 TDA10046_GP00_I = 0x80, /* GPIO3=0, GPIO1=0, invert in sleep mode */
52 TDA10046_GP01_I = 0x82, /* GPIO3=0, GPIO1=1, invert in sleep mode */
53 TDA10046_GP10_I = 0x88, /* GPIO3=1, GPIO1=0, invert in sleep mode */
54 TDA10046_GP11_I = 0x8a, /* GPIO3=1, GPIO1=1, invert in sleep mode */
41}; 55};
42 56
43enum tda10046_if { 57enum tda10046_if {
@@ -47,6 +61,11 @@ enum tda10046_if {
47 TDA10046_FREQ_052, /* low IF, 5.1667 MHZ for tda9889 */ 61 TDA10046_FREQ_052, /* low IF, 5.1667 MHZ for tda9889 */
48}; 62};
49 63
64enum tda10046_tsout {
65 TDA10046_TS_PARALLEL = 0x00, /* parallel transport stream, default */
66 TDA10046_TS_SERIAL = 0x01, /* serial transport stream */
67};
68
50struct tda1004x_config 69struct tda1004x_config
51{ 70{
52 /* the demodulator's i2c address */ 71 /* the demodulator's i2c address */
@@ -58,6 +77,9 @@ struct tda1004x_config
58 /* Does the OCLK signal need inverted? */ 77 /* Does the OCLK signal need inverted? */
59 u8 invert_oclk; 78 u8 invert_oclk;
60 79
80 /* parallel or serial transport stream */
81 enum tda10046_tsout ts_mode;
82
61 /* Xtal frequency, 4 or 16MHz*/ 83 /* Xtal frequency, 4 or 16MHz*/
62 enum tda10046_xtal xtal_freq; 84 enum tda10046_xtal xtal_freq;
63 85
@@ -67,11 +89,35 @@ struct tda1004x_config
67 /* AGC configuration */ 89 /* AGC configuration */
68 enum tda10046_agc agc_config; 90 enum tda10046_agc agc_config;
69 91
92 /* setting of GPIO1 and 3 */
93 enum tda10046_gpio gpio_config;
94
95 /* slave address and configuration of the tuner */
96 u8 tuner_address;
97 u8 tuner_config;
98 u8 antenna_switch;
99
100 /* if the board uses another I2c Bridge (tda8290), its address */
101 u8 i2c_gate;
102
70 /* request firmware for device */ 103 /* request firmware for device */
71 /* set this to NULL if the card has a firmware EEPROM */
72 int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name); 104 int (*request_firmware)(struct dvb_frontend* fe, const struct firmware **fw, char* name);
73}; 105};
74 106
107enum tda1004x_demod {
108 TDA1004X_DEMOD_TDA10045,
109 TDA1004X_DEMOD_TDA10046,
110};
111
112struct tda1004x_state {
113 struct i2c_adapter* i2c;
114 const struct tda1004x_config* config;
115 struct dvb_frontend frontend;
116
117 /* private demod data */
118 enum tda1004x_demod demod_type;
119};
120
75#if defined(CONFIG_DVB_TDA1004X) || (defined(CONFIG_DVB_TDA1004X_MODULE) && defined(MODULE)) 121#if defined(CONFIG_DVB_TDA1004X) || (defined(CONFIG_DVB_TDA1004X_MODULE) && defined(MODULE))
76extern struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config, 122extern struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
77 struct i2c_adapter* i2c); 123 struct i2c_adapter* i2c);
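The new tda1004x_config fields above (TS output mode, GPIO setting, tuner address/config, antenna switch, optional I2C gate) let board drivers describe TDA10046/TDA827x hybrids without board-specific code in the demod driver. A hypothetical configuration is sketched below; every value is an illustrative assumption rather than a real card entry, and only the fields touched by this patch are shown.

#include "tda1004x.h"

/* hypothetical hybrid board: TDA10046 + TDA827x behind a TDA8290-style gate */
static struct tda1004x_config example_tda10046_config = {
        .demod_address  = 0x08,                 /* example demod address */
        .invert_oclk    = 0,
        .ts_mode        = TDA10046_TS_PARALLEL, /* parallel TS, the default */
        .agc_config     = TDA10046_AGC_TDA827X, /* replaces the old _GP00/_GP01/_GP11 variants */
        .gpio_config    = TDA10046_GP11_I,      /* GPIO3=1, GPIO1=1, inverted in sleep mode */
        .tuner_address  = 0x60,                 /* example tuner address */
        .tuner_config   = 0,
        .antenna_switch = 0,
        .i2c_gate       = 0x4b,                 /* example I2C bridge address */
        /* NULL is fine only for boards with a firmware EEPROM; EEPROM boot is
           tried first, then file upload via this callback */
        .request_firmware = NULL,
};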
diff --git a/drivers/media/dvb/frontends/tda827x.c b/drivers/media/dvb/frontends/tda827x.c
new file mode 100644
index 000000000000..256fc4bf500b
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda827x.c
@@ -0,0 +1,512 @@
1/*
2 *
3 * (c) 2005 Hartmut Hackmann
4 * (c) 2007 Michael Krufky
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/dvb/frontend.h>
23#include <asm/types.h>
24
25#include "tda827x.h"
26
27static int debug = 0;
28#define dprintk(args...) \
29 do { \
30 if (debug) printk(KERN_DEBUG "tda827x: " args); \
31 } while (0)
32
33struct tda827x_priv {
34 int i2c_addr;
35 struct i2c_adapter *i2c_adap;
36 struct tda827x_config *cfg;
37 u32 frequency;
38 u32 bandwidth;
39};
40
41struct tda827x_data {
42 u32 lomax;
43 u8 spd;
44 u8 bs;
45 u8 bp;
46 u8 cp;
47 u8 gc3;
48 u8 div1p5;
49};
50
51static const struct tda827x_data tda827x_dvbt[] = {
52 { .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
53 { .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
54 { .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
55 { .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
56 { .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
57 { .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
58 { .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
59 { .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
60 { .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
61 { .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
62 { .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
63 { .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0},
64 { .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
65 { .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
66 { .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
67 { .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
68 { .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
69 { .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
70 { .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
71 { .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
72 { .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
73 { .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
74 { .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
75 { .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
76 { .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
77 { .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
78 { .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
79 { .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
80 { .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0}
81};
82
83static int tda827xo_set_params(struct dvb_frontend *fe,
84 struct dvb_frontend_parameters *params)
85{
86 struct tda827x_priv *priv = fe->tuner_priv;
87 u8 buf[14];
88
89 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
90 .buf = buf, .len = sizeof(buf) };
91 int i, tuner_freq, if_freq;
92 u32 N;
93
94 dprintk("%s:\n", __FUNCTION__);
95 switch (params->u.ofdm.bandwidth) {
96 case BANDWIDTH_6_MHZ:
97 if_freq = 4000000;
98 break;
99 case BANDWIDTH_7_MHZ:
100 if_freq = 4500000;
101 break;
102 default: /* 8 MHz or Auto */
103 if_freq = 5000000;
104 break;
105 }
106 tuner_freq = params->frequency + if_freq;
107
108 i = 0;
109 while (tda827x_dvbt[i].lomax < tuner_freq) {
110 if(tda827x_dvbt[i + 1].lomax == 0)
111 break;
112 i++;
113 }
114
115 N = ((tuner_freq + 125000) / 250000) << (tda827x_dvbt[i].spd + 2);
116 buf[0] = 0;
117 buf[1] = (N>>8) | 0x40;
118 buf[2] = N & 0xff;
119 buf[3] = 0;
120 buf[4] = 0x52;
121 buf[5] = (tda827x_dvbt[i].spd << 6) + (tda827x_dvbt[i].div1p5 << 5) +
122 (tda827x_dvbt[i].bs << 3) + tda827x_dvbt[i].bp;
123 buf[6] = (tda827x_dvbt[i].gc3 << 4) + 0x8f;
124 buf[7] = 0xbf;
125 buf[8] = 0x2a;
126 buf[9] = 0x05;
127 buf[10] = 0xff;
128 buf[11] = 0x00;
129 buf[12] = 0x00;
130 buf[13] = 0x40;
131
132 msg.len = 14;
133 if (fe->ops.i2c_gate_ctrl)
134 fe->ops.i2c_gate_ctrl(fe, 1);
135 if (i2c_transfer(priv->i2c_adap, &msg, 1) != 1) {
136 printk("%s: could not write to tuner at addr: 0x%02x\n",
137 __FUNCTION__, priv->i2c_addr << 1);
138 return -EIO;
139 }
140 msleep(500);
141 /* correct CP value */
142 buf[0] = 0x30;
143 buf[1] = 0x50 + tda827x_dvbt[i].cp;
144 msg.len = 2;
145
146 if (fe->ops.i2c_gate_ctrl)
147 fe->ops.i2c_gate_ctrl(fe, 1);
148 i2c_transfer(priv->i2c_adap, &msg, 1);
149
150 priv->frequency = tuner_freq - if_freq; // FIXME
151 priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
152
153 return 0;
154}
155
156static int tda827xo_sleep(struct dvb_frontend *fe)
157{
158 struct tda827x_priv *priv = fe->tuner_priv;
159 static u8 buf[] = { 0x30, 0xd0 };
160 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
161 .buf = buf, .len = sizeof(buf) };
162
163 dprintk("%s:\n", __FUNCTION__);
164 if (fe->ops.i2c_gate_ctrl)
165 fe->ops.i2c_gate_ctrl(fe, 1);
166 i2c_transfer(priv->i2c_adap, &msg, 1);
167
168 if (priv->cfg && priv->cfg->sleep)
169 priv->cfg->sleep(fe);
170
171 return 0;
172}
173
174/* ------------------------------------------------------------------ */
175
176struct tda827xa_data {
177 u32 lomax;
178 u8 svco;
179 u8 spd;
180 u8 scr;
181 u8 sbs;
182 u8 gc3;
183};
184
185static const struct tda827xa_data tda827xa_dvbt[] = {
186 { .lomax = 56875000, .svco = 3, .spd = 4, .scr = 0, .sbs = 0, .gc3 = 1},
187 { .lomax = 67250000, .svco = 0, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
188 { .lomax = 81250000, .svco = 1, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
189 { .lomax = 97500000, .svco = 2, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
190 { .lomax = 113750000, .svco = 3, .spd = 3, .scr = 0, .sbs = 1, .gc3 = 1},
191 { .lomax = 134500000, .svco = 0, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
192 { .lomax = 154000000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
193 { .lomax = 162500000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
194 { .lomax = 183000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
195 { .lomax = 195000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
196 { .lomax = 227500000, .svco = 3, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
197 { .lomax = 269000000, .svco = 0, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
198 { .lomax = 290000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
199 { .lomax = 325000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
200 { .lomax = 390000000, .svco = 2, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
201 { .lomax = 455000000, .svco = 3, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
202 { .lomax = 520000000, .svco = 0, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
203 { .lomax = 538000000, .svco = 0, .spd = 0, .scr = 1, .sbs = 3, .gc3 = 1},
204 { .lomax = 550000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
205 { .lomax = 620000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
206 { .lomax = 650000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
207 { .lomax = 700000000, .svco = 2, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
208 { .lomax = 780000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
209 { .lomax = 820000000, .svco = 3, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
210 { .lomax = 870000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
211 { .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 0},
212 { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0}
213};
214
215static int tda827xa_set_params(struct dvb_frontend *fe,
216 struct dvb_frontend_parameters *params)
217{
218 struct tda827x_priv *priv = fe->tuner_priv;
219 u8 buf[11];
220
221 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
222 .buf = buf, .len = sizeof(buf) };
223
224 int i, tuner_freq, if_freq;
225 u32 N;
226
227 dprintk("%s:\n", __FUNCTION__);
228 if (priv->cfg && priv->cfg->lna_gain)
229 priv->cfg->lna_gain(fe, 1);
230 msleep(20);
231
232 switch (params->u.ofdm.bandwidth) {
233 case BANDWIDTH_6_MHZ:
234 if_freq = 4000000;
235 break;
236 case BANDWIDTH_7_MHZ:
237 if_freq = 4500000;
238 break;
239 default: /* 8 MHz or Auto */
240 if_freq = 5000000;
241 break;
242 }
243 tuner_freq = params->frequency + if_freq;
244
245 i = 0;
246 while (tda827xa_dvbt[i].lomax < tuner_freq) {
247 if(tda827xa_dvbt[i + 1].lomax == 0)
248 break;
249 i++;
250 }
251
252 N = ((tuner_freq + 31250) / 62500) << tda827xa_dvbt[i].spd;
253 buf[0] = 0; // subaddress
254 buf[1] = N >> 8;
255 buf[2] = N & 0xff;
256 buf[3] = 0;
257 buf[4] = 0x16;
258 buf[5] = (tda827xa_dvbt[i].spd << 5) + (tda827xa_dvbt[i].svco << 3) +
259 tda827xa_dvbt[i].sbs;
260 buf[6] = 0x4b + (tda827xa_dvbt[i].gc3 << 4);
261 buf[7] = 0x1c;
262 buf[8] = 0x06;
263 buf[9] = 0x24;
264 buf[10] = 0x00;
265 msg.len = 11;
266 if (fe->ops.i2c_gate_ctrl)
267 fe->ops.i2c_gate_ctrl(fe, 1);
268 if (i2c_transfer(priv->i2c_adap, &msg, 1) != 1) {
269 printk("%s: could not write to tuner at addr: 0x%02x\n",
270 __FUNCTION__, priv->i2c_addr << 1);
271 return -EIO;
272 }
273 buf[0] = 0x90;
274 buf[1] = 0xff;
275 buf[2] = 0x60;
276 buf[3] = 0x00;
277 buf[4] = 0x59; // lpsel, for 6MHz + 2
278 msg.len = 5;
279 if (fe->ops.i2c_gate_ctrl)
280 fe->ops.i2c_gate_ctrl(fe, 1);
281 i2c_transfer(priv->i2c_adap, &msg, 1);
282
283 buf[0] = 0xa0;
284 buf[1] = 0x40;
285 msg.len = 2;
286 if (fe->ops.i2c_gate_ctrl)
287 fe->ops.i2c_gate_ctrl(fe, 1);
288 i2c_transfer(priv->i2c_adap, &msg, 1);
289
290 msleep(11);
291 msg.flags = I2C_M_RD;
292 if (fe->ops.i2c_gate_ctrl)
293 fe->ops.i2c_gate_ctrl(fe, 1);
294 i2c_transfer(priv->i2c_adap, &msg, 1);
295 msg.flags = 0;
296
297 buf[1] >>= 4;
298 dprintk("tda8275a AGC2 gain is: %d\n", buf[1]);
299 if ((buf[1]) < 2) {
300 if (priv->cfg && priv->cfg->lna_gain)
301 priv->cfg->lna_gain(fe, 0);
302 buf[0] = 0x60;
303 buf[1] = 0x0c;
304 if (fe->ops.i2c_gate_ctrl)
305 fe->ops.i2c_gate_ctrl(fe, 1);
306 i2c_transfer(priv->i2c_adap, &msg, 1);
307 }
308
309 buf[0] = 0xc0;
310 buf[1] = 0x99; // lpsel, for 6MHz + 2
311 if (fe->ops.i2c_gate_ctrl)
312 fe->ops.i2c_gate_ctrl(fe, 1);
313 i2c_transfer(priv->i2c_adap, &msg, 1);
314
315 buf[0] = 0x60;
316 buf[1] = 0x3c;
317 if (fe->ops.i2c_gate_ctrl)
318 fe->ops.i2c_gate_ctrl(fe, 1);
319 i2c_transfer(priv->i2c_adap, &msg, 1);
320
321 /* correct CP value */
322 buf[0] = 0x30;
323 buf[1] = 0x10 + tda827xa_dvbt[i].scr;
324 if (fe->ops.i2c_gate_ctrl)
325 fe->ops.i2c_gate_ctrl(fe, 1);
326 i2c_transfer(priv->i2c_adap, &msg, 1);
327
328 msleep(163);
329 buf[0] = 0xc0;
330 buf[1] = 0x39; // lpsel, for 6MHz + 2
331 if (fe->ops.i2c_gate_ctrl)
332 fe->ops.i2c_gate_ctrl(fe, 1);
333 i2c_transfer(priv->i2c_adap, &msg, 1);
334
335 msleep(3);
336 /* freeze AGC1 */
337 buf[0] = 0x50;
338 buf[1] = 0x4f + (tda827xa_dvbt[i].gc3 << 4);
339 if (fe->ops.i2c_gate_ctrl)
340 fe->ops.i2c_gate_ctrl(fe, 1);
341 i2c_transfer(priv->i2c_adap, &msg, 1);
342
343 priv->frequency = tuner_freq - if_freq; // FIXME
344 priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
345
346 return 0;
347}
348
349static int tda827xa_sleep(struct dvb_frontend *fe)
350{
351 struct tda827x_priv *priv = fe->tuner_priv;
352 static u8 buf[] = { 0x30, 0x90 };
353 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0,
354 .buf = buf, .len = sizeof(buf) };
355
356 dprintk("%s:\n", __FUNCTION__);
357 if (fe->ops.i2c_gate_ctrl)
358 fe->ops.i2c_gate_ctrl(fe, 1);
359
360 i2c_transfer(priv->i2c_adap, &msg, 1);
361
362 if (fe->ops.i2c_gate_ctrl)
363 fe->ops.i2c_gate_ctrl(fe, 0);
364
365 if (priv->cfg && priv->cfg->sleep)
366 priv->cfg->sleep(fe);
367
368 return 0;
369}
370
371static int tda827x_release(struct dvb_frontend *fe)
372{
373 kfree(fe->tuner_priv);
374 fe->tuner_priv = NULL;
375 return 0;
376}
377
378static int tda827x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
379{
380 struct tda827x_priv *priv = fe->tuner_priv;
381 *frequency = priv->frequency;
382 return 0;
383}
384
385static int tda827x_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
386{
387 struct tda827x_priv *priv = fe->tuner_priv;
388 *bandwidth = priv->bandwidth;
389 return 0;
390}
391
392static int tda827x_init(struct dvb_frontend *fe)
393{
394 struct tda827x_priv *priv = fe->tuner_priv;
395 dprintk("%s:\n", __FUNCTION__);
396 if (priv->cfg && priv->cfg->init)
397 priv->cfg->init(fe);
398
399 return 0;
400}
401
402static int tda827x_probe_version(struct dvb_frontend *fe);
403
404static int tda827x_initial_init(struct dvb_frontend *fe)
405{
406 int ret;
407 ret = tda827x_probe_version(fe);
408 if (ret)
409 return ret;
410 return fe->ops.tuner_ops.init(fe);
411}
412
413static int tda827x_initial_sleep(struct dvb_frontend *fe)
414{
415 int ret;
416 ret = tda827x_probe_version(fe);
417 if (ret)
418 return ret;
419 return fe->ops.tuner_ops.sleep(fe);
420}
421
422static struct dvb_tuner_ops tda827xo_tuner_ops = {
423 .info = {
424 .name = "Philips TDA827X",
425 .frequency_min = 55000000,
426 .frequency_max = 860000000,
427 .frequency_step = 250000
428 },
429 .release = tda827x_release,
430 .init = tda827x_initial_init,
431 .sleep = tda827x_initial_sleep,
432 .set_params = tda827xo_set_params,
433 .get_frequency = tda827x_get_frequency,
434 .get_bandwidth = tda827x_get_bandwidth,
435};
436
437static struct dvb_tuner_ops tda827xa_tuner_ops = {
438 .info = {
439 .name = "Philips TDA827XA",
440 .frequency_min = 44000000,
441 .frequency_max = 906000000,
442 .frequency_step = 62500
443 },
444 .release = tda827x_release,
445 .init = tda827x_init,
446 .sleep = tda827xa_sleep,
447 .set_params = tda827xa_set_params,
448 .get_frequency = tda827x_get_frequency,
449 .get_bandwidth = tda827x_get_bandwidth,
450};
451
452static int tda827x_probe_version(struct dvb_frontend *fe)
453{ u8 data;
454 struct tda827x_priv *priv = fe->tuner_priv;
455 struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = I2C_M_RD,
456 .buf = &data, .len = 1 };
457 if (fe->ops.i2c_gate_ctrl)
458 fe->ops.i2c_gate_ctrl(fe, 1);
459 if (i2c_transfer(priv->i2c_adap, &msg, 1) != 1) {
460 printk("%s: could not read from tuner at addr: 0x%02x\n",
461 __FUNCTION__, msg.addr << 1);
462 return -EIO;
463 }
464 if ((data & 0x3c) == 0) {
465 dprintk("tda827x tuner found\n");
466 fe->ops.tuner_ops.init = tda827x_init;
467 fe->ops.tuner_ops.sleep = tda827xo_sleep;
468 } else {
469 dprintk("tda827xa tuner found\n");
470 memcpy(&fe->ops.tuner_ops, &tda827xa_tuner_ops, sizeof(struct dvb_tuner_ops));
471 }
472 return 0;
473}
474
475struct dvb_frontend *tda827x_attach(struct dvb_frontend *fe, int addr,
476 struct i2c_adapter *i2c,
477 struct tda827x_config *cfg)
478{
479 struct tda827x_priv *priv = NULL;
480
481 dprintk("%s:\n", __FUNCTION__);
482 priv = kzalloc(sizeof(struct tda827x_priv), GFP_KERNEL);
483 if (priv == NULL)
484 return NULL;
485
486 priv->i2c_addr = addr;
487 priv->i2c_adap = i2c;
488 priv->cfg = cfg;
489 memcpy(&fe->ops.tuner_ops, &tda827xo_tuner_ops, sizeof(struct dvb_tuner_ops));
490
491 fe->tuner_priv = priv;
492
493 return fe;
494}
495
496EXPORT_SYMBOL(tda827x_attach);
497
498module_param(debug, int, 0644);
499MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
500
501MODULE_DESCRIPTION("DVB TDA827x driver");
502MODULE_AUTHOR("Hartmut Hackmann <hartmut.hackmann@t-online.de>");
503MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
504MODULE_LICENSE("GPL");
505
506/*
507 * Overrides for Emacs so that we follow Linus's tabbing style.
508 * ---------------------------------------------------------------------------
509 * Local variables:
510 * c-basic-offset: 8
511 * End:
512 */
diff --git a/drivers/media/dvb/frontends/tda827x.h b/drivers/media/dvb/frontends/tda827x.h
new file mode 100644
index 000000000000..69e8263d6d59
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda827x.h
@@ -0,0 +1,62 @@
1 /*
2 DVB Driver for Philips tda827x / tda827xa Silicon tuners
3
4 (c) 2005 Hartmut Hackmann
5 (c) 2007 Michael Krufky
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21
22 */
23
24#ifndef __DVB_TDA827X_H__
25#define __DVB_TDA827X_H__
26
27#include <linux/i2c.h>
28#include "dvb_frontend.h"
29
30struct tda827x_config
31{
32 void (*lna_gain) (struct dvb_frontend *fe, int high);
33 int (*init) (struct dvb_frontend *fe);
34 int (*sleep) (struct dvb_frontend *fe);
35};
36
37
38/**
39 * Attach a tda827x tuner to the supplied frontend structure.
40 *
41 * @param fe Frontend to attach to.
42 * @param addr i2c address of the tuner.
43 * @param i2c i2c adapter to use.
44 * @param cfg optional callback function pointers.
45 * @return FE pointer on success, NULL on failure.
46 */
47#if defined(CONFIG_DVB_TDA827X) || (defined(CONFIG_DVB_TDA827X_MODULE) && defined(MODULE))
48extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr,
49 struct i2c_adapter *i2c,
50 struct tda827x_config *cfg);
51#else
52static inline struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe,
53 int addr,
54 struct i2c_adapter *i2c,
55 struct tda827x_config *cfg)
56{
57 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
58 return NULL;
59}
60#endif // CONFIG_DVB_TDA827X
61
62#endif // __DVB_TDA827X_H__
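tda827x_attach() above copies the TDA827x tuner ops into an existing frontend and defers old/new-silicon detection (tda827x vs. tda827xa) to the first init or sleep call via tda827x_probe_version(). A hypothetical attach sequence is sketched below; the tuner address and the empty lna_gain callback body are assumptions for illustration.

#include <linux/errno.h>
#include "tda827x.h"

/* hypothetical LNA control, e.g. via a GPIO on the bridge chip */
static void example_lna_gain(struct dvb_frontend *fe, int high)
{
        /* board specific: switch the LNA to high or low gain */
}

static struct tda827x_config example_tda827x_cfg = {
        .lna_gain = example_lna_gain,
        .init     = NULL,
        .sleep    = NULL,
};

static int example_attach_tuner(struct dvb_frontend *fe, struct i2c_adapter *i2c)
{
        /* 0x60 is an example TDA827x address; the exact address is board specific */
        if (!tda827x_attach(fe, 0x60, i2c, &example_tda827x_cfg))
                return -ENODEV;
        return 0;
}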
diff --git a/drivers/media/dvb/pluto2/Kconfig b/drivers/media/dvb/pluto2/Kconfig
index 9b84b1bdc313..7d8e6e87bdbb 100644
--- a/drivers/media/dvb/pluto2/Kconfig
+++ b/drivers/media/dvb/pluto2/Kconfig
@@ -2,7 +2,6 @@ config DVB_PLUTO2
2 tristate "Pluto2 cards" 2 tristate "Pluto2 cards"
3 depends on DVB_CORE && PCI && I2C 3 depends on DVB_CORE && PCI && I2C
4 select I2C_ALGOBIT 4 select I2C_ALGOBIT
5 select DVB_PLL
6 select DVB_TDA1004X 5 select DVB_TDA1004X
7 help 6 help
8 Support for PCI cards based on the Pluto2 FPGA like the Satelco 7 Support for PCI cards based on the Pluto2 FPGA like the Satelco
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index eec7ccf41f8b..7751628e1415 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -3,7 +3,6 @@ config DVB_AV7110
3 depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 3 depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
4 select FW_LOADER if !DVB_AV7110_FIRMWARE 4 select FW_LOADER if !DVB_AV7110_FIRMWARE
5 select VIDEO_SAA7146_VV 5 select VIDEO_SAA7146_VV
6 select DVB_PLL
7 select DVB_VES1820 if !DVB_FE_CUSTOMISE 6 select DVB_VES1820 if !DVB_FE_CUSTOMISE
8 select DVB_VES1X93 if !DVB_FE_CUSTOMISE 7 select DVB_VES1X93 if !DVB_FE_CUSTOMISE
9 select DVB_STV0299 if !DVB_FE_CUSTOMISE 8 select DVB_STV0299 if !DVB_FE_CUSTOMISE
@@ -62,13 +61,13 @@ config DVB_BUDGET
62 tristate "Budget cards" 61 tristate "Budget cards"
63 depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 62 depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
64 select VIDEO_SAA7146 63 select VIDEO_SAA7146
65 select DVB_PLL
66 select DVB_STV0299 if !DVB_FE_CUSTOMISE 64 select DVB_STV0299 if !DVB_FE_CUSTOMISE
67 select DVB_VES1X93 if !DVB_FE_CUSTOMISE 65 select DVB_VES1X93 if !DVB_FE_CUSTOMISE
68 select DVB_VES1820 if !DVB_FE_CUSTOMISE 66 select DVB_VES1820 if !DVB_FE_CUSTOMISE
69 select DVB_L64781 if !DVB_FE_CUSTOMISE 67 select DVB_L64781 if !DVB_FE_CUSTOMISE
70 select DVB_TDA8083 if !DVB_FE_CUSTOMISE 68 select DVB_TDA8083 if !DVB_FE_CUSTOMISE
71 select DVB_TDA10021 if !DVB_FE_CUSTOMISE 69 select DVB_TDA10021 if !DVB_FE_CUSTOMISE
70 select DVB_TDA10023 if !DVB_FE_CUSTOMISE
72 select DVB_S5H1420 if !DVB_FE_CUSTOMISE 71 select DVB_S5H1420 if !DVB_FE_CUSTOMISE
73 select DVB_TDA10086 if !DVB_FE_CUSTOMISE 72 select DVB_TDA10086 if !DVB_FE_CUSTOMISE
74 select DVB_TDA826X if !DVB_FE_CUSTOMISE 73 select DVB_TDA826X if !DVB_FE_CUSTOMISE
@@ -87,7 +86,6 @@ config DVB_BUDGET_CI
87 tristate "Budget cards with onboard CI connector" 86 tristate "Budget cards with onboard CI connector"
88 depends on DVB_CORE && PCI && I2C && VIDEO_V4L1 87 depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
89 select VIDEO_SAA7146 88 select VIDEO_SAA7146
90 select DVB_PLL
91 select DVB_STV0297 if !DVB_FE_CUSTOMISE 89 select DVB_STV0297 if !DVB_FE_CUSTOMISE
92 select DVB_STV0299 if !DVB_FE_CUSTOMISE 90 select DVB_STV0299 if !DVB_FE_CUSTOMISE
93 select DVB_TDA1004X if !DVB_FE_CUSTOMISE 91 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
@@ -114,6 +112,7 @@ config DVB_BUDGET_AV
114 select DVB_STV0299 if !DVB_FE_CUSTOMISE 112 select DVB_STV0299 if !DVB_FE_CUSTOMISE
115 select DVB_TDA1004X if !DVB_FE_CUSTOMISE 113 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
116 select DVB_TDA10021 if !DVB_FE_CUSTOMISE 114 select DVB_TDA10021 if !DVB_FE_CUSTOMISE
115 select DVB_TDA10023 if !DVB_FE_CUSTOMISE
117 select DVB_TUA6100 if !DVB_FE_CUSTOMISE 116 select DVB_TUA6100 if !DVB_FE_CUSTOMISE
118 select FW_LOADER 117 select FW_LOADER
119 help 118 help
@@ -130,7 +129,6 @@ config DVB_BUDGET_PATCH
130 tristate "AV7110 cards with Budget Patch" 129 tristate "AV7110 cards with Budget Patch"
131 depends on DVB_CORE && DVB_BUDGET && VIDEO_V4L1 130 depends on DVB_CORE && DVB_BUDGET && VIDEO_V4L1
132 select DVB_AV7110 131 select DVB_AV7110
133 select DVB_PLL
134 select DVB_STV0299 if !DVB_FE_CUSTOMISE 132 select DVB_STV0299 if !DVB_FE_CUSTOMISE
135 select DVB_VES1X93 if !DVB_FE_CUSTOMISE 133 select DVB_VES1X93 if !DVB_FE_CUSTOMISE
136 select DVB_TDA8083 if !DVB_FE_CUSTOMISE 134 select DVB_TDA8083 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 29ed532ba966..67becdd4db60 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -219,7 +219,10 @@ static void recover_arm(struct av7110 *av7110)
219 av7110->recover(av7110); 219 av7110->recover(av7110);
220 220
221 restart_feeds(av7110); 221 restart_feeds(av7110);
222 av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1, av7110->ir_config); 222
223#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
224 av7110_check_ir_config(av7110, true);
225#endif
223} 226}
224 227
225static void av7110_arm_sync(struct av7110 *av7110) 228static void av7110_arm_sync(struct av7110 *av7110)
@@ -250,6 +253,10 @@ static int arm_thread(void *data)
250 if (!av7110->arm_ready) 253 if (!av7110->arm_ready)
251 continue; 254 continue;
252 255
256#if defined(CONFIG_INPUT_EVDEV) || defined(CONFIG_INPUT_EVDEV_MODULE)
257 av7110_check_ir_config(av7110, false);
258#endif
259
253 if (mutex_lock_interruptible(&av7110->dcomlock)) 260 if (mutex_lock_interruptible(&av7110->dcomlock))
254 break; 261 break;
255 newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2); 262 newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2);
@@ -667,8 +674,8 @@ static void gpioirq(unsigned long data)
667 return; 674 return;
668 675
669 case DATA_IRCOMMAND: 676 case DATA_IRCOMMAND:
670 if (av7110->ir_handler) 677 if (av7110->ir.ir_handler)
671 av7110->ir_handler(av7110, 678 av7110->ir.ir_handler(av7110,
672 swahw32(irdebi(av7110, DEBINOSWAP, Reserved, 0, 4))); 679 swahw32(irdebi(av7110, DEBINOSWAP, Reserved, 0, 4)));
673 iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2); 680 iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
674 break; 681 break;
@@ -1907,8 +1914,10 @@ static int av7110_fe_lock_fix(struct av7110* av7110, fe_status_t status)
1907 if (av7110->fe_synced == synced) 1914 if (av7110->fe_synced == synced)
1908 return 0; 1915 return 0;
1909 1916
1910 if (av7110->playing) 1917 if (av7110->playing) {
1918 av7110->fe_synced = synced;
1911 return 0; 1919 return 0;
1920 }
1912 1921
1913 if (mutex_lock_interruptible(&av7110->pid_mutex)) 1922 if (mutex_lock_interruptible(&av7110->pid_mutex))
1914 return -ERESTARTSYS; 1923 return -ERESTARTSYS;
diff --git a/drivers/media/dvb/ttpci/av7110.h b/drivers/media/dvb/ttpci/av7110.h
index b98bd453cade..115002b0390c 100644
--- a/drivers/media/dvb/ttpci/av7110.h
+++ b/drivers/media/dvb/ttpci/av7110.h
@@ -5,6 +5,7 @@
5#include <linux/socket.h> 5#include <linux/socket.h>
6#include <linux/netdevice.h> 6#include <linux/netdevice.h>
7#include <linux/i2c.h> 7#include <linux/i2c.h>
8#include <linux/input.h>
8 9
9#include <linux/dvb/video.h> 10#include <linux/dvb/video.h>
10#include <linux/dvb/audio.h> 11#include <linux/dvb/audio.h>
@@ -66,6 +67,27 @@ struct dvb_video_events {
66}; 67};
67 68
68 69
70struct av7110;
71
72/* infrared remote control */
73struct infrared {
74 u16 key_map[256];
75 struct input_dev *input_dev;
76 char input_phys[32];
77 struct timer_list keyup_timer;
78 struct tasklet_struct ir_tasklet;
79 void (*ir_handler)(struct av7110 *av7110, u32 ircom);
80 u32 ir_command;
81 u32 ir_config;
82 u32 device_mask;
83 u8 protocol;
84 u8 inversion;
85 u16 last_key;
86 u16 last_toggle;
87 u8 delay_timer_finished;
88};
89
90
69/* place to store all the necessary device information */ 91/* place to store all the necessary device information */
70struct av7110 { 92struct av7110 {
71 93
@@ -227,10 +249,7 @@ struct av7110 {
227 u16 wssMode; 249 u16 wssMode;
228 u16 wssData; 250 u16 wssData;
229 251
230 u32 ir_config; 252 struct infrared ir;
231 u32 ir_command;
232 void (*ir_handler)(struct av7110 *av7110, u32 ircom);
233 struct tasklet_struct ir_tasklet;
234 253
235 /* firmware stuff */ 254 /* firmware stuff */
236 unsigned char *bin_fw; 255 unsigned char *bin_fw;
@@ -268,6 +287,7 @@ struct av7110 {
268extern int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid, 287extern int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
269 u16 subpid, u16 pcrpid); 288 u16 subpid, u16 pcrpid);
270 289
290extern int av7110_check_ir_config(struct av7110 *av7110, int force);
271extern int av7110_ir_init(struct av7110 *av7110); 291extern int av7110_ir_init(struct av7110 *av7110);
272extern void av7110_ir_exit(struct av7110 *av7110); 292extern void av7110_ir_exit(struct av7110 *av7110);
273 293
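
The header change folds the formerly separate ir_config/ir_command/ir_handler/ir_tasklet fields (and the file-scope keymap, input device and key-up timer of the old IR code) into one struct infrared embedded in struct av7110, so every card carries its own remote-control state. A schematic sketch of why that matters for the timer and tasklet callbacks; the names here are illustrative, not taken from the driver:

struct card_ir {
	unsigned short last_key;
	/* keymap, input device, key-up timer ... one copy per card */
};

struct card {
	struct card_ir ir;	/* replaces globals that were shared by all cards */
};

static void keyup_timeout(unsigned long data)
{
	/* timers and tasklets are armed with (unsigned long) &card->ir,
	 * so the callback releases the key of the right card instance */
	struct card_ir *ir = (struct card_ir *) data;

	ir->last_key = 0;
}
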
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index e719af807685..654c9e919e04 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1009,7 +1009,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1009 if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) 1009 if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY)
1010 ret = av7110_av_stop(av7110, RP_VIDEO); 1010 ret = av7110_av_stop(av7110, RP_VIDEO);
1011 else 1011 else
1012 ret = vidcom(av7110, VIDEO_CMD_STOP, 1012 ret = vidcom(av7110, AV_VIDEO_CMD_STOP,
1013 av7110->videostate.video_blank ? 0 : 1); 1013 av7110->videostate.video_blank ? 0 : 1);
1014 if (!ret) 1014 if (!ret)
1015 av7110->trickmode = TRICK_NONE; 1015 av7110->trickmode = TRICK_NONE;
@@ -1019,7 +1019,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1019 av7110->trickmode = TRICK_NONE; 1019 av7110->trickmode = TRICK_NONE;
1020 if (av7110->videostate.play_state == VIDEO_FREEZED) { 1020 if (av7110->videostate.play_state == VIDEO_FREEZED) {
1021 av7110->videostate.play_state = VIDEO_PLAYING; 1021 av7110->videostate.play_state = VIDEO_PLAYING;
1022 ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); 1022 ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
1023 if (ret) 1023 if (ret)
1024 break; 1024 break;
1025 } 1025 }
@@ -1034,7 +1034,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1034 ret = av7110_av_start_play(av7110, RP_VIDEO); 1034 ret = av7110_av_start_play(av7110, RP_VIDEO);
1035 } 1035 }
1036 if (!ret) 1036 if (!ret)
1037 ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); 1037 ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
1038 if (!ret) 1038 if (!ret)
1039 av7110->videostate.play_state = VIDEO_PLAYING; 1039 av7110->videostate.play_state = VIDEO_PLAYING;
1040 break; 1040 break;
@@ -1044,7 +1044,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1044 if (av7110->playing & RP_VIDEO) 1044 if (av7110->playing & RP_VIDEO)
1045 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Pause, 0); 1045 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Pause, 0);
1046 else 1046 else
1047 ret = vidcom(av7110, VIDEO_CMD_FREEZE, 1); 1047 ret = vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1);
1048 if (!ret) 1048 if (!ret)
1049 av7110->trickmode = TRICK_FREEZE; 1049 av7110->trickmode = TRICK_FREEZE;
1050 break; 1050 break;
@@ -1053,7 +1053,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1053 if (av7110->playing & RP_VIDEO) 1053 if (av7110->playing & RP_VIDEO)
1054 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Continue, 0); 1054 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Continue, 0);
1055 if (!ret) 1055 if (!ret)
1056 ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); 1056 ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
1057 if (!ret) { 1057 if (!ret) {
1058 av7110->videostate.play_state = VIDEO_PLAYING; 1058 av7110->videostate.play_state = VIDEO_PLAYING;
1059 av7110->trickmode = TRICK_NONE; 1059 av7110->trickmode = TRICK_NONE;
@@ -1136,7 +1136,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1136 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, 1136 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
1137 __Scan_I, 2, AV_PES, 0); 1137 __Scan_I, 2, AV_PES, 0);
1138 else 1138 else
1139 ret = vidcom(av7110, VIDEO_CMD_FFWD, arg); 1139 ret = vidcom(av7110, AV_VIDEO_CMD_FFWD, arg);
1140 if (!ret) { 1140 if (!ret) {
1141 av7110->trickmode = TRICK_FAST; 1141 av7110->trickmode = TRICK_FAST;
1142 av7110->videostate.play_state = VIDEO_PLAYING; 1142 av7110->videostate.play_state = VIDEO_PLAYING;
@@ -1147,13 +1147,13 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1147 if (av7110->playing&RP_VIDEO) { 1147 if (av7110->playing&RP_VIDEO) {
1148 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0); 1148 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0);
1149 if (!ret) 1149 if (!ret)
1150 ret = vidcom(av7110, VIDEO_CMD_SLOW, arg); 1150 ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
1151 } else { 1151 } else {
1152 ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); 1152 ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
1153 if (!ret) 1153 if (!ret)
1154 ret = vidcom(av7110, VIDEO_CMD_STOP, 0); 1154 ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 0);
1155 if (!ret) 1155 if (!ret)
1156 ret = vidcom(av7110, VIDEO_CMD_SLOW, arg); 1156 ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
1157 } 1157 }
1158 if (!ret) { 1158 if (!ret) {
1159 av7110->trickmode = TRICK_SLOW; 1159 av7110->trickmode = TRICK_SLOW;
@@ -1182,10 +1182,10 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1182 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, 1182 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
1183 __Slow, 2, 0, 0); 1183 __Slow, 2, 0, 0);
1184 if (!ret) 1184 if (!ret)
1185 ret = vidcom(av7110, VIDEO_CMD_SLOW, arg); 1185 ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
1186 } 1186 }
1187 if (av7110->trickmode == TRICK_FREEZE) 1187 if (av7110->trickmode == TRICK_FREEZE)
1188 ret = vidcom(av7110, VIDEO_CMD_STOP, 1); 1188 ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 1);
1189 } 1189 }
1190 break; 1190 break;
1191 1191
diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h
index 4e173c67fbb2..673d9b3f064c 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.h
+++ b/drivers/media/dvb/ttpci/av7110_hw.h
@@ -216,11 +216,11 @@ enum av7110_command_type {
216#define VID_CENTRE_CUT_PREF 0x05 /* PanScan with zero vector */ 216#define VID_CENTRE_CUT_PREF 0x05 /* PanScan with zero vector */
217 217
218/* MPEG video decoder commands */ 218/* MPEG video decoder commands */
219#define VIDEO_CMD_STOP 0x000e 219#define AV_VIDEO_CMD_STOP 0x000e
220#define VIDEO_CMD_PLAY 0x000d 220#define AV_VIDEO_CMD_PLAY 0x000d
221#define VIDEO_CMD_FREEZE 0x0102 221#define AV_VIDEO_CMD_FREEZE 0x0102
222#define VIDEO_CMD_FFWD 0x0016 222#define AV_VIDEO_CMD_FFWD 0x0016
223#define VIDEO_CMD_SLOW 0x0022 223#define AV_VIDEO_CMD_SLOW 0x0022
224 224
225/* MPEG audio decoder commands */ 225/* MPEG audio decoder commands */
226#define AUDIO_CMD_MUTE 0x0001 226#define AUDIO_CMD_MUTE 0x0001
diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c
index f59465bb0af3..a97f166bb523 100644
--- a/drivers/media/dvb/ttpci/av7110_ir.c
+++ b/drivers/media/dvb/ttpci/av7110_ir.c
@@ -1,8 +1,31 @@
1/*
2 * Driver for the remote control of SAA7146 based AV7110 cards
3 *
4 * Copyright (C) 1999-2003 Holger Waechtler <holger@convergence.de>
5 * Copyright (C) 2003-2007 Oliver Endriss <o.endriss@gmx.de>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
21 *
22 */
23
24
1#include <linux/types.h> 25#include <linux/types.h>
2#include <linux/init.h> 26#include <linux/init.h>
3#include <linux/module.h> 27#include <linux/module.h>
4#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
5#include <linux/input.h>
6#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
7#include <linux/kernel.h> 30#include <linux/kernel.h>
8#include <asm/bitops.h> 31#include <asm/bitops.h>
@@ -10,18 +33,37 @@
10#include "av7110.h" 33#include "av7110.h"
11#include "av7110_hw.h" 34#include "av7110_hw.h"
12 35
13#define UP_TIMEOUT (HZ*7/25)
14 36
15/* enable ir debugging by or'ing debug with 16 */ 37#define AV_CNT 4
38
39#define IR_RC5 0
40#define IR_RCMM 1
41#define IR_RC5_EXT 2 /* internal only */
42
43#define IR_ALL 0xffffffff
44
45#define UP_TIMEOUT (HZ*7/25)
16 46
17static int av_cnt;
18static struct av7110 *av_list[4];
19static struct input_dev *input_dev;
20static char input_phys[32];
21 47
22static u8 delay_timer_finished; 48/* Note: enable ir debugging by or'ing debug with 16 */
49
50static int ir_protocol[AV_CNT] = { IR_RCMM, IR_RCMM, IR_RCMM, IR_RCMM};
51module_param_array(ir_protocol, int, NULL, 0644);
52MODULE_PARM_DESC(ir_protocol, "Infrared protocol: 0 RC5, 1 RCMM (default)");
53
54static int ir_inversion[AV_CNT];
55module_param_array(ir_inversion, int, NULL, 0644);
56MODULE_PARM_DESC(ir_inversion, "Inversion of infrared signal: 0 not inverted (default), 1 inverted");
57
58static uint ir_device_mask[AV_CNT] = { IR_ALL, IR_ALL, IR_ALL, IR_ALL };
59module_param_array(ir_device_mask, uint, NULL, 0644);
60MODULE_PARM_DESC(ir_device_mask, "Bitmask of infrared devices: bit 0..31 = device 0..31 (default: all)");
61
62
63static int av_cnt;
64static struct av7110 *av_list[AV_CNT];
23 65
24static u16 key_map [256] = { 66static u16 default_key_map [256] = {
25 KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, 67 KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7,
26 KEY_8, KEY_9, KEY_BACK, 0, KEY_POWER, KEY_MUTE, 0, KEY_INFO, 68 KEY_8, KEY_9, KEY_BACK, 0, KEY_POWER, KEY_MUTE, 0, KEY_INFO,
27 KEY_VOLUMEUP, KEY_VOLUMEDOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 69 KEY_VOLUMEUP, KEY_VOLUMEDOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -45,141 +87,194 @@ static u16 key_map [256] = {
45}; 87};
46 88
47 89
48static void av7110_emit_keyup(unsigned long data) 90/* key-up timer */
91static void av7110_emit_keyup(unsigned long parm)
49{ 92{
50 if (!data || !test_bit(data, input_dev->key)) 93 struct infrared *ir = (struct infrared *) parm;
94
95 if (!ir || !test_bit(ir->last_key, ir->input_dev->key))
51 return; 96 return;
52 97
53 input_report_key(input_dev, data, 0); 98 input_report_key(ir->input_dev, ir->last_key, 0);
54 input_sync(input_dev); 99 input_sync(ir->input_dev);
55} 100}
56 101
57 102
58static struct timer_list keyup_timer = { .function = av7110_emit_keyup }; 103/* tasklet */
59
60
61static void av7110_emit_key(unsigned long parm) 104static void av7110_emit_key(unsigned long parm)
62{ 105{
63 struct av7110 *av7110 = (struct av7110 *) parm; 106 struct infrared *ir = (struct infrared *) parm;
64 u32 ir_config = av7110->ir_config; 107 u32 ircom = ir->ir_command;
65 u32 ircom = av7110->ir_command;
66 u8 data; 108 u8 data;
67 u8 addr; 109 u8 addr;
68 static u16 old_toggle = 0; 110 u16 toggle;
69 u16 new_toggle;
70 u16 keycode; 111 u16 keycode;
71 112
72 /* extract device address and data */ 113 /* extract device address and data */
73 switch (ir_config & 0x0003) { 114 switch (ir->protocol) {
74 case 0: /* RC5: 5 bits device address, 6 bits data */ 115 case IR_RC5: /* RC5: 5 bits device address, 6 bits data */
75 data = ircom & 0x3f; 116 data = ircom & 0x3f;
76 addr = (ircom >> 6) & 0x1f; 117 addr = (ircom >> 6) & 0x1f;
118 toggle = ircom & 0x0800;
77 break; 119 break;
78 120
79 case 1: /* RCMM: 8(?) bits device address, 8(?) bits data */ 121 case IR_RCMM: /* RCMM: ? bits device address, ? bits data */
80 data = ircom & 0xff; 122 data = ircom & 0xff;
81 addr = (ircom >> 8) & 0xff; 123 addr = (ircom >> 8) & 0x1f;
124 toggle = ircom & 0x8000;
82 break; 125 break;
83 126
84 case 2: /* extended RC5: 5 bits device address, 7 bits data */ 127 case IR_RC5_EXT: /* extended RC5: 5 bits device address, 7 bits data */
85 data = ircom & 0x3f; 128 data = ircom & 0x3f;
86 addr = (ircom >> 6) & 0x1f; 129 addr = (ircom >> 6) & 0x1f;
87 /* invert 7th data bit for backward compatibility with RC5 keymaps */ 130 /* invert 7th data bit for backward compatibility with RC5 keymaps */
88 if (!(ircom & 0x1000)) 131 if (!(ircom & 0x1000))
89 data |= 0x40; 132 data |= 0x40;
133 toggle = ircom & 0x0800;
90 break; 134 break;
91 135
92 default: 136 default:
93 printk("invalid ir_config %x\n", ir_config); 137 printk("%s invalid protocol %x\n", __FUNCTION__, ir->protocol);
94 return; 138 return;
95 } 139 }
96 140
97 keycode = key_map[data]; 141 input_event(ir->input_dev, EV_MSC, MSC_RAW, (addr << 16) | data);
142 input_event(ir->input_dev, EV_MSC, MSC_SCAN, data);
98 143
99 dprintk(16, "code %08x -> addr %i data 0x%02x -> keycode %i\n", 144 keycode = ir->key_map[data];
100 ircom, addr, data, keycode);
101 145
102 /* check device address (if selected) */ 146 dprintk(16, "%s: code %08x -> addr %i data 0x%02x -> keycode %i\n",
103 if (ir_config & 0x4000) 147 __FUNCTION__, ircom, addr, data, keycode);
104 if (addr != ((ir_config >> 16) & 0xff)) 148
105 return; 149 /* check device address */
150 if (!(ir->device_mask & (1 << addr)))
151 return;
106 152
107 if (!keycode) { 153 if (!keycode) {
108 printk ("%s: unknown key 0x%02x!!\n", __FUNCTION__, data); 154 printk ("%s: code %08x -> addr %i data 0x%02x -> unknown key!\n",
155 __FUNCTION__, ircom, addr, data);
109 return; 156 return;
110 } 157 }
111 158
112 if ((ir_config & 0x0003) == 1) 159 if (timer_pending(&ir->keyup_timer)) {
113 new_toggle = 0; /* RCMM */ 160 del_timer(&ir->keyup_timer);
114 else 161 if (ir->last_key != keycode || toggle != ir->last_toggle) {
115 new_toggle = (ircom & 0x800); /* RC5, extended RC5 */ 162 ir->delay_timer_finished = 0;
116 163 input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
117 if (timer_pending(&keyup_timer)) { 164 input_event(ir->input_dev, EV_KEY, keycode, 1);
118 del_timer(&keyup_timer); 165 input_sync(ir->input_dev);
119 if (keyup_timer.data != keycode || new_toggle != old_toggle) { 166 } else if (ir->delay_timer_finished) {
120 delay_timer_finished = 0; 167 input_event(ir->input_dev, EV_KEY, keycode, 2);
121 input_event(input_dev, EV_KEY, keyup_timer.data, 0); 168 input_sync(ir->input_dev);
122 input_event(input_dev, EV_KEY, keycode, 1);
123 input_sync(input_dev);
124 } else if (delay_timer_finished) {
125 input_event(input_dev, EV_KEY, keycode, 2);
126 input_sync(input_dev);
127 } 169 }
128 } else { 170 } else {
129 delay_timer_finished = 0; 171 ir->delay_timer_finished = 0;
130 input_event(input_dev, EV_KEY, keycode, 1); 172 input_event(ir->input_dev, EV_KEY, keycode, 1);
131 input_sync(input_dev); 173 input_sync(ir->input_dev);
132 } 174 }
133 175
134 keyup_timer.expires = jiffies + UP_TIMEOUT; 176 ir->last_key = keycode;
135 keyup_timer.data = keycode; 177 ir->last_toggle = toggle;
136 178
137 add_timer(&keyup_timer); 179 ir->keyup_timer.expires = jiffies + UP_TIMEOUT;
180 add_timer(&ir->keyup_timer);
138 181
139 old_toggle = new_toggle;
140} 182}
141 183
142static void input_register_keys(void) 184
185/* register with input layer */
186static void input_register_keys(struct infrared *ir)
143{ 187{
144 int i; 188 int i;
145 189
146 memset(input_dev->keybit, 0, sizeof(input_dev->keybit)); 190 set_bit(EV_KEY, ir->input_dev->evbit);
191 set_bit(EV_REP, ir->input_dev->evbit);
192 set_bit(EV_MSC, ir->input_dev->evbit);
147 193
148 for (i = 0; i < ARRAY_SIZE(key_map); i++) { 194 set_bit(MSC_RAW, ir->input_dev->mscbit);
149 if (key_map[i] > KEY_MAX) 195 set_bit(MSC_SCAN, ir->input_dev->mscbit);
150 key_map[i] = 0; 196
151 else if (key_map[i] > KEY_RESERVED) 197 memset(ir->input_dev->keybit, 0, sizeof(ir->input_dev->keybit));
152 set_bit(key_map[i], input_dev->keybit); 198
199 for (i = 0; i < ARRAY_SIZE(ir->key_map); i++) {
200 if (ir->key_map[i] > KEY_MAX)
201 ir->key_map[i] = 0;
202 else if (ir->key_map[i] > KEY_RESERVED)
203 set_bit(ir->key_map[i], ir->input_dev->keybit);
153 } 204 }
205
206 ir->input_dev->keycode = ir->key_map;
207 ir->input_dev->keycodesize = sizeof(ir->key_map[0]);
208 ir->input_dev->keycodemax = ARRAY_SIZE(ir->key_map);
154} 209}
155 210
156 211
157static void input_repeat_key(unsigned long data) 212/* called by the input driver after rep[REP_DELAY] ms */
213static void input_repeat_key(unsigned long parm)
158{ 214{
159 /* called by the input driver after rep[REP_DELAY] ms */ 215 struct infrared *ir = (struct infrared *) parm;
160 delay_timer_finished = 1; 216
217 ir->delay_timer_finished = 1;
161} 218}
162 219
163 220
164static int av7110_setup_irc_config(struct av7110 *av7110, u32 ir_config) 221/* check for configuration changes */
222int av7110_check_ir_config(struct av7110 *av7110, int force)
165{ 223{
166 int ret = 0; 224 int i;
225 int modified = force;
226 int ret = -ENODEV;
167 227
168 dprintk(4, "%p\n", av7110); 228 for (i = 0; i < av_cnt; i++)
169 if (av7110) { 229 if (av7110 == av_list[i])
170 ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1, ir_config); 230 break;
171 av7110->ir_config = ir_config; 231
232 if (i < av_cnt && av7110) {
233 if ((av7110->ir.protocol & 1) != ir_protocol[i] ||
234 av7110->ir.inversion != ir_inversion[i])
235 modified = true;
236
237 if (modified) {
238 /* protocol */
239 if (ir_protocol[i]) {
240 ir_protocol[i] = 1;
241 av7110->ir.protocol = IR_RCMM;
242 av7110->ir.ir_config = 0x0001;
243 } else if (FW_VERSION(av7110->arm_app) >= 0x2620) {
244 av7110->ir.protocol = IR_RC5_EXT;
245 av7110->ir.ir_config = 0x0002;
246 } else {
247 av7110->ir.protocol = IR_RC5;
248 av7110->ir.ir_config = 0x0000;
249 }
250 /* inversion */
251 if (ir_inversion[i]) {
252 ir_inversion[i] = 1;
253 av7110->ir.ir_config |= 0x8000;
254 }
255 av7110->ir.inversion = ir_inversion[i];
256 /* update ARM */
257 ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1,
258 av7110->ir.ir_config);
259 } else
260 ret = 0;
261
262 /* address */
263 if (av7110->ir.device_mask != ir_device_mask[i])
264 av7110->ir.device_mask = ir_device_mask[i];
172 } 265 }
266
173 return ret; 267 return ret;
174} 268}
175 269
176 270
271/* /proc/av7110_ir interface */
177static int av7110_ir_write_proc(struct file *file, const char __user *buffer, 272static int av7110_ir_write_proc(struct file *file, const char __user *buffer,
178 unsigned long count, void *data) 273 unsigned long count, void *data)
179{ 274{
180 char *page; 275 char *page;
181 int size = 4 + 256 * sizeof(u16);
182 u32 ir_config; 276 u32 ir_config;
277 int size = sizeof ir_config + sizeof av_list[0]->ir.key_map;
183 int i; 278 int i;
184 279
185 if (count < size) 280 if (count < size)
@@ -194,71 +289,86 @@ static int av7110_ir_write_proc(struct file *file, const char __user *buffer,
194 return -EFAULT; 289 return -EFAULT;
195 } 290 }
196 291
197 memcpy(&ir_config, page, 4); 292 memcpy(&ir_config, page, sizeof ir_config);
198 memcpy(&key_map, page + 4, 256 * sizeof(u16)); 293
294 for (i = 0; i < av_cnt; i++) {
295 /* keymap */
296 memcpy(av_list[i]->ir.key_map, page + sizeof ir_config,
297 sizeof(av_list[i]->ir.key_map));
298 /* protocol, inversion, address */
299 ir_protocol[i] = ir_config & 0x0001;
300 ir_inversion[i] = ir_config & 0x8000 ? 1 : 0;
301 if (ir_config & 0x4000)
302 ir_device_mask[i] = 1 << ((ir_config >> 16) & 0x1f);
303 else
304 ir_device_mask[i] = IR_ALL;
305 /* update configuration */
306 av7110_check_ir_config(av_list[i], false);
307 input_register_keys(&av_list[i]->ir);
308 }
199 vfree(page); 309 vfree(page);
200 if (FW_VERSION(av_list[0]->arm_app) >= 0x2620 && !(ir_config & 0x0001))
201 ir_config |= 0x0002; /* enable extended RC5 */
202 for (i = 0; i < av_cnt; i++)
203 av7110_setup_irc_config(av_list[i], ir_config);
204 input_register_keys();
205 return count; 310 return count;
206} 311}
207 312
208 313
314/* interrupt handler */
209static void ir_handler(struct av7110 *av7110, u32 ircom) 315static void ir_handler(struct av7110 *av7110, u32 ircom)
210{ 316{
211 dprintk(4, "ircommand = %08x\n", ircom); 317 dprintk(4, "ir command = %08x\n", ircom);
212 av7110->ir_command = ircom; 318 av7110->ir.ir_command = ircom;
213 tasklet_schedule(&av7110->ir_tasklet); 319 tasklet_schedule(&av7110->ir.ir_tasklet);
214} 320}
215 321
216 322
217int __devinit av7110_ir_init(struct av7110 *av7110) 323int __devinit av7110_ir_init(struct av7110 *av7110)
218{ 324{
325 struct input_dev *input_dev;
219 static struct proc_dir_entry *e; 326 static struct proc_dir_entry *e;
220 int err; 327 int err;
221 328
222 if (av_cnt >= ARRAY_SIZE(av_list)) 329 if (av_cnt >= ARRAY_SIZE(av_list))
223 return -ENOSPC; 330 return -ENOSPC;
224 331
225 av7110_setup_irc_config(av7110, 0x0001);
226 av_list[av_cnt++] = av7110; 332 av_list[av_cnt++] = av7110;
333 av7110_check_ir_config(av7110, true);
227 334
228 if (av_cnt == 1) { 335 init_timer(&av7110->ir.keyup_timer);
229 init_timer(&keyup_timer); 336 av7110->ir.keyup_timer.function = av7110_emit_keyup;
230 keyup_timer.data = 0; 337 av7110->ir.keyup_timer.data = (unsigned long) &av7110->ir;
231 338
232 input_dev = input_allocate_device(); 339 input_dev = input_allocate_device();
233 if (!input_dev) 340 if (!input_dev)
234 return -ENOMEM; 341 return -ENOMEM;
235
236 snprintf(input_phys, sizeof(input_phys),
237 "pci-%s/ir0", pci_name(av7110->dev->pci));
238
239 input_dev->name = "DVB on-card IR receiver";
240
241 input_dev->phys = input_phys;
242 input_dev->id.bustype = BUS_PCI;
243 input_dev->id.version = 1;
244 if (av7110->dev->pci->subsystem_vendor) {
245 input_dev->id.vendor = av7110->dev->pci->subsystem_vendor;
246 input_dev->id.product = av7110->dev->pci->subsystem_device;
247 } else {
248 input_dev->id.vendor = av7110->dev->pci->vendor;
249 input_dev->id.product = av7110->dev->pci->device;
250 }
251 input_dev->cdev.dev = &av7110->dev->pci->dev;
252 set_bit(EV_KEY, input_dev->evbit);
253 set_bit(EV_REP, input_dev->evbit);
254 input_register_keys();
255 err = input_register_device(input_dev);
256 if (err) {
257 input_free_device(input_dev);
258 return err;
259 }
260 input_dev->timer.function = input_repeat_key;
261 342
343 av7110->ir.input_dev = input_dev;
344 snprintf(av7110->ir.input_phys, sizeof(av7110->ir.input_phys),
345 "pci-%s/ir0", pci_name(av7110->dev->pci));
346
347 input_dev->name = "DVB on-card IR receiver";
348
349 input_dev->phys = av7110->ir.input_phys;
350 input_dev->id.bustype = BUS_PCI;
351 input_dev->id.version = 2;
352 if (av7110->dev->pci->subsystem_vendor) {
353 input_dev->id.vendor = av7110->dev->pci->subsystem_vendor;
354 input_dev->id.product = av7110->dev->pci->subsystem_device;
355 } else {
356 input_dev->id.vendor = av7110->dev->pci->vendor;
357 input_dev->id.product = av7110->dev->pci->device;
358 }
359 input_dev->cdev.dev = &av7110->dev->pci->dev;
360 /* initial keymap */
361 memcpy(av7110->ir.key_map, default_key_map, sizeof av7110->ir.key_map);
362 input_register_keys(&av7110->ir);
363 err = input_register_device(input_dev);
364 if (err) {
365 input_free_device(input_dev);
366 return err;
367 }
368 input_dev->timer.function = input_repeat_key;
369 input_dev->timer.data = (unsigned long) &av7110->ir;
370
371 if (av_cnt == 1) {
262 e = create_proc_entry("av7110_ir", S_IFREG | S_IRUGO | S_IWUSR, NULL); 372 e = create_proc_entry("av7110_ir", S_IFREG | S_IRUGO | S_IWUSR, NULL);
263 if (e) { 373 if (e) {
264 e->write_proc = av7110_ir_write_proc; 374 e->write_proc = av7110_ir_write_proc;
@@ -266,8 +376,8 @@ int __devinit av7110_ir_init(struct av7110 *av7110)
266 } 376 }
267 } 377 }
268 378
269 tasklet_init(&av7110->ir_tasklet, av7110_emit_key, (unsigned long) av7110); 379 tasklet_init(&av7110->ir.ir_tasklet, av7110_emit_key, (unsigned long) &av7110->ir);
270 av7110->ir_handler = ir_handler; 380 av7110->ir.ir_handler = ir_handler;
271 381
272 return 0; 382 return 0;
273} 383}
@@ -280,8 +390,10 @@ void __devexit av7110_ir_exit(struct av7110 *av7110)
280 if (av_cnt == 0) 390 if (av_cnt == 0)
281 return; 391 return;
282 392
283 av7110->ir_handler = NULL; 393 del_timer_sync(&av7110->ir.keyup_timer);
284 tasklet_kill(&av7110->ir_tasklet); 394 av7110->ir.ir_handler = NULL;
395 tasklet_kill(&av7110->ir.ir_tasklet);
396
285 for (i = 0; i < av_cnt; i++) 397 for (i = 0; i < av_cnt; i++)
286 if (av_list[i] == av7110) { 398 if (av_list[i] == av7110) {
287 av_list[i] = av_list[av_cnt-1]; 399 av_list[i] = av_list[av_cnt-1];
@@ -289,14 +401,13 @@ void __devexit av7110_ir_exit(struct av7110 *av7110)
289 break; 401 break;
290 } 402 }
291 403
292 if (av_cnt == 1) { 404 if (av_cnt == 1)
293 del_timer_sync(&keyup_timer);
294 remove_proc_entry("av7110_ir", NULL); 405 remove_proc_entry("av7110_ir", NULL);
295 input_unregister_device(input_dev); 406
296 } 407 input_unregister_device(av7110->ir.input_dev);
297 408
298 av_cnt--; 409 av_cnt--;
299} 410}
300 411
301//MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>"); 412//MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>, Oliver Endriss <o.endriss@gmx.de>");
302//MODULE_LICENSE("GPL"); 413//MODULE_LICENSE("GPL");
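
In the rewritten av7110_emit_key() above, the raw infrared word is split per protocol and filtered against a 32-bit device mask before the keymap lookup; repeated frames with an unchanged toggle bit only extend the key-up timer. The RC5 branch, reduced to a runnable sketch (the example word is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int ircom = 0x0853;			/* example raw RC5 word */
	unsigned int data = ircom & 0x3f;		/* 6-bit command */
	unsigned int addr = (ircom >> 6) & 0x1f;	/* 5-bit device address */
	unsigned int toggle = ircom & 0x0800;		/* toggle bit */
	unsigned int device_mask = 0xffffffff;		/* IR_ALL: accept every address */

	if (!(device_mask & (1u << addr)))
		return 0;	/* frame addressed to a filtered-out device */

	printf("addr %u data 0x%02x toggle %u -> key_map[data]\n",
	       addr, data, toggle ? 1 : 0);
	return 0;
}
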
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 3035b224c7a3..0e817d6f1ce5 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -35,7 +35,7 @@
35 35
36#include "budget.h" 36#include "budget.h"
37#include "stv0299.h" 37#include "stv0299.h"
38#include "tda10021.h" 38#include "tda1002x.h"
39#include "tda1004x.h" 39#include "tda1004x.h"
40#include "tua6100.h" 40#include "tua6100.h"
41#include "dvb-pll.h" 41#include "dvb-pll.h"
@@ -66,9 +66,6 @@ struct budget_av {
66 int slot_status; 66 int slot_status;
67 struct dvb_ca_en50221 ca; 67 struct dvb_ca_en50221 ca;
68 u8 reinitialise_demod:1; 68 u8 reinitialise_demod:1;
69 u8 tda10021_poclkp:1;
70 u8 tda10021_ts_enabled;
71 int (*tda10021_set_frontend)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
72}; 69};
73 70
74static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot); 71static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);
@@ -234,12 +231,6 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
234 if (budget_av->reinitialise_demod) 231 if (budget_av->reinitialise_demod)
235 dvb_frontend_reinitialise(budget_av->budget.dvb_frontend); 232 dvb_frontend_reinitialise(budget_av->budget.dvb_frontend);
236 233
237 /* set tda10021 back to original clock configuration on reset */
238 if (budget_av->tda10021_poclkp) {
239 tda10021_writereg(budget_av->budget.dvb_frontend, 0x12, 0xa0);
240 budget_av->tda10021_ts_enabled = 0;
241 }
242
243 return 0; 234 return 0;
244} 235}
245 236
@@ -256,11 +247,6 @@ static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
256 ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB); 247 ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
257 budget_av->slot_status = SLOTSTATUS_NONE; 248 budget_av->slot_status = SLOTSTATUS_NONE;
258 249
259 /* set tda10021 back to original clock configuration when cam removed */
260 if (budget_av->tda10021_poclkp) {
261 tda10021_writereg(budget_av->budget.dvb_frontend, 0x12, 0xa0);
262 budget_av->tda10021_ts_enabled = 0;
263 }
264 return 0; 250 return 0;
265} 251}
266 252
@@ -276,12 +262,6 @@ static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
276 262
277 ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA); 263 ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA);
278 264
279 /* tda10021 seems to need a different TS clock config when data is routed to the CAM */
280 if (budget_av->tda10021_poclkp) {
281 tda10021_writereg(budget_av->budget.dvb_frontend, 0x12, 0xa1);
282 budget_av->tda10021_ts_enabled = 1;
283 }
284
285 return 0; 265 return 0;
286} 266}
287 267
@@ -631,37 +611,62 @@ static struct stv0299_config cinergy_1200s_1894_0010_config = {
631static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 611static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
632{ 612{
633 struct budget *budget = (struct budget *) fe->dvb->priv; 613 struct budget *budget = (struct budget *) fe->dvb->priv;
634 u8 buf[4]; 614 u8 buf[6];
635 struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) }; 615 struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
616 int i;
636 617
618#define CU1216_IF 36125000
637#define TUNER_MUL 62500 619#define TUNER_MUL 62500
638 620
639 u32 div = (params->frequency + 36125000 + TUNER_MUL / 2) / TUNER_MUL; 621 u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
640 622
641 buf[0] = (div >> 8) & 0x7f; 623 buf[0] = (div >> 8) & 0x7f;
642 buf[1] = div & 0xff; 624 buf[1] = div & 0xff;
643 buf[2] = 0x86; 625 buf[2] = 0xce;
644 buf[3] = (params->frequency < 150000000 ? 0x01 : 626 buf[3] = (params->frequency < 150000000 ? 0x01 :
645 params->frequency < 445000000 ? 0x02 : 0x04); 627 params->frequency < 445000000 ? 0x02 : 0x04);
628 buf[4] = 0xde;
629 buf[5] = 0x20;
630
631 if (fe->ops.i2c_gate_ctrl)
632 fe->ops.i2c_gate_ctrl(fe, 1);
633 if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
634 return -EIO;
646 635
636 /* wait for the pll lock */
637 msg.flags = I2C_M_RD;
638 msg.len = 1;
639 for (i = 0; i < 20; i++) {
640 if (fe->ops.i2c_gate_ctrl)
641 fe->ops.i2c_gate_ctrl(fe, 1);
642 if (i2c_transfer(&budget->i2c_adap, &msg, 1) == 1 && (buf[0] & 0x40))
643 break;
644 msleep(10);
645 }
646
647 /* switch the charge pump to the lower current */
648 msg.flags = 0;
649 msg.len = 2;
650 msg.buf = &buf[2];
651 buf[2] &= ~0x40;
647 if (fe->ops.i2c_gate_ctrl) 652 if (fe->ops.i2c_gate_ctrl)
648 fe->ops.i2c_gate_ctrl(fe, 1); 653 fe->ops.i2c_gate_ctrl(fe, 1);
649 if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1) 654 if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1)
650 return -EIO; 655 return -EIO;
656
651 return 0; 657 return 0;
652} 658}
653 659
654static struct tda10021_config philips_cu1216_config = { 660static struct tda1002x_config philips_cu1216_config = {
655 .demod_address = 0x0c, 661 .demod_address = 0x0c,
662 .invert = 1,
656}; 663};
657 664
658static struct tda10021_config philips_cu1216_config_altaddress = { 665static struct tda1002x_config philips_cu1216_config_altaddress = {
659 .demod_address = 0x0d, 666 .demod_address = 0x0d,
667 .invert = 0,
660}; 668};
661 669
662
663
664
665static int philips_tu1216_tuner_init(struct dvb_frontend *fe) 670static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
666{ 671{
667 struct budget *budget = (struct budget *) fe->dvb->priv; 672 struct budget *budget = (struct budget *) fe->dvb->priv;
@@ -908,41 +913,28 @@ static u8 read_pwm(struct budget_av *budget_av)
908 return pwm; 913 return pwm;
909} 914}
910 915
911#define SUBID_DVBS_KNC1 0x0010 916#define SUBID_DVBS_KNC1 0x0010
912#define SUBID_DVBS_KNC1_PLUS 0x0011 917#define SUBID_DVBS_KNC1_PLUS 0x0011
913#define SUBID_DVBS_TYPHOON 0x4f56 918#define SUBID_DVBS_TYPHOON 0x4f56
914#define SUBID_DVBS_CINERGY1200 0x1154 919#define SUBID_DVBS_CINERGY1200 0x1154
915#define SUBID_DVBS_CYNERGY1200N 0x1155 920#define SUBID_DVBS_CYNERGY1200N 0x1155
916 921#define SUBID_DVBS_TV_STAR 0x0014
917#define SUBID_DVBS_TV_STAR 0x0014 922#define SUBID_DVBS_TV_STAR_CI 0x0016
918#define SUBID_DVBS_TV_STAR_CI 0x0016 923#define SUBID_DVBS_EASYWATCH_1 0x001a
919#define SUBID_DVBS_EASYWATCH_1 0x001a 924#define SUBID_DVBS_EASYWATCH 0x001e
920#define SUBID_DVBS_EASYWATCH 0x001e 925
921#define SUBID_DVBC_EASYWATCH 0x002a 926#define SUBID_DVBC_EASYWATCH 0x002a
922#define SUBID_DVBC_KNC1 0x0020 927#define SUBID_DVBC_EASYWATCH_MK3 0x002c
923#define SUBID_DVBC_KNC1_PLUS 0x0021 928#define SUBID_DVBC_KNC1 0x0020
924#define SUBID_DVBC_CINERGY1200 0x1156 929#define SUBID_DVBC_KNC1_PLUS 0x0021
925 930#define SUBID_DVBC_KNC1_MK3 0x0022
926#define SUBID_DVBT_KNC1_PLUS 0x0031 931#define SUBID_DVBC_KNC1_PLUS_MK3 0x0023
927#define SUBID_DVBT_KNC1 0x0030 932#define SUBID_DVBC_CINERGY1200 0x1156
928#define SUBID_DVBT_CINERGY1200 0x1157 933#define SUBID_DVBC_CINERGY1200_MK3 0x1176
929 934
930 935#define SUBID_DVBT_KNC1_PLUS 0x0031
931static int tda10021_set_frontend(struct dvb_frontend *fe, 936#define SUBID_DVBT_KNC1 0x0030
932 struct dvb_frontend_parameters *p) 937#define SUBID_DVBT_CINERGY1200 0x1157
933{
934 struct budget_av* budget_av = fe->dvb->priv;
935 int result;
936
937 result = budget_av->tda10021_set_frontend(fe, p);
938 if (budget_av->tda10021_ts_enabled) {
939 tda10021_writereg(budget_av->budget.dvb_frontend, 0x12, 0xa1);
940 } else {
941 tda10021_writereg(budget_av->budget.dvb_frontend, 0x12, 0xa0);
942 }
943
944 return result;
945}
946 938
947static void frontend_init(struct budget_av *budget_av) 939static void frontend_init(struct budget_av *budget_av)
948{ 940{
@@ -961,6 +953,7 @@ static void frontend_init(struct budget_av *budget_av)
961 case SUBID_DVBC_KNC1_PLUS: 953 case SUBID_DVBC_KNC1_PLUS:
962 case SUBID_DVBT_KNC1_PLUS: 954 case SUBID_DVBT_KNC1_PLUS:
963 case SUBID_DVBC_EASYWATCH: 955 case SUBID_DVBC_EASYWATCH:
956 case SUBID_DVBC_KNC1_PLUS_MK3:
964 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI); 957 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
965 break; 958 break;
966 } 959 }
@@ -1017,6 +1010,7 @@ static void frontend_init(struct budget_av *budget_av)
1017 case SUBID_DVBC_CINERGY1200: 1010 case SUBID_DVBC_CINERGY1200:
1018 case SUBID_DVBC_EASYWATCH: 1011 case SUBID_DVBC_EASYWATCH:
1019 budget_av->reinitialise_demod = 1; 1012 budget_av->reinitialise_demod = 1;
1013 budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
1020 fe = dvb_attach(tda10021_attach, &philips_cu1216_config, 1014 fe = dvb_attach(tda10021_attach, &philips_cu1216_config,
1021 &budget_av->budget.i2c_adap, 1015 &budget_av->budget.i2c_adap,
1022 read_pwm(budget_av)); 1016 read_pwm(budget_av));
@@ -1025,9 +1019,20 @@ static void frontend_init(struct budget_av *budget_av)
1025 &budget_av->budget.i2c_adap, 1019 &budget_av->budget.i2c_adap,
1026 read_pwm(budget_av)); 1020 read_pwm(budget_av));
1027 if (fe) { 1021 if (fe) {
1028 budget_av->tda10021_poclkp = 1; 1022 fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
1029 budget_av->tda10021_set_frontend = fe->ops.set_frontend; 1023 }
1030 fe->ops.set_frontend = tda10021_set_frontend; 1024 break;
1025
1026 case SUBID_DVBC_EASYWATCH_MK3:
1027 case SUBID_DVBC_CINERGY1200_MK3:
1028 case SUBID_DVBC_KNC1_MK3:
1029 case SUBID_DVBC_KNC1_PLUS_MK3:
1030 budget_av->reinitialise_demod = 1;
1031 budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
1032 fe = dvb_attach(tda10023_attach, &philips_cu1216_config,
1033 &budget_av->budget.i2c_adap,
1034 read_pwm(budget_av));
1035 if (fe) {
1031 fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params; 1036 fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params;
1032 } 1037 }
1033 break; 1038 break;
@@ -1260,12 +1265,16 @@ MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
1260MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR); 1265MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
1261MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S); 1266MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
1262MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP); 1267MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
1268MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3);
1263MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP); 1269MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
1264MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP); 1270MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
1271MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3);
1272MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3);
1265MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP); 1273MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
1266MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S); 1274MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
1267MAKE_BUDGET_INFO(cin1200sn, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S); 1275MAKE_BUDGET_INFO(cin1200sn, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
1268MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C); 1276MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C);
1277MAKE_BUDGET_INFO(cin1200cmk3, "Terratec Cinergy 1200 DVB-C MK3", BUDGET_CIN1200C_MK3);
1269MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T); 1278MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T);
1270 1279
1271static struct pci_device_id pci_tbl[] = { 1280static struct pci_device_id pci_tbl[] = {
@@ -1279,13 +1288,17 @@ static struct pci_device_id pci_tbl[] = {
1279 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e), 1288 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
1280 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a), 1289 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
1281 MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a), 1290 MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
1291 MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c),
1282 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020), 1292 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
1283 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021), 1293 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
1294 MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022),
1295 MAKE_EXTENSION_PCI(knc1cpmk3, 0x1894, 0x0023),
1284 MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030), 1296 MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
1285 MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031), 1297 MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031),
1286 MAKE_EXTENSION_PCI(cin1200s, 0x153b, 0x1154), 1298 MAKE_EXTENSION_PCI(cin1200s, 0x153b, 0x1154),
1287 MAKE_EXTENSION_PCI(cin1200sn, 0x153b, 0x1155), 1299 MAKE_EXTENSION_PCI(cin1200sn, 0x153b, 0x1155),
1288 MAKE_EXTENSION_PCI(cin1200c, 0x153b, 0x1156), 1300 MAKE_EXTENSION_PCI(cin1200c, 0x153b, 0x1156),
1301 MAKE_EXTENSION_PCI(cin1200cmk3, 0x153b, 0x1176),
1289 MAKE_EXTENSION_PCI(cin1200t, 0x153b, 0x1157), 1302 MAKE_EXTENSION_PCI(cin1200t, 0x153b, 0x1157),
1290 { 1303 {
1291 .vendor = 0, 1304 .vendor = 0,
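
philips_cu1216_tuner_set_params() now programs the tuner with a 6-byte message, polls the status byte for PLL lock (up to 20 times 10 ms) and then switches the charge pump to the lower current. A sketch of the divider arithmetic and the lock test, with the I2C transfers left out:

#define CU1216_IF 36125000
#define TUNER_MUL 62500

/* divider word programmed into the PLL, as computed above */
static unsigned int cu1216_divider(unsigned int frequency_hz)
{
	return (frequency_hz + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
}

/* lock flag polled from the tuner status byte (bit 6, mask 0x40) */
static int cu1216_locked(unsigned char status)
{
	return (status & 0x40) != 0;
}
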
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 464feaf1a9ad..4ed4599ce816 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -73,21 +73,15 @@
73#define SLOTSTATUS_READY 8 73#define SLOTSTATUS_READY 8
74#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY) 74#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
75 75
76/* Milliseconds during which key presses are regarded as key repeat and during 76/*
77 * which the debounce logic is active 77 * Milliseconds during which a key is regarded as pressed.
78 * If an identical command arrives within this time, the timer will start over.
78 */ 79 */
79#define IR_REPEAT_TIMEOUT 350 80#define IR_KEYPRESS_TIMEOUT 250
80 81
81/* RC5 device wildcard */ 82/* RC5 device wildcard */
82#define IR_DEVICE_ANY 255 83#define IR_DEVICE_ANY 255
83 84
84/* Some remotes sends multiple sequences per keypress (e.g. Zenith sends two),
85 * this setting allows the superflous sequences to be ignored
86 */
87static int debounce = 0;
88module_param(debounce, int, 0644);
89MODULE_PARM_DESC(debounce, "ignore repeated IR sequences (default: 0 = ignore no sequences)");
90
91static int rc5_device = -1; 85static int rc5_device = -1;
92module_param(rc5_device, int, 0644); 86module_param(rc5_device, int, 0644);
93MODULE_PARM_DESC(rc5_device, "only IR commands to given RC5 device (device = 0 - 31, any device = 255, default: autodetect)"); 87MODULE_PARM_DESC(rc5_device, "only IR commands to given RC5 device (device = 0 - 31, any device = 255, default: autodetect)");
@@ -99,10 +93,14 @@ MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding");
99struct budget_ci_ir { 93struct budget_ci_ir {
100 struct input_dev *dev; 94 struct input_dev *dev;
101 struct tasklet_struct msp430_irq_tasklet; 95 struct tasklet_struct msp430_irq_tasklet;
96 struct timer_list timer_keyup;
102 char name[72]; /* 40 + 32 for (struct saa7146_dev).name */ 97 char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
103 char phys[32]; 98 char phys[32];
104 struct ir_input_state state; 99 struct ir_input_state state;
105 int rc5_device; 100 int rc5_device;
101 u32 last_raw;
102 u32 ir_key;
103 bool have_command;
106}; 104};
107 105
108struct budget_ci { 106struct budget_ci {
@@ -125,13 +123,8 @@ static void msp430_ir_interrupt(unsigned long data)
125{ 123{
126 struct budget_ci *budget_ci = (struct budget_ci *) data; 124 struct budget_ci *budget_ci = (struct budget_ci *) data;
127 struct input_dev *dev = budget_ci->ir.dev; 125 struct input_dev *dev = budget_ci->ir.dev;
128 static int bounces = 0;
129 int device;
130 int toggle;
131 static int prev_toggle = -1;
132 static u32 ir_key;
133 static int state = 0;
134 u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8; 126 u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
127 u32 raw;
135 128
136 /* 129 /*
137 * The msp430 chip can generate two different bytes, command and device 130 * The msp430 chip can generate two different bytes, command and device
@@ -143,7 +136,7 @@ static void msp430_ir_interrupt(unsigned long data)
143 * bytes and one or more device bytes. For the repeated bytes, the 136 * bytes and one or more device bytes. For the repeated bytes, the
144 * highest bit (X) is set. The first command byte is always generated 137 * highest bit (X) is set. The first command byte is always generated
145 * before the first device byte. Other than that, no specific order 138 * before the first device byte. Other than that, no specific order
146 * seems to apply. 139 * seems to apply. To make life interesting, bytes can also be lost.
147 * 140 *
148 * Only when we have a command and device byte, a keypress is 141 * Only when we have a command and device byte, a keypress is
149 * generated. 142 * generated.
@@ -152,53 +145,35 @@ static void msp430_ir_interrupt(unsigned long data)
152 if (ir_debug) 145 if (ir_debug)
153 printk("budget_ci: received byte 0x%02x\n", command); 146 printk("budget_ci: received byte 0x%02x\n", command);
154 147
155 /* Is this a repeated byte? */ 148 /* Remove repeat bit, we use every command */
156 if (command & 0x80) 149 command = command & 0x7f;
157 return;
158 150
159 /* Is this a RC5 command byte? */ 151 /* Is this a RC5 command byte? */
160 if (command & 0x40) { 152 if (command & 0x40) {
161 state = 1; 153 budget_ci->ir.have_command = true;
162 ir_key = command & 0x3f; 154 budget_ci->ir.ir_key = command & 0x3f;
163 return; 155 return;
164 } 156 }
165 157
166 /* It's a RC5 device byte */ 158 /* It's a RC5 device byte */
167 if (!state) 159 if (!budget_ci->ir.have_command)
168 return; 160 return;
169 state = 0; 161 budget_ci->ir.have_command = false;
170 device = command & 0x1f;
171 toggle = command & 0x20;
172 162
173 if (budget_ci->ir.rc5_device != IR_DEVICE_ANY && budget_ci->ir.rc5_device != device) 163 if (budget_ci->ir.rc5_device != IR_DEVICE_ANY &&
164 budget_ci->ir.rc5_device != (command & 0x1f))
174 return; 165 return;
175 166
176 /* Ignore repeated key sequences if requested */ 167 /* Is this a repeated key sequence? (same device, command, toggle) */
177 if (toggle == prev_toggle && ir_key == dev->repeat_key && 168 raw = budget_ci->ir.ir_key | (command << 8);
178 bounces > 0 && timer_pending(&dev->timer)) { 169 if (budget_ci->ir.last_raw != raw || !timer_pending(&budget_ci->ir.timer_keyup)) {
179 if (ir_debug)
180 printk("budget_ci: debounce logic ignored IR command\n");
181 bounces--;
182 return;
183 }
184 prev_toggle = toggle;
185
186 /* Are we still waiting for a keyup event? */
187 if (del_timer(&dev->timer))
188 ir_input_nokey(dev, &budget_ci->ir.state);
189
190 /* Generate keypress */
191 if (ir_debug)
192 printk("budget_ci: generating keypress 0x%02x\n", ir_key);
193 ir_input_keydown(dev, &budget_ci->ir.state, ir_key, (ir_key & (command << 8)));
194
195 /* Do we want to delay the keyup event? */
196 if (debounce) {
197 bounces = debounce;
198 mod_timer(&dev->timer, jiffies + msecs_to_jiffies(IR_REPEAT_TIMEOUT));
199 } else {
200 ir_input_nokey(dev, &budget_ci->ir.state); 170 ir_input_nokey(dev, &budget_ci->ir.state);
171 ir_input_keydown(dev, &budget_ci->ir.state,
172 budget_ci->ir.ir_key, raw);
173 budget_ci->ir.last_raw = raw;
201 } 174 }
175
176 mod_timer(&budget_ci->ir.timer_keyup, jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT));
202} 177}
203 178
204static int msp430_ir_init(struct budget_ci *budget_ci) 179static int msp430_ir_init(struct budget_ci *budget_ci)
@@ -271,16 +246,21 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
271 break; 246 break;
272 } 247 }
273 248
274 /* initialise the key-up debounce timeout handler */ 249 /* initialise the key-up timeout handler */
275 input_dev->timer.function = msp430_ir_keyup; 250 init_timer(&budget_ci->ir.timer_keyup);
276 input_dev->timer.data = (unsigned long) &budget_ci->ir; 251 budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
277 252 budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
253 budget_ci->ir.last_raw = 0xffff; /* An impossible value */
278 error = input_register_device(input_dev); 254 error = input_register_device(input_dev);
279 if (error) { 255 if (error) {
280 printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error); 256 printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
281 goto out2; 257 goto out2;
282 } 258 }
283 259
260 /* note: these must be after input_register_device */
261 input_dev->rep[REP_DELAY] = 400;
262 input_dev->rep[REP_PERIOD] = 250;
263
284 tasklet_init(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt, 264 tasklet_init(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt,
285 (unsigned long) budget_ci); 265 (unsigned long) budget_ci);
286 266
@@ -304,10 +284,8 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
304 saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT); 284 saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
305 tasklet_kill(&budget_ci->ir.msp430_irq_tasklet); 285 tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
306 286
307 if (del_timer(&dev->timer)) { 287 del_timer_sync(&dev->timer);
308 ir_input_nokey(dev, &budget_ci->ir.state); 288 ir_input_nokey(dev, &budget_ci->ir.state);
309 input_sync(dev);
310 }
311 289
312 input_unregister_device(dev); 290 input_unregister_device(dev);
313} 291}
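
The reworked msp430_ir_interrupt() above reassembles keypresses from a two-byte stream: bit 6 marks the command byte, a following device byte completes the pair, and a keypress is re-issued only when the combined raw value changes or the key-up timer has expired; the timer is re-armed on every completed pair. A runnable model of that state machine (input_dev plumbing replaced by printf):

#include <stdio.h>
#include <stdbool.h>

struct ir { bool have_command; unsigned ir_key, last_raw; bool timer_pending; };

static void feed(struct ir *ir, unsigned byte)
{
	unsigned command = byte & 0x7f;		/* strip the repeat bit */

	if (command & 0x40) {			/* RC5 command byte */
		ir->have_command = true;
		ir->ir_key = command & 0x3f;
		return;
	}
	if (!ir->have_command)			/* device byte without a command */
		return;
	ir->have_command = false;

	unsigned raw = ir->ir_key | (command << 8);
	if (ir->last_raw != raw || !ir->timer_pending) {
		printf("keydown key=0x%02x device=%u\n", ir->ir_key, command & 0x1f);
		ir->last_raw = raw;
	}
	ir->timer_pending = true;		/* stands in for mod_timer(+IR_KEYPRESS_TIMEOUT) */
}

int main(void)
{
	struct ir ir = { .last_raw = 0xffff };	/* impossible value, as in the patch */
	unsigned stream[] = { 0x41, 0x05, 0xc1, 0x85 };	/* cmd, dev, repeated cmd, dev */
	for (unsigned i = 0; i < sizeof(stream) / sizeof(stream[0]); i++)
		feed(&ir, stream[i]);
	return 0;
}
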
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index e15562f81664..6b97dc1e6b65 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -41,11 +41,14 @@
41 41
42#define TS_WIDTH (2 * TS_SIZE) 42#define TS_WIDTH (2 * TS_SIZE)
43#define TS_WIDTH_ACTIVY TS_SIZE 43#define TS_WIDTH_ACTIVY TS_SIZE
44#define TS_WIDTH_DVBC TS_SIZE
44#define TS_HEIGHT_MASK 0xf00 45#define TS_HEIGHT_MASK 0xf00
45#define TS_HEIGHT_MASK_ACTIVY 0xc00 46#define TS_HEIGHT_MASK_ACTIVY 0xc00
47#define TS_HEIGHT_MASK_DVBC 0xe00
46#define TS_MIN_BUFSIZE_K 188 48#define TS_MIN_BUFSIZE_K 188
47#define TS_MAX_BUFSIZE_K 1410 49#define TS_MAX_BUFSIZE_K 1410
48#define TS_MAX_BUFSIZE_K_ACTIVY 564 50#define TS_MAX_BUFSIZE_K_ACTIVY 564
51#define TS_MAX_BUFSIZE_K_DVBC 1316
49#define BUFFER_WARNING_WAIT (30*HZ) 52#define BUFFER_WARNING_WAIT (30*HZ)
50 53
51int budget_debug; 54int budget_debug;
@@ -106,6 +109,19 @@ static int start_ts_capture(struct budget *budget)
106 saa7146_write(dev, MC2, (MASK_10 | MASK_26)); 109 saa7146_write(dev, MC2, (MASK_10 | MASK_26));
107 saa7146_write(dev, BRS_CTRL, 0x60000000); 110 saa7146_write(dev, BRS_CTRL, 0x60000000);
108 break; 111 break;
112 case BUDGET_CIN1200C_MK3:
113 case BUDGET_KNC1C_MK3:
114 case BUDGET_KNC1CP_MK3:
115 if (budget->video_port == BUDGET_VIDEO_PORTA) {
116 saa7146_write(dev, DD1_INIT, 0x06000200);
117 saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
118 saa7146_write(dev, BRS_CTRL, 0x00000000);
119 } else {
120 saa7146_write(dev, DD1_INIT, 0x00000600);
121 saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
122 saa7146_write(dev, BRS_CTRL, 0x60000000);
123 }
124 break;
109 default: 125 default:
110 if (budget->video_port == BUDGET_VIDEO_PORTA) { 126 if (budget->video_port == BUDGET_VIDEO_PORTA) {
111 saa7146_write(dev, DD1_INIT, 0x06000200); 127 saa7146_write(dev, DD1_INIT, 0x06000200);
@@ -122,7 +138,13 @@ static int start_ts_capture(struct budget *budget)
122 mdelay(10); 138 mdelay(10);
123 139
124 saa7146_write(dev, BASE_ODD3, 0); 140 saa7146_write(dev, BASE_ODD3, 0);
125 saa7146_write(dev, BASE_EVEN3, 0); 141 if (budget->buffer_size > budget->buffer_height * budget->buffer_width) {
142 // using odd/even buffers
143 saa7146_write(dev, BASE_EVEN3, budget->buffer_height * budget->buffer_width);
144 } else {
145 // using a single buffer
146 saa7146_write(dev, BASE_EVEN3, 0);
147 }
126 saa7146_write(dev, PROT_ADDR3, budget->buffer_size); 148 saa7146_write(dev, PROT_ADDR3, budget->buffer_size);
127 saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90); 149 saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90);
128 150
@@ -399,11 +421,25 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
399 budget->card = bi; 421 budget->card = bi;
400 budget->dev = (struct saa7146_dev *) dev; 422 budget->dev = (struct saa7146_dev *) dev;
401 423
402 if (budget->card->type == BUDGET_FS_ACTIVY) { 424 switch(budget->card->type) {
425 case BUDGET_FS_ACTIVY:
403 budget->buffer_width = TS_WIDTH_ACTIVY; 426 budget->buffer_width = TS_WIDTH_ACTIVY;
404 max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY; 427 max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY;
405 height_mask = TS_HEIGHT_MASK_ACTIVY; 428 height_mask = TS_HEIGHT_MASK_ACTIVY;
406 } else { 429 break;
430
431 case BUDGET_KNC1C:
432 case BUDGET_KNC1CP:
433 case BUDGET_CIN1200C:
434 case BUDGET_KNC1C_MK3:
435 case BUDGET_KNC1CP_MK3:
436 case BUDGET_CIN1200C_MK3:
437 budget->buffer_width = TS_WIDTH_DVBC;
438 max_bufsize = TS_MAX_BUFSIZE_K_DVBC;
439 height_mask = TS_HEIGHT_MASK_DVBC;
440 break;
441
442 default:
407 budget->buffer_width = TS_WIDTH; 443 budget->buffer_width = TS_WIDTH;
408 max_bufsize = TS_MAX_BUFSIZE_K; 444 max_bufsize = TS_MAX_BUFSIZE_K;
409 height_mask = TS_HEIGHT_MASK; 445 height_mask = TS_HEIGHT_MASK;
@@ -415,14 +451,22 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
415 dma_buffer_size = max_bufsize; 451 dma_buffer_size = max_bufsize;
416 452
417 budget->buffer_height = dma_buffer_size * 1024 / budget->buffer_width; 453 budget->buffer_height = dma_buffer_size * 1024 / budget->buffer_width;
418 budget->buffer_height &= height_mask; 454 if (budget->buffer_height > 0xfff) {
419 budget->buffer_size = budget->buffer_height * budget->buffer_width; 455 budget->buffer_height /= 2;
456 budget->buffer_height &= height_mask;
457 budget->buffer_size = 2 * budget->buffer_height * budget->buffer_width;
458 } else {
459 budget->buffer_height &= height_mask;
460 budget->buffer_size = budget->buffer_height * budget->buffer_width;
461 }
420 budget->buffer_warning_threshold = budget->buffer_size * 80/100; 462 budget->buffer_warning_threshold = budget->buffer_size * 80/100;
421 budget->buffer_warnings = 0; 463 budget->buffer_warnings = 0;
422 budget->buffer_warning_time = jiffies; 464 budget->buffer_warning_time = jiffies;
423 465
424 dprintk(2, "%s: width = %d, height = %d\n", 466 dprintk(2, "%s: buffer type = %s, width = %d, height = %d\n",
425 budget->dev->name, budget->buffer_width, budget->buffer_height); 467 budget->dev->name,
468 budget->buffer_size > budget->buffer_width * budget->buffer_height ? "odd/even" : "single",
469 budget->buffer_width, budget->buffer_height);
426 printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size); 470 printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);
427 471
428 if ((ret = dvb_register_adapter(&budget->dvb_adapter, budget->card->name, owner, &budget->dev->pci->dev)) < 0) { 472 if ((ret = dvb_register_adapter(&budget->dvb_adapter, budget->card->name, owner, &budget->dev->pci->dev)) < 0) {
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index e8a5c79178e1..d764ffa728b0 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -99,6 +99,9 @@ static struct saa7146_pci_extension_data x_var = { \
99#define BUDGET_KNC1CP 12 99#define BUDGET_KNC1CP 12
100#define BUDGET_KNC1TP 13 100#define BUDGET_KNC1TP 13
101#define BUDGET_TVSTAR 14 101#define BUDGET_TVSTAR 14
102#define BUDGET_CIN1200C_MK3 15
103#define BUDGET_KNC1C_MK3 16
104#define BUDGET_KNC1CP_MK3 17
102 105
103#define BUDGET_VIDEO_PORTA 0 106#define BUDGET_VIDEO_PORTA 0
104#define BUDGET_VIDEO_PORTB 1 107#define BUDGET_VIDEO_PORTB 1
diff --git a/drivers/media/dvb/ttusb-budget/Kconfig b/drivers/media/dvb/ttusb-budget/Kconfig
index e78ea9227b0e..f546bccdb997 100644
--- a/drivers/media/dvb/ttusb-budget/Kconfig
+++ b/drivers/media/dvb/ttusb-budget/Kconfig
@@ -1,7 +1,6 @@
1config DVB_TTUSB_BUDGET 1config DVB_TTUSB_BUDGET
2 tristate "Technotrend/Hauppauge Nova-USB devices" 2 tristate "Technotrend/Hauppauge Nova-USB devices"
3 depends on DVB_CORE && USB && I2C 3 depends on DVB_CORE && USB && I2C
4 select DVB_PLL
5 select DVB_CX22700 if !DVB_FE_CUSTOMISE 4 select DVB_CX22700 if !DVB_FE_CUSTOMISE
6 select DVB_TDA1004X if !DVB_FE_CUSTOMISE 5 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
7 select DVB_VES1820 if !DVB_FE_CUSTOMISE 6 select DVB_VES1820 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index b2e88ad28977..5adc27c3ced9 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -231,129 +231,149 @@ static struct v4l2_queryctrl radio_qctrl[] = {
231 } 231 }
232}; 232};
233 233
234static int rt_do_ioctl(struct inode *inode, struct file *file, 234static int vidioc_querycap(struct file *file, void *priv,
235 unsigned int cmd, void *arg) 235 struct v4l2_capability *v)
236{
237 strlcpy(v->driver, "radio-aimslab", sizeof(v->driver));
238 strlcpy(v->card, "RadioTrack", sizeof(v->card));
239 sprintf(v->bus_info, "ISA");
240 v->version = RADIO_VERSION;
241 v->capabilities = V4L2_CAP_TUNER;
242 return 0;
243}
244
245static int vidioc_g_tuner(struct file *file, void *priv,
246 struct v4l2_tuner *v)
236{ 247{
237 struct video_device *dev = video_devdata(file); 248 struct video_device *dev = video_devdata(file);
238 struct rt_device *rt=dev->priv; 249 struct rt_device *rt = dev->priv;
239 250
240 switch(cmd) 251 if (v->index > 0)
241 { 252 return -EINVAL;
242 case VIDIOC_QUERYCAP:
243 {
244 struct v4l2_capability *v = arg;
245 memset(v,0,sizeof(*v));
246 strlcpy(v->driver, "radio-aimslab", sizeof (v->driver));
247 strlcpy(v->card, "RadioTrack", sizeof (v->card));
248 sprintf(v->bus_info,"ISA");
249 v->version = RADIO_VERSION;
250 v->capabilities = V4L2_CAP_TUNER;
251 253
252 return 0; 254 strcpy(v->name, "FM");
253 } 255 v->type = V4L2_TUNER_RADIO;
254 case VIDIOC_G_TUNER: 256 v->rangelow = (87*16000);
255 { 257 v->rangehigh = (108*16000);
256 struct v4l2_tuner *v = arg; 258 v->rxsubchans = V4L2_TUNER_SUB_MONO;
259 v->capability = V4L2_TUNER_CAP_LOW;
260 v->audmode = V4L2_TUNER_MODE_MONO;
261 v->signal = 0xffff*rt_getsigstr(rt);
262 return 0;
263}
264
265static int vidioc_s_tuner(struct file *file, void *priv,
266 struct v4l2_tuner *v)
267{
268 if (v->index > 0)
269 return -EINVAL;
270 return 0;
271}
257 272
258 if (v->index > 0) 273static int vidioc_s_frequency(struct file *file, void *priv,
259 return -EINVAL; 274 struct v4l2_frequency *f)
275{
276 struct video_device *dev = video_devdata(file);
277 struct rt_device *rt = dev->priv;
260 278
261 memset(v,0,sizeof(*v)); 279 rt->curfreq = f->frequency;
262 strcpy(v->name, "FM"); 280 rt_setfreq(rt, rt->curfreq);
263 v->type = V4L2_TUNER_RADIO; 281 return 0;
282}
264 283
265 v->rangelow=(87*16000); 284static int vidioc_g_frequency(struct file *file, void *priv,
266 v->rangehigh=(108*16000); 285 struct v4l2_frequency *f)
267 v->rxsubchans =V4L2_TUNER_SUB_MONO; 286{
268 v->capability=V4L2_TUNER_CAP_LOW; 287 struct video_device *dev = video_devdata(file);
269 v->audmode = V4L2_TUNER_MODE_MONO; 288 struct rt_device *rt = dev->priv;
270 v->signal=0xFFFF*rt_getsigstr(rt);
271 289
272 return 0; 290 f->type = V4L2_TUNER_RADIO;
273 } 291 f->frequency = rt->curfreq;
274 case VIDIOC_S_TUNER: 292 return 0;
275 { 293}
276 struct v4l2_tuner *v = arg;
277 294
278 if (v->index > 0) 295static int vidioc_queryctrl(struct file *file, void *priv,
279 return -EINVAL; 296 struct v4l2_queryctrl *qc)
297{
298 int i;
280 299
300 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
301 if (qc->id && qc->id == radio_qctrl[i].id) {
302 memcpy(qc, &(radio_qctrl[i]),
303 sizeof(*qc));
281 return 0; 304 return 0;
282 } 305 }
283 case VIDIOC_S_FREQUENCY: 306 }
284 { 307 return -EINVAL;
285 struct v4l2_frequency *f = arg; 308}
286 309
287 rt->curfreq = f->frequency; 310static int vidioc_g_ctrl(struct file *file, void *priv,
288 rt_setfreq(rt, rt->curfreq); 311 struct v4l2_control *ctrl)
289 return 0; 312{
290 } 313 struct video_device *dev = video_devdata(file);
291 case VIDIOC_G_FREQUENCY: 314 struct rt_device *rt = dev->priv;
292 {
293 struct v4l2_frequency *f = arg;
294 315
295 f->type = V4L2_TUNER_RADIO; 316 switch (ctrl->id) {
296 f->frequency = rt->curfreq; 317 case V4L2_CID_AUDIO_MUTE:
318 ctrl->value = rt->muted;
319 return 0;
320 case V4L2_CID_AUDIO_VOLUME:
321 ctrl->value = rt->curvol * 6554;
322 return 0;
323 }
324 return -EINVAL;
325}
297 326
298 return 0; 327static int vidioc_s_ctrl(struct file *file, void *priv,
299 } 328 struct v4l2_control *ctrl)
300 case VIDIOC_QUERYCTRL: 329{
301 { 330 struct video_device *dev = video_devdata(file);
302 struct v4l2_queryctrl *qc = arg; 331 struct rt_device *rt = dev->priv;
303 int i;
304
305 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
306 if (qc->id && qc->id == radio_qctrl[i].id) {
307 memcpy(qc, &(radio_qctrl[i]),
308 sizeof(*qc));
309 return (0);
310 }
311 }
312 return -EINVAL;
313 }
314 case VIDIOC_G_CTRL:
315 {
316 struct v4l2_control *ctrl= arg;
317
318 switch (ctrl->id) {
319 case V4L2_CID_AUDIO_MUTE:
320 ctrl->value=rt->muted;
321 return (0);
322 case V4L2_CID_AUDIO_VOLUME:
323 ctrl->value=rt->curvol * 6554;
324 return (0);
325 }
326 return -EINVAL;
327 }
328 case VIDIOC_S_CTRL:
329 {
330 struct v4l2_control *ctrl= arg;
331
332 switch (ctrl->id) {
333 case V4L2_CID_AUDIO_MUTE:
334 if (ctrl->value) {
335 rt_mute(rt);
336 } else {
337 rt_setvol(rt,rt->curvol);
338 }
339 return (0);
340 case V4L2_CID_AUDIO_VOLUME:
341 rt_setvol(rt,ctrl->value);
342 return (0);
343 }
344 return -EINVAL;
345 }
346 332
347 default: 333 switch (ctrl->id) {
348 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 334 case V4L2_CID_AUDIO_MUTE:
349 rt_do_ioctl); 335 if (ctrl->value)
336 rt_mute(rt);
337 else
338 rt_setvol(rt,rt->curvol);
339 return 0;
340 case V4L2_CID_AUDIO_VOLUME:
341 rt_setvol(rt,ctrl->value);
342 return 0;
350 } 343 }
344 return -EINVAL;
345}
346
347static int vidioc_g_audio (struct file *file, void *priv,
348 struct v4l2_audio *a)
349{
350 if (a->index > 1)
351 return -EINVAL;
352
353 strcpy(a->name, "Radio");
354 a->capability = V4L2_AUDCAP_STEREO;
355 return 0;
356}
357
358static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
359{
360 *i = 0;
361 return 0;
362}
363
364static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
365{
366 if (i != 0)
367 return -EINVAL;
368 return 0;
351} 369}
352 370
353static int rt_ioctl(struct inode *inode, struct file *file, 371static int vidioc_s_audio(struct file *file, void *priv,
354 unsigned int cmd, unsigned long arg) 372 struct v4l2_audio *a)
355{ 373{
356 return video_usercopy(inode, file, cmd, arg, rt_do_ioctl); 374 if (a->index != 0)
375 return -EINVAL;
376 return 0;
357} 377}
358 378
359static struct rt_device rtrack_unit; 379static struct rt_device rtrack_unit;
@@ -362,7 +382,7 @@ static const struct file_operations rtrack_fops = {
362 .owner = THIS_MODULE, 382 .owner = THIS_MODULE,
363 .open = video_exclusive_open, 383 .open = video_exclusive_open,
364 .release = video_exclusive_release, 384 .release = video_exclusive_release,
365 .ioctl = rt_ioctl, 385 .ioctl = video_ioctl2,
366 .compat_ioctl = v4l_compat_ioctl32, 386 .compat_ioctl = v4l_compat_ioctl32,
367 .llseek = no_llseek, 387 .llseek = no_llseek,
368}; 388};
@@ -374,6 +394,18 @@ static struct video_device rtrack_radio=
374 .type = VID_TYPE_TUNER, 394 .type = VID_TYPE_TUNER,
375 .hardware = 0, 395 .hardware = 0,
376 .fops = &rtrack_fops, 396 .fops = &rtrack_fops,
397 .vidioc_querycap = vidioc_querycap,
398 .vidioc_g_tuner = vidioc_g_tuner,
399 .vidioc_s_tuner = vidioc_s_tuner,
400 .vidioc_g_audio = vidioc_g_audio,
401 .vidioc_s_audio = vidioc_s_audio,
402 .vidioc_g_input = vidioc_g_input,
403 .vidioc_s_input = vidioc_s_input,
404 .vidioc_g_frequency = vidioc_g_frequency,
405 .vidioc_s_frequency = vidioc_s_frequency,
406 .vidioc_queryctrl = vidioc_queryctrl,
407 .vidioc_g_ctrl = vidioc_g_ctrl,
408 .vidioc_s_ctrl = vidioc_s_ctrl,
377}; 409};
378 410
379static int __init rtrack_init(void) 411static int __init rtrack_init(void)
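
The radio-aimslab.c hunks above show the conversion pattern that the radio drivers in this diff all follow: the single rt_do_ioctl()/video_usercopy() switch is split into one small vidioc_* handler per V4L2 command, the handlers are hooked into struct video_device, and the file_operations .ioctl slot is pointed at the generic video_ioctl2() dispatcher. A minimal sketch of the resulting shape, using a hypothetical "foo" driver rather than any of the real drivers in this patch (the header choice is an assumption of this sketch; treat it as an illustration, not a buildable module):

/* Sketch only: hypothetical "foo" radio driver in the converted style. */
#include <linux/module.h>
#include <linux/videodev.h>		/* assumed header, as used by these drivers */
#include <media/v4l2-common.h>

struct foo_device {			/* hypothetical per-card state */
	unsigned long curfreq;
};

static int vidioc_s_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct video_device *vdev = video_devdata(file);
	struct foo_device *foo = vdev->priv;

	foo->curfreq = f->frequency;	/* program the hardware here */
	return 0;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= video_exclusive_open,
	.release	= video_exclusive_release,
	.ioctl		= video_ioctl2,		/* generic V4L2 dispatcher */
	.compat_ioctl	= v4l_compat_ioctl32,
	.llseek		= no_llseek,
};

static struct video_device foo_radio = {
	.name			= "foo radio",
	.type			= VID_TYPE_TUNER,
	.fops			= &foo_fops,
	.vidioc_s_frequency	= vidioc_s_frequency,
	/* ...plus the other vidioc_* callbacks, as in the hunks above;
	 * the device is then registered with video_register_device()
	 * from the module init function, unchanged by this patch. */
};
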
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index 74976cba869f..fdf5d6e46eac 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -192,131 +192,158 @@ static inline unsigned int gemtek_pci_getsignal( struct gemtek_pci_card *card )
192 return ( inb( card->iobase ) & 0x08 ) ? 0 : 1; 192 return ( inb( card->iobase ) & 0x08 ) ? 0 : 1;
193} 193}
194 194
195static int gemtek_pci_do_ioctl(struct inode *inode, struct file *file, 195static int vidioc_querycap(struct file *file, void *priv,
196 unsigned int cmd, void *arg) 196 struct v4l2_capability *v)
197{
198 strlcpy(v->driver, "radio-gemtek-pci", sizeof(v->driver));
199 strlcpy(v->card, "GemTek PCI Radio", sizeof(v->card));
200 sprintf(v->bus_info, "PCI");
201 v->version = RADIO_VERSION;
202 v->capabilities = V4L2_CAP_TUNER;
203 return 0;
204}
205
206static int vidioc_g_tuner(struct file *file, void *priv,
207 struct v4l2_tuner *v)
197{ 208{
198 struct video_device *dev = video_devdata(file); 209 struct video_device *dev = video_devdata(file);
199 struct gemtek_pci_card *card = dev->priv; 210 struct gemtek_pci_card *card = dev->priv;
200 211
201 switch ( cmd ) { 212 if (v->index > 0)
202 case VIDIOC_QUERYCAP: 213 return -EINVAL;
203 { 214
204 struct v4l2_capability *v = arg; 215 strcpy(v->name, "FM");
205 memset(v,0,sizeof(*v)); 216 v->type = V4L2_TUNER_RADIO;
206 strlcpy(v->driver, "radio-gemtek-pci", sizeof (v->driver)); 217 v->rangelow = GEMTEK_PCI_RANGE_LOW;
207 strlcpy(v->card, "GemTek PCI Radio", sizeof (v->card)); 218 v->rangehigh = GEMTEK_PCI_RANGE_HIGH;
208 sprintf(v->bus_info,"ISA"); 219 v->rxsubchans = V4L2_TUNER_SUB_MONO;
209 v->version = RADIO_VERSION; 220 v->capability = V4L2_TUNER_CAP_LOW;
210 v->capabilities = V4L2_CAP_TUNER; 221 v->audmode = V4L2_TUNER_MODE_MONO;
211 222 v->signal = 0xffff * gemtek_pci_getsignal(card);
212 return 0; 223 return 0;
213 } 224}
214 case VIDIOC_G_TUNER:
215 {
216 struct v4l2_tuner *v = arg;
217 225
218 if (v->index > 0) 226static int vidioc_s_tuner(struct file *file, void *priv,
219 return -EINVAL; 227 struct v4l2_tuner *v)
228{
229 if (v->index > 0)
230 return -EINVAL;
231 return 0;
232}
220 233
221 memset(v,0,sizeof(*v)); 234static int vidioc_s_frequency(struct file *file, void *priv,
222 strcpy(v->name, "FM"); 235 struct v4l2_frequency *f)
223 v->type = V4L2_TUNER_RADIO; 236{
237 struct video_device *dev = video_devdata(file);
238 struct gemtek_pci_card *card = dev->priv;
224 239
225 v->rangelow = GEMTEK_PCI_RANGE_LOW; 240 if ( (f->frequency < GEMTEK_PCI_RANGE_LOW) ||
226 v->rangehigh = GEMTEK_PCI_RANGE_HIGH; 241 (f->frequency > GEMTEK_PCI_RANGE_HIGH) )
227 v->rxsubchans =V4L2_TUNER_SUB_MONO; 242 return -EINVAL;
228 v->capability=V4L2_TUNER_CAP_LOW; 243 gemtek_pci_setfrequency(card, f->frequency);
229 v->audmode = V4L2_TUNER_MODE_MONO; 244 card->current_frequency = f->frequency;
230 v->signal=0xFFFF*gemtek_pci_getsignal( card ); 245 card->mute = false;
246 return 0;
247}
231 248
232 return 0; 249static int vidioc_g_frequency(struct file *file, void *priv,
233 } 250 struct v4l2_frequency *f)
234 case VIDIOC_S_TUNER: 251{
235 { 252 struct video_device *dev = video_devdata(file);
236 struct v4l2_tuner *v = arg; 253 struct gemtek_pci_card *card = dev->priv;
237 254
238 if (v->index > 0) 255 f->type = V4L2_TUNER_RADIO;
239 return -EINVAL; 256 f->frequency = card->current_frequency;
257 return 0;
258}
240 259
260static int vidioc_queryctrl(struct file *file, void *priv,
261 struct v4l2_queryctrl *qc)
262{
263 int i;
264 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
265 if (qc->id && qc->id == radio_qctrl[i].id) {
266 memcpy(qc, &(radio_qctrl[i]),
267 sizeof(*qc));
241 return 0; 268 return 0;
242 } 269 }
243 case VIDIOC_S_FREQUENCY: 270 }
244 { 271 return -EINVAL;
245 struct v4l2_frequency *f = arg; 272}
246 273
247 if ( (f->frequency < GEMTEK_PCI_RANGE_LOW) || 274static int vidioc_g_ctrl(struct file *file, void *priv,
248 (f->frequency > GEMTEK_PCI_RANGE_HIGH) ) 275 struct v4l2_control *ctrl)
249 return -EINVAL; 276{
277 struct video_device *dev = video_devdata(file);
278 struct gemtek_pci_card *card = dev->priv;
250 279
280 switch (ctrl->id) {
281 case V4L2_CID_AUDIO_MUTE:
282 ctrl->value = card->mute;
283 return 0;
284 case V4L2_CID_AUDIO_VOLUME:
285 if (card->mute)
286 ctrl->value = 0;
287 else
288 ctrl->value = 65535;
289 return 0;
290 }
291 return -EINVAL;
292}
251 293
252 gemtek_pci_setfrequency( card, f->frequency ); 294static int vidioc_s_ctrl(struct file *file, void *priv,
253 card->current_frequency = f->frequency; 295 struct v4l2_control *ctrl)
254 card->mute = false; 296{
255 return 0; 297 struct video_device *dev = video_devdata(file);
256 } 298 struct gemtek_pci_card *card = dev->priv;
257 case VIDIOC_QUERYCTRL: 299
258 { 300 switch (ctrl->id) {
259 struct v4l2_queryctrl *qc = arg; 301 case V4L2_CID_AUDIO_MUTE:
260 int i; 302 if (ctrl->value)
261 303 gemtek_pci_mute(card);
262 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 304 else
263 if (qc->id && qc->id == radio_qctrl[i].id) { 305 gemtek_pci_unmute(card);
264 memcpy(qc, &(radio_qctrl[i]), 306 return 0;
265 sizeof(*qc)); 307 case V4L2_CID_AUDIO_VOLUME:
266 return (0); 308 if (ctrl->value)
267 } 309 gemtek_pci_unmute(card);
268 } 310 else
269 return -EINVAL; 311 gemtek_pci_mute(card);
270 } 312 return 0;
271 case VIDIOC_G_CTRL:
272 {
273 struct v4l2_control *ctrl= arg;
274
275 switch (ctrl->id) {
276 case V4L2_CID_AUDIO_MUTE:
277 ctrl->value=card->mute;
278 return (0);
279 case V4L2_CID_AUDIO_VOLUME:
280 if (card->mute)
281 ctrl->value=0;
282 else
283 ctrl->value=65535;
284 return (0);
285 }
286 return -EINVAL;
287 }
288 case VIDIOC_S_CTRL:
289 {
290 struct v4l2_control *ctrl= arg;
291
292 switch (ctrl->id) {
293 case V4L2_CID_AUDIO_MUTE:
294 if (ctrl->value) {
295 gemtek_pci_mute(card);
296 } else {
297 gemtek_pci_unmute(card);
298 }
299 return (0);
300 case V4L2_CID_AUDIO_VOLUME:
301 if (ctrl->value) {
302 gemtek_pci_unmute(card);
303 } else {
304 gemtek_pci_mute(card);
305 }
306 return (0);
307 }
308 return -EINVAL;
309 }
310 default:
311 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
312 gemtek_pci_do_ioctl);
313 } 313 }
314 return -EINVAL;
314} 315}
315 316
316static int gemtek_pci_ioctl(struct inode *inode, struct file *file, 317static int vidioc_g_audio(struct file *file, void *priv,
317 unsigned int cmd, unsigned long arg) 318 struct v4l2_audio *a)
318{ 319{
319 return video_usercopy(inode, file, cmd, arg, gemtek_pci_do_ioctl); 320 if (a->index > 1)
321 return -EINVAL;
322
323 strcpy(a->name, "Radio");
324 a->capability = V4L2_AUDCAP_STEREO;
325 return 0;
326}
327
328static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
329{
330 *i = 0;
331 return 0;
332}
333
334static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
335{
336 if (i != 0)
337 return -EINVAL;
338 return 0;
339}
340
341static int vidioc_s_audio(struct file *file, void *priv,
342 struct v4l2_audio *a)
343{
344 if (a->index != 0)
345 return -EINVAL;
346 return 0;
320} 347}
321 348
322enum { 349enum {
@@ -342,7 +369,7 @@ static const struct file_operations gemtek_pci_fops = {
342 .owner = THIS_MODULE, 369 .owner = THIS_MODULE,
343 .open = video_exclusive_open, 370 .open = video_exclusive_open,
344 .release = video_exclusive_release, 371 .release = video_exclusive_release,
345 .ioctl = gemtek_pci_ioctl, 372 .ioctl = video_ioctl2,
346 .compat_ioctl = v4l_compat_ioctl32, 373 .compat_ioctl = v4l_compat_ioctl32,
347 .llseek = no_llseek, 374 .llseek = no_llseek,
348}; 375};
@@ -353,6 +380,18 @@ static struct video_device vdev_template = {
353 .type = VID_TYPE_TUNER, 380 .type = VID_TYPE_TUNER,
354 .hardware = 0, 381 .hardware = 0,
355 .fops = &gemtek_pci_fops, 382 .fops = &gemtek_pci_fops,
383 .vidioc_querycap = vidioc_querycap,
384 .vidioc_g_tuner = vidioc_g_tuner,
385 .vidioc_s_tuner = vidioc_s_tuner,
386 .vidioc_g_audio = vidioc_g_audio,
387 .vidioc_s_audio = vidioc_s_audio,
388 .vidioc_g_input = vidioc_g_input,
389 .vidioc_s_input = vidioc_s_input,
390 .vidioc_g_frequency = vidioc_g_frequency,
391 .vidioc_s_frequency = vidioc_s_frequency,
392 .vidioc_queryctrl = vidioc_queryctrl,
393 .vidioc_g_ctrl = vidioc_g_ctrl,
394 .vidioc_s_ctrl = vidioc_s_ctrl,
356}; 395};
357 396
358static int __devinit gemtek_pci_probe( struct pci_dev *pci_dev, const struct pci_device_id *pci_id ) 397static int __devinit gemtek_pci_probe( struct pci_dev *pci_dev, const struct pci_device_id *pci_id )
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 36c4be6622c7..b04b6a7fff7c 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -161,137 +161,157 @@ static int gemtek_getsigstr(struct gemtek_device *dev)
161 return 1; /* signal present */ 161 return 1; /* signal present */
162} 162}
163 163
164static int gemtek_do_ioctl(struct inode *inode, struct file *file, 164static int vidioc_querycap(struct file *file, void *priv,
165 unsigned int cmd, void *arg) 165 struct v4l2_capability *v)
166{
167 strlcpy(v->driver, "radio-gemtek", sizeof(v->driver));
168 strlcpy(v->card, "GemTek", sizeof(v->card));
169 sprintf(v->bus_info, "ISA");
170 v->version = RADIO_VERSION;
171 v->capabilities = V4L2_CAP_TUNER;
172 return 0;
173}
174
175static int vidioc_g_tuner(struct file *file, void *priv,
176 struct v4l2_tuner *v)
166{ 177{
167 struct video_device *dev = video_devdata(file); 178 struct video_device *dev = video_devdata(file);
168 struct gemtek_device *rt=dev->priv; 179 struct gemtek_device *rt = dev->priv;
169 180
170 switch(cmd) 181 if (v->index > 0)
171 { 182 return -EINVAL;
172 case VIDIOC_QUERYCAP:
173 {
174 struct v4l2_capability *v = arg;
175 memset(v,0,sizeof(*v));
176 strlcpy(v->driver, "radio-gemtek", sizeof (v->driver));
177 strlcpy(v->card, "GemTek", sizeof (v->card));
178 sprintf(v->bus_info,"ISA");
179 v->version = RADIO_VERSION;
180 v->capabilities = V4L2_CAP_TUNER;
181 183
182 return 0; 184 strcpy(v->name, "FM");
183 } 185 v->type = V4L2_TUNER_RADIO;
184 case VIDIOC_G_TUNER: 186 v->rangelow = (87*16000);
185 { 187 v->rangehigh = (108*16000);
186 struct v4l2_tuner *v = arg; 188 v->rxsubchans = V4L2_TUNER_SUB_MONO;
189 v->capability = V4L2_TUNER_CAP_LOW;
190 v->audmode = V4L2_TUNER_MODE_MONO;
191 v->signal = 0xffff*gemtek_getsigstr(rt);
192 return 0;
193}
194
195static int vidioc_s_tuner(struct file *file, void *priv,
196 struct v4l2_tuner *v)
197{
198 if (v->index > 0)
199 return -EINVAL;
200 return 0;
201}
187 202
188 if (v->index > 0) 203static int vidioc_s_frequency(struct file *file, void *priv,
189 return -EINVAL; 204 struct v4l2_frequency *f)
205{
206 struct video_device *dev = video_devdata(file);
207 struct gemtek_device *rt = dev->priv;
190 208
191 memset(v,0,sizeof(*v)); 209 rt->curfreq = f->frequency;
192 strcpy(v->name, "FM"); 210 /* needs to be called twice in order for getsigstr to work */
193 v->type = V4L2_TUNER_RADIO; 211 gemtek_setfreq(rt, rt->curfreq);
212 gemtek_setfreq(rt, rt->curfreq);
213 return 0;
214}
194 215
195 v->rangelow=(87*16000); 216static int vidioc_g_frequency(struct file *file, void *priv,
196 v->rangehigh=(108*16000); 217 struct v4l2_frequency *f)
197 v->rxsubchans =V4L2_TUNER_SUB_MONO; 218{
198 v->capability=V4L2_TUNER_CAP_LOW; 219 struct video_device *dev = video_devdata(file);
199 v->audmode = V4L2_TUNER_MODE_MONO; 220 struct gemtek_device *rt = dev->priv;
200 v->signal=0xFFFF*gemtek_getsigstr(rt);
201 221
202 return 0; 222 f->type = V4L2_TUNER_RADIO;
203 } 223 f->frequency = rt->curfreq;
204 case VIDIOC_S_TUNER: 224 return 0;
205 { 225}
206 struct v4l2_tuner *v = arg;
207 226
208 if (v->index > 0) 227static int vidioc_queryctrl(struct file *file, void *priv,
209 return -EINVAL; 228 struct v4l2_queryctrl *qc)
229{
230 int i;
210 231
232 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
233 if (qc->id && qc->id == radio_qctrl[i].id) {
234 memcpy(qc, &(radio_qctrl[i]),
235 sizeof(*qc));
211 return 0; 236 return 0;
212 } 237 }
213 case VIDIOC_S_FREQUENCY: 238 }
214 { 239 return -EINVAL;
215 struct v4l2_frequency *f = arg; 240}
216
217 rt->curfreq = f->frequency;
218 /* needs to be called twice in order for getsigstr to work */
219 gemtek_setfreq(rt, rt->curfreq);
220 gemtek_setfreq(rt, rt->curfreq);
221 return 0;
222 }
223 case VIDIOC_G_FREQUENCY:
224 {
225 struct v4l2_frequency *f = arg;
226 241
227 f->type = V4L2_TUNER_RADIO; 242static int vidioc_g_ctrl(struct file *file, void *priv,
228 f->frequency = rt->curfreq; 243 struct v4l2_control *ctrl)
244{
245 struct video_device *dev = video_devdata(file);
246 struct gemtek_device *rt = dev->priv;
229 247
230 return 0; 248 switch (ctrl->id) {
231 } 249 case V4L2_CID_AUDIO_MUTE:
232 case VIDIOC_QUERYCTRL: 250 ctrl->value = rt->muted;
233 { 251 return 0;
234 struct v4l2_queryctrl *qc = arg; 252 case V4L2_CID_AUDIO_VOLUME:
235 int i; 253 if (rt->muted)
236 254 ctrl->value = 0;
237 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 255 else
238 if (qc->id && qc->id == radio_qctrl[i].id) { 256 ctrl->value = 65535;
239 memcpy(qc, &(radio_qctrl[i]), 257 return 0;
240 sizeof(*qc)); 258 }
241 return (0); 259 return -EINVAL;
242 } 260}
243 } 261
244 return -EINVAL; 262static int vidioc_s_ctrl(struct file *file, void *priv,
245 } 263 struct v4l2_control *ctrl)
246 case VIDIOC_G_CTRL: 264{
247 { 265 struct video_device *dev = video_devdata(file);
248 struct v4l2_control *ctrl= arg; 266 struct gemtek_device *rt = dev->priv;
249 267
250 switch (ctrl->id) { 268 switch (ctrl->id) {
251 case V4L2_CID_AUDIO_MUTE: 269 case V4L2_CID_AUDIO_MUTE:
252 ctrl->value=rt->muted; 270 if (ctrl->value)
253 return (0); 271 gemtek_mute(rt);
254 case V4L2_CID_AUDIO_VOLUME: 272 else
255 if (rt->muted) 273 gemtek_unmute(rt);
256 ctrl->value=0; 274 return 0;
257 else 275 case V4L2_CID_AUDIO_VOLUME:
258 ctrl->value=65535; 276 if (ctrl->value)
259 return (0); 277 gemtek_unmute(rt);
260 } 278 else
261 return -EINVAL; 279 gemtek_mute(rt);
262 } 280 return 0;
263 case VIDIOC_S_CTRL:
264 {
265 struct v4l2_control *ctrl= arg;
266
267 switch (ctrl->id) {
268 case V4L2_CID_AUDIO_MUTE:
269 if (ctrl->value) {
270 gemtek_mute(rt);
271 } else {
272 gemtek_unmute(rt);
273 }
274 return (0);
275 case V4L2_CID_AUDIO_VOLUME:
276 if (ctrl->value) {
277 gemtek_unmute(rt);
278 } else {
279 gemtek_mute(rt);
280 }
281 return (0);
282 }
283 return -EINVAL;
284 }
285 default:
286 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
287 gemtek_do_ioctl);
288 } 281 }
282 return -EINVAL;
289} 283}
290 284
291static int gemtek_ioctl(struct inode *inode, struct file *file, 285static int vidioc_g_audio (struct file *file, void *priv,
292 unsigned int cmd, unsigned long arg) 286 struct v4l2_audio *a)
293{ 287{
294 return video_usercopy(inode, file, cmd, arg, gemtek_do_ioctl); 288 if (a->index > 1)
289 return -EINVAL;
290
291 strcpy(a->name, "Radio");
292 a->capability = V4L2_AUDCAP_STEREO;
293 return 0;
294}
295
296static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
297{
298 *i = 0;
299 return 0;
300}
301
302static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
303{
304 if (i != 0)
305 return -EINVAL;
306 return 0;
307}
308
309static int vidioc_s_audio(struct file *file, void *priv,
310 struct v4l2_audio *a)
311{
312 if (a->index != 0)
313 return -EINVAL;
314 return 0;
295} 315}
296 316
297static struct gemtek_device gemtek_unit; 317static struct gemtek_device gemtek_unit;
@@ -300,7 +320,7 @@ static const struct file_operations gemtek_fops = {
300 .owner = THIS_MODULE, 320 .owner = THIS_MODULE,
301 .open = video_exclusive_open, 321 .open = video_exclusive_open,
302 .release = video_exclusive_release, 322 .release = video_exclusive_release,
303 .ioctl = gemtek_ioctl, 323 .ioctl = video_ioctl2,
304 .compat_ioctl = v4l_compat_ioctl32, 324 .compat_ioctl = v4l_compat_ioctl32,
305 .llseek = no_llseek, 325 .llseek = no_llseek,
306}; 326};
@@ -312,6 +332,18 @@ static struct video_device gemtek_radio=
312 .type = VID_TYPE_TUNER, 332 .type = VID_TYPE_TUNER,
313 .hardware = 0, 333 .hardware = 0,
314 .fops = &gemtek_fops, 334 .fops = &gemtek_fops,
335 .vidioc_querycap = vidioc_querycap,
336 .vidioc_g_tuner = vidioc_g_tuner,
337 .vidioc_s_tuner = vidioc_s_tuner,
338 .vidioc_g_audio = vidioc_g_audio,
339 .vidioc_s_audio = vidioc_s_audio,
340 .vidioc_g_input = vidioc_g_input,
341 .vidioc_s_input = vidioc_s_input,
342 .vidioc_g_frequency = vidioc_g_frequency,
343 .vidioc_s_frequency = vidioc_s_frequency,
344 .vidioc_queryctrl = vidioc_queryctrl,
345 .vidioc_g_ctrl = vidioc_g_ctrl,
346 .vidioc_s_ctrl = vidioc_s_ctrl,
315}; 347};
316 348
317static int __init gemtek_init(void) 349static int __init gemtek_init(void)
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index e67b7f258029..11f80cacd6ed 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -75,8 +75,6 @@ static struct v4l2_queryctrl radio_qctrl[] = {
75static int radio_nr = -1; 75static int radio_nr = -1;
76module_param(radio_nr, int, 0); 76module_param(radio_nr, int, 0);
77 77
78static int radio_ioctl(struct inode *inode, struct file *file,
79 unsigned int cmd, unsigned long arg);
80static int maestro_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 78static int maestro_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
81static void maestro_remove(struct pci_dev *pdev); 79static void maestro_remove(struct pci_dev *pdev);
82 80
@@ -102,18 +100,11 @@ static const struct file_operations maestro_fops = {
102 .owner = THIS_MODULE, 100 .owner = THIS_MODULE,
103 .open = video_exclusive_open, 101 .open = video_exclusive_open,
104 .release = video_exclusive_release, 102 .release = video_exclusive_release,
105 .ioctl = radio_ioctl, 103 .ioctl = video_ioctl2,
106 .compat_ioctl = v4l_compat_ioctl32, 104 .compat_ioctl = v4l_compat_ioctl32,
107 .llseek = no_llseek, 105 .llseek = no_llseek,
108}; 106};
109 107
110static struct video_device maestro_radio = {
111 .name = "Maestro radio",
112 .type = VID_TYPE_TUNER,
113 .hardware = 0,
114 .fops = &maestro_fops,
115};
116
117struct radio_device { 108struct radio_device {
118 u16 io, /* base of Maestro card radio io (GPIO_DATA)*/ 109 u16 io, /* base of Maestro card radio io (GPIO_DATA)*/
119 muted, /* VIDEO_AUDIO_MUTE */ 110 muted, /* VIDEO_AUDIO_MUTE */
@@ -190,142 +181,153 @@ static void radio_bits_set(struct radio_device *dev, u32 data)
190 msleep(125); 181 msleep(125);
191} 182}
192 183
193static inline int radio_function(struct inode *inode, struct file *file, 184static int vidioc_querycap(struct file *file, void *priv,
194 unsigned int cmd, void *arg) 185 struct v4l2_capability *v)
186{
187 strlcpy(v->driver, "radio-maestro", sizeof(v->driver));
188 strlcpy(v->card, "Maestro Radio", sizeof(v->card));
189 sprintf(v->bus_info, "PCI");
190 v->version = RADIO_VERSION;
191 v->capabilities = V4L2_CAP_TUNER;
192 return 0;
193}
194
195static int vidioc_g_tuner(struct file *file, void *priv,
196 struct v4l2_tuner *v)
195{ 197{
196 struct video_device *dev = video_devdata(file); 198 struct video_device *dev = video_devdata(file);
197 struct radio_device *card = video_get_drvdata(dev); 199 struct radio_device *card = video_get_drvdata(dev);
198 200
199 switch (cmd) { 201 if (v->index > 0)
200 case VIDIOC_QUERYCAP: 202 return -EINVAL;
201 { 203
202 struct v4l2_capability *v = arg; 204 (void)radio_bits_get(card);
203 memset(v,0,sizeof(*v)); 205
204 strlcpy(v->driver, "radio-maestro", sizeof (v->driver)); 206 strcpy(v->name, "FM");
205 strlcpy(v->card, "Maestro Radio", sizeof (v->card)); 207 v->type = V4L2_TUNER_RADIO;
206 sprintf(v->bus_info,"PCI"); 208 v->rangelow = FREQ_LO;
207 v->version = RADIO_VERSION; 209 v->rangehigh = FREQ_HI;
208 v->capabilities = V4L2_CAP_TUNER; 210 v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
209 211 v->capability = V4L2_TUNER_CAP_LOW;
210 return 0; 212 if(card->stereo)
211 } 213 v->audmode = V4L2_TUNER_MODE_STEREO;
212 case VIDIOC_G_TUNER: 214 else
213 { 215 v->audmode = V4L2_TUNER_MODE_MONO;
214 struct v4l2_tuner *v = arg; 216 v->signal = card->tuned;
215 217 return 0;
216 if (v->index > 0) 218}
217 return -EINVAL;
218
219 (void)radio_bits_get(card);
220 219
221 memset(v,0,sizeof(*v)); 220static int vidioc_s_tuner(struct file *file, void *priv,
222 strcpy(v->name, "FM"); 221 struct v4l2_tuner *v)
223 v->type = V4L2_TUNER_RADIO; 222{
223 if (v->index > 0)
224 return -EINVAL;
225 return 0;
226}
224 227
225 v->rangelow = FREQ_LO; 228static int vidioc_s_frequency(struct file *file, void *priv,
226 v->rangehigh = FREQ_HI; 229 struct v4l2_frequency *f)
227 v->rxsubchans =V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO; 230{
228 v->capability=V4L2_TUNER_CAP_LOW; 231 struct video_device *dev = video_devdata(file);
229 if(card->stereo) 232 struct radio_device *card = video_get_drvdata(dev);
230 v->audmode = V4L2_TUNER_MODE_STEREO;
231 else
232 v->audmode = V4L2_TUNER_MODE_MONO;
233 v->signal=card->tuned;
234 233
235 return 0; 234 if (f->frequency < FREQ_LO || f->frequency > FREQ_HI)
236 } 235 return -EINVAL;
237 case VIDIOC_S_TUNER: 236 radio_bits_set(card, FREQ2BITS(f->frequency));
238 { 237 return 0;
239 struct v4l2_tuner *v = arg; 238}
240 239
241 if (v->index > 0) 240static int vidioc_g_frequency(struct file *file, void *priv,
242 return -EINVAL; 241 struct v4l2_frequency *f)
242{
243 struct video_device *dev = video_devdata(file);
244 struct radio_device *card = video_get_drvdata(dev);
243 245
244 return 0; 246 f->type = V4L2_TUNER_RADIO;
245 } 247 f->frequency = BITS2FREQ(radio_bits_get(card));
246 case VIDIOC_S_FREQUENCY: 248 return 0;
247 { 249}
248 struct v4l2_frequency *f = arg;
249 250
250 if (f->frequency < FREQ_LO || f->frequency > FREQ_HI) 251static int vidioc_queryctrl(struct file *file, void *priv,
251 return -EINVAL; 252 struct v4l2_queryctrl *qc)
252 radio_bits_set(card, FREQ2BITS(f->frequency)); 253{
254 int i;
253 255
256 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
257 if (qc->id && qc->id == radio_qctrl[i].id) {
258 memcpy(qc, &(radio_qctrl[i]),
259 sizeof(*qc));
254 return 0; 260 return 0;
255 } 261 }
256 case VIDIOC_G_FREQUENCY: 262 }
257 { 263 return -EINVAL;
258 struct v4l2_frequency *f = arg; 264}
259 265
260 f->type = V4L2_TUNER_RADIO; 266static int vidioc_g_ctrl(struct file *file, void *priv,
261 f->frequency = BITS2FREQ(radio_bits_get(card)); 267 struct v4l2_control *ctrl)
268{
269 struct video_device *dev = video_devdata(file);
270 struct radio_device *card = video_get_drvdata(dev);
262 271
263 return 0; 272 switch (ctrl->id) {
264 } 273 case V4L2_CID_AUDIO_MUTE:
265 case VIDIOC_QUERYCTRL: 274 ctrl->value = card->muted;
266 { 275 return 0;
267 struct v4l2_queryctrl *qc = arg;
268 int i;
269
270 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
271 if (qc->id && qc->id == radio_qctrl[i].id) {
272 memcpy(qc, &(radio_qctrl[i]),
273 sizeof(*qc));
274 return (0);
275 }
276 }
277 return -EINVAL;
278 }
279 case VIDIOC_G_CTRL:
280 {
281 struct v4l2_control *ctrl= arg;
282
283 switch (ctrl->id) {
284 case V4L2_CID_AUDIO_MUTE:
285 ctrl->value=card->muted;
286 return (0);
287 }
288 return -EINVAL;
289 }
290 case VIDIOC_S_CTRL:
291 {
292 struct v4l2_control *ctrl= arg;
293
294 switch (ctrl->id) {
295 case V4L2_CID_AUDIO_MUTE:
296 {
297 register u16 io = card->io;
298 register u16 omask = inw(io + IO_MASK);
299 outw(~STR_WREN, io + IO_MASK);
300 outw((card->muted = ctrl->value ) ?
301 STR_WREN : 0, io);
302 udelay(4);
303 outw(omask, io + IO_MASK);
304 msleep(125);
305
306 return (0);
307 }
308 }
309 return -EINVAL;
310 }
311 default:
312 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
313 radio_function);
314 } 276 }
277 return -EINVAL;
315} 278}
316 279
317static int radio_ioctl(struct inode *inode, struct file *file, 280static int vidioc_s_ctrl(struct file *file, void *priv,
318 unsigned int cmd, unsigned long arg) 281 struct v4l2_control *ctrl)
319{ 282{
320 struct video_device *dev = video_devdata(file); 283 struct video_device *dev = video_devdata(file);
321 struct radio_device *card = video_get_drvdata(dev); 284 struct radio_device *card = video_get_drvdata(dev);
322 int ret; 285 register u16 io = card->io;
286 register u16 omask = inw(io + IO_MASK);
287
288 switch (ctrl->id) {
289 case V4L2_CID_AUDIO_MUTE:
290 outw(~STR_WREN, io + IO_MASK);
291 outw((card->muted = ctrl->value ) ?
292 STR_WREN : 0, io);
293 udelay(4);
294 outw(omask, io + IO_MASK);
295 msleep(125);
296 return 0;
297 }
298 return -EINVAL;
299}
323 300
324 mutex_lock(&card->lock); 301static int vidioc_g_audio(struct file *file, void *priv,
325 ret = video_usercopy(inode, file, cmd, arg, radio_function); 302 struct v4l2_audio *a)
326 mutex_unlock(&card->lock); 303{
304 if (a->index > 1)
305 return -EINVAL;
327 306
328 return ret; 307 strcpy(a->name, "Radio");
308 a->capability = V4L2_AUDCAP_STEREO;
309 return 0;
310}
311
312static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
313{
314 *i = 0;
315 return 0;
316}
317
318static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
319{
320 if (i != 0)
321 return -EINVAL;
322 return 0;
323}
324
325static int vidioc_s_audio(struct file *file, void *priv,
326 struct v4l2_audio *a)
327{
328 if (a->index != 0)
329 return -EINVAL;
330 return 0;
329} 331}
330 332
331static u16 __devinit radio_power_on(struct radio_device *dev) 333static u16 __devinit radio_power_on(struct radio_device *dev)
@@ -352,6 +354,24 @@ static u16 __devinit radio_power_on(struct radio_device *dev)
352 return (ofreq == radio_bits_get(dev)); 354 return (ofreq == radio_bits_get(dev));
353} 355}
354 356
357static struct video_device maestro_radio = {
358 .name = "Maestro radio",
359 .type = VID_TYPE_TUNER,
360 .fops = &maestro_fops,
361 .vidioc_querycap = vidioc_querycap,
362 .vidioc_g_tuner = vidioc_g_tuner,
363 .vidioc_s_tuner = vidioc_s_tuner,
364 .vidioc_g_audio = vidioc_g_audio,
365 .vidioc_s_audio = vidioc_s_audio,
366 .vidioc_g_input = vidioc_g_input,
367 .vidioc_s_input = vidioc_s_input,
368 .vidioc_g_frequency = vidioc_g_frequency,
369 .vidioc_s_frequency = vidioc_s_frequency,
370 .vidioc_queryctrl = vidioc_queryctrl,
371 .vidioc_g_ctrl = vidioc_g_ctrl,
372 .vidioc_s_ctrl = vidioc_s_ctrl,
373};
374
355static int __devinit maestro_probe(struct pci_dev *pdev, 375static int __devinit maestro_probe(struct pci_dev *pdev,
356 const struct pci_device_id *ent) 376 const struct pci_device_id *ent)
357{ 377{
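
One detail specific to the radio-maestro.c hunks above: the old code forward-declared radio_ioctl() so that the maestro_radio definition could sit near the top of the file, while the new code instead moves that definition below the vidioc_* handlers it now references, since a static initializer may only name functions that have already been declared. The alternative, left out of the patch, would have been a block of forward declarations, roughly as follows (hypothetical names, reusing the headers from the sketch earlier; this fragment only illustrates declaration order):

static int vidioc_querycap(struct file *file, void *priv,
				struct v4l2_capability *v);	/* forward declaration */

static struct video_device foo_radio = {
	.name			= "foo radio",
	.vidioc_querycap	= vidioc_querycap,	/* legal: declared above */
};

static int vidioc_querycap(struct file *file, void *priv,
				struct v4l2_capability *v)
{
	/* ...fill in driver/card/bus_info/version as in the hunks above... */
	return 0;
}
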
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index f6683872251e..9b493b3298cd 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -122,6 +122,26 @@ static int rt_setfreq(struct rt_device *dev, unsigned long freq)
122 return 0; 122 return 0;
123} 123}
124 124
125static int vidioc_querycap(struct file *file, void *priv,
126 struct v4l2_capability *v)
127{
128 strlcpy(v->driver, "radio-rtrack2", sizeof(v->driver));
129 strlcpy(v->card, "RadioTrack II", sizeof(v->card));
130 sprintf(v->bus_info, "ISA");
131 v->version = RADIO_VERSION;
132 v->capabilities = V4L2_CAP_TUNER;
133 return 0;
134}
135
136static int vidioc_s_tuner(struct file *file, void *priv,
137 struct v4l2_tuner *v)
138{
139 if (v->index > 0)
140 return -EINVAL;
141
142 return 0;
143}
144
125static int rt_getsigstr(struct rt_device *dev) 145static int rt_getsigstr(struct rt_device *dev)
126{ 146{
127 if (inb(io) & 2) /* bit set = no signal present */ 147 if (inb(io) & 2) /* bit set = no signal present */
@@ -129,135 +149,136 @@ static int rt_getsigstr(struct rt_device *dev)
129 return 1; /* signal present */ 149 return 1; /* signal present */
130} 150}
131 151
132static int rt_do_ioctl(struct inode *inode, struct file *file, 152static int vidioc_g_tuner(struct file *file, void *priv,
133 unsigned int cmd, void *arg) 153 struct v4l2_tuner *v)
134{ 154{
135 struct video_device *dev = video_devdata(file); 155 struct video_device *dev = video_devdata(file);
136 struct rt_device *rt=dev->priv; 156 struct rt_device *rt = dev->priv;
137 157
138 switch(cmd) 158 if (v->index > 0)
139 { 159 return -EINVAL;
140 case VIDIOC_QUERYCAP:
141 {
142 struct v4l2_capability *v = arg;
143 memset(v,0,sizeof(*v));
144 strlcpy(v->driver, "radio-rtrack2", sizeof (v->driver));
145 strlcpy(v->card, "RadioTrack II", sizeof (v->card));
146 sprintf(v->bus_info,"ISA");
147 v->version = RADIO_VERSION;
148 v->capabilities = V4L2_CAP_TUNER;
149 160
150 return 0; 161 strcpy(v->name, "FM");
151 } 162 v->type = V4L2_TUNER_RADIO;
152 case VIDIOC_G_TUNER: 163 v->rangelow = (88*16000);
153 { 164 v->rangehigh = (108*16000);
154 struct v4l2_tuner *v = arg; 165 v->rxsubchans = V4L2_TUNER_SUB_MONO;
166 v->capability = V4L2_TUNER_CAP_LOW;
167 v->audmode = V4L2_TUNER_MODE_MONO;
168 v->signal = 0xFFFF*rt_getsigstr(rt);
169 return 0;
170}
155 171
156 if (v->index > 0) 172static int vidioc_s_frequency(struct file *file, void *priv,
157 return -EINVAL; 173 struct v4l2_frequency *f)
174{
175 struct video_device *dev = video_devdata(file);
176 struct rt_device *rt = dev->priv;
158 177
159 memset(v,0,sizeof(*v)); 178 rt->curfreq = f->frequency;
160 strcpy(v->name, "FM"); 179 rt_setfreq(rt, rt->curfreq);
161 v->type = V4L2_TUNER_RADIO; 180 return 0;
181}
162 182
163 v->rangelow=(88*16000); 183static int vidioc_g_frequency(struct file *file, void *priv,
164 v->rangehigh=(108*16000); 184 struct v4l2_frequency *f)
165 v->rxsubchans =V4L2_TUNER_SUB_MONO; 185{
166 v->capability=V4L2_TUNER_CAP_LOW; 186 struct video_device *dev = video_devdata(file);
167 v->audmode = V4L2_TUNER_MODE_MONO; 187 struct rt_device *rt = dev->priv;
168 v->signal=0xFFFF*rt_getsigstr(rt);
169 188
170 return 0; 189 f->type = V4L2_TUNER_RADIO;
171 } 190 f->frequency = rt->curfreq;
172 case VIDIOC_S_TUNER: 191 return 0;
173 { 192}
174 struct v4l2_tuner *v = arg;
175 193
176 if (v->index > 0) 194static int vidioc_queryctrl(struct file *file, void *priv,
177 return -EINVAL; 195 struct v4l2_queryctrl *qc)
196{
197 int i;
178 198
199 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
200 if (qc->id && qc->id == radio_qctrl[i].id) {
201 memcpy(qc, &(radio_qctrl[i]),
202 sizeof(*qc));
179 return 0; 203 return 0;
180 } 204 }
181 case VIDIOC_S_FREQUENCY: 205 }
182 { 206 return -EINVAL;
183 struct v4l2_frequency *f = arg; 207}
184 208
185 rt->curfreq = f->frequency; 209static int vidioc_g_ctrl(struct file *file, void *priv,
186 rt_setfreq(rt, rt->curfreq); 210 struct v4l2_control *ctrl)
187 return 0; 211{
188 } 212 struct video_device *dev = video_devdata(file);
189 case VIDIOC_G_FREQUENCY: 213 struct rt_device *rt = dev->priv;
190 {
191 struct v4l2_frequency *f = arg;
192 214
193 f->type = V4L2_TUNER_RADIO; 215 switch (ctrl->id) {
194 f->frequency = rt->curfreq; 216 case V4L2_CID_AUDIO_MUTE:
217 ctrl->value = rt->muted;
218 return 0;
219 case V4L2_CID_AUDIO_VOLUME:
220 if (rt->muted)
221 ctrl->value = 0;
222 else
223 ctrl->value = 65535;
224 return 0;
225 }
226 return -EINVAL;
227}
195 228
196 return 0; 229static int vidioc_s_ctrl(struct file *file, void *priv,
197 } 230 struct v4l2_control *ctrl)
198 case VIDIOC_QUERYCTRL: 231{
199 { 232 struct video_device *dev = video_devdata(file);
200 struct v4l2_queryctrl *qc = arg; 233 struct rt_device *rt = dev->priv;
201 int i; 234
202 235 switch (ctrl->id) {
203 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 236 case V4L2_CID_AUDIO_MUTE:
204 if (qc->id && qc->id == radio_qctrl[i].id) { 237 if (ctrl->value)
205 memcpy(qc, &(radio_qctrl[i]), 238 rt_mute(rt);
206 sizeof(*qc)); 239 else
207 return (0); 240 rt_unmute(rt);
208 } 241 return 0;
209 } 242 case V4L2_CID_AUDIO_VOLUME:
210 return -EINVAL; 243 if (ctrl->value)
211 } 244 rt_unmute(rt);
212 case VIDIOC_G_CTRL: 245 else
213 { 246 rt_mute(rt);
214 struct v4l2_control *ctrl= arg; 247 return 0;
215
216 switch (ctrl->id) {
217 case V4L2_CID_AUDIO_MUTE:
218 ctrl->value=rt->muted;
219 return (0);
220 case V4L2_CID_AUDIO_VOLUME:
221 if (rt->muted)
222 ctrl->value=0;
223 else
224 ctrl->value=65535;
225 return (0);
226 }
227 return -EINVAL;
228 }
229 case VIDIOC_S_CTRL:
230 {
231 struct v4l2_control *ctrl= arg;
232
233 switch (ctrl->id) {
234 case V4L2_CID_AUDIO_MUTE:
235 if (ctrl->value) {
236 rt_mute(rt);
237 } else {
238 rt_unmute(rt);
239 }
240 return (0);
241 case V4L2_CID_AUDIO_VOLUME:
242 if (ctrl->value) {
243 rt_unmute(rt);
244 } else {
245 rt_mute(rt);
246 }
247 return (0);
248 }
249 return -EINVAL;
250 }
251 default:
252 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
253 rt_do_ioctl);
254 } 248 }
249 return -EINVAL;
255} 250}
256 251
257static int rt_ioctl(struct inode *inode, struct file *file, 252static int vidioc_g_audio(struct file *file, void *priv,
258 unsigned int cmd, unsigned long arg) 253 struct v4l2_audio *a)
259{ 254{
260 return video_usercopy(inode, file, cmd, arg, rt_do_ioctl); 255 if (a->index > 1)
256 return -EINVAL;
257
258 strcpy(a->name, "Radio");
259 a->capability = V4L2_AUDCAP_STEREO;
260 return 0;
261}
262
263static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
264{
265 *i = 0;
266 return 0;
267}
268
269static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
270{
271 if (i != 0)
272 return -EINVAL;
273 return 0;
274}
275
276static int vidioc_s_audio(struct file *file, void *priv,
277 struct v4l2_audio *a)
278{
279 if (a->index != 0)
280 return -EINVAL;
281 return 0;
261} 282}
262 283
263static struct rt_device rtrack2_unit; 284static struct rt_device rtrack2_unit;
@@ -266,7 +287,7 @@ static const struct file_operations rtrack2_fops = {
266 .owner = THIS_MODULE, 287 .owner = THIS_MODULE,
267 .open = video_exclusive_open, 288 .open = video_exclusive_open,
268 .release = video_exclusive_release, 289 .release = video_exclusive_release,
269 .ioctl = rt_ioctl, 290 .ioctl = video_ioctl2,
270 .compat_ioctl = v4l_compat_ioctl32, 291 .compat_ioctl = v4l_compat_ioctl32,
271 .llseek = no_llseek, 292 .llseek = no_llseek,
272}; 293};
@@ -278,6 +299,18 @@ static struct video_device rtrack2_radio=
278 .type = VID_TYPE_TUNER, 299 .type = VID_TYPE_TUNER,
279 .hardware = 0, 300 .hardware = 0,
280 .fops = &rtrack2_fops, 301 .fops = &rtrack2_fops,
302 .vidioc_querycap = vidioc_querycap,
303 .vidioc_g_tuner = vidioc_g_tuner,
304 .vidioc_s_tuner = vidioc_s_tuner,
305 .vidioc_g_frequency = vidioc_g_frequency,
306 .vidioc_s_frequency = vidioc_s_frequency,
307 .vidioc_queryctrl = vidioc_queryctrl,
308 .vidioc_g_ctrl = vidioc_g_ctrl,
309 .vidioc_s_ctrl = vidioc_s_ctrl,
310 .vidioc_g_audio = vidioc_g_audio,
311 .vidioc_s_audio = vidioc_s_audio,
312 .vidioc_g_input = vidioc_g_input,
313 .vidioc_s_input = vidioc_s_input,
281}; 314};
282 315
283static int __init rtrack2_init(void) 316static int __init rtrack2_init(void)
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index f4619e4dda4f..dc33f19c0e2c 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -130,137 +130,155 @@ static inline int fmi_getsigstr(struct fmi_device *dev)
130 return (res & 2) ? 0 : 0xFFFF; 130 return (res & 2) ? 0 : 0xFFFF;
131} 131}
132 132
133static int fmi_do_ioctl(struct inode *inode, struct file *file, 133static int vidioc_querycap(struct file *file, void *priv,
134 unsigned int cmd, void *arg) 134 struct v4l2_capability *v)
135{ 135{
136 strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
137 strlcpy(v->card, "SF16-FMx radio", sizeof(v->card));
138 sprintf(v->bus_info, "ISA");
139 v->version = RADIO_VERSION;
140 v->capabilities = V4L2_CAP_TUNER;
141 return 0;
142}
143
144static int vidioc_g_tuner(struct file *file, void *priv,
145 struct v4l2_tuner *v)
146{
147 int mult;
136 struct video_device *dev = video_devdata(file); 148 struct video_device *dev = video_devdata(file);
137 struct fmi_device *fmi=dev->priv; 149 struct fmi_device *fmi = dev->priv;
138 150
139 switch(cmd) 151 if (v->index > 0)
140 { 152 return -EINVAL;
141 case VIDIOC_QUERYCAP:
142 {
143 struct v4l2_capability *v = arg;
144 memset(v,0,sizeof(*v));
145 strlcpy(v->driver, "radio-sf16fmi", sizeof (v->driver));
146 strlcpy(v->card, "SF16-FMx radio", sizeof (v->card));
147 sprintf(v->bus_info,"ISA");
148 v->version = RADIO_VERSION;
149 v->capabilities = V4L2_CAP_TUNER;
150 153
151 return 0; 154 strcpy(v->name, "FM");
152 } 155 v->type = V4L2_TUNER_RADIO;
153 case VIDIOC_G_TUNER: 156 mult = (fmi->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000;
154 { 157 v->rangelow = RSF16_MINFREQ/mult;
155 struct v4l2_tuner *v = arg; 158 v->rangehigh = RSF16_MAXFREQ/mult;
156 int mult; 159 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_MODE_STEREO;
157 160 v->capability = fmi->flags&V4L2_TUNER_CAP_LOW;
158 if (v->index > 0) 161 v->audmode = V4L2_TUNER_MODE_STEREO;
159 return -EINVAL; 162 v->signal = fmi_getsigstr(fmi);
160 163 return 0;
161 memset(v,0,sizeof(*v)); 164}
162 strcpy(v->name, "FM");
163 v->type = V4L2_TUNER_RADIO;
164
165 mult = (fmi->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000;
166 v->rangelow = RSF16_MINFREQ/mult;
167 v->rangehigh = RSF16_MAXFREQ/mult;
168 v->rxsubchans =V4L2_TUNER_SUB_MONO | V4L2_TUNER_MODE_STEREO;
169 v->capability=fmi->flags&V4L2_TUNER_CAP_LOW;
170 v->audmode = V4L2_TUNER_MODE_STEREO;
171 v->signal = fmi_getsigstr(fmi);
172 165
173 return 0; 166static int vidioc_s_tuner(struct file *file, void *priv,
174 } 167 struct v4l2_tuner *v)
175 case VIDIOC_S_TUNER: 168{
176 { 169 if (v->index > 0)
177 struct v4l2_tuner *v = arg; 170 return -EINVAL;
171 return 0;
172}
178 173
179 if (v->index > 0) 174static int vidioc_s_frequency(struct file *file, void *priv,
180 return -EINVAL; 175 struct v4l2_frequency *f)
176{
177 struct video_device *dev = video_devdata(file);
178 struct fmi_device *fmi = dev->priv;
181 179
182 return 0; 180 if (!(fmi->flags & V4L2_TUNER_CAP_LOW))
183 } 181 f->frequency *= 1000;
184 case VIDIOC_S_FREQUENCY: 182 if (f->frequency < RSF16_MINFREQ ||
185 { 183 f->frequency > RSF16_MAXFREQ )
186 struct v4l2_frequency *f = arg; 184 return -EINVAL;
187 185 /*rounding in steps of 800 to match the freq
188 if (!(fmi->flags & V4L2_TUNER_CAP_LOW)) 186 that will be used */
189 f->frequency *= 1000; 187 fmi->curfreq = (f->frequency/800)*800;
190 if (f->frequency < RSF16_MINFREQ || 188 fmi_setfreq(fmi);
191 f->frequency > RSF16_MAXFREQ ) 189 return 0;
192 return -EINVAL; 190}
193 /*rounding in steps of 800 to match the freq
194 that will be used */
195 fmi->curfreq = (f->frequency/800)*800;
196 fmi_setfreq(fmi);
197 191
198 return 0; 192static int vidioc_g_frequency(struct file *file, void *priv,
199 } 193 struct v4l2_frequency *f)
200 case VIDIOC_G_FREQUENCY: 194{
201 { 195 struct video_device *dev = video_devdata(file);
202 struct v4l2_frequency *f = arg; 196 struct fmi_device *fmi = dev->priv;
203 197
204 f->type = V4L2_TUNER_RADIO; 198 f->type = V4L2_TUNER_RADIO;
205 f->frequency = fmi->curfreq; 199 f->frequency = fmi->curfreq;
206 if (!(fmi->flags & V4L2_TUNER_CAP_LOW)) 200 if (!(fmi->flags & V4L2_TUNER_CAP_LOW))
207 f->frequency /= 1000; 201 f->frequency /= 1000;
202 return 0;
203}
204
205static int vidioc_queryctrl(struct file *file, void *priv,
206 struct v4l2_queryctrl *qc)
207{
208 int i;
208 209
210 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
211 if (qc->id && qc->id == radio_qctrl[i].id) {
212 memcpy(qc, &(radio_qctrl[i]),
213 sizeof(*qc));
209 return 0; 214 return 0;
210 } 215 }
211 case VIDIOC_QUERYCTRL:
212 {
213 struct v4l2_queryctrl *qc = arg;
214 int i;
215
216 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
217 if (qc->id && qc->id == radio_qctrl[i].id) {
218 memcpy(qc, &(radio_qctrl[i]),
219 sizeof(*qc));
220 return (0);
221 }
222 }
223 return -EINVAL;
224 }
225 case VIDIOC_G_CTRL:
226 {
227 struct v4l2_control *ctrl= arg;
228
229 switch (ctrl->id) {
230 case V4L2_CID_AUDIO_MUTE:
231 ctrl->value=fmi->curvol;
232 return (0);
233 }
234 return -EINVAL;
235 }
236 case VIDIOC_S_CTRL:
237 {
238 struct v4l2_control *ctrl= arg;
239
240 switch (ctrl->id) {
241 case V4L2_CID_AUDIO_MUTE:
242 {
243 if (ctrl->value)
244 fmi_mute(fmi->port);
245 else
246 fmi_unmute(fmi->port);
247
248 fmi->curvol=ctrl->value;
249 return (0);
250 }
251 }
252 return -EINVAL;
253 }
254 default:
255 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
256 fmi_do_ioctl);
257 } 216 }
217 return -EINVAL;
218}
219
220static int vidioc_g_ctrl(struct file *file, void *priv,
221 struct v4l2_control *ctrl)
222{
223 struct video_device *dev = video_devdata(file);
224 struct fmi_device *fmi = dev->priv;
225
226 switch (ctrl->id) {
227 case V4L2_CID_AUDIO_MUTE:
228 ctrl->value = fmi->curvol;
229 return 0;
230 }
231 return -EINVAL;
232}
233
234static int vidioc_s_ctrl(struct file *file, void *priv,
235 struct v4l2_control *ctrl)
236{
237 struct video_device *dev = video_devdata(file);
238 struct fmi_device *fmi = dev->priv;
239
240 switch (ctrl->id) {
241 case V4L2_CID_AUDIO_MUTE:
242 if (ctrl->value)
243 fmi_mute(fmi->port);
244 else
245 fmi_unmute(fmi->port);
246 fmi->curvol = ctrl->value;
247 return 0;
248 }
249 return -EINVAL;
258} 250}
259 251
260static int fmi_ioctl(struct inode *inode, struct file *file, 252static int vidioc_g_audio(struct file *file, void *priv,
261 unsigned int cmd, unsigned long arg) 253 struct v4l2_audio *a)
262{ 254{
263 return video_usercopy(inode, file, cmd, arg, fmi_do_ioctl); 255 if (a->index > 1)
256 return -EINVAL;
257
258 strcpy(a->name, "Radio");
259 a->capability = V4L2_AUDCAP_STEREO;
260 return 0;
261}
262
263static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
264{
265 *i = 0;
266 return 0;
267}
268
269static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
270{
271 if (i != 0)
272 return -EINVAL;
273 return 0;
274}
275
276static int vidioc_s_audio(struct file *file, void *priv,
277 struct v4l2_audio *a)
278{
279 if (a->index != 0)
280 return -EINVAL;
281 return 0;
264} 282}
265 283
266static struct fmi_device fmi_unit; 284static struct fmi_device fmi_unit;
@@ -269,7 +287,7 @@ static const struct file_operations fmi_fops = {
269 .owner = THIS_MODULE, 287 .owner = THIS_MODULE,
270 .open = video_exclusive_open, 288 .open = video_exclusive_open,
271 .release = video_exclusive_release, 289 .release = video_exclusive_release,
272 .ioctl = fmi_ioctl, 290 .ioctl = video_ioctl2,
273 .compat_ioctl = v4l_compat_ioctl32, 291 .compat_ioctl = v4l_compat_ioctl32,
274 .llseek = no_llseek, 292 .llseek = no_llseek,
275}; 293};
@@ -281,6 +299,18 @@ static struct video_device fmi_radio=
281 .type = VID_TYPE_TUNER, 299 .type = VID_TYPE_TUNER,
282 .hardware = 0, 300 .hardware = 0,
283 .fops = &fmi_fops, 301 .fops = &fmi_fops,
302 .vidioc_querycap = vidioc_querycap,
303 .vidioc_g_tuner = vidioc_g_tuner,
304 .vidioc_s_tuner = vidioc_s_tuner,
305 .vidioc_g_audio = vidioc_g_audio,
306 .vidioc_s_audio = vidioc_s_audio,
307 .vidioc_g_input = vidioc_g_input,
308 .vidioc_s_input = vidioc_s_input,
309 .vidioc_g_frequency = vidioc_g_frequency,
310 .vidioc_s_frequency = vidioc_s_frequency,
311 .vidioc_queryctrl = vidioc_queryctrl,
312 .vidioc_g_ctrl = vidioc_g_ctrl,
313 .vidioc_s_ctrl = vidioc_s_ctrl,
284}; 314};
285 315
286/* ladis: this is my card. do any other types exist? */ 316
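
The SF16-FMI handlers above (and the SF16-FMR2 ones below) scale frequencies according to V4L2_TUNER_CAP_LOW: in the V4L2 API a tuner frequency is expressed in units of 62.5 kHz, or in units of 62.5 Hz when the tuner reports V4L2_TUNER_CAP_LOW, which is why vidioc_s_frequency() multiplies by 1000 on the way in and vidioc_g_frequency() divides by 1000 on the way out. A small worked example of that arithmetic (the station value is chosen purely for illustration):

#include <stdio.h>

int main(void)
{
	/* 99.8 MHz expressed in the two V4L2 tuner unit systems */
	unsigned long fine   = 998UL * 16000 / 10;	/* 62.5 Hz units  -> 1596800 */
	unsigned long coarse = fine / 1000;		/* 62.5 kHz units -> 1596    */

	/* Without V4L2_TUNER_CAP_LOW the driver rescales the coarse value
	 * before range-checking it and programming the tuner. */
	unsigned long internal = coarse * 1000;		/* -> 1596000 */

	printf("%lu %lu %lu\n", fine, coarse, internal);
	return 0;
}
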
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index b96fafe1f9da..e6c125def5cb 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -226,186 +226,204 @@ static int fmr2_setvolume(struct fmr2_device *dev)
226 return 0; 226 return 0;
227} 227}
228 228
229static int fmr2_do_ioctl(struct inode *inode, struct file *file, 229static int vidioc_querycap(struct file *file, void *priv,
230 unsigned int cmd, void *arg) 230 struct v4l2_capability *v)
231{ 231{
232 strlcpy(v->driver, "radio-sf16fmr2", sizeof(v->driver));
233 strlcpy(v->card, "SF16-FMR2 radio", sizeof(v->card));
234 sprintf(v->bus_info, "ISA");
235 v->version = RADIO_VERSION;
236 v->capabilities = V4L2_CAP_TUNER;
237 return 0;
238}
239
240static int vidioc_g_tuner(struct file *file, void *priv,
241 struct v4l2_tuner *v)
242{
243 int mult;
232 struct video_device *dev = video_devdata(file); 244 struct video_device *dev = video_devdata(file);
233 struct fmr2_device *fmr2 = dev->priv; 245 struct fmr2_device *fmr2 = dev->priv;
234 debug_print((KERN_DEBUG "freq %ld flags %d vol %d mute %d "
235 "stereo %d type %d\n",
236 fmr2->curfreq, fmr2->flags, fmr2->curvol, fmr2->mute,
237 fmr2->stereo, fmr2->card_type));
238 246
239 switch(cmd) 247 if (v->index > 0)
240 { 248 return -EINVAL;
241 case VIDIOC_QUERYCAP:
242 {
243 struct v4l2_capability *v = arg;
244 memset(v,0,sizeof(*v));
245 strlcpy(v->driver, "radio-sf16fmr2", sizeof (v->driver));
246 strlcpy(v->card, "SF16-FMR2 radio", sizeof (v->card));
247 sprintf(v->bus_info,"ISA");
248 v->version = RADIO_VERSION;
249 v->capabilities = V4L2_CAP_TUNER;
250 249
251 return 0; 250 strcpy(v->name, "FM");
252 } 251 v->type = V4L2_TUNER_RADIO;
253 case VIDIOC_G_TUNER:
254 {
255 struct v4l2_tuner *v = arg;
256 int mult;
257
258 if (v->index > 0)
259 return -EINVAL;
260
261 memset(v,0,sizeof(*v));
262 strcpy(v->name, "FM");
263 v->type = V4L2_TUNER_RADIO;
264
265 mult = (fmr2->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000;
266 v->rangelow = RSF16_MINFREQ/mult;
267 v->rangehigh = RSF16_MAXFREQ/mult;
268 v->rxsubchans =V4L2_TUNER_SUB_MONO | V4L2_TUNER_MODE_STEREO;
269 v->capability=fmr2->flags&V4L2_TUNER_CAP_LOW;
270
271 v->audmode = fmr2->stereo ? V4L2_TUNER_MODE_STEREO:
272 V4L2_TUNER_MODE_MONO;
273 mutex_lock(&lock);
274 v->signal = fmr2_getsigstr(fmr2);
275 mutex_unlock(&lock);
276 252
277 return 0; 253 mult = (fmr2->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000;
278 } 254 v->rangelow = RSF16_MINFREQ/mult;
279 case VIDIOC_S_TUNER: 255 v->rangehigh = RSF16_MAXFREQ/mult;
280 { 256 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_MODE_STEREO;
281 struct v4l2_tuner *v = arg; 257 v->capability = fmr2->flags&V4L2_TUNER_CAP_LOW;
258 v->audmode = fmr2->stereo ? V4L2_TUNER_MODE_STEREO:
259 V4L2_TUNER_MODE_MONO;
260 mutex_lock(&lock);
261 v->signal = fmr2_getsigstr(fmr2);
262 mutex_unlock(&lock);
263 return 0;
264}
282 265
283 if (v->index > 0) 266static int vidioc_s_tuner(struct file *file, void *priv,
284 return -EINVAL; 267 struct v4l2_tuner *v)
268{
269 if (v->index > 0)
270 return -EINVAL;
271 return 0;
272}
285 273
286 return 0; 274static int vidioc_s_frequency(struct file *file, void *priv,
287 } 275 struct v4l2_frequency *f)
288 case VIDIOC_S_FREQUENCY: 276{
289 { 277 struct video_device *dev = video_devdata(file);
290 struct v4l2_frequency *f = arg; 278 struct fmr2_device *fmr2 = dev->priv;
291
292 if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
293 f->frequency *= 1000;
294 if (f->frequency < RSF16_MINFREQ ||
295 f->frequency > RSF16_MAXFREQ )
296 return -EINVAL;
297 /*rounding in steps of 200 to match the freq
298 that will be used */
299 fmr2->curfreq = (f->frequency/200)*200;
300
301 /* set card freq (if not muted) */
302 if (fmr2->curvol && !fmr2->mute)
303 {
304 mutex_lock(&lock);
305 fmr2_setfreq(fmr2);
306 mutex_unlock(&lock);
307 }
308 279
309 return 0; 280 if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
310 } 281 f->frequency *= 1000;
311 case VIDIOC_G_FREQUENCY: 282 if (f->frequency < RSF16_MINFREQ ||
312 { 283 f->frequency > RSF16_MAXFREQ )
313 struct v4l2_frequency *f = arg; 284 return -EINVAL;
285 /*rounding in steps of 200 to match th freq
286 that will be used */
287 fmr2->curfreq = (f->frequency/200)*200;
288
289 /* set card freq (if not muted) */
290 if (fmr2->curvol && !fmr2->mute) {
291 mutex_lock(&lock);
292 fmr2_setfreq(fmr2);
293 mutex_unlock(&lock);
294 }
295 return 0;
296}
297
298static int vidioc_g_frequency(struct file *file, void *priv,
299 struct v4l2_frequency *f)
300{
301 struct video_device *dev = video_devdata(file);
302 struct fmr2_device *fmr2 = dev->priv;
314 303
315 f->type = V4L2_TUNER_RADIO; 304 f->type = V4L2_TUNER_RADIO;
316 f->frequency = fmr2->curfreq; 305 f->frequency = fmr2->curfreq;
317 if (!(fmr2->flags & V4L2_TUNER_CAP_LOW)) 306 if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
318 f->frequency /= 1000; 307 f->frequency /= 1000;
308 return 0;
309}
319 310
311static int vidioc_queryctrl(struct file *file, void *priv,
312 struct v4l2_queryctrl *qc)
313{
314 int i;
315 struct video_device *dev = video_devdata(file);
316 struct fmr2_device *fmr2 = dev->priv;
317
318 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
319 if ((fmr2->card_type != 11)
320 && V4L2_CID_AUDIO_VOLUME)
321 radio_qctrl[i].step = 65535;
322 if (qc->id && qc->id == radio_qctrl[i].id) {
323 memcpy(qc, &(radio_qctrl[i]),
324 sizeof(*qc));
320 return 0; 325 return 0;
321 } 326 }
322 case VIDIOC_QUERYCTRL: 327 }
323 { 328 return -EINVAL;
324 struct v4l2_queryctrl *qc = arg; 329}
325 int i; 330
326 331static int vidioc_g_ctrl(struct file *file, void *priv,
327 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 332 struct v4l2_control *ctrl)
328 if ((fmr2->card_type != 11) 333{
329 && V4L2_CID_AUDIO_VOLUME) 334 struct video_device *dev = video_devdata(file);
330 radio_qctrl[i].step=65535; 335 struct fmr2_device *fmr2 = dev->priv;
331 if (qc->id && qc->id == radio_qctrl[i].id) { 336
332 memcpy(qc, &(radio_qctrl[i]), 337 switch (ctrl->id) {
333 sizeof(*qc)); 338 case V4L2_CID_AUDIO_MUTE:
334 return (0); 339 ctrl->value = fmr2->mute;
335 } 340 return 0;
336 } 341 case V4L2_CID_AUDIO_VOLUME:
337 return -EINVAL; 342 ctrl->value = fmr2->curvol;
343 return 0;
344 }
345 return -EINVAL;
346}
347
348static int vidioc_s_ctrl(struct file *file, void *priv,
349 struct v4l2_control *ctrl)
350{
351 struct video_device *dev = video_devdata(file);
352 struct fmr2_device *fmr2 = dev->priv;
353
354 switch (ctrl->id) {
355 case V4L2_CID_AUDIO_MUTE:
356 fmr2->mute = ctrl->value;
357 if (fmr2->card_type != 11) {
358 if (!fmr2->mute)
359 fmr2->curvol = 65535;
360 else
361 fmr2->curvol = 0;
338 } 362 }
339 case VIDIOC_G_CTRL: 363 break;
340 { 364 case V4L2_CID_AUDIO_VOLUME:
341 struct v4l2_control *ctrl= arg; 365 fmr2->curvol = ctrl->value;
342 366 if (fmr2->card_type != 11) {
343 switch (ctrl->id) { 367 if (fmr2->curvol) {
344 case V4L2_CID_AUDIO_MUTE: 368 fmr2->curvol = 65535;
345 ctrl->value=fmr2->mute; 369 fmr2->mute = 0;
346 return (0); 370 } else {
347 case V4L2_CID_AUDIO_VOLUME: 371 fmr2->curvol = 0;
348 ctrl->value=fmr2->curvol; 372 fmr2->mute = 1;
349 return (0);
350 } 373 }
351 return -EINVAL;
352 } 374 }
353 case VIDIOC_S_CTRL: 375 break;
354 { 376 default:
355 struct v4l2_control *ctrl= arg; 377 return -EINVAL;
356 378 }
357 switch (ctrl->id) { 379
358 case V4L2_CID_AUDIO_MUTE:
359 fmr2->mute=ctrl->value;
360 if (fmr2->card_type != 11) {
361 if (!fmr2->mute) {
362 fmr2->curvol = 65535;
363 } else {
364 fmr2->curvol = 0;
365 }
366 }
367 break;
368 case V4L2_CID_AUDIO_VOLUME:
369 fmr2->curvol = ctrl->value;
370 if (fmr2->card_type != 11) {
371 if (fmr2->curvol) {
372 fmr2->curvol = 65535;
373 fmr2->mute = 0;
374 } else {
375 fmr2->curvol = 0;
376 fmr2->mute = 1;
377 }
378 }
379 break;
380 default:
381 return -EINVAL;
382 }
383#ifdef DEBUG 380#ifdef DEBUG
384 if (fmr2->curvol && !fmr2->mute) 381 if (fmr2->curvol && !fmr2->mute)
385 printk(KERN_DEBUG "unmute\n"); 382 printk(KERN_DEBUG "unmute\n");
386 else 383 else
387 printk(KERN_DEBUG "mute\n"); 384 printk(KERN_DEBUG "mute\n");
388#endif 385#endif
389 mutex_lock(&lock);
390 if (fmr2->curvol && !fmr2->mute) {
391 fmr2_setvolume(fmr2);
392 fmr2_setfreq(fmr2);
393 } else
394 fmr2_mute(fmr2->port);
395 mutex_unlock(&lock);
396 return (0);
397 }
398 default:
399 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
400 fmr2_do_ioctl);
401 386
402 } 387 mutex_lock(&lock);
388 if (fmr2->curvol && !fmr2->mute) {
389 fmr2_setvolume(fmr2);
390 fmr2_setfreq(fmr2);
391 } else
392 fmr2_mute(fmr2->port);
393 mutex_unlock(&lock);
394 return 0;
403} 395}
404 396
405static int fmr2_ioctl(struct inode *inode, struct file *file, 397static int vidioc_g_audio(struct file *file, void *priv,
406 unsigned int cmd, unsigned long arg) 398 struct v4l2_audio *a)
407 { 399{
408 return video_usercopy(inode, file, cmd, arg, fmr2_do_ioctl); 400 if (a->index > 1)
401 return -EINVAL;
402
403 strcpy(a->name, "Radio");
404 a->capability = V4L2_AUDCAP_STEREO;
405 return 0;
406}
407
408static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
409{
410 *i = 0;
411 return 0;
412}
413
414static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
415{
416 if (i != 0)
417 return -EINVAL;
418 return 0;
419}
420
421static int vidioc_s_audio(struct file *file, void *priv,
422 struct v4l2_audio *a)
423{
424 if (a->index != 0)
425 return -EINVAL;
426 return 0;
409} 427}
410 428
411static struct fmr2_device fmr2_unit; 429static struct fmr2_device fmr2_unit;
@@ -414,7 +432,7 @@ static const struct file_operations fmr2_fops = {
414 .owner = THIS_MODULE, 432 .owner = THIS_MODULE,
415 .open = video_exclusive_open, 433 .open = video_exclusive_open,
416 .release = video_exclusive_release, 434 .release = video_exclusive_release,
417 .ioctl = fmr2_ioctl, 435 .ioctl = video_ioctl2,
418 .compat_ioctl = v4l_compat_ioctl32, 436 .compat_ioctl = v4l_compat_ioctl32,
419 .llseek = no_llseek, 437 .llseek = no_llseek,
420}; 438};
@@ -426,6 +444,18 @@ static struct video_device fmr2_radio=
426 . type = VID_TYPE_TUNER, 444 . type = VID_TYPE_TUNER,
427 .hardware = 0, 445 .hardware = 0,
428 .fops = &fmr2_fops, 446 .fops = &fmr2_fops,
447 .vidioc_querycap = vidioc_querycap,
448 .vidioc_g_tuner = vidioc_g_tuner,
449 .vidioc_s_tuner = vidioc_s_tuner,
450 .vidioc_g_audio = vidioc_g_audio,
451 .vidioc_s_audio = vidioc_s_audio,
452 .vidioc_g_input = vidioc_g_input,
453 .vidioc_s_input = vidioc_s_input,
454 .vidioc_g_frequency = vidioc_g_frequency,
455 .vidioc_s_frequency = vidioc_s_frequency,
456 .vidioc_queryctrl = vidioc_queryctrl,
457 .vidioc_g_ctrl = vidioc_g_ctrl,
458 .vidioc_s_ctrl = vidioc_s_ctrl,
429}; 459};
430 460
431static int __init fmr2_init(void) 461static int __init fmr2_init(void)
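
The radio-sf16fmr2 hunks above replace the single fmr2_do_ioctl()/video_usercopy() switch with one vidioc_* handler per ioctl, dispatched by the generic video_ioctl2() helper that is now wired into the file_operations and video_device tables; the same conversion is repeated for the other ISA radio drivers below. A minimal sketch of that wiring against the 2.6.21-era V4L2 core follows; the device name, state structure and abridged include list are illustrative, not taken from the driver:

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/videodev2.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-common.h>

	/* hypothetical per-card state, analogous to struct fmr2_device */
	struct myradio_device {
		int mute;
		int curvol;
	};

	static int vidioc_g_ctrl(struct file *file, void *priv,
					struct v4l2_control *ctrl)
	{
		struct video_device *dev = video_devdata(file);
		struct myradio_device *rad = dev->priv;

		switch (ctrl->id) {
		case V4L2_CID_AUDIO_MUTE:
			ctrl->value = rad->mute;
			return 0;
		case V4L2_CID_AUDIO_VOLUME:
			ctrl->value = rad->curvol;
			return 0;
		}
		return -EINVAL;			/* unknown control id */
	}

	static const struct file_operations myradio_fops = {
		.owner		= THIS_MODULE,
		.open		= video_exclusive_open,
		.release	= video_exclusive_release,
		.ioctl		= video_ioctl2,	/* generic dispatcher */
	};

	static struct video_device myradio_videodev = {
		.owner		= THIS_MODULE,
		.name		= "My FM radio",
		.fops		= &myradio_fops,
		/* video_ioctl2() calls back into these per-ioctl handlers */
		.vidioc_g_ctrl	= vidioc_g_ctrl,
		/* ...querycap/tuner/frequency/s_ctrl handlers as in the hunks... */
	};

Registration still happens through video_register_device() in each driver's *_init() routine (not shown in these hunks), so only the ioctl plumbing changes; hardware helpers such as fmr2_setvolume() and fmr2_mute() are reused unchanged.
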
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index d59a27accb84..e43acfd7e533 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -205,135 +205,152 @@ static int tt_getsigstr(struct tt_device *dev) /* TODO */
205 return 1; /* signal present */ 205 return 1; /* signal present */
206} 206}
207 207
208static int vidioc_querycap(struct file *file, void *priv,
209 struct v4l2_capability *v)
210{
211 strlcpy(v->driver, "radio-terratec", sizeof(v->driver));
212 strlcpy(v->card, "ActiveRadio", sizeof(v->card));
213 sprintf(v->bus_info, "ISA");
214 v->version = RADIO_VERSION;
215 v->capabilities = V4L2_CAP_TUNER;
216 return 0;
217}
208 218
209/* implement the video4linux api */ 219static int vidioc_g_tuner(struct file *file, void *priv,
210 220 struct v4l2_tuner *v)
211static int tt_do_ioctl(struct inode *inode, struct file *file,
212 unsigned int cmd, void *arg)
213{ 221{
214 struct video_device *dev = video_devdata(file); 222 struct video_device *dev = video_devdata(file);
215 struct tt_device *tt=dev->priv; 223 struct tt_device *tt = dev->priv;
216 224
217 switch(cmd) 225 if (v->index > 0)
218 { 226 return -EINVAL;
219 case VIDIOC_QUERYCAP:
220 {
221 struct v4l2_capability *v = arg;
222 memset(v,0,sizeof(*v));
223 strlcpy(v->driver, "radio-terratec", sizeof (v->driver));
224 strlcpy(v->card, "ActiveRadio", sizeof (v->card));
225 sprintf(v->bus_info,"ISA");
226 v->version = RADIO_VERSION;
227 v->capabilities = V4L2_CAP_TUNER;
228 227
229 return 0; 228 strcpy(v->name, "FM");
230 } 229 v->type = V4L2_TUNER_RADIO;
231 case VIDIOC_G_TUNER: 230 v->rangelow = (87*16000);
232 { 231 v->rangehigh = (108*16000);
233 struct v4l2_tuner *v = arg; 232 v->rxsubchans = V4L2_TUNER_SUB_MONO;
233 v->capability = V4L2_TUNER_CAP_LOW;
234 v->audmode = V4L2_TUNER_MODE_MONO;
235 v->signal = 0xFFFF*tt_getsigstr(tt);
236 return 0;
237}
234 238
235 if (v->index > 0) 239static int vidioc_s_tuner(struct file *file, void *priv,
236 return -EINVAL; 240 struct v4l2_tuner *v)
241{
242 if (v->index > 0)
243 return -EINVAL;
244 return 0;
245}
237 246
238 memset(v,0,sizeof(*v)); 247static int vidioc_s_frequency(struct file *file, void *priv,
239 strcpy(v->name, "FM"); 248 struct v4l2_frequency *f)
240 v->type = V4L2_TUNER_RADIO; 249{
250 struct video_device *dev = video_devdata(file);
251 struct tt_device *tt = dev->priv;
241 252
242 v->rangelow=(87*16000); 253 tt->curfreq = f->frequency;
243 v->rangehigh=(108*16000); 254 tt_setfreq(tt, tt->curfreq);
244 v->rxsubchans =V4L2_TUNER_SUB_MONO; 255 return 0;
245 v->capability=V4L2_TUNER_CAP_LOW; 256}
246 v->audmode = V4L2_TUNER_MODE_MONO;
247 v->signal=0xFFFF*tt_getsigstr(tt);
248 257
249 return 0; 258static int vidioc_g_frequency(struct file *file, void *priv,
250 } 259 struct v4l2_frequency *f)
251 case VIDIOC_S_TUNER: 260{
252 { 261 struct video_device *dev = video_devdata(file);
253 struct v4l2_tuner *v = arg; 262 struct tt_device *tt = dev->priv;
254 263
255 if (v->index > 0) 264 f->type = V4L2_TUNER_RADIO;
256 return -EINVAL; 265 f->frequency = tt->curfreq;
266 return 0;
267}
257 268
258 return 0; 269static int vidioc_queryctrl(struct file *file, void *priv,
259 } 270 struct v4l2_queryctrl *qc)
260 case VIDIOC_S_FREQUENCY: 271{
261 { 272 int i;
262 struct v4l2_frequency *f = arg;
263 273
264 tt->curfreq = f->frequency; 274 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
265 tt_setfreq(tt, tt->curfreq); 275 if (qc->id && qc->id == radio_qctrl[i].id) {
276 memcpy(qc, &(radio_qctrl[i]),
277 sizeof(*qc));
266 return 0; 278 return 0;
267 } 279 }
268 case VIDIOC_G_FREQUENCY: 280 }
269 { 281 return -EINVAL;
270 struct v4l2_frequency *f = arg; 282}
271 283
272 f->type = V4L2_TUNER_RADIO; 284static int vidioc_g_ctrl(struct file *file, void *priv,
273 f->frequency = tt->curfreq; 285 struct v4l2_control *ctrl)
286{
287 struct video_device *dev = video_devdata(file);
288 struct tt_device *tt = dev->priv;
274 289
275 return 0; 290 switch (ctrl->id) {
276 } 291 case V4L2_CID_AUDIO_MUTE:
277 case VIDIOC_QUERYCTRL: 292 if (tt->muted)
278 { 293 ctrl->value = 1;
279 struct v4l2_queryctrl *qc = arg; 294 else
280 int i; 295 ctrl->value = 0;
281 296 return 0;
282 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 297 case V4L2_CID_AUDIO_VOLUME:
283 if (qc->id && qc->id == radio_qctrl[i].id) { 298 ctrl->value = tt->curvol * 6554;
284 memcpy(qc, &(radio_qctrl[i]), 299 return 0;
285 sizeof(*qc)); 300 }
286 return (0); 301 return -EINVAL;
287 } 302}
288 }
289 return -EINVAL;
290 }
291 case VIDIOC_G_CTRL:
292 {
293 struct v4l2_control *ctrl= arg;
294
295 switch (ctrl->id) {
296 case V4L2_CID_AUDIO_MUTE:
297 if (tt->muted)
298 ctrl->value=1;
299 else
300 ctrl->value=0;
301 return (0);
302 case V4L2_CID_AUDIO_VOLUME:
303 ctrl->value=tt->curvol * 6554;
304 return (0);
305 }
306 return -EINVAL;
307 }
308 case VIDIOC_S_CTRL:
309 {
310 struct v4l2_control *ctrl= arg;
311
312 switch (ctrl->id) {
313 case V4L2_CID_AUDIO_MUTE:
314 if (ctrl->value) {
315 tt_mute(tt);
316 } else {
317 tt_setvol(tt,tt->curvol);
318 }
319 return (0);
320 case V4L2_CID_AUDIO_VOLUME:
321 tt_setvol(tt,ctrl->value);
322 return (0);
323 }
324 return -EINVAL;
325 }
326 303
327 default: 304static int vidioc_s_ctrl(struct file *file, void *priv,
328 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 305 struct v4l2_control *ctrl)
329 tt_do_ioctl); 306{
307 struct video_device *dev = video_devdata(file);
308 struct tt_device *tt = dev->priv;
309
310 switch (ctrl->id) {
311 case V4L2_CID_AUDIO_MUTE:
312 if (ctrl->value)
313 tt_mute(tt);
314 else
315 tt_setvol(tt,tt->curvol);
316 return 0;
317 case V4L2_CID_AUDIO_VOLUME:
318 tt_setvol(tt,ctrl->value);
319 return 0;
330 } 320 }
321 return -EINVAL;
322}
323
324static int vidioc_g_audio(struct file *file, void *priv,
325 struct v4l2_audio *a)
326{
327 if (a->index > 1)
328 return -EINVAL;
329
330 strcpy(a->name, "Radio");
331 a->capability = V4L2_AUDCAP_STEREO;
332 return 0;
333}
334
335static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
336{
337 *i = 0;
338 return 0;
331} 339}
332 340
333static int tt_ioctl(struct inode *inode, struct file *file, 341static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
334 unsigned int cmd, unsigned long arg)
335{ 342{
336 return video_usercopy(inode, file, cmd, arg, tt_do_ioctl); 343 if (i != 0)
344 return -EINVAL;
345 return 0;
346}
347
348static int vidioc_s_audio(struct file *file, void *priv,
349 struct v4l2_audio *a)
350{
351 if (a->index != 0)
352 return -EINVAL;
353 return 0;
337} 354}
338 355
339static struct tt_device terratec_unit; 356static struct tt_device terratec_unit;
@@ -342,7 +359,7 @@ static const struct file_operations terratec_fops = {
342 .owner = THIS_MODULE, 359 .owner = THIS_MODULE,
343 .open = video_exclusive_open, 360 .open = video_exclusive_open,
344 .release = video_exclusive_release, 361 .release = video_exclusive_release,
345 .ioctl = tt_ioctl, 362 .ioctl = video_ioctl2,
346 .compat_ioctl = v4l_compat_ioctl32, 363 .compat_ioctl = v4l_compat_ioctl32,
347 .llseek = no_llseek, 364 .llseek = no_llseek,
348}; 365};
@@ -354,6 +371,18 @@ static struct video_device terratec_radio=
354 .type = VID_TYPE_TUNER, 371 .type = VID_TYPE_TUNER,
355 .hardware = 0, 372 .hardware = 0,
356 .fops = &terratec_fops, 373 .fops = &terratec_fops,
374 .vidioc_querycap = vidioc_querycap,
375 .vidioc_g_tuner = vidioc_g_tuner,
376 .vidioc_s_tuner = vidioc_s_tuner,
377 .vidioc_g_frequency = vidioc_g_frequency,
378 .vidioc_s_frequency = vidioc_s_frequency,
379 .vidioc_queryctrl = vidioc_queryctrl,
380 .vidioc_g_ctrl = vidioc_g_ctrl,
381 .vidioc_s_ctrl = vidioc_s_ctrl,
382 .vidioc_g_audio = vidioc_g_audio,
383 .vidioc_s_audio = vidioc_s_audio,
384 .vidioc_g_input = vidioc_g_input,
385 .vidioc_s_input = vidioc_s_input,
357}; 386};
358 387
359static int __init terratec_init(void) 388static int __init terratec_init(void)
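
The terratec conversion above keeps the old VIDIOC_QUERYCTRL logic intact inside the new vidioc_queryctrl() handler: scan a static radio_qctrl[] table and copy the matching descriptor back to the caller. The table itself is defined near the top of each radio-*.c file and is not visible in these hunks, so the entries below are only a representative guess at its shape:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/string.h>
	#include <linux/fs.h>
	#include <linux/videodev2.h>

	/* illustrative table only; the real radio_qctrl[] is not part of
	 * these hunks and may differ per driver */
	static struct v4l2_queryctrl radio_qctrl[] = {
		{
			.id		= V4L2_CID_AUDIO_MUTE,
			.name		= "Mute",
			.minimum	= 0,
			.maximum	= 1,
			.type		= V4L2_CTRL_TYPE_BOOLEAN,
		}, {
			.id		= V4L2_CID_AUDIO_VOLUME,
			.name		= "Volume",
			.minimum	= 0,
			.maximum	= 65535,
			.step		= 65535,
			.type		= V4L2_CTRL_TYPE_INTEGER,
		}
	};

	static int vidioc_queryctrl(struct file *file, void *priv,
					struct v4l2_queryctrl *qc)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
			if (qc->id && qc->id == radio_qctrl[i].id) {
				/* hand the whole descriptor back to the caller */
				memcpy(qc, &radio_qctrl[i], sizeof(*qc));
				return 0;
			}
		}
		return -EINVAL;
	}
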
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 6d7f1e7116ea..c27c629d99df 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -192,144 +192,154 @@ static void tr_setfreq(unsigned long f)
192 write_i2c(5, TSA6060T_ADDR, (f << 1) | 1, f >> 7, 0x60 | ((f >> 15) & 1), 0); 192 write_i2c(5, TSA6060T_ADDR, (f << 1) | 1, f >> 7, 0x60 | ((f >> 15) & 1), 0);
193} 193}
194 194
195static int tr_do_ioctl(struct inode *inode, struct file *file, 195static int vidioc_querycap(struct file *file, void *priv,
196 unsigned int cmd, void *arg) 196 struct v4l2_capability *v)
197{ 197{
198 switch(cmd) 198 strlcpy(v->driver, "radio-trust", sizeof(v->driver));
199 { 199 strlcpy(v->card, "Trust FM Radio", sizeof(v->card));
200 case VIDIOC_QUERYCAP: 200 sprintf(v->bus_info, "ISA");
201 { 201 v->version = RADIO_VERSION;
202 struct v4l2_capability *v = arg; 202 v->capabilities = V4L2_CAP_TUNER;
203 memset(v,0,sizeof(*v)); 203 return 0;
204 strlcpy(v->driver, "radio-trust", sizeof (v->driver)); 204}
205 strlcpy(v->card, "Trust FM Radio", sizeof (v->card));
206 sprintf(v->bus_info,"ISA");
207 v->version = RADIO_VERSION;
208 v->capabilities = V4L2_CAP_TUNER;
209 205
210 return 0; 206static int vidioc_g_tuner(struct file *file, void *priv,
211 } 207 struct v4l2_tuner *v)
212 case VIDIOC_G_TUNER: 208{
213 { 209 if (v->index > 0)
214 struct v4l2_tuner *v = arg; 210 return -EINVAL;
215
216 if (v->index > 0)
217 return -EINVAL;
218
219 memset(v,0,sizeof(*v));
220 strcpy(v->name, "FM");
221 v->type = V4L2_TUNER_RADIO;
222
223 v->rangelow=(87.5*16000);
224 v->rangehigh=(108*16000);
225 v->rxsubchans =V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
226 v->capability=V4L2_TUNER_CAP_LOW;
227 if(tr_getstereo())
228 v->audmode = V4L2_TUNER_MODE_STEREO;
229 else
230 v->audmode = V4L2_TUNER_MODE_MONO;
231 v->signal=tr_getsigstr();
232 211
233 return 0; 212 strcpy(v->name, "FM");
234 } 213 v->type = V4L2_TUNER_RADIO;
235 case VIDIOC_S_TUNER: 214 v->rangelow = (87.5*16000);
236 { 215 v->rangehigh = (108*16000);
237 struct v4l2_tuner *v = arg; 216 v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
217 v->capability = V4L2_TUNER_CAP_LOW;
218 if (tr_getstereo())
219 v->audmode = V4L2_TUNER_MODE_STEREO;
220 else
221 v->audmode = V4L2_TUNER_MODE_MONO;
222 v->signal = tr_getsigstr();
223 return 0;
224}
225
226static int vidioc_s_tuner(struct file *file, void *priv,
227 struct v4l2_tuner *v)
228{
229 if (v->index > 0)
230 return -EINVAL;
238 231
239 if (v->index > 0) 232 return 0;
240 return -EINVAL; 233}
241 234
242 return 0; 235static int vidioc_s_frequency(struct file *file, void *priv,
243 } 236 struct v4l2_frequency *f)
244 case VIDIOC_S_FREQUENCY: 237{
245 { 238 curfreq = f->frequency;
246 struct v4l2_frequency *f = arg; 239 tr_setfreq(curfreq);
240 return 0;
241}
247 242
248 curfreq = f->frequency; 243static int vidioc_g_frequency(struct file *file, void *priv,
249 tr_setfreq(curfreq); 244 struct v4l2_frequency *f)
250 return 0; 245{
251 } 246 f->type = V4L2_TUNER_RADIO;
252 case VIDIOC_G_FREQUENCY: 247 f->frequency = curfreq;
253 { 248 return 0;
254 struct v4l2_frequency *f = arg; 249}
255 250
256 f->type = V4L2_TUNER_RADIO; 251static int vidioc_queryctrl(struct file *file, void *priv,
257 f->frequency = curfreq; 252 struct v4l2_queryctrl *qc)
253{
254 int i;
258 255
256 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
257 if (qc->id && qc->id == radio_qctrl[i].id) {
258 memcpy(qc, &(radio_qctrl[i]),
259 sizeof(*qc));
259 return 0; 260 return 0;
260 } 261 }
261 case VIDIOC_QUERYCTRL: 262 }
262 { 263 return -EINVAL;
263 struct v4l2_queryctrl *qc = arg; 264}
264 int i; 265
265 266static int vidioc_g_ctrl(struct file *file, void *priv,
266 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 267 struct v4l2_control *ctrl)
267 if (qc->id && qc->id == radio_qctrl[i].id) { 268{
268 memcpy(qc, &(radio_qctrl[i]), 269 switch (ctrl->id) {
269 sizeof(*qc)); 270 case V4L2_CID_AUDIO_MUTE:
270 return (0); 271 ctrl->value = curmute;
271 } 272 return 0;
272 } 273 case V4L2_CID_AUDIO_VOLUME:
273 return -EINVAL; 274 ctrl->value = curvol * 2048;
274 } 275 return 0;
275 case VIDIOC_G_CTRL: 276 case V4L2_CID_AUDIO_BASS:
276 { 277 ctrl->value = curbass * 4370;
277 struct v4l2_control *ctrl= arg; 278 return 0;
278 279 case V4L2_CID_AUDIO_TREBLE:
279 switch (ctrl->id) { 280 ctrl->value = curtreble * 4370;
280 case V4L2_CID_AUDIO_MUTE: 281 return 0;
281 ctrl->value=curmute; 282 }
282 return (0); 283 return -EINVAL;
283 case V4L2_CID_AUDIO_VOLUME: 284}
284 ctrl->value= curvol * 2048;
285 return (0);
286 case V4L2_CID_AUDIO_BASS:
287 ctrl->value= curbass * 4370;
288 return (0);
289 case V4L2_CID_AUDIO_TREBLE:
290 ctrl->value= curtreble * 4370;
291 return (0);
292 }
293 return -EINVAL;
294 }
295 case VIDIOC_S_CTRL:
296 {
297 struct v4l2_control *ctrl= arg;
298
299 switch (ctrl->id) {
300 case V4L2_CID_AUDIO_MUTE:
301 tr_setmute(ctrl->value);
302 return 0;
303 case V4L2_CID_AUDIO_VOLUME:
304 tr_setvol(ctrl->value);
305 return 0;
306 case V4L2_CID_AUDIO_BASS:
307 tr_setbass(ctrl->value);
308 return 0;
309 case V4L2_CID_AUDIO_TREBLE:
310 tr_settreble(ctrl->value);
311 return (0);
312 }
313 return -EINVAL;
314 }
315 285
316 default: 286static int vidioc_s_ctrl(struct file *file, void *priv,
317 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 287 struct v4l2_control *ctrl)
318 tr_do_ioctl); 288{
289 switch (ctrl->id) {
290 case V4L2_CID_AUDIO_MUTE:
291 tr_setmute(ctrl->value);
292 return 0;
293 case V4L2_CID_AUDIO_VOLUME:
294 tr_setvol(ctrl->value);
295 return 0;
296 case V4L2_CID_AUDIO_BASS:
297 tr_setbass(ctrl->value);
298 return 0;
299 case V4L2_CID_AUDIO_TREBLE:
300 tr_settreble(ctrl->value);
301 return 0;
319 } 302 }
303 return -EINVAL;
320} 304}
321 305
322static int tr_ioctl(struct inode *inode, struct file *file, 306static int vidioc_g_audio(struct file *file, void *priv,
323 unsigned int cmd, unsigned long arg) 307 struct v4l2_audio *a)
324{ 308{
325 return video_usercopy(inode, file, cmd, arg, tr_do_ioctl); 309 if (a->index > 1)
310 return -EINVAL;
311
312 strcpy(a->name, "Radio");
313 a->capability = V4L2_AUDCAP_STEREO;
314 return 0;
315}
316
317static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
318{
319 *i = 0;
320 return 0;
321}
322
323static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
324{
325 if (i != 0)
326 return -EINVAL;
327 return 0;
328}
329
330static int vidioc_s_audio(struct file *file, void *priv,
331 struct v4l2_audio *a)
332{
333 if (a->index != 0)
334 return -EINVAL;
335 return 0;
326} 336}
327 337
328static const struct file_operations trust_fops = { 338static const struct file_operations trust_fops = {
329 .owner = THIS_MODULE, 339 .owner = THIS_MODULE,
330 .open = video_exclusive_open, 340 .open = video_exclusive_open,
331 .release = video_exclusive_release, 341 .release = video_exclusive_release,
332 .ioctl = tr_ioctl, 342 .ioctl = video_ioctl2,
333 .compat_ioctl = v4l_compat_ioctl32, 343 .compat_ioctl = v4l_compat_ioctl32,
334 .llseek = no_llseek, 344 .llseek = no_llseek,
335}; 345};
@@ -341,6 +351,18 @@ static struct video_device trust_radio=
341 .type = VID_TYPE_TUNER, 351 .type = VID_TYPE_TUNER,
342 .hardware = 0, 352 .hardware = 0,
343 .fops = &trust_fops, 353 .fops = &trust_fops,
354 .vidioc_querycap = vidioc_querycap,
355 .vidioc_g_tuner = vidioc_g_tuner,
356 .vidioc_s_tuner = vidioc_s_tuner,
357 .vidioc_g_frequency = vidioc_g_frequency,
358 .vidioc_s_frequency = vidioc_s_frequency,
359 .vidioc_queryctrl = vidioc_queryctrl,
360 .vidioc_g_ctrl = vidioc_g_ctrl,
361 .vidioc_s_ctrl = vidioc_s_ctrl,
362 .vidioc_g_audio = vidioc_g_audio,
363 .vidioc_s_audio = vidioc_s_audio,
364 .vidioc_g_input = vidioc_g_input,
365 .vidioc_s_input = vidioc_s_input,
344}; 366};
345 367
346static int __init trust_init(void) 368static int __init trust_init(void)
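
The tuner ranges in the radio-trust hunks (rangelow = 87.5*16000, rangehigh = 108*16000) follow the V4L2 convention for tuners that advertise V4L2_TUNER_CAP_LOW: frequencies are expressed in units of 1/16 kHz (62.5 Hz), so 87.5 MHz becomes 87.5 * 16000 = 1,400,000. Without the CAP_LOW flag the unit would be 62.5 kHz instead. A small stand-alone check of that arithmetic (user-space C, names made up for the demonstration):

	#include <stdio.h>

	/* V4L2_TUNER_CAP_LOW tuners count in 1/16 kHz steps (62.5 Hz) */
	static unsigned long mhz_to_v4l2_low(double mhz)
	{
		return (unsigned long)(mhz * 16000);
	}

	int main(void)
	{
		printf("87.5 MHz -> %lu\n", mhz_to_v4l2_low(87.5));  /* 1400000 */
		printf("108 MHz  -> %lu\n", mhz_to_v4l2_low(108.0)); /* 1728000 */
		return 0;
	}
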
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 3031fef178cb..8ff5a23a9f01 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -93,8 +93,6 @@ static int typhoon_setfreq(struct typhoon_device *dev, unsigned long frequency);
93static void typhoon_mute(struct typhoon_device *dev); 93static void typhoon_mute(struct typhoon_device *dev);
94static void typhoon_unmute(struct typhoon_device *dev); 94static void typhoon_unmute(struct typhoon_device *dev);
95static int typhoon_setvol(struct typhoon_device *dev, int vol); 95static int typhoon_setvol(struct typhoon_device *dev, int vol);
96static int typhoon_ioctl(struct inode *inode, struct file *file,
97 unsigned int cmd, unsigned long arg);
98#ifdef CONFIG_RADIO_TYPHOON_PROC_FS 96#ifdef CONFIG_RADIO_TYPHOON_PROC_FS
99static int typhoon_get_info(char *buf, char **start, off_t offset, int len); 97static int typhoon_get_info(char *buf, char **start, off_t offset, int len);
100#endif 98#endif
@@ -186,129 +184,148 @@ static int typhoon_setvol(struct typhoon_device *dev, int vol)
186 return 0; 184 return 0;
187} 185}
188 186
187static int vidioc_querycap(struct file *file, void *priv,
188 struct v4l2_capability *v)
189{
190 strlcpy(v->driver, "radio-typhoon", sizeof(v->driver));
191 strlcpy(v->card, "Typhoon Radio", sizeof(v->card));
192 sprintf(v->bus_info, "ISA");
193 v->version = RADIO_VERSION;
194 v->capabilities = V4L2_CAP_TUNER;
195 return 0;
196}
197
198static int vidioc_g_tuner(struct file *file, void *priv,
199 struct v4l2_tuner *v)
200{
201 if (v->index > 0)
202 return -EINVAL;
203
204 strcpy(v->name, "FM");
205 v->type = V4L2_TUNER_RADIO;
206 v->rangelow = (87.5*16000);
207 v->rangehigh = (108*16000);
208 v->rxsubchans = V4L2_TUNER_SUB_MONO;
209 v->capability = V4L2_TUNER_CAP_LOW;
210 v->audmode = V4L2_TUNER_MODE_MONO;
211 v->signal = 0xFFFF; /* We can't get the signal strength */
212 return 0;
213}
189 214
190static int typhoon_do_ioctl(struct inode *inode, struct file *file, 215static int vidioc_s_tuner(struct file *file, void *priv,
191 unsigned int cmd, void *arg) 216 struct v4l2_tuner *v)
217{
218 if (v->index > 0)
219 return -EINVAL;
220
221 return 0;
222}
223
224static int vidioc_s_frequency(struct file *file, void *priv,
225 struct v4l2_frequency *f)
192{ 226{
193 struct video_device *dev = video_devdata(file); 227 struct video_device *dev = video_devdata(file);
194 struct typhoon_device *typhoon = dev->priv; 228 struct typhoon_device *typhoon = dev->priv;
195 229
196 switch (cmd) { 230 typhoon->curfreq = f->frequency;
197 case VIDIOC_QUERYCAP: 231 typhoon_setfreq(typhoon, typhoon->curfreq);
198 { 232 return 0;
199 struct v4l2_capability *v = arg; 233}
200 memset(v,0,sizeof(*v));
201 strlcpy(v->driver, "radio-typhoon", sizeof (v->driver));
202 strlcpy(v->card, "Typhoon Radio", sizeof (v->card));
203 sprintf(v->bus_info,"ISA");
204 v->version = RADIO_VERSION;
205 v->capabilities = V4L2_CAP_TUNER;
206 234
207 return 0; 235static int vidioc_g_frequency(struct file *file, void *priv,
208 } 236 struct v4l2_frequency *f)
209 case VIDIOC_G_TUNER: 237{
210 { 238 struct video_device *dev = video_devdata(file);
211 struct v4l2_tuner *v = arg; 239 struct typhoon_device *typhoon = dev->priv;
212 240
213 if (v->index > 0) 241 f->type = V4L2_TUNER_RADIO;
214 return -EINVAL; 242 f->frequency = typhoon->curfreq;
215 243
216 memset(v,0,sizeof(*v)); 244 return 0;
217 strcpy(v->name, "FM"); 245}
218 v->type = V4L2_TUNER_RADIO;
219 246
220 v->rangelow=(87.5*16000); 247static int vidioc_queryctrl(struct file *file, void *priv,
221 v->rangehigh=(108*16000); 248 struct v4l2_queryctrl *qc)
222 v->rxsubchans =V4L2_TUNER_SUB_MONO; 249{
223 v->capability=V4L2_TUNER_CAP_LOW; 250 int i;
224 v->audmode = V4L2_TUNER_MODE_MONO;
225 v->signal = 0xFFFF; /* We can't get the signal strength */
226 251
252 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
253 if (qc->id && qc->id == radio_qctrl[i].id) {
254 memcpy(qc, &(radio_qctrl[i]),
255 sizeof(*qc));
227 return 0; 256 return 0;
228 } 257 }
229 case VIDIOC_S_TUNER: 258 }
230 { 259 return -EINVAL;
231 struct v4l2_tuner *v = arg; 260}
232 261
233 if (v->index > 0) 262static int vidioc_g_ctrl(struct file *file, void *priv,
234 return -EINVAL; 263 struct v4l2_control *ctrl)
264{
265 struct video_device *dev = video_devdata(file);
266 struct typhoon_device *typhoon = dev->priv;
235 267
236 return 0; 268 switch (ctrl->id) {
237 } 269 case V4L2_CID_AUDIO_MUTE:
238 case VIDIOC_S_FREQUENCY: 270 ctrl->value = typhoon->muted;
239 { 271 return 0;
240 struct v4l2_frequency *f = arg; 272 case V4L2_CID_AUDIO_VOLUME:
273 ctrl->value = typhoon->curvol;
274 return 0;
275 }
276 return -EINVAL;
277}
241 278
242 typhoon->curfreq = f->frequency; 279static int vidioc_s_ctrl (struct file *file, void *priv,
243 typhoon_setfreq(typhoon, typhoon->curfreq); 280 struct v4l2_control *ctrl)
244 return 0; 281{
245 } 282 struct video_device *dev = video_devdata(file);
246 case VIDIOC_G_FREQUENCY: 283 struct typhoon_device *typhoon = dev->priv;
247 {
248 struct v4l2_frequency *f = arg;
249 284
250 f->type = V4L2_TUNER_RADIO; 285 switch (ctrl->id) {
251 f->frequency = typhoon->curfreq; 286 case V4L2_CID_AUDIO_MUTE:
287 if (ctrl->value)
288 typhoon_mute(typhoon);
289 else
290 typhoon_unmute(typhoon);
291 return 0;
292 case V4L2_CID_AUDIO_VOLUME:
293 typhoon_setvol(typhoon, ctrl->value);
294 return 0;
295 }
296 return -EINVAL;
297}
252 298
253 return 0; 299static int vidioc_g_audio(struct file *file, void *priv,
254 } 300 struct v4l2_audio *a)
255 case VIDIOC_QUERYCTRL: 301{
256 { 302 if (a->index > 1)
257 struct v4l2_queryctrl *qc = arg; 303 return -EINVAL;
258 int i;
259
260 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
261 if (qc->id && qc->id == radio_qctrl[i].id) {
262 memcpy(qc, &(radio_qctrl[i]),
263 sizeof(*qc));
264 return (0);
265 }
266 }
267 return -EINVAL;
268 }
269 case VIDIOC_G_CTRL:
270 {
271 struct v4l2_control *ctrl= arg;
272
273 switch (ctrl->id) {
274 case V4L2_CID_AUDIO_MUTE:
275 ctrl->value=typhoon->muted;
276 return (0);
277 case V4L2_CID_AUDIO_VOLUME:
278 ctrl->value=typhoon->curvol;
279 return (0);
280 }
281 return -EINVAL;
282 }
283 case VIDIOC_S_CTRL:
284 {
285 struct v4l2_control *ctrl= arg;
286
287 switch (ctrl->id) {
288 case V4L2_CID_AUDIO_MUTE:
289 if (ctrl->value) {
290 typhoon_mute(typhoon);
291 } else {
292 typhoon_unmute(typhoon);
293 }
294 return (0);
295 case V4L2_CID_AUDIO_VOLUME:
296 typhoon_setvol(typhoon, ctrl->value);
297 return (0);
298 }
299 return -EINVAL;
300 }
301 304
302 default: 305 strcpy(a->name, "Radio");
303 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 306 a->capability = V4L2_AUDCAP_STEREO;
304 typhoon_do_ioctl); 307 return 0;
305 } 308}
309
310static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
311{
312 *i = 0;
313 return 0;
314}
315
316static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
317{
318 if (i != 0)
319 return -EINVAL;
320 return 0;
306} 321}
307 322
308static int typhoon_ioctl(struct inode *inode, struct file *file, 323static int vidioc_s_audio(struct file *file, void *priv,
309 unsigned int cmd, unsigned long arg) 324 struct v4l2_audio *a)
310{ 325{
311 return video_usercopy(inode, file, cmd, arg, typhoon_do_ioctl); 326 if (a->index != 0)
327 return -EINVAL;
328 return 0;
312} 329}
313 330
314static struct typhoon_device typhoon_unit = 331static struct typhoon_device typhoon_unit =
@@ -322,7 +339,7 @@ static const struct file_operations typhoon_fops = {
322 .owner = THIS_MODULE, 339 .owner = THIS_MODULE,
323 .open = video_exclusive_open, 340 .open = video_exclusive_open,
324 .release = video_exclusive_release, 341 .release = video_exclusive_release,
325 .ioctl = typhoon_ioctl, 342 .ioctl = video_ioctl2,
326 .compat_ioctl = v4l_compat_ioctl32, 343 .compat_ioctl = v4l_compat_ioctl32,
327 .llseek = no_llseek, 344 .llseek = no_llseek,
328}; 345};
@@ -334,6 +351,18 @@ static struct video_device typhoon_radio =
334 .type = VID_TYPE_TUNER, 351 .type = VID_TYPE_TUNER,
335 .hardware = 0, 352 .hardware = 0,
336 .fops = &typhoon_fops, 353 .fops = &typhoon_fops,
354 .vidioc_querycap = vidioc_querycap,
355 .vidioc_g_tuner = vidioc_g_tuner,
356 .vidioc_s_tuner = vidioc_s_tuner,
357 .vidioc_g_audio = vidioc_g_audio,
358 .vidioc_s_audio = vidioc_s_audio,
359 .vidioc_g_input = vidioc_g_input,
360 .vidioc_s_input = vidioc_s_input,
361 .vidioc_g_frequency = vidioc_g_frequency,
362 .vidioc_s_frequency = vidioc_s_frequency,
363 .vidioc_queryctrl = vidioc_queryctrl,
364 .vidioc_g_ctrl = vidioc_g_ctrl,
365 .vidioc_s_ctrl = vidioc_s_ctrl,
337}; 366};
338 367
339#ifdef CONFIG_RADIO_TYPHOON_PROC_FS 368#ifdef CONFIG_RADIO_TYPHOON_PROC_FS
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index ec08491fb7c5..a4715901512d 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -230,121 +230,123 @@ static int zol_is_stereo (struct zol_device *dev)
230 return 0; 230 return 0;
231} 231}
232 232
233static int zol_do_ioctl(struct inode *inode, struct file *file, 233static int vidioc_querycap(struct file *file, void *priv,
234 unsigned int cmd, void *arg) 234 struct v4l2_capability *v)
235{
236 strlcpy(v->driver, "radio-zoltrix", sizeof(v->driver));
237 strlcpy(v->card, "Zoltrix Radio", sizeof(v->card));
238 sprintf(v->bus_info, "ISA");
239 v->version = RADIO_VERSION;
240 v->capabilities = V4L2_CAP_TUNER;
241 return 0;
242}
243
244static int vidioc_g_tuner(struct file *file, void *priv,
245 struct v4l2_tuner *v)
235{ 246{
236 struct video_device *dev = video_devdata(file); 247 struct video_device *dev = video_devdata(file);
237 struct zol_device *zol = dev->priv; 248 struct zol_device *zol = dev->priv;
238 249
239 switch (cmd) { 250 if (v->index > 0)
240 case VIDIOC_QUERYCAP: 251 return -EINVAL;
241 {
242 struct v4l2_capability *v = arg;
243 memset(v,0,sizeof(*v));
244 strlcpy(v->driver, "radio-zoltrix", sizeof (v->driver));
245 strlcpy(v->card, "Zoltrix Radio", sizeof (v->card));
246 sprintf(v->bus_info,"ISA");
247 v->version = RADIO_VERSION;
248 v->capabilities = V4L2_CAP_TUNER;
249 252
250 return 0; 253 strcpy(v->name, "FM");
251 } 254 v->type = V4L2_TUNER_RADIO;
252 case VIDIOC_G_TUNER: 255 v->rangelow = (88*16000);
253 { 256 v->rangehigh = (108*16000);
254 struct v4l2_tuner *v = arg; 257 v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
255 258 v->capability = V4L2_TUNER_CAP_LOW;
256 if (v->index > 0) 259 if (zol_is_stereo(zol))
257 return -EINVAL; 260 v->audmode = V4L2_TUNER_MODE_STEREO;
258 261 else
259 memset(v,0,sizeof(*v)); 262 v->audmode = V4L2_TUNER_MODE_MONO;
260 strcpy(v->name, "FM"); 263 v->signal = 0xFFFF*zol_getsigstr(zol);
261 v->type = V4L2_TUNER_RADIO; 264 return 0;
262 265}
263 v->rangelow=(88*16000);
264 v->rangehigh=(108*16000);
265 v->rxsubchans =V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
266 v->capability=V4L2_TUNER_CAP_LOW;
267 if(zol_is_stereo(zol))
268 v->audmode = V4L2_TUNER_MODE_STEREO;
269 else
270 v->audmode = V4L2_TUNER_MODE_MONO;
271 v->signal=0xFFFF*zol_getsigstr(zol);
272 266
273 return 0; 267static int vidioc_s_tuner(struct file *file, void *priv,
274 } 268 struct v4l2_tuner *v)
275 case VIDIOC_S_TUNER: 269{
276 { 270 if (v->index > 0)
277 struct v4l2_tuner *v = arg; 271 return -EINVAL;
272 return 0;
273}
278 274
279 if (v->index > 0) 275static int vidioc_s_frequency(struct file *file, void *priv,
280 return -EINVAL; 276 struct v4l2_frequency *f)
277{
278 struct video_device *dev = video_devdata(file);
279 struct zol_device *zol = dev->priv;
281 280
282 return 0; 281 zol->curfreq = f->frequency;
283 } 282 zol_setfreq(zol, zol->curfreq);
284 case VIDIOC_S_FREQUENCY: 283 return 0;
285 { 284}
286 struct v4l2_frequency *f = arg;
287 285
288 zol->curfreq = f->frequency; 286static int vidioc_g_frequency(struct file *file, void *priv,
289 zol_setfreq(zol, zol->curfreq); 287 struct v4l2_frequency *f)
290 return 0; 288{
291 } 289 struct video_device *dev = video_devdata(file);
292 case VIDIOC_G_FREQUENCY: 290 struct zol_device *zol = dev->priv;
293 { 291
294 struct v4l2_frequency *f = arg; 292 f->type = V4L2_TUNER_RADIO;
293 f->frequency = zol->curfreq;
294 return 0;
295}
295 296
296 f->type = V4L2_TUNER_RADIO; 297static int vidioc_queryctrl(struct file *file, void *priv,
297 f->frequency = zol->curfreq; 298 struct v4l2_queryctrl *qc)
299{
300 int i;
298 301
302 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
303 if (qc->id && qc->id == radio_qctrl[i].id) {
304 memcpy(qc, &(radio_qctrl[i]),
305 sizeof(*qc));
299 return 0; 306 return 0;
300 } 307 }
301 case VIDIOC_QUERYCTRL: 308 }
302 { 309 return -EINVAL;
303 struct v4l2_queryctrl *qc = arg; 310}
304 int i; 311
305 312static int vidioc_g_ctrl(struct file *file, void *priv,
306 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 313 struct v4l2_control *ctrl)
307 if (qc->id && qc->id == radio_qctrl[i].id) { 314{
308 memcpy(qc, &(radio_qctrl[i]), 315 struct video_device *dev = video_devdata(file);
309 sizeof(*qc)); 316 struct zol_device *zol = dev->priv;
310 return (0); 317
311 } 318 switch (ctrl->id) {
312 } 319 case V4L2_CID_AUDIO_MUTE:
313 return -EINVAL; 320 ctrl->value = zol->muted;
314 } 321 return 0;
315 case VIDIOC_G_CTRL: 322 case V4L2_CID_AUDIO_VOLUME:
316 { 323 ctrl->value = zol->curvol * 4096;
317 struct v4l2_control *ctrl= arg; 324 return 0;
318 325 }
319 switch (ctrl->id) { 326 return -EINVAL;
320 case V4L2_CID_AUDIO_MUTE: 327}
321 ctrl->value=zol->muted; 328
322 return (0); 329static int vidioc_s_ctrl(struct file *file, void *priv,
323 case V4L2_CID_AUDIO_VOLUME: 330 struct v4l2_control *ctrl)
324 ctrl->value=zol->curvol * 4096; 331{
325 return (0); 332 struct video_device *dev = video_devdata(file);
326 } 333 struct zol_device *zol = dev->priv;
327 return -EINVAL; 334
335 switch (ctrl->id) {
336 case V4L2_CID_AUDIO_MUTE:
337 if (ctrl->value)
338 zol_mute(zol);
339 else {
340 zol_unmute(zol);
341 zol_setvol(zol,zol->curvol);
328 } 342 }
329 case VIDIOC_S_CTRL: 343 return 0;
330 { 344 case V4L2_CID_AUDIO_VOLUME:
331 struct v4l2_control *ctrl= arg; 345 zol_setvol(zol,ctrl->value/4096);
332 346 return 0;
333 switch (ctrl->id) { 347 }
334 case V4L2_CID_AUDIO_MUTE: 348 zol->stereo = 1;
335 if (ctrl->value) { 349 zol_setfreq(zol, zol->curfreq);
336 zol_mute(zol);
337 } else {
338 zol_unmute(zol);
339 zol_setvol(zol,zol->curvol);
340 }
341 return (0);
342 case V4L2_CID_AUDIO_VOLUME:
343 zol_setvol(zol,ctrl->value/4096);
344 return (0);
345 }
346 zol->stereo = 1;
347 zol_setfreq(zol, zol->curfreq);
348#if 0 350#if 0
349/* FIXME: Implement stereo/mono switch on V4L2 */ 351/* FIXME: Implement stereo/mono switch on V4L2 */
350 if (v->mode & VIDEO_SOUND_STEREO) { 352 if (v->mode & VIDEO_SOUND_STEREO) {
@@ -356,19 +358,39 @@ static int zol_do_ioctl(struct inode *inode, struct file *file,
356 zol_setfreq(zol, zol->curfreq); 358 zol_setfreq(zol, zol->curfreq);
357 } 359 }
358#endif 360#endif
359 return -EINVAL; 361 return -EINVAL;
360 } 362}
361 363
362 default: 364static int vidioc_g_audio(struct file *file, void *priv,
363 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 365 struct v4l2_audio *a)
364 zol_do_ioctl); 366{
365 } 367 if (a->index > 1)
368 return -EINVAL;
369
370 strcpy(a->name, "Radio");
371 a->capability = V4L2_AUDCAP_STEREO;
372 return 0;
373}
374
375static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
376{
377 *i = 0;
378 return 0;
366} 379}
367 380
368static int zol_ioctl(struct inode *inode, struct file *file, 381static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
369 unsigned int cmd, unsigned long arg)
370{ 382{
371 return video_usercopy(inode, file, cmd, arg, zol_do_ioctl); 383 if (i != 0)
384 return -EINVAL;
385 return 0;
386}
387
388static int vidioc_s_audio(struct file *file, void *priv,
389 struct v4l2_audio *a)
390{
391 if (a->index != 0)
392 return -EINVAL;
393 return 0;
372} 394}
373 395
374static struct zol_device zoltrix_unit; 396static struct zol_device zoltrix_unit;
@@ -378,7 +400,7 @@ static const struct file_operations zoltrix_fops =
378 .owner = THIS_MODULE, 400 .owner = THIS_MODULE,
379 .open = video_exclusive_open, 401 .open = video_exclusive_open,
380 .release = video_exclusive_release, 402 .release = video_exclusive_release,
381 .ioctl = zol_ioctl, 403 .ioctl = video_ioctl2,
382 .compat_ioctl = v4l_compat_ioctl32, 404 .compat_ioctl = v4l_compat_ioctl32,
383 .llseek = no_llseek, 405 .llseek = no_llseek,
384}; 406};
@@ -390,6 +412,18 @@ static struct video_device zoltrix_radio =
390 .type = VID_TYPE_TUNER, 412 .type = VID_TYPE_TUNER,
391 .hardware = 0, 413 .hardware = 0,
392 .fops = &zoltrix_fops, 414 .fops = &zoltrix_fops,
415 .vidioc_querycap = vidioc_querycap,
416 .vidioc_g_tuner = vidioc_g_tuner,
417 .vidioc_s_tuner = vidioc_s_tuner,
418 .vidioc_g_audio = vidioc_g_audio,
419 .vidioc_s_audio = vidioc_s_audio,
420 .vidioc_g_input = vidioc_g_input,
421 .vidioc_s_input = vidioc_s_input,
422 .vidioc_g_frequency = vidioc_g_frequency,
423 .vidioc_s_frequency = vidioc_s_frequency,
424 .vidioc_queryctrl = vidioc_queryctrl,
425 .vidioc_g_ctrl = vidioc_g_ctrl,
426 .vidioc_s_ctrl = vidioc_s_ctrl,
393}; 427};
394 428
395static int __init zoltrix_init(void) 429static int __init zoltrix_init(void)
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 7a6105153f23..639e8b6c35b1 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -647,6 +647,8 @@ config VIDEO_HEXIUM_GEMINI
647 647
648source "drivers/media/video/cx88/Kconfig" 648source "drivers/media/video/cx88/Kconfig"
649 649
650source "drivers/media/video/ivtv/Kconfig"
651
650config VIDEO_M32R_AR 652config VIDEO_M32R_AR
651 tristate "AR devices" 653 tristate "AR devices"
652 depends on M32R && VIDEO_V4L1 654 depends on M32R && VIDEO_V4L1
@@ -761,6 +763,18 @@ source "drivers/media/video/zc0301/Kconfig"
761 763
762source "drivers/media/video/pwc/Kconfig" 764source "drivers/media/video/pwc/Kconfig"
763 765
766config USB_ZR364XX
767 tristate "USB ZR364XX Camera support"
768 depends on USB && VIDEO_V4L2
769 ---help---
770 Say Y here if you want to connect this type of camera to your
771 computer's USB port.
772 See <file:Documentation/video4linux/zr364xx.txt> for more info
773 and list of supported cameras.
774
775 To compile this driver as a module, choose M here: the
776 module will be called zr364xx.
777
764endmenu # V4L USB devices 778endmenu # V4L USB devices
765 779
766endmenu 780endmenu
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 44ccaed40b49..9c2de501612f 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
61obj-$(CONFIG_VIDEO_MEYE) += meye.o 61obj-$(CONFIG_VIDEO_MEYE) += meye.o
62obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/ 62obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
63obj-$(CONFIG_VIDEO_CX88) += cx88/ 63obj-$(CONFIG_VIDEO_CX88) += cx88/
64obj-$(CONFIG_VIDEO_IVTV) += ivtv/
64obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ 65obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
65obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ 66obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
66obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o 67obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_USB_OV511) += ov511.o
99obj-$(CONFIG_USB_SE401) += se401.o 100obj-$(CONFIG_USB_SE401) += se401.o
100obj-$(CONFIG_USB_STV680) += stv680.o 101obj-$(CONFIG_USB_STV680) += stv680.o
101obj-$(CONFIG_USB_W9968CF) += w9968cf.o 102obj-$(CONFIG_USB_W9968CF) += w9968cf.o
103obj-$(CONFIG_USB_ZR364XX) += zr364xx.o
102 104
103obj-$(CONFIG_USB_SN9C102) += sn9c102/ 105obj-$(CONFIG_USB_SN9C102) += sn9c102/
104obj-$(CONFIG_USB_ET61X251) += et61x251/ 106obj-$(CONFIG_USB_ET61X251) += et61x251/
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 6addc42df045..6b31e50fb951 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -291,6 +291,9 @@ static struct CARD {
291 291
292 { 0x15409511, BTTV_BOARD_ACORP_Y878F, "Acorp Y878F" }, 292 { 0x15409511, BTTV_BOARD_ACORP_Y878F, "Acorp Y878F" },
293 293
294 { 0x53534149, BTTV_BOARD_SSAI_SECURITY, "SSAI Security Video Interface" },
295 { 0x5353414a, BTTV_BOARD_SSAI_ULTRASOUND, "SSAI Ultrasound Video Interface" },
296
294 /* likely broken, vendor id doesn't match the other magic views ... 297 /* likely broken, vendor id doesn't match the other magic views ...
295 * { 0xa0fca04f, BTTV_BOARD_MAGICTVIEW063, "Guillemot Maxi TV Video 3" }, */ 298 * { 0xa0fca04f, BTTV_BOARD_MAGICTVIEW063, "Guillemot Maxi TV Video 3" }, */
296 299
@@ -2907,6 +2910,28 @@ struct tvcard bttv_tvcards[] = {
2907 .has_radio = 1, 2910 .has_radio = 1,
2908 .has_remote = 1, 2911 .has_remote = 1,
2909 }, 2912 },
2913 [BTTV_BOARD_SSAI_SECURITY] = {
2914 .name = "SSAI Security Video Interface",
2915 .video_inputs = 4,
2916 .audio_inputs = 0,
2917 .tuner = -1,
2918 .svhs = -1,
2919 .muxsel = { 0, 1, 2, 3 },
2920 .tuner_type = -1,
2921 .tuner_addr = ADDR_UNSET,
2922 .radio_addr = ADDR_UNSET,
2923 },
2924 [BTTV_BOARD_SSAI_ULTRASOUND] = {
2925 .name = "SSAI Ultrasound Video Interface",
2926 .video_inputs = 2,
2927 .audio_inputs = 0,
2928 .tuner = -1,
2929 .svhs = 1,
2930 .muxsel = { 2, 0, 1, 3 },
2931 .tuner_type = -1,
2932 .tuner_addr = ADDR_UNSET,
2933 .radio_addr = ADDR_UNSET,
2934 },
2910}; 2935};
2911 2936
2912static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards); 2937static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
@@ -2970,20 +2995,20 @@ void __devinit bttv_idcard(struct bttv *btv)
2970 2995
2971 if (UNSET != audiomux[0]) { 2996 if (UNSET != audiomux[0]) {
2972 gpiobits = 0; 2997 gpiobits = 0;
2973 for (i = 0; i < 4; i++) { 2998 for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
2974 bttv_tvcards[btv->c.type].gpiomux[i] = audiomux[i]; 2999 bttv_tvcards[btv->c.type].gpiomux[i] = audiomux[i];
2975 gpiobits |= audiomux[i]; 3000 gpiobits |= audiomux[i];
2976 } 3001 }
2977 } else { 3002 } else {
2978 gpiobits = audioall; 3003 gpiobits = audioall;
2979 for (i = 0; i < 4; i++) { 3004 for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
2980 bttv_tvcards[btv->c.type].gpiomux[i] = audioall; 3005 bttv_tvcards[btv->c.type].gpiomux[i] = audioall;
2981 } 3006 }
2982 } 3007 }
2983 bttv_tvcards[btv->c.type].gpiomask = (UNSET != gpiomask) ? gpiomask : gpiobits; 3008 bttv_tvcards[btv->c.type].gpiomask = (UNSET != gpiomask) ? gpiomask : gpiobits;
2984 printk(KERN_INFO "bttv%d: gpio config override: mask=0x%x, mux=", 3009 printk(KERN_INFO "bttv%d: gpio config override: mask=0x%x, mux=",
2985 btv->c.nr,bttv_tvcards[btv->c.type].gpiomask); 3010 btv->c.nr,bttv_tvcards[btv->c.type].gpiomask);
2986 for (i = 0; i < 5; i++) { 3011 for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
2987 printk("%s0x%x", i ? "," : "", bttv_tvcards[btv->c.type].gpiomux[i]); 3012 printk("%s0x%x", i ? "," : "", bttv_tvcards[btv->c.type].gpiomux[i]);
2988 } 3013 }
2989 printk("\n"); 3014 printk("\n");
@@ -3638,7 +3663,7 @@ static int __devinit pvr_altera_load(struct bttv *btv, u8 *micro, u32 microlen)
3638 3663
3639 for (n = 0; n < microlen; n++) { 3664 for (n = 0; n < microlen; n++) {
3640 bits = micro[n]; 3665 bits = micro[n];
3641 for ( i = 0 ; i < 8 ; i++ ) { 3666 for (i = 0 ; i < 8 ; i++) {
3642 gpio_bits(BTTV_ALT_DCLK,0); 3667 gpio_bits(BTTV_ALT_DCLK,0);
3643 if (bits & 0x01) 3668 if (bits & 0x01)
3644 gpio_bits(BTTV_ALT_DATA,BTTV_ALT_DATA); 3669 gpio_bits(BTTV_ALT_DATA,BTTV_ALT_DATA);
@@ -3691,7 +3716,7 @@ static void __devinit osprey_eeprom(struct bttv *btv)
3691 /* this might be an antique... check for MMAC label in eeprom */ 3716 /* this might be an antique... check for MMAC label in eeprom */
3692 if ((ee[0]=='M') && (ee[1]=='M') && (ee[2]=='A') && (ee[3]=='C')) { 3717 if ((ee[0]=='M') && (ee[1]=='M') && (ee[2]=='A') && (ee[3]=='C')) {
3693 unsigned char checksum = 0; 3718 unsigned char checksum = 0;
3694 for (i =0; i<21; i++) 3719 for (i = 0; i < 21; i++)
3695 checksum += ee[i]; 3720 checksum += ee[i];
3696 if (checksum != ee[21]) 3721 if (checksum != ee[21])
3697 return; 3722 return;
@@ -3703,12 +3728,13 @@ static void __devinit osprey_eeprom(struct bttv *btv)
3703 unsigned short type; 3728 unsigned short type;
3704 int offset = 4*16; 3729 int offset = 4*16;
3705 3730
3706 for(; offset < 8*16; offset += 16) { 3731 for (; offset < 8*16; offset += 16) {
3707 unsigned short checksum = 0; 3732 unsigned short checksum = 0;
3708 /* verify the checksum */ 3733 /* verify the checksum */
3709 for(i = 0; i<14; i++) checksum += ee[i+offset]; 3734 for (i = 0; i < 14; i++)
3710 checksum = ~checksum; /* no idea why */ 3735 checksum += ee[i+offset];
3711 if ((((checksum>>8)&0x0FF) == ee[offset+14]) && 3736 checksum = ~checksum; /* no idea why */
3737 if ((((checksum>>8)&0x0FF) == ee[offset+14]) &&
3712 ((checksum & 0x0FF) == ee[offset+15])) { 3738 ((checksum & 0x0FF) == ee[offset+15])) {
3713 break; 3739 break;
3714 } 3740 }
@@ -3721,7 +3747,6 @@ static void __devinit osprey_eeprom(struct bttv *btv)
3721 type = (ee[offset+4]<<8) | (ee[offset+5]); 3747 type = (ee[offset+4]<<8) | (ee[offset+5]);
3722 3748
3723 switch(type) { 3749 switch(type) {
3724
3725 /* 848 based */ 3750 /* 848 based */
3726 case 0x0004: 3751 case 0x0004:
3727 btv->c.type = BTTV_BOARD_OSPREY1x0_848; 3752 btv->c.type = BTTV_BOARD_OSPREY1x0_848;
@@ -4149,8 +4174,7 @@ static int tea5757_read(struct bttv *btv)
4149 } 4174 }
4150 4175
4151 dprintk("bttv%d: tea5757:",btv->c.nr); 4176 dprintk("bttv%d: tea5757:",btv->c.nr);
4152 for(i = 0; i < 24; i++) 4177 for (i = 0; i < 24; i++) {
4153 {
4154 udelay(5); 4178 udelay(5);
4155 bus_high(btv,btv->mbox_clk); 4179 bus_high(btv,btv->mbox_clk);
4156 udelay(5); 4180 udelay(5);
@@ -4182,8 +4206,7 @@ static int tea5757_write(struct bttv *btv, int value)
4182 dprintk("bttv%d: tea5757: write 0x%X\n", btv->c.nr, value); 4206 dprintk("bttv%d: tea5757: write 0x%X\n", btv->c.nr, value);
4183 bus_low(btv,btv->mbox_clk); 4207 bus_low(btv,btv->mbox_clk);
4184 bus_high(btv,btv->mbox_we); 4208 bus_high(btv,btv->mbox_we);
4185 for(i = 0; i < 25; i++) 4209 for (i = 0; i < 25; i++) {
4186 {
4187 if (reg & 0x1000000) 4210 if (reg & 0x1000000)
4188 bus_high(btv,btv->mbox_data); 4211 bus_high(btv,btv->mbox_data);
4189 else 4212 else
@@ -4755,7 +4778,7 @@ static void kodicom4400r_init(struct bttv *btv)
4755 gpio_write(1 << 9); /* reset MUX */ 4778 gpio_write(1 << 9); /* reset MUX */
4756 gpio_write(0); 4779 gpio_write(0);
4757 /* Preset camera 0 to the 4 controllers */ 4780 /* Preset camera 0 to the 4 controllers */
4758 for (ix=0; ix<4; ix++) { 4781 for (ix = 0; ix < 4; ix++) {
4759 sw_status[ix] = ix; 4782 sw_status[ix] = ix;
4760 kodicom4400r_write(btv, ix, ix, 1); 4783 kodicom4400r_write(btv, ix, ix, 1);
4761 } 4784 }
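
Several of the bttv-cards.c loops above trade hard-coded bounds (4, 5, 128, 21) for ARRAY_SIZE(), so the iteration count stays in sync with the array definition automatically. The macro is essentially the <linux/kernel.h> definition; the array contents here are made up for the demonstration:

	#include <stdio.h>

	/* same shape as the kernel's ARRAY_SIZE() in <linux/kernel.h> */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	static const unsigned int gpiomux[] = { 0x1000, 0x1001, 0x1002, 0x1003 };

	int main(void)
	{
		size_t i;

		/* bound stays correct even if gpiomux[] grows or shrinks */
		for (i = 0; i < ARRAY_SIZE(gpiomux); i++)
			printf("mux[%zu] = 0x%x\n", i, gpiomux[i]);
		return 0;
	}
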
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5720b77ac9a7..1c38723d3169 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -164,6 +164,24 @@ static ssize_t show_card(struct class_device *cd, char *buf)
164static CLASS_DEVICE_ATTR(card, S_IRUGO, show_card, NULL); 164static CLASS_DEVICE_ATTR(card, S_IRUGO, show_card, NULL);
165 165
166/* ----------------------------------------------------------------------- */ 166/* ----------------------------------------------------------------------- */
167/* dvb auto-load setup */
168#if defined(CONFIG_MODULES) && defined(MODULE)
169static void request_module_async(struct work_struct *work)
170{
171 request_module("dvb-bt8xx");
172}
173
174static void request_modules(struct bttv *dev)
175{
176 INIT_WORK(&dev->request_module_wk, request_module_async);
177 schedule_work(&dev->request_module_wk);
178}
179#else
180#define request_modules(dev)
181#endif /* CONFIG_MODULES */
182
183
184/* ----------------------------------------------------------------------- */
167/* static data */ 185/* static data */
168 186
169/* special timing tables from conexant... */ 187/* special timing tables from conexant... */
@@ -4769,9 +4787,11 @@ static int __devinit bttv_probe(struct pci_dev *dev,
4769 disclaim_video_lines(btv); 4787 disclaim_video_lines(btv);
4770 } 4788 }
4771 4789
4772 /* add subdevices */ 4790 /* add subdevices and autoload dvb-bt8xx if needed */
4773 if (bttv_tvcards[btv->c.type].has_dvb) 4791 if (bttv_tvcards[btv->c.type].has_dvb) {
4774 bttv_sub_add_device(&btv->c, "dvb"); 4792 bttv_sub_add_device(&btv->c, "dvb");
4793 request_modules(btv);
4794 }
4775 4795
4776 bttv_input_init(btv); 4796 bttv_input_init(btv);
4777 4797
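
The bttv-driver.c hunks add a small work-queue indirection so that dvb-bt8xx is loaded via request_module() from process context after the card is probed, rather than synchronously inside bttv_probe(); the matching struct work_struct field is added to struct bttv in the bttvp.h hunk further down, and the CONFIG_MODULES/MODULE guard compiles the helpers away on non-modular builds. A stripped-down version of the same idiom, with hypothetical names and module string:

	#include <linux/kmod.h>		/* request_module() */
	#include <linux/workqueue.h>

	struct mydev {
		/* ...driver state... */
		struct work_struct request_module_wk;
	};

	static void request_module_async(struct work_struct *work)
	{
		/* runs later in keventd's process context */
		request_module("some-sub-driver");
	}

	/* called from the probe path once the card is known to need it */
	static void request_modules(struct mydev *dev)
	{
		INIT_WORK(&dev->request_module_wk, request_module_async);
		schedule_work(&dev->request_module_wk);
	}

Deferring the request_module() call presumably keeps the probe path from blocking on modprobe while bttv itself is still being set up.
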
diff --git a/drivers/media/video/bt8xx/bttv-gpio.c b/drivers/media/video/bt8xx/bttv-gpio.c
index ba081f6f8c82..84154c26f9c5 100644
--- a/drivers/media/video/bt8xx/bttv-gpio.c
+++ b/drivers/media/video/bt8xx/bttv-gpio.c
@@ -71,7 +71,6 @@ struct bus_type bttv_sub_bus_type = {
71 .probe = bttv_sub_probe, 71 .probe = bttv_sub_probe,
72 .remove = bttv_sub_remove, 72 .remove = bttv_sub_remove,
73}; 73};
74EXPORT_SYMBOL(bttv_sub_bus_type);
75 74
76static void release_sub_device(struct device *dev) 75static void release_sub_device(struct device *dev)
77{ 76{
@@ -152,7 +151,6 @@ void bttv_gpio_inout(struct bttv_core *core, u32 mask, u32 outbits)
152 btwrite(data,BT848_GPIO_OUT_EN); 151 btwrite(data,BT848_GPIO_OUT_EN);
153 spin_unlock_irqrestore(&btv->gpio_lock,flags); 152 spin_unlock_irqrestore(&btv->gpio_lock,flags);
154} 153}
155EXPORT_SYMBOL(bttv_gpio_inout);
156 154
157u32 bttv_gpio_read(struct bttv_core *core) 155u32 bttv_gpio_read(struct bttv_core *core)
158{ 156{
@@ -162,7 +160,6 @@ u32 bttv_gpio_read(struct bttv_core *core)
162 value = btread(BT848_GPIO_DATA); 160 value = btread(BT848_GPIO_DATA);
163 return value; 161 return value;
164} 162}
165EXPORT_SYMBOL(bttv_gpio_read);
166 163
167void bttv_gpio_write(struct bttv_core *core, u32 value) 164void bttv_gpio_write(struct bttv_core *core, u32 value)
168{ 165{
@@ -170,7 +167,6 @@ void bttv_gpio_write(struct bttv_core *core, u32 value)
170 167
171 btwrite(value,BT848_GPIO_DATA); 168 btwrite(value,BT848_GPIO_DATA);
172} 169}
173EXPORT_SYMBOL(bttv_gpio_write);
174 170
175void bttv_gpio_bits(struct bttv_core *core, u32 mask, u32 bits) 171void bttv_gpio_bits(struct bttv_core *core, u32 mask, u32 bits)
176{ 172{
@@ -185,7 +181,6 @@ void bttv_gpio_bits(struct bttv_core *core, u32 mask, u32 bits)
185 btwrite(data,BT848_GPIO_DATA); 181 btwrite(data,BT848_GPIO_DATA);
186 spin_unlock_irqrestore(&btv->gpio_lock,flags); 182 spin_unlock_irqrestore(&btv->gpio_lock,flags);
187} 183}
188EXPORT_SYMBOL(bttv_gpio_bits);
189 184
190/* 185/*
191 * Local variables: 186 * Local variables:
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 62b873076e09..0dfa49b66418 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -412,7 +412,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
412 unsigned char buf; 412 unsigned char buf;
413 int i,rc; 413 int i,rc;
414 414
415 for (i = 0; i < 128; i++) { 415 for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
416 c->addr = i; 416 c->addr = i;
417 rc = i2c_master_recv(c,&buf,0); 417 rc = i2c_master_recv(c,&buf,0);
418 if (rc < 0) 418 if (rc < 0)
diff --git a/drivers/media/video/bt8xx/bttv-if.c b/drivers/media/video/bt8xx/bttv-if.c
index 19b564ab0e92..ecf07988cd33 100644
--- a/drivers/media/video/bt8xx/bttv-if.c
+++ b/drivers/media/video/bt8xx/bttv-if.c
@@ -33,32 +33,16 @@
33 33
34#include "bttvp.h" 34#include "bttvp.h"
35 35
36EXPORT_SYMBOL(bttv_get_cardinfo);
37EXPORT_SYMBOL(bttv_get_pcidev); 36EXPORT_SYMBOL(bttv_get_pcidev);
38EXPORT_SYMBOL(bttv_get_id);
39EXPORT_SYMBOL(bttv_gpio_enable); 37EXPORT_SYMBOL(bttv_gpio_enable);
40EXPORT_SYMBOL(bttv_read_gpio); 38EXPORT_SYMBOL(bttv_read_gpio);
41EXPORT_SYMBOL(bttv_write_gpio); 39EXPORT_SYMBOL(bttv_write_gpio);
42EXPORT_SYMBOL(bttv_get_gpio_queue);
43EXPORT_SYMBOL(bttv_i2c_call);
44 40
45/* ----------------------------------------------------------------------- */ 41/* ----------------------------------------------------------------------- */
46/* Exported functions - for other modules which want to access the */ 42/* Exported functions - for other modules which want to access the */
47/* gpio ports (IR for example) */ 43/* gpio ports (IR for example) */
48/* see bttv.h for comments */ 44/* see bttv.h for comments */
49 45
50int bttv_get_cardinfo(unsigned int card, int *type, unsigned *cardid)
51{
52 printk("The bttv_* interface is obsolete and will go away,\n"
53 "please use the new, sysfs based interface instead.\n");
54 if (card >= bttv_num) {
55 return -1;
56 }
57 *type = bttvs[card].c.type;
58 *cardid = bttvs[card].cardid;
59 return 0;
60}
61
62struct pci_dev* bttv_get_pcidev(unsigned int card) 46struct pci_dev* bttv_get_pcidev(unsigned int card)
63{ 47{
64 if (card >= bttv_num) 48 if (card >= bttv_num)
@@ -66,16 +50,6 @@ struct pci_dev* bttv_get_pcidev(unsigned int card)
66 return bttvs[card].c.pci; 50 return bttvs[card].c.pci;
67} 51}
68 52
69int bttv_get_id(unsigned int card)
70{
71 printk("The bttv_* interface is obsolete and will go away,\n"
72 "please use the new, sysfs based interface instead.\n");
73 if (card >= bttv_num) {
74 return -1;
75 }
76 return bttvs[card].c.type;
77}
78
79 53
80int bttv_gpio_enable(unsigned int card, unsigned long mask, unsigned long data) 54int bttv_gpio_enable(unsigned int card, unsigned long mask, unsigned long data)
81{ 55{
@@ -130,28 +104,6 @@ int bttv_write_gpio(unsigned int card, unsigned long mask, unsigned long data)
130 return 0; 104 return 0;
131} 105}
132 106
133wait_queue_head_t* bttv_get_gpio_queue(unsigned int card)
134{
135 struct bttv *btv;
136
137 if (card >= bttv_num) {
138 return NULL;
139 }
140
141 btv = &bttvs[card];
142 if (bttvs[card].shutdown) {
143 return NULL;
144 }
145 return &btv->gpioq;
146}
147
148void bttv_i2c_call(unsigned int card, unsigned int cmd, void *arg)
149{
150 if (card >= bttv_num)
151 return;
152 bttv_call_i2c_clients(&bttvs[card], cmd, arg);
153}
154
155/* 107/*
156 * Local variables: 108 * Local variables:
157 * c-basic-offset: 8 109 * c-basic-offset: 8
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index 5491acbdaf63..f821ba69db99 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -168,6 +168,8 @@
168#define BTTV_BOARD_SABRENT_TVFM 0x8e 168#define BTTV_BOARD_SABRENT_TVFM 0x8e
169#define BTTV_BOARD_HAUPPAUGE_IMPACTVCB 0x8f 169#define BTTV_BOARD_HAUPPAUGE_IMPACTVCB 0x8f
170#define BTTV_BOARD_MACHTV_MAGICTV 0x90 170#define BTTV_BOARD_MACHTV_MAGICTV 0x90
171#define BTTV_BOARD_SSAI_SECURITY 0x91
172#define BTTV_BOARD_SSAI_ULTRASOUND 0x92
171 173
172/* more card-specific defines */ 174/* more card-specific defines */
173#define PT2254_L_CHANNEL 0x10 175#define PT2254_L_CHANNEL 0x10
@@ -260,17 +262,8 @@ extern int bttv_handle_chipset(struct bttv *btv);
260/* this obsolete -- please use the sysfs-based 262/* this obsolete -- please use the sysfs-based
261 interface below for new code */ 263 interface below for new code */
262 264
263/* returns card type + card ID (for bt878-based ones)
264 for possible values see lines below beginning with #define BTTV_BOARD_UNKNOWN
265 returns negative value if error occurred
266*/
267extern int bttv_get_cardinfo(unsigned int card, int *type,
268 unsigned int *cardid);
269extern struct pci_dev* bttv_get_pcidev(unsigned int card); 265extern struct pci_dev* bttv_get_pcidev(unsigned int card);
270 266
271/* obsolete, use bttv_get_cardinfo instead */
272extern int bttv_get_id(unsigned int card);
273
274/* sets GPOE register (BT848_GPIO_OUT_EN) to new value: 267/* sets GPOE register (BT848_GPIO_OUT_EN) to new value:
275 data | (current_GPOE_value & ~mask) 268 data | (current_GPOE_value & ~mask)
276 returns negative value if error occurred 269 returns negative value if error occurred
@@ -290,20 +283,6 @@ extern int bttv_read_gpio(unsigned int card, unsigned long *data);
290extern int bttv_write_gpio(unsigned int card, 283extern int bttv_write_gpio(unsigned int card,
291 unsigned long mask, unsigned long data); 284 unsigned long mask, unsigned long data);
292 285
293/* returns pointer to task queue which can be used as parameter to
294 interruptible_sleep_on
295 in interrupt handler if BT848_INT_GPINT bit is set - this queue is activated
296 (wake_up_interruptible) and following call to the function bttv_read_gpio
297 should return new value of GPDATA,
298 returns NULL value if error occurred or queue is not available
299 WARNING: because there is no buffer for GPIO data, one MUST
300 process data ASAP
301*/
302extern wait_queue_head_t* bttv_get_gpio_queue(unsigned int card);
303
304/* call i2c clients
305*/
306extern void bttv_i2c_call(unsigned int card, unsigned int cmd, void *arg);
307 286
308 287
309 288
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index ad79b8d53430..8f44f02029be 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -434,6 +434,9 @@ struct bttv {
434 unsigned int users; 434 unsigned int users;
435 struct bttv_fh init; 435 struct bttv_fh init;
436 436
437 /* used to make dvb-bt8xx autoloadable */
438 struct work_struct request_module_wk;
439
437 /* Default (0) and current (1) video capturing and overlay 440 /* Default (0) and current (1) video capturing and overlay
438 cropping parameters in bttv_tvnorm.cropcap units. Protected 441 cropping parameters in bttv_tvnorm.cropcap units. Protected
439 by bttv.lock. */ 442 by bttv.lock. */
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 710c11a68296..96254dbaf625 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -4,6 +4,7 @@
4 * sensor. 4 * sensor.
5 * 5 *
6 * Copyright 2006 One Laptop Per Child Association, Inc. 6 * Copyright 2006 One Laptop Per Child Association, Inc.
7 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
7 * 8 *
8 * Written by Jonathan Corbet, corbet@lwn.net. 9 * Written by Jonathan Corbet, corbet@lwn.net.
9 * 10 *
@@ -22,6 +23,7 @@
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23#include <linux/videodev2.h> 24#include <linux/videodev2.h>
24#include <media/v4l2-common.h> 25#include <media/v4l2-common.h>
26#include <media/v4l2-chip-ident.h>
25#include <linux/device.h> 27#include <linux/device.h>
26#include <linux/wait.h> 28#include <linux/wait.h>
27#include <linux/list.h> 29#include <linux/list.h>
@@ -36,7 +38,7 @@
36 38
37#include "cafe_ccic-regs.h" 39#include "cafe_ccic-regs.h"
38 40
39#define CAFE_VERSION 0x000001 41#define CAFE_VERSION 0x000002
40 42
41 43
42/* 44/*
@@ -164,7 +166,7 @@ struct cafe_camera
164 struct tasklet_struct s_tasklet; 166 struct tasklet_struct s_tasklet;
165 167
166 /* Current operating parameters */ 168 /* Current operating parameters */
167 enum v4l2_chip_ident sensor_type; /* Currently ov7670 only */ 169 u32 sensor_type; /* Currently ov7670 only */
168 struct v4l2_pix_format pix_format; 170 struct v4l2_pix_format pix_format;
169 171
170 /* Locks */ 172 /* Locks */
@@ -704,7 +706,13 @@ static void cafe_ctlr_init(struct cafe_camera *cam)
704 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */ 706 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
705 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRC); 707 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
706 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRS); 708 cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
709 /*
710 * Here we must wait a bit for the controller to come around.
711 */
712 spin_unlock_irqrestore(&cam->dev_lock, flags);
707 mdelay(5); /* FIXME revisit this */ 713 mdelay(5); /* FIXME revisit this */
714 spin_lock_irqsave(&cam->dev_lock, flags);
715
708 cafe_reg_write(cam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC); 716 cafe_reg_write(cam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
709 cafe_reg_set_bit(cam, REG_GL_IMASK, GIMSK_CCIC_EN); 717 cafe_reg_set_bit(cam, REG_GL_IMASK, GIMSK_CCIC_EN);
710 /* 718 /*
@@ -772,9 +780,9 @@ static void cafe_ctlr_power_up(struct cafe_camera *cam)
772 * Control 1 is power down, set to 0 to operate. 780 * Control 1 is power down, set to 0 to operate.
773 */ 781 */
774 cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */ 782 cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
775 mdelay(1); /* Marvell says 1ms will do it */ 783// mdelay(1); /* Marvell says 1ms will do it */
776 cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0); 784 cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
777 mdelay(1); /* Enough? */ 785// mdelay(1); /* Enough? */
778 spin_unlock_irqrestore(&cam->dev_lock, flags); 786 spin_unlock_irqrestore(&cam->dev_lock, flags);
779} 787}
780 788
@@ -818,6 +826,7 @@ static int __cafe_cam_reset(struct cafe_camera *cam)
818 */ 826 */
819static int cafe_cam_init(struct cafe_camera *cam) 827static int cafe_cam_init(struct cafe_camera *cam)
820{ 828{
829 struct v4l2_chip_ident chip = { V4L2_CHIP_MATCH_I2C_ADDR, 0, 0, 0 };
821 int ret; 830 int ret;
822 831
823 mutex_lock(&cam->s_mutex); 832 mutex_lock(&cam->s_mutex);
@@ -827,9 +836,11 @@ static int cafe_cam_init(struct cafe_camera *cam)
827 ret = __cafe_cam_reset(cam); 836 ret = __cafe_cam_reset(cam);
828 if (ret) 837 if (ret)
829 goto out; 838 goto out;
830 ret = __cafe_cam_cmd(cam, VIDIOC_INT_G_CHIP_IDENT, &cam->sensor_type); 839 chip.match_chip = cam->sensor->addr;
840 ret = __cafe_cam_cmd(cam, VIDIOC_G_CHIP_IDENT, &chip);
831 if (ret) 841 if (ret)
832 goto out; 842 goto out;
843 cam->sensor_type = chip.ident;
833// if (cam->sensor->addr != OV7xx0_SID) { 844// if (cam->sensor->addr != OV7xx0_SID) {
834 if (cam->sensor_type != V4L2_IDENT_OV7670) { 845 if (cam->sensor_type != V4L2_IDENT_OV7670) {
835 cam_err(cam, "Unsupported sensor type %d", cam->sensor->addr); 846 cam_err(cam, "Unsupported sensor type %d", cam->sensor->addr);
@@ -1792,18 +1803,19 @@ static void cafe_frame_tasklet(unsigned long data)
1792 if (list_empty(&cam->sb_avail)) 1803 if (list_empty(&cam->sb_avail))
1793 break; /* Leave it valid, hope for better later */ 1804 break; /* Leave it valid, hope for better later */
1794 clear_bit(bufno, &cam->flags); 1805 clear_bit(bufno, &cam->flags);
1795 /*
1796 * We could perhaps drop the spinlock during this
1797 * big copy. Something to consider.
1798 */
1799 sbuf = list_entry(cam->sb_avail.next, 1806 sbuf = list_entry(cam->sb_avail.next,
1800 struct cafe_sio_buffer, list); 1807 struct cafe_sio_buffer, list);
1808 /*
1809 * Drop the lock during the big copy. This *should* be safe...
1810 */
1811 spin_unlock_irqrestore(&cam->dev_lock, flags);
1801 memcpy(sbuf->buffer, cam->dma_bufs[bufno], 1812 memcpy(sbuf->buffer, cam->dma_bufs[bufno],
1802 cam->pix_format.sizeimage); 1813 cam->pix_format.sizeimage);
1803 sbuf->v4lbuf.bytesused = cam->pix_format.sizeimage; 1814 sbuf->v4lbuf.bytesused = cam->pix_format.sizeimage;
1804 sbuf->v4lbuf.sequence = cam->buf_seq[bufno]; 1815 sbuf->v4lbuf.sequence = cam->buf_seq[bufno];
1805 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED; 1816 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
1806 sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE; 1817 sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
1818 spin_lock_irqsave(&cam->dev_lock, flags);
1807 list_move_tail(&sbuf->list, &cam->sb_full); 1819 list_move_tail(&sbuf->list, &cam->sb_full);
1808 } 1820 }
1809 if (! list_empty(&cam->sb_full)) 1821 if (! list_empty(&cam->sb_full))
@@ -2107,6 +2119,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
2107 cam->v4ldev = cafe_v4l_template; 2119 cam->v4ldev = cafe_v4l_template;
2108 cam->v4ldev.debug = 0; 2120 cam->v4ldev.debug = 0;
2109// cam->v4ldev.debug = V4L2_DEBUG_IOCTL_ARG; 2121// cam->v4ldev.debug = V4L2_DEBUG_IOCTL_ARG;
2122 cam->v4ldev.dev = &pdev->dev;
2110 ret = video_register_device(&cam->v4ldev, VFL_TYPE_GRABBER, -1); 2123 ret = video_register_device(&cam->v4ldev, VFL_TYPE_GRABBER, -1);
2111 if (ret) 2124 if (ret)
2112 goto out_smbus; 2125 goto out_smbus;
@@ -2176,10 +2189,52 @@ static void cafe_pci_remove(struct pci_dev *pdev)
2176} 2189}
2177 2190
2178 2191
2192#ifdef CONFIG_PM
2193/*
2194 * Basic power management.
2195 */
2196static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2197{
2198 struct cafe_camera *cam = cafe_find_by_pdev(pdev);
2199 int ret;
2200
2201 ret = pci_save_state(pdev);
2202 if (ret)
2203 return ret;
2204 cafe_ctlr_stop_dma(cam);
2205 cafe_ctlr_power_down(cam);
2206 pci_disable_device(pdev);
2207 return 0;
2208}
2209
2210
2211static int cafe_pci_resume(struct pci_dev *pdev)
2212{
2213 struct cafe_camera *cam = cafe_find_by_pdev(pdev);
2214 int ret = 0;
2215
2216 ret = pci_restore_state(pdev);
2217 if (ret)
2218 return ret;
2219 ret = pci_enable_device(pdev);
2220 if (ret) {
2221 cam_warn(cam, "Unable to re-enable device on resume!\n");
2222 return ret;
2223 }
2224 cafe_ctlr_init(cam);
2225 cafe_ctlr_power_up(cam);
2226 set_bit(CF_CONFIG_NEEDED, &cam->flags);
2227 if (cam->state == S_SPECREAD)
2228 cam->state = S_IDLE; /* Don't bother restarting */
2229 else if (cam->state == S_SINGLEREAD || cam->state == S_STREAMING)
2230 ret = cafe_read_setup(cam, cam->state);
2231 return ret;
2232}
2233
2234#endif /* CONFIG_PM */
2179 2235
2180 2236
2181static struct pci_device_id cafe_ids[] = { 2237static struct pci_device_id cafe_ids[] = {
2182 { PCI_DEVICE(0x1148, 0x4340) }, /* Temporary ID on devel board */
2183 { PCI_DEVICE(0x11ab, 0x4100) }, /* Eventual real ID */ 2238 { PCI_DEVICE(0x11ab, 0x4100) }, /* Eventual real ID */
2184 { PCI_DEVICE(0x11ab, 0x4102) }, /* Really eventual real ID */ 2239 { PCI_DEVICE(0x11ab, 0x4102) }, /* Really eventual real ID */
2185 { 0, } 2240 { 0, }
@@ -2192,6 +2247,10 @@ static struct pci_driver cafe_pci_driver = {
2192 .id_table = cafe_ids, 2247 .id_table = cafe_ids,
2193 .probe = cafe_pci_probe, 2248 .probe = cafe_pci_probe,
2194 .remove = cafe_pci_remove, 2249 .remove = cafe_pci_remove,
2250#ifdef CONFIG_PM
2251 .suspend = cafe_pci_suspend,
2252 .resume = cafe_pci_resume,
2253#endif
2195}; 2254};
2196 2255
2197 2256
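
The cafe_ccic hunks above wire basic power management into the legacy pci_driver suspend/resume callbacks: save PCI config state and stop the hardware on suspend, re-enable the device and reprogram the controller from scratch on resume. A minimal sketch of that pattern for a hypothetical driver follows; every foo_* name is invented, and only the PCI helper calls are the ones the patch itself uses.

#include <linux/pci.h>

struct foo_camera {
        int dummy;                      /* stand-in for real per-device state */
};

static struct foo_camera foo_instance;

static struct foo_camera *foo_find_by_pdev(struct pci_dev *pdev)
{
        return &foo_instance;           /* real code looks this up from a device list */
}

static void foo_stop_hw(struct foo_camera *cam) { }
static void foo_start_hw(struct foo_camera *cam) { }

#ifdef CONFIG_PM
static int foo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct foo_camera *cam = foo_find_by_pdev(pdev);
        int ret = pci_save_state(pdev); /* keep config space for resume */

        if (ret)
                return ret;
        foo_stop_hw(cam);               /* quiesce DMA before cutting power */
        pci_disable_device(pdev);
        return 0;
}

static int foo_pci_resume(struct pci_dev *pdev)
{
        struct foo_camera *cam = foo_find_by_pdev(pdev);
        int ret = pci_restore_state(pdev);

        if (ret)
                return ret;
        ret = pci_enable_device(pdev);
        if (ret)
                return ret;
        foo_start_hw(cam);              /* reinitialize the controller from scratch */
        return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver foo_pci_driver = {
        .name    = "foo",
        /* .id_table, .probe and .remove omitted from this sketch */
#ifdef CONFIG_PM
        .suspend = foo_pci_suspend,
        .resume  = foo_pci_resume,
#endif
};
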
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index b12cec94f4cc..19711aaf9a3e 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -62,7 +62,6 @@ static int cpia_pp_close(void *privdata);
62#define PPCPIA_PARPORT_OFF -2 62#define PPCPIA_PARPORT_OFF -2
63#define PPCPIA_PARPORT_NONE -1 63#define PPCPIA_PARPORT_NONE -1
64 64
65#ifdef MODULE
66static int parport_nr[PARPORT_MAX] = {[0 ... PARPORT_MAX - 1] = PPCPIA_PARPORT_UNSPEC}; 65static int parport_nr[PARPORT_MAX] = {[0 ... PARPORT_MAX - 1] = PPCPIA_PARPORT_UNSPEC};
67static char *parport[PARPORT_MAX] = {NULL,}; 66static char *parport[PARPORT_MAX] = {NULL,};
68 67
@@ -72,11 +71,6 @@ MODULE_LICENSE("GPL");
72 71
73module_param_array(parport, charp, NULL, 0); 72module_param_array(parport, charp, NULL, 0);
74MODULE_PARM_DESC(parport, "'auto' or a list of parallel port numbers. Just like lp."); 73MODULE_PARM_DESC(parport, "'auto' or a list of parallel port numbers. Just like lp.");
75#else
76static int parport_nr[PARPORT_MAX] __initdata =
77 {[0 ... PARPORT_MAX - 1] = PPCPIA_PARPORT_UNSPEC};
78static int parport_ptr = 0;
79#endif
80 74
81struct pp_cam_entry { 75struct pp_cam_entry {
82 struct pardevice *pdev; 76 struct pardevice *pdev;
@@ -141,7 +135,6 @@ static void cpia_pp_run_callback(struct work_struct *work)
141 cam = container_of(work, struct pp_cam_entry, cb_task); 135 cam = container_of(work, struct pp_cam_entry, cb_task);
142 cb_func = cam->cb_func; 136 cb_func = cam->cb_func;
143 cb_data = cam->cb_data; 137 cb_data = cam->cb_data;
144 work_release(work);
145 138
146 cb_func(cb_data); 139 cb_func(cb_data);
147} 140}
@@ -682,7 +675,7 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo
682 if(cam->port->irq != PARPORT_IRQ_NONE) { 675 if(cam->port->irq != PARPORT_IRQ_NONE) {
683 cam->cb_func = cb; 676 cam->cb_func = cb;
684 cam->cb_data = cbdata; 677 cam->cb_data = cbdata;
685 INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback); 678 INIT_WORK(&cam->cb_task, cpia_pp_run_callback);
686 } else { 679 } else {
687 retval = -1; 680 retval = -1;
688 } 681 }
@@ -820,7 +813,7 @@ static struct parport_driver cpia_pp_driver = {
820 .detach = cpia_pp_detach, 813 .detach = cpia_pp_detach,
821}; 814};
822 815
823static int cpia_pp_init(void) 816static int __init cpia_pp_init(void)
824{ 817{
825 printk(KERN_INFO "%s v%d.%d.%d\n",ABOUT, 818 printk(KERN_INFO "%s v%d.%d.%d\n",ABOUT,
826 CPIA_PP_MAJ_VER,CPIA_PP_MIN_VER,CPIA_PP_PATCH_VER); 819 CPIA_PP_MAJ_VER,CPIA_PP_MIN_VER,CPIA_PP_PATCH_VER);
@@ -839,8 +832,7 @@ static int cpia_pp_init(void)
839 return 0; 832 return 0;
840} 833}
841 834
842#ifdef MODULE 835static int __init cpia_init(void)
843int init_module(void)
844{ 836{
845 if (parport[0]) { 837 if (parport[0]) {
846 /* The user gave some parameters. Let's see what they were. */ 838 /* The user gave some parameters. Let's see what they were. */
@@ -867,38 +859,11 @@ int init_module(void)
867 return cpia_pp_init(); 859 return cpia_pp_init();
868} 860}
869 861
870void cleanup_module(void) 862static void __exit cpia_cleanup(void)
871{ 863{
872 parport_unregister_driver (&cpia_pp_driver); 864 parport_unregister_driver(&cpia_pp_driver);
873 return; 865 return;
874} 866}
875 867
876#else /* !MODULE */ 868module_init(cpia_init);
877 869module_exit(cpia_cleanup);
878static int __init cpia_pp_setup(char *str)
879{
880 int err;
881
882 if (!strncmp(str, "parport", 7)) {
883 int n = simple_strtoul(str + 7, NULL, 10);
884 if (parport_ptr < PARPORT_MAX) {
885 parport_nr[parport_ptr++] = n;
886 } else {
887 LOG("too many ports, %s ignored.\n", str);
888 }
889 } else if (!strcmp(str, "auto")) {
890 parport_nr[0] = PPCPIA_PARPORT_AUTO;
891 } else if (!strcmp(str, "none")) {
892 parport_nr[parport_ptr++] = PPCPIA_PARPORT_NONE;
893 }
894
895 err=cpia_pp_init();
896 if (err)
897 return err;
898
899 return 1;
900}
901
902__setup("cpia_pp=", cpia_pp_setup);
903
904#endif /* !MODULE */
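
The cpia_pp change above retires the hand-rolled init_module()/cleanup_module() pair and the MODULE/!MODULE split around __setup() in favour of plain module_init()/module_exit() registration. A bare-bones sketch of that convention, with invented foo_* names:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init foo_init(void)
{
        /* do the registration work here; return 0 on success,
           a negative errno on failure */
        printk(KERN_INFO "foo: loaded\n");
        return 0;
}

static void __exit foo_exit(void)
{
        /* undo everything foo_init() did */
        printk(KERN_INFO "foo: unloaded\n");
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Built as a module this behaves like the old init_module()/cleanup_module(); built in, foo_init() simply runs at boot, which is what the removed #else branch was reproducing by hand.
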
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index de87247c74ee..a73e285af730 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -28,6 +28,7 @@
28#include <linux/i2c-id.h> 28#include <linux/i2c-id.h>
29#include <linux/videodev.h> 29#include <linux/videodev.h>
30#include <media/v4l2-common.h> 30#include <media/v4l2-common.h>
31#include <media/v4l2-chip-ident.h>
31 32
32MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC"); 33MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
33MODULE_AUTHOR("Martin Vaughan"); 34MODULE_AUTHOR("Martin Vaughan");
@@ -103,6 +104,9 @@ static int cs53l32a_command(struct i2c_client *client, unsigned int cmd,
103 cs53l32a_write(client, 0x05, (u8) ctrl->value); 104 cs53l32a_write(client, 0x05, (u8) ctrl->value);
104 break; 105 break;
105 106
107 case VIDIOC_G_CHIP_IDENT:
108 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_CS53l32A, 0);
109
106 case VIDIOC_LOG_STATUS: 110 case VIDIOC_LOG_STATUS:
107 { 111 {
108 u8 v = cs53l32a_read(client, 0x01); 112 u8 v = cs53l32a_read(client, 0x01);
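
cs53l32a above (and cx25840 below) gain a VIDIOC_G_CHIP_IDENT case that answers through v4l2_chip_ident_i2c_client(), the helper these hunks call with (client, arg, ident, revision). A stripped-down command handler for a hypothetical i2c client driver is sketched here; the ident and revision values are placeholders (0), and the headers are the same ones the patch includes:

#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>

static int foo_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
        switch (cmd) {
        case VIDIOC_G_CHIP_IDENT:
                /* fills in the caller's struct v4l2_chip_ident if the
                   match criteria apply to this client */
                return v4l2_chip_ident_i2c_client(client, arg, 0, 0);
        default:
                return -EINVAL;
        }
}
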
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index d60cd5ecf821..88dbdddeec42 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -51,6 +51,7 @@ const u32 cx2341x_mpeg_ctrls[] = {
51 V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, 51 V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
52 V4L2_CID_MPEG_AUDIO_EMPHASIS, 52 V4L2_CID_MPEG_AUDIO_EMPHASIS,
53 V4L2_CID_MPEG_AUDIO_CRC, 53 V4L2_CID_MPEG_AUDIO_CRC,
54 V4L2_CID_MPEG_AUDIO_MUTE,
54 V4L2_CID_MPEG_VIDEO_ENCODING, 55 V4L2_CID_MPEG_VIDEO_ENCODING,
55 V4L2_CID_MPEG_VIDEO_ASPECT, 56 V4L2_CID_MPEG_VIDEO_ASPECT,
56 V4L2_CID_MPEG_VIDEO_B_FRAMES, 57 V4L2_CID_MPEG_VIDEO_B_FRAMES,
@@ -60,6 +61,8 @@ const u32 cx2341x_mpeg_ctrls[] = {
60 V4L2_CID_MPEG_VIDEO_BITRATE, 61 V4L2_CID_MPEG_VIDEO_BITRATE,
61 V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, 62 V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
62 V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, 63 V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION,
64 V4L2_CID_MPEG_VIDEO_MUTE,
65 V4L2_CID_MPEG_VIDEO_MUTE_YUV,
63 V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, 66 V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
64 V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, 67 V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
65 V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, 68 V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
@@ -71,6 +74,7 @@ const u32 cx2341x_mpeg_ctrls[] = {
71 V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, 74 V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
72 V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, 75 V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
73 V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, 76 V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
77 V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS,
74 0 78 0
75}; 79};
76 80
@@ -102,6 +106,9 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params,
102 case V4L2_CID_MPEG_AUDIO_CRC: 106 case V4L2_CID_MPEG_AUDIO_CRC:
103 ctrl->value = params->audio_crc; 107 ctrl->value = params->audio_crc;
104 break; 108 break;
109 case V4L2_CID_MPEG_AUDIO_MUTE:
110 ctrl->value = params->audio_mute;
111 break;
105 case V4L2_CID_MPEG_VIDEO_ENCODING: 112 case V4L2_CID_MPEG_VIDEO_ENCODING:
106 ctrl->value = params->video_encoding; 113 ctrl->value = params->video_encoding;
107 break; 114 break;
@@ -129,6 +136,12 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params,
129 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: 136 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
130 ctrl->value = params->video_temporal_decimation; 137 ctrl->value = params->video_temporal_decimation;
131 break; 138 break;
139 case V4L2_CID_MPEG_VIDEO_MUTE:
140 ctrl->value = params->video_mute;
141 break;
142 case V4L2_CID_MPEG_VIDEO_MUTE_YUV:
143 ctrl->value = params->video_mute_yuv;
144 break;
132 case V4L2_CID_MPEG_STREAM_TYPE: 145 case V4L2_CID_MPEG_STREAM_TYPE:
133 ctrl->value = params->stream_type; 146 ctrl->value = params->stream_type;
134 break; 147 break;
@@ -168,6 +181,9 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params,
168 case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: 181 case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
169 ctrl->value = params->video_chroma_median_filter_bottom; 182 ctrl->value = params->video_chroma_median_filter_bottom;
170 break; 183 break;
184 case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
185 ctrl->value = params->stream_insert_nav_packets;
186 break;
171 default: 187 default:
172 return -EINVAL; 188 return -EINVAL;
173 } 189 }
@@ -201,6 +217,9 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params,
201 case V4L2_CID_MPEG_AUDIO_CRC: 217 case V4L2_CID_MPEG_AUDIO_CRC:
202 params->audio_crc = ctrl->value; 218 params->audio_crc = ctrl->value;
203 break; 219 break;
220 case V4L2_CID_MPEG_AUDIO_MUTE:
221 params->audio_mute = ctrl->value;
222 break;
204 case V4L2_CID_MPEG_VIDEO_ASPECT: 223 case V4L2_CID_MPEG_VIDEO_ASPECT:
205 params->video_aspect = ctrl->value; 224 params->video_aspect = ctrl->value;
206 break; 225 break;
@@ -243,6 +262,12 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params,
243 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: 262 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
244 params->video_temporal_decimation = ctrl->value; 263 params->video_temporal_decimation = ctrl->value;
245 break; 264 break;
265 case V4L2_CID_MPEG_VIDEO_MUTE:
266 params->video_mute = (ctrl->value != 0);
267 break;
268 case V4L2_CID_MPEG_VIDEO_MUTE_YUV:
269 params->video_mute_yuv = ctrl->value;
270 break;
246 case V4L2_CID_MPEG_STREAM_TYPE: 271 case V4L2_CID_MPEG_STREAM_TYPE:
247 params->stream_type = ctrl->value; 272 params->stream_type = ctrl->value;
248 params->video_encoding = 273 params->video_encoding =
@@ -290,6 +315,9 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params,
290 case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: 315 case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
291 params->video_chroma_median_filter_bottom = ctrl->value; 316 params->video_chroma_median_filter_bottom = ctrl->value;
292 break; 317 break;
318 case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
319 params->stream_insert_nav_packets = ctrl->value;
320 break;
293 default: 321 default:
294 return -EINVAL; 322 return -EINVAL;
295 } 323 }
@@ -336,6 +364,9 @@ static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 ma
336 case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: 364 case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
337 name = "Median Chroma Filter Minimum"; 365 name = "Median Chroma Filter Minimum";
338 break; 366 break;
367 case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
368 name = "Insert Navigation Packets";
369 break;
339 370
340 default: 371 default:
341 return v4l2_ctrl_query_fill(qctrl, min, max, step, def); 372 return v4l2_ctrl_query_fill(qctrl, min, max, step, def);
@@ -350,6 +381,12 @@ static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 ma
350 min = 0; 381 min = 0;
351 step = 1; 382 step = 1;
352 break; 383 break;
384 case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
385 qctrl->type = V4L2_CTRL_TYPE_BOOLEAN;
386 min = 0;
387 max = 1;
388 step = 1;
389 break;
353 default: 390 default:
354 qctrl->type = V4L2_CTRL_TYPE_INTEGER; 391 qctrl->type = V4L2_CTRL_TYPE_INTEGER;
355 break; 392 break;
@@ -505,6 +542,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params, struct v4l2_queryctrl
505 qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; 542 qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
506 return 0; 543 return 0;
507 544
545 case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
546 return cx2341x_ctrl_query_fill(qctrl, 0, 1, 1, 0);
547
508 default: 548 default:
509 return v4l2_ctrl_query_fill_std(qctrl); 549 return v4l2_ctrl_query_fill_std(qctrl);
510 550
@@ -656,6 +696,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
656 /* stream */ 696 /* stream */
657 .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS, 697 .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
658 .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE, 698 .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE,
699 .stream_insert_nav_packets = 0,
659 700
660 /* audio */ 701 /* audio */
661 .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000, 702 .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000,
@@ -665,6 +706,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
665 .audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, 706 .audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4,
666 .audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE, 707 .audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE,
667 .audio_crc = V4L2_MPEG_AUDIO_CRC_NONE, 708 .audio_crc = V4L2_MPEG_AUDIO_CRC_NONE,
709 .audio_mute = 0,
668 710
669 /* video */ 711 /* video */
670 .video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 712 .video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2,
@@ -676,6 +718,8 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
676 .video_bitrate = 6000000, 718 .video_bitrate = 6000000,
677 .video_bitrate_peak = 8000000, 719 .video_bitrate_peak = 8000000,
678 .video_temporal_decimation = 0, 720 .video_temporal_decimation = 0,
721 .video_mute = 0,
722 .video_mute_yuv = 0x008080, /* YCbCr value for black */
679 723
680 /* encoding filters */ 724 /* encoding filters */
681 .video_spatial_filter_mode = V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, 725 .video_spatial_filter_mode = V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL,
@@ -779,6 +823,10 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
779 err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new->audio_properties); 823 err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new->audio_properties);
780 if (err) return err; 824 if (err) return err;
781 } 825 }
826 if (old == NULL || old->audio_mute != new->audio_mute) {
827 err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1, new->audio_mute);
828 if (err) return err;
829 }
782 if (old == NULL || old->video_bitrate_mode != new->video_bitrate_mode || 830 if (old == NULL || old->video_bitrate_mode != new->video_bitrate_mode ||
783 old->video_bitrate != new->video_bitrate || 831 old->video_bitrate != new->video_bitrate ||
784 old->video_bitrate_peak != new->video_bitrate_peak) { 832 old->video_bitrate_peak != new->video_bitrate_peak) {
@@ -826,6 +874,15 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
826 new->video_temporal_decimation); 874 new->video_temporal_decimation);
827 if (err) return err; 875 if (err) return err;
828 } 876 }
877 if (old == NULL || old->video_mute != new->video_mute ||
878 (new->video_mute && old->video_mute_yuv != new->video_mute_yuv)) {
879 err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1, new->video_mute | (new->video_mute_yuv << 8));
880 if (err) return err;
881 }
882 if (old == NULL || old->stream_insert_nav_packets != new->stream_insert_nav_packets) {
883 err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2, 7, new->stream_insert_nav_packets);
884 if (err) return err;
885 }
829 return 0; 886 return 0;
830} 887}
831 888
@@ -854,18 +911,22 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix)
854 int temporal = p->video_temporal_filter; 911 int temporal = p->video_temporal_filter;
855 912
856 /* Stream */ 913 /* Stream */
857 printk(KERN_INFO "%s: Stream: %s\n", 914 printk(KERN_INFO "%s: Stream: %s",
858 prefix, 915 prefix,
859 cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); 916 cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
917 if (p->stream_insert_nav_packets)
918 printk(" (with navigation packets)");
919 printk("\n");
860 printk(KERN_INFO "%s: VBI Format: %s\n", 920 printk(KERN_INFO "%s: VBI Format: %s\n",
861 prefix, 921 prefix,
862 cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT)); 922 cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT));
863 923
864 /* Video */ 924 /* Video */
865 printk(KERN_INFO "%s: Video: %dx%d, %d fps\n", 925 printk(KERN_INFO "%s: Video: %dx%d, %d fps%s\n",
866 prefix, 926 prefix,
867 p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), 927 p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1),
868 p->is_50hz ? 25 : 30); 928 p->is_50hz ? 25 : 30,
929 (p->video_mute) ? " (muted)" : "");
869 printk(KERN_INFO "%s: Video: %s, %s, %s, %d", 930 printk(KERN_INFO "%s: Video: %s, %s, %s, %d",
870 prefix, 931 prefix,
871 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), 932 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING),
@@ -886,12 +947,13 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix)
886 } 947 }
887 948
888 /* Audio */ 949 /* Audio */
889 printk(KERN_INFO "%s: Audio: %s, %s, %s, %s", 950 printk(KERN_INFO "%s: Audio: %s, %s, %s, %s%s",
890 prefix, 951 prefix,
891 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), 952 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ),
892 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), 953 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING),
893 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE), 954 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE),
894 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE)); 955 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE),
956 p->audio_mute ? " (muted)" : "");
895 if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) { 957 if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) {
896 printk(", %s", 958 printk(", %s",
897 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION)); 959 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION));
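
Each addition in the cx2341x hunks follows one recipe: a new field in the parameter struct, matching cases in the get/set/query switches, and a guarded firmware call in cx2341x_update(). The get/set half of that recipe, condensed into a self-contained sketch around a hypothetical mute flag (the generic V4L2_CID_AUDIO_MUTE id stands in for the MPEG-class control):

#include <linux/types.h>
#include <linux/videodev2.h>

struct foo_params {
        u16 audio_mute;                 /* 0 = normal, 1 = muted */
};

static int foo_get_ctrl(struct foo_params *p, struct v4l2_control *ctrl)
{
        switch (ctrl->id) {
        case V4L2_CID_AUDIO_MUTE:
                ctrl->value = p->audio_mute;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int foo_set_ctrl(struct foo_params *p, struct v4l2_control *ctrl)
{
        switch (ctrl->id) {
        case V4L2_CID_AUDIO_MUTE:
                p->audio_mute = (ctrl->value != 0);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

The update side then compares the old and new parameter structs and only issues the firmware command when the field actually changed, which is why the new mute test in cx2341x_update() above is written as old == NULL || old->audio_mute != new->audio_mute.
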
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 774d2536555b..1757a588970f 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -35,6 +35,7 @@
35#include <linux/videodev2.h> 35#include <linux/videodev2.h>
36#include <linux/i2c.h> 36#include <linux/i2c.h>
37#include <media/v4l2-common.h> 37#include <media/v4l2-common.h>
38#include <media/v4l2-chip-ident.h>
38#include <media/cx25840.h> 39#include <media/cx25840.h>
39 40
40#include "cx25840-core.h" 41#include "cx25840-core.h"
@@ -827,9 +828,8 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
827 cx25840_initialize(client, 0); 828 cx25840_initialize(client, 0);
828 break; 829 break;
829 830
830 case VIDIOC_INT_G_CHIP_IDENT: 831 case VIDIOC_G_CHIP_IDENT:
831 *(enum v4l2_chip_ident *)arg = state->id; 832 return v4l2_chip_ident_i2c_client(client, arg, state->id, state->rev);
832 break;
833 833
834 default: 834 default:
835 return -EINVAL; 835 return -EINVAL;
@@ -847,7 +847,7 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
847{ 847{
848 struct i2c_client *client; 848 struct i2c_client *client;
849 struct cx25840_state *state; 849 struct cx25840_state *state;
850 enum v4l2_chip_ident id; 850 u32 id;
851 u16 device_id; 851 u16 device_id;
852 852
853 /* Check if the adapter supports the needed features 853 /* Check if the adapter supports the needed features
@@ -902,6 +902,7 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
902 state->audmode = V4L2_TUNER_MODE_LANG1; 902 state->audmode = V4L2_TUNER_MODE_LANG1;
903 state->vbi_line_offset = 8; 903 state->vbi_line_offset = 8;
904 state->id = id; 904 state->id = id;
905 state->rev = device_id;
905 906
906 i2c_attach_client(client); 907 i2c_attach_client(client);
907 908
diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h
index 28049064dd7d..f4b56d2fd6b6 100644
--- a/drivers/media/video/cx25840/cx25840-core.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -43,7 +43,8 @@ struct cx25840_state {
43 u32 audclk_freq; 43 u32 audclk_freq;
44 int audmode; 44 int audmode;
45 int vbi_line_offset; 45 int vbi_line_offset;
46 enum v4l2_chip_ident id; 46 u32 id;
47 u32 rev;
47 int is_cx25836; 48 int is_cx25836;
48}; 49};
49 50
diff --git a/drivers/media/video/cx25840/cx25840-firmware.c b/drivers/media/video/cx25840/cx25840-firmware.c
index 0e86b9d033ac..e852024a5ea3 100644
--- a/drivers/media/video/cx25840/cx25840-firmware.c
+++ b/drivers/media/video/cx25840/cx25840-firmware.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/i2c-algo-bit.h>
21#include <linux/firmware.h> 20#include <linux/firmware.h>
22#include <media/v4l2-common.h> 21#include <media/v4l2-common.h>
23#include <media/cx25840.h> 22#include <media/cx25840.h>
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index b2a66ba625f9..0f9d96963618 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -53,7 +53,6 @@ config VIDEO_CX88_DVB
53 select DVB_OR51132 if !DVB_FE_CUSTOMISE 53 select DVB_OR51132 if !DVB_FE_CUSTOMISE
54 select DVB_CX22702 if !DVB_FE_CUSTOMISE 54 select DVB_CX22702 if !DVB_FE_CUSTOMISE
55 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 55 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
56 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
57 select DVB_NXT200X if !DVB_FE_CUSTOMISE 56 select DVB_NXT200X if !DVB_FE_CUSTOMISE
58 select DVB_CX24123 if !DVB_FE_CUSTOMISE 57 select DVB_CX24123 if !DVB_FE_CUSTOMISE
59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 58 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index e4355fdc3b6d..3956c257556c 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -232,7 +232,8 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
232 cx_write(MO_AUD_INTSTAT, status); 232 cx_write(MO_AUD_INTSTAT, status);
233 if (debug > 1 || (status & mask & ~0xff)) 233 if (debug > 1 || (status & mask & ~0xff))
234 cx88_print_irqbits(core->name, "irq aud", 234 cx88_print_irqbits(core->name, "irq aud",
235 cx88_aud_irqs, status, mask); 235 cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs),
236 status, mask);
236 /* risc op code error */ 237 /* risc op code error */
237 if (status & (1 << 16)) { 238 if (status & (1 << 16)) {
238 printk(KERN_WARNING "%s/0: audio risc op code error\n",core->name); 239 printk(KERN_WARNING "%s/0: audio risc op code error\n",core->name);
@@ -413,11 +414,9 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
413 414
414 dprintk(1,"Setting buffer\n"); 415 dprintk(1,"Setting buffer\n");
415 416
416 buf = kmalloc(sizeof(*buf),GFP_KERNEL); 417 buf = kzalloc(sizeof(*buf),GFP_KERNEL);
417 if (NULL == buf) 418 if (NULL == buf)
418 return -ENOMEM; 419 return -ENOMEM;
419 memset(buf,0,sizeof(*buf));
420
421 420
422 buf->vb.memory = V4L2_MEMORY_MMAP; 421 buf->vb.memory = V4L2_MEMORY_MMAP;
423 buf->vb.width = chip->period_size; 422 buf->vb.width = chip->period_size;
@@ -682,7 +681,7 @@ static int __devinit snd_cx88_create(struct snd_card *card,
682 return err; 681 return err;
683 } 682 }
684 683
685 if (!pci_dma_supported(pci,0xffffffff)) { 684 if (!pci_dma_supported(pci,DMA_32BIT_MASK)) {
686 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); 685 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
687 err = -EIO; 686 err = -EIO;
688 cx88_core_put(core,pci); 687 cx88_core_put(core,pci);
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 65e9d8096b74..e61102dc8ad7 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -885,6 +885,12 @@ struct cx88_board cx88_boards[] = {
885 .input = {{ 885 .input = {{
886 .type = CX88_VMUX_DVB, 886 .type = CX88_VMUX_DVB,
887 .vmux = 0, 887 .vmux = 0,
888 },{
889 .type = CX88_VMUX_COMPOSITE1,
890 .vmux = 1,
891 },{
892 .type = CX88_VMUX_SVIDEO,
893 .vmux = 2,
888 }}, 894 }},
889 .mpeg = CX88_MPEG_DVB, 895 .mpeg = CX88_MPEG_DVB,
890 }, 896 },
@@ -1537,10 +1543,10 @@ struct cx88_subid cx88_subids[] = {
1537 },{ 1543 },{
1538 .subvendor = 0x17de, 1544 .subvendor = 0x17de,
1539 .subdevice = 0x0840, 1545 .subdevice = 0x0840,
1540 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT, 1546 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
1541 },{ 1547 },{
1542 .subvendor = 0x1421, 1548 .subvendor = 0x1421,
1543 .subdevice = 0x0305, 1549 .subdevice = 0x0305,
1544 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT, 1550 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
1545 },{ 1551 },{
1546 .subvendor = 0x18ac, 1552 .subvendor = 0x18ac,
@@ -1631,6 +1637,10 @@ struct cx88_subid cx88_subids[] = {
1631 .subvendor = 0x0070, 1637 .subvendor = 0x0070,
1632 .subdevice = 0x1402, 1638 .subdevice = 0x1402,
1633 .card = CX88_BOARD_HAUPPAUGE_HVR3000, 1639 .card = CX88_BOARD_HAUPPAUGE_HVR3000,
1640 },{
1641 .subvendor = 0x1421,
1642 .subdevice = 0x0341, /* ADS Tech InstantTV DVB-S */
1643 .card = CX88_BOARD_KWORLD_DVBS_100,
1634 }, 1644 },
1635}; 1645};
1636const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids); 1646const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids);
@@ -1786,7 +1796,7 @@ static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
1786 { 0x03, 0x0C }, 1796 { 0x03, 0x0C },
1787 }; 1797 };
1788 1798
1789 for (i = 0; i < 13; i++) { 1799 for (i = 0; i < ARRAY_SIZE(init_bufs); i++) {
1790 msg.buf = init_bufs[i]; 1800 msg.buf = init_bufs[i];
1791 msg.len = (i != 12 ? 5 : 2); 1801 msg.len = (i != 12 ? 5 : 2);
1792 err = i2c_transfer(&core->i2c_adap, &msg, 1); 1802 err = i2c_transfer(&core->i2c_adap, &msg, 1);
@@ -1913,12 +1923,21 @@ void cx88_card_setup(struct cx88_core *core)
1913 if (0 == core->i2c_rc) { 1923 if (0 == core->i2c_rc) {
1914 /* enable tuner */ 1924 /* enable tuner */
1915 int i; 1925 int i;
1916 static const u8 buffer [] = { 0x10,0x12,0x13,0x04,0x16,0x00,0x14,0x04,0x017,0x00 }; 1926 static const u8 buffer [][2] = {
1927 {0x10,0x12},
1928 {0x13,0x04},
1929 {0x16,0x00},
1930 {0x14,0x04},
1931 {0x17,0x00}
1932 };
1917 core->i2c_client.addr = 0x0a; 1933 core->i2c_client.addr = 0x0a;
1918 1934
1919 for (i = 0; i < 5; i++) 1935 for (i = 0; i < ARRAY_SIZE(buffer); i++)
1920 if (2 != i2c_master_send(&core->i2c_client,&buffer[i*2],2)) 1936 if (2 != i2c_master_send(&core->i2c_client,
1921 printk(KERN_WARNING "%s: Unable to enable tuner(%i).\n", 1937 buffer[i],2))
1938 printk(KERN_WARNING
1939 "%s: Unable to enable "
1940 "tuner(%i).\n",
1922 core->name, i); 1941 core->name, i);
1923 } 1942 }
1924 break; 1943 break;
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index d86813be56de..f31ec96924b9 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -489,12 +489,12 @@ static char *cx88_pci_irqs[32] = {
489}; 489};
490 490
491void cx88_print_irqbits(char *name, char *tag, char **strings, 491void cx88_print_irqbits(char *name, char *tag, char **strings,
492 u32 bits, u32 mask) 492 int len, u32 bits, u32 mask)
493{ 493{
494 unsigned int i; 494 unsigned int i;
495 495
496 printk(KERN_DEBUG "%s: %s [0x%x]", name, tag, bits); 496 printk(KERN_DEBUG "%s: %s [0x%x]", name, tag, bits);
497 for (i = 0; i < 32; i++) { 497 for (i = 0; i < len; i++) {
498 if (!(bits & (1 << i))) 498 if (!(bits & (1 << i)))
499 continue; 499 continue;
500 if (strings[i]) 500 if (strings[i])
@@ -520,8 +520,8 @@ int cx88_core_irq(struct cx88_core *core, u32 status)
520 } 520 }
521 if (!handled) 521 if (!handled)
522 cx88_print_irqbits(core->name, "irq pci", 522 cx88_print_irqbits(core->name, "irq pci",
523 cx88_pci_irqs, status, 523 cx88_pci_irqs, ARRAY_SIZE(cx88_pci_irqs),
524 core->pci_irqmask); 524 status, core->pci_irqmask);
525 return handled; 525 return handled;
526} 526}
527 527
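
The cx88-core change above (mirrored in cx88-alsa, cx88-mpeg, cx88-video, em28xx-i2c and ir-kbd-i2c) replaces hard-coded loop bounds with an explicit length so callers pass ARRAY_SIZE() of their own table. The general shape of that pattern, with invented foo_* names:

#include <linux/kernel.h>       /* printk, ARRAY_SIZE */
#include <linux/types.h>

static const char *foo_irq_names[] = {
        "done", "error", "overflow",
};

/* Print the set bits by name without ever indexing past the table. */
static void foo_print_irqbits(const char *tag, const char **names,
                              int len, u32 bits)
{
        int i;

        printk(KERN_DEBUG "%s [0x%x]", tag, bits);
        for (i = 0; i < len; i++)
                if ((bits & (1 << i)) && names[i])
                        printk(" %s", names[i]);
        printk("\n");
}

static void foo_handle_irq(u32 status)
{
        /* the table length travels with the table */
        foo_print_irqbits("foo irq", foo_irq_names,
                          ARRAY_SIZE(foo_irq_names), status);
}
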
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 4f5560285770..dbfe4dc9cf8c 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -42,7 +42,6 @@
42#include "cx22702.h" 42#include "cx22702.h"
43#include "or51132.h" 43#include "or51132.h"
44#include "lgdt330x.h" 44#include "lgdt330x.h"
45#include "lgh06xf.h"
46#include "nxt200x.h" 45#include "nxt200x.h"
47#include "cx24123.h" 46#include "cx24123.h"
48#include "isl6421.h" 47#include "isl6421.h"
@@ -476,6 +475,8 @@ static int dvb_register(struct cx8802_dev *dev)
476 case CX88_BOARD_WINFAST_DTV2000H: 475 case CX88_BOARD_WINFAST_DTV2000H:
477 case CX88_BOARD_HAUPPAUGE_HVR1100: 476 case CX88_BOARD_HAUPPAUGE_HVR1100:
478 case CX88_BOARD_HAUPPAUGE_HVR1100LP: 477 case CX88_BOARD_HAUPPAUGE_HVR1100LP:
478 case CX88_BOARD_HAUPPAUGE_HVR1300:
479 case CX88_BOARD_HAUPPAUGE_HVR3000:
479 dev->dvb.frontend = dvb_attach(cx22702_attach, 480 dev->dvb.frontend = dvb_attach(cx22702_attach,
480 &hauppauge_hvr_config, 481 &hauppauge_hvr_config,
481 &dev->core->i2c_adap); 482 &dev->core->i2c_adap);
@@ -631,8 +632,9 @@ static int dvb_register(struct cx8802_dev *dev)
631 &fusionhdtv_5_gold, 632 &fusionhdtv_5_gold,
632 &dev->core->i2c_adap); 633 &dev->core->i2c_adap);
633 if (dev->dvb.frontend != NULL) { 634 if (dev->dvb.frontend != NULL) {
634 dvb_attach(lgh06xf_attach, dev->dvb.frontend, 635 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
635 &dev->core->i2c_adap); 636 &dev->core->i2c_adap,
637 &dvb_pll_lg_tdvs_h06xf);
636 } 638 }
637 } 639 }
638 break; 640 break;
@@ -650,8 +652,9 @@ static int dvb_register(struct cx8802_dev *dev)
650 &pchdtv_hd5500, 652 &pchdtv_hd5500,
651 &dev->core->i2c_adap); 653 &dev->core->i2c_adap);
652 if (dev->dvb.frontend != NULL) { 654 if (dev->dvb.frontend != NULL) {
653 dvb_attach(lgh06xf_attach, dev->dvb.frontend, 655 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
654 &dev->core->i2c_adap); 656 &dev->core->i2c_adap,
657 &dvb_pll_lg_tdvs_h06xf);
655 } 658 }
656 } 659 }
657 break; 660 break;
@@ -692,24 +695,6 @@ static int dvb_register(struct cx8802_dev *dev)
692 dev->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage; 695 dev->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage;
693 } 696 }
694 break; 697 break;
695 case CX88_BOARD_HAUPPAUGE_HVR1300:
696 dev->dvb.frontend = dvb_attach(cx22702_attach,
697 &hauppauge_hvr_config,
698 &dev->core->i2c_adap);
699 if (dev->dvb.frontend != NULL) {
700 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
701 &dev->core->i2c_adap, &dvb_pll_fmd1216me);
702 }
703 break;
704 case CX88_BOARD_HAUPPAUGE_HVR3000:
705 dev->dvb.frontend = dvb_attach(cx22702_attach,
706 &hauppauge_hvr_config,
707 &dev->core->i2c_adap);
708 if (dev->dvb.frontend != NULL) {
709 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
710 &dev->core->i2c_adap, &dvb_pll_fmd1216me);
711 }
712 break;
713 default: 698 default:
714 printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n", 699 printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
715 dev->core->name); 700 dev->core->name);
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 9830d5c43921..7919a1f9da06 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -1,3 +1,4 @@
1
1/* 2/*
2 3
3 cx88-i2c.c -- all the i2c code is here 4 cx88-i2c.c -- all the i2c code is here
@@ -195,7 +196,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
195 unsigned char buf; 196 unsigned char buf;
196 int i,rc; 197 int i,rc;
197 198
198 for (i = 0; i < 128; i++) { 199 for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
199 c->addr = i; 200 c->addr = i;
200 rc = i2c_master_recv(c,&buf,0); 201 rc = i2c_master_recv(c,&buf,0);
201 if (rc < 0) 202 if (rc < 0)
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 1fe1a833c7c7..b2eb32e01aee 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -49,6 +49,27 @@ MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
49#define mpeg_dbg(level,fmt, arg...) if (debug >= level) \ 49#define mpeg_dbg(level,fmt, arg...) if (debug >= level) \
50 printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg) 50 printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg)
51 51
52#if defined(CONFIG_MODULES) && defined(MODULE)
53static void request_module_async(struct work_struct *work)
54{
55 struct cx8802_dev *dev=container_of(work, struct cx8802_dev, request_module_wk);
56
57 if (cx88_boards[dev->core->board].mpeg & CX88_MPEG_DVB)
58 request_module("cx88-dvb");
59 if (cx88_boards[dev->core->board].mpeg & CX88_MPEG_BLACKBIRD)
60 request_module("cx88-blackbird");
61}
62
63static void request_modules(struct cx8802_dev *dev)
64{
65 INIT_WORK(&dev->request_module_wk, request_module_async);
66 schedule_work(&dev->request_module_wk);
67}
68#else
69#define request_modules(dev)
70#endif /* CONFIG_MODULES */
71
72
52static LIST_HEAD(cx8802_devlist); 73static LIST_HEAD(cx8802_devlist);
53/* ------------------------------------------------------------------ */ 74/* ------------------------------------------------------------------ */
54 75
@@ -345,7 +366,8 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
345 366
346 if (debug || (status & mask & ~0xff)) 367 if (debug || (status & mask & ~0xff))
347 cx88_print_irqbits(core->name, "irq mpeg ", 368 cx88_print_irqbits(core->name, "irq mpeg ",
348 cx88_mpeg_irqs, status, mask); 369 cx88_mpeg_irqs, ARRAY_SIZE(cx88_mpeg_irqs),
370 status, mask);
349 371
350 /* risc op code error */ 372 /* risc op code error */
351 if (status & (1 << 16)) { 373 if (status & (1 << 16)) {
@@ -427,7 +449,7 @@ int cx8802_init_common(struct cx8802_dev *dev)
427 if (pci_enable_device(dev->pci)) 449 if (pci_enable_device(dev->pci))
428 return -EIO; 450 return -EIO;
429 pci_set_master(dev->pci); 451 pci_set_master(dev->pci);
430 if (!pci_dma_supported(dev->pci,0xffffffff)) { 452 if (!pci_dma_supported(dev->pci,DMA_32BIT_MASK)) {
431 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); 453 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
432 return -EIO; 454 return -EIO;
433 } 455 }
@@ -778,6 +800,9 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
778 800
779 /* Maintain a reference so cx88-video can query the 8802 device. */ 801 /* Maintain a reference so cx88-video can query the 8802 device. */
780 core->dvbdev = dev; 802 core->dvbdev = dev;
803
804 /* now autoload cx88-dvb or cx88-blackbird */
805 request_modules(dev);
781 return 0; 806 return 0;
782 807
783 fail_free: 808 fail_free:
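
cx88-mpeg.c above defers request_module() to a work item so that loading the helper module happens asynchronously, outside the PCI probe path. The same deferral pattern reduced to invented foo_* names; the real code keys the module name off the board's MPEG capabilities:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>

struct foo_dev {
        struct work_struct request_module_wk;
        int wants_dvb;                  /* stand-in for the board-capability test */
};

static void foo_request_module_async(struct work_struct *work)
{
        struct foo_dev *dev = container_of(work, struct foo_dev,
                                           request_module_wk);

        /* Runs later in process context, where request_module() may sleep. */
        if (dev->wants_dvb)
                request_module("foo-dvb");
}

static void foo_request_modules(struct foo_dev *dev)
{
        INIT_WORK(&dev->request_module_wk, foo_request_module_async);
        schedule_work(&dev->request_module_wk);
}

As in the hunk above, the real driver wraps this in #if defined(CONFIG_MODULES) && defined(MODULE) so that request_modules() compiles away when autoloading cannot happen.
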
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index bdfe2af70124..fbce1d50578b 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1555,7 +1555,8 @@ static void cx8800_vid_irq(struct cx8800_dev *dev)
1555 cx_write(MO_VID_INTSTAT, status); 1555 cx_write(MO_VID_INTSTAT, status);
1556 if (irq_debug || (status & mask & ~0xff)) 1556 if (irq_debug || (status & mask & ~0xff))
1557 cx88_print_irqbits(core->name, "irq vid", 1557 cx88_print_irqbits(core->name, "irq vid",
1558 cx88_vid_irqs, status, mask); 1558 cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs),
1559 status, mask);
1559 1560
1560 /* risc op code error */ 1561 /* risc op code error */
1561 if (status & (1 << 16)) { 1562 if (status & (1 << 16)) {
@@ -1778,7 +1779,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1778 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 1779 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
1779 1780
1780 pci_set_master(pci_dev); 1781 pci_set_master(pci_dev);
1781 if (!pci_dma_supported(pci_dev,0xffffffff)) { 1782 if (!pci_dma_supported(pci_dev,DMA_32BIT_MASK)) {
1782 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); 1783 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
1783 err = -EIO; 1784 err = -EIO;
1784 goto fail_core; 1785 goto fail_core;
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index a4f7befda5b0..738d4f20c580 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -481,6 +481,8 @@ struct cx8802_dev {
481 481
482 /* List of attached drivers */ 482 /* List of attached drivers */
483 struct cx8802_driver drvlist; 483 struct cx8802_driver drvlist;
484 struct work_struct request_module_wk;
485
484}; 486};
485 487
486/* ----------------------------------------------------------- */ 488/* ----------------------------------------------------------- */
@@ -510,7 +512,7 @@ struct cx8802_dev {
510/* cx88-core.c */ 512/* cx88-core.c */
511 513
512extern void cx88_print_irqbits(char *name, char *tag, char **strings, 514extern void cx88_print_irqbits(char *name, char *tag, char **strings,
513 u32 bits, u32 mask); 515 int len, u32 bits, u32 mask);
514 516
515extern int cx88_core_irq(struct cx88_core *core, u32 status); 517extern int cx88_core_irq(struct cx88_core *core, u32 status);
516extern void cx88_wakeup(struct cx88_core *core, 518extern void cx88_wakeup(struct cx88_core *core,
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index d829d8f8c1f6..563a8319e608 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -523,7 +523,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
523 unsigned char buf; 523 unsigned char buf;
524 int i, rc; 524 int i, rc;
525 525
526 for (i = 0; i < 128; i++) { 526 for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
527 c->addr = i; 527 c->addr = i;
528 rc = i2c_master_recv(c, &buf, 0); 528 rc = i2c_master_recv(c, &buf, 0);
529 if (rc < 0) 529 if (rc < 0)
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 210582d420f8..ed92b6f7187a 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -173,7 +173,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
173 return -EIO; 173 return -EIO;
174 } 174 }
175 175
176 for (start = 0; start<4; start++) { 176 for (start = 0; start < ARRAY_SIZE(b); start++) {
177 if (b[start] == marker) { 177 if (b[start] == marker) {
178 code=b[(start+parity_offset+1)%4]; 178 code=b[(start+parity_offset+1)%4];
179 parity=b[(start+parity_offset)%4]; 179 parity=b[(start+parity_offset)%4];
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
new file mode 100644
index 000000000000..e854f3f1b70f
--- /dev/null
+++ b/drivers/media/video/ivtv/Kconfig
@@ -0,0 +1,26 @@
1config VIDEO_IVTV
2 tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support"
3 depends on VIDEO_V4L1 && VIDEO_V4L2 && USB && I2C && EXPERIMENTAL
4 select FW_LOADER
5 select VIDEO_TUNER
6 select VIDEO_TVEEPROM
7 select VIDEO_CX2341X
8 select VIDEO_CX25840
9 select VIDEO_MSP3400
10 select VIDEO_SAA711X
11 select VIDEO_SAA7127
12 select VIDEO_TVAUDIO
13 select VIDEO_CS53L32A
14 select VIDEO_WM8775
15 select VIDEO_WM8739
16 select VIDEO_UPD64031A
17 select VIDEO_UPD64083
18 ---help---
 19	  This is a video4linux driver for Conexant cx23416 or cx23415 based
20 PCI personal video recorder devices.
21
22 This is used in devices such as the Hauppauge PVR-150/250/350/500
23 cards.
24
25 To compile this driver as a module, choose M here: the
26 module will be called ivtv.
diff --git a/drivers/media/video/ivtv/Makefile b/drivers/media/video/ivtv/Makefile
new file mode 100644
index 000000000000..7e95148fbf4f
--- /dev/null
+++ b/drivers/media/video/ivtv/Makefile
@@ -0,0 +1,7 @@
1ivtv-objs := ivtv-audio.o ivtv-cards.o ivtv-controls.o \
2 ivtv-driver.o ivtv-fileops.o ivtv-firmware.o \
3 ivtv-gpio.o ivtv-i2c.o ivtv-ioctl.o ivtv-irq.o \
4 ivtv-mailbox.o ivtv-queue.o ivtv-streams.o ivtv-udma.o \
5 ivtv-vbi.o ivtv-video.o ivtv-yuv.o
6
7obj-$(CONFIG_VIDEO_IVTV) += ivtv.o
diff --git a/drivers/media/video/ivtv/ivtv-audio.c b/drivers/media/video/ivtv/ivtv-audio.c
new file mode 100644
index 000000000000..d702b8b539a1
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-audio.c
@@ -0,0 +1,74 @@
1/*
2 Audio-related ivtv functions.
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-mailbox.h"
23#include "ivtv-i2c.h"
24#include "ivtv-gpio.h"
25#include "ivtv-cards.h"
26#include "ivtv-audio.h"
27#include <media/msp3400.h>
28#include <linux/videodev.h>
29
30/* Selects the audio input and output according to the current
31 settings. */
32int ivtv_audio_set_io(struct ivtv *itv)
33{
34 struct v4l2_routing route;
35 u32 audio_input;
36 int mux_input;
37
38 /* Determine which input to use */
39 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
40 audio_input = itv->card->radio_input.audio_input;
41 mux_input = itv->card->radio_input.muxer_input;
42 } else {
43 audio_input = itv->card->audio_inputs[itv->audio_input].audio_input;
44 mux_input = itv->card->audio_inputs[itv->audio_input].muxer_input;
45 }
46
47 /* handle muxer chips */
48 route.input = mux_input;
49 route.output = 0;
50 ivtv_i2c_hw(itv, itv->card->hw_muxer, VIDIOC_INT_S_AUDIO_ROUTING, &route);
51
52 route.input = audio_input;
53 if (itv->card->hw_audio & IVTV_HW_MSP34XX) {
54 route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
55 }
56 return ivtv_i2c_hw(itv, itv->card->hw_audio, VIDIOC_INT_S_AUDIO_ROUTING, &route);
57}
58
59void ivtv_audio_set_route(struct ivtv *itv, struct v4l2_routing *route)
60{
61 ivtv_i2c_hw(itv, itv->card->hw_audio, VIDIOC_INT_S_AUDIO_ROUTING, route);
62}
63
64void ivtv_audio_set_audio_clock_freq(struct ivtv *itv, u8 freq)
65{
66 static u32 freqs[3] = { 44100, 48000, 32000 };
67
68 /* The audio clock of the digitizer must match the codec sample
 69	   rate; otherwise you get some very strange effects. */
70 if (freq > 2)
71 return;
72 ivtv_call_i2c_clients(itv, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freqs[freq]);
73}
74
diff --git a/drivers/media/video/ivtv/ivtv-audio.h b/drivers/media/video/ivtv/ivtv-audio.h
new file mode 100644
index 000000000000..9c42846d8124
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-audio.h
@@ -0,0 +1,23 @@
1/*
2 Audio-related ivtv functions.
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21int ivtv_audio_set_io(struct ivtv *itv);
22void ivtv_audio_set_route(struct ivtv *itv, struct v4l2_routing *route);
23void ivtv_audio_set_audio_clock_freq(struct ivtv *itv, u8 freq);
diff --git a/drivers/media/video/ivtv/ivtv-cards.c b/drivers/media/video/ivtv/ivtv-cards.c
new file mode 100644
index 000000000000..8eab02083887
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-cards.c
@@ -0,0 +1,964 @@
1/*
2 Functions to query card hardware
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-cards.h"
23#include "ivtv-i2c.h"
24
25#include <media/msp3400.h>
26#include <media/wm8775.h>
27#include <media/cs53l32a.h>
28#include <media/cx25840.h>
29#include <media/upd64031a.h>
30
31#define MSP_TUNER MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \
32 MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER)
33#define MSP_SCART1 MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \
34 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
35#define MSP_SCART2 MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1, \
36 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
37#define MSP_SCART3 MSP_INPUT(MSP_IN_SCART3, MSP_IN_TUNER1, \
38 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
39#define MSP_MONO MSP_INPUT(MSP_IN_MONO, MSP_IN_TUNER1, \
40 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
41
42/********************** card configuration *******************************/
43
44/* Please add new PCI IDs to: http://pci-ids.ucw.cz/iii
45 This keeps the PCI ID database up to date. Note that the entries
46 must be added under vendor 0x4444 (Conexant) as subsystem IDs.
47 New vendor IDs should still be added to the vendor ID list. */
48
49/* Hauppauge PVR-250 cards */
50
51/* Note: for Hauppauge cards the tveeprom information is used instead of PCI IDs */
52static const struct ivtv_card ivtv_card_pvr250 = {
53 .type = IVTV_CARD_PVR_250,
54 .name = "Hauppauge WinTV PVR-250",
55 .v4l2_capabilities = IVTV_CAP_ENCODER,
56 .hw_video = IVTV_HW_SAA7115,
57 .hw_audio = IVTV_HW_MSP34XX,
58 .hw_audio_ctrl = IVTV_HW_MSP34XX,
59 .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 |
60 IVTV_HW_TVEEPROM | IVTV_HW_TUNER,
61 .video_inputs = {
62 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
63 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
64 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
65 { IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 },
66 { IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 },
67 { IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 },
68 },
69 .audio_inputs = {
70 { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
71 { IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 },
72 { IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 },
73 },
74 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 },
75};
76
77/* ------------------------------------------------------------------------- */
78
79/* Hauppauge PVR-350 cards */
80
81/* Outputs for Hauppauge PVR350 cards */
82static struct ivtv_card_output ivtv_pvr350_outputs[] = {
83 {
84 .name = "S-Video + Composite",
85 .video_output = 0,
86 }, {
87 .name = "Composite",
88 .video_output = 1,
89 }, {
90 .name = "S-Video",
91 .video_output = 2,
92 }, {
93 .name = "RGB",
94 .video_output = 3,
95 }, {
96 .name = "YUV C",
97 .video_output = 4,
98 }, {
99 .name = "YUV V",
100 .video_output = 5,
101 }
102};
103
104static const struct ivtv_card ivtv_card_pvr350 = {
105 .type = IVTV_CARD_PVR_350,
106 .name = "Hauppauge WinTV PVR-350",
107 .v4l2_capabilities = IVTV_CAP_ENCODER | IVTV_CAP_DECODER,
108 .video_outputs = ivtv_pvr350_outputs,
109 .nof_outputs = ARRAY_SIZE(ivtv_pvr350_outputs),
110 .hw_video = IVTV_HW_SAA7115,
111 .hw_audio = IVTV_HW_MSP34XX,
112 .hw_audio_ctrl = IVTV_HW_MSP34XX,
113 .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 |
114 IVTV_HW_SAA7127 | IVTV_HW_TVEEPROM | IVTV_HW_TUNER,
115 .video_inputs = {
116 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
117 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
118 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
119 { IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 },
120 { IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 },
121 { IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 },
122 },
123 .audio_inputs = {
124 { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
125 { IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 },
126 { IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 },
127 },
128 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 },
129};
130
131/* PVR-350 V1 boards have a different audio tuner input and use a
132 saa7114 instead of a saa7115.
133 Note that the info below comes from a pre-production model so it may
 134	   not be correct. In particular, the audio behaves strangely (it seems to be mono only) */
135static const struct ivtv_card ivtv_card_pvr350_v1 = {
136 .type = IVTV_CARD_PVR_350_V1,
137 .name = "Hauppauge WinTV PVR-350 (V1)",
138 .v4l2_capabilities = IVTV_CAP_ENCODER | IVTV_CAP_DECODER,
139 .video_outputs = ivtv_pvr350_outputs,
140 .nof_outputs = ARRAY_SIZE(ivtv_pvr350_outputs),
141 .hw_video = IVTV_HW_SAA7114,
142 .hw_audio = IVTV_HW_MSP34XX,
143 .hw_audio_ctrl = IVTV_HW_MSP34XX,
144 .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7114 |
145 IVTV_HW_SAA7127 | IVTV_HW_TVEEPROM | IVTV_HW_TUNER,
146 .video_inputs = {
147 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
148 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
149 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
150 { IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 },
151 { IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 },
152 { IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 },
153 },
154 .audio_inputs = {
155 { IVTV_CARD_INPUT_AUD_TUNER, MSP_MONO },
156 { IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 },
157 { IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 },
158 },
159 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 },
160};
161
162/* ------------------------------------------------------------------------- */
163
164/* Hauppauge PVR-150/PVR-500 cards */
165
166static const struct ivtv_card ivtv_card_pvr150 = {
167 .type = IVTV_CARD_PVR_150,
168 .name = "Hauppauge WinTV PVR-150",
169 .v4l2_capabilities = IVTV_CAP_ENCODER,
170 .hw_video = IVTV_HW_CX25840,
171 .hw_audio = IVTV_HW_CX25840,
172 .hw_audio_ctrl = IVTV_HW_CX25840,
173 .hw_muxer = IVTV_HW_WM8775,
174 .hw_all = IVTV_HW_WM8775 | IVTV_HW_CX25840 |
175 IVTV_HW_TVEEPROM | IVTV_HW_TUNER,
176 .video_inputs = {
177 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE7 },
178 { IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO1 },
179 { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE3 },
180 { IVTV_CARD_INPUT_SVIDEO2, 2, CX25840_SVIDEO2 },
181 { IVTV_CARD_INPUT_COMPOSITE2, 2, CX25840_COMPOSITE4 },
182 },
183 .audio_inputs = {
184 { IVTV_CARD_INPUT_AUD_TUNER,
185 CX25840_AUDIO8, WM8775_AIN2 },
186 { IVTV_CARD_INPUT_LINE_IN1,
187 CX25840_AUDIO_SERIAL, WM8775_AIN2 },
188 { IVTV_CARD_INPUT_LINE_IN2,
189 CX25840_AUDIO_SERIAL, WM8775_AIN3 },
190 },
191 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER,
192 CX25840_AUDIO_SERIAL, WM8775_AIN4 },
193 /* apparently needed for the IR blaster */
194 .gpio_init = { .direction = 0x1f01, .initial_value = 0x26f3 },
195};
196
197/* ------------------------------------------------------------------------- */
198
199/* AVerMedia M179 cards */
200
201static const struct ivtv_card_pci_info ivtv_pci_m179[] = {
202 { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_AVERMEDIA, 0xa3cf },
203 { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_AVERMEDIA, 0xa3ce },
204 { 0, 0, 0 }
205};
206
207static const struct ivtv_card ivtv_card_m179 = {
208 .type = IVTV_CARD_M179,
209 .name = "AVerMedia M179",
210 .v4l2_capabilities = IVTV_CAP_ENCODER,
211 .hw_video = IVTV_HW_SAA7114,
212 .hw_audio = IVTV_HW_GPIO,
213 .hw_audio_ctrl = IVTV_HW_GPIO,
214 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER,
215 .video_inputs = {
216 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
217 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
218 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
219 },
220 .audio_inputs = {
221 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
222 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
223 },
224 .gpio_init = { .direction = 0xe380, .initial_value = 0x8290 },
225 .gpio_audio_input = { .mask = 0x8040, .tuner = 0x8000, .linein = 0x0000 },
226 .gpio_audio_mute = { .mask = 0x2000, .mute = 0x2000 },
227 .gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200,
228 .lang1 = 0x0200, .lang2 = 0x0100, .both = 0x0000 },
229 .gpio_audio_freq = { .mask = 0x0018, .f32000 = 0x0000,
230 .f44100 = 0x0008, .f48000 = 0x0010 },
231 .gpio_audio_detect = { .mask = 0x4000, .stereo = 0x0000 },
232 .tuners = {
233 /* As far as we know all M179 cards use this tuner */
234 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_NTSC },
235 },
236 .pci_list = ivtv_pci_m179,
237};
238
239/* ------------------------------------------------------------------------- */
240
241/* Yuan MPG600/Kuroutoshikou ITVC16-STVLP cards */
242
243static const struct ivtv_card_pci_info ivtv_pci_mpg600[] = {
244 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0xfff3 },
245 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0xffff },
246 { 0, 0, 0 }
247};
248
249static const struct ivtv_card ivtv_card_mpg600 = {
250 .type = IVTV_CARD_MPG600,
251 .name = "Yuan MPG600, Kuroutoshikou ITVC16-STVLP",
252 .v4l2_capabilities = IVTV_CAP_ENCODER,
253 .hw_video = IVTV_HW_SAA7115,
254 .hw_audio = IVTV_HW_GPIO,
255 .hw_audio_ctrl = IVTV_HW_GPIO,
256 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER,
257 .video_inputs = {
258 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
259 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
260 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
261 },
262 .audio_inputs = {
263 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
264 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
265 },
266 .gpio_init = { .direction = 0x3080, .initial_value = 0x0004 },
267 .gpio_audio_input = { .mask = 0x3000, .tuner = 0x0000, .linein = 0x2000 },
268 .gpio_audio_mute = { .mask = 0x0001, .mute = 0x0001 },
269 .gpio_audio_mode = { .mask = 0x000e, .mono = 0x0006, .stereo = 0x0004,
270 .lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 },
271 .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
272 .tuners = {
273 /* The PAL tuner is confirmed */
274 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME },
275 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
276 },
277 .pci_list = ivtv_pci_mpg600,
278};
279
280/* ------------------------------------------------------------------------- */
281
282/* Yuan MPG160/Kuroutoshikou ITVC15-STVLP cards */
283
284static const struct ivtv_card_pci_info ivtv_pci_mpg160[] = {
285 { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_YUAN1, 0 },
286 { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_IODATA, 0x40a0 },
287 { 0, 0, 0 }
288};
289
290static const struct ivtv_card ivtv_card_mpg160 = {
291 .type = IVTV_CARD_MPG160,
292 .name = "YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI",
293 .v4l2_capabilities = IVTV_CAP_ENCODER,
294 .hw_video = IVTV_HW_SAA7114,
295 .hw_audio = IVTV_HW_GPIO,
296 .hw_audio_ctrl = IVTV_HW_GPIO,
297 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER,
298 .video_inputs = {
299 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
300 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
301 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
302 },
303 .audio_inputs = {
304 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
305 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
306 },
307 .gpio_init = { .direction = 0x7080, .initial_value = 0x400c },
308 .gpio_audio_input = { .mask = 0x3000, .tuner = 0x0000, .linein = 0x2000 },
309 .gpio_audio_mute = { .mask = 0x0001, .mute = 0x0001 },
310 .gpio_audio_mode = { .mask = 0x000e, .mono = 0x0006, .stereo = 0x0004,
311 .lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 },
312 .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
313 .tuners = {
314 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME },
315 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
316 },
317 .pci_list = ivtv_pci_mpg160,
318};
319
320/* ------------------------------------------------------------------------- */
321
322/* Yuan PG600/Diamond PVR-550 cards */
323
324static const struct ivtv_card_pci_info ivtv_pci_pg600[] = {
325 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_DIAMONDMM, 0x0070 },
326 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 },
327 { 0, 0, 0 }
328};
329
330static const struct ivtv_card ivtv_card_pg600 = {
331 .type = IVTV_CARD_PG600,
332 .name = "Yuan PG600, Diamond PVR-550",
333 .v4l2_capabilities = IVTV_CAP_ENCODER,
334 .hw_video = IVTV_HW_CX25840,
335 .hw_audio = IVTV_HW_CX25840,
336 .hw_audio_ctrl = IVTV_HW_CX25840,
337 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
338 .video_inputs = {
339 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
340 { IVTV_CARD_INPUT_SVIDEO1, 1,
341 CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
342 { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
343 },
344 .audio_inputs = {
345 { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
346 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
347 },
348 .tuners = {
349 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME },
350 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
351 },
352 .pci_list = ivtv_pci_pg600,
353};
354
355/* ------------------------------------------------------------------------- */
356
357/* Adaptec VideOh! AVC-2410 card */
358
359static const struct ivtv_card_pci_info ivtv_pci_avc2410[] = {
360 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ADAPTEC, 0x0093 },
361 { 0, 0, 0 }
362};
363
364static const struct ivtv_card ivtv_card_avc2410 = {
365 .type = IVTV_CARD_AVC2410,
366 .name = "Adaptec VideOh! AVC-2410",
367 .v4l2_capabilities = IVTV_CAP_ENCODER,
368 .hw_video = IVTV_HW_SAA7115,
369 .hw_audio = IVTV_HW_MSP34XX,
370 .hw_audio_ctrl = IVTV_HW_MSP34XX,
371 .hw_muxer = IVTV_HW_CS53L32A,
372 .hw_all = IVTV_HW_MSP34XX | IVTV_HW_CS53L32A |
373 IVTV_HW_SAA7115 | IVTV_HW_TUNER,
374 .video_inputs = {
375 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
376 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
377 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
378 },
379 .audio_inputs = {
380 { IVTV_CARD_INPUT_AUD_TUNER,
381 MSP_TUNER, CS53L32A_IN0 },
382 { IVTV_CARD_INPUT_LINE_IN1,
383 MSP_SCART1, CS53L32A_IN2 },
384 },
385	/* This card has no eeprom; in fact the Windows driver relies
386	   on the user's country/region setting to decide which tuner
387	   is available. */
388 .tuners = {
389 /* This tuner has been verified for the AVC2410 */
390 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
391 /* This is a good guess, but I'm not totally sure this is
392 the correct tuner for NTSC. */
393 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
394 },
395 .pci_list = ivtv_pci_avc2410,
396};
397
398/* ------------------------------------------------------------------------- */
399
400/* Adaptec VideOh! AVC-2010 card */
401
402static const struct ivtv_card_pci_info ivtv_pci_avc2010[] = {
403 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ADAPTEC, 0x0092 },
404 { 0, 0, 0 }
405};
406
407static const struct ivtv_card ivtv_card_avc2010 = {
408 .type = IVTV_CARD_AVC2010,
409 .name = "Adaptec VideOh! AVC-2010",
410 .v4l2_capabilities = IVTV_CAP_ENCODER,
411 .hw_video = IVTV_HW_SAA7115,
412 .hw_audio = IVTV_HW_CS53L32A,
413 .hw_audio_ctrl = IVTV_HW_CS53L32A,
414 .hw_all = IVTV_HW_CS53L32A | IVTV_HW_SAA7115,
415 .video_inputs = {
416 { IVTV_CARD_INPUT_SVIDEO1, 0, IVTV_SAA71XX_SVIDEO0 },
417 { IVTV_CARD_INPUT_COMPOSITE1, 0, IVTV_SAA71XX_COMPOSITE3 },
418 },
419 .audio_inputs = {
420 { IVTV_CARD_INPUT_LINE_IN1, CS53L32A_IN2 },
421 },
422 /* Does not have a tuner */
423 .pci_list = ivtv_pci_avc2010,
424};
425
426/* ------------------------------------------------------------------------- */
427
428/* Nagase Transgear 5000TV card */
429
430static const struct ivtv_card_pci_info ivtv_pci_tg5000tv[] = {
431 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xbfff },
432 { 0, 0, 0 }
433};
434
435static const struct ivtv_card ivtv_card_tg5000tv = {
436 .type = IVTV_CARD_TG5000TV,
437 .name = "Nagase Transgear 5000TV",
438 .v4l2_capabilities = IVTV_CAP_ENCODER,
439 .hw_video = IVTV_HW_SAA7114 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X |
440 IVTV_HW_GPIO,
441 .hw_audio = IVTV_HW_GPIO,
442 .hw_audio_ctrl = IVTV_HW_GPIO,
443 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER |
444 IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
445 .video_inputs = {
446 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
447 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO2 },
448 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 },
449 },
450 .audio_inputs = {
451 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
452 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
453 },
454 .gr_config = UPD64031A_VERTICAL_EXTERNAL,
455 .gpio_init = { .direction = 0xe080, .initial_value = 0x8000 },
456 .gpio_audio_input = { .mask = 0x8080, .tuner = 0x8000, .linein = 0x0080 },
457 .gpio_audio_mute = { .mask = 0x6000, .mute = 0x6000 },
458 .gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200,
459 .lang1 = 0x0300, .lang2 = 0x0000, .both = 0x0200 },
460 .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000,
461 .composite = 0x0010, .svideo = 0x0020 },
462 .tuners = {
463 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 },
464 },
465 .pci_list = ivtv_pci_tg5000tv,
466};
467
468/* ------------------------------------------------------------------------- */
469
470/* AOpen VA2000MAX-SNT6 card */
471
472static const struct ivtv_card_pci_info ivtv_pci_va2000[] = {
473 { PCI_DEVICE_ID_IVTV16, 0, 0xff5f },
474 { 0, 0, 0 }
475};
476
477static const struct ivtv_card ivtv_card_va2000 = {
478 .type = IVTV_CARD_VA2000MAX_SNT6,
479 .name = "AOpen VA2000MAX-SNT6",
480 .v4l2_capabilities = IVTV_CAP_ENCODER,
481 .hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD6408X,
482 .hw_audio = IVTV_HW_MSP34XX,
483 .hw_audio_ctrl = IVTV_HW_MSP34XX,
484 .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 |
485 IVTV_HW_UPD6408X | IVTV_HW_TUNER,
486 .video_inputs = {
487 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
488 },
489 .audio_inputs = {
490 { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
491 },
492 .tuners = {
493 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 },
494 },
495 .pci_list = ivtv_pci_va2000,
496};
497
498/* ------------------------------------------------------------------------- */
499
500/* Yuan MPG600GR/Kuroutoshikou CX23416GYC-STVLP cards */
501
502static const struct ivtv_card_pci_info ivtv_pci_cx23416gyc[] = {
503 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0x0600 },
504 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN4, 0x0600 },
505 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_MELCO, 0x0523 },
506 { 0, 0, 0 }
507};
508
509static const struct ivtv_card ivtv_card_cx23416gyc = {
510 .type = IVTV_CARD_CX23416GYC,
511 .name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP",
512 .v4l2_capabilities = IVTV_CAP_ENCODER,
513 .hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO |
514 IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
515 .hw_audio = IVTV_HW_SAA717X,
516 .hw_audio_ctrl = IVTV_HW_SAA717X,
517 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER |
518 IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
519 .video_inputs = {
520 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO3 |
521 IVTV_SAA717X_TUNER_FLAG },
522 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
523 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO3 },
524 },
525 .audio_inputs = {
526 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 },
527 { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 },
528 },
529 .gr_config = UPD64031A_VERTICAL_EXTERNAL,
530 .gpio_init = { .direction = 0xf880, .initial_value = 0x8800 },
531 .gpio_video_input = { .mask = 0x0020, .tuner = 0x0000,
532 .composite = 0x0020, .svideo = 0x0020 },
533 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
534 .f44100 = 0x4000, .f48000 = 0x8000 },
535 .tuners = {
536 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
537 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
538 },
539 .pci_list = ivtv_pci_cx23416gyc,
540};
541
542static const struct ivtv_card ivtv_card_cx23416gyc_nogr = {
543 .type = IVTV_CARD_CX23416GYC_NOGR,
544 .name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR)",
545 .v4l2_capabilities = IVTV_CAP_ENCODER,
546 .hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO | IVTV_HW_UPD6408X,
547 .hw_audio = IVTV_HW_SAA717X,
548 .hw_audio_ctrl = IVTV_HW_SAA717X,
549 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER |
550 IVTV_HW_UPD6408X,
551 .video_inputs = {
552 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 |
553 IVTV_SAA717X_TUNER_FLAG },
554 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
555 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
556 },
557 .audio_inputs = {
558 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 },
559 { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 },
560 },
561 .gpio_init = { .direction = 0xf880, .initial_value = 0x8800 },
562 .gpio_video_input = { .mask = 0x0020, .tuner = 0x0000,
563 .composite = 0x0020, .svideo = 0x0020 },
564 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
565 .f44100 = 0x4000, .f48000 = 0x8000 },
566 .tuners = {
567 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
568 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
569 },
570};
571
572static const struct ivtv_card ivtv_card_cx23416gyc_nogrycs = {
573 .type = IVTV_CARD_CX23416GYC_NOGRYCS,
574 .name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS)",
575 .v4l2_capabilities = IVTV_CAP_ENCODER,
576 .hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO,
577 .hw_audio = IVTV_HW_SAA717X,
578 .hw_audio_ctrl = IVTV_HW_SAA717X,
579 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER,
580 .video_inputs = {
581 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 |
582 IVTV_SAA717X_TUNER_FLAG },
583 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
584 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 },
585 },
586 .audio_inputs = {
587 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 },
588 { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 },
589 },
590 .gpio_init = { .direction = 0xf880, .initial_value = 0x8800 },
591 .gpio_video_input = { .mask = 0x0020, .tuner = 0x0000,
592 .composite = 0x0020, .svideo = 0x0020 },
593 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
594 .f44100 = 0x4000, .f48000 = 0x8000 },
595 .tuners = {
596 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
597 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
598 },
599};
600
601/* ------------------------------------------------------------------------- */
602
603/* I/O Data GV-MVP/RX & GV-MVP/RX2W (dual tuner) cards */
604
605static const struct ivtv_card_pci_info ivtv_pci_gv_mvprx[] = {
606 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd01e },
607 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd038 }, /* 2W unit #1 */
608 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd039 }, /* 2W unit #2 */
609 { 0, 0, 0 }
610};
611
612static const struct ivtv_card ivtv_card_gv_mvprx = {
613 .type = IVTV_CARD_GV_MVPRX,
614 .name = "I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner)",
615 .v4l2_capabilities = IVTV_CAP_ENCODER,
616 .hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
617 .hw_audio = IVTV_HW_GPIO,
618 .hw_audio_ctrl = IVTV_HW_WM8739,
619 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TVAUDIO |
620 IVTV_HW_TUNER | IVTV_HW_WM8739 |
621 IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
622 .video_inputs = {
623 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
624 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO1 },
625 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 },
626 },
627 .audio_inputs = {
628 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
629 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
630 },
631 .gpio_init = { .direction = 0xc301, .initial_value = 0x0200 },
632 .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 },
633 .tuners = {
634 /* This card has the Panasonic VP27 tuner */
635 { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 },
636 },
637 .pci_list = ivtv_pci_gv_mvprx,
638};
639
640/* ------------------------------------------------------------------------- */
641
642/* I/O Data GV-MVP/RX2E card */
643
644static const struct ivtv_card_pci_info ivtv_pci_gv_mvprx2e[] = {
645 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd025 },
646 {0, 0, 0}
647};
648
649static const struct ivtv_card ivtv_card_gv_mvprx2e = {
650 .type = IVTV_CARD_GV_MVPRX2E,
651 .name = "I/O Data GV-MVP/RX2E",
652 .v4l2_capabilities = IVTV_CAP_ENCODER,
653 .hw_video = IVTV_HW_SAA7115,
654 .hw_audio = IVTV_HW_GPIO,
655 .hw_audio_ctrl = IVTV_HW_WM8739,
656 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER |
657 IVTV_HW_TVAUDIO | IVTV_HW_WM8739,
658 .video_inputs = {
659 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 },
660 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 },
661 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 },
662 },
663 .audio_inputs = {
664 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
665 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
666 },
667 .gpio_init = { .direction = 0xc301, .initial_value = 0x0200 },
668 .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 },
669 .tuners = {
670 /* This card has the Panasonic VP27 tuner */
671 { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 },
672 },
673 .pci_list = ivtv_pci_gv_mvprx2e,
674};
675
676/* ------------------------------------------------------------------------- */
677
678/* GotVIEW PCI DVD card */
679
680static const struct ivtv_card_pci_info ivtv_pci_gotview_pci_dvd[] = {
681 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0x0600 },
682 { 0, 0, 0 }
683};
684
685static const struct ivtv_card ivtv_card_gotview_pci_dvd = {
686 .type = IVTV_CARD_GOTVIEW_PCI_DVD,
687 .name = "GotView PCI DVD",
688 .v4l2_capabilities = IVTV_CAP_ENCODER,
689 .hw_video = IVTV_HW_SAA717X,
690 .hw_audio = IVTV_HW_SAA717X,
691 .hw_audio_ctrl = IVTV_HW_SAA717X,
692 .hw_all = IVTV_HW_SAA717X | IVTV_HW_TUNER,
693 .video_inputs = {
694 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE1 }, /* pin 116 */
695 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, /* pin 114/109 */
696 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, /* pin 118 */
697 },
698 .audio_inputs = {
699 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN0 },
700 { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN2 },
701 },
702 .gpio_init = { .direction = 0xf000, .initial_value = 0xA000 },
703 .tuners = {
704 /* This card has a Philips FQ1216ME MK3 tuner */
705 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
706 },
707 .pci_list = ivtv_pci_gotview_pci_dvd,
708};
709
710/* ------------------------------------------------------------------------- */
711
712/* GotVIEW PCI DVD2 Deluxe card */
713
714static const struct ivtv_card_pci_info ivtv_pci_gotview_pci_dvd2[] = {
715 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_GOTVIEW1, 0x0600 },
716 { 0, 0, 0 }
717};
718
719static const struct ivtv_card ivtv_card_gotview_pci_dvd2 = {
720 .type = IVTV_CARD_GOTVIEW_PCI_DVD2,
721 .name = "GotView PCI DVD2 Deluxe",
722 .v4l2_capabilities = IVTV_CAP_ENCODER,
723 .hw_video = IVTV_HW_CX25840,
724 .hw_audio = IVTV_HW_CX25840,
725 .hw_audio_ctrl = IVTV_HW_CX25840,
726 .hw_muxer = IVTV_HW_GPIO,
727 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
728 .video_inputs = {
729 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
730 { IVTV_CARD_INPUT_SVIDEO1, 1,
731 CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
732 { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
733 },
734 .audio_inputs = {
735 { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5, 0 },
736 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
737 },
738 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
739 .gpio_init = { .direction = 0x0800, .initial_value = 0 },
740 .gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 },
741 .tuners = {
742 /* This card has a Philips FQ1216ME MK5 tuner */
743 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
744 },
745 .pci_list = ivtv_pci_gotview_pci_dvd2,
746};
747
748/* ------------------------------------------------------------------------- */
749
750/* Yuan MPC622 miniPCI card */
751
752static const struct ivtv_card_pci_info ivtv_pci_yuan_mpc622[] = {
753 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN2, 0xd998 },
754 { 0, 0, 0 }
755};
756
757static const struct ivtv_card ivtv_card_yuan_mpc622 = {
758 .type = IVTV_CARD_YUAN_MPC622,
759 .name = "Yuan MPC622",
760 .v4l2_capabilities = IVTV_CAP_ENCODER,
761 .hw_video = IVTV_HW_CX25840,
762 .hw_audio = IVTV_HW_CX25840,
763 .hw_audio_ctrl = IVTV_HW_CX25840,
764 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
765 .video_inputs = {
766 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
767 { IVTV_CARD_INPUT_SVIDEO1, 1,
768 CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
769 { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
770 },
771 .audio_inputs = {
772 { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
773 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
774 },
775 .gpio_init = { .direction = 0x00ff, .initial_value = 0x0002 },
776 .tuners = {
777 /* This card has the TDA8290/TDA8275 tuner chips */
778 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_TDA8290 },
779 },
780 .pci_list = ivtv_pci_yuan_mpc622,
781};
782
783/* ------------------------------------------------------------------------- */
784
785/* DIGITAL COWBOY DCT-MTVP1 card */
786
787static const struct ivtv_card_pci_info ivtv_pci_dctmvtvp1[] = {
788 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xbfff },
789 { 0, 0, 0 }
790};
791
792static const struct ivtv_card ivtv_card_dctmvtvp1 = {
793 .type = IVTV_CARD_DCTMTVP1,
794 .name = "Digital Cowboy DCT-MTVP1",
795 .v4l2_capabilities = IVTV_CAP_ENCODER,
796 .hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X |
797 IVTV_HW_GPIO,
798 .hw_audio = IVTV_HW_GPIO,
799 .hw_audio_ctrl = IVTV_HW_GPIO,
800 .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER |
801 IVTV_HW_UPD64031A | IVTV_HW_UPD6408X,
802 .video_inputs = {
803 { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 },
804 { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO2 },
805 { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 },
806 },
807 .audio_inputs = {
808 { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER },
809 { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN },
810 },
811 .gpio_init = { .direction = 0xe080, .initial_value = 0x8000 },
812 .gpio_audio_input = { .mask = 0x8080, .tuner = 0x8000, .linein = 0x0080 },
813 .gpio_audio_mute = { .mask = 0x6000, .mute = 0x6000 },
814 .gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200,
815 .lang1 = 0x0300, .lang2 = 0x0000, .both = 0x0200 },
816 .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000,
817 .composite = 0x0010, .svideo = 0x0020},
818 .tuners = {
819 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 },
820 },
821 .pci_list = ivtv_pci_dctmvtvp1,
822};
823
824/* ------------------------------------------------------------------------- */
825
826#ifdef HAVE_XC3028
827
828/* Yuan PG600-2/GotView PCI DVD Lite/Club3D ZAP-TV1x01 cards */
829
830static const struct ivtv_card_pci_info ivtv_pci_pg600v2[] = {
831 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 },
832 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_GOTVIEW2, 0x0600 },
833 { 0, 0, 0 }
834};
835
836static const struct ivtv_card ivtv_card_pg600v2 = {
837 .type = IVTV_CARD_PG600V2,
838 .name = "Yuan PG600-2, GotView PCI DVD Lite, Club3D ZAP-TV1x01",
839 .v4l2_capabilities = IVTV_CAP_ENCODER,
840 .hw_video = IVTV_HW_CX25840,
841 .hw_audio = IVTV_HW_CX25840,
842 .hw_audio_ctrl = IVTV_HW_CX25840,
843 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER,
844 .video_inputs = {
845 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
846 { IVTV_CARD_INPUT_SVIDEO1, 1,
847 CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 },
848 },
849 .audio_inputs = {
850 { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
851 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
852 },
853 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
854 .tuners = {
855 { .std = V4L2_STD_ALL, .tuner = TUNER_XCEIVE_XC3028 },
856 },
857 .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 }, /* tuner reset */
858 .pci_list = ivtv_pci_pg600v2,
859};
860#endif
861
862static const struct ivtv_card *ivtv_card_list[] = {
863 &ivtv_card_pvr250,
864 &ivtv_card_pvr350,
865 &ivtv_card_pvr150,
866 &ivtv_card_m179,
867 &ivtv_card_mpg600,
868 &ivtv_card_mpg160,
869 &ivtv_card_pg600,
870 &ivtv_card_avc2410,
871 &ivtv_card_avc2010,
872 &ivtv_card_tg5000tv,
873 &ivtv_card_va2000,
874 &ivtv_card_cx23416gyc,
875 &ivtv_card_gv_mvprx,
876 &ivtv_card_gv_mvprx2e,
877 &ivtv_card_gotview_pci_dvd,
878 &ivtv_card_gotview_pci_dvd2,
879 &ivtv_card_yuan_mpc622,
880 &ivtv_card_dctmvtvp1,
881#ifdef HAVE_XC3028
882 &ivtv_card_pg600v2,
883#endif
884
885 /* Variations of standard cards but with the same PCI IDs.
886 These cards must come last in this list. */
887 &ivtv_card_pvr350_v1,
888 &ivtv_card_cx23416gyc_nogr,
889 &ivtv_card_cx23416gyc_nogrycs,
890};
891
892const struct ivtv_card *ivtv_get_card(u16 index)
893{
894 if (index >= ARRAY_SIZE(ivtv_card_list))
895 return NULL;
896 return ivtv_card_list[index];
897}
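/*
 * Illustrative sketch (an assumption, not actual driver code): the probe
 * path is expected to walk this table via ivtv_get_card() and compare the
 * PCI subsystem IDs of the device being probed against each entry's
 * pci_list, roughly like:
 *
 *	const struct ivtv_card *card;
 *	int idx;
 *
 *	for (idx = 0; (card = ivtv_get_card(idx)) != NULL; idx++) {
 *		const struct ivtv_card_pci_info *info = card->pci_list;
 *
 *		if (info == NULL)
 *			continue;
 *		for (; info->device; info++)
 *			if (info->device == pdev->device &&
 *			    info->subsystem_vendor == pdev->subsystem_vendor &&
 *			    info->subsystem_device == pdev->subsystem_device)
 *				return card;
 *	}
 *
 * Here pdev stands for the struct pci_dev being probed; cards without a
 * pci_list (detected via the EEPROM or the cardtype option instead) are
 * skipped. The real matching logic may differ in detail.
 */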
898
899int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input)
900{
901 const struct ivtv_card_video_input *card_input = itv->card->video_inputs + index;
902 static const char * const input_strs[] = {
903 "Tuner 1",
904 "S-Video 1",
905 "S-Video 2",
906 "Composite 1",
907 "Composite 2",
908 "Composite 3"
909 };
910
911 memset(input, 0, sizeof(*input));
912 if (index >= itv->nof_inputs)
913 return -EINVAL;
914 input->index = index;
915 strcpy(input->name, input_strs[card_input->video_type - 1]);
916 input->type = (card_input->video_type == IVTV_CARD_INPUT_VID_TUNER ?
917 V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
918 input->audioset = (1 << itv->nof_audio_inputs) - 1;
919 input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ?
920 itv->tuner_std : V4L2_STD_ALL;
921 return 0;
922}
923
924int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output)
925{
926 const struct ivtv_card_output *card_output = itv->card->video_outputs + index;
927
928 memset(output, 0, sizeof(*output));
929 if (index >= itv->card->nof_outputs)
930 return -EINVAL;
931 output->index = index;
932 strcpy(output->name, card_output->name);
933 output->type = V4L2_OUTPUT_TYPE_ANALOG;
934 output->audioset = 1;
935 output->std = V4L2_STD_ALL;
936 return 0;
937}
938
939int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *audio)
940{
941 const struct ivtv_card_audio_input *aud_input = itv->card->audio_inputs + index;
942 static const char * const input_strs[] = {
943 "Tuner 1",
944 "Line In 1",
945 "Line In 2"
946 };
947
948 memset(audio, 0, sizeof(*audio));
949 if (index >= itv->nof_audio_inputs)
950 return -EINVAL;
951 strcpy(audio->name, input_strs[aud_input->audio_type - 1]);
952 audio->index = index;
953 audio->capability = V4L2_AUDCAP_STEREO;
954 return 0;
955}
956
957int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *aud_output)
958{
959 memset(aud_output, 0, sizeof(*aud_output));
960 if (itv->card->video_outputs == NULL || index != 0)
961 return -EINVAL;
962 strcpy(aud_output->name, "A/V Audio Out");
963 return 0;
964}
diff --git a/drivers/media/video/ivtv/ivtv-cards.h b/drivers/media/video/ivtv/ivtv-cards.h
new file mode 100644
index 000000000000..15012f88b802
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-cards.h
@@ -0,0 +1,207 @@
1/*
2 Functions to query card hardware
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* hardware flags */
22#define IVTV_HW_CX25840 (1 << 0)
23#define IVTV_HW_SAA7115 (1 << 1)
24#define IVTV_HW_SAA7127 (1 << 2)
25#define IVTV_HW_MSP34XX (1 << 3)
26#define IVTV_HW_TUNER (1 << 4)
27#define IVTV_HW_WM8775 (1 << 5)
28#define IVTV_HW_CS53L32A (1 << 6)
29#define IVTV_HW_TVEEPROM (1 << 7)
30#define IVTV_HW_SAA7114 (1 << 8)
31#define IVTV_HW_TVAUDIO (1 << 9)
32#define IVTV_HW_UPD64031A (1 << 10)
33#define IVTV_HW_UPD6408X (1 << 11)
34#define IVTV_HW_SAA717X (1 << 12)
35#define IVTV_HW_WM8739 (1 << 13)
36#define IVTV_HW_GPIO (1 << 14)
37
38#define IVTV_HW_SAA711X (IVTV_HW_SAA7115 | IVTV_HW_SAA7114)
39
40/* video inputs */
41#define IVTV_CARD_INPUT_VID_TUNER 1
42#define IVTV_CARD_INPUT_SVIDEO1 2
43#define IVTV_CARD_INPUT_SVIDEO2 3
44#define IVTV_CARD_INPUT_COMPOSITE1 4
45#define IVTV_CARD_INPUT_COMPOSITE2 5
46#define IVTV_CARD_INPUT_COMPOSITE3 6
47
48/* audio inputs */
49#define IVTV_CARD_INPUT_AUD_TUNER 1
50#define IVTV_CARD_INPUT_LINE_IN1 2
51#define IVTV_CARD_INPUT_LINE_IN2 3
52
53#define IVTV_CARD_MAX_VIDEO_INPUTS 6
54#define IVTV_CARD_MAX_AUDIO_INPUTS 3
55#define IVTV_CARD_MAX_TUNERS 2
56
57/* SAA71XX HW inputs */
58#define IVTV_SAA71XX_COMPOSITE0 0
59#define IVTV_SAA71XX_COMPOSITE1 1
60#define IVTV_SAA71XX_COMPOSITE2 2
61#define IVTV_SAA71XX_COMPOSITE3 3
62#define IVTV_SAA71XX_COMPOSITE4 4
63#define IVTV_SAA71XX_COMPOSITE5 5
64#define IVTV_SAA71XX_SVIDEO0 6
65#define IVTV_SAA71XX_SVIDEO1 7
66#define IVTV_SAA71XX_SVIDEO2 8
67#define IVTV_SAA71XX_SVIDEO3 9
68
69/* The tuner input of the SAA717X must be marked by ORing the input value with this flag */
70#define IVTV_SAA717X_TUNER_FLAG 0x80
71
72/* Dummy HW input */
73#define IVTV_DUMMY_AUDIO 0
74
75/* GPIO HW inputs */
76#define IVTV_GPIO_TUNER 0
77#define IVTV_GPIO_LINE_IN 1
78
79/* SAA717X HW inputs */
80#define IVTV_SAA717X_IN0 0
81#define IVTV_SAA717X_IN1 1
82#define IVTV_SAA717X_IN2 2
83
84/* V4L2 capability aliases */
85#define IVTV_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
86 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE | \
87 V4L2_CAP_SLICED_VBI_CAPTURE)
88#define IVTV_CAP_DECODER (V4L2_CAP_VBI_OUTPUT | V4L2_CAP_VIDEO_OUTPUT | \
89 V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_OVERLAY | V4L2_CAP_VIDEO_OUTPUT_POS)
90
91struct ivtv_card_video_input {
92 u8 video_type; /* video input type */
93 u8 audio_index; /* index in ivtv_card_audio_input array */
94 u16 video_input; /* hardware video input */
95};
96
97struct ivtv_card_audio_input {
98 u8 audio_type; /* audio input type */
99 u32 audio_input; /* hardware audio input */
100 u16 muxer_input; /* hardware muxer input for boards with a
101 multiplexer chip */
102};
103
104struct ivtv_card_output {
105 u8 name[32];
106 u16 video_output; /* hardware video output */
107};
108
109struct ivtv_card_pci_info {
110 u16 device;
111 u16 subsystem_vendor;
112 u16 subsystem_device;
113};
114
115/* GPIO definitions */
116
117/* The mask is the set of bits used by the operation */
118
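/*
 * Illustrative sketch (an assumption, not actual driver code): the
 * mask/value pairs below are presumably applied with a read-modify-write
 * of the GPIO output lines, so that an operation only changes the bits
 * covered by its mask:
 *
 *	u16 out = current_gpio_out;
 *
 *	out = (out & ~cfg->mask) | (value & cfg->mask);
 *	write_gpio_out(out);
 *
 * where value is one of the fields of the structure in question (e.g.
 * .tuner or .linein); current_gpio_out and write_gpio_out() are
 * hypothetical placeholders used only to show the role of the mask.
 */
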
119struct ivtv_gpio_init { /* set initial GPIO DIR and OUT values */
120 u16 direction; /* DIR setting. Leave to 0 if no init is needed */
121 u16 initial_value;
122};
123
124struct ivtv_gpio_video_input { /* select tuner/line in input */
125 u16 mask; /* leave to 0 if not supported */
126 u16 tuner;
127 u16 composite;
128 u16 svideo;
129};
130
131struct ivtv_gpio_audio_input { /* select tuner/line in input */
132 u16 mask; /* leave to 0 if not supported */
133 u16 tuner;
134 u16 linein;
135 u16 radio;
136};
137
138struct ivtv_gpio_audio_mute {
139 u16 mask; /* leave to 0 if not supported */
140 u16 mute; /* set this value to mute, 0 to unmute */
141};
142
143struct ivtv_gpio_audio_mode {
144 u16 mask; /* leave to 0 if not supported */
145 u16 mono; /* set audio to mono */
146 u16 stereo; /* set audio to stereo */
147 u16 lang1; /* set audio to the first language */
148 u16 lang2; /* set audio to the second language */
149 u16 both; /* both languages are output */
150};
151
152struct ivtv_gpio_audio_freq {
153 u16 mask; /* leave to 0 if not supported */
154 u16 f32000;
155 u16 f44100;
156 u16 f48000;
157};
158
159struct ivtv_gpio_audio_detect {
160 u16 mask; /* leave to 0 if not supported */
161 u16 stereo; /* if the input matches this value then
162 stereo is detected */
163};
164
165struct ivtv_card_tuner {
166 v4l2_std_id std; /* standard for which the tuner is suitable */
167 int tuner; /* tuner ID (from tuner.h) */
168};
169
170/* for card information/parameters */
171struct ivtv_card {
172 int type;
173 char *name;
174 u32 v4l2_capabilities;
175 u32 hw_video; /* hardware used to process video */
176 u32 hw_audio; /* hardware used to process audio */
177 u32 hw_audio_ctrl; /* hardware used for the V4L2 controls (only 1 dev allowed) */
178 u32 hw_muxer; /* hardware used to multiplex audio input */
179 u32 hw_all; /* all hardware used by the board */
180 struct ivtv_card_video_input video_inputs[IVTV_CARD_MAX_VIDEO_INPUTS];
181 struct ivtv_card_audio_input audio_inputs[IVTV_CARD_MAX_AUDIO_INPUTS];
182 struct ivtv_card_audio_input radio_input;
183 int nof_outputs;
184 const struct ivtv_card_output *video_outputs;
185 u8 gr_config; /* config byte for the ghost reduction device */
186
187 /* GPIO card-specific settings */
188 struct ivtv_gpio_init gpio_init;
189 struct ivtv_gpio_video_input gpio_video_input;
190 struct ivtv_gpio_audio_input gpio_audio_input;
191 struct ivtv_gpio_audio_mute gpio_audio_mute;
192 struct ivtv_gpio_audio_mode gpio_audio_mode;
193 struct ivtv_gpio_audio_freq gpio_audio_freq;
194 struct ivtv_gpio_audio_detect gpio_audio_detect;
195
196 struct ivtv_card_tuner tuners[IVTV_CARD_MAX_TUNERS];
197
198 /* list of device and subsystem vendor/devices that
199 correspond to this card type. */
200 const struct ivtv_card_pci_info *pci_list;
201};
202
203int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input);
204int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output);
205int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *input);
206int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *output);
207const struct ivtv_card *ivtv_get_card(u16 index);
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
new file mode 100644
index 000000000000..7a876c3e5b19
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -0,0 +1,303 @@
1/*
2 ioctl control functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-cards.h"
23#include "ivtv-ioctl.h"
24#include "ivtv-audio.h"
25#include "ivtv-i2c.h"
26#include "ivtv-mailbox.h"
27#include "ivtv-controls.h"
28
29static const u32 user_ctrls[] = {
30 V4L2_CID_USER_CLASS,
31 V4L2_CID_BRIGHTNESS,
32 V4L2_CID_CONTRAST,
33 V4L2_CID_SATURATION,
34 V4L2_CID_HUE,
35 V4L2_CID_AUDIO_VOLUME,
36 V4L2_CID_AUDIO_BALANCE,
37 V4L2_CID_AUDIO_BASS,
38 V4L2_CID_AUDIO_TREBLE,
39 V4L2_CID_AUDIO_MUTE,
40 V4L2_CID_AUDIO_LOUDNESS,
41 0
42};
43
44static const u32 *ctrl_classes[] = {
45 user_ctrls,
46 cx2341x_mpeg_ctrls,
47 NULL
48};
49
50static int ivtv_queryctrl(struct ivtv *itv, struct v4l2_queryctrl *qctrl)
51{
53
54 IVTV_DEBUG_IOCTL("VIDIOC_QUERYCTRL(%08x)\n", qctrl->id);
55
56 qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
57 if (qctrl->id == 0)
58 return -EINVAL;
59
60 switch (qctrl->id) {
61 /* Standard V4L2 controls */
62 case V4L2_CID_BRIGHTNESS:
63 case V4L2_CID_HUE:
64 case V4L2_CID_SATURATION:
65 case V4L2_CID_CONTRAST:
66 if (itv->video_dec_func(itv, VIDIOC_QUERYCTRL, qctrl))
67 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
68 return 0;
69
70 case V4L2_CID_AUDIO_VOLUME:
71 case V4L2_CID_AUDIO_MUTE:
72 case V4L2_CID_AUDIO_BALANCE:
73 case V4L2_CID_AUDIO_BASS:
74 case V4L2_CID_AUDIO_TREBLE:
75 case V4L2_CID_AUDIO_LOUDNESS:
76 if (ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_QUERYCTRL, qctrl))
77 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
78 return 0;
79
80 default:
81 if (cx2341x_ctrl_query(&itv->params, qctrl))
82 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
83 return 0;
84 }
87 return 0;
88}
89
90static int ivtv_querymenu(struct ivtv *itv, struct v4l2_querymenu *qmenu)
91{
92 struct v4l2_queryctrl qctrl;
93
94 qctrl.id = qmenu->id;
95 ivtv_queryctrl(itv, &qctrl);
96 return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(qmenu->id));
97}
98
99static int ivtv_s_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
100{
101 s32 v = vctrl->value;
102
103 IVTV_DEBUG_IOCTL("VIDIOC_S_CTRL(%08x, %x)\n", vctrl->id, v);
104
105 switch (vctrl->id) {
106 /* Standard V4L2 controls */
107 case V4L2_CID_BRIGHTNESS:
108 case V4L2_CID_HUE:
109 case V4L2_CID_SATURATION:
110 case V4L2_CID_CONTRAST:
111 return itv->video_dec_func(itv, VIDIOC_S_CTRL, vctrl);
112
113 case V4L2_CID_AUDIO_VOLUME:
114 case V4L2_CID_AUDIO_MUTE:
115 case V4L2_CID_AUDIO_BALANCE:
116 case V4L2_CID_AUDIO_BASS:
117 case V4L2_CID_AUDIO_TREBLE:
118 case V4L2_CID_AUDIO_LOUDNESS:
119 return ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_S_CTRL, vctrl);
120
121 default:
122 IVTV_DEBUG_IOCTL("invalid control %x\n", vctrl->id);
123 return -EINVAL;
124 }
125 return 0;
126}
127
128static int ivtv_g_ctrl(struct ivtv *itv, struct v4l2_control *vctrl)
129{
130 IVTV_DEBUG_IOCTL("VIDIOC_G_CTRL(%08x)\n", vctrl->id);
131
132 switch (vctrl->id) {
133 /* Standard V4L2 controls */
134 case V4L2_CID_BRIGHTNESS:
135 case V4L2_CID_HUE:
136 case V4L2_CID_SATURATION:
137 case V4L2_CID_CONTRAST:
138 return itv->video_dec_func(itv, VIDIOC_G_CTRL, vctrl);
139
140 case V4L2_CID_AUDIO_VOLUME:
141 case V4L2_CID_AUDIO_MUTE:
142 case V4L2_CID_AUDIO_BALANCE:
143 case V4L2_CID_AUDIO_BASS:
144 case V4L2_CID_AUDIO_TREBLE:
145 case V4L2_CID_AUDIO_LOUDNESS:
146 return ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_G_CTRL, vctrl);
147 default:
148 IVTV_DEBUG_IOCTL("invalid control %x\n", vctrl->id);
149 return -EINVAL;
150 }
151 return 0;
152}
153
154static int ivtv_setup_vbi_fmt(struct ivtv *itv, enum v4l2_mpeg_stream_vbi_fmt fmt)
155{
156 if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_CAPTURE))
157 return -EINVAL;
158 if (atomic_read(&itv->capturing) > 0)
159 return -EBUSY;
160
161 /* First try to allocate sliced VBI buffers if needed. */
162 if (fmt && itv->vbi.sliced_mpeg_data[0] == NULL) {
163 int i;
164
165 for (i = 0; i < IVTV_VBI_FRAMES; i++) {
166 /* Yuck, hardcoded. Needs to be a define */
167 itv->vbi.sliced_mpeg_data[i] = kmalloc(2049, GFP_KERNEL);
168 if (itv->vbi.sliced_mpeg_data[i] == NULL) {
169 while (--i >= 0) {
170 kfree(itv->vbi.sliced_mpeg_data[i]);
171 itv->vbi.sliced_mpeg_data[i] = NULL;
172 }
173 return -ENOMEM;
174 }
175 }
176 }
177
178 itv->vbi.insert_mpeg = fmt;
179
180 if (itv->vbi.insert_mpeg == 0) {
181 return 0;
182 }
183 /* Need sliced data for mpeg insertion */
184 if (get_service_set(itv->vbi.sliced_in) == 0) {
185 if (itv->is_60hz)
186 itv->vbi.sliced_in->service_set = V4L2_SLICED_CAPTION_525;
187 else
188 itv->vbi.sliced_in->service_set = V4L2_SLICED_WSS_625;
189 expand_service_set(itv->vbi.sliced_in, itv->is_50hz);
190 }
191 return 0;
192}
193
194int ivtv_control_ioctls(struct ivtv *itv, unsigned int cmd, void *arg)
195{
196 struct v4l2_control ctrl;
197
198 switch (cmd) {
199 case VIDIOC_QUERYMENU:
200 IVTV_DEBUG_IOCTL("VIDIOC_QUERYMENU\n");
201 return ivtv_querymenu(itv, arg);
202
203 case VIDIOC_QUERYCTRL:
204 return ivtv_queryctrl(itv, arg);
205
206 case VIDIOC_S_CTRL:
207 return ivtv_s_ctrl(itv, arg);
208
209 case VIDIOC_G_CTRL:
210 return ivtv_g_ctrl(itv, arg);
211
212 case VIDIOC_S_EXT_CTRLS:
213 {
214 struct v4l2_ext_controls *c = arg;
215
216 if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
217 int i;
218 int err = 0;
219
220 for (i = 0; i < c->count; i++) {
221 ctrl.id = c->controls[i].id;
222 ctrl.value = c->controls[i].value;
223 err = ivtv_s_ctrl(itv, &ctrl);
224 c->controls[i].value = ctrl.value;
225 if (err) {
226 c->error_idx = i;
227 break;
228 }
229 }
230 return err;
231 }
232 IVTV_DEBUG_IOCTL("VIDIOC_S_EXT_CTRLS\n");
233 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
234 struct cx2341x_mpeg_params p = itv->params;
235 int err = cx2341x_ext_ctrls(&p, arg, cmd);
236
237 if (err)
238 return err;
239
240 if (p.video_encoding != itv->params.video_encoding) {
241 int is_mpeg1 = p.video_encoding ==
242 V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
243 struct v4l2_format fmt;
244
245 /* fix videodecoder resolution */
246 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
247 fmt.fmt.pix.width = itv->params.width / (is_mpeg1 ? 2 : 1);
248 fmt.fmt.pix.height = itv->params.height;
249 itv->video_dec_func(itv, VIDIOC_S_FMT, &fmt);
250 }
251 err = cx2341x_update(itv, ivtv_api_func, &itv->params, &p);
252 if (!err && itv->params.stream_vbi_fmt != p.stream_vbi_fmt) {
253 err = ivtv_setup_vbi_fmt(itv, p.stream_vbi_fmt);
254 }
255 itv->params = p;
256 itv->dualwatch_stereo_mode = p.audio_properties & 0x0300;
257 ivtv_audio_set_audio_clock_freq(itv, p.audio_properties & 0x03);
258 return err;
259 }
260 return -EINVAL;
261 }
262
263 case VIDIOC_G_EXT_CTRLS:
264 {
265 struct v4l2_ext_controls *c = arg;
266
267 if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
268 int i;
269 int err = 0;
270
271 for (i = 0; i < c->count; i++) {
272 ctrl.id = c->controls[i].id;
273 ctrl.value = c->controls[i].value;
274 err = ivtv_g_ctrl(itv, &ctrl);
275 c->controls[i].value = ctrl.value;
276 if (err) {
277 c->error_idx = i;
278 break;
279 }
280 }
281 return err;
282 }
283 IVTV_DEBUG_IOCTL("VIDIOC_G_EXT_CTRLS\n");
284 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
285 return cx2341x_ext_ctrls(&itv->params, arg, cmd);
286 return -EINVAL;
287 }
288
289 case VIDIOC_TRY_EXT_CTRLS:
290 {
291 struct v4l2_ext_controls *c = arg;
292
293 IVTV_DEBUG_IOCTL("VIDIOC_TRY_EXT_CTRLS\n");
294 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
295 return cx2341x_ext_ctrls(&itv->params, arg, cmd);
296 return -EINVAL;
297 }
298
299 default:
300 return -EINVAL;
301 }
302 return 0;
303}
diff --git a/drivers/media/video/ivtv/ivtv-controls.h b/drivers/media/video/ivtv/ivtv-controls.h
new file mode 100644
index 000000000000..5a11149725ad
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-controls.h
@@ -0,0 +1,21 @@
1/*
2 ioctl control functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21int ivtv_control_ioctls(struct ivtv *itv, unsigned int cmd, void *arg);
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
new file mode 100644
index 000000000000..45b9328a538f
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -0,0 +1,1374 @@
1/*
2 ivtv driver initialization and card probing
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22/* Main Driver file for the ivtv project:
23 * Driver for the Conexant CX23415/CX23416 chip.
24 * Author: Kevin Thayer (nufan_wfk at yahoo.com)
25 * License: GPL
26 * http://www.ivtvdriver.org
27 *
28 * -----
29 * MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com>
30 * and Takeru KOMORIYA<komoriya@paken.org>
31 *
32 * AVerMedia M179 GPIO info by Chris Pinkham <cpinkham@bc2va.org>
33 * using information provided by Jiun-Kuei Jung @ AVerMedia.
34 *
35 * Kurouto Sikou CX23416GYC-STVLP tested by K.Ohta <alpha292@bremen.or.jp>
36 * using information from T.Adachi,Takeru KOMORIYA and others :-)
37 *
38 * Nagase TRANSGEAR 5000TV, Aopen VA2000MAX-STN6 and I/O data GV-MVP/RX
39 * version by T.Adachi. Special thanks Mr.Suzuki
40 */
41
42#include "ivtv-driver.h"
43#include "ivtv-version.h"
44#include "ivtv-fileops.h"
45#include "ivtv-i2c.h"
46#include "ivtv-firmware.h"
47#include "ivtv-queue.h"
48#include "ivtv-udma.h"
49#include "ivtv-irq.h"
50#include "ivtv-mailbox.h"
51#include "ivtv-streams.h"
52#include "ivtv-ioctl.h"
53#include "ivtv-cards.h"
54#include "ivtv-vbi.h"
55#include "ivtv-audio.h"
56#include "ivtv-gpio.h"
57#include "ivtv-yuv.h"
58
59#include <linux/vermagic.h>
60#include <media/tveeprom.h>
61#include <media/v4l2-chip-ident.h>
62
63/* var to keep track of the number of array elements in use */
64int ivtv_cards_active = 0;
65
66/* If you already have X v4l cards, then set this to X. This way
67   the device numbers stay matched. Example: you have a WinTV card
68   without a radio and a PVR-350 with one. Normally this would give a
69   video1 device together with a radio0 device for the PVR. By
70   setting this to 1 you ensure that the radio is registered as radio1 instead. */
71int ivtv_first_minor = 0;
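/* For example (assuming ivtv is loaded as a module), this can be set with
   "modprobe ivtv ivtv_first_minor=1", or with an
   "options ivtv ivtv_first_minor=1" line in the modprobe configuration. */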
72
73/* Master variable for all ivtv info */
74struct ivtv *ivtv_cards[IVTV_MAX_CARDS];
75
76/* Protects ivtv_cards_active */
77spinlock_t ivtv_cards_lock = SPIN_LOCK_UNLOCKED;
78
79/* add your revision and whatnot here */
80static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
81 {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15,
82 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
83 {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16,
84 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
85 {0,}
86};
87
88MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
89
90const u32 yuv_offset[4] = {
91 IVTV_YUV_BUFFER_OFFSET,
92 IVTV_YUV_BUFFER_OFFSET_1,
93 IVTV_YUV_BUFFER_OFFSET_2,
94 IVTV_YUV_BUFFER_OFFSET_3
95};
96
97/* Parameter declarations */
98static int cardtype[IVTV_MAX_CARDS];
99static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 };
100static int radio[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 };
101
102static int cardtype_c = 1;
103static int tuner_c = 1;
104static int radio_c = 1;
105static char pal[] = "--";
106static char secam[] = "--";
107static char ntsc[] = "-";
108
109/* Buffers */
110static int enc_mpg_buffers = IVTV_DEFAULT_ENC_MPG_BUFFERS;
111static int enc_yuv_buffers = IVTV_DEFAULT_ENC_YUV_BUFFERS;
112static int enc_vbi_buffers = IVTV_DEFAULT_ENC_VBI_BUFFERS;
113static int enc_pcm_buffers = IVTV_DEFAULT_ENC_PCM_BUFFERS;
114static int dec_mpg_buffers = IVTV_DEFAULT_DEC_MPG_BUFFERS;
115static int dec_yuv_buffers = IVTV_DEFAULT_DEC_YUV_BUFFERS;
116static int dec_vbi_buffers = IVTV_DEFAULT_DEC_VBI_BUFFERS;
117
118static int ivtv_yuv_mode = 0;
119static int ivtv_yuv_threshold=-1;
120static int ivtv_pci_latency = 1;
121
122int ivtv_debug = 0;
123
124static int newi2c = -1;
125
126module_param_array(tuner, int, &tuner_c, 0644);
127module_param_array(radio, bool, &radio_c, 0644);
128module_param_array(cardtype, int, &cardtype_c, 0644);
129module_param_string(pal, pal, sizeof(pal), 0644);
130module_param_string(secam, secam, sizeof(secam), 0644);
131module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
132module_param_named(debug,ivtv_debug, int, 0644);
133module_param(ivtv_pci_latency, int, 0644);
134module_param(ivtv_yuv_mode, int, 0644);
135module_param(ivtv_yuv_threshold, int, 0644);
136module_param(ivtv_first_minor, int, 0644);
137
138module_param(enc_mpg_buffers, int, 0644);
139module_param(enc_yuv_buffers, int, 0644);
140module_param(enc_vbi_buffers, int, 0644);
141module_param(enc_pcm_buffers, int, 0644);
142module_param(dec_mpg_buffers, int, 0644);
143module_param(dec_yuv_buffers, int, 0644);
144module_param(dec_vbi_buffers, int, 0644);
145
146module_param(newi2c, int, 0644);
147
148MODULE_PARM_DESC(tuner, "Tuner type selection,\n"
149 "\t\t\tsee tuner.h for values");
150MODULE_PARM_DESC(radio,
151 "Enable or disable the radio. Use only if autodetection\n"
152 "\t\t\tfails. 0 = disable, 1 = enable");
153MODULE_PARM_DESC(cardtype,
154 "Only use this option if your card is not detected properly.\n"
155 "\t\tSpecify card type:\n"
156 "\t\t\t 1 = WinTV PVR 250\n"
157 "\t\t\t 2 = WinTV PVR 350\n"
158 "\t\t\t 3 = WinTV PVR-150 or PVR-500\n"
159 "\t\t\t 4 = AVerMedia M179\n"
160 "\t\t\t 5 = YUAN MPG600/Kuroutoshikou iTVC16-STVLP\n"
161 "\t\t\t 6 = YUAN MPG160/Kuroutoshikou iTVC15-STVLP\n"
162 "\t\t\t 7 = YUAN PG600/DIAMONDMM PVR-550 (CX Falcon 2)\n"
163 "\t\t\t 8 = Adaptec AVC-2410\n"
164 "\t\t\t 9 = Adaptec AVC-2010\n"
165 "\t\t\t10 = NAGASE TRANSGEAR 5000TV\n"
166 "\t\t\t11 = AOpen VA2000MAX-STN6\n"
167 "\t\t\t12 = YUAN MPG600GR/Kuroutoshikou CX23416GYC-STVLP\n"
168 "\t\t\t13 = I/O Data GV-MVP/RX\n"
169 "\t\t\t14 = I/O Data GV-MVP/RX2E\n"
170 "\t\t\t15 = GOTVIEW PCI DVD\n"
171 "\t\t\t16 = GOTVIEW PCI DVD2 Deluxe\n"
172 "\t\t\t17 = Yuan MPC622\n"
173 "\t\t\t18 = Digital Cowboy DCT-MTVP1\n"
174#ifdef HAVE_XC3028
175 "\t\t\t19 = Yuan PG600V2/GotView PCI DVD Lite/Club3D ZAP-TV1x01\n"
176#endif
177 "\t\t\t 0 = Autodetect (default)\n"
178 "\t\t\t-1 = Ignore this card\n\t\t");
179MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
180MODULE_PARM_DESC(secam, "Set SECAM standard: B, G, H, D, K, L, LC");
181MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J, K");
182MODULE_PARM_DESC(debug,
183 "Debug level (bitmask). Default: errors only\n"
184 "\t\t\t(debug = 511 gives full debugging)");
185MODULE_PARM_DESC(ivtv_pci_latency,
186 "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
187 "\t\t\tDefault: Yes");
188MODULE_PARM_DESC(ivtv_yuv_mode,
189 "Specify the yuv playback mode:\n"
190 "\t\t\t0 = interlaced\n\t\t\t1 = progressive\n\t\t\t2 = auto\n"
191 "\t\t\tDefault: 0 (interlaced)");
192MODULE_PARM_DESC(ivtv_yuv_threshold,
193		 "If ivtv_yuv_mode is 2 (auto) then play back content as\n\t\tprogressive if src height <= ivtv_yuv_threshold\n"
194		 "\t\t\tDefault: 480");
195MODULE_PARM_DESC(enc_mpg_buffers,
196 "Encoder MPG Buffers (in MB)\n"
197 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_MPG_BUFFERS));
198MODULE_PARM_DESC(enc_yuv_buffers,
199 "Encoder YUV Buffers (in MB)\n"
200 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_YUV_BUFFERS));
201MODULE_PARM_DESC(enc_vbi_buffers,
202 "Encoder VBI Buffers (in MB)\n"
203 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_VBI_BUFFERS));
204MODULE_PARM_DESC(enc_pcm_buffers,
205 "Encoder PCM buffers (in MB)\n"
206 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_PCM_BUFFERS));
207MODULE_PARM_DESC(dec_mpg_buffers,
208 "Decoder MPG buffers (in MB)\n"
209 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_MPG_BUFFERS));
210MODULE_PARM_DESC(dec_yuv_buffers,
211 "Decoder YUV buffers (in MB)\n"
212 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_YUV_BUFFERS));
213MODULE_PARM_DESC(dec_vbi_buffers,
214 "Decoder VBI buffers (in MB)\n"
215 "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_VBI_BUFFERS));
216MODULE_PARM_DESC(newi2c,
217 "Use new I2C implementation\n"
218 "\t\t\t-1 is autodetect, 0 is off, 1 is on\n"
219 "\t\t\tDefault is autodetect");
220
221MODULE_PARM_DESC(ivtv_first_minor, "Set minor assigned to first card");
222
223MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil");
224MODULE_DESCRIPTION("CX23415/CX23416 driver");
225MODULE_SUPPORTED_DEVICE
226 ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n"
227 "\t\t\tYuan MPG series and similar)");
228MODULE_LICENSE("GPL");
229
230MODULE_VERSION(IVTV_VERSION);
231
232void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask)
233{
234 itv->irqmask &= ~mask;
235 write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK);
236}
237
238void ivtv_set_irq_mask(struct ivtv *itv, u32 mask)
239{
240 itv->irqmask |= mask;
241 write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK);
242}
243
244int ivtv_set_output_mode(struct ivtv *itv, int mode)
245{
246 int old_mode;
247
248 spin_lock(&itv->lock);
249 old_mode = itv->output_mode;
250 if (old_mode == 0)
251 itv->output_mode = old_mode = mode;
252 spin_unlock(&itv->lock);
253 return old_mode;
254}
255
256struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv)
257{
258 switch (itv->output_mode) {
259 case OUT_MPG:
260 return &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
261 case OUT_YUV:
262 return &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
263 default:
264 return NULL;
265 }
266}
267
268int ivtv_waitq(wait_queue_head_t *waitq)
269{
270 DEFINE_WAIT(wait);
271
272 prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
273 schedule();
274 finish_wait(waitq, &wait);
275 return signal_pending(current) ? -EINTR : 0;
276}
277
278/* Generic utility functions */
279int ivtv_sleep_timeout(int timeout, int intr)
280{
281 int ret;
282
283 do {
284 set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
285 timeout = schedule_timeout(timeout);
286 if (intr && (ret = signal_pending(current)))
287 return ret;
288 } while (timeout);
289 return 0;
290}
291
292/* Release ioremapped memory */
293static void ivtv_iounmap(struct ivtv *itv)
294{
295 if (itv == NULL)
296 return;
297
298 /* Release registers memory */
299 if (itv->reg_mem != NULL) {
300 IVTV_DEBUG_INFO("releasing reg_mem\n");
301 iounmap(itv->reg_mem);
302 itv->reg_mem = NULL;
303 }
304 /* Release io memory */
305 if (itv->has_cx23415 && itv->dec_mem != NULL) {
306 IVTV_DEBUG_INFO("releasing dec_mem\n");
307 iounmap(itv->dec_mem);
308 }
309 itv->dec_mem = NULL;
310
311 /* Release io memory */
312 if (itv->enc_mem != NULL) {
313 IVTV_DEBUG_INFO("releasing enc_mem\n");
314 iounmap(itv->enc_mem);
315 itv->enc_mem = NULL;
316 }
317}
318
319/* Hauppauge card? get values from tveeprom */
320void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv)
321{
322 u8 eedata[256];
323
324 itv->i2c_client.addr = 0xA0 >> 1;
325 tveeprom_read(&itv->i2c_client, eedata, sizeof(eedata));
326 tveeprom_hauppauge_analog(&itv->i2c_client, tv, eedata);
327}
328
329static void ivtv_process_eeprom(struct ivtv *itv)
330{
331 struct tveeprom tv;
332 int pci_slot = PCI_SLOT(itv->dev->devfn);
333
334 ivtv_read_eeprom(itv, &tv);
335
336 /* Many thanks to Steven Toth from Hauppauge for providing the
337 model numbers */
338 switch (tv.model) {
339 /* In a few cases the PCI subsystem IDs do not correctly
340 identify the card. A better method is to check the
341 model number from the eeprom instead. */
342 case 32000 ... 32999:
343 case 48000 ... 48099: /* 48??? range are PVR250s with a cx23415 */
344 case 48400 ... 48599:
345 itv->card = ivtv_get_card(IVTV_CARD_PVR_250);
346 break;
347 case 48100 ... 48399:
348 case 48600 ... 48999:
349 itv->card = ivtv_get_card(IVTV_CARD_PVR_350);
350 break;
351 case 23000 ... 23999: /* PVR500 */
352 case 25000 ... 25999: /* Low profile PVR150 */
353 case 26000 ... 26999: /* Regular PVR150 */
354 itv->card = ivtv_get_card(IVTV_CARD_PVR_150);
355 break;
356 case 0:
357 IVTV_ERR("Invalid EEPROM\n");
358 return;
359 default:
360 IVTV_ERR("Unknown model %d, defaulting to PVR-150\n", tv.model);
361 itv->card = ivtv_get_card(IVTV_CARD_PVR_150);
362 break;
363 }
364
365 switch (tv.model) {
366 /* Old style PVR350 (with an saa7114) uses this input for
367 the tuner. */
368 case 48254:
369 itv->card = ivtv_get_card(IVTV_CARD_PVR_350_V1);
370 break;
371 default:
372 break;
373 }
374
375 itv->v4l2_cap = itv->card->v4l2_capabilities;
376 itv->card_name = itv->card->name;
377
378 /* If this is a PVR500 then it should be possible to detect whether it is the
379 first or second unit by looking at the subsystem device ID: if bit 4 is
380 set, then it is the second unit (according to info from Hauppauge).
381
382 However, while this works for most cards, I have seen a few PVR500 cards
383 where both units have the same subsystem ID.
384
385 So instead I look at the reported 'PCI slot' (which is the slot on the PVR500
386 PCI bridge) and if it is 8, then it is assumed to be the first unit, otherwise
387 it is the second unit. It is possible that it is a different slot when ivtv is
388 used in Xen; in that case I ignore this card here. The worst that can happen
389 is that the card presents itself with a non-working radio device.
390
391 This detection is needed since the eeprom reports incorrectly that a radio is
392 present on the second unit. */
393 if (tv.model / 1000 == 23) {
394 itv->card_name = "WinTV PVR 500";
395 if (pci_slot == 8 || pci_slot == 9) {
396 int is_first = (pci_slot & 1) == 0;
397
398 itv->card_name = is_first ? "WinTV PVR 500 (unit #1)" :
399 "WinTV PVR 500 (unit #2)";
400 if (!is_first) {
401 IVTV_INFO("Correcting tveeprom data: no radio present on second unit\n");
402 tv.has_radio = 0;
403 }
404 }
405 }
406 IVTV_INFO("Autodetected %s\n", itv->card_name);
407
408 switch (tv.tuner_hauppauge_model) {
409 case 85:
410 case 99:
411 case 112:
412 itv->pvr150_workaround = 1;
413 break;
414 default:
415 break;
416 }
417 if (tv.tuner_type == TUNER_ABSENT)
418 IVTV_ERR("tveeprom cannot autodetect tuner!\n");
419
420 if (itv->options.tuner == -1)
421 itv->options.tuner = tv.tuner_type;
422 if (itv->options.radio == -1)
423 itv->options.radio = (tv.has_radio != 0);
424 /* only enable newi2c if an IR blaster is present */
425 /* FIXME: for 2.6.20 the test against 2 should be removed */
426 if (itv->options.newi2c == -1 && tv.has_ir != -1 && tv.has_ir != 2) {
427 itv->options.newi2c = (tv.has_ir & 2) ? 1 : 0;
428 if (itv->options.newi2c) {
429 IVTV_INFO("reopen i2c bus for IR-blaster support\n");
430 exit_ivtv_i2c(itv);
431 init_ivtv_i2c(itv);
432 }
433 }
434
435 if (itv->std != 0)
436 /* user specified tuner standard */
437 return;
438
439 /* autodetect tuner standard */
440 if (tv.tuner_formats & V4L2_STD_PAL) {
441 IVTV_DEBUG_INFO("PAL tuner detected\n");
442 itv->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
443 } else if (tv.tuner_formats & V4L2_STD_NTSC) {
444 IVTV_DEBUG_INFO("NTSC tuner detected\n");
445 itv->std |= V4L2_STD_NTSC_M;
446 } else if (tv.tuner_formats & V4L2_STD_SECAM) {
447 IVTV_DEBUG_INFO("SECAM tuner detected\n");
448 itv->std |= V4L2_STD_SECAM_L;
449 } else {
450 IVTV_INFO("No tuner detected, default to NTSC-M\n");
451 itv->std |= V4L2_STD_NTSC_M;
452 }
453}
454
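/* Translate the pal=/secam=/ntsc= module parameters into a v4l2_std_id.
   Returns 0 if no (or an unrecognised) standard was specified. */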
455static v4l2_std_id ivtv_parse_std(struct ivtv *itv)
456{
457 switch (pal[0]) {
458 case '6':
459 return V4L2_STD_PAL_60;
460 case 'b':
461 case 'B':
462 case 'g':
463 case 'G':
464 return V4L2_STD_PAL_BG;
465 case 'h':
466 case 'H':
467 return V4L2_STD_PAL_H;
468 case 'n':
469 case 'N':
470 if (pal[1] == 'c' || pal[1] == 'C')
471 return V4L2_STD_PAL_Nc;
472 return V4L2_STD_PAL_N;
473 case 'i':
474 case 'I':
475 return V4L2_STD_PAL_I;
476 case 'd':
477 case 'D':
478 case 'k':
479 case 'K':
480 return V4L2_STD_PAL_DK;
481 case 'M':
482 case 'm':
483 return V4L2_STD_PAL_M;
484 case '-':
485 break;
486 default:
487 IVTV_WARN("pal= argument not recognised\n");
488 return 0;
489 }
490
491 switch (secam[0]) {
492 case 'b':
493 case 'B':
494 case 'g':
495 case 'G':
496 case 'h':
497 case 'H':
498 return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H;
499 case 'd':
500 case 'D':
501 case 'k':
502 case 'K':
503 return V4L2_STD_SECAM_DK;
504 case 'l':
505 case 'L':
506 if (secam[1] == 'C' || secam[1] == 'c')
507 return V4L2_STD_SECAM_LC;
508 return V4L2_STD_SECAM_L;
509 case '-':
510 break;
511 default:
512 IVTV_WARN("secam= argument not recognised\n");
513 return 0;
514 }
515
516 switch (ntsc[0]) {
517 case 'm':
518 case 'M':
519 return V4L2_STD_NTSC_M;
520 case 'j':
521 case 'J':
522 return V4L2_STD_NTSC_M_JP;
523 case 'k':
524 case 'K':
525 return V4L2_STD_NTSC_M_KR;
526 case '-':
527 break;
528 default:
529 IVTV_WARN("ntsc= argument not recognised\n");
530 return 0;
531 }
532
533 /* no match found */
534 return 0;
535}
536
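/* Process the module options: copy the buffer sizes and per-card parameters
   into the ivtv struct, parse the TV standard and determine the card type
   (user override, Hauppauge heuristic, then the PCI subsystem ID tables). */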
537static void ivtv_process_options(struct ivtv *itv)
538{
539 const char *chipname;
540 int i, j;
541
542 itv->options.megabytes[IVTV_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers;
543 itv->options.megabytes[IVTV_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers;
544 itv->options.megabytes[IVTV_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers;
545 itv->options.megabytes[IVTV_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers;
546 itv->options.megabytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers;
547 itv->options.megabytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers;
548 itv->options.megabytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers;
549 itv->options.cardtype = cardtype[itv->num];
550 itv->options.tuner = tuner[itv->num];
551 itv->options.radio = radio[itv->num];
552 itv->options.newi2c = newi2c;
553
554 itv->std = ivtv_parse_std(itv);
555 itv->has_cx23415 = (itv->dev->device == PCI_DEVICE_ID_IVTV15);
556 chipname = itv->has_cx23415 ? "cx23415" : "cx23416";
557 if (itv->options.cardtype == -1) {
558 IVTV_INFO("Ignore card (detected %s based chip)\n", chipname);
559 return;
560 }
561 if ((itv->card = ivtv_get_card(itv->options.cardtype - 1))) {
562 IVTV_INFO("User specified %s card (detected %s based chip)\n",
563 itv->card->name, chipname);
564 } else if (itv->options.cardtype != 0) {
565 IVTV_ERR("Unknown user specified type, trying to autodetect card\n");
566 }
567 if (itv->card == NULL) {
568 if (itv->dev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE ||
569 itv->dev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT1 ||
570 itv->dev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT2) {
571 itv->card = ivtv_get_card(itv->has_cx23415 ? IVTV_CARD_PVR_350 : IVTV_CARD_PVR_150);
572 IVTV_INFO("Autodetected Hauppauge card (%s based)\n",
573 chipname);
574 }
575 }
576 if (itv->card == NULL) {
577 for (i = 0; (itv->card = ivtv_get_card(i)); i++) {
578 if (itv->card->pci_list == NULL)
579 continue;
580 for (j = 0; itv->card->pci_list[j].device; j++) {
581 if (itv->dev->device !=
582 itv->card->pci_list[j].device)
583 continue;
584 if (itv->dev->subsystem_vendor !=
585 itv->card->pci_list[j].subsystem_vendor)
586 continue;
587 if (itv->dev->subsystem_device !=
588 itv->card->pci_list[j].subsystem_device)
589 continue;
590 IVTV_INFO("Autodetected %s card (%s based)\n",
591 itv->card->name, chipname);
592 goto done;
593 }
594 }
595 }
596done:
597
598 if (itv->card == NULL) {
599 itv->card = ivtv_get_card(IVTV_CARD_PVR_150);
600 IVTV_ERR("Unknown card: vendor/device: %04x/%04x\n",
601 itv->dev->vendor, itv->dev->device);
602 IVTV_ERR(" subsystem vendor/device: %04x/%04x\n",
603 itv->dev->subsystem_vendor, itv->dev->subsystem_device);
604 IVTV_ERR(" %s based\n", chipname);
605 IVTV_ERR("Defaulting to %s card\n", itv->card->name);
606 IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
607 IVTV_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n");
608 IVTV_ERR("Prefix your subject line with [UNKNOWN CARD].\n");
609 }
610 itv->v4l2_cap = itv->card->v4l2_capabilities;
611 itv->card_name = itv->card->name;
612}
613
614/* Precondition: the ivtv structure has been memset to 0. Only
615 the dev and num fields have been filled in.
616 No assumptions on the card type may be made here (see ivtv_init_struct2
617 for that).
618 */
619static int __devinit ivtv_init_struct1(struct ivtv *itv)
620{
621 itv->base_addr = pci_resource_start(itv->dev, 0);
622 itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
623 itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
624
625 mutex_init(&itv->i2c_bus_lock);
626 mutex_init(&itv->udma.lock);
627
628 spin_lock_init(&itv->lock);
629 spin_lock_init(&itv->dma_reg_lock);
630
631 itv->irq_work_queues = create_workqueue(itv->name);
632 if (itv->irq_work_queues == NULL) {
633 IVTV_ERR("Could not create ivtv workqueue\n");
634 return -1;
635 }
636
637 INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler);
638
639 /* start counting open_id at 1 */
640 itv->open_id = 1;
641
642 /* Initial settings */
643 cx2341x_fill_defaults(&itv->params);
644 itv->params.port = CX2341X_PORT_MEMORY;
645 itv->params.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
646 init_waitqueue_head(&itv->cap_w);
647 init_waitqueue_head(&itv->event_waitq);
648 init_waitqueue_head(&itv->vsync_waitq);
649 init_waitqueue_head(&itv->dma_waitq);
650 init_timer(&itv->dma_timer);
651 itv->dma_timer.function = ivtv_unfinished_dma;
652 itv->dma_timer.data = (unsigned long)itv;
653
654 itv->cur_dma_stream = -1;
655 itv->audio_stereo_mode = AUDIO_STEREO;
656 itv->audio_bilingual_mode = AUDIO_MONO_LEFT;
657
658 /* Ctrls */
659 itv->speed = 1000;
660
661 /* VBI */
662 itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
663 itv->vbi.sliced_in = &itv->vbi.in.fmt.sliced;
664
665 /* OSD */
666 itv->osd_global_alpha_state = 1;
667 itv->osd_global_alpha = 255;
668
669 /* YUV */
670 atomic_set(&itv->yuv_info.next_dma_frame, -1);
671 itv->yuv_info.lace_mode = ivtv_yuv_mode;
672 itv->yuv_info.lace_threshold = ivtv_yuv_threshold;
673 return 0;
674}
675
676/* Second initialization part. Here the card type has been
677 autodetected. */
678static void __devinit ivtv_init_struct2(struct ivtv *itv)
679{
680 int i;
681
682 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
683 if (itv->card->video_inputs[i].video_type == 0)
684 break;
685 itv->nof_inputs = i;
686 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
687 if (itv->card->audio_inputs[i].audio_type == 0)
688 break;
689 itv->nof_audio_inputs = i;
690
691 /* 0x00EF = saa7114(239) 0x00F0 = saa7115(240) 0x0106 = micro */
692 if (itv->card->hw_all & (IVTV_HW_SAA7115 | IVTV_HW_SAA717X))
693 itv->digitizer = 0xF1;
694 else if (itv->card->hw_all & IVTV_HW_SAA7114)
695 itv->digitizer = 0xEF;
696 else /* cx25840 */
697 itv->digitizer = 0x140;
698
699 if (itv->card->hw_all & IVTV_HW_CX25840) {
700 itv->vbi.sliced_size = 288; /* multiple of 16, real size = 284 */
701 } else {
702 itv->vbi.sliced_size = 64; /* multiple of 16, real size = 52 */
703 }
704
705 /* Find tuner input */
706 for (i = 0; i < itv->nof_inputs; i++) {
707 if (itv->card->video_inputs[i].video_type ==
708 IVTV_CARD_INPUT_VID_TUNER)
709 break;
710 }
711 if (i == itv->nof_inputs)
712 i = 0;
713 itv->active_input = i;
714 itv->audio_input = itv->card->video_inputs[i].audio_index;
715 if (itv->card->hw_all & IVTV_HW_CX25840)
716 itv->video_dec_func = ivtv_cx25840;
717 else if (itv->card->hw_all & IVTV_HW_SAA717X)
718 itv->video_dec_func = ivtv_saa717x;
719 else
720 itv->video_dec_func = ivtv_saa7115;
721}
722
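/* Enable the PCI device, set the DMA mask, reserve the encoder, decoder and
   register memory regions, and make sure bus mastering and a sane PCI
   latency timer are set up. */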
723static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev,
724 const struct pci_device_id *pci_id)
725{
726 u16 cmd;
727 unsigned char pci_latency;
728
729 IVTV_DEBUG_INFO("Enabling pci device\n");
730
731 if (pci_enable_device(dev)) {
732 IVTV_ERR("Can't enable device %d!\n", itv->num);
733 return -EIO;
734 }
735 if (pci_set_dma_mask(dev, 0xffffffff)) {
736 IVTV_ERR("No suitable DMA available on card %d.\n", itv->num);
737 return -EIO;
738 }
739 if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) {
740 IVTV_ERR("Cannot request encoder memory region on card %d.\n", itv->num);
741 return -EIO;
742 }
743
744 if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET,
745 IVTV_REG_SIZE, "ivtv registers")) {
746 IVTV_ERR("Cannot request register memory region on card %d.\n", itv->num);
747 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
748 return -EIO;
749 }
750
751 if (itv->has_cx23415 &&
752 !request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET,
753 IVTV_DECODER_SIZE, "ivtv decoder")) {
754 IVTV_ERR("Cannot request decoder memory region on card %d.\n", itv->num);
755 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
756 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
757 return -EIO;
758 }
759
760 /* Check for bus mastering */
761 pci_read_config_word(dev, PCI_COMMAND, &cmd);
762 if (!(cmd & PCI_COMMAND_MASTER)) {
763 IVTV_DEBUG_INFO("Attempting to enable Bus Mastering\n");
764 pci_set_master(dev);
765 pci_read_config_word(dev, PCI_COMMAND, &cmd);
766 if (!(cmd & PCI_COMMAND_MASTER)) {
767 IVTV_ERR("Bus Mastering is not enabled\n");
768 return -ENXIO;
769 }
770 }
771 IVTV_DEBUG_INFO("Bus Mastering Enabled.\n");
772
773 pci_read_config_byte(dev, PCI_CLASS_REVISION, &itv->card_rev);
774 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
775
776 if (pci_latency < 64 && ivtv_pci_latency) {
777 IVTV_INFO("Unreasonably low latency timer, "
778 "setting to 64 (was %d)\n", pci_latency);
779 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
780 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
781 }
782 /* This config space value relates to DMA latencies. The
783 default value 0x8080, however, is too low and will lead
784 to DMA errors. 0xffff is the max value which solves
785 these problems. */
786 pci_write_config_dword(dev, 0x40, 0xffff);
787
788 IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
789 "irq: %d, latency: %d, memory: 0x%lx\n",
790 itv->dev->device, itv->card_rev, dev->bus->number,
791 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
792 itv->dev->irq, pci_latency, (unsigned long)itv->base_addr);
793
794 return 0;
795}
796
797static void ivtv_request_module(struct ivtv *itv, const char *name)
798{
799 if (request_module(name) != 0) {
800 IVTV_ERR("Failed to load module %s\n", name);
801 } else {
802 IVTV_DEBUG_INFO("Loaded module %s\n", name);
803 }
804}
805
806static void ivtv_load_and_init_modules(struct ivtv *itv)
807{
808 struct v4l2_control ctrl;
809 u32 hw = itv->card->hw_all;
810 int i;
811
812 /* load modules */
813#ifndef CONFIG_VIDEO_TUNER
814 if (hw & IVTV_HW_TUNER) {
815 ivtv_request_module(itv, "tuner");
816#ifdef HAVE_XC3028
817 if (itv->options.tuner == TUNER_XCEIVE_XC3028)
818 ivtv_request_module(itv, "xc3028-tuner");
819#endif
820 }
821#endif
822#ifndef CONFIG_VIDEO_CX25840
823 if (hw & IVTV_HW_CX25840)
824 ivtv_request_module(itv, "cx25840");
825#endif
826#ifndef CONFIG_VIDEO_SAA711X
827 if (hw & IVTV_HW_SAA711X)
828 ivtv_request_module(itv, "saa7115");
829#endif
830#ifndef CONFIG_VIDEO_SAA7127
831 if (hw & IVTV_HW_SAA7127)
832 ivtv_request_module(itv, "saa7127");
833#endif
834 if (hw & IVTV_HW_SAA717X)
835 ivtv_request_module(itv, "saa717x");
836#ifndef CONFIG_VIDEO_UPD64031A
837 if (hw & IVTV_HW_UPD64031A)
838 ivtv_request_module(itv, "upd64031a");
839#endif
840#ifndef CONFIG_VIDEO_UPD64083
841 if (hw & IVTV_HW_UPD6408X)
842 ivtv_request_module(itv, "upd64083");
843#endif
844#ifndef CONFIG_VIDEO_MSP3400
845 if (hw & IVTV_HW_MSP34XX)
846 ivtv_request_module(itv, "msp3400");
847#endif
848 if (hw & IVTV_HW_TVAUDIO)
849 ivtv_request_module(itv, "tvaudio");
850#ifndef CONFIG_VIDEO_WM8775
851 if (hw & IVTV_HW_WM8775)
852 ivtv_request_module(itv, "wm8775");
853#endif
854#ifndef CONFIG_VIDEO_WM8739
855 if (hw & IVTV_HW_WM8739)
856 ivtv_request_module(itv, "wm8739");
857#endif
858#ifndef CONFIG_VIDEO_CS53L32A
859 if (hw & IVTV_HW_CS53L32A)
860 ivtv_request_module(itv, "cs53l32a");
861#endif
862
863 /* check which i2c devices are actually found */
864 for (i = 0; i < 32; i++) {
865 u32 device = 1 << i;
866
867 if (!(device & hw))
868 continue;
869 if (device == IVTV_HW_GPIO) {
870 /* GPIO is always available */
871 itv->hw_flags |= IVTV_HW_GPIO;
872 continue;
873 }
874 if (ivtv_i2c_hw_addr(itv, device) > 0)
875 itv->hw_flags |= device;
876 }
877
878 hw = itv->hw_flags;
879
880 if (itv->card->type == IVTV_CARD_CX23416GYC) {
881 /* Several variations of this card exist, detect which card
882 type should be used. */
883 if ((hw & (IVTV_HW_UPD64031A | IVTV_HW_UPD6408X)) == 0)
884 itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGRYCS);
885 else if ((hw & IVTV_HW_UPD64031A) == 0)
886 itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGR);
887 }
888
889 if (hw & IVTV_HW_CX25840) {
890 /* CX25840_CID_ENABLE_PVR150_WORKAROUND */
891 ctrl.id = V4L2_CID_PRIVATE_BASE;
892 ctrl.value = itv->pvr150_workaround;
893 itv->video_dec_func(itv, VIDIOC_S_CTRL, &ctrl);
894
895 itv->vbi.raw_decoder_line_size = 1444;
896 itv->vbi.raw_decoder_sav_odd_field = 0x20;
897 itv->vbi.raw_decoder_sav_even_field = 0x60;
898 itv->vbi.sliced_decoder_line_size = 272;
899 itv->vbi.sliced_decoder_sav_odd_field = 0xB0;
900 itv->vbi.sliced_decoder_sav_even_field = 0xF0;
901 }
902
903 if (hw & IVTV_HW_SAA711X) {
904 struct v4l2_chip_ident v = { V4L2_CHIP_MATCH_I2C_DRIVER, I2C_DRIVERID_SAA711X };
905
906 /* determine the exact saa711x model */
907 itv->hw_flags &= ~IVTV_HW_SAA711X;
908
909 ivtv_saa7115(itv, VIDIOC_G_CHIP_IDENT, &v);
910 if (v.ident == V4L2_IDENT_SAA7114) {
911 itv->hw_flags |= IVTV_HW_SAA7114;
912 /* VBI is not yet supported by the saa7114 driver. */
913 itv->v4l2_cap &= ~(V4L2_CAP_SLICED_VBI_CAPTURE|V4L2_CAP_VBI_CAPTURE);
914 }
915 else {
916 itv->hw_flags |= IVTV_HW_SAA7115;
917 }
918 itv->vbi.raw_decoder_line_size = 1443;
919 itv->vbi.raw_decoder_sav_odd_field = 0x25;
920 itv->vbi.raw_decoder_sav_even_field = 0x62;
921 itv->vbi.sliced_decoder_line_size = 51;
922 itv->vbi.sliced_decoder_sav_odd_field = 0xAB;
923 itv->vbi.sliced_decoder_sav_even_field = 0xEC;
924 }
925
926 if (hw & IVTV_HW_SAA717X) {
927 itv->vbi.raw_decoder_line_size = 1443;
928 itv->vbi.raw_decoder_sav_odd_field = 0x25;
929 itv->vbi.raw_decoder_sav_even_field = 0x62;
930 itv->vbi.sliced_decoder_line_size = 51;
931 itv->vbi.sliced_decoder_sav_odd_field = 0xAB;
932 itv->vbi.sliced_decoder_sav_even_field = 0xEC;
933 }
934}
935
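/* Probe one cx23415/6 based card: allocate the ivtv struct, map the PCI
   memory regions, load the firmware, set up i2c, autodetect the card and
   tuner, register the V4L2 streams and hook up the interrupt handler. */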
936static int __devinit ivtv_probe(struct pci_dev *dev,
937 const struct pci_device_id *pci_id)
938{
939 int retval = 0;
940 int video_input;
941 int yuv_buf_size;
942 int vbi_buf_size;
943 int fw_retry_count = 3;
944 struct ivtv *itv;
945 struct v4l2_frequency vf;
946
947 spin_lock(&ivtv_cards_lock);
948
949 /* Make sure we've got a place for this card */
950 if (ivtv_cards_active == IVTV_MAX_CARDS) {
951 printk(KERN_ERR "ivtv: Maximum number of cards detected (%d).\n",
952 ivtv_cards_active);
953 spin_unlock(&ivtv_cards_lock);
954 return -ENOMEM;
955 }
956
957 itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC);
958 if (itv == NULL) {
959 spin_unlock(&ivtv_cards_lock);
960 return -ENOMEM;
961 }
962 ivtv_cards[ivtv_cards_active] = itv;
963 itv->dev = dev;
964 itv->num = ivtv_cards_active++;
965 snprintf(itv->name, sizeof(itv->name) - 1, "ivtv%d", itv->num);
966 if (itv->num) {
967 printk(KERN_INFO "ivtv: ====================== NEXT CARD ======================\n");
968 }
969
970 spin_unlock(&ivtv_cards_lock);
971
972 ivtv_process_options(itv);
973 if (itv->options.cardtype == -1) {
974 retval = -ENODEV;
975 goto err;
976 }
977 if (ivtv_init_struct1(itv)) {
978 retval = -ENOMEM;
979 goto err;
980 }
981
982 IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
983
984 /* PCI Device Setup */
985 if ((retval = ivtv_setup_pci(itv, dev, pci_id)) != 0) {
986 if (retval == -EIO)
987 goto free_workqueue;
988 else if (retval == -ENXIO)
989 goto free_mem;
990 }
991 /* save itv in the pci struct for later use */
992 pci_set_drvdata(dev, itv);
993
994 /* map io memory */
995 IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
996 itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
997 itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
998 IVTV_ENCODER_SIZE);
999 if (!itv->enc_mem) {
1000 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
1001 IVTV_ERR("or disabling CONFIG_HIGHMEM4G in the kernel config would help\n");
1002 retval = -ENOMEM;
1003 goto free_mem;
1004 }
1005
1006 if (itv->has_cx23415) {
1007 IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
1008 itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
1009 itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
1010 IVTV_DECODER_SIZE);
1011 if (!itv->dec_mem) {
1012 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
1013 IVTV_ERR("or disabling CONFIG_HIGHMEM4G in the kernel config would help\n");
1014 retval = -ENOMEM;
1015 goto free_mem;
1016 }
1017 }
1018 else {
1019 itv->dec_mem = itv->enc_mem;
1020 }
1021
1022 /* map registers memory */
1023 IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
1024 itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
1025 itv->reg_mem =
1026 ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
1027 if (!itv->reg_mem) {
1028 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
1029 IVTV_ERR("or disabling CONFIG_HIGHMEM4G in the kernel config would help\n");
1030 retval = -ENOMEM;
1031 goto free_io;
1032 }
1033
1034 while (--fw_retry_count > 0) {
1035 /* load firmware */
1036 if (ivtv_firmware_init(itv) == 0)
1037 break;
1038 if (fw_retry_count > 1)
1039 IVTV_WARN("Retry loading firmware\n");
1040 }
1041 if (fw_retry_count == 0) {
1042 IVTV_ERR("Error initializing firmware\n");
1043 goto free_i2c;
1044 }
1045
1046 /* Try and get firmware versions */
1047 IVTV_DEBUG_INFO("Getting firmware version...\n");
1048 ivtv_firmware_versions(itv);
1049
1050 /* Check yuv output filter table */
1051 if (itv->has_cx23415) ivtv_yuv_filter_check(itv);
1052
1053 ivtv_gpio_init(itv);
1054
1055 /* active i2c */
1056 IVTV_DEBUG_INFO("activating i2c...\n");
1057 if (init_ivtv_i2c(itv)) {
1058 IVTV_ERR("Could not initialize i2c\n");
1059 goto free_irq;
1060 }
1061
1062 IVTV_DEBUG_INFO("Active card count: %d.\n", ivtv_cards_active);
1063
1064 if (itv->card->hw_all & IVTV_HW_TVEEPROM) {
1065#ifdef CONFIG_VIDEO_TVEEPROM_MODULE
1066 ivtv_request_module(itv, "tveeprom");
1067#endif
1068 /* Based on the model number the cardtype may be changed.
1069 The PCI IDs are not always reliable. */
1070 ivtv_process_eeprom(itv);
1071 }
1072
1073 if (itv->std == 0) {
1074 itv->std = V4L2_STD_NTSC_M;
1075 }
1076
1077 if (itv->options.tuner == -1) {
1078 int i;
1079
1080 for (i = 0; i < IVTV_CARD_MAX_TUNERS; i++) {
1081 if ((itv->std & itv->card->tuners[i].std) == 0)
1082 continue;
1083 itv->options.tuner = itv->card->tuners[i].tuner;
1084 break;
1085 }
1086 }
1087 /* if no tuner was found, then pick the first tuner in the card list */
1088 if (itv->options.tuner == -1 && itv->card->tuners[0].std) {
1089 itv->std = itv->card->tuners[0].std;
1090 itv->options.tuner = itv->card->tuners[0].tuner;
1091 }
1092 if (itv->options.radio == -1)
1093 itv->options.radio = (itv->card->radio_input.audio_type != 0);
1094
1095 /* The card is now fully identified, continue with card-specific
1096 initialization. */
1097 ivtv_init_struct2(itv);
1098
1099 ivtv_load_and_init_modules(itv);
1100
1101 if (itv->std & V4L2_STD_525_60) {
1102 itv->is_60hz = 1;
1103 itv->is_out_60hz = 1;
1104 } else {
1105 itv->is_50hz = 1;
1106 itv->is_out_50hz = 1;
1107 }
1108 itv->params.video_gop_size = itv->is_60hz ? 15 : 12;
1109
1110 itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_MPG] = 0x08000;
1111 itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_PCM] = 0x01200;
1112 itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_MPG] = 0x10000;
1113
1114 /* 0x15180 == 720 * 480 / 4, 0x19500 == 720 * 576 / 4 */
1115 yuv_buf_size = itv->is_60hz ? 0x15180 : 0x19500;
1116 itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_YUV] = yuv_buf_size / 2;
1117 itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_YUV] = yuv_buf_size / 8;
1118
1119 /* Setup VBI Raw Size. Should be big enough to hold PAL.
1120 It is possible to switch between PAL and NTSC, so we need to
1121 take the largest size here. */
1122 /* 1456 is multiple of 16, real size = 1444 */
1123 itv->vbi.raw_size = 1456;
1124 /* We use a buffer size of 1/2 of the total size needed for a
1125 frame. This is actually very useful, since we now receive
1126 a field at a time and that makes 'compressing' the raw data
1127 down to size by stripping off the SAV codes a lot easier.
1128 Note: having two different buffer sizes prevents standard
1129 switching on the fly. We need to find a better solution... */
1130 vbi_buf_size = itv->vbi.raw_size * (itv->is_60hz ? 24 : 36) / 2;
1131 itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_VBI] = vbi_buf_size;
1132 itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_VBI] = sizeof(struct v4l2_sliced_vbi_data) * 36;
1133
1134 if (itv->options.radio > 0)
1135 itv->v4l2_cap |= V4L2_CAP_RADIO;
1136
1137 if (itv->options.tuner > -1) {
1138 struct tuner_setup setup;
1139
1140 setup.addr = ADDR_UNSET;
1141 setup.type = itv->options.tuner;
1142 setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
1143#ifdef HAVE_XC3028
1144 setup.initmode = V4L2_TUNER_ANALOG_TV;
1145 if (itv->options.tuner == TUNER_XCEIVE_XC3028) {
1146 setup.gpio_write = ivtv_reset_tuner_gpio;
1147 setup.gpio_priv = itv;
1148 }
1149#endif
1150 ivtv_call_i2c_clients(itv, TUNER_SET_TYPE_ADDR, &setup);
1151 }
1152
1153 vf.tuner = 0;
1154 vf.type = V4L2_TUNER_ANALOG_TV;
1155 vf.frequency = 6400; /* the tuner 'baseline' frequency */
1156 if (itv->std & V4L2_STD_NTSC_M) {
1157 /* Why on earth? */
1158 vf.frequency = 1076; /* ch. 4 67250*16/1000 */
1159 }
1160
1161 /* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
1162 are not. */
1163 itv->tuner_std = itv->std;
1164
1165 video_input = itv->active_input;
1166 itv->active_input++; /* Force update of input */
1167 ivtv_v4l2_ioctls(itv, NULL, VIDIOC_S_INPUT, &video_input);
1168
1169 /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
1170 in one place. */
1171 itv->std++; /* Force full standard initialization */
1172 itv->std_out = itv->std;
1173 ivtv_v4l2_ioctls(itv, NULL, VIDIOC_S_FREQUENCY, &vf);
1174
1175 retval = ivtv_streams_setup(itv);
1176 if (retval) {
1177 IVTV_ERR("Error %d setting up streams\n", retval);
1178 goto free_i2c;
1179 }
1180
1181 if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
1182 ivtv_init_mpeg_decoder(itv);
1183 }
1184 ivtv_v4l2_ioctls(itv, NULL, VIDIOC_S_STD, &itv->tuner_std);
1185
1186 IVTV_DEBUG_IRQ("Masking interrupts\n");
1187 /* mask all interrupts, effectively disabling them */
1188 ivtv_set_irq_mask(itv, 0xffffffff);
1189
1190 /* Register IRQ */
1191 retval = request_irq(itv->dev->irq, ivtv_irq_handler,
1192 IRQF_SHARED | IRQF_DISABLED, itv->name, (void *)itv);
1193 if (retval) {
1194 IVTV_ERR("Failed to register irq, error %d\n", retval);
1195 goto free_streams;
1196 }
1197
1198 /* On a cx23416 this seems to be able to enable DMA to the chip? */
1199 if (!itv->has_cx23415)
1200 write_reg_sync(0x03, IVTV_REG_DMACONTROL);
1201
1202 /* Default interrupts enabled. For the PVR350 this includes the
1203 decoder VSYNC interrupt, which is always on. It is not only used
1204 during decoding but also by the OSD.
1205 Some old PVR250 cards had a cx23415, so testing for that is too
1206 general. Instead test if the card has video output capability. */
1207 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
1208 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
1209 else
1210 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
1211
1212 if (itv->has_cx23415)
1213 ivtv_set_osd_alpha(itv);
1214
1215 IVTV_INFO("Initialized %s, card #%d\n", itv->card_name, itv->num);
1216
1217 return 0;
1218
1219 free_irq:
1220 free_irq(itv->dev->irq, (void *)itv);
1221 free_streams:
1222 ivtv_streams_cleanup(itv);
1223 free_i2c:
1224 exit_ivtv_i2c(itv);
1225 free_io:
1226 ivtv_iounmap(itv);
1227 free_mem:
1228 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
1229 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
1230 if (itv->has_cx23415)
1231 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
1232 free_workqueue:
1233 destroy_workqueue(itv->irq_work_queues);
1234 err:
1235 if (retval == 0)
1236 retval = -ENODEV;
1237 IVTV_ERR("Error %d on initialization\n", retval);
1238
1239 kfree(ivtv_cards[ivtv_cards_active]);
1240 ivtv_cards[ivtv_cards_active] = NULL;
1241 return retval;
1242}
1243
1244static void ivtv_remove(struct pci_dev *pci_dev)
1245{
1246 struct ivtv *itv = pci_get_drvdata(pci_dev);
1247
1248 IVTV_DEBUG_INFO("Removing Card #%d.\n", itv->num);
1249
1250 /* Stop all captures */
1251 IVTV_DEBUG_INFO(" Stopping all streams.\n");
1252 if (atomic_read(&itv->capturing) > 0)
1253 ivtv_stop_all_captures(itv);
1254
1255 /* Stop all decoding */
1256 IVTV_DEBUG_INFO(" Stopping decoding.\n");
1257 if (atomic_read(&itv->decoding) > 0) {
1258 int type;
1259
1260 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
1261 type = IVTV_DEC_STREAM_TYPE_YUV;
1262 else
1263 type = IVTV_DEC_STREAM_TYPE_MPG;
1264 ivtv_stop_v4l2_decode_stream(&itv->streams[type],
1265 VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
1266 }
1267
1268 /* Interrupts */
1269 IVTV_DEBUG_INFO(" Disabling interrupts.\n");
1270 ivtv_set_irq_mask(itv, 0xffffffff);
1271 del_timer_sync(&itv->dma_timer);
1272
1273 /* Stop all Work Queues */
1274 IVTV_DEBUG_INFO(" Stop Work Queues.\n");
1275 flush_workqueue(itv->irq_work_queues);
1276 destroy_workqueue(itv->irq_work_queues);
1277
1278 IVTV_DEBUG_INFO(" Stopping Firmware.\n");
1279 ivtv_halt_firmware(itv);
1280
1281 IVTV_DEBUG_INFO(" Unregistering v4l devices.\n");
1282 ivtv_streams_cleanup(itv);
1283 IVTV_DEBUG_INFO(" Freeing dma resources.\n");
1284 ivtv_udma_free(itv);
1285
1286 exit_ivtv_i2c(itv);
1287
1288 IVTV_DEBUG_INFO(" Releasing irq.\n");
1289 free_irq(itv->dev->irq, (void *)itv);
1290
1291 if (itv->dev) {
1292 ivtv_iounmap(itv);
1293 }
1294
1295 IVTV_DEBUG_INFO(" Releasing mem.\n");
1296 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
1297 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
1298 if (itv->has_cx23415)
1299 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
1300
1301 pci_disable_device(itv->dev);
1302
1303 IVTV_INFO("Removed %s, card #%d\n", itv->card_name, itv->num);
1304}
1305
1306/* define a pci_driver for card detection */
1307static struct pci_driver ivtv_pci_driver = {
1308 .name = "ivtv",
1309 .id_table = ivtv_pci_tbl,
1310 .probe = ivtv_probe,
1311 .remove = ivtv_remove,
1312};
1313
1314static int module_start(void)
1315{
1316 printk(KERN_INFO "ivtv: ==================== START INIT IVTV ====================\n");
1317 printk(KERN_INFO "ivtv: version %s (" VERMAGIC_STRING ") loading\n", IVTV_VERSION);
1318
1319 memset(ivtv_cards, 0, sizeof(ivtv_cards));
1320
1321 /* Validate parameters */
1322 if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) {
1323 printk(KERN_ERR "ivtv: ivtv_first_minor must be between 0 and %d. Exiting...\n",
1324 IVTV_MAX_CARDS - 1);
1325 return -1;
1326 }
1327
1328 if (ivtv_debug < 0 || ivtv_debug > 511) {
1329 ivtv_debug = 0;
1330 printk(KERN_INFO "ivtv: debug value must be >= 0 and <= 511!\n");
1331 }
1332
1333 if (pci_register_driver(&ivtv_pci_driver)) {
1334 printk(KERN_ERR "ivtv: Error detecting PCI card\n");
1335 return -ENODEV;
1336 }
1337 printk(KERN_INFO "ivtv: ==================== END INIT IVTV ====================\n");
1338 return 0;
1339}
1340
1341static void module_cleanup(void)
1342{
1343 int i, j;
1344
1345 pci_unregister_driver(&ivtv_pci_driver);
1346
1347 for (i = 0; i < ivtv_cards_active; i++) {
1348 if (ivtv_cards[i] == NULL)
1349 continue;
1350 for (j = 0; j < IVTV_VBI_FRAMES; j++) {
1351 kfree(ivtv_cards[i]->vbi.sliced_mpeg_data[j]);
1352 }
1353 kfree(ivtv_cards[i]);
1354 }
1355}
1356
1357/* Note: These symbols are exported because they are used by the ivtv-fb
1358 framebuffer module and an infrared module for the IR-blaster. */
1359EXPORT_SYMBOL(ivtv_set_irq_mask);
1360EXPORT_SYMBOL(ivtv_cards_active);
1361EXPORT_SYMBOL(ivtv_cards);
1362EXPORT_SYMBOL(ivtv_api);
1363EXPORT_SYMBOL(ivtv_vapi);
1364EXPORT_SYMBOL(ivtv_vapi_result);
1365EXPORT_SYMBOL(ivtv_clear_irq_mask);
1366EXPORT_SYMBOL(ivtv_debug);
1367EXPORT_SYMBOL(ivtv_reset_ir_gpio);
1368EXPORT_SYMBOL(ivtv_udma_setup);
1369EXPORT_SYMBOL(ivtv_udma_unmap);
1370EXPORT_SYMBOL(ivtv_udma_alloc);
1371EXPORT_SYMBOL(ivtv_udma_prepare);
1372
1373module_init(module_start);
1374module_exit(module_cleanup);
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
new file mode 100644
index 000000000000..9a412d6c6d06
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -0,0 +1,868 @@
1/*
2 ivtv driver internal defines and structures
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef IVTV_DRIVER_H
23#define IVTV_DRIVER_H
24
25/* Internal header for ivtv project:
26 * Driver for the cx23415/6 chip.
27 * Author: Kevin Thayer (nufan_wfk at yahoo.com)
28 * License: GPL
29 * http://www.ivtvdriver.org
30 *
31 * -----
32 * MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com>
33 * and Takeru KOMORIYA<komoriya@paken.org>
34 *
35 * AVerMedia M179 GPIO info by Chris Pinkham <cpinkham@bc2va.org>
36 * using information provided by Jiun-Kuei Jung @ AVerMedia.
37 */
38
39#include <linux/version.h>
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/init.h>
43#include <linux/delay.h>
44#include <linux/sched.h>
45#include <linux/fs.h>
46#include <linux/pci.h>
47#include <linux/interrupt.h>
48#include <linux/spinlock.h>
49#include <linux/i2c.h>
50#include <linux/i2c-algo-bit.h>
51#include <linux/list.h>
52#include <linux/unistd.h>
53#include <linux/byteorder/swab.h>
54#include <linux/pagemap.h>
55#include <linux/workqueue.h>
56#include <linux/mutex.h>
57#include <asm/uaccess.h>
58#include <asm/system.h>
59
60#include <linux/dvb/video.h>
61#include <linux/dvb/audio.h>
62#include <media/v4l2-common.h>
63#include <media/tuner.h>
64#include <media/cx2341x.h>
65
66/* #define HAVE_XC3028 1 */
67
68#include <media/ivtv.h>
69
70#ifdef CONFIG_LIRC_I2C
71# error "This driver is not compatible with the LIRC I2C kernel configuration option."
72#endif /* CONFIG_LIRC_I2C */
73
74#ifndef CONFIG_PCI
75# error "This driver requires kernel PCI support."
76#endif /* CONFIG_PCI */
77
78#define IVTV_ENCODER_OFFSET 0x00000000
79#define IVTV_ENCODER_SIZE 0x00800000 /* Last half isn't needed 0x01000000 */
80
81#define IVTV_DECODER_OFFSET 0x01000000
82#define IVTV_DECODER_SIZE 0x00800000 /* Last half isn't needed 0x01000000 */
83
84#define IVTV_REG_OFFSET 0x02000000
85#define IVTV_REG_SIZE 0x00010000
86
87/* Buffers on hardware offsets */
88#define IVTV_YUV_BUFFER_OFFSET 0x001a8600 /* First YUV Buffer */
89#define IVTV_YUV_BUFFER_OFFSET_1 0x00240400 /* Second YUV Buffer */
90#define IVTV_YUV_BUFFER_OFFSET_2 0x002d8200 /* Third YUV Buffer */
91#define IVTV_YUV_BUFFER_OFFSET_3 0x00370000 /* Fourth YUV Buffer */
92#define IVTV_YUV_BUFFER_UV_OFFSET 0x65400 /* Offset to UV Buffer */
93
94/* Offset to filter table in firmware */
95#define IVTV_YUV_HORIZONTAL_FILTER_OFFSET 0x025d8
96#define IVTV_YUV_VERTICAL_FILTER_OFFSET 0x03358
97
98extern const u32 yuv_offset[4];
99
100/* Maximum ivtv driver instances.
101 Based on 6 PVR500s each with two PVR15s...
102 TODO: make this dynamic. I believe it is only a global in order to support
103 ivtv-fb. There must be a better way to do that. */
104#define IVTV_MAX_CARDS 12
105
106/* Supported cards */
107#define IVTV_CARD_PVR_250 0 /* WinTV PVR 250 */
108#define IVTV_CARD_PVR_350 1 /* encoder, decoder, tv-out */
109#define IVTV_CARD_PVR_150 2 /* WinTV PVR 150 and PVR 500 (really just two
110 PVR150s on one PCI board) */
111#define IVTV_CARD_M179 3 /* AVerMedia M179 (encoder only) */
112#define IVTV_CARD_MPG600 4 /* Kuroutoshikou ITVC16-STVLP/YUAN MPG600, encoder only */
113#define IVTV_CARD_MPG160 5 /* Kuroutoshikou ITVC15-STVLP/YUAN MPG160
114 cx23415 based, but does not have tv-out */
115#define IVTV_CARD_PG600 6 /* YUAN PG600/DIAMONDMM PVR-550 based on the CX Falcon 2 */
116#define IVTV_CARD_AVC2410 7 /* Adaptec AVC-2410 */
117#define IVTV_CARD_AVC2010 8 /* Adaptec AVD-2010 (No Tuner) */
118#define IVTV_CARD_TG5000TV 9 /* NAGASE TRANSGEAR 5000TV, encoder only */
119#define IVTV_CARD_VA2000MAX_SNT6 10 /* VA2000MAX-STN6 */
120#define IVTV_CARD_CX23416GYC 11 /* Kuroutoshikou CX23416GYC-STVLP (Yuan MPG600GR OEM) */
121#define IVTV_CARD_GV_MVPRX 12 /* I/O Data GV-MVP/RX, RX2, RX2W */
122#define IVTV_CARD_GV_MVPRX2E 13 /* I/O Data GV-MVP/RX2E */
123#define IVTV_CARD_GOTVIEW_PCI_DVD 14 /* GotView PCI DVD */
124#define IVTV_CARD_GOTVIEW_PCI_DVD2 15 /* GotView PCI DVD2 */
125#define IVTV_CARD_YUAN_MPC622 16 /* Yuan MPC622 miniPCI */
126#define IVTV_CARD_DCTMTVP1 17 /* DIGITAL COWBOY DCT-MTVP1 */
127#ifdef HAVE_XC3028
128#define IVTV_CARD_PG600V2 18 /* Yuan PG600V2/GotView PCI DVD Lite/Club3D ZAP-TV1x01 */
129#define IVTV_CARD_LAST 18
130#else
131#define IVTV_CARD_LAST 17
132#endif
133
134/* Variants of existing cards but with the same PCI IDs. The driver
135 detects these based on other device information.
136 These cards must always come last.
137 New cards must be inserted above, and the indices of the cards below
138 must be adjusted accordingly. */
139
140/* PVR-350 V1 (uses saa7114) */
141#define IVTV_CARD_PVR_350_V1 (IVTV_CARD_LAST+1)
142/* 2 variants of Kuroutoshikou CX23416GYC-STVLP (Yuan MPG600GR OEM) */
143#define IVTV_CARD_CX23416GYC_NOGR (IVTV_CARD_LAST+2)
144#define IVTV_CARD_CX23416GYC_NOGRYCS (IVTV_CARD_LAST+3)
145
146#define IVTV_ENC_STREAM_TYPE_MPG 0
147#define IVTV_ENC_STREAM_TYPE_YUV 1
148#define IVTV_ENC_STREAM_TYPE_VBI 2
149#define IVTV_ENC_STREAM_TYPE_PCM 3
150#define IVTV_ENC_STREAM_TYPE_RAD 4
151#define IVTV_DEC_STREAM_TYPE_MPG 5
152#define IVTV_DEC_STREAM_TYPE_VBI 6
153#define IVTV_DEC_STREAM_TYPE_VOUT 7
154#define IVTV_DEC_STREAM_TYPE_YUV 8
155#define IVTV_MAX_STREAMS 9
156
157#define IVTV_V4L2_DEC_MPG_OFFSET 16 /* offset from 0 to register decoder mpg v4l2 minors on */
158#define IVTV_V4L2_ENC_PCM_OFFSET 24 /* offset from 0 to register pcm v4l2 minors on */
159#define IVTV_V4L2_ENC_YUV_OFFSET 32 /* offset from 0 to register yuv v4l2 minors on */
160#define IVTV_V4L2_DEC_YUV_OFFSET 48 /* offset from 0 to register decoder yuv v4l2 minors on */
161#define IVTV_V4L2_DEC_VBI_OFFSET 8 /* offset from 0 to register decoder vbi input v4l2 minors on */
162#define IVTV_V4L2_DEC_VOUT_OFFSET 16 /* offset from 0 to register vbi output v4l2 minors on */
163
164#define IVTV_ENC_MEM_START 0x00000000
165#define IVTV_DEC_MEM_START 0x01000000
166
167/* system vendor and device IDs */
168#define PCI_VENDOR_ID_ICOMP 0x4444
169#define PCI_DEVICE_ID_IVTV15 0x0803
170#define PCI_DEVICE_ID_IVTV16 0x0016
171
172/* subsystem vendor ID */
173#define IVTV_PCI_ID_HAUPPAUGE 0x0070
174#define IVTV_PCI_ID_HAUPPAUGE_ALT1 0x0270
175#define IVTV_PCI_ID_HAUPPAUGE_ALT2 0x4070
176#define IVTV_PCI_ID_ADAPTEC 0x9005
177#define IVTV_PCI_ID_AVERMEDIA 0x1461
178#define IVTV_PCI_ID_YUAN1 0x12ab
179#define IVTV_PCI_ID_YUAN2 0xff01
180#define IVTV_PCI_ID_YUAN3 0xffab
181#define IVTV_PCI_ID_YUAN4 0xfbab
182#define IVTV_PCI_ID_DIAMONDMM 0xff92
183#define IVTV_PCI_ID_IODATA 0x10fc
184#define IVTV_PCI_ID_MELCO 0x1154
185#define IVTV_PCI_ID_GOTVIEW1 0xffac
186#define IVTV_PCI_ID_GOTVIEW2 0xffad
187
188/* Decoder Buffer hardware size on Chip */
189#define IVTV_DEC_MAX_BUF 0x00100000 /* max bytes in decoder buffer */
190#define IVTV_DEC_MIN_BUF 0x00010000 /* min bytes in dec buffer */
191
192/* ======================================================================== */
193/* ========================== START USER SETTABLE DMA VARIABLES =========== */
194/* ======================================================================== */
195
196#define IVTV_DMA_SG_OSD_ENT (2883584/PAGE_SIZE) /* sg entities */
197
198/* DMA Buffers, Default size in MB allocated */
199#define IVTV_DEFAULT_ENC_MPG_BUFFERS 4
200#define IVTV_DEFAULT_ENC_YUV_BUFFERS 2
201#define IVTV_DEFAULT_ENC_VBI_BUFFERS 1
202#define IVTV_DEFAULT_ENC_PCM_BUFFERS 1
203#define IVTV_DEFAULT_DEC_MPG_BUFFERS 1
204#define IVTV_DEFAULT_DEC_YUV_BUFFERS 1
205#define IVTV_DEFAULT_DEC_VBI_BUFFERS 1
206
207/* ======================================================================== */
208/* ========================== END USER SETTABLE DMA VARIABLES ============= */
209/* ======================================================================== */
210
211/* Decoder Status Register */
212#define IVTV_DMA_ERR_LIST 0x00000010
213#define IVTV_DMA_ERR_WRITE 0x00000008
214#define IVTV_DMA_ERR_READ 0x00000004
215#define IVTV_DMA_SUCCESS_WRITE 0x00000002
216#define IVTV_DMA_SUCCESS_READ 0x00000001
217#define IVTV_DMA_READ_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_READ)
218#define IVTV_DMA_WRITE_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_WRITE)
219#define IVTV_DMA_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_WRITE | IVTV_DMA_ERR_READ)
220
221/* DMA Registers */
222#define IVTV_REG_DMAXFER (0x0000)
223#define IVTV_REG_DMASTATUS (0x0004)
224#define IVTV_REG_DECDMAADDR (0x0008)
225#define IVTV_REG_ENCDMAADDR (0x000c)
226#define IVTV_REG_DMACONTROL (0x0010)
227#define IVTV_REG_IRQSTATUS (0x0040)
228#define IVTV_REG_IRQMASK (0x0048)
229
230/* Setup Registers */
231#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8)
232#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC)
233#define IVTV_REG_DEC_SDRAM_REFRESH (0x08F8)
234#define IVTV_REG_DEC_SDRAM_PRECHARGE (0x08FC)
235#define IVTV_REG_VDM (0x2800)
236#define IVTV_REG_AO (0x2D00)
237#define IVTV_REG_BYTEFLUSH (0x2D24)
238#define IVTV_REG_SPU (0x9050)
239#define IVTV_REG_HW_BLOCKS (0x9054)
240#define IVTV_REG_VPU (0x9058)
241#define IVTV_REG_APU (0xA064)
242
243#define IVTV_IRQ_ENC_START_CAP (0x1 << 31)
244#define IVTV_IRQ_ENC_EOS (0x1 << 30)
245#define IVTV_IRQ_ENC_VBI_CAP (0x1 << 29)
246#define IVTV_IRQ_ENC_VIM_RST (0x1 << 28)
247#define IVTV_IRQ_ENC_DMA_COMPLETE (0x1 << 27)
248#define IVTV_IRQ_DEC_AUD_MODE_CHG (0x1 << 24)
249#define IVTV_IRQ_DEC_DATA_REQ (0x1 << 22)
250#define IVTV_IRQ_DEC_DMA_COMPLETE (0x1 << 20)
251#define IVTV_IRQ_DEC_VBI_RE_INSERT (0x1 << 19)
252#define IVTV_IRQ_DMA_ERR (0x1 << 18)
253#define IVTV_IRQ_DMA_WRITE (0x1 << 17)
254#define IVTV_IRQ_DMA_READ (0x1 << 16)
255#define IVTV_IRQ_DEC_VSYNC (0x1 << 10)
256
257/* IRQ Masks */
258#define IVTV_IRQ_MASK_INIT (IVTV_IRQ_DMA_ERR|IVTV_IRQ_ENC_DMA_COMPLETE|IVTV_IRQ_DMA_READ)
259
260#define IVTV_IRQ_MASK_CAPTURE (IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_EOS)
261#define IVTV_IRQ_MASK_DECODE (IVTV_IRQ_DEC_DATA_REQ|IVTV_IRQ_DEC_AUD_MODE_CHG)
262
263/* i2c stuff */
264#define I2C_CLIENTS_MAX 16
265
266/* debugging */
267
268#define IVTV_DBGFLG_WARN (1 << 0)
269#define IVTV_DBGFLG_INFO (1 << 1)
270#define IVTV_DBGFLG_API (1 << 2)
271#define IVTV_DBGFLG_DMA (1 << 3)
272#define IVTV_DBGFLG_IOCTL (1 << 4)
273#define IVTV_DBGFLG_I2C (1 << 5)
274#define IVTV_DBGFLG_IRQ (1 << 6)
275#define IVTV_DBGFLG_DEC (1 << 7)
276#define IVTV_DBGFLG_YUV (1 << 8)
277
278/* NOTE: extra space before comma in 'itv->num , ## args' is required for
279 gcc-2.95, otherwise it won't compile. */
280#define IVTV_DEBUG(x, type, fmt, args...) \
281 do { \
282 if ((x) & ivtv_debug) \
283 printk(KERN_INFO "ivtv%d " type ": " fmt, itv->num , ## args); \
284 } while (0)
285#define IVTV_DEBUG_WARN(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_WARN, "warning", fmt , ## args)
286#define IVTV_DEBUG_INFO(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_INFO, "info",fmt , ## args)
287#define IVTV_DEBUG_API(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_API, "api", fmt , ## args)
288#define IVTV_DEBUG_DMA(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_DMA, "dma", fmt , ## args)
289#define IVTV_DEBUG_IOCTL(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_IOCTL, "ioctl", fmt , ## args)
290#define IVTV_DEBUG_I2C(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_I2C, "i2c", fmt , ## args)
291#define IVTV_DEBUG_IRQ(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_IRQ, "irq", fmt , ## args)
292#define IVTV_DEBUG_DEC(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_DEC, "dec", fmt , ## args)
293#define IVTV_DEBUG_YUV(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
294
295#define IVTV_FB_DEBUG(x, type, fmt, args...) \
296 do { \
297 if ((x) & ivtv_debug) \
298 printk(KERN_INFO "ivtv%d-fb " type ": " fmt, itv->num , ## args); \
299 } while (0)
300#define IVTV_FB_DEBUG_WARN(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_WARN, "warning", fmt , ## args)
301#define IVTV_FB_DEBUG_INFO(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_INFO, "info", fmt , ## args)
302#define IVTV_FB_DEBUG_API(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_API, "api", fmt , ## args)
303#define IVTV_FB_DEBUG_DMA(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_DMA, "dma", fmt , ## args)
304#define IVTV_FB_DEBUG_IOCTL(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_IOCTL, "ioctl", fmt , ## args)
305#define IVTV_FB_DEBUG_I2C(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_I2C, "i2c", fmt , ## args)
306#define IVTV_FB_DEBUG_IRQ(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_IRQ, "irq", fmt , ## args)
307#define IVTV_FB_DEBUG_DEC(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_DEC, "dec", fmt , ## args)
308#define IVTV_FB_DEBUG_YUV(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
309
310/* Standard kernel messages */
311#define IVTV_ERR(fmt, args...) printk(KERN_ERR "ivtv%d: " fmt, itv->num , ## args)
312#define IVTV_WARN(fmt, args...) printk(KERN_WARNING "ivtv%d: " fmt, itv->num , ## args)
313#define IVTV_INFO(fmt, args...) printk(KERN_INFO "ivtv%d: " fmt, itv->num , ## args)
314#define IVTV_FB_ERR(fmt, args...) printk(KERN_ERR "ivtv%d-fb: " fmt, itv->num , ## args)
315#define IVTV_FB_WARN(fmt, args...) printk(KERN_WARNING "ivtv%d-fb: " fmt, itv->num , ## args)
316#define IVTV_FB_INFO(fmt, args...) printk(KERN_INFO "ivtv%d-fb: " fmt, itv->num , ## args)
317
318/* Values for IVTV_API_DEC_PLAYBACK_SPEED mpeg_frame_type_mask parameter: */
319#define MPEG_FRAME_TYPE_IFRAME 1
320#define MPEG_FRAME_TYPE_IFRAME_PFRAME 3
321#define MPEG_FRAME_TYPE_ALL 7
322
323/* output modes (cx23415 only) */
324#define OUT_NONE 0
325#define OUT_MPG 1
326#define OUT_YUV 2
327#define OUT_UDMA_YUV 3
328#define OUT_PASSTHROUGH 4
329
330#define IVTV_MAX_PGM_INDEX (400)
331
332extern int ivtv_debug;
333
334
335struct ivtv_options {
336 int megabytes[IVTV_MAX_STREAMS]; /* Size in megabytes of each stream */
337 int cardtype; /* force card type on load */
338 int tuner; /* set tuner on load */
339 int radio; /* enable/disable radio */
340 int newi2c; /* New I2C algorithm */
341};
342
343#define IVTV_MBOX_DMA_START 6
344#define IVTV_MBOX_DMA_END 8
345#define IVTV_MBOX_DMA 9
346#define IVTV_MBOX_FIELD_DISPLAYED 8
347
348/* ivtv-specific mailbox template */
349struct ivtv_mailbox {
350 u32 flags;
351 u32 cmd;
352 u32 retval;
353 u32 timeout;
354 u32 data[CX2341X_MBOX_MAX_DATA];
355};
356
357struct ivtv_api_cache {
358 unsigned long last_jiffies; /* when last command was issued */
359 u32 data[CX2341X_MBOX_MAX_DATA]; /* last sent api data */
360};
361
362struct ivtv_mailbox_data {
363 volatile struct ivtv_mailbox __iomem *mbox;
364 /* Bits 0-2 are for the encoder mailboxes, 0-1 are for the decoder mailboxes.
365 If the bit is set, then the corresponding mailbox is in use by the driver. */
366 unsigned long busy;
367 u8 max_mbox;
368};
369
370/* per-buffer bit flags */
371#define IVTV_F_B_NEED_BUF_SWAP 0 /* this buffer should be byte swapped */
372
373/* per-stream, s_flags */
374#define IVTV_F_S_DMA_PENDING 0 /* this stream has pending DMA */
375#define IVTV_F_S_DMA_HAS_VBI 1 /* the current DMA request also requests VBI data */
376#define IVTV_F_S_NEEDS_DATA 2 /* this decoding stream needs more data */
377
378#define IVTV_F_S_CLAIMED 3 /* this stream is claimed */
379#define IVTV_F_S_STREAMING 4 /* the fw is decoding/encoding this stream */
380#define IVTV_F_S_INTERNAL_USE 5 /* this stream is used internally (sliced VBI processing) */
381#define IVTV_F_S_PASSTHROUGH 6 /* this stream is in passthrough mode */
382#define IVTV_F_S_STREAMOFF 7 /* signal end of stream EOS */
383#define IVTV_F_S_APPL_IO 8 /* this stream is used read/written by an application */
384
385/* per-ivtv, i_flags */
386#define IVTV_F_I_DMA 0 /* DMA in progress */
387#define IVTV_F_I_UDMA 1 /* UDMA in progress */
388#define IVTV_F_I_UDMA_PENDING 2 /* UDMA pending */
389#define IVTV_F_I_SPEED_CHANGE 3 /* A speed change is in progress */
390#define IVTV_F_I_EOS 4 /* End of encoder stream reached */
391#define IVTV_F_I_RADIO_USER 5 /* The radio tuner is selected */
392#define IVTV_F_I_DIG_RST 6 /* Reset digitizer */
393#define IVTV_F_I_DEC_YUV 7 /* YUV instead of MPG is being decoded */
394#define IVTV_F_I_ENC_VBI 8 /* VBI DMA */
395#define IVTV_F_I_UPDATE_CC 9 /* CC should be updated */
396#define IVTV_F_I_UPDATE_WSS 10 /* WSS should be updated */
397#define IVTV_F_I_UPDATE_VPS 11 /* VPS should be updated */
398#define IVTV_F_I_DECODING_YUV 12 /* this stream is YUV frame decoding */
399#define IVTV_F_I_ENC_PAUSED 13 /* the encoder is paused */
400#define IVTV_F_I_VALID_DEC_TIMINGS 14 /* last_dec_timing is valid */
401#define IVTV_F_I_WORK_HANDLER_VBI 15 /* there is work to be done for VBI */
402#define IVTV_F_I_WORK_HANDLER_YUV 16 /* there is work to be done for YUV */
403
404/* Event notifications */
405#define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */
406#define IVTV_F_I_EV_VSYNC 29 /* VSYNC event */
407#define IVTV_F_I_EV_VSYNC_FIELD 30 /* VSYNC event field (0 = first, 1 = second field) */
408#define IVTV_F_I_EV_VSYNC_ENABLED 31 /* VSYNC event enabled */
409
410/* Scatter-Gather array element, used in DMA transfers */
411struct ivtv_SG_element {
412 u32 src;
413 u32 dst;
414 u32 size;
415};
416
417struct ivtv_user_dma {
418 struct mutex lock;
419 int page_count;
420 struct page *map[IVTV_DMA_SG_OSD_ENT];
421
422 /* Base Dev SG Array for cx23415/6 */
423 struct ivtv_SG_element SGarray[IVTV_DMA_SG_OSD_ENT];
424 dma_addr_t SG_handle;
425 int SG_length;
426
427 /* SG List of Buffers */
428 struct scatterlist SGlist[IVTV_DMA_SG_OSD_ENT];
429};
430
431struct ivtv_dma_page_info {
432 unsigned long uaddr;
433 unsigned long first;
434 unsigned long last;
435 unsigned int offset;
436 unsigned int tail;
437 int page_count;
438};
439
440struct ivtv_buffer {
441 struct list_head list;
442 dma_addr_t dma_handle;
443 unsigned long b_flags;
444 char *buf;
445
446 u32 bytesused;
447 u32 readpos;
448};
449
450struct ivtv_queue {
451 struct list_head list;
452 u32 buffers;
453 u32 length;
454 u32 bytesused;
455};
456
457struct ivtv; /* forward reference */
458
459struct ivtv_stream {
460 /* These first four fields are always set, even if the stream
461 is not actually created. */
462 struct video_device *v4l2dev; /* NULL when stream not created */
463 struct ivtv *itv; /* for ease of use */
464 const char *name; /* name of the stream */
465 int type; /* stream type */
466
467 u32 id;
468 spinlock_t qlock; /* locks access to the queues */
469 unsigned long s_flags; /* status flags, see above */
470 int dma; /* can be PCI_DMA_TODEVICE,
471 PCI_DMA_FROMDEVICE or
472 PCI_DMA_NONE */
473 u32 dma_offset;
474 u32 dma_backup;
475 u64 dma_pts;
476
477 int subtype;
478 wait_queue_head_t waitq;
479 u32 dma_last_offset;
480
481 /* Buffer Stats */
482 u32 buffers;
483 u32 buf_size;
484 u32 buffers_stolen;
485
486 /* Buffer Queues */
487 struct ivtv_queue q_free; /* free buffers */
488 struct ivtv_queue q_full; /* full buffers */
489 struct ivtv_queue q_io; /* waiting for I/O */
490 struct ivtv_queue q_dma; /* waiting for DMA */
491 struct ivtv_queue q_predma; /* waiting for DMA */
492
493 /* Base Dev SG Array for cx23415/6 */
494 struct ivtv_SG_element *SGarray;
495 dma_addr_t SG_handle;
496 int SG_length;
497
498 /* SG List of Buffers */
499 struct scatterlist *SGlist;
500};
501
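/* Per-filehandle state for an opened stream device */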
502struct ivtv_open_id {
503 u32 open_id;
504 int type;
505 enum v4l2_priority prio;
506 struct ivtv *itv;
507};
508
509#define IVTV_YUV_UPDATE_HORIZONTAL 0x01
510#define IVTV_YUV_UPDATE_VERTICAL 0x02
511
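/* Geometry and interlacing information for one YUV output frame */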
512struct yuv_frame_info
513{
514 u32 update;
515 int src_x;
516 int src_y;
517 unsigned int src_w;
518 unsigned int src_h;
519 int dst_x;
520 int dst_y;
521 unsigned int dst_w;
522 unsigned int dst_h;
523 int pan_x;
524 int pan_y;
525 u32 vis_w;
526 u32 vis_h;
527 u32 interlaced_y;
528 u32 interlaced_uv;
529 int tru_x;
530 u32 tru_w;
531 u32 tru_h;
532 u32 offset_y;
533};
534
535#define IVTV_YUV_MODE_INTERLACED 0x00
536#define IVTV_YUV_MODE_PROGRESSIVE 0x01
537#define IVTV_YUV_MODE_AUTO 0x02
538#define IVTV_YUV_MODE_MASK 0x03
539
540#define IVTV_YUV_SYNC_EVEN 0x00
541#define IVTV_YUV_SYNC_ODD 0x04
542#define IVTV_YUV_SYNC_MASK 0x04
543
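/* State for YUV playback: shadow copies of the cx23415 video output
   registers (reg_xxxx), scaler filter selections and frame bookkeeping. */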
544struct yuv_playback_info
545{
546 u32 reg_2834;
547 u32 reg_2838;
548 u32 reg_283c;
549 u32 reg_2840;
550 u32 reg_2844;
551 u32 reg_2848;
552 u32 reg_2854;
553 u32 reg_285c;
554 u32 reg_2864;
555
556 u32 reg_2870;
557 u32 reg_2874;
558 u32 reg_2890;
559 u32 reg_2898;
560 u32 reg_289c;
561
562 u32 reg_2918;
563 u32 reg_291c;
564 u32 reg_2920;
565 u32 reg_2924;
566 u32 reg_2928;
567 u32 reg_292c;
568 u32 reg_2930;
569
570 u32 reg_2934;
571
572 u32 reg_2938;
573 u32 reg_293c;
574 u32 reg_2940;
575 u32 reg_2944;
576 u32 reg_2948;
577 u32 reg_294c;
578 u32 reg_2950;
579 u32 reg_2954;
580 u32 reg_2958;
581 u32 reg_295c;
582 u32 reg_2960;
583 u32 reg_2964;
584 u32 reg_2968;
585 u32 reg_296c;
586
587 u32 reg_2970;
588
589 int v_filter_1;
590 int v_filter_2;
591 int h_filter;
592
593 u32 osd_x_offset;
594 u32 osd_y_offset;
595
596 u32 osd_x_pan;
597 u32 osd_y_pan;
598
599 u32 osd_vis_w;
600 u32 osd_vis_h;
601
602 int decode_height;
603
604 int frame_interlaced;
605 int frame_interlaced_last;
606
607 int lace_mode;
608 int lace_threshold;
609 int lace_sync_field;
610
611 atomic_t next_dma_frame;
612 atomic_t next_fill_frame;
613
614 u32 yuv_forced_update;
615 int update_frame;
616 struct yuv_frame_info new_frame_info[4];
617 struct yuv_frame_info old_frame_info;
618 struct yuv_frame_info old_frame_info_args;
619
620 void *blanking_ptr;
621 dma_addr_t blanking_dmaptr;
622};
623
624#define IVTV_VBI_FRAMES 32
625
626/* VBI data */
627struct vbi_info {
628 u32 dec_start;
629 u32 enc_start, enc_size;
630 int fpi;
631 u32 frame;
632 u32 dma_offset;
633 u8 cc_data_odd[256];
634 u8 cc_data_even[256];
635 int cc_pos;
636 u8 cc_no_update;
637 u8 vps[5];
638 u8 vps_found;
639 int wss;
640 u8 wss_found;
641 u8 wss_no_update;
642 u32 raw_decoder_line_size;
643 u8 raw_decoder_sav_odd_field;
644 u8 raw_decoder_sav_even_field;
645 u32 sliced_decoder_line_size;
646 u8 sliced_decoder_sav_odd_field;
647 u8 sliced_decoder_sav_even_field;
648 struct v4l2_format in;
649 /* convenience pointer to sliced struct in vbi_in union */
650 struct v4l2_sliced_vbi_format *sliced_in;
651 u32 service_set_in;
652 u32 service_set_out;
653 int insert_mpeg;
654
655 /* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines.
656 One for /dev/vbi0 and one for /dev/vbi8 */
657 struct v4l2_sliced_vbi_data sliced_data[36];
658 struct v4l2_sliced_vbi_data sliced_dec_data[36];
659
660 /* Buffer for VBI data inserted into MPEG stream.
661 The first byte is a dummy byte that's never used.
662 The next 16 bytes contain the MPEG header for the VBI data,
663 the remainder is the actual VBI data.
664 The max size accepted by the MPEG VBI reinsertion turns out
665 to be 1552 bytes, which happens to be 4 + (1 + 42) * (2 * 18) bytes,
666 where 4 is a four byte header, 42 is the max sliced VBI payload, 1 is
667 a single line header byte and 2 * 18 is the number of VBI lines per frame.
668
669 However, it seems that the data must be 1K aligned, so we have to
670 pad the data until the 1 or 2 K boundary.
671
672 This pointer array will allocate 2049 bytes to store each VBI frame. */
673 u8 *sliced_mpeg_data[IVTV_VBI_FRAMES];
674 u32 sliced_mpeg_size[IVTV_VBI_FRAMES];
675 struct ivtv_buffer sliced_mpeg_buf;
676 u32 inserted_frame;
677
678 u32 start[2], count;
679 u32 raw_size;
680 u32 sliced_size;
681};
682
683/* forward declaration of struct defined in ivtv-cards.h */
684struct ivtv_card;
685
686/* Struct to hold info about ivtv cards */
687struct ivtv {
688 int num; /* board number, -1 during init! */
689 char name[8]; /* board name for printk and interrupts (e.g. 'ivtv0') */
690 struct pci_dev *dev; /* PCI device */
691 const struct ivtv_card *card; /* card information */
692 const char *card_name; /* full name of the card */
693 u8 has_cx23415; /* 1 if it is a cx23415 based card, 0 for cx23416 */
694 u8 is_50hz;
695 u8 is_60hz;
696 u8 is_out_50hz;
697 u8 is_out_60hz;
698	u8 pvr150_workaround;           /* 1 if the cx25840 needs to work around a PVR150 bug */
699 u8 nof_inputs; /* number of video inputs */
700 u8 nof_audio_inputs; /* number of audio inputs */
701 u32 v4l2_cap; /* V4L2 capabilities of card */
702 u32 hw_flags; /* Hardware description of the board */
703
704 /* controlling Video decoder function */
705 int (*video_dec_func)(struct ivtv *, unsigned int, void *);
706
707 struct ivtv_options options; /* User options */
708 int stream_buf_size[IVTV_MAX_STREAMS]; /* Stream buffer size */
709 struct ivtv_stream streams[IVTV_MAX_STREAMS]; /* Stream data */
710 int speed;
711 u8 speed_mute_audio;
712 unsigned long i_flags; /* global ivtv flags */
713 atomic_t capturing; /* count number of active capture streams */
714 atomic_t decoding; /* count number of active decoding streams */
715 u32 irq_rr_idx; /* Round-robin stream index */
716 int cur_dma_stream; /* index of stream doing DMA */
717 u32 dma_data_req_offset;
718 u32 dma_data_req_size;
719 int output_mode; /* NONE, MPG, YUV, UDMA YUV, passthrough */
720 spinlock_t lock; /* lock access to this struct */
721 int search_pack_header;
722
723 spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
724
725 /* User based DMA for OSD */
726 struct ivtv_user_dma udma;
727
728	int open_id;		/* incremented each time an open occurs, used as a unique ID.
729				   Starts at 1, so 0 can be used as an uninitialized value
730				   in stream->id. */
731
732 u32 base_addr;
733 u32 irqmask;
734
735 struct v4l2_prio_state prio;
736 struct workqueue_struct *irq_work_queues;
737 struct work_struct irq_work_queue;
738 struct timer_list dma_timer; /* Timer used to catch unfinished DMAs */
739
740 struct vbi_info vbi;
741
742 struct ivtv_mailbox_data enc_mbox;
743 struct ivtv_mailbox_data dec_mbox;
744 struct ivtv_api_cache api_cache[256]; /* Cached API Commands */
745
746 u8 card_rev;
747 volatile void __iomem *enc_mem, *dec_mem, *reg_mem;
748
749 u32 pgm_info_offset;
750 u32 pgm_info_num;
751 u32 pgm_info_write_idx;
752 u32 pgm_info_read_idx;
753 struct v4l2_enc_idx_entry pgm_info[IVTV_MAX_PGM_INDEX];
754
755 u64 mpg_data_received;
756 u64 vbi_data_inserted;
757
758 wait_queue_head_t cap_w;
759 /* when the next decoder event arrives this queue is woken up */
760 wait_queue_head_t event_waitq;
761 /* when the next decoder vsync arrives this queue is woken up */
762 wait_queue_head_t vsync_waitq;
763 /* when the current DMA is finished this queue is woken up */
764 wait_queue_head_t dma_waitq;
765
766 /* OSD support */
767 unsigned long osd_video_pbase;
768 int osd_global_alpha_state; /* 0=off : 1=on */
769 int osd_local_alpha_state; /* 0=off : 1=on */
770 int osd_color_key_state; /* 0=off : 1=on */
771 u8 osd_global_alpha; /* Current global alpha */
772 u32 osd_color_key; /* Current color key */
773 u32 osd_pixelformat; /* Current pixel format */
774 struct v4l2_rect osd_rect; /* Current OSD position and size */
775 struct v4l2_rect main_rect; /* Current Main window position and size */
776
777 u32 last_dec_timing[3]; /* Store last retrieved pts/scr/frame values */
778
779 /* i2c */
780 struct i2c_adapter i2c_adap;
781 struct i2c_algo_bit_data i2c_algo;
782 struct i2c_client i2c_client;
783 struct mutex i2c_bus_lock;
784 int i2c_state;
785 struct i2c_client *i2c_clients[I2C_CLIENTS_MAX];
786
787 /* v4l2 and User settings */
788
789 /* codec settings */
790 struct cx2341x_mpeg_params params;
791 u32 audio_input;
792 u32 active_input;
793 u32 active_output;
794 v4l2_std_id std;
795 v4l2_std_id std_out;
796 v4l2_std_id tuner_std; /* The norm of the tuner (fixed) */
797 u8 audio_stereo_mode;
798 u8 audio_bilingual_mode;
799
800 /* dualwatch */
801 unsigned long dualwatch_jiffies;
802 u16 dualwatch_stereo_mode;
803
804 /* Digitizer type */
805	int digitizer;		/* 0x00EF = saa7114  0x00F0 = saa7115  0x0106 = mic */
806
807 u32 lastVsyncFrame;
808
809 struct yuv_playback_info yuv_info;
810 struct osd_info *osd_info;
811};
812
813/* Globals */
814extern struct ivtv *ivtv_cards[];
815extern int ivtv_cards_active;
816extern int ivtv_first_minor;
817extern spinlock_t ivtv_cards_lock;
818
819/*==============Prototypes==================*/
820
821/* Hardware/IRQ */
822void ivtv_set_irq_mask(struct ivtv *itv, u32 mask);
823void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask);
824
825/* try to set output mode, return current mode. */
826int ivtv_set_output_mode(struct ivtv *itv, int mode);
827
828/* return current output stream based on current mode */
829struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv);
830
831/* Return non-zero if a signal is pending */
832int ivtv_sleep_timeout(int timeout, int intr);
833
834/* Wait on queue, returns -EINTR if interrupted */
835int ivtv_waitq(wait_queue_head_t *waitq);
836
837/* Read Hauppauge eeprom */
838struct tveeprom; /* forward reference */
839void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv);
840
841/* This is a PCI posting issue: if the register is not read back, the
842   write does not always take effect right away. Reading the register
843   back forces any pending (posted) PCI writes to complete, in order,
844   so you can be sure the write has actually reached the device.
845
846   Rarely needed, only in some timing-sensitive cases. Apparently, if
847   this is not done, some motherboards kill the firmware and leave it
848   in a broken state until the computer is rebooted. See the
849   illustrative sketch after the register helpers below. */
850#define write_sync(val, reg) \
851 do { writel(val, reg); readl(reg); } while (0)
852
853#define read_reg(reg) readl(itv->reg_mem + (reg))
854#define write_reg(val, reg) writel(val, itv->reg_mem + (reg))
855#define write_reg_sync(val, reg) \
856 do { write_reg(val, reg); read_reg(reg); } while (0)
857
858#define read_enc(addr) readl(itv->enc_mem + (u32)(addr))
859#define write_enc(val, addr) writel(val, itv->enc_mem + (u32)(addr))
860#define write_enc_sync(val, addr) \
861 do { write_enc(val, addr); read_enc(addr); } while (0)
862
863#define read_dec(addr) readl(itv->dec_mem + (u32)(addr))
864#define write_dec(val, addr) writel(val, itv->dec_mem + (u32)(addr))
865#define write_dec_sync(val, addr) \
866 do { write_dec(val, addr); read_dec(addr); } while (0)
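/* Illustrative sketch, not part of the original header: a typical use of the
 * *_sync macros above is forcing a register write to reach the chip before the
 * caller proceeds, e.g. when masking an interrupt right before touching state
 * that the interrupt handler also uses.  The helper name below is invented for
 * this example only; the register offset is whatever the caller needs. */
static inline void ivtv_write_reg_flushed(struct ivtv *itv, u32 val, u32 reg)
{
	/* write_reg_sync() writes the value and immediately reads the same
	   register back, which flushes the posted PCI write (see the comment
	   above write_sync()) */
	write_reg_sync(val, reg);
}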
867
868#endif /* IVTV_DRIVER_H */
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
new file mode 100644
index 000000000000..1637097ddec7
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -0,0 +1,921 @@
1/*
2 file operation functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include "ivtv-driver.h"
23#include "ivtv-fileops.h"
24#include "ivtv-i2c.h"
25#include "ivtv-queue.h"
26#include "ivtv-udma.h"
27#include "ivtv-irq.h"
28#include "ivtv-vbi.h"
29#include "ivtv-mailbox.h"
30#include "ivtv-audio.h"
31#include "ivtv-streams.h"
32#include "ivtv-yuv.h"
33#include "ivtv-controls.h"
34#include "ivtv-ioctl.h"
35
36/* This function tries to claim the stream for a specific file descriptor.
37   If no one else is using the stream, it is claimed and any associated
38   VBI streams are claimed automatically as well.
39   Returns 0 on success or -EBUSY if someone else has already claimed
40   the stream. See the usage sketch after this function. */
41int ivtv_claim_stream(struct ivtv_open_id *id, int type)
42{
43 struct ivtv *itv = id->itv;
44 struct ivtv_stream *s = &itv->streams[type];
45 struct ivtv_stream *s_vbi;
46 int vbi_type;
47
48 if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
49 /* someone already claimed this stream */
50 if (s->id == id->open_id) {
51 /* yes, this file descriptor did. So that's OK. */
52 return 0;
53 }
54 if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
55 type == IVTV_ENC_STREAM_TYPE_VBI)) {
56			/* VBI is already handled internally; now also assign
57			   the file descriptor to this stream so it can be
58			   read externally as well. */
59 s->id = id->open_id;
60 IVTV_DEBUG_INFO("Start Read VBI\n");
61 return 0;
62 }
63 /* someone else is using this stream already */
64 IVTV_DEBUG_INFO("Stream %d is busy\n", type);
65 return -EBUSY;
66 }
67 s->id = id->open_id;
68 if (type == IVTV_DEC_STREAM_TYPE_VBI) {
69 /* Enable reinsertion interrupt */
70 ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
71 }
72
73 /* IVTV_DEC_STREAM_TYPE_MPG needs to claim IVTV_DEC_STREAM_TYPE_VBI,
74 IVTV_ENC_STREAM_TYPE_MPG needs to claim IVTV_ENC_STREAM_TYPE_VBI
75 (provided VBI insertion is on and sliced VBI is selected), for all
76 other streams we're done */
77 if (type == IVTV_DEC_STREAM_TYPE_MPG) {
78 vbi_type = IVTV_DEC_STREAM_TYPE_VBI;
79 } else if (type == IVTV_ENC_STREAM_TYPE_MPG &&
80 itv->vbi.insert_mpeg && itv->vbi.sliced_in->service_set) {
81 vbi_type = IVTV_ENC_STREAM_TYPE_VBI;
82 } else {
83 return 0;
84 }
85 s_vbi = &itv->streams[vbi_type];
86
87 if (!test_and_set_bit(IVTV_F_S_CLAIMED, &s_vbi->s_flags)) {
88 /* Enable reinsertion interrupt */
89 if (vbi_type == IVTV_DEC_STREAM_TYPE_VBI)
90 ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
91 }
92 /* mark that it is used internally */
93 set_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags);
94 return 0;
95}
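/* Usage sketch (not part of the original file): callers claim a stream on
 * first I/O and release it again when the filehandle is done with it, e.g.
 *
 *	if (ivtv_claim_stream(id, s->type))
 *		return -EBUSY;
 *	...read from or write to the stream...
 *	ivtv_release_stream(s);
 *
 * ivtv_start_capture() and ivtv_v4l2_write() below follow this pattern. */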
96
97/* This function releases a previously claimed stream. It will take into
98 account associated VBI streams. */
99void ivtv_release_stream(struct ivtv_stream *s)
100{
101 struct ivtv *itv = s->itv;
102 struct ivtv_stream *s_vbi;
103
104 s->id = -1;
105 if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
106 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
107 /* this stream is still in use internally */
108 return;
109 }
110 if (!test_and_clear_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
111 IVTV_DEBUG_WARN("Release stream %s not in use!\n", s->name);
112 return;
113 }
114
115 ivtv_flush_queues(s);
116
117 /* disable reinsertion interrupt */
118 if (s->type == IVTV_DEC_STREAM_TYPE_VBI)
119 ivtv_set_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
120
121 /* IVTV_DEC_STREAM_TYPE_MPG needs to release IVTV_DEC_STREAM_TYPE_VBI,
122 IVTV_ENC_STREAM_TYPE_MPG needs to release IVTV_ENC_STREAM_TYPE_VBI,
123 for all other streams we're done */
124 if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
125 s_vbi = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
126 else if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
127 s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
128 else
129 return;
130
131 /* clear internal use flag */
132 if (!test_and_clear_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags)) {
133 /* was already cleared */
134 return;
135 }
136 if (s_vbi->id != -1) {
137 /* VBI stream still claimed by a file descriptor */
138 return;
139 }
140 /* disable reinsertion interrupt */
141 if (s_vbi->type == IVTV_DEC_STREAM_TYPE_VBI)
142 ivtv_set_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
143 clear_bit(IVTV_F_S_CLAIMED, &s_vbi->s_flags);
144 ivtv_flush_queues(s_vbi);
145}
146
147static void ivtv_dualwatch(struct ivtv *itv)
148{
149 struct v4l2_tuner vt;
150 u16 new_bitmap;
151 u16 new_stereo_mode;
152 const u16 stereo_mask = 0x0300;
153 const u16 dual = 0x0200;
154
155 new_stereo_mode = itv->params.audio_properties & stereo_mask;
156 memset(&vt, 0, sizeof(vt));
157 ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, &vt);
158 if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2))
159 new_stereo_mode = dual;
160
161 if (new_stereo_mode == itv->dualwatch_stereo_mode)
162 return;
163
164 new_bitmap = new_stereo_mode | (itv->params.audio_properties & ~stereo_mask);
165
166	IVTV_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x. new audio_bitmask=0x%x\n",
167 itv->dualwatch_stereo_mode, new_stereo_mode, new_bitmap);
168
169 if (ivtv_vapi(itv, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new_bitmap) == 0) {
170 itv->dualwatch_stereo_mode = new_stereo_mode;
171 return;
172 }
173 IVTV_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
174}
175
176static void ivtv_update_pgm_info(struct ivtv *itv)
177{
178 u32 wr_idx = (read_enc(itv->pgm_info_offset) - itv->pgm_info_offset - 4) / 24;
179 int cnt;
180 int i = 0;
181
182 if (wr_idx >= itv->pgm_info_num) {
183 IVTV_DEBUG_WARN("Invalid PGM index %d (>= %d)\n", wr_idx, itv->pgm_info_num);
184 return;
185 }
186 cnt = (wr_idx + itv->pgm_info_num - itv->pgm_info_write_idx) % itv->pgm_info_num;
187 while (i < cnt) {
188 int idx = (itv->pgm_info_write_idx + i) % itv->pgm_info_num;
189 struct v4l2_enc_idx_entry *e = itv->pgm_info + idx;
190 u32 addr = itv->pgm_info_offset + 4 + idx * 24;
191 const int mapping[] = { V4L2_ENC_IDX_FRAME_P, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_B, 0 };
192
193 e->offset = read_enc(addr + 4) + ((u64)read_enc(addr + 8) << 32);
194 if (e->offset > itv->mpg_data_received) {
195 break;
196 }
197 e->offset += itv->vbi_data_inserted;
198 e->length = read_enc(addr);
199 e->pts = read_enc(addr + 16) + ((u64)(read_enc(addr + 20) & 1) << 32);
200 e->flags = mapping[read_enc(addr + 12) & 3];
201 i++;
202 }
203 itv->pgm_info_write_idx = (itv->pgm_info_write_idx + i) % itv->pgm_info_num;
204}
205
206static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err)
207{
208 struct ivtv *itv = s->itv;
209 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
210 struct ivtv_buffer *buf;
211 DEFINE_WAIT(wait);
212
213 *err = 0;
214 while (1) {
215 if (s->type == IVTV_ENC_STREAM_TYPE_MPG) {
216 /* Process pending program info updates and pending VBI data */
217 ivtv_update_pgm_info(itv);
218
219 if (jiffies - itv->dualwatch_jiffies > HZ) {
220 itv->dualwatch_jiffies = jiffies;
221 ivtv_dualwatch(itv);
222 }
223
224 if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
225 !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
226 while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) {
227 /* byteswap and process VBI data */
228 ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type);
229 ivtv_enqueue(s_vbi, buf, &s_vbi->q_free);
230 }
231 }
232 buf = &itv->vbi.sliced_mpeg_buf;
233 if (buf->readpos != buf->bytesused) {
234 return buf;
235 }
236 }
237
238 /* do we have leftover data? */
239 buf = ivtv_dequeue(s, &s->q_io);
240 if (buf)
241 return buf;
242
243 /* do we have new data? */
244 buf = ivtv_dequeue(s, &s->q_full);
245 if (buf) {
246 if (!test_and_clear_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags))
247 return buf;
248 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
249 /* byteswap MPG data */
250 ivtv_buf_swap(buf);
251 else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) {
252 /* byteswap and process VBI data */
253 ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type);
254 }
255 return buf;
256 }
257 /* return if file was opened with O_NONBLOCK */
258 if (non_block) {
259 *err = -EAGAIN;
260 return NULL;
261 }
262
263 /* return if end of stream */
264 if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
265 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
266 IVTV_DEBUG_INFO("EOS %s\n", s->name);
267 return NULL;
268 }
269
270 /* wait for more data to arrive */
271 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
272 /* New buffers might have become available before we were added to the waitqueue */
273 if (!s->q_full.buffers)
274 schedule();
275 finish_wait(&s->waitq, &wait);
276 if (signal_pending(current)) {
277 /* return if a signal was received */
278 IVTV_DEBUG_INFO("User stopped %s\n", s->name);
279 *err = -EINTR;
280 return NULL;
281 }
282 }
283}
284
285static void ivtv_setup_sliced_vbi_buf(struct ivtv *itv)
286{
287 int idx = itv->vbi.inserted_frame % IVTV_VBI_FRAMES;
288
289 itv->vbi.sliced_mpeg_buf.buf = itv->vbi.sliced_mpeg_data[idx];
290 itv->vbi.sliced_mpeg_buf.bytesused = itv->vbi.sliced_mpeg_size[idx];
291 itv->vbi.sliced_mpeg_buf.readpos = 0;
292}
293
294static size_t ivtv_copy_buf_to_user(struct ivtv_stream *s, struct ivtv_buffer *buf,
295 char __user *ubuf, size_t ucount)
296{
297 struct ivtv *itv = s->itv;
298 size_t len = buf->bytesused - buf->readpos;
299
300 if (len > ucount) len = ucount;
301 if (itv->vbi.insert_mpeg && s->type == IVTV_ENC_STREAM_TYPE_MPG &&
302 itv->vbi.sliced_in->service_set && buf != &itv->vbi.sliced_mpeg_buf) {
303 const char *start = buf->buf + buf->readpos;
304 const char *p = start + 1;
305 const u8 *q;
306 u8 ch = itv->search_pack_header ? 0xba : 0xe0;
307 int stuffing, i;
308
309 while (start + len > p && (q = memchr(p, 0, start + len - p))) {
310 p = q + 1;
311 if ((char *)q + 15 >= buf->buf + buf->bytesused ||
312 q[1] != 0 || q[2] != 1 || q[3] != ch) {
313 continue;
314 }
315 if (!itv->search_pack_header) {
316 if ((q[6] & 0xc0) != 0x80)
317 continue;
318 if (((q[7] & 0xc0) == 0x80 && (q[9] & 0xf0) == 0x20) ||
319 ((q[7] & 0xc0) == 0xc0 && (q[9] & 0xf0) == 0x30)) {
320 ch = 0xba;
321 itv->search_pack_header = 1;
322 p = q + 9;
323 }
324 continue;
325 }
326 stuffing = q[13] & 7;
327 /* all stuffing bytes must be 0xff */
328 for (i = 0; i < stuffing; i++)
329 if (q[14 + i] != 0xff)
330 break;
331 if (i == stuffing && (q[4] & 0xc4) == 0x44 && (q[12] & 3) == 3 &&
332 q[14 + stuffing] == 0 && q[15 + stuffing] == 0 &&
333 q[16 + stuffing] == 1) {
334 itv->search_pack_header = 0;
335 len = (char *)q - start;
336 ivtv_setup_sliced_vbi_buf(itv);
337 break;
338 }
339 }
340 }
341 if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) {
342 IVTV_DEBUG_WARN("copy %zd bytes to user failed for %s\n", len, s->name);
343 return -EFAULT;
344 }
345 /*IVTV_INFO("copied %lld %d %d %d %d %d vbi %d\n", itv->mpg_data_received, len, ucount,
346 buf->readpos, buf->bytesused, buf->bytesused - buf->readpos - len,
347 buf == &itv->vbi.sliced_mpeg_buf); */
348 buf->readpos += len;
349 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && buf != &itv->vbi.sliced_mpeg_buf)
350 itv->mpg_data_received += len;
351 return len;
352}
353
354static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_count, int non_block)
355{
356 struct ivtv *itv = s->itv;
357 size_t tot_written = 0;
358 int single_frame = 0;
359
360 if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
361 /* shouldn't happen */
362 IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
363 return -EIO;
364 }
365
366	/* Each VBI buffer is one frame; the V4L2 API says that VBI frames should
367	   arrive one by one, so make sure we never return more than one VBI frame at a time */
368 if (s->type == IVTV_DEC_STREAM_TYPE_VBI ||
369 (s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set))
370 single_frame = 1;
371
372 for (;;) {
373 struct ivtv_buffer *buf;
374 int rc;
375
376 buf = ivtv_get_buffer(s, non_block, &rc);
377 if (buf == NULL && rc == -EAGAIN && tot_written)
378 break;
379 if (buf == NULL)
380 return rc;
381 rc = ivtv_copy_buf_to_user(s, buf, ubuf + tot_written, tot_count - tot_written);
382 if (buf != &itv->vbi.sliced_mpeg_buf) {
383 ivtv_enqueue(s, buf, (buf->readpos == buf->bytesused) ? &s->q_free : &s->q_io);
384 }
385 else if (buf->readpos == buf->bytesused) {
386 int idx = itv->vbi.inserted_frame % IVTV_VBI_FRAMES;
387 itv->vbi.sliced_mpeg_size[idx] = 0;
388 itv->vbi.inserted_frame++;
389 itv->vbi_data_inserted += buf->bytesused;
390 }
391 if (rc < 0)
392 return rc;
393 tot_written += rc;
394
395 if (tot_written == tot_count || single_frame)
396 break;
397 }
398 return tot_written;
399}
400
401static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t count,
402 loff_t *pos, int non_block)
403{
404 ssize_t rc = count ? ivtv_read(s, ubuf, count, non_block) : 0;
405 struct ivtv *itv = s->itv;
406
407 IVTV_DEBUG_INFO("read %zd from %s, got %zd\n", count, s->name, rc);
408 if (rc > 0)
409		*pos += rc;
410 return rc;
411}
412
413int ivtv_start_capture(struct ivtv_open_id *id)
414{
415 struct ivtv *itv = id->itv;
416 struct ivtv_stream *s = &itv->streams[id->type];
417 struct ivtv_stream *s_vbi;
418
419 if (s->type == IVTV_ENC_STREAM_TYPE_RAD ||
420 s->type == IVTV_DEC_STREAM_TYPE_MPG ||
421 s->type == IVTV_DEC_STREAM_TYPE_YUV ||
422 s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
423 /* you cannot read from these stream types. */
424 return -EPERM;
425 }
426
427 /* Try to claim this stream. */
428 if (ivtv_claim_stream(id, s->type))
429 return -EBUSY;
430
431 /* This stream does not need to start capturing */
432 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
433 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
434 return 0;
435 }
436
437	/* If a capture is already in progress, then there is
438	   nothing extra to do. */
439 if (test_bit(IVTV_F_S_STREAMOFF, &s->s_flags) || test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
440 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
441 return 0;
442 }
443
444 /* Start VBI capture if required */
445 s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
446 if (s->type == IVTV_ENC_STREAM_TYPE_MPG &&
447 test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
448 !test_and_set_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) {
449 /* Note: the IVTV_ENC_STREAM_TYPE_VBI is claimed
450 automatically when the MPG stream is claimed.
451 We only need to start the VBI capturing. */
452 if (ivtv_start_v4l2_encode_stream(s_vbi)) {
453 IVTV_DEBUG_WARN("VBI capture start failed\n");
454
455 /* Failure, clean up and return an error */
456 clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags);
457 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
458 /* also releases the associated VBI stream */
459 ivtv_release_stream(s);
460 return -EIO;
461 }
462 IVTV_DEBUG_INFO("VBI insertion started\n");
463 }
464
465 /* Tell the card to start capturing */
466 if (!ivtv_start_v4l2_encode_stream(s)) {
467 /* We're done */
468 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
469 /* Resume a possibly paused encoder */
470 if (test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
471 ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1);
472 return 0;
473 }
474
475 /* failure, clean up */
476 IVTV_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name);
477
478 /* Note: the IVTV_ENC_STREAM_TYPE_VBI is released
479 automatically when the MPG stream is released.
480 We only need to stop the VBI capturing. */
481 if (s->type == IVTV_ENC_STREAM_TYPE_MPG &&
482 test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) {
483 ivtv_stop_v4l2_encode_stream(s_vbi, 0);
484 clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags);
485 }
486 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
487 ivtv_release_stream(s);
488 return -EIO;
489}
490
491ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
492{
493 struct ivtv_open_id *id = filp->private_data;
494 struct ivtv *itv = id->itv;
495 struct ivtv_stream *s = &itv->streams[id->type];
496 int rc;
497
498 IVTV_DEBUG_IOCTL("read %zd bytes from %s\n", count, s->name);
499
500 rc = ivtv_start_capture(id);
501 if (rc)
502 return rc;
503 return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
504}
505
506int ivtv_start_decoding(struct ivtv_open_id *id, int speed)
507{
508 struct ivtv *itv = id->itv;
509 struct ivtv_stream *s = &itv->streams[id->type];
510
511 if (atomic_read(&itv->decoding) == 0) {
512 if (ivtv_claim_stream(id, s->type)) {
513 /* someone else is using this stream already */
514 IVTV_DEBUG_WARN("start decode, stream already claimed\n");
515 return -EBUSY;
516 }
517 ivtv_start_v4l2_decode_stream(s, 0);
518 }
519 if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
520 return ivtv_set_speed(itv, speed);
521 return 0;
522}
523
524ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
525{
526 struct ivtv_open_id *id = filp->private_data;
527 struct ivtv *itv = id->itv;
528 struct ivtv_stream *s = &itv->streams[id->type];
529 struct ivtv_buffer *buf;
530 struct ivtv_queue q;
531 int bytes_written = 0;
532 int mode;
533 int rc;
534 DEFINE_WAIT(wait);
535
536 IVTV_DEBUG_IOCTL("write %zd bytes to %s\n", count, s->name);
537
538 if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
539 s->type != IVTV_DEC_STREAM_TYPE_YUV &&
540 s->type != IVTV_DEC_STREAM_TYPE_VOUT)
541 /* not decoder streams */
542 return -EPERM;
543
544 /* Try to claim this stream */
545 if (ivtv_claim_stream(id, s->type))
546 return -EBUSY;
547
548 /* This stream does not need to start any decoding */
549 if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
550 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
551 return ivtv_write_vbi(itv, user_buf, count);
552 }
553
554 mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? OUT_MPG : OUT_YUV;
555
556 if (ivtv_set_output_mode(itv, mode) != mode) {
557 ivtv_release_stream(s);
558 return -EBUSY;
559 }
560 ivtv_queue_init(&q);
561 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
562
563retry:
564 for (;;) {
565 /* Gather buffers */
566 while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
567 ivtv_enqueue(s, buf, &q);
568 while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) {
569 ivtv_enqueue(s, buf, &q);
570 }
571 if (q.buffers)
572 break;
573 if (filp->f_flags & O_NONBLOCK)
574 return -EAGAIN;
575 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
576 /* New buffers might have become free before we were added to the waitqueue */
577 if (!s->q_free.buffers)
578 schedule();
579 finish_wait(&s->waitq, &wait);
580 if (signal_pending(current)) {
581 IVTV_DEBUG_INFO("User stopped %s\n", s->name);
582 return -EINTR;
583 }
584 }
585
586 /* copy user data into buffers */
587 while ((buf = ivtv_dequeue(s, &q))) {
588 /* Make sure we really got all the user data */
589 rc = ivtv_buf_copy_from_user(s, buf, user_buf, count);
590
591 if (rc < 0) {
592 ivtv_queue_move(s, &q, NULL, &s->q_free, 0);
593 return rc;
594 }
595 user_buf += rc;
596 count -= rc;
597 bytes_written += rc;
598
599 if (buf->bytesused != s->buf_size) {
600 /* incomplete, leave in q_io for next time */
601 ivtv_enqueue(s, buf, &s->q_io);
602 break;
603 }
604 /* Byteswap MPEG buffer */
605 if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
606 ivtv_buf_swap(buf);
607 ivtv_enqueue(s, buf, &s->q_full);
608 }
609
610 /* Start decoder (returns 0 if already started) */
611 rc = ivtv_start_decoding(id, itv->speed);
612 if (rc) {
613 IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
614
615 /* failure, clean up */
616 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
617 clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
618 return rc;
619 }
620 if (test_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags)) {
621 if (s->q_full.length >= itv->dma_data_req_size) {
622 int got_sig;
623
624 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
625 while (!(got_sig = signal_pending(current)) &&
626 test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
627 schedule();
628 }
629 finish_wait(&itv->dma_waitq, &wait);
630 if (got_sig) {
631 IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
632 return -EINTR;
633 }
634
635 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
636 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
637 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 1);
638 }
639 }
640 /* more user data is available, wait until buffers become free
641 to transfer the rest. */
642 if (count && !(filp->f_flags & O_NONBLOCK))
643 goto retry;
644 IVTV_DEBUG_INFO("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
645 return bytes_written;
646}
647
648unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
649{
650 struct ivtv_open_id *id = filp->private_data;
651 struct ivtv *itv = id->itv;
652 struct ivtv_stream *s = &itv->streams[id->type];
653 int res = 0;
654
655 /* add stream's waitq to the poll list */
656 poll_wait(filp, &s->waitq, wait);
657
658 set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
659 if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
660 test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
661 res = POLLPRI;
662
663 /* Allow write if buffers are available for writing */
664 if (s->q_free.buffers)
665 res |= POLLOUT | POLLWRNORM;
666 return res;
667}
668
669unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
670{
671 struct ivtv_open_id *id = filp->private_data;
672 struct ivtv *itv = id->itv;
673 struct ivtv_stream *s = &itv->streams[id->type];
674 int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
675
676 /* Start a capture if there is none */
677 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
678 int rc = ivtv_start_capture(id);
679
680 if (rc) {
681 IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
682 s->name, rc);
683 return POLLERR;
684 }
685 }
686
687 /* add stream's waitq to the poll list */
688 poll_wait(filp, &s->waitq, wait);
689
690 if (eof || s->q_full.length)
691 return POLLIN | POLLRDNORM;
692 return 0;
693}
694
695void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
696{
697 struct ivtv *itv = id->itv;
698 struct ivtv_stream *s = &itv->streams[id->type];
699
700 IVTV_DEBUG_IOCTL("close() of %s\n", s->name);
701
702 /* 'Unclaim' this stream */
703
704 /* Stop capturing */
705 if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
706 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
707
708 IVTV_DEBUG_INFO("close stopping capture\n");
709 /* Special case: a running VBI capture for VBI insertion
710 in the mpeg stream. Need to stop that too. */
711 if (id->type == IVTV_ENC_STREAM_TYPE_MPG &&
712 test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags) &&
713 !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
714 IVTV_DEBUG_INFO("close stopping embedded VBI capture\n");
715 ivtv_stop_v4l2_encode_stream(s_vbi, 0);
716 }
717 if ((id->type == IVTV_DEC_STREAM_TYPE_VBI ||
718 id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
719 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
720 /* Also used internally, don't stop capturing */
721 s->id = -1;
722 }
723 else {
724 ivtv_stop_v4l2_encode_stream(s, gop_end);
725 }
726 }
727 clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
728 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
729
730 ivtv_release_stream(s);
731}
732
733static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
734{
735 struct ivtv *itv = id->itv;
736 struct ivtv_stream *s = &itv->streams[id->type];
737
738 IVTV_DEBUG_IOCTL("close() of %s\n", s->name);
739
740 /* Stop decoding */
741 if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
742 IVTV_DEBUG_INFO("close stopping decode\n");
743
744 ivtv_stop_v4l2_decode_stream(s, flags, pts);
745 }
746 clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
747 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
748 if (id->type == IVTV_DEC_STREAM_TYPE_YUV && test_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags)) {
749 /* Restore registers we've changed & clean up any mess we've made */
750 ivtv_yuv_close(itv);
751 }
752 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
753 itv->output_mode = OUT_NONE;
754 else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
755 itv->output_mode = OUT_NONE;
756
757 itv->speed = 0;
758 ivtv_release_stream(s);
759}
760
761int ivtv_v4l2_close(struct inode *inode, struct file *filp)
762{
763 struct ivtv_open_id *id = filp->private_data;
764 struct ivtv *itv = id->itv;
765 struct ivtv_stream *s = &itv->streams[id->type];
766
767 IVTV_DEBUG_IOCTL("close() of %s\n", s->name);
768
769 v4l2_prio_close(&itv->prio, &id->prio);
770
771 /* Easy case first: this stream was never claimed by us */
772 if (s->id != id->open_id) {
773 kfree(id);
774 return 0;
775 }
776
777 /* 'Unclaim' this stream */
778
779 /* Stop radio */
780 if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
781 /* Closing radio device, return to TV mode */
782 ivtv_mute(itv);
783 /* Mark that the radio is no longer in use */
784 clear_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
785 /* Switch tuner to TV */
786 ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std);
787 /* Select correct audio input (i.e. TV tuner or Line in) */
788 ivtv_audio_set_io(itv);
789 /* Done! Unmute and continue. */
790 ivtv_unmute(itv);
791 ivtv_release_stream(s);
792 } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
793 ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
794 } else {
795 ivtv_stop_capture(id, 0);
796 }
797 kfree(id);
798 return 0;
799}
800
801int ivtv_v4l2_open(struct inode *inode, struct file *filp)
802{
803 int x, y = 0;
804 struct ivtv_open_id *item;
805 struct ivtv *itv = NULL;
806 struct ivtv_stream *s = NULL;
807 int minor = MINOR(inode->i_rdev);
808
809 /* Find which card this open was on */
810 spin_lock(&ivtv_cards_lock);
811 for (x = 0; itv == NULL && x < ivtv_cards_active; x++) {
812 /* find out which stream this open was on */
813 for (y = 0; y < IVTV_MAX_STREAMS; y++) {
814 s = &ivtv_cards[x]->streams[y];
815 if (s->v4l2dev && s->v4l2dev->minor == minor) {
816 itv = ivtv_cards[x];
817 break;
818 }
819 }
820 }
821 spin_unlock(&ivtv_cards_lock);
822
823 if (itv == NULL) {
824 /* Couldn't find a device registered
825 on that minor, shouldn't happen! */
826 printk(KERN_WARNING "ivtv: no ivtv device found on minor %d\n", minor);
827 return -ENXIO;
828 }
829
830 if (y == IVTV_DEC_STREAM_TYPE_MPG &&
831 test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_YUV].s_flags))
832 return -EBUSY;
833
834 if (y == IVTV_DEC_STREAM_TYPE_YUV &&
835 test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_MPG].s_flags))
836 return -EBUSY;
837
838 if (y == IVTV_DEC_STREAM_TYPE_YUV) {
839 if (read_reg(0x82c) == 0) {
840 IVTV_ERR("Tried to open YUV output device but need to send data to mpeg decoder before it can be used\n");
841 /* return -ENODEV; */
842 }
843 ivtv_udma_alloc(itv);
844 }
845
846 /* Allocate memory */
847 item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
848 if (NULL == item) {
849 IVTV_DEBUG_WARN("nomem on v4l2 open\n");
850 return -ENOMEM;
851 }
852 item->itv = itv;
853 item->type = y;
854 v4l2_prio_open(&itv->prio, &item->prio);
855
856 item->open_id = itv->open_id++;
857 filp->private_data = item;
858
859 if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
860 /* Try to claim this stream */
861 if (ivtv_claim_stream(item, item->type)) {
862 /* No, it's already in use */
863 kfree(item);
864 return -EBUSY;
865 }
866
867 /* We have the radio */
868 ivtv_mute(itv);
869 /* Switch tuner to radio */
870 ivtv_call_i2c_clients(itv, AUDC_SET_RADIO, NULL);
871 /* Mark that the radio is being used. */
872 set_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
873 /* Select the correct audio input (i.e. radio tuner) */
874 ivtv_audio_set_io(itv);
875 /* Done! Unmute and continue. */
876 ivtv_unmute(itv);
877 }
878
879 /* YUV or MPG Decoding Mode? */
880 if (y == IVTV_DEC_STREAM_TYPE_MPG)
881 clear_bit(IVTV_F_I_DEC_YUV, &itv->i_flags);
882 else if (y == IVTV_DEC_STREAM_TYPE_YUV)
883 {
884 set_bit(IVTV_F_I_DEC_YUV, &itv->i_flags);
885 }
886
887 return 0;
888}
889
890void ivtv_mute(struct ivtv *itv)
891{
892 struct v4l2_control ctrl = { V4L2_CID_AUDIO_MUTE, 1 };
893
894 /* Mute sound to avoid pop */
895 ivtv_control_ioctls(itv, VIDIOC_S_CTRL, &ctrl);
896
897 if (atomic_read(&itv->capturing))
898 ivtv_vapi(itv, CX2341X_ENC_MUTE_AUDIO, 1, 1);
899
900 IVTV_DEBUG_INFO("Mute\n");
901}
902
903void ivtv_unmute(struct ivtv *itv)
904{
905 struct v4l2_control ctrl = { V4L2_CID_AUDIO_MUTE, 0 };
906
907 /* initialize or refresh input */
908 if (atomic_read(&itv->capturing) == 0)
909 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
910
911 ivtv_sleep_timeout(HZ / 10, 0);
912
913 if (atomic_read(&itv->capturing)) {
914 ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);
915 ivtv_vapi(itv, CX2341X_ENC_MUTE_AUDIO, 1, 0);
916 }
917
918 /* Unmute */
919 ivtv_control_ioctls(itv, VIDIOC_S_CTRL, &ctrl);
920 IVTV_DEBUG_INFO("Unmute\n");
921}
diff --git a/drivers/media/video/ivtv/ivtv-fileops.h b/drivers/media/video/ivtv/ivtv-fileops.h
new file mode 100644
index 000000000000..74a1745fabbc
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-fileops.h
@@ -0,0 +1,44 @@
1/*
2 file operation functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* Testing/Debugging */
22int ivtv_v4l2_open(struct inode *inode, struct file *filp);
23ssize_t ivtv_v4l2_read(struct file *filp, char __user *buf, size_t count,
24 loff_t * pos);
25ssize_t ivtv_v4l2_write(struct file *filp, const char __user *buf, size_t count,
26 loff_t * pos);
27int ivtv_v4l2_close(struct inode *inode, struct file *filp);
28unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
29unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
30int ivtv_start_capture(struct ivtv_open_id *id);
31void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end);
32int ivtv_start_decoding(struct ivtv_open_id *id, int speed);
33void ivtv_mute(struct ivtv *itv);
34void ivtv_unmute(struct ivtv *itv);
35
36/* Utilities */
37
38/* Try to claim a stream for the filehandle. Return 0 on success,
39 -EBUSY if stream already claimed. Once a stream is claimed, it
40 remains claimed until the associated filehandle is closed. */
41int ivtv_claim_stream(struct ivtv_open_id *id, int type);
42
43/* Release a previously claimed stream. */
44void ivtv_release_stream(struct ivtv_stream *s);
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
new file mode 100644
index 000000000000..d4c910b782af
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -0,0 +1,272 @@
1/*
2 ivtv firmware functions.
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include "ivtv-driver.h"
23#include "ivtv-mailbox.h"
24#include "ivtv-firmware.h"
25#include <linux/firmware.h>
26
27#define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE
28#define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6
29#define IVTV_MASK_VPU_ENABLE16 0xFFFFFFFB
30#define IVTV_CMD_VDM_STOP 0x00000000
31#define IVTV_CMD_AO_STOP 0x00000005
32#define IVTV_CMD_APU_PING 0x00000000
33#define IVTV_CMD_VPU_STOP15 0xFFFFFFFE
34#define IVTV_CMD_VPU_STOP16 0xFFFFFFEE
35#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF
36#define IVTV_CMD_SPU_STOP 0x00000001
37#define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A
38#define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640
39#define IVTV_SDRAM_SLEEPTIME (60 * HZ / 100) /* 600 ms */
40
41#define IVTV_DECODE_INIT_MPEG_FILENAME "v4l-cx2341x-init.mpg"
42#define IVTV_DECODE_INIT_MPEG_SIZE (152*1024)
43
44/* Encoder/decoder firmware sizes */
45#define IVTV_FW_ENC_SIZE (376836)
46#define IVTV_FW_DEC_SIZE (256*1024)
47
48static int load_fw_direct(const char *fn, volatile u8 __iomem *mem, struct ivtv *itv, long size)
49{
50 const struct firmware *fw = NULL;
51 int retries = 3;
52
53retry:
54 if (retries && request_firmware(&fw, fn, &itv->dev->dev) == 0) {
55 int i;
56 volatile u32 __iomem *dst = (volatile u32 __iomem *)mem;
57 const u32 *src = (const u32 *)fw->data;
58
59 /* temporarily allow 256 KB encoding firmwares as well for
60 compatibility with blackbird cards */
61 if (fw->size != size && fw->size != 256 * 1024) {
62 /* Due to race conditions in firmware loading (esp. with udev <0.95)
63 the wrong file was sometimes loaded. So we check filesizes to
64 see if at least the right-sized file was loaded. If not, then we
65 retry. */
66			IVTV_INFO("retry: loaded file had the wrong size for %s (expected %ld, got %zd)\n", fn, size, fw->size);
67 release_firmware(fw);
68 retries--;
69 goto retry;
70 }
71 for (i = 0; i < fw->size; i += 4) {
72 /* no need for endianness conversion on the ppc */
73 __raw_writel(*src, dst);
74 dst++;
75 src++;
76 }
77 release_firmware(fw);
78 IVTV_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
79 return size;
80 }
81 IVTV_ERR("unable to open firmware %s (must be %ld bytes)\n", fn, size);
82 IVTV_ERR("did you put the firmware in the hotplug firmware directory?\n");
83 return -ENOMEM;
84}
85
86void ivtv_halt_firmware(struct ivtv *itv)
87{
88 IVTV_DEBUG_INFO("Preparing for firmware halt.\n");
89 if (itv->has_cx23415 && itv->dec_mbox.mbox)
90 ivtv_vapi(itv, CX2341X_DEC_HALT_FW, 0);
91 if (itv->enc_mbox.mbox)
92 ivtv_vapi(itv, CX2341X_ENC_HALT_FW, 0);
93
94 ivtv_sleep_timeout(HZ / 100, 0);
95 itv->enc_mbox.mbox = itv->dec_mbox.mbox = NULL;
96
97 IVTV_DEBUG_INFO("Stopping VDM\n");
98 write_reg(IVTV_CMD_VDM_STOP, IVTV_REG_VDM);
99
100 IVTV_DEBUG_INFO("Stopping AO\n");
101 write_reg(IVTV_CMD_AO_STOP, IVTV_REG_AO);
102
103 IVTV_DEBUG_INFO("pinging (?) APU\n");
104 write_reg(IVTV_CMD_APU_PING, IVTV_REG_APU);
105
106 IVTV_DEBUG_INFO("Stopping VPU\n");
107 if (!itv->has_cx23415)
108 write_reg(IVTV_CMD_VPU_STOP16, IVTV_REG_VPU);
109 else
110 write_reg(IVTV_CMD_VPU_STOP15, IVTV_REG_VPU);
111
112 IVTV_DEBUG_INFO("Resetting Hw Blocks\n");
113 write_reg(IVTV_CMD_HW_BLOCKS_RST, IVTV_REG_HW_BLOCKS);
114
115 IVTV_DEBUG_INFO("Stopping SPU\n");
116 write_reg(IVTV_CMD_SPU_STOP, IVTV_REG_SPU);
117
118 ivtv_sleep_timeout(HZ / 100, 0);
119
120 IVTV_DEBUG_INFO("init Encoder SDRAM pre-charge\n");
121 write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_ENC_SDRAM_PRECHARGE);
122
123 IVTV_DEBUG_INFO("init Encoder SDRAM refresh to 1us\n");
124 write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_ENC_SDRAM_REFRESH);
125
126 if (itv->has_cx23415) {
127 IVTV_DEBUG_INFO("init Decoder SDRAM pre-charge\n");
128 write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_DEC_SDRAM_PRECHARGE);
129
130 IVTV_DEBUG_INFO("init Decoder SDRAM refresh to 1us\n");
131 write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_DEC_SDRAM_REFRESH);
132 }
133
134 IVTV_DEBUG_INFO("Sleeping for %dms (600 recommended)\n",
135 (int)(IVTV_SDRAM_SLEEPTIME * 1000 / HZ));
136 ivtv_sleep_timeout(IVTV_SDRAM_SLEEPTIME, 0);
137}
138
139void ivtv_firmware_versions(struct ivtv *itv)
140{
141 u32 data[CX2341X_MBOX_MAX_DATA];
142
143 /* Encoder */
144 ivtv_vapi_result(itv, data, CX2341X_ENC_GET_VERSION, 0);
145 IVTV_INFO("Encoder revision: 0x%08x\n", data[0]);
146
147 if (data[0] != 0x02060039)
148 IVTV_WARN("Recommended firmware version is 0x02060039.\n");
149
150 if (itv->has_cx23415) {
151 /* Decoder */
152 ivtv_vapi_result(itv, data, CX2341X_DEC_GET_VERSION, 0);
153 IVTV_INFO("Decoder revision: 0x%08x\n", data[0]);
154 }
155}
156
157static int ivtv_firmware_copy(struct ivtv *itv)
158{
159 IVTV_DEBUG_INFO("Loading encoder image\n");
160 if (load_fw_direct(CX2341X_FIRM_ENC_FILENAME,
161 itv->enc_mem, itv, IVTV_FW_ENC_SIZE) != IVTV_FW_ENC_SIZE) {
162 IVTV_DEBUG_WARN("failed loading encoder firmware\n");
163 return -3;
164 }
165 if (!itv->has_cx23415)
166 return 0;
167
168 IVTV_DEBUG_INFO("Loading decoder image\n");
169 if (load_fw_direct(CX2341X_FIRM_DEC_FILENAME,
170 itv->dec_mem, itv, IVTV_FW_DEC_SIZE) != IVTV_FW_DEC_SIZE) {
171 IVTV_DEBUG_WARN("failed loading decoder firmware\n");
172 return -1;
173 }
174 return 0;
175}
176
177static volatile struct ivtv_mailbox __iomem *ivtv_search_mailbox(const volatile u8 __iomem *mem, u32 size)
178{
179 int i;
180
181	/* the mailbox is preceded by a 16-byte 'magic cookie' starting at a 256-byte
182	   address boundary */
183 for (i = 0; i < size; i += 0x100) {
184 if (readl(mem + i) == 0x12345678 &&
185 readl(mem + i + 4) == 0x34567812 &&
186 readl(mem + i + 8) == 0x56781234 &&
187 readl(mem + i + 12) == 0x78123456) {
188 return (volatile struct ivtv_mailbox __iomem *)(mem + i + 16);
189 }
190 }
191 return NULL;
192}
193
194int ivtv_firmware_init(struct ivtv *itv)
195{
196 int err;
197
198 ivtv_halt_firmware(itv);
199
200 /* load firmware */
201 err = ivtv_firmware_copy(itv);
202 if (err) {
203 IVTV_DEBUG_WARN("Error %d loading firmware\n", err);
204 return err;
205 }
206
207 /* start firmware */
208 write_reg(read_reg(IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE, IVTV_REG_SPU);
209 ivtv_sleep_timeout(HZ / 10, 0);
210 if (itv->has_cx23415)
211 write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE15, IVTV_REG_VPU);
212 else
213 write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE16, IVTV_REG_VPU);
214 ivtv_sleep_timeout(HZ / 10, 0);
215
216 /* find mailboxes and ping firmware */
217 itv->enc_mbox.mbox = ivtv_search_mailbox(itv->enc_mem, IVTV_ENCODER_SIZE);
218 if (itv->enc_mbox.mbox == NULL)
219 IVTV_ERR("Encoder mailbox not found\n");
220 else if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0)) {
221 IVTV_ERR("Encoder firmware dead!\n");
222 itv->enc_mbox.mbox = NULL;
223 }
224 if (itv->enc_mbox.mbox == NULL)
225 return -ENODEV;
226
227 if (!itv->has_cx23415)
228 return 0;
229
230 itv->dec_mbox.mbox = ivtv_search_mailbox(itv->dec_mem, IVTV_DECODER_SIZE);
231 if (itv->dec_mbox.mbox == NULL)
232 IVTV_ERR("Decoder mailbox not found\n");
233 else if (itv->has_cx23415 && ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0)) {
234 IVTV_ERR("Decoder firmware dead!\n");
235 itv->dec_mbox.mbox = NULL;
236 }
237 return itv->dec_mbox.mbox ? 0 : -ENODEV;
238}
239
240void ivtv_init_mpeg_decoder(struct ivtv *itv)
241{
242 u32 data[CX2341X_MBOX_MAX_DATA];
243 long readbytes;
244 volatile u8 __iomem *mem_offset;
245
246 data[0] = 0;
247 data[1] = itv->params.width; /* YUV source width */
248 data[2] = itv->params.height;
249 data[3] = itv->params.audio_properties; /* Audio settings to use,
250 bitmap. see docs. */
251 if (ivtv_api(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, data)) {
252 IVTV_ERR("ivtv_init_mpeg_decoder failed to set decoder source\n");
253 return;
254 }
255
256 if (ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1) != 0) {
257 IVTV_ERR("ivtv_init_mpeg_decoder failed to start playback\n");
258 return;
259 }
260 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);
261 mem_offset = itv->dec_mem + data[1];
262
263 if ((readbytes = load_fw_direct(IVTV_DECODE_INIT_MPEG_FILENAME,
264 mem_offset, itv, IVTV_DECODE_INIT_MPEG_SIZE)) <= 0) {
265 IVTV_DEBUG_WARN("failed to read mpeg decoder initialisation file %s\n",
266 IVTV_DECODE_INIT_MPEG_FILENAME);
267 } else {
268 ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, readbytes, 0);
269 ivtv_sleep_timeout(HZ / 10, 0);
270 }
271 ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 4, 0, 0, 0, 1);
272}
diff --git a/drivers/media/video/ivtv/ivtv-firmware.h b/drivers/media/video/ivtv/ivtv-firmware.h
new file mode 100644
index 000000000000..8b2ffe658905
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-firmware.h
@@ -0,0 +1,25 @@
1/*
2 ivtv firmware functions.
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22int ivtv_firmware_init(struct ivtv *itv);
23void ivtv_firmware_versions(struct ivtv *itv);
24void ivtv_halt_firmware(struct ivtv *itv);
25void ivtv_init_mpeg_decoder(struct ivtv *itv);
diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c
new file mode 100644
index 000000000000..bc8f8ca2961f
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-gpio.c
@@ -0,0 +1,307 @@
1/*
2 gpio functions.
3 Merging GPIO support into driver:
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include "ivtv-driver.h"
23#include "ivtv-cards.h"
24#include "ivtv-gpio.h"
25#include <media/tuner.h>
26
27/*
28 * GPIO assignment of Yuan MPG600/MPG160
29 *
30 * bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0
31 * OUTPUT IN1 IN0 AM3 AM2 AM1 AM0
32 * INPUT DM1 DM0
33 *
34 * IN* : Input selection
35 * IN1 IN0
36 * 1 1 N/A
37 * 1 0 Line
38 * 0 1 N/A
39 * 0 0 Tuner
40 *
41 * AM* : Audio Mode
42 * AM3 0: Normal 1: Mixed(Sub+Main channel)
43 * AM2 0: Subchannel 1: Main channel
44 * AM1 0: Stereo 1: Mono
45 * AM0 0: Normal 1: Mute
46 *
47 * DM* : Detected tuner audio Mode
48 * DM1 0: Stereo 1: Mono
49 * DM0 0: Multiplex 1: Normal
50 *
51 * GPIO Initial Settings
52 * MPG600 MPG160
53 * DIR 0x3080 0x7080
54 * OUTPUT 0x000C 0x400C
55 *
56 * Special thanks to Makoto Iguchi <iguchi@tahoo.org> and Mr. Anonymous
57 * for analyzing GPIO of MPG160.
58 *
59 *****************************************************************************
60 *
61 * GPIO assignment of Avermedia M179 (per information direct from AVerMedia)
62 *
63 * bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0
64 * OUTPUT IN0 AM0 IN1 AM1 AM2 IN2 BR0 BR1
65 * INPUT
66 *
67 * IN* : Input selection
68 * IN0 IN1 IN2
69 * * 1 * Mute
70 * 0 0 0 Line-In
71 * 1 0 0 TV Tuner Audio
72 * 0 0 1 FM Audio
73 * 1 0 1 Mute
74 *
75 * AM* : Audio Mode
76 * AM0 AM1 AM2
77 * 0 0 0 TV Tuner Audio: L_OUT=(L+R)/2, R_OUT=SAP
78 * 0 0 1 TV Tuner Audio: L_OUT=R_OUT=SAP (SAP)
79 * 0 1 0 TV Tuner Audio: L_OUT=L, R_OUT=R (stereo)
80 * 0 1 1 TV Tuner Audio: mute
81 * 1 * * TV Tuner Audio: L_OUT=R_OUT=(L+R)/2 (mono)
82 *
83 * BR* : Audio Sample Rate (BR stands for bitrate for some reason)
84 * BR0 BR1
85 * 0 0 32 kHz
86 * 0 1 44.1 kHz
87 * 1 0 48 kHz
88 *
89 * DM* : Detected tuner audio Mode
90 * Unknown currently
91 *
92 * Special thanks to AVerMedia Technologies, Inc. and Jiun-Kuei Jung at
93 * AVerMedia for providing the GPIO information used to add support
94 * for the M179 cards.
95 */
96
97/********************* GPIO stuffs *********************/
98
99/* GPIO registers */
100#define IVTV_REG_GPIO_IN 0x9008
101#define IVTV_REG_GPIO_OUT 0x900c
102#define IVTV_REG_GPIO_DIR 0x9020
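/* Illustrative sketch, not part of the original file: the GPIO tables above
 * are applied with a masked read-modify-write of the output register, so only
 * the bits owned by the card definition change.  ivtv_gpio() at the end of
 * this file does exactly this; the helper name here is invented for the
 * example only. */
static inline void ivtv_gpio_apply(struct ivtv *itv, u16 mask, u16 data)
{
	if (mask)
		write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask),
			  IVTV_REG_GPIO_OUT);
}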
103
104void ivtv_reset_ir_gpio(struct ivtv *itv)
105{
106 int curdir, curout;
107
108 if (itv->card->type != IVTV_CARD_PVR_150)
109 return;
110 IVTV_DEBUG_INFO("Resetting PVR150 IR\n");
111 curout = read_reg(IVTV_REG_GPIO_OUT);
112 curdir = read_reg(IVTV_REG_GPIO_DIR);
113 curdir |= 0x80;
114 write_reg(curdir, IVTV_REG_GPIO_DIR);
115 curout = (curout & ~0xF) | 1;
116 write_reg(curout, IVTV_REG_GPIO_OUT);
117	/* We could use something else for a shorter delay */
118 current->state = TASK_INTERRUPTIBLE;
119 schedule_timeout(1);
120 curout |= 2;
121 write_reg(curout, IVTV_REG_GPIO_OUT);
122 curdir &= ~0x80;
123 write_reg(curdir, IVTV_REG_GPIO_DIR);
124}
125
126#ifdef HAVE_XC3028
127int ivtv_reset_tuner_gpio(enum v4l2_tuner_type mode, void *priv, int ptr)
128{
129 int curdir, curout;
130 struct ivtv *itv = (struct ivtv *) priv;
131
132 if (itv->card->type != IVTV_CARD_PG600V2 || itv->options.tuner != TUNER_XCEIVE_XC3028)
133 return -EINVAL;
134 IVTV_INFO("Resetting tuner.\n");
135 curout = read_reg(IVTV_REG_GPIO_OUT);
136 curdir = read_reg(IVTV_REG_GPIO_DIR);
137 curdir |= (1 << 12); /* GPIO bit 12 */
138
139 curout &= ~(1 << 12);
140 write_reg(curout, IVTV_REG_GPIO_OUT);
141 current->state = TASK_INTERRUPTIBLE;
142 schedule_timeout(1);
143
144 curout |= (1 << 12);
145 write_reg(curout, IVTV_REG_GPIO_OUT);
146 current->state = TASK_INTERRUPTIBLE;
147 schedule_timeout(1);
148
149 return 0;
150}
151#endif
152
153void ivtv_gpio_init(struct ivtv *itv)
154{
155 if (itv->card->gpio_init.direction == 0)
156 return;
157
158 IVTV_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
159 read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT));
160
161 /* init output data then direction */
162 write_reg(itv->card->gpio_init.initial_value, IVTV_REG_GPIO_OUT);
163 write_reg(itv->card->gpio_init.direction, IVTV_REG_GPIO_DIR);
164}
165
166static struct v4l2_queryctrl gpio_ctrl_mute = {
167 .id = V4L2_CID_AUDIO_MUTE,
168 .type = V4L2_CTRL_TYPE_BOOLEAN,
169 .name = "Mute",
170 .minimum = 0,
171 .maximum = 1,
172 .step = 1,
173 .default_value = 1,
174 .flags = 0,
175};
176
177int ivtv_gpio(struct ivtv *itv, unsigned int command, void *arg)
178{
179 struct v4l2_tuner *tuner = arg;
180 struct v4l2_control *ctrl = arg;
181 struct v4l2_routing *route = arg;
182 u16 mask, data;
183
184 switch (command) {
185 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
186 mask = itv->card->gpio_audio_freq.mask;
187 switch (*(u32 *)arg) {
188 case 32000:
189 data = itv->card->gpio_audio_freq.f32000;
190 break;
191 case 44100:
192 data = itv->card->gpio_audio_freq.f44100;
193 break;
194 case 48000:
195 default:
196 data = itv->card->gpio_audio_freq.f48000;
197 break;
198 }
199 break;
200
201 case VIDIOC_G_TUNER:
202 mask = itv->card->gpio_audio_detect.mask;
203 if (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask))
204 tuner->rxsubchans = V4L2_TUNER_MODE_STEREO |
205 V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
206 else
207 tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
208 return 0;
209
210 case VIDIOC_S_TUNER:
211 mask = itv->card->gpio_audio_mode.mask;
212 switch (tuner->audmode) {
213 case V4L2_TUNER_MODE_LANG1:
214 data = itv->card->gpio_audio_mode.lang1;
215 break;
216 case V4L2_TUNER_MODE_LANG2:
217 data = itv->card->gpio_audio_mode.lang2;
218 break;
219 case V4L2_TUNER_MODE_MONO:
220 data = itv->card->gpio_audio_mode.mono;
221 break;
222 case V4L2_TUNER_MODE_STEREO:
223 case V4L2_TUNER_MODE_LANG1_LANG2:
224 default:
225 data = itv->card->gpio_audio_mode.stereo;
226 break;
227 }
228 break;
229
230 case AUDC_SET_RADIO:
231 mask = itv->card->gpio_audio_input.mask;
232 data = itv->card->gpio_audio_input.radio;
233 break;
234
235 case VIDIOC_S_STD:
236 mask = itv->card->gpio_audio_input.mask;
237 data = itv->card->gpio_audio_input.tuner;
238 break;
239
240 case VIDIOC_INT_S_AUDIO_ROUTING:
241 if (route->input > 2)
242 return -EINVAL;
243 mask = itv->card->gpio_audio_input.mask;
244 switch (route->input) {
245 case 0:
246 data = itv->card->gpio_audio_input.tuner;
247 break;
248 case 1:
249 data = itv->card->gpio_audio_input.linein;
250 break;
251 case 2:
252 default:
253 data = itv->card->gpio_audio_input.radio;
254 break;
255 }
256 break;
257
258 case VIDIOC_G_CTRL:
259 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
260 return -EINVAL;
261 mask = itv->card->gpio_audio_mute.mask;
262 data = itv->card->gpio_audio_mute.mute;
263 ctrl->value = (read_reg(IVTV_REG_GPIO_OUT) & mask) == data;
264 return 0;
265
266 case VIDIOC_S_CTRL:
267 if (ctrl->id != V4L2_CID_AUDIO_MUTE)
268 return -EINVAL;
269 mask = itv->card->gpio_audio_mute.mask;
270 data = ctrl->value ? itv->card->gpio_audio_mute.mute : 0;
271 break;
272
273 case VIDIOC_QUERYCTRL:
274 {
275 struct v4l2_queryctrl *qc = arg;
276
277 if (qc->id != V4L2_CID_AUDIO_MUTE)
278 return -EINVAL;
279 *qc = gpio_ctrl_mute;
280 return 0;
281 }
282
283 case VIDIOC_LOG_STATUS:
284 IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n",
285 read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT),
286 read_reg(IVTV_REG_GPIO_IN));
287 return 0;
288
289 case VIDIOC_INT_S_VIDEO_ROUTING:
290 if (route->input > 2) /* 0:Tuner 1:Composite 2:S-Video */
291 return -EINVAL;
292 mask = itv->card->gpio_video_input.mask;
293 if (route->input == 0)
294 data = itv->card->gpio_video_input.tuner;
295 else if (route->input == 1)
296 data = itv->card->gpio_video_input.composite;
297 else
298 data = itv->card->gpio_video_input.svideo;
299 break;
300
301 default:
302 return -EINVAL;
303 }
304 if (mask)
305 write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT);
306 return 0;
307}
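
/* Worked example (hypothetical card values, for illustration only): if a
 * card's gpio_audio_mute.mask were 0x0008 and gpio_audio_mute.mute 0x0008,
 * then a VIDIOC_S_CTRL mute request would end up performing
 *
 *	write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~0x0008) | 0x0008,
 *		  IVTV_REG_GPIO_OUT);
 *
 * i.e. only GPIO line 3 changes state; the masked read-modify-write above
 * leaves every other output line untouched.
 */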
diff --git a/drivers/media/video/ivtv/ivtv-gpio.h b/drivers/media/video/ivtv/ivtv-gpio.h
new file mode 100644
index 000000000000..c301d2a39346
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-gpio.h
@@ -0,0 +1,25 @@
1/*
2 gpio functions.
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* GPIO stuff */
22void ivtv_gpio_init(struct ivtv *itv);
23void ivtv_reset_ir_gpio(struct ivtv *itv);
24int ivtv_reset_tuner_gpio(enum v4l2_tuner_type mode, void *priv, int ptr);
25int ivtv_gpio(struct ivtv *itv, unsigned int command, void *arg);
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
new file mode 100644
index 000000000000..50624c6a62a5
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -0,0 +1,748 @@
1/*
2 I2C functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 This file includes an i2c implementation that was reverse engineered
23 from the Hauppauge Windows driver. Older ivtv versions used i2c-algo-bit,
24 which, whilst fine under most circumstances, had trouble with the Zilog
25 CPU on the PVR-150 that handles IR functions (occasional inability to
26 communicate with the chip until it was reset) and also with the i2c
27 bus becoming completely unreachable when multiple PVR cards were present.
28
29 The implementation is very similar to i2c-algo-bit, but there are enough
30 subtle differences that the two are hard to merge. The general strategy
31 employed by i2c-algo-bit is to use udelay() to implement the timing
32 when putting out bits on the scl/sda lines. The general strategy taken
33 here is to poll the lines for state changes (see ivtv_waitscl and
34 ivtv_waitsda). In addition there are small delays at various locations
35 which poll the SCL line 5 times (ivtv_scldelay). I would guess that,
36 since this is memory-mapped I/O, the length of those delays is tied
37 to the PCI bus clock. There is also some extra code dealing with recovery
38 and retries. Since it is not known what causes the actual i2c problems
39 in the first place, if one were to attempt to use i2c-algo-bit the only
40 goal would be to try to make it follow the same code path.
41 This would be a lot of work, and I'm also not convinced that it would
42 provide a generic benefit to i2c-algo-bit. Therefore consider this
43 an engineering solution -- not pretty, but it works.
44
45 Some more general comments about what we are doing:
46
47 The i2c bus is a 2 wire serial bus, with clock (SCL) and data (SDA)
48 lines. To communicate on the bus (as a master, we don't act as a slave),
49 we first initiate a start condition (ivtv_start). We then write the
50 address of the device that we want to communicate with, along with a flag
51 that indicates whether this is a read or a write. The slave then issues
52 an ACK signal (ivtv_ack), which tells us that it is ready for reading /
53 writing. We then proceed with reading or writing (ivtv_read/ivtv_write),
54 and finally issue a stop condition (ivtv_stop) to make the bus available
55 to other masters.
56
57 There is an additional form of transaction where a write may be
58 immediately followed by a read. In this case, there is no intervening
59 stop condition. (Only the msp3400 chip uses this method of data transfer).
60 */
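
/* A minimal sketch (illustrative only, not from the original source) of what
 * the combined write-then-read case looks like from a caller's point of view:
 * two i2c_msg entries, the second flagged I2C_M_RD, passed to the adapter in
 * a single i2c_transfer() call. ivtv_xfer() below receives exactly this pair
 * and suppresses the stop condition between the two messages. The 0x10
 * subaddress is made up; 0x40 is the msp3400 address defined further down.
 */
#if 0	/* sketch only, never compiled */
static int example_write_then_read(struct i2c_adapter *adap)
{
	u8 sub = 0x10;		/* hypothetical register subaddress */
	u8 buf[2];
	struct i2c_msg msgs[2] = {
		{ .addr = 0x40, .flags = 0,        .len = 1, .buf = &sub },
		{ .addr = 0x40, .flags = I2C_M_RD, .len = 2, .buf = buf  },
	};

	return i2c_transfer(adap, msgs, 2);	/* returns 2 on success */
}
#endif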
61
62#include "ivtv-driver.h"
63#include "ivtv-cards.h"
64#include "ivtv-gpio.h"
65#include "ivtv-i2c.h"
66
67#include <media/ir-kbd-i2c.h>
68
69/* i2c implementation for cx23415/6 chip, ivtv project.
70 * Author: Kevin Thayer (nufan_wfk at yahoo.com)
71 */
72/* i2c stuff */
73#define IVTV_REG_I2C_SETSCL_OFFSET 0x7000
74#define IVTV_REG_I2C_SETSDA_OFFSET 0x7004
75#define IVTV_REG_I2C_GETSCL_OFFSET 0x7008
76#define IVTV_REG_I2C_GETSDA_OFFSET 0x700c
77
78#ifndef I2C_ADAP_CLASS_TV_ANALOG
79#define I2C_ADAP_CLASS_TV_ANALOG I2C_CLASS_TV_ANALOG
80#endif /* I2C_ADAP_CLASS_TV_ANALOG */
81
82#define IVTV_CS53L32A_I2C_ADDR 0x11
83#define IVTV_CX25840_I2C_ADDR 0x44
84#define IVTV_SAA7115_I2C_ADDR 0x21
85#define IVTV_SAA7127_I2C_ADDR 0x44
86#define IVTV_SAA717x_I2C_ADDR 0x21
87#define IVTV_MSP3400_I2C_ADDR 0x40
88#define IVTV_HAUPPAUGE_I2C_ADDR 0x50
89#define IVTV_WM8739_I2C_ADDR 0x1a
90#define IVTV_WM8775_I2C_ADDR 0x1b
91#define IVTV_TEA5767_I2C_ADDR 0x60
92#define IVTV_UPD64031A_I2C_ADDR 0x12
93#define IVTV_UPD64083_I2C_ADDR 0x5c
94#define IVTV_TDA985X_I2C_ADDR 0x5b
95
96/* This array should match the IVTV_HW_ defines */
97static const u8 hw_driverids[] = {
98 I2C_DRIVERID_CX25840,
99 I2C_DRIVERID_SAA711X,
100 I2C_DRIVERID_SAA7127,
101 I2C_DRIVERID_MSP3400,
102 I2C_DRIVERID_TUNER,
103 I2C_DRIVERID_WM8775,
104 I2C_DRIVERID_CS53L32A,
105 I2C_DRIVERID_TVEEPROM,
106 I2C_DRIVERID_SAA711X,
107 I2C_DRIVERID_TVAUDIO,
108 I2C_DRIVERID_UPD64031A,
109 I2C_DRIVERID_UPD64083,
110 I2C_DRIVERID_SAA717X,
111 I2C_DRIVERID_WM8739,
112 0 /* IVTV_HW_GPIO dummy driver ID */
113};
114
115/* This array should match the IVTV_HW_ defines */
116static const char * const hw_drivernames[] = {
117 "cx2584x",
118 "saa7115",
119 "saa7127",
120 "msp3400",
121 "tuner",
122 "wm8775",
123 "cs53l32a",
124 "tveeprom",
125 "saa7114",
126 "tvaudio",
127 "upd64031a",
128 "upd64083",
129 "saa717x",
130 "wm8739",
131 "gpio",
132};
133
134static int attach_inform(struct i2c_client *client)
135{
136 struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter);
137 int i;
138
139 IVTV_DEBUG_I2C("i2c client attach\n");
140 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
141 if (itv->i2c_clients[i] == NULL) {
142 itv->i2c_clients[i] = client;
143 break;
144 }
145 }
146 if (i == I2C_CLIENTS_MAX) {
147 IVTV_ERR("insufficient room for new I2C client!\n");
148 }
149 return 0;
150}
151
152static int detach_inform(struct i2c_client *client)
153{
154 int i;
155 struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter);
156
157 IVTV_DEBUG_I2C("i2c client detach\n");
158 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
159 if (itv->i2c_clients[i] == client) {
160 itv->i2c_clients[i] = NULL;
161 break;
162 }
163 }
164 IVTV_DEBUG_I2C("i2c detach [client=%s,%s]\n",
165 client->name, (i < I2C_CLIENTS_MAX) ? "ok" : "failed");
166
167 return 0;
168}
169
170/* Set the serial clock line to the desired state */
171static void ivtv_setscl(struct ivtv *itv, int state)
172{
173 /* write them out */
174 /* write bits are inverted */
175 write_reg(~state, IVTV_REG_I2C_SETSCL_OFFSET);
176}
177
178/* Set the serial data line to the desired state */
179static void ivtv_setsda(struct ivtv *itv, int state)
180{
181 /* write them out */
182 /* write bits are inverted */
183 write_reg(~state & 1, IVTV_REG_I2C_SETSDA_OFFSET);
184}
185
186/* Read the serial clock line */
187static int ivtv_getscl(struct ivtv *itv)
188{
189 return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1;
190}
191
192/* Read the serial data line */
193static int ivtv_getsda(struct ivtv *itv)
194{
195 return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1;
196}
197
198/* Implement a short delay by polling the serial clock line */
199static void ivtv_scldelay(struct ivtv *itv)
200{
201 int i;
202
203 for (i = 0; i < 5; ++i)
204 ivtv_getscl(itv);
205}
206
207/* Wait for the serial clock line to become set to a specific value */
208static int ivtv_waitscl(struct ivtv *itv, int val)
209{
210 int i;
211
212 ivtv_scldelay(itv);
213 for (i = 0; i < 1000; ++i) {
214 if (ivtv_getscl(itv) == val)
215 return 1;
216 }
217 return 0;
218}
219
220/* Wait for the serial data line to become set to a specific value */
221static int ivtv_waitsda(struct ivtv *itv, int val)
222{
223 int i;
224
225 ivtv_scldelay(itv);
226 for (i = 0; i < 1000; ++i) {
227 if (ivtv_getsda(itv) == val)
228 return 1;
229 }
230 return 0;
231}
232
233/* Wait for the slave to issue an ACK */
234static int ivtv_ack(struct ivtv *itv)
235{
236 int ret = 0;
237
238 if (ivtv_getscl(itv) == 1) {
239 IVTV_DEBUG_I2C("SCL was high starting an ack\n");
240 ivtv_setscl(itv, 0);
241 if (!ivtv_waitscl(itv, 0)) {
242 IVTV_DEBUG_I2C("Could not set SCL low starting an ack\n");
243 return -EREMOTEIO;
244 }
245 }
246 ivtv_setsda(itv, 1);
247 ivtv_scldelay(itv);
248 ivtv_setscl(itv, 1);
249 if (!ivtv_waitsda(itv, 0)) {
250 IVTV_DEBUG_I2C("Slave did not ack\n");
251 ret = -EREMOTEIO;
252 }
253 ivtv_setscl(itv, 0);
254 if (!ivtv_waitscl(itv, 0)) {
255 IVTV_DEBUG_I2C("Failed to set SCL low after ACK\n");
256 ret = -EREMOTEIO;
257 }
258 return ret;
259}
260
261/* Write a single byte to the i2c bus and wait for the slave to ACK */
262static int ivtv_sendbyte(struct ivtv *itv, unsigned char byte)
263{
264 int i, bit;
265
266 IVTV_DEBUG_I2C("write %x\n",byte);
267 for (i = 0; i < 8; ++i, byte<<=1) {
268 ivtv_setscl(itv, 0);
269 if (!ivtv_waitscl(itv, 0)) {
270 IVTV_DEBUG_I2C("Error setting SCL low\n");
271 return -EREMOTEIO;
272 }
273 bit = (byte>>7)&1;
274 ivtv_setsda(itv, bit);
275 if (!ivtv_waitsda(itv, bit)) {
276 IVTV_DEBUG_I2C("Error setting SDA\n");
277 return -EREMOTEIO;
278 }
279 ivtv_setscl(itv, 1);
280 if (!ivtv_waitscl(itv, 1)) {
281 IVTV_DEBUG_I2C("Slave not ready for bit\n");
282 return -EREMOTEIO;
283 }
284 }
285 ivtv_setscl(itv, 0);
286 if (!ivtv_waitscl(itv, 0)) {
287 IVTV_DEBUG_I2C("Error setting SCL low\n");
288 return -EREMOTEIO;
289 }
290 return ivtv_ack(itv);
291}
292
293/* Read a byte from the i2c bus and send a NACK if applicable (i.e. for the
294 final byte) */
295static int ivtv_readbyte(struct ivtv *itv, unsigned char *byte, int nack)
296{
297 int i;
298
299 *byte = 0;
300
301 ivtv_setsda(itv, 1);
302 ivtv_scldelay(itv);
303 for (i = 0; i < 8; ++i) {
304 ivtv_setscl(itv, 0);
305 ivtv_scldelay(itv);
306 ivtv_setscl(itv, 1);
307 if (!ivtv_waitscl(itv, 1)) {
308 IVTV_DEBUG_I2C("Error setting SCL high\n");
309 return -EREMOTEIO;
310 }
311 *byte = ((*byte)<<1)|ivtv_getsda(itv);
312 }
313 ivtv_setscl(itv, 0);
314 ivtv_scldelay(itv);
315 ivtv_setsda(itv, nack);
316 ivtv_scldelay(itv);
317 ivtv_setscl(itv, 1);
318 ivtv_scldelay(itv);
319 ivtv_setscl(itv, 0);
320 ivtv_scldelay(itv);
321 IVTV_DEBUG_I2C("read %x\n",*byte);
322 return 0;
323}
324
325/* Issue a start condition on the i2c bus to alert slaves to prepare for
326 an address write */
327static int ivtv_start(struct ivtv *itv)
328{
329 int sda;
330
331 sda = ivtv_getsda(itv);
332 if (sda != 1) {
333 IVTV_DEBUG_I2C("SDA was low at start\n");
334 ivtv_setsda(itv, 1);
335 if (!ivtv_waitsda(itv, 1)) {
336 IVTV_DEBUG_I2C("SDA stuck low\n");
337 return -EREMOTEIO;
338 }
339 }
340 if (ivtv_getscl(itv) != 1) {
341 ivtv_setscl(itv, 1);
342 if (!ivtv_waitscl(itv, 1)) {
343 IVTV_DEBUG_I2C("SCL stuck low at start\n");
344 return -EREMOTEIO;
345 }
346 }
347 ivtv_setsda(itv, 0);
348 ivtv_scldelay(itv);
349 return 0;
350}
351
352/* Issue a stop condition on the i2c bus to release it */
353static int ivtv_stop(struct ivtv *itv)
354{
355 int i;
356
357 if (ivtv_getscl(itv) != 0) {
358 IVTV_DEBUG_I2C("SCL not low when stopping\n");
359 ivtv_setscl(itv, 0);
360 if (!ivtv_waitscl(itv, 0)) {
361 IVTV_DEBUG_I2C("SCL could not be set low\n");
362 }
363 }
364 ivtv_setsda(itv, 0);
365 ivtv_scldelay(itv);
366 ivtv_setscl(itv, 1);
367 if (!ivtv_waitscl(itv, 1)) {
368 IVTV_DEBUG_I2C("SCL could not be set high\n");
369 return -EREMOTEIO;
370 }
371 ivtv_scldelay(itv);
372 ivtv_setsda(itv, 1);
373 if (!ivtv_waitsda(itv, 1)) {
374 IVTV_DEBUG_I2C("resetting I2C\n");
375 for (i = 0; i < 16; ++i) {
376 ivtv_setscl(itv, 0);
377 ivtv_scldelay(itv);
378 ivtv_setscl(itv, 1);
379 ivtv_scldelay(itv);
380 ivtv_setsda(itv, 1);
381 }
382 ivtv_waitsda(itv, 1);
383 return -EREMOTEIO;
384 }
385 return 0;
386}
387
388/* Write a message to the given i2c slave. do_stop may be 0 to prevent
389 issuing the i2c stop condition (when following with a read) */
390static int ivtv_write(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len, int do_stop)
391{
392 int retry, ret = -EREMOTEIO;
393 u32 i;
394
395 for (retry = 0; ret != 0 && retry < 8; ++retry) {
396 ret = ivtv_start(itv);
397
398 if (ret == 0) {
399 ret = ivtv_sendbyte(itv, addr<<1);
400 for (i = 0; ret == 0 && i < len; ++i)
401 ret = ivtv_sendbyte(itv, data[i]);
402 }
403 if (ret != 0 || do_stop) {
404 ivtv_stop(itv);
405 }
406 }
407 if (ret)
408 IVTV_DEBUG_I2C("i2c write to %x failed\n", addr);
409 return ret;
410}
411
412/* Read data from the given i2c slave. A stop condition is always issued. */
413static int ivtv_read(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len)
414{
415 int retry, ret = -EREMOTEIO;
416 u32 i;
417
418 for (retry = 0; ret != 0 && retry < 8; ++retry) {
419 ret = ivtv_start(itv);
420 if (ret == 0)
421 ret = ivtv_sendbyte(itv, (addr << 1) | 1);
422 for (i = 0; ret == 0 && i < len; ++i) {
423 ret = ivtv_readbyte(itv, &data[i], i == len - 1);
424 }
425 ivtv_stop(itv);
426 }
427 if (ret)
428 IVTV_DEBUG_I2C("i2c read from %x failed\n", addr);
429 return ret;
430}
431
432/* Kernel i2c transfer implementation. Takes a number of messages to be read
433 or written. If a read follows a write, this will occur without an
434 intervening stop condition */
435static int ivtv_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
436{
437 struct ivtv *itv = i2c_get_adapdata(i2c_adap);
438 int retval;
439 int i;
440
441 mutex_lock(&itv->i2c_bus_lock);
442 for (i = retval = 0; retval == 0 && i < num; i++) {
443 if (msgs[i].flags & I2C_M_RD)
444 retval = ivtv_read(itv, msgs[i].addr, msgs[i].buf, msgs[i].len);
445 else {
446 /* if followed by a read, don't stop */
447 int stop = !(i + 1 < num && msgs[i + 1].flags == I2C_M_RD);
448
449 retval = ivtv_write(itv, msgs[i].addr, msgs[i].buf, msgs[i].len, stop);
450 }
451 }
452 mutex_unlock(&itv->i2c_bus_lock);
453 return retval ? retval : num;
454}
455
456/* Kernel i2c capabilities */
457static u32 ivtv_functionality(struct i2c_adapter *adap)
458{
459 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
460}
461
462static struct i2c_algorithm ivtv_algo = {
463 .master_xfer = ivtv_xfer,
464 .functionality = ivtv_functionality,
465};
466
467/* template for our bit-banger */
468static struct i2c_adapter ivtv_i2c_adap_hw_template = {
469 .name = "ivtv i2c driver",
470 .id = I2C_HW_B_CX2341X,
471 .algo = &ivtv_algo,
472 .algo_data = NULL, /* filled from template */
473 .client_register = attach_inform,
474 .client_unregister = detach_inform,
475 .owner = THIS_MODULE,
476#ifdef I2C_ADAP_CLASS_TV_ANALOG
477 .class = I2C_ADAP_CLASS_TV_ANALOG,
478#endif
479};
480
481static void ivtv_setscl_old(void *data, int state)
482{
483 struct ivtv *itv = (struct ivtv *)data;
484
485 if (state)
486 itv->i2c_state |= 0x01;
487 else
488 itv->i2c_state &= ~0x01;
489
490 /* write them out */
491 /* write bits are inverted */
492 write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSCL_OFFSET);
493}
494
495static void ivtv_setsda_old(void *data, int state)
496{
497 struct ivtv *itv = (struct ivtv *)data;
498
499 if (state)
500 itv->i2c_state |= 0x01;
501 else
502 itv->i2c_state &= ~0x01;
503
504 /* write them out */
505 /* write bits are inverted */
506 write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSDA_OFFSET);
507}
508
509static int ivtv_getscl_old(void *data)
510{
511 struct ivtv *itv = (struct ivtv *)data;
512
513 return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1;
514}
515
516static int ivtv_getsda_old(void *data)
517{
518 struct ivtv *itv = (struct ivtv *)data;
519
520 return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1;
521}
522
523/* template for i2c-algo-bit */
524static struct i2c_adapter ivtv_i2c_adap_template = {
525 .name = "ivtv i2c driver",
526 .id = I2C_HW_B_CX2341X, /* algo-bit is OR'd with this */
527 .algo = NULL, /* set by i2c-algo-bit */
528 .algo_data = NULL, /* filled from template */
529 .client_register = attach_inform,
530 .client_unregister = detach_inform,
531 .owner = THIS_MODULE,
532#ifdef I2C_ADAP_CLASS_TV_ANALOG
533 .class = I2C_ADAP_CLASS_TV_ANALOG,
534#endif
535};
536
537static struct i2c_algo_bit_data ivtv_i2c_algo_template = {
538	NULL,			/* data pointer; set to the ivtv struct in init_ivtv_i2c() */
539	ivtv_setsda_old,	/* setsda function */
540	ivtv_setscl_old,	/* setscl function */
541	ivtv_getsda_old,	/* getsda function */
542	ivtv_getscl_old,	/* getscl function */
543 10, /* udelay */
544 200 /* timeout */
545};
546
547static struct i2c_client ivtv_i2c_client_template = {
548 .name = "ivtv internal",
549};
550
551int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg)
552{
553 struct i2c_client *client;
554 int retval;
555 int i;
556
557 IVTV_DEBUG_I2C("call_i2c_client addr=%02x\n", addr);
558 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
559 client = itv->i2c_clients[i];
560 if (client == NULL) {
561 continue;
562 }
563 if (client->driver->command == NULL) {
564 continue;
565 }
566 if (addr == client->addr) {
567 retval = client->driver->command(client, cmd, arg);
568 return retval;
569 }
570 }
571 if (cmd != VIDIOC_G_CHIP_IDENT)
572 IVTV_ERR("i2c addr 0x%02x not found for command 0x%x!\n", addr, cmd);
573 return -ENODEV;
574}
575
576/* Find the i2c device based on the driver ID and return
577 its i2c address or -ENODEV if no matching device was found. */
578static int ivtv_i2c_id_addr(struct ivtv *itv, u32 id)
579{
580 struct i2c_client *client;
581 int retval = -ENODEV;
582 int i;
583
584 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
585 client = itv->i2c_clients[i];
586 if (client == NULL)
587 continue;
588 if (id == client->driver->id) {
589 retval = client->addr;
590 break;
591 }
592 }
593 return retval;
594}
595
596/* Find the i2c device name matching the DRIVERID */
597static const char *ivtv_i2c_id_name(u32 id)
598{
599 int i;
600
601 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
602 if (hw_driverids[i] == id)
603 return hw_drivernames[i];
604 return "unknown device";
605}
606
607/* Find the i2c device name matching the IVTV_HW_ flag */
608static const char *ivtv_i2c_hw_name(u32 hw)
609{
610 int i;
611
612 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
613 if (1 << i == hw)
614 return hw_drivernames[i];
615 return "unknown device";
616}
617
618/* Find the i2c device matching the IVTV_HW_ flag and return
619 its i2c address or -ENODEV if no matching device was found. */
620int ivtv_i2c_hw_addr(struct ivtv *itv, u32 hw)
621{
622 int i;
623
624 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
625 if (1 << i == hw)
626 return ivtv_i2c_id_addr(itv, hw_driverids[i]);
627 return -ENODEV;
628}
629
630/* Calls i2c device based on IVTV_HW_ flag. If hw == 0, then do nothing.
631 If hw == IVTV_HW_GPIO then call the gpio handler. */
632int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg)
633{
634 int addr;
635
636 if (hw == IVTV_HW_GPIO)
637 return ivtv_gpio(itv, cmd, arg);
638 if (hw == 0)
639 return 0;
640
641 addr = ivtv_i2c_hw_addr(itv, hw);
642 if (addr < 0) {
643 IVTV_ERR("i2c hardware 0x%08x (%s) not found for command 0x%x!\n",
644 hw, ivtv_i2c_hw_name(hw), cmd);
645 return addr;
646 }
647 return ivtv_call_i2c_client(itv, addr, cmd, arg);
648}
649
650/* Calls i2c device based on I2C driver ID. */
651int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg)
652{
653 int addr;
654
655 addr = ivtv_i2c_id_addr(itv, id);
656 if (addr < 0) {
657 if (cmd != VIDIOC_G_CHIP_IDENT)
658 IVTV_ERR("i2c ID 0x%08x (%s) not found for command 0x%x!\n",
659 id, ivtv_i2c_id_name(id), cmd);
660 return addr;
661 }
662 return ivtv_call_i2c_client(itv, addr, cmd, arg);
663}
664
665int ivtv_cx25840(struct ivtv *itv, unsigned int cmd, void *arg)
666{
667 return ivtv_call_i2c_client(itv, IVTV_CX25840_I2C_ADDR, cmd, arg);
668}
669
670int ivtv_saa7115(struct ivtv *itv, unsigned int cmd, void *arg)
671{
672 return ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, cmd, arg);
673}
674
675int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg)
676{
677 return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg);
678}
679
680int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg)
681{
682 return ivtv_call_i2c_client(itv, IVTV_SAA717x_I2C_ADDR, cmd, arg);
683}
684
685int ivtv_upd64031a(struct ivtv *itv, unsigned int cmd, void *arg)
686{
687 return ivtv_call_i2c_client(itv, IVTV_UPD64031A_I2C_ADDR, cmd, arg);
688}
689
690int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg)
691{
692 return ivtv_call_i2c_client(itv, IVTV_UPD64083_I2C_ADDR, cmd, arg);
693}
694
695/* broadcast cmd for all I2C clients and for the gpio subsystem */
696void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg)
697{
698 if (itv->i2c_adap.algo == NULL) {
699 IVTV_ERR("adapter is not set");
700 return;
701 }
702 i2c_clients_command(&itv->i2c_adap, cmd, arg);
703 if (itv->hw_flags & IVTV_HW_GPIO)
704 ivtv_gpio(itv, cmd, arg);
705}
706
707/* init + register i2c algo-bit adapter */
708int __devinit init_ivtv_i2c(struct ivtv *itv)
709{
710 IVTV_DEBUG_I2C("i2c init\n");
711
712 if (itv->options.newi2c > 0) {
713 memcpy(&itv->i2c_adap, &ivtv_i2c_adap_hw_template,
714 sizeof(struct i2c_adapter));
715 } else {
716 memcpy(&itv->i2c_adap, &ivtv_i2c_adap_template,
717 sizeof(struct i2c_adapter));
718 memcpy(&itv->i2c_algo, &ivtv_i2c_algo_template,
719 sizeof(struct i2c_algo_bit_data));
720 itv->i2c_algo.data = itv;
721 itv->i2c_adap.algo_data = &itv->i2c_algo;
722 }
723
724 sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), " #%d",
725 itv->num);
726 i2c_set_adapdata(&itv->i2c_adap, itv);
727
728 memcpy(&itv->i2c_client, &ivtv_i2c_client_template,
729 sizeof(struct i2c_client));
730 itv->i2c_client.adapter = &itv->i2c_adap;
731 itv->i2c_adap.dev.parent = &itv->dev->dev;
732
733 IVTV_DEBUG_I2C("setting scl and sda to 1\n");
734 ivtv_setscl(itv, 1);
735 ivtv_setsda(itv, 1);
736
737 if (itv->options.newi2c > 0)
738 return i2c_add_adapter(&itv->i2c_adap);
739 else
740 return i2c_bit_add_bus(&itv->i2c_adap);
741}
742
743void __devexit exit_ivtv_i2c(struct ivtv *itv)
744{
745 IVTV_DEBUG_I2C("i2c exit\n");
746
747 i2c_del_adapter(&itv->i2c_adap);
748}
diff --git a/drivers/media/video/ivtv/ivtv-i2c.h b/drivers/media/video/ivtv/ivtv-i2c.h
new file mode 100644
index 000000000000..5d210adb5c52
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-i2c.h
@@ -0,0 +1,36 @@
1/*
2 I2C functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21int ivtv_cx25840(struct ivtv *itv, unsigned int cmd, void *arg);
22int ivtv_saa7115(struct ivtv *itv, unsigned int cmd, void *arg);
23int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg);
24int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg);
25int ivtv_upd64031a(struct ivtv *itv, unsigned int cmd, void *arg);
26int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg);
27
28int ivtv_i2c_hw_addr(struct ivtv *itv, u32 hw);
29int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg);
30int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg);
31int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg);
32void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg);
33
34/* init + register i2c algo-bit adapter */
35int __devinit init_ivtv_i2c(struct ivtv *itv);
36void __devexit exit_ivtv_i2c(struct ivtv *itv);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
new file mode 100644
index 000000000000..794a6a02f82f
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -0,0 +1,1567 @@
1/*
2 ioctl system call
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-version.h"
23#include "ivtv-mailbox.h"
24#include "ivtv-i2c.h"
25#include "ivtv-queue.h"
26#include "ivtv-fileops.h"
27#include "ivtv-vbi.h"
28#include "ivtv-audio.h"
29#include "ivtv-video.h"
30#include "ivtv-streams.h"
31#include "ivtv-yuv.h"
32#include "ivtv-ioctl.h"
33#include "ivtv-gpio.h"
34#include "ivtv-controls.h"
35#include "ivtv-cards.h"
36#include <media/saa7127.h>
37#include <media/tveeprom.h>
38#include <media/v4l2-chip-ident.h>
39#include <linux/dvb/audio.h>
40#include <linux/i2c-id.h>
41
42u16 service2vbi(int type)
43{
44 switch (type) {
45 case V4L2_SLICED_TELETEXT_B:
46 return IVTV_SLICED_TYPE_TELETEXT_B;
47 case V4L2_SLICED_CAPTION_525:
48 return IVTV_SLICED_TYPE_CAPTION_525;
49 case V4L2_SLICED_WSS_625:
50 return IVTV_SLICED_TYPE_WSS_625;
51 case V4L2_SLICED_VPS:
52 return IVTV_SLICED_TYPE_VPS;
53 default:
54 return 0;
55 }
56}
57
58static int valid_service_line(int field, int line, int is_pal)
59{
60 return (is_pal && line >= 6 && (line != 23 || field == 0)) ||
61 (!is_pal && line >= 10 && line < 22);
62}
63
64static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
65{
66 u16 valid_set = (is_pal ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525);
67 int i;
68
69 set = set & valid_set;
70 if (set == 0 || !valid_service_line(field, line, is_pal)) {
71 return 0;
72 }
73 if (!is_pal) {
74 if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
75 return V4L2_SLICED_CAPTION_525;
76 }
77 else {
78 if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS))
79 return V4L2_SLICED_VPS;
80 if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625))
81 return V4L2_SLICED_WSS_625;
82 if (line == 23)
83 return 0;
84 }
85 for (i = 0; i < 32; i++) {
86 if ((1 << i) & set)
87 return 1 << i;
88 }
89 return 0;
90}
91
92void expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
93{
94 u16 set = fmt->service_set;
95 int f, l;
96
97 fmt->service_set = 0;
98 for (f = 0; f < 2; f++) {
99 for (l = 0; l < 24; l++) {
100 fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal);
101 }
102 }
103}
104
105static int check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
106{
107 int f, l;
108 u16 set = 0;
109
110 for (f = 0; f < 2; f++) {
111 for (l = 0; l < 24; l++) {
112 fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal);
113 set |= fmt->service_lines[f][l];
114 }
115 }
116 return set != 0;
117}
118
119u16 get_service_set(struct v4l2_sliced_vbi_format *fmt)
120{
121 int f, l;
122 u16 set = 0;
123
124 for (f = 0; f < 2; f++) {
125 for (l = 0; l < 24; l++) {
126 set |= fmt->service_lines[f][l];
127 }
128 }
129 return set;
130}
131
132static const struct {
133 v4l2_std_id std;
134 char *name;
135} enum_stds[] = {
136 { V4L2_STD_PAL_BG | V4L2_STD_PAL_H, "PAL-BGH" },
137 { V4L2_STD_PAL_DK, "PAL-DK" },
138 { V4L2_STD_PAL_I, "PAL-I" },
139 { V4L2_STD_PAL_M, "PAL-M" },
140 { V4L2_STD_PAL_N, "PAL-N" },
141 { V4L2_STD_PAL_Nc, "PAL-Nc" },
142 { V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, "SECAM-BGH" },
143 { V4L2_STD_SECAM_DK, "SECAM-DK" },
144 { V4L2_STD_SECAM_L, "SECAM-L" },
145 { V4L2_STD_SECAM_LC, "SECAM-L'" },
146 { V4L2_STD_NTSC_M, "NTSC-M" },
147 { V4L2_STD_NTSC_M_JP, "NTSC-J" },
148 { V4L2_STD_NTSC_M_KR, "NTSC-K" },
149};
150
151static const struct v4l2_standard ivtv_std_60hz =
152{
153 .frameperiod = {.numerator = 1001, .denominator = 30000},
154 .framelines = 525,
155};
156
157static const struct v4l2_standard ivtv_std_50hz =
158{
159 .frameperiod = {.numerator = 1, .denominator = 25},
160 .framelines = 625,
161};
162
163void ivtv_set_osd_alpha(struct ivtv *itv)
164{
165 ivtv_vapi(itv, CX2341X_OSD_SET_GLOBAL_ALPHA, 3,
166 itv->osd_global_alpha_state, itv->osd_global_alpha, !itv->osd_local_alpha_state);
167 ivtv_vapi(itv, CX2341X_OSD_SET_CHROMA_KEY, 2, itv->osd_color_key_state, itv->osd_color_key);
168}
169
170int ivtv_set_speed(struct ivtv *itv, int speed)
171{
172 u32 data[CX2341X_MBOX_MAX_DATA];
173 struct ivtv_stream *s;
174 int single_step = (speed == 1 || speed == -1);
175 DEFINE_WAIT(wait);
176
177 if (speed == 0) speed = 1000;
178
179 /* No change? */
180 if (speed == itv->speed && !single_step)
181 return 0;
182
183 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
184
185 if (single_step && (speed < 0) == (itv->speed < 0)) {
186 /* Single step video and no need to change direction */
187 ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
188 itv->speed = speed;
189 return 0;
190 }
191 if (single_step)
192 /* Need to change direction */
193 speed = speed < 0 ? -1000 : 1000;
194
195 data[0] = (speed > 1000 || speed < -1000) ? 0x80000000 : 0;
196 data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0;
197 data[1] = (speed < 0);
198 data[2] = speed < 0 ? 3 : 7;
199 data[3] = itv->params.video_b_frames;
200 data[4] = (speed == 1500 || speed == 500) ? itv->speed_mute_audio : 0;
201 data[5] = 0;
202 data[6] = 0;
203
204 if (speed == 1500 || speed == -1500) data[0] |= 1;
205 else if (speed == 2000 || speed == -2000) data[0] |= 2;
206 else if (speed > -1000 && speed < 0) data[0] |= (-1000 / speed);
207 else if (speed < 1000 && speed > 0) data[0] |= (1000 / speed);
208
209	/* If the decoder is not running, just record the new speed; otherwise change it safely below */
210 if (atomic_read(&itv->decoding) > 0) {
211 int got_sig = 0;
212
213 /* Stop all DMA and decoding activity */
214 ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
215
216 /* Wait for any DMA to finish */
217 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
218 while (itv->i_flags & IVTV_F_I_DMA) {
219 got_sig = signal_pending(current);
220 if (got_sig)
221 break;
222 got_sig = 0;
223 schedule();
224 }
225 finish_wait(&itv->dma_waitq, &wait);
226 if (got_sig)
227 return -EINTR;
228
229 /* Change Speed safely */
230 ivtv_api(itv, CX2341X_DEC_SET_PLAYBACK_SPEED, 7, data);
231 IVTV_DEBUG_INFO("Setting Speed to 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
232 data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
233 }
234 if (single_step) {
235 speed = (speed < 0) ? -1 : 1;
236 ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
237 }
238 itv->speed = speed;
239 return 0;
240}
241
242static int ivtv_validate_speed(int cur_speed, int new_speed)
243{
244 int fact = new_speed < 0 ? -1 : 1;
245 int s;
246
247 if (new_speed < 0) new_speed = -new_speed;
248 if (cur_speed < 0) cur_speed = -cur_speed;
249
250 if (cur_speed <= new_speed) {
251 if (new_speed > 1500) return fact * 2000;
252 if (new_speed > 1000) return fact * 1500;
253 }
254 else {
255 if (new_speed >= 2000) return fact * 2000;
256 if (new_speed >= 1500) return fact * 1500;
257 if (new_speed >= 1000) return fact * 1000;
258 }
259 if (new_speed == 0) return 1000;
260 if (new_speed == 1 || new_speed == 1000) return fact * new_speed;
261
262 s = new_speed;
263 new_speed = 1000 / new_speed;
264 if (1000 / cur_speed == new_speed)
265 new_speed += (cur_speed < s) ? -1 : 1;
266 if (new_speed > 60) return 1000 / (fact * 60);
267 return 1000 / (fact * new_speed);
268}
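
/* Worked trace, for illustration: ivtv_validate_speed(1000, 300) takes the
 * slow-speed path, rounds 1000/300 down to 3 and returns 1000/3 = 333, so a
 * requested 0.3x playback is snapped to one third of normal speed. */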
269
270static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
271 struct video_command *vc, int try)
272{
273 struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
274
275 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
276 return -EINVAL;
277
278 switch (vc->cmd) {
279 case VIDEO_CMD_PLAY: {
280 vc->flags = 0;
281 vc->play.speed = ivtv_validate_speed(itv->speed, vc->play.speed);
282 if (vc->play.speed < 0)
283 vc->play.format = VIDEO_PLAY_FMT_GOP;
284 if (try) break;
285
286 if (ivtv_set_output_mode(itv, OUT_MPG) != OUT_MPG)
287 return -EBUSY;
288 return ivtv_start_decoding(id, vc->play.speed);
289 }
290
291 case VIDEO_CMD_STOP:
292 vc->flags &= VIDEO_CMD_STOP_IMMEDIATELY|VIDEO_CMD_STOP_TO_BLACK;
293 if (vc->flags & VIDEO_CMD_STOP_IMMEDIATELY)
294 vc->stop.pts = 0;
295 if (try) break;
296 if (atomic_read(&itv->decoding) == 0)
297 return 0;
298 if (itv->output_mode != OUT_MPG)
299 return -EBUSY;
300
301 itv->output_mode = OUT_NONE;
302 return ivtv_stop_v4l2_decode_stream(s, vc->flags, vc->stop.pts);
303
304 case VIDEO_CMD_FREEZE:
305 vc->flags &= VIDEO_CMD_FREEZE_TO_BLACK;
306 if (try) break;
307 if (itv->output_mode != OUT_MPG)
308 return -EBUSY;
309 if (atomic_read(&itv->decoding) > 0) {
310 ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1,
311 (vc->flags & VIDEO_CMD_FREEZE_TO_BLACK) ? 1 : 0);
312 }
313 break;
314
315 case VIDEO_CMD_CONTINUE:
316 vc->flags = 0;
317 if (try) break;
318 if (itv->output_mode != OUT_MPG)
319 return -EBUSY;
320 if (atomic_read(&itv->decoding) > 0) {
321 ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 0);
322 }
323 break;
324
325 default:
326 return -EINVAL;
327 }
328 return 0;
329}
330
331static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg)
332{
333 struct v4l2_register *regs = arg;
334 unsigned long flags;
335 volatile u8 __iomem *reg_start;
336
337 if (!capable(CAP_SYS_ADMIN))
338 return -EPERM;
339 if (regs->reg >= IVTV_REG_OFFSET && regs->reg < IVTV_REG_OFFSET + IVTV_REG_SIZE)
340 reg_start = itv->reg_mem - IVTV_REG_OFFSET;
341 else if (itv->has_cx23415 && regs->reg >= IVTV_DECODER_OFFSET &&
342 regs->reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE)
343 reg_start = itv->dec_mem - IVTV_DECODER_OFFSET;
344 else if (regs->reg >= 0 && regs->reg < IVTV_ENCODER_SIZE)
345 reg_start = itv->enc_mem;
346 else
347 return -EINVAL;
348
349 spin_lock_irqsave(&ivtv_cards_lock, flags);
350 if (cmd == VIDIOC_DBG_G_REGISTER) {
351 regs->val = readl(regs->reg + reg_start);
352 } else {
353 writel(regs->val, regs->reg + reg_start);
354 }
355 spin_unlock_irqrestore(&ivtv_cards_lock, flags);
356 return 0;
357}
358
359static int ivtv_get_fmt(struct ivtv *itv, int streamtype, struct v4l2_format *fmt)
360{
361 switch (fmt->type) {
362 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
363 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
364 return -EINVAL;
365 fmt->fmt.pix.left = itv->main_rect.left;
366 fmt->fmt.pix.top = itv->main_rect.top;
367 fmt->fmt.pix.width = itv->main_rect.width;
368 fmt->fmt.pix.height = itv->main_rect.height;
369 fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
370 fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
371 if (itv->output_mode == OUT_UDMA_YUV) {
372 switch (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) {
373 case IVTV_YUV_MODE_INTERLACED:
374 fmt->fmt.pix.field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) ?
375 V4L2_FIELD_INTERLACED_BT : V4L2_FIELD_INTERLACED_TB;
376 break;
377 case IVTV_YUV_MODE_PROGRESSIVE:
378 fmt->fmt.pix.field = V4L2_FIELD_NONE;
379 break;
380 default:
381 fmt->fmt.pix.field = V4L2_FIELD_ANY;
382 break;
383 }
384 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12;
385 /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
386 fmt->fmt.pix.sizeimage =
387 fmt->fmt.pix.height * fmt->fmt.pix.width +
388 fmt->fmt.pix.height * (fmt->fmt.pix.width / 2);
389 }
390 else if (itv->output_mode == OUT_YUV ||
391 streamtype == IVTV_ENC_STREAM_TYPE_YUV ||
392 streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
393 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12;
394 /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
395 fmt->fmt.pix.sizeimage =
396 fmt->fmt.pix.height * fmt->fmt.pix.width +
397 fmt->fmt.pix.height * (fmt->fmt.pix.width / 2);
398 } else {
399 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
400 fmt->fmt.pix.sizeimage = 128 * 1024;
401 }
402 break;
403
404 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
405 fmt->fmt.pix.left = 0;
406 fmt->fmt.pix.top = 0;
407 fmt->fmt.pix.width = itv->params.width;
408 fmt->fmt.pix.height = itv->params.height;
409 fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
410 fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
411 if (streamtype == IVTV_ENC_STREAM_TYPE_YUV ||
412 streamtype == IVTV_DEC_STREAM_TYPE_YUV) {
413 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12;
414 /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
415 fmt->fmt.pix.sizeimage =
416 fmt->fmt.pix.height * fmt->fmt.pix.width +
417 fmt->fmt.pix.height * (fmt->fmt.pix.width / 2);
418 } else {
419 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
420 fmt->fmt.pix.sizeimage = 128 * 1024;
421 }
422 break;
423
424 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
425 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
426 return -EINVAL;
427 fmt->fmt.win.chromakey = itv->osd_color_key;
428 fmt->fmt.win.global_alpha = itv->osd_global_alpha;
429 break;
430
431 case V4L2_BUF_TYPE_VBI_CAPTURE:
432 fmt->fmt.vbi.sampling_rate = 27000000;
433 fmt->fmt.vbi.offset = 248;
434 fmt->fmt.vbi.samples_per_line = itv->vbi.raw_decoder_line_size - 4;
435 fmt->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
436 fmt->fmt.vbi.start[0] = itv->vbi.start[0];
437 fmt->fmt.vbi.start[1] = itv->vbi.start[1];
438 fmt->fmt.vbi.count[0] = fmt->fmt.vbi.count[1] = itv->vbi.count;
439 break;
440
441 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
442 {
443 struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
444
445 if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
446 return -EINVAL;
447 vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
448 memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
449 memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines));
450 if (itv->is_60hz) {
451 vbifmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
452 vbifmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
453 } else {
454 vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
455 vbifmt->service_lines[0][16] = V4L2_SLICED_VPS;
456 }
457 vbifmt->service_set = get_service_set(vbifmt);
458 break;
459 }
460
461 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
462 {
463 struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
464
465 vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
466 memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
467 memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines));
468
469 if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
470 vbifmt->service_set = itv->is_50hz ? V4L2_SLICED_VBI_625 :
471 V4L2_SLICED_VBI_525;
472 expand_service_set(vbifmt, itv->is_50hz);
473 break;
474 }
475
476 itv->video_dec_func(itv, VIDIOC_G_FMT, fmt);
477 vbifmt->service_set = get_service_set(vbifmt);
478 break;
479 }
480 case V4L2_BUF_TYPE_VBI_OUTPUT:
481 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
482 default:
483 return -EINVAL;
484 }
485 return 0;
486}
487
488static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype,
489 struct v4l2_format *fmt, int set_fmt)
490{
491 struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
492 u16 set;
493
494 if (fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
495 struct v4l2_rect r;
496 int field;
497
498 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
499 return -EINVAL;
500 field = fmt->fmt.pix.field;
501 r.top = fmt->fmt.pix.top;
502 r.left = fmt->fmt.pix.left;
503 r.width = fmt->fmt.pix.width;
504 r.height = fmt->fmt.pix.height;
505 ivtv_get_fmt(itv, streamtype, fmt);
506 if (itv->output_mode != OUT_UDMA_YUV) {
507 /* TODO: would setting the rect also be valid for this mode? */
508 fmt->fmt.pix.top = r.top;
509 fmt->fmt.pix.left = r.left;
510 fmt->fmt.pix.width = r.width;
511 fmt->fmt.pix.height = r.height;
512 }
513 if (itv->output_mode == OUT_UDMA_YUV) {
514 /* TODO: add checks for validity */
515 fmt->fmt.pix.field = field;
516 }
517 if (set_fmt) {
518 if (itv->output_mode == OUT_UDMA_YUV) {
519 switch (field) {
520 case V4L2_FIELD_NONE:
521 itv->yuv_info.lace_mode = IVTV_YUV_MODE_PROGRESSIVE;
522 break;
523 case V4L2_FIELD_ANY:
524 itv->yuv_info.lace_mode = IVTV_YUV_MODE_AUTO;
525 break;
526 case V4L2_FIELD_INTERLACED_BT:
527 itv->yuv_info.lace_mode =
528 IVTV_YUV_MODE_INTERLACED|IVTV_YUV_SYNC_ODD;
529 break;
530 case V4L2_FIELD_INTERLACED_TB:
531 default:
532 itv->yuv_info.lace_mode = IVTV_YUV_MODE_INTERLACED;
533 break;
534 }
535 itv->yuv_info.lace_sync_field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) == IVTV_YUV_SYNC_EVEN ? 0 : 1;
536
537 /* Force update of yuv registers */
538 itv->yuv_info.yuv_forced_update = 1;
539 return 0;
540 }
541 if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
542 r.width, r.height, r.left, r.top))
543 itv->main_rect = r;
544 else
545 return -EINVAL;
546 }
547 return 0;
548 }
549
550 if (fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY) {
551 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
552 return -EINVAL;
553 if (set_fmt) {
554 itv->osd_color_key = fmt->fmt.win.chromakey;
555 itv->osd_global_alpha = fmt->fmt.win.global_alpha;
556 ivtv_set_osd_alpha(itv);
557 }
558 return 0;
559 }
560
561 /* set window size */
562 if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
563 int w = fmt->fmt.pix.width;
564 int h = fmt->fmt.pix.height;
565
566 if (w > 720) w = 720;
567 else if (w < 1) w = 1;
568 if (h > (itv->is_50hz ? 576 : 480)) h = (itv->is_50hz ? 576 : 480);
569 else if (h < 2) h = 2;
570 ivtv_get_fmt(itv, streamtype, fmt);
571 fmt->fmt.pix.width = w;
572 fmt->fmt.pix.height = h;
573
574 if (!set_fmt || (itv->params.width == w && itv->params.height == h))
575 return 0;
576 if (atomic_read(&itv->capturing) > 0)
577 return -EBUSY;
578
579 itv->params.width = w;
580 itv->params.height = h;
581 if (w != 720 || h != (itv->is_50hz ? 576 : 480))
582 itv->params.video_temporal_filter = 0;
583 else
584 itv->params.video_temporal_filter = 8;
585 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt);
586 return ivtv_get_fmt(itv, streamtype, fmt);
587 }
588
589 /* set raw VBI format */
590 if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
591 if (set_fmt && streamtype == IVTV_ENC_STREAM_TYPE_VBI &&
592 itv->vbi.sliced_in->service_set &&
593 atomic_read(&itv->capturing) > 0) {
594 return -EBUSY;
595 }
596 if (set_fmt) {
597 itv->vbi.sliced_in->service_set = 0;
598 itv->video_dec_func(itv, VIDIOC_S_FMT, &itv->vbi.in);
599 }
600 return ivtv_get_fmt(itv, streamtype, fmt);
601 }
602
603	/* set sliced VBI output
604	   In principle the user could request that only certain
605	   VBI types are output and that the others are ignored,
606	   e.g. suppress CC in the even fields or only output
607	   WSS and no VPS. Currently, though, there is no such choice. */
608 if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
609 return ivtv_get_fmt(itv, streamtype, fmt);
610
611	/* anything other than sliced VBI capture is an error */
612 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
613 return -EINVAL;
614
615 if (streamtype == IVTV_DEC_STREAM_TYPE_VBI)
616 return ivtv_get_fmt(itv, streamtype, fmt);
617
618 /* set sliced VBI capture format */
619 vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
620 memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
621
622 if (vbifmt->service_set)
623 expand_service_set(vbifmt, itv->is_50hz);
624 set = check_service_set(vbifmt, itv->is_50hz);
625 vbifmt->service_set = get_service_set(vbifmt);
626
627 if (!set_fmt)
628 return 0;
629 if (set == 0)
630 return -EINVAL;
631 if (atomic_read(&itv->capturing) > 0 && itv->vbi.sliced_in->service_set == 0) {
632 return -EBUSY;
633 }
634 itv->video_dec_func(itv, VIDIOC_S_FMT, fmt);
635 memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
636 return 0;
637}
638
639static int ivtv_debug_ioctls(struct file *filp, unsigned int cmd, void *arg)
640{
641 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
642 struct ivtv *itv = id->itv;
643 struct v4l2_register *reg = arg;
644
645 switch (cmd) {
646 /* ioctls to allow direct access to the encoder registers for testing */
647 case VIDIOC_DBG_G_REGISTER:
648 if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
649 return ivtv_itvc(itv, cmd, arg);
650 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
651 return ivtv_i2c_id(itv, reg->match_chip, cmd, arg);
652 return ivtv_call_i2c_client(itv, reg->match_chip, cmd, arg);
653
654 case VIDIOC_DBG_S_REGISTER:
655 if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
656 return ivtv_itvc(itv, cmd, arg);
657 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
658 return ivtv_i2c_id(itv, reg->match_chip, cmd, arg);
659 return ivtv_call_i2c_client(itv, reg->match_chip, cmd, arg);
660
661 case VIDIOC_G_CHIP_IDENT: {
662 struct v4l2_chip_ident *chip = arg;
663
664 chip->ident = V4L2_IDENT_NONE;
665 chip->revision = 0;
666 if (reg->match_type == V4L2_CHIP_MATCH_HOST) {
667 if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) {
668 struct v4l2_chip_ident *chip = arg;
669
670 chip->ident = itv->has_cx23415 ? V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416;
671 }
672 return 0;
673 }
674 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
675 return ivtv_i2c_id(itv, reg->match_chip, cmd, arg);
676 if (reg->match_type == V4L2_CHIP_MATCH_I2C_ADDR)
677 return ivtv_call_i2c_client(itv, reg->match_chip, cmd, arg);
678 return -EINVAL;
679 }
680
681 case VIDIOC_INT_S_AUDIO_ROUTING: {
682 struct v4l2_routing *route = arg;
683
684 ivtv_audio_set_route(itv, route);
685 break;
686 }
687
688 case VIDIOC_INT_RESET:
689 ivtv_reset_ir_gpio(itv);
690 break;
691
692 default:
693 return -EINVAL;
694 }
695 return 0;
696}
697
698int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void *arg)
699{
700 struct ivtv_open_id *id = NULL;
701
702 if (filp) id = (struct ivtv_open_id *)filp->private_data;
703
704 switch (cmd) {
705 case VIDIOC_G_PRIORITY:
706 {
707 enum v4l2_priority *p = arg;
708
709 *p = v4l2_prio_max(&itv->prio);
710 break;
711 }
712
713 case VIDIOC_S_PRIORITY:
714 {
715 enum v4l2_priority *prio = arg;
716
717 return v4l2_prio_change(&itv->prio, &id->prio, *prio);
718 }
719
720 case VIDIOC_QUERYCAP:{
721 struct v4l2_capability *vcap = arg;
722
723 memset(vcap, 0, sizeof(*vcap));
724 strcpy(vcap->driver, IVTV_DRIVER_NAME); /* driver name */
725 strcpy(vcap->card, itv->card_name); /* card type */
726 strcpy(vcap->bus_info, pci_name(itv->dev)); /* bus info... */
727 vcap->version = IVTV_DRIVER_VERSION; /* version */
728 vcap->capabilities = itv->v4l2_cap; /* capabilities */
729
730		/* reserved fields must be set to 0 */
731 vcap->reserved[0] = vcap->reserved[1] =
732 vcap->reserved[2] = vcap->reserved[3] = 0;
733 break;
734 }
735
736 case VIDIOC_ENUMAUDIO:{
737 struct v4l2_audio *vin = arg;
738
739 return ivtv_get_audio_input(itv, vin->index, vin);
740 }
741
742 case VIDIOC_G_AUDIO:{
743 struct v4l2_audio *vin = arg;
744
745 vin->index = itv->audio_input;
746 return ivtv_get_audio_input(itv, vin->index, vin);
747 }
748
749 case VIDIOC_S_AUDIO:{
750 struct v4l2_audio *vout = arg;
751
752 if (vout->index >= itv->nof_audio_inputs)
753 return -EINVAL;
754 itv->audio_input = vout->index;
755 ivtv_audio_set_io(itv);
756 break;
757 }
758
759 case VIDIOC_ENUMAUDOUT:{
760 struct v4l2_audioout *vin = arg;
761
762 /* set it to defaults from our table */
763 return ivtv_get_audio_output(itv, vin->index, vin);
764 }
765
766 case VIDIOC_G_AUDOUT:{
767 struct v4l2_audioout *vin = arg;
768
769 vin->index = 0;
770 return ivtv_get_audio_output(itv, vin->index, vin);
771 }
772
773 case VIDIOC_S_AUDOUT:{
774 struct v4l2_audioout *vout = arg;
775
776 return ivtv_get_audio_output(itv, vout->index, vout);
777 }
778
779 case VIDIOC_ENUMINPUT:{
780 struct v4l2_input *vin = arg;
781
782 /* set it to defaults from our table */
783 return ivtv_get_input(itv, vin->index, vin);
784 }
785
786 case VIDIOC_ENUMOUTPUT:{
787 struct v4l2_output *vout = arg;
788
789 return ivtv_get_output(itv, vout->index, vout);
790 }
791
792 case VIDIOC_TRY_FMT:
793 case VIDIOC_S_FMT: {
794 struct v4l2_format *fmt = arg;
795
796 return ivtv_try_or_set_fmt(itv, id->type, fmt, cmd == VIDIOC_S_FMT);
797 }
798
799 case VIDIOC_G_FMT: {
800 struct v4l2_format *fmt = arg;
801 int type = fmt->type;
802
803 memset(fmt, 0, sizeof(*fmt));
804 fmt->type = type;
805 return ivtv_get_fmt(itv, id->type, fmt);
806 }
807
808 case VIDIOC_S_CROP: {
809 struct v4l2_crop *crop = arg;
810
811 if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
812 return -EINVAL;
813 return itv->video_dec_func(itv, VIDIOC_S_CROP, arg);
814 }
815
816 case VIDIOC_G_CROP: {
817 struct v4l2_crop *crop = arg;
818
819 if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
820 return -EINVAL;
821 return itv->video_dec_func(itv, VIDIOC_G_CROP, arg);
822 }
823
824 case VIDIOC_ENUM_FMT: {
825 static struct v4l2_fmtdesc formats[] = {
826 { 0, 0, 0,
827 "HM12 (YUV 4:1:1)", V4L2_PIX_FMT_HM12,
828 { 0, 0, 0, 0 }
829 },
830 { 1, 0, V4L2_FMT_FLAG_COMPRESSED,
831 "MPEG", V4L2_PIX_FMT_MPEG,
832 { 0, 0, 0, 0 }
833 }
834 };
835 struct v4l2_fmtdesc *fmt = arg;
836 enum v4l2_buf_type type = fmt->type;
837
838 switch (type) {
839 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
840 break;
841 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
842 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
843 return -EINVAL;
844 break;
845 default:
846 return -EINVAL;
847 }
848 if (fmt->index > 1)
849 return -EINVAL;
850 *fmt = formats[fmt->index];
851 fmt->type = type;
852 return 0;
853 }
854
855 case VIDIOC_G_INPUT:{
856 *(int *)arg = itv->active_input;
857 break;
858 }
859
860 case VIDIOC_S_INPUT:{
861 int inp = *(int *)arg;
862
863 if (inp < 0 || inp >= itv->nof_inputs)
864 return -EINVAL;
865
866 if (inp == itv->active_input) {
867 IVTV_DEBUG_INFO("Input unchanged\n");
868 break;
869 }
870 IVTV_DEBUG_INFO("Changing input from %d to %d\n",
871 itv->active_input, inp);
872
873 itv->active_input = inp;
874 /* Set the audio input to whatever is appropriate for the
875 input type. */
876 itv->audio_input = itv->card->video_inputs[inp].audio_index;
877
878 /* prevent others from messing with the streams until
879 we're finished changing inputs. */
880 ivtv_mute(itv);
881 ivtv_video_set_io(itv);
882 ivtv_audio_set_io(itv);
883 ivtv_unmute(itv);
884 break;
885 }
886
887 case VIDIOC_G_OUTPUT:{
888 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
889 return -EINVAL;
890 *(int *)arg = itv->active_output;
891 break;
892 }
893
894 case VIDIOC_S_OUTPUT:{
895 int outp = *(int *)arg;
896 struct v4l2_routing route;
897
898 if (outp >= itv->card->nof_outputs)
899 return -EINVAL;
900
901 if (outp == itv->active_output) {
902 IVTV_DEBUG_INFO("Output unchanged\n");
903 break;
904 }
905 IVTV_DEBUG_INFO("Changing output from %d to %d\n",
906 itv->active_output, outp);
907
908 itv->active_output = outp;
909 route.input = SAA7127_INPUT_TYPE_NORMAL;
910 route.output = itv->card->video_outputs[outp].video_output;
911 ivtv_saa7127(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route);
912 break;
913 }
914
915 case VIDIOC_G_FREQUENCY:{
916 struct v4l2_frequency *vf = arg;
917
918 if (vf->tuner != 0)
919 return -EINVAL;
920 ivtv_call_i2c_clients(itv, cmd, arg);
921 break;
922 }
923
924 case VIDIOC_S_FREQUENCY:{
925 struct v4l2_frequency vf = *(struct v4l2_frequency *)arg;
926
927 if (vf.tuner != 0)
928 return -EINVAL;
929
930 ivtv_mute(itv);
931 IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf.frequency);
932 ivtv_call_i2c_clients(itv, cmd, &vf);
933 ivtv_unmute(itv);
934 break;
935 }
936
937 case VIDIOC_ENUMSTD:{
938 struct v4l2_standard *vs = arg;
939 int idx = vs->index;
940
941 if (idx < 0 || idx >= ARRAY_SIZE(enum_stds))
942 return -EINVAL;
943
944 *vs = (enum_stds[idx].std & V4L2_STD_525_60) ?
945 ivtv_std_60hz : ivtv_std_50hz;
946 vs->index = idx;
947 vs->id = enum_stds[idx].std;
948 strcpy(vs->name, enum_stds[idx].name);
949 break;
950 }
951
952 case VIDIOC_G_STD:{
953 *(v4l2_std_id *) arg = itv->std;
954 break;
955 }
956
957 case VIDIOC_S_STD: {
958 v4l2_std_id std = *(v4l2_std_id *) arg;
959
960 if ((std & V4L2_STD_ALL) == 0)
961 return -EINVAL;
962
963 if (std == itv->std)
964 break;
965
966 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
967 atomic_read(&itv->capturing) > 0 ||
968 atomic_read(&itv->decoding) > 0) {
969 /* Switching standard would turn off the radio or mess
970 with already running streams, prevent that by
971 returning EBUSY. */
972 return -EBUSY;
973 }
974
975 itv->std = std;
976 itv->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0;
977 itv->params.is_50hz = itv->is_50hz = !itv->is_60hz;
978 itv->params.width = 720;
979 itv->params.height = itv->is_50hz ? 576 : 480;
980 itv->vbi.count = itv->is_50hz ? 18 : 12;
981 itv->vbi.start[0] = itv->is_50hz ? 6 : 10;
982 itv->vbi.start[1] = itv->is_50hz ? 318 : 273;
983 if (itv->hw_flags & IVTV_HW_CX25840) {
984 itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;
985 }
986 IVTV_DEBUG_INFO("Switching standard to %llx.\n", itv->std);
987
988 /* Tuner */
989 ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std);
990
991 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
992 /* set display standard */
993 itv->std_out = std;
994 itv->is_out_60hz = itv->is_60hz;
995 itv->is_out_50hz = itv->is_50hz;
996 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std_out);
997 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
998 itv->main_rect.left = itv->main_rect.top = 0;
999 itv->main_rect.width = 720;
1000 itv->main_rect.height = itv->params.height;
1001 ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
1002 720, itv->main_rect.height, 0, 0);
1003 }
1004 break;
1005 }
1006
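For orientation, a minimal user-space sketch of exercising the VIDIOC_S_STD path above. The device node is an assumption, and the call is rejected with EBUSY while the radio is in use or streams are capturing/decoding, exactly as the case above enforces.

	/* Hypothetical user-space sketch: request PAL on an ivtv capture node.
	 * The device path is an assumption; error handling is minimal. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int set_pal(void)
	{
		int fd = open("/dev/video0", O_RDWR);	/* assumed capture node */
		v4l2_std_id std = V4L2_STD_PAL;
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, VIDIOC_S_STD, &std);	/* fails with EBUSY while busy */
		close(fd);
		return ret;
	}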
1007 case VIDIOC_S_TUNER: { /* Setting tuner can only set audio mode */
1008 struct v4l2_tuner *vt = arg;
1009
1010 if (vt->index != 0)
1011 return -EINVAL;
1012
1013 ivtv_call_i2c_clients(itv, VIDIOC_S_TUNER, vt);
1014 break;
1015 }
1016
1017 case VIDIOC_G_TUNER: {
1018 struct v4l2_tuner *vt = arg;
1019
1020 if (vt->index != 0)
1021 return -EINVAL;
1022
1023 memset(vt, 0, sizeof(*vt));
1024 ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, vt);
1025
1026 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
1027 strcpy(vt->name, "ivtv Radio Tuner");
1028 vt->type = V4L2_TUNER_RADIO;
1029 } else {
1030 strcpy(vt->name, "ivtv TV Tuner");
1031 vt->type = V4L2_TUNER_ANALOG_TV;
1032 }
1033 break;
1034 }
1035
1036 case VIDIOC_G_SLICED_VBI_CAP: {
1037 struct v4l2_sliced_vbi_cap *cap = arg;
1038 int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
1039 int f, l;
1040 enum v4l2_buf_type type = cap->type;
1041
1042 memset(cap, 0, sizeof(*cap));
1043 cap->type = type;
1044 if (type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
1045 for (f = 0; f < 2; f++) {
1046 for (l = 0; l < 24; l++) {
1047 if (valid_service_line(f, l, itv->is_50hz)) {
1048 cap->service_lines[f][l] = set;
1049 }
1050 }
1051 }
1052 return 0;
1053 }
1054 if (type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
1055 if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT))
1056 return -EINVAL;
1057 if (itv->is_60hz) {
1058 cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
1059 cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
1060 } else {
1061 cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
1062 cap->service_lines[0][16] = V4L2_SLICED_VPS;
1063 }
1064 return 0;
1065 }
1066 return -EINVAL;
1067 }
1068
1069 case VIDIOC_G_ENC_INDEX: {
1070 struct v4l2_enc_idx *idx = arg;
1071 int i;
1072
1073 idx->entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX - itv->pgm_info_read_idx) %
1074 IVTV_MAX_PGM_INDEX;
1075 if (idx->entries > V4L2_ENC_IDX_ENTRIES)
1076 idx->entries = V4L2_ENC_IDX_ENTRIES;
1077 for (i = 0; i < idx->entries; i++) {
1078 idx->entry[i] = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX];
1079 }
1080 itv->pgm_info_read_idx = (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX;
1081 break;
1082 }
1083
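The entries calculation in VIDIOC_G_ENC_INDEX above is ordinary circular-buffer arithmetic; a standalone sketch of the formula, with the names chosen here being illustrative only:

	/* Number of unread entries between a read and a write cursor in a ring of
	 * size n; the "+ n" keeps the subtraction non-negative once write wraps. */
	static inline int ring_used(int write_idx, int read_idx, int n)
	{
		return (write_idx + n - read_idx) % n;
	}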
1084 case VIDIOC_ENCODER_CMD:
1085 case VIDIOC_TRY_ENCODER_CMD: {
1086 struct v4l2_encoder_cmd *enc = arg;
1087 int try = cmd == VIDIOC_TRY_ENCODER_CMD;
1088
1089 memset(&enc->raw, 0, sizeof(enc->raw));
1090 switch (enc->cmd) {
1091 case V4L2_ENC_CMD_START:
1092 enc->flags = 0;
1093 if (try)
1094 return 0;
1095 return ivtv_start_capture(id);
1096
1097 case V4L2_ENC_CMD_STOP:
1098 enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
1099 if (try)
1100 return 0;
1101 ivtv_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END);
1102 return 0;
1103
1104 case V4L2_ENC_CMD_PAUSE:
1105 enc->flags = 0;
1106 if (try)
1107 return 0;
1108 if (!atomic_read(&itv->capturing))
1109 return -EPERM;
1110 if (test_and_set_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
1111 return 0;
1112 ivtv_mute(itv);
1113 ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 0);
1114 break;
1115
1116 case V4L2_ENC_CMD_RESUME:
1117 enc->flags = 0;
1118 if (try)
1119 return 0;
1120 if (!atomic_read(&itv->capturing))
1121 return -EPERM;
1122 if (!test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
1123 return 0;
1124 ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1);
1125 ivtv_unmute(itv);
1126 break;
1127 default:
1128 return -EINVAL;
1129 }
1130 break;
1131 }
1132
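A hedged user-space sketch of driving the encoder-command path above, asking the encoder to stop at the end of the current GOP; the file descriptor is assumed to refer to an ivtv MPEG capture node.

	/* User-space sketch: request a stop at the next GOP boundary. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int stop_at_gop_end(int fd)
	{
		struct v4l2_encoder_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = V4L2_ENC_CMD_STOP;
		cmd.flags = V4L2_ENC_CMD_STOP_AT_GOP_END;
		/* VIDIOC_TRY_ENCODER_CMD would validate without acting */
		return ioctl(fd, VIDIOC_ENCODER_CMD, &cmd);
	}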
1133 case VIDIOC_G_FBUF: {
1134 struct v4l2_framebuffer *fb = arg;
1135
1136 memset(fb, 0, sizeof(*fb));
1137 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
1138 break;
1139 fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY |
1140 V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_GLOBAL_ALPHA;
1141 fb->fmt.pixelformat = itv->osd_pixelformat;
1142 fb->fmt.width = itv->osd_rect.width;
1143 fb->fmt.height = itv->osd_rect.height;
1144 fb->fmt.left = itv->osd_rect.left;
1145 fb->fmt.top = itv->osd_rect.top;
1146 fb->base = (void *)itv->osd_video_pbase;
1147 if (itv->osd_global_alpha_state)
1148 fb->flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA;
1149 if (itv->osd_local_alpha_state)
1150 fb->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
1151 if (itv->osd_color_key_state)
1152 fb->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
1153 break;
1154 }
1155
1156 case VIDIOC_S_FBUF: {
1157 struct v4l2_framebuffer *fb = arg;
1158
1159 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
1160 break;
1161 itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
1162 itv->osd_local_alpha_state = (fb->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) != 0;
1163 itv->osd_color_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0;
1164 break;
1165 }
1166
1167 case VIDIOC_LOG_STATUS:
1168 {
1169 int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT;
1170 struct v4l2_input vidin;
1171 struct v4l2_audio audin;
1172 int i;
1173
1174 IVTV_INFO("================= START STATUS CARD #%d =================\n", itv->num);
1175 if (itv->hw_flags & IVTV_HW_TVEEPROM) {
1176 struct tveeprom tv;
1177
1178 ivtv_read_eeprom(itv, &tv);
1179 }
1180 ivtv_call_i2c_clients(itv, VIDIOC_LOG_STATUS, NULL);
1181 ivtv_get_input(itv, itv->active_input, &vidin);
1182 ivtv_get_audio_input(itv, itv->audio_input, &audin);
1183 IVTV_INFO("Video Input: %s\n", vidin.name);
1184 IVTV_INFO("Audio Input: %s\n", audin.name);
1185 if (has_output) {
1186 struct v4l2_output vidout;
1187 struct v4l2_audioout audout;
1188 int mode = itv->output_mode;
1189 static const char * const output_modes[] = {
1190 "None",
1191 "MPEG Streaming",
1192 "YUV Streaming",
1193 "YUV Frames",
1194 "Passthrough",
1195 };
1196
1197 ivtv_get_output(itv, itv->active_output, &vidout);
1198 ivtv_get_audio_output(itv, 0, &audout);
1199 IVTV_INFO("Video Output: %s\n", vidout.name);
1200 IVTV_INFO("Audio Output: %s\n", audout.name);
1201 if (mode < 0 || mode > OUT_PASSTHROUGH)
1202 mode = OUT_NONE;
1203 IVTV_INFO("Output Mode: %s\n", output_modes[mode]);
1204 }
1205 IVTV_INFO("Tuner: %s\n",
1206 test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? "Radio" : "TV");
1207 cx2341x_log_status(&itv->params, itv->name);
1208 IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags);
1209 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
1210 struct ivtv_stream *s = &itv->streams[i];
1211
1212 if (s->v4l2dev == NULL || s->buffers == 0)
1213 continue;
1214 IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags,
1215 (s->buffers - s->q_free.buffers) * 100 / s->buffers,
1216 (s->buffers * s->buf_size) / 1024, s->buffers);
1217 }
1218 IVTV_INFO("Read MPEG/VBI: %lld/%lld bytes\n", itv->mpg_data_received, itv->vbi_data_inserted);
1219 IVTV_INFO("================== END STATUS CARD #%d ==================\n", itv->num);
1220 break;
1221 }
1222
1223 default:
1224 return -EINVAL;
1225 }
1226 return 0;
1227}
1228
1229static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
1230{
1231 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1232 struct ivtv *itv = id->itv;
1233 int nonblocking = filp->f_flags & O_NONBLOCK;
1234 struct ivtv_stream *s = &itv->streams[id->type];
1235
1236 switch (cmd) {
1237 case IVTV_IOC_DMA_FRAME: {
1238 struct ivtv_dma_frame *args = arg;
1239
1240 IVTV_DEBUG_IOCTL("IVTV_IOC_DMA_FRAME\n");
1241 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
1242 return -EINVAL;
1243 if (args->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
1244 return -EINVAL;
1245 if (itv->output_mode == OUT_UDMA_YUV && args->y_source == NULL)
1246 return 0;
1247 if (ivtv_claim_stream(id, id->type)) {
1248 return -EBUSY;
1249 }
1250 if (ivtv_set_output_mode(itv, OUT_UDMA_YUV) != OUT_UDMA_YUV) {
1251 ivtv_release_stream(s);
1252 return -EBUSY;
1253 }
1254 if (args->y_source == NULL)
1255 return 0;
1256 return ivtv_yuv_prep_frame(itv, args);
1257 }
1258
1259 case VIDEO_GET_PTS: {
1260 u32 data[CX2341X_MBOX_MAX_DATA];
1261 u64 *pts = arg;
1262
1263 IVTV_DEBUG_IOCTL("VIDEO_GET_PTS\n");
1264 if (s->type < IVTV_DEC_STREAM_TYPE_MPG) {
1265 *pts = s->dma_pts;
1266 break;
1267 }
1268 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
1269 return -EINVAL;
1270
1271 if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) {
1272 *pts = (u64) ((u64)itv->last_dec_timing[2] << 32) |
1273 (u64)itv->last_dec_timing[1];
1274 break;
1275 }
1276 *pts = 0;
1277 if (atomic_read(&itv->decoding)) {
1278 if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) {
1279 IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n");
1280 return -EIO;
1281 }
1282 memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing));
1283 set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
1284 *pts = (u64) ((u64) data[2] << 32) | (u64) data[1];
1285 /*timing->scr = (u64) (((u64) data[4] << 32) | (u64) (data[3]));*/
1286 }
1287 break;
1288 }
1289
1290 case VIDEO_GET_FRAME_COUNT: {
1291 u32 data[CX2341X_MBOX_MAX_DATA];
1292 u64 *frame = arg;
1293
1294 IVTV_DEBUG_IOCTL("VIDEO_GET_FRAME_COUNT\n");
1295 if (s->type < IVTV_DEC_STREAM_TYPE_MPG) {
1296 *frame = 0;
1297 break;
1298 }
1299 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
1300 return -EINVAL;
1301
1302 if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) {
1303 *frame = itv->last_dec_timing[0];
1304 break;
1305 }
1306 *frame = 0;
1307 if (atomic_read(&itv->decoding)) {
1308 if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) {
1309 IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n");
1310 return -EIO;
1311 }
1312 memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing));
1313 set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
1314 *frame = data[0];
1315 }
1316 break;
1317 }
1318
1319 case VIDEO_PLAY: {
1320 struct video_command vc;
1321
1322 IVTV_DEBUG_IOCTL("VIDEO_PLAY\n");
1323 memset(&vc, 0, sizeof(vc));
1324 vc.cmd = VIDEO_CMD_PLAY;
1325 return ivtv_video_command(itv, id, &vc, 0);
1326 }
1327
1328 case VIDEO_STOP: {
1329 struct video_command vc;
1330
1331 IVTV_DEBUG_IOCTL("VIDEO_STOP\n");
1332 memset(&vc, 0, sizeof(vc));
1333 vc.cmd = VIDEO_CMD_STOP;
1334 vc.flags = VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY;
1335 return ivtv_video_command(itv, id, &vc, 0);
1336 }
1337
1338 case VIDEO_FREEZE: {
1339 struct video_command vc;
1340
1341 IVTV_DEBUG_IOCTL("VIDEO_FREEZE\n");
1342 memset(&vc, 0, sizeof(vc));
1343 vc.cmd = VIDEO_CMD_FREEZE;
1344 return ivtv_video_command(itv, id, &vc, 0);
1345 }
1346
1347 case VIDEO_CONTINUE: {
1348 struct video_command vc;
1349
1350 IVTV_DEBUG_IOCTL("VIDEO_CONTINUE\n");
1351 memset(&vc, 0, sizeof(vc));
1352 vc.cmd = VIDEO_CMD_CONTINUE;
1353 return ivtv_video_command(itv, id, &vc, 0);
1354 }
1355
1356 case VIDEO_COMMAND:
1357 case VIDEO_TRY_COMMAND: {
1358 struct video_command *vc = arg;
1359 int try = (cmd == VIDEO_TRY_COMMAND);
1360
1361 if (try)
1362 IVTV_DEBUG_IOCTL("VIDEO_TRY_COMMAND\n");
1363 else
1364 IVTV_DEBUG_IOCTL("VIDEO_COMMAND\n");
1365 return ivtv_video_command(itv, id, vc, try);
1366 }
1367
1368 case VIDEO_GET_EVENT: {
1369 struct video_event *ev = arg;
1370 DEFINE_WAIT(wait);
1371
1372 IVTV_DEBUG_IOCTL("VIDEO_GET_EVENT\n");
1373 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
1374 return -EINVAL;
1375 memset(ev, 0, sizeof(*ev));
1376 set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
1377
1378 while (1) {
1379 if (test_and_clear_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
1380 ev->type = VIDEO_EVENT_DECODER_STOPPED;
1381 else if (test_and_clear_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) {
1382 ev->type = VIDEO_EVENT_VSYNC;
1383 ev->u.vsync_field = test_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags) ?
1384 VIDEO_VSYNC_FIELD_ODD : VIDEO_VSYNC_FIELD_EVEN;
1385 if (itv->output_mode == OUT_UDMA_YUV &&
1386 (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) ==
1387 IVTV_YUV_MODE_PROGRESSIVE) {
1388 ev->u.vsync_field = VIDEO_VSYNC_FIELD_PROGRESSIVE;
1389 }
1390 }
1391 if (ev->type)
1392 return 0;
1393 if (nonblocking)
1394 return -EAGAIN;
1395 /* wait for event */
1396 prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE);
1397 if ((itv->i_flags & (IVTV_F_I_EV_DEC_STOPPED|IVTV_F_I_EV_VSYNC)) == 0)
1398 schedule();
1399 finish_wait(&itv->event_waitq, &wait);
1400 if (signal_pending(current)) {
1401 /* return if a signal was received */
1402 IVTV_DEBUG_INFO("User stopped wait for event\n");
1403 return -EINTR;
1404 }
1405 }
1406 break;
1407 }
1408
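The VIDEO_GET_EVENT loop above follows the kernel's prepare_to_wait()/schedule()/finish_wait() idiom; a stripped-down sketch of that idiom, with 'cond' and 'my_waitq' standing in for the driver's flag bits and wait queue:

	/* Generic interruptible-wait sketch; 'cond' and 'my_waitq' are placeholders. */
	DEFINE_WAIT(wait);

	for (;;) {
		if (cond)
			break;				/* event already pending */
		prepare_to_wait(&my_waitq, &wait, TASK_INTERRUPTIBLE);
		if (!cond)				/* re-check after queueing to avoid a lost wakeup */
			schedule();
		finish_wait(&my_waitq, &wait);
		if (signal_pending(current))
			return -EINTR;			/* interrupted by a signal */
	}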
1409 default:
1410 return -EINVAL;
1411 }
1412 return 0;
1413}
1414
1415static int ivtv_v4l2_do_ioctl(struct inode *inode, struct file *filp,
1416 unsigned int cmd, void *arg)
1417{
1418 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1419 struct ivtv *itv = id->itv;
1420 int ret;
1421
1422 /* check priority */
1423 switch (cmd) {
1424 case VIDIOC_S_CTRL:
1425 case VIDIOC_S_STD:
1426 case VIDIOC_S_INPUT:
1427 case VIDIOC_S_OUTPUT:
1428 case VIDIOC_S_TUNER:
1429 case VIDIOC_S_FREQUENCY:
1430 case VIDIOC_S_FMT:
1431 case VIDIOC_S_CROP:
1432 case VIDIOC_S_AUDIO:
1433 case VIDIOC_S_AUDOUT:
1434 case VIDIOC_S_EXT_CTRLS:
1435 case VIDIOC_S_FBUF:
1436 ret = v4l2_prio_check(&itv->prio, &id->prio);
1437 if (ret)
1438 return ret;
1439 }
1440
1441 switch (cmd) {
1442 case VIDIOC_DBG_G_REGISTER:
1443 case VIDIOC_DBG_S_REGISTER:
1444 case VIDIOC_G_CHIP_IDENT:
1445 case VIDIOC_INT_S_AUDIO_ROUTING:
1446 case VIDIOC_INT_RESET:
1447 if (ivtv_debug & IVTV_DBGFLG_IOCTL) {
1448 printk(KERN_INFO "ivtv%d ioctl: ", itv->num);
1449 v4l_printk_ioctl(cmd);
1450 }
1451 return ivtv_debug_ioctls(filp, cmd, arg);
1452
1453 case VIDIOC_G_PRIORITY:
1454 case VIDIOC_S_PRIORITY:
1455 case VIDIOC_QUERYCAP:
1456 case VIDIOC_ENUMINPUT:
1457 case VIDIOC_G_INPUT:
1458 case VIDIOC_S_INPUT:
1459 case VIDIOC_ENUMOUTPUT:
1460 case VIDIOC_G_OUTPUT:
1461 case VIDIOC_S_OUTPUT:
1462 case VIDIOC_G_FMT:
1463 case VIDIOC_S_FMT:
1464 case VIDIOC_TRY_FMT:
1465 case VIDIOC_ENUM_FMT:
1466 case VIDIOC_G_CROP:
1467 case VIDIOC_S_CROP:
1468 case VIDIOC_G_FREQUENCY:
1469 case VIDIOC_S_FREQUENCY:
1470 case VIDIOC_ENUMSTD:
1471 case VIDIOC_G_STD:
1472 case VIDIOC_S_STD:
1473 case VIDIOC_S_TUNER:
1474 case VIDIOC_G_TUNER:
1475 case VIDIOC_ENUMAUDIO:
1476 case VIDIOC_S_AUDIO:
1477 case VIDIOC_G_AUDIO:
1478 case VIDIOC_ENUMAUDOUT:
1479 case VIDIOC_S_AUDOUT:
1480 case VIDIOC_G_AUDOUT:
1481 case VIDIOC_G_SLICED_VBI_CAP:
1482 case VIDIOC_LOG_STATUS:
1483 case VIDIOC_G_ENC_INDEX:
1484 case VIDIOC_ENCODER_CMD:
1485 case VIDIOC_TRY_ENCODER_CMD:
1486 case VIDIOC_G_FBUF:
1487 case VIDIOC_S_FBUF:
1488 if (ivtv_debug & IVTV_DBGFLG_IOCTL) {
1489 printk(KERN_INFO "ivtv%d ioctl: ", itv->num);
1490 v4l_printk_ioctl(cmd);
1491 }
1492 return ivtv_v4l2_ioctls(itv, filp, cmd, arg);
1493
1494 case VIDIOC_QUERYMENU:
1495 case VIDIOC_QUERYCTRL:
1496 case VIDIOC_S_CTRL:
1497 case VIDIOC_G_CTRL:
1498 case VIDIOC_S_EXT_CTRLS:
1499 case VIDIOC_G_EXT_CTRLS:
1500 case VIDIOC_TRY_EXT_CTRLS:
1501 if (ivtv_debug & IVTV_DBGFLG_IOCTL) {
1502 printk(KERN_INFO "ivtv%d ioctl: ", itv->num);
1503 v4l_printk_ioctl(cmd);
1504 }
1505 return ivtv_control_ioctls(itv, cmd, arg);
1506
1507 case IVTV_IOC_DMA_FRAME:
1508 case VIDEO_GET_PTS:
1509 case VIDEO_GET_FRAME_COUNT:
1510 case VIDEO_GET_EVENT:
1511 case VIDEO_PLAY:
1512 case VIDEO_STOP:
1513 case VIDEO_FREEZE:
1514 case VIDEO_CONTINUE:
1515 case VIDEO_COMMAND:
1516 case VIDEO_TRY_COMMAND:
1517 return ivtv_decoder_ioctls(filp, cmd, arg);
1518
1519 case 0x00005401: /* Handle isatty() calls */
1520 return -EINVAL;
1521 default:
1522 return v4l_compat_translate_ioctl(inode, filp, cmd, arg,
1523 ivtv_v4l2_do_ioctl);
1524 }
1525 return 0;
1526}
1527
1528int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1529 unsigned long arg)
1530{
1531 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1532 struct ivtv *itv = id->itv;
1533
1534 /* Filter dvb ioctls that cannot be handled by video_usercopy */
1535 switch (cmd) {
1536 case VIDEO_SELECT_SOURCE:
1537 IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
1538 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
1539 return -EINVAL;
1540 return ivtv_passthrough_mode(itv, arg == VIDEO_SOURCE_DEMUX);
1541
1542 case AUDIO_SET_MUTE:
1543 IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
1544 itv->speed_mute_audio = arg;
1545 return 0;
1546
1547 case AUDIO_CHANNEL_SELECT:
1548 IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
1549 if (arg > AUDIO_STEREO_SWAPPED)
1550 return -EINVAL;
1551 itv->audio_stereo_mode = arg;
1552 ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
1553 return 0;
1554
1555 case AUDIO_BILINGUAL_CHANNEL_SELECT:
1556 IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
1557 if (arg > AUDIO_STEREO_SWAPPED)
1558 return -EINVAL;
1559 itv->audio_bilingual_mode = arg;
1560 ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
1561 return 0;
1562
1563 default:
1564 break;
1565 }
1566 return video_usercopy(inode, filp, cmd, arg, ivtv_v4l2_do_ioctl);
1567}
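The wrapper above filters the DVB decoder ioctls whose argument is passed by value rather than copied through video_usercopy(); a hedged user-space sketch of the passthrough switch it implements (the decoder node path is an assumption):

	/* User-space sketch: switch the decoder to demux (passthrough) input.
	 * VIDEO_SELECT_SOURCE takes its argument by value, which is why the
	 * driver handles it before video_usercopy(). Device path is assumed. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dvb/video.h>

	int select_demux(void)
	{
		int fd = open("/dev/video16", O_RDWR);	/* assumed decoder node */
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, VIDEO_SELECT_SOURCE, VIDEO_SOURCE_DEMUX);
		close(fd);
		return ret;
	}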
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
new file mode 100644
index 000000000000..cbccf7a9f65c
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -0,0 +1,28 @@
1/*
2 ioctl system call
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21u16 service2vbi(int type);
22void expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
23u16 get_service_set(struct v4l2_sliced_vbi_format *fmt);
24int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
25 unsigned long arg);
26int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void *arg);
27void ivtv_set_osd_alpha(struct ivtv *itv);
28int ivtv_set_speed(struct ivtv *itv, int speed);
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
new file mode 100644
index 000000000000..c3a047b381b3
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -0,0 +1,838 @@
1/* interrupt handling
2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-firmware.h"
23#include "ivtv-fileops.h"
24#include "ivtv-queue.h"
25#include "ivtv-udma.h"
26#include "ivtv-irq.h"
27#include "ivtv-ioctl.h"
28#include "ivtv-mailbox.h"
29#include "ivtv-vbi.h"
30#include "ivtv-yuv.h"
31
32#define DMA_MAGIC_COOKIE 0x000001fe
33
34#define SLICED_VBI_PIO 1
35
36static void ivtv_dma_dec_start(struct ivtv_stream *s);
37
38static const int ivtv_stream_map[] = {
39 IVTV_ENC_STREAM_TYPE_MPG,
40 IVTV_ENC_STREAM_TYPE_YUV,
41 IVTV_ENC_STREAM_TYPE_PCM,
42 IVTV_ENC_STREAM_TYPE_VBI,
43};
44
45static inline int ivtv_use_pio(struct ivtv_stream *s)
46{
47 struct ivtv *itv = s->itv;
48
49 return s->dma == PCI_DMA_NONE ||
50 (SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set);
51}
52
53void ivtv_irq_work_handler(struct work_struct *work)
54{
55 struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
56
57 DEFINE_WAIT(wait);
58
59 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
60 vbi_work_handler(itv);
61
62 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
63 ivtv_yuv_work_handler(itv);
64}
65
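ivtv_irq_work_handler() is the deferred half of the interrupt path: the IRQ handler sets a flag bit and queues the work, and the handler recovers its device with container_of(). A minimal sketch of that work_struct idiom under assumed names:

	/* Minimal work_struct sketch: defer heavy processing out of interrupt
	 * context. 'my_dev' and 'my_work_fn' are placeholders. */
	struct my_dev {
		struct work_struct work;
		/* ... device state ... */
	};

	static void my_work_fn(struct work_struct *w)
	{
		struct my_dev *dev = container_of(w, struct my_dev, work);
		/* runs in process context, may sleep */
	}

	/* setup:   INIT_WORK(&dev->work, my_work_fn);                      */
	/* in IRQ:  queue_work(my_wq, &dev->work); or schedule_work(&dev->work); */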
66/* Determine the required DMA size, set up enough buffers in the predma queue,
67   and actually copy the data from the card to the buffers if a PIO transfer
68   is required for this stream.
69 */
70static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
71{
72 struct ivtv *itv = s->itv;
73 struct ivtv_buffer *buf;
74 struct list_head *p;
75 u32 bytes_needed = 0;
76 u32 offset, size;
77 u32 UVoffset = 0, UVsize = 0;
78 int skip_bufs = s->q_predma.buffers;
79 int idx = s->SG_length;
80 int rc;
81
82 /* sanity checks */
83 if (s->v4l2dev == NULL) {
84 IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
85 return -1;
86 }
87 if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
88 IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
89 return -1;
90 }
91
92 /* determine offset, size and PTS for the various streams */
93 switch (s->type) {
94 case IVTV_ENC_STREAM_TYPE_MPG:
95 offset = data[1];
96 size = data[2];
97 s->dma_pts = 0;
98 break;
99
100 case IVTV_ENC_STREAM_TYPE_YUV:
101 offset = data[1];
102 size = data[2];
103 UVoffset = data[3];
104 UVsize = data[4];
105 s->dma_pts = ((u64) data[5] << 32) | data[6];
106 break;
107
108 case IVTV_ENC_STREAM_TYPE_PCM:
109 offset = data[1] + 12;
110 size = data[2] - 12;
111 s->dma_pts = read_dec(offset - 8) |
112 ((u64)(read_dec(offset - 12)) << 32);
113 if (itv->has_cx23415)
114 offset += IVTV_DECODER_OFFSET;
115 break;
116
117 case IVTV_ENC_STREAM_TYPE_VBI:
118 size = itv->vbi.enc_size * itv->vbi.fpi;
119 offset = read_enc(itv->vbi.enc_start - 4) + 12;
120 if (offset == 12) {
121 IVTV_DEBUG_INFO("VBI offset == 0\n");
122 return -1;
123 }
124 s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
125 break;
126
127 case IVTV_DEC_STREAM_TYPE_VBI:
128 size = read_dec(itv->vbi.dec_start + 4) + 8;
129 offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
130 s->dma_pts = 0;
131 offset += IVTV_DECODER_OFFSET;
132 break;
133 default:
134 /* shouldn't happen */
135 return -1;
136 }
137
138 /* if this is the start of the DMA then fill in the magic cookie */
139 if (s->SG_length == 0) {
140 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
141 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
142 s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
143 write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
144 }
145 else {
146 s->dma_backup = read_enc(offset);
147 write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
148 }
149 s->dma_offset = offset;
150 }
151
152 bytes_needed = size;
153 if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
154 /* The size for the Y samples needs to be rounded upwards to a
155 multiple of the buf_size. The UV samples then start in the
156 next buffer. */
157 bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
158 bytes_needed += UVsize;
159 }
160
161 IVTV_DEBUG_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
162 ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
163
164 rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
165 if (rc < 0) { /* Insufficient buffers */
166 IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
167 bytes_needed, s->name);
168 return -1;
169 }
170 if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
171 IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
172 IVTV_WARN("Cause: the application is not reading fast enough.\n");
173 }
174 s->buffers_stolen = rc;
175
176 /* got the buffers, now fill in SGarray (DMA) or copy the data from the card
177 to the buffers (PIO). */
178 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
179 memset(buf->buf, 0, 128);
180 list_for_each(p, &s->q_predma.list) {
181 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
182
183 if (skip_bufs-- > 0)
184 continue;
185 if (!ivtv_use_pio(s)) {
186 s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
187 s->SGarray[idx].src = cpu_to_le32(offset);
188 s->SGarray[idx].size = cpu_to_le32(s->buf_size);
189 }
190 buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
191
192 /* If PIO, then copy the data from the card to the buffer */
193 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
194 memcpy_fromio(buf->buf, itv->dec_mem + offset - IVTV_DECODER_OFFSET, buf->bytesused);
195 }
196 else if (ivtv_use_pio(s)) {
197 memcpy_fromio(buf->buf, itv->enc_mem + offset, buf->bytesused);
198 }
199
200 s->q_predma.bytesused += buf->bytesused;
201 size -= buf->bytesused;
202 offset += s->buf_size;
203
204 /* Sync SG buffers */
205 ivtv_buf_sync_for_device(s, buf);
206
207 if (size == 0) { /* YUV */
208 /* process the UV section */
209 offset = UVoffset;
210 size = UVsize;
211 }
212 idx++;
213 }
214 s->SG_length = idx;
215 return 0;
216}
217
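The Y-plane rounding in stream_enc_dma_append() is the usual round-up-to-a-multiple formula; a standalone sketch with a worked value:

	/* Round bytes up to a whole number of buffers, as done for the Y samples
	 * above. Example: with buf_size == 4096, 5000 bytes round up to 8192, so
	 * the UV samples start at the beginning of the next buffer. */
	static inline u32 round_up_to_bufs(u32 bytes, u32 buf_size)
	{
		return buf_size * ((bytes + buf_size - 1) / buf_size);
	}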
218static void dma_post(struct ivtv_stream *s)
219{
220 struct ivtv *itv = s->itv;
221 struct ivtv_buffer *buf = NULL;
222 struct list_head *p;
223 u32 offset;
224 u32 *u32buf;
225 int x = 0;
226
227 if (ivtv_use_pio(s)) {
228 if (s->q_predma.bytesused)
229 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
230 s->SG_length = 0;
231 }
232 IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
233 s->name, s->dma_offset);
234 list_for_each(p, &s->q_dma.list) {
235 buf = list_entry(p, struct ivtv_buffer, list);
236 u32buf = (u32 *)buf->buf;
237
238 /* Sync Buffer */
239 ivtv_buf_sync_for_cpu(s, buf);
240
241 if (x == 0) {
242 offset = s->dma_last_offset;
243 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
244 {
245 for (offset = 0; offset < 64; offset++) {
246 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
247 break;
248 }
249 }
250 offset *= 4;
251 if (offset == 256) {
252 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
253 offset = s->dma_last_offset;
254 }
255 if (s->dma_last_offset != offset)
256 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
257 s->dma_last_offset = offset;
258 }
259 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
260 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
261 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
262 }
263 else {
264 write_enc_sync(0, s->dma_offset);
265 }
266 if (offset) {
267 buf->bytesused -= offset;
268 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
269 }
270 *u32buf = cpu_to_le32(s->dma_backup);
271 }
272 x++;
273 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
274 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
275 s->type == IVTV_ENC_STREAM_TYPE_VBI)
276 set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
277 }
278 if (buf)
279 buf->bytesused += s->dma_last_offset;
280 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
281 /* Parse and Groom VBI Data */
282 s->q_dma.bytesused -= buf->bytesused;
283 ivtv_process_vbi_data(itv, buf, 0, s->type);
284 s->q_dma.bytesused += buf->bytesused;
285 if (s->id == -1) {
286 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
287 return;
288 }
289 }
290 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
291 if (s->id != -1)
292 wake_up(&s->waitq);
293}
294
295void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
296{
297 struct ivtv *itv = s->itv;
298 struct ivtv_buffer *buf;
299 struct list_head *p;
300 u32 y_size = itv->params.height * itv->params.width;
301 u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
302 int y_done = 0;
303 int bytes_written = 0;
304 unsigned long flags = 0;
305 int idx = 0;
306
307 IVTV_DEBUG_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
308 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
309 list_for_each(p, &s->q_predma.list) {
310 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
311
312 /* YUV UV Offset from Y Buffer */
313 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
314 offset = uv_offset;
315 y_done = 1;
316 }
317 s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
318 s->SGarray[idx].dst = cpu_to_le32(offset);
319 s->SGarray[idx].size = cpu_to_le32(buf->bytesused);
320
321 offset += buf->bytesused;
322 bytes_written += buf->bytesused;
323
324 /* Sync SG buffers */
325 ivtv_buf_sync_for_device(s, buf);
326 idx++;
327 }
328 s->SG_length = idx;
329
330 /* Mark last buffer size for Interrupt flag */
331 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
332
333 /* Sync Hardware SG List of buffers */
334 ivtv_stream_sync_for_device(s);
335 if (lock)
336 spin_lock_irqsave(&itv->dma_reg_lock, flags);
337 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
338 ivtv_dma_dec_start(s);
339 }
340 else {
341 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
342 }
343 if (lock)
344 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
345}
346
347/* start the encoder DMA */
348static void ivtv_dma_enc_start(struct ivtv_stream *s)
349{
350 struct ivtv *itv = s->itv;
351 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
352 int i;
353
354 if (s->q_predma.bytesused)
355 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
356 IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
357 s->SGarray[s->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
358
359 /* If this is an MPEG stream, and VBI data is also pending, then append the
360 VBI DMA to the MPEG DMA and transfer both sets of data at once.
361
362	   VBI DMA is a second-class citizen compared to MPEG and mixing them together
363	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
364	   MPEG DMA, effectively dropping an MPEG frame). So instead we make sure that,
365	   when both are in use, the VBI data is only ever transferred as part of the
366	   MPEG DMA. This way no conflicts occur. */
367 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
368 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
369 s->SG_length + s_vbi->SG_length <= s->buffers) {
370 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
371			s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s_vbi->SG_length - 1].size) + 256);
372 for (i = 0; i < s_vbi->SG_length; i++) {
373 s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
374 }
375 itv->vbi.dma_offset = s_vbi->dma_offset;
376 s_vbi->SG_length = 0;
377 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
378 IVTV_DEBUG_DMA("include DMA for %s\n", s->name);
379 }
380
381 /* Mark last buffer size for Interrupt flag */
382 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
383
384 /* Sync Hardware SG List of buffers */
385 ivtv_stream_sync_for_device(s);
386 write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
387 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
388 set_bit(IVTV_F_I_DMA, &itv->i_flags);
389 itv->cur_dma_stream = s->type;
390 itv->dma_timer.expires = jiffies + HZ / 10;
391 add_timer(&itv->dma_timer);
392}
393
394static void ivtv_dma_dec_start(struct ivtv_stream *s)
395{
396 struct ivtv *itv = s->itv;
397
398 if (s->q_predma.bytesused)
399 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
400 IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
401 /* put SG Handle into register 0x0c */
402 write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
403 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
404 set_bit(IVTV_F_I_DMA, &itv->i_flags);
405 itv->cur_dma_stream = s->type;
406 itv->dma_timer.expires = jiffies + HZ / 10;
407 add_timer(&itv->dma_timer);
408}
409
410static void ivtv_irq_dma_read(struct ivtv *itv)
411{
412 struct ivtv_stream *s = NULL;
413 struct ivtv_buffer *buf;
414 int hw_stream_type;
415
416 IVTV_DEBUG_IRQ("DEC DMA READ\n");
417 del_timer(&itv->dma_timer);
418 if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
419 IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
420 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
421 }
422 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
423 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
424 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
425 hw_stream_type = 2;
426 }
427 else {
428 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
429 hw_stream_type = 0;
430 }
431 IVTV_DEBUG_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
432
433 ivtv_stream_sync_for_cpu(s);
434
435		/* The firmware must be kicked here, just as in PIO mode.
436		   This appears to tell the firmware that we are done and how large
437		   the transfer was, so that it can calculate what it needs next.
438		   We could probably do this bookkeeping ourselves, but that would
439		   mean fully calculating the transfer info instead of relying on
440		   interrupts. */
441 ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
442 hw_stream_type);
443
444 /* Free last DMA call */
445 while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
446 ivtv_buf_sync_for_cpu(s, buf);
447 ivtv_enqueue(s, buf, &s->q_free);
448 }
449 wake_up(&s->waitq);
450 }
451 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
452 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
453 itv->cur_dma_stream = -1;
454 wake_up(&itv->dma_waitq);
455}
456
457static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
458{
459 u32 data[CX2341X_MBOX_MAX_DATA];
460 struct ivtv_stream *s;
461
462 del_timer(&itv->dma_timer);
463 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
464 IVTV_DEBUG_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
465 if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
466 data[1] = 3;
467 else if (data[1] > 2)
468 return;
469 s = &itv->streams[ivtv_stream_map[data[1]]];
470 if (data[0] & 0x18) {
471 IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
472 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
473 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
474 }
475 s->SG_length = 0;
476 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
477 itv->cur_dma_stream = -1;
478 dma_post(s);
479 ivtv_stream_sync_for_cpu(s);
480 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
481 u32 tmp;
482
483 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
484 tmp = s->dma_offset;
485 s->dma_offset = itv->vbi.dma_offset;
486 dma_post(s);
487 s->dma_offset = tmp;
488 }
489 wake_up(&itv->dma_waitq);
490}
491
492static void ivtv_irq_dma_err(struct ivtv *itv)
493{
494 u32 data[CX2341X_MBOX_MAX_DATA];
495
496 del_timer(&itv->dma_timer);
497 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
498 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
499 read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
500 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
501 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
502 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
503
504 /* retry */
505 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
506 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
507 ivtv_dma_dec_start(s);
508 else
509 ivtv_dma_enc_start(s);
510 return;
511 }
512 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
513 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
514 itv->cur_dma_stream = -1;
515 wake_up(&itv->dma_waitq);
516}
517
518static void ivtv_irq_enc_start_cap(struct ivtv *itv)
519{
520 u32 data[CX2341X_MBOX_MAX_DATA];
521 struct ivtv_stream *s;
522
523 /* Get DMA destination and size arguments from card */
524 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
525 IVTV_DEBUG_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
526
527 if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
528 IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
529 data[0], data[1], data[2]);
530 return;
531 }
532 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
533 s = &itv->streams[ivtv_stream_map[data[0]]];
534 if (!stream_enc_dma_append(s, data)) {
535 if (ivtv_use_pio(s)) {
536 dma_post(s);
537 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[0]);
538 }
539 else {
540 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
541 }
542 }
543}
544
545static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
546{
547 struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
548 u32 data[CX2341X_MBOX_MAX_DATA];
549 struct ivtv_stream *s;
550
551 IVTV_DEBUG_IRQ("ENC START VBI CAP\n");
552 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
553
554 if (ivtv_use_pio(s)) {
555 if (stream_enc_dma_append(s, data))
556 return;
557 if (s->q_predma.bytesused)
558 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
559 s->SG_length = 0;
560 dma_post(s);
561 return;
562 }
563 /* If more than two VBI buffers are pending, then
564 clear the old ones and start with this new one.
565 This can happen during transition stages when MPEG capturing is
566 started, but the first interrupts haven't arrived yet. During
567 that period VBI requests can accumulate without being able to
568 DMA the data. Since at most four VBI DMA buffers are available,
569 we just drop the old requests when there are already three
570 requests queued. */
571 if (s->SG_length > 2) {
572 struct list_head *p;
573 list_for_each(p, &s->q_predma.list) {
574 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
575 ivtv_buf_sync_for_cpu(s, buf);
576 }
577 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
578 s->SG_length = 0;
579 }
580 /* if we can append the data, and the MPEG stream isn't capturing,
581 then start a DMA request for just the VBI data. */
582 if (!stream_enc_dma_append(s, data) &&
583 !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
584 set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
585 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
586 }
587}
588
589static void ivtv_irq_dev_vbi_reinsert(struct ivtv *itv)
590{
591 u32 data[CX2341X_MBOX_MAX_DATA];
592 struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
593
594 IVTV_DEBUG_IRQ("DEC VBI REINSERT\n");
595 if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
596 !stream_enc_dma_append(s, data)) {
597 dma_post(s);
598 }
599}
600
601static void ivtv_irq_dec_data_req(struct ivtv *itv)
602{
603 u32 data[CX2341X_MBOX_MAX_DATA];
604 struct ivtv_stream *s;
605
606 /* YUV or MPG */
607 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);
608
609 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
610 itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
611 itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
612 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
613 }
614 else {
615 itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
616 itv->dma_data_req_offset = data[1];
617 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
618 }
619 IVTV_DEBUG_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
620 itv->dma_data_req_offset, itv->dma_data_req_size);
621 if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
622 set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
623 }
624 else {
625 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
626 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
627 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
628 }
629}
630
631static void ivtv_irq_vsync(struct ivtv *itv)
632{
633 /* The vsync interrupt is unusual in that it won't clear until
634 * the end of the first line for the current field, at which
635 * point it clears itself. This can result in repeated vsync
636 * interrupts, or a missed vsync. Read some of the registers
637 * to determine the line being displayed and ensure we handle
638 * one vsync per frame.
639 */
640 unsigned int frame = read_reg(0x28c0) & 1;
641 int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);
642
643 if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
644
645 if (((frame ^ itv->yuv_info.lace_sync_field) == 0 && ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.lace_sync_field)) ||
646 (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
647 int next_dma_frame = last_dma_frame;
648
649 if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
650 write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
651 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
652 write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
653 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
654 next_dma_frame = (next_dma_frame + 1) & 0x3;
655 atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
656 }
657 }
658 if (frame != (itv->lastVsyncFrame & 1)) {
659 struct ivtv_stream *s = ivtv_get_output_stream(itv);
660 int work = 0;
661
662 itv->lastVsyncFrame += 1;
663 if (frame == 0) {
664 clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
665 clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
666 }
667 else {
668 set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
669 }
670 if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
671 set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
672 wake_up(&itv->event_waitq);
673 }
674 wake_up(&itv->vsync_waitq);
675 if (s)
676 wake_up(&s->waitq);
677
678 /* Send VBI to saa7127 */
679 if (frame) {
680 set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
681 work = 1;
682 }
683
684 /* Check if we need to update the yuv registers */
685 if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
686 if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
687 last_dma_frame = (last_dma_frame - 1) & 3;
688
689 if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
690 itv->yuv_info.update_frame = last_dma_frame;
691 itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
692 itv->yuv_info.yuv_forced_update = 0;
693 set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
694 work = 1;
695 }
696 }
697 if (work)
698 queue_work(itv->irq_work_queues, &itv->irq_work_queue);
699 }
700}
701
702#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
703
704irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
705{
706 struct ivtv *itv = (struct ivtv *)dev_id;
707 u32 combo;
708 u32 stat;
709 int i;
710 u8 vsync_force = 0;
711
712 spin_lock(&itv->dma_reg_lock);
713 /* get contents of irq status register */
714 stat = read_reg(IVTV_REG_IRQSTATUS);
715
716 combo = ~itv->irqmask & stat;
717
718 /* Clear out IRQ */
719 if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
720
721 if (0 == combo) {
722 /* The vsync interrupt is unusual and clears itself. If we
723 * took too long, we may have missed it. Do some checks
724 */
725 if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
726 /* vsync is enabled, see if we're in a new field */
727 if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
728 /* New field, looks like we missed it */
729 IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
730 vsync_force = 1;
731 }
732 }
733
734 if (!vsync_force) {
735 /* No Vsync expected, wasn't for us */
736 spin_unlock(&itv->dma_reg_lock);
737 return IRQ_NONE;
738 }
739 }
740
741 /* Exclude interrupts noted below from the output, otherwise the log is flooded with
742 these messages */
743 if (combo & ~0xff6d0400)
744 IVTV_DEBUG_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
745
746 if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
747 IVTV_DEBUG_IRQ("DEC DMA COMPLETE\n");
748 }
749
750 if (combo & IVTV_IRQ_DMA_READ) {
751 ivtv_irq_dma_read(itv);
752 }
753
754 if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
755 ivtv_irq_enc_dma_complete(itv);
756 }
757
758 if (combo & IVTV_IRQ_DMA_ERR) {
759 ivtv_irq_dma_err(itv);
760 }
761
762 if (combo & IVTV_IRQ_ENC_START_CAP) {
763 ivtv_irq_enc_start_cap(itv);
764 }
765
766 if (combo & IVTV_IRQ_ENC_VBI_CAP) {
767 ivtv_irq_enc_vbi_cap(itv);
768 }
769
770 if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
771 ivtv_irq_dev_vbi_reinsert(itv);
772 }
773
774 if (combo & IVTV_IRQ_ENC_EOS) {
775 IVTV_DEBUG_IRQ("ENC EOS\n");
776 set_bit(IVTV_F_I_EOS, &itv->i_flags);
777 wake_up(&itv->cap_w);
778 }
779
780 if (combo & IVTV_IRQ_DEC_DATA_REQ) {
781 ivtv_irq_dec_data_req(itv);
782 }
783
784 /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
785 if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
786 ivtv_irq_vsync(itv);
787 }
788
789 if (combo & IVTV_IRQ_ENC_VIM_RST) {
790 IVTV_DEBUG_IRQ("VIM RST\n");
791 /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
792 }
793
794 if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
795 IVTV_DEBUG_INFO("Stereo mode changed\n");
796 }
797
798 if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
799 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
800 int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
801 struct ivtv_stream *s = &itv->streams[idx];
802
803 if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
804 continue;
805 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
806 ivtv_dma_dec_start(s);
807 else
808 ivtv_dma_enc_start(s);
809 break;
810 }
811 if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
812 ivtv_udma_start(itv);
813 }
814 }
815
816 spin_unlock(&itv->dma_reg_lock);
817
818 /* If we've just handled a 'forced' vsync, it's safest to say it
819 * wasn't ours. Another device may have triggered it at just
820 * the right time.
821 */
822 return vsync_force ? IRQ_NONE : IRQ_HANDLED;
823}
824
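Because the PCI interrupt line can be shared, the return value of ivtv_irq_handler() matters: IRQ_NONE hands the interrupt back to the core, IRQ_HANDLED claims it. A bare-bones sketch of that contract with placeholder register accessors:

	/* Skeleton of a shared interrupt handler: read and acknowledge the
	 * status, then dispatch. my_read_status()/my_ack() are placeholders. */
	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;
		u32 status = my_read_status(dev);

		if (!status)
			return IRQ_NONE;	/* not our interrupt on a shared line */
		my_ack(dev, status);		/* acknowledge before handling */
		/* ... dispatch per status bit ... */
		return IRQ_HANDLED;
	}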
825void ivtv_unfinished_dma(unsigned long arg)
826{
827 struct ivtv *itv = (struct ivtv *)arg;
828
829 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
830 return;
831 IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
832
833 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
834 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
835 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
836 itv->cur_dma_stream = -1;
837 wake_up(&itv->dma_waitq);
838}
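ivtv_unfinished_dma() is the dma_timer callback armed with jiffies + HZ/10 before each transfer; the actual timer initialisation lives elsewhere in the driver and is not part of this hunk, so the following is only an illustrative sketch of the 2.6-era timer idiom it relies on:

	/* Illustrative only: how a DMA watchdog timer of this kind is typically
	 * armed. The timer itself is assumed to have been init_timer()'d once
	 * at probe time; completion paths call del_timer(). */
	static void arm_dma_watchdog(struct ivtv *itv)
	{
		itv->dma_timer.function = ivtv_unfinished_dma;	/* the callback above */
		itv->dma_timer.data     = (unsigned long)itv;	/* handed back as 'arg' */
		itv->dma_timer.expires  = jiffies + HZ / 10;	/* matches the timeout used above */
		add_timer(&itv->dma_timer);
	}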
diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h
new file mode 100644
index 000000000000..a43348a30309
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-irq.h
@@ -0,0 +1,26 @@
1/*
2 interrupt handling
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
23
24void ivtv_irq_work_handler(struct work_struct *work);
25void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
26void ivtv_unfinished_dma(unsigned long arg);
diff --git a/drivers/media/video/ivtv/ivtv-mailbox.c b/drivers/media/video/ivtv/ivtv-mailbox.c
new file mode 100644
index 000000000000..6ae42a3b03cc
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-mailbox.c
@@ -0,0 +1,360 @@
1/*
2 mailbox functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <stdarg.h>
23
24#include "ivtv-driver.h"
25#include "ivtv-mailbox.h"
26
27/* Firmware mailbox flags*/
28#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
29#define IVTV_MBOX_DRIVER_DONE 0x00000002
30#define IVTV_MBOX_DRIVER_BUSY 0x00000001
31#define IVTV_MBOX_FREE 0x00000000
32
33/* Firmware mailbox standard timeout */
34#define IVTV_API_STD_TIMEOUT 0x02000000
35
36#define API_CACHE (1 << 0) /* Allow the command to be stored in the cache */
37#define API_RESULT (1 << 1) /* Allow 1 second for this cmd to end */
38#define API_FAST_RESULT (3 << 1) /* Allow 0.1 second for this cmd to end */
39#define API_DMA (1 << 3) /* DMA mailbox, has special handling */
40#define API_NO_WAIT_MB (1 << 4) /* Command may not wait for a free mailbox */
41#define API_NO_WAIT_RES (1 << 5) /* Command may not wait for the result */
42
43struct ivtv_api_info {
44 int flags; /* Flags, see above */
45 const char *name; /* The name of the command */
46};
47
48#define API_ENTRY(x, f) [x] = { (f), #x }
49
50static const struct ivtv_api_info api_info[256] = {
51 /* MPEG encoder API */
52 API_ENTRY(CX2341X_ENC_PING_FW, API_FAST_RESULT),
53 API_ENTRY(CX2341X_ENC_START_CAPTURE, API_RESULT),
54 API_ENTRY(CX2341X_ENC_STOP_CAPTURE, API_RESULT),
55 API_ENTRY(CX2341X_ENC_SET_AUDIO_ID, API_CACHE),
56 API_ENTRY(CX2341X_ENC_SET_VIDEO_ID, API_CACHE),
57 API_ENTRY(CX2341X_ENC_SET_PCR_ID, API_CACHE),
58 API_ENTRY(CX2341X_ENC_SET_FRAME_RATE, API_CACHE),
59 API_ENTRY(CX2341X_ENC_SET_FRAME_SIZE, API_CACHE),
60 API_ENTRY(CX2341X_ENC_SET_BIT_RATE, API_CACHE),
61 API_ENTRY(CX2341X_ENC_SET_GOP_PROPERTIES, API_CACHE),
62 API_ENTRY(CX2341X_ENC_SET_ASPECT_RATIO, API_CACHE),
63 API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_MODE, API_CACHE),
64 API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_PROPS, API_CACHE),
65 API_ENTRY(CX2341X_ENC_SET_CORING_LEVELS, API_CACHE),
66 API_ENTRY(CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, API_CACHE),
67 API_ENTRY(CX2341X_ENC_SET_VBI_LINE, API_RESULT),
68 API_ENTRY(CX2341X_ENC_SET_STREAM_TYPE, API_CACHE),
69 API_ENTRY(CX2341X_ENC_SET_OUTPUT_PORT, API_CACHE),
70 API_ENTRY(CX2341X_ENC_SET_AUDIO_PROPERTIES, API_CACHE),
71 API_ENTRY(CX2341X_ENC_HALT_FW, API_FAST_RESULT),
72 API_ENTRY(CX2341X_ENC_GET_VERSION, API_FAST_RESULT),
73 API_ENTRY(CX2341X_ENC_SET_GOP_CLOSURE, API_CACHE),
74 API_ENTRY(CX2341X_ENC_GET_SEQ_END, API_RESULT),
75 API_ENTRY(CX2341X_ENC_SET_PGM_INDEX_INFO, API_FAST_RESULT),
76 API_ENTRY(CX2341X_ENC_SET_VBI_CONFIG, API_RESULT),
77 API_ENTRY(CX2341X_ENC_SET_DMA_BLOCK_SIZE, API_CACHE),
78 API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_10, API_FAST_RESULT),
79 API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_9, API_FAST_RESULT),
80 API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, API_DMA),
81 API_ENTRY(CX2341X_ENC_INITIALIZE_INPUT, API_RESULT),
82 API_ENTRY(CX2341X_ENC_SET_FRAME_DROP_RATE, API_CACHE),
83 API_ENTRY(CX2341X_ENC_PAUSE_ENCODER, API_RESULT),
84 API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB),
85 API_ENTRY(CX2341X_ENC_SET_COPYRIGHT, API_CACHE),
86 API_ENTRY(CX2341X_ENC_SET_EVENT_NOTIFICATION, API_RESULT),
87 API_ENTRY(CX2341X_ENC_SET_NUM_VSYNC_LINES, API_CACHE),
88 API_ENTRY(CX2341X_ENC_SET_PLACEHOLDER, API_CACHE),
89 API_ENTRY(CX2341X_ENC_MUTE_VIDEO, API_RESULT),
90 API_ENTRY(CX2341X_ENC_MUTE_AUDIO, API_RESULT),
91 API_ENTRY(CX2341X_ENC_SET_VERT_CROP_LINE, API_FAST_RESULT),
92 API_ENTRY(CX2341X_ENC_MISC, API_FAST_RESULT),
93 /* Obsolete PULLDOWN API command */
94 API_ENTRY(0xb1, API_CACHE),
95
96 /* MPEG decoder API */
97 API_ENTRY(CX2341X_DEC_PING_FW, API_FAST_RESULT),
98 API_ENTRY(CX2341X_DEC_START_PLAYBACK, API_RESULT),
99 API_ENTRY(CX2341X_DEC_STOP_PLAYBACK, API_RESULT),
100 API_ENTRY(CX2341X_DEC_SET_PLAYBACK_SPEED, API_RESULT),
101 API_ENTRY(CX2341X_DEC_STEP_VIDEO, API_RESULT),
102 API_ENTRY(CX2341X_DEC_SET_DMA_BLOCK_SIZE, API_CACHE),
103 API_ENTRY(CX2341X_DEC_GET_XFER_INFO, API_FAST_RESULT),
104 API_ENTRY(CX2341X_DEC_GET_DMA_STATUS, API_FAST_RESULT),
105 API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA),
106 API_ENTRY(CX2341X_DEC_PAUSE_PLAYBACK, API_RESULT),
107 API_ENTRY(CX2341X_DEC_HALT_FW, API_FAST_RESULT),
108 API_ENTRY(CX2341X_DEC_SET_STANDARD, API_CACHE),
109 API_ENTRY(CX2341X_DEC_GET_VERSION, API_FAST_RESULT),
110 API_ENTRY(CX2341X_DEC_SET_STREAM_INPUT, API_CACHE),
111 API_ENTRY(CX2341X_DEC_GET_TIMING_INFO, API_RESULT /*| API_NO_WAIT_RES*/),
112 API_ENTRY(CX2341X_DEC_SET_AUDIO_MODE, API_CACHE),
113 API_ENTRY(CX2341X_DEC_SET_EVENT_NOTIFICATION, API_RESULT),
114 API_ENTRY(CX2341X_DEC_SET_DISPLAY_BUFFERS, API_CACHE),
115 API_ENTRY(CX2341X_DEC_EXTRACT_VBI, API_RESULT),
116 API_ENTRY(CX2341X_DEC_SET_DECODER_SOURCE, API_FAST_RESULT),
117 API_ENTRY(CX2341X_DEC_SET_PREBUFFERING, API_CACHE),
118
119 /* OSD API */
120 API_ENTRY(CX2341X_OSD_GET_FRAMEBUFFER, API_FAST_RESULT),
121 API_ENTRY(CX2341X_OSD_GET_PIXEL_FORMAT, API_FAST_RESULT),
122 API_ENTRY(CX2341X_OSD_SET_PIXEL_FORMAT, API_CACHE),
123 API_ENTRY(CX2341X_OSD_GET_STATE, API_FAST_RESULT),
124 API_ENTRY(CX2341X_OSD_SET_STATE, API_CACHE),
125 API_ENTRY(CX2341X_OSD_GET_OSD_COORDS, API_FAST_RESULT),
126 API_ENTRY(CX2341X_OSD_SET_OSD_COORDS, API_CACHE),
127 API_ENTRY(CX2341X_OSD_GET_SCREEN_COORDS, API_FAST_RESULT),
128 API_ENTRY(CX2341X_OSD_SET_SCREEN_COORDS, API_CACHE),
129 API_ENTRY(CX2341X_OSD_GET_GLOBAL_ALPHA, API_FAST_RESULT),
130 API_ENTRY(CX2341X_OSD_SET_GLOBAL_ALPHA, API_CACHE),
131 API_ENTRY(CX2341X_OSD_SET_BLEND_COORDS, API_CACHE),
132 API_ENTRY(CX2341X_OSD_GET_FLICKER_STATE, API_FAST_RESULT),
133 API_ENTRY(CX2341X_OSD_SET_FLICKER_STATE, API_CACHE),
134 API_ENTRY(CX2341X_OSD_BLT_COPY, API_RESULT),
135 API_ENTRY(CX2341X_OSD_BLT_FILL, API_RESULT),
136 API_ENTRY(CX2341X_OSD_BLT_TEXT, API_RESULT),
137 API_ENTRY(CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, API_CACHE),
138 API_ENTRY(CX2341X_OSD_SET_CHROMA_KEY, API_CACHE),
139 API_ENTRY(CX2341X_OSD_GET_ALPHA_CONTENT_INDEX, API_FAST_RESULT),
140 API_ENTRY(CX2341X_OSD_SET_ALPHA_CONTENT_INDEX, API_CACHE)
141};
142
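API_ENTRY relies on C99 designated array initializers plus the stringizing operator, so each opcode lands at its own index with a printable name for the debug output; a self-contained miniature of the same technique with made-up command values:

	/* Miniature of the API_ENTRY technique: designated initializers keyed by
	 * opcode, with '#x' capturing the command name as a string. */
	struct cmd_info {
		int flags;
		const char *name;
	};

	#define ENTRY(x, f) [x] = { (f), #x }

	enum { CMD_PING = 0x80, CMD_HALT = 0xc3 };

	static const struct cmd_info table[256] = {
		ENTRY(CMD_PING, 1),
		ENTRY(CMD_HALT, 2),
	};
	/* table[CMD_PING].name == "CMD_PING" */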
143static int try_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int mb)
144{
145 u32 flags = readl(&mbdata->mbox[mb].flags);
146 int is_free = flags == IVTV_MBOX_FREE || (flags & IVTV_MBOX_FIRMWARE_DONE);
147
148 /* if the mailbox is free, then try to claim it */
149 if (is_free && !test_and_set_bit(mb, &mbdata->busy)) {
150 write_sync(IVTV_MBOX_DRIVER_BUSY, &mbdata->mbox[mb].flags);
151 return 1;
152 }
153 return 0;
154}
155
156/* Try to find a free mailbox. Note mailbox 0 is reserved for DMA and so is not
157 attempted here. */
158static int get_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int flags)
159{
160 unsigned long then = jiffies;
161 int i, mb;
162 int max_mbox = mbdata->max_mbox;
163 int retries = 100;
164
165 /* All slow commands use the same mailbox, serializing them and also
166 leaving the other mailbox free for simple fast commands. */
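	/* Assumption, noted for readability: API_FAST_RESULT presumably
	   combines API_FAST and API_RESULT, so this test matches commands
	   that want a result but are not marked fast. */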
167 if ((flags & API_FAST_RESULT) == API_RESULT)
168 max_mbox = 1;
169
170 /* find free non-DMA mailbox */
171 for (i = 0; i < retries; i++) {
172 for (mb = 1; mb <= max_mbox; mb++)
173 if (try_mailbox(itv, mbdata, mb))
174 return mb;
175
176 /* Sleep before a retry, if not atomic */
177 if (!(flags & API_NO_WAIT_MB)) {
178 if (jiffies - then > retries * HZ / 100)
179 break;
180 ivtv_sleep_timeout(HZ / 100, 0);
181 }
182 }
183 return -ENODEV;
184}
185
186static void write_mailbox(volatile struct ivtv_mailbox __iomem *mbox, int cmd, int args, u32 data[])
187{
188 int i;
189
190 write_sync(cmd, &mbox->cmd);
191 write_sync(IVTV_API_STD_TIMEOUT, &mbox->timeout);
192
193 for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
194 write_sync(data[i], &mbox->data[i]);
195
196 write_sync(IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY, &mbox->flags);
197}
198
199static void clear_all_mailboxes(struct ivtv *itv, struct ivtv_mailbox_data *mbdata)
200{
201 int i;
202
203 for (i = 0; i <= mbdata->max_mbox; i++) {
204 IVTV_DEBUG_WARN("Clearing mailbox %d: cmd 0x%08x flags 0x%08x\n",
205 i, readl(&mbdata->mbox[i].cmd), readl(&mbdata->mbox[i].flags));
206 write_sync(0, &mbdata->mbox[i].flags);
207 clear_bit(i, &mbdata->busy);
208 }
209}
210
211static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
212{
213 struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? &itv->enc_mbox : &itv->dec_mbox;
214 volatile struct ivtv_mailbox __iomem *mbox;
215 int api_timeout = HZ;
216 int flags, mb, i;
217 unsigned long then;
218
219 /* sanity checks */
220 if (NULL == mbdata) {
221 IVTV_ERR("No mailbox allocated\n");
222 return -ENODEV;
223 }
224 if (args < 0 || args > CX2341X_MBOX_MAX_DATA ||
225 cmd < 0 || cmd > 255 || api_info[cmd].name == NULL) {
226 IVTV_ERR("Invalid API call: cmd = 0x%02x, args = %d\n", cmd, args);
227 return -EINVAL;
228 }
229
230 IVTV_DEBUG_API("API Call: %s\n", api_info[cmd].name);
231
232 /* clear possibly uninitialized part of data array */
233 for (i = args; i < CX2341X_MBOX_MAX_DATA; i++)
234 data[i] = 0;
235
236 /* If this command was issued within the last 30 minutes and with identical
237 data, then just return 0 as there is no need to issue this command again.
238 Just an optimization to prevent unnecessary use of mailboxes. */
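	/* Note: only commands flagged API_CACHE ever get last_jiffies set
	   (see the API_CACHE handling further down), so in practice this
	   early return applies to cacheable commands only. */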
239 if (itv->api_cache[cmd].last_jiffies &&
240 jiffies - itv->api_cache[cmd].last_jiffies < HZ * 1800 &&
241 !memcmp(data, itv->api_cache[cmd].data, sizeof(itv->api_cache[cmd].data))) {
242 itv->api_cache[cmd].last_jiffies = jiffies;
243 return 0;
244 }
245
246 flags = api_info[cmd].flags;
247
248 if (flags & API_DMA) {
249 for (i = 0; i < 100; i++) {
250 mb = i % (mbdata->max_mbox + 1);
251 if (try_mailbox(itv, mbdata, mb)) {
252 write_mailbox(&mbdata->mbox[mb], cmd, args, data);
253 clear_bit(mb, &mbdata->busy);
254 return 0;
255 }
256 IVTV_DEBUG_WARN("%s: mailbox %d not free %08x\n",
257 api_info[cmd].name, mb, readl(&mbdata->mbox[mb].flags));
258 }
259 IVTV_WARN("Could not find free DMA mailbox for %s\n", api_info[cmd].name);
260 clear_all_mailboxes(itv, mbdata);
261 return -EBUSY;
262 }
263
264 if ((flags & API_FAST_RESULT) == API_FAST_RESULT)
265 api_timeout = HZ / 10;
266
267 mb = get_mailbox(itv, mbdata, flags);
268 if (mb < 0) {
269 IVTV_DEBUG_WARN("No free mailbox found (%s)\n", api_info[cmd].name);
270 clear_all_mailboxes(itv, mbdata);
271 return -EBUSY;
272 }
273 mbox = &mbdata->mbox[mb];
274 write_mailbox(mbox, cmd, args, data);
275 if (flags & API_CACHE) {
276 memcpy(itv->api_cache[cmd].data, data, sizeof(itv->api_cache[cmd].data));
277 itv->api_cache[cmd].last_jiffies = jiffies;
278 }
279 if ((flags & API_RESULT) == 0) {
280 clear_bit(mb, &mbdata->busy);
281 return 0;
282 }
283
284 /* Get results */
285 then = jiffies;
286
287 while (!(readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)) {
288 if (jiffies - then > api_timeout) {
289 IVTV_DEBUG_WARN("Could not get result (%s)\n", api_info[cmd].name);
290 /* reset the mailbox, but it is likely too late already */
291 write_sync(0, &mbox->flags);
292 clear_bit(mb, &mbdata->busy);
293 return -EIO;
294 }
295 if (flags & API_NO_WAIT_RES)
296 mdelay(1);
297 else
298 ivtv_sleep_timeout(HZ / 100, 0);
299 }
300 if (jiffies - then > HZ / 10)
301 IVTV_DEBUG_WARN("%s took %lu jiffies (%d per HZ)\n",
302 api_info[cmd].name, jiffies - then, HZ);
303
304 for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
305 data[i] = readl(&mbox->data[i]);
306 write_sync(0, &mbox->flags);
307 clear_bit(mb, &mbdata->busy);
308 return 0;
309}
310
311int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[])
312{
313 int res = ivtv_api_call(itv, cmd, args, data);
314
315 /* Allow a single retry, probably already too late though.
316 If there is no free mailbox then that is usually an indication
317 of a more serious problem. */
318 return (res == -EBUSY) ? ivtv_api_call(itv, cmd, args, data) : res;
319}
320
321int ivtv_api_func(void *priv, int cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
322{
323 return ivtv_api(priv, cmd, in, data);
324}
325
326int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...)
327{
328 va_list ap;
329 int i;
330
331 va_start(ap, args);
332 for (i = 0; i < args; i++) {
333 data[i] = va_arg(ap, u32);
334 }
335 va_end(ap);
336 return ivtv_api(itv, cmd, args, data);
337}
338
339int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...)
340{
341 u32 data[CX2341X_MBOX_MAX_DATA];
342 va_list ap;
343 int i;
344
345 va_start(ap, args);
346 for (i = 0; i < args; i++) {
347 data[i] = va_arg(ap, u32);
348 }
349 va_end(ap);
350 return ivtv_api(itv, cmd, args, data);
351}
352
353/* This one is for stuff that can't sleep: IRQ handlers, etc. */
354void ivtv_api_get_data(struct ivtv_mailbox_data *mbdata, int mb, u32 data[])
355{
356 int i;
357
358 for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
359 data[i] = readl(&mbdata->mbox[mb].data[i]);
360}
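
A hedged usage sketch, not part of the original file: the helper name below is hypothetical, while the two calls mirror argument lists that appear later in ivtv-streams.c.

/* Hypothetical illustration of the vararg wrappers above. */
static void example_api_usage(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	/* command whose result data is not needed: mute the video input */
	ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, 1);

	/* command whose result is read back: program index info (mask 7, max 400 entries) */
	ivtv_vapi_result(itv, data, CX2341X_ENC_SET_PGM_INDEX_INFO, 2, 7, 400);
}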
diff --git a/drivers/media/video/ivtv/ivtv-mailbox.h b/drivers/media/video/ivtv/ivtv-mailbox.h
new file mode 100644
index 000000000000..79b8aec14370
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-mailbox.h
@@ -0,0 +1,25 @@
1/*
2 mailbox functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21void ivtv_api_get_data(struct ivtv_mailbox_data *mbox, int mb, u32 data[]);
22int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[]);
23int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...);
24int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...);
25int ivtv_api_func(void *priv, int cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]);
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
new file mode 100644
index 000000000000..ccfcef1ad91a
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -0,0 +1,262 @@
1/*
2 buffer queues.
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include "ivtv-driver.h"
23#include "ivtv-streams.h"
24#include "ivtv-queue.h"
25#include "ivtv-mailbox.h"
26
27int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
28{
29 if (s->buf_size - buf->bytesused < copybytes)
30 copybytes = s->buf_size - buf->bytesused;
31 if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
32 return -EFAULT;
33 }
34 buf->bytesused += copybytes;
35 return copybytes;
36}
37
38void ivtv_buf_swap(struct ivtv_buffer *buf)
39{
40 int i;
41
42 for (i = 0; i < buf->bytesused; i += 4)
43 swab32s((u32 *)(buf->buf + i));
44}
45
46void ivtv_queue_init(struct ivtv_queue *q)
47{
48 INIT_LIST_HEAD(&q->list);
49 q->buffers = 0;
50 q->length = 0;
51 q->bytesused = 0;
52}
53
54void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
55{
56 unsigned long flags = 0;
57
58 /* clear the buffer if it is going to be enqueued to the free queue */
59 if (q == &s->q_free) {
60 buf->bytesused = 0;
61 buf->readpos = 0;
62 buf->b_flags = 0;
63 }
64 spin_lock_irqsave(&s->qlock, flags);
65 list_add_tail(&buf->list, &q->list);
66 q->buffers++;
67 q->length += s->buf_size;
68 q->bytesused += buf->bytesused - buf->readpos;
69 spin_unlock_irqrestore(&s->qlock, flags);
70}
71
72struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
73{
74 struct ivtv_buffer *buf = NULL;
75 unsigned long flags = 0;
76
77 spin_lock_irqsave(&s->qlock, flags);
78 if (!list_empty(&q->list)) {
79 buf = list_entry(q->list.next, struct ivtv_buffer, list);
80 list_del_init(q->list.next);
81 q->buffers--;
82 q->length -= s->buf_size;
83 q->bytesused -= buf->bytesused - buf->readpos;
84 }
85 spin_unlock_irqrestore(&s->qlock, flags);
86 return buf;
87}
88
89static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
90 struct ivtv_queue *to, int clear, int full)
91{
92 struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
93
94 list_move_tail(from->list.next, &to->list);
95 from->buffers--;
96 from->length -= s->buf_size;
97 from->bytesused -= buf->bytesused - buf->readpos;
98 /* special handling for q_free */
99 if (clear)
100 buf->bytesused = buf->readpos = buf->b_flags = 0;
101 else if (full) {
102 /* special handling for stolen buffers, assume
103 all bytes are used. */
104 buf->bytesused = s->buf_size;
105 buf->readpos = buf->b_flags = 0;
106 }
107 to->buffers++;
108 to->length += s->buf_size;
109 to->bytesused += buf->bytesused - buf->readpos;
110}
111
112/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
113 If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
114   If 'steal' != NULL, then buffers may also be taken from that queue if
115 needed.
116
117 The buffer is automatically cleared if it goes to the free queue. It is
118 also cleared if buffers need to be taken from the 'steal' queue and
119 the 'from' queue is the free queue.
120
121 When 'from' is q_free, then needed_bytes is compared to the total
122 available buffer length, otherwise needed_bytes is compared to the
123 bytesused value. For the 'steal' queue the total available buffer
124 length is always used.
125
126 -ENOMEM is returned if the buffers could not be obtained, 0 if all
127   buffers were obtained from the 'from' list, and otherwise the number
128   of stolen buffers is returned. */
129int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
130 struct ivtv_queue *to, int needed_bytes)
131{
132 unsigned long flags;
133 int rc = 0;
134 int from_free = from == &s->q_free;
135 int to_free = to == &s->q_free;
136 int bytes_available;
137
138 spin_lock_irqsave(&s->qlock, flags);
139 if (needed_bytes == 0) {
140 from_free = 1;
141 needed_bytes = from->length;
142 }
143
144 bytes_available = from_free ? from->length : from->bytesused;
145 bytes_available += steal ? steal->length : 0;
146
147 if (bytes_available < needed_bytes) {
148 spin_unlock_irqrestore(&s->qlock, flags);
149 return -ENOMEM;
150 }
151 if (from_free) {
152 u32 old_length = to->length;
153
154 while (to->length - old_length < needed_bytes) {
155 if (list_empty(&from->list))
156 from = steal;
157 if (from == steal)
158 rc++; /* keep track of 'stolen' buffers */
159 ivtv_queue_move_buf(s, from, to, 1, 0);
160 }
161 }
162 else {
163 u32 old_bytesused = to->bytesused;
164
165 while (to->bytesused - old_bytesused < needed_bytes) {
166 if (list_empty(&from->list))
167 from = steal;
168 if (from == steal)
169 rc++; /* keep track of 'stolen' buffers */
170 ivtv_queue_move_buf(s, from, to, to_free, rc);
171 }
172 }
173 spin_unlock_irqrestore(&s->qlock, flags);
174 return rc;
175}
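
A hedged sketch of using the 'steal' queue; the helper name is hypothetical and not from the original source, while q_free, q_full, q_predma and buffers_stolen are existing fields of struct ivtv_stream.

/* Hypothetical example: reserve 'needed' bytes of buffers on q_predma,
   stealing full buffers from q_full if q_free alone is too small. */
static int example_reserve_predma(struct ivtv_stream *s, int needed)
{
	int stolen = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, needed);

	if (stolen < 0)
		return stolen;		/* -ENOMEM: not enough buffers available */
	s->buffers_stolen = stolen;	/* non-zero means buffers were taken from q_full */
	return 0;
}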
176
177void ivtv_flush_queues(struct ivtv_stream *s)
178{
179 ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
180 ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
181 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
182 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
183}
184
185int ivtv_stream_alloc(struct ivtv_stream *s)
186{
187 struct ivtv *itv = s->itv;
188 int SGsize = sizeof(struct ivtv_SG_element) * s->buffers;
189 int i;
190
191 if (s->buffers == 0)
192 return 0;
193
194 IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
195 s->dma != PCI_DMA_NONE ? "DMA " : "",
196 s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
197
198 /* Allocate DMA SG Arrays */
199 if (s->dma != PCI_DMA_NONE) {
200 s->SGarray = (struct ivtv_SG_element *)kzalloc(SGsize, GFP_KERNEL);
201 if (s->SGarray == NULL) {
202 IVTV_ERR("Could not allocate SGarray for %s stream\n", s->name);
203 return -ENOMEM;
204 }
205 s->SG_length = 0;
206 s->SG_handle = pci_map_single(itv->dev, s->SGarray, SGsize, s->dma);
207 ivtv_stream_sync_for_cpu(s);
208 }
209
210 /* allocate stream buffers. Initially all buffers are in q_free. */
211 for (i = 0; i < s->buffers; i++) {
212 struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer), GFP_KERNEL);
213
214 if (buf == NULL)
215 break;
216 buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL);
217 if (buf->buf == NULL) {
218 kfree(buf);
219 break;
220 }
221 INIT_LIST_HEAD(&buf->list);
222 if (s->dma != PCI_DMA_NONE) {
223 buf->dma_handle = pci_map_single(s->itv->dev,
224 buf->buf, s->buf_size + 256, s->dma);
225 ivtv_buf_sync_for_cpu(s, buf);
226 }
227 ivtv_enqueue(s, buf, &s->q_free);
228 }
229 if (i == s->buffers)
230 return 0;
231 IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
232 ivtv_stream_free(s);
233 return -ENOMEM;
234}
235
236void ivtv_stream_free(struct ivtv_stream *s)
237{
238 struct ivtv_buffer *buf;
239
240 /* move all buffers to q_free */
241 ivtv_flush_queues(s);
242
243 /* empty q_free */
244 while ((buf = ivtv_dequeue(s, &s->q_free))) {
245 if (s->dma != PCI_DMA_NONE)
246 pci_unmap_single(s->itv->dev, buf->dma_handle,
247 s->buf_size + 256, s->dma);
248 kfree(buf->buf);
249 kfree(buf);
250 }
251
252 /* Free SG Array/Lists */
253 if (s->SGarray != NULL) {
254 if (s->SG_handle != IVTV_DMA_UNMAPPED) {
255 pci_unmap_single(s->itv->dev, s->SG_handle,
256 sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
257 s->SG_handle = IVTV_DMA_UNMAPPED;
258 }
259 s->SGarray = NULL;
260 s->SG_length = 0;
261 }
262}
diff --git a/drivers/media/video/ivtv/ivtv-queue.h b/drivers/media/video/ivtv/ivtv-queue.h
new file mode 100644
index 000000000000..903edd4b4381
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-queue.h
@@ -0,0 +1,64 @@
1/*
2 buffer queues.
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#define IVTV_DMA_UNMAPPED ((u32) -1)
23
24/* ivtv_buffer utility functions */
25static inline void ivtv_buf_sync_for_cpu(struct ivtv_stream *s, struct ivtv_buffer *buf)
26{
27 if (s->dma != PCI_DMA_NONE)
28 pci_dma_sync_single_for_cpu(s->itv->dev, buf->dma_handle,
29 s->buf_size + 256, s->dma);
30}
31
32static inline void ivtv_buf_sync_for_device(struct ivtv_stream *s, struct ivtv_buffer *buf)
33{
34 if (s->dma != PCI_DMA_NONE)
35 pci_dma_sync_single_for_device(s->itv->dev, buf->dma_handle,
36 s->buf_size + 256, s->dma);
37}
38
39int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes);
40void ivtv_buf_swap(struct ivtv_buffer *buf);
41
42/* ivtv_queue utility functions */
43void ivtv_queue_init(struct ivtv_queue *q);
44void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q);
45struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q);
46int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
47 struct ivtv_queue *to, int needed_bytes);
48void ivtv_flush_queues(struct ivtv_stream *s);
49
50/* ivtv_stream utility functions */
51int ivtv_stream_alloc(struct ivtv_stream *s);
52void ivtv_stream_free(struct ivtv_stream *s);
53
54static inline void ivtv_stream_sync_for_cpu(struct ivtv_stream *s)
55{
56 pci_dma_sync_single_for_cpu(s->itv->dev, s->SG_handle,
57 sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
58}
59
60static inline void ivtv_stream_sync_for_device(struct ivtv_stream *s)
61{
62 pci_dma_sync_single_for_device(s->itv->dev, s->SG_handle,
63 sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
64}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
new file mode 100644
index 000000000000..01a41a844a30
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -0,0 +1,977 @@
1/*
2 init/start/stop/exit stream functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
5 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22/* License: GPL
23 * Author: Kevin Thayer <nufan_wfk at yahoo dot com>
24 *
25 * This file will hold API related functions, both internal (firmware api)
26 * and external (v4l2, etc)
27 *
28 * -----
29 * MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com>
30 * and Takeru KOMORIYA<komoriya@paken.org>
31 *
32 * AVerMedia M179 GPIO info by Chris Pinkham <cpinkham@bc2va.org>
33 * using information provided by Jiun-Kuei Jung @ AVerMedia.
34 */
35
36#include "ivtv-driver.h"
37#include "ivtv-fileops.h"
38#include "ivtv-i2c.h"
39#include "ivtv-queue.h"
40#include "ivtv-mailbox.h"
41#include "ivtv-audio.h"
42#include "ivtv-video.h"
43#include "ivtv-vbi.h"
44#include "ivtv-ioctl.h"
45#include "ivtv-irq.h"
46#include "ivtv-streams.h"
47#include "ivtv-cards.h"
48
49static struct file_operations ivtv_v4l2_enc_fops = {
50 .owner = THIS_MODULE,
51 .read = ivtv_v4l2_read,
52 .write = ivtv_v4l2_write,
53 .open = ivtv_v4l2_open,
54 .ioctl = ivtv_v4l2_ioctl,
55 .release = ivtv_v4l2_close,
56 .poll = ivtv_v4l2_enc_poll,
57};
58
59static struct file_operations ivtv_v4l2_dec_fops = {
60 .owner = THIS_MODULE,
61 .read = ivtv_v4l2_read,
62 .write = ivtv_v4l2_write,
63 .open = ivtv_v4l2_open,
64 .ioctl = ivtv_v4l2_ioctl,
65 .release = ivtv_v4l2_close,
66 .poll = ivtv_v4l2_dec_poll,
67};
68
69static struct {
70 const char *name;
71 int vfl_type;
72 int minor_offset;
73 int dma, pio;
74 enum v4l2_buf_type buf_type;
75 struct file_operations *fops;
76} ivtv_stream_info[] = {
77 { /* IVTV_ENC_STREAM_TYPE_MPG */
78 "encoder MPEG",
79 VFL_TYPE_GRABBER, 0,
80 PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_VIDEO_CAPTURE,
81 &ivtv_v4l2_enc_fops
82 },
83 { /* IVTV_ENC_STREAM_TYPE_YUV */
84 "encoder YUV",
85 VFL_TYPE_GRABBER, IVTV_V4L2_ENC_YUV_OFFSET,
86 PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_VIDEO_CAPTURE,
87 &ivtv_v4l2_enc_fops
88 },
89 { /* IVTV_ENC_STREAM_TYPE_VBI */
90 "encoder VBI",
91 VFL_TYPE_VBI, 0,
92 PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_VBI_CAPTURE,
93 &ivtv_v4l2_enc_fops
94 },
95 { /* IVTV_ENC_STREAM_TYPE_PCM */
96 "encoder PCM audio",
97 VFL_TYPE_GRABBER, IVTV_V4L2_ENC_PCM_OFFSET,
98 PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_PRIVATE,
99 &ivtv_v4l2_enc_fops
100 },
101 { /* IVTV_ENC_STREAM_TYPE_RAD */
102 "encoder radio",
103 VFL_TYPE_RADIO, 0,
104 PCI_DMA_NONE, 1, V4L2_BUF_TYPE_PRIVATE,
105 &ivtv_v4l2_enc_fops
106 },
107 { /* IVTV_DEC_STREAM_TYPE_MPG */
108 "decoder MPEG",
109 VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET,
110 PCI_DMA_TODEVICE, 0, V4L2_BUF_TYPE_VIDEO_OUTPUT,
111 &ivtv_v4l2_dec_fops
112 },
113 { /* IVTV_DEC_STREAM_TYPE_VBI */
114 "decoder VBI",
115 VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET,
116 PCI_DMA_NONE, 1, V4L2_BUF_TYPE_VBI_CAPTURE,
117 &ivtv_v4l2_enc_fops
118 },
119 { /* IVTV_DEC_STREAM_TYPE_VOUT */
120 "decoder VOUT",
121 VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET,
122 PCI_DMA_NONE, 1, V4L2_BUF_TYPE_VBI_OUTPUT,
123 &ivtv_v4l2_dec_fops
124 },
125 { /* IVTV_DEC_STREAM_TYPE_YUV */
126 "decoder YUV",
127 VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET,
128 PCI_DMA_TODEVICE, 0, V4L2_BUF_TYPE_VIDEO_OUTPUT,
129 &ivtv_v4l2_dec_fops
130 }
131};
132
133static void ivtv_stream_init(struct ivtv *itv, int type)
134{
135 struct ivtv_stream *s = &itv->streams[type];
136 struct video_device *dev = s->v4l2dev;
137
138 /* we need to keep v4l2dev, so restore it afterwards */
139 memset(s, 0, sizeof(*s));
140 s->v4l2dev = dev;
141
142 /* initialize ivtv_stream fields */
143 s->itv = itv;
144 s->type = type;
145 s->name = ivtv_stream_info[type].name;
146
147 if (ivtv_stream_info[type].pio)
148 s->dma = PCI_DMA_NONE;
149 else
150 s->dma = ivtv_stream_info[type].dma;
151 s->buf_size = itv->stream_buf_size[type];
152 if (s->buf_size)
153 s->buffers = itv->options.megabytes[type] * 1024 * 1024 / s->buf_size;
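		/* illustrative numbers only: 2 MB of 32 kB buffers gives 64 buffers */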
154 spin_lock_init(&s->qlock);
155 init_waitqueue_head(&s->waitq);
156 s->id = -1;
157 s->SG_handle = IVTV_DMA_UNMAPPED;
158 ivtv_queue_init(&s->q_free);
159 ivtv_queue_init(&s->q_full);
160 ivtv_queue_init(&s->q_dma);
161 ivtv_queue_init(&s->q_predma);
162 ivtv_queue_init(&s->q_io);
163}
164
165static int ivtv_reg_dev(struct ivtv *itv, int type)
166{
167 struct ivtv_stream *s = &itv->streams[type];
168 int vfl_type = ivtv_stream_info[type].vfl_type;
169 int minor_offset = ivtv_stream_info[type].minor_offset;
170 int minor;
171
172 /* These four fields are always initialized. If v4l2dev == NULL, then
173 this stream is not in use. In that case no other fields but these
174 four can be used. */
175 s->v4l2dev = NULL;
176 s->itv = itv;
177 s->type = type;
178 s->name = ivtv_stream_info[type].name;
179
180 /* Check whether the radio is supported */
181 if (type == IVTV_ENC_STREAM_TYPE_RAD && !(itv->v4l2_cap & V4L2_CAP_RADIO))
182 return 0;
183 if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
184 return 0;
185
186 if (minor_offset >= 0)
187 /* card number + user defined offset + device offset */
188 minor = itv->num + ivtv_first_minor + minor_offset;
189 else
190 minor = -1;
191
192 /* User explicitly selected 0 buffers for these streams, so don't
193 create them. */
194 if (minor >= 0 && ivtv_stream_info[type].dma != PCI_DMA_NONE &&
195 itv->options.megabytes[type] == 0) {
196 IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
197 return 0;
198 }
199
200 ivtv_stream_init(itv, type);
201
202 /* allocate and initialize the v4l2 video device structure */
203 s->v4l2dev = video_device_alloc();
204 if (s->v4l2dev == NULL) {
205 IVTV_ERR("Couldn't allocate v4l2 video_device for %s\n", s->name);
206 return -ENOMEM;
207 }
208
209 s->v4l2dev->type = VID_TYPE_CAPTURE | VID_TYPE_TUNER | VID_TYPE_TELETEXT |
210 VID_TYPE_CLIPPING | VID_TYPE_SCALES | VID_TYPE_MPEG_ENCODER;
211 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
212 s->v4l2dev->type |= VID_TYPE_MPEG_DECODER;
213 }
214 snprintf(s->v4l2dev->name, sizeof(s->v4l2dev->name), "ivtv%d %s",
215 itv->num, s->name);
216
217 s->v4l2dev->minor = minor;
218 s->v4l2dev->dev = &itv->dev->dev;
219 s->v4l2dev->fops = ivtv_stream_info[type].fops;
220 s->v4l2dev->release = video_device_release;
221
222 if (minor >= 0) {
223 /* Register device. First try the desired minor, then any free one. */
224 if (video_register_device(s->v4l2dev, vfl_type, minor) &&
225 video_register_device(s->v4l2dev, vfl_type, -1)) {
226 IVTV_ERR("Couldn't register v4l2 device for %s minor %d\n",
227 s->name, minor);
228 video_device_release(s->v4l2dev);
229 s->v4l2dev = NULL;
230 return -ENOMEM;
231 }
232 }
233 else {
234 /* Don't register a 'hidden' stream (OSD) */
235 IVTV_INFO("Created framebuffer stream for %s\n", s->name);
236 return 0;
237 }
238
239 switch (vfl_type) {
240 case VFL_TYPE_GRABBER:
241 IVTV_INFO("Registered device video%d for %s (%d MB)\n",
242 s->v4l2dev->minor, s->name, itv->options.megabytes[type]);
243 break;
244 case VFL_TYPE_RADIO:
245 IVTV_INFO("Registered device radio%d for %s\n",
246 s->v4l2dev->minor - MINOR_VFL_TYPE_RADIO_MIN, s->name);
247 break;
248 case VFL_TYPE_VBI:
249 if (itv->options.megabytes[type])
250 IVTV_INFO("Registered device vbi%d for %s (%d MB)\n",
251 s->v4l2dev->minor - MINOR_VFL_TYPE_VBI_MIN,
252 s->name, itv->options.megabytes[type]);
253 else
254 IVTV_INFO("Registered device vbi%d for %s\n",
255 s->v4l2dev->minor - MINOR_VFL_TYPE_VBI_MIN, s->name);
256 break;
257 }
258 return 0;
259}
260
261/* Initialize v4l2 variables and register v4l2 devices */
262int ivtv_streams_setup(struct ivtv *itv)
263{
264 int type;
265
266 /* Setup V4L2 Devices */
267 for (type = 0; type < IVTV_MAX_STREAMS; type++) {
268 /* Register Device */
269 if (ivtv_reg_dev(itv, type))
270 break;
271
272 if (itv->streams[type].v4l2dev == NULL)
273 continue;
274
275 /* Allocate Stream */
276 if (ivtv_stream_alloc(&itv->streams[type]))
277 break;
278 }
279 if (type == IVTV_MAX_STREAMS) {
280 return 0;
281 }
282
283 /* One or more streams could not be initialized. Clean 'em all up. */
284 ivtv_streams_cleanup(itv);
285 return -ENOMEM;
286}
287
288/* Unregister v4l2 devices */
289void ivtv_streams_cleanup(struct ivtv *itv)
290{
291 int type;
292
293 /* Teardown all streams */
294 for (type = 0; type < IVTV_MAX_STREAMS; type++) {
295 struct video_device *vdev = itv->streams[type].v4l2dev;
296
297 itv->streams[type].v4l2dev = NULL;
298 if (vdev == NULL)
299 continue;
300
301 ivtv_stream_free(&itv->streams[type]);
302 /* Free Device */
303 if (vdev->minor == -1) /* 'Hidden' never registered stream (OSD) */
304 video_device_release(vdev);
305 else /* All others, just unregister. */
306 video_unregister_device(vdev);
307 }
308}
309
310static void ivtv_vbi_setup(struct ivtv *itv)
311{
312 int raw = itv->vbi.sliced_in->service_set == 0;
313 u32 data[CX2341X_MBOX_MAX_DATA];
314 int lines;
315 int i;
316
317 /* If Embed then streamtype must be Program */
318 /* TODO: should we really do this? */
319 if (0 && !raw && itv->vbi.insert_mpeg) {
320 itv->params.stream_type = 0;
321
322 /* assign stream type */
323 ivtv_vapi(itv, CX2341X_ENC_SET_STREAM_TYPE, 1, itv->params.stream_type);
324 }
325
326 /* Reset VBI */
327 ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0);
328
329 if (itv->is_60hz) {
330 itv->vbi.count = 12;
331 itv->vbi.start[0] = 10;
332 itv->vbi.start[1] = 273;
333 } else { /* PAL/SECAM */
334 itv->vbi.count = 18;
335 itv->vbi.start[0] = 6;
336 itv->vbi.start[1] = 318;
337 }
338
339 /* setup VBI registers */
340 itv->video_dec_func(itv, VIDIOC_S_FMT, &itv->vbi.in);
341
342 /* determine number of lines and total number of VBI bytes.
343 A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
344 The '- 1' byte is probably an unused U or V byte. Or something...
345 A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal
346 header, 42 data bytes + checksum (to be confirmed) */
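	/* Spelled out (assuming a 1-byte checksum): 2 * 720 + 4 - 1 = 1443 and
	   4 + 4 + 42 + 1 = 51. */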
347 if (raw) {
348 lines = itv->vbi.count * 2;
349 } else {
350 lines = itv->is_60hz ? 24 : 38;
351 if (itv->is_60hz && (itv->hw_flags & IVTV_HW_CX25840))
352 lines += 2;
353 }
354
355 itv->vbi.enc_size = lines * (raw ? itv->vbi.raw_size : itv->vbi.sliced_size);
356
357 /* Note: sliced vs raw flag doesn't seem to have any effect
358 TODO: check mode (0x02) value with older ivtv versions. */
359 data[0] = raw | 0x02 | (0xbd << 8);
360
361	/* A VBI interrupt arrives every X frames (frames as in 25 or 30 fps) */
362 data[1] = 1;
363 /* The VBI frames are stored in a ringbuffer with this size (with a VBI frame as unit) */
364 data[2] = raw ? 4 : 8;
365 /* The start/stop codes determine which VBI lines end up in the raw VBI data area.
366 The codes are from table 24 in the saa7115 datasheet. Each raw/sliced/video line
367 is framed with codes FF0000XX where XX is the SAV/EAV (Start/End of Active Video)
368 code. These values for raw VBI are obtained from a driver disassembly. The sliced
369	   start/stop codes were deduced from this, but they do not appear in the driver.
370 Other code pairs that I found are: 0x250E6249/0x13545454 and 0x25256262/0x38137F54.
371 However, I have no idea what these values are for. */
372 if (itv->hw_flags & IVTV_HW_CX25840) {
373 /* Setup VBI for the cx25840 digitizer */
374 if (raw) {
375 data[3] = 0x20602060;
376 data[4] = 0x30703070;
377 } else {
378 data[3] = 0xB0F0B0F0;
379 data[4] = 0xA0E0A0E0;
380 }
381 /* Lines per frame */
382 data[5] = lines;
383 /* bytes per line */
384 data[6] = (raw ? itv->vbi.raw_size : itv->vbi.sliced_size);
385 } else {
386 /* Setup VBI for the saa7115 digitizer */
387 if (raw) {
388 data[3] = 0x25256262;
389 data[4] = 0x387F7F7F;
390 } else {
391 data[3] = 0xABABECEC;
392 data[4] = 0xB6F1F1F1;
393 }
394 /* Lines per frame */
395 data[5] = lines;
396 /* bytes per line */
397 data[6] = itv->vbi.enc_size / lines;
398 }
399
400 IVTV_DEBUG_INFO(
401 "Setup VBI API header 0x%08x pkts %d buffs %d ln %d sz %d\n",
402 data[0], data[1], data[2], data[5], data[6]);
403
404 ivtv_api(itv, CX2341X_ENC_SET_VBI_CONFIG, 7, data);
405
406 /* returns the VBI encoder memory area. */
407 itv->vbi.enc_start = data[2];
408 itv->vbi.fpi = data[0];
409 if (!itv->vbi.fpi)
410 itv->vbi.fpi = 1;
411
412 IVTV_DEBUG_INFO("Setup VBI start 0x%08x frames %d fpi %d lines 0x%08x\n",
413 itv->vbi.enc_start, data[1], itv->vbi.fpi, itv->digitizer);
414
415 /* select VBI lines.
416 Note that the sliced argument seems to have no effect. */
417 for (i = 2; i <= 24; i++) {
418 int valid;
419
420 if (itv->is_60hz) {
421 valid = i >= 10 && i < 22;
422 } else {
423 valid = i >= 6 && i < 24;
424 }
425 ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, i - 1,
426 valid, 0 , 0, 0);
427 ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, (i - 1) | 0x80000000,
428 valid, 0, 0, 0);
429 }
430
431 /* Remaining VBI questions:
432 - Is it possible to select particular VBI lines only for inclusion in the MPEG
433 stream? Currently you can only get the first X lines.
434 - Is mixed raw and sliced VBI possible?
435 - What's the meaning of the raw/sliced flag?
436 - What's the meaning of params 2, 3 & 4 of the Select VBI command? */
437}
438
439int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
440{
441 u32 data[CX2341X_MBOX_MAX_DATA];
442 struct ivtv *itv = s->itv;
443 int captype = 0, subtype = 0;
444 int enable_passthrough = 0;
445
446 if (s->v4l2dev == NULL)
447 return -EINVAL;
448
449 IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);
450
451 switch (s->type) {
452 case IVTV_ENC_STREAM_TYPE_MPG:
453 captype = 0;
454 subtype = 3;
455
456 /* Stop Passthrough */
457 if (itv->output_mode == OUT_PASSTHROUGH) {
458 ivtv_passthrough_mode(itv, 0);
459 enable_passthrough = 1;
460 }
461 itv->mpg_data_received = itv->vbi_data_inserted = 0;
462 itv->dualwatch_jiffies = jiffies;
463 itv->dualwatch_stereo_mode = itv->params.audio_properties & 0x0300;
464 itv->search_pack_header = 0;
465 break;
466
467 case IVTV_ENC_STREAM_TYPE_YUV:
468 if (itv->output_mode == OUT_PASSTHROUGH) {
469 captype = 2;
470 subtype = 11; /* video+audio+decoder */
471 break;
472 }
473 captype = 1;
474 subtype = 1;
475 break;
476 case IVTV_ENC_STREAM_TYPE_PCM:
477 captype = 1;
478 subtype = 2;
479 break;
480 case IVTV_ENC_STREAM_TYPE_VBI:
481 captype = 1;
482 subtype = 4;
483
484 itv->vbi.frame = 0;
485 itv->vbi.inserted_frame = 0;
486 memset(itv->vbi.sliced_mpeg_size,
487 0, sizeof(itv->vbi.sliced_mpeg_size));
488 break;
489 default:
490 return -EINVAL;
491 }
492 s->subtype = subtype;
493 s->buffers_stolen = 0;
494
495 /* mute/unmute video */
496 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 1 : 0);
497
498 /* Clear Streamoff flags in case left from last capture */
499 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
500
501 if (atomic_read(&itv->capturing) == 0) {
502 /* Always use frame based mode. Experiments have demonstrated that byte
503 stream based mode results in dropped frames and corruption. Not often,
504 but occasionally. Many thanks go to Leonard Orb who spent a lot of
505 effort and time trying to trace the cause of the drop outs. */
506 /* 1 frame per DMA */
507 /*ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 128, 0); */
508 ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 1, 1);
509
510 /* Stuff from Windows, we don't know what it is */
511 ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 0);
512 /* According to the docs, this should be correct. However, this is
513 untested. I don't dare enable this without having tested it.
514 Only very few old cards actually have this hardware combination.
515 ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1,
516 ((itv->hw_flags & IVTV_HW_SAA7114) && itv->is_60hz) ? 10001 : 0);
517 */
518 ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 3, !itv->has_cx23415);
519 ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 8, 0);
520 ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 4, 1);
521 ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);
522
523 /* assign placeholder */
524 ivtv_vapi(itv, CX2341X_ENC_SET_PLACEHOLDER, 12,
525 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
526
527 ivtv_vapi(itv, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, itv->digitizer, itv->digitizer);
528
529 /* Setup VBI */
530 if (itv->v4l2_cap & V4L2_CAP_VBI_CAPTURE) {
531 ivtv_vbi_setup(itv);
532 }
533
534 /* assign program index info. Mask 7: select I/P/B, Num_req: 400 max */
535 ivtv_vapi_result(itv, data, CX2341X_ENC_SET_PGM_INDEX_INFO, 2, 7, 400);
536 itv->pgm_info_offset = data[0];
537 itv->pgm_info_num = data[1];
538 itv->pgm_info_write_idx = 0;
539 itv->pgm_info_read_idx = 0;
540
541 IVTV_DEBUG_INFO("PGM Index at 0x%08x with %d elements\n",
542 itv->pgm_info_offset, itv->pgm_info_num);
543
544 /* Setup API for Stream */
545 cx2341x_update(itv, ivtv_api_func, NULL, &itv->params);
546 }
547
548 /* Vsync Setup */
549 if (itv->has_cx23415 && !test_and_set_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
550 /* event notification (on) */
551 ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_ENC_VIM_RST, -1);
552 ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
553 }
554
555 if (atomic_read(&itv->capturing) == 0) {
556 /* Clear all Pending Interrupts */
557 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
558
559 clear_bit(IVTV_F_I_EOS, &itv->i_flags);
560
561 /* Initialize Digitizer for Capture */
562 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
563
564 ivtv_sleep_timeout(HZ / 10, 0);
565 }
566
567 /* begin_capture */
568 if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype))
569 {
570 IVTV_DEBUG_WARN( "Error starting capture!\n");
571 return -EINVAL;
572 }
573
574 /* Start Passthrough */
575 if (enable_passthrough) {
576 ivtv_passthrough_mode(itv, 1);
577 }
578
579 if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
580 ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
581 else
582 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
583
584 /* you're live! sit back and await interrupts :) */
585 atomic_inc(&itv->capturing);
586 return 0;
587}
588
589static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
590{
591 u32 data[CX2341X_MBOX_MAX_DATA];
592 struct ivtv *itv = s->itv;
593 int datatype;
594
595 if (s->v4l2dev == NULL)
596 return -EINVAL;
597
598 IVTV_DEBUG_INFO("Setting some initial decoder settings\n");
599
600 /* disable VBI signals, if the MPEG stream contains VBI data,
601 then that data will be processed automatically for you. */
602 ivtv_disable_vbi(itv);
603
604 /* set audio mode to left/stereo for dual/stereo mode. */
605 ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
606
607 /* set number of internal decoder buffers */
608 ivtv_vapi(itv, CX2341X_DEC_SET_DISPLAY_BUFFERS, 1, 0);
609
610 /* prebuffering */
611 ivtv_vapi(itv, CX2341X_DEC_SET_PREBUFFERING, 1, 1);
612
613 /* extract from user packets */
614 ivtv_vapi_result(itv, data, CX2341X_DEC_EXTRACT_VBI, 1, 1);
615 itv->vbi.dec_start = data[0];
616
617 IVTV_DEBUG_INFO("Decoder VBI RE-Insert start 0x%08x size 0x%08x\n",
618 itv->vbi.dec_start, data[1]);
619
620 /* set decoder source settings */
621 /* Data type: 0 = mpeg from host,
622 1 = yuv from encoder,
623 2 = yuv_from_host */
624 switch (s->type) {
625 case IVTV_DEC_STREAM_TYPE_YUV:
626 datatype = itv->output_mode == OUT_PASSTHROUGH ? 1 : 2;
627 IVTV_DEBUG_INFO("Setup DEC YUV Stream data[0] = %d\n", datatype);
628 break;
629 case IVTV_DEC_STREAM_TYPE_MPG:
630 default:
631 datatype = 0;
632 break;
633 }
634 if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype,
635 itv->params.width, itv->params.height, itv->params.audio_properties)) {
636 IVTV_DEBUG_WARN("COULDN'T INITIALIZE DECODER SOURCE\n");
637 }
638 return 0;
639}
640
641int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset)
642{
643 struct ivtv *itv = s->itv;
644
645 if (s->v4l2dev == NULL)
646 return -EINVAL;
647
648 if (test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags))
649 return 0; /* already started */
650
651 IVTV_DEBUG_INFO("Starting decode stream %s (gop_offset %d)\n", s->name, gop_offset);
652
653 /* Clear Streamoff */
654 if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
655 /* Initialize Decoder */
656 /* Reprogram Decoder YUV Buffers for YUV */
657 write_reg(yuv_offset[0] >> 4, 0x82c);
658 write_reg((yuv_offset[0] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
659 write_reg(yuv_offset[0] >> 4, 0x834);
660 write_reg((yuv_offset[0] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
661
662 write_reg_sync(0x00000000 | (0x0c << 16) | (0x0b << 8), 0x2d24);
663
664 write_reg_sync(0x00108080, 0x2898);
665 /* Enable YUV decoder output */
666 write_reg_sync(0x01, IVTV_REG_VDM);
667 }
668
669 ivtv_setup_v4l2_decode_stream(s);
670
671 /* set dma size to 65536 bytes */
672 ivtv_vapi(itv, CX2341X_DEC_SET_DMA_BLOCK_SIZE, 1, 65536);
673
674 clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
675
676 /* Zero out decoder counters */
677 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[0]);
678 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[1]);
679 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[2]);
680 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[3]);
681 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[0]);
682 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[1]);
683 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[2]);
684 writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[3]);
685
686 /* turn on notification of dual/stereo mode change */
687 ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_DEC_AUD_MODE_CHG, -1);
688
689 /* start playback */
690 ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, gop_offset, 0);
691
692 /* Clear the following Interrupt mask bits for decoding */
693 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
694 IVTV_DEBUG_IRQ("IRQ Mask is now: 0x%08x\n", itv->irqmask);
695
696 /* you're live! sit back and await interrupts :) */
697 atomic_inc(&itv->decoding);
698 return 0;
699}
700
701void ivtv_stop_all_captures(struct ivtv *itv)
702{
703 int i;
704
705 for (i = IVTV_MAX_STREAMS - 1; i >= 0; i--) {
706 struct ivtv_stream *s = &itv->streams[i];
707
708 if (s->v4l2dev == NULL)
709 continue;
710 if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
711 ivtv_stop_v4l2_encode_stream(s, 0);
712 }
713 }
714}
715
716int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
717{
718 struct ivtv *itv = s->itv;
719 DECLARE_WAITQUEUE(wait, current);
720 int cap_type;
721 unsigned long then;
722 int stopmode;
723 u32 data[CX2341X_MBOX_MAX_DATA];
724
725 if (s->v4l2dev == NULL)
726 return -EINVAL;
727
728 /* This function assumes that you are allowed to stop the capture
729 and that we are actually capturing */
730
731 IVTV_DEBUG_INFO("Stop Capture\n");
732
733 if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
734 return 0;
735 if (atomic_read(&itv->capturing) == 0)
736 return 0;
737
738 switch (s->type) {
739 case IVTV_ENC_STREAM_TYPE_YUV:
740 cap_type = 1;
741 break;
742 case IVTV_ENC_STREAM_TYPE_PCM:
743 cap_type = 1;
744 break;
745 case IVTV_ENC_STREAM_TYPE_VBI:
746 cap_type = 1;
747 break;
748 case IVTV_ENC_STREAM_TYPE_MPG:
749 default:
750 cap_type = 0;
751 break;
752 }
753
754 /* Stop Capture Mode */
755 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
756 stopmode = 0;
757 } else {
758 stopmode = 1;
759 }
760
761 /* end_capture */
762 /* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
763 ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);
764
765 /* only run these if we're shutting down the last cap */
766 if (atomic_read(&itv->capturing) - 1 == 0) {
767 /* event notification (off) */
768 if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
769 /* type: 0 = refresh */
770 /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
771 ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
772 ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
773 }
774 }
775
776 then = jiffies;
777
778 if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
779 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
780 /* only run these if we're shutting down the last cap */
781 unsigned long duration;
782
783 then = jiffies;
784 add_wait_queue(&itv->cap_w, &wait);
785
786 set_current_state(TASK_INTERRUPTIBLE);
787
788 /* wait 2s for EOS interrupt */
789 while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) && jiffies < then + 2 * HZ) {
790 schedule_timeout(HZ / 100);
791 }
792
793 /* To convert jiffies to ms, we must multiply by 1000
794 * and divide by HZ. To avoid runtime division, we
795 * convert this to multiplication by 1000/HZ.
796 * Since integer division truncates, we get the best
797 * accuracy if we do a rounding calculation of the constant.
798 * Think of the case where HZ is 1024.
799 */
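			/* Worked example (illustrative): with HZ == 1024 a plain 1000 / HZ
			   truncates to 0, while (1000 + HZ / 2) / HZ = 1512 / 1024 = 1,
			   i.e. roughly one millisecond per jiffy (exactly 0.977 ms). */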
800 duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);
801
802 if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
803 IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
804 IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
805 } else {
806 IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
807 }
808 set_current_state(TASK_RUNNING);
809 remove_wait_queue(&itv->cap_w, &wait);
810 }
811
812 then = jiffies;
813 /* Make sure DMA is complete */
814 add_wait_queue(&s->waitq, &wait);
815 set_current_state(TASK_INTERRUPTIBLE);
816 do {
817 /* check if DMA is pending */
818 if ((s->type == IVTV_ENC_STREAM_TYPE_MPG) && /* MPG Only */
819 (read_reg(IVTV_REG_DMASTATUS) & 0x02)) {
820 /* Check for last DMA */
821 ivtv_vapi_result(itv, data, CX2341X_ENC_GET_SEQ_END, 2, 0, 0);
822
823 if (data[0] == 1) {
824 IVTV_DEBUG_DMA("%s: Last DMA of size 0x%08x\n", s->name, data[1]);
825 break;
826 }
827 } else if (read_reg(IVTV_REG_DMASTATUS) & 0x02) {
828 break;
829 }
830
831 ivtv_sleep_timeout(HZ / 100, 1);
832 } while (then + HZ * 2 > jiffies);
833
834 set_current_state(TASK_RUNNING);
835 remove_wait_queue(&s->waitq, &wait);
836 }
837
838 atomic_dec(&itv->capturing);
839
840 /* Clear capture and no-read bits */
841 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
842
843 if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
844 ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
845
846 if (atomic_read(&itv->capturing) > 0) {
847 return 0;
848 }
849
850 /* Set the following Interrupt mask bits for capture */
851 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
852
853 wake_up(&s->waitq);
854
855 return 0;
856}
857
858int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
859{
860 struct ivtv *itv = s->itv;
861
862 if (s->v4l2dev == NULL)
863 return -EINVAL;
864
865 if (s->type != IVTV_DEC_STREAM_TYPE_YUV && s->type != IVTV_DEC_STREAM_TYPE_MPG)
866 return -EINVAL;
867
868 if (!test_bit(IVTV_F_S_STREAMING, &s->s_flags))
869 return 0;
870
871 IVTV_DEBUG_INFO("Stop Decode at %llu, flags: %x\n", pts, flags);
872
873 /* Stop Decoder */
874 if (!(flags & VIDEO_CMD_STOP_IMMEDIATELY) || pts) {
875 u32 tmp = 0;
876
877 /* Wait until the decoder is no longer running */
878 if (pts) {
879 ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3,
880 0, (u32)(pts & 0xffffffff), (u32)(pts >> 32));
881 }
882 while (1) {
883 u32 data[CX2341X_MBOX_MAX_DATA];
884 ivtv_vapi_result(itv, data, CX2341X_DEC_GET_XFER_INFO, 0);
885 if (s->q_full.buffers + s->q_dma.buffers == 0) {
886 if (tmp == data[3])
887 break;
888 tmp = data[3];
889 }
890 if (ivtv_sleep_timeout(HZ/10, 1))
891 break;
892 }
893 }
894 ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, flags & VIDEO_CMD_STOP_TO_BLACK, 0, 0);
895
896 /* turn off notification of dual/stereo mode change */
897 ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_DEC_AUD_MODE_CHG, -1);
898
899 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
900
901 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
902 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
903 ivtv_flush_queues(s);
904
905 if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
906 /* disable VBI on TV-out */
907 ivtv_disable_vbi(itv);
908 }
909
910 /* decrement decoding */
911 atomic_dec(&itv->decoding);
912
913 set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
914 wake_up(&itv->event_waitq);
915
916 /* wake up wait queues */
917 wake_up(&s->waitq);
918
919 return 0;
920}
921
922int ivtv_passthrough_mode(struct ivtv *itv, int enable)
923{
924 struct ivtv_stream *yuv_stream = &itv->streams[IVTV_ENC_STREAM_TYPE_YUV];
925 struct ivtv_stream *dec_stream = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
926
927 if (yuv_stream->v4l2dev == NULL || dec_stream->v4l2dev == NULL)
928 return -EINVAL;
929
930 IVTV_DEBUG_INFO("ivtv ioctl: Select passthrough mode\n");
931
932 /* Prevent others from starting/stopping streams while we
933 initiate/terminate passthrough mode */
934 if (enable) {
935 if (itv->output_mode == OUT_PASSTHROUGH) {
936 return 0;
937 }
938 if (ivtv_set_output_mode(itv, OUT_PASSTHROUGH) != OUT_PASSTHROUGH)
939 return -EBUSY;
940
941 /* Fully initialize stream, and then unflag init */
942 set_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
943 set_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
944
945 /* Setup YUV Decoder */
946 ivtv_setup_v4l2_decode_stream(dec_stream);
947
948 /* Start Decoder */
949 ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1);
950 atomic_inc(&itv->decoding);
951
952 /* Setup capture if not already done */
953 if (atomic_read(&itv->capturing) == 0) {
954 cx2341x_update(itv, ivtv_api_func, NULL, &itv->params);
955 }
956
957 /* Start Passthrough Mode */
958 ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, 2, 11);
959 atomic_inc(&itv->capturing);
960 return 0;
961 }
962
963 if (itv->output_mode != OUT_PASSTHROUGH)
964 return 0;
965
966 /* Stop Passthrough Mode */
967 ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 11);
968 ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, 1, 0, 0);
969
970 atomic_dec(&itv->capturing);
971 atomic_dec(&itv->decoding);
972 clear_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
973 clear_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
974 itv->output_mode = OUT_NONE;
975
976 return 0;
977}
diff --git a/drivers/media/video/ivtv/ivtv-streams.h b/drivers/media/video/ivtv/ivtv-streams.h
new file mode 100644
index 000000000000..8597b75384a7
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-streams.h
@@ -0,0 +1,31 @@
1/*
2 init/start/stop/exit stream functions
3 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21int ivtv_streams_setup(struct ivtv *itv);
22void ivtv_streams_cleanup(struct ivtv *itv);
23
24/* Capture related */
25int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s);
26int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end);
27int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset);
28int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts);
29
30void ivtv_stop_all_captures(struct ivtv *itv);
31int ivtv_passthrough_mode(struct ivtv *itv, int enable);
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c
new file mode 100644
index 000000000000..bd642e1aafc3
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-udma.c
@@ -0,0 +1,200 @@
1/*
2 User DMA
3
4 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
5 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
6 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include "ivtv-driver.h"
24#include "ivtv-streams.h"
25#include "ivtv-udma.h"
26
27void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
28{
29 dma_page->uaddr = first & PAGE_MASK;
30 dma_page->offset = first & ~PAGE_MASK;
31 dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK);
32 dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
33 dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT;
34 dma_page->page_count = dma_page->last - dma_page->first + 1;
35 if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;
36}
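
/* Worked example (illustrative only, assuming 4 kB pages, PAGE_SHIFT == 12):
   first = 0x12345234 and size = 0x3000 give uaddr = 0x12345000, offset = 0x234,
   first page 0x12345, last page 0x12348, page_count = 4 and tail = 0x234 bytes
   used in the last page; (4096 - 0x234) + 2 * 4096 + 0x234 = 0x3000 bytes total. */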
37
38int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
39{
40 int i, offset;
41
42 offset = dma_page->offset;
43
44 /* Fill SG Array with new values */
45 for (i = 0; i < dma_page->page_count; i++) {
46 if (i == dma_page->page_count - 1) {
47 dma->SGlist[map_offset].length = dma_page->tail;
48 }
49 else {
50 dma->SGlist[map_offset].length = PAGE_SIZE - offset;
51 }
52 dma->SGlist[map_offset].offset = offset;
53 dma->SGlist[map_offset].page = dma->map[map_offset];
54 offset = 0;
55 map_offset++;
56 }
57 return map_offset;
58}
59
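/* Fills the card's SG array from the mapped scatterlist: the destination
   address starts at buffer_offset and, once 'split' bytes have been consumed,
   jumps to buffer_offset_2. The split value of -1 passed by ivtv_udma_setup()
   below effectively disables that jump. */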
60void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
61 int i;
62 struct scatterlist *sg;
63
64 for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg++) {
65 dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
66 dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
67 dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
68 buffer_offset += sg_dma_len(sg);
69
70 split -= sg_dma_len(sg);
71 if (split == 0)
72 buffer_offset = buffer_offset_2;
73 }
74}
75
76/* User DMA Buffers */
77void ivtv_udma_alloc(struct ivtv *itv)
78{
79 if (itv->udma.SG_handle == 0) {
80 /* Map DMA Page Array Buffer */
81 itv->udma.SG_handle = pci_map_single(itv->dev, itv->udma.SGarray,
82 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
83 ivtv_udma_sync_for_cpu(itv);
84 }
85}
86
87int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
88 void __user *userbuf, int size_in_bytes)
89{
90 struct ivtv_dma_page_info user_dma;
91 struct ivtv_user_dma *dma = &itv->udma;
92 int err;
93
94 IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
95
96 /* Still in USE */
97 if (dma->SG_length || dma->page_count) {
98 IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
99 dma->SG_length, dma->page_count);
100 return -EBUSY;
101 }
102
103 ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);
104
105 if (user_dma.page_count <= 0) {
106 IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
107 user_dma.page_count, size_in_bytes, user_dma.offset);
108 return -EINVAL;
109 }
110
111 /* Get user pages for DMA Xfer */
112 down_read(&current->mm->mmap_sem);
113 err = get_user_pages(current, current->mm,
114 user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
115 up_read(&current->mm->mmap_sem);
116
117 if (user_dma.page_count != err) {
118 IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
119 err, user_dma.page_count);
120 return -EINVAL;
121 }
122
123 dma->page_count = user_dma.page_count;
124
125 /* Fill SG List with new values */
126 ivtv_udma_fill_sg_list(dma, &user_dma, 0);
127
128 /* Map SG List */
129 dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
130
131 /* Fill SG Array with new values */
132 ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
133
134 /* Tag SG Array with Interrupt Bit */
135 dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
136
137 ivtv_udma_sync_for_device(itv);
138 return dma->page_count;
139}
140
141void ivtv_udma_unmap(struct ivtv *itv)
142{
143 struct ivtv_user_dma *dma = &itv->udma;
144 int i;
145
146 IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");
147
148 /* Nothing to free */
149 if (dma->page_count == 0)
150 return;
151
152 /* Unmap Scatterlist */
153 if (dma->SG_length) {
154 pci_unmap_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
155 dma->SG_length = 0;
156 }
157 /* sync DMA */
158 ivtv_udma_sync_for_cpu(itv);
159
160 /* Release User Pages */
161 for (i = 0; i < dma->page_count; i++) {
162 put_page(dma->map[i]);
163 }
164 dma->page_count = 0;
165}
166
167void ivtv_udma_free(struct ivtv *itv)
168{
169 /* Unmap SG Array */
170 if (itv->udma.SG_handle) {
171 pci_unmap_single(itv->dev, itv->udma.SG_handle,
172 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
173 }
174
175 /* Unmap Scatterlist */
176 if (itv->udma.SG_length) {
177 pci_unmap_sg(itv->dev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
178 }
179}
180
181void ivtv_udma_start(struct ivtv *itv)
182{
183 IVTV_DEBUG_DMA("start UDMA\n");
184 write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
185 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
186 set_bit(IVTV_F_I_DMA, &itv->i_flags);
187 set_bit(IVTV_F_I_UDMA, &itv->i_flags);
188}
189
190void ivtv_udma_prepare(struct ivtv *itv)
191{
192 unsigned long flags;
193
194 spin_lock_irqsave(&itv->dma_reg_lock, flags);
195 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
196 ivtv_udma_start(itv);
197 else
198 set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
199 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
200}
diff --git a/drivers/media/video/ivtv/ivtv-udma.h b/drivers/media/video/ivtv/ivtv-udma.h
new file mode 100644
index 000000000000..e131bccedec0
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-udma.h
@@ -0,0 +1,43 @@
1/*
2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2006-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* User DMA functions */
22void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size);
23int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset);
24void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split);
25int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
26 void __user *userbuf, int size_in_bytes);
27void ivtv_udma_unmap(struct ivtv *itv);
28void ivtv_udma_free(struct ivtv *itv);
29void ivtv_udma_alloc(struct ivtv *itv);
30void ivtv_udma_prepare(struct ivtv *itv);
31void ivtv_udma_start(struct ivtv *itv);
32
33static inline void ivtv_udma_sync_for_device(struct ivtv *itv)
34{
35 pci_dma_sync_single_for_device((struct pci_dev *)itv->dev, itv->udma.SG_handle,
36 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
37}
38
39static inline void ivtv_udma_sync_for_cpu(struct ivtv *itv)
40{
41 pci_dma_sync_single_for_cpu((struct pci_dev *)itv->dev, itv->udma.SG_handle,
42 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
43}
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
new file mode 100644
index 000000000000..5efa5a867818
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -0,0 +1,538 @@
1/*
2 Vertical Blank Interval support functions
3 Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include "ivtv-driver.h"
21#include "ivtv-video.h"
22#include "ivtv-vbi.h"
23#include "ivtv-ioctl.h"
24#include "ivtv-queue.h"
25
26static int odd_parity(u8 c)
27{
28 c ^= (c >> 4);
29 c ^= (c >> 2);
30 c ^= (c >> 1);
31
32 return c & 1;
33}
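/*
 * Illustrative note (not driver code): the fold above is the XOR of all
 * eight bits of 'c', i.e. its parity.  Line-21 closed-caption bytes carry
 * odd parity, so a valid byte folds to 1.  A minimal standalone check,
 * assuming only the C standard library:
 */
#include <stdio.h>

static int odd_parity_ref(unsigned char c)
{
	int ones = 0, i;

	for (i = 0; i < 8; i++)
		ones += (c >> i) & 1;
	return ones & 1;	/* 1 => odd parity, byte is acceptable */
}

int main(void)
{
	/* 0xC1 ('A' plus parity bit) is valid; plain 0x41 is not. */
	printf("0xC1 -> %d, 0x41 -> %d\n", odd_parity_ref(0xC1), odd_parity_ref(0x41));
	return 0;
}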
34
35static void passthrough_vbi_data(struct ivtv *itv, int cnt)
36{
37 int wss = 0;
38 u8 cc[4] = { 0x80, 0x80, 0x80, 0x80 };
39 u8 vps[13];
40 int found_cc = 0;
41 int found_wss = 0;
42 int found_vps = 0;
43 int cc_pos = itv->vbi.cc_pos;
44 int i;
45
46 for (i = 0; i < cnt; i++) {
47 struct v4l2_sliced_vbi_data *d = itv->vbi.sliced_dec_data + i;
48
49 if (d->id == V4L2_SLICED_CAPTION_525 && d->line == 21) {
50 found_cc = 1;
51 if (d->field) {
52 cc[2] = d->data[0];
53 cc[3] = d->data[1];
54 } else {
55 cc[0] = d->data[0];
56 cc[1] = d->data[1];
57 }
58 }
59 else if (d->id == V4L2_SLICED_VPS && d->line == 16 && d->field == 0) {
60 memcpy(vps, d->data, sizeof(vps));
61 found_vps = 1;
62 }
63 else if (d->id == V4L2_SLICED_WSS_625 && d->line == 23 && d->field == 0) {
64 wss = d->data[0] | d->data[1] << 8;
65 found_wss = 1;
66 }
67 }
68
69 if (itv->vbi.wss_found != found_wss || itv->vbi.wss != wss) {
70 itv->vbi.wss = wss;
71 itv->vbi.wss_found = found_wss;
72 set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
73 }
74
75 if (found_vps || itv->vbi.vps_found) {
76 itv->vbi.vps[0] = vps[2];
77 itv->vbi.vps[1] = vps[8];
78 itv->vbi.vps[2] = vps[9];
79 itv->vbi.vps[3] = vps[10];
80 itv->vbi.vps[4] = vps[11];
81 itv->vbi.vps_found = found_vps;
82 set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
83 }
84
85 if (found_cc && cc_pos < sizeof(itv->vbi.cc_data_even)) {
86 itv->vbi.cc_data_odd[cc_pos] = cc[0];
87 itv->vbi.cc_data_odd[cc_pos + 1] = cc[1];
88 itv->vbi.cc_data_even[cc_pos] = cc[2];
89 itv->vbi.cc_data_even[cc_pos + 1] = cc[3];
90 itv->vbi.cc_pos = cc_pos + 2;
91 set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
92 }
93}
94
95static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
96{
97 int line = 0;
98 int i;
99 u32 linemask[2] = { 0, 0 };
100 unsigned short size;
101 static const u8 mpeg_hdr_data[] = {
102 0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
103 0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
104 0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
105 0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
106 };
107 const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
108 int idx = itv->vbi.frame % IVTV_VBI_FRAMES;
109 u8 *dst = &itv->vbi.sliced_mpeg_data[idx][0];
110
111 for (i = 0; i < lines; i++) {
112 int f, l;
113
114 if (itv->vbi.sliced_data[i].id == 0)
115 continue;
116
117 l = itv->vbi.sliced_data[i].line - 6;
118 f = itv->vbi.sliced_data[i].field;
119 if (f)
120 l += 18;
121 if (l < 32)
122 linemask[0] |= (1 << l);
123 else
124 linemask[1] |= (1 << (l - 32));
125 dst[sd + 12 + line * 43] = service2vbi(itv->vbi.sliced_data[i].id);
126 memcpy(dst + sd + 12 + line * 43 + 1, itv->vbi.sliced_data[i].data, 42);
127 line++;
128 }
129 memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
130 if (line == 36) {
131 /* All lines are used, so there is no space for the linemask
132 (the max size of the VBI data is 36 * 43 + 4 bytes).
133 So in this case we use the magic number 'ITV0'. */
134 memcpy(dst + sd, "ITV0", 4);
135 memcpy(dst + sd + 4, dst + sd + 12, line * 43);
136 size = 4 + ((43 * line + 3) & ~3);
137 } else {
138 memcpy(dst + sd, "itv0", 4);
139 memcpy(dst + sd + 4, &linemask[0], 8);
140 size = 12 + ((43 * line + 3) & ~3);
141 }
142 dst[4+16] = (size + 10) >> 8;
143 dst[5+16] = (size + 10) & 0xff;
144 dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6);
145 dst[10+16] = (pts_stamp >> 22) & 0xff;
146 dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff);
147 dst[12+16] = (pts_stamp >> 7) & 0xff;
148 dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1);
149 itv->vbi.sliced_mpeg_size[idx] = sd + size;
150}
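/*
 * Illustrative sketch (not driver code): how the payload size written by
 * copy_vbi_data() follows from the 'itv0'/'ITV0' header choice above.
 * The constants 43 (bytes per sliced line) and 36 (maximum lines) are
 * taken from the code above; the helper name is hypothetical.
 */
#include <stddef.h>

static size_t sliced_vbi_payload_size(int lines)
{
	if (lines == 36)	/* 'ITV0': no room left for the 8-byte linemask */
		return 4 + ((43 * lines + 3) & ~3);
	/* 'itv0': 4-byte magic plus two 32-bit linemask words */
	return 12 + ((43 * lines + 3) & ~3);
}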
151
152static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p)
153{
154 u32 linemask[2];
155 int i, l, id2;
156 int line = 0;
157
158 if (!memcmp(p, "itv0", 4)) {
159 memcpy(linemask, p + 4, 8);
160 p += 12;
161 } else if (!memcmp(p, "ITV0", 4)) {
162 linemask[0] = 0xffffffff;
163 linemask[1] = 0xf;
164 p += 4;
165 } else {
166 /* unknown VBI data stream */
167 return 0;
168 }
169 for (i = 0; i < 36; i++) {
170 int err = 0;
171
172 if (i < 32 && !(linemask[0] & (1 << i)))
173 continue;
174 if (i >= 32 && !(linemask[1] & (1 << (i - 32))))
175 continue;
176 id2 = *p & 0xf;
177 switch (id2) {
178 case IVTV_SLICED_TYPE_TELETEXT_B:
179 id2 = V4L2_SLICED_TELETEXT_B;
180 break;
181 case IVTV_SLICED_TYPE_CAPTION_525:
182 id2 = V4L2_SLICED_CAPTION_525;
183 err = !odd_parity(p[1]) || !odd_parity(p[2]);
184 break;
185 case IVTV_SLICED_TYPE_VPS:
186 id2 = V4L2_SLICED_VPS;
187 break;
188 case IVTV_SLICED_TYPE_WSS_625:
189 id2 = V4L2_SLICED_WSS_625;
190 break;
191 default:
192 id2 = 0;
193 break;
194 }
195 if (err == 0) {
196 l = (i < 18) ? i + 6 : i - 18 + 6;
197 itv->vbi.sliced_dec_data[line].line = l;
198 itv->vbi.sliced_dec_data[line].field = i >= 18;
199 itv->vbi.sliced_dec_data[line].id = id2;
200 memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42);
201 line++;
202 }
203 p += 43;
204 }
205 while (line < 36) {
206 itv->vbi.sliced_dec_data[line].id = 0;
207 itv->vbi.sliced_dec_data[line].line = 0;
208 itv->vbi.sliced_dec_data[line].field = 0;
209 line++;
210 }
211 return line * sizeof(itv->vbi.sliced_dec_data[0]);
212}
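/*
 * Illustrative sketch (not driver code): the 36 possible sliced lines are
 * tracked in a two-word bitmask, word 0 covering lines 0..31 and word 1
 * covering lines 32..35, exactly as tested in the loop above.  A
 * hypothetical standalone helper:
 */
#include <stdint.h>

static int vbi_line_present(const uint32_t linemask[2], int i)
{
	if (i < 32)
		return (linemask[0] >> i) & 1;
	return (linemask[1] >> (i - 32)) & 1;
}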
213
214ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count)
215{
216 /* Should be a __user pointer, but sparse doesn't parse this bit correctly. */
217 const struct v4l2_sliced_vbi_data *p = (const struct v4l2_sliced_vbi_data *)ubuf;
218 u8 cc[4] = { 0x80, 0x80, 0x80, 0x80 };
219 int found_cc = 0;
220 int cc_pos = itv->vbi.cc_pos;
221
222 if (itv->vbi.service_set_out == 0)
223 return -EPERM;
224
225 while (count >= sizeof(struct v4l2_sliced_vbi_data)) {
226 switch (p->id) {
227 case V4L2_SLICED_CAPTION_525:
228 if (p->id == V4L2_SLICED_CAPTION_525 &&
229 p->line == 21 &&
230 (itv->vbi.service_set_out &
231 V4L2_SLICED_CAPTION_525) == 0) {
232 break;
233 }
234 found_cc = 1;
235 if (p->field) {
236 cc[2] = p->data[0];
237 cc[3] = p->data[1];
238 } else {
239 cc[0] = p->data[0];
240 cc[1] = p->data[1];
241 }
242 break;
243
244 case V4L2_SLICED_VPS:
245 if (p->line == 16 && p->field == 0 &&
246 (itv->vbi.service_set_out & V4L2_SLICED_VPS)) {
247 itv->vbi.vps[0] = p->data[2];
248 itv->vbi.vps[1] = p->data[8];
249 itv->vbi.vps[2] = p->data[9];
250 itv->vbi.vps[3] = p->data[10];
251 itv->vbi.vps[4] = p->data[11];
252 itv->vbi.vps_found = 1;
253 set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
254 }
255 break;
256
257 case V4L2_SLICED_WSS_625:
258 if (p->line == 23 && p->field == 0 &&
259 (itv->vbi.service_set_out & V4L2_SLICED_WSS_625)) {
260 /* No lock needed for WSS */
261 itv->vbi.wss = p->data[0] | (p->data[1] << 8);
262 itv->vbi.wss_found = 1;
263 set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
264 }
265 break;
266
267 default:
268 break;
269 }
270 count -= sizeof(*p);
271 p++;
272 }
273
274 if (found_cc && cc_pos < sizeof(itv->vbi.cc_data_even)) {
275 itv->vbi.cc_data_odd[cc_pos] = cc[0];
276 itv->vbi.cc_data_odd[cc_pos + 1] = cc[1];
277 itv->vbi.cc_data_even[cc_pos] = cc[2];
278 itv->vbi.cc_data_even[cc_pos + 1] = cc[3];
279 itv->vbi.cc_pos = cc_pos + 2;
280 set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
281 }
282
283 return (const char __user *)p - ubuf;
284}
285
286/* Compress the raw VBI format: removes the leading SAV codes and the surplus
 287 space after the field.
 288 Returns the new compressed size. */
289static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
290{
291 u32 line_size = itv->vbi.raw_decoder_line_size;
292 u32 lines = itv->vbi.count;
293 u8 sav1 = itv->vbi.raw_decoder_sav_odd_field;
294 u8 sav2 = itv->vbi.raw_decoder_sav_even_field;
295 u8 *q = buf;
296 u8 *p;
297 int i;
298
299 for (i = 0; i < lines; i++) {
300 p = buf + i * line_size;
301
302 /* Look for SAV code */
303 if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) {
304 break;
305 }
306 memcpy(q, p + 4, line_size - 4);
307 q += line_size - 4;
308 }
309 return lines * (line_size - 4);
310}
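/*
 * Illustrative sketch (not driver code): this compressor and the sliced
 * one below both key on the 4-byte SAV (Start of Active Video) marker
 * 0xFF 0x00 0x00 <sav>.  A standalone matcher, with a hypothetical name:
 */
static int is_sav_code(const unsigned char *p, unsigned char sav)
{
	return p[0] == 0xff && p[1] == 0x00 && p[2] == 0x00 && p[3] == sav;
}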
311
312
313/* Compress the sliced VBI format: all found sliced blocks are put next to one another.
 314 Returns the new compressed size. */
315static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
316{
317 u32 line_size = itv->vbi.sliced_decoder_line_size;
318 struct v4l2_decode_vbi_line vbi;
319 int i;
320
321 /* find the first valid line */
322 for (i = 0; i < size; i++, buf++) {
323 if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
324 break;
325 }
326
327 size -= i;
328 if (size < line_size) {
329 return line;
330 }
331 for (i = 0; i < size / line_size; i++) {
332 u8 *p = buf + i * line_size;
333
334 /* Look for SAV code */
335 if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) {
336 continue;
337 }
338 vbi.p = p + 4;
339 itv->video_dec_func(itv, VIDIOC_INT_DECODE_VBI_LINE, &vbi);
340 if (vbi.type) {
341 itv->vbi.sliced_data[line].id = vbi.type;
342 itv->vbi.sliced_data[line].field = vbi.is_second_field;
343 itv->vbi.sliced_data[line].line = vbi.line;
344 memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42);
345 line++;
346 }
347 }
348 return line;
349}
350
351void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf,
352 u64 pts_stamp, int streamtype)
353{
354 u8 *p = (u8 *) buf->buf;
355 u32 size = buf->bytesused;
356 int y;
357
358 /* Raw VBI data */
359 if (streamtype == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set == 0) {
360 u8 type;
361
362 ivtv_buf_swap(buf);
363
364 type = p[3];
365
366 size = buf->bytesused = compress_raw_buf(itv, p, size);
367
368 /* second field of the frame? */
369 if (type == itv->vbi.raw_decoder_sav_even_field) {
370 /* Dirty hack needed for backwards
371 compatibility of old VBI software. */
372 p += size - 4;
373 memcpy(p, &itv->vbi.frame, 4);
374 itv->vbi.frame++;
375 }
376 return;
377 }
378
379 /* Sliced VBI data with data insertion */
380 if (streamtype == IVTV_ENC_STREAM_TYPE_VBI) {
381 int lines;
382
383 ivtv_buf_swap(buf);
384
385 /* first field */
386 lines = compress_sliced_buf(itv, 0, p, size / 2,
387 itv->vbi.sliced_decoder_sav_odd_field);
388 /* second field */
389 /* Experimentation shows that the second half does not always begin
390 at the exact address. So start a bit earlier (hence 32). */
391 lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32,
392 itv->vbi.sliced_decoder_sav_even_field);
393 /* always return at least one empty line */
394 if (lines == 0) {
395 itv->vbi.sliced_data[0].id = 0;
396 itv->vbi.sliced_data[0].line = 0;
397 itv->vbi.sliced_data[0].field = 0;
398 lines = 1;
399 }
400 buf->bytesused = size = lines * sizeof(itv->vbi.sliced_data[0]);
401 memcpy(p, &itv->vbi.sliced_data[0], size);
402
403 if (itv->vbi.insert_mpeg) {
404 copy_vbi_data(itv, lines, pts_stamp);
405 }
406 itv->vbi.frame++;
407 return;
408 }
409
410 /* Sliced VBI re-inserted from an MPEG stream */
411 if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
412 /* If the size is not 4-byte aligned, then the starting address
413 for the swapping is also shifted. After swapping the data the
414 real start address of the VBI data is exactly 4 bytes after the
415 original start. It's a bit fiddly but it works like a charm.
416 Non-4-byte alignment happens when an lseek is done on the input
417 mpeg file to a non-4-byte aligned position. So on arrival here
418 the VBI data is also non-4-byte aligned. */
419 int offset = size & 3;
420 int cnt;
421
422 if (offset) {
423 p += 4 - offset;
424 }
425 /* Swap Buffer */
426 for (y = 0; y < size; y += 4) {
427 swab32s((u32 *)(p + y));
428 }
429
430 cnt = ivtv_convert_ivtv_vbi(itv, p + offset);
431 memcpy(buf->buf, itv->vbi.sliced_dec_data, cnt);
432 buf->bytesused = cnt;
433
434 passthrough_vbi_data(itv, cnt / sizeof(itv->vbi.sliced_dec_data[0]));
435 return;
436 }
437}
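/*
 * Illustrative sketch (not driver code): the decoder-VBI branch above
 * byte-swaps the buffer one 32-bit word at a time and, when the size is
 * not a multiple of 4, shifts the start before swapping and reads the
 * converted data 'offset' bytes later.  A standalone word swap using
 * plain byte moves instead of the kernel's swab32s():
 */
#include <stdint.h>
#include <stddef.h>

static void swap_vbi_words(uint8_t *p, size_t size)
{
	size_t y;

	for (y = 0; y + 4 <= size; y += 4) {
		uint8_t t0 = p[y], t1 = p[y + 1];

		p[y]     = p[y + 3];
		p[y + 1] = p[y + 2];
		p[y + 2] = t1;
		p[y + 3] = t0;
	}
}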
438
439void ivtv_disable_vbi(struct ivtv *itv)
440{
441 clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
442 clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
443 clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
444 ivtv_set_wss(itv, 0, 0);
445 ivtv_set_cc(itv, 0, 0, 0, 0, 0);
446 ivtv_set_vps(itv, 0, 0, 0, 0, 0, 0);
447 itv->vbi.vps_found = itv->vbi.wss_found = 0;
448 itv->vbi.wss = 0;
449 itv->vbi.cc_pos = 0;
450}
451
452
453void vbi_work_handler(struct ivtv *itv)
454{
455 struct v4l2_sliced_vbi_data data;
456
457 /* Lock */
458 if (itv->output_mode == OUT_PASSTHROUGH) {
459 /* Note: currently only the saa7115 is used in a PVR350,
460 so these commands are for now saa7115-specific. */
461 if (itv->is_50hz) {
462 data.id = V4L2_SLICED_WSS_625;
463 data.field = 0;
464
465 if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) {
466 ivtv_set_wss(itv, 1, data.data[0] & 0xf);
467 itv->vbi.wss_no_update = 0;
468 } else if (itv->vbi.wss_no_update == 4) {
469 ivtv_set_wss(itv, 1, 0x8); /* 4x3 full format */
470 } else {
471 itv->vbi.wss_no_update++;
472 }
473 }
474 else {
475 u8 c1 = 0, c2 = 0, c3 = 0, c4 = 0;
476 int mode = 0;
477
478 data.id = V4L2_SLICED_CAPTION_525;
479 data.field = 0;
480 if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) {
481 mode |= 1;
482 c1 = data.data[0];
483 c2 = data.data[1];
484 }
485 data.field = 1;
486 if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) {
487 mode |= 2;
488 c3 = data.data[0];
489 c4 = data.data[1];
490 }
491 if (mode) {
492 itv->vbi.cc_no_update = 0;
493 ivtv_set_cc(itv, mode, c1, c2, c3, c4);
494 } else if (itv->vbi.cc_no_update == 4) {
495 ivtv_set_cc(itv, 0, 0, 0, 0, 0);
496 } else {
497 itv->vbi.cc_no_update++;
498 }
499 }
500 return;
501 }
502
503 if (test_and_clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags)) {
504 /* Lock */
505 ivtv_set_wss(itv, itv->vbi.wss_found, itv->vbi.wss & 0xf);
506 }
507
508 if (test_and_clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags)) {
509 if (itv->vbi.cc_pos == 0) {
510 ivtv_set_cc(itv, 3, 0x80, 0x80, 0x80, 0x80);
511 }
512 while (itv->vbi.cc_pos) {
513 u8 cc_odd0 = itv->vbi.cc_data_odd[0];
514 u8 cc_odd1 = itv->vbi.cc_data_odd[1];
515 u8 cc_even0 = itv->vbi.cc_data_even[0];
516 u8 cc_even1 = itv->vbi.cc_data_even[1];
517
518 memcpy(itv->vbi.cc_data_odd, itv->vbi.cc_data_odd + 2, sizeof(itv->vbi.cc_data_odd) - 2);
519 memcpy(itv->vbi.cc_data_even, itv->vbi.cc_data_even + 2, sizeof(itv->vbi.cc_data_even) - 2);
520 itv->vbi.cc_pos -= 2;
521 if (itv->vbi.cc_pos && cc_odd0 == 0x80 && cc_odd1 == 0x80)
522 continue;
523
524 /* Send to Saa7127 */
525 ivtv_set_cc(itv, 3, cc_odd0, cc_odd1, cc_even0, cc_even1);
526 if (itv->vbi.cc_pos == 0)
527 set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
528 break;
529 }
530 }
531
532 if (test_and_clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags)) {
533 /* Lock */
534 ivtv_set_vps(itv, itv->vbi.vps_found,
535 itv->vbi.vps[0], itv->vbi.vps[1],
536 itv->vbi.vps[2], itv->vbi.vps[3], itv->vbi.vps[4]);
537 }
538}
diff --git a/drivers/media/video/ivtv/ivtv-vbi.h b/drivers/media/video/ivtv/ivtv-vbi.h
new file mode 100644
index 000000000000..cdaea697b3ec
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-vbi.h
@@ -0,0 +1,26 @@
1/*
2 Vertical Blank Interval support functions
3 Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count);
21void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf,
22 u64 pts_stamp, int streamtype);
23int ivtv_used_line(struct ivtv *itv, int line, int field);
24void ivtv_disable_vbi(struct ivtv *itv);
25void ivtv_set_vbi(unsigned long arg);
26void vbi_work_handler(struct ivtv *itv);
diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h
new file mode 100644
index 000000000000..85530a3cd369
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-version.h
@@ -0,0 +1,26 @@
1/*
2 ivtv driver version information
3 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#define IVTV_DRIVER_NAME "ivtv"
21#define IVTV_DRIVER_VERSION_MAJOR 1
22#define IVTV_DRIVER_VERSION_MINOR 0
23#define IVTV_DRIVER_VERSION_PATCHLEVEL 0
24
25#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
26#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL)
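/*
 * Illustrative note (not part of the header): __stringify() turns the three
 * numeric components into the string "1.0.0", while KERNEL_VERSION() packs
 * them into one integer for numeric comparison.  A standalone equivalent of
 * the stringification, using only the C preprocessor (macro names are
 * hypothetical):
 */
#define STR_(x) #x
#define STR(x)  STR_(x)
#define MY_VERSION STR(1) "." STR(0) "." STR(0)	/* expands to "1.0.0" */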
diff --git a/drivers/media/video/ivtv/ivtv-video.c b/drivers/media/video/ivtv/ivtv-video.c
new file mode 100644
index 000000000000..5858b197d510
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-video.c
@@ -0,0 +1,142 @@
1/*
2 saa7127 interface functions
3 Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include "ivtv-driver.h"
21#include "ivtv-video.h"
22#include "ivtv-i2c.h"
23#include "ivtv-gpio.h"
24#include "ivtv-cards.h"
25#include <media/upd64031a.h>
26#include <media/upd64083.h>
27
28void ivtv_set_vps(struct ivtv *itv, int enabled, u8 vps1, u8 vps2, u8 vps3,
29 u8 vps4, u8 vps5)
30{
31 struct v4l2_sliced_vbi_data data;
32
33 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
34 return;
35 data.id = V4L2_SLICED_VPS;
36 data.field = 0;
37 data.line = enabled ? 16 : 0;
38 data.data[4] = vps1;
39 data.data[10] = vps2;
40 data.data[11] = vps3;
41 data.data[12] = vps4;
42 data.data[13] = vps5;
43 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data);
44}
45
46void ivtv_set_cc(struct ivtv *itv, int mode, u8 cc1, u8 cc2, u8 cc3, u8 cc4)
47{
48 struct v4l2_sliced_vbi_data data;
49
50 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
51 return;
52 data.id = V4L2_SLICED_CAPTION_525;
53 data.field = 0;
54 data.line = (mode & 1) ? 21 : 0;
55 data.data[0] = cc1;
56 data.data[1] = cc2;
57 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data);
58 data.field = 1;
59 data.line = (mode & 2) ? 21 : 0;
60 data.data[0] = cc3;
61 data.data[1] = cc4;
62 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data);
63}
64
65void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
66{
67 struct v4l2_sliced_vbi_data data;
68
69 if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
70 return;
71 /* When using a 50 Hz system, always turn on the
72 wide screen signal with 4x3 ratio as the default.
73 Turning this signal on and off can confuse certain
74 TVs. As far as I can tell there is no reason not to
75 transmit this signal. */
76 if ((itv->std & V4L2_STD_625_50) && !enabled) {
77 enabled = 1;
78 mode = 0x08; /* 4x3 full format */
79 }
80 data.id = V4L2_SLICED_WSS_625;
81 data.field = 0;
82 data.line = enabled ? 23 : 0;
83 data.data[0] = mode & 0xff;
84 data.data[1] = (mode >> 8) & 0xff;
85 ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data);
86}
87
88void ivtv_video_set_io(struct ivtv *itv)
89{
90 struct v4l2_routing route;
91 int inp = itv->active_input;
92 u32 type;
93
94 route.input = itv->card->video_inputs[inp].video_input;
95 route.output = 0;
96 itv->video_dec_func(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route);
97
98 type = itv->card->video_inputs[inp].video_type;
99
100 if (type == IVTV_CARD_INPUT_VID_TUNER) {
101 route.input = 0; /* Tuner */
102 } else if (type < IVTV_CARD_INPUT_COMPOSITE1) {
103 route.input = 2; /* S-Video */
104 } else {
105 route.input = 1; /* Composite */
106 }
107
108 if (itv->card->hw_video & IVTV_HW_GPIO)
109 ivtv_gpio(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route);
110
111 if (itv->card->hw_video & IVTV_HW_UPD64031A) {
112 if (type == IVTV_CARD_INPUT_VID_TUNER ||
113 type >= IVTV_CARD_INPUT_COMPOSITE1) {
114 /* Composite: GR on, connect to 3DYCS */
115 route.input = UPD64031A_GR_ON | UPD64031A_3DYCS_COMPOSITE;
116 } else {
117 /* S-Video: GR bypassed, turn it off */
118 route.input = UPD64031A_GR_OFF | UPD64031A_3DYCS_DISABLE;
119 }
120 route.input |= itv->card->gr_config;
121
122 ivtv_upd64031a(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route);
123 }
124
125 if (itv->card->hw_video & IVTV_HW_UPD6408X) {
126 route.input = UPD64083_YCS_MODE;
127 if (type > IVTV_CARD_INPUT_VID_TUNER &&
128 type < IVTV_CARD_INPUT_COMPOSITE1) {
129 /* S-Video uses YCNR mode and the internal Y-ADC; the upd64031a
 130 is not used. */
131 route.input |= UPD64083_YCNR_MODE;
132 }
133 else if (itv->card->hw_video & IVTV_HW_UPD64031A) {
134 /* Use upd64031a output for tuner and composite (CX23416GYC only) inputs */
135 if ((type == IVTV_CARD_INPUT_VID_TUNER)||
136 (itv->card->type == IVTV_CARD_CX23416GYC)) {
137 route.input |= UPD64083_EXT_Y_ADC;
138 }
139 }
140 ivtv_upd64083(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route);
141 }
142}
diff --git a/drivers/media/video/ivtv/ivtv-video.h b/drivers/media/video/ivtv/ivtv-video.h
new file mode 100644
index 000000000000..c8ade5d3c413
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-video.h
@@ -0,0 +1,24 @@
1/*
2 saa7127 interface functions
3 Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20void ivtv_set_wss(struct ivtv *itv, int enabled, int mode);
21void ivtv_set_cc(struct ivtv *itv, int mode, u8 cc1, u8 cc2, u8 cc3, u8 cc4);
22void ivtv_set_vps(struct ivtv *itv, int enabled, u8 vps1, u8 vps2, u8 vps3,
23 u8 vps4, u8 vps5);
24void ivtv_video_set_io(struct ivtv *itv);
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
new file mode 100644
index 000000000000..bcea09542e5a
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -0,0 +1,1129 @@
1/*
2 yuv support
3
4 Copyright (C) 2007 Ian Armstrong <ian@iarmst.demon.co.uk>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
22#include "ivtv-queue.h"
23#include "ivtv-udma.h"
24#include "ivtv-irq.h"
25#include "ivtv-yuv.h"
26
27static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
28 struct ivtv_dma_frame *args)
29{
30 struct ivtv_dma_page_info y_dma;
31 struct ivtv_dma_page_info uv_dma;
32
33 int i;
34 int y_pages, uv_pages;
35
36 unsigned long y_buffer_offset, uv_buffer_offset;
37 int y_decode_height, uv_decode_height, y_size;
38 int frame = atomic_read(&itv->yuv_info.next_fill_frame);
39
40 y_buffer_offset = IVTV_DEC_MEM_START + yuv_offset[frame];
41 uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;
42
43 y_decode_height = uv_decode_height = args->src.height + args->src.top;
44
45 if (y_decode_height < 512-16)
46 y_buffer_offset += 720 * 16;
47
48 if (y_decode_height & 15)
49 y_decode_height = (y_decode_height + 16) & ~15;
50
51 if (uv_decode_height & 31)
52 uv_decode_height = (uv_decode_height + 32) & ~31;
53
54 y_size = 720 * y_decode_height;
55
56 /* Still in USE */
57 if (dma->SG_length || dma->page_count) {
58 IVTV_DEBUG_WARN("prep_user_dma: SG_length %d page_count %d still full?\n",
59 dma->SG_length, dma->page_count);
60 return -EBUSY;
61 }
62
63 ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
64 ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
65
66 /* Get user pages for DMA Xfer */
67 down_read(&current->mm->mmap_sem);
68 y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
69 uv_pages = get_user_pages(current, current->mm, uv_dma.uaddr, uv_dma.page_count, 0, 1, &dma->map[y_pages], NULL);
70 up_read(&current->mm->mmap_sem);
71
72 dma->page_count = y_dma.page_count + uv_dma.page_count;
73
74 if (y_pages + uv_pages != dma->page_count) {
75 IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
76 y_pages + uv_pages, dma->page_count);
77
78 for (i = 0; i < dma->page_count; i++) {
79 put_page(dma->map[i]);
80 }
81 dma->page_count = 0;
82 return -EINVAL;
83 }
84
85 /* Fill & map SG List */
86 ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0));
87 dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
88
89 /* Fill SG Array with new values */
90 ivtv_udma_fill_sg_array (dma, y_buffer_offset, uv_buffer_offset, y_size);
91
92 /* If we've offset the y plane, ensure top area is blanked */
93 if (args->src.height + args->src.top < 512-16) {
94 if (itv->yuv_info.blanking_dmaptr) {
95 dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
96 dma->SGarray[dma->SG_length].src = cpu_to_le32(itv->yuv_info.blanking_dmaptr);
97 dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DEC_MEM_START + yuv_offset[frame]);
98 dma->SG_length++;
99 }
100 }
101
102 /* Tag SG Array with Interrupt Bit */
103 dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
104
105 ivtv_udma_sync_for_device(itv);
106 return 0;
107}
108
109/* We rely on a table held in the firmware - quick check. */
110int ivtv_yuv_filter_check(struct ivtv *itv)
111{
112 int i, offset_y, offset_uv;
113
114 for (i=0, offset_y = 16, offset_uv = 4; i<16; i++, offset_y += 24, offset_uv += 12) {
115 if ((read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + offset_y) != i << 16) ||
116 (read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + offset_uv) != i << 16)) {
117 IVTV_WARN ("YUV filter table not found in firmware.\n");
118 return -1;
119 }
120 }
121 return 0;
122}
123
124static void ivtv_yuv_filter(struct ivtv *itv, int h_filter, int v_filter_1, int v_filter_2)
125{
126 int filter_index, filter_line;
127
128 /* If any filter is -1, then don't update it */
129 if (h_filter > -1) {
130 if (h_filter > 4) h_filter = 4;
131 filter_index = h_filter * 384;
132 filter_line = 0;
133 while (filter_line < 16) {
134 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02804);
135 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x0281c);
136 filter_index += 4;
137 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02808);
138 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02820);
139 filter_index += 4;
140 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x0280c);
141 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02824);
142 filter_index += 4;
143 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02810);
144 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02828);
145 filter_index += 4;
146 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02814);
147 write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x0282c);
148 filter_index += 8;
149 write_reg(0, 0x02818);
150 write_reg(0, 0x02830);
151 filter_line ++;
152 }
153 IVTV_DEBUG_YUV("h_filter -> %d\n",h_filter);
154 }
155
156 if (v_filter_1 > -1) {
157 if (v_filter_1 > 4) v_filter_1 = 4;
158 filter_index = v_filter_1 * 192;
159 filter_line = 0;
160 while (filter_line < 16) {
161 write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x02900);
162 filter_index += 4;
163 write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x02904);
164 filter_index += 8;
165 write_reg(0, 0x02908);
166 filter_line ++;
167 }
168 IVTV_DEBUG_YUV("v_filter_1 -> %d\n",v_filter_1);
169 }
170
171 if (v_filter_2 > -1) {
172 if (v_filter_2 > 4) v_filter_2 = 4;
173 filter_index = v_filter_2 * 192;
174 filter_line = 0;
175 while (filter_line < 16) {
176 write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x0290c);
177 filter_index += 4;
178 write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x02910);
179 filter_index += 8;
180 write_reg(0, 0x02914);
181 filter_line ++;
182 }
183 IVTV_DEBUG_YUV("v_filter_2 -> %d\n",v_filter_2);
184 }
185}
186
187static void ivtv_yuv_handle_horizontal(struct ivtv *itv, struct yuv_frame_info *window)
188{
189 u32 reg_2834, reg_2838, reg_283c;
190 u32 reg_2844, reg_2854, reg_285c;
191 u32 reg_2864, reg_2874, reg_2890;
192 u32 reg_2870, reg_2870_base, reg_2870_offset;
193 int x_cutoff;
194 int h_filter;
195 u32 master_width;
196
197 IVTV_DEBUG_WARN( "Need to adjust to width %d src_w %d dst_w %d src_x %d dst_x %d\n",
198 window->tru_w, window->src_w, window->dst_w,window->src_x, window->dst_x);
199
200 /* How wide is the src image */
201 x_cutoff = window->src_w + window->src_x;
202
203 /* Set the display width */
204 reg_2834 = window->dst_w;
205 reg_2838 = reg_2834;
206
207 /* Set the display position */
208 reg_2890 = window->dst_x;
209
210 /* Index into the image horizontally */
211 reg_2870 = 0;
212
213 /* 2870 is normally fudged to align video coords with osd coords.
214 If running full screen, it causes an unwanted left shift.
215 Remove the fudge if we almost fill the screen.
216 Gradually adjust the offset to avoid the video 'snapping'
217 left/right if it gets dragged through this region.
218 Only do this if osd is full width. */
219 if (window->vis_w == 720) {
220 if ((window->tru_x - window->pan_x > -1) && (window->tru_x - window->pan_x <= 40) && (window->dst_w >= 680)){
221 reg_2870 = 10 - (window->tru_x - window->pan_x) / 4;
222 }
223 else if ((window->tru_x - window->pan_x < 0) && (window->tru_x - window->pan_x >= -20) && (window->dst_w >= 660)) {
224 reg_2870 = (10 + (window->tru_x - window->pan_x) / 2);
225 }
226
227 if (window->dst_w >= window->src_w)
228 reg_2870 = reg_2870 << 16 | reg_2870;
229 else
230 reg_2870 = ((reg_2870 & ~1) << 15) | (reg_2870 & ~1);
231 }
232
233 if (window->dst_w < window->src_w)
234 reg_2870 = 0x000d000e - reg_2870;
235 else
236 reg_2870 = 0x0012000e - reg_2870;
237
238 /* We're also using 2870 to shift the image left (src_x & negative dst_x) */
239 reg_2870_offset = (window->src_x*((window->dst_w << 21)/window->src_w))>>19;
240
241 if (window->dst_w >= window->src_w) {
242 x_cutoff &= ~1;
243 master_width = (window->src_w * 0x00200000) / (window->dst_w);
244 if (master_width * window->dst_w != window->src_w * 0x00200000) master_width ++;
245 reg_2834 = (reg_2834 << 16) | x_cutoff;
246 reg_2838 = (reg_2838 << 16) | x_cutoff;
247 reg_283c = master_width >> 2;
248 reg_2844 = master_width >> 2;
249 reg_2854 = master_width;
250 reg_285c = master_width >> 1;
251 reg_2864 = master_width >> 1;
252
253 /* We also need to factor in the scaling
254 (src_w - dst_w) / (src_w / 4) */
255 if (window->dst_w > window->src_w)
256 reg_2870_base = ((window->dst_w - window->src_w)<<16) / (window->src_w <<14);
257 else
258 reg_2870_base = 0;
259
260 reg_2870 += (((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 2) + (reg_2870_base << 17 | reg_2870_base);
261 reg_2874 = 0;
262 }
263 else if (window->dst_w < window->src_w / 2) {
264 master_width = (window->src_w * 0x00080000) / window->dst_w;
265 if (master_width * window->dst_w != window->src_w * 0x00080000) master_width ++;
266 reg_2834 = (reg_2834 << 16) | x_cutoff;
267 reg_2838 = (reg_2838 << 16) | x_cutoff;
268 reg_283c = master_width >> 2;
269 reg_2844 = master_width >> 1;
270 reg_2854 = master_width;
271 reg_285c = master_width >> 1;
272 reg_2864 = master_width >> 1;
273 reg_2870 += (((reg_2870_offset << 15) & 0xFFFF0000) | reg_2870_offset);
274 reg_2870 += (5 - (((window->src_w + window->src_w / 2) - 1) / window->dst_w)) << 16;
275 reg_2874 = 0x00000012;
276 }
277 else {
278 master_width = (window->src_w * 0x00100000) / window->dst_w;
279 if (master_width * window->dst_w != window->src_w * 0x00100000) master_width ++;
280 reg_2834 = (reg_2834 << 16) | x_cutoff;
281 reg_2838 = (reg_2838 << 16) | x_cutoff;
282 reg_283c = master_width >> 2;
283 reg_2844 = master_width >> 1;
284 reg_2854 = master_width;
285 reg_285c = master_width >> 1;
286 reg_2864 = master_width >> 1;
287 reg_2870 += (((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 1);
288 reg_2870 += (5 - (((window->src_w * 3) - 1) / window->dst_w)) << 16;
289 reg_2874 = 0x00000001;
290 }
291
292 /* Select the horizontal filter */
293 if (window->src_w == window->dst_w) {
294 /* An exact size match uses filter 0 */
295 h_filter = 0;
296 }
297 else {
298 /* Figure out which filter to use */
299 h_filter = ((window->src_w << 16) / window->dst_w) >> 15;
300 h_filter = (h_filter >> 1) + (h_filter & 1);
301 /* Only an exact size match can use filter 0 */
302 if (h_filter == 0) h_filter = 1;
303 }
304
305 write_reg(reg_2834, 0x02834);
306 write_reg(reg_2838, 0x02838);
307 IVTV_DEBUG_YUV("Update reg 0x2834 %08x->%08x 0x2838 %08x->%08x\n",itv->yuv_info.reg_2834, reg_2834, itv->yuv_info.reg_2838, reg_2838);
308
309 write_reg(reg_283c, 0x0283c);
310 write_reg(reg_2844, 0x02844);
311
312 IVTV_DEBUG_YUV("Update reg 0x283c %08x->%08x 0x2844 %08x->%08x\n",itv->yuv_info.reg_283c, reg_283c, itv->yuv_info.reg_2844, reg_2844);
313
314 write_reg(0x00080514, 0x02840);
315 write_reg(0x00100514, 0x02848);
316 IVTV_DEBUG_YUV("Update reg 0x2840 %08x->%08x 0x2848 %08x->%08x\n",itv->yuv_info.reg_2840, 0x00080514, itv->yuv_info.reg_2848, 0x00100514);
317
318 write_reg(reg_2854, 0x02854);
319 IVTV_DEBUG_YUV("Update reg 0x2854 %08x->%08x \n",itv->yuv_info.reg_2854, reg_2854);
320
321 write_reg(reg_285c, 0x0285c);
322 write_reg(reg_2864, 0x02864);
323 IVTV_DEBUG_YUV("Update reg 0x285c %08x->%08x 0x2864 %08x->%08x\n",itv->yuv_info.reg_285c, reg_285c, itv->yuv_info.reg_2864, reg_2864);
324
325 write_reg(reg_2874, 0x02874);
326 IVTV_DEBUG_YUV("Update reg 0x2874 %08x->%08x\n",itv->yuv_info.reg_2874, reg_2874);
327
328 write_reg(reg_2870, 0x02870);
329 IVTV_DEBUG_YUV("Update reg 0x2870 %08x->%08x\n",itv->yuv_info.reg_2870, reg_2870);
330
331 write_reg( reg_2890,0x02890);
332 IVTV_DEBUG_YUV("Update reg 0x2890 %08x->%08x\n",itv->yuv_info.reg_2890, reg_2890);
333
334 /* Only update the filter if we really need to */
335 if (h_filter != itv->yuv_info.h_filter) {
336 ivtv_yuv_filter (itv,h_filter,-1,-1);
337 itv->yuv_info.h_filter = h_filter;
338 }
339}
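/*
 * Illustrative sketch (not driver code): the filter selection above rounds
 * the downscale ratio src/dst to the nearest integer (halves round up) and
 * reserves bank 0 for the exact 1:1 case; ivtv_yuv_filter() then clamps the
 * result to the hardware's banks 0..4.  Standalone version with a
 * hypothetical name:
 */
static int pick_scaler_filter(int src, int dst)
{
	int f;

	if (src == dst)
		return 0;			/* exact match uses filter 0 */
	f = ((src << 16) / dst) >> 15;		/* 2 * src / dst, truncated */
	f = (f >> 1) + (f & 1);			/* round to nearest, halves up */
	if (f == 0)
		f = 1;				/* only a 1:1 match may use 0 */
	if (f > 4)
		f = 4;				/* highest filter bank */
	return f;
}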
340
341static void ivtv_yuv_handle_vertical(struct ivtv *itv, struct yuv_frame_info *window)
342{
343 u32 master_height;
344 u32 reg_2918, reg_291c, reg_2920, reg_2928;
345 u32 reg_2930, reg_2934, reg_293c;
346 u32 reg_2940, reg_2944, reg_294c;
347 u32 reg_2950, reg_2954, reg_2958, reg_295c;
348 u32 reg_2960, reg_2964, reg_2968, reg_296c;
349 u32 reg_289c;
350 u32 src_y_major_y, src_y_minor_y;
351 u32 src_y_major_uv, src_y_minor_uv;
352 u32 reg_2964_base, reg_2968_base;
353 int v_filter_1, v_filter_2;
354
355 IVTV_DEBUG_WARN("Need to adjust to height %d src_h %d dst_h %d src_y %d dst_y %d\n",
356 window->tru_h, window->src_h, window->dst_h,window->src_y, window->dst_y);
357
358 /* What scaling mode is being used... */
359 if (window->interlaced_y) {
360 IVTV_DEBUG_YUV("Scaling mode Y: Interlaced\n");
361 }
362 else {
363 IVTV_DEBUG_YUV("Scaling mode Y: Progressive\n");
364 }
365
366 if (window->interlaced_uv) {
367 IVTV_DEBUG_YUV("Scaling mode UV: Interlaced\n");
368 }
369 else {
370 IVTV_DEBUG_YUV("Scaling mode UV: Progressive\n");
371 }
372
373 /* What is the source video being treated as... */
374 if (itv->yuv_info.frame_interlaced) {
375 IVTV_DEBUG_WARN("Source video: Interlaced\n");
376 }
377 else {
378 IVTV_DEBUG_WARN("Source video: Non-interlaced\n");
379 }
380
381 /* We offset into the image using two different index methods, so split
382 the y source coord into two parts. */
383 if (window->src_y < 8) {
384 src_y_minor_uv = window->src_y;
385 src_y_major_uv = 0;
386 }
387 else {
388 src_y_minor_uv = 8;
389 src_y_major_uv = window->src_y - 8;
390 }
391
392 src_y_minor_y = src_y_minor_uv;
393 src_y_major_y = src_y_major_uv;
394
395 if (window->offset_y) src_y_minor_y += 16;
396
397 if (window->interlaced_y)
398 reg_2918 = (window->dst_h << 16) | (window->src_h + src_y_minor_y);
399 else
400 reg_2918 = (window->dst_h << 16) | ((window->src_h + src_y_minor_y) << 1);
401
402 if (window->interlaced_uv)
403 reg_291c = (window->dst_h << 16) | ((window->src_h + src_y_minor_uv) >> 1);
404 else
405 reg_291c = (window->dst_h << 16) | (window->src_h + src_y_minor_uv);
406
407 reg_2964_base = (src_y_minor_y * ((window->dst_h << 16)/window->src_h)) >> 14;
408 reg_2968_base = (src_y_minor_uv * ((window->dst_h << 16)/window->src_h)) >> 14;
409
410 if (window->dst_h / 2 >= window->src_h && !window->interlaced_y) {
411 master_height = (window->src_h * 0x00400000) / window->dst_h;
412 if ((window->src_h * 0x00400000) - (master_height * window->dst_h) >= window->dst_h / 2) master_height ++;
413 reg_2920 = master_height >> 2;
414 reg_2928 = master_height >> 3;
415 reg_2930 = master_height;
416 reg_2940 = master_height >> 1;
417 reg_2964_base >>= 3;
418 reg_2968_base >>= 3;
419 reg_296c = 0x00000000;
420 }
421 else if (window->dst_h >= window->src_h) {
422 master_height = (window->src_h * 0x00400000) / window->dst_h;
423 master_height = (master_height >> 1) + (master_height & 1);
424 reg_2920 = master_height >> 2;
425 reg_2928 = master_height >> 2;
426 reg_2930 = master_height;
427 reg_2940 = master_height >> 1;
428 reg_296c = 0x00000000;
429 if (window->interlaced_y) {
430 reg_2964_base >>= 3;
431 }
432 else {
433 reg_296c ++;
434 reg_2964_base >>= 2;
435 }
436 if (window->interlaced_uv) reg_2928 >>= 1;
437 reg_2968_base >>= 3;
438 }
439 else if (window->dst_h >= window->src_h / 2) {
440 master_height = (window->src_h * 0x00200000) / window->dst_h;
441 master_height = (master_height >> 1) + (master_height & 1);
442 reg_2920 = master_height >> 2;
443 reg_2928 = master_height >> 2;
444 reg_2930 = master_height;
445 reg_2940 = master_height;
446 reg_296c = 0x00000101;
447 if (window->interlaced_y) {
448 reg_2964_base >>= 2;
449 }
450 else {
451 reg_296c ++;
452 reg_2964_base >>= 1;
453 }
454 if (window->interlaced_uv) reg_2928 >>= 1;
455 reg_2968_base >>= 2;
456 }
457 else {
458 master_height = (window->src_h * 0x00100000) / window->dst_h;
459 master_height = (master_height >> 1) + (master_height & 1);
460 reg_2920 = master_height >> 2;
461 reg_2928 = master_height >> 2;
462 reg_2930 = master_height;
463 reg_2940 = master_height;
464 reg_2964_base >>= 1;
465 reg_2968_base >>= 2;
466 reg_296c = 0x00000102;
467 }
468
469 /* FIXME: These registers change depending on scaled / unscaled output.
 470 We really need to work out what they should be */
471 if (window->src_h == window->dst_h){
472 reg_2934 = 0x00020000;
473 reg_293c = 0x00100000;
474 reg_2944 = 0x00040000;
475 reg_294c = 0x000b0000;
476 }
477 else {
478 reg_2934 = 0x00000FF0;
479 reg_293c = 0x00000FF0;
480 reg_2944 = 0x00000FF0;
481 reg_294c = 0x00000FF0;
482 }
483
484 /* The first line to be displayed */
485 reg_2950 = 0x00010000 + src_y_major_y;
486 if (window->interlaced_y) reg_2950 += 0x00010000;
487 reg_2954 = reg_2950 + 1;
488
489 reg_2958 = 0x00010000 + (src_y_major_y >> 1);
490 if (window->interlaced_uv) reg_2958 += 0x00010000;
491 reg_295c = reg_2958 + 1;
492
493 if (itv->yuv_info.decode_height == 480)
494 reg_289c = 0x011e0017;
495 else
496 reg_289c = 0x01500017;
497
498 if (window->dst_y < 0)
499 reg_289c = (reg_289c - ((window->dst_y & ~1)<<15))-(window->dst_y >>1);
500 else
501 reg_289c = (reg_289c + ((window->dst_y & ~1)<<15))+(window->dst_y >>1);
502
503 /* How much of the source to decode.
504 Take into account the source offset. */
505 reg_2960 = ((src_y_minor_y + window->src_h + src_y_major_y) - 1 ) |
506 ((((src_y_minor_uv + window->src_h + src_y_major_uv) - 1) & ~1) << 15);
507
508 /* Calculate correct value for register 2964 */
509 if (window->src_h == window->dst_h)
510 reg_2964 = 1;
511 else {
512 reg_2964 = 2 + ((window->dst_h << 1) / window->src_h);
513 reg_2964 = (reg_2964 >> 1) + (reg_2964 & 1);
514 }
515 reg_2968 = (reg_2964 << 16) + reg_2964 + (reg_2964 >> 1);
516 reg_2964 = (reg_2964 << 16) + reg_2964 + (reg_2964 * 46 / 94);
517
518 /* Okay, we've wasted time working out the correct value,
519 but if we use it, it fouls the window alignment.
520 Fudge it to what we want... */
521 reg_2964 = 0x00010001 + ((reg_2964 & 0x0000FFFF) - (reg_2964 >> 16));
522 reg_2968 = 0x00010001 + ((reg_2968 & 0x0000FFFF) - (reg_2968 >> 16));
523
524 /* Deviate further from what it should be. I find the flicker
 525 headache-inducing, so try to reduce it slightly. Leave 2968 as-is,
 526 otherwise the colours foul. */
527 if ((reg_2964 != 0x00010001) && (window->dst_h / 2 <= window->src_h))
528 reg_2964 = (reg_2964 & 0xFFFF0000) + ((reg_2964 & 0x0000FFFF)/2);
529
530 if (!window->interlaced_y) reg_2964 -= 0x00010001;
531 if (!window->interlaced_uv) reg_2968 -= 0x00010001;
532
533 reg_2964 += ((reg_2964_base << 16) | reg_2964_base);
534 reg_2968 += ((reg_2968_base << 16) | reg_2968_base);
535
536 /* Select the vertical filter */
537 if (window->src_h == window->dst_h) {
538 /* An exact size match uses filter 0/1 */
539 v_filter_1 = 0;
540 v_filter_2 = 1;
541 }
542 else {
543 /* Figure out which filter to use */
544 v_filter_1 = ((window->src_h << 16) / window->dst_h) >> 15;
545 v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1);
546 /* Only an exact size match can use filter 0 */
547 if (v_filter_1 == 0) v_filter_1 = 1;
548 v_filter_2 = v_filter_1;
549 }
550
551 write_reg(reg_2934, 0x02934);
552 write_reg(reg_293c, 0x0293c);
553 IVTV_DEBUG_YUV("Update reg 0x2934 %08x->%08x 0x293c %08x->%08x\n",itv->yuv_info.reg_2934, reg_2934, itv->yuv_info.reg_293c, reg_293c);
554 write_reg(reg_2944, 0x02944);
555 write_reg(reg_294c, 0x0294c);
556 IVTV_DEBUG_YUV("Update reg 0x2944 %08x->%08x 0x294c %08x->%08x\n",itv->yuv_info.reg_2944, reg_2944, itv->yuv_info.reg_294c, reg_294c);
557
558 /* Ensure 2970 is 0 (does it ever change?) */
559/* write_reg(0,0x02970); */
560/* IVTV_DEBUG_YUV("Update reg 0x2970 %08x->%08x\n",itv->yuv_info.reg_2970, 0); */
561
562 write_reg(reg_2930, 0x02938);
563 write_reg(reg_2930, 0x02930);
564 IVTV_DEBUG_YUV("Update reg 0x2930 %08x->%08x 0x2938 %08x->%08x\n",itv->yuv_info.reg_2930, reg_2930, itv->yuv_info.reg_2938, reg_2930);
565
566 write_reg(reg_2928, 0x02928);
567 write_reg(reg_2928+0x514, 0x0292C);
568 IVTV_DEBUG_YUV("Update reg 0x2928 %08x->%08x 0x292c %08x->%08x\n",itv->yuv_info.reg_2928, reg_2928, itv->yuv_info.reg_292c, reg_2928+0x514);
569
570 write_reg(reg_2920, 0x02920);
571 write_reg(reg_2920+0x514, 0x02924);
572 IVTV_DEBUG_YUV("Update reg 0x2920 %08x->%08x 0x2924 %08x->%08x\n",itv->yuv_info.reg_2920, reg_2920, itv->yuv_info.reg_2924, 0x514+reg_2920);
573
574 write_reg (reg_2918,0x02918);
575 write_reg (reg_291c,0x0291C);
576 IVTV_DEBUG_YUV("Update reg 0x2918 %08x->%08x 0x291C %08x->%08x\n",itv->yuv_info.reg_2918,reg_2918,itv->yuv_info.reg_291c,reg_291c);
577
578 write_reg(reg_296c, 0x0296c);
579 IVTV_DEBUG_YUV("Update reg 0x296c %08x->%08x\n",itv->yuv_info.reg_296c, reg_296c);
580
581 write_reg(reg_2940, 0x02948);
582 write_reg(reg_2940, 0x02940);
583 IVTV_DEBUG_YUV("Update reg 0x2940 %08x->%08x 0x2948 %08x->%08x\n",itv->yuv_info.reg_2940, reg_2940, itv->yuv_info.reg_2948, reg_2940);
584
585 write_reg(reg_2950, 0x02950);
586 write_reg(reg_2954, 0x02954);
587 IVTV_DEBUG_YUV("Update reg 0x2950 %08x->%08x 0x2954 %08x->%08x\n",itv->yuv_info.reg_2950, reg_2950, itv->yuv_info.reg_2954, reg_2954);
588
589 write_reg(reg_2958, 0x02958);
590 write_reg(reg_295c, 0x0295C);
591 IVTV_DEBUG_YUV("Update reg 0x2958 %08x->%08x 0x295C %08x->%08x\n",itv->yuv_info.reg_2958, reg_2958, itv->yuv_info.reg_295c, reg_295c);
592
593 write_reg(reg_2960, 0x02960);
594 IVTV_DEBUG_YUV("Update reg 0x2960 %08x->%08x \n",itv->yuv_info.reg_2960, reg_2960);
595
596 write_reg(reg_2964, 0x02964);
597 write_reg(reg_2968, 0x02968);
598 IVTV_DEBUG_YUV("Update reg 0x2964 %08x->%08x 0x2968 %08x->%08x\n",itv->yuv_info.reg_2964, reg_2964, itv->yuv_info.reg_2968, reg_2968);
599
600 write_reg( reg_289c,0x0289c);
601 IVTV_DEBUG_YUV("Update reg 0x289c %08x->%08x\n",itv->yuv_info.reg_289c, reg_289c);
602
603 /* Only update filter 1 if we really need to */
604 if (v_filter_1 != itv->yuv_info.v_filter_1) {
605 ivtv_yuv_filter (itv,-1,v_filter_1,-1);
606 itv->yuv_info.v_filter_1 = v_filter_1;
607 }
608
609 /* Only update filter 2 if we really need to */
610 if (v_filter_2 != itv->yuv_info.v_filter_2) {
611 ivtv_yuv_filter (itv,-1,-1,v_filter_2);
612 itv->yuv_info.v_filter_2 = v_filter_2;
613 }
614
615 itv->yuv_info.frame_interlaced_last = itv->yuv_info.frame_interlaced;
616}
617
618/* Modify the supplied coordinate information to fit the visible osd area */
619static u32 ivtv_yuv_window_setup (struct ivtv *itv, struct yuv_frame_info *window)
620{
621 int osd_crop, lace_threshold;
622 u32 osd_scale;
623 u32 yuv_update = 0;
624
625 lace_threshold = itv->yuv_info.lace_threshold;
626 if (lace_threshold < 0)
627 lace_threshold = itv->yuv_info.decode_height - 1;
628
629 /* Work out the lace settings */
630 switch (itv->yuv_info.lace_mode) {
631 case IVTV_YUV_MODE_PROGRESSIVE: /* Progressive mode */
632 itv->yuv_info.frame_interlaced = 0;
633 if (window->tru_h < 512 || (window->tru_h > 576 && window->tru_h < 1021))
634 window->interlaced_y = 0;
635 else
636 window->interlaced_y = 1;
637
638 if (window->tru_h < 1021 && (window->dst_h >= window->src_h /2))
639 window->interlaced_uv = 0;
640 else
641 window->interlaced_uv = 1;
642 break;
643
644 case IVTV_YUV_MODE_AUTO:
645 if (window->tru_h <= lace_threshold || window->tru_h > 576 || window->tru_w > 720){
646 itv->yuv_info.frame_interlaced = 0;
647 if ((window->tru_h < 512) ||
648 (window->tru_h > 576 && window->tru_h < 1021) ||
649 (window->tru_w > 720 && window->tru_h < 1021))
650 window->interlaced_y = 0;
651 else
652 window->interlaced_y = 1;
653
654 if (window->tru_h < 1021 && (window->dst_h >= window->src_h /2))
655 window->interlaced_uv = 0;
656 else
657 window->interlaced_uv = 1;
658 }
659 else {
660 itv->yuv_info.frame_interlaced = 1;
661 window->interlaced_y = 1;
662 window->interlaced_uv = 1;
663 }
664 break;
665
666 case IVTV_YUV_MODE_INTERLACED: /* Interlace mode */
667 default:
668 itv->yuv_info.frame_interlaced = 1;
669 window->interlaced_y = 1;
670 window->interlaced_uv = 1;
671 break;
672 }
673
674 /* Sorry, but no negative coords for src */
675 if (window->src_x < 0) window->src_x = 0;
676 if (window->src_y < 0) window->src_y = 0;
677
678 /* Can only reduce width down to 1/4 original size */
679 if ((osd_crop = window->src_w - ( 4 * window->dst_w )) > 0) {
680 window->src_x += osd_crop / 2;
681 window->src_w = (window->src_w - osd_crop) & ~3;
682 window->dst_w = window->src_w / 4;
683 window->dst_w += window->dst_w & 1;
684 }
685
686 /* Can only reduce height down to 1/4 original size */
687 if (window->src_h / window->dst_h >= 2) {
688 /* Overflow may be because we're running progressive, so force mode switch */
689 window->interlaced_y = 1;
690 /* Make sure we're still within limits for interlace */
691 if ((osd_crop = window->src_h - ( 4 * window->dst_h )) > 0) {
692 /* If we reach here we'll have to force the height. */
693 window->src_y += osd_crop / 2;
694 window->src_h = (window->src_h - osd_crop) & ~3;
695 window->dst_h = window->src_h / 4;
696 window->dst_h += window->dst_h & 1;
697 }
698 }
699
700 /* If there's nothing safe to display, we may as well stop now */
701 if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) {
702 return 0;
703 }
704
705 /* Ensure video remains inside OSD area */
706 osd_scale = (window->src_h << 16) / window->dst_h;
707
708 if ((osd_crop = window->pan_y - window->dst_y) > 0) {
709 /* Falls off the upper edge - crop */
710 window->src_y += (osd_scale * osd_crop) >> 16;
711 window->src_h -= (osd_scale * osd_crop) >> 16;
712 window->dst_h -= osd_crop;
713 window->dst_y = 0;
714 }
715 else {
716 window->dst_y -= window->pan_y;
717 }
718
719 if ((osd_crop = window->dst_h + window->dst_y - window->vis_h) > 0) {
720 /* Falls off the lower edge - crop */
721 window->dst_h -= osd_crop;
722 window->src_h -= (osd_scale * osd_crop) >> 16;
723 }
724
725 osd_scale = (window->src_w << 16) / window->dst_w;
726
727 if ((osd_crop = window->pan_x - window->dst_x) > 0) {
728 /* Falls off the left edge - crop */
729 window->src_x += (osd_scale * osd_crop) >> 16;
730 window->src_w -= (osd_scale * osd_crop) >> 16;
731 window->dst_w -= osd_crop;
732 window->dst_x = 0;
733 }
734 else {
735 window->dst_x -= window->pan_x;
736 }
737
738 if ((osd_crop = window->dst_w + window->dst_x - window->vis_w) > 0) {
739 /* Falls off the right edge - crop */
740 window->dst_w -= osd_crop;
741 window->src_w -= (osd_scale * osd_crop) >> 16;
742 }
743
744 /* The OSD can be moved. Track to it */
745 window->dst_x += itv->yuv_info.osd_x_offset;
746 window->dst_y += itv->yuv_info.osd_y_offset;
747
748 /* Width & height for both src & dst must be even.
749 Same for coordinates. */
750 window->dst_w &= ~1;
751 window->dst_x &= ~1;
752
753 window->src_w += window->src_x & 1;
754 window->src_x &= ~1;
755
756 window->src_w &= ~1;
757 window->dst_w &= ~1;
758
759 window->dst_h &= ~1;
760 window->dst_y &= ~1;
761
762 window->src_h += window->src_y & 1;
763 window->src_y &= ~1;
764
765 window->src_h &= ~1;
766 window->dst_h &= ~1;
767
768 /* Due to rounding, we may have reduced the output size to <1/4 of the source.
 769 Check again, but this time just resize. Don't change the source coordinates. */
770 if (window->dst_w < window->src_w / 4) {
771 window->src_w &= ~3;
772 window->dst_w = window->src_w / 4;
773 window->dst_w += window->dst_w & 1;
774 }
775 if (window->dst_h < window->src_h / 4) {
776 window->src_h &= ~3;
777 window->dst_h = window->src_h / 4;
778 window->dst_h += window->dst_h & 1;
779 }
780
781 /* Check again. If there's nothing safe to display, stop now */
782 if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) {
783 return 0;
784 }
785
786 /* Both x offset & width are linked, so they have to be done together */
787 if ((itv->yuv_info.old_frame_info.dst_w != window->dst_w) ||
788 (itv->yuv_info.old_frame_info.src_w != window->src_w) ||
789 (itv->yuv_info.old_frame_info.dst_x != window->dst_x) ||
790 (itv->yuv_info.old_frame_info.src_x != window->src_x) ||
791 (itv->yuv_info.old_frame_info.pan_x != window->pan_x) ||
792 (itv->yuv_info.old_frame_info.vis_w != window->vis_w)) {
793 yuv_update |= IVTV_YUV_UPDATE_HORIZONTAL;
794 }
795
796 if ((itv->yuv_info.old_frame_info.src_h != window->src_h) ||
797 (itv->yuv_info.old_frame_info.dst_h != window->dst_h) ||
798 (itv->yuv_info.old_frame_info.dst_y != window->dst_y) ||
799 (itv->yuv_info.old_frame_info.src_y != window->src_y) ||
800 (itv->yuv_info.old_frame_info.pan_y != window->pan_y) ||
801 (itv->yuv_info.old_frame_info.vis_h != window->vis_h) ||
802 (itv->yuv_info.old_frame_info.interlaced_y != window->interlaced_y) ||
803 (itv->yuv_info.old_frame_info.interlaced_uv != window->interlaced_uv)) {
804 yuv_update |= IVTV_YUV_UPDATE_VERTICAL;
805 }
806
807 return yuv_update;
808}
809
810/* Update the scaling register to the requested value */
811void ivtv_yuv_work_handler (struct ivtv *itv)
812{
813 struct yuv_frame_info window;
814 u32 yuv_update;
815
816 int frame = itv->yuv_info.update_frame;
817
818/* IVTV_DEBUG_YUV("Update yuv registers for frame %d\n",frame); */
819 memcpy(&window, &itv->yuv_info.new_frame_info[frame], sizeof (window));
820
821 /* Update the osd pan info */
822 window.pan_x = itv->yuv_info.osd_x_pan;
823 window.pan_y = itv->yuv_info.osd_y_pan;
824 window.vis_w = itv->yuv_info.osd_vis_w;
825 window.vis_h = itv->yuv_info.osd_vis_h;
826
827 /* Calculate the display window coordinates. Exit if nothing left */
828 if (!(yuv_update = ivtv_yuv_window_setup (itv, &window)))
829 return;
830
831 /* Update horizontal settings */
832 if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL)
833 ivtv_yuv_handle_horizontal(itv, &window);
834
835 if (yuv_update & IVTV_YUV_UPDATE_VERTICAL)
836 ivtv_yuv_handle_vertical(itv, &window);
837
838 memcpy(&itv->yuv_info.old_frame_info, &window, sizeof (itv->yuv_info.old_frame_info));
839}
840
841static void ivtv_yuv_init (struct ivtv *itv)
842{
843 IVTV_DEBUG_YUV("ivtv_yuv_init\n");
844
845 /* Take a snapshot of the current register settings */
846 itv->yuv_info.reg_2834 = read_reg(0x02834);
847 itv->yuv_info.reg_2838 = read_reg(0x02838);
848 itv->yuv_info.reg_283c = read_reg(0x0283c);
849 itv->yuv_info.reg_2840 = read_reg(0x02840);
850 itv->yuv_info.reg_2844 = read_reg(0x02844);
851 itv->yuv_info.reg_2848 = read_reg(0x02848);
852 itv->yuv_info.reg_2854 = read_reg(0x02854);
853 itv->yuv_info.reg_285c = read_reg(0x0285c);
854 itv->yuv_info.reg_2864 = read_reg(0x02864);
855 itv->yuv_info.reg_2870 = read_reg(0x02870);
856 itv->yuv_info.reg_2874 = read_reg(0x02874);
857 itv->yuv_info.reg_2898 = read_reg(0x02898);
858 itv->yuv_info.reg_2890 = read_reg(0x02890);
859
860 itv->yuv_info.reg_289c = read_reg(0x0289c);
861 itv->yuv_info.reg_2918 = read_reg(0x02918);
862 itv->yuv_info.reg_291c = read_reg(0x0291c);
863 itv->yuv_info.reg_2920 = read_reg(0x02920);
864 itv->yuv_info.reg_2924 = read_reg(0x02924);
865 itv->yuv_info.reg_2928 = read_reg(0x02928);
866 itv->yuv_info.reg_292c = read_reg(0x0292c);
867 itv->yuv_info.reg_2930 = read_reg(0x02930);
868 itv->yuv_info.reg_2934 = read_reg(0x02934);
869 itv->yuv_info.reg_2938 = read_reg(0x02938);
870 itv->yuv_info.reg_293c = read_reg(0x0293c);
871 itv->yuv_info.reg_2940 = read_reg(0x02940);
872 itv->yuv_info.reg_2944 = read_reg(0x02944);
873 itv->yuv_info.reg_2948 = read_reg(0x02948);
874 itv->yuv_info.reg_294c = read_reg(0x0294c);
875 itv->yuv_info.reg_2950 = read_reg(0x02950);
876 itv->yuv_info.reg_2954 = read_reg(0x02954);
877 itv->yuv_info.reg_2958 = read_reg(0x02958);
878 itv->yuv_info.reg_295c = read_reg(0x0295c);
879 itv->yuv_info.reg_2960 = read_reg(0x02960);
880 itv->yuv_info.reg_2964 = read_reg(0x02964);
881 itv->yuv_info.reg_2968 = read_reg(0x02968);
882 itv->yuv_info.reg_296c = read_reg(0x0296c);
883 itv->yuv_info.reg_2970 = read_reg(0x02970);
884
885 itv->yuv_info.v_filter_1 = -1;
886 itv->yuv_info.v_filter_2 = -1;
887 itv->yuv_info.h_filter = -1;
888
889 /* Set some valid size info */
890 itv->yuv_info.osd_x_offset = read_reg(0x02a04) & 0x00000FFF;
891 itv->yuv_info.osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF;
892
893 /* Bit 2 of reg 2878 indicates current decoder output format
894 0 : NTSC 1 : PAL */
895 if (read_reg(0x2878) & 4)
896 itv->yuv_info.decode_height = 576;
897 else
898 itv->yuv_info.decode_height = 480;
899
900 /* If no visible size set, assume full size */
901 if (!itv->yuv_info.osd_vis_w) itv->yuv_info.osd_vis_w = 720 - itv->yuv_info.osd_x_offset;
902 if (!itv->yuv_info.osd_vis_h) itv->yuv_info.osd_vis_h = itv->yuv_info.decode_height - itv->yuv_info.osd_y_offset;
903
904 /* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
905 itv->yuv_info.blanking_ptr = kzalloc(720*16,GFP_KERNEL);
906 if (itv->yuv_info.blanking_ptr) {
907 itv->yuv_info.blanking_dmaptr = pci_map_single(itv->dev, itv->yuv_info.blanking_ptr, 720*16, PCI_DMA_TODEVICE);
908 }
909 else {
910 itv->yuv_info.blanking_dmaptr = 0;
911 IVTV_DEBUG_WARN ("Failed to allocate yuv blanking buffer\n");
912 }
913
914 IVTV_DEBUG_WARN("Enable video output\n");
915 write_reg_sync(0x00108080, 0x2898);
916
917 /* Enable YUV decoder output */
918 write_reg_sync(0x01, IVTV_REG_VDM);
919
920 set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
921 atomic_set(&itv->yuv_info.next_dma_frame,0);
922}
923
924int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
925{
926 DEFINE_WAIT(wait);
927 int rc = 0;
928 int got_sig = 0;
929 int frame, next_fill_frame, last_fill_frame;
930
931 IVTV_DEBUG_INFO("yuv_prep_frame\n");
932
933 if (atomic_read(&itv->yuv_info.next_dma_frame) == -1) ivtv_yuv_init(itv);
934
935 frame = atomic_read(&itv->yuv_info.next_fill_frame);
936 next_fill_frame = (frame + 1) & 0x3;
937 last_fill_frame = (atomic_read(&itv->yuv_info.next_dma_frame)+1) & 0x3;
938
939 if (next_fill_frame != last_fill_frame && last_fill_frame != frame) {
940 /* Buffers are full - Overwrite the last frame */
941 next_fill_frame = frame;
942 frame = (frame - 1) & 3;
943 }
944
945 /* Take a snapshot of the yuv coordinate information */
946 itv->yuv_info.new_frame_info[frame].src_x = args->src.left;
947 itv->yuv_info.new_frame_info[frame].src_y = args->src.top;
948 itv->yuv_info.new_frame_info[frame].src_w = args->src.width;
949 itv->yuv_info.new_frame_info[frame].src_h = args->src.height;
950 itv->yuv_info.new_frame_info[frame].dst_x = args->dst.left;
951 itv->yuv_info.new_frame_info[frame].dst_y = args->dst.top;
952 itv->yuv_info.new_frame_info[frame].dst_w = args->dst.width;
953 itv->yuv_info.new_frame_info[frame].dst_h = args->dst.height;
954 itv->yuv_info.new_frame_info[frame].tru_x = args->dst.left;
955 itv->yuv_info.new_frame_info[frame].tru_w = args->src_width;
956 itv->yuv_info.new_frame_info[frame].tru_h = args->src_height;
957
 958	/* Are we going to offset the Y plane? */
959 if (args->src.height + args->src.top < 512-16)
960 itv->yuv_info.new_frame_info[frame].offset_y = 1;
961 else
962 itv->yuv_info.new_frame_info[frame].offset_y = 0;
963
964 /* Snapshot the osd pan info */
965 itv->yuv_info.new_frame_info[frame].pan_x = itv->yuv_info.osd_x_pan;
966 itv->yuv_info.new_frame_info[frame].pan_y = itv->yuv_info.osd_y_pan;
967 itv->yuv_info.new_frame_info[frame].vis_w = itv->yuv_info.osd_vis_w;
968 itv->yuv_info.new_frame_info[frame].vis_h = itv->yuv_info.osd_vis_h;
969
970 itv->yuv_info.new_frame_info[frame].update = 0;
971 itv->yuv_info.new_frame_info[frame].interlaced_y = 0;
972 itv->yuv_info.new_frame_info[frame].interlaced_uv = 0;
973
974 if (memcmp (&itv->yuv_info.old_frame_info_args, &itv->yuv_info.new_frame_info[frame],
975 sizeof (itv->yuv_info.new_frame_info[frame]))) {
976 memcpy(&itv->yuv_info.old_frame_info_args, &itv->yuv_info.new_frame_info[frame], sizeof (itv->yuv_info.old_frame_info_args));
977 itv->yuv_info.new_frame_info[frame].update = 1;
978/* IVTV_DEBUG_YUV ("Requesting register update for frame %d\n",frame); */
979 }
980
981 /* DMA the frame */
982 mutex_lock(&itv->udma.lock);
983
984 if ((rc = ivtv_yuv_prep_user_dma(itv, &itv->udma, args)) != 0) {
985 mutex_unlock(&itv->udma.lock);
986 return rc;
987 }
988
989 ivtv_udma_prepare(itv);
990 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
991 /* if no UDMA is pending and no UDMA is in progress, then the DMA
992 is finished */
993 while (itv->i_flags & (IVTV_F_I_UDMA_PENDING | IVTV_F_I_UDMA)) {
994 /* don't interrupt if the DMA is in progress but break off
995 a still pending DMA. */
996 got_sig = signal_pending(current);
997 if (got_sig && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
998 break;
999 got_sig = 0;
1000 schedule();
1001 }
1002 finish_wait(&itv->dma_waitq, &wait);
1003
1004 /* Unmap Last DMA Xfer */
1005 ivtv_udma_unmap(itv);
1006
1007 if (got_sig) {
1008 IVTV_DEBUG_INFO("User stopped YUV UDMA\n");
1009 mutex_unlock(&itv->udma.lock);
1010 return -EINTR;
1011 }
1012
1013 atomic_set(&itv->yuv_info.next_fill_frame, next_fill_frame);
1014
1015 mutex_unlock(&itv->udma.lock);
1016 return rc;
1017}
1018
1019void ivtv_yuv_close(struct ivtv *itv)
1020{
1021 int h_filter, v_filter_1, v_filter_2;
1022
1023 IVTV_DEBUG_YUV("ivtv_yuv_close\n");
1024 ivtv_waitq(&itv->vsync_waitq);
1025
1026 atomic_set(&itv->yuv_info.next_dma_frame, -1);
1027 atomic_set(&itv->yuv_info.next_fill_frame, 0);
1028
1029 /* Reset registers we have changed so mpeg playback works */
1030
1031 /* If we fully restore this register, the display may remain active.
1032 Restore, but set one bit to blank the video. Firmware will always
1033 clear this bit when needed, so not a problem. */
1034 write_reg(itv->yuv_info.reg_2898 | 0x01000000, 0x2898);
1035
1036 write_reg(itv->yuv_info.reg_2834, 0x02834);
1037 write_reg(itv->yuv_info.reg_2838, 0x02838);
1038 write_reg(itv->yuv_info.reg_283c, 0x0283c);
1039 write_reg(itv->yuv_info.reg_2840, 0x02840);
1040 write_reg(itv->yuv_info.reg_2844, 0x02844);
1041 write_reg(itv->yuv_info.reg_2848, 0x02848);
1042 write_reg(itv->yuv_info.reg_2854, 0x02854);
1043 write_reg(itv->yuv_info.reg_285c, 0x0285c);
1044 write_reg(itv->yuv_info.reg_2864, 0x02864);
1045 write_reg(itv->yuv_info.reg_2870, 0x02870);
1046 write_reg(itv->yuv_info.reg_2874, 0x02874);
1047 write_reg(itv->yuv_info.reg_2890, 0x02890);
1048 write_reg(itv->yuv_info.reg_289c, 0x0289c);
1049
1050 write_reg(itv->yuv_info.reg_2918, 0x02918);
1051 write_reg(itv->yuv_info.reg_291c, 0x0291c);
1052 write_reg(itv->yuv_info.reg_2920, 0x02920);
1053 write_reg(itv->yuv_info.reg_2924, 0x02924);
1054 write_reg(itv->yuv_info.reg_2928, 0x02928);
1055 write_reg(itv->yuv_info.reg_292c, 0x0292c);
1056 write_reg(itv->yuv_info.reg_2930, 0x02930);
1057 write_reg(itv->yuv_info.reg_2934, 0x02934);
1058 write_reg(itv->yuv_info.reg_2938, 0x02938);
1059 write_reg(itv->yuv_info.reg_293c, 0x0293c);
1060 write_reg(itv->yuv_info.reg_2940, 0x02940);
1061 write_reg(itv->yuv_info.reg_2944, 0x02944);
1062 write_reg(itv->yuv_info.reg_2948, 0x02948);
1063 write_reg(itv->yuv_info.reg_294c, 0x0294c);
1064 write_reg(itv->yuv_info.reg_2950, 0x02950);
1065 write_reg(itv->yuv_info.reg_2954, 0x02954);
1066 write_reg(itv->yuv_info.reg_2958, 0x02958);
1067 write_reg(itv->yuv_info.reg_295c, 0x0295c);
1068 write_reg(itv->yuv_info.reg_2960, 0x02960);
1069 write_reg(itv->yuv_info.reg_2964, 0x02964);
1070 write_reg(itv->yuv_info.reg_2968, 0x02968);
1071 write_reg(itv->yuv_info.reg_296c, 0x0296c);
1072 write_reg(itv->yuv_info.reg_2970, 0x02970);
1073
1074 /* Prepare to restore filters */
1075
1076 /* First the horizontal filter */
1077 if ((itv->yuv_info.reg_2834 & 0x0000FFFF) == (itv->yuv_info.reg_2834 >> 16)) {
1078 /* An exact size match uses filter 0 */
1079 h_filter = 0;
1080 }
1081 else {
1082 /* Figure out which filter to use */
1083 h_filter = ((itv->yuv_info.reg_2834 << 16) / (itv->yuv_info.reg_2834 >> 16)) >> 15;
1084 h_filter = (h_filter >> 1) + (h_filter & 1);
1085 /* Only an exact size match can use filter 0. */
1086 if (h_filter < 1) h_filter = 1;
1087 }
1088
1089 /* Now the vertical filter */
1090 if ((itv->yuv_info.reg_2918 & 0x0000FFFF) == (itv->yuv_info.reg_2918 >> 16)) {
1091 /* An exact size match uses filter 0/1 */
1092 v_filter_1 = 0;
1093 v_filter_2 = 1;
1094 }
1095 else {
1096 /* Figure out which filter to use */
1097 v_filter_1 = ((itv->yuv_info.reg_2918 << 16) / (itv->yuv_info.reg_2918 >> 16)) >> 15;
1098 v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1);
1099 /* Only an exact size match can use filter 0 */
1100 if (v_filter_1 == 0) v_filter_1 = 1;
1101 v_filter_2 = v_filter_1;
1102 }
1103
1104 /* Now restore the filters */
1105 ivtv_yuv_filter (itv,h_filter,v_filter_1,v_filter_2);
1106
1107 /* and clear a few registers */
1108 write_reg(0, 0x02814);
1109 write_reg(0, 0x0282c);
1110 write_reg(0, 0x02904);
1111 write_reg(0, 0x02910);
1112
1113 /* Release the blanking buffer */
1114 if (itv->yuv_info.blanking_ptr) {
1115 kfree (itv->yuv_info.blanking_ptr);
1116 itv->yuv_info.blanking_ptr = NULL;
1117 pci_unmap_single(itv->dev, itv->yuv_info.blanking_dmaptr, 720*16, PCI_DMA_TODEVICE);
1118 }
1119
1120 /* Invalidate the old dimension information */
1121 itv->yuv_info.old_frame_info.src_w = 0;
1122 itv->yuv_info.old_frame_info.src_h = 0;
1123 itv->yuv_info.old_frame_info_args.src_w = 0;
1124 itv->yuv_info.old_frame_info_args.src_h = 0;
1125
1126 /* All done. */
1127 clear_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
1128}
1129
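
The window setup code above works in 16.16 fixed point: osd_scale holds source units per destination unit, so cropping N destination lines removes (osd_scale * N) >> 16 source lines. A minimal standalone sketch of that arithmetic, with hypothetical names that are not taken from the driver:

#include <stdio.h>

/* src-per-dst ratio in 16.16 fixed point, mirroring ivtv_yuv_window_setup() */
static unsigned int crop_src_lines(unsigned int src_h, unsigned int dst_h,
                                   unsigned int dst_crop)
{
	unsigned int scale = (src_h << 16) / dst_h;

	return (scale * dst_crop) >> 16;	/* source lines to drop */
}

int main(void)
{
	/* 480 source lines shown on 240 output lines: cropping 10 output
	   lines is equivalent to dropping 20 source lines */
	printf("%u\n", crop_src_lines(480, 240, 10));
	return 0;
}
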
diff --git a/drivers/media/video/ivtv/ivtv-yuv.h b/drivers/media/video/ivtv/ivtv-yuv.h
new file mode 100644
index 000000000000..88972d3f77c4
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-yuv.h
@@ -0,0 +1,24 @@
1/*
2 yuv support
3
4 Copyright (C) 2007 Ian Armstrong <ian@iarmst.demon.co.uk>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21int ivtv_yuv_filter_check(struct ivtv *itv);
22int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args);
23void ivtv_yuv_close(struct ivtv *itv);
24void ivtv_yuv_work_handler (struct ivtv *itv);
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index ba1af3c8525e..3bb7d6634862 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -773,6 +773,9 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
773 break; 773 break;
774 } 774 }
775 775
776 case VIDIOC_G_CHIP_IDENT:
777 return v4l2_chip_ident_i2c_client(client, arg, state->ident, (state->rev1 << 16) | state->rev2);
778
776 default: 779 default:
777 /* unknown */ 780 /* unknown */
778 return -EINVAL; 781 return -EINVAL;
@@ -872,6 +875,8 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
872 snprintf(client->name, sizeof(client->name), "MSP%d4%02d%c-%c%d", 875 snprintf(client->name, sizeof(client->name), "MSP%d4%02d%c-%c%d",
873 msp_family, msp_product, 876 msp_family, msp_product,
874 msp_revision, msp_hard, msp_rom); 877 msp_revision, msp_hard, msp_rom);
878 /* Rev B=2, C=3, D=4, G=7 */
879 state->ident = msp_family * 10000 + 4000 + msp_product * 10 + msp_revision - '@';
875 880
876 /* Has NICAM support: all mspx41x and mspx45x products have NICAM */ 881 /* Has NICAM support: all mspx41x and mspx45x products have NICAM */
877 state->has_nicam = msp_prod_hi == 1 || msp_prod_hi == 5; 882 state->has_nicam = msp_prod_hi == 1 || msp_prod_hi == 5;
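
As a worked example of the ident formula added above (the chip name here is hypothetical): a part reported as MSP3445G gives msp_family = 3, msp_product = 45 and msp_revision = 'G', so state->ident = 3 * 10000 + 4000 + 45 * 10 + ('G' - '@') = 30000 + 4000 + 450 + 7 = 34457, i.e. the part number 3445 with revision G encoded as 7, matching the "Rev B=2, C=3, D=4, G=7" comment.
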
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index 7531efa1615e..ab69a290e5dc 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -50,6 +50,7 @@ extern int msp_stereo_thresh;
50 50
51struct msp_state { 51struct msp_state {
52 int rev1, rev2; 52 int rev1, rev2;
53 int ident;
53 u8 has_nicam; 54 u8 has_nicam;
54 u8 has_radio; 55 u8 has_radio;
55 u8 has_headphones; 56 u8 has_headphones;
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 5ed0adc4ca26..03bc369a9e49 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -5,6 +5,8 @@
5 * by Jonathan Corbet with substantial inspiration from Mark 5 * by Jonathan Corbet with substantial inspiration from Mark
6 * McClelland's ovcamchip code. 6 * McClelland's ovcamchip code.
7 * 7 *
8 * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
9 *
8 * This file may be distributed under the terms of the GNU General 10 * This file may be distributed under the terms of the GNU General
9 * Public License, version 2. 11 * Public License, version 2.
10 */ 12 */
@@ -15,6 +17,7 @@
15#include <linux/delay.h> 17#include <linux/delay.h>
16#include <linux/videodev.h> 18#include <linux/videodev.h>
17#include <media/v4l2-common.h> 19#include <media/v4l2-common.h>
20#include <media/v4l2-chip-ident.h>
18#include <linux/i2c.h> 21#include <linux/i2c.h>
19 22
20 23
@@ -162,6 +165,10 @@ MODULE_LICENSE("GPL");
162 165
163#define REG_GFIX 0x69 /* Fix gain control */ 166#define REG_GFIX 0x69 /* Fix gain control */
164 167
168#define REG_REG76 0x76 /* OV's name */
169#define R76_BLKPCOR 0x80 /* Black pixel correction enable */
170#define R76_WHTPCOR 0x40 /* White pixel correction enable */
171
165#define REG_RGB444 0x8c /* RGB 444 control */ 172#define REG_RGB444 0x8c /* RGB 444 control */
166#define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */ 173#define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */
167#define R444_RGBX 0x01 /* Empty nibble at end */ 174#define R444_RGBX 0x01 /* Empty nibble at end */
@@ -255,7 +262,7 @@ static struct regval_list ov7670_default_regs[] = {
255 262
256 /* Almost all of these are magic "reserved" values. */ 263 /* Almost all of these are magic "reserved" values. */
257 { REG_COM5, 0x61 }, { REG_COM6, 0x4b }, 264 { REG_COM5, 0x61 }, { REG_COM6, 0x4b },
258 { 0x16, 0x02 }, { REG_MVFP, 0x07|MVFP_MIRROR }, 265 { 0x16, 0x02 }, { REG_MVFP, 0x07 },
259 { 0x21, 0x02 }, { 0x22, 0x91 }, 266 { 0x21, 0x02 }, { 0x22, 0x91 },
260 { 0x29, 0x07 }, { 0x33, 0x0b }, 267 { 0x29, 0x07 }, { 0x33, 0x0b },
261 { 0x35, 0x0b }, { 0x37, 0x1d }, 268 { 0x35, 0x0b }, { 0x37, 0x1d },
@@ -380,6 +387,13 @@ static struct regval_list ov7670_fmt_rgb444[] = {
380 { 0xff, 0xff }, 387 { 0xff, 0xff },
381}; 388};
382 389
390static struct regval_list ov7670_fmt_raw[] = {
391 { REG_COM7, COM7_BAYER },
392 { REG_COM13, 0x08 }, /* No gamma, magic rsvd bit */
393 { REG_COM16, 0x3d }, /* Edge enhancement, denoise */
394 { REG_REG76, 0xe1 }, /* Pix correction, magic rsvd */
395 { 0xff, 0xff },
396};
383 397
384 398
385 399
@@ -483,32 +497,39 @@ static struct ov7670_format_struct {
483 __u32 pixelformat; 497 __u32 pixelformat;
484 struct regval_list *regs; 498 struct regval_list *regs;
485 int cmatrix[CMATRIX_LEN]; 499 int cmatrix[CMATRIX_LEN];
500 int bpp; /* Bytes per pixel */
486} ov7670_formats[] = { 501} ov7670_formats[] = {
487 { 502 {
488 .desc = "YUYV 4:2:2", 503 .desc = "YUYV 4:2:2",
489 .pixelformat = V4L2_PIX_FMT_YUYV, 504 .pixelformat = V4L2_PIX_FMT_YUYV,
490 .regs = ov7670_fmt_yuv422, 505 .regs = ov7670_fmt_yuv422,
491 .cmatrix = { 128, -128, 0, -34, -94, 128 }, 506 .cmatrix = { 128, -128, 0, -34, -94, 128 },
507 .bpp = 2,
492 }, 508 },
493 { 509 {
494 .desc = "RGB 444", 510 .desc = "RGB 444",
495 .pixelformat = V4L2_PIX_FMT_RGB444, 511 .pixelformat = V4L2_PIX_FMT_RGB444,
496 .regs = ov7670_fmt_rgb444, 512 .regs = ov7670_fmt_rgb444,
497 .cmatrix = { 179, -179, 0, -61, -176, 228 }, 513 .cmatrix = { 179, -179, 0, -61, -176, 228 },
514 .bpp = 2,
498 }, 515 },
499 { 516 {
500 .desc = "RGB 565", 517 .desc = "RGB 565",
501 .pixelformat = V4L2_PIX_FMT_RGB565, 518 .pixelformat = V4L2_PIX_FMT_RGB565,
502 .regs = ov7670_fmt_rgb565, 519 .regs = ov7670_fmt_rgb565,
503 .cmatrix = { 179, -179, 0, -61, -176, 228 }, 520 .cmatrix = { 179, -179, 0, -61, -176, 228 },
521 .bpp = 2,
522 },
523 {
524 .desc = "Raw RGB Bayer",
525 .pixelformat = V4L2_PIX_FMT_SBGGR8,
526 .regs = ov7670_fmt_raw,
527 .cmatrix = { 0, 0, 0, 0, 0, 0 },
528 .bpp = 1
504 }, 529 },
505}; 530};
506#define N_OV7670_FMTS (sizeof(ov7670_formats)/sizeof(ov7670_formats[0])) 531#define N_OV7670_FMTS ARRAY_SIZE(ov7670_formats)
507 532
508/*
509 * All formats we support are 2 bytes/pixel.
510 */
511#define BYTES_PER_PIXEL 2
512 533
513/* 534/*
514 * Then there is the issue of window sizes. Try to capture the info here. 535 * Then there is the issue of window sizes. Try to capture the info here.
@@ -685,7 +706,7 @@ static int ov7670_try_fmt(struct i2c_client *c, struct v4l2_format *fmt,
685 */ 706 */
686 pix->width = wsize->width; 707 pix->width = wsize->width;
687 pix->height = wsize->height; 708 pix->height = wsize->height;
688 pix->bytesperline = pix->width*BYTES_PER_PIXEL; 709 pix->bytesperline = pix->width*ov7670_formats[index].bpp;
689 pix->sizeimage = pix->height*pix->bytesperline; 710 pix->sizeimage = pix->height*pix->bytesperline;
690 return 0; 711 return 0;
691} 712}
@@ -1270,9 +1291,8 @@ static int ov7670_command(struct i2c_client *client, unsigned int cmd,
1270 void *arg) 1291 void *arg)
1271{ 1292{
1272 switch (cmd) { 1293 switch (cmd) {
1273 case VIDIOC_INT_G_CHIP_IDENT: 1294 case VIDIOC_G_CHIP_IDENT:
1274 * (enum v4l2_chip_ident *) arg = V4L2_IDENT_OV7670; 1295 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_OV7670, 0);
1275 return 0;
1276 1296
1277 case VIDIOC_INT_RESET: 1297 case VIDIOC_INT_RESET:
1278 ov7670_reset(client); 1298 ov7670_reset(client);
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index e6e61df0d071..1455a8f4e930 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -2210,7 +2210,7 @@ static int find_planb(void)
2210 "membase 0x%x (base reg. 0x%x)\n", 2210 "membase 0x%x (base reg. 0x%x)\n",
2211 bus, PCI_SLOT(dev_fn), PCI_FUNC(dev_fn), old_base, confreg); 2211 bus, PCI_SLOT(dev_fn), PCI_FUNC(dev_fn), old_base, confreg);
2212 2212
2213 pdev = pci_find_slot (bus, dev_fn); 2213 pdev = pci_get_bus_and_slot(bus, dev_fn);
2214 if (!pdev) { 2214 if (!pdev) {
2215 printk(KERN_ERR "planb: cannot find slot\n"); 2215 printk(KERN_ERR "planb: cannot find slot\n");
2216 goto err_out; 2216 goto err_out;
@@ -2240,6 +2240,7 @@ static int find_planb(void)
2240 pb->planb_base = planb_regs; 2240 pb->planb_base = planb_regs;
2241 pb->planb_base_phys = (struct planb_registers *)new_base; 2241 pb->planb_base_phys = (struct planb_registers *)new_base;
2242 pb->irq = irq; 2242 pb->irq = irq;
2243 pb->dev = pdev;
2243 2244
2244 return planb_num; 2245 return planb_num;
2245 2246
@@ -2247,6 +2248,7 @@ err_out_disable:
2247 pci_disable_device(pdev); 2248 pci_disable_device(pdev);
2248err_out: 2249err_out:
2249 /* FIXME handle error */ /* comment moved from pci_find_slot, above */ 2250 /* FIXME handle error */ /* comment moved from pci_find_slot, above */
2251 pci_dev_put(pdev);
2250 return 0; 2252 return 0;
2251} 2253}
2252 2254
@@ -2274,6 +2276,8 @@ static void release_planb(void)
2274 printk(KERN_INFO "PlanB: unregistering with v4l\n"); 2276 printk(KERN_INFO "PlanB: unregistering with v4l\n");
2275 video_unregister_device(&pb->video_dev); 2277 video_unregister_device(&pb->video_dev);
2276 2278
2279 pci_dev_put(pb->dev);
2280
2277 /* note that iounmap() does nothing on the PPC right now */ 2281 /* note that iounmap() does nothing on the PPC right now */
2278 iounmap ((void *)pb->planb_base); 2282 iounmap ((void *)pb->planb_base);
2279 } 2283 }
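
The planb.c change above replaces pci_find_slot(), which returned a bare pointer, with pci_get_bus_and_slot(), which returns the device with its reference count raised; the added pci_dev_put() calls balance that reference on the error path and at release time. A rough sketch of the pattern (the function name is made up; error handling trimmed):

#include <linux/pci.h>

/* pci_get_bus_and_slot() takes a reference; the caller must eventually
   drop it with pci_dev_put().  pci_dev_put(NULL) is a no-op, which is
   why an error path may call it unconditionally. */
static struct pci_dev *example_lookup(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *pdev = pci_get_bus_and_slot(bus, devfn);

	if (!pdev)
		return NULL;
	/* ... keep pdev for the lifetime of the driver instance ... */
	return pdev;		/* released later via pci_dev_put(pdev) */
}
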
diff --git a/drivers/media/video/planb.h b/drivers/media/video/planb.h
index 92823211d0c5..e21b5735c103 100644
--- a/drivers/media/video/planb.h
+++ b/drivers/media/video/planb.h
@@ -177,6 +177,7 @@ struct planb {
177 struct mutex lock; 177 struct mutex lock;
178 unsigned int irq; /* interrupt number */ 178 unsigned int irq; /* interrupt number */
179 volatile unsigned int intr_mask; 179 volatile unsigned int intr_mask;
180 struct pci_dev *dev; /* Our PCI device */
180 181
181 int overlay; /* overlay running? */ 182 int overlay; /* overlay running? */
182 struct planb_window win; 183 struct planb_window win;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index 5786faf9b3b8..5669c8ca9ca3 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -324,7 +324,7 @@ static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
324 324
325/* This implements some extra setup for the encoder that seems to be 325/* This implements some extra setup for the encoder that seems to be
326 specific to the PVR USB2 hardware. */ 326 specific to the PVR USB2 hardware. */
327int pvr2_encoder_prep_config(struct pvr2_hdw *hdw) 327static int pvr2_encoder_prep_config(struct pvr2_hdw *hdw)
328{ 328{
329 int ret = 0; 329 int ret = 0;
330 int encMisc3Arg = 0; 330 int encMisc3Arg = 0;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 16bd74199601..ce66ab8ff2d8 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -283,6 +283,8 @@ struct pvr2_hdw {
283 int unit_number; /* ID for driver instance */ 283 int unit_number; /* ID for driver instance */
284 unsigned long serial_number; /* ID for hardware itself */ 284 unsigned long serial_number; /* ID for hardware itself */
285 285
286 char bus_info[32]; /* Bus location info */
287
286 /* Minor numbers used by v4l logic (yes, this is a hack, as there 288 /* Minor numbers used by v4l logic (yes, this is a hack, as there
287 should be no v4l junk here). Probably a better way to do this. */ 289 should be no v4l junk here). Probably a better way to do this. */
288 int v4l_minor_number_video; 290 int v4l_minor_number_video;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 9916cf32494d..acf651e01f94 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -1008,6 +1008,13 @@ unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *hdw)
1008 return hdw->serial_number; 1008 return hdw->serial_number;
1009} 1009}
1010 1010
1011
1012const char *pvr2_hdw_get_bus_info(struct pvr2_hdw *hdw)
1013{
1014 return hdw->bus_info;
1015}
1016
1017
1011unsigned long pvr2_hdw_get_cur_freq(struct pvr2_hdw *hdw) 1018unsigned long pvr2_hdw_get_cur_freq(struct pvr2_hdw *hdw)
1012{ 1019{
1013 return hdw->freqSelector ? hdw->freqValTelevision : hdw->freqValRadio; 1020 return hdw->freqSelector ? hdw->freqValTelevision : hdw->freqValRadio;
@@ -2105,6 +2112,11 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
2105 hdw->usb_intf = intf; 2112 hdw->usb_intf = intf;
2106 hdw->usb_dev = interface_to_usbdev(intf); 2113 hdw->usb_dev = interface_to_usbdev(intf);
2107 2114
2115 scnprintf(hdw->bus_info,sizeof(hdw->bus_info),
2116 "usb %s address %d",
2117 hdw->usb_dev->dev.bus_id,
2118 hdw->usb_dev->devnum);
2119
2108 ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber; 2120 ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber;
2109 usb_set_interface(hdw->usb_dev,ifnum,0); 2121 usb_set_interface(hdw->usb_dev,ifnum,0);
2110 2122
@@ -3275,7 +3287,9 @@ int pvr2_hdw_register_access(struct pvr2_hdw *hdw,
3275 mutex_lock(&hdw->i2c_list_lock); do { 3287 mutex_lock(&hdw->i2c_list_lock); do {
3276 list_for_each(item,&hdw->i2c_clients) { 3288 list_for_each(item,&hdw->i2c_clients) {
3277 cp = list_entry(item,struct pvr2_i2c_client,list); 3289 cp = list_entry(item,struct pvr2_i2c_client,list);
3278 if (!v4l2_chip_match_i2c_client(cp->client, req.match_type, req.match_chip)) { 3290 if (!v4l2_chip_match_i2c_client(
3291 cp->client,
3292 req.match_type, req.match_chip)) {
3279 continue; 3293 continue;
3280 } 3294 }
3281 stat = pvr2_i2c_client_cmd( 3295 stat = pvr2_i2c_client_cmd(
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
index 0c9cca43ff85..4dba8d006324 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -124,6 +124,9 @@ struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *);
124/* Retrieve serial number of device */ 124/* Retrieve serial number of device */
125unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *); 125unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *);
126 126
127/* Retrieve bus location info of device */
128const char *pvr2_hdw_get_bus_info(struct pvr2_hdw *);
129
127/* Called when hardware has been unplugged */ 130/* Called when hardware has been unplugged */
128void pvr2_hdw_disconnect(struct pvr2_hdw *); 131void pvr2_hdw_disconnect(struct pvr2_hdw *);
129 132
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
index 91396fd573e4..a741c556a39a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -42,9 +42,11 @@ struct pvr2_sysfs {
42 struct class_device_attribute attr_v4l_minor_number; 42 struct class_device_attribute attr_v4l_minor_number;
43 struct class_device_attribute attr_v4l_radio_minor_number; 43 struct class_device_attribute attr_v4l_radio_minor_number;
44 struct class_device_attribute attr_unit_number; 44 struct class_device_attribute attr_unit_number;
45 struct class_device_attribute attr_bus_info;
45 int v4l_minor_number_created_ok; 46 int v4l_minor_number_created_ok;
46 int v4l_radio_minor_number_created_ok; 47 int v4l_radio_minor_number_created_ok;
47 int unit_number_created_ok; 48 int unit_number_created_ok;
49 int bus_info_created_ok;
48}; 50};
49 51
50#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC 52#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
@@ -705,6 +707,10 @@ static void class_dev_destroy(struct pvr2_sysfs *sfp)
705 pvr2_sysfs_tear_down_debugifc(sfp); 707 pvr2_sysfs_tear_down_debugifc(sfp);
706#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */ 708#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
707 pvr2_sysfs_tear_down_controls(sfp); 709 pvr2_sysfs_tear_down_controls(sfp);
710 if (sfp->bus_info_created_ok) {
711 class_device_remove_file(sfp->class_dev,
712 &sfp->attr_bus_info);
713 }
708 if (sfp->v4l_minor_number_created_ok) { 714 if (sfp->v4l_minor_number_created_ok) {
709 class_device_remove_file(sfp->class_dev, 715 class_device_remove_file(sfp->class_dev,
710 &sfp->attr_v4l_minor_number); 716 &sfp->attr_v4l_minor_number);
@@ -735,6 +741,16 @@ static ssize_t v4l_minor_number_show(struct class_device *class_dev,char *buf)
735} 741}
736 742
737 743
744static ssize_t bus_info_show(struct class_device *class_dev,char *buf)
745{
746 struct pvr2_sysfs *sfp;
747 sfp = (struct pvr2_sysfs *)class_dev->class_data;
748 if (!sfp) return -EINVAL;
749 return scnprintf(buf,PAGE_SIZE,"%s\n",
750 pvr2_hdw_get_bus_info(sfp->channel.hdw));
751}
752
753
738static ssize_t v4l_radio_minor_number_show(struct class_device *class_dev, 754static ssize_t v4l_radio_minor_number_show(struct class_device *class_dev,
739 char *buf) 755 char *buf)
740{ 756{
@@ -836,6 +852,20 @@ static void class_dev_create(struct pvr2_sysfs *sfp,
836 sfp->unit_number_created_ok = !0; 852 sfp->unit_number_created_ok = !0;
837 } 853 }
838 854
855 sfp->attr_bus_info.attr.owner = THIS_MODULE;
856 sfp->attr_bus_info.attr.name = "bus_info_str";
857 sfp->attr_bus_info.attr.mode = S_IRUGO;
858 sfp->attr_bus_info.show = bus_info_show;
859 sfp->attr_bus_info.store = NULL;
860 ret = class_device_create_file(sfp->class_dev,
861 &sfp->attr_bus_info);
862 if (ret < 0) {
863 printk(KERN_WARNING "%s: class_device_create_file error: %d\n",
864 __FUNCTION__, ret);
865 } else {
866 sfp->bus_info_created_ok = !0;
867 }
868
839 pvr2_sysfs_add_controls(sfp); 869 pvr2_sysfs_add_controls(sfp);
840#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC 870#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
841 pvr2_sysfs_add_debugifc(sfp); 871 pvr2_sysfs_add_debugifc(sfp);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 25d3830b482a..4563b3df8a0d 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -203,6 +203,8 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
203 struct v4l2_capability *cap = arg; 203 struct v4l2_capability *cap = arg;
204 204
205 memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); 205 memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
206 strlcpy(cap->bus_info,pvr2_hdw_get_bus_info(hdw),
207 sizeof(cap->bus_info));
206 208
207 ret = 0; 209 ret = 0;
208 break; 210 break;
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 0bd115588f31..338ced7188f2 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -140,6 +140,8 @@ static const char *size2name[PSZ_MAX] =
140 An alternate value of 0 means this mode is not available at all. 140 An alternate value of 0 means this mode is not available at all.
141 */ 141 */
142 142
143#define PWC_FPS_MAX_NALA 8
144
143struct Nala_table_entry { 145struct Nala_table_entry {
144 char alternate; /* USB alternate setting */ 146 char alternate; /* USB alternate setting */
145 int compressed; /* Compressed yes/no */ 147 int compressed; /* Compressed yes/no */
@@ -147,7 +149,9 @@ struct Nala_table_entry {
147 unsigned char mode[3]; /* precomputed mode table */ 149 unsigned char mode[3]; /* precomputed mode table */
148}; 150};
149 151
150static struct Nala_table_entry Nala_table[PSZ_MAX][8] = 152static unsigned int Nala_fps_vector[PWC_FPS_MAX_NALA] = { 4, 5, 7, 10, 12, 15, 20, 24 };
153
154static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
151{ 155{
152#include "pwc-nala.h" 156#include "pwc-nala.h"
153}; 157};
@@ -423,6 +427,59 @@ int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frame
423 return 0; 427 return 0;
424} 428}
425 429
430static unsigned int pwc_get_fps_Nala(struct pwc_device *pdev, unsigned int index, unsigned int size)
431{
432 unsigned int i;
433
434 for (i = 0; i < PWC_FPS_MAX_NALA; i++) {
435 if (Nala_table[size][i].alternate) {
436 if (index--==0) return Nala_fps_vector[i];
437 }
438 }
439 return 0;
440}
441
442static unsigned int pwc_get_fps_Kiara(struct pwc_device *pdev, unsigned int index, unsigned int size)
443{
444 unsigned int i;
445
446 for (i = 0; i < PWC_FPS_MAX_KIARA; i++) {
447 if (Kiara_table[size][i][3].alternate) {
448 if (index--==0) return Kiara_fps_vector[i];
449 }
450 }
451 return 0;
452}
453
454static unsigned int pwc_get_fps_Timon(struct pwc_device *pdev, unsigned int index, unsigned int size)
455{
456 unsigned int i;
457
458 for (i=0; i < PWC_FPS_MAX_TIMON; i++) {
459 if (Timon_table[size][i][3].alternate) {
460 if (index--==0) return Timon_fps_vector[i];
461 }
462 }
463 return 0;
464}
465
466unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size)
467{
468 unsigned int ret;
469
470 if (DEVICE_USE_CODEC1(pdev->type)) {
471 ret = pwc_get_fps_Nala(pdev, index, size);
472
473 } else if (DEVICE_USE_CODEC3(pdev->type)) {
474 ret = pwc_get_fps_Kiara(pdev, index, size);
475
476 } else {
477 ret = pwc_get_fps_Timon(pdev, index, size);
478 }
479
480 return ret;
481}
482
426#define BLACK_Y 0 483#define BLACK_Y 0
427#define BLACK_U 128 484#define BLACK_U 128
428#define BLACK_V 128 485#define BLACK_V 128
@@ -1343,7 +1400,7 @@ int pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1343 ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red); 1400 ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red);
1344 if (ret < 0) 1401 if (ret < 0)
1345 break; 1402 break;
1346 ret =pwc_read_blue_gain(pdev, &ARGR(wb).read_blue); 1403 ret = pwc_read_blue_gain(pdev, &ARGR(wb).read_blue);
1347 if (ret < 0) 1404 if (ret < 0)
1348 break; 1405 break;
1349 } 1406 }
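
The pwc_get_fps_*() helpers added above all follow the same pattern: walk the per-size mode table, skip entries whose alternate setting is zero (the rate is not available at that size), and map the caller's index onto the fps vector. A standalone sketch with a hypothetical availability table:

#include <stdio.h>

#define N_RATES 8

static const unsigned int fps_vector[N_RATES] = { 4, 5, 7, 10, 12, 15, 20, 24 };
/* non-zero means "this rate is usable at the chosen image size" */
static const char alternate[N_RATES]          = { 0, 1, 1, 0, 1,  1,  1,  0 };

static unsigned int get_fps(unsigned int index)
{
	unsigned int i;

	for (i = 0; i < N_RATES; i++)
		if (alternate[i] && index-- == 0)	/* only count usable rates */
			return fps_vector[i];
	return 0;	/* index is past the last supported rate */
}

int main(void)
{
	printf("%u %u\n", get_fps(0), get_fps(2));	/* prints "5 12" */
	return 0;
}
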
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 27ed76986ca2..085332a503de 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -95,8 +95,8 @@ static const struct usb_device_id pwc_device_table [] = {
95 { USB_DEVICE(0x046D, 0x08B3) }, /* Logitech QuickCam Zoom (old model) */ 95 { USB_DEVICE(0x046D, 0x08B3) }, /* Logitech QuickCam Zoom (old model) */
96 { USB_DEVICE(0x046D, 0x08B4) }, /* Logitech QuickCam Zoom (new model) */ 96 { USB_DEVICE(0x046D, 0x08B4) }, /* Logitech QuickCam Zoom (new model) */
97 { USB_DEVICE(0x046D, 0x08B5) }, /* Logitech QuickCam Orbit/Sphere */ 97 { USB_DEVICE(0x046D, 0x08B5) }, /* Logitech QuickCam Orbit/Sphere */
98 { USB_DEVICE(0x046D, 0x08B6) }, /* Logitech (reserved) */ 98 { USB_DEVICE(0x046D, 0x08B6) }, /* Cisco VT Camera */
99 { USB_DEVICE(0x046D, 0x08B7) }, /* Logitech (reserved) */ 99 { USB_DEVICE(0x046D, 0x08B7) }, /* Logitech ViewPort AV 100 */
100 { USB_DEVICE(0x046D, 0x08B8) }, /* Logitech (reserved) */ 100 { USB_DEVICE(0x046D, 0x08B8) }, /* Logitech (reserved) */
101 { USB_DEVICE(0x055D, 0x9000) }, /* Samsung MPC-C10 */ 101 { USB_DEVICE(0x055D, 0x9000) }, /* Samsung MPC-C10 */
102 { USB_DEVICE(0x055D, 0x9001) }, /* Samsung MPC-C30 */ 102 { USB_DEVICE(0x055D, 0x9001) }, /* Samsung MPC-C30 */
@@ -1493,7 +1493,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1493 case 0x0329: 1493 case 0x0329:
1494 PWC_INFO("Philips SPC 900NC USB webcam detected.\n"); 1494 PWC_INFO("Philips SPC 900NC USB webcam detected.\n");
1495 name = "Philips SPC 900NC webcam"; 1495 name = "Philips SPC 900NC webcam";
1496 type_id = 720; 1496 type_id = 740;
1497 break; 1497 break;
1498 default: 1498 default:
1499 return -ENODEV; 1499 return -ENODEV;
@@ -1547,8 +1547,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1547 features |= FEATURE_MOTOR_PANTILT; 1547 features |= FEATURE_MOTOR_PANTILT;
1548 break; 1548 break;
1549 case 0x08b6: 1549 case 0x08b6:
1550 PWC_INFO("Logitech/Cisco VT Camera webcam detected.\n");
1551 name = "Cisco VT Camera";
1552 type_id = 740; /* CCD sensor */
1553 break;
1550 case 0x08b7: 1554 case 0x08b7:
1551 case 0x08b8: 1555 PWC_INFO("Logitech ViewPort AV 100 webcam detected.\n");
1556 name = "Logitech ViewPort AV 100";
1557 type_id = 740; /* CCD sensor */
1558 break;
1559	case 0x08b8: /* Was this ever released? */
1552 PWC_INFO("Logitech QuickCam detected (reserved ID).\n"); 1560 PWC_INFO("Logitech QuickCam detected (reserved ID).\n");
1553 name = "Logitech QuickCam (res.)"; 1561 name = "Logitech QuickCam (res.)";
1554 type_id = 730; /* Assuming CMOS */ 1562 type_id = 730; /* Assuming CMOS */
diff --git a/drivers/media/video/pwc/pwc-ioctl.h b/drivers/media/video/pwc/pwc-ioctl.h
index 784bc72521fa..cec660299768 100644
--- a/drivers/media/video/pwc/pwc-ioctl.h
+++ b/drivers/media/video/pwc/pwc-ioctl.h
@@ -2,7 +2,7 @@
2#define PWC_IOCTL_H 2#define PWC_IOCTL_H
3 3
4/* (C) 2001-2004 Nemosoft Unv. 4/* (C) 2001-2004 Nemosoft Unv.
5 (C) 2004 Luc Saillard (luc@saillard.org) 5 (C) 2004-2006 Luc Saillard (luc@saillard.org)
6 6
7 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx 7 NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
8 driver and thus may have bugs that are not present in the original version. 8 driver and thus may have bugs that are not present in the original version.
@@ -25,7 +25,7 @@
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26*/ 26*/
27 27
28/* This is pwc-ioctl.h belonging to PWC 8.12.1 28/* This is pwc-ioctl.h belonging to PWC 10.0.10
29 It contains structures and defines to communicate from user space 29 It contains structures and defines to communicate from user space
30 directly to the driver. 30 directly to the driver.
31 */ 31 */
@@ -51,6 +51,9 @@
51 ... the function 51 ... the function
52 */ 52 */
53 53
54#include <linux/types.h>
55#include <linux/version.h>
56
54 57
55 /* Enumeration of image sizes */ 58 /* Enumeration of image sizes */
56#define PSZ_SQCIF 0x00 59#define PSZ_SQCIF 0x00
@@ -65,6 +68,8 @@
65/* The frame rate is encoded in the video_window.flags parameter using 68/* The frame rate is encoded in the video_window.flags parameter using
66 the upper 16 bits, since some flags are defined nowadays. The following 69 the upper 16 bits, since some flags are defined nowadays. The following
67 defines provide a mask and shift to filter out this value. 70 defines provide a mask and shift to filter out this value.
71   This value can also be passed using the private flag when using the v4l2
72   VIDIOC_S_FMT ioctl.
68 73
69 In 'Snapshot' mode the camera freezes its automatic exposure and colour 74 In 'Snapshot' mode the camera freezes its automatic exposure and colour
70 balance controls. 75 balance controls.
@@ -73,6 +78,8 @@
73#define PWC_FPS_MASK 0x00FF0000 78#define PWC_FPS_MASK 0x00FF0000
74#define PWC_FPS_FRMASK 0x003F0000 79#define PWC_FPS_FRMASK 0x003F0000
75#define PWC_FPS_SNAPSHOT 0x00400000 80#define PWC_FPS_SNAPSHOT 0x00400000
81#define PWC_QLT_MASK 0x03000000
82#define PWC_QLT_SHIFT 24
76 83
77 84
78/* structure for transferring x & y coordinates */ 85/* structure for transferring x & y coordinates */
@@ -289,4 +296,29 @@ struct pwc_table_init_buffer {
289}; 296};
290#define VIDIOCPWCGVIDTABLE _IOR('v', 216, struct pwc_table_init_buffer) 297#define VIDIOCPWCGVIDTABLE _IOR('v', 216, struct pwc_table_init_buffer)
291 298
299/*
300 * These are private commands used when communicating with v4l2.
301 * In the future all private ioctls will be removed/replaced by
302 * the interfaces offered by v4l2.
303 */
304
305#define V4L2_CID_PRIVATE_SAVE_USER (V4L2_CID_PRIVATE_BASE + 0)
306#define V4L2_CID_PRIVATE_RESTORE_USER (V4L2_CID_PRIVATE_BASE + 1)
307#define V4L2_CID_PRIVATE_RESTORE_FACTORY (V4L2_CID_PRIVATE_BASE + 2)
308#define V4L2_CID_PRIVATE_COLOUR_MODE (V4L2_CID_PRIVATE_BASE + 3)
309#define V4L2_CID_PRIVATE_AUTOCONTOUR (V4L2_CID_PRIVATE_BASE + 4)
310#define V4L2_CID_PRIVATE_CONTOUR (V4L2_CID_PRIVATE_BASE + 5)
311#define V4L2_CID_PRIVATE_BACKLIGHT (V4L2_CID_PRIVATE_BASE + 6)
312#define V4L2_CID_PRIVATE_FLICKERLESS (V4L2_CID_PRIVATE_BASE + 7)
313#define V4L2_CID_PRIVATE_NOISE_REDUCTION (V4L2_CID_PRIVATE_BASE + 8)
314
315struct pwc_raw_frame {
316 __le16 type; /* type of the webcam */
317	__le16 vbandlength;	/* Size of 4 lines compressed (used by the decompressor) */
318	__u8   cmd[4];		/* the four bytes of the command (in the case of nala,
319				   only the first 3 bytes are filled) */
320 __u8 rawframe[0]; /* frame_size = H/4*vbandlength */
321} __attribute__ ((packed));
322
323
292#endif 324#endif
diff --git a/drivers/media/video/pwc/pwc-kiara.c b/drivers/media/video/pwc/pwc-kiara.c
index fec39cc5a9f1..f4ae83c0cf2b 100644
--- a/drivers/media/video/pwc/pwc-kiara.c
+++ b/drivers/media/video/pwc/pwc-kiara.c
@@ -42,6 +42,8 @@
42#include "pwc-kiara.h" 42#include "pwc-kiara.h"
43#include "pwc-uncompress.h" 43#include "pwc-uncompress.h"
44 44
45const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA] = { 5, 10, 15, 20, 25, 30 };
46
45const struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4] = 47const struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4] =
46{ 48{
47 /* SQCIF */ 49 /* SQCIF */
diff --git a/drivers/media/video/pwc/pwc-kiara.h b/drivers/media/video/pwc/pwc-kiara.h
index 0bdb22547d86..047dad8c15f7 100644
--- a/drivers/media/video/pwc/pwc-kiara.h
+++ b/drivers/media/video/pwc/pwc-kiara.h
@@ -29,6 +29,8 @@
29 29
30#include <media/pwc-ioctl.h> 30#include <media/pwc-ioctl.h>
31 31
32#define PWC_FPS_MAX_KIARA 6
33
32struct Kiara_table_entry 34struct Kiara_table_entry
33{ 35{
34 char alternate; /* USB alternate interface */ 36 char alternate; /* USB alternate interface */
@@ -37,8 +39,9 @@ struct Kiara_table_entry
37 unsigned char mode[12]; /* precomputed mode settings for cam */ 39 unsigned char mode[12]; /* precomputed mode settings for cam */
38}; 40};
39 41
40extern const struct Kiara_table_entry Kiara_table[PSZ_MAX][6][4]; 42extern const struct Kiara_table_entry Kiara_table[PSZ_MAX][PWC_FPS_MAX_KIARA][4];
41extern const unsigned int KiaraRomTable[8][2][16][8]; 43extern const unsigned int KiaraRomTable[8][2][16][8];
44extern const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA];
42 45
43#endif 46#endif
44 47
diff --git a/drivers/media/video/pwc/pwc-timon.c b/drivers/media/video/pwc/pwc-timon.c
index be65bdcd195b..c56c174b161c 100644
--- a/drivers/media/video/pwc/pwc-timon.c
+++ b/drivers/media/video/pwc/pwc-timon.c
@@ -40,7 +40,9 @@
40 40
41#include "pwc-timon.h" 41#include "pwc-timon.h"
42 42
43const struct Timon_table_entry Timon_table[PSZ_MAX][6][4] = 43const unsigned int Timon_fps_vector[PWC_FPS_MAX_TIMON] = { 5, 10, 15, 20, 25, 30 };
44
45const struct Timon_table_entry Timon_table[PSZ_MAX][PWC_FPS_MAX_TIMON][4] =
44{ 46{
45 /* SQCIF */ 47 /* SQCIF */
46 { 48 {
diff --git a/drivers/media/video/pwc/pwc-timon.h b/drivers/media/video/pwc/pwc-timon.h
index eef9e2cd4320..a6e22224c95f 100644
--- a/drivers/media/video/pwc/pwc-timon.h
+++ b/drivers/media/video/pwc/pwc-timon.h
@@ -44,6 +44,8 @@
44 44
45#include <media/pwc-ioctl.h> 45#include <media/pwc-ioctl.h>
46 46
47#define PWC_FPS_MAX_TIMON 6
48
47struct Timon_table_entry 49struct Timon_table_entry
48{ 50{
49 char alternate; /* USB alternate interface */ 51 char alternate; /* USB alternate interface */
@@ -52,9 +54,9 @@ struct Timon_table_entry
52 unsigned char mode[13]; /* precomputed mode settings for cam */ 54 unsigned char mode[13]; /* precomputed mode settings for cam */
53}; 55};
54 56
55extern const struct Timon_table_entry Timon_table[PSZ_MAX][6][4]; 57extern const struct Timon_table_entry Timon_table[PSZ_MAX][PWC_FPS_MAX_TIMON][4];
56extern const unsigned int TimonRomTable [16][2][16][8]; 58extern const unsigned int TimonRomTable [16][2][16][8];
57 59extern const unsigned int Timon_fps_vector[PWC_FPS_MAX_TIMON];
58 60
59#endif 61#endif
60 62
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index d5e6bc850643..32fbe1ae6251 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -1168,7 +1168,7 @@ int pwc_video_do_ioctl(struct inode *inode, struct file *file,
1168 buf->sequence = 0; 1168 buf->sequence = 0;
1169 buf->memory = V4L2_MEMORY_MMAP; 1169 buf->memory = V4L2_MEMORY_MMAP;
1170 buf->m.offset = pdev->fill_image * pdev->len_per_image; 1170 buf->m.offset = pdev->fill_image * pdev->len_per_image;
1171 buf->length = buf->bytesused; 1171 buf->length = pdev->len_per_image;
1172 pwc_next_image(pdev); 1172 pwc_next_image(pdev);
1173 1173
1174 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->index=%d\n",buf->index); 1174 PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->index=%d\n",buf->index);
@@ -1193,6 +1193,64 @@ int pwc_video_do_ioctl(struct inode *inode, struct file *file,
1193 return 0; 1193 return 0;
1194 } 1194 }
1195 1195
1196 case VIDIOC_ENUM_FRAMESIZES:
1197 {
1198 struct v4l2_frmsizeenum *fsize = arg;
1199 unsigned int i = 0, index = fsize->index;
1200
1201 if (fsize->pixel_format == V4L2_PIX_FMT_YUV420) {
1202 for (i = 0; i < PSZ_MAX; i++) {
1203 if (pdev->image_mask & (1UL << i)) {
1204 if (!index--) {
1205 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1206 fsize->discrete.width = pwc_image_sizes[i].x;
1207 fsize->discrete.height = pwc_image_sizes[i].y;
1208 return 0;
1209 }
1210 }
1211 }
1212 } else if (fsize->index == 0 &&
1213 ((fsize->pixel_format == V4L2_PIX_FMT_PWC1 && DEVICE_USE_CODEC1(pdev->type)) ||
1214 (fsize->pixel_format == V4L2_PIX_FMT_PWC2 && DEVICE_USE_CODEC23(pdev->type)))) {
1215
1216 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1217 fsize->discrete.width = pdev->abs_max.x;
1218 fsize->discrete.height = pdev->abs_max.y;
1219 return 0;
1220 }
1221 return -EINVAL;
1222 }
1223
1224 case VIDIOC_ENUM_FRAMEINTERVALS:
1225 {
1226 struct v4l2_frmivalenum *fival = arg;
1227 int size = -1;
1228 unsigned int i;
1229
1230 for (i = 0; i < PSZ_MAX; i++) {
1231 if (pwc_image_sizes[i].x == fival->width &&
1232 pwc_image_sizes[i].y == fival->height) {
1233 size = i;
1234 break;
1235 }
1236 }
1237
1238 /* TODO: Support raw format */
1239 if (size < 0 || fival->pixel_format != V4L2_PIX_FMT_YUV420) {
1240 return -EINVAL;
1241 }
1242
1243 i = pwc_get_fps(pdev, fival->index, size);
1244 if (!i)
1245 return -EINVAL;
1246
1247 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1248 fival->discrete.numerator = 1;
1249 fival->discrete.denominator = i;
1250
1251 return 0;
1252 }
1253
1196 default: 1254 default:
1197 return pwc_ioctl(pdev, cmd, arg); 1255 return pwc_ioctl(pdev, cmd, arg);
1198 } /* ..switch */ 1256 } /* ..switch */
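
A minimal userspace sketch of how the VIDIOC_ENUM_FRAMEINTERVALS handler added above might be exercised; the device path and the 320x240 size are assumptions, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmivalenum fival;
	int fd = open("/dev/video0", O_RDWR);	/* assumed pwc device node */

	if (fd < 0)
		return 1;
	memset(&fival, 0, sizeof(fival));
	fival.pixel_format = V4L2_PIX_FMT_YUV420;
	fival.width  = 320;	/* must match one of the supported sizes */
	fival.height = 240;
	/* keep incrementing the index until the driver returns -EINVAL */
	while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fival) == 0) {
		printf("%u/%u seconds per frame\n",
		       fival.discrete.numerator, fival.discrete.denominator);
		fival.index++;
	}
	close(fd);
	return 0;
}
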
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e778a2b8c280..acbb9312960a 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -44,7 +44,7 @@
44#define PWC_MINOR 0 44#define PWC_MINOR 0
45#define PWC_EXTRAMINOR 12 45#define PWC_EXTRAMINOR 12
46#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR) 46#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR)
47#define PWC_VERSION "10.0.12" 47#define PWC_VERSION "10.0.13"
48#define PWC_NAME "pwc" 48#define PWC_NAME "pwc"
49#define PFX PWC_NAME ": " 49#define PFX PWC_NAME ": "
50 50
@@ -85,7 +85,7 @@
85#define PWC_INFO(fmt, args...) printk(KERN_INFO PFX fmt, ##args) 85#define PWC_INFO(fmt, args...) printk(KERN_INFO PFX fmt, ##args)
86#define PWC_TRACE(fmt, args...) PWC_DEBUG(TRACE, fmt, ##args) 86#define PWC_TRACE(fmt, args...) PWC_DEBUG(TRACE, fmt, ##args)
87 87
88#else /* if ! CONFIG_PWC_DEBUG */ 88#else /* if ! CONFIG_USB_PWC_DEBUG */
89 89
90#define PWC_ERROR(fmt, args...) printk(KERN_ERR PFX fmt, ##args) 90#define PWC_ERROR(fmt, args...) printk(KERN_ERR PFX fmt, ##args)
91#define PWC_WARNING(fmt, args...) printk(KERN_WARNING PFX fmt, ##args) 91#define PWC_WARNING(fmt, args...) printk(KERN_WARNING PFX fmt, ##args)
@@ -287,6 +287,7 @@ void pwc_construct(struct pwc_device *pdev);
287/** Functions in pwc-ctrl.c */ 287/** Functions in pwc-ctrl.c */
288/* Request a certain video mode. Returns < 0 if not possible */ 288/* Request a certain video mode. Returns < 0 if not possible */
289extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot); 289extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot);
290extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
290/* Calculate the number of bytes per image (not frame) */ 291/* Calculate the number of bytes per image (not frame) */
291extern int pwc_mpt_reset(struct pwc_device *pdev, int flags); 292extern int pwc_mpt_reset(struct pwc_device *pdev, int flags);
292extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt); 293extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt);
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 4d5bbd859de1..2d18f0069821 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -45,6 +45,7 @@
45#include <linux/i2c.h> 45#include <linux/i2c.h>
46#include <linux/videodev2.h> 46#include <linux/videodev2.h>
47#include <media/v4l2-common.h> 47#include <media/v4l2-common.h>
48#include <media/v4l2-chip-ident.h>
48#include <media/saa7115.h> 49#include <media/saa7115.h>
49#include <asm/div64.h> 50#include <asm/div64.h>
50 51
@@ -80,7 +81,7 @@ struct saa711x_state {
80 int sat; 81 int sat;
81 int width; 82 int width;
82 int height; 83 int height;
83 enum v4l2_chip_ident ident; 84 u32 ident;
84 u32 audclk_freq; 85 u32 audclk_freq;
85 u32 crystal_freq; 86 u32 crystal_freq;
86 u8 ucgc; 87 u8 ucgc;
@@ -1232,7 +1233,6 @@ static void saa711x_decode_vbi_line(struct i2c_client *client,
1232static int saa711x_command(struct i2c_client *client, unsigned int cmd, void *arg) 1233static int saa711x_command(struct i2c_client *client, unsigned int cmd, void *arg)
1233{ 1234{
1234 struct saa711x_state *state = i2c_get_clientdata(client); 1235 struct saa711x_state *state = i2c_get_clientdata(client);
1235 int *iarg = arg;
1236 1236
1237 /* ioctls to allow direct access to the saa7115 registers for testing */ 1237 /* ioctls to allow direct access to the saa7115 registers for testing */
1238 switch (cmd) { 1238 switch (cmd) {
@@ -1437,9 +1437,8 @@ static int saa711x_command(struct i2c_client *client, unsigned int cmd, void *ar
1437 } 1437 }
1438#endif 1438#endif
1439 1439
1440 case VIDIOC_INT_G_CHIP_IDENT: 1440 case VIDIOC_G_CHIP_IDENT:
1441 *iarg = state->ident; 1441 return v4l2_chip_ident_i2c_client(client, arg, state->ident, 0);
1442 break;
1443 1442
1444 default: 1443 default:
1445 return -EINVAL; 1444 return -EINVAL;
@@ -1487,6 +1486,7 @@ static int saa711x_attach(struct i2c_adapter *adapter, int address, int kind)
1487 if (memcmp(name, "1f711", 5)) { 1486 if (memcmp(name, "1f711", 5)) {
1488 v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n", 1487 v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
1489 address << 1, name); 1488 address << 1, name);
1489 kfree(client);
1490 return 0; 1490 return 0;
1491 } 1491 }
1492 1492
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 654863db1591..9f986930490f 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -54,6 +54,7 @@
54#include <linux/i2c.h> 54#include <linux/i2c.h>
55#include <linux/videodev2.h> 55#include <linux/videodev2.h>
56#include <media/v4l2-common.h> 56#include <media/v4l2-common.h>
57#include <media/v4l2-chip-ident.h>
57#include <media/saa7127.h> 58#include <media/saa7127.h>
58 59
59static int debug = 0; 60static int debug = 0;
@@ -234,7 +235,7 @@ static struct i2c_reg_value saa7127_init_config_50hz[] = {
234 235
235struct saa7127_state { 236struct saa7127_state {
236 v4l2_std_id std; 237 v4l2_std_id std;
237 enum v4l2_chip_ident ident; 238 u32 ident;
238 enum saa7127_input_type input_type; 239 enum saa7127_input_type input_type;
239 enum saa7127_output_type output_type; 240 enum saa7127_output_type output_type;
240 int video_enable; 241 int video_enable;
@@ -550,12 +551,12 @@ static int saa7127_command(struct i2c_client *client,
550 struct v4l2_routing *route = arg; 551 struct v4l2_routing *route = arg;
551 552
552 switch (cmd) { 553 switch (cmd) {
553 case VIDIOC_S_STD: 554 case VIDIOC_INT_S_STD_OUTPUT:
554 if (state->std == *(v4l2_std_id *)arg) 555 if (state->std == *(v4l2_std_id *)arg)
555 break; 556 break;
556 return saa7127_set_std(client, *(v4l2_std_id *)arg); 557 return saa7127_set_std(client, *(v4l2_std_id *)arg);
557 558
558 case VIDIOC_G_STD: 559 case VIDIOC_INT_G_STD_OUTPUT:
559 *(v4l2_std_id *)arg = state->std; 560 *(v4l2_std_id *)arg = state->std;
560 break; 561 break;
561 562
@@ -650,9 +651,8 @@ static int saa7127_command(struct i2c_client *client,
650 break; 651 break;
651 } 652 }
652 653
653 case VIDIOC_INT_G_CHIP_IDENT: 654 case VIDIOC_G_CHIP_IDENT:
654 *(enum v4l2_chip_ident *)arg = state->ident; 655 return v4l2_chip_ident_i2c_client(client, arg, state->ident, 0);
655 break;
656 656
657 default: 657 default:
658 return -EINVAL; 658 return -EINVAL;
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 59da79ce2efd..309dca368f4a 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -46,6 +46,7 @@ config VIDEO_SAA7134_DVB
46 select DVB_NXT200X if !DVB_FE_CUSTOMISE 46 select DVB_NXT200X if !DVB_FE_CUSTOMISE
47 select DVB_TDA10086 if !DVB_FE_CUSTOMISE 47 select DVB_TDA10086 if !DVB_FE_CUSTOMISE
48 select DVB_TDA826X if !DVB_FE_CUSTOMISE 48 select DVB_TDA826X if !DVB_FE_CUSTOMISE
49 select DVB_TDA827X if !DVB_FE_CUSTOMISE
49 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 50 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
50 ---help--- 51 ---help---
51 This adds support for DVB cards based on the 52 This adds support for DVB cards based on the
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 89f32107f46b..4ea479baee74 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -1543,12 +1543,12 @@ struct saa7134_board saa7134_boards[] = {
1543 },{ 1543 },{
1544 .name = name_comp1, 1544 .name = name_comp1,
1545 .vmux = 0, 1545 .vmux = 0,
1546 .amux = LINE2, 1546 .amux = LINE1,
1547 .gpio = 0x02, 1547 .gpio = 0x02,
1548 },{ 1548 },{
1549 .name = name_svideo, 1549 .name = name_svideo,
1550 .vmux = 6, 1550 .vmux = 6,
1551 .amux = LINE2, 1551 .amux = LINE1,
1552 .gpio = 0x02, 1552 .gpio = 0x02,
1553 }}, 1553 }},
1554 .radio = { 1554 .radio = {
@@ -1778,17 +1778,19 @@ struct saa7134_board saa7134_boards[] = {
1778 [SAA7134_BOARD_FLYDVBTDUO] = { 1778 [SAA7134_BOARD_FLYDVBTDUO] = {
1779 /* LifeView FlyDVB-T DUO */ 1779 /* LifeView FlyDVB-T DUO */
1780 /* "Nico Sabbi <nsabbi@tiscali.it> Hartmut Hackmann hartmut.hackmann@t-online.de*/ 1780 /* "Nico Sabbi <nsabbi@tiscali.it> Hartmut Hackmann hartmut.hackmann@t-online.de*/
1781 .name = "LifeView FlyDVB-T DUO", 1781 .name = "LifeView FlyDVB-T DUO / MSI TV@nywhere Duo",
1782 .audio_clock = 0x00200000, 1782 .audio_clock = 0x00200000,
1783 .tuner_type = TUNER_PHILIPS_TDA8290, 1783 .tuner_type = TUNER_PHILIPS_TDA8290,
1784 .radio_type = UNSET, 1784 .radio_type = UNSET,
1785 .tuner_addr = ADDR_UNSET, 1785 .tuner_addr = ADDR_UNSET,
1786 .radio_addr = ADDR_UNSET, 1786 .radio_addr = ADDR_UNSET,
1787 .gpiomask = 0x00200000,
1787 .mpeg = SAA7134_MPEG_DVB, 1788 .mpeg = SAA7134_MPEG_DVB,
1788 .inputs = {{ 1789 .inputs = {{
1789 .name = name_tv, 1790 .name = name_tv,
1790 .vmux = 1, 1791 .vmux = 1,
1791 .amux = TV, 1792 .amux = TV,
1793 .gpio = 0x200000, /* GPIO21=High for TV input */
1792 .tv = 1, 1794 .tv = 1,
1793 },{ 1795 },{
1794 .name = name_comp1, /* Composite signal on S-Video input */ 1796 .name = name_comp1, /* Composite signal on S-Video input */
@@ -1803,6 +1805,11 @@ struct saa7134_board saa7134_boards[] = {
1803 .vmux = 8, 1805 .vmux = 8,
1804 .amux = LINE2, 1806 .amux = LINE2,
1805 }}, 1807 }},
1808 .radio = {
1809 .name = name_radio,
1810 .amux = TV,
1811 .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
1812 },
1806 }, 1813 },
1807 [SAA7134_BOARD_PHILIPS_TOUGH] = { 1814 [SAA7134_BOARD_PHILIPS_TOUGH] = {
1808 .name = "Philips TOUGH DVB-T reference design", 1815 .name = "Philips TOUGH DVB-T reference design",
@@ -2546,8 +2553,9 @@ struct saa7134_board saa7134_boards[] = {
2546 .radio_type = UNSET, 2553 .radio_type = UNSET,
2547 .tuner_addr = ADDR_UNSET, 2554 .tuner_addr = ADDR_UNSET,
2548 .radio_addr = ADDR_UNSET, 2555 .radio_addr = ADDR_UNSET,
2556 .tuner_config = 0,
2549 .mpeg = SAA7134_MPEG_DVB, 2557 .mpeg = SAA7134_MPEG_DVB,
2550 .gpiomask = 1 << 21, 2558 .gpiomask = 0x0200000,
2551 .inputs = {{ 2559 .inputs = {{
2552 .name = name_tv, 2560 .name = name_tv,
2553 .vmux = 1, 2561 .vmux = 1,
@@ -2624,7 +2632,7 @@ struct saa7134_board saa7134_boards[] = {
2624 }}, 2632 }},
2625 .radio = { 2633 .radio = {
2626 .name = name_radio, 2634 .name = name_radio,
2627 .amux = LINE1, 2635 .amux = TV,
2628 .gpio = 0x0200000, 2636 .gpio = 0x0200000,
2629 }, 2637 },
2630 }, 2638 },
@@ -3043,6 +3051,7 @@ struct saa7134_board saa7134_boards[] = {
3043 .radio_type = UNSET, 3051 .radio_type = UNSET,
3044 .tuner_addr = ADDR_UNSET, 3052 .tuner_addr = ADDR_UNSET,
3045 .radio_addr = ADDR_UNSET, 3053 .radio_addr = ADDR_UNSET,
3054 .tuner_config = 1,
3046 .mpeg = SAA7134_MPEG_DVB, 3055 .mpeg = SAA7134_MPEG_DVB,
3047 .gpiomask = 0x000200000, 3056 .gpiomask = 0x000200000,
3048 .inputs = {{ 3057 .inputs = {{
@@ -3289,6 +3298,115 @@ struct saa7134_board saa7134_boards[] = {
3289 .amux = LINE1, 3298 .amux = LINE1,
3290 }}, 3299 }},
3291 }, 3300 },
3301 [SAA7134_BOARD_PHILIPS_TIGER_S] = {
3302 .name = "Philips Tiger - S Reference design",
3303 .audio_clock = 0x00187de7,
3304 .tuner_type = TUNER_PHILIPS_TDA8290,
3305 .radio_type = UNSET,
3306 .tuner_addr = ADDR_UNSET,
3307 .radio_addr = ADDR_UNSET,
3308 .tuner_config = 2,
3309 .mpeg = SAA7134_MPEG_DVB,
3310 .gpiomask = 0x0200000,
3311 .inputs = {{
3312 .name = name_tv,
3313 .vmux = 1,
3314 .amux = TV,
3315 .tv = 1,
3316 },{
3317 .name = name_comp1,
3318 .vmux = 3,
3319 .amux = LINE1,
3320 },{
3321 .name = name_svideo,
3322 .vmux = 8,
3323 .amux = LINE1,
3324 }},
3325 .radio = {
3326 .name = name_radio,
3327 .amux = TV,
3328 .gpio = 0x0200000,
3329 },
3330 },
3331 [SAA7134_BOARD_AVERMEDIA_M102] = {
3332 .name = "Avermedia M102",
3333 .audio_clock = 0x00187de7,
3334 .tuner_type = TUNER_PHILIPS_TDA8290,
3335 .radio_type = UNSET,
3336 .tuner_addr = ADDR_UNSET,
3337 .radio_addr = ADDR_UNSET,
3338 .gpiomask = 1<<21,
3339 .inputs = {{
3340 .name = name_tv,
3341 .vmux = 1,
3342 .amux = TV,
3343 .tv = 1,
3344 },{
3345 .name = name_comp1,
3346 .vmux = 0,
3347 .amux = LINE2,
3348 },{
3349 .name = name_svideo,
3350 .vmux = 6,
3351 .amux = LINE2,
3352 }},
3353 },
3354 [SAA7134_BOARD_ASUS_P7131_4871] = {
3355 .name = "ASUS P7131 4871",
3356 .audio_clock = 0x00187de7,
3357 .tuner_type = TUNER_PHILIPS_TDA8290,
3358 .radio_type = UNSET,
3359 .tuner_addr = ADDR_UNSET,
3360 .radio_addr = ADDR_UNSET,
3361 .tuner_config = 2,
3362 .mpeg = SAA7134_MPEG_DVB,
3363 .gpiomask = 0x0200000,
3364 .inputs = {{
3365 .name = name_tv,
3366 .vmux = 1,
3367 .amux = TV,
3368 .tv = 1,
3369 .gpio = 0x0200000,
3370 }},
3371 },
3372 [SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA] = {
3373 .name = "ASUSTeK P7131 Hybrid",
3374 .audio_clock = 0x00187de7,
3375 .tuner_type = TUNER_PHILIPS_TDA8290,
3376 .radio_type = UNSET,
3377 .tuner_addr = ADDR_UNSET,
3378 .radio_addr = ADDR_UNSET,
3379 .tuner_config = 2,
3380 .gpiomask = 1 << 21,
3381 .mpeg = SAA7134_MPEG_DVB,
3382 .inputs = {{
3383 .name = name_tv,
3384 .vmux = 1,
3385 .amux = TV,
3386 .tv = 1,
3387 .gpio = 0x0000000,
3388 },{
3389 .name = name_comp1,
3390 .vmux = 3,
3391 .amux = LINE2,
3392 .gpio = 0x0200000,
3393 },{
3394 .name = name_comp2,
3395 .vmux = 0,
3396 .amux = LINE2,
3397 .gpio = 0x0200000,
3398 },{
3399 .name = name_svideo,
3400 .vmux = 8,
3401 .amux = LINE2,
3402 .gpio = 0x0200000,
3403 }},
3404 .radio = {
3405 .name = name_radio,
3406 .amux = TV,
3407 .gpio = 0x0200000,
3408 },
3409 },
3292}; 3410};
3293 3411
3294const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 3412const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -3914,7 +4032,7 @@ struct pci_device_id saa7134_pci_tbl[] = {
3914 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 4032 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3915 .subvendor = 0x1043, 4033 .subvendor = 0x1043,
3916 .subdevice = 0x4876, 4034 .subdevice = 0x4876,
3917 .driver_data = SAA7134_BOARD_ASUSTeK_P7131_DUAL, 4035 .driver_data = SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA,
3918 },{ 4036 },{
3919 .vendor = PCI_VENDOR_ID_PHILIPS, 4037 .vendor = PCI_VENDOR_ID_PHILIPS,
3920 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 4038 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
@@ -3958,6 +4076,30 @@ struct pci_device_id saa7134_pci_tbl[] = {
3958 .subdevice = 0x1175, 4076 .subdevice = 0x1175,
3959 .driver_data = SAA7134_BOARD_CINERGY_HT_PCI, 4077 .driver_data = SAA7134_BOARD_CINERGY_HT_PCI,
3960 },{ 4078 },{
4079 .vendor = PCI_VENDOR_ID_PHILIPS,
4080 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4081 .subvendor = 0x1461, /* Avermedia Technologies Inc */
4082 .subdevice = 0xf31e,
4083 .driver_data = SAA7134_BOARD_AVERMEDIA_M102,
4084 },{
4085 .vendor = PCI_VENDOR_ID_PHILIPS,
4086 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4087 .subvendor = 0x4E42, /* MSI */
4088 .subdevice = 0x0306, /* TV@nywhere DUO */
4089 .driver_data = SAA7134_BOARD_FLYDVBTDUO,
4090 },{
4091 .vendor = PCI_VENDOR_ID_PHILIPS,
4092 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4093 .subvendor = 0x1043,
4094 .subdevice = 0x4871,
4095 .driver_data = SAA7134_BOARD_ASUS_P7131_4871,
4096 },{
4097 .vendor = PCI_VENDOR_ID_PHILIPS,
4098 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4099 .subvendor = 0x1043,
4100 .subdevice = 0x4857,
4101 .driver_data = SAA7134_BOARD_ASUSTeK_P7131_DUAL,
4102 },{
3961 /* --- boards without eeprom + subsystem ID --- */ 4103 /* --- boards without eeprom + subsystem ID --- */
3962 .vendor = PCI_VENDOR_ID_PHILIPS, 4104 .vendor = PCI_VENDOR_ID_PHILIPS,
3963 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 4105 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -3971,7 +4113,6 @@ struct pci_device_id saa7134_pci_tbl[] = {
3971 .subdevice = 0, 4113 .subdevice = 0,
3972 .driver_data = SAA7134_BOARD_NOAUTO, 4114 .driver_data = SAA7134_BOARD_NOAUTO,
3973 },{ 4115 },{
3974
3975 /* --- default catch --- */ 4116 /* --- default catch --- */
3976 .vendor = PCI_VENDOR_ID_PHILIPS, 4117 .vendor = PCI_VENDOR_ID_PHILIPS,
3977 .device = PCI_DEVICE_ID_PHILIPS_SAA7130, 4118 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
@@ -4063,6 +4204,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
4063 case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS: 4204 case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
4064 case SAA7134_BOARD_FLYDVBT_LR301: 4205 case SAA7134_BOARD_FLYDVBT_LR301:
4065 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 4206 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
4207 case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
4066 case SAA7134_BOARD_FLYDVBTDUO: 4208 case SAA7134_BOARD_FLYDVBTDUO:
4067 case SAA7134_BOARD_PROTEUS_2309: 4209 case SAA7134_BOARD_PROTEUS_2309:
4068 case SAA7134_BOARD_AVERMEDIA_A16AR: 4210 case SAA7134_BOARD_AVERMEDIA_A16AR:
@@ -4103,8 +4245,8 @@ int saa7134_board_init1(struct saa7134_dev *dev)
4103 break; 4245 break;
4104 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 4246 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
4105 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS: 4247 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
4106 saa_writeb(SAA7134_GPIO_GPMODE3, 0x08); 4248 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08000000, 0x08000000);
4107 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x00); 4249 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08000000, 0x00000000);
4108 break; 4250 break;
4109 case SAA7134_BOARD_AVERMEDIA_CARDBUS: 4251 case SAA7134_BOARD_AVERMEDIA_CARDBUS:
4110 /* power-up tuner chip */ 4252 /* power-up tuner chip */
@@ -4137,6 +4279,11 @@ int saa7134_board_init1(struct saa7134_dev *dev)
4137 "%s: Dual decoder functionality is disabled for now, use the other chip.\n", 4279 "%s: Dual decoder functionality is disabled for now, use the other chip.\n",
4138 dev->name,card(dev).name,dev->name,dev->name); 4280 dev->name,card(dev).name,dev->name,dev->name);
4139 break; 4281 break;
4282 case SAA7134_BOARD_AVERMEDIA_M102:
4283 /* enable tuner */
4284 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x8c040007, 0x8c040007);
4285 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0c0007cd, 0x0c0007cd);
4286 break;
4140 } 4287 }
4141 return 0; 4288 return 0;
4142} 4289}
@@ -4146,6 +4293,9 @@ int saa7134_board_init2(struct saa7134_dev *dev)
4146{ 4293{
4147 unsigned char buf; 4294 unsigned char buf;
4148 int board; 4295 int board;
4296 struct tuner_setup tun_setup;
4297 tun_setup.config = 0;
4298 tun_setup.tuner_callback = saa7134_tuner_callback;
4149 4299
4150 switch (dev->board) { 4300 switch (dev->board) {
4151 case SAA7134_BOARD_BMK_MPEX_NOTUNER: 4301 case SAA7134_BOARD_BMK_MPEX_NOTUNER:
@@ -4162,8 +4312,6 @@ int saa7134_board_init2(struct saa7134_dev *dev)
4162 dev->tuner_type = saa7134_boards[dev->board].tuner_type; 4312 dev->tuner_type = saa7134_boards[dev->board].tuner_type;
4163 4313
4164 if (TUNER_ABSENT != dev->tuner_type) { 4314 if (TUNER_ABSENT != dev->tuner_type) {
4165 struct tuner_setup tun_setup;
4166
4167 tun_setup.mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV; 4315 tun_setup.mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV;
4168 tun_setup.type = dev->tuner_type; 4316 tun_setup.type = dev->tuner_type;
4169 tun_setup.addr = ADDR_UNSET; 4317 tun_setup.addr = ADDR_UNSET;
@@ -4173,7 +4321,6 @@ int saa7134_board_init2(struct saa7134_dev *dev)
4173 break; 4321 break;
4174 case SAA7134_BOARD_MD7134: 4322 case SAA7134_BOARD_MD7134:
4175 { 4323 {
4176 struct tuner_setup tun_setup;
4177 u8 subaddr; 4324 u8 subaddr;
4178 u8 data[3]; 4325 u8 data[3];
4179 int ret, tuner_t; 4326 int ret, tuner_t;
@@ -4245,7 +4392,6 @@ int saa7134_board_init2(struct saa7134_dev *dev)
4245 * the channel decoder. We have to make it transparent to find it 4392 * the channel decoder. We have to make it transparent to find it
4246 */ 4393 */
4247 { 4394 {
4248 struct tuner_setup tun_setup;
4249 u8 data[] = { 0x07, 0x02}; 4395 u8 data[] = { 0x07, 0x02};
4250 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 4396 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
4251 i2c_transfer(&dev->i2c_adap, &msg, 1); 4397 i2c_transfer(&dev->i2c_adap, &msg, 1);
@@ -4258,16 +4404,38 @@ int saa7134_board_init2(struct saa7134_dev *dev)
4258 } 4404 }
4259 break; 4405 break;
4260 case SAA7134_BOARD_PHILIPS_TIGER: 4406 case SAA7134_BOARD_PHILIPS_TIGER:
4407 case SAA7134_BOARD_PHILIPS_TIGER_S:
4408 {
4409 u8 data[] = { 0x3c, 0x33, 0x60};
4410 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
4411 if(dev->autodetected && (dev->eedata[0x49] == 0x50)) {
4412 dev->board = SAA7134_BOARD_PHILIPS_TIGER_S;
4413 printk(KERN_INFO "%s: Reconfigured board as %s\n",
4414 dev->name, saa7134_boards[dev->board].name);
4415 }
4416 if(dev->board == SAA7134_BOARD_PHILIPS_TIGER_S) {
4417 tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
4418 tun_setup.type = TUNER_PHILIPS_TDA8290;
4419 tun_setup.addr = 0x4b;
4420 tun_setup.config = 2;
4421
4422 saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup);
4423 data[2] = 0x68;
4424 }
4425 i2c_transfer(&dev->i2c_adap, &msg, 1);
4426 }
4427 break;
4261 case SAA7134_BOARD_PINNACLE_PCTV_310i: 4428 case SAA7134_BOARD_PINNACLE_PCTV_310i:
4262 case SAA7134_BOARD_TEVION_DVBT_220RF: 4429 case SAA7134_BOARD_TEVION_DVBT_220RF:
4263 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 4430 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
4431 case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
4264 case SAA7134_BOARD_MEDION_MD8800_QUADRO: 4432 case SAA7134_BOARD_MEDION_MD8800_QUADRO:
4265 case SAA7134_BOARD_HAUPPAUGE_HVR1110: 4433 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
4266 /* this is a hybrid board, initialize to analog mode 4434 /* this is a hybrid board, initialize to analog mode
4267 * and configure firmware eeprom address 4435 * and configure firmware eeprom address
4268 */ 4436 */
4269 { 4437 {
4270 u8 data[] = { 0x3c, 0x33, 0x68}; 4438 u8 data[] = { 0x3c, 0x33, 0x60};
4271 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 4439 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
4272 i2c_transfer(&dev->i2c_adap, &msg, 1); 4440 i2c_transfer(&dev->i2c_adap, &msg, 1);
4273 } 4441 }
@@ -4281,18 +4449,18 @@ int saa7134_board_init2(struct saa7134_dev *dev)
4281 break; 4449 break;
4282 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 4450 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
4283 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS: 4451 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
4284 /* make the tda10046 find its eeprom */ 4452 /* initialize analog mode */
4285 { 4453 {
4286 u8 data[] = { 0x3c, 0x33, 0x62}; 4454 u8 data[] = { 0x3c, 0x33, 0x6a};
4287 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 4455 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
4288 i2c_transfer(&dev->i2c_adap, &msg, 1); 4456 i2c_transfer(&dev->i2c_adap, &msg, 1);
4289 } 4457 }
4290 break; 4458 break;
4291 case SAA7134_BOARD_CINERGY_HT_PCMCIA: 4459 case SAA7134_BOARD_CINERGY_HT_PCMCIA:
4292 case SAA7134_BOARD_CINERGY_HT_PCI: 4460 case SAA7134_BOARD_CINERGY_HT_PCI:
4293 /* make the tda10046 find its eeprom */ 4461 /* initialize analog mode */
4294 { 4462 {
4295 u8 data[] = { 0x3c, 0x33, 0x60}; 4463 u8 data[] = { 0x3c, 0x33, 0x68};
4296 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 4464 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
4297 i2c_transfer(&dev->i2c_adap, &msg, 1); 4465 i2c_transfer(&dev->i2c_adap, &msg, 1);
4298 } 4466 }
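The saa7134-cards.c changes above follow the driver's usual pattern for new hardware: each board gets a descriptor in saa7134_boards[] plus one or more saa7134_pci_tbl[] rows keyed on the PCI subsystem vendor/device IDs (and, for the Tiger S, a runtime reclassification based on an eeprom byte). A stand-alone sketch of the subsystem-ID lookup idea, in plain C with hypothetical names rather than the driver's actual tables; the ID values are the ones added in the hunk:

#include <stdio.h>

enum demo_board { BOARD_UNKNOWN, BOARD_AVERMEDIA_M102, BOARD_ASUS_P7131_4871 };

struct demo_pci_id {
	unsigned short subvendor, subdevice;
	enum demo_board board;
};

/* terminated by a zero entry, in the spirit of a pci_device_id table */
static const struct demo_pci_id demo_tbl[] = {
	{ 0x1461, 0xf31e, BOARD_AVERMEDIA_M102 },
	{ 0x1043, 0x4871, BOARD_ASUS_P7131_4871 },
	{ 0, 0, BOARD_UNKNOWN }
};

static enum demo_board demo_lookup(unsigned short sv, unsigned short sd)
{
	const struct demo_pci_id *id;

	for (id = demo_tbl; id->subvendor; id++)
		if (id->subvendor == sv && id->subdevice == sd)
			return id->board;
	return BOARD_UNKNOWN;	/* fall back to autodetection/defaults */
}

int main(void)
{
	printf("board index: %d\n", demo_lookup(0x1461, 0xf31e));
	return 0;
}

In the real driver the kernel's PCI core performs this matching and hands driver_data (the board index) to the probe routine; the sketch only shows why every new card needs both a board entry and a matching ID row.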
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index ed038fff3b4f..25f84701a8e8 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -117,6 +117,64 @@ void saa7134_track_gpio(struct saa7134_dev *dev, char *msg)
117 dev->name, mode, (~mode) & status, mode & status, msg); 117 dev->name, mode, (~mode) & status, mode & status, msg);
118} 118}
119 119
120void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value)
121{
122 u32 index, bitval;
123
124 index = 1 << bit_no;
125 switch (value) {
126 case 0: /* static value */
127 case 1: dprintk("setting GPIO%d to static %d\n", bit_no, value);
128 /* turn sync mode off if necessary */
129 if (index & 0x00c00000)
130 saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x00);
131 if (value)
132 bitval = index;
133 else
134 bitval = 0;
135 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, index);
136 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, index, bitval);
137 break;
138 case 3: /* tristate */
139 dprintk("setting GPIO%d to tristate\n", bit_no);
140 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, 0);
141 break;
142 }
143}
144
145int saa7134_tuner_callback(void *ptr, int command, int arg)
146{
147 u8 sync_control;
148 struct saa7134_dev *dev = ptr;
149
150 switch (dev->tuner_type) {
151 case TUNER_PHILIPS_TDA8290:
152 switch (command) {
153 case 0: /* switch LNA gain through GPIO 22*/
154 saa7134_set_gpio(dev, 22, arg) ;
155 break;
156 case 1: /* vsync output at GPIO22. 50 / 60Hz */
157 dprintk("setting GPIO22 to vsync %d\n", arg);
158 saa_andorb(SAA7134_VIDEO_PORT_CTRL3, 0x80, 0x80);
159 saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x03);
160 if (arg == 1)
161 sync_control = 11;
162 else
163 sync_control = 17;
164 saa_writeb(SAA7134_VGATE_START, sync_control);
165 saa_writeb(SAA7134_VGATE_STOP, sync_control + 1);
166 saa_andorb(SAA7134_MISC_VGATE_MSB, 0x03, 0x00);
167 break;
168 default:
169 return -EINVAL;
170 }
171 break;
172 default:
173 return -ENODEV;
174 }
175 return 0;
176}
177
120/* ------------------------------------------------------------------ */ 178/* ------------------------------------------------------------------ */
121 179
122 180
@@ -124,55 +182,28 @@ void saa7134_track_gpio(struct saa7134_dev *dev, char *msg)
124/* delayed request_module */ 182/* delayed request_module */
125 183
126#if defined(CONFIG_MODULES) && defined(MODULE) 184#if defined(CONFIG_MODULES) && defined(MODULE)
127static int need_empress;
128static int need_dvb;
129static int need_alsa;
130static int need_oss;
131 185
132static int pending_call(struct notifier_block *self, unsigned long state,
133 void *module)
134{
135 if (module != THIS_MODULE || state != MODULE_STATE_LIVE)
136 return NOTIFY_DONE;
137 186
138 if (need_empress) 187static void request_module_async(struct work_struct *work){
188 struct saa7134_dev* dev = container_of(work, struct saa7134_dev, request_module_wk);
189 if (card_is_empress(dev))
139 request_module("saa7134-empress"); 190 request_module("saa7134-empress");
140 if (need_dvb) 191 if (card_is_dvb(dev))
141 request_module("saa7134-dvb"); 192 request_module("saa7134-dvb");
142 if (need_alsa) 193 if (alsa)
143 request_module("saa7134-alsa"); 194 request_module("saa7134-alsa");
144 if (need_oss) 195 if (oss)
145 request_module("saa7134-oss"); 196 request_module("saa7134-oss");
146 return NOTIFY_DONE;
147} 197}
148 198
149static int pending_registered; 199static void request_submodules(struct saa7134_dev *dev)
150static struct notifier_block pending_notifier = {
151 .notifier_call = pending_call,
152};
153
154static void request_module_depend(char *name, int *flag)
155{ 200{
156 int err; 201 INIT_WORK(&dev->request_module_wk, request_module_async);
157 switch (THIS_MODULE->state) { 202 schedule_work(&dev->request_module_wk);
158 case MODULE_STATE_COMING:
159 if (!pending_registered) {
160 err = register_module_notifier(&pending_notifier);
161 pending_registered = 1;
162 }
163 *flag = 1;
164 break;
165 case MODULE_STATE_LIVE:
166 request_module(name);
167 break;
168 default:
169 /* nothing */;
170 break;
171 }
172} 203}
173 204
174#else 205#else
175#define request_module_depend(name,flag) 206#define request_submodules(dev)
176#endif /* CONFIG_MODULES */ 207#endif /* CONFIG_MODULES */
177 208
178/* ------------------------------------------------------------------ */ 209/* ------------------------------------------------------------------ */
@@ -703,7 +734,6 @@ static int saa7134_hwfini(struct saa7134_dev *dev)
703 saa7134_ts_fini(dev); 734 saa7134_ts_fini(dev);
704 saa7134_input_fini(dev); 735 saa7134_input_fini(dev);
705 saa7134_vbi_fini(dev); 736 saa7134_vbi_fini(dev);
706 saa7134_video_fini(dev);
707 saa7134_tvaudio_fini(dev); 737 saa7134_tvaudio_fini(dev);
708 return 0; 738 return 0;
709} 739}
@@ -944,18 +974,9 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
944 request_module("tuner"); 974 request_module("tuner");
945 if (card_is_empress(dev)) { 975 if (card_is_empress(dev)) {
946 request_module("saa6752hs"); 976 request_module("saa6752hs");
947 request_module_depend("saa7134-empress",&need_empress);
948 } 977 }
949 978
950 if (card_is_dvb(dev)) 979 request_submodules(dev);
951 request_module_depend("saa7134-dvb",&need_dvb);
952
953
954 if (alsa)
955 request_module_depend("saa7134-alsa",&need_alsa);
956
957 if (oss)
958 request_module_depend("saa7134-oss",&need_oss);
959 980
960 v4l2_prio_init(&dev->prio); 981 v4l2_prio_init(&dev->prio);
961 982
@@ -1013,6 +1034,9 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1013 saa7134_dmasound_init(dev); 1034 saa7134_dmasound_init(dev);
1014 } 1035 }
1015 1036
1037 if (TUNER_ABSENT != dev->tuner_type)
1038 saa7134_i2c_call_clients(dev, TUNER_SET_STANDBY, NULL);
1039
1016 return 0; 1040 return 0;
1017 1041
1018 fail4: 1042 fail4:
@@ -1152,10 +1176,6 @@ static int saa7134_init(void)
1152 1176
1153static void saa7134_fini(void) 1177static void saa7134_fini(void)
1154{ 1178{
1155#if defined(CONFIG_MODULES) && defined(MODULE)
1156 if (pending_registered)
1157 unregister_module_notifier(&pending_notifier);
1158#endif /* CONFIG_MODULES */
1159 pci_unregister_driver(&saa7134_pci_driver); 1179 pci_unregister_driver(&saa7134_pci_driver);
1160} 1180}
1161 1181
@@ -1164,6 +1184,7 @@ module_exit(saa7134_fini);
1164 1184
1165/* ----------------------------------------------------------- */ 1185/* ----------------------------------------------------------- */
1166 1186
1187EXPORT_SYMBOL(saa7134_set_gpio);
1167EXPORT_SYMBOL(saa7134_i2c_call_clients); 1188EXPORT_SYMBOL(saa7134_i2c_call_clients);
1168EXPORT_SYMBOL(saa7134_devlist); 1189EXPORT_SYMBOL(saa7134_devlist);
1169EXPORT_SYMBOL(saa7134_boards); 1190EXPORT_SYMBOL(saa7134_boards);
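The saa7134-core.c hunk replaces the module-notifier bookkeeping (need_dvb, pending_call and friends) with a workqueue: probe queues a work item and the handler calls request_module() for the sub-drivers once it runs, outside the module-init path the old code had to work around. A minimal sketch of that deferral pattern, with hypothetical names; this shows the shape of the change, not the driver's code:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>

struct demo_dev {
	struct work_struct request_module_wk;
	int is_dvb;	/* stand-in for card_is_dvb(dev) */
};

static void demo_request_module_async(struct work_struct *work)
{
	/* recover the device from the embedded work item */
	struct demo_dev *dev =
		container_of(work, struct demo_dev, request_module_wk);

	if (dev->is_dvb)
		request_module("saa7134-dvb");
}

static void demo_request_submodules(struct demo_dev *dev)
{
	INIT_WORK(&dev->request_module_wk, demo_request_module_async);
	schedule_work(&dev->request_module_wk);
}

Because the work item runs later in process context, the driver no longer needs to track whether its own module has reached MODULE_STATE_LIVE before asking for saa7134-dvb, saa7134-alsa and the rest, which is exactly the state machine the removed notifier code implemented.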
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index e3059fd33951..65aec881bbde 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -41,7 +41,9 @@
41 41
42#include "tda10086.h" 42#include "tda10086.h"
43#include "tda826x.h" 43#include "tda826x.h"
44#include "tda827x.h"
44#include "isl6421.h" 45#include "isl6421.h"
46
45MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); 47MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
46MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
47 49
@@ -54,7 +56,21 @@ static int use_frontend = 0;
54module_param(use_frontend, int, 0644); 56module_param(use_frontend, int, 0644);
55MODULE_PARM_DESC(use_frontend,"for cards with multiple frontends (0: terrestrial, 1: satellite)"); 57MODULE_PARM_DESC(use_frontend,"for cards with multiple frontends (0: terrestrial, 1: satellite)");
56 58
57/* ------------------------------------------------------------------ */ 59static int debug = 0;
60module_param(debug, int, 0644);
61MODULE_PARM_DESC(debug, "Turn on/off module debugging (default:off).");
62
63#define dprintk(fmt, arg...) do { if (debug) \
64 printk(KERN_DEBUG "%s/dvb: " fmt, dev->name , ## arg); } while(0)
65
66/* Print a warning */
67#define wprintk(fmt, arg...) \
68 printk(KERN_WARNING "%s/dvb: " fmt, dev->name, ## arg)
69
70/* ------------------------------------------------------------------
71 * mt352 based DVB-T cards
72 */
73
58static int pinnacle_antenna_pwr(struct saa7134_dev *dev, int on) 74static int pinnacle_antenna_pwr(struct saa7134_dev *dev, int on)
59{ 75{
60 u32 ok; 76 u32 ok;
@@ -75,8 +91,7 @@ static int pinnacle_antenna_pwr(struct saa7134_dev *dev, int on)
75 saa_setl(SAA7134_GPIO_GPSTATUS0 >> 2, (1 << 28)); 91 saa_setl(SAA7134_GPIO_GPSTATUS0 >> 2, (1 << 28));
76 udelay(10); 92 udelay(10);
77 ok = saa_readl(SAA7134_GPIO_GPSTATUS0) & (1 << 27); 93 ok = saa_readl(SAA7134_GPIO_GPSTATUS0) & (1 << 27);
78 printk("%s: %s %s\n", dev->name, __FUNCTION__, 94 dprintk("%s %s\n", __FUNCTION__, ok ? "on" : "off");
79 ok ? "on" : "off");
80 95
81 if (!ok) 96 if (!ok)
82 saa_clearl(SAA7134_GPIO_GPSTATUS0 >> 2, (1 << 26)); 97 saa_clearl(SAA7134_GPIO_GPSTATUS0 >> 2, (1 << 26));
@@ -96,7 +111,7 @@ static int mt352_pinnacle_init(struct dvb_frontend* fe)
96 static u8 irq_cfg [] = { INTERRUPT_EN_0, 0x00, 0x00, 0x00, 0x00 }; 111 static u8 irq_cfg [] = { INTERRUPT_EN_0, 0x00, 0x00, 0x00, 0x00 };
97 struct saa7134_dev *dev= fe->dvb->priv; 112 struct saa7134_dev *dev= fe->dvb->priv;
98 113
99 printk("%s: %s called\n",dev->name,__FUNCTION__); 114 dprintk("%s called\n", __FUNCTION__);
100 115
101 mt352_write(fe, clock_config, sizeof(clock_config)); 116 mt352_write(fe, clock_config, sizeof(clock_config));
102 udelay(200); 117 udelay(200);
@@ -185,10 +200,26 @@ static struct mt352_config avermedia_777 = {
185 .demod_init = mt352_aver777_init, 200 .demod_init = mt352_aver777_init,
186}; 201};
187 202
188/* ------------------------------------------------------------------ */ 203/* ==================================================================
189static int philips_tda6651_pll_set(u8 addr, struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 204 * tda1004x based DVB-T cards, helper functions
205 */
206
207static int philips_tda1004x_request_firmware(struct dvb_frontend *fe,
208 const struct firmware **fw, char *name)
209{
210 struct saa7134_dev *dev = fe->dvb->priv;
211 return request_firmware(fw, name, &dev->pci->dev);
212}
213
214/* ------------------------------------------------------------------
215 * these tuners are tu1216, td1316(a)
216 */
217
218static int philips_tda6651_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
190{ 219{
191 struct saa7134_dev *dev = fe->dvb->priv; 220 struct saa7134_dev *dev = fe->dvb->priv;
221 struct tda1004x_state *state = fe->demodulator_priv;
222 u8 addr = state->config->tuner_address;
192 u8 tuner_buf[4]; 223 u8 tuner_buf[4];
193 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tuner_buf,.len = 224 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tuner_buf,.len =
194 sizeof(tuner_buf) }; 225 sizeof(tuner_buf) };
@@ -263,15 +294,20 @@ static int philips_tda6651_pll_set(u8 addr, struct dvb_frontend *fe, struct dvb_
263 294
264 if (fe->ops.i2c_gate_ctrl) 295 if (fe->ops.i2c_gate_ctrl)
265 fe->ops.i2c_gate_ctrl(fe, 1); 296 fe->ops.i2c_gate_ctrl(fe, 1);
266 if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1) 297 if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1) {
298 wprintk("could not write to tuner at addr: 0x%02x\n",
299 addr << 1);
267 return -EIO; 300 return -EIO;
301 }
268 msleep(1); 302 msleep(1);
269 return 0; 303 return 0;
270} 304}
271 305
272static int philips_tda6651_pll_init(u8 addr, struct dvb_frontend *fe) 306static int philips_tu1216_init(struct dvb_frontend *fe)
273{ 307{
274 struct saa7134_dev *dev = fe->dvb->priv; 308 struct saa7134_dev *dev = fe->dvb->priv;
309 struct tda1004x_state *state = fe->demodulator_priv;
310 u8 addr = state->config->tuner_address;
275 static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab }; 311 static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab };
276 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) }; 312 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) };
277 313
@@ -287,46 +323,17 @@ static int philips_tda6651_pll_init(u8 addr, struct dvb_frontend *fe)
287 323
288/* ------------------------------------------------------------------ */ 324/* ------------------------------------------------------------------ */
289 325
290static int philips_tu1216_tuner_60_init(struct dvb_frontend *fe)
291{
292 return philips_tda6651_pll_init(0x60, fe);
293}
294
295static int philips_tu1216_tuner_60_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
296{
297 return philips_tda6651_pll_set(0x60, fe, params);
298}
299
300static int philips_tda1004x_request_firmware(struct dvb_frontend *fe,
301 const struct firmware **fw, char *name)
302{
303 struct saa7134_dev *dev = fe->dvb->priv;
304 return request_firmware(fw, name, &dev->pci->dev);
305}
306
307static struct tda1004x_config philips_tu1216_60_config = { 326static struct tda1004x_config philips_tu1216_60_config = {
308
309 .demod_address = 0x8, 327 .demod_address = 0x8,
310 .invert = 1, 328 .invert = 1,
311 .invert_oclk = 0, 329 .invert_oclk = 0,
312 .xtal_freq = TDA10046_XTAL_4M, 330 .xtal_freq = TDA10046_XTAL_4M,
313 .agc_config = TDA10046_AGC_DEFAULT, 331 .agc_config = TDA10046_AGC_DEFAULT,
314 .if_freq = TDA10046_FREQ_3617, 332 .if_freq = TDA10046_FREQ_3617,
315 .request_firmware = philips_tda1004x_request_firmware, 333 .tuner_address = 0x60,
334 .request_firmware = philips_tda1004x_request_firmware
316}; 335};
317 336
318/* ------------------------------------------------------------------ */
319
320static int philips_tu1216_tuner_61_init(struct dvb_frontend *fe)
321{
322 return philips_tda6651_pll_init(0x61, fe);
323}
324
325static int philips_tu1216_tuner_61_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
326{
327 return philips_tda6651_pll_set(0x61, fe, params);
328}
329
330static struct tda1004x_config philips_tu1216_61_config = { 337static struct tda1004x_config philips_tu1216_61_config = {
331 338
332 .demod_address = 0x8, 339 .demod_address = 0x8,
@@ -335,7 +342,8 @@ static struct tda1004x_config philips_tu1216_61_config = {
335 .xtal_freq = TDA10046_XTAL_4M, 342 .xtal_freq = TDA10046_XTAL_4M,
336 .agc_config = TDA10046_AGC_DEFAULT, 343 .agc_config = TDA10046_AGC_DEFAULT,
337 .if_freq = TDA10046_FREQ_3617, 344 .if_freq = TDA10046_FREQ_3617,
338 .request_firmware = philips_tda1004x_request_firmware, 345 .tuner_address = 0x61,
346 .request_firmware = philips_tda1004x_request_firmware
339}; 347};
340 348
341/* ------------------------------------------------------------------ */ 349/* ------------------------------------------------------------------ */
@@ -343,24 +351,42 @@ static struct tda1004x_config philips_tu1216_61_config = {
343static int philips_td1316_tuner_init(struct dvb_frontend *fe) 351static int philips_td1316_tuner_init(struct dvb_frontend *fe)
344{ 352{
345 struct saa7134_dev *dev = fe->dvb->priv; 353 struct saa7134_dev *dev = fe->dvb->priv;
354 struct tda1004x_state *state = fe->demodulator_priv;
355 u8 addr = state->config->tuner_address;
346 static u8 msg[] = { 0x0b, 0xf5, 0x86, 0xab }; 356 static u8 msg[] = { 0x0b, 0xf5, 0x86, 0xab };
347 struct i2c_msg init_msg = {.addr = 0x61,.flags = 0,.buf = msg,.len = sizeof(msg) }; 357 struct i2c_msg init_msg = {.addr = addr,.flags = 0,.buf = msg,.len = sizeof(msg) };
348 358
349 /* setup PLL configuration */ 359 /* setup PLL configuration */
350 if (fe->ops.i2c_gate_ctrl) 360 if (fe->ops.i2c_gate_ctrl)
351 fe->ops.i2c_gate_ctrl(fe, 1); 361 fe->ops.i2c_gate_ctrl(fe, 1);
352 if (i2c_transfer(&dev->i2c_adap, &init_msg, 1) != 1) 362 if (i2c_transfer(&dev->i2c_adap, &init_msg, 1) != 1)
353 return -EIO; 363 return -EIO;
354 if (fe->ops.i2c_gate_ctrl)
355 fe->ops.i2c_gate_ctrl(fe, 0);
356 return 0; 364 return 0;
357} 365}
358 366
359static int philips_td1316_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 367static int philips_td1316_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
360{ 368{
361 return philips_tda6651_pll_set(0x61, fe, params); 369 return philips_tda6651_pll_set(fe, params);
362} 370}
363 371
372static int philips_td1316_tuner_sleep(struct dvb_frontend *fe)
373{
374 struct saa7134_dev *dev = fe->dvb->priv;
375 struct tda1004x_state *state = fe->demodulator_priv;
376 u8 addr = state->config->tuner_address;
377 static u8 msg[] = { 0x0b, 0xdc, 0x86, 0xa4 };
378 struct i2c_msg analog_msg = {.addr = addr,.flags = 0,.buf = msg,.len = sizeof(msg) };
379
380 /* switch the tuner to analog mode */
381 if (fe->ops.i2c_gate_ctrl)
382 fe->ops.i2c_gate_ctrl(fe, 1);
383 if (i2c_transfer(&dev->i2c_adap, &analog_msg, 1) != 1)
384 return -EIO;
385 return 0;
386}
387
388/* ------------------------------------------------------------------ */
389
364static int philips_europa_tuner_init(struct dvb_frontend *fe) 390static int philips_europa_tuner_init(struct dvb_frontend *fe)
365{ 391{
366 struct saa7134_dev *dev = fe->dvb->priv; 392 struct saa7134_dev *dev = fe->dvb->priv;
@@ -380,18 +406,14 @@ static int philips_europa_tuner_init(struct dvb_frontend *fe)
380static int philips_europa_tuner_sleep(struct dvb_frontend *fe) 406static int philips_europa_tuner_sleep(struct dvb_frontend *fe)
381{ 407{
382 struct saa7134_dev *dev = fe->dvb->priv; 408 struct saa7134_dev *dev = fe->dvb->priv;
383 /* this message actually turns the tuner back to analog mode */
384 static u8 msg[] = { 0x0b, 0xdc, 0x86, 0xa4 };
385 struct i2c_msg analog_msg = {.addr = 0x61,.flags = 0,.buf = msg,.len = sizeof(msg) };
386 409
387 i2c_transfer(&dev->i2c_adap, &analog_msg, 1); 410 static u8 msg[] = { 0x00, 0x14 };
388 msleep(1); 411 struct i2c_msg analog_msg = {.addr = 0x43,.flags = 0,.buf = msg,.len = sizeof(msg) };
412
413 if (philips_td1316_tuner_sleep(fe))
414 return -EIO;
389 415
390 /* switch the board to analog mode */ 416 /* switch the board to analog mode */
391 analog_msg.addr = 0x43;
392 analog_msg.len = 0x02;
393 msg[0] = 0x00;
394 msg[1] = 0x14;
395 if (fe->ops.i2c_gate_ctrl) 417 if (fe->ops.i2c_gate_ctrl)
396 fe->ops.i2c_gate_ctrl(fe, 1); 418 fe->ops.i2c_gate_ctrl(fe, 1);
397 i2c_transfer(&dev->i2c_adap, &analog_msg, 1); 419 i2c_transfer(&dev->i2c_adap, &analog_msg, 1);
@@ -416,7 +438,8 @@ static struct tda1004x_config philips_europa_config = {
416 .xtal_freq = TDA10046_XTAL_4M, 438 .xtal_freq = TDA10046_XTAL_4M,
417 .agc_config = TDA10046_AGC_IFO_AUTO_POS, 439 .agc_config = TDA10046_AGC_IFO_AUTO_POS,
418 .if_freq = TDA10046_FREQ_052, 440 .if_freq = TDA10046_FREQ_052,
419 .request_firmware = NULL, 441 .tuner_address = 0x61,
442 .request_firmware = philips_tda1004x_request_firmware
420}; 443};
421 444
422/* ------------------------------------------------------------------ */ 445/* ------------------------------------------------------------------ */
@@ -424,9 +447,11 @@ static struct tda1004x_config philips_europa_config = {
424static int philips_fmd1216_tuner_init(struct dvb_frontend *fe) 447static int philips_fmd1216_tuner_init(struct dvb_frontend *fe)
425{ 448{
426 struct saa7134_dev *dev = fe->dvb->priv; 449 struct saa7134_dev *dev = fe->dvb->priv;
450 struct tda1004x_state *state = fe->demodulator_priv;
451 u8 addr = state->config->tuner_address;
427 /* this message is to set up ATC and ALC */ 452 /* this message is to set up ATC and ALC */
428 static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 }; 453 static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 };
429 struct i2c_msg tuner_msg = {.addr = 0x61,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) }; 454 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) };
430 455
431 if (fe->ops.i2c_gate_ctrl) 456 if (fe->ops.i2c_gate_ctrl)
432 fe->ops.i2c_gate_ctrl(fe, 1); 457 fe->ops.i2c_gate_ctrl(fe, 1);
@@ -440,9 +465,11 @@ static int philips_fmd1216_tuner_init(struct dvb_frontend *fe)
440static int philips_fmd1216_tuner_sleep(struct dvb_frontend *fe) 465static int philips_fmd1216_tuner_sleep(struct dvb_frontend *fe)
441{ 466{
442 struct saa7134_dev *dev = fe->dvb->priv; 467 struct saa7134_dev *dev = fe->dvb->priv;
468 struct tda1004x_state *state = fe->demodulator_priv;
469 u8 addr = state->config->tuner_address;
443 /* this message actually turns the tuner back to analog mode */ 470 /* this message actually turns the tuner back to analog mode */
444 static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0x60 }; 471 u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0x60 };
445 struct i2c_msg tuner_msg = {.addr = 0x61,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) }; 472 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) };
446 473
447 if (fe->ops.i2c_gate_ctrl) 474 if (fe->ops.i2c_gate_ctrl)
448 fe->ops.i2c_gate_ctrl(fe, 1); 475 fe->ops.i2c_gate_ctrl(fe, 1);
@@ -460,8 +487,10 @@ static int philips_fmd1216_tuner_sleep(struct dvb_frontend *fe)
460static int philips_fmd1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 487static int philips_fmd1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
461{ 488{
462 struct saa7134_dev *dev = fe->dvb->priv; 489 struct saa7134_dev *dev = fe->dvb->priv;
490 struct tda1004x_state *state = fe->demodulator_priv;
491 u8 addr = state->config->tuner_address;
463 u8 tuner_buf[4]; 492 u8 tuner_buf[4];
464 struct i2c_msg tuner_msg = {.addr = 0x61,.flags = 0,.buf = tuner_buf,.len = 493 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tuner_buf,.len =
465 sizeof(tuner_buf) }; 494 sizeof(tuner_buf) };
466 int tuner_frequency = 0; 495 int tuner_frequency = 0;
467 int divider = 0; 496 int divider = 0;
@@ -536,8 +565,11 @@ static int philips_fmd1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_
536 565
537 if (fe->ops.i2c_gate_ctrl) 566 if (fe->ops.i2c_gate_ctrl)
538 fe->ops.i2c_gate_ctrl(fe, 1); 567 fe->ops.i2c_gate_ctrl(fe, 1);
539 if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1) 568 if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1) {
569 wprintk("could not write to tuner at addr: 0x%02x\n",
570 addr << 1);
540 return -EIO; 571 return -EIO;
572 }
541 return 0; 573 return 0;
542} 574}
543 575
@@ -548,582 +580,365 @@ static struct tda1004x_config medion_cardbus = {
548 .xtal_freq = TDA10046_XTAL_16M, 580 .xtal_freq = TDA10046_XTAL_16M,
549 .agc_config = TDA10046_AGC_IFO_AUTO_NEG, 581 .agc_config = TDA10046_AGC_IFO_AUTO_NEG,
550 .if_freq = TDA10046_FREQ_3613, 582 .if_freq = TDA10046_FREQ_3613,
551 .request_firmware = NULL, 583 .tuner_address = 0x61,
584 .request_firmware = philips_tda1004x_request_firmware
552}; 585};
553 586
554/* ------------------------------------------------------------------ */ 587/* ------------------------------------------------------------------
555 588 * tda 1004x based cards with philips silicon tuner
556struct tda827x_data { 589 */
557 u32 lomax;
558 u8 spd;
559 u8 bs;
560 u8 bp;
561 u8 cp;
562 u8 gc3;
563 u8 div1p5;
564};
565
566static struct tda827x_data tda827x_dvbt[] = {
567 { .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
568 { .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
569 { .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
570 { .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
571 { .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
572 { .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
573 { .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
574 { .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
575 { .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
576 { .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
577 { .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
578 { .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0},
579 { .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
580 { .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
581 { .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
582 { .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
583 { .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
584 { .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
585 { .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
586 { .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
587 { .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
588 { .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
589 { .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
590 { .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
591 { .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
592 { .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
593 { .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
594 { .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
595 { .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0}
596};
597
598static int philips_tda827x_tuner_init(struct dvb_frontend *fe)
599{
600 return 0;
601}
602 590
603static int philips_tda827x_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 591static void philips_tda827x_lna_gain(struct dvb_frontend *fe, int high)
604{ 592{
605 struct saa7134_dev *dev = fe->dvb->priv; 593 struct saa7134_dev *dev = fe->dvb->priv;
606 u8 tuner_buf[14]; 594 struct tda1004x_state *state = fe->demodulator_priv;
607 595 u8 addr = state->config->i2c_gate;
608 struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf, 596 u8 config = state->config->tuner_config;
609 .len = sizeof(tuner_buf) }; 597 u8 GP00_CF[] = {0x20, 0x01};
610 int i, tuner_freq, if_freq; 598 u8 GP00_LEV[] = {0x22, 0x00};
611 u32 N; 599
612 switch (params->u.ofdm.bandwidth) { 600 struct i2c_msg msg = {.addr = addr,.flags = 0,.buf = GP00_CF, .len = 2};
613 case BANDWIDTH_6_MHZ: 601 if (config) {
614 if_freq = 4000000; 602 if (high) {
615 break; 603 dprintk("setting LNA to high gain\n");
616 case BANDWIDTH_7_MHZ: 604 } else {
617 if_freq = 4500000; 605 dprintk("setting LNA to low gain\n");
618 break; 606 }
619 default: /* 8 MHz or Auto */
620 if_freq = 5000000;
621 break;
622 }
623 tuner_freq = params->frequency + if_freq;
624
625 i = 0;
626 while (tda827x_dvbt[i].lomax < tuner_freq) {
627 if(tda827x_dvbt[i + 1].lomax == 0)
628 break;
629 i++;
630 } 607 }
631 608 switch (config) {
632 N = ((tuner_freq + 125000) / 250000) << (tda827x_dvbt[i].spd + 2); 609 case 0: /* no LNA */
633 tuner_buf[0] = 0;
634 tuner_buf[1] = (N>>8) | 0x40;
635 tuner_buf[2] = N & 0xff;
636 tuner_buf[3] = 0;
637 tuner_buf[4] = 0x52;
638 tuner_buf[5] = (tda827x_dvbt[i].spd << 6) + (tda827x_dvbt[i].div1p5 << 5) +
639 (tda827x_dvbt[i].bs << 3) + tda827x_dvbt[i].bp;
640 tuner_buf[6] = (tda827x_dvbt[i].gc3 << 4) + 0x8f;
641 tuner_buf[7] = 0xbf;
642 tuner_buf[8] = 0x2a;
643 tuner_buf[9] = 0x05;
644 tuner_buf[10] = 0xff;
645 tuner_buf[11] = 0x00;
646 tuner_buf[12] = 0x00;
647 tuner_buf[13] = 0x40;
648
649 tuner_msg.len = 14;
650 if (fe->ops.i2c_gate_ctrl)
651 fe->ops.i2c_gate_ctrl(fe, 1);
652 if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1)
653 return -EIO;
654
655 msleep(500);
656 /* correct CP value */
657 tuner_buf[0] = 0x30;
658 tuner_buf[1] = 0x50 + tda827x_dvbt[i].cp;
659 tuner_msg.len = 2;
660 if (fe->ops.i2c_gate_ctrl)
661 fe->ops.i2c_gate_ctrl(fe, 1);
662 i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
663
664 return 0;
665}
666
667static int philips_tda827x_tuner_sleep(struct dvb_frontend *fe)
668{
669 struct saa7134_dev *dev = fe->dvb->priv;
670 static u8 tda827x_sleep[] = { 0x30, 0xd0};
671 struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tda827x_sleep,
672 .len = sizeof(tda827x_sleep) };
673 if (fe->ops.i2c_gate_ctrl)
674 fe->ops.i2c_gate_ctrl(fe, 1);
675 i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
676 return 0;
677}
678
679static struct tda1004x_config tda827x_lifeview_config = {
680 .demod_address = 0x08,
681 .invert = 1,
682 .invert_oclk = 0,
683 .xtal_freq = TDA10046_XTAL_16M,
684 .agc_config = TDA10046_AGC_TDA827X_GP11,
685 .if_freq = TDA10046_FREQ_045,
686 .request_firmware = NULL,
687};
688
689/* ------------------------------------------------------------------ */
690
691struct tda827xa_data {
692 u32 lomax;
693 u8 svco;
694 u8 spd;
695 u8 scr;
696 u8 sbs;
697 u8 gc3;
698};
699
700static struct tda827xa_data tda827xa_dvbt[] = {
701 { .lomax = 56875000, .svco = 3, .spd = 4, .scr = 0, .sbs = 0, .gc3 = 1},
702 { .lomax = 67250000, .svco = 0, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
703 { .lomax = 81250000, .svco = 1, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
704 { .lomax = 97500000, .svco = 2, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1},
705 { .lomax = 113750000, .svco = 3, .spd = 3, .scr = 0, .sbs = 1, .gc3 = 1},
706 { .lomax = 134500000, .svco = 0, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
707 { .lomax = 154000000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
708 { .lomax = 162500000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
709 { .lomax = 183000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1},
710 { .lomax = 195000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
711 { .lomax = 227500000, .svco = 3, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1},
712 { .lomax = 269000000, .svco = 0, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
713 { .lomax = 290000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1},
714 { .lomax = 325000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
715 { .lomax = 390000000, .svco = 2, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
716 { .lomax = 455000000, .svco = 3, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1},
717 { .lomax = 520000000, .svco = 0, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
718 { .lomax = 538000000, .svco = 0, .spd = 0, .scr = 1, .sbs = 3, .gc3 = 1},
719 { .lomax = 550000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1},
720 { .lomax = 620000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
721 { .lomax = 650000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
722 { .lomax = 700000000, .svco = 2, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
723 { .lomax = 780000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
724 { .lomax = 820000000, .svco = 3, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0},
725 { .lomax = 870000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0},
726 { .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 0},
727 { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0}};
728
729
730static int philips_tda827xa_pll_set(u8 addr, struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
731{
732 struct saa7134_dev *dev = fe->dvb->priv;
733 u8 tuner_buf[14];
734 unsigned char reg2[2];
735
736 struct i2c_msg msg = {.addr = addr,.flags = 0,.buf = tuner_buf};
737 int i, tuner_freq, if_freq;
738 u32 N;
739
740 switch (params->u.ofdm.bandwidth) {
741 case BANDWIDTH_6_MHZ:
742 if_freq = 4000000;
743 break; 610 break;
744 case BANDWIDTH_7_MHZ: 611 case 1: /* switch is GPIO 0 of tda8290 */
745 if_freq = 4500000; 612 case 2:
613 /* turn Vsync off */
614 saa7134_set_gpio(dev, 22, 0);
615 GP00_LEV[1] = high ? 0 : 1;
616 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
617 wprintk("could not access tda8290 at addr: 0x%02x\n",
618 addr << 1);
619 return;
620 }
621 msg.buf = GP00_LEV;
622 if (config == 2)
623 GP00_LEV[1] = high ? 1 : 0;
624 i2c_transfer(&dev->i2c_adap, &msg, 1);
746 break; 625 break;
747 default: /* 8 MHz or Auto */ 626 case 3: /* switch with GPIO of saa713x */
748 if_freq = 5000000; 627 saa7134_set_gpio(dev, 22, high);
749 break; 628 break;
750 } 629 }
751 tuner_freq = params->frequency + if_freq;
752
753 i = 0;
754 while (tda827xa_dvbt[i].lomax < tuner_freq) {
755 if(tda827xa_dvbt[i + 1].lomax == 0)
756 break;
757 i++;
758 }
759
760 N = ((tuner_freq + 31250) / 62500) << tda827xa_dvbt[i].spd;
761 tuner_buf[0] = 0; // subaddress
762 tuner_buf[1] = N >> 8;
763 tuner_buf[2] = N & 0xff;
764 tuner_buf[3] = 0;
765 tuner_buf[4] = 0x16;
766 tuner_buf[5] = (tda827xa_dvbt[i].spd << 5) + (tda827xa_dvbt[i].svco << 3) +
767 tda827xa_dvbt[i].sbs;
768 tuner_buf[6] = 0x4b + (tda827xa_dvbt[i].gc3 << 4);
769 tuner_buf[7] = 0x0c;
770 tuner_buf[8] = 0x06;
771 tuner_buf[9] = 0x24;
772 tuner_buf[10] = 0xff;
773 tuner_buf[11] = 0x60;
774 tuner_buf[12] = 0x00;
775 tuner_buf[13] = 0x39; // lpsel
776 msg.len = 14;
777 if (fe->ops.i2c_gate_ctrl)
778 fe->ops.i2c_gate_ctrl(fe, 1);
779 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
780 return -EIO;
781
782 msg.buf= reg2;
783 msg.len = 2;
784 reg2[0] = 0x60;
785 reg2[1] = 0x3c;
786 if (fe->ops.i2c_gate_ctrl)
787 fe->ops.i2c_gate_ctrl(fe, 1);
788 i2c_transfer(&dev->i2c_adap, &msg, 1);
789
790 reg2[0] = 0xa0;
791 reg2[1] = 0x40;
792 if (fe->ops.i2c_gate_ctrl)
793 fe->ops.i2c_gate_ctrl(fe, 1);
794 i2c_transfer(&dev->i2c_adap, &msg, 1);
795
796 msleep(2);
797 /* correct CP value */
798 reg2[0] = 0x30;
799 reg2[1] = 0x10 + tda827xa_dvbt[i].scr;
800 msg.len = 2;
801 if (fe->ops.i2c_gate_ctrl)
802 fe->ops.i2c_gate_ctrl(fe, 1);
803 i2c_transfer(&dev->i2c_adap, &msg, 1);
804
805 msleep(550);
806 reg2[0] = 0x50;
807 reg2[1] = 0x4f + (tda827xa_dvbt[i].gc3 << 4);
808 if (fe->ops.i2c_gate_ctrl)
809 fe->ops.i2c_gate_ctrl(fe, 1);
810 i2c_transfer(&dev->i2c_adap, &msg, 1);
811
812 return 0;
813
814} 630}
815 631
816static int philips_tda827xa_tuner_sleep(u8 addr, struct dvb_frontend *fe) 632static int tda8290_i2c_gate_ctrl( struct dvb_frontend* fe, int enable)
817{ 633{
818 struct saa7134_dev *dev = fe->dvb->priv; 634 struct tda1004x_state *state = fe->demodulator_priv;
819 static u8 tda827xa_sleep[] = { 0x30, 0x90};
820 struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tda827xa_sleep,
821 .len = sizeof(tda827xa_sleep) };
822 if (fe->ops.i2c_gate_ctrl)
823 fe->ops.i2c_gate_ctrl(fe, 1);
824 i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
825 if (fe->ops.i2c_gate_ctrl)
826 fe->ops.i2c_gate_ctrl(fe, 0);
827 return 0;
828}
829 635
830/* ------------------------------------------------------------------ */ 636 u8 addr = state->config->i2c_gate;
831
832static int tda8290_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
833{
834 struct saa7134_dev *dev = fe->dvb->priv;
835 static u8 tda8290_close[] = { 0x21, 0xc0}; 637 static u8 tda8290_close[] = { 0x21, 0xc0};
836 static u8 tda8290_open[] = { 0x21, 0x80}; 638 static u8 tda8290_open[] = { 0x21, 0x80};
837 struct i2c_msg tda8290_msg = {.addr = 0x4b,.flags = 0, .len = 2}; 639 struct i2c_msg tda8290_msg = {.addr = addr,.flags = 0, .len = 2};
838 if (enable) { 640 if (enable) {
839 tda8290_msg.buf = tda8290_close; 641 tda8290_msg.buf = tda8290_close;
840 } else { 642 } else {
841 tda8290_msg.buf = tda8290_open; 643 tda8290_msg.buf = tda8290_open;
842 } 644 }
843 if (i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1) != 1) 645 if (i2c_transfer(state->i2c, &tda8290_msg, 1) != 1) {
646 struct saa7134_dev *dev = fe->dvb->priv;
647 wprintk("could not access tda8290 I2C gate\n");
844 return -EIO; 648 return -EIO;
649 }
845 msleep(20); 650 msleep(20);
846 return 0; 651 return 0;
847} 652}
848 653
849/* ------------------------------------------------------------------ */ 654/* ------------------------------------------------------------------ */
850 655
851static int philips_tiger_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 656static int philips_tda827x_tuner_init(struct dvb_frontend *fe)
852{ 657{
853 int ret; 658 struct saa7134_dev *dev = fe->dvb->priv;
659 struct tda1004x_state *state = fe->demodulator_priv;
854 660
855 ret = philips_tda827xa_pll_set(0x61, fe, params); 661 switch (state->config->antenna_switch) {
856 if (ret != 0) 662 case 0: break;
857 return ret; 663 case 1: dprintk("setting GPIO21 to 0 (TV antenna?)\n");
664 saa7134_set_gpio(dev, 21, 0);
665 break;
666 case 2: dprintk("setting GPIO21 to 1 (Radio antenna?)\n");
667 saa7134_set_gpio(dev, 21, 1);
668 break;
669 }
858 return 0; 670 return 0;
859} 671}
860 672
861static int philips_tiger_tuner_init(struct dvb_frontend *fe) 673static int philips_tda827x_tuner_sleep(struct dvb_frontend *fe)
862{ 674{
863 struct saa7134_dev *dev = fe->dvb->priv; 675 struct saa7134_dev *dev = fe->dvb->priv;
864 static u8 data[] = { 0x3c, 0x33, 0x6a}; 676 struct tda1004x_state *state = fe->demodulator_priv;
865 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
866 677
867 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) 678 switch (state->config->antenna_switch) {
868 return -EIO; 679 case 0: break;
680 case 1: dprintk("setting GPIO21 to 1 (Radio antenna?)\n");
681 saa7134_set_gpio(dev, 21, 1);
682 break;
683 case 2: dprintk("setting GPIO21 to 0 (TV antenna?)\n");
684 saa7134_set_gpio(dev, 21, 0);
685 break;
686 }
869 return 0; 687 return 0;
870} 688}
871 689
872static int philips_tiger_tuner_sleep(struct dvb_frontend *fe) 690static struct tda827x_config tda827x_cfg = {
873{ 691 .lna_gain = philips_tda827x_lna_gain,
874 struct saa7134_dev *dev = fe->dvb->priv; 692 .init = philips_tda827x_tuner_init,
875 static u8 data[] = { 0x3c, 0x33, 0x68}; 693 .sleep = philips_tda827x_tuner_sleep
876 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 694};
877 695
878 i2c_transfer(&dev->i2c_adap, &msg, 1); 696static void configure_tda827x_fe(struct saa7134_dev *dev, struct tda1004x_config *tda_conf)
879 philips_tda827xa_tuner_sleep( 0x61, fe); 697{
880 return 0; 698 dev->dvb.frontend = dvb_attach(tda10046_attach, tda_conf, &dev->i2c_adap);
699 if (dev->dvb.frontend) {
700 if (tda_conf->i2c_gate)
701 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
702 if (dvb_attach(tda827x_attach, dev->dvb.frontend, tda_conf->tuner_address,
703 &dev->i2c_adap,&tda827x_cfg) == NULL) {
704 wprintk("no tda827x tuner found at addr: %02x\n",
705 tda_conf->tuner_address);
706 }
707 }
881} 708}
882 709
883static struct tda1004x_config philips_tiger_config = { 710/* ------------------------------------------------------------------ */
711static struct tda1004x_config tda827x_lifeview_config = {
884 .demod_address = 0x08, 712 .demod_address = 0x08,
885 .invert = 1, 713 .invert = 1,
886 .invert_oclk = 0, 714 .invert_oclk = 0,
887 .xtal_freq = TDA10046_XTAL_16M, 715 .xtal_freq = TDA10046_XTAL_16M,
888 .agc_config = TDA10046_AGC_TDA827X_GP11, 716 .agc_config = TDA10046_AGC_TDA827X,
717 .gpio_config = TDA10046_GP11_I,
889 .if_freq = TDA10046_FREQ_045, 718 .if_freq = TDA10046_FREQ_045,
890 .request_firmware = NULL, 719 .tuner_address = 0x60,
720 .request_firmware = philips_tda1004x_request_firmware
891}; 721};
892/* ------------------------------------------------------------------ */
893
894static int cinergy_ht_tuner_init(struct dvb_frontend *fe)
895{
896 struct saa7134_dev *dev = fe->dvb->priv;
897 static u8 data[] = { 0x3c, 0x33, 0x62};
898 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
899 722
900 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) 723static struct tda1004x_config philips_tiger_config = {
901 return -EIO; 724 .demod_address = 0x08,
902 return 0; 725 .invert = 1,
903} 726 .invert_oclk = 0,
904 727 .xtal_freq = TDA10046_XTAL_16M,
905static int cinergy_ht_tuner_sleep(struct dvb_frontend *fe) 728 .agc_config = TDA10046_AGC_TDA827X,
906{ 729 .gpio_config = TDA10046_GP11_I,
907 struct saa7134_dev *dev = fe->dvb->priv; 730 .if_freq = TDA10046_FREQ_045,
908 static u8 data[] = { 0x3c, 0x33, 0x60}; 731 .i2c_gate = 0x4b,
909 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 732 .tuner_address = 0x61,
910 733 .tuner_config = 0,
911 i2c_transfer(&dev->i2c_adap, &msg, 1); 734 .antenna_switch= 1,
912 philips_tda827xa_tuner_sleep( 0x61, fe); 735 .request_firmware = philips_tda1004x_request_firmware
913 return 0; 736};
914}
915 737
916static struct tda1004x_config cinergy_ht_config = { 738static struct tda1004x_config cinergy_ht_config = {
917 .demod_address = 0x08, 739 .demod_address = 0x08,
918 .invert = 1, 740 .invert = 1,
919 .invert_oclk = 0, 741 .invert_oclk = 0,
920 .xtal_freq = TDA10046_XTAL_16M, 742 .xtal_freq = TDA10046_XTAL_16M,
921 .agc_config = TDA10046_AGC_TDA827X_GP01, 743 .agc_config = TDA10046_AGC_TDA827X,
744 .gpio_config = TDA10046_GP01_I,
922 .if_freq = TDA10046_FREQ_045, 745 .if_freq = TDA10046_FREQ_045,
923 .request_firmware = NULL, 746 .i2c_gate = 0x4b,
747 .tuner_address = 0x61,
748 .tuner_config = 0,
749 .request_firmware = philips_tda1004x_request_firmware
924}; 750};
925 751
926/* ------------------------------------------------------------------ */ 752static struct tda1004x_config cinergy_ht_pci_config = {
753 .demod_address = 0x08,
754 .invert = 1,
755 .invert_oclk = 0,
756 .xtal_freq = TDA10046_XTAL_16M,
757 .agc_config = TDA10046_AGC_TDA827X,
758 .gpio_config = TDA10046_GP01_I,
759 .if_freq = TDA10046_FREQ_045,
760 .i2c_gate = 0x4b,
761 .tuner_address = 0x60,
762 .tuner_config = 0,
763 .request_firmware = philips_tda1004x_request_firmware
764};
927 765
928static struct tda1004x_config pinnacle_pctv_310i_config = { 766static struct tda1004x_config philips_tiger_s_config = {
929 .demod_address = 0x08, 767 .demod_address = 0x08,
930 .invert = 1, 768 .invert = 1,
931 .invert_oclk = 0, 769 .invert_oclk = 0,
932 .xtal_freq = TDA10046_XTAL_16M, 770 .xtal_freq = TDA10046_XTAL_16M,
933 .agc_config = TDA10046_AGC_TDA827X_GP11, 771 .agc_config = TDA10046_AGC_TDA827X,
772 .gpio_config = TDA10046_GP01_I,
934 .if_freq = TDA10046_FREQ_045, 773 .if_freq = TDA10046_FREQ_045,
935 .request_firmware = philips_tda1004x_request_firmware, 774 .i2c_gate = 0x4b,
775 .tuner_address = 0x61,
776 .tuner_config = 2,
777 .antenna_switch= 1,
778 .request_firmware = philips_tda1004x_request_firmware
936}; 779};
937 780
938/* ------------------------------------------------------------------ */ 781static struct tda1004x_config pinnacle_pctv_310i_config = {
782 .demod_address = 0x08,
783 .invert = 1,
784 .invert_oclk = 0,
785 .xtal_freq = TDA10046_XTAL_16M,
786 .agc_config = TDA10046_AGC_TDA827X,
787 .gpio_config = TDA10046_GP11_I,
788 .if_freq = TDA10046_FREQ_045,
789 .i2c_gate = 0x4b,
790 .tuner_address = 0x61,
791 .tuner_config = 1,
792 .request_firmware = philips_tda1004x_request_firmware
793};
939 794
940static struct tda1004x_config hauppauge_hvr_1110_config = { 795static struct tda1004x_config hauppauge_hvr_1110_config = {
941 .demod_address = 0x08, 796 .demod_address = 0x08,
942 .invert = 1, 797 .invert = 1,
943 .invert_oclk = 0, 798 .invert_oclk = 0,
944 .xtal_freq = TDA10046_XTAL_16M, 799 .xtal_freq = TDA10046_XTAL_16M,
945 .agc_config = TDA10046_AGC_TDA827X_GP11, 800 .agc_config = TDA10046_AGC_TDA827X,
801 .gpio_config = TDA10046_GP11_I,
946 .if_freq = TDA10046_FREQ_045, 802 .if_freq = TDA10046_FREQ_045,
947 .request_firmware = philips_tda1004x_request_firmware, 803 .i2c_gate = 0x4b,
804 .tuner_address = 0x61,
805 .request_firmware = philips_tda1004x_request_firmware
948}; 806};
949 807
950/* ------------------------------------------------------------------ */
951
952static struct tda1004x_config asus_p7131_dual_config = { 808static struct tda1004x_config asus_p7131_dual_config = {
953 .demod_address = 0x08, 809 .demod_address = 0x08,
954 .invert = 1, 810 .invert = 1,
955 .invert_oclk = 0, 811 .invert_oclk = 0,
956 .xtal_freq = TDA10046_XTAL_16M, 812 .xtal_freq = TDA10046_XTAL_16M,
957 .agc_config = TDA10046_AGC_TDA827X_GP11, 813 .agc_config = TDA10046_AGC_TDA827X,
814 .gpio_config = TDA10046_GP11_I,
958 .if_freq = TDA10046_FREQ_045, 815 .if_freq = TDA10046_FREQ_045,
959 .request_firmware = philips_tda1004x_request_firmware, 816 .i2c_gate = 0x4b,
817 .tuner_address = 0x61,
818 .tuner_config = 0,
819 .antenna_switch= 2,
820 .request_firmware = philips_tda1004x_request_firmware
960}; 821};
961 822
962static int asus_p7131_dual_tuner_init(struct dvb_frontend *fe)
963{
964 struct saa7134_dev *dev = fe->dvb->priv;
965 static u8 data[] = { 0x3c, 0x33, 0x6a};
966 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
967
968 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
969 return -EIO;
970 /* make sure the DVB-T antenna input is set */
971 saa_setl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0200000);
972 return 0;
973}
974
975static int asus_p7131_dual_tuner_sleep(struct dvb_frontend *fe)
976{
977 struct saa7134_dev *dev = fe->dvb->priv;
978 static u8 data[] = { 0x3c, 0x33, 0x68};
979 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
980
981 i2c_transfer(&dev->i2c_adap, &msg, 1);
982 philips_tda827xa_tuner_sleep( 0x61, fe);
983 /* reset antenna inputs for analog usage */
984 saa_clearl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0200000);
985 return 0;
986}
987
988/* ------------------------------------------------------------------ */
989
990static int lifeview_trio_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
991{
992 int ret;
993
994 ret = philips_tda827xa_pll_set(0x60, fe, params);
995 return ret;
996}
997
998static int lifeview_trio_tuner_sleep(struct dvb_frontend *fe)
999{
1000 philips_tda827xa_tuner_sleep(0x60, fe);
1001 return 0;
1002}
1003
1004static struct tda1004x_config lifeview_trio_config = { 823static struct tda1004x_config lifeview_trio_config = {
1005 .demod_address = 0x09, 824 .demod_address = 0x09,
1006 .invert = 1, 825 .invert = 1,
1007 .invert_oclk = 0, 826 .invert_oclk = 0,
1008 .xtal_freq = TDA10046_XTAL_16M, 827 .xtal_freq = TDA10046_XTAL_16M,
1009 .agc_config = TDA10046_AGC_TDA827X_GP00, 828 .agc_config = TDA10046_AGC_TDA827X,
829 .gpio_config = TDA10046_GP00_I,
1010 .if_freq = TDA10046_FREQ_045, 830 .if_freq = TDA10046_FREQ_045,
1011 .request_firmware = NULL, 831 .tuner_address = 0x60,
832 .request_firmware = philips_tda1004x_request_firmware
1012}; 833};
1013 834
1014/* ------------------------------------------------------------------ */ 835static struct tda1004x_config tevion_dvbt220rf_config = {
1015
1016static int ads_duo_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
1017{
1018 int ret;
1019
1020 ret = philips_tda827xa_pll_set(0x61, fe, params);
1021 return ret;
1022}
1023
1024static int ads_duo_tuner_init(struct dvb_frontend *fe)
1025{
1026 struct saa7134_dev *dev = fe->dvb->priv;
1027 /* route TDA8275a AGC input to the channel decoder */
1028 saa_writeb(SAA7134_GPIO_GPSTATUS2, 0x60);
1029 return 0;
1030}
1031
1032static int ads_duo_tuner_sleep(struct dvb_frontend *fe)
1033{
1034 struct saa7134_dev *dev = fe->dvb->priv;
1035 /* route TDA8275a AGC input to the analog IF chip*/
1036 saa_writeb(SAA7134_GPIO_GPSTATUS2, 0x20);
1037 philips_tda827xa_tuner_sleep( 0x61, fe);
1038 return 0;
1039}
1040
1041static struct tda1004x_config ads_tech_duo_config = {
1042 .demod_address = 0x08, 836 .demod_address = 0x08,
1043 .invert = 1, 837 .invert = 1,
1044 .invert_oclk = 0, 838 .invert_oclk = 0,
1045 .xtal_freq = TDA10046_XTAL_16M, 839 .xtal_freq = TDA10046_XTAL_16M,
1046 .agc_config = TDA10046_AGC_TDA827X_GP00, 840 .agc_config = TDA10046_AGC_TDA827X,
841 .gpio_config = TDA10046_GP11_I,
1047 .if_freq = TDA10046_FREQ_045, 842 .if_freq = TDA10046_FREQ_045,
1048 .request_firmware = NULL, 843 .tuner_address = 0x60,
844 .request_firmware = philips_tda1004x_request_firmware
1049}; 845};
1050 846
1051/* ------------------------------------------------------------------ */ 847static struct tda1004x_config md8800_dvbt_config = {
1052 848 .demod_address = 0x08,
1053static int tevion_dvb220rf_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 849 .invert = 1,
1054{ 850 .invert_oclk = 0,
1055 int ret; 851 .xtal_freq = TDA10046_XTAL_16M,
1056 ret = philips_tda827xa_pll_set(0x60, fe, params); 852 .agc_config = TDA10046_AGC_TDA827X,
1057 return ret; 853 .gpio_config = TDA10046_GP01_I,
1058} 854 .if_freq = TDA10046_FREQ_045,
1059 855 .i2c_gate = 0x4b,
1060static int tevion_dvb220rf_tuner_sleep(struct dvb_frontend *fe) 856 .tuner_address = 0x60,
1061{ 857 .tuner_config = 0,
1062 philips_tda827xa_tuner_sleep( 0x61, fe); 858 .request_firmware = philips_tda1004x_request_firmware
1063 return 0; 859};
1064}
1065 860
1066static struct tda1004x_config tevion_dvbt220rf_config = { 861static struct tda1004x_config asus_p7131_4871_config = {
1067 .demod_address = 0x08, 862 .demod_address = 0x08,
1068 .invert = 1, 863 .invert = 1,
1069 .invert_oclk = 0, 864 .invert_oclk = 0,
1070 .xtal_freq = TDA10046_XTAL_16M, 865 .xtal_freq = TDA10046_XTAL_16M,
1071 .agc_config = TDA10046_AGC_TDA827X_GP11, 866 .agc_config = TDA10046_AGC_TDA827X,
867 .gpio_config = TDA10046_GP01_I,
1072 .if_freq = TDA10046_FREQ_045, 868 .if_freq = TDA10046_FREQ_045,
1073 .request_firmware = NULL, 869 .i2c_gate = 0x4b,
870 .tuner_address = 0x61,
871 .tuner_config = 2,
872 .antenna_switch= 2,
873 .request_firmware = philips_tda1004x_request_firmware
1074}; 874};
1075 875
1076/* ------------------------------------------------------------------ */ 876static struct tda1004x_config asus_p7131_hybrid_lna_config = {
877 .demod_address = 0x08,
878 .invert = 1,
879 .invert_oclk = 0,
880 .xtal_freq = TDA10046_XTAL_16M,
881 .agc_config = TDA10046_AGC_TDA827X,
882 .gpio_config = TDA10046_GP11_I,
883 .if_freq = TDA10046_FREQ_045,
884 .i2c_gate = 0x4b,
885 .tuner_address = 0x61,
886 .tuner_config = 2,
887 .antenna_switch= 2,
888 .request_firmware = philips_tda1004x_request_firmware
889};
890/* ------------------------------------------------------------------
891 * special case: this card uses saa713x GPIO22 for the mode switch
892 */
1077 893
1078static int md8800_dvbt_analog_mode(struct dvb_frontend *fe) 894static int ads_duo_tuner_init(struct dvb_frontend *fe)
1079{ 895{
1080 struct saa7134_dev *dev = fe->dvb->priv; 896 struct saa7134_dev *dev = fe->dvb->priv;
1081 static u8 data[] = { 0x3c, 0x33, 0x68}; 897 philips_tda827x_tuner_init(fe);
1082 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 898 /* route TDA8275a AGC input to the channel decoder */
1083 899 saa7134_set_gpio(dev, 22, 1);
1084 i2c_transfer(&dev->i2c_adap, &msg, 1);
1085 philips_tda827xa_tuner_sleep( 0x61, fe);
1086 return 0; 900 return 0;
1087} 901}
1088 902
1089static int md8800_dvbt_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 903static int ads_duo_tuner_sleep(struct dvb_frontend *fe)
1090{ 904{
1091 int ret;
1092 struct saa7134_dev *dev = fe->dvb->priv; 905 struct saa7134_dev *dev = fe->dvb->priv;
1093 static u8 tda8290_close[] = { 0x21, 0xc0}; 906 /* route TDA8275a AGC input to the analog IF chip*/
1094 static u8 tda8290_open[] = { 0x21, 0x80}; 907 saa7134_set_gpio(dev, 22, 0);
1095 struct i2c_msg tda8290_msg = {.addr = 0x4b,.flags = 0, .len = 2}; 908 philips_tda827x_tuner_sleep(fe);
1096 /* close tda8290 i2c bridge */ 909 return 0;
1097 tda8290_msg.buf = tda8290_close;
1098 ret = i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1);
1099 if (ret != 1)
1100 return -EIO;
1101 msleep(20);
1102 ret = philips_tda827xa_pll_set(0x60, fe, params);
1103 if (ret != 0)
1104 return ret;
1105 /* open tda8290 i2c bridge */
1106 tda8290_msg.buf = tda8290_open;
1107 i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1);
1108 return ret;
1109} 910}
1110 911
1111static struct tda1004x_config md8800_dvbt_config = { 912static struct tda827x_config ads_duo_cfg = {
913 .lna_gain = philips_tda827x_lna_gain,
914 .init = ads_duo_tuner_init,
915 .sleep = ads_duo_tuner_sleep
916};
917
918static struct tda1004x_config ads_tech_duo_config = {
1112 .demod_address = 0x08, 919 .demod_address = 0x08,
1113 .invert = 1, 920 .invert = 1,
1114 .invert_oclk = 0, 921 .invert_oclk = 0,
1115 .xtal_freq = TDA10046_XTAL_16M, 922 .xtal_freq = TDA10046_XTAL_16M,
1116 .agc_config = TDA10046_AGC_TDA827X_GP11, 923 .agc_config = TDA10046_AGC_TDA827X,
924 .gpio_config = TDA10046_GP00_I,
1117 .if_freq = TDA10046_FREQ_045, 925 .if_freq = TDA10046_FREQ_045,
1118 .request_firmware = NULL, 926 .tuner_address = 0x61,
927 .request_firmware = philips_tda1004x_request_firmware
1119}; 928};
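
The ADS Duo boards are the special case noted above: saa713x GPIO22 switches the TDA8275a AGC input between the channel decoder and the analog IF chip, so the tuner init/sleep hooks are supplied through a tda827x_config rather than being wired into the frontend ops directly. The attach sequence then has roughly this shape (same pattern as the SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331 case further down in dvb_init(); shown here only as a sketch):

	dev->dvb.frontend = dvb_attach(tda10046_attach, &ads_tech_duo_config,
				       &dev->i2c_adap);
	if (dev->dvb.frontend) {
		/* the tda827x module calls back into ads_duo_cfg.init/.sleep */
		if (dvb_attach(tda827x_attach, dev->dvb.frontend,
			       ads_tech_duo_config.tuner_address,
			       &dev->i2c_adap, &ads_duo_cfg) == NULL)
			wprintk("no tda827x tuner found at addr: %02x\n",
				ads_tech_duo_config.tuner_address);
	}
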
1120 929
930/* ==================================================================
931 * tda10086 based DVB-S cards, helper functions
932 */
933
1121static struct tda10086_config flydvbs = { 934static struct tda10086_config flydvbs = {
1122 .demod_address = 0x0e, 935 .demod_address = 0x0e,
1123 .invert = 0, 936 .invert = 0,
1124}; 937};
1125 938
1126/* ------------------------------------------------------------------ */ 939/* ==================================================================
940 * nxt200x based ATSC cards, helper functions
941 */
1127 942
1128static struct nxt200x_config avertvhda180 = { 943static struct nxt200x_config avertvhda180 = {
1129 .demod_address = 0x0a, 944 .demod_address = 0x0a,
@@ -1143,10 +958,13 @@ static struct nxt200x_config kworldatsc110 = {
1143 .set_pll_input = nxt200x_set_pll_input, 958 .set_pll_input = nxt200x_set_pll_input,
1144}; 959};
1145 960
1146/* ------------------------------------------------------------------ */ 961/* ==================================================================
962 * Core code
963 */
1147 964
1148static int dvb_init(struct saa7134_dev *dev) 965static int dvb_init(struct saa7134_dev *dev)
1149{ 966{
967 int ret;
1150 /* init struct videobuf_dvb */ 968 /* init struct videobuf_dvb */
1151 dev->ts.nr_bufs = 32; 969 dev->ts.nr_bufs = 32;
1152 dev->ts.nr_packets = 32*4; 970 dev->ts.nr_packets = 32*4;
@@ -1160,7 +978,7 @@ static int dvb_init(struct saa7134_dev *dev)
1160 978
1161 switch (dev->board) { 979 switch (dev->board) {
1162 case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL: 980 case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
1163 printk("%s: pinnacle 300i dvb setup\n",dev->name); 981 dprintk("pinnacle 300i dvb setup\n");
1164 dev->dvb.frontend = dvb_attach(mt352_attach, &pinnacle_300i, 982 dev->dvb.frontend = dvb_attach(mt352_attach, &pinnacle_300i,
1165 &dev->i2c_adap); 983 &dev->i2c_adap);
1166 if (dev->dvb.frontend) { 984 if (dev->dvb.frontend) {
@@ -1169,7 +987,7 @@ static int dvb_init(struct saa7134_dev *dev)
1169 break; 987 break;
1170 case SAA7134_BOARD_AVERMEDIA_777: 988 case SAA7134_BOARD_AVERMEDIA_777:
1171 case SAA7134_BOARD_AVERMEDIA_A16AR: 989 case SAA7134_BOARD_AVERMEDIA_A16AR:
1172 printk("%s: avertv 777 dvb setup\n",dev->name); 990 dprintk("avertv 777 dvb setup\n");
1173 dev->dvb.frontend = dvb_attach(mt352_attach, &avermedia_777, 991 dev->dvb.frontend = dvb_attach(mt352_attach, &avermedia_777,
1174 &dev->i2c_adap); 992 &dev->i2c_adap);
1175 if (dev->dvb.frontend) { 993 if (dev->dvb.frontend) {
@@ -1191,42 +1009,15 @@ static int dvb_init(struct saa7134_dev *dev)
1191 &philips_tu1216_60_config, 1009 &philips_tu1216_60_config,
1192 &dev->i2c_adap); 1010 &dev->i2c_adap);
1193 if (dev->dvb.frontend) { 1011 if (dev->dvb.frontend) {
1194 dev->dvb.frontend->ops.tuner_ops.init = philips_tu1216_tuner_60_init; 1012 dev->dvb.frontend->ops.tuner_ops.init = philips_tu1216_init;
1195 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tu1216_tuner_60_set_params; 1013 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda6651_pll_set;
1196 } 1014 }
1197 break; 1015 break;
1198 case SAA7134_BOARD_FLYDVBTDUO: 1016 case SAA7134_BOARD_FLYDVBTDUO:
1199 dev->dvb.frontend = dvb_attach(tda10046_attach,
1200 &tda827x_lifeview_config,
1201 &dev->i2c_adap);
1202 if (dev->dvb.frontend) {
1203 dev->dvb.frontend->ops.tuner_ops.init = philips_tda827x_tuner_init;
1204 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tda827x_tuner_sleep;
1205 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda827x_tuner_set_params;
1206 }
1207 break;
1208 case SAA7134_BOARD_FLYDVBT_DUO_CARDBUS: 1017 case SAA7134_BOARD_FLYDVBT_DUO_CARDBUS:
1209 dev->dvb.frontend = dvb_attach(tda10046_attach, 1018 configure_tda827x_fe(dev, &tda827x_lifeview_config);
1210 &tda827x_lifeview_config,
1211 &dev->i2c_adap);
1212 if (dev->dvb.frontend) {
1213 dev->dvb.frontend->ops.tuner_ops.init = philips_tda827x_tuner_init;
1214 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tda827x_tuner_sleep;
1215 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda827x_tuner_set_params;
1216 }
1217 break; 1019 break;
1218 case SAA7134_BOARD_PHILIPS_EUROPA: 1020 case SAA7134_BOARD_PHILIPS_EUROPA:
1219 dev->dvb.frontend = dvb_attach(tda10046_attach,
1220 &philips_europa_config,
1221 &dev->i2c_adap);
1222 if (dev->dvb.frontend) {
1223 dev->original_demod_sleep = dev->dvb.frontend->ops.sleep;
1224 dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
1225 dev->dvb.frontend->ops.tuner_ops.init = philips_europa_tuner_init;
1226 dev->dvb.frontend->ops.tuner_ops.sleep = philips_europa_tuner_sleep;
1227 dev->dvb.frontend->ops.tuner_ops.set_params = philips_td1316_tuner_set_params;
1228 }
1229 break;
1230 case SAA7134_BOARD_VIDEOMATE_DVBT_300: 1021 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
1231 dev->dvb.frontend = dvb_attach(tda10046_attach, 1022 dev->dvb.frontend = dvb_attach(tda10046_attach,
1232 &philips_europa_config, 1023 &philips_europa_config,
@@ -1244,125 +1035,61 @@ static int dvb_init(struct saa7134_dev *dev)
1244 &philips_tu1216_61_config, 1035 &philips_tu1216_61_config,
1245 &dev->i2c_adap); 1036 &dev->i2c_adap);
1246 if (dev->dvb.frontend) { 1037 if (dev->dvb.frontend) {
1247 dev->dvb.frontend->ops.tuner_ops.init = philips_tu1216_tuner_61_init; 1038 dev->dvb.frontend->ops.tuner_ops.init = philips_tu1216_init;
1248 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tu1216_tuner_61_set_params; 1039 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda6651_pll_set;
1249 } 1040 }
1250 break; 1041 break;
1251 case SAA7134_BOARD_PHILIPS_TIGER: 1042 case SAA7134_BOARD_PHILIPS_TIGER:
1252 dev->dvb.frontend = dvb_attach(tda10046_attach, 1043 configure_tda827x_fe(dev, &philips_tiger_config);
1253 &philips_tiger_config,
1254 &dev->i2c_adap);
1255 if (dev->dvb.frontend) {
1256 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1257 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1258 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep;
1259 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1260 }
1261 break; 1044 break;
1262 case SAA7134_BOARD_PINNACLE_PCTV_310i: 1045 case SAA7134_BOARD_PINNACLE_PCTV_310i:
1263 dev->dvb.frontend = dvb_attach(tda10046_attach, 1046 configure_tda827x_fe(dev, &pinnacle_pctv_310i_config);
1264 &pinnacle_pctv_310i_config,
1265 &dev->i2c_adap);
1266 if (dev->dvb.frontend) {
1267 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1268 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1269 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep;
1270 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1271 }
1272 break; 1047 break;
1273 case SAA7134_BOARD_HAUPPAUGE_HVR1110: 1048 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
1274 dev->dvb.frontend = dvb_attach(tda10046_attach, 1049 configure_tda827x_fe(dev, &hauppauge_hvr_1110_config);
1275 &hauppauge_hvr_1110_config,
1276 &dev->i2c_adap);
1277 if (dev->dvb.frontend) {
1278 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1279 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1280 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep;
1281 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1282 }
1283 break; 1050 break;
1284 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 1051 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
1285 dev->dvb.frontend = dvb_attach(tda10046_attach, 1052 configure_tda827x_fe(dev, &asus_p7131_dual_config);
1286 &asus_p7131_dual_config,
1287 &dev->i2c_adap);
1288 if (dev->dvb.frontend) {
1289 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1290 dev->dvb.frontend->ops.tuner_ops.init = asus_p7131_dual_tuner_init;
1291 dev->dvb.frontend->ops.tuner_ops.sleep = asus_p7131_dual_tuner_sleep;
1292 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1293 }
1294 break; 1053 break;
1295 case SAA7134_BOARD_FLYDVBT_LR301: 1054 case SAA7134_BOARD_FLYDVBT_LR301:
1296 dev->dvb.frontend = dvb_attach(tda10046_attach, 1055 configure_tda827x_fe(dev, &tda827x_lifeview_config);
1297 &tda827x_lifeview_config,
1298 &dev->i2c_adap);
1299 if (dev->dvb.frontend) {
1300 dev->dvb.frontend->ops.tuner_ops.init = philips_tda827x_tuner_init;
1301 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tda827x_tuner_sleep;
1302 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tda827x_tuner_set_params;
1303 }
1304 break; 1056 break;
1305 case SAA7134_BOARD_FLYDVB_TRIO: 1057 case SAA7134_BOARD_FLYDVB_TRIO:
1306 if(! use_frontend) { //terrestrial 1058 if(! use_frontend) { //terrestrial
1307 dev->dvb.frontend = dvb_attach(tda10046_attach, 1059 configure_tda827x_fe(dev, &lifeview_trio_config);
1308 &lifeview_trio_config,
1309 &dev->i2c_adap);
1310 if (dev->dvb.frontend) {
1311 dev->dvb.frontend->ops.tuner_ops.sleep = lifeview_trio_tuner_sleep;
1312 dev->dvb.frontend->ops.tuner_ops.set_params =
1313 lifeview_trio_tuner_set_params;
1314 }
1315 } else { //satellite 1060 } else { //satellite
1316 dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap); 1061 dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
1317 if (dev->dvb.frontend) { 1062 if (dev->dvb.frontend) {
1318 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63, 1063 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63,
1319 &dev->i2c_adap, 0) == NULL) { 1064 &dev->i2c_adap, 0) == NULL) {
1320 printk("%s: Lifeview Trio, No tda826x found!\n", __FUNCTION__); 1065 wprintk("%s: Lifeview Trio, No tda826x found!\n", __FUNCTION__);
1321 } 1066 }
1322 if (dvb_attach(isl6421_attach, dev->dvb.frontend, &dev->i2c_adap, 1067 if (dvb_attach(isl6421_attach, dev->dvb.frontend, &dev->i2c_adap,
1323 0x08, 0, 0) == NULL) { 1068 0x08, 0, 0) == NULL) {
1324 printk("%s: Lifeview Trio, No ISL6421 found!\n", __FUNCTION__); 1069 wprintk("%s: Lifeview Trio, No ISL6421 found!\n", __FUNCTION__);
1325 } 1070 }
1326 } 1071 }
1327 } 1072 }
1328 break; 1073 break;
1329 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 1074 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
1075 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
1330 dev->dvb.frontend = dvb_attach(tda10046_attach, 1076 dev->dvb.frontend = dvb_attach(tda10046_attach,
1331 &ads_tech_duo_config, 1077 &ads_tech_duo_config,
1332 &dev->i2c_adap); 1078 &dev->i2c_adap);
1333 if (dev->dvb.frontend) { 1079 if (dev->dvb.frontend) {
1334 dev->dvb.frontend->ops.tuner_ops.init = ads_duo_tuner_init; 1080 if (dvb_attach(tda827x_attach,dev->dvb.frontend,
1335 dev->dvb.frontend->ops.tuner_ops.sleep = ads_duo_tuner_sleep; 1081 ads_tech_duo_config.tuner_address,
1336 dev->dvb.frontend->ops.tuner_ops.set_params = ads_duo_tuner_set_params; 1082 &dev->i2c_adap,&ads_duo_cfg) == NULL) {
1083 wprintk("no tda827x tuner found at addr: %02x\n",
1084 ads_tech_duo_config.tuner_address);
1085 }
1337 } 1086 }
1338 break; 1087 break;
1339 case SAA7134_BOARD_TEVION_DVBT_220RF: 1088 case SAA7134_BOARD_TEVION_DVBT_220RF:
1340 dev->dvb.frontend = dvb_attach(tda10046_attach, 1089 configure_tda827x_fe(dev, &tevion_dvbt220rf_config);
1341 &tevion_dvbt220rf_config,
1342 &dev->i2c_adap);
1343 if (dev->dvb.frontend) {
1344 dev->dvb.frontend->ops.tuner_ops.sleep = tevion_dvb220rf_tuner_sleep;
1345 dev->dvb.frontend->ops.tuner_ops.set_params = tevion_dvb220rf_tuner_set_params;
1346 }
1347 break;
1348 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
1349 dev->dvb.frontend = dvb_attach(tda10046_attach,
1350 &ads_tech_duo_config,
1351 &dev->i2c_adap);
1352 if (dev->dvb.frontend) {
1353 dev->dvb.frontend->ops.tuner_ops.init = ads_duo_tuner_init;
1354 dev->dvb.frontend->ops.tuner_ops.sleep = ads_duo_tuner_sleep;
1355 dev->dvb.frontend->ops.tuner_ops.set_params = ads_duo_tuner_set_params;
1356 }
1357 break; 1090 break;
1358 case SAA7134_BOARD_MEDION_MD8800_QUADRO: 1091 case SAA7134_BOARD_MEDION_MD8800_QUADRO:
1359 dev->dvb.frontend = tda10046_attach(&md8800_dvbt_config, 1092 configure_tda827x_fe(dev, &md8800_dvbt_config);
1360 &dev->i2c_adap);
1361 if (dev->dvb.frontend) {
1362 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1363 dev->dvb.frontend->ops.tuner_ops.sleep = md8800_dvbt_analog_mode;
1364 dev->dvb.frontend->ops.tuner_ops.set_params = md8800_dvbt_pll_set;
1365 }
1366 break; 1093 break;
1367 case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180: 1094 case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180:
1368 dev->dvb.frontend = dvb_attach(nxt200x_attach, &avertvhda180, 1095 dev->dvb.frontend = dvb_attach(nxt200x_attach, &avertvhda180,
@@ -1386,11 +1113,11 @@ static int dvb_init(struct saa7134_dev *dev)
1386 if (dev->dvb.frontend) { 1113 if (dev->dvb.frontend) {
1387 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x60, 1114 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x60,
1388 &dev->i2c_adap, 0) == NULL) { 1115 &dev->i2c_adap, 0) == NULL) {
1389 printk("%s: No tda826x found!\n", __FUNCTION__); 1116 wprintk("%s: No tda826x found!\n", __FUNCTION__);
1390 } 1117 }
1391 if (dvb_attach(isl6421_attach, dev->dvb.frontend, 1118 if (dvb_attach(isl6421_attach, dev->dvb.frontend,
1392 &dev->i2c_adap, 0x08, 0, 0) == NULL) { 1119 &dev->i2c_adap, 0x08, 0, 0) == NULL) {
1393 printk("%s: No ISL6421 found!\n", __FUNCTION__); 1120 wprintk("%s: No ISL6421 found!\n", __FUNCTION__);
1394 } 1121 }
1395 } 1122 }
1396 break; 1123 break;
@@ -1415,41 +1142,45 @@ static int dvb_init(struct saa7134_dev *dev)
1415 } 1142 }
1416 break; 1143 break;
1417 case SAA7134_BOARD_CINERGY_HT_PCMCIA: 1144 case SAA7134_BOARD_CINERGY_HT_PCMCIA:
1418 dev->dvb.frontend = dvb_attach(tda10046_attach, 1145 configure_tda827x_fe(dev, &cinergy_ht_config);
1419 &cinergy_ht_config,
1420 &dev->i2c_adap);
1421 if (dev->dvb.frontend) {
1422 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1423 dev->dvb.frontend->ops.tuner_ops.init = cinergy_ht_tuner_init;
1424 dev->dvb.frontend->ops.tuner_ops.sleep = cinergy_ht_tuner_sleep;
1425 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1426
1427 }
1428 break; 1146 break;
1429 case SAA7134_BOARD_CINERGY_HT_PCI: 1147 case SAA7134_BOARD_CINERGY_HT_PCI:
1430 dev->dvb.frontend = dvb_attach(tda10046_attach, 1148 configure_tda827x_fe(dev, &cinergy_ht_pci_config);
1431 &cinergy_ht_config, 1149 break;
1432 &dev->i2c_adap); 1150 case SAA7134_BOARD_PHILIPS_TIGER_S:
1433 if (dev->dvb.frontend) { 1151 configure_tda827x_fe(dev, &philips_tiger_s_config);
1434 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl; 1152 break;
1435 dev->dvb.frontend->ops.tuner_ops.init = cinergy_ht_tuner_init; 1153 case SAA7134_BOARD_ASUS_P7131_4871:
1436 dev->dvb.frontend->ops.tuner_ops.sleep = cinergy_ht_tuner_sleep; 1154 configure_tda827x_fe(dev, &asus_p7131_4871_config);
1437 dev->dvb.frontend->ops.tuner_ops.set_params = md8800_dvbt_pll_set; 1155 break;
1438 1156 case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
1439 } 1157 configure_tda827x_fe(dev, &asus_p7131_hybrid_lna_config);
1440 break; 1158 break;
1441 default: 1159 default:
1442 printk("%s: Huh? unknown DVB card?\n",dev->name); 1160 wprintk("Huh? unknown DVB card?\n");
1443 break; 1161 break;
1444 } 1162 }
1445 1163
1446 if (NULL == dev->dvb.frontend) { 1164 if (NULL == dev->dvb.frontend) {
1447 printk("%s: frontend initialization failed\n",dev->name); 1165 printk(KERN_ERR "%s/dvb: frontend initialization failed\n", dev->name);
1448 return -1; 1166 return -1;
1449 } 1167 }
1450 1168
1451 /* register everything else */ 1169 /* register everything else */
1452 return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev, &dev->pci->dev); 1170 ret = videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev, &dev->pci->dev);
1171
1172 /* this sequence is necessary to make the tda1004x load its firmware
1173 * and to enter analog mode of hybrid boards
1174 */
1175 if (!ret) {
1176 if (dev->dvb.frontend->ops.init)
1177 dev->dvb.frontend->ops.init(dev->dvb.frontend);
1178 if (dev->dvb.frontend->ops.sleep)
1179 dev->dvb.frontend->ops.sleep(dev->dvb.frontend);
1180 if (dev->dvb.frontend->ops.tuner_ops.sleep)
1181 dev->dvb.frontend->ops.tuner_ops.sleep(dev->dvb.frontend);
1182 }
1183 return ret;
1453} 1184}
1454 1185
1455static int dvb_fini(struct saa7134_dev *dev) 1186static int dvb_fini(struct saa7134_dev *dev)
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index cce8da6a4f94..1cb8c709ca90 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -370,6 +370,8 @@ static int attach_inform(struct i2c_client *client)
370 370
371 tun_setup.type = tuner; 371 tun_setup.type = tuner;
372 tun_setup.addr = saa7134_boards[dev->board].tuner_addr; 372 tun_setup.addr = saa7134_boards[dev->board].tuner_addr;
373 tun_setup.config = saa7134_boards[dev->board].tuner_config;
374 tun_setup.tuner_callback = saa7134_tuner_callback;
373 375
374 if ((tun_setup.addr == ADDR_UNSET)||(tun_setup.addr == client->addr)) { 376 if ((tun_setup.addr == ADDR_UNSET)||(tun_setup.addr == client->addr)) {
375 377
@@ -445,7 +447,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
445 unsigned char buf; 447 unsigned char buf;
446 int i,rc; 448 int i,rc;
447 449
448 for (i = 0; i < 128; i++) { 450 for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
449 c->addr = i; 451 c->addr = i;
450 rc = i2c_master_recv(c,&buf,0); 452 rc = i2c_master_recv(c,&buf,0);
451 if (rc < 0) 453 if (rc < 0)
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 46c583f1e788..c0de37e3f5c6 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -321,6 +321,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
321 mask_keydown = 0x0040000; 321 mask_keydown = 0x0040000;
322 break; 322 break;
323 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 323 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
324 case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
324 ir_codes = ir_codes_asus_pc39; 325 ir_codes = ir_codes_asus_pc39;
325 mask_keydown = 0x0040000; 326 mask_keydown = 0x0040000;
326 rc5_gpio = 1; 327 rc5_gpio = 1;
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index f2cb63053041..9985ded20950 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -26,6 +26,7 @@
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/sort.h>
29 30
30#include "saa7134-reg.h" 31#include "saa7134-reg.h"
31#include "saa7134.h" 32#include "saa7134.h"
@@ -516,14 +517,12 @@ static int res_get(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int
516 return 1; 517 return 1;
517} 518}
518 519
519static 520static int res_check(struct saa7134_fh *fh, unsigned int bit)
520int res_check(struct saa7134_fh *fh, unsigned int bit)
521{ 521{
522 return (fh->resources & bit); 522 return (fh->resources & bit);
523} 523}
524 524
525static 525static int res_locked(struct saa7134_dev *dev, unsigned int bit)
526int res_locked(struct saa7134_dev *dev, unsigned int bit)
527{ 526{
528 return (dev->resources & bit); 527 return (dev->resources & bit);
529} 528}
@@ -603,7 +602,14 @@ static void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm)
603 saa_writeb(SAA7134_RAW_DATA_GAIN, 0x40); 602 saa_writeb(SAA7134_RAW_DATA_GAIN, 0x40);
604 saa_writeb(SAA7134_RAW_DATA_OFFSET, 0x80); 603 saa_writeb(SAA7134_RAW_DATA_OFFSET, 0x80);
605 604
606 saa7134_i2c_call_clients(dev,VIDIOC_S_STD,&norm->id); 605 /* only tell the tuner if this is a tv input */
606 if (card_in(dev,dev->ctl_input).tv) {
607 if ((card(dev).tuner_type == TUNER_PHILIPS_TDA8290)
608 && ((card(dev).tuner_config == 1)
609 || (card(dev).tuner_config == 2)))
610 saa7134_set_gpio(dev, 22, 5);
611 saa7134_i2c_call_clients(dev,VIDIOC_S_STD,&norm->id);
612 }
607} 613}
608 614
609static void video_mux(struct saa7134_dev *dev, int input) 615static void video_mux(struct saa7134_dev *dev, int input)
@@ -732,25 +738,6 @@ struct cliplist {
732 __u8 disable; 738 __u8 disable;
733}; 739};
734 740
735static void sort_cliplist(struct cliplist *cl, int entries)
736{
737 struct cliplist swap;
738 int i,j,n;
739
740 for (i = entries-2; i >= 0; i--) {
741 for (n = 0, j = 0; j <= i; j++) {
742 if (cl[j].position > cl[j+1].position) {
743 swap = cl[j];
744 cl[j] = cl[j+1];
745 cl[j+1] = swap;
746 n++;
747 }
748 }
749 if (0 == n)
750 break;
751 }
752}
753
754static void set_cliplist(struct saa7134_dev *dev, int reg, 741static void set_cliplist(struct saa7134_dev *dev, int reg,
755 struct cliplist *cl, int entries, char *name) 742 struct cliplist *cl, int entries, char *name)
756{ 743{
@@ -784,15 +771,27 @@ static int clip_range(int val)
784 return val; 771 return val;
785} 772}
786 773
774/* Sort into smallest position first order */
775static int cliplist_cmp(const void *a, const void *b)
776{
777 const struct cliplist *cla = a;
778 const struct cliplist *clb = b;
779 if (cla->position < clb->position)
780 return -1;
781 if (cla->position > clb->position)
782 return 1;
783 return 0;
784}
785
787static int setup_clipping(struct saa7134_dev *dev, struct v4l2_clip *clips, 786static int setup_clipping(struct saa7134_dev *dev, struct v4l2_clip *clips,
788 int nclips, int interlace) 787 int nclips, int interlace)
789{ 788{
790 struct cliplist col[16], row[16]; 789 struct cliplist col[16], row[16];
791 int cols, rows, i; 790 int cols = 0, rows = 0, i;
792 int div = interlace ? 2 : 1; 791 int div = interlace ? 2 : 1;
793 792
794 memset(col,0,sizeof(col)); cols = 0; 793 memset(col, 0, sizeof(col));
795 memset(row,0,sizeof(row)); rows = 0; 794 memset(row, 0, sizeof(row));
796 for (i = 0; i < nclips && i < 8; i++) { 795 for (i = 0; i < nclips && i < 8; i++) {
797 col[cols].position = clip_range(clips[i].c.left); 796 col[cols].position = clip_range(clips[i].c.left);
798 col[cols].enable = (1 << i); 797 col[cols].enable = (1 << i);
@@ -808,8 +807,8 @@ static int setup_clipping(struct saa7134_dev *dev, struct v4l2_clip *clips,
808 row[rows].disable = (1 << i); 807 row[rows].disable = (1 << i);
809 rows++; 808 rows++;
810 } 809 }
811 sort_cliplist(col,cols); 810 sort(col, cols, sizeof col[0], cliplist_cmp, NULL);
812 sort_cliplist(row,rows); 811 sort(row, rows, sizeof row[0], cliplist_cmp, NULL);
813 set_cliplist(dev,0x380,col,cols,"cols"); 812 set_cliplist(dev,0x380,col,cols,"cols");
814 set_cliplist(dev,0x384,row,rows,"rows"); 813 set_cliplist(dev,0x384,row,rows,"rows");
815 return 0; 814 return 0;
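
The hand-rolled bubble sort is gone: setup_clipping() now calls the kernel's generic sort() from <linux/sort.h> with cliplist_cmp as the comparison callback. The pattern in isolation (a minimal sketch, not driver code):

	#include <linux/sort.h>

	/* cmp returns <0, 0 or >0, exactly like cliplist_cmp above */
	static int u16_cmp(const void *a, const void *b)
	{
		u16 x = *(const u16 *)a, y = *(const u16 *)b;

		return (x < y) ? -1 : ((x > y) ? 1 : 0);
	}

	static void sort_u16(u16 *v, size_t n)
	{
		/* last argument is an optional swap callback; NULL = generic swap */
		sort(v, n, sizeof(v[0]), u16_cmp, NULL);
	}
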
@@ -1261,19 +1260,14 @@ static struct videobuf_queue* saa7134_queue(struct saa7134_fh *fh)
1261 1260
1262static int saa7134_resource(struct saa7134_fh *fh) 1261static int saa7134_resource(struct saa7134_fh *fh)
1263{ 1262{
1264 int res = 0; 1263 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1264 return RESOURCE_VIDEO;
1265 1265
1266 switch (fh->type) { 1266 if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1267 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 1267 return RESOURCE_VBI;
1268 res = RESOURCE_VIDEO; 1268
1269 break; 1269 BUG();
1270 case V4L2_BUF_TYPE_VBI_CAPTURE: 1270 return 0;
1271 res = RESOURCE_VBI;
1272 break;
1273 default:
1274 BUG();
1275 }
1276 return res;
1277} 1271}
1278 1272
1279static int video_open(struct inode *inode, struct file *file) 1273static int video_open(struct inode *inode, struct file *file)
@@ -1461,8 +1455,7 @@ static int video_release(struct inode *inode, struct file *file)
1461 return 0; 1455 return 0;
1462} 1456}
1463 1457
1464static int 1458static int video_mmap(struct file *file, struct vm_area_struct * vma)
1465video_mmap(struct file *file, struct vm_area_struct * vma)
1466{ 1459{
1467 struct saa7134_fh *fh = file->private_data; 1460 struct saa7134_fh *fh = file->private_data;
1468 1461
@@ -2461,12 +2454,6 @@ int saa7134_video_init2(struct saa7134_dev *dev)
2461 return 0; 2454 return 0;
2462} 2455}
2463 2456
2464int saa7134_video_fini(struct saa7134_dev *dev)
2465{
2466 /* nothing */
2467 return 0;
2468}
2469
2470void saa7134_irq_video_intl(struct saa7134_dev *dev) 2457void saa7134_irq_video_intl(struct saa7134_dev *dev)
2471{ 2458{
2472 static const char *st[] = { 2459 static const char *st[] = {
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index b3e3957c89b5..62224cc958f1 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -231,6 +231,10 @@ struct saa7134_format {
231#define SAA7134_BOARD_ENCORE_ENLTV 106 231#define SAA7134_BOARD_ENCORE_ENLTV 106
232#define SAA7134_BOARD_ENCORE_ENLTV_FM 107 232#define SAA7134_BOARD_ENCORE_ENLTV_FM 107
233#define SAA7134_BOARD_CINERGY_HT_PCI 108 233#define SAA7134_BOARD_CINERGY_HT_PCI 108
234#define SAA7134_BOARD_PHILIPS_TIGER_S 109
235#define SAA7134_BOARD_AVERMEDIA_M102 110
236#define SAA7134_BOARD_ASUS_P7131_4871 111
237#define SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA 112
234 238
235#define SAA7134_MAXBOARDS 8 239#define SAA7134_MAXBOARDS 8
236#define SAA7134_INPUT_MAX 8 240#define SAA7134_INPUT_MAX 8
@@ -280,6 +284,7 @@ struct saa7134_board {
280 unsigned char radio_addr; 284 unsigned char radio_addr;
281 285
282 unsigned int tda9887_conf; 286 unsigned int tda9887_conf;
287 unsigned int tuner_config;
283 288
284 /* peripheral I/O */ 289 /* peripheral I/O */
285 enum saa7134_video_out video_out; 290 enum saa7134_video_out video_out;
@@ -435,6 +440,8 @@ struct saa7134_dev {
435#ifdef VIDIOC_G_PRIORITY 440#ifdef VIDIOC_G_PRIORITY
436 struct v4l2_prio_state prio; 441 struct v4l2_prio_state prio;
437#endif 442#endif
443 /* workstruct for loading modules */
444 struct work_struct request_module_wk;
438 445
439 /* insmod option/autodetected */ 446 /* insmod option/autodetected */
440 int autodetected; 447 int autodetected;
@@ -562,6 +569,8 @@ extern struct list_head saa7134_devlist;
562extern int saa7134_no_overlay; 569extern int saa7134_no_overlay;
563 570
564void saa7134_track_gpio(struct saa7134_dev *dev, char *msg); 571void saa7134_track_gpio(struct saa7134_dev *dev, char *msg);
572void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value);
573int saa7134_tuner_callback(void *ptr, int command, int arg);
565 574
566#define SAA7134_PGTABLE_SIZE 4096 575#define SAA7134_PGTABLE_SIZE 4096
567 576
@@ -620,7 +629,6 @@ int saa7134_common_ioctl(struct saa7134_dev *dev,
620 629
621int saa7134_video_init1(struct saa7134_dev *dev); 630int saa7134_video_init1(struct saa7134_dev *dev);
622int saa7134_video_init2(struct saa7134_dev *dev); 631int saa7134_video_init2(struct saa7134_dev *dev);
623int saa7134_video_fini(struct saa7134_dev *dev);
624void saa7134_irq_video_intl(struct saa7134_dev *dev); 632void saa7134_irq_video_intl(struct saa7134_dev *dev);
625void saa7134_irq_video_done(struct saa7134_dev *dev, unsigned long status); 633void saa7134_irq_video_done(struct saa7134_dev *dev, unsigned long status);
626 634
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 038448f5a978..93fb04ed99a0 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -450,6 +450,13 @@ static int se401_start_stream(struct usb_se401 *se401)
450 } 450 }
451 for (i=0; i<SE401_NUMSBUF; i++) { 451 for (i=0; i<SE401_NUMSBUF; i++) {
452 se401->sbuf[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL); 452 se401->sbuf[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
453 if (!se401->sbuf[i].data) {
454 for(i = i - 1; i >= 0; i--) {
455 kfree(se401->sbuf[i].data);
456 se401->sbuf[i].data = NULL;
457 }
458 return -ENOMEM;
459 }
453 } 460 }
454 461
455 se401->bayeroffset=0; 462 se401->bayeroffset=0;
@@ -458,13 +465,26 @@ static int se401_start_stream(struct usb_se401 *se401)
458 se401->scratch_overflow=0; 465 se401->scratch_overflow=0;
459 for (i=0; i<SE401_NUMSCRATCH; i++) { 466 for (i=0; i<SE401_NUMSCRATCH; i++) {
460 se401->scratch[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL); 467 se401->scratch[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
468 if (!se401->scratch[i].data) {
469 for(i = i - 1; i >= 0; i--) {
470 kfree(se401->scratch[i].data);
471 se401->scratch[i].data = NULL;
472 }
473 goto nomem_sbuf;
474 }
461 se401->scratch[i].state=BUFFER_UNUSED; 475 se401->scratch[i].state=BUFFER_UNUSED;
462 } 476 }
463 477
464 for (i=0; i<SE401_NUMSBUF; i++) { 478 for (i=0; i<SE401_NUMSBUF; i++) {
465 urb=usb_alloc_urb(0, GFP_KERNEL); 479 urb=usb_alloc_urb(0, GFP_KERNEL);
466 if(!urb) 480 if(!urb) {
467 return -ENOMEM; 481 for(i = i - 1; i >= 0; i--) {
482 usb_kill_urb(se401->urb[i]);
483 usb_free_urb(se401->urb[i]);
484 se401->urb[i] = NULL;
485 }
486 goto nomem_scratch;
487 }
468 488
469 usb_fill_bulk_urb(urb, se401->dev, 489 usb_fill_bulk_urb(urb, se401->dev,
470 usb_rcvbulkpipe(se401->dev, SE401_VIDEO_ENDPOINT), 490 usb_rcvbulkpipe(se401->dev, SE401_VIDEO_ENDPOINT),
@@ -482,6 +502,18 @@ static int se401_start_stream(struct usb_se401 *se401)
482 se401->framecount=0; 502 se401->framecount=0;
483 503
484 return 0; 504 return 0;
505
506 nomem_scratch:
507 for (i=0; i<SE401_NUMSCRATCH; i++) {
508 kfree(se401->scratch[i].data);
509 se401->scratch[i].data = NULL;
510 }
511 nomem_sbuf:
512 for (i=0; i<SE401_NUMSBUF; i++) {
513 kfree(se401->sbuf[i].data);
514 se401->sbuf[i].data = NULL;
515 }
516 return -ENOMEM;
485} 517}
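
The error paths added above follow the usual unwind idiom: on a failed allocation, free everything allocated so far (newest first), falling through shared labels. Reduced to its skeleton (illustrative only, not driver code):

	static int alloc_two_buffers(void **a, void **b)
	{
		*a = kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
		if (!*a)
			return -ENOMEM;

		*b = kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
		if (!*b)
			goto free_a;

		return 0;

	free_a:
		kfree(*a);
		*a = NULL;
		return -ENOMEM;
	}
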
486 518
487static int se401_stop_stream(struct usb_se401 *se401) 519static int se401_stop_stream(struct usb_se401 *se401)
diff --git a/drivers/media/video/sn9c102/Kconfig b/drivers/media/video/sn9c102/Kconfig
index 1a7ccb666ab0..19204f5686e1 100644
--- a/drivers/media/video/sn9c102/Kconfig
+++ b/drivers/media/video/sn9c102/Kconfig
@@ -1,6 +1,6 @@
1config USB_SN9C102 1config USB_SN9C102
2 tristate "USB SN9C1xx PC Camera Controller support" 2 tristate "USB SN9C1xx PC Camera Controller support"
3 depends on USB && VIDEO_V4L1 3 depends on USB && VIDEO_V4L2
4 ---help--- 4 ---help---
5 Say Y here if you want support for cameras based on SONiX SN9C101, 5 Say Y here if you want support for cameras based on SONiX SN9C101,
6 SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers. 6 SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers.
diff --git a/drivers/media/video/sn9c102/Makefile b/drivers/media/video/sn9c102/Makefile
index 30e3dfe537fe..a56d16f69c71 100644
--- a/drivers/media/video/sn9c102/Makefile
+++ b/drivers/media/video/sn9c102/Makefile
@@ -1,7 +1,14 @@
1sn9c102-objs := sn9c102_core.o sn9c102_hv7131d.o sn9c102_mi0343.o \ 1sn9c102-objs := sn9c102_core.o \
2 sn9c102_ov7630.o sn9c102_ov7660.o sn9c102_pas106b.o \ 2 sn9c102_hv7131d.o \
3 sn9c102_pas202bcb.o sn9c102_tas5110c1b.o \ 3 sn9c102_hv7131r.o \
4 sn9c102_tas5130d1b.o 4 sn9c102_mi0343.o \
5 sn9c102_mi0360.o \
6 sn9c102_ov7630.o \
7 sn9c102_ov7660.o \
8 sn9c102_pas106b.o \
9 sn9c102_pas202bcb.o \
10 sn9c102_tas5110c1b.o \
11 sn9c102_tas5110d.o \
12 sn9c102_tas5130d1b.o
5 13
6obj-$(CONFIG_USB_SN9C102) += sn9c102.o 14obj-$(CONFIG_USB_SN9C102) += sn9c102.o
7
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index 5428f34e7c5b..680e74634527 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -78,8 +78,13 @@ enum sn9c102_stream_state {
78 78
79typedef char sn9c102_sof_header_t[62]; 79typedef char sn9c102_sof_header_t[62];
80 80
81struct sn9c102_sof_t {
82 sn9c102_sof_header_t header;
83 u16 bytesread;
84};
85
81struct sn9c102_sysfs_attr { 86struct sn9c102_sysfs_attr {
82 u8 reg, i2c_reg; 87 u16 reg, i2c_reg;
83 sn9c102_sof_header_t frame_header; 88 sn9c102_sof_header_t frame_header;
84}; 89};
85 90
@@ -112,7 +117,7 @@ struct sn9c102_device {
112 struct v4l2_jpegcompression compression; 117 struct v4l2_jpegcompression compression;
113 118
114 struct sn9c102_sysfs_attr sysfs; 119 struct sn9c102_sysfs_attr sysfs;
115 sn9c102_sof_header_t sof_header; 120 struct sn9c102_sof_t sof;
116 u16 reg[384]; 121 u16 reg[384];
117 122
118 struct sn9c102_module_param module_param; 123 struct sn9c102_module_param module_param;
@@ -182,8 +187,8 @@ do { \
182 if ((level) == 1 || (level) == 2) \ 187 if ((level) == 1 || (level) == 2) \
183 pr_info("sn9c102: " fmt "\n", ## args); \ 188 pr_info("sn9c102: " fmt "\n", ## args); \
184 else if ((level) == 3) \ 189 else if ((level) == 3) \
185 pr_debug("sn9c102: [%s:%d] " fmt "\n", __FUNCTION__, \ 190 pr_debug("sn9c102: [%s:%d] " fmt "\n", \
186 __LINE__ , ## args); \ 191 __FUNCTION__, __LINE__ , ## args); \
187 } \ 192 } \
188} while (0) 193} while (0)
189#else 194#else
@@ -194,8 +199,8 @@ do { \
194 199
195#undef PDBG 200#undef PDBG
196#define PDBG(fmt, args...) \ 201#define PDBG(fmt, args...) \
197dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n", \ 202dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __FUNCTION__, \
198 __FUNCTION__, __LINE__ , ## args) 203 __LINE__ , ## args)
199 204
200#undef PDBGG 205#undef PDBGG
201#define PDBGG(fmt, args...) do {;} while(0) /* placeholder */ 206#define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index d0e2b40a7725..89f83354de3b 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -44,11 +44,12 @@
44/*****************************************************************************/ 44/*****************************************************************************/
45 45
46#define SN9C102_MODULE_NAME "V4L2 driver for SN9C1xx PC Camera Controllers" 46#define SN9C102_MODULE_NAME "V4L2 driver for SN9C1xx PC Camera Controllers"
47#define SN9C102_MODULE_AUTHOR "(C) 2004-2006 Luca Risolia" 47#define SN9C102_MODULE_ALIAS "sn9c1xx"
48#define SN9C102_MODULE_AUTHOR "(C) 2004-2007 Luca Risolia"
48#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>" 49#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
49#define SN9C102_MODULE_LICENSE "GPL" 50#define SN9C102_MODULE_LICENSE "GPL"
50#define SN9C102_MODULE_VERSION "1:1.34" 51#define SN9C102_MODULE_VERSION "1:1.39"
51#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 34) 52#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 39)
52 53
53/*****************************************************************************/ 54/*****************************************************************************/
54 55
@@ -56,6 +57,7 @@ MODULE_DEVICE_TABLE(usb, sn9c102_id_table);
56 57
57MODULE_AUTHOR(SN9C102_MODULE_AUTHOR " " SN9C102_AUTHOR_EMAIL); 58MODULE_AUTHOR(SN9C102_MODULE_AUTHOR " " SN9C102_AUTHOR_EMAIL);
58MODULE_DESCRIPTION(SN9C102_MODULE_NAME); 59MODULE_DESCRIPTION(SN9C102_MODULE_NAME);
60MODULE_ALIAS(SN9C102_MODULE_ALIAS);
59MODULE_VERSION(SN9C102_MODULE_VERSION); 61MODULE_VERSION(SN9C102_MODULE_VERSION);
60MODULE_LICENSE(SN9C102_MODULE_LICENSE); 62MODULE_LICENSE(SN9C102_MODULE_LICENSE);
61 63
@@ -106,8 +108,7 @@ MODULE_PARM_DESC(debug,
106 "\n1 = critical errors" 108 "\n1 = critical errors"
107 "\n2 = significant informations" 109 "\n2 = significant informations"
108 "\n3 = more verbose messages" 110 "\n3 = more verbose messages"
109 "\nLevel 3 is useful for testing only, when only " 111 "\nLevel 3 is useful for testing only."
110 "one device is used."
111 "\nDefault value is "__MODULE_STRING(SN9C102_DEBUG_LEVEL)"." 112 "\nDefault value is "__MODULE_STRING(SN9C102_DEBUG_LEVEL)"."
112 "\n"); 113 "\n");
113#endif 114#endif
@@ -121,8 +122,8 @@ sn9c102_request_buffers(struct sn9c102_device* cam, u32 count,
121 struct v4l2_pix_format* p = &(cam->sensor.pix_format); 122 struct v4l2_pix_format* p = &(cam->sensor.pix_format);
122 struct v4l2_rect* r = &(cam->sensor.cropcap.bounds); 123 struct v4l2_rect* r = &(cam->sensor.cropcap.bounds);
123 size_t imagesize = cam->module_param.force_munmap || io == IO_READ ? 124 size_t imagesize = cam->module_param.force_munmap || io == IO_READ ?
124 (p->width * p->height * p->priv) / 8 : 125 (p->width * p->height * p->priv) / 8 :
125 (r->width * r->height * p->priv) / 8; 126 (r->width * r->height * p->priv) / 8;
126 void* buff = NULL; 127 void* buff = NULL;
127 u32 i; 128 u32 i;
128 129
@@ -208,27 +209,40 @@ static void sn9c102_queue_unusedframes(struct sn9c102_device* cam)
208} 209}
209 210
210/*****************************************************************************/ 211/*****************************************************************************/
211 212/*
212int sn9c102_write_regs(struct sn9c102_device* cam, u8* buff, u16 index) 213 * Write a sequence of count value/register pairs. Returns -1 after the
214 * first failed write, or 0 for no errors.
215 */
216int sn9c102_write_regs(struct sn9c102_device* cam, const u8 valreg[][2],
217 int count)
213{ 218{
214 struct usb_device* udev = cam->usbdev; 219 struct usb_device* udev = cam->usbdev;
220 u8* value = cam->control_buffer; /* Needed for DMA'able memory */
215 int i, res; 221 int i, res;
216 222
217 if (index + sizeof(buff) >= ARRAY_SIZE(cam->reg)) 223 for (i = 0; i < count; i++) {
218 return -1; 224 u8 index = valreg[i][1];
225
226 /*
227 * index is a u8, so it must be <256 and can't be out of range.
228 * If we put in a check anyway, gcc annoys us with a warning
229 * that our check is useless. People get all uppity when they
230 * see warnings in the kernel compile.
231 */
232
233 *value = valreg[i][0];
234 res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
235 0x08, 0x41, index, 0,
236 value, 1, SN9C102_CTRL_TIMEOUT);
237 if (res < 0) {
238 DBG(3, "Failed to write a register (value 0x%02X, "
239 "index 0x%02X, error %d)", *value, index, res);
240 return -1;
241 }
219 242
220 res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x08, 0x41, 243 cam->reg[index] = *value;
221 index, 0, buff, sizeof(buff),
222 SN9C102_CTRL_TIMEOUT*sizeof(buff));
223 if (res < 0) {
224 DBG(3, "Failed to write registers (index 0x%02X, error %d)",
225 index, res);
226 return -1;
227 } 244 }
228 245
229 for (i = 0; i < sizeof(buff); i++)
230 cam->reg[index+i] = buff[i];
231
232 return 0; 246 return 0;
233} 247}
234 248
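
With the new prototype, callers hand sn9c102_write_regs() a table of {value, register} pairs plus a count instead of a raw buffer and a start index; each write goes through the DMA-safe control_buffer and the loop stops at the first failed control transfer. A hypothetical call site, assuming a cam pointer is in scope (table contents are placeholders):

	static const u8 example_init[][2] = {
		{0x44, 0x01},	/* {value, register} - placeholder pairs */
		{0x00, 0x02},
		{0x00, 0x03},
	};

	if (sn9c102_write_regs(cam, example_init, ARRAY_SIZE(example_init)))
		DBG(3, "example register sequence failed");
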
@@ -485,18 +499,43 @@ static size_t sn9c102_sof_length(struct sn9c102_device* cam)
485static void* 499static void*
486sn9c102_find_sof_header(struct sn9c102_device* cam, void* mem, size_t len) 500sn9c102_find_sof_header(struct sn9c102_device* cam, void* mem, size_t len)
487{ 501{
488 char sof_header[6] = {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96}; 502 static const char marker[6] = {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96};
489 size_t soflen = 0, i; 503 const char *m = mem;
504 size_t soflen = 0, i, j;
490 505
491 soflen = sn9c102_sof_length(cam); 506 soflen = sn9c102_sof_length(cam);
492 507
493 for (i = 0; (len >= soflen) && (i <= len - soflen); i++) 508 for (i = 0; i < len; i++) {
494 if (!memcmp(mem + i, sof_header, sizeof(sof_header))) { 509 size_t b;
495 memcpy(cam->sof_header, mem + i, 510
496 sizeof(sn9c102_sof_header_t)); 511 /* Read the variable part of the header */
497 /* Skip the header */ 512 if (unlikely(cam->sof.bytesread >= sizeof(marker))) {
498 return mem + i + soflen; 513 cam->sof.header[cam->sof.bytesread] = *(m+i);
514 if (++cam->sof.bytesread == soflen) {
515 cam->sof.bytesread = 0;
516 return mem + i;
517 }
518 continue;
519 }
520
521 /* Search for the SOF marker (fixed part) in the header */
522 for (j = 0, b=cam->sof.bytesread; j+b < sizeof(marker); j++) {
523 if (unlikely(i+j) == len)
524 return NULL;
525 if (*(m+i+j) == marker[cam->sof.bytesread]) {
526 cam->sof.header[cam->sof.bytesread] = *(m+i+j);
527 if (++cam->sof.bytesread == sizeof(marker)) {
528 PDBGG("Bytes to analyze: %zd. SOF "
529 "starts at byte #%zd", len, i);
530 i += j+1;
531 break;
532 }
533 } else {
534 cam->sof.bytesread = 0;
535 break;
499 } 536 }
537 }
538 }
500 539
501 return NULL; 540 return NULL;
502} 541}
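
The SOF search is now stateful: cam->sof.bytesread remembers how much of the 6-byte marker (and then of the variable header part) was already seen, so a header split across two USB transfers is still found. The core idea in isolation (a simplified sketch, not the driver code; it ignores markers that overlap their own prefix):

	static const u8 marker[6] = {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96};

	/*
	 * Returns the offset just past the marker in buf, or -1 if the marker
	 * is still incomplete. '*seen' is caller-owned state that persists
	 * across buffers; start it at 0.
	 */
	static int match_marker(const u8 *buf, int len, int *seen)
	{
		int i;

		for (i = 0; i < len; i++) {
			if (buf[i] == marker[*seen]) {
				if (++(*seen) == (int)sizeof(marker)) {
					*seen = 0;
					return i + 1;
				}
			} else {
				*seen = (buf[i] == marker[0]) ? 1 : 0;
			}
		}
		return -1;	/* marker continues in the next buffer */
	}
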
@@ -505,7 +544,7 @@ sn9c102_find_sof_header(struct sn9c102_device* cam, void* mem, size_t len)
505static void* 544static void*
506sn9c102_find_eof_header(struct sn9c102_device* cam, void* mem, size_t len) 545sn9c102_find_eof_header(struct sn9c102_device* cam, void* mem, size_t len)
507{ 546{
508 char eof_header[4][4] = { 547 static const u8 eof_header[4][4] = {
509 {0x00, 0x00, 0x00, 0x00}, 548 {0x00, 0x00, 0x00, 0x00},
510 {0x40, 0x00, 0x00, 0x00}, 549 {0x40, 0x00, 0x00, 0x00},
511 {0x80, 0x00, 0x00, 0x00}, 550 {0x80, 0x00, 0x00, 0x00},
@@ -513,10 +552,16 @@ sn9c102_find_eof_header(struct sn9c102_device* cam, void* mem, size_t len)
513 }; 552 };
514 size_t i, j; 553 size_t i, j;
515 554
555 /* The EOF header does not exist in compressed data */
516 if (cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_SN9C10X || 556 if (cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_SN9C10X ||
517 cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_JPEG) 557 cam->sensor.pix_format.pixelformat == V4L2_PIX_FMT_JPEG)
518 return NULL; /* EOF header does not exist in compressed data */ 558 return NULL;
519 559
560 /*
561 The EOF header might cross the packet boundary, but this is not a
562 problem, since the end of a frame is determined by checking its size
563 in the first place.
564 */
520 for (i = 0; (len >= 4) && (i <= len - 4); i++) 565 for (i = 0; (len >= 4) && (i <= len - 4); i++)
521 for (j = 0; j < ARRAY_SIZE(eof_header); j++) 566 for (j = 0; j < ARRAY_SIZE(eof_header); j++)
522 if (!memcmp(mem + i, eof_header[j], 4)) 567 if (!memcmp(mem + i, eof_header[j], 4))
@@ -529,7 +574,7 @@ sn9c102_find_eof_header(struct sn9c102_device* cam, void* mem, size_t len)
529static void 574static void
530sn9c102_write_jpegheader(struct sn9c102_device* cam, struct sn9c102_frame_t* f) 575sn9c102_write_jpegheader(struct sn9c102_device* cam, struct sn9c102_frame_t* f)
531{ 576{
532 static u8 jpeg_header[589] = { 577 static const u8 jpeg_header[589] = {
533 0xff, 0xd8, 0xff, 0xdb, 0x00, 0x84, 0x00, 0x06, 0x04, 0x05, 578 0xff, 0xd8, 0xff, 0xdb, 0x00, 0x84, 0x00, 0x06, 0x04, 0x05,
534 0x06, 0x05, 0x04, 0x06, 0x06, 0x05, 0x06, 0x07, 0x07, 0x06, 579 0x06, 0x05, 0x04, 0x06, 0x06, 0x05, 0x06, 0x07, 0x07, 0x06,
535 0x08, 0x0a, 0x10, 0x0a, 0x0a, 0x09, 0x09, 0x0a, 0x14, 0x0e, 580 0x08, 0x0a, 0x10, 0x0a, 0x0a, 0x09, 0x09, 0x0a, 0x14, 0x0e,
@@ -639,6 +684,7 @@ static void sn9c102_urb_complete(struct urb *urb)
639 cam->stream = STREAM_OFF; 684 cam->stream = STREAM_OFF;
640 if ((*f)) 685 if ((*f))
641 (*f)->state = F_QUEUED; 686 (*f)->state = F_QUEUED;
687 cam->sof.bytesread = 0;
642 DBG(3, "Stream interrupted by application"); 688 DBG(3, "Stream interrupted by application");
643 wake_up(&cam->wait_stream); 689 wake_up(&cam->wait_stream);
644 } 690 }
@@ -676,6 +722,7 @@ static void sn9c102_urb_complete(struct urb *urb)
676 if (status) { 722 if (status) {
677 DBG(3, "Error in isochronous frame"); 723 DBG(3, "Error in isochronous frame");
678 (*f)->state = F_ERROR; 724 (*f)->state = F_ERROR;
725 cam->sof.bytesread = 0;
679 continue; 726 continue;
680 } 727 }
681 728
@@ -692,13 +739,13 @@ end_of_frame:
692 if (eof) 739 if (eof)
693 img = (eof > pos) ? eof - pos - 1 : 0; 740 img = (eof > pos) ? eof - pos - 1 : 0;
694 741
695 if ((*f)->buf.bytesused+img > imagesize) { 742 if ((*f)->buf.bytesused + img > imagesize) {
696 u32 b; 743 u32 b;
697 b = (*f)->buf.bytesused + img - 744 b = (*f)->buf.bytesused + img -
698 imagesize; 745 imagesize;
699 img = imagesize - (*f)->buf.bytesused; 746 img = imagesize - (*f)->buf.bytesused;
700 DBG(3, "Expected EOF not found: " 747 PDBGG("Expected EOF not found: video "
701 "video frame cut"); 748 "frame cut");
702 if (eof) 749 if (eof)
703 DBG(3, "Exceeded limit: +%u " 750 DBG(3, "Exceeded limit: +%u "
704 "bytes", (unsigned)(b)); 751 "bytes", (unsigned)(b));
@@ -719,11 +766,6 @@ end_of_frame:
719 V4L2_PIX_FMT_JPEG) && eof)) { 766 V4L2_PIX_FMT_JPEG) && eof)) {
720 u32 b; 767 u32 b;
721 768
722 if (cam->sensor.pix_format.pixelformat
723 == V4L2_PIX_FMT_JPEG)
724 sn9c102_write_eoimarker(cam,
725 (*f));
726
727 b = (*f)->buf.bytesused; 769 b = (*f)->buf.bytesused;
728 (*f)->state = F_DONE; 770 (*f)->state = F_DONE;
729 (*f)->buf.sequence= ++cam->frame_count; 771 (*f)->buf.sequence= ++cam->frame_count;
@@ -741,7 +783,7 @@ end_of_frame:
741 spin_unlock(&cam->queue_lock); 783 spin_unlock(&cam->queue_lock);
742 784
743 memcpy(cam->sysfs.frame_header, 785 memcpy(cam->sysfs.frame_header,
744 cam->sof_header, soflen); 786 cam->sof.header, soflen);
745 787
746 DBG(3, "Video frame captured: %lu " 788 DBG(3, "Video frame captured: %lu "
747 "bytes", (unsigned long)(b)); 789 "bytes", (unsigned long)(b));
@@ -791,7 +833,13 @@ start_of_frame:
791 V4L2_PIX_FMT_SN9C10X || 833 V4L2_PIX_FMT_SN9C10X ||
792 cam->sensor.pix_format.pixelformat == 834 cam->sensor.pix_format.pixelformat ==
793 V4L2_PIX_FMT_JPEG) { 835 V4L2_PIX_FMT_JPEG) {
794 eof = sof - soflen; 836 if (sof - pos >= soflen) {
837 eof = sof - soflen;
838 } else { /* remove header */
839 eof = pos;
840 (*f)->buf.bytesused -=
841 (soflen - (sof - pos));
842 }
795 goto end_of_frame; 843 goto end_of_frame;
796 } else { 844 } else {
797 DBG(3, "SOF before expected EOF after " 845 DBG(3, "SOF before expected EOF after "
@@ -878,6 +926,7 @@ static int sn9c102_start_transfer(struct sn9c102_device* cam)
878 } 926 }
879 927
880 cam->frame_current = NULL; 928 cam->frame_current = NULL;
929 cam->sof.bytesread = 0;
881 930
882 for (i = 0; i < SN9C102_URBS; i++) { 931 for (i = 0; i < SN9C102_URBS; i++) {
883 err = usb_submit_urb(cam->urb[i], GFP_KERNEL); 932 err = usb_submit_urb(cam->urb[i], GFP_KERNEL);
@@ -959,9 +1008,9 @@ static u16 sn9c102_strtou16(const char* buff, size_t len, ssize_t* count)
959 1008
960 if (len < 6) { 1009 if (len < 6) {
961 strncpy(str, buff, len); 1010 strncpy(str, buff, len);
962 str[len+1] = '\0'; 1011 str[len] = '\0';
963 } else { 1012 } else {
964 strncpy(str, buff, 4); 1013 strncpy(str, buff, 6);
965 str[6] = '\0'; 1014 str[6] = '\0';
966 } 1015 }
967 1016
@@ -1062,7 +1111,7 @@ static ssize_t sn9c102_show_val(struct class_device* cd, char* buf)
1062 1111
1063 count = sprintf(buf, "%d\n", val); 1112 count = sprintf(buf, "%d\n", val);
1064 1113
1065 DBG(3, "Read bytes: %zd", count); 1114 DBG(3, "Read bytes: %zd, value: %d", count, val);
1066 1115
1067 mutex_unlock(&sn9c102_sysfs_lock); 1116 mutex_unlock(&sn9c102_sysfs_lock);
1068 1117
@@ -1197,7 +1246,7 @@ static ssize_t sn9c102_show_i2c_val(struct class_device* cd, char* buf)
1197 1246
1198 count = sprintf(buf, "%d\n", val); 1247 count = sprintf(buf, "%d\n", val);
1199 1248
1200 DBG(3, "Read bytes: %zd", count); 1249 DBG(3, "Read bytes: %zd, value: %d", count, val);
1201 1250
1202 mutex_unlock(&sn9c102_sysfs_lock); 1251 mutex_unlock(&sn9c102_sysfs_lock);
1203 1252
@@ -1371,35 +1420,35 @@ static CLASS_DEVICE_ATTR(frame_header, S_IRUGO,
1371 1420
1372static int sn9c102_create_sysfs(struct sn9c102_device* cam) 1421static int sn9c102_create_sysfs(struct sn9c102_device* cam)
1373{ 1422{
1374 struct video_device *v4ldev = cam->v4ldev; 1423 struct class_device *classdev = &(cam->v4ldev->class_dev);
1375 int err = 0; 1424 int err = 0;
1376 1425
1377 if ((err = video_device_create_file(v4ldev, &class_device_attr_reg))) 1426 if ((err = class_device_create_file(classdev, &class_device_attr_reg)))
1378 goto err_out; 1427 goto err_out;
1379 if ((err = video_device_create_file(v4ldev, &class_device_attr_val))) 1428 if ((err = class_device_create_file(classdev, &class_device_attr_val)))
1380 goto err_reg; 1429 goto err_reg;
1381 if ((err = video_device_create_file(v4ldev, 1430 if ((err = class_device_create_file(classdev,
1382 &class_device_attr_frame_header))) 1431 &class_device_attr_frame_header)))
1383 goto err_val; 1432 goto err_val;
1384 1433
1385 if (cam->sensor.sysfs_ops) { 1434 if (cam->sensor.sysfs_ops) {
1386 if ((err = video_device_create_file(v4ldev, 1435 if ((err = class_device_create_file(classdev,
1387 &class_device_attr_i2c_reg))) 1436 &class_device_attr_i2c_reg)))
1388 goto err_frame_header; 1437 goto err_frame_header;
1389 if ((err = video_device_create_file(v4ldev, 1438 if ((err = class_device_create_file(classdev,
1390 &class_device_attr_i2c_val))) 1439 &class_device_attr_i2c_val)))
1391 goto err_i2c_reg; 1440 goto err_i2c_reg;
1392 } 1441 }
1393 1442
1394 if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102) { 1443 if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102) {
1395 if ((err = video_device_create_file(v4ldev, 1444 if ((err = class_device_create_file(classdev,
1396 &class_device_attr_green))) 1445 &class_device_attr_green)))
1397 goto err_i2c_val; 1446 goto err_i2c_val;
1398 } else { 1447 } else {
1399 if ((err = video_device_create_file(v4ldev, 1448 if ((err = class_device_create_file(classdev,
1400 &class_device_attr_blue))) 1449 &class_device_attr_blue)))
1401 goto err_i2c_val; 1450 goto err_i2c_val;
1402 if ((err = video_device_create_file(v4ldev, 1451 if ((err = class_device_create_file(classdev,
1403 &class_device_attr_red))) 1452 &class_device_attr_red)))
1404 goto err_blue; 1453 goto err_blue;
1405 } 1454 }
@@ -1407,19 +1456,19 @@ static int sn9c102_create_sysfs(struct sn9c102_device* cam)
1407 return 0; 1456 return 0;
1408 1457
1409err_blue: 1458err_blue:
1410 video_device_remove_file(v4ldev, &class_device_attr_blue); 1459 class_device_remove_file(classdev, &class_device_attr_blue);
1411err_i2c_val: 1460err_i2c_val:
1412 if (cam->sensor.sysfs_ops) 1461 if (cam->sensor.sysfs_ops)
1413 video_device_remove_file(v4ldev, &class_device_attr_i2c_val); 1462 class_device_remove_file(classdev, &class_device_attr_i2c_val);
1414err_i2c_reg: 1463err_i2c_reg:
1415 if (cam->sensor.sysfs_ops) 1464 if (cam->sensor.sysfs_ops)
1416 video_device_remove_file(v4ldev, &class_device_attr_i2c_reg); 1465 class_device_remove_file(classdev, &class_device_attr_i2c_reg);
1417err_frame_header: 1466err_frame_header:
1418 video_device_remove_file(v4ldev, &class_device_attr_frame_header); 1467 class_device_remove_file(classdev, &class_device_attr_frame_header);
1419err_val: 1468err_val:
1420 video_device_remove_file(v4ldev, &class_device_attr_val); 1469 class_device_remove_file(classdev, &class_device_attr_val);
1421err_reg: 1470err_reg:
1422 video_device_remove_file(v4ldev, &class_device_attr_reg); 1471 class_device_remove_file(classdev, &class_device_attr_reg);
1423err_out: 1472err_out:
1424 return err; 1473 return err;
1425} 1474}
@@ -1477,10 +1526,10 @@ sn9c102_set_compression(struct sn9c102_device* cam,
1477 case BRIDGE_SN9C101: 1526 case BRIDGE_SN9C101:
1478 case BRIDGE_SN9C102: 1527 case BRIDGE_SN9C102:
1479 case BRIDGE_SN9C103: 1528 case BRIDGE_SN9C103:
1480 if (compression->quality == 0) 1529 if (compression->quality == 0)
1481 err += sn9c102_write_reg(cam, cam->reg[0x17] | 0x01, 1530 err += sn9c102_write_reg(cam, cam->reg[0x17] | 0x01,
1482 0x17); 1531 0x17);
1483 else if (compression->quality == 1) 1532 else if (compression->quality == 1)
1484 err += sn9c102_write_reg(cam, cam->reg[0x17] & 0xfe, 1533 err += sn9c102_write_reg(cam, cam->reg[0x17] & 0xfe,
1485 0x17); 1534 0x17);
1486 break; 1535 break;
@@ -1489,10 +1538,10 @@ sn9c102_set_compression(struct sn9c102_device* cam,
1489 if (compression->quality == 0) { 1538 if (compression->quality == 0) {
1490 for (i = 0; i <= 63; i++) { 1539 for (i = 0; i <= 63; i++) {
1491 err += sn9c102_write_reg(cam, 1540 err += sn9c102_write_reg(cam,
1492 SN9C102_Y_QTABLE0[i], 1541 SN9C102_Y_QTABLE1[i],
1493 0x100 + i); 1542 0x100 + i);
1494 err += sn9c102_write_reg(cam, 1543 err += sn9c102_write_reg(cam,
1495 SN9C102_UV_QTABLE0[i], 1544 SN9C102_UV_QTABLE1[i],
1496 0x140 + i); 1545 0x140 + i);
1497 } 1546 }
1498 err += sn9c102_write_reg(cam, cam->reg[0x18] & 0xbf, 1547 err += sn9c102_write_reg(cam, cam->reg[0x18] & 0xbf,
@@ -1597,9 +1646,13 @@ static int sn9c102_init(struct sn9c102_device* cam)
1597 if (cam->bridge == BRIDGE_SN9C101 || 1646 if (cam->bridge == BRIDGE_SN9C101 ||
1598 cam->bridge == BRIDGE_SN9C102 || 1647 cam->bridge == BRIDGE_SN9C102 ||
1599 cam->bridge == BRIDGE_SN9C103) { 1648 cam->bridge == BRIDGE_SN9C103) {
1649 if (s->pix_format.pixelformat == V4L2_PIX_FMT_JPEG)
1650 s->pix_format.pixelformat= V4L2_PIX_FMT_SBGGR8;
1600 cam->compression.quality = cam->reg[0x17] & 0x01 ? 1651 cam->compression.quality = cam->reg[0x17] & 0x01 ?
1601 0 : 1; 1652 0 : 1;
1602 } else { 1653 } else {
1654 if (s->pix_format.pixelformat == V4L2_PIX_FMT_SN9C10X)
1655 s->pix_format.pixelformat = V4L2_PIX_FMT_JPEG;
1603 cam->compression.quality = cam->reg[0x18] & 0x40 ? 1656 cam->compression.quality = cam->reg[0x18] & 0x40 ?
1604 0 : 1; 1657 0 : 1;
1605 err += sn9c102_set_compression(cam, &cam->compression); 1658 err += sn9c102_set_compression(cam, &cam->compression);
@@ -1805,7 +1858,7 @@ sn9c102_read(struct file* filp, char __user * buf, size_t count, loff_t* f_pos)
1805 DBG(3, "Close and open the device again to choose " 1858 DBG(3, "Close and open the device again to choose "
1806 "the read method"); 1859 "the read method");
1807 mutex_unlock(&cam->fileop_mutex); 1860 mutex_unlock(&cam->fileop_mutex);
1808 return -EINVAL; 1861 return -EBUSY;
1809 } 1862 }
1810 1863
1811 if (cam->io == IO_NONE) { 1864 if (cam->io == IO_NONE) {
@@ -1845,16 +1898,16 @@ sn9c102_read(struct file* filp, char __user * buf, size_t count, loff_t* f_pos)
1845 return err; 1898 return err;
1846 } 1899 }
1847 } else { 1900 } else {
1848 timeout = wait_event_interruptible_timeout 1901 timeout = wait_event_interruptible_timeout
1849 ( cam->wait_frame, 1902 ( cam->wait_frame,
1850 (!list_empty(&cam->outqueue)) || 1903 (!list_empty(&cam->outqueue)) ||
1851 (cam->state & DEV_DISCONNECTED) || 1904 (cam->state & DEV_DISCONNECTED) ||
1852 (cam->state & DEV_MISCONFIGURED), 1905 (cam->state & DEV_MISCONFIGURED),
1853 cam->module_param.frame_timeout * 1906 cam->module_param.frame_timeout *
1854 1000 * msecs_to_jiffies(1) ); 1907 1000 * msecs_to_jiffies(1) );
1855 if (timeout < 0) { 1908 if (timeout < 0) {
1856 mutex_unlock(&cam->fileop_mutex); 1909 mutex_unlock(&cam->fileop_mutex);
1857 return timeout; 1910 return timeout;
1858 } else if (timeout == 0 && 1911 } else if (timeout == 0 &&
1859 !(cam->state & DEV_DISCONNECTED)) { 1912 !(cam->state & DEV_DISCONNECTED)) {
1860 DBG(1, "Video frame timeout elapsed"); 1913 DBG(1, "Video frame timeout elapsed");
@@ -2001,7 +2054,12 @@ static int sn9c102_mmap(struct file* filp, struct vm_area_struct *vma)
2001 return -EIO; 2054 return -EIO;
2002 } 2055 }
2003 2056
2004 if (cam->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) || 2057 if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
2058 mutex_unlock(&cam->fileop_mutex);
2059 return -EACCES;
2060 }
2061
2062 if (cam->io != IO_MMAP ||
2005 size != PAGE_ALIGN(cam->frame[0].buf.length)) { 2063 size != PAGE_ALIGN(cam->frame[0].buf.length)) {
2006 mutex_unlock(&cam->fileop_mutex); 2064 mutex_unlock(&cam->fileop_mutex);
2007 return -EINVAL; 2065 return -EINVAL;
@@ -2267,7 +2325,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2267 if (cam->frame[i].vma_use_count) { 2325 if (cam->frame[i].vma_use_count) {
2268 DBG(3, "VIDIOC_S_CROP failed. " 2326 DBG(3, "VIDIOC_S_CROP failed. "
2269 "Unmap the buffers first."); 2327 "Unmap the buffers first.");
2270 return -EINVAL; 2328 return -EBUSY;
2271 } 2329 }
2272 2330
2273 /* Preserve R,G or B origin */ 2331 /* Preserve R,G or B origin */
@@ -2410,8 +2468,8 @@ sn9c102_vidioc_enum_fmt(struct sn9c102_device* cam, void __user * arg)
2410 case BRIDGE_SN9C101: 2468 case BRIDGE_SN9C101:
2411 case BRIDGE_SN9C102: 2469 case BRIDGE_SN9C102:
2412 case BRIDGE_SN9C103: 2470 case BRIDGE_SN9C103:
2413 strcpy(fmtd.description, "compressed"); 2471 strcpy(fmtd.description, "compressed");
2414 fmtd.pixelformat = V4L2_PIX_FMT_SN9C10X; 2472 fmtd.pixelformat = V4L2_PIX_FMT_SN9C10X;
2415 break; 2473 break;
2416 case BRIDGE_SN9C105: 2474 case BRIDGE_SN9C105:
2417 case BRIDGE_SN9C120: 2475 case BRIDGE_SN9C120:
@@ -2445,8 +2503,10 @@ sn9c102_vidioc_g_fmt(struct sn9c102_device* cam, void __user * arg)
2445 if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 2503 if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
2446 return -EINVAL; 2504 return -EINVAL;
2447 2505
2448 pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_SN9C10X || 2506 pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_JPEG) ?
2449 pfmt->pixelformat==V4L2_PIX_FMT_JPEG) 2507 V4L2_COLORSPACE_JPEG : V4L2_COLORSPACE_SRGB;
2508 pfmt->bytesperline = (pfmt->pixelformat == V4L2_PIX_FMT_SN9C10X ||
2509 pfmt->pixelformat == V4L2_PIX_FMT_JPEG)
2450 ? 0 : (pfmt->width * pfmt->priv) / 8; 2510 ? 0 : (pfmt->width * pfmt->priv) / 8;
2451 pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8); 2511 pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
2452 pfmt->field = V4L2_FIELD_NONE; 2512 pfmt->field = V4L2_FIELD_NONE;
@@ -2521,9 +2581,9 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2521 case BRIDGE_SN9C101: 2581 case BRIDGE_SN9C101:
2522 case BRIDGE_SN9C102: 2582 case BRIDGE_SN9C102:
2523 case BRIDGE_SN9C103: 2583 case BRIDGE_SN9C103:
2524 if (pix->pixelformat != V4L2_PIX_FMT_SN9C10X && 2584 if (pix->pixelformat != V4L2_PIX_FMT_SN9C10X &&
2525 pix->pixelformat != V4L2_PIX_FMT_SBGGR8) 2585 pix->pixelformat != V4L2_PIX_FMT_SBGGR8)
2526 pix->pixelformat = pfmt->pixelformat; 2586 pix->pixelformat = pfmt->pixelformat;
2527 break; 2587 break;
2528 case BRIDGE_SN9C105: 2588 case BRIDGE_SN9C105:
2529 case BRIDGE_SN9C120: 2589 case BRIDGE_SN9C120:
@@ -2533,7 +2593,8 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2533 break; 2593 break;
2534 } 2594 }
2535 pix->priv = pfmt->priv; /* bpp */ 2595 pix->priv = pfmt->priv; /* bpp */
2536 pix->colorspace = pfmt->colorspace; 2596 pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_JPEG) ?
2597 V4L2_COLORSPACE_JPEG : V4L2_COLORSPACE_SRGB;
2537 pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_SN9C10X || 2598 pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_SN9C10X ||
2538 pix->pixelformat == V4L2_PIX_FMT_JPEG) 2599 pix->pixelformat == V4L2_PIX_FMT_JPEG)
2539 ? 0 : (pix->width * pix->priv) / 8; 2600 ? 0 : (pix->width * pix->priv) / 8;
@@ -2551,7 +2612,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2551 if (cam->frame[i].vma_use_count) { 2612 if (cam->frame[i].vma_use_count) {
2552 DBG(3, "VIDIOC_S_FMT failed. Unmap the " 2613 DBG(3, "VIDIOC_S_FMT failed. Unmap the "
2553 "buffers first."); 2614 "buffers first.");
2554 return -EINVAL; 2615 return -EBUSY;
2555 } 2616 }
2556 2617
2557 if (cam->stream == STREAM_ON) 2618 if (cam->stream == STREAM_ON)
@@ -2666,14 +2727,14 @@ sn9c102_vidioc_reqbufs(struct sn9c102_device* cam, void __user * arg)
2666 if (cam->io == IO_READ) { 2727 if (cam->io == IO_READ) {
2667 DBG(3, "Close and open the device again to choose the mmap " 2728 DBG(3, "Close and open the device again to choose the mmap "
2668 "I/O method"); 2729 "I/O method");
2669 return -EINVAL; 2730 return -EBUSY;
2670 } 2731 }
2671 2732
2672 for (i = 0; i < cam->nbuffers; i++) 2733 for (i = 0; i < cam->nbuffers; i++)
2673 if (cam->frame[i].vma_use_count) { 2734 if (cam->frame[i].vma_use_count) {
2674 DBG(3, "VIDIOC_REQBUFS failed. Previous buffers are " 2735 DBG(3, "VIDIOC_REQBUFS failed. Previous buffers are "
2675 "still mapped."); 2736 "still mapped.");
2676 return -EINVAL; 2737 return -EBUSY;
2677 } 2738 }
2678 2739
2679 if (cam->stream == STREAM_ON) 2740 if (cam->stream == STREAM_ON)
@@ -2785,15 +2846,15 @@ sn9c102_vidioc_dqbuf(struct sn9c102_device* cam, struct file* filp,
2785 if (err) 2846 if (err)
2786 return err; 2847 return err;
2787 } else { 2848 } else {
2788 timeout = wait_event_interruptible_timeout 2849 timeout = wait_event_interruptible_timeout
2789 ( cam->wait_frame, 2850 ( cam->wait_frame,
2790 (!list_empty(&cam->outqueue)) || 2851 (!list_empty(&cam->outqueue)) ||
2791 (cam->state & DEV_DISCONNECTED) || 2852 (cam->state & DEV_DISCONNECTED) ||
2792 (cam->state & DEV_MISCONFIGURED), 2853 (cam->state & DEV_MISCONFIGURED),
2793 cam->module_param.frame_timeout * 2854 cam->module_param.frame_timeout *
2794 1000 * msecs_to_jiffies(1) ); 2855 1000 * msecs_to_jiffies(1) );
2795 if (timeout < 0) 2856 if (timeout < 0)
2796 return timeout; 2857 return timeout;
2797 else if (timeout == 0 && 2858 else if (timeout == 0 &&
2798 !(cam->state & DEV_DISCONNECTED)) { 2859 !(cam->state & DEV_DISCONNECTED)) {
2799 DBG(1, "Video frame timeout elapsed"); 2860 DBG(1, "Video frame timeout elapsed");
@@ -2837,9 +2898,6 @@ sn9c102_vidioc_streamon(struct sn9c102_device* cam, void __user * arg)
2837 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP) 2898 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
2838 return -EINVAL; 2899 return -EINVAL;
2839 2900
2840 if (list_empty(&cam->inqueue))
2841 return -EINVAL;
2842
2843 cam->stream = STREAM_ON; 2901 cam->stream = STREAM_ON;
2844 2902
2845 DBG(3, "Stream on"); 2903 DBG(3, "Stream on");
@@ -3166,8 +3224,8 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3166 3224
3167 r = sn9c102_read_reg(cam, 0x00); 3225 r = sn9c102_read_reg(cam, 0x00);
3168 if (r < 0 || (r != 0x10 && r != 0x11 && r != 0x12)) { 3226 if (r < 0 || (r != 0x10 && r != 0x11 && r != 0x12)) {
3169 DBG(1, "Sorry, this is not a SN9C1xx based camera " 3227 DBG(1, "Sorry, this is not a SN9C1xx-based camera "
3170 "(vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); 3228 "(vid:pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
3171 err = -ENODEV; 3229 err = -ENODEV;
3172 goto fail; 3230 goto fail;
3173 } 3231 }
@@ -3177,19 +3235,19 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3177 case BRIDGE_SN9C101: 3235 case BRIDGE_SN9C101:
3178 case BRIDGE_SN9C102: 3236 case BRIDGE_SN9C102:
3179 DBG(2, "SN9C10[12] PC Camera Controller detected " 3237 DBG(2, "SN9C10[12] PC Camera Controller detected "
3180 "(vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); 3238 "(vid:pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
3181 break; 3239 break;
3182 case BRIDGE_SN9C103: 3240 case BRIDGE_SN9C103:
3183 DBG(2, "SN9C103 PC Camera Controller detected " 3241 DBG(2, "SN9C103 PC Camera Controller detected "
3184 "(vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); 3242 "(vid:pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
3185 break; 3243 break;
3186 case BRIDGE_SN9C105: 3244 case BRIDGE_SN9C105:
3187 DBG(2, "SN9C105 PC Camera Controller detected " 3245 DBG(2, "SN9C105 PC Camera Controller detected "
3188 "(vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); 3246 "(vid:pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
3189 break; 3247 break;
3190 case BRIDGE_SN9C120: 3248 case BRIDGE_SN9C120:
3191 DBG(2, "SN9C120 PC Camera Controller detected " 3249 DBG(2, "SN9C120 PC Camera Controller detected "
3192 "(vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); 3250 "(vid:pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
3193 break; 3251 break;
3194 } 3252 }
3195 3253
@@ -3260,6 +3318,8 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3260 "device controlling. Error #%d", err); 3318 "device controlling. Error #%d", err);
3261#else 3319#else
3262 DBG(2, "Optional device control through 'sysfs' interface disabled"); 3320 DBG(2, "Optional device control through 'sysfs' interface disabled");
3321 DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
3322 "configuration option to enable it.");
3263#endif 3323#endif
3264 3324
3265 usb_set_intfdata(intf, cam); 3325 usb_set_intfdata(intf, cam);
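Two smaller fixes in the core hunks above are worth spelling out. sn9c102_strtou16() previously wrote its terminator one byte past the copied data in the short path (str[len + 1]) and copied only 4 of the 6 significant characters in the long path; the new code terminates at str[len] and copies 6 bytes before terminating at str[6]. A standalone userspace sketch of the corrected copy, assuming a 7-byte scratch buffer and using strtoul() in place of the kernel's parser (both are assumptions; neither declaration appears in the hunk):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static unsigned short strtou16_sketch(const char *buff, size_t len)
	{
		char str[7];

		if (len < 6) {
			strncpy(str, buff, len);
			str[len] = '\0';       /* was str[len + 1]: str[len] stayed uninitialized */
		} else {
			strncpy(str, buff, 6); /* was 4 bytes: str[4] and str[5] stayed stale */
			str[6] = '\0';
		}

		return (unsigned short)strtoul(str, NULL, 0);
	}

	int main(void)
	{
		printf("0x%04x\n", strtou16_sketch("0x60c0 extra input", 18));
		return 0;
	}

The related switch from -EINVAL to -EBUSY in several ioctl paths reflects that the request itself is valid but the buffers or I/O method are still in use.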
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 3a682eca6c65..f49bd8c5b86e 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -89,16 +89,22 @@ static const struct usb_device_id sn9c102_id_table[] = {
89 { SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), }, 89 { SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
90 { SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), }, 90 { SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
91 { SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), }, 91 { SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
92 { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), },
92 { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, 93 { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), },
93 { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, 94 { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), },
94 { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, 95 { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), },
95 { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, 96 { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), },
97 { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), },
96 { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, 98 { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), },
97 { SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), }, 99 { SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), },
98 { SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), }, 100 { SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), },
99 { SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), }, 101 { SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
100 /* SN9C120 */ 102 /* SN9C120 */
103 { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
104 { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
105 { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
101 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), }, 106 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
107 { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), },
102 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), }, 108 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
103 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), }, 109 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
104 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), }, 110 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
@@ -114,12 +120,15 @@ static const struct usb_device_id sn9c102_id_table[] = {
114 Functions must return 0 on success, the appropriate error otherwise. 120 Functions must return 0 on success, the appropriate error otherwise.
115*/ 121*/
116extern int sn9c102_probe_hv7131d(struct sn9c102_device* cam); 122extern int sn9c102_probe_hv7131d(struct sn9c102_device* cam);
123extern int sn9c102_probe_hv7131r(struct sn9c102_device* cam);
117extern int sn9c102_probe_mi0343(struct sn9c102_device* cam); 124extern int sn9c102_probe_mi0343(struct sn9c102_device* cam);
125extern int sn9c102_probe_mi0360(struct sn9c102_device* cam);
118extern int sn9c102_probe_ov7630(struct sn9c102_device* cam); 126extern int sn9c102_probe_ov7630(struct sn9c102_device* cam);
119extern int sn9c102_probe_ov7660(struct sn9c102_device* cam); 127extern int sn9c102_probe_ov7660(struct sn9c102_device* cam);
120extern int sn9c102_probe_pas106b(struct sn9c102_device* cam); 128extern int sn9c102_probe_pas106b(struct sn9c102_device* cam);
121extern int sn9c102_probe_pas202bcb(struct sn9c102_device* cam); 129extern int sn9c102_probe_pas202bcb(struct sn9c102_device* cam);
122extern int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam); 130extern int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam);
131extern int sn9c102_probe_tas5110d(struct sn9c102_device* cam);
123extern int sn9c102_probe_tas5130d1b(struct sn9c102_device* cam); 132extern int sn9c102_probe_tas5130d1b(struct sn9c102_device* cam);
124 133
125/* 134/*
@@ -128,13 +137,16 @@ extern int sn9c102_probe_tas5130d1b(struct sn9c102_device* cam);
128 the order of the list below, from top to bottom. 137 the order of the list below, from top to bottom.
129*/ 138*/
130static int (*sn9c102_sensor_table[])(struct sn9c102_device*) = { 139static int (*sn9c102_sensor_table[])(struct sn9c102_device*) = {
140 &sn9c102_probe_hv7131d, /* strong detection based on SENSOR ids */
141 &sn9c102_probe_hv7131r, /* strong detection based on SENSOR ids */
131 &sn9c102_probe_mi0343, /* strong detection based on SENSOR ids */ 142 &sn9c102_probe_mi0343, /* strong detection based on SENSOR ids */
143 &sn9c102_probe_mi0360, /* strong detection based on SENSOR ids */
132 &sn9c102_probe_pas106b, /* strong detection based on SENSOR ids */ 144 &sn9c102_probe_pas106b, /* strong detection based on SENSOR ids */
133 &sn9c102_probe_pas202bcb, /* strong detection based on SENSOR ids */ 145 &sn9c102_probe_pas202bcb, /* strong detection based on SENSOR ids */
134 &sn9c102_probe_hv7131d, /* strong detection based on SENSOR ids */
135 &sn9c102_probe_ov7630, /* strong detection based on SENSOR ids */ 146 &sn9c102_probe_ov7630, /* strong detection based on SENSOR ids */
136 &sn9c102_probe_ov7660, /* strong detection based on SENSOR ids */ 147 &sn9c102_probe_ov7660, /* strong detection based on SENSOR ids */
137 &sn9c102_probe_tas5110c1b, /* detection based on USB pid/vid */ 148 &sn9c102_probe_tas5110c1b, /* detection based on USB pid/vid */
149 &sn9c102_probe_tas5110d, /* detection based on USB pid/vid */
138 &sn9c102_probe_tas5130d1b, /* detection based on USB pid/vid */ 150 &sn9c102_probe_tas5130d1b, /* detection based on USB pid/vid */
139 NULL, 151 NULL,
140}; 152};
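The sn9c102_sensor_table[] above is tried in order during probing (the strong, sensor-ID-based detections first, then the weaker USB vid/pid-based ones), which is why the HV7131D entry moves to the top alongside the new HV7131R, MI-0360 and TAS5110D probes. The loop that consumes the table is not part of this diff; a purely hypothetical sketch of how a NULL-terminated probe table like this one is typically walked:

	/* Hypothetical illustration only: the function name is invented and the
	 * real detection loop lives elsewhere in the driver. Per the comment
	 * above, each probe attaches the sensor itself and returns 0 on success;
	 * assumes <linux/errno.h> and the declarations in this header. */
	static int sn9c102_detect_sensor_sketch(struct sn9c102_device *cam)
	{
		int i, err = -ENODEV;

		for (i = 0; sn9c102_sensor_table[i]; i++) {
			err = sn9c102_sensor_table[i](cam);
			if (!err)
				break;
		}

		return err;
	}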
diff --git a/drivers/media/video/sn9c102/sn9c102_hv7131d.c b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
index 7ae368f60d89..28a861aed044 100644
--- a/drivers/media/video/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
@@ -22,19 +22,13 @@
22#include "sn9c102_sensor.h" 22#include "sn9c102_sensor.h"
23 23
24 24
25static struct sn9c102_sensor hv7131d;
26
27
28static int hv7131d_init(struct sn9c102_device* cam) 25static int hv7131d_init(struct sn9c102_device* cam)
29{ 26{
30 int err = 0; 27 int err;
31 28
32 err += sn9c102_write_reg(cam, 0x00, 0x10); 29 err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11},
33 err += sn9c102_write_reg(cam, 0x00, 0x11); 30 {0x00, 0x14}, {0x60, 0x17},
34 err += sn9c102_write_reg(cam, 0x00, 0x14); 31 {0x0e, 0x18}, {0xf2, 0x19});
35 err += sn9c102_write_reg(cam, 0x60, 0x17);
36 err += sn9c102_write_reg(cam, 0x0e, 0x18);
37 err += sn9c102_write_reg(cam, 0xf2, 0x19);
38 32
39 err += sn9c102_i2c_write(cam, 0x01, 0x04); 33 err += sn9c102_i2c_write(cam, 0x01, 0x04);
40 err += sn9c102_i2c_write(cam, 0x02, 0x00); 34 err += sn9c102_i2c_write(cam, 0x02, 0x00);
@@ -153,7 +147,7 @@ static int hv7131d_set_pix_format(struct sn9c102_device* cam,
153static struct sn9c102_sensor hv7131d = { 147static struct sn9c102_sensor hv7131d = {
154 .name = "HV7131D", 148 .name = "HV7131D",
155 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", 149 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
156 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103, 150 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102,
157 .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, 151 .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE,
158 .frequency = SN9C102_I2C_100KHZ, 152 .frequency = SN9C102_I2C_100KHZ,
159 .interface = SN9C102_I2C_2WIRES, 153 .interface = SN9C102_I2C_2WIRES,
@@ -250,11 +244,10 @@ static struct sn9c102_sensor hv7131d = {
250 244
251int sn9c102_probe_hv7131d(struct sn9c102_device* cam) 245int sn9c102_probe_hv7131d(struct sn9c102_device* cam)
252{ 246{
253 int r0 = 0, r1 = 0, err = 0; 247 int r0 = 0, r1 = 0, err;
254 248
255 err += sn9c102_write_reg(cam, 0x01, 0x01); 249 err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01},
256 err += sn9c102_write_reg(cam, 0x00, 0x01); 250 {0x28, 0x17});
257 err += sn9c102_write_reg(cam, 0x28, 0x17);
258 if (err) 251 if (err)
259 return -EIO; 252 return -EIO;
260 253
@@ -263,7 +256,7 @@ int sn9c102_probe_hv7131d(struct sn9c102_device* cam)
263 if (r0 < 0 || r1 < 0) 256 if (r0 < 0 || r1 < 0)
264 return -EIO; 257 return -EIO;
265 258
266 if (r0 != 0x00 && r1 != 0x04) 259 if (r0 != 0x00 || r1 != 0x04)
267 return -ENODEV; 260 return -ENODEV;
268 261
269 sn9c102_attach_sensor(cam, &hv7131d); 262 sn9c102_attach_sensor(cam, &hv7131d);
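A pattern repeated across the sensor plug-ins in this series: runs of sn9c102_write_reg(cam, value, index) calls are collapsed into a single sn9c102_write_const_regs(cam, {value, index}, ...) call, with each brace pair keeping the same value/index order as the individual write it replaces. The helper's definition is not part of the hunks shown here; the before/after below is lifted from the hv7131d_init() hunk above:

	/* Before: one bridge register write per call (value first, register index second). */
	err += sn9c102_write_reg(cam, 0x00, 0x10);
	err += sn9c102_write_reg(cam, 0x00, 0x11);
	err += sn9c102_write_reg(cam, 0x00, 0x14);
	err += sn9c102_write_reg(cam, 0x60, 0x17);
	err += sn9c102_write_reg(cam, 0x0e, 0x18);
	err += sn9c102_write_reg(cam, 0xf2, 0x19);

	/* After: the same {value, index} pairs handed to one batched helper. */
	err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11},
				       {0x00, 0x14}, {0x60, 0x17},
				       {0x0e, 0x18}, {0xf2, 0x19});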
diff --git a/drivers/media/video/sn9c102/sn9c102_hv7131r.c b/drivers/media/video/sn9c102/sn9c102_hv7131r.c
new file mode 100644
index 000000000000..5a495baa5f95
--- /dev/null
+++ b/drivers/media/video/sn9c102/sn9c102_hv7131r.c
@@ -0,0 +1,366 @@
1/***************************************************************************
2 * Plug-in for HV7131R image sensor connected to the SN9C1xx PC Camera *
3 * Controllers *
4 * *
5 * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
6 * *
7 * This program is free software; you can redistribute it and/or modify *
8 * it under the terms of the GNU General Public License as published by *
9 * the Free Software Foundation; either version 2 of the License, or *
10 * (at your option) any later version. *
11 * *
12 * This program is distributed in the hope that it will be useful, *
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
15 * GNU General Public License for more details. *
16 * *
17 * You should have received a copy of the GNU General Public License *
18 * along with this program; if not, write to the Free Software *
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
20 ***************************************************************************/
21
22#include "sn9c102_sensor.h"
23
24
25static int hv7131r_init(struct sn9c102_device* cam)
26{
27 int err = 0;
28
29 switch (sn9c102_get_bridge(cam)) {
30 case BRIDGE_SN9C103:
31 err = sn9c102_write_const_regs(cam, {0x00, 0x03}, {0x1a, 0x04},
32 {0x20, 0x05}, {0x20, 0x06},
33 {0x03, 0x10}, {0x00, 0x14},
34 {0x60, 0x17}, {0x0a, 0x18},
35 {0xf0, 0x19}, {0x1d, 0x1a},
36 {0x10, 0x1b}, {0x02, 0x1c},
37 {0x03, 0x1d}, {0x0f, 0x1e},
38 {0x0c, 0x1f}, {0x00, 0x20},
39 {0x10, 0x21}, {0x20, 0x22},
40 {0x30, 0x23}, {0x40, 0x24},
41 {0x50, 0x25}, {0x60, 0x26},
42 {0x70, 0x27}, {0x80, 0x28},
43 {0x90, 0x29}, {0xa0, 0x2a},
44 {0xb0, 0x2b}, {0xc0, 0x2c},
45 {0xd0, 0x2d}, {0xe0, 0x2e},
46 {0xf0, 0x2f}, {0xff, 0x30});
47
48 break;
49 case BRIDGE_SN9C105:
50 case BRIDGE_SN9C120:
51 err = sn9c102_write_const_regs(cam, {0x44, 0x01}, {0x40, 0x02},
52 {0x00, 0x03}, {0x1a, 0x04},
53 {0x44, 0x05}, {0x3e, 0x06},
54 {0x1a, 0x07}, {0x03, 0x10},
55 {0x08, 0x14}, {0xa3, 0x17},
56 {0x4b, 0x18}, {0x00, 0x19},
57 {0x1d, 0x1a}, {0x10, 0x1b},
58 {0x02, 0x1c}, {0x03, 0x1d},
59 {0x0f, 0x1e}, {0x0c, 0x1f},
60 {0x00, 0x20}, {0x29, 0x21},
61 {0x40, 0x22}, {0x54, 0x23},
62 {0x66, 0x24}, {0x76, 0x25},
63 {0x85, 0x26}, {0x94, 0x27},
64 {0xa1, 0x28}, {0xae, 0x29},
65 {0xbb, 0x2a}, {0xc7, 0x2b},
66 {0xd3, 0x2c}, {0xde, 0x2d},
67 {0xea, 0x2e}, {0xf4, 0x2f},
68 {0xff, 0x30}, {0x00, 0x3F},
69 {0xC7, 0x40}, {0x01, 0x41},
70 {0x44, 0x42}, {0x00, 0x43},
71 {0x44, 0x44}, {0x00, 0x45},
72 {0x44, 0x46}, {0x00, 0x47},
73 {0xC7, 0x48}, {0x01, 0x49},
74 {0xC7, 0x4A}, {0x01, 0x4B},
75 {0xC7, 0x4C}, {0x01, 0x4D},
76 {0x44, 0x4E}, {0x00, 0x4F},
77 {0x44, 0x50}, {0x00, 0x51},
78 {0x44, 0x52}, {0x00, 0x53},
79 {0xC7, 0x54}, {0x01, 0x55},
80 {0xC7, 0x56}, {0x01, 0x57},
81 {0xC7, 0x58}, {0x01, 0x59},
82 {0x44, 0x5A}, {0x00, 0x5B},
83 {0x44, 0x5C}, {0x00, 0x5D},
84 {0x44, 0x5E}, {0x00, 0x5F},
85 {0xC7, 0x60}, {0x01, 0x61},
86 {0xC7, 0x62}, {0x01, 0x63},
87 {0xC7, 0x64}, {0x01, 0x65},
88 {0x44, 0x66}, {0x00, 0x67},
89 {0x44, 0x68}, {0x00, 0x69},
90 {0x44, 0x6A}, {0x00, 0x6B},
91 {0xC7, 0x6C}, {0x01, 0x6D},
92 {0xC7, 0x6E}, {0x01, 0x6F},
93 {0xC7, 0x70}, {0x01, 0x71},
94 {0x44, 0x72}, {0x00, 0x73},
95 {0x44, 0x74}, {0x00, 0x75},
96 {0x44, 0x76}, {0x00, 0x77},
97 {0xC7, 0x78}, {0x01, 0x79},
98 {0xC7, 0x7A}, {0x01, 0x7B},
99 {0xC7, 0x7C}, {0x01, 0x7D},
100 {0x44, 0x7E}, {0x00, 0x7F},
101 {0x14, 0x84}, {0x00, 0x85},
102 {0x27, 0x86}, {0x00, 0x87},
103 {0x07, 0x88}, {0x00, 0x89},
104 {0xEC, 0x8A}, {0x0f, 0x8B},
105 {0xD8, 0x8C}, {0x0f, 0x8D},
106 {0x3D, 0x8E}, {0x00, 0x8F},
107 {0x3D, 0x90}, {0x00, 0x91},
108 {0xCD, 0x92}, {0x0f, 0x93},
109 {0xf7, 0x94}, {0x0f, 0x95},
110 {0x0C, 0x96}, {0x00, 0x97},
111 {0x00, 0x98}, {0x66, 0x99},
112 {0x05, 0x9A}, {0x00, 0x9B},
113 {0x04, 0x9C}, {0x00, 0x9D},
114 {0x08, 0x9E}, {0x00, 0x9F},
115 {0x2D, 0xC0}, {0x2D, 0xC1},
116 {0x3A, 0xC2}, {0x05, 0xC3},
117 {0x04, 0xC4}, {0x3F, 0xC5},
118 {0x00, 0xC6}, {0x00, 0xC7},
119 {0x50, 0xC8}, {0x3C, 0xC9},
120 {0x28, 0xCA}, {0xD8, 0xCB},
121 {0x14, 0xCC}, {0xEC, 0xCD},
122 {0x32, 0xCE}, {0xDD, 0xCF},
123 {0x32, 0xD0}, {0xDD, 0xD1},
124 {0x6A, 0xD2}, {0x50, 0xD3},
125 {0x00, 0xD4}, {0x00, 0xD5},
126 {0x00, 0xD6});
127 break;
128 default:
129 break;
130 }
131
132 err += sn9c102_i2c_write(cam, 0x20, 0x00);
133 err += sn9c102_i2c_write(cam, 0x21, 0xd6);
134 err += sn9c102_i2c_write(cam, 0x25, 0x06);
135
136 return err;
137}
138
139
140static int hv7131r_get_ctrl(struct sn9c102_device* cam,
141 struct v4l2_control* ctrl)
142{
143 switch (ctrl->id) {
144 case V4L2_CID_GAIN:
145 if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0)
146 return -EIO;
147 return 0;
148 case V4L2_CID_RED_BALANCE:
149 if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0)
150 return -EIO;
151 ctrl->value = ctrl->value & 0x3f;
152 return 0;
153 case V4L2_CID_BLUE_BALANCE:
154 if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0)
155 return -EIO;
156 ctrl->value = ctrl->value & 0x3f;
157 return 0;
158 case SN9C102_V4L2_CID_GREEN_BALANCE:
159 if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0)
160 return -EIO;
161 ctrl->value = ctrl->value & 0x3f;
162 return 0;
163 case V4L2_CID_BLACK_LEVEL:
164 if ((ctrl->value = sn9c102_i2c_read(cam, 0x01)) < 0)
165 return -EIO;
166 ctrl->value = (ctrl->value & 0x08) ? 1 : 0;
167 return 0;
168 default:
169 return -EINVAL;
170 }
171}
172
173
174static int hv7131r_set_ctrl(struct sn9c102_device* cam,
175 const struct v4l2_control* ctrl)
176{
177 int err = 0;
178
179 switch (ctrl->id) {
180 case V4L2_CID_GAIN:
181 err += sn9c102_i2c_write(cam, 0x30, ctrl->value);
182 break;
183 case V4L2_CID_RED_BALANCE:
184 err += sn9c102_i2c_write(cam, 0x31, ctrl->value);
185 break;
186 case V4L2_CID_BLUE_BALANCE:
187 err += sn9c102_i2c_write(cam, 0x33, ctrl->value);
188 break;
189 case SN9C102_V4L2_CID_GREEN_BALANCE:
190 err += sn9c102_i2c_write(cam, 0x32, ctrl->value);
191 break;
192 case V4L2_CID_BLACK_LEVEL:
193 {
194 int r = sn9c102_i2c_read(cam, 0x01);
195 if (r < 0)
196 return -EIO;
197 err += sn9c102_i2c_write(cam, 0x01,
198 (ctrl->value<<3) | (r&0xf7));
199 }
200 break;
201 default:
202 return -EINVAL;
203 }
204
205 return err ? -EIO : 0;
206}
207
208
209static int hv7131r_set_crop(struct sn9c102_device* cam,
210 const struct v4l2_rect* rect)
211{
212 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
213 int err = 0;
214 u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1,
215 v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
216
217 err += sn9c102_write_reg(cam, h_start, 0x12);
218 err += sn9c102_write_reg(cam, v_start, 0x13);
219
220 return err;
221}
222
223
224static int hv7131r_set_pix_format(struct sn9c102_device* cam,
225 const struct v4l2_pix_format* pix)
226{
227 int err = 0;
228
229 switch (sn9c102_get_bridge(cam)) {
230 case BRIDGE_SN9C103:
231 if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) {
232 err += sn9c102_write_reg(cam, 0xa0, 0x19);
233 err += sn9c102_i2c_write(cam, 0x01, 0x04);
234 } else {
235 err += sn9c102_write_reg(cam, 0x30, 0x19);
236 err += sn9c102_i2c_write(cam, 0x01, 0x04);
237 }
238 break;
239 case BRIDGE_SN9C105:
240 case BRIDGE_SN9C120:
241 if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) {
242 err += sn9c102_write_reg(cam, 0xa5, 0x17);
243 err += sn9c102_i2c_write(cam, 0x01, 0x24);
244 } else {
245 err += sn9c102_write_reg(cam, 0xa3, 0x17);
246 err += sn9c102_i2c_write(cam, 0x01, 0x04);
247 }
248 break;
249 default:
250 break;
251 }
252
253 return err;
254}
255
256
257static struct sn9c102_sensor hv7131r = {
258 .name = "HV7131R",
259 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
260 .supported_bridge = BRIDGE_SN9C103 | BRIDGE_SN9C105 | BRIDGE_SN9C120,
261 .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE,
262 .frequency = SN9C102_I2C_100KHZ,
263 .interface = SN9C102_I2C_2WIRES,
264 .i2c_slave_id = 0x11,
265 .init = &hv7131r_init,
266 .qctrl = {
267 {
268 .id = V4L2_CID_GAIN,
269 .type = V4L2_CTRL_TYPE_INTEGER,
270 .name = "global gain",
271 .minimum = 0x00,
272 .maximum = 0xff,
273 .step = 0x01,
274 .default_value = 0x40,
275 .flags = 0,
276 },
277 {
278 .id = V4L2_CID_RED_BALANCE,
279 .type = V4L2_CTRL_TYPE_INTEGER,
280 .name = "red balance",
281 .minimum = 0x00,
282 .maximum = 0x3f,
283 .step = 0x01,
284 .default_value = 0x08,
285 .flags = 0,
286 },
287 {
288 .id = V4L2_CID_BLUE_BALANCE,
289 .type = V4L2_CTRL_TYPE_INTEGER,
290 .name = "blue balance",
291 .minimum = 0x00,
292 .maximum = 0x3f,
293 .step = 0x01,
294 .default_value = 0x1a,
295 .flags = 0,
296 },
297 {
298 .id = SN9C102_V4L2_CID_GREEN_BALANCE,
299 .type = V4L2_CTRL_TYPE_INTEGER,
300 .name = "green balance",
301 .minimum = 0x00,
302 .maximum = 0x3f,
303 .step = 0x01,
304 .default_value = 0x2f,
305 .flags = 0,
306 },
307 {
308 .id = V4L2_CID_BLACK_LEVEL,
309 .type = V4L2_CTRL_TYPE_BOOLEAN,
310 .name = "auto black level compensation",
311 .minimum = 0x00,
312 .maximum = 0x01,
313 .step = 0x01,
314 .default_value = 0x00,
315 .flags = 0,
316 },
317 },
318 .get_ctrl = &hv7131r_get_ctrl,
319 .set_ctrl = &hv7131r_set_ctrl,
320 .cropcap = {
321 .bounds = {
322 .left = 0,
323 .top = 0,
324 .width = 640,
325 .height = 480,
326 },
327 .defrect = {
328 .left = 0,
329 .top = 0,
330 .width = 640,
331 .height = 480,
332 },
333 },
334 .set_crop = &hv7131r_set_crop,
335 .pix_format = {
336 .width = 640,
337 .height = 480,
338 .pixelformat = V4L2_PIX_FMT_SBGGR8,
339 .priv = 8,
340 },
341 .set_pix_format = &hv7131r_set_pix_format
342};
343
344
345int sn9c102_probe_hv7131r(struct sn9c102_device* cam)
346{
347 int devid, err;
348
349 err = sn9c102_write_const_regs(cam, {0x09, 0x01}, {0x44, 0x02},
350 {0x34, 0x01}, {0x20, 0x17},
351 {0x34, 0x01}, {0x46, 0x01});
352
353 if (err)
354 return -EIO;
355
356 devid = sn9c102_i2c_try_read(cam, &hv7131r, 0x00);
357 if (devid < 0)
358 return -EIO;
359
360 if (devid != 0x02)
361 return -ENODEV;
362
363 sn9c102_attach_sensor(cam, &hv7131r);
364
365 return 0;
366}
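In the new HV7131R plug-in, the auto black level control is a single bit of sensor register 0x01: hv7131r_get_ctrl() extracts bit 3 of the value read back, and hv7131r_set_ctrl() performs a read-modify-write ((ctrl->value << 3) | (r & 0xf7)) so the remaining bits are preserved. A standalone userspace sketch of just that bit manipulation, with an invented register value:

	#include <stdio.h>

	#define BLACK_LEVEL_BIT 0x08	/* bit 3 of HV7131R register 0x01 */

	static int get_black_level(unsigned char reg01)
	{
		return (reg01 & BLACK_LEVEL_BIT) ? 1 : 0;
	}

	static unsigned char set_black_level(unsigned char reg01, int on)
	{
		/* Same read-modify-write as the driver's set_ctrl path. */
		return (unsigned char)((on << 3) | (reg01 & 0xf7));
	}

	int main(void)
	{
		unsigned char reg = 0xa3;	/* made-up current register value */

		reg = set_black_level(reg, 1);
		printf("reg01=0x%02x black_level=%d\n", reg, get_black_level(reg));
		return 0;
	}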
diff --git a/drivers/media/video/sn9c102/sn9c102_mi0343.c b/drivers/media/video/sn9c102/sn9c102_mi0343.c
index a33d1bc10f90..9200845d011b 100644
--- a/drivers/media/video/sn9c102/sn9c102_mi0343.c
+++ b/drivers/media/video/sn9c102/sn9c102_mi0343.c
@@ -22,36 +22,30 @@
22#include "sn9c102_sensor.h" 22#include "sn9c102_sensor.h"
23 23
24 24
25static struct sn9c102_sensor mi0343;
26static u8 mi0343_i2c_data[5+1];
27
28
29static int mi0343_init(struct sn9c102_device* cam) 25static int mi0343_init(struct sn9c102_device* cam)
30{ 26{
27 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
31 int err = 0; 28 int err = 0;
32 29
33 err += sn9c102_write_reg(cam, 0x00, 0x10); 30 err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11},
34 err += sn9c102_write_reg(cam, 0x00, 0x11); 31 {0x0a, 0x14}, {0x40, 0x01},
35 err += sn9c102_write_reg(cam, 0x0a, 0x14); 32 {0x20, 0x17}, {0x07, 0x18},
36 err += sn9c102_write_reg(cam, 0x40, 0x01); 33 {0xa0, 0x19});
37 err += sn9c102_write_reg(cam, 0x20, 0x17); 34
38 err += sn9c102_write_reg(cam, 0x07, 0x18); 35 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d,
39 err += sn9c102_write_reg(cam, 0xa0, 0x19); 36 0x00, 0x01, 0, 0);
40 37 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d,
41 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id, 38 0x00, 0x00, 0, 0);
42 0x0d, 0x00, 0x01, 0, 0); 39 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x03,
43 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id, 40 0x01, 0xe1, 0, 0);
44 0x0d, 0x00, 0x00, 0, 0); 41 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x04,
45 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id, 42 0x02, 0x81, 0, 0);
46 0x03, 0x01, 0xe1, 0, 0); 43 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x05,
47 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id, 44 0x00, 0x17, 0, 0);
48 0x04, 0x02, 0x81, 0, 0); 45 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x06,
49 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id, 46 0x00, 0x11, 0, 0);
50 0x05, 0x00, 0x17, 0, 0); 47 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x62,
51 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id, 48 0x04, 0x9a, 0, 0);
52 0x06, 0x00, 0x11, 0, 0);
53 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, mi0343.i2c_slave_id,
54 0x62, 0x04, 0x9a, 0, 0);
55 49
56 return err; 50 return err;
57} 51}
@@ -60,43 +54,46 @@ static int mi0343_init(struct sn9c102_device* cam)
60static int mi0343_get_ctrl(struct sn9c102_device* cam, 54static int mi0343_get_ctrl(struct sn9c102_device* cam,
61 struct v4l2_control* ctrl) 55 struct v4l2_control* ctrl)
62{ 56{
57 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
58 u8 data[5+1];
59
63 switch (ctrl->id) { 60 switch (ctrl->id) {
64 case V4L2_CID_EXPOSURE: 61 case V4L2_CID_EXPOSURE:
65 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 62 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x09,
66 0x09, 2+1, mi0343_i2c_data) < 0) 63 2+1, data) < 0)
67 return -EIO; 64 return -EIO;
68 ctrl->value = mi0343_i2c_data[2]; 65 ctrl->value = data[2];
69 return 0; 66 return 0;
70 case V4L2_CID_GAIN: 67 case V4L2_CID_GAIN:
71 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 68 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x35,
72 0x35, 2+1, mi0343_i2c_data) < 0) 69 2+1, data) < 0)
73 return -EIO; 70 return -EIO;
74 break; 71 break;
75 case V4L2_CID_HFLIP: 72 case V4L2_CID_HFLIP:
76 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 73 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20,
77 0x20, 2+1, mi0343_i2c_data) < 0) 74 2+1, data) < 0)
78 return -EIO; 75 return -EIO;
79 ctrl->value = mi0343_i2c_data[3] & 0x20 ? 1 : 0; 76 ctrl->value = data[3] & 0x20 ? 1 : 0;
80 return 0; 77 return 0;
81 case V4L2_CID_VFLIP: 78 case V4L2_CID_VFLIP:
82 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 79 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20,
83 0x20, 2+1, mi0343_i2c_data) < 0) 80 2+1, data) < 0)
84 return -EIO; 81 return -EIO;
85 ctrl->value = mi0343_i2c_data[3] & 0x80 ? 1 : 0; 82 ctrl->value = data[3] & 0x80 ? 1 : 0;
86 return 0; 83 return 0;
87 case V4L2_CID_RED_BALANCE: 84 case V4L2_CID_RED_BALANCE:
88 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 85 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2d,
89 0x2d, 2+1, mi0343_i2c_data) < 0) 86 2+1, data) < 0)
90 return -EIO; 87 return -EIO;
91 break; 88 break;
92 case V4L2_CID_BLUE_BALANCE: 89 case V4L2_CID_BLUE_BALANCE:
93 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 90 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2c,
94 0x2c, 2+1, mi0343_i2c_data) < 0) 91 2+1, data) < 0)
95 return -EIO; 92 return -EIO;
96 break; 93 break;
97 case SN9C102_V4L2_CID_GREEN_BALANCE: 94 case SN9C102_V4L2_CID_GREEN_BALANCE:
98 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 95 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2e,
99 0x2e, 2+1, mi0343_i2c_data) < 0) 96 2+1, data) < 0)
100 return -EIO; 97 return -EIO;
101 break; 98 break;
102 default: 99 default:
@@ -108,7 +105,7 @@ static int mi0343_get_ctrl(struct sn9c102_device* cam,
108 case V4L2_CID_RED_BALANCE: 105 case V4L2_CID_RED_BALANCE:
109 case V4L2_CID_BLUE_BALANCE: 106 case V4L2_CID_BLUE_BALANCE:
110 case SN9C102_V4L2_CID_GREEN_BALANCE: 107 case SN9C102_V4L2_CID_GREEN_BALANCE:
111 ctrl->value = mi0343_i2c_data[3] | (mi0343_i2c_data[2] << 8); 108 ctrl->value = data[3] | (data[2] << 8);
112 if (ctrl->value >= 0x10 && ctrl->value <= 0x3f) 109 if (ctrl->value >= 0x10 && ctrl->value <= 0x3f)
113 ctrl->value -= 0x10; 110 ctrl->value -= 0x10;
114 else if (ctrl->value >= 0x60 && ctrl->value <= 0x7f) 111 else if (ctrl->value >= 0x60 && ctrl->value <= 0x7f)
@@ -124,6 +121,7 @@ static int mi0343_get_ctrl(struct sn9c102_device* cam,
124static int mi0343_set_ctrl(struct sn9c102_device* cam, 121static int mi0343_set_ctrl(struct sn9c102_device* cam,
125 const struct v4l2_control* ctrl) 122 const struct v4l2_control* ctrl)
126{ 123{
124 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
127 u16 reg = 0; 125 u16 reg = 0;
128 int err = 0; 126 int err = 0;
129 127
@@ -143,50 +141,42 @@ static int mi0343_set_ctrl(struct sn9c102_device* cam,
143 141
144 switch (ctrl->id) { 142 switch (ctrl->id) {
145 case V4L2_CID_EXPOSURE: 143 case V4L2_CID_EXPOSURE:
146 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 144 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
147 mi0343.i2c_slave_id,
148 0x09, ctrl->value, 0x00, 145 0x09, ctrl->value, 0x00,
149 0, 0); 146 0, 0);
150 break; 147 break;
151 case V4L2_CID_GAIN: 148 case V4L2_CID_GAIN:
152 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 149 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
153 mi0343.i2c_slave_id,
154 0x35, reg >> 8, reg & 0xff, 150 0x35, reg >> 8, reg & 0xff,
155 0, 0); 151 0, 0);
156 break; 152 break;
157 case V4L2_CID_HFLIP: 153 case V4L2_CID_HFLIP:
158 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 154 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
159 mi0343.i2c_slave_id,
160 0x20, ctrl->value ? 0x40:0x00, 155 0x20, ctrl->value ? 0x40:0x00,
161 ctrl->value ? 0x20:0x00, 156 ctrl->value ? 0x20:0x00,
162 0, 0); 157 0, 0);
163 break; 158 break;
164 case V4L2_CID_VFLIP: 159 case V4L2_CID_VFLIP:
165 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 160 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
166 mi0343.i2c_slave_id,
167 0x20, ctrl->value ? 0x80:0x00, 161 0x20, ctrl->value ? 0x80:0x00,
168 ctrl->value ? 0x80:0x00, 162 ctrl->value ? 0x80:0x00,
169 0, 0); 163 0, 0);
170 break; 164 break;
171 case V4L2_CID_RED_BALANCE: 165 case V4L2_CID_RED_BALANCE:
172 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 166 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
173 mi0343.i2c_slave_id,
174 0x2d, reg >> 8, reg & 0xff, 167 0x2d, reg >> 8, reg & 0xff,
175 0, 0); 168 0, 0);
176 break; 169 break;
177 case V4L2_CID_BLUE_BALANCE: 170 case V4L2_CID_BLUE_BALANCE:
178 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 171 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
179 mi0343.i2c_slave_id,
180 0x2c, reg >> 8, reg & 0xff, 172 0x2c, reg >> 8, reg & 0xff,
181 0, 0); 173 0, 0);
182 break; 174 break;
183 case SN9C102_V4L2_CID_GREEN_BALANCE: 175 case SN9C102_V4L2_CID_GREEN_BALANCE:
184 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 176 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
185 mi0343.i2c_slave_id,
186 0x2b, reg >> 8, reg & 0xff, 177 0x2b, reg >> 8, reg & 0xff,
187 0, 0); 178 0, 0);
188 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 179 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
189 mi0343.i2c_slave_id,
190 0x2e, reg >> 8, reg & 0xff, 180 0x2e, reg >> 8, reg & 0xff,
191 0, 0); 181 0, 0);
192 break; 182 break;
@@ -216,16 +206,15 @@ static int mi0343_set_crop(struct sn9c102_device* cam,
216static int mi0343_set_pix_format(struct sn9c102_device* cam, 206static int mi0343_set_pix_format(struct sn9c102_device* cam,
217 const struct v4l2_pix_format* pix) 207 const struct v4l2_pix_format* pix)
218{ 208{
209 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
219 int err = 0; 210 int err = 0;
220 211
221 if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) { 212 if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) {
222 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 213 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
223 mi0343.i2c_slave_id,
224 0x0a, 0x00, 0x03, 0, 0); 214 0x0a, 0x00, 0x03, 0, 0);
225 err += sn9c102_write_reg(cam, 0x20, 0x19); 215 err += sn9c102_write_reg(cam, 0x20, 0x19);
226 } else { 216 } else {
227 err += sn9c102_i2c_try_raw_write(cam, &mi0343, 4, 217 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
228 mi0343.i2c_slave_id,
229 0x0a, 0x00, 0x05, 0, 0); 218 0x0a, 0x00, 0x05, 0, 0);
230 err += sn9c102_write_reg(cam, 0xa0, 0x19); 219 err += sn9c102_write_reg(cam, 0xa0, 0x19);
231 } 220 }
@@ -237,7 +226,7 @@ static int mi0343_set_pix_format(struct sn9c102_device* cam,
237static struct sn9c102_sensor mi0343 = { 226static struct sn9c102_sensor mi0343 = {
238 .name = "MI-0343", 227 .name = "MI-0343",
239 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", 228 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
240 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103, 229 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102,
241 .frequency = SN9C102_I2C_100KHZ, 230 .frequency = SN9C102_I2C_100KHZ,
242 .interface = SN9C102_I2C_2WIRES, 231 .interface = SN9C102_I2C_2WIRES,
243 .i2c_slave_id = 0x5d, 232 .i2c_slave_id = 0x5d,
@@ -343,19 +332,20 @@ static struct sn9c102_sensor mi0343 = {
343 332
344int sn9c102_probe_mi0343(struct sn9c102_device* cam) 333int sn9c102_probe_mi0343(struct sn9c102_device* cam)
345{ 334{
335 u8 data[5+1];
346 int err = 0; 336 int err = 0;
347 337
348 err += sn9c102_write_reg(cam, 0x01, 0x01); 338 err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01},
349 err += sn9c102_write_reg(cam, 0x00, 0x01); 339 {0x28, 0x17});
350 err += sn9c102_write_reg(cam, 0x28, 0x17); 340
351 if (err) 341 if (err)
352 return -EIO; 342 return -EIO;
353 343
354 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 0x00, 344 if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 0x00,
355 2, mi0343_i2c_data) < 0) 345 2, data) < 0)
356 return -EIO; 346 return -EIO;
357 347
358 if (mi0343_i2c_data[4] != 0x32 && mi0343_i2c_data[3] != 0xe3) 348 if (data[4] != 0x32 || data[3] != 0xe3)
359 return -ENODEV; 349 return -ENODEV;
360 350
361 sn9c102_attach_sensor(cam, &mi0343); 351 sn9c102_attach_sensor(cam, &mi0343);
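The HV7131D and MI-0343 probe hunks above both fix the same logic error in the sensor identity check: with &&, the probe only returned -ENODEV when every ID byte mismatched, so a partly-matching (i.e. wrong) sensor was still accepted; with ||, a single mismatching byte is enough to reject it. The corrected MI-0343 test is equivalent to the positive form below (a restatement for clarity, not code from the driver):

	/* Accept the sensor only if BOTH identity bytes match. */
	if (!(data[4] == 0x32 && data[3] == 0xe3))
		return -ENODEV;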
diff --git a/drivers/media/video/sn9c102/sn9c102_mi0360.c b/drivers/media/video/sn9c102/sn9c102_mi0360.c
new file mode 100644
index 000000000000..64698acb0b15
--- /dev/null
+++ b/drivers/media/video/sn9c102/sn9c102_mi0360.c
@@ -0,0 +1,338 @@
1/***************************************************************************
2 * Plug-in for MI-0360 image sensor connected to the SN9C1xx PC Camera *
3 * Controllers *
4 * *
5 * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
6 * *
7 * This program is free software; you can redistribute it and/or modify *
8 * it under the terms of the GNU General Public License as published by *
9 * the Free Software Foundation; either version 2 of the License, or *
10 * (at your option) any later version. *
11 * *
12 * This program is distributed in the hope that it will be useful, *
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
15 * GNU General Public License for more details. *
16 * *
17 * You should have received a copy of the GNU General Public License *
18 * along with this program; if not, write to the Free Software *
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
20 ***************************************************************************/
21
22#include "sn9c102_sensor.h"
23
24
25static int mi0360_init(struct sn9c102_device* cam)
26{
27 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
28 int err = 0;
29
30 err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11},
31 {0x0a, 0x14}, {0x40, 0x01},
32 {0x20, 0x17}, {0x07, 0x18},
33 {0xa0, 0x19}, {0x02, 0x1c},
34 {0x03, 0x1d}, {0x0f, 0x1e},
35 {0x0c, 0x1f}, {0x00, 0x20},
36 {0x10, 0x21}, {0x20, 0x22},
37 {0x30, 0x23}, {0x40, 0x24},
38 {0x50, 0x25}, {0x60, 0x26},
39 {0x70, 0x27}, {0x80, 0x28},
40 {0x90, 0x29}, {0xa0, 0x2a},
41 {0xb0, 0x2b}, {0xc0, 0x2c},
42 {0xd0, 0x2d}, {0xe0, 0x2e},
43 {0xf0, 0x2f}, {0xff, 0x30});
44
45 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d,
46 0x00, 0x01, 0, 0);
47 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d,
48 0x00, 0x00, 0, 0);
49 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x03,
50 0x01, 0xe1, 0, 0);
51 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x04,
52 0x02, 0x81, 0, 0);
53 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x05,
54 0x00, 0x17, 0, 0);
55 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x06,
56 0x00, 0x11, 0, 0);
57 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x62,
58 0x04, 0x9a, 0, 0);
59
60 return err;
61}
62
63
64static int mi0360_get_ctrl(struct sn9c102_device* cam,
65 struct v4l2_control* ctrl)
66{
67 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
68 u8 data[5+1];
69
70 switch (ctrl->id) {
71 case V4L2_CID_EXPOSURE:
72 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x09,
73 2+1, data) < 0)
74 return -EIO;
75 ctrl->value = data[2];
76 return 0;
77 case V4L2_CID_GAIN:
78 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x35,
79 2+1, data) < 0)
80 return -EIO;
81 ctrl->value = data[3];
82 return 0;
83 case V4L2_CID_RED_BALANCE:
84 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2c,
85 2+1, data) < 0)
86 return -EIO;
87 ctrl->value = data[3];
88 return 0;
89 case V4L2_CID_BLUE_BALANCE:
90 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2d,
91 2+1, data) < 0)
92 return -EIO;
93 ctrl->value = data[3];
94 return 0;
95 case SN9C102_V4L2_CID_GREEN_BALANCE:
96 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2e,
97 2+1, data) < 0)
98 return -EIO;
99 ctrl->value = data[3];
100 return 0;
101 case V4L2_CID_HFLIP:
102 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20,
103 2+1, data) < 0)
104 return -EIO;
105 ctrl->value = data[3] & 0x20 ? 1 : 0;
106 return 0;
107 case V4L2_CID_VFLIP:
108 if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20,
109 2+1, data) < 0)
110 return -EIO;
111 ctrl->value = data[3] & 0x80 ? 1 : 0;
112 return 0;
113 default:
114 return -EINVAL;
115 }
116
117 return 0;
118}
119
120
121static int mi0360_set_ctrl(struct sn9c102_device* cam,
122 const struct v4l2_control* ctrl)
123{
124 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
125 int err = 0;
126
127 switch (ctrl->id) {
128 case V4L2_CID_EXPOSURE:
129 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
130 0x09, ctrl->value, 0x00,
131 0, 0);
132 break;
133 case V4L2_CID_GAIN:
134 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
135 0x35, 0x03, ctrl->value,
136 0, 0);
137 break;
138 case V4L2_CID_RED_BALANCE:
139 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
140 0x2c, 0x03, ctrl->value,
141 0, 0);
142 break;
143 case V4L2_CID_BLUE_BALANCE:
144 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
145 0x2d, 0x03, ctrl->value,
146 0, 0);
147 break;
148 case SN9C102_V4L2_CID_GREEN_BALANCE:
149 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
150 0x2b, 0x03, ctrl->value,
151 0, 0);
152 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
153 0x2e, 0x03, ctrl->value,
154 0, 0);
155 break;
156 case V4L2_CID_HFLIP:
157 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
158 0x20, ctrl->value ? 0x40:0x00,
159 ctrl->value ? 0x20:0x00,
160 0, 0);
161 break;
162 case V4L2_CID_VFLIP:
163 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
164 0x20, ctrl->value ? 0x80:0x00,
165 ctrl->value ? 0x80:0x00,
166 0, 0);
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 return err ? -EIO : 0;
173}
174
175
176static int mi0360_set_crop(struct sn9c102_device* cam,
177 const struct v4l2_rect* rect)
178{
179 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
180 int err = 0;
181 u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 0,
182 v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
183
184 err += sn9c102_write_reg(cam, h_start, 0x12);
185 err += sn9c102_write_reg(cam, v_start, 0x13);
186
187 return err;
188}
189
190
191static int mi0360_set_pix_format(struct sn9c102_device* cam,
192 const struct v4l2_pix_format* pix)
193{
194 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
195 int err = 0;
196
197 if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) {
198 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
199 0x0a, 0x00, 0x02, 0, 0);
200 err += sn9c102_write_reg(cam, 0x20, 0x19);
201 } else {
202 err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id,
203 0x0a, 0x00, 0x05, 0, 0);
204 err += sn9c102_write_reg(cam, 0x60, 0x19);
205 }
206
207 return err;
208}
209
210
211static struct sn9c102_sensor mi0360 = {
212 .name = "MI-0360",
213 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
214 .supported_bridge = BRIDGE_SN9C103,
215 .frequency = SN9C102_I2C_100KHZ,
216 .interface = SN9C102_I2C_2WIRES,
217 .i2c_slave_id = 0x5d,
218 .init = &mi0360_init,
219 .qctrl = {
220 {
221 .id = V4L2_CID_EXPOSURE,
222 .type = V4L2_CTRL_TYPE_INTEGER,
223 .name = "exposure",
224 .minimum = 0x00,
225 .maximum = 0x0f,
226 .step = 0x01,
227 .default_value = 0x05,
228 .flags = 0,
229 },
230 {
231 .id = V4L2_CID_GAIN,
232 .type = V4L2_CTRL_TYPE_INTEGER,
233 .name = "global gain",
234 .minimum = 0x00,
235 .maximum = 0x7f,
236 .step = 0x01,
237 .default_value = 0x25,
238 .flags = 0,
239 },
240 {
241 .id = V4L2_CID_HFLIP,
242 .type = V4L2_CTRL_TYPE_BOOLEAN,
243 .name = "horizontal mirror",
244 .minimum = 0,
245 .maximum = 1,
246 .step = 1,
247 .default_value = 0,
248 .flags = 0,
249 },
250 {
251 .id = V4L2_CID_VFLIP,
252 .type = V4L2_CTRL_TYPE_BOOLEAN,
253 .name = "vertical mirror",
254 .minimum = 0,
255 .maximum = 1,
256 .step = 1,
257 .default_value = 0,
258 .flags = 0,
259 },
260 {
261 .id = V4L2_CID_BLUE_BALANCE,
262 .type = V4L2_CTRL_TYPE_INTEGER,
263 .name = "blue balance",
264 .minimum = 0x00,
265 .maximum = 0x7f,
266 .step = 0x01,
267 .default_value = 0x0f,
268 .flags = 0,
269 },
270 {
271 .id = V4L2_CID_RED_BALANCE,
272 .type = V4L2_CTRL_TYPE_INTEGER,
273 .name = "red balance",
274 .minimum = 0x00,
275 .maximum = 0x7f,
276 .step = 0x01,
277 .default_value = 0x32,
278 .flags = 0,
279 },
280 {
281 .id = SN9C102_V4L2_CID_GREEN_BALANCE,
282 .type = V4L2_CTRL_TYPE_INTEGER,
283 .name = "green balance",
284 .minimum = 0x00,
285 .maximum = 0x7f,
286 .step = 0x01,
287 .default_value = 0x25,
288 .flags = 0,
289 },
290 },
291 .get_ctrl = &mi0360_get_ctrl,
292 .set_ctrl = &mi0360_set_ctrl,
293 .cropcap = {
294 .bounds = {
295 .left = 0,
296 .top = 0,
297 .width = 640,
298 .height = 480,
299 },
300 .defrect = {
301 .left = 0,
302 .top = 0,
303 .width = 640,
304 .height = 480,
305 },
306 },
307 .set_crop = &mi0360_set_crop,
308 .pix_format = {
309 .width = 640,
310 .height = 480,
311 .pixelformat = V4L2_PIX_FMT_SBGGR8,
312 .priv = 8,
313 },
314 .set_pix_format = &mi0360_set_pix_format
315};
316
317
318int sn9c102_probe_mi0360(struct sn9c102_device* cam)
319{
320 u8 data[5+1];
321 int err;
322
323 err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01},
324 {0x28, 0x17});
325 if (err)
326 return -EIO;
327
328 if (sn9c102_i2c_try_raw_read(cam, &mi0360, mi0360.i2c_slave_id, 0x00,
329 2+1, data) < 0)
330 return -EIO;
331
332 if (data[2] != 0x82 || data[3] != 0x43)
333 return -ENODEV;
334
335 sn9c102_attach_sensor(cam, &mi0360);
336
337 return 0;
338}
diff --git a/drivers/media/video/sn9c102/sn9c102_ov7630.c b/drivers/media/video/sn9c102/sn9c102_ov7630.c
index 7df09ff38e63..31b6080b0615 100644
--- a/drivers/media/video/sn9c102/sn9c102_ov7630.c
+++ b/drivers/media/video/sn9c102/sn9c102_ov7630.c
@@ -22,9 +22,6 @@
22#include "sn9c102_sensor.h" 22#include "sn9c102_sensor.h"
23 23
24 24
25static struct sn9c102_sensor ov7630;
26
27
28static int ov7630_init(struct sn9c102_device* cam) 25static int ov7630_init(struct sn9c102_device* cam)
29{ 26{
30 int err = 0; 27 int err = 0;
@@ -32,21 +29,20 @@ static int ov7630_init(struct sn9c102_device* cam)
32 switch (sn9c102_get_bridge(cam)) { 29 switch (sn9c102_get_bridge(cam)) {
33 case BRIDGE_SN9C101: 30 case BRIDGE_SN9C101:
34 case BRIDGE_SN9C102: 31 case BRIDGE_SN9C102:
35 err += sn9c102_write_reg(cam, 0x00, 0x14); 32 err = sn9c102_write_const_regs(cam, {0x00, 0x14},
36 err += sn9c102_write_reg(cam, 0x60, 0x17); 33 {0x60, 0x17}, {0x0f, 0x18},
37 err += sn9c102_write_reg(cam, 0x0f, 0x18); 34 {0x50, 0x19});
38 err += sn9c102_write_reg(cam, 0x50, 0x19);
39 35
40 err += sn9c102_i2c_write(cam, 0x12, 0x8d); 36 err += sn9c102_i2c_write(cam, 0x12, 0x8d);
41 err += sn9c102_i2c_write(cam, 0x12, 0x0d); 37 err += sn9c102_i2c_write(cam, 0x12, 0x0d);
42 err += sn9c102_i2c_write(cam, 0x11, 0x00); 38 err += sn9c102_i2c_write(cam, 0x11, 0x00);
43 err += sn9c102_i2c_write(cam, 0x15, 0x34); 39 err += sn9c102_i2c_write(cam, 0x15, 0x35);
44 err += sn9c102_i2c_write(cam, 0x16, 0x03); 40 err += sn9c102_i2c_write(cam, 0x16, 0x03);
45 err += sn9c102_i2c_write(cam, 0x17, 0x1c); 41 err += sn9c102_i2c_write(cam, 0x17, 0x1c);
46 err += sn9c102_i2c_write(cam, 0x18, 0xbd); 42 err += sn9c102_i2c_write(cam, 0x18, 0xbd);
47 err += sn9c102_i2c_write(cam, 0x19, 0x06); 43 err += sn9c102_i2c_write(cam, 0x19, 0x06);
48 err += sn9c102_i2c_write(cam, 0x1a, 0xf6); 44 err += sn9c102_i2c_write(cam, 0x1a, 0xf6);
49 err += sn9c102_i2c_write(cam, 0x1b, 0x04); 45 err += sn9c102_i2c_write(cam, 0x1b, 0x04);
50 err += sn9c102_i2c_write(cam, 0x20, 0x44); 46 err += sn9c102_i2c_write(cam, 0x20, 0x44);
51 err += sn9c102_i2c_write(cam, 0x23, 0xee); 47 err += sn9c102_i2c_write(cam, 0x23, 0xee);
52 err += sn9c102_i2c_write(cam, 0x26, 0xa0); 48 err += sn9c102_i2c_write(cam, 0x26, 0xa0);
@@ -65,42 +61,26 @@ static int ov7630_init(struct sn9c102_device* cam)
65 err += sn9c102_i2c_write(cam, 0x71, 0x00); 61 err += sn9c102_i2c_write(cam, 0x71, 0x00);
66 err += sn9c102_i2c_write(cam, 0x74, 0x21); 62 err += sn9c102_i2c_write(cam, 0x74, 0x21);
67 err += sn9c102_i2c_write(cam, 0x7d, 0xf7); 63 err += sn9c102_i2c_write(cam, 0x7d, 0xf7);
64
68 break; 65 break;
69 case BRIDGE_SN9C103: 66 case BRIDGE_SN9C103:
70 err += sn9c102_write_reg(cam, 0x00, 0x02); 67 err = sn9c102_write_const_regs(cam, {0x00, 0x02}, {0x00, 0x03},
71 err += sn9c102_write_reg(cam, 0x00, 0x03); 68 {0x1a, 0x04}, {0x20, 0x05},
72 err += sn9c102_write_reg(cam, 0x1a, 0x04); 69 {0x20, 0x06}, {0x20, 0x07},
73 err += sn9c102_write_reg(cam, 0x20, 0x05); 70 {0x03, 0x10}, {0x0a, 0x14},
74 err += sn9c102_write_reg(cam, 0x20, 0x06); 71 {0x60, 0x17}, {0x0f, 0x18},
75 err += sn9c102_write_reg(cam, 0x20, 0x07); 72 {0x50, 0x19}, {0x1d, 0x1a},
76 err += sn9c102_write_reg(cam, 0x03, 0x10); 73 {0x10, 0x1b}, {0x02, 0x1c},
77 err += sn9c102_write_reg(cam, 0x0a, 0x14); 74 {0x03, 0x1d}, {0x0f, 0x1e},
78 err += sn9c102_write_reg(cam, 0x60, 0x17); 75 {0x0c, 0x1f}, {0x00, 0x20},
79 err += sn9c102_write_reg(cam, 0x0f, 0x18); 76 {0x10, 0x21}, {0x20, 0x22},
80 err += sn9c102_write_reg(cam, 0x50, 0x19); 77 {0x30, 0x23}, {0x40, 0x24},
81 err += sn9c102_write_reg(cam, 0x1d, 0x1a); 78 {0x50, 0x25}, {0x60, 0x26},
82 err += sn9c102_write_reg(cam, 0x10, 0x1b); 79 {0x70, 0x27}, {0x80, 0x28},
83 err += sn9c102_write_reg(cam, 0x02, 0x1c); 80 {0x90, 0x29}, {0xa0, 0x2a},
84 err += sn9c102_write_reg(cam, 0x03, 0x1d); 81 {0xb0, 0x2b}, {0xc0, 0x2c},
85 err += sn9c102_write_reg(cam, 0x0f, 0x1e); 82 {0xd0, 0x2d}, {0xe0, 0x2e},
86 err += sn9c102_write_reg(cam, 0x0c, 0x1f); 83 {0xf0, 0x2f}, {0xff, 0x30});
87 err += sn9c102_write_reg(cam, 0x00, 0x20);
88 err += sn9c102_write_reg(cam, 0x10, 0x21);
89 err += sn9c102_write_reg(cam, 0x20, 0x22);
90 err += sn9c102_write_reg(cam, 0x30, 0x23);
91 err += sn9c102_write_reg(cam, 0x40, 0x24);
92 err += sn9c102_write_reg(cam, 0x50, 0x25);
93 err += sn9c102_write_reg(cam, 0x60, 0x26);
94 err += sn9c102_write_reg(cam, 0x70, 0x27);
95 err += sn9c102_write_reg(cam, 0x80, 0x28);
96 err += sn9c102_write_reg(cam, 0x90, 0x29);
97 err += sn9c102_write_reg(cam, 0xa0, 0x2a);
98 err += sn9c102_write_reg(cam, 0xb0, 0x2b);
99 err += sn9c102_write_reg(cam, 0xc0, 0x2c);
100 err += sn9c102_write_reg(cam, 0xd0, 0x2d);
101 err += sn9c102_write_reg(cam, 0xe0, 0x2e);
102 err += sn9c102_write_reg(cam, 0xf0, 0x2f);
103 err += sn9c102_write_reg(cam, 0xff, 0x30);
104 84
105 err += sn9c102_i2c_write(cam, 0x12, 0x8d); 85 err += sn9c102_i2c_write(cam, 0x12, 0x8d);
106 err += sn9c102_i2c_write(cam, 0x12, 0x0d); 86 err += sn9c102_i2c_write(cam, 0x12, 0x0d);
@@ -108,23 +88,23 @@ static int ov7630_init(struct sn9c102_device* cam)
108 err += sn9c102_i2c_write(cam, 0x11, 0x01); 88 err += sn9c102_i2c_write(cam, 0x11, 0x01);
109 err += sn9c102_i2c_write(cam, 0x1b, 0x04); 89 err += sn9c102_i2c_write(cam, 0x1b, 0x04);
110 err += sn9c102_i2c_write(cam, 0x20, 0x44); 90 err += sn9c102_i2c_write(cam, 0x20, 0x44);
111 err += sn9c102_i2c_write(cam, 0x23, 0xee); 91 err += sn9c102_i2c_write(cam, 0x23, 0xee);
112 err += sn9c102_i2c_write(cam, 0x26, 0xa0); 92 err += sn9c102_i2c_write(cam, 0x26, 0xa0);
113 err += sn9c102_i2c_write(cam, 0x27, 0x9a); 93 err += sn9c102_i2c_write(cam, 0x27, 0x9a);
114 err += sn9c102_i2c_write(cam, 0x28, 0x20); 94 err += sn9c102_i2c_write(cam, 0x28, 0x20);
115 err += sn9c102_i2c_write(cam, 0x29, 0x30); 95 err += sn9c102_i2c_write(cam, 0x29, 0x30);
116 err += sn9c102_i2c_write(cam, 0x2f, 0x3d); 96 err += sn9c102_i2c_write(cam, 0x2f, 0x3d);
117 err += sn9c102_i2c_write(cam, 0x30, 0x24); 97 err += sn9c102_i2c_write(cam, 0x30, 0x24);
118 err += sn9c102_i2c_write(cam, 0x32, 0x86); 98 err += sn9c102_i2c_write(cam, 0x32, 0x86);
119 err += sn9c102_i2c_write(cam, 0x60, 0xa9); 99 err += sn9c102_i2c_write(cam, 0x60, 0xa9);
120 err += sn9c102_i2c_write(cam, 0x61, 0x42); 100 err += sn9c102_i2c_write(cam, 0x61, 0x42);
121 err += sn9c102_i2c_write(cam, 0x65, 0x00); 101 err += sn9c102_i2c_write(cam, 0x65, 0x00);
122 err += sn9c102_i2c_write(cam, 0x69, 0x38); 102 err += sn9c102_i2c_write(cam, 0x69, 0x38);
123 err += sn9c102_i2c_write(cam, 0x6f, 0x88); 103 err += sn9c102_i2c_write(cam, 0x6f, 0x88);
124 err += sn9c102_i2c_write(cam, 0x70, 0x0b); 104 err += sn9c102_i2c_write(cam, 0x70, 0x0b);
125 err += sn9c102_i2c_write(cam, 0x71, 0x00); 105 err += sn9c102_i2c_write(cam, 0x71, 0x00);
126 err += sn9c102_i2c_write(cam, 0x74, 0x21); 106 err += sn9c102_i2c_write(cam, 0x74, 0x21);
127 err += sn9c102_i2c_write(cam, 0x7d, 0xf7); 107 err += sn9c102_i2c_write(cam, 0x7d, 0xf7);
128 break; 108 break;
129 default: 109 default:
130 break; 110 break;
@@ -428,15 +408,14 @@ int sn9c102_probe_ov7630(struct sn9c102_device* cam)
428 switch (sn9c102_get_bridge(cam)) { 408 switch (sn9c102_get_bridge(cam)) {
429 case BRIDGE_SN9C101: 409 case BRIDGE_SN9C101:
430 case BRIDGE_SN9C102: 410 case BRIDGE_SN9C102:
431 err += sn9c102_write_reg(cam, 0x01, 0x01); 411 err = sn9c102_write_const_regs(cam, {0x01, 0x01},
432 err += sn9c102_write_reg(cam, 0x00, 0x01); 412 {0x00, 0x01}, {0x28, 0x17});
433 err += sn9c102_write_reg(cam, 0x28, 0x17); 413
434 break; 414 break;
435 case BRIDGE_SN9C103: /* do _not_ change anything! */ 415 case BRIDGE_SN9C103: /* do _not_ change anything! */
436 err += sn9c102_write_reg(cam, 0x09, 0x01); 416 err = sn9c102_write_const_regs(cam, {0x09, 0x01},
437 err += sn9c102_write_reg(cam, 0x42, 0x01); 417 {0x42, 0x01}, {0x28, 0x17},
438 err += sn9c102_write_reg(cam, 0x28, 0x17); 418 {0x44, 0x02});
439 err += sn9c102_write_reg(cam, 0x44, 0x02);
440 pid = sn9c102_i2c_try_read(cam, &ov7630, 0x0a); 419 pid = sn9c102_i2c_try_read(cam, &ov7630, 0x0a);
441 if (err || pid < 0) { /* try a different initialization */ 420 if (err || pid < 0) { /* try a different initialization */
442 err = sn9c102_write_reg(cam, 0x01, 0x01); 421 err = sn9c102_write_reg(cam, 0x01, 0x01);
diff --git a/drivers/media/video/sn9c102/sn9c102_ov7660.c b/drivers/media/video/sn9c102/sn9c102_ov7660.c
index d670c24d4435..c898e948fe8d 100644
--- a/drivers/media/video/sn9c102/sn9c102_ov7660.c
+++ b/drivers/media/video/sn9c102/sn9c102_ov7660.c
@@ -22,160 +22,84 @@
22#include "sn9c102_sensor.h" 22#include "sn9c102_sensor.h"
23 23
24 24
25static struct sn9c102_sensor ov7660;
26
27
28static int ov7660_init(struct sn9c102_device* cam) 25static int ov7660_init(struct sn9c102_device* cam)
29{ 26{
30 int err = 0; 27 int err = 0;
31 28
32 err += sn9c102_write_reg(cam, 0x40, 0x02); 29 err = sn9c102_write_const_regs(cam, {0x40, 0x02}, {0x00, 0x03},
33 err += sn9c102_write_reg(cam, 0x00, 0x03); 30 {0x1a, 0x04}, {0x03, 0x10},
34 err += sn9c102_write_reg(cam, 0x1a, 0x04); 31 {0x08, 0x14}, {0x20, 0x17},
35 err += sn9c102_write_reg(cam, 0x03, 0x10); 32 {0x8b, 0x18}, {0x00, 0x19},
36 err += sn9c102_write_reg(cam, 0x08, 0x14); 33 {0x1d, 0x1a}, {0x10, 0x1b},
37 err += sn9c102_write_reg(cam, 0x20, 0x17); 34 {0x02, 0x1c}, {0x03, 0x1d},
38 err += sn9c102_write_reg(cam, 0x8b, 0x18); 35 {0x0f, 0x1e}, {0x0c, 0x1f},
39 err += sn9c102_write_reg(cam, 0x00, 0x19); 36 {0x00, 0x20}, {0x29, 0x21},
40 err += sn9c102_write_reg(cam, 0x1d, 0x1a); 37 {0x40, 0x22}, {0x54, 0x23},
41 err += sn9c102_write_reg(cam, 0x10, 0x1b); 38 {0x66, 0x24}, {0x76, 0x25},
42 err += sn9c102_write_reg(cam, 0x02, 0x1c); 39 {0x85, 0x26}, {0x94, 0x27},
43 err += sn9c102_write_reg(cam, 0x03, 0x1d); 40 {0xa1, 0x28}, {0xae, 0x29},
44 err += sn9c102_write_reg(cam, 0x0f, 0x1e); 41 {0xbb, 0x2a}, {0xc7, 0x2b},
45 err += sn9c102_write_reg(cam, 0x0c, 0x1f); 42 {0xd3, 0x2c}, {0xde, 0x2d},
46 err += sn9c102_write_reg(cam, 0x00, 0x20); 43 {0xea, 0x2e}, {0xf4, 0x2f},
47 err += sn9c102_write_reg(cam, 0x29, 0x21); 44 {0xff, 0x30}, {0x00, 0x3F},
48 err += sn9c102_write_reg(cam, 0x40, 0x22); 45 {0xC7, 0x40}, {0x01, 0x41},
49 err += sn9c102_write_reg(cam, 0x54, 0x23); 46 {0x44, 0x42}, {0x00, 0x43},
50 err += sn9c102_write_reg(cam, 0x66, 0x24); 47 {0x44, 0x44}, {0x00, 0x45},
51 err += sn9c102_write_reg(cam, 0x76, 0x25); 48 {0x44, 0x46}, {0x00, 0x47},
52 err += sn9c102_write_reg(cam, 0x85, 0x26); 49 {0xC7, 0x48}, {0x01, 0x49},
53 err += sn9c102_write_reg(cam, 0x94, 0x27); 50 {0xC7, 0x4A}, {0x01, 0x4B},
54 err += sn9c102_write_reg(cam, 0xa1, 0x28); 51 {0xC7, 0x4C}, {0x01, 0x4D},
55 err += sn9c102_write_reg(cam, 0xae, 0x29); 52 {0x44, 0x4E}, {0x00, 0x4F},
56 err += sn9c102_write_reg(cam, 0xbb, 0x2a); 53 {0x44, 0x50}, {0x00, 0x51},
57 err += sn9c102_write_reg(cam, 0xc7, 0x2b); 54 {0x44, 0x52}, {0x00, 0x53},
58 err += sn9c102_write_reg(cam, 0xd3, 0x2c); 55 {0xC7, 0x54}, {0x01, 0x55},
59 err += sn9c102_write_reg(cam, 0xde, 0x2d); 56 {0xC7, 0x56}, {0x01, 0x57},
60 err += sn9c102_write_reg(cam, 0xea, 0x2e); 57 {0xC7, 0x58}, {0x01, 0x59},
61 err += sn9c102_write_reg(cam, 0xf4, 0x2f); 58 {0x44, 0x5A}, {0x00, 0x5B},
62 err += sn9c102_write_reg(cam, 0xff, 0x30); 59 {0x44, 0x5C}, {0x00, 0x5D},
63 err += sn9c102_write_reg(cam, 0x00, 0x3F); 60 {0x44, 0x5E}, {0x00, 0x5F},
64 err += sn9c102_write_reg(cam, 0xC7, 0x40); 61 {0xC7, 0x60}, {0x01, 0x61},
65 err += sn9c102_write_reg(cam, 0x01, 0x41); 62 {0xC7, 0x62}, {0x01, 0x63},
66 err += sn9c102_write_reg(cam, 0x44, 0x42); 63 {0xC7, 0x64}, {0x01, 0x65},
67 err += sn9c102_write_reg(cam, 0x00, 0x43); 64 {0x44, 0x66}, {0x00, 0x67},
68 err += sn9c102_write_reg(cam, 0x44, 0x44); 65 {0x44, 0x68}, {0x00, 0x69},
69 err += sn9c102_write_reg(cam, 0x00, 0x45); 66 {0x44, 0x6A}, {0x00, 0x6B},
70 err += sn9c102_write_reg(cam, 0x44, 0x46); 67 {0xC7, 0x6C}, {0x01, 0x6D},
71 err += sn9c102_write_reg(cam, 0x00, 0x47); 68 {0xC7, 0x6E}, {0x01, 0x6F},
72 err += sn9c102_write_reg(cam, 0xC7, 0x48); 69 {0xC7, 0x70}, {0x01, 0x71},
73 err += sn9c102_write_reg(cam, 0x01, 0x49); 70 {0x44, 0x72}, {0x00, 0x73},
74 err += sn9c102_write_reg(cam, 0xC7, 0x4A); 71 {0x44, 0x74}, {0x00, 0x75},
75 err += sn9c102_write_reg(cam, 0x01, 0x4B); 72 {0x44, 0x76}, {0x00, 0x77},
76 err += sn9c102_write_reg(cam, 0xC7, 0x4C); 73 {0xC7, 0x78}, {0x01, 0x79},
77 err += sn9c102_write_reg(cam, 0x01, 0x4D); 74 {0xC7, 0x7A}, {0x01, 0x7B},
78 err += sn9c102_write_reg(cam, 0x44, 0x4E); 75 {0xC7, 0x7C}, {0x01, 0x7D},
79 err += sn9c102_write_reg(cam, 0x00, 0x4F); 76 {0x44, 0x7E}, {0x00, 0x7F},
80 err += sn9c102_write_reg(cam, 0x44, 0x50); 77 {0x14, 0x84}, {0x00, 0x85},
81 err += sn9c102_write_reg(cam, 0x00, 0x51); 78 {0x27, 0x86}, {0x00, 0x87},
82 err += sn9c102_write_reg(cam, 0x44, 0x52); 79 {0x07, 0x88}, {0x00, 0x89},
83 err += sn9c102_write_reg(cam, 0x00, 0x53); 80 {0xEC, 0x8A}, {0x0f, 0x8B},
84 err += sn9c102_write_reg(cam, 0xC7, 0x54); 81 {0xD8, 0x8C}, {0x0f, 0x8D},
85 err += sn9c102_write_reg(cam, 0x01, 0x55); 82 {0x3D, 0x8E}, {0x00, 0x8F},
86 err += sn9c102_write_reg(cam, 0xC7, 0x56); 83 {0x3D, 0x90}, {0x00, 0x91},
87 err += sn9c102_write_reg(cam, 0x01, 0x57); 84 {0xCD, 0x92}, {0x0f, 0x93},
88 err += sn9c102_write_reg(cam, 0xC7, 0x58); 85 {0xf7, 0x94}, {0x0f, 0x95},
89 err += sn9c102_write_reg(cam, 0x01, 0x59); 86 {0x0C, 0x96}, {0x00, 0x97},
90 err += sn9c102_write_reg(cam, 0x44, 0x5A); 87 {0x00, 0x98}, {0x66, 0x99},
91 err += sn9c102_write_reg(cam, 0x00, 0x5B); 88 {0x05, 0x9A}, {0x00, 0x9B},
92 err += sn9c102_write_reg(cam, 0x44, 0x5C); 89 {0x04, 0x9C}, {0x00, 0x9D},
93 err += sn9c102_write_reg(cam, 0x00, 0x5D); 90 {0x08, 0x9E}, {0x00, 0x9F},
94 err += sn9c102_write_reg(cam, 0x44, 0x5E); 91 {0x2D, 0xC0}, {0x2D, 0xC1},
95 err += sn9c102_write_reg(cam, 0x00, 0x5F); 92 {0x3A, 0xC2}, {0x05, 0xC3},
96 err += sn9c102_write_reg(cam, 0xC7, 0x60); 93 {0x04, 0xC4}, {0x3F, 0xC5},
97 err += sn9c102_write_reg(cam, 0x01, 0x61); 94 {0x00, 0xC6}, {0x00, 0xC7},
98 err += sn9c102_write_reg(cam, 0xC7, 0x62); 95 {0x50, 0xC8}, {0x3C, 0xC9},
99 err += sn9c102_write_reg(cam, 0x01, 0x63); 96 {0x28, 0xCA}, {0xD8, 0xCB},
100 err += sn9c102_write_reg(cam, 0xC7, 0x64); 97 {0x14, 0xCC}, {0xEC, 0xCD},
101 err += sn9c102_write_reg(cam, 0x01, 0x65); 98 {0x32, 0xCE}, {0xDD, 0xCF},
102 err += sn9c102_write_reg(cam, 0x44, 0x66); 99 {0x32, 0xD0}, {0xDD, 0xD1},
103 err += sn9c102_write_reg(cam, 0x00, 0x67); 100 {0x6A, 0xD2}, {0x50, 0xD3},
104 err += sn9c102_write_reg(cam, 0x44, 0x68); 101 {0x00, 0xD4}, {0x00, 0xD5},
105 err += sn9c102_write_reg(cam, 0x00, 0x69); 102 {0x00, 0xD6});
106 err += sn9c102_write_reg(cam, 0x44, 0x6A);
107 err += sn9c102_write_reg(cam, 0x00, 0x6B);
108 err += sn9c102_write_reg(cam, 0xC7, 0x6C);
109 err += sn9c102_write_reg(cam, 0x01, 0x6D);
110 err += sn9c102_write_reg(cam, 0xC7, 0x6E);
111 err += sn9c102_write_reg(cam, 0x01, 0x6F);
112 err += sn9c102_write_reg(cam, 0xC7, 0x70);
113 err += sn9c102_write_reg(cam, 0x01, 0x71);
114 err += sn9c102_write_reg(cam, 0x44, 0x72);
115 err += sn9c102_write_reg(cam, 0x00, 0x73);
116 err += sn9c102_write_reg(cam, 0x44, 0x74);
117 err += sn9c102_write_reg(cam, 0x00, 0x75);
118 err += sn9c102_write_reg(cam, 0x44, 0x76);
119 err += sn9c102_write_reg(cam, 0x00, 0x77);
120 err += sn9c102_write_reg(cam, 0xC7, 0x78);
121 err += sn9c102_write_reg(cam, 0x01, 0x79);
122 err += sn9c102_write_reg(cam, 0xC7, 0x7A);
123 err += sn9c102_write_reg(cam, 0x01, 0x7B);
124 err += sn9c102_write_reg(cam, 0xC7, 0x7C);
125 err += sn9c102_write_reg(cam, 0x01, 0x7D);
126 err += sn9c102_write_reg(cam, 0x44, 0x7E);
127 err += sn9c102_write_reg(cam, 0x00, 0x7F);
128 err += sn9c102_write_reg(cam, 0x14, 0x84);
129 err += sn9c102_write_reg(cam, 0x00, 0x85);
130 err += sn9c102_write_reg(cam, 0x27, 0x86);
131 err += sn9c102_write_reg(cam, 0x00, 0x87);
132 err += sn9c102_write_reg(cam, 0x07, 0x88);
133 err += sn9c102_write_reg(cam, 0x00, 0x89);
134 err += sn9c102_write_reg(cam, 0xEC, 0x8A);
135 err += sn9c102_write_reg(cam, 0x0f, 0x8B);
136 err += sn9c102_write_reg(cam, 0xD8, 0x8C);
137 err += sn9c102_write_reg(cam, 0x0f, 0x8D);
138 err += sn9c102_write_reg(cam, 0x3D, 0x8E);
139 err += sn9c102_write_reg(cam, 0x00, 0x8F);
140 err += sn9c102_write_reg(cam, 0x3D, 0x90);
141 err += sn9c102_write_reg(cam, 0x00, 0x91);
142 err += sn9c102_write_reg(cam, 0xCD, 0x92);
143 err += sn9c102_write_reg(cam, 0x0f, 0x93);
144 err += sn9c102_write_reg(cam, 0xf7, 0x94);
145 err += sn9c102_write_reg(cam, 0x0f, 0x95);
146 err += sn9c102_write_reg(cam, 0x0C, 0x96);
147 err += sn9c102_write_reg(cam, 0x00, 0x97);
148 err += sn9c102_write_reg(cam, 0x00, 0x98);
149 err += sn9c102_write_reg(cam, 0x66, 0x99);
150 err += sn9c102_write_reg(cam, 0x05, 0x9A);
151 err += sn9c102_write_reg(cam, 0x00, 0x9B);
152 err += sn9c102_write_reg(cam, 0x04, 0x9C);
153 err += sn9c102_write_reg(cam, 0x00, 0x9D);
154 err += sn9c102_write_reg(cam, 0x08, 0x9E);
155 err += sn9c102_write_reg(cam, 0x00, 0x9F);
156 err += sn9c102_write_reg(cam, 0x2D, 0xC0);
157 err += sn9c102_write_reg(cam, 0x2D, 0xC1);
158 err += sn9c102_write_reg(cam, 0x3A, 0xC2);
159 err += sn9c102_write_reg(cam, 0x05, 0xC3);
160 err += sn9c102_write_reg(cam, 0x04, 0xC4);
161 err += sn9c102_write_reg(cam, 0x3F, 0xC5);
162 err += sn9c102_write_reg(cam, 0x00, 0xC6);
163 err += sn9c102_write_reg(cam, 0x00, 0xC7);
164 err += sn9c102_write_reg(cam, 0x50, 0xC8);
165 err += sn9c102_write_reg(cam, 0x3C, 0xC9);
166 err += sn9c102_write_reg(cam, 0x28, 0xCA);
167 err += sn9c102_write_reg(cam, 0xD8, 0xCB);
168 err += sn9c102_write_reg(cam, 0x14, 0xCC);
169 err += sn9c102_write_reg(cam, 0xEC, 0xCD);
170 err += sn9c102_write_reg(cam, 0x32, 0xCE);
171 err += sn9c102_write_reg(cam, 0xDD, 0xCF);
172 err += sn9c102_write_reg(cam, 0x32, 0xD0);
173 err += sn9c102_write_reg(cam, 0xDD, 0xD1);
174 err += sn9c102_write_reg(cam, 0x6A, 0xD2);
175 err += sn9c102_write_reg(cam, 0x50, 0xD3);
176 err += sn9c102_write_reg(cam, 0x00, 0xD4);
177 err += sn9c102_write_reg(cam, 0x00, 0xD5);
178 err += sn9c102_write_reg(cam, 0x00, 0xD6);
179 103
180 err += sn9c102_i2c_write(cam, 0x12, 0x80); 104 err += sn9c102_i2c_write(cam, 0x12, 0x80);
181 err += sn9c102_i2c_write(cam, 0x11, 0x09); 105 err += sn9c102_i2c_write(cam, 0x11, 0x09);
@@ -572,13 +496,11 @@ static struct sn9c102_sensor ov7660 = {
572 496
573int sn9c102_probe_ov7660(struct sn9c102_device* cam) 497int sn9c102_probe_ov7660(struct sn9c102_device* cam)
574{ 498{
575 int pid, ver, err = 0; 499 int pid, ver, err;
576 500
577 err += sn9c102_write_reg(cam, 0x01, 0xf1); 501 err = sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1},
578 err += sn9c102_write_reg(cam, 0x00, 0xf1); 502 {0x01, 0x01}, {0x00, 0x01},
579 err += sn9c102_write_reg(cam, 0x01, 0x01); 503 {0x28, 0x17});
580 err += sn9c102_write_reg(cam, 0x00, 0x01);
581 err += sn9c102_write_reg(cam, 0x28, 0x17);
582 504
583 pid = sn9c102_i2c_try_read(cam, &ov7660, 0x0a); 505 pid = sn9c102_i2c_try_read(cam, &ov7660, 0x0a);
584 ver = sn9c102_i2c_try_read(cam, &ov7660, 0x0b); 506 ver = sn9c102_i2c_try_read(cam, &ov7660, 0x0b);
diff --git a/drivers/media/video/sn9c102/sn9c102_pas106b.c b/drivers/media/video/sn9c102/sn9c102_pas106b.c
index 8d79a5fae5de..67151964801f 100644
--- a/drivers/media/video/sn9c102/sn9c102_pas106b.c
+++ b/drivers/media/video/sn9c102/sn9c102_pas106b.c
@@ -23,19 +23,13 @@
23#include "sn9c102_sensor.h" 23#include "sn9c102_sensor.h"
24 24
25 25
26static struct sn9c102_sensor pas106b;
27
28
29static int pas106b_init(struct sn9c102_device* cam) 26static int pas106b_init(struct sn9c102_device* cam)
30{ 27{
31 int err = 0; 28 int err = 0;
32 29
33 err += sn9c102_write_reg(cam, 0x00, 0x10); 30 err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11},
34 err += sn9c102_write_reg(cam, 0x00, 0x11); 31 {0x00, 0x14}, {0x20, 0x17},
35 err += sn9c102_write_reg(cam, 0x00, 0x14); 32 {0x20, 0x19}, {0x09, 0x18});
36 err += sn9c102_write_reg(cam, 0x20, 0x17);
37 err += sn9c102_write_reg(cam, 0x20, 0x19);
38 err += sn9c102_write_reg(cam, 0x09, 0x18);
39 33
40 err += sn9c102_i2c_write(cam, 0x02, 0x0c); 34 err += sn9c102_i2c_write(cam, 0x02, 0x0c);
41 err += sn9c102_i2c_write(cam, 0x05, 0x5a); 35 err += sn9c102_i2c_write(cam, 0x05, 0x5a);
@@ -172,7 +166,7 @@ static int pas106b_set_pix_format(struct sn9c102_device* cam,
172static struct sn9c102_sensor pas106b = { 166static struct sn9c102_sensor pas106b = {
173 .name = "PAS106B", 167 .name = "PAS106B",
174 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", 168 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
175 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103, 169 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102,
176 .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, 170 .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE,
177 .frequency = SN9C102_I2C_400KHZ | SN9C102_I2C_100KHZ, 171 .frequency = SN9C102_I2C_400KHZ | SN9C102_I2C_100KHZ,
178 .interface = SN9C102_I2C_2WIRES, 172 .interface = SN9C102_I2C_2WIRES,
@@ -279,16 +273,17 @@ static struct sn9c102_sensor pas106b = {
279 273
280int sn9c102_probe_pas106b(struct sn9c102_device* cam) 274int sn9c102_probe_pas106b(struct sn9c102_device* cam)
281{ 275{
282 int r0 = 0, r1 = 0, err = 0; 276 int r0 = 0, r1 = 0, err;
283 unsigned int pid = 0; 277 unsigned int pid = 0;
284 278
285 /* 279 /*
286 Minimal initialization to enable the I2C communication 280 Minimal initialization to enable the I2C communication
287 NOTE: do NOT change the values! 281 NOTE: do NOT change the values!
288 */ 282 */
289 err += sn9c102_write_reg(cam, 0x01, 0x01); /* sensor power down */ 283 err = sn9c102_write_const_regs(cam,
290 err += sn9c102_write_reg(cam, 0x00, 0x01); /* sensor power on */ 284 {0x01, 0x01}, /* sensor power down */
291 err += sn9c102_write_reg(cam, 0x28, 0x17); /* sensor clock at 24 MHz */ 285 {0x00, 0x01}, /* sensor power on */
286 {0x28, 0x17});/* sensor clock 24 MHz */
292 if (err) 287 if (err)
293 return -EIO; 288 return -EIO;
294 289
diff --git a/drivers/media/video/sn9c102/sn9c102_pas202bcb.c b/drivers/media/video/sn9c102/sn9c102_pas202bcb.c
index 7894f01b56e8..c1b8d6b63b47 100644
--- a/drivers/media/video/sn9c102/sn9c102_pas202bcb.c
+++ b/drivers/media/video/sn9c102/sn9c102_pas202bcb.c
@@ -28,9 +28,6 @@
28#include "sn9c102_sensor.h" 28#include "sn9c102_sensor.h"
29 29
30 30
31static struct sn9c102_sensor pas202bcb;
32
33
34static int pas202bcb_init(struct sn9c102_device* cam) 31static int pas202bcb_init(struct sn9c102_device* cam)
35{ 32{
36 int err = 0; 33 int err = 0;
@@ -38,47 +35,29 @@ static int pas202bcb_init(struct sn9c102_device* cam)
38 switch (sn9c102_get_bridge(cam)) { 35 switch (sn9c102_get_bridge(cam)) {
39 case BRIDGE_SN9C101: 36 case BRIDGE_SN9C101:
40 case BRIDGE_SN9C102: 37 case BRIDGE_SN9C102:
41 err += sn9c102_write_reg(cam, 0x00, 0x10); 38 err = sn9c102_write_const_regs(cam, {0x00, 0x10},
42 err += sn9c102_write_reg(cam, 0x00, 0x11); 39 {0x00, 0x11}, {0x00, 0x14},
43 err += sn9c102_write_reg(cam, 0x00, 0x14); 40 {0x20, 0x17}, {0x30, 0x19},
44 err += sn9c102_write_reg(cam, 0x20, 0x17); 41 {0x09, 0x18});
45 err += sn9c102_write_reg(cam, 0x30, 0x19);
46 err += sn9c102_write_reg(cam, 0x09, 0x18);
47 break; 42 break;
48 case BRIDGE_SN9C103: 43 case BRIDGE_SN9C103:
49 err += sn9c102_write_reg(cam, 0x00, 0x02); 44 err = sn9c102_write_const_regs(cam, {0x00, 0x02},
50 err += sn9c102_write_reg(cam, 0x00, 0x03); 45 {0x00, 0x03}, {0x1a, 0x04},
51 err += sn9c102_write_reg(cam, 0x1a, 0x04); 46 {0x20, 0x05}, {0x20, 0x06},
52 err += sn9c102_write_reg(cam, 0x20, 0x05); 47 {0x20, 0x07}, {0x00, 0x10},
53 err += sn9c102_write_reg(cam, 0x20, 0x06); 48 {0x00, 0x11}, {0x00, 0x14},
54 err += sn9c102_write_reg(cam, 0x20, 0x07); 49 {0x20, 0x17}, {0x30, 0x19},
55 err += sn9c102_write_reg(cam, 0x00, 0x10); 50 {0x09, 0x18}, {0x02, 0x1c},
56 err += sn9c102_write_reg(cam, 0x00, 0x11); 51 {0x03, 0x1d}, {0x0f, 0x1e},
57 err += sn9c102_write_reg(cam, 0x00, 0x14); 52 {0x0c, 0x1f}, {0x00, 0x20},
58 err += sn9c102_write_reg(cam, 0x20, 0x17); 53 {0x10, 0x21}, {0x20, 0x22},
59 err += sn9c102_write_reg(cam, 0x30, 0x19); 54 {0x30, 0x23}, {0x40, 0x24},
60 err += sn9c102_write_reg(cam, 0x09, 0x18); 55 {0x50, 0x25}, {0x60, 0x26},
61 err += sn9c102_write_reg(cam, 0x02, 0x1c); 56 {0x70, 0x27}, {0x80, 0x28},
62 err += sn9c102_write_reg(cam, 0x03, 0x1d); 57 {0x90, 0x29}, {0xa0, 0x2a},
63 err += sn9c102_write_reg(cam, 0x0f, 0x1e); 58 {0xb0, 0x2b}, {0xc0, 0x2c},
64 err += sn9c102_write_reg(cam, 0x0c, 0x1f); 59 {0xd0, 0x2d}, {0xe0, 0x2e},
65 err += sn9c102_write_reg(cam, 0x00, 0x20); 60 {0xf0, 0x2f}, {0xff, 0x30});
66 err += sn9c102_write_reg(cam, 0x10, 0x21);
67 err += sn9c102_write_reg(cam, 0x20, 0x22);
68 err += sn9c102_write_reg(cam, 0x30, 0x23);
69 err += sn9c102_write_reg(cam, 0x40, 0x24);
70 err += sn9c102_write_reg(cam, 0x50, 0x25);
71 err += sn9c102_write_reg(cam, 0x60, 0x26);
72 err += sn9c102_write_reg(cam, 0x70, 0x27);
73 err += sn9c102_write_reg(cam, 0x80, 0x28);
74 err += sn9c102_write_reg(cam, 0x90, 0x29);
75 err += sn9c102_write_reg(cam, 0xa0, 0x2a);
76 err += sn9c102_write_reg(cam, 0xb0, 0x2b);
77 err += sn9c102_write_reg(cam, 0xc0, 0x2c);
78 err += sn9c102_write_reg(cam, 0xd0, 0x2d);
79 err += sn9c102_write_reg(cam, 0xe0, 0x2e);
80 err += sn9c102_write_reg(cam, 0xf0, 0x2f);
81 err += sn9c102_write_reg(cam, 0xff, 0x30);
82 break; 61 break;
83 default: 62 default:
84 break; 63 break;
@@ -328,15 +307,15 @@ int sn9c102_probe_pas202bcb(struct sn9c102_device* cam)
328 switch (sn9c102_get_bridge(cam)) { 307 switch (sn9c102_get_bridge(cam)) {
329 case BRIDGE_SN9C101: 308 case BRIDGE_SN9C101:
330 case BRIDGE_SN9C102: 309 case BRIDGE_SN9C102:
331 err += sn9c102_write_reg(cam, 0x01, 0x01); /* power down */ 310 err = sn9c102_write_const_regs(cam,
332 err += sn9c102_write_reg(cam, 0x40, 0x01); /* power on */ 311 {0x01, 0x01}, /* power down */
333 err += sn9c102_write_reg(cam, 0x28, 0x17); /* clock 24 MHz */ 312 {0x40, 0x01}, /* power on */
313 {0x28, 0x17});/* clock 24 MHz */
334 break; 314 break;
335 case BRIDGE_SN9C103: /* do _not_ change anything! */ 315 case BRIDGE_SN9C103: /* do _not_ change anything! */
336 err += sn9c102_write_reg(cam, 0x09, 0x01); 316 err = sn9c102_write_const_regs(cam, {0x09, 0x01},
337 err += sn9c102_write_reg(cam, 0x44, 0x01); 317 {0x44, 0x01}, {0x44, 0x02},
338 err += sn9c102_write_reg(cam, 0x44, 0x02); 318 {0x29, 0x17});
339 err += sn9c102_write_reg(cam, 0x29, 0x17);
340 break; 319 break;
341 default: 320 default:
342 break; 321 break;
diff --git a/drivers/media/video/sn9c102/sn9c102_sensor.h b/drivers/media/video/sn9c102/sn9c102_sensor.h
index 05f2942639c3..1bbf64c897a2 100644
--- a/drivers/media/video/sn9c102/sn9c102_sensor.h
+++ b/drivers/media/video/sn9c102/sn9c102_sensor.h
@@ -114,9 +114,17 @@ extern int sn9c102_i2c_write(struct sn9c102_device*, u8 address, u8 value);
114extern int sn9c102_i2c_read(struct sn9c102_device*, u8 address); 114extern int sn9c102_i2c_read(struct sn9c102_device*, u8 address);
115 115
116/* I/O on registers in the bridge. Could be used by the sensor methods too */ 116/* I/O on registers in the bridge. Could be used by the sensor methods too */
117extern int sn9c102_write_regs(struct sn9c102_device*, u8* buff, u16 index);
118extern int sn9c102_write_reg(struct sn9c102_device*, u8 value, u16 index);
119extern int sn9c102_pread_reg(struct sn9c102_device*, u16 index); 117extern int sn9c102_pread_reg(struct sn9c102_device*, u16 index);
118extern int sn9c102_write_reg(struct sn9c102_device*, u8 value, u16 index);
119extern int sn9c102_write_regs(struct sn9c102_device*, const u8 valreg[][2],
120 int count);
121/*
122 * Write multiple registers with constant values. For example:
123 * sn9c102_write_const_regs(cam, {0x00, 0x14}, {0x60, 0x17}, {0x0f, 0x18});
124 */
125#define sn9c102_write_const_regs(device, data...) \
126 ({ const static u8 _data[][2] = {data}; \
127 sn9c102_write_regs(device, _data, ARRAY_SIZE(_data)); })
120 128
121/*****************************************************************************/ 129/*****************************************************************************/
122 130
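Note on the helper introduced in the sn9c102_sensor.h hunk above: sn9c102_write_const_regs() is a variadic macro rather than a function. Each {value, index} pair passed to it becomes one row of a local two-column table, and the whole table is handed to sn9c102_write_regs() in a single call. As a rough sketch of what the example in the comment amounts to (illustrative only; the real macro uses a GCC statement expression, and the _data name is internal to it):

	/* sketch of the macro expansion, not part of the patch */
	static const u8 _data[][2] = { {0x00, 0x14}, {0x60, 0x17}, {0x0f, 0x18} };
	err = sn9c102_write_regs(cam, _data, ARRAY_SIZE(_data));

Because the table is declared static const, it is emitted once at build time instead of being rebuilt on every call, which appears to be why the long runs of individual sn9c102_write_reg() calls in the sensor init and probe routines above could be collapsed into single sn9c102_write_const_regs() invocations.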
diff --git a/drivers/media/video/sn9c102/sn9c102_tas5110c1b.c b/drivers/media/video/sn9c102/sn9c102_tas5110c1b.c
index 90023ad63adc..0e7ec8662c70 100644
--- a/drivers/media/video/sn9c102/sn9c102_tas5110c1b.c
+++ b/drivers/media/video/sn9c102/sn9c102_tas5110c1b.c
@@ -22,21 +22,14 @@
22#include "sn9c102_sensor.h" 22#include "sn9c102_sensor.h"
23 23
24 24
25static struct sn9c102_sensor tas5110c1b;
26
27
28static int tas5110c1b_init(struct sn9c102_device* cam) 25static int tas5110c1b_init(struct sn9c102_device* cam)
29{ 26{
30 int err = 0; 27 int err = 0;
31 28
32 err += sn9c102_write_reg(cam, 0x01, 0x01); 29 err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x44, 0x01},
33 err += sn9c102_write_reg(cam, 0x44, 0x01); 30 {0x00, 0x10}, {0x00, 0x11},
34 err += sn9c102_write_reg(cam, 0x00, 0x10); 31 {0x0a, 0x14}, {0x60, 0x17},
35 err += sn9c102_write_reg(cam, 0x00, 0x11); 32 {0x06, 0x18}, {0xfb, 0x19});
36 err += sn9c102_write_reg(cam, 0x0a, 0x14);
37 err += sn9c102_write_reg(cam, 0x60, 0x17);
38 err += sn9c102_write_reg(cam, 0x06, 0x18);
39 err += sn9c102_write_reg(cam, 0xfb, 0x19);
40 33
41 err += sn9c102_i2c_write(cam, 0xc0, 0x80); 34 err += sn9c102_i2c_write(cam, 0xc0, 0x80);
42 35
@@ -98,7 +91,7 @@ static int tas5110c1b_set_pix_format(struct sn9c102_device* cam,
98static struct sn9c102_sensor tas5110c1b = { 91static struct sn9c102_sensor tas5110c1b = {
99 .name = "TAS5110C1B", 92 .name = "TAS5110C1B",
100 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", 93 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
101 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103, 94 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102,
102 .sysfs_ops = SN9C102_I2C_WRITE, 95 .sysfs_ops = SN9C102_I2C_WRITE,
103 .frequency = SN9C102_I2C_100KHZ, 96 .frequency = SN9C102_I2C_100KHZ,
104 .interface = SN9C102_I2C_3WIRES, 97 .interface = SN9C102_I2C_3WIRES,
@@ -146,7 +139,6 @@ int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam)
146 const struct usb_device_id tas5110c1b_id_table[] = { 139 const struct usb_device_id tas5110c1b_id_table[] = {
147 { USB_DEVICE(0x0c45, 0x6001), }, 140 { USB_DEVICE(0x0c45, 0x6001), },
148 { USB_DEVICE(0x0c45, 0x6005), }, 141 { USB_DEVICE(0x0c45, 0x6005), },
149 { USB_DEVICE(0x0c45, 0x6007), },
150 { USB_DEVICE(0x0c45, 0x60ab), }, 142 { USB_DEVICE(0x0c45, 0x60ab), },
151 { } 143 { }
152 }; 144 };
diff --git a/drivers/media/video/sn9c102/sn9c102_tas5110d.c b/drivers/media/video/sn9c102/sn9c102_tas5110d.c
new file mode 100644
index 000000000000..83a39e8b5e71
--- /dev/null
+++ b/drivers/media/video/sn9c102/sn9c102_tas5110d.c
@@ -0,0 +1,118 @@
1/***************************************************************************
2 * Plug-in for TAS5110D image sensor connected to the SN9C1xx PC Camera *
3 * Controllers *
4 * *
5 * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
6 * *
7 * This program is free software; you can redistribute it and/or modify *
8 * it under the terms of the GNU General Public License as published by *
9 * the Free Software Foundation; either version 2 of the License, or *
10 * (at your option) any later version. *
11 * *
12 * This program is distributed in the hope that it will be useful, *
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
15 * GNU General Public License for more details. *
16 * *
17 * You should have received a copy of the GNU General Public License *
18 * along with this program; if not, write to the Free Software *
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
20 ***************************************************************************/
21
22#include "sn9c102_sensor.h"
23
24
25static int tas5110d_init(struct sn9c102_device* cam)
26{
27 int err;
28
29 err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x04, 0x01},
30 {0x0a, 0x14}, {0x60, 0x17},
31 {0x06, 0x18}, {0xfb, 0x19});
32
33 err += sn9c102_i2c_write(cam, 0x9a, 0xca);
34
35 return err;
36}
37
38
39static int tas5110d_set_crop(struct sn9c102_device* cam,
40 const struct v4l2_rect* rect)
41{
42 struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
43 int err = 0;
44 u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 69,
45 v_start = (u8)(rect->top - s->cropcap.bounds.top) + 9;
46
47 err += sn9c102_write_reg(cam, h_start, 0x12);
48 err += sn9c102_write_reg(cam, v_start, 0x13);
49
50 err += sn9c102_write_reg(cam, 0x14, 0x1a);
51 err += sn9c102_write_reg(cam, 0x0a, 0x1b);
52
53 return err;
54}
55
56
57static int tas5110d_set_pix_format(struct sn9c102_device* cam,
58 const struct v4l2_pix_format* pix)
59{
60 int err = 0;
61
62 if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X)
63 err += sn9c102_write_reg(cam, 0x3b, 0x19);
64 else
65 err += sn9c102_write_reg(cam, 0xfb, 0x19);
66
67 return err;
68}
69
70
71static struct sn9c102_sensor tas5110d = {
72 .name = "TAS5110D",
73 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
74 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102,
75 .sysfs_ops = SN9C102_I2C_WRITE,
76 .frequency = SN9C102_I2C_100KHZ,
77 .interface = SN9C102_I2C_2WIRES,
78 .i2c_slave_id = 0x61,
79 .init = &tas5110d_init,
80 .cropcap = {
81 .bounds = {
82 .left = 0,
83 .top = 0,
84 .width = 352,
85 .height = 288,
86 },
87 .defrect = {
88 .left = 0,
89 .top = 0,
90 .width = 352,
91 .height = 288,
92 },
93 },
94 .set_crop = &tas5110d_set_crop,
95 .pix_format = {
96 .width = 352,
97 .height = 288,
98 .pixelformat = V4L2_PIX_FMT_SBGGR8,
99 .priv = 8,
100 },
101 .set_pix_format = &tas5110d_set_pix_format
102};
103
104
105int sn9c102_probe_tas5110d(struct sn9c102_device* cam)
106{
107 const struct usb_device_id tas5110d_id_table[] = {
108 { USB_DEVICE(0x0c45, 0x6007), },
109 { }
110 };
111
112 if (!sn9c102_match_id(cam, tas5110d_id_table))
113 return -ENODEV;
114
115 sn9c102_attach_sensor(cam, &tas5110d);
116
117 return 0;
118}
diff --git a/drivers/media/video/sn9c102/sn9c102_tas5130d1b.c b/drivers/media/video/sn9c102/sn9c102_tas5130d1b.c
index cb1b318bc1ff..50406503fc40 100644
--- a/drivers/media/video/sn9c102/sn9c102_tas5130d1b.c
+++ b/drivers/media/video/sn9c102/sn9c102_tas5130d1b.c
@@ -22,21 +22,14 @@
22#include "sn9c102_sensor.h" 22#include "sn9c102_sensor.h"
23 23
24 24
25static struct sn9c102_sensor tas5130d1b;
26
27
28static int tas5130d1b_init(struct sn9c102_device* cam) 25static int tas5130d1b_init(struct sn9c102_device* cam)
29{ 26{
30 int err = 0; 27 int err;
31 28
32 err += sn9c102_write_reg(cam, 0x01, 0x01); 29 err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x20, 0x17},
33 err += sn9c102_write_reg(cam, 0x20, 0x17); 30 {0x04, 0x01}, {0x01, 0x10},
34 err += sn9c102_write_reg(cam, 0x04, 0x01); 31 {0x00, 0x11}, {0x00, 0x14},
35 err += sn9c102_write_reg(cam, 0x01, 0x10); 32 {0x60, 0x17}, {0x07, 0x18});
36 err += sn9c102_write_reg(cam, 0x00, 0x11);
37 err += sn9c102_write_reg(cam, 0x00, 0x14);
38 err += sn9c102_write_reg(cam, 0x60, 0x17);
39 err += sn9c102_write_reg(cam, 0x07, 0x18);
40 33
41 return err; 34 return err;
42} 35}
@@ -99,7 +92,7 @@ static int tas5130d1b_set_pix_format(struct sn9c102_device* cam,
99static struct sn9c102_sensor tas5130d1b = { 92static struct sn9c102_sensor tas5130d1b = {
100 .name = "TAS5130D1B", 93 .name = "TAS5130D1B",
101 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", 94 .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
102 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103, 95 .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102,
103 .sysfs_ops = SN9C102_I2C_WRITE, 96 .sysfs_ops = SN9C102_I2C_WRITE,
104 .frequency = SN9C102_I2C_100KHZ, 97 .frequency = SN9C102_I2C_100KHZ,
105 .interface = SN9C102_I2C_3WIRES, 98 .interface = SN9C102_I2C_3WIRES,
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index d1ccc064206f..43225802a551 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -45,7 +45,6 @@
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/videodev.h> 46#include <linux/videodev.h>
47#include <linux/i2c.h> 47#include <linux/i2c.h>
48#include <linux/i2c-algo-bit.h>
49 48
50#include <media/v4l2-common.h> 49#include <media/v4l2-common.h>
51#include <media/i2c-addr.h> 50#include <media/i2c-addr.h>
diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c
index 027c8a074dfe..1a1bef0e9c3d 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/video/tda8290.c
@@ -192,14 +192,52 @@ static struct tda827xa_data tda827xa_analog[] = {
192 { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0} /* End */ 192 { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0} /* End */
193}; 193};
194 194
195static void tda827xa_lna_gain(struct i2c_client *c, int high)
196{
197 struct tuner *t = i2c_get_clientdata(c);
198 unsigned char buf[] = {0x22, 0x01};
199 int arg;
200 struct i2c_msg msg = {.addr = c->addr, .flags = 0, .buf = buf, .len = sizeof(buf)};
201 if (t->config) {
202 if (high)
203 tuner_dbg("setting LNA to high gain\n");
204 else
205 tuner_dbg("setting LNA to low gain\n");
206 }
207 switch (t->config) {
208 case 0: /* no LNA */
209 break;
210 case 1: /* switch is GPIO 0 of tda8290 */
211 case 2:
212 /* turn Vsync on */
213 if (t->std & V4L2_STD_MN)
214 arg = 1;
215 else
216 arg = 0;
217 if (t->tuner_callback)
218 t->tuner_callback(c->adapter->algo_data, 1, arg);
219 buf[1] = high ? 0 : 1;
220 if (t->config == 2)
221 buf[1] = high ? 1 : 0;
222 i2c_transfer(c->adapter, &msg, 1);
223 break;
224 case 3: /* switch with GPIO of saa713x */
225 if (t->tuner_callback)
226 t->tuner_callback(c->adapter->algo_data, 0, high);
227 break;
228 }
229}
230
195static void tda827xa_tune(struct i2c_client *c, u16 ifc, unsigned int freq) 231static void tda827xa_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
196{ 232{
197 unsigned char tuner_reg[14]; 233 unsigned char tuner_reg[11];
198 unsigned char reg2[2];
199 u32 N; 234 u32 N;
200 int i; 235 int i;
201 struct tuner *t = i2c_get_clientdata(c); 236 struct tuner *t = i2c_get_clientdata(c);
202 struct i2c_msg msg = {.addr = t->tda827x_addr, .flags = 0}; 237 struct i2c_msg msg = {.addr = t->tda827x_addr, .flags = 0, .buf = tuner_reg};
238
239 tda827xa_lna_gain( c, 1);
240 msleep(10);
203 241
204 if (t->mode == V4L2_TUNER_RADIO) 242 if (t->mode == V4L2_TUNER_RADIO)
205 freq = freq / 1000; 243 freq = freq / 1000;
@@ -222,48 +260,58 @@ static void tda827xa_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
222 tuner_reg[5] = (tda827xa_analog[i].spd << 5) + (tda827xa_analog[i].svco << 3) + 260 tuner_reg[5] = (tda827xa_analog[i].spd << 5) + (tda827xa_analog[i].svco << 3) +
223 tda827xa_analog[i].sbs; 261 tda827xa_analog[i].sbs;
224 tuner_reg[6] = 0x8b + (tda827xa_analog[i].gc3 << 4); 262 tuner_reg[6] = 0x8b + (tda827xa_analog[i].gc3 << 4);
225 tuner_reg[7] = 0x0c; 263 tuner_reg[7] = 0x1c;
226 tuner_reg[8] = 4; 264 tuner_reg[8] = 4;
227 tuner_reg[9] = 0x20; 265 tuner_reg[9] = 0x20;
228 tuner_reg[10] = 0xff; 266 tuner_reg[10] = 0x00;
229 tuner_reg[11] = 0xe0; 267 msg.len = 11;
230 tuner_reg[12] = 0; 268 i2c_transfer(c->adapter, &msg, 1);
231 tuner_reg[13] = 0x39 + (t->tda827x_lpsel << 1);
232 269
233 msg.buf = tuner_reg; 270 tuner_reg[0] = 0x90;
234 msg.len = 14; 271 tuner_reg[1] = 0xff;
272 tuner_reg[2] = 0xe0;
273 tuner_reg[3] = 0;
274 tuner_reg[4] = 0x99 + (t->tda827x_lpsel << 1);
275 msg.len = 5;
235 i2c_transfer(c->adapter, &msg, 1); 276 i2c_transfer(c->adapter, &msg, 1);
236 277
237 msg.buf= reg2; 278 tuner_reg[0] = 0xa0;
279 tuner_reg[1] = 0xc0;
238 msg.len = 2; 280 msg.len = 2;
239 reg2[0] = 0x60;
240 reg2[1] = 0x3c;
241 i2c_transfer(c->adapter, &msg, 1); 281 i2c_transfer(c->adapter, &msg, 1);
242 282
243 reg2[0] = 0xa0; 283 tuner_reg[0] = 0x30;
244 reg2[1] = 0xc0; 284 tuner_reg[1] = 0x10 + tda827xa_analog[i].scr;
245 i2c_transfer(c->adapter, &msg, 1); 285 i2c_transfer(c->adapter, &msg, 1);
246 286
247 msleep(2); 287 msg.flags = I2C_M_RD;
248 reg2[0] = 0x30; 288 i2c_transfer(c->adapter, &msg, 1);
249 reg2[1] = 0x10 + tda827xa_analog[i].scr; 289 msg.flags = 0;
290 tuner_reg[1] >>= 4;
291 tuner_dbg("AGC2 gain is: %d\n", tuner_reg[1]);
292 if (tuner_reg[1] < 1)
293 tda827xa_lna_gain( c, 0);
294
295 msleep(100);
296 tuner_reg[0] = 0x60;
297 tuner_reg[1] = 0x3c;
250 i2c_transfer(c->adapter, &msg, 1); 298 i2c_transfer(c->adapter, &msg, 1);
251 299
252 msleep(550); 300 msleep(163);
253 reg2[0] = 0x50; 301 tuner_reg[0] = 0x50;
254 reg2[1] = 0x8f + (tda827xa_analog[i].gc3 << 4); 302 tuner_reg[1] = 0x8f + (tda827xa_analog[i].gc3 << 4);
255 i2c_transfer(c->adapter, &msg, 1); 303 i2c_transfer(c->adapter, &msg, 1);
256 304
257 reg2[0] = 0x80; 305 tuner_reg[0] = 0x80;
258 reg2[1] = 0x28; 306 tuner_reg[1] = 0x28;
259 i2c_transfer(c->adapter, &msg, 1); 307 i2c_transfer(c->adapter, &msg, 1);
260 308
261 reg2[0] = 0xb0; 309 tuner_reg[0] = 0xb0;
262 reg2[1] = 0x01; 310 tuner_reg[1] = 0x01;
263 i2c_transfer(c->adapter, &msg, 1); 311 i2c_transfer(c->adapter, &msg, 1);
264 312
265 reg2[0] = 0xc0; 313 tuner_reg[0] = 0xc0;
266 reg2[1] = 0x19 + (t->tda827x_lpsel << 1); 314 tuner_reg[1] = 0x19 + (t->tda827x_lpsel << 1);
267 i2c_transfer(c->adapter, &msg, 1); 315 i2c_transfer(c->adapter, &msg, 1);
268} 316}
269 317
@@ -319,7 +367,9 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
319 unsigned char addr_pll_stat = 0x1b; 367 unsigned char addr_pll_stat = 0x1b;
320 unsigned char adc_sat, agc_stat, 368 unsigned char adc_sat, agc_stat,
321 pll_stat; 369 pll_stat;
370 int i;
322 371
372 tuner_dbg("tda827xa config is 0x%02x\n", t->config);
323 i2c_master_send(c, easy_mode, 2); 373 i2c_master_send(c, easy_mode, 2);
324 i2c_master_send(c, agc_out_on, 2); 374 i2c_master_send(c, agc_out_on, 2);
325 i2c_master_send(c, soft_reset, 2); 375 i2c_master_send(c, soft_reset, 2);
@@ -340,17 +390,22 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
340 tda827xa_tune(c, ifc, freq); 390 tda827xa_tune(c, ifc, freq);
341 else 391 else
342 tda827x_tune(c, ifc, freq); 392 tda827x_tune(c, ifc, freq);
393 for (i = 0; i < 3; i++) {
394 i2c_master_send(c, &addr_pll_stat, 1);
395 i2c_master_recv(c, &pll_stat, 1);
396 if (pll_stat & 0x80) {
397 i2c_master_send(c, &addr_adc_sat, 1);
398 i2c_master_recv(c, &adc_sat, 1);
399 i2c_master_send(c, &addr_agc_stat, 1);
400 i2c_master_recv(c, &agc_stat, 1);
401 tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
402 break;
403 } else {
404 tuner_dbg("tda8290 not locked, no signal?\n");
405 msleep(100);
406 }
407 }
343 /* adjust headroom resp. gain */ 408 /* adjust headroom resp. gain */
344 i2c_master_send(c, &addr_adc_sat, 1);
345 i2c_master_recv(c, &adc_sat, 1);
346 i2c_master_send(c, &addr_agc_stat, 1);
347 i2c_master_recv(c, &agc_stat, 1);
348 i2c_master_send(c, &addr_pll_stat, 1);
349 i2c_master_recv(c, &pll_stat, 1);
350 if (pll_stat & 0x80)
351 tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
352 else
353 tuner_dbg("tda8290 not locked, no signal?\n");
354 if ((agc_stat > 115) || (!(pll_stat & 0x80) && (adc_sat < 20))) { 409 if ((agc_stat > 115) || (!(pll_stat & 0x80) && (adc_sat < 20))) {
355 tuner_dbg("adjust gain, step 1. Agc: %d, ADC stat: %d, lock: %d\n", 410 tuner_dbg("adjust gain, step 1. Agc: %d, ADC stat: %d, lock: %d\n",
356 agc_stat, adc_sat, pll_stat & 0x80); 411 agc_stat, adc_sat, pll_stat & 0x80);
@@ -407,7 +462,6 @@ static void set_audio(struct tuner *t)
407 char* mode; 462 char* mode;
408 463
409 t->tda827x_lpsel = 0; 464 t->tda827x_lpsel = 0;
410 mode = "xx";
411 if (t->std & V4L2_STD_MN) { 465 if (t->std & V4L2_STD_MN) {
412 t->sgIF = 92; 466 t->sgIF = 92;
413 t->tda8290_easy_mode = 0x01; 467 t->tda8290_easy_mode = 0x01;
@@ -437,8 +491,12 @@ static void set_audio(struct tuner *t)
437 t->sgIF = 20; 491 t->sgIF = 20;
438 t->tda8290_easy_mode = 0x40; 492 t->tda8290_easy_mode = 0x40;
439 mode = "LC"; 493 mode = "LC";
494 } else {
495 t->sgIF = 124;
496 t->tda8290_easy_mode = 0x10;
497 mode = "xx";
440 } 498 }
441 tuner_dbg("setting tda8290 to system %s\n", mode); 499 tuner_dbg("setting tda8290 to system %s\n", mode);
442} 500}
443 501
444static void set_tv_freq(struct i2c_client *c, unsigned int freq) 502static void set_tv_freq(struct i2c_client *c, unsigned int freq)
@@ -487,11 +545,16 @@ static void standby(struct i2c_client *c)
487 545
488static void tda8290_init_if(struct i2c_client *c) 546static void tda8290_init_if(struct i2c_client *c)
489{ 547{
548 struct tuner *t = i2c_get_clientdata(c);
490 unsigned char set_VS[] = { 0x30, 0x6F }; 549 unsigned char set_VS[] = { 0x30, 0x6F };
550 unsigned char set_GP00_CF[] = { 0x20, 0x01 };
491 unsigned char set_GP01_CF[] = { 0x20, 0x0B }; 551 unsigned char set_GP01_CF[] = { 0x20, 0x0B };
492 552
553 if ((t->config == 1) || (t->config == 2))
554 i2c_master_send(c, set_GP00_CF, 2);
555 else
556 i2c_master_send(c, set_GP01_CF, 2);
493 i2c_master_send(c, set_VS, 2); 557 i2c_master_send(c, set_VS, 2);
494 i2c_master_send(c, set_GP01_CF, 2);
495} 558}
496 559
497static void tda8290_init_tuner(struct i2c_client *c) 560static void tda8290_init_tuner(struct i2c_client *c)
@@ -576,6 +639,7 @@ int tda8290_init(struct i2c_client *c)
576 t->has_signal = has_signal; 639 t->has_signal = has_signal;
577 t->standby = standby; 640 t->standby = standby;
578 t->tda827x_lpsel = 0; 641 t->tda827x_lpsel = 0;
642 t->mode = V4L2_TUNER_ANALOG_TV;
579 643
580 tda8290_init_tuner(c); 644 tda8290_init_tuner(c);
581 tda8290_init_if(c); 645 tda8290_init_if(c);
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
index 00f0e8b6e03b..d11044170872 100644
--- a/drivers/media/video/tda9875.c
+++ b/drivers/media/video/tda9875.c
@@ -27,7 +27,6 @@
27#include <linux/videodev.h> 27#include <linux/videodev.h>
28#include <media/v4l2-common.h> 28#include <media/v4l2-common.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/i2c-algo-bit.h>
31#include <linux/init.h> 30#include <linux/init.h>
32 31
33 32
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 15dbc6bf42a7..505591a7abe9 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -144,7 +144,8 @@ static void set_freq(struct i2c_client *c, unsigned long freq)
144} 144}
145 145
146static void set_type(struct i2c_client *c, unsigned int type, 146static void set_type(struct i2c_client *c, unsigned int type,
147 unsigned int new_mode_mask) 147 unsigned int new_mode_mask, unsigned int new_config,
148 int (*tuner_callback) (void *dev, int command,int arg))
148{ 149{
149 struct tuner *t = i2c_get_clientdata(c); 150 struct tuner *t = i2c_get_clientdata(c);
150 unsigned char buffer[4]; 151 unsigned char buffer[4];
@@ -159,15 +160,20 @@ static void set_type(struct i2c_client *c, unsigned int type,
159 return; 160 return;
160 } 161 }
161 162
163 t->type = type;
164 t->config = new_config;
165 if (tuner_callback != NULL) {
166 tuner_dbg("defining GPIO callback\n");
167 t->tuner_callback = tuner_callback;
168 }
169
162 /* This code detects calls by card attach_inform */ 170 /* This code detects calls by card attach_inform */
163 if (NULL == t->i2c.dev.driver) { 171 if (NULL == t->i2c.dev.driver) {
164 tuner_dbg ("tuner 0x%02x: called during i2c_client register by adapter's attach_inform\n", c->addr); 172 tuner_dbg ("tuner 0x%02x: called during i2c_client register by adapter's attach_inform\n", c->addr);
165 173
166 t->type=type;
167 return; 174 return;
168 } 175 }
169 176
170 t->type = type;
171 switch (t->type) { 177 switch (t->type) {
172 case TUNER_MT2032: 178 case TUNER_MT2032:
173 microtune_init(c); 179 microtune_init(c);
@@ -234,10 +240,11 @@ static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup)
234 240
235 tuner_dbg("set addr for type %i\n", t->type); 241 tuner_dbg("set addr for type %i\n", t->type);
236 242
237 if ( t->type == UNSET && ((tun_setup->addr == ADDR_UNSET && 243 if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
238 (t->mode_mask & tun_setup->mode_mask)) || 244 (t->mode_mask & tun_setup->mode_mask))) ||
239 tun_setup->addr == c->addr)) { 245 (tun_setup->addr == c->addr)) {
240 set_type(c, tun_setup->type, tun_setup->mode_mask); 246 set_type(c, tun_setup->type, tun_setup->mode_mask,
247 tun_setup->config, tun_setup->tuner_callback);
241 } 248 }
242} 249}
243 250
@@ -496,7 +503,7 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
496register_client: 503register_client:
497 tuner_info("chip found @ 0x%x (%s)\n", addr << 1, adap->name); 504 tuner_info("chip found @ 0x%x (%s)\n", addr << 1, adap->name);
498 i2c_attach_client (&t->i2c); 505 i2c_attach_client (&t->i2c);
499 set_type (&t->i2c,t->type, t->mode_mask); 506 set_type (&t->i2c,t->type, t->mode_mask, t->config, t->tuner_callback);
500 return 0; 507 return 0;
501} 508}
502 509
@@ -576,10 +583,11 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
576 switch (cmd) { 583 switch (cmd) {
577 /* --- configuration --- */ 584 /* --- configuration --- */
578 case TUNER_SET_TYPE_ADDR: 585 case TUNER_SET_TYPE_ADDR:
579 tuner_dbg ("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x\n", 586 tuner_dbg ("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n",
580 ((struct tuner_setup *)arg)->type, 587 ((struct tuner_setup *)arg)->type,
581 ((struct tuner_setup *)arg)->addr, 588 ((struct tuner_setup *)arg)->addr,
582 ((struct tuner_setup *)arg)->mode_mask); 589 ((struct tuner_setup *)arg)->mode_mask,
590 ((struct tuner_setup *)arg)->config);
583 591
584 set_addr(client, (struct tuner_setup *)arg); 592 set_addr(client, (struct tuner_setup *)arg);
585 break; 593 break;
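The tuner-core.c hunks above thread two new pieces of per-board information from the TUNER_SET_TYPE_ADDR command down to set_type(): a config value and an optional GPIO callback, which the tda8290/tda827xa code earlier in this patch uses to switch the external LNA gain. A minimal sketch of how a bridge driver might fill struct tuner_setup under these assumptions (the callback signature is the one set_type() now accepts, the field names are those dereferenced in set_addr() above, the tuner type and mode-mask constants are only illustrative, and my_gpio_callback is a hypothetical helper):

	/* hypothetical bridge-driver callback; signature matches set_type() */
	static int my_gpio_callback(void *dev, int command, int arg)
	{
		/* drive the board GPIO that feeds the tda827xa LNA switch */
		return 0;
	}

	struct tuner_setup tun_setup = {
		.type           = TUNER_PHILIPS_TDA8290,  /* illustrative */
		.addr           = ADDR_UNSET,
		.mode_mask      = T_ANALOG_TV | T_RADIO,  /* illustrative */
		.config         = 1,  /* LNA switch on tda8290 GPIO 0, per the
		                       * tda827xa_lna_gain() cases above */
		.tuner_callback = my_gpio_callback,
	};
	/* the filled structure is then sent to the tuner clients with the
	 * TUNER_SET_TYPE_ADDR command through whatever i2c client-command
	 * helper the bridge driver already uses */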
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index d506dfaa45a9..a2da5d2affff 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -25,7 +25,6 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/videodev.h> 26#include <linux/videodev.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/i2c-algo-bit.h>
29#include <linux/init.h> 28#include <linux/init.h>
30#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
31#include <linux/kthread.h> 30#include <linux/kthread.h>
@@ -33,6 +32,7 @@
33 32
34#include <media/tvaudio.h> 33#include <media/tvaudio.h>
35#include <media/v4l2-common.h> 34#include <media/v4l2-common.h>
35#include <media/v4l2-chip-ident.h>
36 36
37#include <media/i2c-addr.h> 37#include <media/i2c-addr.h>
38 38
@@ -1775,6 +1775,9 @@ static int chip_command(struct i2c_client *client,
1775 /* the thread will call checkmode() later */ 1775 /* the thread will call checkmode() later */
1776 } 1776 }
1777 break; 1777 break;
1778
1779 case VIDIOC_G_CHIP_IDENT:
1780 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_TVAUDIO, 0);
1778 } 1781 }
1779 return 0; 1782 return 0;
1780} 1783}
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 4e7c1fa668d3..a1136da74ba8 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -163,7 +163,7 @@ hauppauge_tuner[] =
163 /* 60-69 */ 163 /* 60-69 */
164 { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"}, 164 { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"},
165 { TUNER_ABSENT, "LG M001D MK3"}, 165 { TUNER_ABSENT, "LG M001D MK3"},
166 { TUNER_ABSENT, "LG S701D MK3"}, 166 { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"},
167 { TUNER_ABSENT, "LG M701D MK3"}, 167 { TUNER_ABSENT, "LG M701D MK3"},
168 { TUNER_ABSENT, "Temic 4146FM5"}, 168 { TUNER_ABSENT, "Temic 4146FM5"},
169 { TUNER_ABSENT, "Temic 4136FY5"}, 169 { TUNER_ABSENT, "Temic 4136FY5"},
@@ -229,6 +229,36 @@ hauppauge_tuner[] =
229 /* 120-129 */ 229 /* 120-129 */
230 { TUNER_ABSENT, "Xceive XC3028"}, 230 { TUNER_ABSENT, "Xceive XC3028"},
231 { TUNER_ABSENT, "Philips FQ1216LME MK5"}, 231 { TUNER_ABSENT, "Philips FQ1216LME MK5"},
232 { TUNER_ABSENT, "Philips FQD1216LME"},
233 { TUNER_ABSENT, "Conexant CX24118A"},
234 { TUNER_ABSENT, "TCL DMF11WIP"},
235 { TUNER_ABSENT, "TCL MFNM05_4H_E"},
236 { TUNER_ABSENT, "TCL MNM05_4H_E"},
237 { TUNER_ABSENT, "TCL MPE05_2H_E"},
238 { TUNER_ABSENT, "TCL MQNM05_4_U"},
239 { TUNER_ABSENT, "TCL M2523_5NH_E"},
240 /* 130-139 */
241 { TUNER_ABSENT, "TCL M2523_3DBH_E"},
242 { TUNER_ABSENT, "TCL M2523_3DIH_E"},
243 { TUNER_ABSENT, "TCL MFPE05_2_U"},
244 { TUNER_ABSENT, "Philips FMD1216MEX"},
245 { TUNER_ABSENT, "Philips FRH2036B"},
246 { TUNER_ABSENT, "Panasonic ENGF75_01GF"},
247 { TUNER_ABSENT, "MaxLinear MXL5005"},
248 { TUNER_ABSENT, "MaxLinear MXL5003"},
249 { TUNER_ABSENT, "Xceive XC2028"},
250 { TUNER_ABSENT, "Microtune MT2131"},
251 /* 140-149 */
252 { TUNER_ABSENT, "Philips 8275A_8295"},
253 { TUNER_ABSENT, "TCL MF02GIP_5N_E"},
254 { TUNER_ABSENT, "TCL MF02GIP_3DB_E"},
255 { TUNER_ABSENT, "TCL MF02GIP_3DI_E"},
256 { TUNER_ABSENT, "Microtune MT2266"},
257 { TUNER_ABSENT, "TCL MF10WPP_4N_E"},
258 { TUNER_ABSENT, "LG TAPQ_H702F"},
259 { TUNER_ABSENT, "TCL M09WPP_4N_E"},
260 { TUNER_ABSENT, "MaxLinear MXL5005_v2"},
261 { TUNER_ABSENT, "Philips 18271_8295"},
232}; 262};
233 263
234static struct HAUPPAUGE_AUDIOIC 264static struct HAUPPAUGE_AUDIOIC
@@ -280,11 +310,16 @@ audioIC[] =
280 {AUDIO_CHIP_INTERNAL, "CX883"}, 310 {AUDIO_CHIP_INTERNAL, "CX883"},
281 {AUDIO_CHIP_INTERNAL, "CX882"}, 311 {AUDIO_CHIP_INTERNAL, "CX882"},
282 {AUDIO_CHIP_INTERNAL, "CX25840"}, 312 {AUDIO_CHIP_INTERNAL, "CX25840"},
283 /* 35-38 */ 313 /* 35-39 */
284 {AUDIO_CHIP_INTERNAL, "CX25841"}, 314 {AUDIO_CHIP_INTERNAL, "CX25841"},
285 {AUDIO_CHIP_INTERNAL, "CX25842"}, 315 {AUDIO_CHIP_INTERNAL, "CX25842"},
286 {AUDIO_CHIP_INTERNAL, "CX25843"}, 316 {AUDIO_CHIP_INTERNAL, "CX25843"},
287 {AUDIO_CHIP_INTERNAL, "CX23418"}, 317 {AUDIO_CHIP_INTERNAL, "CX23418"},
318 {AUDIO_CHIP_INTERNAL, "CX23885"},
319 /* 40-42 */
320 {AUDIO_CHIP_INTERNAL, "CX23888"},
321 {AUDIO_CHIP_INTERNAL, "SAA7131"},
322 {AUDIO_CHIP_INTERNAL, "CX23887"},
288}; 323};
289 324
290/* This list is supplied by Hauppauge. Thanks! */ 325/* This list is supplied by Hauppauge. Thanks! */
@@ -301,8 +336,10 @@ static const char *decoderIC[] = {
301 "CX880", "CX881", "CX883", "SAA7111", "SAA7113", 336 "CX880", "CX881", "CX883", "SAA7111", "SAA7113",
302 /* 25-29 */ 337 /* 25-29 */
303 "CX882", "TVP5150A", "CX25840", "CX25841", "CX25842", 338 "CX882", "TVP5150A", "CX25840", "CX25841", "CX25842",
304 /* 30-31 */ 339 /* 30-34 */
305 "CX25843", "CX23418", 340 "CX25843", "CX23418", "NEC61153", "CX23885", "CX23888",
341 /* 35-37 */
342 "SAA7131", "CX25837", "CX23887"
306}; 343};
307 344
308static int hasRadioTuner(int tunerType) 345static int hasRadioTuner(int tunerType)
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
index 28d1133a3b7a..0b2a961efd22 100644
--- a/drivers/media/video/upd64031a.c
+++ b/drivers/media/video/upd64031a.c
@@ -27,6 +27,7 @@
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/videodev2.h> 28#include <linux/videodev2.h>
29#include <media/v4l2-common.h> 29#include <media/v4l2-common.h>
30#include <media/v4l2-chip-ident.h>
30#include <media/upd64031a.h> 31#include <media/upd64031a.h>
31 32
32// --------------------- read registers functions define ----------------------- 33// --------------------- read registers functions define -----------------------
@@ -179,6 +180,9 @@ static int upd64031a_command(struct i2c_client *client, unsigned int cmd, void *
179 } 180 }
180#endif 181#endif
181 182
183 case VIDIOC_G_CHIP_IDENT:
184 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_UPD64031A, 0);
185
182 default: 186 default:
183 break; 187 break;
184 } 188 }
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index fe38224150d8..401bd21f46eb 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -26,6 +26,7 @@
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/videodev2.h> 27#include <linux/videodev2.h>
28#include <media/v4l2-common.h> 28#include <media/v4l2-common.h>
29#include <media/v4l2-chip-ident.h>
29#include <media/upd64083.h> 30#include <media/upd64083.h>
30 31
31MODULE_DESCRIPTION("uPD64083 driver"); 32MODULE_DESCRIPTION("uPD64083 driver");
@@ -155,6 +156,10 @@ static int upd64083_command(struct i2c_client *client, unsigned int cmd, void *a
155 break; 156 break;
156 } 157 }
157#endif 158#endif
159
160 case VIDIOC_G_CHIP_IDENT:
161 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_UPD64083, 0);
162
158 default: 163 default:
159 break; 164 break;
160 } 165 }
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index d34d8c8b7376..687f026753b2 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -628,24 +628,21 @@ EXPORT_SYMBOL(usbvideo_HexDump);
628/* ******************************************************************** */ 628/* ******************************************************************** */
629 629
630/* XXX: this piece of crap really wants some error handling.. */ 630/* XXX: this piece of crap really wants some error handling.. */
631static void usbvideo_ClientIncModCount(struct uvd *uvd) 631static int usbvideo_ClientIncModCount(struct uvd *uvd)
632{ 632{
633 if (uvd == NULL) { 633 if (uvd == NULL) {
634 err("%s: uvd == NULL", __FUNCTION__); 634 err("%s: uvd == NULL", __FUNCTION__);
635 return; 635 return -EINVAL;
636 } 636 }
637 if (uvd->handle == NULL) { 637 if (uvd->handle == NULL) {
638 err("%s: uvd->handle == NULL", __FUNCTION__); 638 err("%s: uvd->handle == NULL", __FUNCTION__);
639 return; 639 return -EINVAL;
640 }
641 if (uvd->handle->md_module == NULL) {
642 err("%s: uvd->handle->md_module == NULL", __FUNCTION__);
643 return;
644 } 640 }
645 if (!try_module_get(uvd->handle->md_module)) { 641 if (!try_module_get(uvd->handle->md_module)) {
646 err("%s: try_module_get() == 0", __FUNCTION__); 642 err("%s: try_module_get() == 0", __FUNCTION__);
647 return; 643 return -ENODEV;
648 } 644 }
645 return 0;
649} 646}
650 647
651static void usbvideo_ClientDecModCount(struct uvd *uvd) 648static void usbvideo_ClientDecModCount(struct uvd *uvd)
@@ -712,8 +709,6 @@ int usbvideo_register(
712 cams->num_cameras = num_cams; 709 cams->num_cameras = num_cams;
713 cams->cam = (struct uvd *) &cams[1]; 710 cams->cam = (struct uvd *) &cams[1];
714 cams->md_module = md; 711 cams->md_module = md;
715 if (cams->md_module == NULL)
716 warn("%s: module == NULL!", __FUNCTION__);
717 mutex_init(&cams->lock); /* to 1 == available */ 712 mutex_init(&cams->lock); /* to 1 == available */
718 713
719 for (i = 0; i < num_cams; i++) { 714 for (i = 0; i < num_cams; i++) {
@@ -1119,7 +1114,8 @@ static int usbvideo_v4l_open(struct inode *inode, struct file *file)
1119 if (uvd->debug > 1) 1114 if (uvd->debug > 1)
1120 info("%s($%p)", __FUNCTION__, dev); 1115 info("%s($%p)", __FUNCTION__, dev);
1121 1116
1122 usbvideo_ClientIncModCount(uvd); 1117 if (0 < usbvideo_ClientIncModCount(uvd))
1118 return -ENODEV;
1123 mutex_lock(&uvd->lock); 1119 mutex_lock(&uvd->lock);
1124 1120
1125 if (uvd->user) { 1121 if (uvd->user) {
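usbvideo_ClientIncModCount() now reports failure instead of silently returning: -EINVAL for a missing uvd or handle, -ENODEV when try_module_get() fails, and 0 on success. Because every failure is a negative errno, a caller that wants to abort the open when the module reference cannot be taken would test for a negative result and propagate it; the fragment below is an illustrative calling pattern under that convention, not a quote of the driver.

	/* Illustrative caller: take the module reference before touching the
	 * device and give up on the open if that fails. */
	int ret = usbvideo_ClientIncModCount(uvd);

	if (ret < 0)
		return ret;	/* -EINVAL or -ENODEV from the helper */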
diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c
index a40e5838515b..13f69fe6360d 100644
--- a/drivers/media/video/usbvision/usbvision-cards.c
+++ b/drivers/media/video/usbvision/usbvision-cards.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * USBVISION.H 2 * usbvision-cards.c
3 * usbvision header file 3 * usbvision cards definition file
4 * 4 *
5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> 5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
6 * 6 *
@@ -28,129 +28,1060 @@
28#include <media/v4l2-dev.h> 28#include <media/v4l2-dev.h>
29#include <media/tuner.h> 29#include <media/tuner.h>
30#include "usbvision.h" 30#include "usbvision.h"
31#include "usbvision-cards.h"
31 32
32/* Supported Devices: A table for usbvision.c*/ 33/* Supported Devices: A table for usbvision.c*/
33struct usbvision_device_data_st usbvision_device_data[] = { 34struct usbvision_device_data_st usbvision_device_data[] = {
34 {0xFFF0, 0xFFF0, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Custom Dummy USBVision Device"}, 35 [XANBOO] = {
35 {0x0A6F, 0x0400, -1, CODEC_SAA7113, 4, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, -1, -1, -1, "Xanboo"}, 36 .Interface = -1,
36 {0x050D, 0x0208, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 1, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Belkin USBView II"}, 37 .Codec = CODEC_SAA7113,
37 {0x0571, 0x0002, 0, CODEC_SAA7111, 2, V4L2_STD_PAL, 0, 0, 1, 0, 0, -1, -1, -1, -1, 7, "echoFX InterView Lite"}, 38 .VideoChannels = 4,
38 {0x0573, 0x0003, -1, CODEC_SAA7111, 2, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, -1, -1, -1, "USBGear USBG-V1 resp. HAMA USB"}, 39 .VideoNorm = V4L2_STD_NTSC,
39 {0x0573, 0x0400, -1, CODEC_SAA7113, 4, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "D-Link V100"}, 40 .AudioChannels = 1,
40 {0x0573, 0x2000, -1, CODEC_SAA7111, 2, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, -1, -1, -1, "X10 USB Camera"}, 41 .Radio = 0,
41 {0x0573, 0x2d00, -1, CODEC_SAA7111, 2, V4L2_STD_PAL, 1, 0, 1, 0, 0, -1, -1, -1, 3, 7, "Osprey 50"}, 42 .vbi = 1,
42 {0x0573, 0x2d01, -1, CODEC_SAA7113, 2, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Hauppauge USB-Live Model 600"}, 43 .Tuner = 0,
43 {0x0573, 0x2101, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 2, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Zoran Co. PMD (Nogatech) AV-grabber Manhattan"}, 44 .TunerType = 0,
44 {0x0573, 0x4100, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, 20, -1, "Nogatech USB-TV (NTSC) FM"}, 45 .X_Offset = -1,
45 {0x0573, 0x4110, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, 20, -1, "PNY USB-TV (NTSC) FM"}, 46 .Y_Offset = -1,
46 {0x0573, 0x4450, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "PixelView PlayTv-USB PRO (PAL) FM"}, 47 .ModelString = "Xanboo",
47 {0x0573, 0x4550, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "ZTV ZT-721 2.4GHz USB A/V Receiver"}, 48 },
48 {0x0573, 0x4d00, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, 20, -1, "Hauppauge WinTv-USB USA"}, 49 [BELKIN_VIDEOBUS_II] = {
49 {0x0573, 0x4d01, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB"}, 50 .Interface = -1,
50 {0x0573, 0x4d02, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (NTSC)"}, 51 .Codec = CODEC_SAA7113,
51 {0x0573, 0x4d03, -1, CODEC_SAA7111, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (SECAM) "}, 52 .VideoChannels = 2,
52 {0x0573, 0x4d10, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (NTSC) FM"}, 53 .VideoNorm = V4L2_STD_PAL,
53 {0x0573, 0x4d11, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (PAL) FM"}, 54 .AudioChannels = 1,
54 {0x0573, 0x4d12, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (PAL) FM"}, 55 .Radio = 0,
55 {0x0573, 0x4d2a, 0, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_MICROTUNE_4049FM5, -1, -1, 0, 3, 7, "Hauppauge WinTv USB (NTSC) FM Model 602 40201 Rev B285"}, 56 .vbi = 1,
56 {0x0573, 0x4d2b, 0, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_MICROTUNE_4049FM5, -1, -1, 0, 3, 7, "Hauppauge WinTv USB (NTSC) FM Model 602 40201 Rev B282"}, 57 .Tuner = 0,
57 {0x0573, 0x4d2c, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_FM1216ME_MK3, -1, -1, 0, 3, 7, "Hauppauge WinTv USB (PAL/SECAM) 40209 Rev E1A5"}, 58 .TunerType = 0,
58 {0x0573, 0x4d20, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB II (PAL) FM Model 40201 Rev B226"}, 59 .X_Offset = 0,
59 {0x0573, 0x4d21, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB II (PAL)"}, 60 .Y_Offset = 3,
60 {0x0573, 0x4d22, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB II (PAL) MODEL 566"}, 61 .Dvi_yuv_override = 1,
61 {0x0573, 0x4d23, -1, CODEC_SAA7113, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB (SECAM) 4D23"}, 62 .Dvi_yuv = 7,
62 {0x0573, 0x4d25, -1, CODEC_SAA7113, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB (SECAM) Model 40209 Rev B234"}, 63 .ModelString = "Belkin USB VideoBus II Adapter",
63 {0x0573, 0x4d26, -1, CODEC_SAA7113, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB (SECAM) Model 40209 Rev B243"}, 64 },
64 {0x0573, 0x4d27, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_ALPS_TSBE1_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB Model 40204 Rev B281"}, 65 [BELKIN_VIDEOBUS] = {
65 {0x0573, 0x4d28, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_ALPS_TSBE1_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB Model 40204 Rev B283"}, 66 .Interface = -1,
66 {0x0573, 0x4d29, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB Model 40205 Rev B298"}, 67 .Codec = CODEC_SAA7111,
67 {0x0573, 0x4d30, -1, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB FM Model 40211 Rev B123"}, 68 .VideoChannels = 2,
68 {0x0573, 0x4d31, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB III (PAL) FM Model 568"}, 69 .VideoNorm = V4L2_STD_NTSC,
69 {0x0573, 0x4d32, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB III (PAL) FM Model 573"}, 70 .AudioChannels = 1,
70 {0x0573, 0x4d35, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_MICROTUNE_4049FM5, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB III (PAL) FM Model 40219 Rev B252"}, 71 .Radio = 0,
71 {0x0573, 0x4d37, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_FM1216ME_MK3, -1, -1, 0, 3, 7, "Hauppauge WinTV USB device Model 40219 Rev E189"}, 72 .vbi = 1,
72 {0x0768, 0x0006, -1, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, 5, 5, -1, "Camtel Technology USB TV Genie Pro FM Model TVB330"}, 73 .Tuner = 0,
73 {0x07d0, 0x0001, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Digital Video Creator I"}, 74 .TunerType = 0,
74 {0x07d0, 0x0002, -1, CODEC_SAA7111, 2, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 82, 20, 7, "Global Village GV-007 (NTSC)"}, 75 .X_Offset = -1,
75 {0x07d0, 0x0003, 0, CODEC_SAA7113, 2, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Dazzle Fusion Model DVC-50 Rev 1 (NTSC)"}, 76 .Y_Offset = -1,
76 {0x07d0, 0x0004, 0, CODEC_SAA7113, 2, V4L2_STD_PAL, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Dazzle Fusion Model DVC-80 Rev 1 (PAL)"}, 77 .ModelString = "Belkin Components USB VideoBus",
77 {0x07d0, 0x0005, 0, CODEC_SAA7113, 2, V4L2_STD_SECAM, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)"}, 78 },
78 {0x2304, 0x010d, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 0, 0, 1, TUNER_TEMIC_4066FY5_PAL_I, -1, -1, -1, -1, -1, "Pinnacle Studio PCTV USB (PAL)"}, 79 [BELKIN_USB_VIDEOBUS_II] = {
79 {0x2304, 0x0109, -1, CODEC_SAA7111, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, -1, -1, -1, "Pinnacle Studio PCTV USB (SECAM)"}, 80 .Interface = -1,
80 {0x2304, 0x0110, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1,128, 23, -1, "Pinnacle Studio PCTV USB (PAL) FM"}, 81 .Codec = CODEC_SAA7113,
81 {0x2304, 0x0111, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, -1, -1, -1, "Miro PCTV USB"}, 82 .VideoChannels = 2,
82 {0x2304, 0x0112, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Pinnacle Studio PCTV USB (NTSC) FM"}, 83 .VideoNorm = V4L2_STD_PAL,
83 {0x2304, 0x0210, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_TEMIC_4009FR5_PAL, -1, -1, 0, 3, 7, "Pinnacle Studio PCTV USB (PAL) FM"}, 84 .AudioChannels = 1,
84 {0x2304, 0x0212, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_TEMIC_4039FR5_NTSC, -1, -1, 0, 3, 7, "Pinnacle Studio PCTV USB (NTSC) FM"}, 85 .Radio = 0,
85 {0x2304, 0x0214, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_TEMIC_4009FR5_PAL, -1, -1, 0, 3, 7, "Pinnacle Studio PCTV USB (PAL) FM"}, 86 .vbi = 1,
86 {0x2304, 0x0300, -1, CODEC_SAA7113, 2, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Pinnacle Studio Linx Video input cable (NTSC)"}, 87 .Tuner = 0,
87 {0x2304, 0x0301, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 1, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Pinnacle Studio Linx Video input cable (PAL)"}, 88 .TunerType = 0,
88 {0x2304, 0x0419, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_TEMIC_4009FR5_PAL, -1, -1, 0, 3, 7, "Pinnacle PCTV Bungee USB (PAL) FM"}, 89 .X_Offset = 0,
89 {0x2400, 0x4200, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB"}, 90 .Y_Offset = 3,
90 {} /* Terminating entry */ 91 .Dvi_yuv_override = 1,
92 .Dvi_yuv = 7,
93 .ModelString = "Belkin USB VideoBus II",
94 },
95 [ECHOFX_INTERVIEW_LITE] = {
96 .Interface = 0,
97 .Codec = CODEC_SAA7111,
98 .VideoChannels = 2,
99 .VideoNorm = V4L2_STD_PAL,
100 .AudioChannels = 0,
101 .Radio = 0,
102 .vbi = 1,
103 .Tuner = 0,
104 .TunerType = 0,
105 .X_Offset = -1,
106 .Y_Offset = -1,
107 .Dvi_yuv_override = 1,
108 .Dvi_yuv = 7,
109 .ModelString = "echoFX InterView Lite",
110 },
111 [USBGEAR_USBG_V1] = {
112 .Interface = -1,
113 .Codec = CODEC_SAA7111,
114 .VideoChannels = 2,
115 .VideoNorm = V4L2_STD_NTSC,
116 .AudioChannels = 1,
117 .Radio = 0,
118 .vbi = 1,
119 .Tuner = 0,
120 .TunerType = 0,
121 .X_Offset = -1,
122 .Y_Offset = -1,
123 .ModelString = "USBGear USBG-V1 resp. HAMA USB",
124 },
125 [D_LINK_V100] = {
126 .Interface = -1,
127 .Codec = CODEC_SAA7113,
128 .VideoChannels = 4,
129 .VideoNorm = V4L2_STD_NTSC,
130 .AudioChannels = 0,
131 .Radio = 0,
132 .vbi = 1,
133 .Tuner = 0,
134 .TunerType = 0,
135 .X_Offset = 0,
136 .Y_Offset = 3,
137 .Dvi_yuv_override = 1,
138 .Dvi_yuv = 7,
139 .ModelString = "D-Link V100",
140 },
141 [X10_USB_CAMERA] = {
142 .Interface = -1,
143 .Codec = CODEC_SAA7111,
144 .VideoChannels = 2,
145 .VideoNorm = V4L2_STD_NTSC,
146 .AudioChannels = 1,
147 .Radio = 0,
148 .vbi = 1,
149 .Tuner = 0,
150 .TunerType = 0,
151 .X_Offset = -1,
152 .Y_Offset = -1,
153 .ModelString = "X10 USB Camera",
154 },
155 [HPG_WINTV_LIVE_PAL_BG] = {
156 .Interface = -1,
157 .Codec = CODEC_SAA7111,
158 .VideoChannels = 2,
159 .VideoNorm = V4L2_STD_PAL,
160 .AudioChannels = 1,
161 .Radio = 0,
162 .vbi = 1,
163 .Tuner = 0,
164 .TunerType = 0,
165 .X_Offset = -1,
166 .Y_Offset = 3,
167 .Dvi_yuv_override = 1,
168 .Dvi_yuv = 7,
169 .ModelString = "Hauppauge WinTV USB Live (PAL B/G)",
170 },
171 [HPG_WINTV_LIVE_PRO_NTSC_MN] = {
172 .Interface = -1,
173 .Codec = CODEC_SAA7113,
174 .VideoChannels = 2,
175 .VideoNorm = V4L2_STD_NTSC,
176 .AudioChannels = 0,
177 .Radio = 0,
178 .vbi = 1,
179 .Tuner = 0,
180 .TunerType = 0,
181 .X_Offset = 0,
182 .Y_Offset = 3,
183 .Dvi_yuv_override = 1,
184 .Dvi_yuv = 7,
185 .ModelString = "Hauppauge WinTV USB Live Pro (NTSC M/N)",
186 },
187 [ZORAN_PMD_NOGATECH] = {
188 .Interface = -1,
189 .Codec = CODEC_SAA7113,
190 .VideoChannels = 2,
191 .VideoNorm = V4L2_STD_PAL,
192 .AudioChannels = 2,
193 .Radio = 0,
194 .vbi = 1,
195 .Tuner = 0,
196 .TunerType = 0,
197 .X_Offset = 0,
198 .Y_Offset = 3,
199 .Dvi_yuv_override = 1,
200 .Dvi_yuv = 7,
201 .ModelString = "Zoran Co. PMD (Nogatech) AV-grabber Manhattan",
202 },
203 [NOGATECH_USB_TV_NTSC_FM] = {
204 .Interface = -1,
205 .Codec = CODEC_SAA7111,
206 .VideoChannels = 3,
207 .VideoNorm = V4L2_STD_NTSC,
208 .AudioChannels = 1,
209 .Radio = 1,
210 .vbi = 1,
211 .Tuner = 1,
212 .TunerType = TUNER_PHILIPS_NTSC_M,
213 .X_Offset = -1,
214 .Y_Offset = 20,
215 .ModelString = "Nogatech USB-TV (NTSC) FM",
216 },
217 [PNY_USB_TV_NTSC_FM] = {
218 .Interface = -1,
219 .Codec = CODEC_SAA7111,
220 .VideoChannels = 3,
221 .VideoNorm = V4L2_STD_NTSC,
222 .AudioChannels = 1,
223 .Radio = 1,
224 .vbi = 1,
225 .Tuner = 1,
226 .TunerType = TUNER_PHILIPS_NTSC_M,
227 .X_Offset = -1,
228 .Y_Offset = 20,
229 .ModelString = "PNY USB-TV (NTSC) FM",
230 },
231 [PV_PLAYTV_USB_PRO_PAL_FM] = {
232 .Interface = 0,
233 .Codec = CODEC_SAA7113,
234 .VideoChannels = 3,
235 .VideoNorm = V4L2_STD_PAL,
236 .AudioChannels = 1,
237 .Radio = 1,
238 .vbi = 1,
239 .Tuner = 1,
240 .TunerType = TUNER_PHILIPS_PAL,
241 .X_Offset = 0,
242 .Y_Offset = 3,
243 .Dvi_yuv_override = 1,
244 .Dvi_yuv = 7,
245 .ModelString = "PixelView PlayTv-USB PRO (PAL) FM",
246 },
247 [ZT_721] = {
248 .Interface = 0,
249 .Codec = CODEC_SAA7113,
250 .VideoChannels = 3,
251 .VideoNorm = V4L2_STD_PAL,
252 .AudioChannels = 1,
253 .Radio = 1,
254 .vbi = 1,
255 .Tuner = 1,
256 .TunerType = TUNER_PHILIPS_PAL,
257 .X_Offset = 0,
258 .Y_Offset = 3,
259 .Dvi_yuv_override = 1,
260 .Dvi_yuv = 7,
261 .ModelString = "ZTV ZT-721 2.4GHz USB A/V Receiver",
262 },
263 [HPG_WINTV_NTSC_MN] = {
264 .Interface = -1,
265 .Codec = CODEC_SAA7111,
266 .VideoChannels = 3,
267 .VideoNorm = V4L2_STD_NTSC,
268 .AudioChannels = 1,
269 .Radio = 0,
270 .vbi = 1,
271 .Tuner = 1,
272 .TunerType = TUNER_PHILIPS_NTSC_M,
273 .X_Offset = -1,
274 .Y_Offset = 20,
275 .ModelString = "Hauppauge WinTV USB (NTSC M/N)",
276 },
277 [HPG_WINTV_PAL_BG] = {
278 .Interface = -1,
279 .Codec = CODEC_SAA7111,
280 .VideoChannels = 3,
281 .VideoNorm = V4L2_STD_PAL,
282 .AudioChannels = 1,
283 .Radio = 0,
284 .vbi = 1,
285 .Tuner = 1,
286 .TunerType = TUNER_PHILIPS_PAL,
287 .X_Offset = -1,
288 .Y_Offset = -1,
289 .ModelString = "Hauppauge WinTV USB (PAL B/G)",
290 },
291 [HPG_WINTV_PAL_I] = {
292 .Interface = -1,
293 .Codec = CODEC_SAA7111,
294 .VideoChannels = 3,
295 .VideoNorm = V4L2_STD_PAL,
296 .AudioChannels = 1,
297 .Radio = 0,
298 .vbi = 1,
299 .Tuner = 1,
300 .TunerType = TUNER_PHILIPS_PAL,
301 .X_Offset = -1,
302 .Y_Offset = -1,
303 .ModelString = "Hauppauge WinTV USB (PAL I)",
304 },
305 [HPG_WINTV_PAL_SECAM_L] = {
306 .Interface = -1,
307 .Codec = CODEC_SAA7111,
308 .VideoChannels = 3,
309 .VideoNorm = V4L2_STD_SECAM,
310 .AudioChannels = 1,
311 .Radio = 0,
312 .vbi = 1,
313 .Tuner = 1,
314 .TunerType = TUNER_PHILIPS_SECAM,
315 .X_Offset = -1,
316 .Y_Offset = -1,
317 .ModelString = "Hauppauge WinTV USB (PAL/SECAM L)",
318 },
319 [HPG_WINTV_PAL_D_K] = {
320 .Interface = -1,
321 .Codec = CODEC_SAA7111,
322 .VideoChannels = 3,
323 .VideoNorm = V4L2_STD_PAL,
324 .AudioChannels = 1,
325 .Radio = 0,
326 .vbi = 1,
327 .Tuner = 1,
328 .TunerType = TUNER_PHILIPS_PAL,
329 .X_Offset = -1,
330 .Y_Offset = -1,
331 .ModelString = "Hauppauge WinTV USB (PAL D/K)",
332 },
333 [HPG_WINTV_NTSC_FM] = {
334 .Interface = -1,
335 .Codec = CODEC_SAA7111,
336 .VideoChannels = 3,
337 .VideoNorm = V4L2_STD_NTSC,
338 .AudioChannels = 1,
339 .Radio = 1,
340 .vbi = 1,
341 .Tuner = 1,
342 .TunerType = TUNER_PHILIPS_NTSC_M,
343 .X_Offset = -1,
344 .Y_Offset = -1,
345 .ModelString = "Hauppauge WinTV USB (NTSC FM)",
346 },
347 [HPG_WINTV_PAL_BG_FM] = {
348 .Interface = -1,
349 .Codec = CODEC_SAA7111,
350 .VideoChannels = 3,
351 .VideoNorm = V4L2_STD_PAL,
352 .AudioChannels = 1,
353 .Radio = 1,
354 .vbi = 1,
355 .Tuner = 1,
356 .TunerType = TUNER_PHILIPS_PAL,
357 .X_Offset = -1,
358 .Y_Offset = -1,
359 .ModelString = "Hauppauge WinTV USB (PAL B/G FM)",
360 },
361 [HPG_WINTV_PAL_I_FM] = {
362 .Interface = -1,
363 .Codec = CODEC_SAA7111,
364 .VideoChannels = 3,
365 .VideoNorm = V4L2_STD_PAL,
366 .AudioChannels = 1,
367 .Radio = 1,
368 .vbi = 1,
369 .Tuner = 1,
370 .TunerType = TUNER_PHILIPS_PAL,
371 .X_Offset = -1,
372 .Y_Offset = -1,
373 .ModelString = "Hauppauge WinTV USB (PAL I FM)",
374 },
375 [HPG_WINTV_PAL_D_K_FM] = {
376 .Interface = -1,
377 .Codec = CODEC_SAA7111,
378 .VideoChannels = 3,
379 .VideoNorm = V4L2_STD_PAL,
380 .AudioChannels = 1,
381 .Radio = 1,
382 .vbi = 1,
383 .Tuner = 1,
384 .TunerType = TUNER_PHILIPS_PAL,
385 .X_Offset = -1,
386 .Y_Offset = -1,
387 .ModelString = "Hauppauge WinTV USB (PAL D/K FM)",
388 },
389 [HPG_WINTV_PRO_NTSC_MN] = {
390 .Interface = 0,
391 .Codec = CODEC_SAA7113,
392 .VideoChannels = 3,
393 .VideoNorm = V4L2_STD_NTSC,
394 .AudioChannels = 1,
395 .Radio = 1,
396 .vbi = 1,
397 .Tuner = 1,
398 .TunerType = TUNER_MICROTUNE_4049FM5,
399 .X_Offset = 0,
400 .Y_Offset = 3,
401 .Dvi_yuv_override = 1,
402 .Dvi_yuv = 7,
403 .ModelString = "Hauppauge WinTV USB Pro (NTSC M/N)",
404 },
405 [HPG_WINTV_PRO_NTSC_MN_V2] = {
406 .Interface = 0,
407 .Codec = CODEC_SAA7113,
408 .VideoChannels = 3,
409 .VideoNorm = V4L2_STD_NTSC,
410 .AudioChannels = 1,
411 .Radio = 1,
412 .vbi = 1,
413 .Tuner = 1,
414 .TunerType = TUNER_MICROTUNE_4049FM5,
415 .X_Offset = 0,
416 .Y_Offset = 3,
417 .Dvi_yuv_override = 1,
418 .Dvi_yuv = 7,
419 .ModelString = "Hauppauge WinTV USB Pro (NTSC M/N) V2",
420 },
421 [HPG_WINTV_PRO_PAL] = {
422 .Interface = 0,
423 .Codec = CODEC_SAA7113,
424 .VideoChannels = 3,
425 .VideoNorm = V4L2_STD_PAL,
426 .AudioChannels = 1,
427 .Radio = 0,
428 .vbi = 1,
429 .Tuner = 1,
430 .TunerType = TUNER_PHILIPS_FM1216ME_MK3,
431 .X_Offset = 0,
432 .Y_Offset = 3,
433 .Dvi_yuv_override = 1,
434 .Dvi_yuv = 7,
435 .ModelString = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)",
436 },
437 [HPG_WINTV_PRO_NTSC_MN_V3] = {
438 .Interface = 0,
439 .Codec = CODEC_SAA7113,
440 .VideoChannels = 3,
441 .VideoNorm = V4L2_STD_NTSC,
442 .AudioChannels = 1,
443 .Radio = 1,
444 .vbi = 1,
445 .Tuner = 1,
446 .TunerType = TUNER_PHILIPS_NTSC_M,
447 .X_Offset = 0,
448 .Y_Offset = 3,
449 .Dvi_yuv_override = 1,
450 .Dvi_yuv = 7,
451 .ModelString = "Hauppauge WinTV USB Pro (NTSC M/N) V3",
452 },
453 [HPG_WINTV_PRO_PAL_BG] = {
454 .Interface = 0,
455 .Codec = CODEC_SAA7113,
456 .VideoChannels = 3,
457 .VideoNorm = V4L2_STD_PAL,
458 .AudioChannels = 1,
459 .Radio = 0,
460 .vbi = 1,
461 .Tuner = 1,
462 .TunerType = TUNER_PHILIPS_PAL,
463 .X_Offset = 0,
464 .Y_Offset = 3,
465 .Dvi_yuv_override = 1,
466 .Dvi_yuv = 7,
467 .ModelString = "Hauppauge WinTV USB Pro (PAL B/G)",
468 },
469 [HPG_WINTV_PRO_PAL_I] = {
470 .Interface = 0,
471 .Codec = CODEC_SAA7113,
472 .VideoChannels = 3,
473 .VideoNorm = V4L2_STD_PAL,
474 .AudioChannels = 1,
475 .Radio = 0,
476 .vbi = 1,
477 .Tuner = 1,
478 .TunerType = TUNER_PHILIPS_PAL,
479 .X_Offset = 0,
480 .Y_Offset = 3,
481 .Dvi_yuv_override = 1,
482 .Dvi_yuv = 7,
483 .ModelString = "Hauppauge WinTV USB Pro (PAL I)",
484 },
485 [HPG_WINTV_PRO_PAL_SECAM_L] = {
486 .Interface = -1,
487 .Codec = CODEC_SAA7113,
488 .VideoChannels = 3,
489 .VideoNorm = V4L2_STD_SECAM,
490 .AudioChannels = 1,
491 .Radio = 0,
492 .vbi = 1,
493 .Tuner = 1,
494 .TunerType = TUNER_PHILIPS_SECAM,
495 .X_Offset = 0,
496 .Y_Offset = 3,
497 .Dvi_yuv_override = 1,
498 .Dvi_yuv = 7,
499 .ModelString = "Hauppauge WinTV USB Pro (PAL/SECAM L)",
500 },
501 [HPG_WINTV_PRO_PAL_D_K] = {
502 .Interface = -1,
503 .Codec = CODEC_SAA7113,
504 .VideoChannels = 3,
505 .VideoNorm = V4L2_STD_PAL,
506 .AudioChannels = 1,
507 .Radio = 0,
508 .vbi = 1,
509 .Tuner = 1,
510 .TunerType = TUNER_PHILIPS_PAL,
511 .X_Offset = 0,
512 .Y_Offset = 3,
513 .Dvi_yuv_override = 1,
514 .Dvi_yuv = 7,
515 .ModelString = "Hauppauge WinTV USB Pro (PAL D/K)",
516 },
517 [HPG_WINTV_PRO_PAL_SECAM] = {
518 .Interface = -1,
519 .Codec = CODEC_SAA7113,
520 .VideoChannels = 3,
521 .VideoNorm = V4L2_STD_SECAM,
522 .AudioChannels = 1,
523 .Radio = 0,
524 .vbi = 1,
525 .Tuner = 1,
526 .TunerType = TUNER_PHILIPS_SECAM,
527 .X_Offset = 0,
528 .Y_Offset = 3,
529 .Dvi_yuv_override = 1,
530 .Dvi_yuv = 7,
531 .ModelString = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)",
532 },
533 [HPG_WINTV_PRO_PAL_SECAM_V2] = {
534 .Interface = -1,
535 .Codec = CODEC_SAA7113,
536 .VideoChannels = 3,
537 .VideoNorm = V4L2_STD_SECAM,
538 .AudioChannels = 1,
539 .Radio = 0,
540 .vbi = 1,
541 .Tuner = 1,
542 .TunerType = TUNER_PHILIPS_SECAM,
543 .X_Offset = 0,
544 .Y_Offset = 3,
545 .Dvi_yuv_override = 1,
546 .Dvi_yuv = 7,
547 .ModelString = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2",
548 },
549 [HPG_WINTV_PRO_PAL_BG_V2] = {
550 .Interface = -1,
551 .Codec = CODEC_SAA7113,
552 .VideoChannels = 3,
553 .VideoNorm = V4L2_STD_PAL,
554 .AudioChannels = 1,
555 .Radio = 0,
556 .vbi = 1,
557 .Tuner = 1,
558 .TunerType = TUNER_ALPS_TSBE1_PAL,
559 .X_Offset = 0,
560 .Y_Offset = 3,
561 .Dvi_yuv_override = 1,
562 .Dvi_yuv = 7,
563 .ModelString = "Hauppauge WinTV USB Pro (PAL B/G) V2",
564 },
565 [HPG_WINTV_PRO_PAL_BG_D_K] = {
566 .Interface = -1,
567 .Codec = CODEC_SAA7113,
568 .VideoChannels = 3,
569 .VideoNorm = V4L2_STD_PAL,
570 .AudioChannels = 1,
571 .Radio = 0,
572 .vbi = 1,
573 .Tuner = 1,
574 .TunerType = TUNER_ALPS_TSBE1_PAL,
575 .X_Offset = 0,
576 .Y_Offset = 3,
577 .Dvi_yuv_override = 1,
578 .Dvi_yuv = 7,
579 .ModelString = "Hauppauge WinTV USB Pro (PAL B/G,D/K)",
580 },
581 [HPG_WINTV_PRO_PAL_I_D_K] = {
582 .Interface = -1,
583 .Codec = CODEC_SAA7113,
584 .VideoChannels = 3,
585 .VideoNorm = V4L2_STD_PAL,
586 .AudioChannels = 1,
587 .Radio = 0,
588 .vbi = 1,
589 .Tuner = 1,
590 .TunerType = TUNER_PHILIPS_PAL,
591 .X_Offset = 0,
592 .Y_Offset = 3,
593 .Dvi_yuv_override = 1,
594 .Dvi_yuv = 7,
595 .ModelString = "Hauppauge WinTV USB Pro (PAL I,D/K)",
596 },
597 [HPG_WINTV_PRO_NTSC_MN_FM] = {
598 .Interface = -1,
599 .Codec = CODEC_SAA7113,
600 .VideoChannels = 3,
601 .VideoNorm = V4L2_STD_NTSC,
602 .AudioChannels = 1,
603 .Radio = 1,
604 .vbi = 1,
605 .Tuner = 1,
606 .TunerType = TUNER_PHILIPS_NTSC_M,
607 .X_Offset = 0,
608 .Y_Offset = 3,
609 .Dvi_yuv_override = 1,
610 .Dvi_yuv = 7,
611 .ModelString = "Hauppauge WinTV USB Pro (NTSC M/N FM)",
612 },
613 [HPG_WINTV_PRO_PAL_BG_FM] = {
614 .Interface = 0,
615 .Codec = CODEC_SAA7113,
616 .VideoChannels = 3,
617 .VideoNorm = V4L2_STD_PAL,
618 .AudioChannels = 1,
619 .Radio = 1,
620 .vbi = 1,
621 .Tuner = 1,
622 .TunerType = TUNER_PHILIPS_PAL,
623 .X_Offset = 0,
624 .Y_Offset = 3,
625 .Dvi_yuv_override = 1,
626 .Dvi_yuv = 7,
627 .ModelString = "Hauppauge WinTV USB Pro (PAL B/G FM)",
628 },
629 [HPG_WINTV_PRO_PAL_I_FM] = {
630 .Interface = 0,
631 .Codec = CODEC_SAA7113,
632 .VideoChannels = 3,
633 .VideoNorm = V4L2_STD_PAL,
634 .AudioChannels = 1,
635 .Radio = 1,
636 .vbi = 1,
637 .Tuner = 1,
638 .TunerType = TUNER_PHILIPS_PAL,
639 .X_Offset = 0,
640 .Y_Offset = 3,
641 .Dvi_yuv_override = 1,
642 .Dvi_yuv = 7,
643 .ModelString = "Hauppauge WinTV USB Pro (PAL I FM)",
644 },
645 [HPG_WINTV_PRO_PAL_D_K_FM] = {
646 .Interface = 0,
647 .Codec = CODEC_SAA7113,
648 .VideoChannels = 3,
649 .VideoNorm = V4L2_STD_PAL,
650 .AudioChannels = 1,
651 .Radio = 1,
652 .vbi = 1,
653 .Tuner = 1,
654 .TunerType = TUNER_PHILIPS_PAL,
655 .X_Offset = 0,
656 .Y_Offset = 3,
657 .Dvi_yuv_override = 1,
658 .Dvi_yuv = 7,
659 .ModelString = "Hauppauge WinTV USB Pro (PAL D/K FM)",
660 },
661 [HPG_WINTV_PRO_TEMIC_PAL_FM] = {
662 .Interface = 0,
663 .Codec = CODEC_SAA7113,
664 .VideoChannels = 3,
665 .VideoNorm = V4L2_STD_PAL,
666 .AudioChannels = 1,
667 .Radio = 1,
668 .vbi = 1,
669 .Tuner = 1,
670 .TunerType = TUNER_MICROTUNE_4049FM5,
671 .X_Offset = 0,
672 .Y_Offset = 3,
673 .Dvi_yuv_override = 1,
674 .Dvi_yuv = 7,
675 .ModelString = "Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM)",
676 },
677 [HPG_WINTV_PRO_TEMIC_PAL_BG_FM] = {
678 .Interface = 0,
679 .Codec = CODEC_SAA7113,
680 .VideoChannels = 3,
681 .VideoNorm = V4L2_STD_PAL,
682 .AudioChannels = 1,
683 .Radio = 1,
684 .vbi = 1,
685 .Tuner = 1,
686 .TunerType = TUNER_MICROTUNE_4049FM5,
687 .X_Offset = 0,
688 .Y_Offset = 3,
689 .Dvi_yuv_override = 1,
690 .Dvi_yuv = 7,
691 .ModelString = "Hauppauge WinTV USB Pro (Temic PAL B/G FM)",
692 },
693 [HPG_WINTV_PRO_PAL_FM] = {
694 .Interface = 0,
695 .Codec = CODEC_SAA7113,
696 .VideoChannels = 3,
697 .VideoNorm = V4L2_STD_PAL,
698 .AudioChannels = 1,
699 .Radio = 1,
700 .vbi = 1,
701 .Tuner = 1,
702 .TunerType = TUNER_PHILIPS_FM1216ME_MK3,
703 .X_Offset = 0,
704 .Y_Offset = 3,
705 .Dvi_yuv_override = 1,
706 .Dvi_yuv = 7,
707 .ModelString = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)",
708 },
709 [HPG_WINTV_PRO_NTSC_MN_FM_V2] = {
710 .Interface = 0,
711 .Codec = CODEC_SAA7113,
712 .VideoChannels = 3,
713 .VideoNorm = V4L2_STD_NTSC,
714 .AudioChannels = 1,
715 .Radio = 1,
716 .vbi = 1,
717 .Tuner = 1,
718 .TunerType = TUNER_PHILIPS_NTSC_M,
719 .X_Offset = 0,
720 .Y_Offset = 3,
721 .Dvi_yuv_override = 1,
722 .Dvi_yuv = 7,
723 .ModelString = "Hauppauge WinTV USB Pro (NTSC M/N FM) V2",
724 },
725 [CAMTEL_TVB330] = {
726 .Interface = -1,
727 .Codec = CODEC_SAA7113,
728 .VideoChannels = 3,
729 .VideoNorm = V4L2_STD_NTSC,
730 .AudioChannels = 1,
731 .Radio = 1,
732 .vbi = 1,
733 .Tuner = 1,
734 .TunerType = TUNER_PHILIPS_NTSC_M,
735 .X_Offset = 5,
736 .Y_Offset = 5,
737 .ModelString = "Camtel Technology USB TV Genie Pro FM Model TVB330",
738 },
739 [DIGITAL_VIDEO_CREATOR_I] = {
740 .Interface = -1,
741 .Codec = CODEC_SAA7113,
742 .VideoChannels = 2,
743 .VideoNorm = V4L2_STD_PAL,
744 .AudioChannels = 0,
745 .Radio = 0,
746 .vbi = 1,
747 .Tuner = 0,
748 .TunerType = 0,
749 .X_Offset = 0,
750 .Y_Offset = 3,
751 .Dvi_yuv_override = 1,
752 .Dvi_yuv = 7,
753 .ModelString = "Digital Video Creator I",
754 },
755 [GLOBAL_VILLAGE_GV_007_NTSC] = {
756 .Interface = -1,
757 .Codec = CODEC_SAA7111,
758 .VideoChannels = 2,
759 .VideoNorm = V4L2_STD_NTSC,
760 .AudioChannels = 0,
761 .Radio = 0,
762 .vbi = 1,
763 .Tuner = 0,
764 .TunerType = 0,
765 .X_Offset = 82,
766 .Y_Offset = 20,
767 .Dvi_yuv_override = 1,
768 .Dvi_yuv = 7,
769 .ModelString = "Global Village GV-007 (NTSC)",
770 },
771 [DAZZLE_DVC_50_REV_1_NTSC] = {
772 .Interface = 0,
773 .Codec = CODEC_SAA7113,
774 .VideoChannels = 2,
775 .VideoNorm = V4L2_STD_NTSC,
776 .AudioChannels = 0,
777 .Radio = 0,
778 .vbi = 1,
779 .Tuner = 0,
780 .TunerType = 0,
781 .X_Offset = 0,
782 .Y_Offset = 3,
783 .Dvi_yuv_override = 1,
784 .Dvi_yuv = 7,
785 .ModelString = "Dazzle Fusion Model DVC-50 Rev 1 (NTSC)",
786 },
787 [DAZZLE_DVC_80_REV_1_PAL] = {
788 .Interface = 0,
789 .Codec = CODEC_SAA7113,
790 .VideoChannels = 2,
791 .VideoNorm = V4L2_STD_PAL,
792 .AudioChannels = 0,
793 .Radio = 0,
794 .vbi = 1,
795 .Tuner = 0,
796 .TunerType = 0,
797 .X_Offset = 0,
798 .Y_Offset = 3,
799 .Dvi_yuv_override = 1,
800 .Dvi_yuv = 7,
801 .ModelString = "Dazzle Fusion Model DVC-80 Rev 1 (PAL)",
802 },
803 [DAZZLE_DVC_90_REV_1_SECAM] = {
804 .Interface = 0,
805 .Codec = CODEC_SAA7113,
806 .VideoChannels = 2,
807 .VideoNorm = V4L2_STD_SECAM,
808 .AudioChannels = 0,
809 .Radio = 0,
810 .vbi = 1,
811 .Tuner = 0,
812 .TunerType = 0,
813 .X_Offset = 0,
814 .Y_Offset = 3,
815 .Dvi_yuv_override = 1,
816 .Dvi_yuv = 7,
817 .ModelString = "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)",
818 },
819 [ESKAPE_LABS_MYTV2GO] = {
820 .Interface = 0,
821 .Codec = CODEC_SAA7113,
822 .VideoChannels = 2,
823 .VideoNorm = V4L2_STD_PAL,
824 .AudioChannels = 1,
825 .Radio = 1,
826 .vbi = 1,
827 .Tuner = 1,
828 .TunerType = TUNER_PHILIPS_FM1216ME_MK3,
829 .X_Offset = 0,
830 .Y_Offset = 3,
831 .Dvi_yuv_override = 1,
832 .Dvi_yuv = 7,
833 .ModelString = "Eskape Labs MyTV2Go",
834 },
835 [PINNA_PCTV_USB_PAL] = {
836 .Interface = -1,
837 .Codec = CODEC_SAA7111,
838 .VideoChannels = 3,
839 .VideoNorm = V4L2_STD_PAL,
840 .AudioChannels = 1,
841 .Radio = 0,
842 .vbi = 0,
843 .Tuner = 1,
844 .TunerType = TUNER_TEMIC_4066FY5_PAL_I,
845 .X_Offset = -1,
846 .Y_Offset = -1,
847 .ModelString = "Pinnacle Studio PCTV USB (PAL)",
848 },
849 [PINNA_PCTV_USB_SECAM] = {
850 .Interface = -1,
851 .Codec = CODEC_SAA7111,
852 .VideoChannels = 3,
853 .VideoNorm = V4L2_STD_SECAM,
854 .AudioChannels = 1,
855 .Radio = 0,
856 .vbi = 1,
857 .Tuner = 1,
858 .TunerType = TUNER_PHILIPS_SECAM,
859 .X_Offset = -1,
860 .Y_Offset = -1,
861 .ModelString = "Pinnacle Studio PCTV USB (SECAM)",
862 },
863 [PINNA_PCTV_USB_PAL_FM] = {
864 .Interface = -1,
865 .Codec = CODEC_SAA7111,
866 .VideoChannels = 3,
867 .VideoNorm = V4L2_STD_PAL,
868 .AudioChannels = 1,
869 .Radio = 1,
870 .vbi = 1,
871 .Tuner = 1,
872 .TunerType = TUNER_PHILIPS_PAL,
873 .X_Offset = 128,
874 .Y_Offset = 23,
875 .ModelString = "Pinnacle Studio PCTV USB (PAL) FM",
876 },
877 [MIRO_PCTV_USB] = {
878 .Interface = -1,
879 .Codec = CODEC_SAA7111,
880 .VideoChannels = 3,
881 .VideoNorm = V4L2_STD_PAL,
882 .AudioChannels = 1,
883 .Radio = 0,
884 .vbi = 1,
885 .Tuner = 1,
886 .TunerType = TUNER_PHILIPS_PAL,
887 .X_Offset = -1,
888 .Y_Offset = -1,
889 .ModelString = "Miro PCTV USB",
890 },
891 [PINNA_PCTV_USB_NTSC_FM] = {
892 .Interface = -1,
893 .Codec = CODEC_SAA7111,
894 .VideoChannels = 3,
895 .VideoNorm = V4L2_STD_NTSC,
896 .AudioChannels = 1,
897 .Radio = 1,
898 .vbi = 1,
899 .Tuner = 1,
900 .TunerType = TUNER_PHILIPS_NTSC_M,
901 .X_Offset = -1,
902 .Y_Offset = -1,
903 .ModelString = "Pinnacle Studio PCTV USB (NTSC) FM",
904 },
905 [PINNA_PCTV_USB_PAL_FM_V2] = {
906 .Interface = -1,
907 .Codec = CODEC_SAA7113,
908 .VideoChannels = 3,
909 .VideoNorm = V4L2_STD_PAL,
910 .AudioChannels = 1,
911 .Radio = 1,
912 .vbi = 1,
913 .Tuner = 1,
914 .TunerType = TUNER_TEMIC_4009FR5_PAL,
915 .X_Offset = 0,
916 .Y_Offset = 3,
917 .Dvi_yuv_override = 1,
918 .Dvi_yuv = 7,
919 .ModelString = "Pinnacle Studio PCTV USB (PAL) FM V2",
920 },
921 [PINNA_PCTV_USB_NTSC_FM_V2] = {
922 .Interface = -1,
923 .Codec = CODEC_SAA7111,
924 .VideoChannels = 3,
925 .VideoNorm = V4L2_STD_NTSC,
926 .AudioChannels = 1,
927 .Radio = 1,
928 .vbi = 1,
929 .Tuner = 1,
930 .TunerType = TUNER_TEMIC_4039FR5_NTSC,
931 .X_Offset = 0,
932 .Y_Offset = 3,
933 .Dvi_yuv_override = 1,
934 .Dvi_yuv = 7,
935 .ModelString = "Pinnacle Studio PCTV USB (NTSC) FM V2",
936 },
937 [PINNA_PCTV_USB_PAL_FM_V3] = {
938 .Interface = -1,
939 .Codec = CODEC_SAA7113,
940 .VideoChannels = 3,
941 .VideoNorm = V4L2_STD_PAL,
942 .AudioChannels = 1,
943 .Radio = 1,
944 .vbi = 1,
945 .Tuner = 1,
946 .TunerType = TUNER_TEMIC_4009FR5_PAL,
947 .X_Offset = 0,
948 .Y_Offset = 3,
949 .Dvi_yuv_override = 1,
950 .Dvi_yuv = 7,
951 .ModelString = "Pinnacle Studio PCTV USB (PAL) FM V3",
952 },
953 [PINNA_LINX_VD_IN_CAB_NTSC] = {
954 .Interface = -1,
955 .Codec = CODEC_SAA7113,
956 .VideoChannels = 2,
957 .VideoNorm = V4L2_STD_NTSC,
958 .AudioChannels = 1,
959 .Radio = 0,
960 .vbi = 1,
961 .Tuner = 0,
962 .TunerType = 0,
963 .X_Offset = 0,
964 .Y_Offset = 3,
965 .Dvi_yuv_override = 1,
966 .Dvi_yuv = 7,
967 .ModelString = "Pinnacle Studio Linx Video input cable (NTSC)",
968 },
969 [PINNA_LINX_VD_IN_CAB_PAL] = {
970 .Interface = -1,
971 .Codec = CODEC_SAA7113,
972 .VideoChannels = 2,
973 .VideoNorm = V4L2_STD_PAL,
974 .AudioChannels = 1,
975 .Radio = 0,
976 .vbi = 1,
977 .Tuner = 0,
978 .TunerType = 0,
979 .X_Offset = 0,
980 .Y_Offset = 3,
981 .Dvi_yuv_override = 1,
982 .Dvi_yuv = 7,
983 .ModelString = "Pinnacle Studio Linx Video input cable (PAL)",
984 },
985 [PINNA_PCTV_BUNGEE_PAL_FM] = {
986 .Interface = -1,
987 .Codec = CODEC_SAA7113,
988 .VideoChannels = 3,
989 .VideoNorm = V4L2_STD_PAL,
990 .AudioChannels = 1,
991 .Radio = 1,
992 .vbi = 1,
993 .Tuner = 1,
994 .TunerType = TUNER_TEMIC_4009FR5_PAL,
995 .X_Offset = 0,
996 .Y_Offset = 3,
997 .Dvi_yuv_override = 1,
998 .Dvi_yuv = 7,
999 .ModelString = "Pinnacle PCTV Bungee USB (PAL) FM",
1000 },
1001 [HPG_WINTV] = {
1002 .Interface = -1,
1003 .Codec = CODEC_SAA7111,
1004 .VideoChannels = 3,
1005 .VideoNorm = V4L2_STD_NTSC,
1006 .AudioChannels = 1,
1007 .Radio = 0,
1008 .vbi = 1,
1009 .Tuner = 1,
1010 .TunerType = TUNER_PHILIPS_NTSC_M,
1011 .X_Offset = -1,
1012 .Y_Offset = -1,
1013 .ModelString = "Hauppauge WinTv-USB",
1014 },
91}; 1015};
1016const int usbvision_device_data_size=ARRAY_SIZE(usbvision_device_data);
92 1017
93/* Supported Devices */ 1018/* Supported Devices */
94 1019
95struct usb_device_id usbvision_table [] = { 1020struct usb_device_id usbvision_table [] = {
96 { USB_DEVICE(0xFFF0, 0xFFF0) }, /* Custom Dummy USBVision Device */ 1021 { USB_DEVICE(0x0a6f, 0x0400), .driver_info=XANBOO },
97 { USB_DEVICE(0x0A6F, 0x0400) }, /* Xanboo */ 1022 { USB_DEVICE(0x050d, 0x0106), .driver_info=BELKIN_VIDEOBUS_II },
98 { USB_DEVICE(0x050d, 0x0208) }, /* Belkin USBView II */ 1023 { USB_DEVICE(0x050d, 0x0207), .driver_info=BELKIN_VIDEOBUS },
99 { USB_DEVICE(0x0571, 0x0002) }, /* echoFX InterView Lite */ 1024 { USB_DEVICE(0x050d, 0x0208), .driver_info=BELKIN_USB_VIDEOBUS_II },
100 { USB_DEVICE(0x0573, 0x0003) }, /* USBGear USBG-V1 */ 1025 { USB_DEVICE(0x0571, 0x0002), .driver_info=ECHOFX_INTERVIEW_LITE },
101 { USB_DEVICE(0x0573, 0x0400) }, /* D-Link V100 */ 1026 { USB_DEVICE(0x0573, 0x0003), .driver_info=USBGEAR_USBG_V1 },
102 { USB_DEVICE(0x0573, 0x2000) }, /* X10 USB Camera */ 1027 { USB_DEVICE(0x0573, 0x0400), .driver_info=D_LINK_V100 },
103 { USB_DEVICE(0x0573, 0x2d00) }, /* Osprey 50 */ 1028 { USB_DEVICE(0x0573, 0x2000), .driver_info=X10_USB_CAMERA },
104 { USB_DEVICE(0x0573, 0x2d01) }, /* Hauppauge USB-Live Model 600 */ 1029 { USB_DEVICE(0x0573, 0x2d00), .driver_info=HPG_WINTV_LIVE_PAL_BG },
105 { USB_DEVICE(0x0573, 0x2101) }, /* Zoran Co. PMD (Nogatech) AV-grabber Manhattan */ 1030 { USB_DEVICE(0x0573, 0x2d01), .driver_info=HPG_WINTV_LIVE_PRO_NTSC_MN },
106 { USB_DEVICE(0x0573, 0x4100) }, /* Nogatech USB-TV FM (NTSC) */ 1031 { USB_DEVICE(0x0573, 0x2101), .driver_info=ZORAN_PMD_NOGATECH },
107 { USB_DEVICE(0x0573, 0x4110) }, /* PNY USB-TV (NTSC) FM */ 1032 { USB_DEVICE(0x0573, 0x4100), .driver_info=NOGATECH_USB_TV_NTSC_FM },
108 { USB_DEVICE(0x0573, 0x4450) }, /* PixelView PlayTv-USB PRO (PAL) FM */ 1033 { USB_DEVICE(0x0573, 0x4110), .driver_info=PNY_USB_TV_NTSC_FM },
109 { USB_DEVICE(0x0573, 0x4550) }, /* ZTV ZT-721 2.4GHz USB A/V Receiver */ 1034 { USB_DEVICE(0x0573, 0x4450), .driver_info=PV_PLAYTV_USB_PRO_PAL_FM },
110 { USB_DEVICE(0x0573, 0x4d00) }, /* Hauppauge WinTv-USB USA */ 1035 { USB_DEVICE(0x0573, 0x4550), .driver_info=ZT_721 },
111 { USB_DEVICE(0x0573, 0x4d01) }, /* Hauppauge WinTv-USB */ 1036 { USB_DEVICE(0x0573, 0x4d00), .driver_info=HPG_WINTV_NTSC_MN },
112 { USB_DEVICE(0x0573, 0x4d02) }, /* Hauppauge WinTv-USB UK */ 1037 { USB_DEVICE(0x0573, 0x4d01), .driver_info=HPG_WINTV_PAL_BG },
113 { USB_DEVICE(0x0573, 0x4d03) }, /* Hauppauge WinTv-USB France */ 1038 { USB_DEVICE(0x0573, 0x4d02), .driver_info=HPG_WINTV_PAL_I },
114 { USB_DEVICE(0x0573, 0x4d10) }, /* Hauppauge WinTv-USB with FM USA radio */ 1039 { USB_DEVICE(0x0573, 0x4d03), .driver_info=HPG_WINTV_PAL_SECAM_L },
115 { USB_DEVICE(0x0573, 0x4d11) }, /* Hauppauge WinTv-USB (PAL) with FM radio */ 1040 { USB_DEVICE(0x0573, 0x4d04), .driver_info=HPG_WINTV_PAL_D_K },
116 { USB_DEVICE(0x0573, 0x4d12) }, /* Hauppauge WinTv-USB UK with FM Radio */ 1041 { USB_DEVICE(0x0573, 0x4d10), .driver_info=HPG_WINTV_NTSC_FM },
117 { USB_DEVICE(0x0573, 0x4d2a) }, /* Hauppague WinTv USB Model 602 40201 Rev B285 */ 1042 { USB_DEVICE(0x0573, 0x4d11), .driver_info=HPG_WINTV_PAL_BG_FM },
118 { USB_DEVICE(0x0573, 0x4d2b) }, /* Hauppague WinTv USB Model 602 40201 Rev B282 */ 1043 { USB_DEVICE(0x0573, 0x4d12), .driver_info=HPG_WINTV_PAL_I_FM },
119 { USB_DEVICE(0x0573, 0x4d2c) }, /* Hauppague WinTv USB Model 40209 Rev. E1A5 PAL*/ 1044 { USB_DEVICE(0x0573, 0x4d14), .driver_info=HPG_WINTV_PAL_D_K_FM },
120 { USB_DEVICE(0x0573, 0x4d20) }, /* Hauppauge WinTv-USB II (PAL) FM Model 40201 Rev B226 */ 1045 { USB_DEVICE(0x0573, 0x4d2a), .driver_info=HPG_WINTV_PRO_NTSC_MN },
121 { USB_DEVICE(0x0573, 0x4d21) }, /* Hauppauge WinTv-USB II (PAL) with FM radio*/ 1046 { USB_DEVICE(0x0573, 0x4d2b), .driver_info=HPG_WINTV_PRO_NTSC_MN_V2 },
122 { USB_DEVICE(0x0573, 0x4d22) }, /* Hauppauge WinTv-USB II (PAL) Model 566 */ 1047 { USB_DEVICE(0x0573, 0x4d2c), .driver_info=HPG_WINTV_PRO_PAL },
123 { USB_DEVICE(0x0573, 0x4d23) }, /* Hauppauge WinTv-USB France 4D23*/ 1048 { USB_DEVICE(0x0573, 0x4d20), .driver_info=HPG_WINTV_PRO_NTSC_MN_V3 },
124 { USB_DEVICE(0x0573, 0x4d25) }, /* Hauppauge WinTv-USB Model 40209 rev B234 */ 1049 { USB_DEVICE(0x0573, 0x4d21), .driver_info=HPG_WINTV_PRO_PAL_BG },
125 { USB_DEVICE(0x0573, 0x4d26) }, /* Hauppauge WinTv-USB Model 40209 Rev B243 */ 1050 { USB_DEVICE(0x0573, 0x4d22), .driver_info=HPG_WINTV_PRO_PAL_I },
126 { USB_DEVICE(0x0573, 0x4d27) }, /* Hauppauge WinTv-USB Model 40204 Rev B281 */ 1051 { USB_DEVICE(0x0573, 0x4d23), .driver_info=HPG_WINTV_PRO_PAL_SECAM_L },
127 { USB_DEVICE(0x0573, 0x4d28) }, /* Hauppauge WinTv-USB Model 40204 Rev B283 */ 1052 { USB_DEVICE(0x0573, 0x4d24), .driver_info=HPG_WINTV_PRO_PAL_D_K },
128 { USB_DEVICE(0x0573, 0x4d29) }, /* Hauppauge WinTv-USB Model 40205 Rev B298 */ 1053 { USB_DEVICE(0x0573, 0x4d25), .driver_info=HPG_WINTV_PRO_PAL_SECAM },
129 { USB_DEVICE(0x0573, 0x4d30) }, /* Hauppauge WinTv-USB FM Model 40211 Rev B123 */ 1054 { USB_DEVICE(0x0573, 0x4d26), .driver_info=HPG_WINTV_PRO_PAL_SECAM_V2 },
130 { USB_DEVICE(0x0573, 0x4d31) }, /* Hauppauge WinTv-USB III (PAL) with FM radio Model 568 */ 1055 { USB_DEVICE(0x0573, 0x4d27), .driver_info=HPG_WINTV_PRO_PAL_BG_V2 },
131 { USB_DEVICE(0x0573, 0x4d32) }, /* Hauppauge WinTv-USB III (PAL) FM Model 573 */ 1056 { USB_DEVICE(0x0573, 0x4d28), .driver_info=HPG_WINTV_PRO_PAL_BG_D_K },
132 { USB_DEVICE(0x0573, 0x4d35) }, /* Hauppauge WinTv-USB III (SECAM) FM Model 40219 Rev B252 */ 1057 { USB_DEVICE(0x0573, 0x4d29), .driver_info=HPG_WINTV_PRO_PAL_I_D_K },
133 { USB_DEVICE(0x0573, 0x4d37) }, /* Hauppauge WinTv-USB Model 40219 Rev E189 */ 1058 { USB_DEVICE(0x0573, 0x4d30), .driver_info=HPG_WINTV_PRO_NTSC_MN_FM },
134 { USB_DEVICE(0x0768, 0x0006) }, /* Camtel Technology USB TV Genie Pro FM Model TVB330 */ 1059 { USB_DEVICE(0x0573, 0x4d31), .driver_info=HPG_WINTV_PRO_PAL_BG_FM },
135 { USB_DEVICE(0x07d0, 0x0001) }, /* Digital Video Creator I */ 1060 { USB_DEVICE(0x0573, 0x4d32), .driver_info=HPG_WINTV_PRO_PAL_I_FM },
136 { USB_DEVICE(0x07d0, 0x0002) }, /* Global Village GV-007 (NTSC) */ 1061 { USB_DEVICE(0x0573, 0x4d34), .driver_info=HPG_WINTV_PRO_PAL_D_K_FM },
137 { USB_DEVICE(0x07d0, 0x0003) }, /* Dazzle Fusion Model DVC-50 Rev 1 (NTSC) */ 1062 { USB_DEVICE(0x0573, 0x4d35), .driver_info=HPG_WINTV_PRO_TEMIC_PAL_FM },
138 { USB_DEVICE(0x07d0, 0x0004) }, /* Dazzle Fusion Model DVC-80 Rev 1 (PAL) */ 1063 { USB_DEVICE(0x0573, 0x4d36), .driver_info=HPG_WINTV_PRO_TEMIC_PAL_BG_FM },
139 { USB_DEVICE(0x07d0, 0x0005) }, /* Dazzle Fusion Model DVC-90 Rev 1 (SECAM) */ 1064 { USB_DEVICE(0x0573, 0x4d37), .driver_info=HPG_WINTV_PRO_PAL_FM },
140 { USB_DEVICE(0x2304, 0x010d) }, /* Pinnacle Studio PCTV USB (PAL) */ 1065 { USB_DEVICE(0x0573, 0x4d38), .driver_info=HPG_WINTV_PRO_NTSC_MN_FM_V2 },
141 { USB_DEVICE(0x2304, 0x0109) }, /* Pinnacle Studio PCTV USB (SECAM) */ 1066 { USB_DEVICE(0x0768, 0x0006), .driver_info=CAMTEL_TVB330 },
142 { USB_DEVICE(0x2304, 0x0110) }, /* Pinnacle Studio PCTV USB (PAL) */ 1067 { USB_DEVICE(0x07d0, 0x0001), .driver_info=DIGITAL_VIDEO_CREATOR_I },
143 { USB_DEVICE(0x2304, 0x0111) }, /* Miro PCTV USB */ 1068 { USB_DEVICE(0x07d0, 0x0002), .driver_info=GLOBAL_VILLAGE_GV_007_NTSC },
144 { USB_DEVICE(0x2304, 0x0112) }, /* Pinnacle Studio PCTV USB (NTSC) with FM radio */ 1069 { USB_DEVICE(0x07d0, 0x0003), .driver_info=DAZZLE_DVC_50_REV_1_NTSC },
145 { USB_DEVICE(0x2304, 0x0210) }, /* Pinnacle Studio PCTV USB (PAL) with FM radio */ 1070 { USB_DEVICE(0x07d0, 0x0004), .driver_info=DAZZLE_DVC_80_REV_1_PAL },
146 { USB_DEVICE(0x2304, 0x0212) }, /* Pinnacle Studio PCTV USB (NTSC) with FM radio */ 1071 { USB_DEVICE(0x07d0, 0x0005), .driver_info=DAZZLE_DVC_90_REV_1_SECAM },
147 { USB_DEVICE(0x2304, 0x0214) }, /* Pinnacle Studio PCTV USB (PAL) with FM radio */ 1072 { USB_DEVICE(0x07f8, 0x9104), .driver_info=ESKAPE_LABS_MYTV2GO },
148 { USB_DEVICE(0x2304, 0x0300) }, /* Pinnacle Studio Linx Video input cable (NTSC) */ 1073 { USB_DEVICE(0x2304, 0x010d), .driver_info=PINNA_PCTV_USB_PAL },
149 { USB_DEVICE(0x2304, 0x0301) }, /* Pinnacle Studio Linx Video input cable (PAL) */ 1074 { USB_DEVICE(0x2304, 0x0109), .driver_info=PINNA_PCTV_USB_SECAM },
150 { USB_DEVICE(0x2304, 0x0419) }, /* Pinnacle PCTV Bungee USB (PAL) FM */ 1075 { USB_DEVICE(0x2304, 0x0110), .driver_info=PINNA_PCTV_USB_PAL_FM },
151 { USB_DEVICE(0x2400, 0x4200) }, /* Hauppauge WinTv-USB2 Model 42012 */ 1076 { USB_DEVICE(0x2304, 0x0111), .driver_info=MIRO_PCTV_USB },
152 1077 { USB_DEVICE(0x2304, 0x0112), .driver_info=PINNA_PCTV_USB_NTSC_FM },
153 { } /* Terminating entry */ 1078 { USB_DEVICE(0x2304, 0x0210), .driver_info=PINNA_PCTV_USB_PAL_FM_V2 },
1079 { USB_DEVICE(0x2304, 0x0212), .driver_info=PINNA_PCTV_USB_NTSC_FM_V2 },
1080 { USB_DEVICE(0x2304, 0x0214), .driver_info=PINNA_PCTV_USB_PAL_FM_V3 },
1081 { USB_DEVICE(0x2304, 0x0300), .driver_info=PINNA_LINX_VD_IN_CAB_NTSC },
1082 { USB_DEVICE(0x2304, 0x0301), .driver_info=PINNA_LINX_VD_IN_CAB_PAL },
1083 { USB_DEVICE(0x2304, 0x0419), .driver_info=PINNA_PCTV_BUNGEE_PAL_FM },
1084 { USB_DEVICE(0x2400, 0x4200), .driver_info=HPG_WINTV },
154}; 1085};
155 1086
156MODULE_DEVICE_TABLE (usb, usbvision_table); 1087MODULE_DEVICE_TABLE (usb, usbvision_table);
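Switching usbvision_device_data[] to designated initializers and tagging every usb_device_id with a .driver_info index means probe code no longer has to re-scan the table for the matched vendor/product pair: the index names the model directly. The sketch below shows that use of id->driver_info; the probe function name is hypothetical, the extern declaration of usbvision_device_data is assumed to live in usbvision.h, and the body is illustrative rather than the driver's actual probe.

#include <linux/usb.h>
#include "usbvision.h"
#include "usbvision-cards.h"

/* Illustrative probe fragment: id->driver_info carries the card index set
 * in usbvision_table above (XANBOO, HPG_WINTV, ...), so the matching entry
 * of usbvision_device_data[] can be picked up without searching. */
static int usbvision_probe_sketch(struct usb_interface *intf,
				  const struct usb_device_id *id)
{
	int model = id->driver_info;

	if (model < 0 || model >= usbvision_device_data_size)
		return -ENODEV;

	dev_info(&intf->dev, "detected %s\n",
		 usbvision_device_data[model].ModelString);
	/* ... remember 'model' as the device's DevModel and continue ... */
	return 0;
}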
diff --git a/drivers/media/video/usbvision/usbvision-cards.h b/drivers/media/video/usbvision/usbvision-cards.h
new file mode 100644
index 000000000000..512c5cee4145
--- /dev/null
+++ b/drivers/media/video/usbvision/usbvision-cards.h
@@ -0,0 +1,66 @@
1#define XANBOO 0
2#define BELKIN_VIDEOBUS_II 1
3#define BELKIN_VIDEOBUS 2
4#define BELKIN_USB_VIDEOBUS_II 3
5#define ECHOFX_INTERVIEW_LITE 4
6#define USBGEAR_USBG_V1 5
7#define D_LINK_V100 6
8#define X10_USB_CAMERA 7
9#define HPG_WINTV_LIVE_PAL_BG 8
10#define HPG_WINTV_LIVE_PRO_NTSC_MN 9
11#define ZORAN_PMD_NOGATECH 10
12#define NOGATECH_USB_TV_NTSC_FM 11
13#define PNY_USB_TV_NTSC_FM 12
14#define PV_PLAYTV_USB_PRO_PAL_FM 13
15#define ZT_721 14
16#define HPG_WINTV_NTSC_MN 15
17#define HPG_WINTV_PAL_BG 16
18#define HPG_WINTV_PAL_I 17
19#define HPG_WINTV_PAL_SECAM_L 18
20#define HPG_WINTV_PAL_D_K 19
21#define HPG_WINTV_NTSC_FM 20
22#define HPG_WINTV_PAL_BG_FM 21
23#define HPG_WINTV_PAL_I_FM 22
24#define HPG_WINTV_PAL_D_K_FM 23
25#define HPG_WINTV_PRO_NTSC_MN 24
26#define HPG_WINTV_PRO_NTSC_MN_V2 25
27#define HPG_WINTV_PRO_PAL 26
28#define HPG_WINTV_PRO_NTSC_MN_V3 27
29#define HPG_WINTV_PRO_PAL_BG 28
30#define HPG_WINTV_PRO_PAL_I 29
31#define HPG_WINTV_PRO_PAL_SECAM_L 30
32#define HPG_WINTV_PRO_PAL_D_K 31
33#define HPG_WINTV_PRO_PAL_SECAM 32
34#define HPG_WINTV_PRO_PAL_SECAM_V2 33
35#define HPG_WINTV_PRO_PAL_BG_V2 34
36#define HPG_WINTV_PRO_PAL_BG_D_K 35
37#define HPG_WINTV_PRO_PAL_I_D_K 36
38#define HPG_WINTV_PRO_NTSC_MN_FM 37
39#define HPG_WINTV_PRO_PAL_BG_FM 38
40#define HPG_WINTV_PRO_PAL_I_FM 39
41#define HPG_WINTV_PRO_PAL_D_K_FM 40
42#define HPG_WINTV_PRO_TEMIC_PAL_FM 41
43#define HPG_WINTV_PRO_TEMIC_PAL_BG_FM 42
44#define HPG_WINTV_PRO_PAL_FM 43
45#define HPG_WINTV_PRO_NTSC_MN_FM_V2 44
46#define CAMTEL_TVB330 45
47#define DIGITAL_VIDEO_CREATOR_I 46
48#define GLOBAL_VILLAGE_GV_007_NTSC 47
49#define DAZZLE_DVC_50_REV_1_NTSC 48
50#define DAZZLE_DVC_80_REV_1_PAL 49
51#define DAZZLE_DVC_90_REV_1_SECAM 50
52#define ESKAPE_LABS_MYTV2GO 51
53#define PINNA_PCTV_USB_PAL 52
54#define PINNA_PCTV_USB_SECAM 53
55#define PINNA_PCTV_USB_PAL_FM 54
56#define MIRO_PCTV_USB 55
57#define PINNA_PCTV_USB_NTSC_FM 56
58#define PINNA_PCTV_USB_PAL_FM_V2 57
59#define PINNA_PCTV_USB_NTSC_FM_V2 58
60#define PINNA_PCTV_USB_PAL_FM_V3 59
61#define PINNA_LINX_VD_IN_CAB_NTSC 60
62#define PINNA_LINX_VD_IN_CAB_PAL 61
63#define PINNA_PCTV_BUNGEE_PAL_FM 62
64#define HPG_WINTV 63
65
66extern const int usbvision_device_data_size;
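Exporting usbvision_device_data_size (set from ARRAY_SIZE of the table in usbvision-cards.c) lets other files range-check a card index without hard-coding the entry count, and it also makes it easy to notice when the index macros above drift out of step with the table, since a skipped designated initializer simply leaves a zero-filled slot. The check below is a purely illustrative sketch of that idea; the helper name is hypothetical and nothing like it is part of this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include "usbvision.h"
#include "usbvision-cards.h"

/* Illustrative sanity pass: warn about card indices that were defined in
 * usbvision-cards.h but never given an entry in usbvision_device_data[]
 * (such gaps show up as zero-filled slots with a NULL ModelString). */
static int __init usbvision_check_cards(void)
{
	int i;

	for (i = 0; i < usbvision_device_data_size; i++)
		if (!usbvision_device_data[i].ModelString)
			printk(KERN_WARNING
			       "usbvision: card index %d has no table entry\n", i);
	return 0;
}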
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index f2154dc072e2..bcb551adb7e6 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -2040,8 +2040,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
2040 return 0; 2040 return 0;
2041 2041
2042 /* Set input format expected from decoder*/ 2042 /* Set input format expected from decoder*/
2043 if (usbvision_device_data[usbvision->DevModel].Vin_Reg1 >= 0) { 2043 if (usbvision_device_data[usbvision->DevModel].Vin_Reg1_override) {
2044 value[0] = usbvision_device_data[usbvision->DevModel].Vin_Reg1 & 0xff; 2044 value[0] = usbvision_device_data[usbvision->DevModel].Vin_Reg1;
2045 } else if(usbvision_device_data[usbvision->DevModel].Codec == CODEC_SAA7113) { 2045 } else if(usbvision_device_data[usbvision->DevModel].Codec == CODEC_SAA7113) {
2046 /* SAA7113 uses 8 bit output */ 2046 /* SAA7113 uses 8 bit output */
2047 value[0] = USBVISION_8_422_SYNC; 2047 value[0] = USBVISION_8_422_SYNC;
@@ -2112,8 +2112,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
2112 2112
2113 dvi_yuv_value = 0x00; /* U comes after V, Ya comes after U/V, Yb comes after Yb */ 2113 dvi_yuv_value = 0x00; /* U comes after V, Ya comes after U/V, Yb comes after Yb */
2114 2114
2115 if(usbvision_device_data[usbvision->DevModel].Dvi_yuv >= 0){ 2115 if(usbvision_device_data[usbvision->DevModel].Dvi_yuv_override){
2116 dvi_yuv_value = usbvision_device_data[usbvision->DevModel].Dvi_yuv & 0xff; 2116 dvi_yuv_value = usbvision_device_data[usbvision->DevModel].Dvi_yuv;
2117 } 2117 }
2118 else if(usbvision_device_data[usbvision->DevModel].Codec == CODEC_SAA7113) { 2118 else if(usbvision_device_data[usbvision->DevModel].Codec == CODEC_SAA7113) {
2119 /* This changes as the fine sync control changes. Further investigation necessary */ 2119 /* This changes as the fine sync control changes. Further investigation necessary */
@@ -2238,7 +2238,7 @@ static void call_usbvision_power_off(struct work_struct *work)
2238 PDEBUG(DBG_FUNC, ""); 2238 PDEBUG(DBG_FUNC, "");
2239 down_interruptible(&usbvision->lock); 2239 down_interruptible(&usbvision->lock);
2240 if(usbvision->user == 0) { 2240 if(usbvision->user == 0) {
2241 usbvision_i2c_usb_del_bus(&usbvision->i2c_adap); 2241 usbvision_i2c_unregister(usbvision);
2242 2242
2243 usbvision_power_off(usbvision); 2243 usbvision_power_off(usbvision);
2244 usbvision->initialized = 0; 2244 usbvision->initialized = 0;
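These two hunks drop the old convention where a negative Vin_Reg1 or Dvi_yuv meant "no override" in favour of an explicit _override flag beside the value, which is exactly what the card entries earlier in this patch feed (.Dvi_yuv_override = 1, .Dvi_yuv = 7). The snippet below restates the pattern in isolation with hypothetical field names, to show why the flag removes both the signed -1 sentinel and the & 0xff masking.

/* Illustrative flag-plus-value pattern: the bit says whether the value is
 * meaningful, so the value field keeps its full unsigned range and no magic
 * -1 sentinel (nor "& 0xff" masking) is needed. The field names stand in
 * for Dvi_yuv_override / Dvi_yuv and Vin_Reg1_override / Vin_Reg1. */
struct card_override {
	unsigned int reg_override:1;	/* 1 = reg below is valid */
	unsigned char reg;		/* value to program when overridden */
};

static unsigned char pick_reg(const struct card_override *card,
			      unsigned char chip_default)
{
	return card->reg_override ? card->reg : chip_default;
}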
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 609e1fd9c784..025be555194f 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * I2C_ALGO_USB.C 2 * usbvision_i2c.c
3 * i2c algorithm for USB-I2C Bridges 3 * i2c algorithm for USB-I2C Bridges
4 * 4 *
5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> 5 * Copyright (c) 1999-2007 Joerg Heckenbach <joerg@heckenbach-aw.de>
6 * Dwaine Garden <dwainegarden@rogers.com> 6 * Dwaine Garden <dwainegarden@rogers.com>
7 * 7 *
8 * This module is part of usbvision driver project. 8 * This module is part of usbvision driver project.
@@ -39,7 +39,6 @@
39#include "usbvision.h" 39#include "usbvision.h"
40 40
41#define DBG_I2C 1<<0 41#define DBG_I2C 1<<0
42#define DBG_ALGO 1<<1
43 42
44static int i2c_debug = 0; 43static int i2c_debug = 0;
45 44
@@ -49,22 +48,22 @@ MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
49#define PDEBUG(level, fmt, args...) \ 48#define PDEBUG(level, fmt, args...) \
50 if (i2c_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args) 49 if (i2c_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
51 50
52static int usbvision_i2c_write(void *data, unsigned char addr, char *buf, 51static int usbvision_i2c_write(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
53 short len); 52 short len);
54static int usbvision_i2c_read(void *data, unsigned char addr, char *buf, 53static int usbvision_i2c_read(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
55 short len); 54 short len);
56 55
57static inline int try_write_address(struct i2c_adapter *i2c_adap, 56static inline int try_write_address(struct i2c_adapter *i2c_adap,
58 unsigned char addr, int retries) 57 unsigned char addr, int retries)
59{ 58{
60 void *data; 59 struct usb_usbvision *usbvision;
61 int i, ret = -1; 60 int i, ret = -1;
62 char buf[4]; 61 char buf[4];
63 62
64 data = i2c_get_adapdata(i2c_adap); 63 usbvision = (struct usb_usbvision *)i2c_get_adapdata(i2c_adap);
65 buf[0] = 0x00; 64 buf[0] = 0x00;
66 for (i = 0; i <= retries; i++) { 65 for (i = 0; i <= retries; i++) {
67 ret = (usbvision_i2c_write(data, addr, buf, 1)); 66 ret = (usbvision_i2c_write(usbvision, addr, buf, 1));
68 if (ret == 1) 67 if (ret == 1)
69 break; /* success! */ 68 break; /* success! */
70 udelay(5); 69 udelay(5);
@@ -73,8 +72,8 @@ static inline int try_write_address(struct i2c_adapter *i2c_adap,
73 udelay(10); 72 udelay(10);
74 } 73 }
75 if (i) { 74 if (i) {
76 PDEBUG(DBG_ALGO,"Needed %d retries for address %#2x", i, addr); 75 PDEBUG(DBG_I2C,"Needed %d retries for address %#2x", i, addr);
77 PDEBUG(DBG_ALGO,"Maybe there's no device at this address"); 76 PDEBUG(DBG_I2C,"Maybe there's no device at this address");
78 } 77 }
79 return ret; 78 return ret;
80} 79}
@@ -82,13 +81,13 @@ static inline int try_write_address(struct i2c_adapter *i2c_adap,
82static inline int try_read_address(struct i2c_adapter *i2c_adap, 81static inline int try_read_address(struct i2c_adapter *i2c_adap,
83 unsigned char addr, int retries) 82 unsigned char addr, int retries)
84{ 83{
85 void *data; 84 struct usb_usbvision *usbvision;
86 int i, ret = -1; 85 int i, ret = -1;
87 char buf[4]; 86 char buf[4];
88 87
89 data = i2c_get_adapdata(i2c_adap); 88 usbvision = (struct usb_usbvision *)i2c_get_adapdata(i2c_adap);
90 for (i = 0; i <= retries; i++) { 89 for (i = 0; i <= retries; i++) {
91 ret = (usbvision_i2c_read(data, addr, buf, 1)); 90 ret = (usbvision_i2c_read(usbvision, addr, buf, 1));
92 if (ret == 1) 91 if (ret == 1)
93 break; /* success! */ 92 break; /* success! */
94 udelay(5); 93 udelay(5);
@@ -97,8 +96,8 @@ static inline int try_read_address(struct i2c_adapter *i2c_adap,
97 udelay(10); 96 udelay(10);
98 } 97 }
99 if (i) { 98 if (i) {
100 PDEBUG(DBG_ALGO,"Needed %d retries for address %#2x", i, addr); 99 PDEBUG(DBG_I2C,"Needed %d retries for address %#2x", i, addr);
101 PDEBUG(DBG_ALGO,"Maybe there's no device at this address"); 100 PDEBUG(DBG_I2C,"Maybe there's no device at this address");
102 } 101 }
103 return ret; 102 return ret;
104} 103}
@@ -152,32 +151,32 @@ static inline int usb_find_address(struct i2c_adapter *i2c_adap,
152} 151}
153 152
154static int 153static int
155usb_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) 154usbvision_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
156{ 155{
157 struct i2c_msg *pmsg; 156 struct i2c_msg *pmsg;
158 void *data; 157 struct usb_usbvision *usbvision;
159 int i, ret; 158 int i, ret;
160 unsigned char addr; 159 unsigned char addr;
161 160
162 data = i2c_get_adapdata(i2c_adap); 161 usbvision = (struct usb_usbvision *)i2c_get_adapdata(i2c_adap);
163 162
164 for (i = 0; i < num; i++) { 163 for (i = 0; i < num; i++) {
165 pmsg = &msgs[i]; 164 pmsg = &msgs[i];
166 ret = usb_find_address(i2c_adap, pmsg, i2c_adap->retries, &addr); 165 ret = usb_find_address(i2c_adap, pmsg, i2c_adap->retries, &addr);
167 if (ret != 0) { 166 if (ret != 0) {
168 PDEBUG(DBG_ALGO,"got NAK from device, message #%d", i); 167 PDEBUG(DBG_I2C,"got NAK from device, message #%d", i);
169 return (ret < 0) ? ret : -EREMOTEIO; 168 return (ret < 0) ? ret : -EREMOTEIO;
170 } 169 }
171 170
172 if (pmsg->flags & I2C_M_RD) { 171 if (pmsg->flags & I2C_M_RD) {
173 /* read bytes into buffer */ 172 /* read bytes into buffer */
174 ret = (usbvision_i2c_read(data, addr, pmsg->buf, pmsg->len)); 173 ret = (usbvision_i2c_read(usbvision, addr, pmsg->buf, pmsg->len));
175 if (ret < pmsg->len) { 174 if (ret < pmsg->len) {
176 return (ret < 0) ? ret : -EREMOTEIO; 175 return (ret < 0) ? ret : -EREMOTEIO;
177 } 176 }
178 } else { 177 } else {
179 /* write bytes from buffer */ 178 /* write bytes from buffer */
180 ret = (usbvision_i2c_write(data, addr, pmsg->buf, pmsg->len)); 179 ret = (usbvision_i2c_write(usbvision, addr, pmsg->buf, pmsg->len));
181 if (ret < pmsg->len) { 180 if (ret < pmsg->len) {
182 return (ret < 0) ? ret : -EREMOTEIO; 181 return (ret < 0) ? ret : -EREMOTEIO;
183 } 182 }
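All of the transfer helpers now recover the struct usb_usbvision from the adapter with i2c_get_adapdata() instead of threading an opaque void *data through. That only works if the driver stored the pointer on the adapter when it set it up; the setup call is not visible in these hunks, so the pairing below is a sketch of the usual i2c_set_adapdata()/i2c_get_adapdata() idiom rather than a quote of usbvision_i2c_register().

#include <linux/i2c.h>
#include "usbvision.h"

/* Setup side (assumed, not shown above): attach the driver context to the
 * adapter before registering it with the i2c core. */
static void usbvision_attach_adapdata(struct usb_usbvision *usbvision)
{
	i2c_set_adapdata(&usbvision->i2c_adap, usbvision);
}

/* Consumer side, as in usbvision_i2c_xfer() above: recover the context
 * from the adapter handed back by the i2c core. */
static struct usb_usbvision *to_usbvision(struct i2c_adapter *adap)
{
	return (struct usb_usbvision *)i2c_get_adapdata(adap);
}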
@@ -191,7 +190,7 @@ static int algo_control(struct i2c_adapter *adapter, unsigned int cmd, unsigned
191 return 0; 190 return 0;
192} 191}
193 192
194static u32 usb_func(struct i2c_adapter *adap) 193static u32 functionality(struct i2c_adapter *adap)
195{ 194{
196 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; 195 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
197} 196}
@@ -199,11 +198,11 @@ static u32 usb_func(struct i2c_adapter *adap)
199 198
200/* -----exported algorithm data: ------------------------------------- */ 199/* -----exported algorithm data: ------------------------------------- */
201 200
202static struct i2c_algorithm i2c_usb_algo = { 201static struct i2c_algorithm usbvision_algo = {
203 .master_xfer = usb_xfer, 202 .master_xfer = usbvision_i2c_xfer,
204 .smbus_xfer = NULL, 203 .smbus_xfer = NULL,
205 .algo_control = algo_control, 204 .algo_control = algo_control,
206 .functionality = usb_func, 205 .functionality = functionality,
207}; 206};
208 207
209 208
@@ -213,41 +212,29 @@ static struct i2c_algorithm i2c_usb_algo = {
213static int usbvision_i2c_usb_add_bus(struct i2c_adapter *adap) 212static int usbvision_i2c_usb_add_bus(struct i2c_adapter *adap)
214{ 213{
215 PDEBUG(DBG_I2C, "I2C debugging is enabled [i2c]"); 214 PDEBUG(DBG_I2C, "I2C debugging is enabled [i2c]");
216 PDEBUG(DBG_ALGO, "ALGO debugging is enabled [i2c]"); 215 PDEBUG(DBG_I2C, "ALGO debugging is enabled [i2c]");
217 216
218 /* register new adapter to i2c module... */ 217 /* register new adapter to i2c module... */
219 218
220 adap->algo = &i2c_usb_algo; 219 adap->algo = &usbvision_algo;
221 220
222 adap->timeout = 100; /* default values, should */ 221 adap->timeout = 100; /* default values, should */
223 adap->retries = 3; /* be replaced by defines */ 222 adap->retries = 3; /* be replaced by defines */
224 223
225 i2c_add_adapter(adap); 224 i2c_add_adapter(adap);
226 225
227 PDEBUG(DBG_ALGO,"i2c bus for %s registered", adap->name); 226 PDEBUG(DBG_I2C,"i2c bus for %s registered", adap->name);
228
229 return 0;
230}
231
232
233int usbvision_i2c_usb_del_bus(struct i2c_adapter *adap)
234{
235
236 i2c_del_adapter(adap);
237
238 PDEBUG(DBG_ALGO,"i2c bus for %s unregistered", adap->name);
239 227
240 return 0; 228 return 0;
241} 229}
242 230
243
244/* ----------------------------------------------------------------------- */ 231/* ----------------------------------------------------------------------- */
245/* usbvision specific I2C functions */ 232/* usbvision specific I2C functions */
246/* ----------------------------------------------------------------------- */ 233/* ----------------------------------------------------------------------- */
247static struct i2c_adapter i2c_adap_template; 234static struct i2c_adapter i2c_adap_template;
248static struct i2c_client i2c_client_template; 235static struct i2c_client i2c_client_template;
249 236
250int usbvision_init_i2c(struct usb_usbvision *usbvision) 237int usbvision_i2c_register(struct usb_usbvision *usbvision)
251{ 238{
252 memcpy(&usbvision->i2c_adap, &i2c_adap_template, 239 memcpy(&usbvision->i2c_adap, &i2c_adap_template,
253 sizeof(struct i2c_adapter)); 240 sizeof(struct i2c_adapter));
@@ -265,7 +252,7 @@ int usbvision_init_i2c(struct usb_usbvision *usbvision)
265 usbvision->i2c_client.adapter = &usbvision->i2c_adap; 252 usbvision->i2c_client.adapter = &usbvision->i2c_adap;
266 253
267 if (usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_IIC_LRNACK) < 0) { 254 if (usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_IIC_LRNACK) < 0) {
268 printk(KERN_ERR "usbvision_init_i2c: can't write reg\n"); 255 printk(KERN_ERR "usbvision_register: can't write reg\n");
269 return -EBUSY; 256 return -EBUSY;
270 } 257 }
271 258
@@ -287,6 +274,16 @@ int usbvision_init_i2c(struct usb_usbvision *usbvision)
287 return usbvision_i2c_usb_add_bus(&usbvision->i2c_adap); 274 return usbvision_i2c_usb_add_bus(&usbvision->i2c_adap);
288} 275}
289 276
277int usbvision_i2c_unregister(struct usb_usbvision *usbvision)
278{
279
280 i2c_del_adapter(&(usbvision->i2c_adap));
281
282 PDEBUG(DBG_I2C,"i2c bus for %s unregistered", usbvision->i2c_adap.name);
283
284 return 0;
285}
286
290void call_i2c_clients(struct usb_usbvision *usbvision, unsigned int cmd, 287void call_i2c_clients(struct usb_usbvision *usbvision, unsigned int cmd,
291 void *arg) 288 void *arg)
292{ 289{
@@ -300,19 +297,12 @@ static int attach_inform(struct i2c_client *client)
300 usbvision = (struct usb_usbvision *)i2c_get_adapdata(client->adapter); 297 usbvision = (struct usb_usbvision *)i2c_get_adapdata(client->adapter);
301 298
302 switch (client->addr << 1) { 299 switch (client->addr << 1) {
303 case 0x43: 300 case 0x42 << 1:
304 case 0x4b: 301 case 0x43 << 1:
305 { 302 case 0x4a << 1:
306 struct tuner_setup tun_setup; 303 case 0x4b << 1:
307 304 PDEBUG(DBG_I2C,"attach_inform: tda9887 detected.");
308 tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
309 tun_setup.type = TUNER_TDA9887;
310 tun_setup.addr = client->addr;
311
312 call_i2c_clients(usbvision, TUNER_SET_TYPE_ADDR, &tun_setup);
313
314 break; 305 break;
315 }
316 case 0x42: 306 case 0x42:
317 PDEBUG(DBG_I2C,"attach_inform: saa7114 detected."); 307 PDEBUG(DBG_I2C,"attach_inform: saa7114 detected.");
318 break; 308 break;
@@ -480,7 +470,7 @@ static int usbvision_i2c_write_max4(struct usb_usbvision *usbvision,
480 return len; 470 return len;
481} 471}
482 472
483static int usbvision_i2c_write(void *data, unsigned char addr, char *buf, 473static int usbvision_i2c_write(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
484 short len) 474 short len)
485{ 475{
486 char *bufPtr = buf; 476 char *bufPtr = buf;
@@ -488,7 +478,6 @@ static int usbvision_i2c_write(void *data, unsigned char addr, char *buf,
488 int wrcount = 0; 478 int wrcount = 0;
489 int count; 479 int count;
490 int maxLen = 4; 480 int maxLen = 4;
491 struct usb_usbvision *usbvision = (struct usb_usbvision *) data;
492 481
493 while (len > 0) { 482 while (len > 0) {
494 count = (len > maxLen) ? maxLen : len; 483 count = (len > maxLen) ? maxLen : len;
@@ -503,14 +492,13 @@ static int usbvision_i2c_write(void *data, unsigned char addr, char *buf,
503 return wrcount; 492 return wrcount;
504} 493}
505 494
506static int usbvision_i2c_read(void *data, unsigned char addr, char *buf, 495static int usbvision_i2c_read(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
507 short len) 496 short len)
508{ 497{
509 char temp[4]; 498 char temp[4];
510 int retval, i; 499 int retval, i;
511 int rdcount = 0; 500 int rdcount = 0;
512 int count; 501 int count;
513 struct usb_usbvision *usbvision = (struct usb_usbvision *) data;
514 502
515 while (len > 0) { 503 while (len > 0) {
516 count = (len > 3) ? 4 : len; 504 count = (len > 3) ? 4 : len;
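The hunks above fold the old i2c-algo-usb glue into the usbvision driver itself: the adapter's void *adapdata becomes a typed struct usb_usbvision pointer, and bus teardown moves from the generic usbvision_i2c_usb_del_bus(adapter) helper to usbvision_i2c_unregister(usbvision). A minimal sketch of the resulting register/unregister pair, with the template copying and register writes of the real functions omitted (usbvision_algo is the renamed algorithm table shown above):

#include <linux/i2c.h>

extern struct i2c_algorithm usbvision_algo;	/* .master_xfer = usbvision_i2c_xfer, as above */

/* trimmed view of the device struct; the full definition lives in usbvision.h */
struct usb_usbvision {
	struct i2c_adapter i2c_adap;
	struct i2c_client i2c_client;
	/* ... */
};

int usbvision_i2c_register(struct usb_usbvision *usbvision)
{
	usbvision->i2c_adap.algo = &usbvision_algo;
	i2c_set_adapdata(&usbvision->i2c_adap, usbvision);	/* typed back-pointer used by usbvision_i2c_xfer() */
	usbvision->i2c_client.adapter = &usbvision->i2c_adap;
	return i2c_add_adapter(&usbvision->i2c_adap);
}

int usbvision_i2c_unregister(struct usb_usbvision *usbvision)
{
	i2c_del_adapter(&usbvision->i2c_adap);	/* adapter is now reached through the device struct */
	return 0;
}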
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 6fc14557d623..216704170a4c 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -76,6 +76,7 @@
76#endif 76#endif
77 77
78#include "usbvision.h" 78#include "usbvision.h"
79#include "usbvision-cards.h"
79 80
80#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>, Dwaine Garden <DwaineGarden@rogers.com>" 81#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>, Dwaine Garden <DwaineGarden@rogers.com>"
81#define DRIVER_NAME "usbvision" 82#define DRIVER_NAME "usbvision"
@@ -150,7 +151,6 @@ static int PowerOnAtOpen = 1; // Set the default device to power on at startu
150static int video_nr = -1; // Sequential Number of Video Device 151static int video_nr = -1; // Sequential Number of Video Device
151static int radio_nr = -1; // Sequential Number of Radio Device 152static int radio_nr = -1; // Sequential Number of Radio Device
152static int vbi_nr = -1; // Sequential Number of VBI Device 153static int vbi_nr = -1; // Sequential Number of VBI Device
153static char *CustomDevice=NULL; // Set as nothing....
154 154
155// Grab parameters for the device driver 155// Grab parameters for the device driver
156 156
@@ -161,7 +161,6 @@ module_param(PowerOnAtOpen, int, 0444);
161module_param(video_nr, int, 0444); 161module_param(video_nr, int, 0444);
162module_param(radio_nr, int, 0444); 162module_param(radio_nr, int, 0444);
163module_param(vbi_nr, int, 0444); 163module_param(vbi_nr, int, 0444);
164module_param(CustomDevice, charp, 0444);
165#else // Old Style 164#else // Old Style
166MODULE_PARAM(isocMode, "i"); 165MODULE_PARAM(isocMode, "i");
167MODULE_PARM(video_debug, "i"); // Grab the Debug Mode of the device driver 166MODULE_PARM(video_debug, "i"); // Grab the Debug Mode of the device driver
@@ -171,7 +170,6 @@ MODULE_PARM(SwitchSVideoInput, "i"); // To help people with Black and White ou
171MODULE_PARM(video_nr, "i"); // video_nr option allows to specify a certain /dev/videoX device (like /dev/video0 or /dev/video1 ...) 170MODULE_PARM(video_nr, "i"); // video_nr option allows to specify a certain /dev/videoX device (like /dev/video0 or /dev/video1 ...)
172MODULE_PARM(radio_nr, "i"); // radio_nr option allows to specify a certain /dev/radioX device (like /dev/radio0 or /dev/radio1 ...) 171MODULE_PARM(radio_nr, "i"); // radio_nr option allows to specify a certain /dev/radioX device (like /dev/radio0 or /dev/radio1 ...)
173MODULE_PARM(vbi_nr, "i"); // vbi_nr option allows to specify a certain /dev/vbiX device (like /dev/vbi0 or /dev/vbi1 ...) 172MODULE_PARM(vbi_nr, "i"); // vbi_nr option allows to specify a certain /dev/vbiX device (like /dev/vbi0 or /dev/vbi1 ...)
174MODULE_PARM(CustomDevice, "s"); // .... CustomDevice
175#endif 173#endif
176 174
177MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)"); 175MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
@@ -180,7 +178,6 @@ MODULE_PARM_DESC(PowerOnAtOpen, " Set the default device to power on when device
180MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)"); 178MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
181MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)"); 179MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
182MODULE_PARM_DESC(vbi_nr, "Set vbi device number (/dev/vbiX). Default: -1 (autodetect)"); 180MODULE_PARM_DESC(vbi_nr, "Set vbi device number (/dev/vbiX). Default: -1 (autodetect)");
183MODULE_PARM_DESC(CustomDevice, " Define the fine tuning parameters for the device. Default: null");
184 181
185 182
186// Misc stuff 183// Misc stuff
@@ -409,7 +406,7 @@ static int usbvision_v4l2_open(struct inode *inode, struct file *file)
409 down(&usbvision->lock); 406 down(&usbvision->lock);
410 if (usbvision->power == 0) { 407 if (usbvision->power == 0) {
411 usbvision_power_on(usbvision); 408 usbvision_power_on(usbvision);
412 usbvision_init_i2c(usbvision); 409 usbvision_i2c_register(usbvision);
413 } 410 }
414 411
415 /* Send init sequence only once, it's large! */ 412 /* Send init sequence only once, it's large! */
@@ -431,7 +428,7 @@ static int usbvision_v4l2_open(struct inode *inode, struct file *file)
431 } 428 }
432 else { 429 else {
433 if (PowerOnAtOpen) { 430 if (PowerOnAtOpen) {
434 usbvision_i2c_usb_del_bus(&usbvision->i2c_adap); 431 usbvision_i2c_unregister(usbvision);
435 usbvision_power_off(usbvision); 432 usbvision_power_off(usbvision);
436 usbvision->initialized = 0; 433 usbvision->initialized = 0;
437 } 434 }
@@ -1239,7 +1236,7 @@ static int usbvision_radio_open(struct inode *inode, struct file *file)
1239 usbvision_reset_powerOffTimer(usbvision); 1236 usbvision_reset_powerOffTimer(usbvision);
1240 if (usbvision->power == 0) { 1237 if (usbvision->power == 0) {
1241 usbvision_power_on(usbvision); 1238 usbvision_power_on(usbvision);
1242 usbvision_init_i2c(usbvision); 1239 usbvision_i2c_register(usbvision);
1243 } 1240 }
1244 } 1241 }
1245 1242
@@ -1261,7 +1258,7 @@ static int usbvision_radio_open(struct inode *inode, struct file *file)
1261 1258
1262 if (errCode) { 1259 if (errCode) {
1263 if (PowerOnAtOpen) { 1260 if (PowerOnAtOpen) {
1264 usbvision_i2c_usb_del_bus(&usbvision->i2c_adap); 1261 usbvision_i2c_unregister(usbvision);
1265 usbvision_power_off(usbvision); 1262 usbvision_power_off(usbvision);
1266 usbvision->initialized = 0; 1263 usbvision->initialized = 0;
1267 } 1264 }
@@ -1744,8 +1741,8 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
1744 model = usbvision->DevModel; 1741 model = usbvision->DevModel;
1745 usbvision->palette = usbvision_v4l2_format[2]; // V4L2_PIX_FMT_RGB24; 1742 usbvision->palette = usbvision_v4l2_format[2]; // V4L2_PIX_FMT_RGB24;
1746 1743
1747 if (usbvision_device_data[usbvision->DevModel].Vin_Reg2 >= 0) { 1744 if (usbvision_device_data[usbvision->DevModel].Vin_Reg2_override) {
1748 usbvision->Vin_Reg2_Preset = usbvision_device_data[usbvision->DevModel].Vin_Reg2 & 0xff; 1745 usbvision->Vin_Reg2_Preset = usbvision_device_data[usbvision->DevModel].Vin_Reg2;
1749 } else { 1746 } else {
1750 usbvision->Vin_Reg2_Preset = 0; 1747 usbvision->Vin_Reg2_Preset = 0;
1751 } 1748 }
@@ -1764,7 +1761,7 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
1764 usbvision_audio_off(usbvision); //first switch off audio 1761 usbvision_audio_off(usbvision); //first switch off audio
1765 if (!PowerOnAtOpen) { 1762 if (!PowerOnAtOpen) {
1766 usbvision_power_on(usbvision); //and then power up the noisy tuner 1763 usbvision_power_on(usbvision); //and then power up the noisy tuner
1767 usbvision_init_i2c(usbvision); 1764 usbvision_i2c_register(usbvision);
1768 } 1765 }
1769} 1766}
1770 1767
@@ -1775,7 +1772,8 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
1775 * if it looks like USBVISION video device 1772 * if it looks like USBVISION video device
1776 * 1773 *
1777 */ 1774 */
1778static int __devinit usbvision_probe(struct usb_interface *intf, const struct usb_device_id *devid) 1775static int __devinit usbvision_probe(struct usb_interface *intf,
1776 const struct usb_device_id *devid)
1779{ 1777{
1780 struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf)); 1778 struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf));
1781 struct usb_interface *uif; 1779 struct usb_interface *uif;
@@ -1786,25 +1784,17 @@ static int __devinit usbvision_probe(struct usb_interface *intf, const struct us
1786 int model,i; 1784 int model,i;
1787 1785
1788 PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u", 1786 PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u",
1789 dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum); 1787 dev->descriptor.idVendor,
1788 dev->descriptor.idProduct, ifnum);
1790 1789
1791 /* Is it an USBVISION video dev? */ 1790 model = devid->driver_info;
1792 model = 0; 1791 if ( (model<0) || (model>=usbvision_device_data_size) ) {
1793 for(model = 0; usbvision_device_data[model].idVendor; model++) { 1792 PDEBUG(DBG_PROBE, "model out of bounds %d",model);
1794 if (le16_to_cpu(dev->descriptor.idVendor) != usbvision_device_data[model].idVendor) { 1793 return -ENODEV;
1795 continue;
1796 }
1797 if (le16_to_cpu(dev->descriptor.idProduct) != usbvision_device_data[model].idProduct) {
1798 continue;
1799 }
1800
1801 printk(KERN_INFO "%s: %s found\n", __FUNCTION__, usbvision_device_data[model].ModelString);
1802 break;
1803 } 1794 }
1795 printk(KERN_INFO "%s: %s found\n", __FUNCTION__,
1796 usbvision_device_data[model].ModelString);
1804 1797
1805 if (usbvision_device_data[model].idVendor == 0) {
1806 return -ENODEV; //no matching device
1807 }
1808 if (usbvision_device_data[model].Interface >= 0) { 1798 if (usbvision_device_data[model].Interface >= 0) {
1809 interface = &dev->actconfig->interface[usbvision_device_data[model].Interface]->altsetting[0]; 1799 interface = &dev->actconfig->interface[usbvision_device_data[model].Interface]->altsetting[0];
1810 } 1800 }
@@ -1822,16 +1812,15 @@ static int __devinit usbvision_probe(struct usb_interface *intf, const struct us
1822 return -ENODEV; 1812 return -ENODEV;
1823 } 1813 }
1824 1814
1825 usb_get_dev(dev);
1826
1827 if ((usbvision = usbvision_alloc(dev)) == NULL) { 1815 if ((usbvision = usbvision_alloc(dev)) == NULL) {
1828 err("%s: couldn't allocate USBVision struct", __FUNCTION__); 1816 err("%s: couldn't allocate USBVision struct", __FUNCTION__);
1829 return -ENOMEM; 1817 return -ENOMEM;
1830 } 1818 }
1819
1831 if (dev->descriptor.bNumConfigurations > 1) { 1820 if (dev->descriptor.bNumConfigurations > 1) {
1832 usbvision->bridgeType = BRIDGE_NT1004; 1821 usbvision->bridgeType = BRIDGE_NT1004;
1833 } 1822 }
1834 else if (usbvision_device_data[model].ModelString == "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)") { 1823 else if (model == DAZZLE_DVC_90_REV_1_SECAM) {
1835 usbvision->bridgeType = BRIDGE_NT1005; 1824 usbvision->bridgeType = BRIDGE_NT1005;
1836 } 1825 }
1837 else { 1826 else {
@@ -1920,7 +1909,7 @@ static void __devexit usbvision_disconnect(struct usb_interface *intf)
1920 usbvision_stop_isoc(usbvision); 1909 usbvision_stop_isoc(usbvision);
1921 1910
1922 if (usbvision->power) { 1911 if (usbvision->power) {
1923 usbvision_i2c_usb_del_bus(&usbvision->i2c_adap); 1912 usbvision_i2c_unregister(usbvision);
1924 usbvision_power_off(usbvision); 1913 usbvision_power_off(usbvision);
1925 } 1914 }
1926 usbvision->remove_pending = 1; // Now all ISO data will be ignored 1915 usbvision->remove_pending = 1; // Now all ISO data will be ignored
@@ -1951,124 +1940,6 @@ static struct usb_driver usbvision_driver = {
1951}; 1940};
1952 1941
1953/* 1942/*
1954 * customdevice_process()
1955 *
1956 * This procedure preprocesses CustomDevice parameter if any
1957 *
1958 */
1959static void customdevice_process(void)
1960{
1961 usbvision_device_data[0]=usbvision_device_data[1];
1962 usbvision_table[0]=usbvision_table[1];
1963
1964 if(CustomDevice)
1965 {
1966 char *parse=CustomDevice;
1967
1968 PDEBUG(DBG_PROBE, "CustomDevide=%s", CustomDevice);
1969
1970 /*format is CustomDevice="0x0573 0x4D31 0 7113 3 PAL 1 1 1 5 -1 -1 -1 -1 -1"
1971 usbvision_device_data[0].idVendor;
1972 usbvision_device_data[0].idProduct;
1973 usbvision_device_data[0].Interface;
1974 usbvision_device_data[0].Codec;
1975 usbvision_device_data[0].VideoChannels;
1976 usbvision_device_data[0].VideoNorm;
1977 usbvision_device_data[0].AudioChannels;
1978 usbvision_device_data[0].Radio;
1979 usbvision_device_data[0].Tuner;
1980 usbvision_device_data[0].TunerType;
1981 usbvision_device_data[0].Vin_Reg1;
1982 usbvision_device_data[0].Vin_Reg2;
1983 usbvision_device_data[0].X_Offset;
1984 usbvision_device_data[0].Y_Offset;
1985 usbvision_device_data[0].Dvi_yuv;
1986 usbvision_device_data[0].ModelString;
1987 */
1988
1989 rmspace(parse);
1990 usbvision_device_data[0].ModelString="USBVISION Custom Device";
1991
1992 parse+=2;
1993 sscanf(parse,"%x",&usbvision_device_data[0].idVendor);
1994 goto2next(parse);
1995 PDEBUG(DBG_PROBE, "idVendor=0x%.4X", usbvision_device_data[0].idVendor);
1996 parse+=2;
1997 sscanf(parse,"%x",&usbvision_device_data[0].idProduct);
1998 goto2next(parse);
1999 PDEBUG(DBG_PROBE, "idProduct=0x%.4X", usbvision_device_data[0].idProduct);
2000 sscanf(parse,"%d",&usbvision_device_data[0].Interface);
2001 goto2next(parse);
2002 PDEBUG(DBG_PROBE, "Interface=%d", usbvision_device_data[0].Interface);
2003 sscanf(parse,"%d",&usbvision_device_data[0].Codec);
2004 goto2next(parse);
2005 PDEBUG(DBG_PROBE, "Codec=%d", usbvision_device_data[0].Codec);
2006 sscanf(parse,"%d",&usbvision_device_data[0].VideoChannels);
2007 goto2next(parse);
2008 PDEBUG(DBG_PROBE, "VideoChannels=%d", usbvision_device_data[0].VideoChannels);
2009
2010 switch(*parse)
2011 {
2012 case 'P':
2013 PDEBUG(DBG_PROBE, "VideoNorm=PAL");
2014 usbvision_device_data[0].VideoNorm=V4L2_STD_PAL;
2015 break;
2016
2017 case 'S':
2018 PDEBUG(DBG_PROBE, "VideoNorm=SECAM");
2019 usbvision_device_data[0].VideoNorm=V4L2_STD_SECAM;
2020 break;
2021
2022 case 'N':
2023 PDEBUG(DBG_PROBE, "VideoNorm=NTSC");
2024 usbvision_device_data[0].VideoNorm=V4L2_STD_NTSC;
2025 break;
2026
2027 default:
2028 PDEBUG(DBG_PROBE, "VideoNorm=PAL (by default)");
2029 usbvision_device_data[0].VideoNorm=V4L2_STD_PAL;
2030 break;
2031 }
2032 goto2next(parse);
2033
2034 sscanf(parse,"%d",&usbvision_device_data[0].AudioChannels);
2035 goto2next(parse);
2036 PDEBUG(DBG_PROBE, "AudioChannels=%d", usbvision_device_data[0].AudioChannels);
2037 sscanf(parse,"%d",&usbvision_device_data[0].Radio);
2038 goto2next(parse);
2039 PDEBUG(DBG_PROBE, "Radio=%d", usbvision_device_data[0].Radio);
2040 sscanf(parse,"%d",&usbvision_device_data[0].Tuner);
2041 goto2next(parse);
2042 PDEBUG(DBG_PROBE, "Tuner=%d", usbvision_device_data[0].Tuner);
2043 sscanf(parse,"%d",&usbvision_device_data[0].TunerType);
2044 goto2next(parse);
2045 PDEBUG(DBG_PROBE, "TunerType=%d", usbvision_device_data[0].TunerType);
2046 sscanf(parse,"%d",&usbvision_device_data[0].Vin_Reg1);
2047 goto2next(parse);
2048 PDEBUG(DBG_PROBE, "Vin_Reg1=%d", usbvision_device_data[0].Vin_Reg1);
2049 sscanf(parse,"%d",&usbvision_device_data[0].Vin_Reg2);
2050 goto2next(parse);
2051 PDEBUG(DBG_PROBE, "Vin_Reg2=%d", usbvision_device_data[0].Vin_Reg2);
2052 sscanf(parse,"%d",&usbvision_device_data[0].X_Offset);
2053 goto2next(parse);
2054 PDEBUG(DBG_PROBE, "X_Offset=%d", usbvision_device_data[0].X_Offset);
2055 sscanf(parse,"%d",&usbvision_device_data[0].Y_Offset);
2056 goto2next(parse);
2057 PDEBUG(DBG_PROBE, "Y_Offset=%d", usbvision_device_data[0].Y_Offset);
2058 sscanf(parse,"%d",&usbvision_device_data[0].Dvi_yuv);
2059 PDEBUG(DBG_PROBE, "Dvi_yuv=%d", usbvision_device_data[0].Dvi_yuv);
2060
2061 //add to usbvision_table also
2062 usbvision_table[0].match_flags=USB_DEVICE_ID_MATCH_DEVICE;
2063 usbvision_table[0].idVendor=usbvision_device_data[0].idVendor;
2064 usbvision_table[0].idProduct=usbvision_device_data[0].idProduct;
2065
2066 }
2067}
2068
2069
2070
2071/*
2072 * usbvision_init() 1943 * usbvision_init()
2073 * 1944 *
2074 * This code is run to initialize the driver. 1945 * This code is run to initialize the driver.
@@ -2092,8 +1963,6 @@ static int __init usbvision_init(void)
2092 usbvision_v4l2_format[7].supported = 0; // V4L2_PIX_FMT_YUV422P 1963 usbvision_v4l2_format[7].supported = 0; // V4L2_PIX_FMT_YUV422P
2093 } 1964 }
2094 1965
2095 customdevice_process();
2096
2097 errCode = usb_register(&usbvision_driver); 1966 errCode = usb_register(&usbvision_driver);
2098 1967
2099 if (errCode == 0) { 1968 if (errCode == 0) {
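The probe() rewrite above retires both the hand-rolled VID/PID scan and the CustomDevice module parameter: the matching usb_device_id entry's driver_info field now carries the index into usbvision_device_data[], supplied by usbvision-cards.c. A hedged sketch of that pattern; the VID/PID pair below is illustrative only, while the model enum and the array-size symbol are the ones the hunk itself uses:

#include <linux/module.h>
#include <linux/usb.h>
#include "usbvision.h"
#include "usbvision-cards.h"	/* model enums such as DAZZLE_DVC_90_REV_1_SECAM */

static struct usb_device_id usbvision_table[] = {
	/* illustrative entry: driver_info indexes usbvision_device_data[] */
	{ USB_DEVICE(0x0573, 0x4d31), .driver_info = DAZZLE_DVC_90_REV_1_SECAM },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, usbvision_table);

static int usbvision_probe(struct usb_interface *intf,
			   const struct usb_device_id *devid)
{
	int model = devid->driver_info;

	/* bounds check replaces the old "idVendor == 0 means no match" exit */
	if ((model < 0) || (model >= usbvision_device_data_size))
		return -ENODEV;

	/* ...the rest of probe() keys everything off usbvision_device_data[model]... */
	return 0;
}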
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index ad6afd3e42a4..bd6f6422ed54 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -342,23 +342,24 @@ struct usbvision_frame {
342#define BRIDGE_NT1005 1005 342#define BRIDGE_NT1005 1005
343 343
344struct usbvision_device_data_st { 344struct usbvision_device_data_st {
345 int idVendor;
346 int idProduct;
347 int Interface; /* to handle special interface number like BELKIN and Hauppauge WinTV-USB II */
348 int Codec;
349 int VideoChannels;
350 __u64 VideoNorm; 345 __u64 VideoNorm;
351 int AudioChannels; 346 const char *ModelString;
352 int Radio; 347 int Interface; /* to handle special interface number like BELKIN and Hauppauge WinTV-USB II */
353 int vbi; 348 __u16 Codec;
354 int Tuner; 349 unsigned VideoChannels:3;
355 int TunerType; 350 unsigned AudioChannels:2;
356 int Vin_Reg1; 351 unsigned Radio:1;
357 int Vin_Reg2; 352 unsigned vbi:1;
358 int X_Offset; 353 unsigned Tuner:1;
359 int Y_Offset; 354 unsigned Vin_Reg1_override:1; /* Override default value with */
360 int Dvi_yuv; 355 unsigned Vin_Reg2_override:1; /* Vin_Reg1, Vin_Reg2, etc. */
361 char *ModelString; 356 unsigned Dvi_yuv_override:1;
357 __u8 Vin_Reg1;
358 __u8 Vin_Reg2;
359 __u8 Dvi_yuv;
360 __u8 TunerType;
361 __s16 X_Offset;
362 __s16 Y_Offset;
362}; 363};
363 364
364/* Declared on usbvision-cards.c */ 365/* Declared on usbvision-cards.c */
@@ -481,13 +482,11 @@ struct usb_usbvision {
481/* i2c-algo-usb declaration */ 482/* i2c-algo-usb declaration */
482/* --------------------------------------------------------------- */ 483/* --------------------------------------------------------------- */
483 484
484int usbvision_i2c_usb_del_bus(struct i2c_adapter *);
485
486
487/* ----------------------------------------------------------------------- */ 485/* ----------------------------------------------------------------------- */
488/* usbvision specific I2C functions */ 486/* usbvision specific I2C functions */
489/* ----------------------------------------------------------------------- */ 487/* ----------------------------------------------------------------------- */
490int usbvision_init_i2c(struct usb_usbvision *usbvision); 488int usbvision_i2c_register(struct usb_usbvision *usbvision);
489int usbvision_i2c_unregister(struct usb_usbvision *usbvision);
491void call_i2c_clients(struct usb_usbvision *usbvision, unsigned int cmd,void *arg); 490void call_i2c_clients(struct usb_usbvision *usbvision, unsigned int cmd,void *arg);
492 491
493/* defined in usbvision-core.c */ 492/* defined in usbvision-core.c */
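With usbvision_device_data_st reshuffled as above, per-register presets are gated by explicit *_override bits instead of the old convention of signed ints where a negative value meant "unset" (compare the Vin_Reg2 hunk in usbvision-video.c earlier). A hypothetical entry under the new layout; every value here is illustrative and not copied from usbvision-cards.c:

#include "usbvision.h"	/* struct usbvision_device_data_st, as changed above */

static struct usbvision_device_data_st example_entry = {
	.VideoNorm         = V4L2_STD_PAL,
	.ModelString       = "Example USBVision capture device",
	.Interface         = -1,	/* no special interface number */
	.Codec             = 7113,
	.VideoChannels     = 3,
	.AudioChannels     = 1,
	.vbi               = 1,
	.Tuner             = 1,
	.TunerType         = 5,
	.Vin_Reg2_override = 1,	/* only then is .Vin_Reg2 applied as the preset */
	.Vin_Reg2          = 0x09,
};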
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 54747606eae1..49f1df74aa21 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -60,6 +60,7 @@
60#include <linux/video_decoder.h> 60#include <linux/video_decoder.h>
61#define __OLD_VIDIOC_ /* To allow fixing old calls*/ 61#define __OLD_VIDIOC_ /* To allow fixing old calls*/
62#include <media/v4l2-common.h> 62#include <media/v4l2-common.h>
63#include <media/v4l2-chip-ident.h>
63 64
64#ifdef CONFIG_KMOD 65#ifdef CONFIG_KMOD
65#include <linux/kmod.h> 66#include <linux/kmod.h>
@@ -260,6 +261,8 @@ char *v4l2_field_names[] = {
260 [V4L2_FIELD_SEQ_TB] = "seq-tb", 261 [V4L2_FIELD_SEQ_TB] = "seq-tb",
261 [V4L2_FIELD_SEQ_BT] = "seq-bt", 262 [V4L2_FIELD_SEQ_BT] = "seq-bt",
262 [V4L2_FIELD_ALTERNATE] = "alternate", 263 [V4L2_FIELD_ALTERNATE] = "alternate",
264 [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
265 [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
263}; 266};
264 267
265char *v4l2_type_names[] = { 268char *v4l2_type_names[] = {
@@ -269,7 +272,8 @@ char *v4l2_type_names[] = {
269 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", 272 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
270 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", 273 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
271 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap", 274 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
272 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "slicec-vbi-out", 275 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
276 [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over",
273}; 277};
274 278
275 279
@@ -380,6 +384,8 @@ static const char *v4l2_ioctls[] = {
380 384
381 [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER", 385 [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
382 [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER", 386 [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
387
388 [_IOC_NR(VIDIOC_G_CHIP_IDENT)] = "VIDIOC_G_CHIP_IDENT",
383#endif 389#endif
384}; 390};
385#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) 391#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -410,14 +416,16 @@ static const char *v4l2_int_ioctls[] = {
410 [_IOC_NR(VIDIOC_INT_DECODE_VBI_LINE)] = "VIDIOC_INT_DECODE_VBI_LINE", 416 [_IOC_NR(VIDIOC_INT_DECODE_VBI_LINE)] = "VIDIOC_INT_DECODE_VBI_LINE",
411 [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA", 417 [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA",
412 [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA", 418 [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA",
413 [_IOC_NR(VIDIOC_INT_G_CHIP_IDENT)] = "VIDIOC_INT_G_CHIP_IDENT",
414 [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ", 419 [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ",
415 [_IOC_NR(VIDIOC_INT_S_STANDBY)] = "VIDIOC_INT_S_STANDBY", 420 [_IOC_NR(VIDIOC_INT_S_STANDBY)] = "VIDIOC_INT_S_STANDBY",
416 [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING", 421 [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING",
417 [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING", 422 [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING",
418 [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING", 423 [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING",
419 [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING", 424 [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING",
420 [_IOC_NR(VIDIOC_INT_S_CRYSTAL_FREQ)] = "VIDIOC_INT_S_CRYSTAL_FREQ" 425 [_IOC_NR(VIDIOC_INT_S_CRYSTAL_FREQ)] = "VIDIOC_INT_S_CRYSTAL_FREQ",
426 [_IOC_NR(VIDIOC_INT_INIT)] = "VIDIOC_INT_INIT",
427 [_IOC_NR(VIDIOC_INT_G_STD_OUTPUT)] = "VIDIOC_INT_G_STD_OUTPUT",
428 [_IOC_NR(VIDIOC_INT_S_STD_OUTPUT)] = "VIDIOC_INT_S_STD_OUTPUT",
421}; 429};
422#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls) 430#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
423 431
@@ -680,6 +688,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
680 case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: name = "Audio Stereo Mode Extension"; break; 688 case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: name = "Audio Stereo Mode Extension"; break;
681 case V4L2_CID_MPEG_AUDIO_EMPHASIS: name = "Audio Emphasis"; break; 689 case V4L2_CID_MPEG_AUDIO_EMPHASIS: name = "Audio Emphasis"; break;
682 case V4L2_CID_MPEG_AUDIO_CRC: name = "Audio CRC"; break; 690 case V4L2_CID_MPEG_AUDIO_CRC: name = "Audio CRC"; break;
691 case V4L2_CID_MPEG_AUDIO_MUTE: name = "Audio Mute"; break;
683 case V4L2_CID_MPEG_VIDEO_ENCODING: name = "Video Encoding"; break; 692 case V4L2_CID_MPEG_VIDEO_ENCODING: name = "Video Encoding"; break;
684 case V4L2_CID_MPEG_VIDEO_ASPECT: name = "Video Aspect"; break; 693 case V4L2_CID_MPEG_VIDEO_ASPECT: name = "Video Aspect"; break;
685 case V4L2_CID_MPEG_VIDEO_B_FRAMES: name = "Video B Frames"; break; 694 case V4L2_CID_MPEG_VIDEO_B_FRAMES: name = "Video B Frames"; break;
@@ -690,6 +699,8 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
690 case V4L2_CID_MPEG_VIDEO_BITRATE: name = "Video Bitrate"; break; 699 case V4L2_CID_MPEG_VIDEO_BITRATE: name = "Video Bitrate"; break;
691 case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: name = "Video Peak Bitrate"; break; 700 case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: name = "Video Peak Bitrate"; break;
692 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: name = "Video Temporal Decimation"; break; 701 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: name = "Video Temporal Decimation"; break;
702 case V4L2_CID_MPEG_VIDEO_MUTE: name = "Video Mute"; break;
703 case V4L2_CID_MPEG_VIDEO_MUTE_YUV: name = "Video Mute YUV"; break;
693 case V4L2_CID_MPEG_STREAM_TYPE: name = "Stream Type"; break; 704 case V4L2_CID_MPEG_STREAM_TYPE: name = "Stream Type"; break;
694 case V4L2_CID_MPEG_STREAM_PID_PMT: name = "Stream PMT Program ID"; break; 705 case V4L2_CID_MPEG_STREAM_PID_PMT: name = "Stream PMT Program ID"; break;
695 case V4L2_CID_MPEG_STREAM_PID_AUDIO: name = "Stream Audio Program ID"; break; 706 case V4L2_CID_MPEG_STREAM_PID_AUDIO: name = "Stream Audio Program ID"; break;
@@ -705,6 +716,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
705 switch (qctrl->id) { 716 switch (qctrl->id) {
706 case V4L2_CID_AUDIO_MUTE: 717 case V4L2_CID_AUDIO_MUTE:
707 case V4L2_CID_AUDIO_LOUDNESS: 718 case V4L2_CID_AUDIO_LOUDNESS:
719 case V4L2_CID_MPEG_AUDIO_MUTE:
708 case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: 720 case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
709 case V4L2_CID_MPEG_VIDEO_PULLDOWN: 721 case V4L2_CID_MPEG_VIDEO_PULLDOWN:
710 qctrl->type = V4L2_CTRL_TYPE_BOOLEAN; 722 qctrl->type = V4L2_CTRL_TYPE_BOOLEAN;
@@ -838,6 +850,8 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl)
838 V4L2_MPEG_AUDIO_CRC_NONE, 850 V4L2_MPEG_AUDIO_CRC_NONE,
839 V4L2_MPEG_AUDIO_CRC_CRC16, 1, 851 V4L2_MPEG_AUDIO_CRC_CRC16, 1,
840 V4L2_MPEG_AUDIO_CRC_NONE); 852 V4L2_MPEG_AUDIO_CRC_NONE);
853 case V4L2_CID_MPEG_AUDIO_MUTE:
854 return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0);
841 case V4L2_CID_MPEG_VIDEO_ENCODING: 855 case V4L2_CID_MPEG_VIDEO_ENCODING:
842 return v4l2_ctrl_query_fill(qctrl, 856 return v4l2_ctrl_query_fill(qctrl,
843 V4L2_MPEG_VIDEO_ENCODING_MPEG_1, 857 V4L2_MPEG_VIDEO_ENCODING_MPEG_1,
@@ -867,6 +881,10 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl)
867 return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000); 881 return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000);
868 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: 882 case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
869 return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); 883 return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0);
884 case V4L2_CID_MPEG_VIDEO_MUTE:
885 return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0);
886 case V4L2_CID_MPEG_VIDEO_MUTE_YUV: /* Init YUV (really YCbCr) to black */
887 return v4l2_ctrl_query_fill(qctrl, 0, 0xffffff, 1, 0x008080);
870 case V4L2_CID_MPEG_STREAM_TYPE: 888 case V4L2_CID_MPEG_STREAM_TYPE:
871 return v4l2_ctrl_query_fill(qctrl, 889 return v4l2_ctrl_query_fill(qctrl,
872 V4L2_MPEG_STREAM_TYPE_MPEG2_PS, 890 V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
@@ -965,6 +983,22 @@ int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_c
965 } 983 }
966} 984}
967 985
986int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_chip_ident *chip,
987 u32 ident, u32 revision)
988{
989 if (!v4l2_chip_match_i2c_client(c, chip->match_type, chip->match_chip))
990 return 0;
991 if (chip->ident == V4L2_IDENT_NONE) {
992 chip->ident = ident;
993 chip->revision = revision;
994 }
995 else {
996 chip->ident = V4L2_IDENT_AMBIGUOUS;
997 chip->revision = 0;
998 }
999 return 0;
1000}
1001
968int v4l2_chip_match_host(u32 match_type, u32 match_chip) 1002int v4l2_chip_match_host(u32 match_type, u32 match_chip)
969{ 1003{
970 switch (match_type) { 1004 switch (match_type) {
@@ -999,6 +1033,7 @@ EXPORT_SYMBOL(v4l2_ctrl_query_fill);
999EXPORT_SYMBOL(v4l2_ctrl_query_fill_std); 1033EXPORT_SYMBOL(v4l2_ctrl_query_fill_std);
1000 1034
1001EXPORT_SYMBOL(v4l2_chip_match_i2c_client); 1035EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
1036EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
1002EXPORT_SYMBOL(v4l2_chip_match_host); 1037EXPORT_SYMBOL(v4l2_chip_match_host);
1003 1038
1004/* 1039/*
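v4l2_chip_ident_i2c_client() above is the helper behind the new VIDIOC_G_CHIP_IDENT ioctl: it fills in the ident and revision the first time a matching chip answers, and downgrades the result to V4L2_IDENT_AMBIGUOUS if a second match responds to the same query. The wm8739/wm8775 hunks later in this patch call it from their command handlers, essentially as in this sketch:

#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>

static int example_chip_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
	switch (cmd) {
	case VIDIOC_G_CHIP_IDENT:
		/* arg is the struct v4l2_chip_ident passed down by videodev.c */
		return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_WM8775, 0);
	/* ...other commands... */
	}
	return 0;
}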
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index 290e64135650..f2bbd7a4d562 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -348,6 +348,9 @@ videocodec_build_table (void)
348 kfree(videocodec_buf); 348 kfree(videocodec_buf);
349 videocodec_buf = kmalloc(size, GFP_KERNEL); 349 videocodec_buf = kmalloc(size, GFP_KERNEL);
350 350
351 if (!videocodec_buf)
352 return 0;
353
351 i = 0; 354 i = 0;
352 i += scnprintf(videocodec_buf + i, size - 1, 355 i += scnprintf(videocodec_buf + i, size - 1,
353 "<S>lave or attached <M>aster name type flags magic "); 356 "<S>lave or attached <M>aster name type flags magic ");
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 011938fb7e0e..80ac5f86d9e5 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -318,6 +318,7 @@ static char *v4l2_type_names_FIXME[] = {
318 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", 318 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
319 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", 319 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
320 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-capture", 320 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-capture",
321 [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over",
321 [V4L2_BUF_TYPE_PRIVATE] = "private", 322 [V4L2_BUF_TYPE_PRIVATE] = "private",
322}; 323};
323 324
@@ -330,6 +331,8 @@ static char *v4l2_field_names_FIXME[] = {
330 [V4L2_FIELD_SEQ_TB] = "seq-tb", 331 [V4L2_FIELD_SEQ_TB] = "seq-tb",
331 [V4L2_FIELD_SEQ_BT] = "seq-bt", 332 [V4L2_FIELD_SEQ_BT] = "seq-bt",
332 [V4L2_FIELD_ALTERNATE] = "alternate", 333 [V4L2_FIELD_ALTERNATE] = "alternate",
334 [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
335 [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
333}; 336};
334 337
335#define prt_names(a,arr) (((a)>=0)&&((a)<ARRAY_SIZE(arr)))?arr[a]:"unknown" 338#define prt_names(a,arr) (((a)>=0)&&((a)<ARRAY_SIZE(arr)))?arr[a]:"unknown"
@@ -411,6 +414,10 @@ static int check_fmt (struct video_device *vfd, enum v4l2_buf_type type)
411 if (vfd->vidioc_try_fmt_vbi_output) 414 if (vfd->vidioc_try_fmt_vbi_output)
412 return (0); 415 return (0);
413 break; 416 break;
417 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
418 if (vfd->vidioc_try_fmt_output_overlay)
419 return (0);
420 break;
414 case V4L2_BUF_TYPE_PRIVATE: 421 case V4L2_BUF_TYPE_PRIVATE:
415 if (vfd->vidioc_try_fmt_type_private) 422 if (vfd->vidioc_try_fmt_type_private)
416 return (0); 423 return (0);
@@ -525,6 +532,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
525 ret=vfd->vidioc_enum_fmt_vbi_output(file, 532 ret=vfd->vidioc_enum_fmt_vbi_output(file,
526 fh, f); 533 fh, f);
527 break; 534 break;
535 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
536 if (vfd->vidioc_enum_fmt_output_overlay)
537 ret=vfd->vidioc_enum_fmt_output_overlay(file, fh, f);
538 break;
528 case V4L2_BUF_TYPE_PRIVATE: 539 case V4L2_BUF_TYPE_PRIVATE:
529 if (vfd->vidioc_enum_fmt_type_private) 540 if (vfd->vidioc_enum_fmt_type_private)
530 ret=vfd->vidioc_enum_fmt_type_private(file, 541 ret=vfd->vidioc_enum_fmt_type_private(file,
@@ -582,6 +593,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
582 ret=vfd->vidioc_g_fmt_video_output(file, 593 ret=vfd->vidioc_g_fmt_video_output(file,
583 fh, f); 594 fh, f);
584 break; 595 break;
596 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
597 if (vfd->vidioc_g_fmt_output_overlay)
598 ret=vfd->vidioc_g_fmt_output_overlay(file, fh, f);
599 break;
585 case V4L2_BUF_TYPE_VBI_OUTPUT: 600 case V4L2_BUF_TYPE_VBI_OUTPUT:
586 if (vfd->vidioc_g_fmt_vbi_output) 601 if (vfd->vidioc_g_fmt_vbi_output)
587 ret=vfd->vidioc_g_fmt_vbi_output(file, fh, f); 602 ret=vfd->vidioc_g_fmt_vbi_output(file, fh, f);
@@ -630,6 +645,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
630 ret=vfd->vidioc_s_fmt_video_output(file, 645 ret=vfd->vidioc_s_fmt_video_output(file,
631 fh, f); 646 fh, f);
632 break; 647 break;
648 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
649 if (vfd->vidioc_s_fmt_output_overlay)
650 ret=vfd->vidioc_s_fmt_output_overlay(file, fh, f);
651 break;
633 case V4L2_BUF_TYPE_VBI_OUTPUT: 652 case V4L2_BUF_TYPE_VBI_OUTPUT:
634 if (vfd->vidioc_s_fmt_vbi_output) 653 if (vfd->vidioc_s_fmt_vbi_output)
635 ret=vfd->vidioc_s_fmt_vbi_output(file, 654 ret=vfd->vidioc_s_fmt_vbi_output(file,
@@ -680,6 +699,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
680 ret=vfd->vidioc_try_fmt_video_output(file, 699 ret=vfd->vidioc_try_fmt_video_output(file,
681 fh, f); 700 fh, f);
682 break; 701 break;
702 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
703 if (vfd->vidioc_try_fmt_output_overlay)
704 ret=vfd->vidioc_try_fmt_output_overlay(file, fh, f);
705 break;
683 case V4L2_BUF_TYPE_VBI_OUTPUT: 706 case V4L2_BUF_TYPE_VBI_OUTPUT:
684 if (vfd->vidioc_try_fmt_vbi_output) 707 if (vfd->vidioc_try_fmt_vbi_output)
685 ret=vfd->vidioc_try_fmt_vbi_output(file, 708 ret=vfd->vidioc_try_fmt_vbi_output(file,
@@ -1381,6 +1404,11 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1381 case VIDIOC_G_PARM: 1404 case VIDIOC_G_PARM:
1382 { 1405 {
1383 struct v4l2_streamparm *p=arg; 1406 struct v4l2_streamparm *p=arg;
1407 __u32 type=p->type;
1408
1409 memset(p,0,sizeof(*p));
1410 p->type=type;
1411
1384 if (vfd->vidioc_g_parm) { 1412 if (vfd->vidioc_g_parm) {
1385 ret=vfd->vidioc_g_parm(file, fh, p); 1413 ret=vfd->vidioc_g_parm(file, fh, p);
1386 } else { 1414 } else {
@@ -1392,8 +1420,6 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1392 v4l2_video_std_construct(&s, vfd->current_norm, 1420 v4l2_video_std_construct(&s, vfd->current_norm,
1393 v4l2_norm_to_name(vfd->current_norm)); 1421 v4l2_norm_to_name(vfd->current_norm));
1394 1422
1395 memset(p,0,sizeof(*p));
1396
1397 p->parm.capture.timeperframe = s.frameperiod; 1423 p->parm.capture.timeperframe = s.frameperiod;
1398 ret=0; 1424 ret=0;
1399 } 1425 }
@@ -1509,6 +1535,16 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1509 break; 1535 break;
1510 } 1536 }
1511#endif 1537#endif
1538 case VIDIOC_G_CHIP_IDENT:
1539 {
1540 struct v4l2_chip_ident *p=arg;
1541 if (!vfd->vidioc_g_chip_ident)
1542 break;
1543 ret=vfd->vidioc_g_chip_ident(file, fh, p);
1544 if (!ret)
1545 dbgarg (cmd, "chip_ident=%u, revision=0x%x\n", p->ident, p->revision);
1546 break;
1547 }
1512 } /* switch */ 1548 } /* switch */
1513 1549
1514 if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) { 1550 if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) {
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index a9b59c35cd67..8f6741a28a47 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -29,6 +29,7 @@
29#include <linux/i2c-id.h> 29#include <linux/i2c-id.h>
30#include <linux/videodev.h> 30#include <linux/videodev.h>
31#include <media/v4l2-common.h> 31#include <media/v4l2-common.h>
32#include <media/v4l2-chip-ident.h>
32 33
33MODULE_DESCRIPTION("wm8739 driver"); 34MODULE_DESCRIPTION("wm8739 driver");
34MODULE_AUTHOR("T. Adachi, Hans Verkuil"); 35MODULE_AUTHOR("T. Adachi, Hans Verkuil");
@@ -236,6 +237,9 @@ static int wm8739_command(struct i2c_client *client, unsigned int cmd, void *arg
236 return -EINVAL; 237 return -EINVAL;
237 } 238 }
238 239
240 case VIDIOC_G_CHIP_IDENT:
241 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_WM8739, 0);
242
239 case VIDIOC_LOG_STATUS: 243 case VIDIOC_LOG_STATUS:
240 v4l_info(client, "Frequency: %u Hz\n", state->clock_freq); 244 v4l_info(client, "Frequency: %u Hz\n", state->clock_freq);
241 v4l_info(client, "Volume L: %02x%s\n", state->vol_l & 0x1f, 245 v4l_info(client, "Volume L: %02x%s\n", state->vol_l & 0x1f,
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index d81a88bbe43d..4df5d30d4d09 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -33,6 +33,7 @@
33#include <linux/i2c-id.h> 33#include <linux/i2c-id.h>
34#include <linux/videodev.h> 34#include <linux/videodev.h>
35#include <media/v4l2-common.h> 35#include <media/v4l2-common.h>
36#include <media/v4l2-chip-ident.h>
36 37
37MODULE_DESCRIPTION("wm8775 driver"); 38MODULE_DESCRIPTION("wm8775 driver");
38MODULE_AUTHOR("Ulf Eklund, Hans Verkuil"); 39MODULE_AUTHOR("Ulf Eklund, Hans Verkuil");
@@ -124,6 +125,9 @@ static int wm8775_command(struct i2c_client *client, unsigned int cmd,
124 wm8775_write(client, R21, 0x100 + state->input); 125 wm8775_write(client, R21, 0x100 + state->input);
125 break; 126 break;
126 127
128 case VIDIOC_G_CHIP_IDENT:
129 return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_WM8775, 0);
130
127 case VIDIOC_LOG_STATUS: 131 case VIDIOC_LOG_STATUS:
128 v4l_info(client, "Input: %d%s\n", state->input, 132 v4l_info(client, "Input: %d%s\n", state->input,
129 state->muted ? " (muted)" : ""); 133 state->muted ? " (muted)" : "");
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
new file mode 100644
index 000000000000..b5d3364c94c7
--- /dev/null
+++ b/drivers/media/video/zr364xx.c
@@ -0,0 +1,929 @@
1/*
2 * Zoran 364xx based USB webcam module version 0.72
3 *
4 * Allows you to use your USB webcam with V4L2 applications
 5 * This is still in heavy development!
6 *
7 * Copyright (C) 2004 Antoine Jacquet <royale@zerezo.com>
8 * http://royale.zerezo.com/zr364xx/
9 *
10 * Heavily inspired by usb-skeleton.c, vicam.c, cpia.c and spca50x.c drivers
11 * V4L2 version inspired by meye.c driver
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28
29#include <linux/version.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/usb.h>
33#include <linux/vmalloc.h>
34#include <linux/slab.h>
35#include <linux/proc_fs.h>
36#include <linux/highmem.h>
37#include <media/v4l2-common.h>
38
39
40/* Version Information */
41#define DRIVER_VERSION "v0.72"
42#define DRIVER_AUTHOR "Antoine Jacquet, http://royale.zerezo.com/"
43#define DRIVER_DESC "Zoran 364xx"
44
45
46/* Camera */
47#define FRAMES 2
48#define MAX_FRAME_SIZE 100000
49#define BUFFER_SIZE 0x1000
50#define CTRL_TIMEOUT 500
51
52
53/* Debug macro */
54#define DBG(x...) if (debug) info(x)
55
56
57/* Init methods, need to find nicer names for these
 58 * the exact names of the chipsets would be best if someone finds them */
59#define METHOD0 0
60#define METHOD1 1
61#define METHOD2 2
62
63
64/* Module parameters */
65static int debug = 0;
66static int mode = 0;
67
68
69/* Module parameters interface */
70module_param(debug, int, 0644);
71MODULE_PARM_DESC(debug, "Debug level");
72module_param(mode, int, 0644);
73MODULE_PARM_DESC(mode, "0 = 320x240, 1 = 160x120, 2 = 640x480");
74
75
76/* Devices supported by this driver
77 * .driver_info contains the init method used by the camera */
78static struct usb_device_id device_table[] = {
79 {USB_DEVICE(0x08ca, 0x0109), .driver_info = METHOD0 },
80 {USB_DEVICE(0x041e, 0x4024), .driver_info = METHOD0 },
81 {USB_DEVICE(0x0d64, 0x0108), .driver_info = METHOD0 },
82 {USB_DEVICE(0x0546, 0x3187), .driver_info = METHOD0 },
83 {USB_DEVICE(0x0d64, 0x3108), .driver_info = METHOD0 },
84 {USB_DEVICE(0x0595, 0x4343), .driver_info = METHOD0 },
85 {USB_DEVICE(0x0bb0, 0x500d), .driver_info = METHOD0 },
86 {USB_DEVICE(0x0feb, 0x2004), .driver_info = METHOD0 },
87 {USB_DEVICE(0x055f, 0xb500), .driver_info = METHOD0 },
88 {USB_DEVICE(0x08ca, 0x2062), .driver_info = METHOD2 },
89 {USB_DEVICE(0x052b, 0x1a18), .driver_info = METHOD1 },
90 {USB_DEVICE(0x04c8, 0x0729), .driver_info = METHOD0 },
91 {USB_DEVICE(0x04f2, 0xa208), .driver_info = METHOD0 },
92 {USB_DEVICE(0x0784, 0x0040), .driver_info = METHOD1 },
93 {USB_DEVICE(0x06d6, 0x0034), .driver_info = METHOD0 },
94 {USB_DEVICE(0x0a17, 0x0062), .driver_info = METHOD2 },
95 {} /* Terminating entry */
96};
97
98MODULE_DEVICE_TABLE(usb, device_table);
99
100
101/* Camera stuff */
102struct zr364xx_camera {
103 struct usb_device *udev; /* save off the usb device pointer */
104 struct usb_interface *interface;/* the interface for this device */
105 struct video_device *vdev; /* v4l video device */
106 u8 *framebuf;
107 int nb;
108 unsigned char *buffer;
109 int skip;
110 int brightness;
111 int width;
112 int height;
113 int method;
114 struct mutex lock;
115};
116
117
118/* function used to send initialisation commands to the camera */
119static int send_control_msg(struct usb_device *udev, u8 request, u16 value,
120 u16 index, unsigned char *cp, u16 size)
121{
122 int status;
123
124 unsigned char *transfer_buffer = kmalloc(size, GFP_KERNEL);
125 if (!transfer_buffer) {
126 info("kmalloc(%d) failed", size);
127 return -ENOMEM;
128 }
129
130 memcpy(transfer_buffer, cp, size);
131
132 status = usb_control_msg(udev,
133 usb_sndctrlpipe(udev, 0),
134 request,
135 USB_DIR_OUT | USB_TYPE_VENDOR |
136 USB_RECIP_DEVICE, value, index,
137 transfer_buffer, size, CTRL_TIMEOUT);
138
139 kfree(transfer_buffer);
140
141 if (status < 0)
142 info("Failed sending control message, error %d.", status);
143
144 return status;
145}
146
147
148/* Control messages sent to the camera to initialize it
149 * and launch the capture */
150typedef struct {
151 unsigned int value;
152 unsigned int size;
153 unsigned char *bytes;
154} message;
155
156/* method 0 */
157static unsigned char m0d1[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
158static unsigned char m0d2[] = { 0, 0, 0, 0, 0, 0 };
159static unsigned char m0d3[] = { 0, 0 };
160static message m0[] = {
161 {0x1f30, 0, NULL},
162 {0xd000, 0, NULL},
163 {0x3370, sizeof(m0d1), m0d1},
164 {0x2000, 0, NULL},
165 {0x2f0f, 0, NULL},
166 {0x2610, sizeof(m0d2), m0d2},
167 {0xe107, 0, NULL},
168 {0x2502, 0, NULL},
169 {0x1f70, 0, NULL},
170 {0xd000, 0, NULL},
171 {0x9a01, sizeof(m0d3), m0d3},
172 {-1, -1, NULL}
173};
174
175/* method 1 */
176static unsigned char m1d1[] = { 0xff, 0xff };
177static unsigned char m1d2[] = { 0x00, 0x00 };
178static message m1[] = {
179 {0x1f30, 0, NULL},
180 {0xd000, 0, NULL},
181 {0xf000, 0, NULL},
182 {0x2000, 0, NULL},
183 {0x2f0f, 0, NULL},
184 {0x2650, 0, NULL},
185 {0xe107, 0, NULL},
186 {0x2502, sizeof(m1d1), m1d1},
187 {0x1f70, 0, NULL},
188 {0xd000, 0, NULL},
189 {0xd000, 0, NULL},
190 {0xd000, 0, NULL},
191 {0x9a01, sizeof(m1d2), m1d2},
192 {-1, -1, NULL}
193};
194
195/* method 2 */
196static unsigned char m2d1[] = { 0xff, 0xff };
197static message m2[] = {
198 {0x1f30, 0, NULL},
199 {0xf000, 0, NULL},
200 {0x2000, 0, NULL},
201 {0x2f0f, 0, NULL},
202 {0x2650, 0, NULL},
203 {0xe107, 0, NULL},
204 {0x2502, sizeof(m2d1), m2d1},
205 {0x1f70, 0, NULL},
206 {-1, -1, NULL}
207};
208
209/* init table */
210static message *init[3] = { m0, m1, m2 };
211
212
213/* JPEG static data in header (Huffman table, etc) */
214static unsigned char header1[] = {
215 0xFF, 0xD8,
216 /*
217 0xFF, 0xE0, 0x00, 0x10, 'J', 'F', 'I', 'F',
218 0x00, 0x01, 0x01, 0x00, 0x33, 0x8A, 0x00, 0x00, 0x33, 0x88,
219 */
220 0xFF, 0xDB, 0x00, 0x84
221};
222static unsigned char header2[] = {
223 0xFF, 0xC4, 0x00, 0x1F, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01,
224 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
225 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
226 0xFF, 0xC4, 0x00, 0xB5, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02,
227 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01,
228 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06,
229 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1,
230 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33,
231 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25,
232 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
233 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54,
234 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67,
235 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A,
236 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94,
237 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
238 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8,
239 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA,
240 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2,
241 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3,
242 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFF, 0xC4, 0x00, 0x1F,
243 0x01, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
244 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04,
245 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0xFF, 0xC4, 0x00, 0xB5,
246 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05,
247 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11,
248 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
249 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1,
250 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16,
251 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27,
252 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
253 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57,
254 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A,
255 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84,
256 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96,
257 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
258 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA,
259 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3,
260 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5,
261 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
262 0xF8, 0xF9, 0xFA, 0xFF, 0xC0, 0x00, 0x11, 0x08, 0x00, 0xF0, 0x01,
263 0x40, 0x03, 0x01, 0x21, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01,
264 0xFF, 0xDA, 0x00, 0x0C, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11,
265 0x00, 0x3F, 0x00
266};
267static unsigned char header3;
268
269
270
271/********************/
272/* V4L2 integration */
273/********************/
274
275/* this function reads a full JPEG picture synchronously
276 * TODO: do it asynchronously... */
277static int read_frame(struct zr364xx_camera *cam, int framenum)
278{
279 int i, n, temp, head, size, actual_length;
280 unsigned char *ptr = NULL, *jpeg;
281
282 redo:
283 /* hardware brightness */
284 n = send_control_msg(cam->udev, 1, 0x2001, 0, NULL, 0);
285 temp = (0x60 << 8) + 127 - cam->brightness;
286 n = send_control_msg(cam->udev, 1, temp, 0, NULL, 0);
287
 288 /* during the first loop we are going to insert the JPEG header */
289 head = 0;
290 /* this is the place in memory where we are going to build
291 * the JPEG image */
292 jpeg = cam->framebuf + framenum * MAX_FRAME_SIZE;
293 /* read data... */
294 do {
295 n = usb_bulk_msg(cam->udev,
296 usb_rcvbulkpipe(cam->udev, 0x81),
297 cam->buffer, BUFFER_SIZE, &actual_length,
298 CTRL_TIMEOUT);
299 DBG("buffer : %d %d", cam->buffer[0], cam->buffer[1]);
300 DBG("bulk : n=%d size=%d", n, actual_length);
301 if (n < 0) {
302 info("error reading bulk msg");
303 return 0;
304 }
305 if (actual_length < 0 || actual_length > BUFFER_SIZE) {
306 info("wrong number of bytes");
307 return 0;
308 }
309
310 /* swap bytes if camera needs it */
311 if (cam->method == METHOD0) {
312 u16 *buf = (u16*)cam->buffer;
313 for (i = 0; i < BUFFER_SIZE/2; i++)
314 swab16s(buf + i);
315 }
316
317 /* write the JPEG header */
318 if (!head) {
319 DBG("jpeg header");
320 ptr = jpeg;
321 memcpy(ptr, header1, sizeof(header1));
322 ptr += sizeof(header1);
323 header3 = 0;
324 memcpy(ptr, &header3, 1);
325 ptr++;
326 memcpy(ptr, cam->buffer, 64);
327 ptr += 64;
328 header3 = 1;
329 memcpy(ptr, &header3, 1);
330 ptr++;
331 memcpy(ptr, cam->buffer + 64, 64);
332 ptr += 64;
333 memcpy(ptr, header2, sizeof(header2));
334 ptr += sizeof(header2);
335 memcpy(ptr, cam->buffer + 128,
336 actual_length - 128);
337 ptr += actual_length - 128;
338 head = 1;
339 DBG("header : %d %d %d %d %d %d %d %d %d",
340 cam->buffer[0], cam->buffer[1], cam->buffer[2],
341 cam->buffer[3], cam->buffer[4], cam->buffer[5],
342 cam->buffer[6], cam->buffer[7], cam->buffer[8]);
343 } else {
344 memcpy(ptr, cam->buffer, actual_length);
345 ptr += actual_length;
346 }
347 }
348 /* ... until there is no more */
349 while (actual_length == BUFFER_SIZE);
350
 351 /* we skip the first 2 frames, which are usually buggy */
352 if (cam->skip) {
353 cam->skip--;
354 goto redo;
355 }
356
357 /* go back to find the JPEG EOI marker */
358 size = ptr - jpeg;
359 ptr -= 2;
360 while (ptr > jpeg) {
361 if (*ptr == 0xFF && *(ptr + 1) == 0xD9
362 && *(ptr + 2) == 0xFF)
363 break;
364 ptr--;
365 }
366 if (ptr == jpeg)
367 DBG("No EOI marker");
368
369 /* Sometimes there is junk data in the middle of the picture,
 370 * we want to skip these bogus frames */
371 while (ptr > jpeg) {
372 if (*ptr == 0xFF && *(ptr + 1) == 0xFF
373 && *(ptr + 2) == 0xFF)
374 break;
375 ptr--;
376 }
377 if (ptr != jpeg) {
378 DBG("Bogus frame ? %d", cam->nb);
379 goto redo;
380 }
381
382 DBG("jpeg : %d %d %d %d %d %d %d %d",
383 jpeg[0], jpeg[1], jpeg[2], jpeg[3],
384 jpeg[4], jpeg[5], jpeg[6], jpeg[7]);
385
386 return size;
387}
388
389
390static ssize_t zr364xx_read(struct file *file, char *buf, size_t cnt,
391 loff_t * ppos)
392{
393 unsigned long count = cnt;
394 struct video_device *vdev = video_devdata(file);
395 struct zr364xx_camera *cam;
396
397 DBG("zr364xx_read: read %d bytes.", (int) count);
398
399 if (vdev == NULL)
400 return -ENODEV;
401 cam = video_get_drvdata(vdev);
402
403 if (!buf)
404 return -EINVAL;
405
406 if (!count)
407 return -EINVAL;
408
409 /* NoMan Sux ! */
410 count = read_frame(cam, 0);
411
412 if (copy_to_user(buf, cam->framebuf, count))
413 return -EFAULT;
414
415 return count;
416}
417
418
419static int zr364xx_vidioc_querycap(struct file *file, void *priv,
420 struct v4l2_capability *cap)
421{
422 memset(cap, 0, sizeof(*cap));
423 strcpy(cap->driver, DRIVER_DESC);
424 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
425 return 0;
426}
427
428static int zr364xx_vidioc_enum_input(struct file *file, void *priv,
429 struct v4l2_input *i)
430{
431 if (i->index != 0)
432 return -EINVAL;
433 memset(i, 0, sizeof(*i));
434 i->index = 0;
435 strcpy(i->name, DRIVER_DESC " Camera");
436 i->type = V4L2_INPUT_TYPE_CAMERA;
437 return 0;
438}
439
440static int zr364xx_vidioc_g_input(struct file *file, void *priv,
441 unsigned int *i)
442{
443 *i = 0;
444 return 0;
445}
446
447static int zr364xx_vidioc_s_input(struct file *file, void *priv,
448 unsigned int i)
449{
450 if (i != 0)
451 return -EINVAL;
452 return 0;
453}
454
455static int zr364xx_vidioc_queryctrl(struct file *file, void *priv,
456 struct v4l2_queryctrl *c)
457{
458 struct video_device *vdev = video_devdata(file);
459 struct zr364xx_camera *cam;
460
461 if (vdev == NULL)
462 return -ENODEV;
463 cam = video_get_drvdata(vdev);
464
465 switch (c->id) {
466 case V4L2_CID_BRIGHTNESS:
467 c->type = V4L2_CTRL_TYPE_INTEGER;
468 strcpy(c->name, "Brightness");
469 c->minimum = 0;
470 c->maximum = 127;
471 c->step = 1;
472 c->default_value = cam->brightness;
473 c->flags = 0;
474 break;
475 default:
476 return -EINVAL;
477 }
478 return 0;
479}
480
481static int zr364xx_vidioc_s_ctrl(struct file *file, void *priv,
482 struct v4l2_control *c)
483{
484 struct video_device *vdev = video_devdata(file);
485 struct zr364xx_camera *cam;
486
487 if (vdev == NULL)
488 return -ENODEV;
489 cam = video_get_drvdata(vdev);
490
491 switch (c->id) {
492 case V4L2_CID_BRIGHTNESS:
493 cam->brightness = c->value;
494 break;
495 default:
496 return -EINVAL;
497 }
498 return 0;
499}
500
501static int zr364xx_vidioc_g_ctrl(struct file *file, void *priv,
502 struct v4l2_control *c)
503{
504 struct video_device *vdev = video_devdata(file);
505 struct zr364xx_camera *cam;
506
507 if (vdev == NULL)
508 return -ENODEV;
509 cam = video_get_drvdata(vdev);
510
511 switch (c->id) {
512 case V4L2_CID_BRIGHTNESS:
513 c->value = cam->brightness;
514 break;
515 default:
516 return -EINVAL;
517 }
518 return 0;
519}
520
521static int zr364xx_vidioc_enum_fmt_cap(struct file *file,
522 void *priv, struct v4l2_fmtdesc *f)
523{
524 if (f->index > 0)
525 return -EINVAL;
526 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
527 return -EINVAL;
528 memset(f, 0, sizeof(*f));
529 f->index = 0;
530 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
531 f->flags = V4L2_FMT_FLAG_COMPRESSED;
532 strcpy(f->description, "JPEG");
533 f->pixelformat = V4L2_PIX_FMT_JPEG;
534 return 0;
535}
536
537static int zr364xx_vidioc_try_fmt_cap(struct file *file, void *priv,
538 struct v4l2_format *f)
539{
540 struct video_device *vdev = video_devdata(file);
541 struct zr364xx_camera *cam;
542
543 if (vdev == NULL)
544 return -ENODEV;
545 cam = video_get_drvdata(vdev);
546
547 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
548 return -EINVAL;
549 if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG)
550 return -EINVAL;
551 if (f->fmt.pix.field != V4L2_FIELD_ANY &&
552 f->fmt.pix.field != V4L2_FIELD_NONE)
553 return -EINVAL;
554 f->fmt.pix.field = V4L2_FIELD_NONE;
555 f->fmt.pix.width = cam->width;
556 f->fmt.pix.height = cam->height;
557 f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
558 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
559 f->fmt.pix.colorspace = 0;
560 f->fmt.pix.priv = 0;
561 return 0;
562}
563
564static int zr364xx_vidioc_g_fmt_cap(struct file *file, void *priv,
565 struct v4l2_format *f)
566{
567 struct video_device *vdev = video_devdata(file);
568 struct zr364xx_camera *cam;
569
570 if (vdev == NULL)
571 return -ENODEV;
572 cam = video_get_drvdata(vdev);
573
574 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
575 return -EINVAL;
576 memset(&f->fmt.pix, 0, sizeof(struct v4l2_pix_format));
577 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
578 f->fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG;
579 f->fmt.pix.field = V4L2_FIELD_NONE;
580 f->fmt.pix.width = cam->width;
581 f->fmt.pix.height = cam->height;
582 f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
583 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
584 f->fmt.pix.colorspace = 0;
585 f->fmt.pix.priv = 0;
586 return 0;
587}
588
589static int zr364xx_vidioc_s_fmt_cap(struct file *file, void *priv,
590 struct v4l2_format *f)
591{
592 struct video_device *vdev = video_devdata(file);
593 struct zr364xx_camera *cam;
594
595 if (vdev == NULL)
596 return -ENODEV;
597 cam = video_get_drvdata(vdev);
598
599 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
600 return -EINVAL;
601 if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG)
602 return -EINVAL;
603 if (f->fmt.pix.field != V4L2_FIELD_ANY &&
604 f->fmt.pix.field != V4L2_FIELD_NONE)
605 return -EINVAL;
606 f->fmt.pix.field = V4L2_FIELD_NONE;
607 f->fmt.pix.width = cam->width;
608 f->fmt.pix.height = cam->height;
609 f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
610 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
611 f->fmt.pix.colorspace = 0;
612 f->fmt.pix.priv = 0;
613 DBG("ok!");
614 return 0;
615}
616
617static int zr364xx_vidioc_streamon(struct file *file, void *priv,
618 enum v4l2_buf_type type)
619{
620 return 0;
621}
622
623static int zr364xx_vidioc_streamoff(struct file *file, void *priv,
624 enum v4l2_buf_type type)
625{
626 return 0;
627}
628
629
630/* open the camera */
631static int zr364xx_open(struct inode *inode, struct file *file)
632{
633 struct video_device *vdev = video_devdata(file);
634 struct zr364xx_camera *cam = video_get_drvdata(vdev);
635 struct usb_device *udev = cam->udev;
636 int i, err;
637
638 DBG("zr364xx_open");
639
640 cam->skip = 2;
641
642 err = video_exclusive_open(inode, file);
643 if (err < 0)
644 return err;
645
646 if (!cam->framebuf) {
647 cam->framebuf = vmalloc_32(MAX_FRAME_SIZE * FRAMES);
648 if (!cam->framebuf) {
649 info("vmalloc_32 failed!");
650 return -ENOMEM;
651 }
652 }
653
654 mutex_lock(&cam->lock);
655 for (i = 0; init[cam->method][i].size != -1; i++) {
656 err =
657 send_control_msg(udev, 1, init[cam->method][i].value,
658 0, init[cam->method][i].bytes,
659 init[cam->method][i].size);
660 if (err < 0) {
661 info("error during open sequence: %d", i);
662 mutex_unlock(&cam->lock);
663 return err;
664 }
665 }
666
667 file->private_data = vdev;
668
669 /* Added some delay here, since opening/closing the camera quickly,
670 * like Ekiga does during its startup, can crash the webcam
671 */
672 mdelay(100);
673
674 mutex_unlock(&cam->lock);
675 return 0;
676}
677
678
679/* release the camera */
680static int zr364xx_release(struct inode *inode, struct file *file)
681{
682 struct video_device *vdev = video_devdata(file);
683 struct zr364xx_camera *cam;
684 struct usb_device *udev;
685 int i, err;
686
687 DBG("zr364xx_release");
688
689 if (vdev == NULL)
690 return -ENODEV;
691 cam = video_get_drvdata(vdev);
692
693 udev = cam->udev;
694
695 mutex_lock(&cam->lock);
696 for (i = 0; i < 2; i++) {
697 err =
698 send_control_msg(udev, 1, init[cam->method][i].value,
 699				     0, init[cam->method][i].bytes,
700 init[cam->method][i].size);
701 if (err < 0) {
702 info("error during release sequence");
703 mutex_unlock(&cam->lock);
704 return err;
705 }
706 }
707
708 file->private_data = NULL;
709 video_exclusive_release(inode, file);
710
711 /* Added some delay here, since opening/closing the camera quickly,
712 * like Ekiga does during its startup, can crash the webcam
713 */
714 mdelay(100);
715
716 mutex_unlock(&cam->lock);
717 return 0;
718}
719
720
721static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
722{
723 void *pos;
724 unsigned long start = vma->vm_start;
725 unsigned long size = vma->vm_end - vma->vm_start;
726 struct video_device *vdev = video_devdata(file);
727 struct zr364xx_camera *cam;
728
729 DBG("zr364xx_mmap: %ld\n", size);
730
731 if (vdev == NULL)
732 return -ENODEV;
733 cam = video_get_drvdata(vdev);
734
735 pos = cam->framebuf;
736 while (size > 0) {
737 if (vm_insert_page(vma, start, vmalloc_to_page(pos)))
738 return -EAGAIN;
739 start += PAGE_SIZE;
740 pos += PAGE_SIZE;
741 if (size > PAGE_SIZE)
742 size -= PAGE_SIZE;
743 else
744 size = 0;
745 }
746
747 return 0;
748}
749
750
751static struct file_operations zr364xx_fops = {
752 .owner = THIS_MODULE,
753 .open = zr364xx_open,
754 .release = zr364xx_release,
755 .read = zr364xx_read,
756 .mmap = zr364xx_mmap,
757 .ioctl = video_ioctl2,
758 .llseek = no_llseek,
759};
760
761static struct video_device zr364xx_template = {
762 .owner = THIS_MODULE,
763 .name = DRIVER_DESC,
764 .type = VID_TYPE_CAPTURE,
765 .fops = &zr364xx_fops,
766 .release = video_device_release,
767 .minor = -1,
768
769 .vidioc_querycap = zr364xx_vidioc_querycap,
770 .vidioc_enum_fmt_cap = zr364xx_vidioc_enum_fmt_cap,
771 .vidioc_try_fmt_cap = zr364xx_vidioc_try_fmt_cap,
772 .vidioc_s_fmt_cap = zr364xx_vidioc_s_fmt_cap,
773 .vidioc_g_fmt_cap = zr364xx_vidioc_g_fmt_cap,
774 .vidioc_enum_input = zr364xx_vidioc_enum_input,
775 .vidioc_g_input = zr364xx_vidioc_g_input,
776 .vidioc_s_input = zr364xx_vidioc_s_input,
777 .vidioc_streamon = zr364xx_vidioc_streamon,
778 .vidioc_streamoff = zr364xx_vidioc_streamoff,
779 .vidioc_queryctrl = zr364xx_vidioc_queryctrl,
780 .vidioc_g_ctrl = zr364xx_vidioc_g_ctrl,
781 .vidioc_s_ctrl = zr364xx_vidioc_s_ctrl,
782};
783
784
785
786/*******************/
787/* USB integration */
788/*******************/
789
790static int zr364xx_probe(struct usb_interface *intf,
791 const struct usb_device_id *id)
792{
793 struct usb_device *udev = interface_to_usbdev(intf);
794 struct zr364xx_camera *cam = NULL;
795
796 DBG("probing...");
797
798 info(DRIVER_DESC " compatible webcam plugged");
799 info("model %04x:%04x detected", udev->descriptor.idVendor,
800 udev->descriptor.idProduct);
801
802 if ((cam =
803 kmalloc(sizeof(struct zr364xx_camera), GFP_KERNEL)) == NULL) {
804 info("cam: out of memory !");
805 return -ENODEV;
806 }
807 memset(cam, 0x00, sizeof(struct zr364xx_camera));
808 /* save the init method used by this camera */
809 cam->method = id->driver_info;
810
811 cam->vdev = video_device_alloc();
812 if (cam->vdev == NULL) {
813 info("cam->vdev: out of memory !");
814 kfree(cam);
815 return -ENODEV;
816 }
817 memcpy(cam->vdev, &zr364xx_template, sizeof(zr364xx_template));
818 video_set_drvdata(cam->vdev, cam);
819 if (debug)
820 cam->vdev->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
821
822 cam->udev = udev;
823
824 if ((cam->buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL)) == NULL) {
825 info("cam->buffer: out of memory !");
826 video_device_release(cam->vdev);
827 kfree(cam);
828 return -ENODEV;
829 }
830
831 switch (mode) {
832 case 1:
833 info("160x120 mode selected");
834 cam->width = 160;
835 cam->height = 120;
836 break;
837 case 2:
838 info("640x480 mode selected");
839 cam->width = 640;
840 cam->height = 480;
841 break;
842 default:
843 info("320x240 mode selected");
844 cam->width = 320;
845 cam->height = 240;
846 break;
847 }
848
849 m0d1[0] = mode;
850 m1[2].value = 0xf000 + mode;
851 m2[1].value = 0xf000 + mode;
852 header2[437] = cam->height / 256;
853 header2[438] = cam->height % 256;
854 header2[439] = cam->width / 256;
855 header2[440] = cam->width % 256;
856
857 cam->nb = 0;
858 cam->brightness = 64;
859 mutex_init(&cam->lock);
860
861 if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, -1) == -1) {
862 info("video_register_device failed");
863 video_device_release(cam->vdev);
864 kfree(cam->buffer);
865 kfree(cam);
866 return -ENODEV;
867 }
868
869 usb_set_intfdata(intf, cam);
870
871 info(DRIVER_DESC " controlling video device %d", cam->vdev->minor);
872 return 0;
873}
874
875
876static void zr364xx_disconnect(struct usb_interface *intf)
877{
878 struct zr364xx_camera *cam = usb_get_intfdata(intf);
879 usb_set_intfdata(intf, NULL);
880 dev_set_drvdata(&intf->dev, NULL);
881 info(DRIVER_DESC " webcam unplugged");
882 if (cam->vdev)
883 video_unregister_device(cam->vdev);
884 cam->vdev = NULL;
885 kfree(cam->buffer);
886 if (cam->framebuf)
887 vfree(cam->framebuf);
888 kfree(cam);
889}
890
891
892
893/**********************/
894/* Module integration */
895/**********************/
896
897static struct usb_driver zr364xx_driver = {
898 .name = "zr364xx",
899 .probe = zr364xx_probe,
900 .disconnect = zr364xx_disconnect,
901 .id_table = device_table
902};
903
904
905static int __init zr364xx_init(void)
906{
907 int retval;
 908	retval = usb_register(&zr364xx_driver);
909 if (retval)
910 info("usb_register failed!");
911 else
912 info(DRIVER_DESC " module loaded");
913 return retval;
914}
915
916
917static void __exit zr364xx_exit(void)
918{
919 info(DRIVER_DESC " module unloaded");
920 usb_deregister(&zr364xx_driver);
921}
922
923
924module_init(zr364xx_init);
925module_exit(zr364xx_exit);
926
927MODULE_AUTHOR(DRIVER_AUTHOR);
928MODULE_DESCRIPTION(DRIVER_DESC);
929MODULE_LICENSE("GPL");
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index b691292ff599..7dd34bd28efc 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -714,6 +714,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
714 LANSendRequest_t *pSendReq; 714 LANSendRequest_t *pSendReq;
715 SGETransaction32_t *pTrans; 715 SGETransaction32_t *pTrans;
716 SGESimple64_t *pSimple; 716 SGESimple64_t *pSimple;
717 const unsigned char *mac;
717 dma_addr_t dma; 718 dma_addr_t dma;
718 unsigned long flags; 719 unsigned long flags;
719 int ctx; 720 int ctx;
@@ -753,7 +754,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
753 /* Set the mac.raw pointer, since this apparently isn't getting 754 /* Set the mac.raw pointer, since this apparently isn't getting
754 * done before we get the skb. Pull the data pointer past the mac data. 755 * done before we get the skb. Pull the data pointer past the mac data.
755 */ 756 */
756 skb->mac.raw = skb->data; 757 skb_reset_mac_header(skb);
757 skb_pull(skb, 12); 758 skb_pull(skb, 12);
758 759
759 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, 760 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
@@ -784,6 +785,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
784// IOC_AND_NETDEV_NAMES_s_s(dev), 785// IOC_AND_NETDEV_NAMES_s_s(dev),
785// ctx, skb, skb->data)); 786// ctx, skb, skb->data));
786 787
788 mac = skb_mac_header(skb);
787#ifdef QLOGIC_NAA_WORKAROUND 789#ifdef QLOGIC_NAA_WORKAROUND
788{ 790{
789 struct NAA_Hosed *nh; 791 struct NAA_Hosed *nh;
@@ -793,12 +795,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
793 drops. */ 795 drops. */
794 read_lock_irq(&bad_naa_lock); 796 read_lock_irq(&bad_naa_lock);
795 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { 797 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
796 if ((nh->ieee[0] == skb->mac.raw[0]) && 798 if ((nh->ieee[0] == mac[0]) &&
797 (nh->ieee[1] == skb->mac.raw[1]) && 799 (nh->ieee[1] == mac[1]) &&
798 (nh->ieee[2] == skb->mac.raw[2]) && 800 (nh->ieee[2] == mac[2]) &&
799 (nh->ieee[3] == skb->mac.raw[3]) && 801 (nh->ieee[3] == mac[3]) &&
800 (nh->ieee[4] == skb->mac.raw[4]) && 802 (nh->ieee[4] == mac[4]) &&
801 (nh->ieee[5] == skb->mac.raw[5])) { 803 (nh->ieee[5] == mac[5])) {
802 cur_naa = nh->NAA; 804 cur_naa = nh->NAA;
803 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " 805 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
804 "= %04x.\n", cur_naa)); 806 "= %04x.\n", cur_naa));
@@ -810,12 +812,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
810#endif 812#endif
811 813
812 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | 814 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
813 (skb->mac.raw[0] << 8) | 815 (mac[0] << 8) |
814 (skb->mac.raw[1] << 0)); 816 (mac[1] << 0));
815 pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) | 817 pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
816 (skb->mac.raw[3] << 16) | 818 (mac[3] << 16) |
817 (skb->mac.raw[4] << 8) | 819 (mac[4] << 8) |
818 (skb->mac.raw[5] << 0)); 820 (mac[5] << 0));
819 821
820 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; 822 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
821 823
@@ -930,7 +932,7 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
930 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 932 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
931 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 933 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
932 934
933 memcpy(skb_put(skb, len), old_skb->data, len); 935 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
934 936
935 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, 937 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
936 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); 938 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
@@ -1091,7 +1093,7 @@ mpt_lan_receive_post_reply(struct net_device *dev,
1091 priv->RcvCtl[ctx].dma, 1093 priv->RcvCtl[ctx].dma,
1092 priv->RcvCtl[ctx].len, 1094 priv->RcvCtl[ctx].len,
1093 PCI_DMA_FROMDEVICE); 1095 PCI_DMA_FROMDEVICE);
1094 memcpy(skb_put(skb, l), old_skb->data, l); 1096 skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1095 1097
1096 pci_dma_sync_single_for_device(mpt_dev->pcidev, 1098 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1097 priv->RcvCtl[ctx].dma, 1099 priv->RcvCtl[ctx].dma,
@@ -1120,7 +1122,7 @@ mpt_lan_receive_post_reply(struct net_device *dev,
1120 priv->RcvCtl[ctx].len, 1122 priv->RcvCtl[ctx].len,
1121 PCI_DMA_FROMDEVICE); 1123 PCI_DMA_FROMDEVICE);
1122 1124
1123 memcpy(skb_put(skb, len), old_skb->data, len); 1125 skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1124 1126
1125 pci_dma_sync_single_for_device(mpt_dev->pcidev, 1127 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1126 priv->RcvCtl[ctx].dma, 1128 priv->RcvCtl[ctx].dma,
@@ -1549,7 +1551,7 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1549 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; 1551 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1550 struct fcllc *fcllc; 1552 struct fcllc *fcllc;
1551 1553
1552 skb->mac.raw = skb->data; 1554 skb_reset_mac_header(skb);
1553 skb_pull(skb, sizeof(struct mpt_lan_ohdr)); 1555 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1554 1556
1555 if (fch->dtype == htons(0xffff)) { 1557 if (fch->dtype == htons(0xffff)) {
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index d32698b02d7f..e0e82d849d5f 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -86,31 +86,26 @@ mmc_bus_uevent(struct device *dev, char **envp, int num_envp, char *buf,
86{ 86{
87 struct mmc_card *card = dev_to_mmc_card(dev); 87 struct mmc_card *card = dev_to_mmc_card(dev);
88 char ccc[13]; 88 char ccc[13];
89 int i = 0; 89 int retval = 0, i = 0, length = 0;
90 90
91#define add_env(fmt,val) \ 91#define add_env(fmt,val) do { \
92 ({ \ 92 retval = add_uevent_var(envp, num_envp, &i, \
93 int len, ret = -ENOMEM; \ 93 buf, buf_size, &length, \
94 if (i < num_envp) { \ 94 fmt, val); \
95 envp[i++] = buf; \ 95 if (retval) \
96 len = snprintf(buf, buf_size, fmt, val) + 1; \ 96 return retval; \
97 buf_size -= len; \ 97} while (0);
98 buf += len; \
99 if (buf_size >= 0) \
100 ret = 0; \
101 } \
102 ret; \
103 })
104 98
105 for (i = 0; i < 12; i++) 99 for (i = 0; i < 12; i++)
106 ccc[i] = card->csd.cmdclass & (1 << i) ? '1' : '0'; 100 ccc[i] = card->csd.cmdclass & (1 << i) ? '1' : '0';
107 ccc[12] = '\0'; 101 ccc[12] = '\0';
108 102
109 i = 0;
110 add_env("MMC_CCC=%s", ccc); 103 add_env("MMC_CCC=%s", ccc);
111 add_env("MMC_MANFID=%06x", card->cid.manfid); 104 add_env("MMC_MANFID=%06x", card->cid.manfid);
112 add_env("MMC_NAME=%s", mmc_card_name(card)); 105 add_env("MMC_NAME=%s", mmc_card_name(card));
113 add_env("MMC_OEMID=%04x", card->cid.oemid); 106 add_env("MMC_OEMID=%04x", card->cid.oemid);
107#undef add_env
108 envp[i] = NULL;
114 109
115 return 0; 110 return 0;
116} 111}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 26f75c299440..c1b47db29bd2 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,8 +1,6 @@
1# $Id: Kconfig,v 1.11 2005/11/07 11:14:19 gleixner Exp $ 1# $Id: Kconfig,v 1.11 2005/11/07 11:14:19 gleixner Exp $
2 2
3menu "Memory Technology Devices (MTD)" 3menuconfig MTD
4
5config MTD
6 tristate "Memory Technology Device (MTD) support" 4 tristate "Memory Technology Device (MTD) support"
7 help 5 help
8 Memory Technology Devices are flash, RAM and similar chips, often 6 Memory Technology Devices are flash, RAM and similar chips, often
@@ -13,9 +11,10 @@ config MTD
13 them. It will also allow you to select individual drivers for 11 them. It will also allow you to select individual drivers for
14 particular hardware and users of MTD devices. If unsure, say N. 12 particular hardware and users of MTD devices. If unsure, say N.
15 13
14if MTD
15
16config MTD_DEBUG 16config MTD_DEBUG
17 bool "Debugging" 17 bool "Debugging"
18 depends on MTD
19 help 18 help
20 This turns on low-level debugging for the entire MTD sub-system. 19 This turns on low-level debugging for the entire MTD sub-system.
21 Normally, you should say 'N'. 20 Normally, you should say 'N'.
@@ -29,7 +28,6 @@ config MTD_DEBUG_VERBOSE
29 28
30config MTD_CONCAT 29config MTD_CONCAT
31 tristate "MTD concatenating support" 30 tristate "MTD concatenating support"
32 depends on MTD
33 help 31 help
34 Support for concatenating several MTD devices into a single 32 Support for concatenating several MTD devices into a single
35 (virtual) one. This allows you to have -for example- a JFFS(2) 33 (virtual) one. This allows you to have -for example- a JFFS(2)
@@ -38,7 +36,6 @@ config MTD_CONCAT
38 36
39config MTD_PARTITIONS 37config MTD_PARTITIONS
40 bool "MTD partitioning support" 38 bool "MTD partitioning support"
41 depends on MTD
42 help 39 help
43 If you have a device which needs to divide its flash chip(s) up 40 If you have a device which needs to divide its flash chip(s) up
44 into multiple 'partitions', each of which appears to the user as 41 into multiple 'partitions', each of which appears to the user as
@@ -153,11 +150,9 @@ config MTD_AFS_PARTS
153 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example. 150 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
154 151
155comment "User Modules And Translation Layers" 152comment "User Modules And Translation Layers"
156 depends on MTD
157 153
158config MTD_CHAR 154config MTD_CHAR
159 tristate "Direct char device access to MTD devices" 155 tristate "Direct char device access to MTD devices"
160 depends on MTD
161 help 156 help
162 This provides a character device for each MTD device present in 157 This provides a character device for each MTD device present in
163 the system, allowing the user to read and write directly to the 158 the system, allowing the user to read and write directly to the
@@ -166,12 +161,12 @@ config MTD_CHAR
166 161
167config MTD_BLKDEVS 162config MTD_BLKDEVS
168 tristate "Common interface to block layer for MTD 'translation layers'" 163 tristate "Common interface to block layer for MTD 'translation layers'"
169 depends on MTD && BLOCK 164 depends on BLOCK
170 default n 165 default n
171 166
172config MTD_BLOCK 167config MTD_BLOCK
173 tristate "Caching block device access to MTD devices" 168 tristate "Caching block device access to MTD devices"
174 depends on MTD && BLOCK 169 depends on BLOCK
175 select MTD_BLKDEVS 170 select MTD_BLKDEVS
176 ---help--- 171 ---help---
177 Although most flash chips have an erase size too large to be useful 172 Although most flash chips have an erase size too large to be useful
@@ -194,7 +189,7 @@ config MTD_BLOCK
194 189
195config MTD_BLOCK_RO 190config MTD_BLOCK_RO
196 tristate "Readonly block device access to MTD devices" 191 tristate "Readonly block device access to MTD devices"
197 depends on MTD_BLOCK!=y && MTD && BLOCK 192 depends on MTD_BLOCK!=y && BLOCK
198 select MTD_BLKDEVS 193 select MTD_BLKDEVS
199 help 194 help
200 This allows you to mount read-only file systems (such as cramfs) 195 This allows you to mount read-only file systems (such as cramfs)
@@ -206,7 +201,7 @@ config MTD_BLOCK_RO
206 201
207config FTL 202config FTL
208 tristate "FTL (Flash Translation Layer) support" 203 tristate "FTL (Flash Translation Layer) support"
209 depends on MTD && BLOCK 204 depends on BLOCK
210 select MTD_BLKDEVS 205 select MTD_BLKDEVS
211 ---help--- 206 ---help---
212 This provides support for the original Flash Translation Layer which 207 This provides support for the original Flash Translation Layer which
@@ -223,7 +218,7 @@ config FTL
223 218
224config NFTL 219config NFTL
225 tristate "NFTL (NAND Flash Translation Layer) support" 220 tristate "NFTL (NAND Flash Translation Layer) support"
226 depends on MTD && BLOCK 221 depends on BLOCK
227 select MTD_BLKDEVS 222 select MTD_BLKDEVS
228 ---help--- 223 ---help---
229 This provides support for the NAND Flash Translation Layer which is 224 This provides support for the NAND Flash Translation Layer which is
@@ -247,7 +242,7 @@ config NFTL_RW
247 242
248config INFTL 243config INFTL
249 tristate "INFTL (Inverse NAND Flash Translation Layer) support" 244 tristate "INFTL (Inverse NAND Flash Translation Layer) support"
250 depends on MTD && BLOCK 245 depends on BLOCK
251 select MTD_BLKDEVS 246 select MTD_BLKDEVS
252 ---help--- 247 ---help---
253 This provides support for the Inverse NAND Flash Translation 248 This provides support for the Inverse NAND Flash Translation
@@ -265,7 +260,7 @@ config INFTL
265 260
266config RFD_FTL 261config RFD_FTL
267 tristate "Resident Flash Disk (Flash Translation Layer) support" 262 tristate "Resident Flash Disk (Flash Translation Layer) support"
268 depends on MTD && BLOCK 263 depends on BLOCK
269 select MTD_BLKDEVS 264 select MTD_BLKDEVS
270 ---help--- 265 ---help---
271 This provides support for the flash translation layer known 266 This provides support for the flash translation layer known
@@ -276,7 +271,7 @@ config RFD_FTL
276 271
277config SSFDC 272config SSFDC
278 tristate "NAND SSFDC (SmartMedia) read only translation layer" 273 tristate "NAND SSFDC (SmartMedia) read only translation layer"
279 depends on MTD && BLOCK 274 depends on BLOCK
280 select MTD_BLKDEVS 275 select MTD_BLKDEVS
281 help 276 help
282 This enables read only access to SmartMedia formatted NAND 277 This enables read only access to SmartMedia formatted NAND
@@ -292,5 +287,6 @@ source "drivers/mtd/nand/Kconfig"
292 287
293source "drivers/mtd/onenand/Kconfig" 288source "drivers/mtd/onenand/Kconfig"
294 289
295endmenu 290source "drivers/mtd/ubi/Kconfig"
296 291
292endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index c130e6261adf..92055405cb30 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -28,3 +28,5 @@ nftl-objs := nftlcore.o nftlmount.o
28inftl-objs := inftlcore.o inftlmount.o 28inftl-objs := inftlcore.o inftlmount.o
29 29
30obj-y += chips/ maps/ devices/ nand/ onenand/ 30obj-y += chips/ maps/ devices/ nand/ onenand/
31
32obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 72e6d73beb40..d28e0fc85e12 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -6,7 +6,6 @@ menu "RAM/ROM/Flash chip drivers"
6 6
7config MTD_CFI 7config MTD_CFI
8 tristate "Detect flash chips by Common Flash Interface (CFI) probe" 8 tristate "Detect flash chips by Common Flash Interface (CFI) probe"
9 depends on MTD
10 select MTD_GEN_PROBE 9 select MTD_GEN_PROBE
11 help 10 help
12 The Common Flash Interface specification was developed by Intel, 11 The Common Flash Interface specification was developed by Intel,
@@ -18,7 +17,6 @@ config MTD_CFI
18 17
19config MTD_JEDECPROBE 18config MTD_JEDECPROBE
20 tristate "Detect non-CFI AMD/JEDEC-compatible flash chips" 19 tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
21 depends on MTD
22 select MTD_GEN_PROBE 20 select MTD_GEN_PROBE
23 help 21 help
24 This option enables JEDEC-style probing of flash chips which are not 22 This option enables JEDEC-style probing of flash chips which are not
@@ -213,21 +211,18 @@ config MTD_CFI_UTIL
213 211
214config MTD_RAM 212config MTD_RAM
215 tristate "Support for RAM chips in bus mapping" 213 tristate "Support for RAM chips in bus mapping"
216 depends on MTD
217 help 214 help
218 This option enables basic support for RAM chips accessed through 215 This option enables basic support for RAM chips accessed through
219 a bus mapping driver. 216 a bus mapping driver.
220 217
221config MTD_ROM 218config MTD_ROM
222 tristate "Support for ROM chips in bus mapping" 219 tristate "Support for ROM chips in bus mapping"
223 depends on MTD
224 help 220 help
225 This option enables basic support for ROM chips accessed through 221 This option enables basic support for ROM chips accessed through
226 a bus mapping driver. 222 a bus mapping driver.
227 223
228config MTD_ABSENT 224config MTD_ABSENT
229 tristate "Support for absent chips in bus mapping" 225 tristate "Support for absent chips in bus mapping"
230 depends on MTD
231 help 226 help
232 This option enables support for a dummy probing driver used to 227 This option enables support for a dummy probing driver used to
233 allocated placeholder MTD devices on systems that have socketed 228 allocated placeholder MTD devices on systems that have socketed
@@ -237,7 +232,6 @@ config MTD_ABSENT
237 with this driver will return -ENODEV upon access. 232 with this driver will return -ENODEV upon access.
238 233
239config MTD_OBSOLETE_CHIPS 234config MTD_OBSOLETE_CHIPS
240 depends on MTD
241 bool "Older (theoretically obsoleted now) drivers for non-CFI chips" 235 bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
242 help 236 help
243 This option does not enable any code directly, but will allow you to 237 This option does not enable any code directly, but will allow you to
@@ -250,7 +244,7 @@ config MTD_OBSOLETE_CHIPS
250 244
251config MTD_AMDSTD 245config MTD_AMDSTD
252 tristate "AMD compatible flash chip support (non-CFI)" 246 tristate "AMD compatible flash chip support (non-CFI)"
253 depends on MTD && MTD_OBSOLETE_CHIPS && BROKEN 247 depends on MTD_OBSOLETE_CHIPS && BROKEN
254 help 248 help
255 This option enables support for flash chips using AMD-compatible 249 This option enables support for flash chips using AMD-compatible
256 commands, including some which are not CFI-compatible and hence 250 commands, including some which are not CFI-compatible and hence
@@ -260,7 +254,7 @@ config MTD_AMDSTD
260 254
261config MTD_SHARP 255config MTD_SHARP
262 tristate "pre-CFI Sharp chip support" 256 tristate "pre-CFI Sharp chip support"
263 depends on MTD && MTD_OBSOLETE_CHIPS 257 depends on MTD_OBSOLETE_CHIPS
264 help 258 help
265 This option enables support for flash chips using Sharp-compatible 259 This option enables support for flash chips using Sharp-compatible
266 commands, including some which are not CFI-compatible and hence 260 commands, including some which are not CFI-compatible and hence
@@ -268,7 +262,7 @@ config MTD_SHARP
268 262
269config MTD_JEDEC 263config MTD_JEDEC
270 tristate "JEDEC device support" 264 tristate "JEDEC device support"
271 depends on MTD && MTD_OBSOLETE_CHIPS && BROKEN 265 depends on MTD_OBSOLETE_CHIPS && BROKEN
272 help 266 help
273 Enable older JEDEC flash interface devices for self 267 Enable older JEDEC flash interface devices for self
274 programming flash. It is commonly used in older AMD chips. It is 268 programming flash. It is commonly used in older AMD chips. It is
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index f334959a335b..2f19fa78d24a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -15,6 +15,8 @@
15 * - optimized write buffer method 15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com> 16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash 17 * - reworked lock/unlock/erase support for var size flash
18 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
19 * - auto unlock sectors on resume for auto locking flash on power up
18 */ 20 */
19 21
20#include <linux/module.h> 22#include <linux/module.h>
@@ -30,6 +32,7 @@
30#include <linux/delay.h> 32#include <linux/delay.h>
31#include <linux/interrupt.h> 33#include <linux/interrupt.h>
32#include <linux/reboot.h> 34#include <linux/reboot.h>
35#include <linux/bitmap.h>
33#include <linux/mtd/xip.h> 36#include <linux/mtd/xip.h>
34#include <linux/mtd/map.h> 37#include <linux/mtd/map.h>
35#include <linux/mtd/mtd.h> 38#include <linux/mtd/mtd.h>
@@ -220,6 +223,15 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
220 } 223 }
221} 224}
222 225
226/*
227 * Some chips power-up with all sectors locked by default.
228 */
229static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
230{
231 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
232 mtd->flags |= MTD_STUPID_LOCK;
233}
234
223static struct cfi_fixup cfi_fixup_table[] = { 235static struct cfi_fixup cfi_fixup_table[] = {
224#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 236#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
225 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 237 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
@@ -232,6 +244,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
232#endif 244#endif
233 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 245 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
234 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 246 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
247 { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
235 { 0, 0, NULL, NULL } 248 { 0, 0, NULL, NULL }
236}; 249};
237 250
@@ -460,6 +473,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
460 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 473 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
461 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 474 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
462 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 475 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
476 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
463 } 477 }
464 offset += (ersize * ernum); 478 offset += (ersize * ernum);
465 } 479 }
@@ -1825,8 +1839,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1825 } 1839 }
1826} 1840}
1827 1841
1828#ifdef DEBUG_LOCK_BITS 1842static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1829static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1830 struct flchip *chip, 1843 struct flchip *chip,
1831 unsigned long adr, 1844 unsigned long adr,
1832 int len, void *thunk) 1845 int len, void *thunk)
@@ -1840,8 +1853,17 @@ static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1840 chip->state = FL_JEDEC_QUERY; 1853 chip->state = FL_JEDEC_QUERY;
1841 status = cfi_read_query(map, adr+(2*ofs_factor)); 1854 status = cfi_read_query(map, adr+(2*ofs_factor));
1842 xip_enable(map, chip, 0); 1855 xip_enable(map, chip, 0);
1856 return status;
1857}
1858
1859#ifdef DEBUG_LOCK_BITS
1860static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1861 struct flchip *chip,
1862 unsigned long adr,
1863 int len, void *thunk)
1864{
1843 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n", 1865 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1844 adr, status); 1866 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1845 return 0; 1867 return 0;
1846} 1868}
1847#endif 1869#endif
@@ -2216,14 +2238,45 @@ static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2216 2238
2217#endif 2239#endif
2218 2240
2241static void cfi_intelext_save_locks(struct mtd_info *mtd)
2242{
2243 struct mtd_erase_region_info *region;
2244 int block, status, i;
2245 unsigned long adr;
2246 size_t len;
2247
2248 for (i = 0; i < mtd->numeraseregions; i++) {
2249 region = &mtd->eraseregions[i];
2250 if (!region->lockmap)
2251 continue;
2252
2253 for (block = 0; block < region->numblocks; block++){
2254 len = region->erasesize;
2255 adr = region->offset + block * len;
2256
2257 status = cfi_varsize_frob(mtd,
2258 do_getlockstatus_oneblock, adr, len, 0);
2259 if (status)
2260 set_bit(block, region->lockmap);
2261 else
2262 clear_bit(block, region->lockmap);
2263 }
2264 }
2265}
2266
2219static int cfi_intelext_suspend(struct mtd_info *mtd) 2267static int cfi_intelext_suspend(struct mtd_info *mtd)
2220{ 2268{
2221 struct map_info *map = mtd->priv; 2269 struct map_info *map = mtd->priv;
2222 struct cfi_private *cfi = map->fldrv_priv; 2270 struct cfi_private *cfi = map->fldrv_priv;
2271 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2223 int i; 2272 int i;
2224 struct flchip *chip; 2273 struct flchip *chip;
2225 int ret = 0; 2274 int ret = 0;
2226 2275
2276 if ((mtd->flags & MTD_STUPID_LOCK)
2277 && extp && (extp->FeatureSupport & (1 << 5)))
2278 cfi_intelext_save_locks(mtd);
2279
2227 for (i=0; !ret && i<cfi->numchips; i++) { 2280 for (i=0; !ret && i<cfi->numchips; i++) {
2228 chip = &cfi->chips[i]; 2281 chip = &cfi->chips[i];
2229 2282
@@ -2285,10 +2338,33 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2285 return ret; 2338 return ret;
2286} 2339}
2287 2340
2341static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2342{
2343 struct mtd_erase_region_info *region;
2344 int block, i;
2345 unsigned long adr;
2346 size_t len;
2347
2348 for (i = 0; i < mtd->numeraseregions; i++) {
2349 region = &mtd->eraseregions[i];
2350 if (!region->lockmap)
2351 continue;
2352
2353 for (block = 0; block < region->numblocks; block++) {
2354 len = region->erasesize;
2355 adr = region->offset + block * len;
2356
2357 if (!test_bit(block, region->lockmap))
2358 cfi_intelext_unlock(mtd, adr, len);
2359 }
2360 }
2361}
2362
2288static void cfi_intelext_resume(struct mtd_info *mtd) 2363static void cfi_intelext_resume(struct mtd_info *mtd)
2289{ 2364{
2290 struct map_info *map = mtd->priv; 2365 struct map_info *map = mtd->priv;
2291 struct cfi_private *cfi = map->fldrv_priv; 2366 struct cfi_private *cfi = map->fldrv_priv;
2367 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2292 int i; 2368 int i;
2293 struct flchip *chip; 2369 struct flchip *chip;
2294 2370
@@ -2307,6 +2383,10 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2307 2383
2308 spin_unlock(chip->mutex); 2384 spin_unlock(chip->mutex);
2309 } 2385 }
2386
2387 if ((mtd->flags & MTD_STUPID_LOCK)
2388 && extp && (extp->FeatureSupport & (1 << 5)))
2389 cfi_intelext_restore_locks(mtd);
2310} 2390}
2311 2391
2312static int cfi_intelext_reset(struct mtd_info *mtd) 2392static int cfi_intelext_reset(struct mtd_info *mtd)
@@ -2347,12 +2427,19 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
2347{ 2427{
2348 struct map_info *map = mtd->priv; 2428 struct map_info *map = mtd->priv;
2349 struct cfi_private *cfi = map->fldrv_priv; 2429 struct cfi_private *cfi = map->fldrv_priv;
2430 struct mtd_erase_region_info *region;
2431 int i;
2350 cfi_intelext_reset(mtd); 2432 cfi_intelext_reset(mtd);
2351 unregister_reboot_notifier(&mtd->reboot_notifier); 2433 unregister_reboot_notifier(&mtd->reboot_notifier);
2352 kfree(cfi->cmdset_priv); 2434 kfree(cfi->cmdset_priv);
2353 kfree(cfi->cfiq); 2435 kfree(cfi->cfiq);
2354 kfree(cfi->chips[0].priv); 2436 kfree(cfi->chips[0].priv);
2355 kfree(cfi); 2437 kfree(cfi);
2438 for (i = 0; i < mtd->numeraseregions; i++) {
2439 region = &mtd->eraseregions[i];
2440 if (region->lockmap)
2441 kfree(region->lockmap);
2442 }
2356 kfree(mtd->eraseregions); 2443 kfree(mtd->eraseregions);
2357} 2444}
2358 2445
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 77303ce5dcf1..ab44f2b996f8 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -65,11 +65,12 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
65 return ret; 65 return ret;
66 } 66 }
67 67
68 chip->oldstate = chip->state;
68 chip->state = xxlt->state; 69 chip->state = xxlt->state;
69 map_write(map, CMD(xxlt->val), adr); 70 map_write(map, CMD(xxlt->val), adr);
70 71
71 /* Done and happy. */ 72 /* Done and happy. */
72 chip->state = FL_READY; 73 chip->state = chip->oldstate;
73 put_chip(map, chip, adr); 74 put_chip(map, chip, adr);
74 spin_unlock(chip->mutex); 75 spin_unlock(chip->mutex);
75 return 0; 76 return 0;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 440f6851da69..690c94236d7f 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -6,7 +6,7 @@ menu "Self-contained MTD device drivers"
6 6
7config MTD_PMC551 7config MTD_PMC551
8 tristate "Ramix PMC551 PCI Mezzanine RAM card support" 8 tristate "Ramix PMC551 PCI Mezzanine RAM card support"
9 depends on MTD && PCI 9 depends on PCI
10 ---help--- 10 ---help---
11 This provides a MTD device driver for the Ramix PMC551 RAM PCI card 11 This provides a MTD device driver for the Ramix PMC551 RAM PCI card
12 from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>. 12 from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
@@ -40,7 +40,7 @@ config MTD_PMC551_DEBUG
40 40
41config MTD_MS02NV 41config MTD_MS02NV
42 tristate "DEC MS02-NV NVRAM module support" 42 tristate "DEC MS02-NV NVRAM module support"
43 depends on MTD && MACH_DECSTATION 43 depends on MACH_DECSTATION
44 help 44 help
45 This is an MTD driver for the DEC's MS02-NV (54-20948-01) battery 45 This is an MTD driver for the DEC's MS02-NV (54-20948-01) battery
46 backed-up NVRAM module. The module was originally meant as an NFS 46 backed-up NVRAM module. The module was originally meant as an NFS
@@ -54,15 +54,23 @@ config MTD_MS02NV
54 54
55config MTD_DATAFLASH 55config MTD_DATAFLASH
56 tristate "Support for AT45xxx DataFlash" 56 tristate "Support for AT45xxx DataFlash"
57 depends on MTD && SPI_MASTER && EXPERIMENTAL 57 depends on SPI_MASTER && EXPERIMENTAL
58 help 58 help
59 This enables access to AT45xxx DataFlash chips, using SPI. 59 This enables access to AT45xxx DataFlash chips, using SPI.
60 Sometimes DataFlash chips are packaged inside MMC-format 60 Sometimes DataFlash chips are packaged inside MMC-format
61 cards; at this writing, the MMC stack won't handle those. 61 cards; at this writing, the MMC stack won't handle those.
62 62
63config MTD_DATAFLASH26
64 tristate "AT91RM9200 DataFlash AT26xxx"
65 depends on MTD && ARCH_AT91RM9200 && AT91_SPI
66 help
67 This enables access to the DataFlash chip (AT26xxx) on an
68 AT91RM9200-based board.
69 If you have such a board and such a DataFlash, say 'Y'.
70
63config MTD_M25P80 71config MTD_M25P80
64 tristate "Support for M25 SPI Flash" 72 tristate "Support for M25 SPI Flash"
65 depends on MTD && SPI_MASTER && EXPERIMENTAL 73 depends on SPI_MASTER && EXPERIMENTAL
66 help 74 help
67 This enables access to ST M25P80 and similar SPI flash chips, 75 This enables access to ST M25P80 and similar SPI flash chips,
68 used for program and data storage. Set up your spi devices 76 used for program and data storage. Set up your spi devices
@@ -70,7 +78,6 @@ config MTD_M25P80
70 78
71config MTD_SLRAM 79config MTD_SLRAM
72 tristate "Uncached system RAM" 80 tristate "Uncached system RAM"
73 depends on MTD
74 help 81 help
75 If your CPU cannot cache all of the physical memory in your machine, 82 If your CPU cannot cache all of the physical memory in your machine,
76 you can still use it for storage or swap by using this driver to 83 you can still use it for storage or swap by using this driver to
@@ -78,7 +85,6 @@ config MTD_SLRAM
78 85
79config MTD_PHRAM 86config MTD_PHRAM
80 tristate "Physical system RAM" 87 tristate "Physical system RAM"
81 depends on MTD
82 help 88 help
83 This is a re-implementation of the slram driver above. 89 This is a re-implementation of the slram driver above.
84 90
@@ -88,7 +94,7 @@ config MTD_PHRAM
88 94
89config MTD_LART 95config MTD_LART
90 tristate "28F160xx flash driver for LART" 96 tristate "28F160xx flash driver for LART"
91 depends on SA1100_LART && MTD 97 depends on SA1100_LART
92 help 98 help
93 This enables the flash driver for LART. Please note that you do 99 This enables the flash driver for LART. Please note that you do
94 not need any mapping/chip driver for LART. This one does it all 100 not need any mapping/chip driver for LART. This one does it all
@@ -96,7 +102,6 @@ config MTD_LART
96 102
97config MTD_MTDRAM 103config MTD_MTDRAM
98 tristate "Test driver using RAM" 104 tristate "Test driver using RAM"
99 depends on MTD
100 help 105 help
101 This enables a test MTD device driver which uses vmalloc() to 106 This enables a test MTD device driver which uses vmalloc() to
102 provide storage. You probably want to say 'N' unless you're 107 provide storage. You probably want to say 'N' unless you're
@@ -136,7 +141,7 @@ config MTDRAM_ABS_POS
136 141
137config MTD_BLOCK2MTD 142config MTD_BLOCK2MTD
138 tristate "MTD using block device" 143 tristate "MTD using block device"
139 depends on MTD && BLOCK 144 depends on BLOCK
140 help 145 help
141 This driver allows a block device to appear as an MTD. It would 146 This driver allows a block device to appear as an MTD. It would
142 generally be used in the following cases: 147 generally be used in the following cases:
@@ -150,7 +155,6 @@ comment "Disk-On-Chip Device Drivers"
150 155
151config MTD_DOC2000 156config MTD_DOC2000
152 tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)" 157 tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
153 depends on MTD
154 select MTD_DOCPROBE 158 select MTD_DOCPROBE
155 select MTD_NAND_IDS 159 select MTD_NAND_IDS
156 ---help--- 160 ---help---
@@ -173,7 +177,6 @@ config MTD_DOC2000
173 177
174config MTD_DOC2001 178config MTD_DOC2001
175 tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)" 179 tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
176 depends on MTD
177 select MTD_DOCPROBE 180 select MTD_DOCPROBE
178 select MTD_NAND_IDS 181 select MTD_NAND_IDS
179 ---help--- 182 ---help---
@@ -195,7 +198,6 @@ config MTD_DOC2001
195 198
196config MTD_DOC2001PLUS 199config MTD_DOC2001PLUS
197 tristate "M-Systems Disk-On-Chip Millennium Plus" 200 tristate "M-Systems Disk-On-Chip Millennium Plus"
198 depends on MTD
199 select MTD_DOCPROBE 201 select MTD_DOCPROBE
200 select MTD_NAND_IDS 202 select MTD_NAND_IDS
201 ---help--- 203 ---help---
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0f788d5c4bf8..8ab568b3f533 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
16obj-$(CONFIG_MTD_LART) += lart.o 16obj-$(CONFIG_MTD_LART) += lart.o
17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
19obj-$(CONFIG_MTD_DATAFLASH26) += at91_dataflash26.o
19obj-$(CONFIG_MTD_M25P80) += m25p80.o 20obj-$(CONFIG_MTD_M25P80) += m25p80.o
diff --git a/drivers/mtd/devices/at91_dataflash26.c b/drivers/mtd/devices/at91_dataflash26.c
new file mode 100644
index 000000000000..64ce37f986fc
--- /dev/null
+++ b/drivers/mtd/devices/at91_dataflash26.c
@@ -0,0 +1,485 @@
1/*
2 * Atmel DataFlash driver for Atmel AT91RM9200 (Thunder)
3 * This is a largely modified version of at91_dataflash.c that
4 * supports AT26xxx dataflash chips. The original driver supports
5 * AT45xxx chips.
6 *
7 * Note: This driver was only tested with an AT26F004. It should be
8 * easy to make it work with other AT26xxx dataflash devices, though.
9 *
10 * Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de>
11 * original Copyright (C) SAN People (Pty) Ltd
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * version 2 as published by the Free Software Foundation.
16*/
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/mtd/mtd.h>
22
23#include <asm/arch/at91_spi.h>
24
25#define DATAFLASH_MAX_DEVICES 4 /* max number of dataflash devices */
26
27#define MANUFACTURER_ID_ATMEL 0x1F
28
29/* command codes */
30
31#define AT26_OP_READ_STATUS 0x05
32#define AT26_OP_READ_DEV_ID 0x9F
33#define AT26_OP_ERASE_PAGE_4K 0x20
34#define AT26_OP_READ_ARRAY_FAST 0x0B
35#define AT26_OP_SEQUENTIAL_WRITE 0xAF
36#define AT26_OP_WRITE_ENABLE 0x06
37#define AT26_OP_WRITE_DISABLE 0x04
38#define AT26_OP_SECTOR_PROTECT 0x36
39#define AT26_OP_SECTOR_UNPROTECT 0x39
40
41/* status register bits */
42
43#define AT26_STATUS_BUSY 0x01
44#define AT26_STATUS_WRITE_ENABLE 0x02
45
46struct dataflash_local
47{
48 int spi; /* SPI chip-select number */
49 unsigned int page_size; /* number of bytes per page */
50};
51
52
53/* Detected DataFlash devices */
54static struct mtd_info* mtd_devices[DATAFLASH_MAX_DEVICES];
55static int nr_devices = 0;
56
57/* Allocate a single SPI transfer descriptor. We're assuming that if multiple
58 SPI transfers occur at the same time, spi_access_bus() will serialize them.
59 If this is not valid, then either (i) each dataflash 'priv' structure
 60   needs its own transfer descriptor, (ii) we lock this one, or (iii) use
61 another mechanism. */
62static struct spi_transfer_list* spi_transfer_desc;
63
64/*
65 * Perform a SPI transfer to access the DataFlash device.
66 */
67static int do_spi_transfer(int nr, char* tx, int tx_len, char* rx, int rx_len,
68 char* txnext, int txnext_len, char* rxnext, int rxnext_len)
69{
70 struct spi_transfer_list* list = spi_transfer_desc;
71
72 list->tx[0] = tx; list->txlen[0] = tx_len;
73 list->rx[0] = rx; list->rxlen[0] = rx_len;
74
75 list->tx[1] = txnext; list->txlen[1] = txnext_len;
76 list->rx[1] = rxnext; list->rxlen[1] = rxnext_len;
77
78 list->nr_transfers = nr;
79 /* Note: spi_transfer() always returns 0, there are no error checks */
80 return spi_transfer(list);
81}
82
83/*
84 * Return the status of the DataFlash device.
85 */
86static unsigned char at91_dataflash26_status(void)
87{
88 unsigned char command[2];
89
90 command[0] = AT26_OP_READ_STATUS;
91 command[1] = 0;
92
93 do_spi_transfer(1, command, 2, command, 2, NULL, 0, NULL, 0);
94
95 return command[1];
96}
97
98/*
99 * Poll the DataFlash device until it is READY.
100 */
101static unsigned char at91_dataflash26_waitready(void)
102{
103 unsigned char status;
104
105 while (1) {
106 status = at91_dataflash26_status();
107 if (!(status & AT26_STATUS_BUSY))
108 return status;
109 }
110}
111
112/*
113 * Enable/disable write access
114 */
115 static void at91_dataflash26_write_enable(int enable)
116{
117 unsigned char cmd[2];
118
119 DEBUG(MTD_DEBUG_LEVEL3, "write_enable: enable=%i\n", enable);
120
121 if (enable)
122 cmd[0] = AT26_OP_WRITE_ENABLE;
123 else
124 cmd[0] = AT26_OP_WRITE_DISABLE;
125 cmd[1] = 0;
126
127 do_spi_transfer(1, cmd, 2, cmd, 2, NULL, 0, NULL, 0);
128}
129
130/*
131 * Protect/unprotect sector
132 */
133 static void at91_dataflash26_sector_protect(loff_t addr, int protect)
134{
135 unsigned char cmd[4];
136
137 DEBUG(MTD_DEBUG_LEVEL3, "sector_protect: addr=0x%06x prot=%d\n",
138 addr, protect);
139
140 if (protect)
141 cmd[0] = AT26_OP_SECTOR_PROTECT;
142 else
143 cmd[0] = AT26_OP_SECTOR_UNPROTECT;
144 cmd[1] = (addr & 0x00FF0000) >> 16;
145 cmd[2] = (addr & 0x0000FF00) >> 8;
146 cmd[3] = (addr & 0x000000FF);
147
148 do_spi_transfer(1, cmd, 4, cmd, 4, NULL, 0, NULL, 0);
149}
150
151/*
152 * Erase blocks of flash.
153 */
154static int at91_dataflash26_erase(struct mtd_info *mtd,
155 struct erase_info *instr)
156{
157 struct dataflash_local *priv = (struct dataflash_local *) mtd->priv;
158 unsigned char cmd[4];
159
160 DEBUG(MTD_DEBUG_LEVEL1, "dataflash_erase: addr=0x%06x len=%i\n",
161 instr->addr, instr->len);
162
163 /* Sanity checks */
164 if (priv->page_size != 4096)
165 return -EINVAL; /* Can't handle other sizes at the moment */
166
167 if ( ((instr->len % mtd->erasesize) != 0)
168 || ((instr->len % priv->page_size) != 0)
169 || ((instr->addr % priv->page_size) != 0)
170 || ((instr->addr + instr->len) > mtd->size))
171 return -EINVAL;
172
173 spi_access_bus(priv->spi);
174
175 while (instr->len > 0) {
176 at91_dataflash26_write_enable(1);
177 at91_dataflash26_sector_protect(instr->addr, 0);
178 at91_dataflash26_write_enable(1);
179 cmd[0] = AT26_OP_ERASE_PAGE_4K;
180 cmd[1] = (instr->addr & 0x00FF0000) >> 16;
181 cmd[2] = (instr->addr & 0x0000FF00) >> 8;
182 cmd[3] = (instr->addr & 0x000000FF);
183
184 DEBUG(MTD_DEBUG_LEVEL3, "ERASE: (0x%02x) 0x%02x 0x%02x"
 185			" 0x%02x\n",
186 cmd[0], cmd[1], cmd[2], cmd[3]);
187
188 do_spi_transfer(1, cmd, 4, cmd, 4, NULL, 0, NULL, 0);
189 at91_dataflash26_waitready();
190
191 instr->addr += priv->page_size; /* next page */
192 instr->len -= priv->page_size;
193 }
194
195 at91_dataflash26_write_enable(0);
196 spi_release_bus(priv->spi);
197
198 /* Inform MTD subsystem that erase is complete */
199 instr->state = MTD_ERASE_DONE;
200 if (instr->callback)
201 instr->callback(instr);
202
203 return 0;
204}
205
206/*
207 * Read from the DataFlash device.
208 * from : Start offset in flash device
209 * len : Number of bytes to read
210 * retlen : Number of bytes actually read
211 * buf : Buffer that will receive data
212 */
213static int at91_dataflash26_read(struct mtd_info *mtd, loff_t from, size_t len,
214 size_t *retlen, u_char *buf)
215{
216 struct dataflash_local *priv = (struct dataflash_local *) mtd->priv;
217 unsigned char cmd[5];
218
219 DEBUG(MTD_DEBUG_LEVEL1, "dataflash_read: %lli .. %lli\n",
220 from, from+len);
221
222 *retlen = 0;
223
224 /* Sanity checks */
225 if (!len)
226 return 0;
227 if (from + len > mtd->size)
228 return -EINVAL;
229
230 cmd[0] = AT26_OP_READ_ARRAY_FAST;
231 cmd[1] = (from & 0x00FF0000) >> 16;
232 cmd[2] = (from & 0x0000FF00) >> 8;
233 cmd[3] = (from & 0x000000FF);
234 /* cmd[4] is a "Don't care" byte */
235
236 DEBUG(MTD_DEBUG_LEVEL3, "READ: (0x%02x) 0x%02x 0x%02x 0x%02x\n",
237 cmd[0], cmd[1], cmd[2], cmd[3]);
238
239 spi_access_bus(priv->spi);
240 do_spi_transfer(2, cmd, 5, cmd, 5, buf, len, buf, len);
241 spi_release_bus(priv->spi);
242
243 *retlen = len;
244 return 0;
245}
246
247/*
248 * Write to the DataFlash device.
249 * to : Start offset in flash device
250 * len : Number of bytes to write
251 * retlen : Number of bytes actually written
252 * buf : Buffer containing the data
253 */
254static int at91_dataflash26_write(struct mtd_info *mtd, loff_t to, size_t len,
255 size_t *retlen, const u_char *buf)
256{
257 struct dataflash_local *priv = (struct dataflash_local *) mtd->priv;
258 unsigned int addr, buf_index = 0;
259 int ret = -EIO, sector, last_sector;
260 unsigned char status, cmd[5];
261
262 DEBUG(MTD_DEBUG_LEVEL1, "dataflash_write: %lli .. %lli\n", to, to+len);
263
264 *retlen = 0;
265
266 /* Sanity checks */
267 if (!len)
268 return 0;
269 if (to + len > mtd->size)
270 return -EINVAL;
271
272 spi_access_bus(priv->spi);
273
274 addr = to;
275 last_sector = -1;
276
277 while (buf_index < len) {
278 sector = addr / priv->page_size;
279 /* Write first byte if a new sector begins */
280 if (sector != last_sector) {
281 at91_dataflash26_write_enable(1);
282 at91_dataflash26_sector_protect(addr, 0);
283 at91_dataflash26_write_enable(1);
284
285 /* Program first byte of a new sector */
286 cmd[0] = AT26_OP_SEQUENTIAL_WRITE;
287 cmd[1] = (addr & 0x00FF0000) >> 16;
288 cmd[2] = (addr & 0x0000FF00) >> 8;
289 cmd[3] = (addr & 0x000000FF);
290 cmd[4] = buf[buf_index++];
291 do_spi_transfer(1, cmd, 5, cmd, 5, NULL, 0, NULL, 0);
292 status = at91_dataflash26_waitready();
293 addr++;
294 /* On write errors, the chip resets the write enable
295 flag. This also happens after the last byte of a
296 sector is successfully programmed. */
297 if ( ( !(status & AT26_STATUS_WRITE_ENABLE))
298 && ((addr % priv->page_size) != 0) ) {
299 DEBUG(MTD_DEBUG_LEVEL1,
300 "write error1: addr=0x%06x, "
301 "status=0x%02x\n", addr, status);
302 goto write_err;
303 }
304 (*retlen)++;
305 last_sector = sector;
306 }
307
308 /* Write subsequent bytes in the same sector */
309 cmd[0] = AT26_OP_SEQUENTIAL_WRITE;
310 cmd[1] = buf[buf_index++];
311 do_spi_transfer(1, cmd, 2, cmd, 2, NULL, 0, NULL, 0);
312 status = at91_dataflash26_waitready();
313 addr++;
314
315 if ( ( !(status & AT26_STATUS_WRITE_ENABLE))
316 && ((addr % priv->page_size) != 0) ) {
317 DEBUG(MTD_DEBUG_LEVEL1, "write error2: addr=0x%06x, "
318 "status=0x%02x\n", addr, status);
319 goto write_err;
320 }
321
322 (*retlen)++;
323 }
324
325 ret = 0;
326 at91_dataflash26_write_enable(0);
327write_err:
328 spi_release_bus(priv->spi);
329 return ret;
330}
331
332/*
333 * Initialize and register DataFlash device with MTD subsystem.
334 */
335static int __init add_dataflash(int channel, char *name, int nr_pages,
336 int pagesize)
337{
338 struct mtd_info *device;
339 struct dataflash_local *priv;
340
341 if (nr_devices >= DATAFLASH_MAX_DEVICES) {
342 printk(KERN_ERR "at91_dataflash26: Too many devices "
343 "detected\n");
344 return 0;
345 }
346
347 device = kzalloc(sizeof(struct mtd_info) + strlen(name) + 8,
348 GFP_KERNEL);
349 if (!device)
350 return -ENOMEM;
351
352 device->name = (char *)&device[1];
353 sprintf(device->name, "%s.spi%d", name, channel);
354 device->size = nr_pages * pagesize;
355 device->erasesize = pagesize;
356 device->owner = THIS_MODULE;
357 device->type = MTD_DATAFLASH;
358 device->flags = MTD_CAP_NORFLASH;
359 device->erase = at91_dataflash26_erase;
360 device->read = at91_dataflash26_read;
361 device->write = at91_dataflash26_write;
362
363 priv = (struct dataflash_local *)kzalloc(sizeof(struct dataflash_local),
364 GFP_KERNEL);
365 if (!priv) {
366 kfree(device);
367 return -ENOMEM;
368 }
369
370 priv->spi = channel;
371 priv->page_size = pagesize;
372 device->priv = priv;
373
374 mtd_devices[nr_devices] = device;
375 nr_devices++;
376 printk(KERN_INFO "at91_dataflash26: %s detected [spi%i] (%i bytes)\n",
377 name, channel, device->size);
378
379 return add_mtd_device(device);
380}
381
382/*
383 * Detect and initialize DataFlash device connected to specified SPI channel.
384 *
385 */
386
387struct dataflash26_types {
388 unsigned char id0;
389 unsigned char id1;
390 char *name;
391 int pagesize;
392 int nr_pages;
393};
394
395struct dataflash26_types df26_types[] = {
396 {
397 .id0 = 0x04,
398 .id1 = 0x00,
399 .name = "AT26F004",
400 .pagesize = 4096,
401 .nr_pages = 128,
402 },
403 {
404 .id0 = 0x45,
405 .id1 = 0x01,
406 .name = "AT26DF081A", /* Not tested ! */
407 .pagesize = 4096,
408 .nr_pages = 256,
409 },
410};
411
412static int __init at91_dataflash26_detect(int channel)
413{
414 unsigned char status, cmd[5];
415 int i;
416
417 spi_access_bus(channel);
418 status = at91_dataflash26_status();
419
420 if (status == 0 || status == 0xff) {
421 printk(KERN_ERR "at91_dataflash26_detect: status error %d\n",
422 status);
423 spi_release_bus(channel);
424 return -ENODEV;
425 }
426
427 cmd[0] = AT26_OP_READ_DEV_ID;
428 do_spi_transfer(1, cmd, 5, cmd, 5, NULL, 0, NULL, 0);
429 spi_release_bus(channel);
430
431 if (cmd[1] != MANUFACTURER_ID_ATMEL)
432 return -ENODEV;
433
434 for (i = 0; i < ARRAY_SIZE(df26_types); i++) {
435 if ( cmd[2] == df26_types[i].id0
436 && cmd[3] == df26_types[i].id1)
437 return add_dataflash(channel,
438 df26_types[i].name,
439 df26_types[i].nr_pages,
440 df26_types[i].pagesize);
441 }
442
443 printk(KERN_ERR "at91_dataflash26_detect: Unsupported device "
444 "(0x%02x/0x%02x)\n", cmd[2], cmd[3]);
445 return -ENODEV;
446}
447
448static int __init at91_dataflash26_init(void)
449{
450 spi_transfer_desc = kmalloc(sizeof(struct spi_transfer_list),
451 GFP_KERNEL);
452 if (!spi_transfer_desc)
453 return -ENOMEM;
454
455 /* DataFlash (SPI chip select 0) */
456 at91_dataflash26_detect(0);
457
458#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
459 /* DataFlash card (SPI chip select 3) */
460 at91_dataflash26_detect(3);
461#endif
462 return 0;
463}
464
465static void __exit at91_dataflash26_exit(void)
466{
467 int i;
468
469 for (i = 0; i < DATAFLASH_MAX_DEVICES; i++) {
470 if (mtd_devices[i]) {
471 del_mtd_device(mtd_devices[i]);
472 kfree(mtd_devices[i]->priv);
473 kfree(mtd_devices[i]);
474 }
475 }
476 nr_devices = 0;
477 kfree(spi_transfer_desc);
478}
479
480module_init(at91_dataflash26_init);
481module_exit(at91_dataflash26_exit);
482
483MODULE_LICENSE("GPL");
484MODULE_AUTHOR("Hans J. Koch");
485MODULE_DESCRIPTION("DataFlash AT26xxx driver for Atmel AT91RM9200");
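
The write-enable check that at91_dataflash26_write() performs after every programmed byte is the subtle part of the driver above: the chip drops the write-enable bit both on a write error and after the last byte of a sector. A hypothetical helper restating that rule (the status-bit macro comes from the driver above; the helper itself is illustrative only):

/* Illustrative restatement of the per-byte check in at91_dataflash26_write():
 * a cleared write-enable bit is an error only when the incremented address
 * does NOT sit on a sector boundary, because the chip also clears the bit
 * after the final byte of a sector is programmed successfully. */
static int at26_byte_write_failed(unsigned char status, unsigned int next_addr,
				  unsigned int page_size)
{
	int write_enable_cleared = !(status & AT26_STATUS_WRITE_ENABLE);
	int at_sector_boundary = (next_addr % page_size) == 0;

	return write_enable_cleared && !at_sector_boundary;
}
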
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index f9f2ce7806b0..ce47544dc120 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -40,56 +40,9 @@ struct block2mtd_dev {
40static LIST_HEAD(blkmtd_device_list); 40static LIST_HEAD(blkmtd_device_list);
41 41
42 42
43#define PAGE_READAHEAD 64 43static struct page* page_read(struct address_space *mapping, int index)
44static void cache_readahead(struct address_space *mapping, int index)
45{ 44{
46 filler_t *filler = (filler_t*)mapping->a_ops->readpage; 45 filler_t *filler = (filler_t*)mapping->a_ops->readpage;
47 int i, pagei;
48 unsigned ret = 0;
49 unsigned long end_index;
50 struct page *page;
51 LIST_HEAD(page_pool);
52 struct inode *inode = mapping->host;
53 loff_t isize = i_size_read(inode);
54
55 if (!isize) {
56 INFO("iSize=0 in cache_readahead\n");
57 return;
58 }
59
60 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
61
62 read_lock_irq(&mapping->tree_lock);
63 for (i = 0; i < PAGE_READAHEAD; i++) {
64 pagei = index + i;
65 if (pagei > end_index) {
66 INFO("Overrun end of disk in cache readahead\n");
67 break;
68 }
69 page = radix_tree_lookup(&mapping->page_tree, pagei);
70 if (page && (!i))
71 break;
72 if (page)
73 continue;
74 read_unlock_irq(&mapping->tree_lock);
75 page = page_cache_alloc_cold(mapping);
76 read_lock_irq(&mapping->tree_lock);
77 if (!page)
78 break;
79 page->index = pagei;
80 list_add(&page->lru, &page_pool);
81 ret++;
82 }
83 read_unlock_irq(&mapping->tree_lock);
84 if (ret)
85 read_cache_pages(mapping, &page_pool, filler, NULL);
86}
87
88
89static struct page* page_readahead(struct address_space *mapping, int index)
90{
91 filler_t *filler = (filler_t*)mapping->a_ops->readpage;
92 cache_readahead(mapping, index);
93 return read_cache_page(mapping, index, filler, NULL); 46 return read_cache_page(mapping, index, filler, NULL);
94} 47}
95 48
@@ -105,14 +58,14 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
105 u_long *max; 58 u_long *max;
106 59
107 while (pages) { 60 while (pages) {
108 page = page_readahead(mapping, index); 61 page = page_read(mapping, index);
109 if (!page) 62 if (!page)
110 return -ENOMEM; 63 return -ENOMEM;
111 if (IS_ERR(page)) 64 if (IS_ERR(page))
112 return PTR_ERR(page); 65 return PTR_ERR(page);
113 66
114 max = (u_long*)page_address(page) + PAGE_SIZE; 67 max = page_address(page) + PAGE_SIZE;
115 for (p=(u_long*)page_address(page); p<max; p++) 68 for (p=page_address(page); p<max; p++)
116 if (*p != -1UL) { 69 if (*p != -1UL) {
117 lock_page(page); 70 lock_page(page);
118 memset(page_address(page), 0xff, PAGE_SIZE); 71 memset(page_address(page), 0xff, PAGE_SIZE);
@@ -174,8 +127,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
174 cpylen = len; // this page 127 cpylen = len; // this page
175 len = len - cpylen; 128 len = len - cpylen;
176 129
177 // Get page 130 page = page_read(dev->blkdev->bd_inode->i_mapping, index);
178 page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
179 if (!page) 131 if (!page)
180 return -ENOMEM; 132 return -ENOMEM;
181 if (IS_ERR(page)) 133 if (IS_ERR(page))
@@ -213,8 +165,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
213 cpylen = len; // this page 165 cpylen = len; // this page
214 len = len - cpylen; 166 len = len - cpylen;
215 167
216 // Get page 168 page = page_read(mapping, index);
217 page = page_readahead(mapping, index);
218 if (!page) 169 if (!page)
219 return -ENOMEM; 170 return -ENOMEM;
220 if (IS_ERR(page)) 171 if (IS_ERR(page))
@@ -308,9 +259,9 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
308 /* We might not have rootfs mounted at this point. Try 259 /* We might not have rootfs mounted at this point. Try
309 to resolve the device name by other means. */ 260 to resolve the device name by other means. */
310 261
311 dev_t dev = name_to_dev_t(devname); 262 dev_t devt = name_to_dev_t(devname);
312 if (dev != 0) { 263 if (devt) {
313 bdev = open_by_devnum(dev, FMODE_WRITE | FMODE_READ); 264 bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
314 } 265 }
315 } 266 }
316#endif 267#endif
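
The block2mtd conversion above replaces the hand-rolled readahead with a single read_cache_page() wrapper, and every converted caller then follows the same three-way result check. A condensed sketch of that calling convention (page_read() is the driver's static helper; the caller name here is illustrative):

#include <linux/err.h>
#include <linux/pagemap.h>

/* Sketch of the page_read() calling convention shared by the converted
 * callers: NULL means page allocation failed, an ERR_PTR carries the read
 * error, anything else is a page whose contents can be accessed directly. */
static int block2mtd_touch_page(struct address_space *mapping, int index)
{
	struct page *page = page_read(mapping, index);

	if (!page)
		return -ENOMEM;
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* ... read or modify page_address(page) here ... */
	return 0;
}
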
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bbf0553bdb2e..d990d8141ef5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -6,7 +6,6 @@ menu "Mapping drivers for chip access"
6 6
7config MTD_COMPLEX_MAPPINGS 7config MTD_COMPLEX_MAPPINGS
8 bool "Support non-linear mappings of flash chips" 8 bool "Support non-linear mappings of flash chips"
9 depends on MTD
10 help 9 help
11 This causes the chip drivers to allow for complicated 10 This causes the chip drivers to allow for complicated
12 paged mappings of flash chips. 11 paged mappings of flash chips.
@@ -69,6 +68,39 @@ config MTD_PHYSMAP_OF
69 physically into the CPU's memory. The mapping description here is 68 physically into the CPU's memory. The mapping description here is
70 taken from OF device tree. 69 taken from OF device tree.
71 70
71config MTD_PMC_MSP_EVM
72 tristate "CFI Flash device mapped on PMC-Sierra MSP"
73 depends on PMC_MSP && MTD_CFI
74 select MTD_PARTITIONS
75 help
76	  This provides a 'mapping' driver which supports the way
77	  in which user-programmable flash chips are connected on the
78	  PMC-Sierra MSP eval/demo boards.
79
80choice
81	prompt "Maximum mappable memory available for flash IO"
82 depends on MTD_PMC_MSP_EVM
83 default MSP_FLASH_MAP_LIMIT_32M
84
85config MSP_FLASH_MAP_LIMIT_32M
86 bool "32M"
87
88endchoice
89
90config MSP_FLASH_MAP_LIMIT
91 hex
92 default "0x02000000"
93 depends on MSP_FLASH_MAP_LIMIT_32M
94
95config MTD_PMC_MSP_RAMROOT
96 tristate "Embedded RAM block device for root on PMC-Sierra MSP"
97 depends on PMC_MSP_EMBEDDED_ROOTFS && \
98 (MTD_BLOCK || MTD_BLOCK_RO) && \
99 MTD_RAM
100 help
101 This provides support for the embedded root file system
102	  on PMC MSP devices. This memory is mapped as an MTD block device.
103
72config MTD_SUN_UFLASH 104config MTD_SUN_UFLASH
73 tristate "Sun Microsystems userflash support" 105 tristate "Sun Microsystems userflash support"
74 depends on SPARC && MTD_CFI 106 depends on SPARC && MTD_CFI
@@ -240,13 +272,13 @@ config MTD_NETtel
240 272
241config MTD_ALCHEMY 273config MTD_ALCHEMY
242 tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support" 274 tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support"
243 depends on SOC_AU1X00 275 depends on SOC_AU1X00 && MTD_PARTITIONS && MTD_CFI
244 help 276 help
245 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards 277 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
246 278
247config MTD_MTX1 279config MTD_MTX1
248 tristate "4G Systems MTX-1 Flash device" 280 tristate "4G Systems MTX-1 Flash device"
249 depends on MIPS && MIPS_MTX1 281 depends on MIPS_MTX1 && MTD_CFI
250 help 282 help
251 Flash memory access on 4G Systems MTX-1 Board. If you have one of 283 Flash memory access on 4G Systems MTX-1 Board. If you have one of
252 these boards and would like to use the flash chips on it, say 'Y'. 284 these boards and would like to use the flash chips on it, say 'Y'.
@@ -384,7 +416,7 @@ config MTD_TQM834x
384 416
385config MTD_OCELOT 417config MTD_OCELOT
386 tristate "Momenco Ocelot boot flash device" 418 tristate "Momenco Ocelot boot flash device"
387 depends on MIPS && MOMENCO_OCELOT 419 depends on MOMENCO_OCELOT
388 help 420 help
389 This enables access routines for the boot flash device and for the 421 This enables access routines for the boot flash device and for the
390 NVRAM on the Momenco Ocelot board. If you have one of these boards 422 NVRAM on the Momenco Ocelot board. If you have one of these boards
@@ -517,7 +549,7 @@ config MTD_OMAP_NOR
517# This needs CFI or JEDEC, depending on the cards found. 549# This needs CFI or JEDEC, depending on the cards found.
518config MTD_PCI 550config MTD_PCI
519 tristate "PCI MTD driver" 551 tristate "PCI MTD driver"
520 depends on MTD && PCI && MTD_COMPLEX_MAPPINGS 552 depends on PCI && MTD_COMPLEX_MAPPINGS
521 help 553 help
522 Mapping for accessing flash devices on add-in cards like the Intel XScale 554 Mapping for accessing flash devices on add-in cards like the Intel XScale
523 IQ80310 card, and the Intel EBSA285 card in blank ROM programming mode 555 IQ80310 card, and the Intel EBSA285 card in blank ROM programming mode
@@ -527,7 +559,7 @@ config MTD_PCI
527 559
528config MTD_PCMCIA 560config MTD_PCMCIA
529 tristate "PCMCIA MTD driver" 561 tristate "PCMCIA MTD driver"
530 depends on MTD && PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN 562 depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN
531 help 563 help
532 Map driver for accessing PCMCIA linear flash memory cards. These 564 Map driver for accessing PCMCIA linear flash memory cards. These
533 cards are usually around 4-16MiB in size. This does not include 565 cards are usually around 4-16MiB in size. This does not include
@@ -591,13 +623,12 @@ config MTD_BAST_MAXSIZE
591 623
592config MTD_SHARP_SL 624config MTD_SHARP_SL
593 bool "ROM mapped on Sharp SL Series" 625 bool "ROM mapped on Sharp SL Series"
594 depends on MTD && ARCH_PXA 626 depends on ARCH_PXA
595 help 627 help
596 This enables access to the flash chip on the Sharp SL Series of PDAs. 628 This enables access to the flash chip on the Sharp SL Series of PDAs.
597 629
598config MTD_PLATRAM 630config MTD_PLATRAM
599 tristate "Map driver for platform device RAM (mtd-ram)" 631 tristate "Map driver for platform device RAM (mtd-ram)"
600 depends on MTD
601 select MTD_RAM 632 select MTD_RAM
602 help 633 help
603 Map driver for RAM areas described via the platform device 634 Map driver for RAM areas described via the platform device
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 071d0bf922b6..de036c5e6139 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_MTD_CEIVA) += ceiva.o
27obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 27obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
28obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 28obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
29obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 29obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
30obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
31obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
30obj-$(CONFIG_MTD_PNC2000) += pnc2000.o 32obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
31obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 33obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
32obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 34obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index 7fc8097e41d2..84fbe0e8c47e 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -1,10 +1,7 @@
1/* 1/*
2 * Flash memory access on AMD Alchemy evaluation boards 2 * Flash memory access on AMD Alchemy evaluation boards
3 * 3 *
4 * $Id: alchemy-flash.c,v 1.2 2005/11/07 11:14:26 gleixner Exp $
5 *
6 * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com> 4 * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com>
7 *
8 */ 5 */
9 6
10#include <linux/init.h> 7#include <linux/init.h>
@@ -18,12 +15,6 @@
18 15
19#include <asm/io.h> 16#include <asm/io.h>
20 17
21#ifdef DEBUG_RW
22#define DBG(x...) printk(x)
23#else
24#define DBG(x...)
25#endif
26
27#ifdef CONFIG_MIPS_PB1000 18#ifdef CONFIG_MIPS_PB1000
28#define BOARD_MAP_NAME "Pb1000 Flash" 19#define BOARD_MAP_NAME "Pb1000 Flash"
29#define BOARD_FLASH_SIZE 0x00800000 /* 8MB */ 20#define BOARD_FLASH_SIZE 0x00800000 /* 8MB */
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 3d4a4d8ac789..688ef495888a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -338,7 +338,7 @@ static int __init init_ck804xrom(void)
338 } 338 }
339 return -ENXIO; 339 return -ENXIO;
340#if 0 340#if 0
341 return pci_module_init(&ck804xrom_driver); 341 return pci_register_driver(&ck804xrom_driver);
342#endif 342#endif
343} 343}
344 344
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 2b6504ecbbd1..894c0b271289 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -169,7 +169,8 @@ static int platram_probe(struct platform_device *pdev)
169 goto exit_free; 169 goto exit_free;
170 } 170 }
171 171
172 dev_dbg(&pdev->dev, "got platform resource %p (0x%lx)\n", res, res->start); 172 dev_dbg(&pdev->dev, "got platform resource %p (0x%llx)\n", res,
173 (unsigned long long)res->start);
173 174
174 /* setup map parameters */ 175 /* setup map parameters */
175 176
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
new file mode 100644
index 000000000000..7e0377ec1c40
--- /dev/null
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -0,0 +1,184 @@
1/*
2 * Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
3 * Config with both CFI and JEDEC device support.
4 *
5 * Basically physmap.c with the addition of partitions and
6 * an array of mapping info to accommodate more than one flash type per board.
7 *
8 * Copyright 2005-2007 PMC-Sierra, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
17 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
21 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
22 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 675 Mass Ave, Cambridge, MA 02139, USA.
29 */
30
31#include <linux/module.h>
32#include <linux/types.h>
33#include <linux/kernel.h>
34#include <linux/mtd/mtd.h>
35#include <linux/mtd/map.h>
36#include <linux/mtd/partitions.h>
37
38#include <asm/io.h>
39
40#include <msp_prom.h>
41#include <msp_regs.h>
42
43
44static struct mtd_info **msp_flash;
45static struct mtd_partition **msp_parts;
46static struct map_info *msp_maps;
47static int fcnt;
48
49#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n",__FUNCTION__,__LINE__)
50
51int __init init_msp_flash(void)
52{
53 int i, j;
54 int offset, coff;
55 char *env;
56 int pcnt;
57 char flash_name[] = "flash0";
58 char part_name[] = "flash0_0";
59 unsigned addr, size;
60
61 /* If ELB is disabled by "ful-mux" mode, we can't get at flash */
62 if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
63 (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
64 printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
65 return -ENXIO;
66 }
67
68 /* examine the prom environment for flash devices */
69 for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
70 flash_name[5] = '0' + fcnt + 1;
71
72 if (fcnt < 1)
73 return -ENXIO;
74
75 printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
76 msp_flash = (struct mtd_info **)kmalloc(
77 fcnt * sizeof(struct map_info *), GFP_KERNEL);
78 msp_parts = (struct mtd_partition **)kmalloc(
79 fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
80 msp_maps = (struct map_info *)kmalloc(
81 fcnt * sizeof(struct mtd_info), GFP_KERNEL);
82 memset(msp_maps, 0, fcnt * sizeof(struct mtd_info));
83
84 /* loop over the flash devices, initializing each */
85 for (i = 0; i < fcnt; i++) {
86		/* examine the prom environment for flash partitions */
87 part_name[5] = '0' + i;
88 part_name[7] = '0';
89 for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
90 part_name[7] = '0' + pcnt + 1;
91
92 if (pcnt == 0) {
93 printk(KERN_NOTICE "Skipping flash device %d "
94 "(no partitions defined)\n", i);
95 continue;
96 }
97
98 msp_parts[i] = (struct mtd_partition *)kmalloc(
99 pcnt * sizeof(struct mtd_partition), GFP_KERNEL);
100 memset(msp_parts[i], 0, pcnt * sizeof(struct mtd_partition));
101
102 /* now initialize the devices proper */
103 flash_name[5] = '0' + i;
104 env = prom_getenv(flash_name);
105
106 if (sscanf(env, "%x:%x", &addr, &size) < 2)
107 return -ENXIO;
108 addr = CPHYSADDR(addr);
109
110 printk(KERN_NOTICE
111 "MSP flash device \"%s\": 0x%08x at 0x%08x\n",
112 flash_name, size, addr);
113		/* This must match the actual size of the flash chip */
114 msp_maps[i].size = size;
115 msp_maps[i].phys = addr;
116
117 /*
118		 * Platforms have a specific limit on the size of memory
119		 * which may be mapped for flash:
120 */
121 if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
122 size = CONFIG_MSP_FLASH_MAP_LIMIT;
123 msp_maps[i].virt = ioremap(addr, size);
124 msp_maps[i].bankwidth = 1;
125 msp_maps[i].name = strncpy(kmalloc(7, GFP_KERNEL),
126 flash_name, 7);
127
128 if (msp_maps[i].virt == NULL)
129 return -ENXIO;
130
131 for (j = 0; j < pcnt; j++) {
132 part_name[5] = '0' + i;
133 part_name[7] = '0' + j;
134
135 env = prom_getenv(part_name);
136
137 if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2)
138 return -ENXIO;
139
140 msp_parts[i][j].size = size;
141 msp_parts[i][j].offset = offset;
142 msp_parts[i][j].name = env + coff;
143 }
144
145 /* now probe and add the device */
146 simple_map_init(&msp_maps[i]);
147 msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
148 if (msp_flash[i]) {
149 msp_flash[i]->owner = THIS_MODULE;
150 add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
151 } else {
152 printk(KERN_ERR "map probe failed for flash\n");
153 return -ENXIO;
154 }
155 }
156
157 return 0;
158}
159
160static void __exit cleanup_msp_flash(void)
161{
162 int i;
163
164 for (i = 0; i < sizeof(msp_flash) / sizeof(struct mtd_info **); i++) {
165 del_mtd_partitions(msp_flash[i]);
166 map_destroy(msp_flash[i]);
167 iounmap((void *)msp_maps[i].virt);
168
169 /* free the memory */
170 kfree(msp_maps[i].name);
171 kfree(msp_parts[i]);
172 }
173
174 kfree(msp_flash);
175 kfree(msp_parts);
176 kfree(msp_maps);
177}
178
179MODULE_AUTHOR("PMC-Sierra, Inc");
180MODULE_DESCRIPTION("MTD map driver for PMC-Sierra MSP boards");
181MODULE_LICENSE("GPL");
182
183module_init(init_msp_flash);
184module_exit(cleanup_msp_flash);
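
init_msp_flash() above relies on a simple prom environment convention: "flashN" variables carry "address:size" and "flashN_M" variables carry "offset:size:name" for each partition, all in hex. A minimal sketch of the top-level parse under that same assumption (the helper name is illustrative, not part of the driver):

#include <linux/kernel.h>	/* sscanf() */
#include <linux/errno.h>

/* Illustrative: parse one "flashN" value of the form "<hex addr>:<hex size>",
 * mirroring the sscanf() call in init_msp_flash(). */
static int parse_flash_var(const char *env, unsigned int *addr, unsigned int *size)
{
	if (!env || sscanf(env, "%x:%x", addr, size) < 2)
		return -ENXIO;
	return 0;
}
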
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
new file mode 100644
index 000000000000..18049bceba8d
--- /dev/null
+++ b/drivers/mtd/maps/pmcmsp-ramroot.c
@@ -0,0 +1,105 @@
1/*
2 * Mapping of the rootfs in a physical region of memory
3 *
4 * Copyright (C) 2005-2007 PMC-Sierra Inc.
5 * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
13 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
15 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
16 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
17 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
18 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
19 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
21 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/types.h>
30#include <linux/kernel.h>
31#include <linux/init.h>
32#include <linux/slab.h>
33#include <linux/fs.h>
34#include <linux/root_dev.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/map.h>
37
38#include <asm/io.h>
39
40#include <msp_prom.h>
41
42static struct mtd_info *rr_mtd;
43
44struct map_info rr_map = {
45 .name = "ramroot",
46 .bankwidth = 4,
47};
48
49static int __init init_rrmap(void)
50{
51 void *ramroot_start;
52 unsigned long ramroot_size;
53
54 /* Check for supported rootfs types */
55 if (get_ramroot(&ramroot_start, &ramroot_size)) {
56 rr_map.phys = CPHYSADDR(ramroot_start);
57 rr_map.size = ramroot_size;
58
59 printk(KERN_NOTICE
60 "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
61 rr_map.size, (unsigned long)rr_map.phys);
62 } else {
63 printk(KERN_ERR
64 "init_rrmap: no supported embedded rootfs detected!\n");
65 return -ENXIO;
66 }
67
68 /* Map rootfs to I/O space for block device driver */
69 rr_map.virt = ioremap(rr_map.phys, rr_map.size);
70 if (!rr_map.virt) {
71 printk(KERN_ERR "Failed to ioremap\n");
72 return -EIO;
73 }
74
75 simple_map_init(&rr_map);
76
77 rr_mtd = do_map_probe("map_ram", &rr_map);
78 if (rr_mtd) {
79 rr_mtd->owner = THIS_MODULE;
80
81 add_mtd_device(rr_mtd);
82 ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, rr_mtd->index);
83
84 return 0;
85 }
86
87 iounmap(rr_map.virt);
88 return -ENXIO;
89}
90
91static void __exit cleanup_rrmap(void)
92{
93 del_mtd_device(rr_mtd);
94 map_destroy(rr_mtd);
95
96 iounmap(rr_map.virt);
97 rr_map.virt = NULL;
98}
99
100MODULE_AUTHOR("PMC-Sierra, Inc");
101MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
102MODULE_LICENSE("GPL");
103
104module_init(init_rrmap);
105module_exit(cleanup_rrmap);
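
Stripped of the prom lookup and the ROOT_DEV wiring, init_rrmap() above follows the standard map-driver sequence: fill a map_info, ioremap the region, simple_map_init(), probe, register. A skeleton of just that sequence, with placeholder address and size and illustrative names (minimal error handling, not the driver itself):

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

static struct map_info example_map = {
	.name      = "example-ram",
	.bankwidth = 4,
};

static struct mtd_info *example_mtd;

static int __init example_map_init(void)
{
	example_map.phys = 0x08000000;		/* placeholder physical address */
	example_map.size = 4 * 1024 * 1024;	/* placeholder size */
	example_map.virt = ioremap(example_map.phys, example_map.size);
	if (!example_map.virt)
		return -EIO;

	simple_map_init(&example_map);
	example_mtd = do_map_probe("map_ram", &example_map);
	if (!example_mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}

	example_mtd->owner = THIS_MODULE;
	add_mtd_device(example_mtd);
	return 0;
}
module_init(example_map_init);
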
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 4db2055cee31..001af7f7ddda 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -39,7 +39,7 @@ MODULE_VERSION("2.0");
39 39
40static LIST_HEAD(device_list); 40static LIST_HEAD(device_list);
41struct uflash_dev { 41struct uflash_dev {
42 char *name; /* device name */ 42 const char *name; /* device name */
43 struct map_info map; /* mtd map info */ 43 struct map_info map; /* mtd map info */
44 struct mtd_info *mtd; /* mtd info */ 44 struct mtd_info *mtd; /* mtd info */
45}; 45};
@@ -80,7 +80,7 @@ int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp)
80 80
81 up->name = of_get_property(dp, "model", NULL); 81 up->name = of_get_property(dp, "model", NULL);
82 if (up->name && 0 < strlen(up->name)) 82 if (up->name && 0 < strlen(up->name))
83 up->map.name = up->name; 83 up->map.name = (char *)up->name;
84 84
85 up->map.phys = res->start; 85 up->map.phys = res->start;
86 86
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index b879a66daa9e..524b83b5ebf5 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -20,6 +20,7 @@
20#include <linux/hdreg.h> 20#include <linux/hdreg.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/kthread.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24 25
25static LIST_HEAD(blktrans_majors); 26static LIST_HEAD(blktrans_majors);
@@ -28,9 +29,7 @@ extern struct mutex mtd_table_mutex;
28extern struct mtd_info *mtd_table[]; 29extern struct mtd_info *mtd_table[];
29 30
30struct mtd_blkcore_priv { 31struct mtd_blkcore_priv {
31 struct completion thread_dead; 32 struct task_struct *thread;
32 int exiting;
33 wait_queue_head_t thread_wq;
34 struct request_queue *rq; 33 struct request_queue *rq;
35 spinlock_t queue_lock; 34 spinlock_t queue_lock;
36}; 35};
@@ -83,38 +82,19 @@ static int mtd_blktrans_thread(void *arg)
83 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 82 /* we might get involved when memory gets low, so use PF_MEMALLOC */
84 current->flags |= PF_MEMALLOC | PF_NOFREEZE; 83 current->flags |= PF_MEMALLOC | PF_NOFREEZE;
85 84
86 daemonize("%sd", tr->name);
87
88 /* daemonize() doesn't do this for us since some kernel threads
89 actually want to deal with signals. We can't just call
90 exit_sighand() since that'll cause an oops when we finally
91 do exit. */
92 spin_lock_irq(&current->sighand->siglock);
93 sigfillset(&current->blocked);
94 recalc_sigpending();
95 spin_unlock_irq(&current->sighand->siglock);
96
97 spin_lock_irq(rq->queue_lock); 85 spin_lock_irq(rq->queue_lock);
98 86 while (!kthread_should_stop()) {
99 while (!tr->blkcore_priv->exiting) {
100 struct request *req; 87 struct request *req;
101 struct mtd_blktrans_dev *dev; 88 struct mtd_blktrans_dev *dev;
102 int res = 0; 89 int res = 0;
103 DECLARE_WAITQUEUE(wait, current);
104 90
105 req = elv_next_request(rq); 91 req = elv_next_request(rq);
106 92
107 if (!req) { 93 if (!req) {
108 add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
109 set_current_state(TASK_INTERRUPTIBLE); 94 set_current_state(TASK_INTERRUPTIBLE);
110
111 spin_unlock_irq(rq->queue_lock); 95 spin_unlock_irq(rq->queue_lock);
112
113 schedule(); 96 schedule();
114 remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
115
116 spin_lock_irq(rq->queue_lock); 97 spin_lock_irq(rq->queue_lock);
117
118 continue; 98 continue;
119 } 99 }
120 100
@@ -133,13 +113,13 @@ static int mtd_blktrans_thread(void *arg)
133 } 113 }
134 spin_unlock_irq(rq->queue_lock); 114 spin_unlock_irq(rq->queue_lock);
135 115
136 complete_and_exit(&tr->blkcore_priv->thread_dead, 0); 116 return 0;
137} 117}
138 118
139static void mtd_blktrans_request(struct request_queue *rq) 119static void mtd_blktrans_request(struct request_queue *rq)
140{ 120{
141 struct mtd_blktrans_ops *tr = rq->queuedata; 121 struct mtd_blktrans_ops *tr = rq->queuedata;
142 wake_up(&tr->blkcore_priv->thread_wq); 122 wake_up_process(tr->blkcore_priv->thread);
143} 123}
144 124
145 125
@@ -388,8 +368,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
388 return ret; 368 return ret;
389 } 369 }
390 spin_lock_init(&tr->blkcore_priv->queue_lock); 370 spin_lock_init(&tr->blkcore_priv->queue_lock);
391 init_completion(&tr->blkcore_priv->thread_dead);
392 init_waitqueue_head(&tr->blkcore_priv->thread_wq);
393 371
394 tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock); 372 tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
395 if (!tr->blkcore_priv->rq) { 373 if (!tr->blkcore_priv->rq) {
@@ -403,13 +381,14 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
403 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 381 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
404 tr->blkshift = ffs(tr->blksize) - 1; 382 tr->blkshift = ffs(tr->blksize) - 1;
405 383
406 ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL); 384 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
407 if (ret < 0) { 385 "%sd", tr->name);
386 if (IS_ERR(tr->blkcore_priv->thread)) {
408 blk_cleanup_queue(tr->blkcore_priv->rq); 387 blk_cleanup_queue(tr->blkcore_priv->rq);
409 unregister_blkdev(tr->major, tr->name); 388 unregister_blkdev(tr->major, tr->name);
410 kfree(tr->blkcore_priv); 389 kfree(tr->blkcore_priv);
411 mutex_unlock(&mtd_table_mutex); 390 mutex_unlock(&mtd_table_mutex);
412 return ret; 391 return PTR_ERR(tr->blkcore_priv->thread);
413 } 392 }
414 393
415 INIT_LIST_HEAD(&tr->devs); 394 INIT_LIST_HEAD(&tr->devs);
@@ -432,9 +411,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
432 mutex_lock(&mtd_table_mutex); 411 mutex_lock(&mtd_table_mutex);
433 412
434 /* Clean up the kernel thread */ 413 /* Clean up the kernel thread */
435 tr->blkcore_priv->exiting = 1; 414 kthread_stop(tr->blkcore_priv->thread);
436 wake_up(&tr->blkcore_priv->thread_wq);
437 wait_for_completion(&tr->blkcore_priv->thread_dead);
438 415
439 /* Remove it from the list of active majors */ 416 /* Remove it from the list of active majors */
440 list_del(&tr->list); 417 list_del(&tr->list);
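
The thread handling above is the stock conversion from a hand-rolled daemonize/completion thread to the kthread API. A stripped-down sketch of the lifecycle the driver now depends on (names are illustrative; the request-queue locking is omitted):

#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative worker mirroring the structure of mtd_blktrans_thread():
 * sleep when idle, let wake_up_process() restart it, exit when asked. */
static int example_worker(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* woken by wake_up_process() */
		__set_current_state(TASK_RUNNING);
		/* ... drain whatever work is pending ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Creation:  task = kthread_run(example_worker, data, "%sd", name);
 * Wakeup:    wake_up_process(task);
 * Shutdown:  kthread_stop(task); -- after this kthread_should_stop() is true. */
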
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 1592eac64e57..8c86b802f212 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -553,7 +553,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
553 ops.datbuf = NULL; 553 ops.datbuf = NULL;
554 ops.mode = MTD_OOB_PLACE; 554 ops.mode = MTD_OOB_PLACE;
555 555
556 if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs)) 556 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
557 return -EINVAL; 557 return -EINVAL;
558 558
559 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL); 559 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 2d12dcdd740c..d05873b8c155 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,10 +1,7 @@
1# drivers/mtd/nand/Kconfig 1# drivers/mtd/nand/Kconfig
2# $Id: Kconfig,v 1.35 2005/11/07 11:14:30 gleixner Exp $ 2# $Id: Kconfig,v 1.35 2005/11/07 11:14:30 gleixner Exp $
3 3
4menu "NAND Flash Device Drivers" 4menuconfig MTD_NAND
5 depends on MTD!=n
6
7config MTD_NAND
8 tristate "NAND Device Support" 5 tristate "NAND Device Support"
9 depends on MTD 6 depends on MTD
10 select MTD_NAND_IDS 7 select MTD_NAND_IDS
@@ -13,9 +10,10 @@ config MTD_NAND
13 devices. For further information see 10 devices. For further information see
14 <http://www.linux-mtd.infradead.org/doc/nand.html>. 11 <http://www.linux-mtd.infradead.org/doc/nand.html>.
15 12
13if MTD_NAND
14
16config MTD_NAND_VERIFY_WRITE 15config MTD_NAND_VERIFY_WRITE
17 bool "Verify NAND page writes" 16 bool "Verify NAND page writes"
18 depends on MTD_NAND
19 help 17 help
20 This adds an extra check when data is written to the flash. The 18 This adds an extra check when data is written to the flash. The
21 NAND flash device internally checks only bits transitioning 19 NAND flash device internally checks only bits transitioning
@@ -25,53 +23,61 @@ config MTD_NAND_VERIFY_WRITE
25 23
26config MTD_NAND_ECC_SMC 24config MTD_NAND_ECC_SMC
27 bool "NAND ECC Smart Media byte order" 25 bool "NAND ECC Smart Media byte order"
28 depends on MTD_NAND
29 default n 26 default n
30 help 27 help
31 Software ECC according to the Smart Media Specification. 28 Software ECC according to the Smart Media Specification.
32 The original Linux implementation had byte 0 and 1 swapped. 29 The original Linux implementation had byte 0 and 1 swapped.
33 30
31config MTD_NAND_MUSEUM_IDS
32 bool "Enable chip ids for obsolete ancient NAND devices"
33 depends on MTD_NAND
34 default n
35 help
36 Enable this option only when your board has first generation
37 NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
38 of these chips were reused by later, larger chips.
39
34config MTD_NAND_AUTCPU12 40config MTD_NAND_AUTCPU12
35 tristate "SmartMediaCard on autronix autcpu12 board" 41 tristate "SmartMediaCard on autronix autcpu12 board"
36 depends on MTD_NAND && ARCH_AUTCPU12 42 depends on ARCH_AUTCPU12
37 help 43 help
38 This enables the driver for the autronix autcpu12 board to 44 This enables the driver for the autronix autcpu12 board to
39 access the SmartMediaCard. 45 access the SmartMediaCard.
40 46
41config MTD_NAND_EDB7312 47config MTD_NAND_EDB7312
42 tristate "Support for Cirrus Logic EBD7312 evaluation board" 48 tristate "Support for Cirrus Logic EBD7312 evaluation board"
43 depends on MTD_NAND && ARCH_EDB7312 49 depends on ARCH_EDB7312
44 help 50 help
45 This enables the driver for the Cirrus Logic EBD7312 evaluation 51 This enables the driver for the Cirrus Logic EBD7312 evaluation
46 board to access the onboard NAND Flash. 52 board to access the onboard NAND Flash.
47 53
48config MTD_NAND_H1900 54config MTD_NAND_H1900
49 tristate "iPAQ H1900 flash" 55 tristate "iPAQ H1900 flash"
50 depends on MTD_NAND && ARCH_PXA && MTD_PARTITIONS 56 depends on ARCH_PXA && MTD_PARTITIONS
51 help 57 help
52 This enables the driver for the iPAQ h1900 flash. 58 This enables the driver for the iPAQ h1900 flash.
53 59
54config MTD_NAND_SPIA 60config MTD_NAND_SPIA
55 tristate "NAND Flash device on SPIA board" 61 tristate "NAND Flash device on SPIA board"
56 depends on ARCH_P720T && MTD_NAND 62 depends on ARCH_P720T
57 help 63 help
58 If you had to ask, you don't have one. Say 'N'. 64 If you had to ask, you don't have one. Say 'N'.
59 65
60config MTD_NAND_AMS_DELTA 66config MTD_NAND_AMS_DELTA
61 tristate "NAND Flash device on Amstrad E3" 67 tristate "NAND Flash device on Amstrad E3"
62 depends on MACH_AMS_DELTA && MTD_NAND 68 depends on MACH_AMS_DELTA
63 help 69 help
64 Support for NAND flash on Amstrad E3 (Delta). 70 Support for NAND flash on Amstrad E3 (Delta).
65 71
66config MTD_NAND_TOTO 72config MTD_NAND_TOTO
67 tristate "NAND Flash device on TOTO board" 73 tristate "NAND Flash device on TOTO board"
68 depends on ARCH_OMAP && MTD_NAND && BROKEN 74 depends on ARCH_OMAP && BROKEN
69 help 75 help
70 Support for NAND flash on Texas Instruments Toto platform. 76 Support for NAND flash on Texas Instruments Toto platform.
71 77
72config MTD_NAND_TS7250 78config MTD_NAND_TS7250
73 tristate "NAND Flash device on TS-7250 board" 79 tristate "NAND Flash device on TS-7250 board"
74 depends on MACH_TS72XX && MTD_NAND 80 depends on MACH_TS72XX
75 help 81 help
76 Support for NAND flash on Technologic Systems TS-7250 platform. 82 Support for NAND flash on Technologic Systems TS-7250 platform.
77 83
@@ -80,14 +86,14 @@ config MTD_NAND_IDS
80 86
81config MTD_NAND_AU1550 87config MTD_NAND_AU1550
82 tristate "Au1550/1200 NAND support" 88 tristate "Au1550/1200 NAND support"
83 depends on (SOC_AU1200 || SOC_AU1550) && MTD_NAND 89 depends on SOC_AU1200 || SOC_AU1550
84 help 90 help
85 This enables the driver for the NAND flash controller on the 91 This enables the driver for the NAND flash controller on the
86 AMD/Alchemy 1550 SOC. 92 AMD/Alchemy 1550 SOC.
87 93
88config MTD_NAND_RTC_FROM4 94config MTD_NAND_RTC_FROM4
89 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)" 95 tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
90 depends on MTD_NAND && SH_SOLUTION_ENGINE 96 depends on SH_SOLUTION_ENGINE
91 select REED_SOLOMON 97 select REED_SOLOMON
92 select REED_SOLOMON_DEC8 98 select REED_SOLOMON_DEC8
93 select BITREVERSE 99 select BITREVERSE
@@ -97,13 +103,13 @@ config MTD_NAND_RTC_FROM4
97 103
98config MTD_NAND_PPCHAMELEONEVB 104config MTD_NAND_PPCHAMELEONEVB
99 tristate "NAND Flash device on PPChameleonEVB board" 105 tristate "NAND Flash device on PPChameleonEVB board"
100 depends on PPCHAMELEONEVB && MTD_NAND && BROKEN 106 depends on PPCHAMELEONEVB && BROKEN
101 help 107 help
102 This enables the NAND flash driver on the PPChameleon EVB Board. 108 This enables the NAND flash driver on the PPChameleon EVB Board.
103 109
104config MTD_NAND_S3C2410 110config MTD_NAND_S3C2410
105 tristate "NAND Flash support for S3C2410/S3C2440 SoC" 111 tristate "NAND Flash support for S3C2410/S3C2440 SoC"
106 depends on ARCH_S3C2410 && MTD_NAND 112 depends on ARCH_S3C2410
107 help 113 help
108 This enables the NAND flash controller on the S3C2410 and S3C2440 114 This enables the NAND flash controller on the S3C2410 and S3C2440
109 SoCs 115 SoCs
@@ -128,7 +134,7 @@ config MTD_NAND_S3C2410_HWECC
128 134
129config MTD_NAND_NDFC 135config MTD_NAND_NDFC
130 tristate "NDFC NanD Flash Controller" 136 tristate "NDFC NanD Flash Controller"
131 depends on MTD_NAND && 44x 137 depends on 44x
132 select MTD_NAND_ECC_SMC 138 select MTD_NAND_ECC_SMC
133 help 139 help
134 NDFC Nand Flash Controllers are integrated in EP44x SoCs 140 NDFC Nand Flash Controllers are integrated in EP44x SoCs
@@ -145,7 +151,7 @@ config MTD_NAND_S3C2410_CLKSTOP
145 151
146config MTD_NAND_DISKONCHIP 152config MTD_NAND_DISKONCHIP
147 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" 153 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
148 depends on MTD_NAND && EXPERIMENTAL 154 depends on EXPERIMENTAL
149 select REED_SOLOMON 155 select REED_SOLOMON
150 select REED_SOLOMON_DEC16 156 select REED_SOLOMON_DEC16
151 help 157 help
@@ -215,11 +221,11 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
215 221
216config MTD_NAND_SHARPSL 222config MTD_NAND_SHARPSL
217 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 223 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
218 depends on MTD_NAND && ARCH_PXA 224 depends on ARCH_PXA
219 225
220config MTD_NAND_BASLER_EXCITE 226config MTD_NAND_BASLER_EXCITE
221 tristate "Support for NAND Flash on Basler eXcite" 227 tristate "Support for NAND Flash on Basler eXcite"
222 depends on MTD_NAND && BASLER_EXCITE 228 depends on BASLER_EXCITE
223 help 229 help
224 This enables the driver for the NAND flash device found on the 230 This enables the driver for the NAND flash device found on the
225 Basler eXcite Smart Camera. If built as a module, the driver 231 Basler eXcite Smart Camera. If built as a module, the driver
@@ -227,14 +233,14 @@ config MTD_NAND_BASLER_EXCITE
227 233
228config MTD_NAND_CAFE 234config MTD_NAND_CAFE
229 tristate "NAND support for OLPC CAFÉ chip" 235 tristate "NAND support for OLPC CAFÉ chip"
230 depends on MTD_NAND && PCI 236 depends on PCI
231 help 237 help
232 Use NAND flash attached to the CAFÉ chip designed for the $100 238 Use NAND flash attached to the CAFÉ chip designed for the $100
233 laptop. 239 laptop.
234 240
235config MTD_NAND_CS553X 241config MTD_NAND_CS553X
236 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)" 242 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
237 depends on MTD_NAND && X86_32 && (X86_PC || X86_GENERICARCH) 243 depends on X86_32 && (X86_PC || X86_GENERICARCH)
238 help 244 help
239 The CS553x companion chips for the AMD Geode processor 245 The CS553x companion chips for the AMD Geode processor
240 include NAND flash controllers with built-in hardware ECC 246 include NAND flash controllers with built-in hardware ECC
@@ -247,16 +253,21 @@ config MTD_NAND_CS553X
247 253
248config MTD_NAND_AT91 254config MTD_NAND_AT91
249 bool "Support for NAND Flash / SmartMedia on AT91" 255 bool "Support for NAND Flash / SmartMedia on AT91"
250 depends on MTD_NAND && ARCH_AT91 256 depends on ARCH_AT91
251 help 257 help
252 Enables support for NAND Flash / Smart Media Card interface 258 Enables support for NAND Flash / Smart Media Card interface
253 on Atmel AT91 processors. 259 on Atmel AT91 processors.
254 260
261config MTD_NAND_CM_X270
262 tristate "Support for NAND Flash on CM-X270 modules"
263 depends on MTD_NAND && MACH_ARMCORE
264
265
255config MTD_NAND_NANDSIM 266config MTD_NAND_NANDSIM
256 tristate "Support for NAND Flash Simulator" 267 tristate "Support for NAND Flash Simulator"
257 depends on MTD_NAND && MTD_PARTITIONS 268 depends on MTD_PARTITIONS
258 help 269 help
259 The simulator may simulate various NAND flash chips for the 270 The simulator may simulate various NAND flash chips for the
260 MTD nand layer. 271 MTD nand layer.
261 272
262endmenu 273endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 80f1dfc77949..6872031a3fb2 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
24obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o 24obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o 25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
26obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o 26obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
27obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
28 29
29nand-objs := nand_base.o nand_bbt.o 30nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/cafe.c b/drivers/mtd/nand/cafe.c
index fd6bb3ed40df..c328a7514510 100644
--- a/drivers/mtd/nand/cafe.c
+++ b/drivers/mtd/nand/cafe.c
@@ -530,7 +530,6 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
530{ 530{
531 struct mtd_info *mtd; 531 struct mtd_info *mtd;
532 struct cafe_priv *cafe; 532 struct cafe_priv *cafe;
533 uint32_t timing1, timing2, timing3;
534 uint32_t ctrl; 533 uint32_t ctrl;
535 int err = 0; 534 int err = 0;
536 535
@@ -587,21 +586,19 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
587 } 586 }
588 587
589 if (numtimings == 3) { 588 if (numtimings == 3) {
590 timing1 = timing[0];
591 timing2 = timing[1];
592 timing3 = timing[2];
593 cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n", 589 cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
594 timing1, timing2, timing3); 590 timing[0], timing[1], timing[2]);
595 } else { 591 } else {
596 timing1 = cafe_readl(cafe, NAND_TIMING1); 592 timing[0] = cafe_readl(cafe, NAND_TIMING1);
597 timing2 = cafe_readl(cafe, NAND_TIMING2); 593 timing[1] = cafe_readl(cafe, NAND_TIMING2);
598 timing3 = cafe_readl(cafe, NAND_TIMING3); 594 timing[2] = cafe_readl(cafe, NAND_TIMING3);
599 595
600 if (timing1 | timing2 | timing3) { 596 if (timing[0] | timing[1] | timing[2]) {
601 cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n", timing1, timing2, timing3); 597 cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
598 timing[0], timing[1], timing[2]);
602 } else { 599 } else {
603 dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n"); 600 dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
604 timing1 = timing2 = timing3 = 0xffffffff; 601 timing[0] = timing[1] = timing[2] = 0xffffffff;
605 } 602 }
606 } 603 }
607 604
@@ -609,9 +606,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
609 cafe_writel(cafe, 1, NAND_RESET); 606 cafe_writel(cafe, 1, NAND_RESET);
610 cafe_writel(cafe, 0, NAND_RESET); 607 cafe_writel(cafe, 0, NAND_RESET);
611 608
612 cafe_writel(cafe, timing1, NAND_TIMING1); 609 cafe_writel(cafe, timing[0], NAND_TIMING1);
613 cafe_writel(cafe, timing2, NAND_TIMING2); 610 cafe_writel(cafe, timing[1], NAND_TIMING2);
614 cafe_writel(cafe, timing3, NAND_TIMING3); 611 cafe_writel(cafe, timing[2], NAND_TIMING3);
615 612
616 cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK); 613 cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
617 err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED, 614 err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
new file mode 100644
index 000000000000..cb663ef245d5
--- /dev/null
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -0,0 +1,267 @@
1/*
2 * linux/drivers/mtd/nand/cmx270-nand.c
3 *
4 * Copyright (C) 2006 Compulab, Ltd.
5 * Mike Rapoport <mike@compulab.co.il>
6 *
7 * Derived from drivers/mtd/nand/h1910.c
8 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
9 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 * Overview:
17 * This is a device driver for the NAND flash device found on the
18 * CM-X270 board.
19 */
20
21#include <linux/mtd/nand.h>
22#include <linux/mtd/partitions.h>
23
24#include <asm/io.h>
25#include <asm/irq.h>
26
27#include <asm/arch/hardware.h>
28#include <asm/arch/pxa-regs.h>
29
30#define GPIO_NAND_CS (11)
31#define GPIO_NAND_RB (89)
32
33/* This macro is needed to ensure in-order operation of GPIO and local
34 * bus accesses. Without both the asm barrier and the dummy uncached read
35 * there are states in which NAND access is broken. I have looked for such
36 * a macro in include/asm-arm but found nothing appropriate.
37 * dmac_clean_range is close, but it performs an unnecessary cache
38 * invalidation here and cannot be used from a module.
39 */
40#define DRAIN_WB() \
41 do { \
42 unsigned char dummy; \
43 asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \
44 dummy=*((unsigned char*)UNCACHED_ADDR); \
45 } while(0)
46
47/* MTD structure for CM-X270 board */
48static struct mtd_info *cmx270_nand_mtd;
49
50/* remapped I/O address of the device */
51static void __iomem *cmx270_nand_io;
52
53/*
54 * Define static partitions for flash device
55 */
56static struct mtd_partition partition_info[] = {
57 [0] = {
58 .name = "cmx270-0",
59 .offset = 0,
60 .size = MTDPART_SIZ_FULL
61 }
62};
63#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
64
65const char *part_probes[] = { "cmdlinepart", NULL };
66
67static u_char cmx270_read_byte(struct mtd_info *mtd)
68{
69 struct nand_chip *this = mtd->priv;
70
71 return (readl(this->IO_ADDR_R) >> 16);
72}
73
74static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
75{
76 int i;
77 struct nand_chip *this = mtd->priv;
78
79 for (i=0; i<len; i++)
80 writel((*buf++ << 16), this->IO_ADDR_W);
81}
82
83static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
84{
85 int i;
86 struct nand_chip *this = mtd->priv;
87
88 for (i=0; i<len; i++)
89 *buf++ = readl(this->IO_ADDR_R) >> 16;
90}
91
92static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
93{
94 int i;
95 struct nand_chip *this = mtd->priv;
96
97 for (i=0; i<len; i++)
98 if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16))
99 return -EFAULT;
100
101 return 0;
102}
103
104static inline void nand_cs_on(void)
105{
106 GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS);
107}
108
109static void nand_cs_off(void)
110{
111 DRAIN_WB();
112
113 GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS);
114}
115
116/*
117 * hardware specific access to control-lines
118 */
119static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
120 unsigned int ctrl)
121{
122 struct nand_chip* this = mtd->priv;
123 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
124
125 DRAIN_WB();
126
127 if (ctrl & NAND_CTRL_CHANGE) {
128 if ( ctrl & NAND_ALE )
129 nandaddr |= (1 << 3);
130 else
131 nandaddr &= ~(1 << 3);
132 if ( ctrl & NAND_CLE )
133 nandaddr |= (1 << 2);
134 else
135 nandaddr &= ~(1 << 2);
136 if ( ctrl & NAND_NCE )
137 nand_cs_on();
138 else
139 nand_cs_off();
140 }
141
142 DRAIN_WB();
143 this->IO_ADDR_W = (void __iomem*)nandaddr;
144 if (dat != NAND_CMD_NONE)
145 writel((dat << 16), this->IO_ADDR_W);
146
147 DRAIN_WB();
148}
149
150/*
151 * read device ready pin
152 */
153static int cmx270_device_ready(struct mtd_info *mtd)
154{
155 DRAIN_WB();
156
157 return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB));
158}
159
160/*
161 * Main initialization routine
162 */
163static int cmx270_init(void)
164{
165 struct nand_chip *this;
166 const char *part_type;
167 struct mtd_partition *mtd_parts;
168 int mtd_parts_nb = 0;
169 int ret;
170
171 /* Allocate memory for MTD device structure and private data */
172 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
173 sizeof(struct nand_chip),
174 GFP_KERNEL);
175 if (!cmx270_nand_mtd) {
176 printk("Unable to allocate CM-X270 NAND MTD device structure.\n");
177 return -ENOMEM;
178 }
179
180 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
181 if (!cmx270_nand_io) {
182 printk("Unable to ioremap NAND device\n");
183 ret = -EINVAL;
184 goto err1;
185 }
186
187 /* Get pointer to private data */
188 this = (struct nand_chip *)(&cmx270_nand_mtd[1]);
189
190 /* Link the private data with the MTD structure */
191 cmx270_nand_mtd->owner = THIS_MODULE;
192 cmx270_nand_mtd->priv = this;
193
194 /* insert callbacks */
195 this->IO_ADDR_R = cmx270_nand_io;
196 this->IO_ADDR_W = cmx270_nand_io;
197 this->cmd_ctrl = cmx270_hwcontrol;
198 this->dev_ready = cmx270_device_ready;
199
200 /* 15 us command delay time */
201 this->chip_delay = 20;
202 this->ecc.mode = NAND_ECC_SOFT;
203
204 /* read/write functions */
205 this->read_byte = cmx270_read_byte;
206 this->read_buf = cmx270_read_buf;
207 this->write_buf = cmx270_write_buf;
208 this->verify_buf = cmx270_verify_buf;
209
210 /* Scan to find existence of the device */
211 if (nand_scan (cmx270_nand_mtd, 1)) {
212 printk(KERN_NOTICE "No NAND device\n");
213 ret = -ENXIO;
214 goto err2;
215 }
216
217#ifdef CONFIG_MTD_CMDLINE_PARTS
218 mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
219 &mtd_parts, 0);
220 if (mtd_parts_nb > 0)
221 part_type = "command line";
222 else
223 mtd_parts_nb = 0;
224#endif
225 if (!mtd_parts_nb) {
226 mtd_parts = partition_info;
227 mtd_parts_nb = NUM_PARTITIONS;
228 part_type = "static";
229 }
230
231 /* Register the partitions */
232 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
233 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
234 if (ret)
235 goto err2;
236
237 /* Return happy */
238 return 0;
239
240err2:
241 iounmap(cmx270_nand_io);
242err1:
243 kfree(cmx270_nand_mtd);
244
245 return ret;
246
247}
248module_init(cmx270_init);
249
250/*
251 * Clean up routine
252 */
253static void cmx270_cleanup(void)
254{
255 /* Release resources, unregister device */
256 nand_release(cmx270_nand_mtd);
257
258 iounmap(cmx270_nand_io);
259
260 /* Free the MTD device structure */
261 kfree (cmx270_nand_mtd);
262}
263module_exit(cmx270_cleanup);
264
265MODULE_LICENSE("GPL");
266MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
267MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
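
The least obvious part of the driver above is cmx270_hwcontrol(): CLE and ALE are wired to local-bus address bits 2 and 3, so the target address of the next writel() is what selects a command, address or data cycle. A hypothetical helper isolating just that encoding (not part of the driver):

#include <linux/mtd/nand.h>

/* Illustrative only: compute the write address for the next bus cycle from
 * the nand_chip control bits, as cmx270_hwcontrol() does inline. ALE drives
 * address bit 3, CLE drives address bit 2; plain data cycles set neither. */
static unsigned long cmx270_cycle_address(unsigned long base, unsigned int ctrl)
{
	unsigned long addr = base & ~((1UL << 3) | (1UL << 2));

	if (ctrl & NAND_ALE)
		addr |= 1UL << 3;
	if (ctrl & NAND_CLE)
		addr |= 1UL << 2;

	return addr;
}
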
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 6af37b8cff65..04de315e4937 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -312,7 +312,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
312 /* Select the NAND device */ 312 /* Select the NAND device */
313 chip->select_chip(mtd, chipnr); 313 chip->select_chip(mtd, chipnr);
314 } else 314 } else
315 page = (int)ofs; 315 page = (int)(ofs >> chip->page_shift);
316 316
317 if (chip->options & NAND_BUSWIDTH_16) { 317 if (chip->options & NAND_BUSWIDTH_16) {
318 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE, 318 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
@@ -350,7 +350,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
350 int block, ret; 350 int block, ret;
351 351
352 /* Get block number */ 352 /* Get block number */
353 block = ((int)ofs) >> chip->bbt_erase_shift; 353 block = (int)(ofs >> chip->bbt_erase_shift);
354 if (chip->bbt) 354 if (chip->bbt)
355 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 355 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
356 356
@@ -771,7 +771,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
771 uint8_t *ecc_code = chip->buffers->ecccode; 771 uint8_t *ecc_code = chip->buffers->ecccode;
772 int *eccpos = chip->ecc.layout->eccpos; 772 int *eccpos = chip->ecc.layout->eccpos;
773 773
774 nand_read_page_raw(mtd, chip, buf); 774 chip->ecc.read_page_raw(mtd, chip, buf);
775 775
776 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 776 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
777 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 777 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1426,7 +1426,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1426 for (i = 0; i < chip->ecc.total; i++) 1426 for (i = 0; i < chip->ecc.total; i++)
1427 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1427 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1428 1428
1429 nand_write_page_raw(mtd, chip, buf); 1429 chip->ecc.write_page_raw(mtd, chip, buf);
1430} 1430}
1431 1431
1432/** 1432/**
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 2e2cdf2fc91d..2fc674a190cf 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -24,6 +24,8 @@
24* 512 512 Byte page size 24* 512 512 Byte page size
25*/ 25*/
26struct nand_flash_dev nand_flash_ids[] = { 26struct nand_flash_dev nand_flash_ids[] = {
27
28#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
27 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, 29 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
28 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, 30 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
29 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, 31 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
@@ -39,6 +41,7 @@ struct nand_flash_dev nand_flash_ids[] = {
39 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, 41 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
40 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 42 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
41 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 43 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
44#endif
42 45
43 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, 46 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
44 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, 47 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
@@ -137,6 +140,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
137 {NAND_MFR_RENESAS, "Renesas"}, 140 {NAND_MFR_RENESAS, "Renesas"},
138 {NAND_MFR_STMICRO, "ST Micro"}, 141 {NAND_MFR_STMICRO, "ST Micro"},
139 {NAND_MFR_HYNIX, "Hynix"}, 142 {NAND_MFR_HYNIX, "Hynix"},
143 {NAND_MFR_MICRON, "Micron"},
140 {0x0, "Unknown"} 144 {0x0, "Unknown"}
141}; 145};
142 146
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index c3bca9590ad2..205df0f771fe 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -37,6 +37,8 @@
37#include <linux/mtd/nand.h> 37#include <linux/mtd/nand.h>
38#include <linux/mtd/partitions.h> 38#include <linux/mtd/partitions.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/list.h>
41#include <linux/random.h>
40 42
41/* Default simulator parameters values */ 43/* Default simulator parameters values */
42#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ 44#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -90,6 +92,15 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
90static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; 92static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
91static uint log = CONFIG_NANDSIM_LOG; 93static uint log = CONFIG_NANDSIM_LOG;
92static uint dbg = CONFIG_NANDSIM_DBG; 94static uint dbg = CONFIG_NANDSIM_DBG;
95static unsigned long parts[MAX_MTD_DEVICES];
96static unsigned int parts_num;
97static char *badblocks = NULL;
98static char *weakblocks = NULL;
99static char *weakpages = NULL;
100static unsigned int bitflips = 0;
101static char *gravepages = NULL;
102static unsigned int rptwear = 0;
103static unsigned int overridesize = 0;
93 104
94module_param(first_id_byte, uint, 0400); 105module_param(first_id_byte, uint, 0400);
95module_param(second_id_byte, uint, 0400); 106module_param(second_id_byte, uint, 0400);
@@ -104,8 +115,16 @@ module_param(bus_width, uint, 0400);
104module_param(do_delays, uint, 0400); 115module_param(do_delays, uint, 0400);
105module_param(log, uint, 0400); 116module_param(log, uint, 0400);
106module_param(dbg, uint, 0400); 117module_param(dbg, uint, 0400);
107 118module_param_array(parts, ulong, &parts_num, 0400);
108MODULE_PARM_DESC(first_id_byte, "The fist byte returned by NAND Flash 'read ID' command (manufaturer ID)"); 119module_param(badblocks, charp, 0400);
120module_param(weakblocks, charp, 0400);
121module_param(weakpages, charp, 0400);
122module_param(bitflips, uint, 0400);
123module_param(gravepages, charp, 0400);
124module_param(rptwear, uint, 0400);
125module_param(overridesize, uint, 0400);
126
127MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
109MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); 128MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
110MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command"); 129MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
111MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command"); 130MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
@@ -118,6 +137,23 @@ MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
118MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); 137MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
119MODULE_PARM_DESC(log, "Perform logging if not zero"); 138MODULE_PARM_DESC(log, "Perform logging if not zero");
120MODULE_PARM_DESC(dbg, "Output debug information if not zero"); 139MODULE_PARM_DESC(dbg, "Output debug information if not zero");
140MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
141/* Page and erase block positions for the following parameters are independent of any partitions */
142MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
143MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
144 " separated by commas e.g. 113:2 means eb 113"
145 " can be erased only twice before failing");
146MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
147 " separated by commas e.g. 1401:2 means page 1401"
148 " can be written only twice before failing");
149MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
150MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
151 " separated by commas e.g. 1401:2 means page 1401"
152 " can be read only twice before failing");
153MODULE_PARM_DESC(rptwear, "Number of erases between wear reports, if not zero");
154MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
155 "The size is specified in erase blocks and as the exponent of a power of two"
156 " e.g. 5 means a size of 32 erase blocks");
121 157
122/* The largest possible page size */ 158/* The largest possible page size */
123#define NS_LARGEST_PAGE_SIZE 2048 159#define NS_LARGEST_PAGE_SIZE 2048
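The new nandsim parameters above use a "block[:count]" list syntax (e.g. weakblocks=113:2,200). The following userspace sketch, with a made-up helper name and sample input, only illustrates the format the descriptions specify:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: decode a "block[:count]" comma-separated list such as "113:2,200".
     * The default count of 3 mirrors the parameter descriptions above. */
    static void parse_weak_list(const char *s)
    {
            char *end;

            while (*s) {
                    unsigned long block = strtoul(s, &end, 0);
                    unsigned long count = 3;
                    if (end == s)
                            break;          /* not a number, stop */
                    if (*end == ':')
                            count = strtoul(end + 1, &end, 0);
                    printf("erase block %lu fails after %lu erases\n", block, count);
                    s = (*end == ',') ? end + 1 : end;
            }
    }

    int main(void)
    {
            parse_weak_list("113:2,200");   /* hypothetical module argument */
            return 0;
    }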
@@ -131,9 +167,11 @@ MODULE_PARM_DESC(dbg, "Output debug information if not zero");
131#define NS_DBG(args...) \ 167#define NS_DBG(args...) \
132 do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0) 168 do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
133#define NS_WARN(args...) \ 169#define NS_WARN(args...) \
134 do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warnig: " args); } while(0) 170 do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
135#define NS_ERR(args...) \ 171#define NS_ERR(args...) \
136 do { printk(KERN_ERR NS_OUTPUT_PREFIX " errorr: " args); } while(0) 172 do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
173#define NS_INFO(args...) \
174 do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
137 175
138/* Busy-wait delay macros (microseconds, milliseconds) */ 176/* Busy-wait delay macros (microseconds, milliseconds) */
139#define NS_UDELAY(us) \ 177#define NS_UDELAY(us) \
@@ -238,7 +276,8 @@ union ns_mem {
238 * The structure which describes all the internal simulator data. 276 * The structure which describes all the internal simulator data.
239 */ 277 */
240struct nandsim { 278struct nandsim {
241 struct mtd_partition part; 279 struct mtd_partition partitions[MAX_MTD_DEVICES];
280 unsigned int nbparts;
242 281
243 uint busw; /* flash chip bus width (8 or 16) */ 282 uint busw; /* flash chip bus width (8 or 16) */
244 u_char ids[4]; /* chip's ID bytes */ 283 u_char ids[4]; /* chip's ID bytes */
@@ -338,6 +377,38 @@ static struct nandsim_operations {
338 STATE_DATAOUT, STATE_READY}} 377 STATE_DATAOUT, STATE_READY}}
339}; 378};
340 379
380struct weak_block {
381 struct list_head list;
382 unsigned int erase_block_no;
383 unsigned int max_erases;
384 unsigned int erases_done;
385};
386
387static LIST_HEAD(weak_blocks);
388
389struct weak_page {
390 struct list_head list;
391 unsigned int page_no;
392 unsigned int max_writes;
393 unsigned int writes_done;
394};
395
396static LIST_HEAD(weak_pages);
397
398struct grave_page {
399 struct list_head list;
400 unsigned int page_no;
401 unsigned int max_reads;
402 unsigned int reads_done;
403};
404
405static LIST_HEAD(grave_pages);
406
407static unsigned long *erase_block_wear = NULL;
408static unsigned int wear_eb_count = 0;
409static unsigned long total_wear = 0;
410static unsigned int rptwear_cnt = 0;
411
341/* MTD structure for NAND controller */ 412/* MTD structure for NAND controller */
342static struct mtd_info *nsmtd; 413static struct mtd_info *nsmtd;
343 414
@@ -381,6 +452,13 @@ static void free_device(struct nandsim *ns)
381 } 452 }
382} 453}
383 454
455static char *get_partition_name(int i)
456{
457 char buf[64];
458 sprintf(buf, "NAND simulator partition %d", i);
459 return kstrdup(buf, GFP_KERNEL);
460}
461
384/* 462/*
385 * Initialize the nandsim structure. 463 * Initialize the nandsim structure.
386 * 464 *
@@ -390,7 +468,9 @@ static int init_nandsim(struct mtd_info *mtd)
390{ 468{
391 struct nand_chip *chip = (struct nand_chip *)mtd->priv; 469 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
392 struct nandsim *ns = (struct nandsim *)(chip->priv); 470 struct nandsim *ns = (struct nandsim *)(chip->priv);
393 int i; 471 int i, ret = 0;
472 u_int32_t remains;
473 u_int32_t next_offset;
394 474
395 if (NS_IS_INITIALIZED(ns)) { 475 if (NS_IS_INITIALIZED(ns)) {
396 NS_ERR("init_nandsim: nandsim is already initialized\n"); 476 NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -448,6 +528,40 @@ static int init_nandsim(struct mtd_info *mtd)
448 } 528 }
449 } 529 }
450 530
531 /* Fill the partition_info structure */
532 if (parts_num > ARRAY_SIZE(ns->partitions)) {
533 NS_ERR("too many partitions.\n");
534 ret = -EINVAL;
535 goto error;
536 }
537 remains = ns->geom.totsz;
538 next_offset = 0;
539 for (i = 0; i < parts_num; ++i) {
540 unsigned long part = parts[i];
541 if (!part || part > remains / ns->geom.secsz) {
542 NS_ERR("bad partition size.\n");
543 ret = -EINVAL;
544 goto error;
545 }
546 ns->partitions[i].name = get_partition_name(i);
547 ns->partitions[i].offset = next_offset;
548 ns->partitions[i].size = part * ns->geom.secsz;
549 next_offset += ns->partitions[i].size;
550 remains -= ns->partitions[i].size;
551 }
552 ns->nbparts = parts_num;
553 if (remains) {
554 if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
555 NS_ERR("too many partitions.\n");
556 ret = -EINVAL;
557 goto error;
558 }
559 ns->partitions[i].name = get_partition_name(i);
560 ns->partitions[i].offset = next_offset;
561 ns->partitions[i].size = remains;
562 ns->nbparts += 1;
563 }
564
451 /* Detect how many ID bytes the NAND chip outputs */ 565 /* Detect how many ID bytes the NAND chip outputs */
452 for (i = 0; nand_flash_ids[i].name != NULL; i++) { 566 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
453 if (second_id_byte != nand_flash_ids[i].id) 567 if (second_id_byte != nand_flash_ids[i].id)
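The partition table filled in above can be summarised outside the kernel as follows (a sketch under the same rules: sizes come from 'parts' in erase blocks, offsets accumulate, and any leftover space becomes one extra partition; the names here are made up):

    /* Sketch: derive a partition layout from erase-block counts, mirroring the
     * checks in the hunk above ("bad partition size", leftover -> extra part). */
    struct part_sketch { unsigned long long offset, size; };

    static int layout_parts(unsigned long long total, unsigned long long ebsz,
                            const unsigned long *ebs, int n, struct part_sketch *out)
    {
            unsigned long long next = 0, remains = total;
            int i;

            for (i = 0; i < n; i++) {
                    if (!ebs[i] || ebs[i] > remains / ebsz)
                            return -1;                   /* bad partition size */
                    out[i].offset = next;
                    out[i].size = ebs[i] * ebsz;
                    next += out[i].size;
                    remains -= out[i].size;
            }
            if (remains) {                               /* leftover space */
                    out[i].offset = next;
                    out[i].size = remains;
                    return n + 1;
            }
            return n;
    }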
@@ -474,7 +588,7 @@ static int init_nandsim(struct mtd_info *mtd)
474 printk("sector address bytes: %u\n", ns->geom.secaddrbytes); 588 printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
475 printk("options: %#x\n", ns->options); 589 printk("options: %#x\n", ns->options);
476 590
477 if (alloc_device(ns) != 0) 591 if ((ret = alloc_device(ns)) != 0)
478 goto error; 592 goto error;
479 593
480 /* Allocate / initialize the internal buffer */ 594 /* Allocate / initialize the internal buffer */
@@ -482,21 +596,17 @@ static int init_nandsim(struct mtd_info *mtd)
482 if (!ns->buf.byte) { 596 if (!ns->buf.byte) {
483 NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n", 597 NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
484 ns->geom.pgszoob); 598 ns->geom.pgszoob);
599 ret = -ENOMEM;
485 goto error; 600 goto error;
486 } 601 }
487 memset(ns->buf.byte, 0xFF, ns->geom.pgszoob); 602 memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
488 603
489 /* Fill the partition_info structure */
490 ns->part.name = "NAND simulator partition";
491 ns->part.offset = 0;
492 ns->part.size = ns->geom.totsz;
493
494 return 0; 604 return 0;
495 605
496error: 606error:
497 free_device(ns); 607 free_device(ns);
498 608
499 return -ENOMEM; 609 return ret;
500} 610}
501 611
502/* 612/*
@@ -510,6 +620,287 @@ static void free_nandsim(struct nandsim *ns)
510 return; 620 return;
511} 621}
512 622
623static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
624{
625 char *w;
626 int zero_ok;
627 unsigned int erase_block_no;
628 loff_t offset;
629
630 if (!badblocks)
631 return 0;
632 w = badblocks;
633 do {
634 zero_ok = (*w == '0' ? 1 : 0);
635 erase_block_no = simple_strtoul(w, &w, 0);
636 if (!zero_ok && !erase_block_no) {
637 NS_ERR("invalid badblocks.\n");
638 return -EINVAL;
639 }
640 offset = erase_block_no * ns->geom.secsz;
641 if (mtd->block_markbad(mtd, offset)) {
642 NS_ERR("invalid badblocks.\n");
643 return -EINVAL;
644 }
645 if (*w == ',')
646 w += 1;
647 } while (*w);
648 return 0;
649}
650
651static int parse_weakblocks(void)
652{
653 char *w;
654 int zero_ok;
655 unsigned int erase_block_no;
656 unsigned int max_erases;
657 struct weak_block *wb;
658
659 if (!weakblocks)
660 return 0;
661 w = weakblocks;
662 do {
663 zero_ok = (*w == '0' ? 1 : 0);
664 erase_block_no = simple_strtoul(w, &w, 0);
665 if (!zero_ok && !erase_block_no) {
666 NS_ERR("invalid weakblocks.\n");
667 return -EINVAL;
668 }
669 max_erases = 3;
670 if (*w == ':') {
671 w += 1;
672 max_erases = simple_strtoul(w, &w, 0);
673 }
674 if (*w == ',')
675 w += 1;
676 wb = kzalloc(sizeof(*wb), GFP_KERNEL);
677 if (!wb) {
678 NS_ERR("unable to allocate memory.\n");
679 return -ENOMEM;
680 }
681 wb->erase_block_no = erase_block_no;
682 wb->max_erases = max_erases;
683 list_add(&wb->list, &weak_blocks);
684 } while (*w);
685 return 0;
686}
687
688static int erase_error(unsigned int erase_block_no)
689{
690 struct weak_block *wb;
691
692 list_for_each_entry(wb, &weak_blocks, list)
693 if (wb->erase_block_no == erase_block_no) {
694 if (wb->erases_done >= wb->max_erases)
695 return 1;
696 wb->erases_done += 1;
697 return 0;
698 }
699 return 0;
700}
701
702static int parse_weakpages(void)
703{
704 char *w;
705 int zero_ok;
706 unsigned int page_no;
707 unsigned int max_writes;
708 struct weak_page *wp;
709
710 if (!weakpages)
711 return 0;
712 w = weakpages;
713 do {
714 zero_ok = (*w == '0' ? 1 : 0);
715 page_no = simple_strtoul(w, &w, 0);
716 if (!zero_ok && !page_no) {
717 NS_ERR("invalid weakpages.\n");
718 return -EINVAL;
719 }
720 max_writes = 3;
721 if (*w == ':') {
722 w += 1;
723 max_writes = simple_strtoul(w, &w, 0);
724 }
725 if (*w == ',')
726 w += 1;
727 wp = kzalloc(sizeof(*wp), GFP_KERNEL);
728 if (!wp) {
729 NS_ERR("unable to allocate memory.\n");
730 return -ENOMEM;
731 }
732 wp->page_no = page_no;
733 wp->max_writes = max_writes;
734 list_add(&wp->list, &weak_pages);
735 } while (*w);
736 return 0;
737}
738
739static int write_error(unsigned int page_no)
740{
741 struct weak_page *wp;
742
743 list_for_each_entry(wp, &weak_pages, list)
744 if (wp->page_no == page_no) {
745 if (wp->writes_done >= wp->max_writes)
746 return 1;
747 wp->writes_done += 1;
748 return 0;
749 }
750 return 0;
751}
752
753static int parse_gravepages(void)
754{
755 char *g;
756 int zero_ok;
757 unsigned int page_no;
758 unsigned int max_reads;
759 struct grave_page *gp;
760
761 if (!gravepages)
762 return 0;
763 g = gravepages;
764 do {
765 zero_ok = (*g == '0' ? 1 : 0);
766 page_no = simple_strtoul(g, &g, 0);
767 if (!zero_ok && !page_no) {
768 NS_ERR("invalid gravepages.\n");
769 return -EINVAL;
770 }
771 max_reads = 3;
772 if (*g == ':') {
773 g += 1;
774 max_reads = simple_strtoul(g, &g, 0);
775 }
776 if (*g == ',')
777 g += 1;
778 gp = kzalloc(sizeof(*gp), GFP_KERNEL);
779 if (!gp) {
780 NS_ERR("unable to allocate memory.\n");
781 return -ENOMEM;
782 }
783 gp->page_no = page_no;
784 gp->max_reads = max_reads;
785 list_add(&gp->list, &grave_pages);
786 } while (*g);
787 return 0;
788}
789
790static int read_error(unsigned int page_no)
791{
792 struct grave_page *gp;
793
794 list_for_each_entry(gp, &grave_pages, list)
795 if (gp->page_no == page_no) {
796 if (gp->reads_done >= gp->max_reads)
797 return 1;
798 gp->reads_done += 1;
799 return 0;
800 }
801 return 0;
802}
803
804static void free_lists(void)
805{
806 struct list_head *pos, *n;
807 list_for_each_safe(pos, n, &weak_blocks) {
808 list_del(pos);
809 kfree(list_entry(pos, struct weak_block, list));
810 }
811 list_for_each_safe(pos, n, &weak_pages) {
812 list_del(pos);
813 kfree(list_entry(pos, struct weak_page, list));
814 }
815 list_for_each_safe(pos, n, &grave_pages) {
816 list_del(pos);
817 kfree(list_entry(pos, struct grave_page, list));
818 }
819 kfree(erase_block_wear);
820}
821
822static int setup_wear_reporting(struct mtd_info *mtd)
823{
824 size_t mem;
825
826 if (!rptwear)
827 return 0;
828 wear_eb_count = mtd->size / mtd->erasesize;
829 mem = wear_eb_count * sizeof(unsigned long);
830 if (mem / sizeof(unsigned long) != wear_eb_count) {
831 NS_ERR("Too many erase blocks for wear reporting\n");
832 return -ENOMEM;
833 }
834 erase_block_wear = kzalloc(mem, GFP_KERNEL);
835 if (!erase_block_wear) {
836 NS_ERR("Too many erase blocks for wear reporting\n");
837 return -ENOMEM;
838 }
839 return 0;
840}
841
842static void update_wear(unsigned int erase_block_no)
843{
844 unsigned long wmin = -1, wmax = 0, avg;
845 unsigned long deciles[10], decile_max[10], tot = 0;
846 unsigned int i;
847
848 if (!erase_block_wear)
849 return;
850 total_wear += 1;
851 if (total_wear == 0)
852 NS_ERR("Erase counter total overflow\n");
853 erase_block_wear[erase_block_no] += 1;
854 if (erase_block_wear[erase_block_no] == 0)
855 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
856 rptwear_cnt += 1;
857 if (rptwear_cnt < rptwear)
858 return;
859 rptwear_cnt = 0;
860 /* Calc wear stats */
861 for (i = 0; i < wear_eb_count; ++i) {
862 unsigned long wear = erase_block_wear[i];
863 if (wear < wmin)
864 wmin = wear;
865 if (wear > wmax)
866 wmax = wear;
867 tot += wear;
868 }
869 for (i = 0; i < 9; ++i) {
870 deciles[i] = 0;
871 decile_max[i] = (wmax * (i + 1) + 5) / 10;
872 }
873 deciles[9] = 0;
874 decile_max[9] = wmax;
875 for (i = 0; i < wear_eb_count; ++i) {
876 int d;
877 unsigned long wear = erase_block_wear[i];
878 for (d = 0; d < 10; ++d)
879 if (wear <= decile_max[d]) {
880 deciles[d] += 1;
881 break;
882 }
883 }
884 avg = tot / wear_eb_count;
885 /* Output wear report */
886 NS_INFO("*** Wear Report ***\n");
887 NS_INFO("Total number of erases: %lu\n", tot);
888 NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
889 NS_INFO("Average number of erases: %lu\n", avg);
890 NS_INFO("Maximum number of erases: %lu\n", wmax);
891 NS_INFO("Minimum number of erases: %lu\n", wmin);
892 for (i = 0; i < 10; ++i) {
893 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
894 if (from > decile_max[i])
895 continue;
896 NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
897 from,
898 decile_max[i],
899 deciles[i]);
900 }
901 NS_INFO("*** End of Wear Report ***\n");
902}
903
513/* 904/*
514 * Returns the string representation of 'state' state. 905 * Returns the string representation of 'state' state.
515 */ 906 */
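The wear report in update_wear() above buckets erase counts into ten ranges whose upper bounds are rounded tenths of the maximum count. A small standalone check of those boundaries (the value of wmax is chosen only for illustration):

    #include <stdio.h>

    /* Sketch: decile_max[i] = (wmax * (i + 1) + 5) / 10 rounds each boundary to
     * the nearest integer; the last bucket is pinned to wmax.  With wmax = 37
     * this prints: 4 7 11 15 19 22 26 30 33 37 */
    int main(void)
    {
            unsigned long wmax = 37, decile_max[10];
            int i;

            for (i = 0; i < 9; i++)
                    decile_max[i] = (wmax * (i + 1) + 5) / 10;
            decile_max[9] = wmax;
            for (i = 0; i < 10; i++)
                    printf("%lu ", decile_max[i]);
            printf("\n");
            return 0;
    }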
@@ -822,9 +1213,31 @@ static void read_page(struct nandsim *ns, int num)
822 NS_DBG("read_page: page %d not allocated\n", ns->regs.row); 1213 NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
823 memset(ns->buf.byte, 0xFF, num); 1214 memset(ns->buf.byte, 0xFF, num);
824 } else { 1215 } else {
1216 unsigned int page_no = ns->regs.row;
825 NS_DBG("read_page: page %d allocated, reading from %d\n", 1217 NS_DBG("read_page: page %d allocated, reading from %d\n",
826 ns->regs.row, ns->regs.column + ns->regs.off); 1218 ns->regs.row, ns->regs.column + ns->regs.off);
1219 if (read_error(page_no)) {
1220 int i;
1221 memset(ns->buf.byte, 0xFF, num);
1222 for (i = 0; i < num; ++i)
1223 ns->buf.byte[i] = random32();
1224 NS_WARN("simulating read error in page %u\n", page_no);
1225 return;
1226 }
827 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); 1227 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
1228 if (bitflips && random32() < (1 << 22)) {
1229 int flips = 1;
1230 if (bitflips > 1)
1231 flips = (random32() % (int) bitflips) + 1;
1232 while (flips--) {
1233 int pos = random32() % (num * 8);
1234 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1235 NS_WARN("read_page: flipping bit %d in page %d "
1236 "reading from %d ecc: corrected=%u failed=%u\n",
1237 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1238 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1239 }
1240 }
828 } 1241 }
829} 1242}
830 1243
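The bit-flip injection added to read_page() above is gated on random32() < (1 << 22), i.e. roughly one page read in 1024 (2^22 out of 2^32 possible values). A quick userspace check of that rate, with a 32-bit xorshift generator standing in for the kernel's random32():

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch: estimate how often "random32() < (1 << 22)" fires (~1/1024). */
    static uint32_t xorshift32(uint32_t *s)
    {
            *s ^= *s << 13;
            *s ^= *s >> 17;
            *s ^= *s << 5;
            return *s;
    }

    int main(void)
    {
            uint32_t state = 2463534242u;
            unsigned long reads = 1000000, hits = 0, i;

            for (i = 0; i < reads; i++)
                    if (xorshift32(&state) < (1u << 22))
                            hits++;
            printf("%lu of %lu reads get flips (expected about %lu)\n",
                   hits, reads, reads / 1024);
            return 0;
    }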
@@ -883,6 +1296,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
883{ 1296{
884 int num; 1297 int num;
885 int busdiv = ns->busw == 8 ? 1 : 2; 1298 int busdiv = ns->busw == 8 ? 1 : 2;
1299 unsigned int erase_block_no, page_no;
886 1300
887 action &= ACTION_MASK; 1301 action &= ACTION_MASK;
888 1302
@@ -942,14 +1356,24 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
942 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column; 1356 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
943 ns->regs.column = 0; 1357 ns->regs.column = 0;
944 1358
1359 erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
1360
945 NS_DBG("do_state_action: erase sector at address %#x, off = %d\n", 1361 NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
946 ns->regs.row, NS_RAW_OFFSET(ns)); 1362 ns->regs.row, NS_RAW_OFFSET(ns));
947 NS_LOG("erase sector %d\n", ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift)); 1363 NS_LOG("erase sector %u\n", erase_block_no);
948 1364
949 erase_sector(ns); 1365 erase_sector(ns);
950 1366
951 NS_MDELAY(erase_delay); 1367 NS_MDELAY(erase_delay);
952 1368
1369 if (erase_block_wear)
1370 update_wear(erase_block_no);
1371
1372 if (erase_error(erase_block_no)) {
1373 NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
1374 return -1;
1375 }
1376
953 break; 1377 break;
954 1378
955 case ACTION_PRGPAGE: 1379 case ACTION_PRGPAGE:
@@ -972,6 +1396,8 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
972 if (prog_page(ns, num) == -1) 1396 if (prog_page(ns, num) == -1)
973 return -1; 1397 return -1;
974 1398
1399 page_no = ns->regs.row;
1400
975 NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n", 1401 NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
976 num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off); 1402 num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
977 NS_LOG("programm page %d\n", ns->regs.row); 1403 NS_LOG("programm page %d\n", ns->regs.row);
@@ -979,6 +1405,11 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
979 NS_UDELAY(programm_delay); 1405 NS_UDELAY(programm_delay);
980 NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv); 1406 NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
981 1407
1408 if (write_error(page_no)) {
1409 NS_WARN("simulating write failure in page %u\n", page_no);
1410 return -1;
1411 }
1412
982 break; 1413 break;
983 1414
984 case ACTION_ZEROOFF: 1415 case ACTION_ZEROOFF:
@@ -1503,7 +1934,7 @@ static int __init ns_init_module(void)
1503{ 1934{
1504 struct nand_chip *chip; 1935 struct nand_chip *chip;
1505 struct nandsim *nand; 1936 struct nandsim *nand;
1506 int retval = -ENOMEM; 1937 int retval = -ENOMEM, i;
1507 1938
1508 if (bus_width != 8 && bus_width != 16) { 1939 if (bus_width != 8 && bus_width != 16) {
1509 NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width); 1940 NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
@@ -1533,6 +1964,8 @@ static int __init ns_init_module(void)
1533 chip->verify_buf = ns_nand_verify_buf; 1964 chip->verify_buf = ns_nand_verify_buf;
1534 chip->read_word = ns_nand_read_word; 1965 chip->read_word = ns_nand_read_word;
1535 chip->ecc.mode = NAND_ECC_SOFT; 1966 chip->ecc.mode = NAND_ECC_SOFT;
1967 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
1968 /* and 'badblocks' parameters to work */
1536 chip->options |= NAND_SKIP_BBTSCAN; 1969 chip->options |= NAND_SKIP_BBTSCAN;
1537 1970
1538 /* 1971 /*
@@ -1557,6 +1990,15 @@ static int __init ns_init_module(void)
1557 1990
1558 nsmtd->owner = THIS_MODULE; 1991 nsmtd->owner = THIS_MODULE;
1559 1992
1993 if ((retval = parse_weakblocks()) != 0)
1994 goto error;
1995
1996 if ((retval = parse_weakpages()) != 0)
1997 goto error;
1998
1999 if ((retval = parse_gravepages()) != 0)
2000 goto error;
2001
1560 if ((retval = nand_scan(nsmtd, 1)) != 0) { 2002 if ((retval = nand_scan(nsmtd, 1)) != 0) {
1561 NS_ERR("can't register NAND Simulator\n"); 2003 NS_ERR("can't register NAND Simulator\n");
1562 if (retval > 0) 2004 if (retval > 0)
@@ -1564,23 +2006,44 @@ static int __init ns_init_module(void)
1564 goto error; 2006 goto error;
1565 } 2007 }
1566 2008
1567 if ((retval = init_nandsim(nsmtd)) != 0) { 2009 if (overridesize) {
1568 NS_ERR("scan_bbt: can't initialize the nandsim structure\n"); 2010 u_int32_t new_size = nsmtd->erasesize << overridesize;
1569 goto error; 2011 if (new_size >> overridesize != nsmtd->erasesize) {
2012 NS_ERR("overridesize is too big\n");
2013 goto err_exit;
2014 }
2015 /* N.B. This relies on nand_scan not doing anything with the size before we change it */
2016 nsmtd->size = new_size;
2017 chip->chipsize = new_size;
2018 chip->chip_shift = ffs(new_size) - 1;
1570 } 2019 }
1571 2020
1572 if ((retval = nand_default_bbt(nsmtd)) != 0) { 2021 if ((retval = setup_wear_reporting(nsmtd)) != 0)
1573 free_nandsim(nand); 2022 goto err_exit;
1574 goto error; 2023
1575 } 2024 if ((retval = init_nandsim(nsmtd)) != 0)
2025 goto err_exit;
1576 2026
1577 /* Register NAND as one big partition */ 2027 if ((retval = parse_badblocks(nand, nsmtd)) != 0)
1578 add_mtd_partitions(nsmtd, &nand->part, 1); 2028 goto err_exit;
2029
2030 if ((retval = nand_default_bbt(nsmtd)) != 0)
2031 goto err_exit;
2032
2033 /* Register NAND partitions */
2034 if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0)
2035 goto err_exit;
1579 2036
1580 return 0; 2037 return 0;
1581 2038
2039err_exit:
2040 free_nandsim(nand);
2041 nand_release(nsmtd);
2042 for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
2043 kfree(nand->partitions[i].name);
1582error: 2044error:
1583 kfree(nsmtd); 2045 kfree(nsmtd);
2046 free_lists();
1584 2047
1585 return retval; 2048 return retval;
1586} 2049}
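The 'overridesize' handling above grows the device to erasesize << overridesize and rejects values that overflow 32 bits by shifting back and comparing. The same check in isolation (the numbers are examples only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t erasesize = 16384;       /* 16 KiB erase block, for example */
            unsigned int overridesize = 20;   /* request 2^20 erase blocks */
            uint32_t new_size = erasesize << overridesize;

            /* If the shift overflowed, shifting back cannot recover erasesize. */
            if (new_size >> overridesize != erasesize)
                    printf("overridesize is too big\n");
            else
                    printf("new flash size: %u bytes\n", new_size);
            return 0;
    }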
@@ -1593,10 +2056,14 @@ module_init(ns_init_module);
1593static void __exit ns_cleanup_module(void) 2056static void __exit ns_cleanup_module(void)
1594{ 2057{
1595 struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv); 2058 struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv);
2059 int i;
1596 2060
1597 free_nandsim(ns); /* Free nandsim private resources */ 2061 free_nandsim(ns); /* Free nandsim private resources */
1598 nand_release(nsmtd); /* Unregisterd drived */ 2062 nand_release(nsmtd); /* Unregister driver */
2063 for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
2064 kfree(ns->partitions[i].name);
1599 kfree(nsmtd); /* Free other structures */ 2065 kfree(nsmtd); /* Free other structures */
2066 free_lists();
1600} 2067}
1601 2068
1602module_exit(ns_cleanup_module); 2069module_exit(ns_cleanup_module);
@@ -1604,4 +2071,3 @@ module_exit(ns_cleanup_module);
1604MODULE_LICENSE ("GPL"); 2071MODULE_LICENSE ("GPL");
1605MODULE_AUTHOR ("Artem B. Bityuckiy"); 2072MODULE_AUTHOR ("Artem B. Bityuckiy");
1606MODULE_DESCRIPTION ("The NAND flash simulator"); 2073MODULE_DESCRIPTION ("The NAND flash simulator");
1607
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 373bddce8f1c..c257d397d08a 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -2,20 +2,18 @@
2# linux/drivers/mtd/onenand/Kconfig 2# linux/drivers/mtd/onenand/Kconfig
3# 3#
4 4
5menu "OneNAND Flash Device Drivers" 5menuconfig MTD_ONENAND
6 depends on MTD != n
7
8config MTD_ONENAND
9 tristate "OneNAND Device Support" 6 tristate "OneNAND Device Support"
10 depends on MTD 7 depends on MTD
11 help 8 help
12 This enables support for accessing all type of OneNAND flash 9 This enables support for accessing all type of OneNAND flash
13 devices. For further information see 10 devices. For further information see
14 <http://www.samsung.com/Products/Semiconductor/Flash/OneNAND_TM/index.htm>. 11 <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
12
13if MTD_ONENAND
15 14
16config MTD_ONENAND_VERIFY_WRITE 15config MTD_ONENAND_VERIFY_WRITE
17 bool "Verify OneNAND page writes" 16 bool "Verify OneNAND page writes"
18 depends on MTD_ONENAND
19 help 17 help
20 This adds an extra check when data is written to the flash. The 18 This adds an extra check when data is written to the flash. The
21 OneNAND flash device internally checks only bits transitioning 19 OneNAND flash device internally checks only bits transitioning
@@ -25,13 +23,12 @@ config MTD_ONENAND_VERIFY_WRITE
25 23
26config MTD_ONENAND_GENERIC 24config MTD_ONENAND_GENERIC
27 tristate "OneNAND Flash device via platform device driver" 25 tristate "OneNAND Flash device via platform device driver"
28 depends on MTD_ONENAND && ARM 26 depends on ARM
29 help 27 help
30 Support for OneNAND flash via platform device driver. 28 Support for OneNAND flash via platform device driver.
31 29
32config MTD_ONENAND_OTP 30config MTD_ONENAND_OTP
33 bool "OneNAND OTP Support" 31 bool "OneNAND OTP Support"
34 depends on MTD_ONENAND
35 help 32 help
36 One Block of the NAND Flash Array memory is reserved as 33 One Block of the NAND Flash Array memory is reserved as
37 a One-Time Programmable Block memory area. 34 a One-Time Programmable Block memory area.
@@ -43,4 +40,4 @@ config MTD_ONENAND_OTP
43 40
44 OTP block is fully-guaranteed to be a valid block. 41 OTP block is fully-guaranteed to be a valid block.
45 42
46endmenu 43endif # MTD_ONENAND
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 9e14a26ca4e8..000794c6caf5 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -836,9 +836,11 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
836 int readcol = column; 836 int readcol = column;
837 int readend = column + thislen; 837 int readend = column + thislen;
838 int lastgap = 0; 838 int lastgap = 0;
839 unsigned int i;
839 uint8_t *oob_buf = this->oob_buf; 840 uint8_t *oob_buf = this->oob_buf;
840 841
841 for (free = this->ecclayout->oobfree; free->length; ++free) { 842 free = this->ecclayout->oobfree;
843 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
842 if (readcol >= lastgap) 844 if (readcol >= lastgap)
843 readcol += free->offset - lastgap; 845 readcol += free->offset - lastgap;
844 if (readend >= lastgap) 846 if (readend >= lastgap)
@@ -846,7 +848,8 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
846 lastgap = free->offset + free->length; 848 lastgap = free->offset + free->length;
847 } 849 }
848 this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); 850 this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
849 for (free = this->ecclayout->oobfree; free->length; ++free) { 851 free = this->ecclayout->oobfree;
852 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
850 int free_end = free->offset + free->length; 853 int free_end = free->offset + free->length;
851 if (free->offset < readend && free_end > readcol) { 854 if (free->offset < readend && free_end > readcol) {
852 int st = max_t(int,free->offset,readcol); 855 int st = max_t(int,free->offset,readcol);
@@ -854,7 +857,7 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
854 int n = ed - st; 857 int n = ed - st;
855 memcpy(buf, oob_buf + st, n); 858 memcpy(buf, oob_buf + st, n);
856 buf += n; 859 buf += n;
857 } else 860 } else if (column == 0)
858 break; 861 break;
859 } 862 }
860 return 0; 863 return 0;
@@ -1280,15 +1283,18 @@ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
1280 int writecol = column; 1283 int writecol = column;
1281 int writeend = column + thislen; 1284 int writeend = column + thislen;
1282 int lastgap = 0; 1285 int lastgap = 0;
1286 unsigned int i;
1283 1287
1284 for (free = this->ecclayout->oobfree; free->length; ++free) { 1288 free = this->ecclayout->oobfree;
1289 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1285 if (writecol >= lastgap) 1290 if (writecol >= lastgap)
1286 writecol += free->offset - lastgap; 1291 writecol += free->offset - lastgap;
1287 if (writeend >= lastgap) 1292 if (writeend >= lastgap)
1288 writeend += free->offset - lastgap; 1293 writeend += free->offset - lastgap;
1289 lastgap = free->offset + free->length; 1294 lastgap = free->offset + free->length;
1290 } 1295 }
1291 for (free = this->ecclayout->oobfree; free->length; ++free) { 1296 free = this->ecclayout->oobfree;
1297 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
1292 int free_end = free->offset + free->length; 1298 int free_end = free->offset + free->length;
1293 if (free->offset < writeend && free_end > writecol) { 1299 if (free->offset < writeend && free_end > writecol) {
1294 int st = max_t(int,free->offset,writecol); 1300 int st = max_t(int,free->offset,writecol);
@@ -1296,7 +1302,7 @@ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
1296 int n = ed - st; 1302 int n = ed - st;
1297 memcpy(oob_buf + st, buf, n); 1303 memcpy(oob_buf + st, buf, n);
1298 buf += n; 1304 buf += n;
1299 } else 1305 } else if (column == 0)
1300 break; 1306 break;
1301 } 1307 }
1302 return 0; 1308 return 0;
@@ -2386,7 +2392,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
2386 * the out of band area 2392 * the out of band area
2387 */ 2393 */
2388 this->ecclayout->oobavail = 0; 2394 this->ecclayout->oobavail = 0;
2389 for (i = 0; this->ecclayout->oobfree[i].length; i++) 2395 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
2396 this->ecclayout->oobfree[i].length; i++)
2390 this->ecclayout->oobavail += 2397 this->ecclayout->oobavail +=
2391 this->ecclayout->oobfree[i].length; 2398 this->ecclayout->oobfree[i].length;
2392 mtd->oobavail = this->ecclayout->oobavail; 2399 mtd->oobavail = this->ecclayout->oobavail;
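The onenand_base.c hunks above all bound the same idiom: ecclayout->oobfree is a fixed-size array terminated early by a zero-length entry, so the loops must check both the index limit and the length. A compact sketch of the pattern (MTD_MAX_OOBFREE_ENTRIES taken as 8 here purely as an assumption for illustration):

    /* Sketch: sum the free OOB bytes, stopping at the array end or at the first
     * zero-length entry, as the hunks above now do. */
    #define MTD_MAX_OOBFREE_ENTRIES 8     /* assumed value, for illustration */

    struct oobfree_sketch { unsigned int offset, length; };

    static unsigned int count_oobavail(const struct oobfree_sketch *free)
    {
            unsigned int i, avail = 0;

            for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free[i].length; i++)
                    avail += free[i].length;
            return avail;
    }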
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
new file mode 100644
index 000000000000..b9daf159a4a7
--- /dev/null
+++ b/drivers/mtd/ubi/Kconfig
@@ -0,0 +1,58 @@
1# drivers/mtd/ubi/Kconfig
2
3menu "UBI - Unsorted block images"
4 depends on MTD
5
6config MTD_UBI
7 tristate "Enable UBI"
8 depends on MTD
9 select CRC32
10 help
11 UBI is a software layer above MTD layer which admits of LVM-like
12 logical volumes on top of MTD devices, hides some complexities of
13 flash chips like wear and bad blocks and provides some other useful
14 capabilities. Please, consult the MTD web site for more details
15 (www.linux-mtd.infradead.org).
16
17config MTD_UBI_WL_THRESHOLD
18 int "UBI wear-leveling threshold"
19 default 4096
20 range 2 65536
21 depends on MTD_UBI
22 help
23 This parameter defines the maximum difference between the highest
24 erase counter value and the lowest erase counter value of eraseblocks
25 of UBI devices. When this threshold is exceeded, UBI starts performing
26 wear leveling by moving data from eraseblocks with low erase
27 counters to eraseblocks with high erase counters. Leave the default
28 value if unsure.
29
30config MTD_UBI_BEB_RESERVE
31 int "Percentage of reserved eraseblocks for bad eraseblocks handling"
32 default 1
33 range 0 25
34 depends on MTD_UBI
35 help
36 If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
37 reserves some amount of physical eraseblocks to handle new bad
38 eraseblocks. For example, if a flash physical eraseblock becomes bad,
39 UBI uses these reserved physical eraseblocks to relocate the bad one.
40 This option specifies how many physical eraseblocks will be reserved
41 for bad eraseblock handling (percent of the total number of good flash
42 eraseblocks). If the underlying flash does not admit of bad
43 eraseblocks (e.g. NOR flash), this value is ignored and nothing is
44 reserved. Leave the default value if unsure.
45
46config MTD_UBI_GLUEBI
47 bool "Emulate MTD devices"
48 default n
49 depends on MTD_UBI
50 help
51 This option enables MTD device emulation on top of UBI volumes: for
52 each UBI volume an MTD device is created, and all I/O to this MTD
53 device is redirected to the UBI volume. This is handy to make
54 MTD-oriented software (like JFFS2) work on top of UBI. Do not enable
55 this if no legacy software will be used.
56
57source "drivers/mtd/ubi/Kconfig.debug"
58endmenu
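The MTD_UBI_BEB_RESERVE option above is a percentage of good eraseblocks. As a rough worked example (the flash geometry is invented for the purpose and the rounding is illustrative, not taken from the UBI code):

    #include <stdio.h>

    int main(void)
    {
            int good_pebs = 4096;      /* e.g. 512 MiB of flash with 128 KiB PEBs */
            int reserve_pct = 1;       /* the Kconfig default */
            int reserved = good_pebs * reserve_pct / 100;

            printf("%d PEBs reserved for bad eraseblock handling\n", reserved);
            return 0;
    }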
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
new file mode 100644
index 000000000000..1e2ee22edeff
--- /dev/null
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -0,0 +1,104 @@
1comment "UBI debugging options"
2 depends on MTD_UBI
3
4config MTD_UBI_DEBUG
5 bool "UBI debugging"
6 depends on SYSFS
7 depends on MTD_UBI
8 select DEBUG_FS
9 select KALLSYMS_ALL
10 help
11 This option enables UBI debugging.
12
13config MTD_UBI_DEBUG_MSG
14 bool "UBI debugging messages"
15 depends on MTD_UBI_DEBUG
16 default n
17 help
18 This option enables UBI debugging messages.
19
20config MTD_UBI_DEBUG_PARANOID
21 bool "Extra self-checks"
22 default n
23 depends on MTD_UBI_DEBUG
24 help
25 This option enables extra checks in UBI code. Note this slows UBI down
26 significantly.
27
28config MTD_UBI_DEBUG_DISABLE_BGT
29 bool "Do not enable the UBI background thread"
30 depends on MTD_UBI_DEBUG
31 default n
32 help
33 This option switches the background thread off by default. The thread
34 may also be enabled/disabled via UBI sysfs.
35
36config MTD_UBI_DEBUG_USERSPACE_IO
37 bool "Direct user-space write/erase support"
38 default n
39 depends on MTD_UBI_DEBUG
40 help
41 By default, users cannot directly write and erase individual
42 eraseblocks of dynamic volumes, and have to use update operation
43 instead. This option enables this capability - it is very useful for
44 debugging and testing.
45
46config MTD_UBI_DEBUG_EMULATE_BITFLIPS
47 bool "Emulate flash bit-flips"
48 depends on MTD_UBI_DEBUG
49 default n
50 help
51 This option emulates bit-flips with probability 1/50, which in turn
52 causes scrubbing. Useful for debugging and stressing UBI.
53
54config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
55 bool "Emulate flash write failures"
56 depends on MTD_UBI_DEBUG
57 default n
58 help
59 This option emulates write failures with probability 1/100. Useful for
60 debugging and testing how UBI handles errors.
61
62config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
63 bool "Emulate flash erase failures"
64 depends on MTD_UBI_DEBUG
65 default n
66 help
67 This option emulates erase failures with probability 1/100. Useful for
68 debugging and testing how UBI handles errors.
69
70menu "Additional UBI debugging messages"
71 depends on MTD_UBI_DEBUG
72
73config MTD_UBI_DEBUG_MSG_BLD
74 bool "Additional UBI initialization and build messages"
75 default n
76 depends on MTD_UBI_DEBUG
77 help
78 This option enables detailed UBI initialization and device build
79 debugging messages.
80
81config MTD_UBI_DEBUG_MSG_EBA
82 bool "Eraseblock association unit messages"
83 default n
84 depends on MTD_UBI_DEBUG
85 help
86 This option enables debugging messages from the UBI eraseblock
87 association unit.
88
89config MTD_UBI_DEBUG_MSG_WL
90 bool "Wear-leveling unit messages"
91 default n
92 depends on MTD_UBI_DEBUG
93 help
94 This option enables debugging messages from the UBI wear-leveling
95 unit.
96
97config MTD_UBI_DEBUG_MSG_IO
98 bool "Input/output unit messages"
99 default n
100 depends on MTD_UBI_DEBUG
101 help
102 This option enables debugging messages from the UBI input/output unit.
103
104endmenu # UBI debugging messages
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
new file mode 100644
index 000000000000..dd834e04151b
--- /dev/null
+++ b/drivers/mtd/ubi/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_MTD_UBI) += ubi.o
2
3ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
4ubi-y += misc.o
5
6ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
7ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
new file mode 100644
index 000000000000..555d594d1811
--- /dev/null
+++ b/drivers/mtd/ubi/build.c
@@ -0,0 +1,848 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 * Copyright (c) Nokia Corporation, 2007
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * Author: Artem Bityutskiy (Битюцкий Артём),
20 * Frank Haverkamp
21 */
22
23/*
24 * This file includes UBI initialization and building of UBI devices. At the
25 * moment UBI devices may only be added while UBI is initialized, but dynamic
26 * device add/remove functionality is planned. Also, at the moment we only
27 * attach UBI devices by scanning, which will become a bottleneck when flashes
28 * reach a certain large size. Then one may improve UBI and add other methods.
29 */
30
31#include <linux/err.h>
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/stringify.h>
35#include <linux/stat.h>
36#include "ubi.h"
37
38/* Maximum length of the 'mtd=' parameter */
39#define MTD_PARAM_LEN_MAX 64
40
41/**
42 * struct mtd_dev_param - MTD device parameter description data structure.
43 * @name: MTD device name or number string
44 * @vid_hdr_offs: VID header offset
45 * @data_offs: data offset
46 */
47struct mtd_dev_param
48{
49 char name[MTD_PARAM_LEN_MAX];
50 int vid_hdr_offs;
51 int data_offs;
52};
53
54/* Number of elements set in the @mtd_dev_param array */
55static int mtd_devs = 0;
56
57/* MTD devices specification parameters */
58static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
59
60/* Number of UBI devices in system */
61int ubi_devices_cnt;
62
63/* All UBI devices in system */
64struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
65
66/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
67struct class *ubi_class;
68
69/* "Show" method for files in '/<sysfs>/class/ubi/' */
70static ssize_t ubi_version_show(struct class *class, char *buf)
71{
72 return sprintf(buf, "%d\n", UBI_VERSION);
73}
74
75/* UBI version attribute ('/<sysfs>/class/ubi/version') */
76static struct class_attribute ubi_version =
77 __ATTR(version, S_IRUGO, ubi_version_show, NULL);
78
79static ssize_t dev_attribute_show(struct device *dev,
80 struct device_attribute *attr, char *buf);
81
82/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
83static struct device_attribute dev_eraseblock_size =
84 __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
85static struct device_attribute dev_avail_eraseblocks =
86 __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
87static struct device_attribute dev_total_eraseblocks =
88 __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
89static struct device_attribute dev_volumes_count =
90 __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
91static struct device_attribute dev_max_ec =
92 __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
93static struct device_attribute dev_reserved_for_bad =
94 __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
95static struct device_attribute dev_bad_peb_count =
96 __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
97static struct device_attribute dev_max_vol_count =
98 __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
99static struct device_attribute dev_min_io_size =
100 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
101static struct device_attribute dev_bgt_enabled =
102 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
103
104/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
105static ssize_t dev_attribute_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 const struct ubi_device *ubi;
109
110 ubi = container_of(dev, struct ubi_device, dev);
111 if (attr == &dev_eraseblock_size)
112 return sprintf(buf, "%d\n", ubi->leb_size);
113 else if (attr == &dev_avail_eraseblocks)
114 return sprintf(buf, "%d\n", ubi->avail_pebs);
115 else if (attr == &dev_total_eraseblocks)
116 return sprintf(buf, "%d\n", ubi->good_peb_count);
117 else if (attr == &dev_volumes_count)
118 return sprintf(buf, "%d\n", ubi->vol_count);
119 else if (attr == &dev_max_ec)
120 return sprintf(buf, "%d\n", ubi->max_ec);
121 else if (attr == &dev_reserved_for_bad)
122 return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
123 else if (attr == &dev_bad_peb_count)
124 return sprintf(buf, "%d\n", ubi->bad_peb_count);
125 else if (attr == &dev_max_vol_count)
126 return sprintf(buf, "%d\n", ubi->vtbl_slots);
127 else if (attr == &dev_min_io_size)
128 return sprintf(buf, "%d\n", ubi->min_io_size);
129 else if (attr == &dev_bgt_enabled)
130 return sprintf(buf, "%d\n", ubi->thread_enabled);
131 else
132 BUG();
133
134 return 0;
135}
136
137/* Fake "release" method for UBI devices */
138static void dev_release(struct device *dev) { }
139
140/**
141 * ubi_sysfs_init - initialize sysfs for an UBI device.
142 * @ubi: UBI device description object
143 *
144 * This function returns zero in case of success and a negative error code in
145 * case of failure.
146 */
147static int ubi_sysfs_init(struct ubi_device *ubi)
148{
149 int err;
150
151 ubi->dev.release = dev_release;
152 ubi->dev.devt = MKDEV(ubi->major, 0);
153 ubi->dev.class = ubi_class;
154 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
155 err = device_register(&ubi->dev);
156 if (err)
157 goto out;
158
159 err = device_create_file(&ubi->dev, &dev_eraseblock_size);
160 if (err)
161 goto out_unregister;
162 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
163 if (err)
164 goto out_eraseblock_size;
165 err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
166 if (err)
167 goto out_avail_eraseblocks;
168 err = device_create_file(&ubi->dev, &dev_volumes_count);
169 if (err)
170 goto out_total_eraseblocks;
171 err = device_create_file(&ubi->dev, &dev_max_ec);
172 if (err)
173 goto out_volumes_count;
174 err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
175 if (err)
176 goto out_volumes_max_ec;
177 err = device_create_file(&ubi->dev, &dev_bad_peb_count);
178 if (err)
179 goto out_reserved_for_bad;
180 err = device_create_file(&ubi->dev, &dev_max_vol_count);
181 if (err)
182 goto out_bad_peb_count;
183 err = device_create_file(&ubi->dev, &dev_min_io_size);
184 if (err)
185 goto out_max_vol_count;
186 err = device_create_file(&ubi->dev, &dev_bgt_enabled);
187 if (err)
188 goto out_min_io_size;
189
190 return 0;
191
192out_min_io_size:
193 device_remove_file(&ubi->dev, &dev_min_io_size);
194out_max_vol_count:
195 device_remove_file(&ubi->dev, &dev_max_vol_count);
196out_bad_peb_count:
197 device_remove_file(&ubi->dev, &dev_bad_peb_count);
198out_reserved_for_bad:
199 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
200out_volumes_max_ec:
201 device_remove_file(&ubi->dev, &dev_max_ec);
202out_volumes_count:
203 device_remove_file(&ubi->dev, &dev_volumes_count);
204out_total_eraseblocks:
205 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
206out_avail_eraseblocks:
207 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
208out_eraseblock_size:
209 device_remove_file(&ubi->dev, &dev_eraseblock_size);
210out_unregister:
211 device_unregister(&ubi->dev);
212out:
213 ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
214 return err;
215}
216
217/**
218 * ubi_sysfs_close - close sysfs for an UBI device.
219 * @ubi: UBI device description object
220 */
221static void ubi_sysfs_close(struct ubi_device *ubi)
222{
223 device_remove_file(&ubi->dev, &dev_bgt_enabled);
224 device_remove_file(&ubi->dev, &dev_min_io_size);
225 device_remove_file(&ubi->dev, &dev_max_vol_count);
226 device_remove_file(&ubi->dev, &dev_bad_peb_count);
227 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
228 device_remove_file(&ubi->dev, &dev_max_ec);
229 device_remove_file(&ubi->dev, &dev_volumes_count);
230 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
231 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
232 device_remove_file(&ubi->dev, &dev_eraseblock_size);
233 device_unregister(&ubi->dev);
234}
235
236/**
237 * kill_volumes - destroy all volumes.
238 * @ubi: UBI device description object
239 */
240static void kill_volumes(struct ubi_device *ubi)
241{
242 int i;
243
244 for (i = 0; i < ubi->vtbl_slots; i++)
245 if (ubi->volumes[i])
246 ubi_free_volume(ubi, i);
247}
248
249/**
250 * uif_init - initialize user interfaces for an UBI device.
251 * @ubi: UBI device description object
252 *
253 * This function returns zero in case of success and a negative error code in
254 * case of failure.
255 */
256static int uif_init(struct ubi_device *ubi)
257{
258 int i, err;
259 dev_t dev;
260
261 mutex_init(&ubi->vtbl_mutex);
262 spin_lock_init(&ubi->volumes_lock);
263
264 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
265
266 /*
267 * Major numbers for the UBI character devices are allocated
268 * dynamically. Major numbers of volume character devices are
269 * equivalent to ones of the corresponding UBI character device. Minor
270 * numbers of UBI character devices are 0, while minor numbers of
271 * volume character devices start from 1. Thus, we allocate one major
272 * number and ubi->vtbl_slots + 1 minor numbers.
273 */
274 err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
275 if (err) {
276 ubi_err("cannot register UBI character devices");
277 return err;
278 }
279
280 cdev_init(&ubi->cdev, &ubi_cdev_operations);
281 ubi->major = MAJOR(dev);
282 dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
283 ubi->cdev.owner = THIS_MODULE;
284
285 dev = MKDEV(ubi->major, 0);
286 err = cdev_add(&ubi->cdev, dev, 1);
287 if (err) {
288 ubi_err("cannot add character device %s", ubi->ubi_name);
289 goto out_unreg;
290 }
291
292 err = ubi_sysfs_init(ubi);
293 if (err)
294 goto out_cdev;
295
296 for (i = 0; i < ubi->vtbl_slots; i++)
297 if (ubi->volumes[i]) {
298 err = ubi_add_volume(ubi, i);
299 if (err)
300 goto out_volumes;
301 }
302
303 return 0;
304
305out_volumes:
306 kill_volumes(ubi);
307 ubi_sysfs_close(ubi);
308out_cdev:
309 cdev_del(&ubi->cdev);
310out_unreg:
311 unregister_chrdev_region(MKDEV(ubi->major, 0),
312 ubi->vtbl_slots + 1);
313 return err;
314}
315
316/**
317 * uif_close - close user interfaces for an UBI device.
318 * @ubi: UBI device description object
319 */
320static void uif_close(struct ubi_device *ubi)
321{
322 kill_volumes(ubi);
323 ubi_sysfs_close(ubi);
324 cdev_del(&ubi->cdev);
325 unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1);
326}
327
328/**
329 * attach_by_scanning - attach an MTD device using scanning method.
330 * @ubi: UBI device descriptor
331 *
332 * This function returns zero in case of success and a negative error code in
333 * case of failure.
334 *
335 * Note, currently this is the only method to attach UBI devices. Hopefully in
336 * the future we'll have more scalable attaching methods and avoid full media
337 * scanning. But even in this case scanning will be needed as a fall-back
338 * attaching method if there are some on-flash table corruptions.
339 */
340static int attach_by_scanning(struct ubi_device *ubi)
341{
342 int err;
343 struct ubi_scan_info *si;
344
345 si = ubi_scan(ubi);
346 if (IS_ERR(si))
347 return PTR_ERR(si);
348
349 ubi->bad_peb_count = si->bad_peb_count;
350 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
351 ubi->max_ec = si->max_ec;
352 ubi->mean_ec = si->mean_ec;
353
354 err = ubi_read_volume_table(ubi, si);
355 if (err)
356 goto out_si;
357
358 err = ubi_wl_init_scan(ubi, si);
359 if (err)
360 goto out_vtbl;
361
362 err = ubi_eba_init_scan(ubi, si);
363 if (err)
364 goto out_wl;
365
366 ubi_scan_destroy_si(si);
367 return 0;
368
369out_wl:
370 ubi_wl_close(ubi);
371out_vtbl:
372 kfree(ubi->vtbl);
373out_si:
374 ubi_scan_destroy_si(si);
375 return err;
376}
377
378/**
379 * io_init - initialize I/O unit for a given UBI device.
380 * @ubi: UBI device description object
381 *
382 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
383 * assumed:
384 * o EC header is always at offset zero - this cannot be changed;
385 * o VID header starts just after the EC header at the closest address
386 * aligned to @io->@hdrs_min_io_size;
387 * o data starts just after the VID header at the closest address aligned to
388 * @io->@min_io_size
389 *
390 * This function returns zero in case of success and a negative error code in
391 * case of failure.
392 */
393static int io_init(struct ubi_device *ubi)
394{
395 if (ubi->mtd->numeraseregions != 0) {
396 /*
397 * Some flashes have several erase regions. Different regions
398 * may have different eraseblock size and other
399 * characteristics. It looks like mostly multi-region flashes
400 * have one "main" region and one or more small regions to
401 * store boot loader code or boot parameters or whatever. I
402 * guess we should just pick the largest region. But this is
403 * not implemented.
404 */
405 ubi_err("multiple regions, not implemented");
406 return -EINVAL;
407 }
408
409 /*
410 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
411 * physical eraseblocks maximum.
412 */
413
414 ubi->peb_size = ubi->mtd->erasesize;
415 ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize;
416 ubi->flash_size = ubi->mtd->size;
417
418 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
419 ubi->bad_allowed = 1;
420
421 ubi->min_io_size = ubi->mtd->writesize;
422 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
423
424 /* Make sure minimal I/O unit is power of 2 */
425 if (ubi->min_io_size == 0 ||
426 (ubi->min_io_size & (ubi->min_io_size - 1))) {
427 ubi_err("bad min. I/O unit");
428 return -EINVAL;
429 }
430
431 ubi_assert(ubi->hdrs_min_io_size > 0);
432 ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
433 ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
434
435 /* Calculate default aligned sizes of EC and VID headers */
436 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
437 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
438
439 dbg_msg("min_io_size %d", ubi->min_io_size);
440 dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
441 dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
442 dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
443
444 if (ubi->vid_hdr_offset == 0)
445 /* Default offset */
446 ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
447 ubi->ec_hdr_alsize;
448 else {
449 ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
450 ~(ubi->hdrs_min_io_size - 1);
451 ubi->vid_hdr_shift = ubi->vid_hdr_offset -
452 ubi->vid_hdr_aloffset;
453 }
454
455 /* Similar for the data offset */
456 if (ubi->leb_start == 0) {
457 ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize;
458 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
459 }
460
461 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
462 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
463 dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
464 dbg_msg("leb_start %d", ubi->leb_start);
465
466 /* The shift must be aligned to 32-bit boundary */
467 if (ubi->vid_hdr_shift % 4) {
468 ubi_err("unaligned VID header shift %d",
469 ubi->vid_hdr_shift);
470 return -EINVAL;
471 }
472
473 /* Check sanity */
474 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
475 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
476 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
477 ubi->leb_start % ubi->min_io_size) {
478 ubi_err("bad VID header (%d) or data offsets (%d)",
479 ubi->vid_hdr_offset, ubi->leb_start);
480 return -EINVAL;
481 }
482
483 /*
484 * It may happen that EC and VID headers are situated in one minimal
485 * I/O unit. In this case we can only accept this UBI image in
486 * read-only mode.
487 */
488 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
489 ubi_warn("EC and VID headers are in the same minimal I/O unit, "
490 "switch to read-only mode");
491 ubi->ro_mode = 1;
492 }
493
494 ubi->leb_size = ubi->peb_size - ubi->leb_start;
495
496 if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
497 ubi_msg("MTD device %d is write-protected, attach in "
498 "read-only mode", ubi->mtd->index);
499 ubi->ro_mode = 1;
500 }
501
502 dbg_msg("leb_size %d", ubi->leb_size);
503 dbg_msg("ro_mode %d", ubi->ro_mode);
504
505 /*
506 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
507 * unfortunately, MTD does not provide this information. We should loop
508 * over all physical eraseblocks and invoke mtd->block_is_bad() for
509 * each physical eraseblock. So, we skip ubi->bad_peb_count
510 * uninitialized and initialize it after scanning.
511 */
512
513 return 0;
514}
515
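    /* The default layout io_init() above derives when no offsets are given can
     * be reproduced with a few lines.  The page and subpage sizes below are
     * examples, and both UBI header sizes are taken to be 64 bytes (an
     * assumption for this sketch, not something stated in the patch). */
    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            int min_io = 2048, hdrs_min_io = 512;  /* 2 KiB page, 512-byte subpage */
            int ec_hdr_alsize  = ALIGN_UP(64, hdrs_min_io);
            int vid_hdr_alsize = ALIGN_UP(64, hdrs_min_io);
            int vid_hdr_offset = ec_hdr_alsize;            /* just after EC header */
            int leb_start = ALIGN_UP(vid_hdr_offset + vid_hdr_alsize, min_io);

            printf("VID header at %d, data (leb_start) at %d\n",
                   vid_hdr_offset, leb_start);
            return 0;
    }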
516/**
517 * attach_mtd_dev - attach an MTD device.
518 * @mtd_dev: MTD device name or number string
519 * @vid_hdr_offset: VID header offset
520 * @data_offset: data offset
521 *
522 * This function attaches an MTD device to UBI. It first treats @mtd_dev as the
523 * MTD device name, and tries to open it by this name. If it is unable to open,
524 * it tries to convert @mtd_dev to an integer and open the MTD device by its
525 * number. Returns zero in case of success and a negative error code in case of
526 * failure.
527 */
528static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
529 int data_offset)
530{
531 struct ubi_device *ubi;
532 struct mtd_info *mtd;
533 int i, err;
534
535 mtd = get_mtd_device_nm(mtd_dev);
536 if (IS_ERR(mtd)) {
537 int mtd_num;
538 char *endp;
539
540 if (PTR_ERR(mtd) != -ENODEV)
541 return PTR_ERR(mtd);
542
543 /*
544		 * Probably this is not an MTD device name but an MTD device
545		 * number - check this out.
546 */
547 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
548 if (*endp != '\0' || mtd_dev == endp) {
549 ubi_err("incorrect MTD device: \"%s\"", mtd_dev);
550 return -ENODEV;
551 }
552
553 mtd = get_mtd_device(NULL, mtd_num);
554 if (IS_ERR(mtd))
555 return PTR_ERR(mtd);
556 }
557
558 /* Check if we already have the same MTD device attached */
559 for (i = 0; i < ubi_devices_cnt; i++)
560 if (ubi_devices[i]->mtd->index == mtd->index) {
561 ubi_err("mtd%d is already attached to ubi%d",
562 mtd->index, i);
563 err = -EINVAL;
564 goto out_mtd;
565 }
566
567 ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device),
568 GFP_KERNEL);
569 if (!ubi) {
570 err = -ENOMEM;
571 goto out_mtd;
572 }
573
574 ubi->ubi_num = ubi_devices_cnt;
575 ubi->mtd = mtd;
576
577 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d",
578 ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset);
579
580 ubi->vid_hdr_offset = vid_hdr_offset;
581 ubi->leb_start = data_offset;
582 err = io_init(ubi);
583 if (err)
584 goto out_free;
585
586 err = attach_by_scanning(ubi);
587 if (err) {
588 dbg_err("failed to attach by scanning, error %d", err);
589 goto out_free;
590 }
591
592 err = uif_init(ubi);
593 if (err)
594 goto out_detach;
595
596 ubi_devices_cnt += 1;
597
598 ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
599 ubi_msg("MTD device name: \"%s\"", ubi->mtd->name);
600 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
601 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
602 ubi->peb_size, ubi->peb_size >> 10);
603 ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
604 ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
605 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
606 ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
607 ubi_msg("VID header offset: %d (aligned %d)",
608 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
609 ubi_msg("data offset: %d", ubi->leb_start);
610 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
611 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
612 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
613 ubi_msg("number of user volumes: %d",
614 ubi->vol_count - UBI_INT_VOL_COUNT);
615 ubi_msg("available PEBs: %d", ubi->avail_pebs);
616 ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
617 ubi_msg("number of PEBs reserved for bad PEB handling: %d",
618 ubi->beb_rsvd_pebs);
619 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
620
621 /* Enable the background thread */
622 if (!DBG_DISABLE_BGT) {
623 ubi->thread_enabled = 1;
624 wake_up_process(ubi->bgt_thread);
625 }
626
627 return 0;
628
629out_detach:
630 ubi_eba_close(ubi);
631 ubi_wl_close(ubi);
632 kfree(ubi->vtbl);
633out_free:
634 kfree(ubi);
635out_mtd:
636 put_mtd_device(mtd);
637 ubi_devices[ubi_devices_cnt] = NULL;
638 return err;
639}
640
641/**
642 * detach_mtd_dev - detach an MTD device.
643 * @ubi: UBI device description object
644 */
645static void detach_mtd_dev(struct ubi_device *ubi)
646{
647 int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index;
648
649 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
650 uif_close(ubi);
651 ubi_eba_close(ubi);
652 ubi_wl_close(ubi);
653 kfree(ubi->vtbl);
654 put_mtd_device(ubi->mtd);
655 kfree(ubi_devices[ubi_num]);
656 ubi_devices[ubi_num] = NULL;
657 ubi_devices_cnt -= 1;
658 ubi_assert(ubi_devices_cnt >= 0);
659 ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num);
660}
661
662static int __init ubi_init(void)
663{
664 int err, i, k;
665
666 /* Ensure that EC and VID headers have correct size */
667 BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
668 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
669
670 if (mtd_devs > UBI_MAX_DEVICES) {
671 printk("UBI error: too many MTD devices, maximum is %d\n",
672 UBI_MAX_DEVICES);
673 return -EINVAL;
674 }
675
676 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
677 if (IS_ERR(ubi_class))
678 return PTR_ERR(ubi_class);
679
680 err = class_create_file(ubi_class, &ubi_version);
681 if (err)
682 goto out_class;
683
684 /* Attach MTD devices */
685 for (i = 0; i < mtd_devs; i++) {
686 struct mtd_dev_param *p = &mtd_dev_param[i];
687
688 cond_resched();
689
690 if (!p->name) {
691 dbg_err("empty name");
692 err = -EINVAL;
693 goto out_detach;
694 }
695
696 err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
697 if (err)
698 goto out_detach;
699 }
700
701 return 0;
702
703out_detach:
704 for (k = 0; k < i; k++)
705 detach_mtd_dev(ubi_devices[k]);
706 class_remove_file(ubi_class, &ubi_version);
707out_class:
708 class_destroy(ubi_class);
709 return err;
710}
711module_init(ubi_init);
712
713static void __exit ubi_exit(void)
714{
715 int i, n = ubi_devices_cnt;
716
717 for (i = 0; i < n; i++)
718 detach_mtd_dev(ubi_devices[i]);
719 class_remove_file(ubi_class, &ubi_version);
720 class_destroy(ubi_class);
721}
722module_exit(ubi_exit);
723
724/**
725 * bytes_str_to_int - convert a string representing a number of bytes to an
726 * integer.
727 * @str: the string to convert
728 *
729 * This function returns the resulting positive integer in case of success and a
730 * negative error code in case of failure.
731 */
732static int __init bytes_str_to_int(const char *str)
733{
734 char *endp;
735 unsigned long result;
736
737 result = simple_strtoul(str, &endp, 0);
738 if (str == endp || result < 0) {
739 printk("UBI error: incorrect bytes count: \"%s\"\n", str);
740 return -EINVAL;
741 }
742
743 switch (*endp) {
744 case 'G':
745 result *= 1024;
746 case 'M':
747 result *= 1024;
748 case 'K':
749 case 'k':
750 result *= 1024;
751 if (endp[1] == 'i' && (endp[2] == '\0' ||
752 endp[2] == 'B' || endp[2] == 'b'))
753 endp += 2;
754 case '\0':
755 break;
756 default:
757 printk("UBI error: incorrect bytes count: \"%s\"\n", str);
758 return -EINVAL;
759 }
760
761 return result;
762}
763
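For illustration, a hypothetical user-space re-implementation of the suffix handling above, showing the intentional switch fall-through ('G' multiplies by 1024 three times, 'M' twice, 'K'/'k' once); the sample strings below are made up.

#include <stdio.h>
#include <stdlib.h>

static long long bytes_str(const char *str)
{
	char *endp;
	long long result = strtoll(str, &endp, 0);

	switch (*endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
	case 'k':
		result *= 1024;
		/* fall through */
	case '\0':
		break;
	default:
		return -1;	/* unknown suffix */
	}
	return result;
}

int main(void)
{
	/* Prints: 512 2048 2097152 1073741824 */
	printf("%lld %lld %lld %lld\n", bytes_str("512"), bytes_str("2KiB"),
	       bytes_str("2M"), bytes_str("1GiB"));
	return 0;
}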
764/**
765 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
766 * @val: the parameter value to parse
767 * @kp: not used
768 *
769 * This function returns zero in case of success and a negative error code in
770 * case of error.
771 */
772static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
773{
774 int i, len;
775 struct mtd_dev_param *p;
776 char buf[MTD_PARAM_LEN_MAX];
777 char *pbuf = &buf[0];
778 char *tokens[3] = {NULL, NULL, NULL};
779
780 if (mtd_devs == UBI_MAX_DEVICES) {
781 printk("UBI error: too many parameters, max. is %d\n",
782 UBI_MAX_DEVICES);
783 return -EINVAL;
784 }
785
786 len = strnlen(val, MTD_PARAM_LEN_MAX);
787 if (len == MTD_PARAM_LEN_MAX) {
788 printk("UBI error: parameter \"%s\" is too long, max. is %d\n",
789 val, MTD_PARAM_LEN_MAX);
790 return -EINVAL;
791 }
792
793 if (len == 0) {
794 printk("UBI warning: empty 'mtd=' parameter - ignored\n");
795 return 0;
796 }
797
798 strcpy(buf, val);
799
800 /* Get rid of the final newline */
801 if (buf[len - 1] == '\n')
802 buf[len - 1] = 0;
803
804 for (i = 0; i < 3; i++)
805 tokens[i] = strsep(&pbuf, ",");
806
807 if (pbuf) {
808 printk("UBI error: too many arguments at \"%s\"\n", val);
809 return -EINVAL;
810 }
811
812 if (tokens[0] == '\0')
813 return -EINVAL;
814
815 p = &mtd_dev_param[mtd_devs];
816 strcpy(&p->name[0], tokens[0]);
817
818 if (tokens[1])
819 p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
820 if (tokens[2])
821 p->data_offs = bytes_str_to_int(tokens[2]);
822
823 if (p->vid_hdr_offs < 0)
824 return p->vid_hdr_offs;
825 if (p->data_offs < 0)
826 return p->data_offs;
827
828 mtd_devs += 1;
829 return 0;
830}
831
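The splitting above relies on strsep(); a minimal user-space sketch of how a hypothetical mtd=content,1984,2048 value would be tokenized:

#define _DEFAULT_SOURCE		/* for strsep() with glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "content,1984,2048";	/* hypothetical parameter value */
	char *pbuf = buf;
	char *tokens[3] = { NULL, NULL, NULL };
	int i;

	for (i = 0; i < 3; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* pbuf is now NULL, so the "too many arguments" check would pass. */
	/* Prints: name "content", vid_hdr_offs "1984", data_offs "2048" */
	printf("name \"%s\", vid_hdr_offs \"%s\", data_offs \"%s\"\n",
	       tokens[0], tokens[1], tokens[2]);
	return 0;
}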
832module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
833MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
834 "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. "
835 "Multiple \"mtd\" parameters may be specified.\n"
836 "MTD devices may be specified by their number or name. "
837 "Optional \"vid_hdr_offs\" and \"data_offs\" parameters "
838 "specify UBI VID header position and data starting "
839 "position to be used by UBI.\n"
840		      "Example: mtd=content,1984,2048 mtd=4 - attach MTD device "
841 "with name content using VID header offset 1984 and data "
842 "start 2048, and MTD device number 4 using default "
843 "offsets");
844
845MODULE_VERSION(__stringify(UBI_VERSION));
846MODULE_DESCRIPTION("UBI - Unsorted Block Images");
847MODULE_AUTHOR("Artem Bityutskiy");
848MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
new file mode 100644
index 000000000000..6612eb79bf17
--- /dev/null
+++ b/drivers/mtd/ubi/cdev.c
@@ -0,0 +1,722 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * This file includes implementation of UBI character device operations.
23 *
24 * There are two kinds of character devices in UBI: UBI character devices and
25 * UBI volume character devices. UBI character devices allow users to
26 * manipulate whole volumes: create, remove, and re-size them. Volume character
27 * devices provide volume I/O capabilities.
28 *
29 * Major and minor numbers are assigned dynamically to both UBI and volume
30 * character devices.
31 */
32
33#include <linux/module.h>
34#include <linux/stat.h>
35#include <linux/ioctl.h>
36#include <linux/capability.h>
37#include <mtd/ubi-user.h>
38#include <asm/uaccess.h>
39#include <asm/div64.h>
40#include "ubi.h"
41
42/*
43 * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
44 * logical eraseblock erase is a debug-only feature).
45 */
46#define UBI_CDEV_IOC_MAX_SEQ 2
47#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
48#define VOL_CDEV_IOC_MAX_SEQ 1
49#else
50#define VOL_CDEV_IOC_MAX_SEQ 2
51#endif
52
53/**
54 * major_to_device - get UBI device object by character device major number.
55 * @major: major number
56 *
57 * This function returns a pointer to the UBI device object.
58 */
59static struct ubi_device *major_to_device(int major)
60{
61 int i;
62
63 for (i = 0; i < ubi_devices_cnt; i++)
64 if (ubi_devices[i] && ubi_devices[i]->major == major)
65 return ubi_devices[i];
66 BUG();
67}
68
69/**
70 * get_exclusive - get exclusive access to a UBI volume.
71 * @desc: volume descriptor
72 *
73 * This function changes UBI volume open mode to "exclusive". Returns previous
74 * mode value (positive integer) in case of success and a negative error code
75 * in case of failure.
76 */
77static int get_exclusive(struct ubi_volume_desc *desc)
78{
79 int users, err;
80 struct ubi_volume *vol = desc->vol;
81
82 spin_lock(&vol->ubi->volumes_lock);
83 users = vol->readers + vol->writers + vol->exclusive;
84 ubi_assert(users > 0);
85 if (users > 1) {
86 dbg_err("%d users for volume %d", users, vol->vol_id);
87 err = -EBUSY;
88 } else {
89 vol->readers = vol->writers = 0;
90 vol->exclusive = 1;
91 err = desc->mode;
92 desc->mode = UBI_EXCLUSIVE;
93 }
94 spin_unlock(&vol->ubi->volumes_lock);
95
96 return err;
97}
98
99/**
100 * revoke_exclusive - revoke exclusive mode.
101 * @desc: volume descriptor
102 * @mode: new mode to switch to
103 */
104static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
105{
106 struct ubi_volume *vol = desc->vol;
107
108 spin_lock(&vol->ubi->volumes_lock);
109 ubi_assert(vol->readers == 0 && vol->writers == 0);
110 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
111 vol->exclusive = 0;
112 if (mode == UBI_READONLY)
113 vol->readers = 1;
114 else if (mode == UBI_READWRITE)
115 vol->writers = 1;
116 else
117 vol->exclusive = 1;
118 spin_unlock(&vol->ubi->volumes_lock);
119
120 desc->mode = mode;
121}
122
123static int vol_cdev_open(struct inode *inode, struct file *file)
124{
125 struct ubi_volume_desc *desc;
126 const struct ubi_device *ubi = major_to_device(imajor(inode));
127 int vol_id = iminor(inode) - 1;
128 int mode;
129
130 if (file->f_mode & FMODE_WRITE)
131 mode = UBI_READWRITE;
132 else
133 mode = UBI_READONLY;
134
135 dbg_msg("open volume %d, mode %d", vol_id, mode);
136
137 desc = ubi_open_volume(ubi->ubi_num, vol_id, mode);
138 if (IS_ERR(desc))
139 return PTR_ERR(desc);
140
141 file->private_data = desc;
142 return 0;
143}
144
145static int vol_cdev_release(struct inode *inode, struct file *file)
146{
147 struct ubi_volume_desc *desc = file->private_data;
148 struct ubi_volume *vol = desc->vol;
149
150 dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode);
151
152 if (vol->updating) {
153 ubi_warn("update of volume %d not finished, volume is damaged",
154 vol->vol_id);
155 vol->updating = 0;
156 kfree(vol->upd_buf);
157 }
158
159 ubi_close_volume(desc);
160 return 0;
161}
162
163static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
164{
165 struct ubi_volume_desc *desc = file->private_data;
166 struct ubi_volume *vol = desc->vol;
167 loff_t new_offset;
168
169 if (vol->updating) {
170 /* Update is in progress, seeking is prohibited */
171 dbg_err("updating");
172 return -EBUSY;
173 }
174
175 switch (origin) {
176 case 0: /* SEEK_SET */
177 new_offset = offset;
178 break;
179 case 1: /* SEEK_CUR */
180 new_offset = file->f_pos + offset;
181 break;
182 case 2: /* SEEK_END */
183 new_offset = vol->used_bytes + offset;
184 break;
185 default:
186 return -EINVAL;
187 }
188
189 if (new_offset < 0 || new_offset > vol->used_bytes) {
190 dbg_err("bad seek %lld", new_offset);
191 return -EINVAL;
192 }
193
194 dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld",
195 vol->vol_id, offset, origin, new_offset);
196
197 file->f_pos = new_offset;
198 return new_offset;
199}
200
201static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
202 loff_t *offp)
203{
204 struct ubi_volume_desc *desc = file->private_data;
205 struct ubi_volume *vol = desc->vol;
206 struct ubi_device *ubi = vol->ubi;
207 int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size;
208 size_t count_save = count;
209 void *tbuf;
210 uint64_t tmp;
211
212 dbg_msg("read %zd bytes from offset %lld of volume %d",
213 count, *offp, vol_id);
214
215 if (vol->updating) {
216 dbg_err("updating");
217 return -EBUSY;
218 }
219 if (vol->upd_marker) {
220 dbg_err("damaged volume, update marker is set");
221 return -EBADF;
222 }
223 if (*offp == vol->used_bytes || count == 0)
224 return 0;
225
226 if (vol->corrupted)
227 dbg_msg("read from corrupted volume %d", vol_id);
228
229 if (*offp + count > vol->used_bytes)
230 count_save = count = vol->used_bytes - *offp;
231
232 tbuf_size = vol->usable_leb_size;
233 if (count < tbuf_size)
234 tbuf_size = ALIGN(count, ubi->min_io_size);
235 tbuf = kmalloc(tbuf_size, GFP_KERNEL);
236 if (!tbuf)
237 return -ENOMEM;
238
239 len = count > tbuf_size ? tbuf_size : count;
240
241 tmp = *offp;
242 off = do_div(tmp, vol->usable_leb_size);
243 lnum = tmp;
244
245 do {
246 cond_resched();
247
248 if (off + len >= vol->usable_leb_size)
249 len = vol->usable_leb_size - off;
250
251 err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0);
252 if (err)
253 break;
254
255 off += len;
256 if (off == vol->usable_leb_size) {
257 lnum += 1;
258 off -= vol->usable_leb_size;
259 }
260
261 count -= len;
262 *offp += len;
263
264 err = copy_to_user(buf, tbuf, len);
265 if (err) {
266 err = -EFAULT;
267 break;
268 }
269
270 buf += len;
271 len = count > tbuf_size ? tbuf_size : count;
272 } while (count);
273
274 kfree(tbuf);
275 return err ? err : count_save - count;
276}
277
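A worked example of the offset decomposition used above (do_div() leaves the quotient in its first argument and returns the remainder), mapping a byte offset to a (LEB number, offset within LEB) pair; the 126976-byte usable LEB size is hypothetical, e.g. a 128 KiB PEB minus 4 KiB of headers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 300000;		/* hypothetical file position */
	uint32_t usable_leb_size = 126976;	/* hypothetical */

	uint32_t off = offset % usable_leb_size;	/* what do_div() returns */
	uint64_t lnum = offset / usable_leb_size;	/* what remains in tmp */

	/* Prints: lnum 2, off 46048 */
	printf("lnum %llu, off %u\n", (unsigned long long)lnum, off);
	return 0;
}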
278#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
279
280/*
281 * This function allows direct writes to dynamic UBI volumes, without
282 * issuing the volume update operation. It is available only as a debugging
283 * feature and is very useful for testing UBI.
284 */
285static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
286 size_t count, loff_t *offp)
287{
288 struct ubi_volume_desc *desc = file->private_data;
289 struct ubi_volume *vol = desc->vol;
290 struct ubi_device *ubi = vol->ubi;
291 int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0;
292 size_t count_save = count;
293 char *tbuf;
294 uint64_t tmp;
295
296 dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
297 count, *offp, desc->vol->vol_id);
298
299 if (vol->vol_type == UBI_STATIC_VOLUME)
300 return -EROFS;
301
302 tmp = *offp;
303 off = do_div(tmp, vol->usable_leb_size);
304 lnum = tmp;
305
306 if (off % ubi->min_io_size) {
307 dbg_err("unaligned position");
308 return -EINVAL;
309 }
310
311 if (*offp + count > vol->used_bytes)
312 count_save = count = vol->used_bytes - *offp;
313
314 /* We can write only in fractions of the minimum I/O unit */
315 if (count % ubi->min_io_size) {
316 dbg_err("unaligned write length");
317 return -EINVAL;
318 }
319
320 tbuf_size = vol->usable_leb_size;
321 if (count < tbuf_size)
322 tbuf_size = ALIGN(count, ubi->min_io_size);
323 tbuf = kmalloc(tbuf_size, GFP_KERNEL);
324 if (!tbuf)
325 return -ENOMEM;
326
327 len = count > tbuf_size ? tbuf_size : count;
328
329 while (count) {
330 cond_resched();
331
332 if (off + len >= vol->usable_leb_size)
333 len = vol->usable_leb_size - off;
334
335 err = copy_from_user(tbuf, buf, len);
336 if (err) {
337 err = -EFAULT;
338 break;
339 }
340
341 err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len,
342 UBI_UNKNOWN);
343 if (err)
344 break;
345
346 off += len;
347 if (off == vol->usable_leb_size) {
348 lnum += 1;
349 off -= vol->usable_leb_size;
350 }
351
352 count -= len;
353 *offp += len;
354 buf += len;
355 len = count > tbuf_size ? tbuf_size : count;
356 }
357
358 kfree(tbuf);
359 return err ? err : count_save - count;
360}
361
362#else
363#define vol_cdev_direct_write(file, buf, count, offp) -EPERM
364#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */
365
366static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
367 size_t count, loff_t *offp)
368{
369 int err = 0;
370 struct ubi_volume_desc *desc = file->private_data;
371 struct ubi_volume *vol = desc->vol;
372 struct ubi_device *ubi = vol->ubi;
373
374 if (!vol->updating)
375 return vol_cdev_direct_write(file, buf, count, offp);
376
377 err = ubi_more_update_data(ubi, vol->vol_id, buf, count);
378 if (err < 0) {
379 ubi_err("cannot write %zd bytes of update data", count);
380 return err;
381 }
382
383 if (err) {
384 /*
385 * Update is finished, @err contains number of actually written
386 * bytes now.
387 */
388 count = err;
389
390 err = ubi_check_volume(ubi, vol->vol_id);
391 if (err < 0)
392 return err;
393
394 if (err) {
395 ubi_warn("volume %d on UBI device %d is corrupted",
396 vol->vol_id, ubi->ubi_num);
397 vol->corrupted = 1;
398 }
399 vol->checked = 1;
400 revoke_exclusive(desc, UBI_READWRITE);
401 }
402
403 *offp += count;
404 return count;
405}
406
407static int vol_cdev_ioctl(struct inode *inode, struct file *file,
408 unsigned int cmd, unsigned long arg)
409{
410 int err = 0;
411 struct ubi_volume_desc *desc = file->private_data;
412 struct ubi_volume *vol = desc->vol;
413 struct ubi_device *ubi = vol->ubi;
414 void __user *argp = (void __user *)arg;
415
416 if (_IOC_NR(cmd) > VOL_CDEV_IOC_MAX_SEQ ||
417 _IOC_TYPE(cmd) != UBI_VOL_IOC_MAGIC)
418 return -ENOTTY;
419
420 if (_IOC_DIR(cmd) && _IOC_READ)
421 err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
422 else if (_IOC_DIR(cmd) && _IOC_WRITE)
423 err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
424 if (err)
425 return -EFAULT;
426
427 switch (cmd) {
428
429 /* Volume update command */
430 case UBI_IOCVOLUP:
431 {
432 int64_t bytes, rsvd_bytes;
433
434 if (!capable(CAP_SYS_RESOURCE)) {
435 err = -EPERM;
436 break;
437 }
438
439 err = copy_from_user(&bytes, argp, sizeof(int64_t));
440 if (err) {
441 err = -EFAULT;
442 break;
443 }
444
445 if (desc->mode == UBI_READONLY) {
446 err = -EROFS;
447 break;
448 }
449
450 rsvd_bytes = vol->reserved_pebs * (ubi->leb_size-vol->data_pad);
451 if (bytes < 0 || bytes > rsvd_bytes) {
452 err = -EINVAL;
453 break;
454 }
455
456 err = get_exclusive(desc);
457 if (err < 0)
458 break;
459
460 err = ubi_start_update(ubi, vol->vol_id, bytes);
461 if (bytes == 0)
462 revoke_exclusive(desc, UBI_READWRITE);
463
464 file->f_pos = 0;
465 break;
466 }
467
468#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
469 /* Logical eraseblock erasure command */
470 case UBI_IOCEBER:
471 {
472 int32_t lnum;
473
474 err = __get_user(lnum, (__user int32_t *)argp);
475 if (err) {
476 err = -EFAULT;
477 break;
478 }
479
480 if (desc->mode == UBI_READONLY) {
481 err = -EROFS;
482 break;
483 }
484
485 if (lnum < 0 || lnum >= vol->reserved_pebs) {
486 err = -EINVAL;
487 break;
488 }
489
490 if (vol->vol_type != UBI_DYNAMIC_VOLUME) {
491 err = -EROFS;
492 break;
493 }
494
495 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
496 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum);
497 if (err)
498 break;
499
500 err = ubi_wl_flush(ubi);
501 break;
502 }
503#endif
504
505 default:
506 err = -ENOTTY;
507 break;
508 }
509
510 return err;
511}
512
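To illustrate the update path handled by UBI_IOCVOLUP and vol_cdev_write() above, here is a hedged user-space sketch of updating a volume; the device node path, image size and data are hypothetical, error handling is trimmed, and CAP_SYS_RESOURCE is required as checked in the handler:

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>	/* UBI_IOCVOLUP */

int main(void)
{
	int fd = open("/dev/ubi0_1", O_RDWR);	/* hypothetical volume node */
	int64_t bytes = 1048576;		/* hypothetical image size */
	char buf[4096] = { 0 };			/* image data would go here */
	int64_t done = 0;

	/* Announce the total update length (a zero length is completed
	 * immediately, as in the handler above). */
	ioctl(fd, UBI_IOCVOLUP, &bytes);

	/* Feed the image through write(); the update completes when the
	 * announced number of bytes has been received. */
	while (done < bytes)
		done += write(fd, buf, sizeof(buf));

	close(fd);
	return 0;
}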
513/**
514 * verify_mkvol_req - verify volume creation request.
515 * @ubi: UBI device description object
516 * @req: the request to check
517 *
518 * This function returns zero if the request is correct, and %-EINVAL if not.
519 */
520static int verify_mkvol_req(const struct ubi_device *ubi,
521 const struct ubi_mkvol_req *req)
522{
523 int n, err = -EINVAL;
524
525 if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
526 req->name_len < 0)
527 goto bad;
528
529 if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
530 req->vol_id != UBI_VOL_NUM_AUTO)
531 goto bad;
532
533 if (req->alignment == 0)
534 goto bad;
535
536 if (req->bytes == 0)
537 goto bad;
538
539 if (req->vol_type != UBI_DYNAMIC_VOLUME &&
540 req->vol_type != UBI_STATIC_VOLUME)
541 goto bad;
542
543 if (req->alignment > ubi->leb_size)
544 goto bad;
545
546 n = req->alignment % ubi->min_io_size;
547 if (req->alignment != 1 && n)
548 goto bad;
549
550 if (req->name_len > UBI_VOL_NAME_MAX) {
551 err = -ENAMETOOLONG;
552 goto bad;
553 }
554
555 return 0;
556
557bad:
558 dbg_err("bad volume creation request");
559 ubi_dbg_dump_mkvol_req(req);
560 return err;
561}
562
563/**
564 * verify_rsvol_req - verify volume re-size request.
565 * @ubi: UBI device description object
566 * @req: the request to check
567 *
568 * This function returns zero if the request is correct, and %-EINVAL if not.
569 */
570static int verify_rsvol_req(const struct ubi_device *ubi,
571 const struct ubi_rsvol_req *req)
572{
573 if (req->bytes <= 0)
574 return -EINVAL;
575
576 if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
577 return -EINVAL;
578
579 return 0;
580}
581
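The UBI_IOCRSVOL handler below converts the requested byte count into a number of reserved eraseblocks by rounding up; a worked example with a hypothetical 126976-byte usable LEB size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t bytes = 300000;			/* hypothetical resize request */
	uint32_t usable_leb_size = 126976;	/* hypothetical */

	/* Same result as: pebs = !!do_div(tmp, usable_leb_size); pebs += tmp; */
	int pebs = (bytes + usable_leb_size - 1) / usable_leb_size;

	/* Prints: 3 PEBs for 300000 bytes */
	printf("%d PEBs for %lld bytes\n", pebs, (long long)bytes);
	return 0;
}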
582static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
583 unsigned int cmd, unsigned long arg)
584{
585 int err = 0;
586 struct ubi_device *ubi;
587 struct ubi_volume_desc *desc;
588 void __user *argp = (void __user *)arg;
589
590 if (_IOC_NR(cmd) > UBI_CDEV_IOC_MAX_SEQ ||
591 _IOC_TYPE(cmd) != UBI_IOC_MAGIC)
592 return -ENOTTY;
593
594 if (_IOC_DIR(cmd) && _IOC_READ)
595 err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
596 else if (_IOC_DIR(cmd) && _IOC_WRITE)
597 err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
598 if (err)
599 return -EFAULT;
600
601 if (!capable(CAP_SYS_RESOURCE))
602 return -EPERM;
603
604 ubi = major_to_device(imajor(inode));
605 if (IS_ERR(ubi))
606 return PTR_ERR(ubi);
607
608 switch (cmd) {
609 /* Create volume command */
610 case UBI_IOCMKVOL:
611 {
612 struct ubi_mkvol_req req;
613
614 dbg_msg("create volume");
615 err = __copy_from_user(&req, argp,
616 sizeof(struct ubi_mkvol_req));
617 if (err) {
618 err = -EFAULT;
619 break;
620 }
621
622 err = verify_mkvol_req(ubi, &req);
623 if (err)
624 break;
625
626 req.name[req.name_len] = '\0';
627
628 err = ubi_create_volume(ubi, &req);
629 if (err)
630 break;
631
632 err = __put_user(req.vol_id, (__user int32_t *)argp);
633 if (err)
634 err = -EFAULT;
635
636 break;
637 }
638
639 /* Remove volume command */
640 case UBI_IOCRMVOL:
641 {
642 int vol_id;
643
644 dbg_msg("remove volume");
645 err = __get_user(vol_id, (__user int32_t *)argp);
646 if (err) {
647 err = -EFAULT;
648 break;
649 }
650
651 desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
652 if (IS_ERR(desc)) {
653 err = PTR_ERR(desc);
654 break;
655 }
656
657 err = ubi_remove_volume(desc);
658 if (err)
659 ubi_close_volume(desc);
660
661 break;
662 }
663
664 /* Re-size volume command */
665 case UBI_IOCRSVOL:
666 {
667 int pebs;
668 uint64_t tmp;
669 struct ubi_rsvol_req req;
670
671 dbg_msg("re-size volume");
672 err = __copy_from_user(&req, argp,
673 sizeof(struct ubi_rsvol_req));
674 if (err) {
675 err = -EFAULT;
676 break;
677 }
678
679 err = verify_rsvol_req(ubi, &req);
680 if (err)
681 break;
682
683 desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
684 if (IS_ERR(desc)) {
685 err = PTR_ERR(desc);
686 break;
687 }
688
689 tmp = req.bytes;
690 pebs = !!do_div(tmp, desc->vol->usable_leb_size);
691 pebs += tmp;
692
693 err = ubi_resize_volume(desc, pebs);
694 ubi_close_volume(desc);
695 break;
696 }
697
698 default:
699 err = -ENOTTY;
700 break;
701 }
702
703 return err;
704}
705
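A hedged user-space sketch of the volume-creation ioctl handled above; the device node, sizes and the volume name are hypothetical, error handling is trimmed, and CAP_SYS_RESOURCE is required as checked above:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>	/* struct ubi_mkvol_req, UBI_IOCMKVOL */

int main(void)
{
	struct ubi_mkvol_req req;
	int fd = open("/dev/ubi0", O_RDWR);	/* hypothetical UBI device node */

	memset(&req, 0, sizeof(req));
	req.vol_id = UBI_VOL_NUM_AUTO;		/* let UBI pick a volume ID */
	req.alignment = 1;
	req.bytes = 1048576;			/* hypothetical volume size */
	req.vol_type = UBI_DYNAMIC_VOLUME;
	req.name_len = strlen("test");
	strcpy(req.name, "test");

	/* On success the assigned volume ID is written back (see the
	 * __put_user() call in the handler above). */
	ioctl(fd, UBI_IOCMKVOL, &req);
	printf("created volume %d\n", req.vol_id);

	close(fd);
	return 0;
}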
706/* UBI character device operations */
707struct file_operations ubi_cdev_operations = {
708 .owner = THIS_MODULE,
709 .ioctl = ubi_cdev_ioctl,
710 .llseek = no_llseek
711};
712
713/* UBI volume character device operations */
714struct file_operations ubi_vol_cdev_operations = {
715 .owner = THIS_MODULE,
716 .open = vol_cdev_open,
717 .release = vol_cdev_release,
718 .llseek = vol_cdev_llseek,
719 .read = vol_cdev_read,
720 .write = vol_cdev_write,
721 .ioctl = vol_cdev_ioctl
722};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
new file mode 100644
index 000000000000..86364221fafe
--- /dev/null
+++ b/drivers/mtd/ubi/debug.c
@@ -0,0 +1,224 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * Here we keep all the UBI debugging stuff which should normally be disabled
23 * and compiled-out, but it is extremely helpful when hunting bugs or doing big
24 * changes.
25 */
26
27#ifdef CONFIG_MTD_UBI_DEBUG_MSG
28
29#include "ubi.h"
30
31/**
32 * ubi_dbg_dump_ec_hdr - dump an erase counter header.
33 * @ec_hdr: the erase counter header to dump
34 */
35void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
36{
37 dbg_msg("erase counter header dump:");
38 dbg_msg("magic %#08x", ubi32_to_cpu(ec_hdr->magic));
39 dbg_msg("version %d", (int)ec_hdr->version);
40 dbg_msg("ec %llu", (long long)ubi64_to_cpu(ec_hdr->ec));
41 dbg_msg("vid_hdr_offset %d", ubi32_to_cpu(ec_hdr->vid_hdr_offset));
42 dbg_msg("data_offset %d", ubi32_to_cpu(ec_hdr->data_offset));
43 dbg_msg("hdr_crc %#08x", ubi32_to_cpu(ec_hdr->hdr_crc));
44 dbg_msg("erase counter header hexdump:");
45 ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE);
46}
47
48/**
49 * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
50 * @vid_hdr: the volume identifier header to dump
51 */
52void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
53{
54 dbg_msg("volume identifier header dump:");
55 dbg_msg("magic %08x", ubi32_to_cpu(vid_hdr->magic));
56 dbg_msg("version %d", (int)vid_hdr->version);
57 dbg_msg("vol_type %d", (int)vid_hdr->vol_type);
58 dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag);
59 dbg_msg("compat %d", (int)vid_hdr->compat);
60 dbg_msg("vol_id %d", ubi32_to_cpu(vid_hdr->vol_id));
61 dbg_msg("lnum %d", ubi32_to_cpu(vid_hdr->lnum));
62 dbg_msg("leb_ver %u", ubi32_to_cpu(vid_hdr->leb_ver));
63 dbg_msg("data_size %d", ubi32_to_cpu(vid_hdr->data_size));
64 dbg_msg("used_ebs %d", ubi32_to_cpu(vid_hdr->used_ebs));
65 dbg_msg("data_pad %d", ubi32_to_cpu(vid_hdr->data_pad));
66 dbg_msg("sqnum %llu",
67 (unsigned long long)ubi64_to_cpu(vid_hdr->sqnum));
68 dbg_msg("hdr_crc %08x", ubi32_to_cpu(vid_hdr->hdr_crc));
69 dbg_msg("volume identifier header hexdump:");
70}
71
72/**
73 * ubi_dbg_dump_vol_info - dump volume information.
74 * @vol: UBI volume description object
75 */
76void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
77{
78 dbg_msg("volume information dump:");
79 dbg_msg("vol_id %d", vol->vol_id);
80 dbg_msg("reserved_pebs %d", vol->reserved_pebs);
81 dbg_msg("alignment %d", vol->alignment);
82 dbg_msg("data_pad %d", vol->data_pad);
83 dbg_msg("vol_type %d", vol->vol_type);
84 dbg_msg("name_len %d", vol->name_len);
85 dbg_msg("usable_leb_size %d", vol->usable_leb_size);
86 dbg_msg("used_ebs %d", vol->used_ebs);
87 dbg_msg("used_bytes %lld", vol->used_bytes);
88 dbg_msg("last_eb_bytes %d", vol->last_eb_bytes);
89 dbg_msg("corrupted %d", vol->corrupted);
90 dbg_msg("upd_marker %d", vol->upd_marker);
91
92 if (vol->name_len <= UBI_VOL_NAME_MAX &&
93 strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
94 dbg_msg("name %s", vol->name);
95 } else {
96 dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
97 vol->name[0], vol->name[1], vol->name[2],
98 vol->name[3], vol->name[4]);
99 }
100}
101
102/**
103 * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
104 * @r: the object to dump
105 * @idx: volume table index
106 */
107void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
108{
109 int name_len = ubi16_to_cpu(r->name_len);
110
111 dbg_msg("volume table record %d dump:", idx);
112 dbg_msg("reserved_pebs %d", ubi32_to_cpu(r->reserved_pebs));
113 dbg_msg("alignment %d", ubi32_to_cpu(r->alignment));
114 dbg_msg("data_pad %d", ubi32_to_cpu(r->data_pad));
115 dbg_msg("vol_type %d", (int)r->vol_type);
116 dbg_msg("upd_marker %d", (int)r->upd_marker);
117 dbg_msg("name_len %d", name_len);
118
119 if (r->name[0] == '\0') {
120 dbg_msg("name NULL");
121 return;
122 }
123
124 if (name_len <= UBI_VOL_NAME_MAX &&
125 strnlen(&r->name[0], name_len + 1) == name_len) {
126 dbg_msg("name %s", &r->name[0]);
127 } else {
128 dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
129 r->name[0], r->name[1], r->name[2], r->name[3],
130 r->name[4]);
131 }
132 dbg_msg("crc %#08x", ubi32_to_cpu(r->crc));
133}
134
135/**
136 * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
137 * @sv: the object to dump
138 */
139void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
140{
141 dbg_msg("volume scanning information dump:");
142 dbg_msg("vol_id %d", sv->vol_id);
143 dbg_msg("highest_lnum %d", sv->highest_lnum);
144 dbg_msg("leb_count %d", sv->leb_count);
145 dbg_msg("compat %d", sv->compat);
146 dbg_msg("vol_type %d", sv->vol_type);
147 dbg_msg("used_ebs %d", sv->used_ebs);
148 dbg_msg("last_data_size %d", sv->last_data_size);
149 dbg_msg("data_pad %d", sv->data_pad);
150}
151
152/**
153 * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
154 * @seb: the object to dump
155 * @type: object type: 0 - not corrupted, 1 - corrupted
156 */
157void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
158{
159 dbg_msg("eraseblock scanning information dump:");
160 dbg_msg("ec %d", seb->ec);
161 dbg_msg("pnum %d", seb->pnum);
162 if (type == 0) {
163 dbg_msg("lnum %d", seb->lnum);
164 dbg_msg("scrub %d", seb->scrub);
165 dbg_msg("sqnum %llu", seb->sqnum);
166 dbg_msg("leb_ver %u", seb->leb_ver);
167 }
168}
169
170/**
171 * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
172 * @req: the object to dump
173 */
174void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
175{
176 char nm[17];
177
178 dbg_msg("volume creation request dump:");
179 dbg_msg("vol_id %d", req->vol_id);
180 dbg_msg("alignment %d", req->alignment);
181 dbg_msg("bytes %lld", (long long)req->bytes);
182 dbg_msg("vol_type %d", req->vol_type);
183 dbg_msg("name_len %d", req->name_len);
184
185 memcpy(nm, req->name, 16);
186 nm[16] = 0;
187 dbg_msg("the 1st 16 characters of the name: %s", nm);
188}
189
190#define BYTES_PER_LINE 32
191
192/**
193 * ubi_dbg_hexdump - dump a buffer.
194 * @ptr: the buffer to dump
195 * @size: buffer size which must be a multiple of 4 bytes
196 */
197void ubi_dbg_hexdump(const void *ptr, int size)
198{
199 int i, k = 0, rows, columns;
200 const uint8_t *p = ptr;
201
202 size = ALIGN(size, 4);
203 rows = size/BYTES_PER_LINE + size % BYTES_PER_LINE;
204 for (i = 0; i < rows; i++) {
205 int j;
206
207 cond_resched();
208 columns = min(size - k, BYTES_PER_LINE) / 4;
209 if (columns == 0)
210 break;
211 printk(KERN_DEBUG "%5d: ", i * BYTES_PER_LINE);
212 for (j = 0; j < columns; j++) {
213 int n, N;
214
215 N = size - k > 4 ? 4 : size - k;
216 for (n = 0; n < N; n++)
217 printk("%02x", p[k++]);
218 printk(" ");
219 }
220 printk("\n");
221 }
222}
223
224#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
new file mode 100644
index 000000000000..f816ad9a36c0
--- /dev/null
+++ b/drivers/mtd/ubi/debug.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21#ifndef __UBI_DEBUG_H__
22#define __UBI_DEBUG_H__
23
24#ifdef CONFIG_MTD_UBI_DEBUG
25#include <linux/random.h>
26
27#define ubi_assert(expr) BUG_ON(!(expr))
28#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
29#else
30#define ubi_assert(expr) ({})
31#define dbg_err(fmt, ...) ({})
32#endif
33
34#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
35#define DBG_DISABLE_BGT 1
36#else
37#define DBG_DISABLE_BGT 0
38#endif
39
40#ifdef CONFIG_MTD_UBI_DEBUG_MSG
41/* Generic debugging message */
42#define dbg_msg(fmt, ...) \
43 printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__)
44
45#define ubi_dbg_dump_stack() dump_stack()
46
47struct ubi_ec_hdr;
48struct ubi_vid_hdr;
49struct ubi_volume;
50struct ubi_vtbl_record;
51struct ubi_scan_volume;
52struct ubi_scan_leb;
53struct ubi_mkvol_req;
54
55void ubi_dbg_print(int type, const char *func, const char *fmt, ...);
56void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
57void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
58void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
59void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
60void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
61void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
62void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
63void ubi_dbg_hexdump(const void *buf, int size);
64
65#else
66
67#define dbg_msg(fmt, ...) ({})
68#define ubi_dbg_dump_stack() ({})
69#define ubi_dbg_print(func, fmt, ...) ({})
70#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
71#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
72#define ubi_dbg_dump_vol_info(vol) ({})
73#define ubi_dbg_dump_vtbl_record(r, idx) ({})
74#define ubi_dbg_dump_sv(sv) ({})
75#define ubi_dbg_dump_seb(seb, type) ({})
76#define ubi_dbg_dump_mkvol_req(req) ({})
77#define ubi_dbg_hexdump(buf, size) ({})
78
79#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
80
81#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
82/* Messages from the eraseblock association unit */
83#define dbg_eba(fmt, ...) \
84 printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
85 ##__VA_ARGS__)
86#else
87#define dbg_eba(fmt, ...) ({})
88#endif
89
90#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
91/* Messages from the wear-leveling unit */
92#define dbg_wl(fmt, ...) \
93 printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
94 ##__VA_ARGS__)
95#else
96#define dbg_wl(fmt, ...) ({})
97#endif
98
99#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
100/* Messages from the input/output unit */
101#define dbg_io(fmt, ...) \
102 printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
103 ##__VA_ARGS__)
104#else
105#define dbg_io(fmt, ...) ({})
106#endif
107
108#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
109/* Initialization and build messages */
110#define dbg_bld(fmt, ...) \
111 printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
112 ##__VA_ARGS__)
113#else
114#define dbg_bld(fmt, ...) ({})
115#endif
116
117#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
118/**
119 * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
120 *
121 * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
122 */
123static inline int ubi_dbg_is_bitflip(void)
124{
125 return !(random32() % 200);
126}
127#else
128#define ubi_dbg_is_bitflip() 0
129#endif
130
131#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
132/**
133 * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
134 *
135 * Returns non-zero if a write failure should be emulated, otherwise returns
136 * zero.
137 */
138static inline int ubi_dbg_is_write_failure(void)
139{
140 return !(random32() % 500);
141}
142#else
143#define ubi_dbg_is_write_failure() 0
144#endif
145
146#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
147/**
148 * ubi_dbg_is_erase_failure - if it is time to emulate an erase failure.
149 *
150 * Returns non-zero if an erase failure should be emulated, otherwise returns
151 * zero.
152 */
153static inline int ubi_dbg_is_erase_failure(void)
154{
155 return !(random32() % 400);
156}
157#else
158#define ubi_dbg_is_erase_failure() 0
159#endif
160
161#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
new file mode 100644
index 000000000000..d847ee1da3d9
--- /dev/null
+++ b/drivers/mtd/ubi/eba.c
@@ -0,0 +1,1241 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * The UBI Eraseblock Association (EBA) unit.
23 *
24 * This unit is responsible for I/O to/from logical eraseblocks.
25 *
26 * Although in this implementation the EBA table is fully kept and managed in
27 * RAM, which implies poor scalability, it might be (partially) maintained on
28 * flash in future implementations.
29 *
30 * The EBA unit implements per-logical eraseblock locking. Before accessing a
31 * logical eraseblock it is locked for reading or writing. The per-logical
32 * eraseblock locking is implemented by means of the lock tree. The lock tree
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The
34 * lock tree elements are &struct ltree_entry objects. They are indexed by
35 * (@vol_id, @lnum) pairs.
36 *
37 * EBA also maintains the global sequence counter which is incremented each
38 * time a logical eraseblock is mapped to a physical eraseblock and it is
39 * stored in the volume identifier header. This means that each VID header has
40 * a unique sequence number. The sequence number is only increased and we
41 * assume 64 bits is enough for it to never overflow.
42 */
43
44#include <linux/slab.h>
45#include <linux/crc32.h>
46#include <linux/err.h>
47#include "ubi.h"
48
49/**
50 * struct ltree_entry - an entry in the lock tree.
51 * @rb: links RB-tree nodes
52 * @vol_id: volume ID of the locked logical eraseblock
53 * @lnum: locked logical eraseblock number
54 * @users: how many tasks are using this logical eraseblock or wait for it
55 * @mutex: read/write mutex to implement read/write access serialization to
56 * the (@vol_id, @lnum) logical eraseblock
57 *
58 * When a logical eraseblock is being locked, the corresponding
59 * &struct ltree_entry object is inserted into the lock tree (@ubi->ltree).
60 */
61struct ltree_entry {
62 struct rb_node rb;
63 int vol_id;
64 int lnum;
65 int users;
66 struct rw_semaphore mutex;
67};
68
69/* Slab cache for lock-tree entries */
70static struct kmem_cache *ltree_slab;
71
72/**
73 * next_sqnum - get next sequence number.
74 * @ubi: UBI device description object
75 *
76 * This function returns the next sequence number to use, which is just the current
77 * global sequence counter value. It also increases the global sequence
78 * counter.
79 */
80static unsigned long long next_sqnum(struct ubi_device *ubi)
81{
82 unsigned long long sqnum;
83
84 spin_lock(&ubi->ltree_lock);
85 sqnum = ubi->global_sqnum++;
86 spin_unlock(&ubi->ltree_lock);
87
88 return sqnum;
89}
90
91/**
92 * ubi_get_compat - get compatibility flags of a volume.
93 * @ubi: UBI device description object
94 * @vol_id: volume ID
95 *
96 * This function returns compatibility flags for an internal volume. User
97 * volumes have no compatibility flags, so %0 is returned.
98 */
99static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
100{
101 if (vol_id == UBI_LAYOUT_VOL_ID)
102 return UBI_LAYOUT_VOLUME_COMPAT;
103 return 0;
104}
105
106/**
107 * ltree_lookup - look up the lock tree.
108 * @ubi: UBI device description object
109 * @vol_id: volume ID
110 * @lnum: logical eraseblock number
111 *
112 * This function returns a pointer to the corresponding &struct ltree_entry
113 * object if the logical eraseblock is locked and %NULL if it is not.
114 * @ubi->ltree_lock has to be locked.
115 */
116static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
117 int lnum)
118{
119 struct rb_node *p;
120
121 p = ubi->ltree.rb_node;
122 while (p) {
123 struct ltree_entry *le;
124
125 le = rb_entry(p, struct ltree_entry, rb);
126
127 if (vol_id < le->vol_id)
128 p = p->rb_left;
129 else if (vol_id > le->vol_id)
130 p = p->rb_right;
131 else {
132 if (lnum < le->lnum)
133 p = p->rb_left;
134 else if (lnum > le->lnum)
135 p = p->rb_right;
136 else
137 return le;
138 }
139 }
140
141 return NULL;
142}
143
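The lookup above and the insertion below both order lock-tree nodes by the (vol_id, lnum) pair; the same ordering, expressed as a hypothetical stand-alone comparator for illustration:

#include <stdio.h>

/* Negative, zero or positive, like memcmp(): compare by vol_id first,
 * then by lnum. Illustration only, not part of the patch. */
static int ltree_key_cmp(int vol_id_a, int lnum_a, int vol_id_b, int lnum_b)
{
	if (vol_id_a != vol_id_b)
		return vol_id_a < vol_id_b ? -1 : 1;
	if (lnum_a != lnum_b)
		return lnum_a < lnum_b ? -1 : 1;
	return 0;
}

int main(void)
{
	/* Prints: -1 1 0 */
	printf("%d %d %d\n", ltree_key_cmp(0, 5, 1, 0),
	       ltree_key_cmp(1, 7, 1, 3), ltree_key_cmp(2, 4, 2, 4));
	return 0;
}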
144/**
145 * ltree_add_entry - add new entry to the lock tree.
146 * @ubi: UBI device description object
147 * @vol_id: volume ID
148 * @lnum: logical eraseblock number
149 *
150 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to the
151 * lock tree. If such an entry is already there, its usage counter is increased.
152 * Returns a pointer to the lock tree entry or %-ENOMEM if memory allocation
153 * failed.
154 */
155static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
156 int lnum)
157{
158 struct ltree_entry *le, *le1, *le_free;
159
160 le = kmem_cache_alloc(ltree_slab, GFP_KERNEL);
161 if (!le)
162 return ERR_PTR(-ENOMEM);
163
164 le->vol_id = vol_id;
165 le->lnum = lnum;
166
167 spin_lock(&ubi->ltree_lock);
168 le1 = ltree_lookup(ubi, vol_id, lnum);
169
170 if (le1) {
171 /*
172 * This logical eraseblock is already locked. The newly
173 * allocated lock entry is not needed.
174 */
175 le_free = le;
176 le = le1;
177 } else {
178 struct rb_node **p, *parent = NULL;
179
180 /*
181 * No lock entry, add the newly allocated one to the
182 * @ubi->ltree RB-tree.
183 */
184 le_free = NULL;
185
186 p = &ubi->ltree.rb_node;
187 while (*p) {
188 parent = *p;
189 le1 = rb_entry(parent, struct ltree_entry, rb);
190
191 if (vol_id < le1->vol_id)
192 p = &(*p)->rb_left;
193 else if (vol_id > le1->vol_id)
194 p = &(*p)->rb_right;
195 else {
196 ubi_assert(lnum != le1->lnum);
197 if (lnum < le1->lnum)
198 p = &(*p)->rb_left;
199 else
200 p = &(*p)->rb_right;
201 }
202 }
203
204 rb_link_node(&le->rb, parent, p);
205 rb_insert_color(&le->rb, &ubi->ltree);
206 }
207 le->users += 1;
208 spin_unlock(&ubi->ltree_lock);
209
210 if (le_free)
211 kmem_cache_free(ltree_slab, le_free);
212
213 return le;
214}
215
216/**
217 * leb_read_lock - lock logical eraseblock for reading.
218 * @ubi: UBI device description object
219 * @vol_id: volume ID
220 * @lnum: logical eraseblock number
221 *
222 * This function locks a logical eraseblock for reading. Returns zero in case
223 * of success and a negative error code in case of failure.
224 */
225static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
226{
227 struct ltree_entry *le;
228
229 le = ltree_add_entry(ubi, vol_id, lnum);
230 if (IS_ERR(le))
231 return PTR_ERR(le);
232 down_read(&le->mutex);
233 return 0;
234}
235
236/**
237 * leb_read_unlock - unlock logical eraseblock.
238 * @ubi: UBI device description object
239 * @vol_id: volume ID
240 * @lnum: logical eraseblock number
241 */
242static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
243{
244 int free = 0;
245 struct ltree_entry *le;
246
247 spin_lock(&ubi->ltree_lock);
248 le = ltree_lookup(ubi, vol_id, lnum);
249 le->users -= 1;
250 ubi_assert(le->users >= 0);
251 if (le->users == 0) {
252 rb_erase(&le->rb, &ubi->ltree);
253 free = 1;
254 }
255 spin_unlock(&ubi->ltree_lock);
256
257 up_read(&le->mutex);
258 if (free)
259 kmem_cache_free(ltree_slab, le);
260}
261
262/**
263 * leb_write_lock - lock logical eraseblock for writing.
264 * @ubi: UBI device description object
265 * @vol_id: volume ID
266 * @lnum: logical eraseblock number
267 *
268 * This function locks a logical eraseblock for writing. Returns zero in case
269 * of success and a negative error code in case of failure.
270 */
271static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
272{
273 struct ltree_entry *le;
274
275 le = ltree_add_entry(ubi, vol_id, lnum);
276 if (IS_ERR(le))
277 return PTR_ERR(le);
278 down_write(&le->mutex);
279 return 0;
280}
281
282/**
283 * leb_write_unlock - unlock logical eraseblock.
284 * @ubi: UBI device description object
285 * @vol_id: volume ID
286 * @lnum: logical eraseblock number
287 */
288static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
289{
290 int free;
291 struct ltree_entry *le;
292
293 spin_lock(&ubi->ltree_lock);
294 le = ltree_lookup(ubi, vol_id, lnum);
295 le->users -= 1;
296 ubi_assert(le->users >= 0);
297 if (le->users == 0) {
298 rb_erase(&le->rb, &ubi->ltree);
299 free = 1;
300 } else
301 free = 0;
302 spin_unlock(&ubi->ltree_lock);
303
304 up_write(&le->mutex);
305 if (free)
306 kmem_cache_free(ltree_slab, le);
307}
308
309/**
310 * ubi_eba_unmap_leb - un-map logical eraseblock.
311 * @ubi: UBI device description object
312 * @vol_id: volume ID
313 * @lnum: logical eraseblock number
314 *
315 * This function un-maps logical eraseblock @lnum and schedules corresponding
316 * physical eraseblock for erasure. Returns zero in case of success and a
317 * negative error code in case of failure.
318 */
319int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
320{
321 int idx = vol_id2idx(ubi, vol_id), err, pnum;
322 struct ubi_volume *vol = ubi->volumes[idx];
323
324 if (ubi->ro_mode)
325 return -EROFS;
326
327 err = leb_write_lock(ubi, vol_id, lnum);
328 if (err)
329 return err;
330
331 pnum = vol->eba_tbl[lnum];
332 if (pnum < 0)
333 /* This logical eraseblock is already unmapped */
334 goto out_unlock;
335
336 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
337
338 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
339 err = ubi_wl_put_peb(ubi, pnum, 0);
340
341out_unlock:
342 leb_write_unlock(ubi, vol_id, lnum);
343 return err;
344}
345
346/**
347 * ubi_eba_read_leb - read data.
348 * @ubi: UBI device description object
349 * @vol_id: volume ID
350 * @lnum: logical eraseblock number
351 * @buf: buffer to store the read data
352 * @offset: offset from where to read
353 * @len: how many bytes to read
354 * @check: data CRC check flag
355 *
356 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
357 * bytes. The @check flag only makes sense for static volumes and forces
358 * eraseblock data CRC checking.
359 *
360 * In case of success this function returns zero. For a static volume,
361 * %-EBADMSG is returned if the data CRC does not match. %-EBADMSG may also be
362 * returned for any volume type if an ECC error was detected by the MTD device
363 * driver. Other negative error codes may be returned in case of other errors.
364 */
365int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
366 int offset, int len, int check)
367{
368 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
369 struct ubi_vid_hdr *vid_hdr;
370 struct ubi_volume *vol = ubi->volumes[idx];
371 uint32_t crc, crc1;
372
373 err = leb_read_lock(ubi, vol_id, lnum);
374 if (err)
375 return err;
376
377 pnum = vol->eba_tbl[lnum];
378 if (pnum < 0) {
379 /*
380 * The logical eraseblock is not mapped, fill the whole buffer
381 * with 0xFF bytes. The exception is static volumes for which
382 * it is an error to read unmapped logical eraseblocks.
383 */
384 dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
385 len, offset, vol_id, lnum);
386 leb_read_unlock(ubi, vol_id, lnum);
387 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
388 memset(buf, 0xFF, len);
389 return 0;
390 }
391
392 dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
393 len, offset, vol_id, lnum, pnum);
394
395 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
396 check = 0;
397
398retry:
399 if (check) {
400 vid_hdr = ubi_zalloc_vid_hdr(ubi);
401 if (!vid_hdr) {
402 err = -ENOMEM;
403 goto out_unlock;
404 }
405
406 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
407 if (err && err != UBI_IO_BITFLIPS) {
408 if (err > 0) {
409 /*
410 * The header is either absent or corrupted.
411 * The former case means there is a bug -
412 * switch to read-only mode just in case.
413 * The latter case means a real corruption - we
414 * may try to recover data. FIXME: but this is
415 * not implemented.
416 */
417 if (err == UBI_IO_BAD_VID_HDR) {
418 ubi_warn("bad VID header at PEB %d, LEB"
419 "%d:%d", pnum, vol_id, lnum);
420 err = -EBADMSG;
421 } else
422 ubi_ro_mode(ubi);
423 }
424 goto out_free;
425 } else if (err == UBI_IO_BITFLIPS)
426 scrub = 1;
427
428 ubi_assert(lnum < ubi32_to_cpu(vid_hdr->used_ebs));
429 ubi_assert(len == ubi32_to_cpu(vid_hdr->data_size));
430
431 crc = ubi32_to_cpu(vid_hdr->data_crc);
432 ubi_free_vid_hdr(ubi, vid_hdr);
433 }
434
435 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
436 if (err) {
437 if (err == UBI_IO_BITFLIPS) {
438 scrub = 1;
439 err = 0;
440 } else if (err == -EBADMSG) {
441 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
442 goto out_unlock;
443 scrub = 1;
444 if (!check) {
445 ubi_msg("force data checking");
446 check = 1;
447 goto retry;
448 }
449 } else
450 goto out_unlock;
451 }
452
453 if (check) {
454 crc1 = crc32(UBI_CRC32_INIT, buf, len);
455 if (crc1 != crc) {
456 ubi_warn("CRC error: calculated %#08x, must be %#08x",
457 crc1, crc);
458 err = -EBADMSG;
459 goto out_unlock;
460 }
461 }
462
463 if (scrub)
464 err = ubi_wl_scrub_peb(ubi, pnum);
465
466 leb_read_unlock(ubi, vol_id, lnum);
467 return err;
468
469out_free:
470 ubi_free_vid_hdr(ubi, vid_hdr);
471out_unlock:
472 leb_read_unlock(ubi, vol_id, lnum);
473 return err;
474}
475
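A hedged caller-side sketch of ubi_eba_read_leb(), in the same kernel context as the code above; the helper name is made up and is not part of the patch:

/*
 * Hypothetical helper, shown only to illustrate the calling convention:
 * read LEB @lnum of dynamic volume @vol_id in full and return the buffer.
 */
static void *read_whole_leb(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_volume *vol = ubi->volumes[vol_id2idx(ubi, vol_id)];
	void *buf;
	int err;

	buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* An unmapped LEB comes back as 0xFF bytes, as described above. */
	err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, 0,
			       vol->usable_leb_size, 0);
	if (err) {
		/* -EBADMSG would indicate a CRC/ECC problem */
		kfree(buf);
		return ERR_PTR(err);
	}

	return buf;
}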
476/**
477 * recover_peb - recover from write failure.
478 * @ubi: UBI device description object
479 * @pnum: the physical eraseblock to recover
480 * @vol_id: volume ID
481 * @lnum: logical eraseblock number
482 * @buf: data which was not written because of the write failure
483 * @offset: offset of the failed write
484 * @len: how many bytes should have been written
485 *
486 * This function is called in case of a write failure and moves all good data
487 * from the potentially bad physical eraseblock to a good physical eraseblock.
488 * This function also writes the data which was not written due to the failure.
489 * Returns new physical eraseblock number in case of success, and a negative
490 * error code in case of failure.
491 */
492static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
493 const void *buf, int offset, int len)
494{
495 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
496 struct ubi_volume *vol = ubi->volumes[idx];
497 struct ubi_vid_hdr *vid_hdr;
498 unsigned char *new_buf;
499
500 vid_hdr = ubi_zalloc_vid_hdr(ubi);
501 if (!vid_hdr) {
502 return -ENOMEM;
503 }
504
505retry:
506 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
507 if (new_pnum < 0) {
508 ubi_free_vid_hdr(ubi, vid_hdr);
509 return new_pnum;
510 }
511
512 ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
513
514 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
515 if (err && err != UBI_IO_BITFLIPS) {
516 if (err > 0)
517 err = -EIO;
518 goto out_put;
519 }
520
521 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
522 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
523 if (err)
524 goto write_error;
525
526 data_size = offset + len;
527 new_buf = kmalloc(data_size, GFP_KERNEL);
528 if (!new_buf) {
529 err = -ENOMEM;
530 goto out_put;
531 }
532 memset(new_buf + offset, 0xFF, len);
533
534 /* Read everything before the area where the write failure happened */
535 if (offset > 0) {
536 err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset);
537 if (err && err != UBI_IO_BITFLIPS) {
538 kfree(new_buf);
539 goto out_put;
540 }
541 }
542
543 memcpy(new_buf + offset, buf, len);
544
545 err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size);
546 if (err) {
547 kfree(new_buf);
548 goto write_error;
549 }
550
551 kfree(new_buf);
552 ubi_free_vid_hdr(ubi, vid_hdr);
553
554 vol->eba_tbl[lnum] = new_pnum;
555 ubi_wl_put_peb(ubi, pnum, 1);
556
557 ubi_msg("data was successfully recovered");
558 return 0;
559
560out_put:
561 ubi_wl_put_peb(ubi, new_pnum, 1);
562 ubi_free_vid_hdr(ubi, vid_hdr);
563 return err;
564
565write_error:
566 /*
567 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
568 * get another one.
569 */
570 ubi_warn("failed to write to PEB %d", new_pnum);
571 ubi_wl_put_peb(ubi, new_pnum, 1);
572 if (++tries > UBI_IO_RETRIES) {
573 ubi_free_vid_hdr(ubi, vid_hdr);
574 return err;
575 }
576 ubi_msg("try again");
577 goto retry;
578}
579
580/**
581 * ubi_eba_write_leb - write data to dynamic volume.
582 * @ubi: UBI device description object
583 * @vol_id: volume ID
584 * @lnum: logical eraseblock number
585 * @buf: the data to write
586 * @offset: offset within the logical eraseblock where to write
587 * @len: how many bytes to write
588 * @dtype: data type
589 *
590 * This function writes data to logical eraseblock @lnum of a dynamic volume
591 * @vol_id. Returns zero in case of success and a negative error code in case
592 * of failure. In case of error, it is possible that something was still
593 * written to the flash media, but may be some garbage.
594 * written to the flash media, but it may be garbage.
595int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
596 const void *buf, int offset, int len, int dtype)
597{
598 int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
599 struct ubi_volume *vol = ubi->volumes[idx];
600 struct ubi_vid_hdr *vid_hdr;
601
602 if (ubi->ro_mode)
603 return -EROFS;
604
605 err = leb_write_lock(ubi, vol_id, lnum);
606 if (err)
607 return err;
608
609 pnum = vol->eba_tbl[lnum];
610 if (pnum >= 0) {
611 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
612 len, offset, vol_id, lnum, pnum);
613
614 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
615 if (err) {
616 ubi_warn("failed to write data to PEB %d", pnum);
617 if (err == -EIO && ubi->bad_allowed)
618 err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
619 if (err)
620 ubi_ro_mode(ubi);
621 }
622 leb_write_unlock(ubi, vol_id, lnum);
623 return err;
624 }
625
626 /*
627 * The logical eraseblock is not mapped. We have to get a free physical
628 * eraseblock and write the volume identifier header there first.
629 */
630 vid_hdr = ubi_zalloc_vid_hdr(ubi);
631 if (!vid_hdr) {
632 leb_write_unlock(ubi, vol_id, lnum);
633 return -ENOMEM;
634 }
635
636 vid_hdr->vol_type = UBI_VID_DYNAMIC;
637 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
638 vid_hdr->vol_id = cpu_to_ubi32(vol_id);
639 vid_hdr->lnum = cpu_to_ubi32(lnum);
640 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
641 vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
642
643retry:
644 pnum = ubi_wl_get_peb(ubi, dtype);
645 if (pnum < 0) {
646 ubi_free_vid_hdr(ubi, vid_hdr);
647 leb_write_unlock(ubi, vol_id, lnum);
648 return pnum;
649 }
650
651 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
652 len, offset, vol_id, lnum, pnum);
653
654 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
655 if (err) {
656 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
657 vol_id, lnum, pnum);
658 goto write_error;
659 }
660
661 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
662 if (err) {
663 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, "
664 "PEB %d", len, offset, vol_id, lnum, pnum);
665 goto write_error;
666 }
667
668 vol->eba_tbl[lnum] = pnum;
669
670 leb_write_unlock(ubi, vol_id, lnum);
671 ubi_free_vid_hdr(ubi, vid_hdr);
672 return 0;
673
674write_error:
675 if (err != -EIO || !ubi->bad_allowed) {
676 ubi_ro_mode(ubi);
677 leb_write_unlock(ubi, vol_id, lnum);
678 ubi_free_vid_hdr(ubi, vid_hdr);
679 return err;
680 }
681
682 /*
683 * Fortunately, this is the first write operation to this physical
684 * eraseblock, so just put it and request a new one. We assume that if
685 * this physical eraseblock went bad, the erase code will handle that.
686 */
687 err = ubi_wl_put_peb(ubi, pnum, 1);
688 if (err || ++tries > UBI_IO_RETRIES) {
689 ubi_ro_mode(ubi);
690 leb_write_unlock(ubi, vol_id, lnum);
691 ubi_free_vid_hdr(ubi, vid_hdr);
692 return err;
693 }
694
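	/*
	 * Use a fresh sequence number for the retried copy so that, during
	 * scanning, it supersedes whatever may have ended up on the PEB we
	 * have just given up on.
	 */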
695 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
696 ubi_msg("try another PEB");
697 goto retry;
698}
699
700/**
701 * ubi_eba_write_leb_st - write data to static volume.
702 * @ubi: UBI device description object
703 * @vol_id: volume ID
704 * @lnum: logical eraseblock number
705 * @buf: data to write
706 * @len: how many bytes to write
707 * @dtype: data type
708 * @used_ebs: how many logical eraseblocks will this volume contain
709 *
710 * This function writes data to logical eraseblock @lnum of static volume
711 * @vol_id. The @used_ebs argument should contain the total number of logical
712 * eraseblocks in this static volume.
713 *
714 * When writing to the last logical eraseblock, the @len argument does not have
715 * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
716 * the real data size, although the @buf buffer still has to contain the padding
717 * up to the aligned size. In all other cases, @len has to be aligned.
718 *
719 * It is prohibited to write more than once to logical eraseblocks of static
720 * volumes. This function returns zero in case of success and a negative error
721 * code in case of failure.
722 */
723int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
724 const void *buf, int len, int dtype, int used_ebs)
725{
726 int err, pnum, tries = 0, data_size = len;
727 int idx = vol_id2idx(ubi, vol_id);
728 struct ubi_volume *vol = ubi->volumes[idx];
729 struct ubi_vid_hdr *vid_hdr;
730 uint32_t crc;
731
732 if (ubi->ro_mode)
733 return -EROFS;
734
735 if (lnum == used_ebs - 1)
736 /* If this is the last LEB @len may be unaligned */
737 len = ALIGN(data_size, ubi->min_io_size);
738 else
739 ubi_assert(len % ubi->min_io_size == 0);
740
741 vid_hdr = ubi_zalloc_vid_hdr(ubi);
742 if (!vid_hdr)
743 return -ENOMEM;
744
745 err = leb_write_lock(ubi, vol_id, lnum);
746 if (err) {
747 ubi_free_vid_hdr(ubi, vid_hdr);
748 return err;
749 }
750
751 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
752 vid_hdr->vol_id = cpu_to_ubi32(vol_id);
753 vid_hdr->lnum = cpu_to_ubi32(lnum);
754 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
755 vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
756
757 crc = crc32(UBI_CRC32_INIT, buf, data_size);
758 vid_hdr->vol_type = UBI_VID_STATIC;
759 vid_hdr->data_size = cpu_to_ubi32(data_size);
760 vid_hdr->used_ebs = cpu_to_ubi32(used_ebs);
761 vid_hdr->data_crc = cpu_to_ubi32(crc);
762
763retry:
764 pnum = ubi_wl_get_peb(ubi, dtype);
765 if (pnum < 0) {
766 ubi_free_vid_hdr(ubi, vid_hdr);
767 leb_write_unlock(ubi, vol_id, lnum);
768 return pnum;
769 }
770
771 dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
772 len, vol_id, lnum, pnum, used_ebs);
773
774 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
775 if (err) {
776 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
777 vol_id, lnum, pnum);
778 goto write_error;
779 }
780
781 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
782 if (err) {
783 ubi_warn("failed to write %d bytes of data to PEB %d",
784 len, pnum);
785 goto write_error;
786 }
787
788 ubi_assert(vol->eba_tbl[lnum] < 0);
789 vol->eba_tbl[lnum] = pnum;
790
791 leb_write_unlock(ubi, vol_id, lnum);
792 ubi_free_vid_hdr(ubi, vid_hdr);
793 return 0;
794
795write_error:
796 if (err != -EIO || !ubi->bad_allowed) {
797 /*
798 * This flash device does not admit of bad eraseblocks, or
799 * something nasty and unexpected happened. Switch to read-only
800 * mode just in case.
801 */
802 ubi_ro_mode(ubi);
803 leb_write_unlock(ubi, vol_id, lnum);
804 ubi_free_vid_hdr(ubi, vid_hdr);
805 return err;
806 }
807
808 err = ubi_wl_put_peb(ubi, pnum, 1);
809 if (err || ++tries > UBI_IO_RETRIES) {
810 ubi_ro_mode(ubi);
811 leb_write_unlock(ubi, vol_id, lnum);
812 ubi_free_vid_hdr(ubi, vid_hdr);
813 return err;
814 }
815
816 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
817 ubi_msg("try another PEB");
818 goto retry;
819}
820
821/**
822 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
823 * @ubi: UBI device description object
824 * @vol_id: volume ID
825 * @lnum: logical eraseblock number
826 * @buf: data to write
827 * @len: how many bytes to write
828 * @dtype: data type
829 *
830 * This function changes the contents of a logical eraseblock atomically. @buf
831 * has to contain the new logical eraseblock data, and @len the length of the
832 * data, which has to be aligned. This function guarantees that in case of an
833 * unclean reboot the old contents are preserved. Returns zero in case of
834 * success and a negative error code in case of failure.
835 */
836int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
837 const void *buf, int len, int dtype)
838{
839 int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
840 struct ubi_volume *vol = ubi->volumes[idx];
841 struct ubi_vid_hdr *vid_hdr;
842 uint32_t crc;
843
844 if (ubi->ro_mode)
845 return -EROFS;
846
847 vid_hdr = ubi_zalloc_vid_hdr(ubi);
848 if (!vid_hdr)
849 return -ENOMEM;
850
851 err = leb_write_lock(ubi, vol_id, lnum);
852 if (err) {
853 ubi_free_vid_hdr(ubi, vid_hdr);
854 return err;
855 }
856
857 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
858 vid_hdr->vol_id = cpu_to_ubi32(vol_id);
859 vid_hdr->lnum = cpu_to_ubi32(lnum);
860 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
861 vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
862
863 crc = crc32(UBI_CRC32_INIT, buf, len);
864 vid_hdr->vol_type = UBI_VID_STATIC;
865 vid_hdr->data_size = cpu_to_ubi32(len);
866 vid_hdr->copy_flag = 1;
867 vid_hdr->data_crc = cpu_to_ubi32(crc);
868
869retry:
870 pnum = ubi_wl_get_peb(ubi, dtype);
871 if (pnum < 0) {
872 ubi_free_vid_hdr(ubi, vid_hdr);
873 leb_write_unlock(ubi, vol_id, lnum);
874 return pnum;
875 }
876
877 dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
878 vol_id, lnum, vol->eba_tbl[lnum], pnum);
879
880 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
881 if (err) {
882 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
883 vol_id, lnum, pnum);
884 goto write_error;
885 }
886
887 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
888 if (err) {
889 ubi_warn("failed to write %d bytes of data to PEB %d",
890 len, pnum);
891 goto write_error;
892 }
893
894 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
895 if (err) {
896 ubi_free_vid_hdr(ubi, vid_hdr);
897 leb_write_unlock(ubi, vol_id, lnum);
898 return err;
899 }
900
901 vol->eba_tbl[lnum] = pnum;
902 leb_write_unlock(ubi, vol_id, lnum);
903 ubi_free_vid_hdr(ubi, vid_hdr);
904 return 0;
905
906write_error:
907 if (err != -EIO || !ubi->bad_allowed) {
908 /*
909 * This flash device does not admit of bad eraseblocks, or
910 * something nasty and unexpected happened. Switch to read-only
911 * mode just in case.
912 */
913 ubi_ro_mode(ubi);
914 leb_write_unlock(ubi, vol_id, lnum);
915 ubi_free_vid_hdr(ubi, vid_hdr);
916 return err;
917 }
918
919 err = ubi_wl_put_peb(ubi, pnum, 1);
920 if (err || ++tries > UBI_IO_RETRIES) {
921 ubi_ro_mode(ubi);
922 leb_write_unlock(ubi, vol_id, lnum);
923 ubi_free_vid_hdr(ubi, vid_hdr);
924 return err;
925 }
926
927 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
928 ubi_msg("try another PEB");
929 goto retry;
930}
931
932/**
933 * ltree_entry_ctor - lock tree entries slab cache constructor.
934 * @obj: the lock-tree entry to construct
935 * @cache: the lock tree entry slab cache
936 * @flags: constructor flags
937 */
938static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
939 unsigned long flags)
940{
941 struct ltree_entry *le = obj;
942
943 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) !=
944 SLAB_CTOR_CONSTRUCTOR)
945 return;
946
947 le->users = 0;
948 init_rwsem(&le->mutex);
949}
950
951/**
952 * ubi_eba_copy_leb - copy logical eraseblock.
953 * @ubi: UBI device description object
954 * @from: physical eraseblock number from where to copy
955 * @to: physical eraseblock number where to copy
956 * @vid_hdr: VID header of the @from physical eraseblock
957 *
958 * This function copies logical eraseblock from physical eraseblock @from to
959 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
960 * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
961 * was canceled because bit-flips were detected at the target PEB, and a
962 * negative error code in case of failure.
963 */
964int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
965 struct ubi_vid_hdr *vid_hdr)
966{
967 int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
968 struct ubi_volume *vol;
969 uint32_t crc;
970 void *buf, *buf1 = NULL;
971
972 vol_id = ubi32_to_cpu(vid_hdr->vol_id);
973 lnum = ubi32_to_cpu(vid_hdr->lnum);
974
975 dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
976
977 if (vid_hdr->vol_type == UBI_VID_STATIC) {
978 data_size = ubi32_to_cpu(vid_hdr->data_size);
979 aldata_size = ALIGN(data_size, ubi->min_io_size);
980 } else
981 data_size = aldata_size =
982 ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad);
983
984 buf = kmalloc(aldata_size, GFP_KERNEL);
985 if (!buf)
986 return -ENOMEM;
987
988 /*
989 * We do not want anybody to write to this logical eraseblock while we
990 * are moving it, so we lock it.
991 */
992 err = leb_write_lock(ubi, vol_id, lnum);
993 if (err) {
994 kfree(buf);
995 return err;
996 }
997
998 /*
999 * But the logical eraseblock might have been put by this time.
1000 * Cancel the operation if so.
1001 */
1002 idx = vol_id2idx(ubi, vol_id);
1003
1004 /*
1005 * We may race with volume deletion/re-size, so we have to hold
1006 * @ubi->volumes_lock.
1007 */
1008 spin_lock(&ubi->volumes_lock);
1009 vol = ubi->volumes[idx];
1010 if (!vol) {
1011 dbg_eba("volume %d was removed meanwhile", vol_id);
1012 spin_unlock(&ubi->volumes_lock);
1013 goto out_unlock;
1014 }
1015
1016 pnum = vol->eba_tbl[lnum];
1017 if (pnum != from) {
1018 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1019 "PEB %d, cancel", vol_id, lnum, from, pnum);
1020 spin_unlock(&ubi->volumes_lock);
1021 goto out_unlock;
1022 }
1023 spin_unlock(&ubi->volumes_lock);
1024
1025 /* OK, now the LEB is locked and we can safely start moving it */
1026
1027 dbg_eba("read %d bytes of data", aldata_size);
1028 err = ubi_io_read_data(ubi, buf, from, 0, aldata_size);
1029 if (err && err != UBI_IO_BITFLIPS) {
1030 ubi_warn("error %d while reading data from PEB %d",
1031 err, from);
1032 goto out_unlock;
1033 }
1034
1035 /*
1036 * Now we have got to calculate how much data we have to copy. In
1037 * case of a static volume it is fairly easy - the VID header contains
1038 * the data size. In case of a dynamic volume it is more difficult - we
1039 * have to read the contents, cut 0xFF bytes from the end and copy only
1040 * the first part. We must do this to avoid writing 0xFF bytes as it
1041 * may have some side-effects. And not only this. It is important not
1042 * to include those 0xFFs in the CRC because later they may be filled
1043 * by data.
1044 */
1045 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1046 aldata_size = data_size =
1047 ubi_calc_data_len(ubi, buf, data_size);
1048
1049 cond_resched();
1050 crc = crc32(UBI_CRC32_INIT, buf, data_size);
1051 cond_resched();
1052
1053 /*
1054 * It may turn out that the whole @from physical eraseblock
1055 * contains only 0xFF bytes. Then we have to only write the VID header
1056 * and do not write any data. This also means we should not set
1057 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1058 */
1059 if (data_size > 0) {
1060 vid_hdr->copy_flag = 1;
1061 vid_hdr->data_size = cpu_to_ubi32(data_size);
1062 vid_hdr->data_crc = cpu_to_ubi32(crc);
1063 }
1064 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
1065
1066 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1067 if (err)
1068 goto out_unlock;
1069
1070 cond_resched();
1071
1072 /* Read the VID header back and check if it was written correctly */
1073 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1074 if (err) {
1075 if (err != UBI_IO_BITFLIPS)
1076 ubi_warn("cannot read VID header back from PEB %d", to);
1077 goto out_unlock;
1078 }
1079
1080 if (data_size > 0) {
1081 err = ubi_io_write_data(ubi, buf, to, 0, aldata_size);
1082 if (err)
1083 goto out_unlock;
1084
1085 /*
1086 * We've written the data and are going to read it back to make
1087 * sure it was written correctly.
1088 */
1089 buf1 = kmalloc(aldata_size, GFP_KERNEL);
1090 if (!buf1) {
1091 err = -ENOMEM;
1092 goto out_unlock;
1093 }
1094
1095 cond_resched();
1096
1097 err = ubi_io_read_data(ubi, buf1, to, 0, aldata_size);
1098 if (err) {
1099 if (err != UBI_IO_BITFLIPS)
1100 ubi_warn("cannot read data back from PEB %d",
1101 to);
1102 goto out_unlock;
1103 }
1104
1105 cond_resched();
1106
1107 if (memcmp(buf, buf1, aldata_size)) {
1108 ubi_warn("read data back from PEB %d - it is different",
1109 to);
 err = -EIO;
1110 goto out_unlock;
1111 }
1112 }
1113
1114 ubi_assert(vol->eba_tbl[lnum] == from);
1115 vol->eba_tbl[lnum] = to;
1116
1117 leb_write_unlock(ubi, vol_id, lnum);
1118 kfree(buf);
1119 kfree(buf1);
1120
1121 return 0;
1122
1123out_unlock:
1124 leb_write_unlock(ubi, vol_id, lnum);
1125 kfree(buf);
1126 kfree(buf1);
1127 return err;
1128}
1129
1130/**
1131 * ubi_eba_init_scan - initialize the EBA unit using scanning information.
1132 * @ubi: UBI device description object
1133 * @si: scanning information
1134 *
1135 * This function returns zero in case of success and a negative error code in
1136 * case of failure.
1137 */
1138int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1139{
1140 int i, j, err, num_volumes;
1141 struct ubi_scan_volume *sv;
1142 struct ubi_volume *vol;
1143 struct ubi_scan_leb *seb;
1144 struct rb_node *rb;
1145
1146 dbg_eba("initialize EBA unit");
1147
1148 spin_lock_init(&ubi->ltree_lock);
1149 ubi->ltree = RB_ROOT;
1150
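	/*
	 * The lock-tree entry slab cache is shared by all UBI devices, so it
	 * is created only when the first device is attached (and destroyed in
	 * ubi_eba_close() when the last one goes away).
	 */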
1151 if (ubi_devices_cnt == 0) {
1152 ltree_slab = kmem_cache_create("ubi_ltree_slab",
1153 sizeof(struct ltree_entry), 0,
1154 0, &ltree_entry_ctor, NULL);
1155 if (!ltree_slab)
1156 return -ENOMEM;
1157 }
1158
1159 ubi->global_sqnum = si->max_sqnum + 1;
1160 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1161
1162 for (i = 0; i < num_volumes; i++) {
1163 vol = ubi->volumes[i];
1164 if (!vol)
1165 continue;
1166
1167 cond_resched();
1168
1169 vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
1170 GFP_KERNEL);
1171 if (!vol->eba_tbl) {
1172 err = -ENOMEM;
1173 goto out_free;
1174 }
1175
1176 for (j = 0; j < vol->reserved_pebs; j++)
1177 vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1178
1179 sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
1180 if (!sv)
1181 continue;
1182
1183 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
1184 if (seb->lnum >= vol->reserved_pebs) {
1185 /*
1186 * This may happen in case of an unclean reboot
1187 * during re-size.
1188 */
1189 ubi_scan_move_to_list(sv, seb, &si->erase);
 continue;
 }
1190 vol->eba_tbl[seb->lnum] = seb->pnum;
1191 }
1192 }
1193
1194 if (ubi->bad_allowed) {
1195 ubi_calculate_reserved(ubi);
1196
1197 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1198 /* Not enough free physical eraseblocks */
1199 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1200 ubi_warn("cannot reserve enough PEBs for bad PEB "
1201 "handling, reserved %d, need %d",
1202 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1203 } else
1204 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1205
1206 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1207 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1208 }
1209
1210 dbg_eba("EBA unit is initialized");
1211 return 0;
1212
1213out_free:
1214 for (i = 0; i < num_volumes; i++) {
1215 if (!ubi->volumes[i])
1216 continue;
1217 kfree(ubi->volumes[i]->eba_tbl);
1218 }
1219 if (ubi_devices_cnt == 0)
1220 kmem_cache_destroy(ltree_slab);
1221 return err;
1222}
1223
1224/**
1225 * ubi_eba_close - close EBA unit.
1226 * @ubi: UBI device description object
1227 */
1228void ubi_eba_close(const struct ubi_device *ubi)
1229{
1230 int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1231
1232 dbg_eba("close EBA unit");
1233
1234 for (i = 0; i < num_volumes; i++) {
1235 if (!ubi->volumes[i])
1236 continue;
1237 kfree(ubi->volumes[i]->eba_tbl);
1238 }
1239 if (ubi_devices_cnt == 1)
1240 kmem_cache_destroy(ltree_slab);
1241}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
new file mode 100644
index 000000000000..fc9478d605ff
--- /dev/null
+++ b/drivers/mtd/ubi/gluebi.c
@@ -0,0 +1,323 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel
19 */
20
21/*
22 * This file includes the implementation of a fake MTD device for each UBI
23 * volume. This sounds strange, but it is in fact quite useful to make
24 * MTD-oriented software (including all the legacy software) work on top of UBI.
25 *
26 * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
27 * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The
28 * eraseblock size is equivalent to the logical eraseblock size of the volume.
29 */
30
31#include <asm/div64.h>
32#include "ubi.h"
33
34/**
35 * gluebi_get_device - get MTD device reference.
36 * @mtd: the MTD device description object
37 *
38 * This function is called every time the MTD device is being opened and
39 * implements the MTD get_device() operation. Returns zero in case of success
40 * and a negative error code in case of failure.
41 */
42static int gluebi_get_device(struct mtd_info *mtd)
43{
44 struct ubi_volume *vol;
45
46 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
47
48 /*
49 * We do not introduce locks for gluebi reference count because the
50 * get_device()/put_device() calls are already serialized at MTD.
51 */
52 if (vol->gluebi_refcount > 0) {
53 /*
54 * The MTD device is already referenced and this is just one
55 * more reference. MTD allows many users to open the same
56 * volume simultaneously and does not distinguish between
57 * readers/writers/exclusive openers as UBI does. So we do not
58 * open the UBI volume again - just increase the reference
59 * counter and return.
60 */
61 vol->gluebi_refcount += 1;
62 return 0;
63 }
64
65 /*
66 * This is the first reference to this UBI volume via the MTD device
67 * interface. Open the corresponding volume in read-write mode.
68 */
69 vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id,
70 UBI_READWRITE);
71 if (IS_ERR(vol->gluebi_desc))
72 return PTR_ERR(vol->gluebi_desc);
73 vol->gluebi_refcount += 1;
74 return 0;
75}
76
77/**
78 * gluebi_put_device - put MTD device reference.
79 * @mtd: the MTD device description object
80 *
81 * This function is called every time the MTD device is being put and
82 * implements the MTD put_device() operation.
83 */
84static void gluebi_put_device(struct mtd_info *mtd)
85{
86 struct ubi_volume *vol;
87
88 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
89 vol->gluebi_refcount -= 1;
90 ubi_assert(vol->gluebi_refcount >= 0);
91 if (vol->gluebi_refcount == 0)
92 ubi_close_volume(vol->gluebi_desc);
93}
94
95/**
96 * gluebi_read - read operation of emulated MTD devices.
97 * @mtd: MTD device description object
98 * @from: absolute offset from where to read
99 * @len: how many bytes to read
100 * @retlen: count of read bytes is returned here
101 * @buf: buffer to store the read data
102 *
103 * This function returns zero in case of success and a negative error code in
104 * case of failure.
105 */
106static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
107 size_t *retlen, unsigned char *buf)
108{
109 int err = 0, lnum, offs, total_read;
110 struct ubi_volume *vol;
111 struct ubi_device *ubi;
112 uint64_t tmp = from;
113
114 dbg_msg("read %zd bytes from offset %lld", len, from);
115
116 if (len < 0 || from < 0 || from + len > mtd->size)
117 return -EINVAL;
118
119 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
120 ubi = vol->ubi;
121
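	/*
	 * Split the absolute MTD offset into a logical eraseblock number and
	 * an offset within that eraseblock: do_div() leaves the quotient in
	 * @tmp and returns the remainder.
	 */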
122 offs = do_div(tmp, mtd->erasesize);
123 lnum = tmp;
124
125 total_read = len;
126 while (total_read) {
127 size_t to_read = mtd->erasesize - offs;
128
129 if (to_read > total_read)
130 to_read = total_read;
131
132 err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs,
133 to_read, 0);
134 if (err)
135 break;
136
137 lnum += 1;
138 offs = 0;
139 total_read -= to_read;
140 buf += to_read;
141 }
142
143 *retlen = len - total_read;
144 return err;
145}
146
147/**
148 * gluebi_write - write operation of emulated MTD devices.
149 * @mtd: MTD device description object
150 * @to: absolute offset where to write
151 * @len: how many bytes to write
152 * @retlen: count of written bytes is returned here
153 * @buf: buffer with data to write
154 *
155 * This function returns zero in case of success and a negative error code in
156 * case of failure.
157 */
158static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
159 size_t *retlen, const u_char *buf)
160{
161 int err = 0, lnum, offs, total_written;
162 struct ubi_volume *vol;
163 struct ubi_device *ubi;
164 uint64_t tmp = to;
165
166 dbg_msg("write %zd bytes to offset %lld", len, to);
167
168 if (len < 0 || to < 0 || len + to > mtd->size)
169 return -EINVAL;
170
171 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
172 ubi = vol->ubi;
173
174 if (ubi->ro_mode)
175 return -EROFS;
176
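	/* As in gluebi_read(), split @to into an LEB number and an offset */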
177 offs = do_div(tmp, mtd->erasesize);
178 lnum = tmp;
179
180 if (len % mtd->writesize || offs % mtd->writesize)
181 return -EINVAL;
182
183 total_written = len;
184 while (total_written) {
185 size_t to_write = mtd->erasesize - offs;
186
187 if (to_write > total_written)
188 to_write = total_written;
189
190 err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs,
191 to_write, UBI_UNKNOWN);
192 if (err)
193 break;
194
195 lnum += 1;
196 offs = 0;
197 total_written -= to_write;
198 buf += to_write;
199 }
200
201 *retlen = len - total_written;
202 return err;
203}
204
205/**
206 * gluebi_erase - erase operation of emulated MTD devices.
207 * @mtd: the MTD device description object
208 * @instr: the erase operation description
209 *
210 * This function calls the erase callback when it finishes. Returns zero in case
211 * of success and a negative error code in case of failure.
212 */
213static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
214{
215 int err, i, lnum, count;
216 struct ubi_volume *vol;
217 struct ubi_device *ubi;
218
219 dbg_msg("erase %u bytes at offset %u", instr->len, instr->addr);
220
221 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
222 return -EINVAL;
223
224 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
225 return -EINVAL;
226
227 if (instr->addr % mtd->writesize || instr->len % mtd->writesize)
228 return -EINVAL;
229
230 lnum = instr->addr / mtd->erasesize;
231 count = instr->len / mtd->erasesize;
232
233 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
234 ubi = vol->ubi;
235
236 if (ubi->ro_mode)
237 return -EROFS;
238
239 for (i = 0; i < count; i++) {
240 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i);
241 if (err)
242 goto out_err;
243 }
244
245 /*
246 * MTD erase operations are synchronous, so we have to make sure the
247 * physical eraseblock is wiped out.
248 */
249 err = ubi_wl_flush(ubi);
250 if (err)
251 goto out_err;
252
253 instr->state = MTD_ERASE_DONE;
254 mtd_erase_callback(instr);
255 return 0;
256
257out_err:
258 instr->state = MTD_ERASE_FAILED;
259 instr->fail_addr = lnum * mtd->erasesize;
260 return err;
261}
262
263/**
264 * ubi_create_gluebi - initialize gluebi for an UBI volume.
265 * @ubi: UBI device description object
266 * @vol: volume description object
267 *
268 * This function is called when an UBI volume is created in order to create
269 * the corresponding fake MTD device. Returns zero in case of success and a
270 * negative error code in case of failure.
271 */
272int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
273{
274 struct mtd_info *mtd = &vol->gluebi_mtd;
275
276 mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL);
277 if (!mtd->name)
278 return -ENOMEM;
279
280 mtd->type = MTD_UBIVOLUME;
281 if (!ubi->ro_mode)
282 mtd->flags = MTD_WRITEABLE;
283 mtd->writesize = ubi->min_io_size;
284 mtd->owner = THIS_MODULE;
285 mtd->size = vol->usable_leb_size * vol->reserved_pebs;
286 mtd->erasesize = vol->usable_leb_size;
287 mtd->read = gluebi_read;
288 mtd->write = gluebi_write;
289 mtd->erase = gluebi_erase;
290 mtd->get_device = gluebi_get_device;
291 mtd->put_device = gluebi_put_device;
292
293 if (add_mtd_device(mtd)) {
294 ubi_err("cannot add MTD device");
295 kfree(mtd->name);
296 return -ENFILE;
297 }
298
299 dbg_msg("added mtd%d (\"%s\"), size %u, EB size %u",
300 mtd->index, mtd->name, mtd->size, mtd->erasesize);
301 return 0;
302}
303
304/**
305 * ubi_destroy_gluebi - close gluebi for an UBI volume.
306 * @vol: volume description object
307 *
308 * This function is called when an UBI volume is removed in order to remove
309 * the corresponding fake MTD device. Returns zero in case of success and a
310 * negative error code in case of failure.
311 */
312int ubi_destroy_gluebi(struct ubi_volume *vol)
313{
314 int err;
315 struct mtd_info *mtd = &vol->gluebi_mtd;
316
317 dbg_msg("remove mtd%d", mtd->index);
318 err = del_mtd_device(mtd);
319 if (err)
320 return err;
321 kfree(mtd->name);
322 return 0;
323}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
new file mode 100644
index 000000000000..438914d05151
--- /dev/null
+++ b/drivers/mtd/ubi/io.c
@@ -0,0 +1,1259 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 * Copyright (c) Nokia Corporation, 2006, 2007
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * Author: Artem Bityutskiy (Битюцкий Артём)
20 */
21
22/*
23 * UBI input/output unit.
24 *
25 * This unit provides a uniform way to work with all kinds of underlying
26 * MTD devices. It also implements handy functions for reading and writing UBI
27 * headers.
28 *
29 * We are trying to have a paranoid mindset and not to trust what we read
30 * from the flash media in order to be more secure and robust. So this unit
31 * validates every single header it reads from the flash media.
32 *
33 * Some words about how the eraseblock headers are stored.
34 *
35 * The erase counter header is always stored at offset zero. By default, the
36 * VID header is stored after the EC header at the closest aligned offset
37 * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
38 * header at the closest aligned offset. But this default layout may be
39 * changed. For example, for different reasons (e.g., optimization) UBI may be
40 * asked to put the VID header at a further offset, and even at an unaligned
41 * offset. Of course, if the offset of the VID header is unaligned, UBI adds
42 * proper padding in front of it. Data offset may also be changed but it has to
43 * be aligned.
44 *
45 * About minimal I/O units. In general, UBI assumes a flash device model where
46 * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
47 * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
48 * @ubi->mtd->writesize field. But as an exception, UBI admits of using another
49 * (smaller) minimal I/O unit size for EC and VID headers to make it possible
50 * to do different optimizations.
51 *
52 * This is extremely useful in case of NAND flashes which admit of several
53 * write operations to one NAND page. In this case UBI can fit EC and VID
54 * headers in one NAND page. Thus, UBI may use "sub-page" size as the minimal
55 * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
56 * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
57 * users.
58 *
59 * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
60 * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
61 * headers.
62 *
63 * Q: why not just treat the sub-page as the minimal I/O unit of this flash
64 * device, e.g., make @ubi->min_io_size = 512 in the example above?
65 *
66 * A: because when writing a sub-page, MTD still writes a full 2K page but the
67 * bytes which are not relevant to the sub-page are 0xFF. So, basically, writing
68 * 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. Thus, we
69 * prefer to use sub-pages only for EC and VID headers.
70 *
71 * As it was noted above, the VID header may start at a non-aligned offset.
72 * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
73 * the VID header may reside at offset 1984 which is the last 64 bytes of the
74 * last sub-page (EC header is always at offset zero). This causes some
75 * difficulties when reading and writing VID headers.
76 *
77 * Suppose we have a 64-byte buffer and we read a VID header into it. We change
78 * the data and want to write this VID header out. As we can only write in
79 * 512-byte chunks, we have to allocate one more buffer and copy our VID header
80 * to offset 448 of this buffer.
81 *
82 * The I/O unit does the following trick in order to avoid this extra copy.
83 * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header
84 * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the
85 * VID header is being written out, it shifts the VID header pointer back and
86 * writes the whole sub-page.
87 */
88
89#include <linux/crc32.h>
90#include <linux/err.h>
91#include "ubi.h"
92
93#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
94static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
95static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
96static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
97 const struct ubi_ec_hdr *ec_hdr);
98static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
99static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
100 const struct ubi_vid_hdr *vid_hdr);
101static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
102 int offset, int len);
103#else
104#define paranoid_check_not_bad(ubi, pnum) 0
105#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
106#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
107#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
108#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
109#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
110#endif
111
112/**
113 * ubi_io_read - read data from a physical eraseblock.
114 * @ubi: UBI device description object
115 * @buf: buffer where to store the read data
116 * @pnum: physical eraseblock number to read from
117 * @offset: offset within the physical eraseblock from where to read
118 * @len: how many bytes to read
119 *
120 * This function reads data from offset @offset of physical eraseblock @pnum
121 * and stores the read data in the @buf buffer. The following return codes are
122 * possible:
123 *
124 * o %0 if all the requested data were successfully read;
125 * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
126 * correctable bit-flips were detected; this is harmless but may indicate
127 * that this eraseblock may become bad soon (but it does not have to);
128 * o %-EBADMSG if the MTD subsystem reported data integrity
129 * problems, for example it can be an ECC error in case of NAND; this most
130 * probably means that the data is corrupted;
131 * o %-EIO if some I/O error occurred;
132 * o other negative error codes in case of other errors.
133 */
134int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
135 int len)
136{
137 int err, retries = 0;
138 size_t read;
139 loff_t addr;
140
141 dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
142
143 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
144 ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
145 ubi_assert(len > 0);
146
147 err = paranoid_check_not_bad(ubi, pnum);
148 if (err)
149 return err > 0 ? -EINVAL : err;
150
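	/* Turn the PEB number into an absolute address on the MTD device */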
151 addr = (loff_t)pnum * ubi->peb_size + offset;
152retry:
153 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
154 if (err) {
155 if (err == -EUCLEAN) {
156 /*
157 * -EUCLEAN is reported if there was a bit-flip which
158 * was corrected, so this is harmless.
159 */
160 ubi_msg("fixable bit-flip detected at PEB %d", pnum);
161 ubi_assert(len == read);
162 return UBI_IO_BITFLIPS;
163 }
164
165 if (read != len && retries++ < UBI_IO_RETRIES) {
166 dbg_io("error %d while reading %d bytes from PEB %d:%d, "
167 "read only %zd bytes, retry",
168 err, len, pnum, offset, read);
169 yield();
170 goto retry;
171 }
172
173 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
174 "read %zd bytes", err, len, pnum, offset, read);
175 ubi_dbg_dump_stack();
176 } else {
177 ubi_assert(len == read);
178
179 if (ubi_dbg_is_bitflip()) {
180 dbg_msg("bit-flip (emulated)");
181 err = UBI_IO_BITFLIPS;
182 }
183 }
184
185 return err;
186}
187
188/**
189 * ubi_io_write - write data to a physical eraseblock.
190 * @ubi: UBI device description object
191 * @buf: buffer with the data to write
192 * @pnum: physical eraseblock number to write to
193 * @offset: offset within the physical eraseblock where to write
194 * @len: how many bytes to write
195 *
196 * This function writes @len bytes of data from buffer @buf to offset @offset
197 * of physical eraseblock @pnum. If all the data were successfully written,
198 * zero is returned. If an error occurred, this function returns a negative
199 * error code. If %-EIO is returned, the physical eraseblock most probably went
200 * bad.
201 *
202 * Note, in case of an error, it is possible that something was still written
203 * to the flash media, but it may be garbage.
204 */
205int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum,
206 int offset, int len)
207{
208 int err;
209 size_t written;
210 loff_t addr;
211
212 dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
213
214 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
215 ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
216 ubi_assert(offset % ubi->hdrs_min_io_size == 0);
217 ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
218
219 if (ubi->ro_mode) {
220 ubi_err("read-only mode");
221 return -EROFS;
222 }
223
224 /* The below has to be compiled out if paranoid checks are disabled */
225
226 err = paranoid_check_not_bad(ubi, pnum);
227 if (err)
228 return err > 0 ? -EINVAL : err;
229
230 /* The area we are writing to has to contain all 0xFF bytes */
231 err = paranoid_check_all_ff(ubi, pnum, offset, len);
232 if (err)
233 return err > 0 ? -EINVAL : err;
234
235 if (offset >= ubi->leb_start) {
236 /*
237 * We write to the data area of the physical eraseblock. Make
238 * sure it has valid EC and VID headers.
239 */
240 err = paranoid_check_peb_ec_hdr(ubi, pnum);
241 if (err)
242 return err > 0 ? -EINVAL : err;
243 err = paranoid_check_peb_vid_hdr(ubi, pnum);
244 if (err)
245 return err > 0 ? -EINVAL : err;
246 }
247
248 if (ubi_dbg_is_write_failure()) {
249 dbg_err("cannot write %d bytes to PEB %d:%d "
250 "(emulated)", len, pnum, offset);
251 ubi_dbg_dump_stack();
252 return -EIO;
253 }
254
255 addr = (loff_t)pnum * ubi->peb_size + offset;
256 err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
257 if (err) {
258 ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
259 " %zd bytes", err, len, pnum, offset, written);
260 ubi_dbg_dump_stack();
261 } else
262 ubi_assert(written == len);
263
264 return err;
265}
266
267/**
268 * erase_callback - MTD erasure call-back.
269 * @ei: MTD erase information object.
270 *
271 * Note, even though the MTD erase interface is asynchronous, all the current
272 * implementations are synchronous anyway.
273 */
274static void erase_callback(struct erase_info *ei)
275{
276 wake_up_interruptible((wait_queue_head_t *)ei->priv);
277}
278
279/**
280 * do_sync_erase - synchronously erase a physical eraseblock.
281 * @ubi: UBI device description object
282 * @pnum: the physical eraseblock number to erase
283 *
284 * This function synchronously erases physical eraseblock @pnum and returns
285 * zero in case of success and a negative error code in case of failure. If
286 * %-EIO is returned, the physical eraseblock most probably went bad.
287 */
288static int do_sync_erase(const struct ubi_device *ubi, int pnum)
289{
290 int err, retries = 0;
291 struct erase_info ei;
292 wait_queue_head_t wq;
293
294 dbg_io("erase PEB %d", pnum);
295
296retry:
297 init_waitqueue_head(&wq);
298 memset(&ei, 0, sizeof(struct erase_info));
299
300 ei.mtd = ubi->mtd;
301 ei.addr = pnum * ubi->peb_size;
302 ei.len = ubi->peb_size;
303 ei.callback = erase_callback;
304 ei.priv = (unsigned long)&wq;
305
306 err = ubi->mtd->erase(ubi->mtd, &ei);
307 if (err) {
308 if (retries++ < UBI_IO_RETRIES) {
309 dbg_io("error %d while erasing PEB %d, retry",
310 err, pnum);
311 yield();
312 goto retry;
313 }
314 ubi_err("cannot erase PEB %d, error %d", pnum, err);
315 ubi_dbg_dump_stack();
316 return err;
317 }
318
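	/*
	 * The MTD erase interface is asynchronous, so wait until
	 * erase_callback() wakes us up via the wait queue passed in @ei.priv.
	 */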
319 err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
320 ei.state == MTD_ERASE_FAILED);
321 if (err) {
322 ubi_err("interrupted PEB %d erasure", pnum);
323 return -EINTR;
324 }
325
326 if (ei.state == MTD_ERASE_FAILED) {
327 if (retries++ < UBI_IO_RETRIES) {
328 dbg_io("error while erasing PEB %d, retry", pnum);
329 yield();
330 goto retry;
331 }
332 ubi_err("cannot erase PEB %d", pnum);
333 ubi_dbg_dump_stack();
334 return -EIO;
335 }
336
337 err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
338 if (err)
339 return err > 0 ? -EINVAL : err;
340
341 if (ubi_dbg_is_erase_failure() && !err) {
342 dbg_err("cannot erase PEB %d (emulated)", pnum);
343 return -EIO;
344 }
345
346 return 0;
347}
348
349/**
350 * check_pattern - check if buffer contains only a certain byte pattern.
351 * @buf: buffer to check
352 * @patt: the pattern to check
353 * @size: buffer size in bytes
354 *
355 * This function returns %1 if there are only @patt bytes in @buf, and %0 if
356 * something else was also found.
357 */
358static int check_pattern(const void *buf, uint8_t patt, int size)
359{
360 int i;
361
362 for (i = 0; i < size; i++)
363 if (((const uint8_t *)buf)[i] != patt)
364 return 0;
365 return 1;
366}
367
368/* Patterns to write to a physical eraseblock when torturing it */
369static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
370
371/**
372 * torture_peb - test a supposedly bad physical eraseblock.
373 * @ubi: UBI device description object
374 * @pnum: the physical eraseblock number to test
375 *
376 * This function returns %-EIO if the physical eraseblock did not pass the
377 * test, a positive number of erase operations done if the test was
378 * successfully passed, and other negative error codes in case of other errors.
379 */
380static int torture_peb(const struct ubi_device *ubi, int pnum)
381{
382 void *buf;
383 int err, i, patt_count;
384
385 buf = kmalloc(ubi->peb_size, GFP_KERNEL);
386 if (!buf)
387 return -ENOMEM;
388
389 patt_count = ARRAY_SIZE(patterns);
390 ubi_assert(patt_count > 0);
391
392 for (i = 0; i < patt_count; i++) {
393 err = do_sync_erase(ubi, pnum);
394 if (err)
395 goto out;
396
397 /* Make sure the PEB contains only 0xFF bytes */
398 err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size);
399 if (err)
400 goto out;
401
402 err = check_pattern(buf, 0xFF, ubi->peb_size);
403 if (err == 0) {
404 ubi_err("erased PEB %d, but a non-0xFF byte found",
405 pnum);
406 err = -EIO;
407 goto out;
408 }
409
410 /* Write a pattern and check it */
411 memset(buf, patterns[i], ubi->peb_size);
412 err = ubi_io_write(ubi, buf, pnum, 0, ubi->peb_size);
413 if (err)
414 goto out;
415
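		/*
		 * Pre-fill the buffer with the inverted pattern so that the
		 * check below cannot accidentally pass on stale buffer
		 * contents if the read does not return the expected data.
		 */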
416 memset(buf, ~patterns[i], ubi->peb_size);
417 err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size);
418 if (err)
419 goto out;
420
421 err = check_pattern(buf, patterns[i], ubi->peb_size);
422 if (err == 0) {
423 ubi_err("pattern %x checking failed for PEB %d",
424 patterns[i], pnum);
425 err = -EIO;
426 goto out;
427 }
428 }
429
430 err = patt_count;
431
432out:
433 if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
434 /*
435 * If a bit-flip or data integrity error was detected, the test
436 * has not passed because it happened on a freshly erased
437 * physical eraseblock which means something is wrong with it.
438 */
439 err = -EIO;
440 kfree(buf);
441 return err;
442}
443
444/**
445 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
446 * @ubi: UBI device description object
447 * @pnum: physical eraseblock number to erase
448 * @torture: if this physical eraseblock has to be tortured
449 *
450 * This function synchronously erases physical eraseblock @pnum. If the
451 * @torture flag is not zero, the physical eraseblock is checked by means of
452 * writing different patterns to it and reading them back. If the torturing is
453 * enabled, the physical eraseblock is erased more than once.
454 *
455 * This function returns the number of erasures made in case of success, %-EIO
456 * if the erasure failed or the torturing test failed, and other negative error
457 * codes in case of other errors. Note, %-EIO means that the physical
458 * eraseblock is bad.
459 */
460int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture)
461{
462 int err, ret = 0;
463
464 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
465
466 err = paranoid_check_not_bad(ubi, pnum);
467 if (err != 0)
468 return err > 0 ? -EINVAL : err;
469
470 if (ubi->ro_mode) {
471 ubi_err("read-only mode");
472 return -EROFS;
473 }
474
475 if (torture) {
476 ret = torture_peb(ubi, pnum);
477 if (ret < 0)
478 return ret;
479 }
480
481 err = do_sync_erase(ubi, pnum);
482 if (err)
483 return err;
484
485 return ret + 1;
486}
487
488/**
489 * ubi_io_is_bad - check if a physical eraseblock is bad.
490 * @ubi: UBI device description object
491 * @pnum: the physical eraseblock number to check
492 *
493 * This function returns a positive number if the physical eraseblock is bad,
494 * zero if not, and a negative error code if an error occurred.
495 */
496int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
497{
498 struct mtd_info *mtd = ubi->mtd;
499
500 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
501
502 if (ubi->bad_allowed) {
503 int ret;
504
505 ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
506 if (ret < 0)
507 ubi_err("error %d while checking if PEB %d is bad",
508 ret, pnum);
509 else if (ret)
510 dbg_io("PEB %d is bad", pnum);
511 return ret;
512 }
513
514 return 0;
515}
516
517/**
518 * ubi_io_mark_bad - mark a physical eraseblock as bad.
519 * @ubi: UBI device description object
520 * @pnum: the physical eraseblock number to mark
521 *
522 * This function returns zero in case of success and a negative error code in
523 * case of failure.
524 */
525int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
526{
527 int err;
528 struct mtd_info *mtd = ubi->mtd;
529
530 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
531
532 if (ubi->ro_mode) {
533 ubi_err("read-only mode");
534 return -EROFS;
535 }
536
537 if (!ubi->bad_allowed)
538 return 0;
539
540 err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
541 if (err)
542 ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
543 return err;
544}
545
546/**
547 * validate_ec_hdr - validate an erase counter header.
548 * @ubi: UBI device description object
549 * @ec_hdr: the erase counter header to check
550 *
551 * This function returns zero if the erase counter header is OK, and %1 if
552 * not.
553 */
554static int validate_ec_hdr(const struct ubi_device *ubi,
555 const struct ubi_ec_hdr *ec_hdr)
556{
557 long long ec;
558 int vid_hdr_offset, leb_start;
559
560 ec = ubi64_to_cpu(ec_hdr->ec);
561 vid_hdr_offset = ubi32_to_cpu(ec_hdr->vid_hdr_offset);
562 leb_start = ubi32_to_cpu(ec_hdr->data_offset);
563
564 if (ec_hdr->version != UBI_VERSION) {
565 ubi_err("node with incompatible UBI version found: "
566 "this UBI version is %d, image version is %d",
567 UBI_VERSION, (int)ec_hdr->version);
568 goto bad;
569 }
570
571 if (vid_hdr_offset != ubi->vid_hdr_offset) {
572 ubi_err("bad VID header offset %d, expected %d",
573 vid_hdr_offset, ubi->vid_hdr_offset);
574 goto bad;
575 }
576
577 if (leb_start != ubi->leb_start) {
578 ubi_err("bad data offset %d, expected %d",
579 leb_start, ubi->leb_start);
580 goto bad;
581 }
582
583 if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
584 ubi_err("bad erase counter %lld", ec);
585 goto bad;
586 }
587
588 return 0;
589
590bad:
591 ubi_err("bad EC header");
592 ubi_dbg_dump_ec_hdr(ec_hdr);
593 ubi_dbg_dump_stack();
594 return 1;
595}
596
597/**
598 * ubi_io_read_ec_hdr - read and check an erase counter header.
599 * @ubi: UBI device description object
600 * @pnum: physical eraseblock to read from
601 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
602 * header
603 * @verbose: be verbose if the header is corrupted or was not found
604 *
605 * This function reads erase counter header from physical eraseblock @pnum and
606 * stores it in @ec_hdr. This function also checks CRC checksum of the read
607 * erase counter header. The following codes may be returned:
608 *
609 * o %0 if the CRC checksum is correct and the header was successfully read;
610 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
611 * and corrected by the flash driver; this is harmless but may indicate that
612 * this eraseblock may become bad soon (but it may not);
613 * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error);
614 * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
615 * o a negative error code in case of failure.
616 */
617int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
618 struct ubi_ec_hdr *ec_hdr, int verbose)
619{
620 int err, read_err = 0;
621 uint32_t crc, magic, hdr_crc;
622
623 dbg_io("read EC header from PEB %d", pnum);
624 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
625
626 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
627 if (err) {
628 if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
629 return err;
630
631 /*
632 * We read all the data, but either a correctable bit-flip
633 * occurred, or MTD reported some data integrity error,
634 * like an ECC error in case of NAND. The former is harmless,
635 * the latter may mean that the read data is corrupted. But we
636 * have a CRC check-sum and we will detect this. If the EC
637 * header is still OK, we just report that there was a
638 * bit-flip.
639 */
640 read_err = err;
641 }
642
643 magic = ubi32_to_cpu(ec_hdr->magic);
644 if (magic != UBI_EC_HDR_MAGIC) {
645 /*
646 * The magic field is wrong. Let's check if we have read all
647 * 0xFF. If yes, this physical eraseblock is assumed to be
648 * empty.
649 *
650 * But if there was a read error, we do not test it for all
651 * 0xFFs. Even if it does contain all 0xFFs, this error
652 * indicates that something is still wrong with this physical
653 * eraseblock and we cannot treat it as empty anyway.
654 */
655 if (read_err != -EBADMSG &&
656 check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
657 /* The physical eraseblock is supposedly empty */
658
659 /*
660 * The below is just a paranoid check, it has to be
661 * compiled out if paranoid checks are disabled.
662 */
663 err = paranoid_check_all_ff(ubi, pnum, 0,
664 ubi->peb_size);
665 if (err)
666 return err > 0 ? UBI_IO_BAD_EC_HDR : err;
667
668 if (verbose)
669 ubi_warn("no EC header found at PEB %d, "
670 "only 0xFF bytes", pnum);
671 return UBI_IO_PEB_EMPTY;
672 }
673
674 /*
675 * This is not a valid erase counter header, and these are not
676 * 0xFF bytes. Report that the header is corrupted.
677 */
678 if (verbose) {
679 ubi_warn("bad magic number at PEB %d: %08x instead of "
680 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
681 ubi_dbg_dump_ec_hdr(ec_hdr);
682 }
683 return UBI_IO_BAD_EC_HDR;
684 }
685
686 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
687 hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
688
689 if (hdr_crc != crc) {
690 if (verbose) {
691 ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
692 " read %#08x", pnum, crc, hdr_crc);
693 ubi_dbg_dump_ec_hdr(ec_hdr);
694 }
695 return UBI_IO_BAD_EC_HDR;
696 }
697
698 /* And of course validate what has just been read from the media */
699 err = validate_ec_hdr(ubi, ec_hdr);
700 if (err) {
701 ubi_err("validation failed for PEB %d", pnum);
702 return -EINVAL;
703 }
704
705 return read_err ? UBI_IO_BITFLIPS : 0;
706}
707
708/**
709 * ubi_io_write_ec_hdr - write an erase counter header.
710 * @ubi: UBI device description object
711 * @pnum: physical eraseblock to write to
712 * @ec_hdr: the erase counter header to write
713 *
714 * This function writes erase counter header described by @ec_hdr to physical
715 * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
716 * the caller does not have to fill them. Callers must only fill the @ec_hdr->ec
717 * field.
718 *
719 * This function returns zero in case of success and a negative error code in
720 * case of failure. If %-EIO is returned, the physical eraseblock most probably
721 * went bad.
722 */
723int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum,
724 struct ubi_ec_hdr *ec_hdr)
725{
726 int err;
727 uint32_t crc;
728
729 dbg_io("write EC header to PEB %d", pnum);
730 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
731
732 ec_hdr->magic = cpu_to_ubi32(UBI_EC_HDR_MAGIC);
733 ec_hdr->version = UBI_VERSION;
734 ec_hdr->vid_hdr_offset = cpu_to_ubi32(ubi->vid_hdr_offset);
735 ec_hdr->data_offset = cpu_to_ubi32(ubi->leb_start);
736 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
737 ec_hdr->hdr_crc = cpu_to_ubi32(crc);
738
739 err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
740 if (err)
741 return -EINVAL;
742
743 err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
744 return err;
745}
746
747/**
748 * validate_vid_hdr - validate a volume identifier header.
749 * @ubi: UBI device description object
750 * @vid_hdr: the volume identifier header to check
751 *
752 * This function checks the data stored in the volume identifier header
753 * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
754 */
755static int validate_vid_hdr(const struct ubi_device *ubi,
756 const struct ubi_vid_hdr *vid_hdr)
757{
758 int vol_type = vid_hdr->vol_type;
759 int copy_flag = vid_hdr->copy_flag;
760 int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
761 int lnum = ubi32_to_cpu(vid_hdr->lnum);
762 int compat = vid_hdr->compat;
763 int data_size = ubi32_to_cpu(vid_hdr->data_size);
764 int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
765 int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
766 int data_crc = ubi32_to_cpu(vid_hdr->data_crc);
767 int usable_leb_size = ubi->leb_size - data_pad;
768
769 if (copy_flag != 0 && copy_flag != 1) {
770 dbg_err("bad copy_flag");
771 goto bad;
772 }
773
774 if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
775 data_pad < 0) {
776 dbg_err("negative values");
777 goto bad;
778 }
779
780 if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
781 dbg_err("bad vol_id");
782 goto bad;
783 }
784
785 if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
786 dbg_err("bad compat");
787 goto bad;
788 }
789
790 if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
791 compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
792 compat != UBI_COMPAT_REJECT) {
793 dbg_err("bad compat");
794 goto bad;
795 }
796
797 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
798 dbg_err("bad vol_type");
799 goto bad;
800 }
801
802 if (data_pad >= ubi->leb_size / 2) {
803 dbg_err("bad data_pad");
804 goto bad;
805 }
806
807 if (vol_type == UBI_VID_STATIC) {
808 /*
809 * Although from a high-level point of view static volumes may
810 * contain zero bytes of data, no VID header can contain
811 * zeros in these fields, because empty volumes do not have
812 * mapped logical eraseblocks.
813 */
814 if (used_ebs == 0) {
815 dbg_err("zero used_ebs");
816 goto bad;
817 }
818 if (data_size == 0) {
819 dbg_err("zero data_size");
820 goto bad;
821 }
822 if (lnum < used_ebs - 1) {
823 if (data_size != usable_leb_size) {
824 dbg_err("bad data_size");
825 goto bad;
826 }
827 } else if (lnum == used_ebs - 1) {
828 if (data_size == 0) {
829 dbg_err("bad data_size at last LEB");
830 goto bad;
831 }
832 } else {
833 dbg_err("too high lnum");
834 goto bad;
835 }
836 } else {
837 if (copy_flag == 0) {
838 if (data_crc != 0) {
839 dbg_err("non-zero data CRC");
840 goto bad;
841 }
842 if (data_size != 0) {
843 dbg_err("non-zero data_size");
844 goto bad;
845 }
846 } else {
847 if (data_size == 0) {
848 dbg_err("zero data_size of copy");
849 goto bad;
850 }
851 }
852 if (used_ebs != 0) {
853 dbg_err("bad used_ebs");
854 goto bad;
855 }
856 }
857
858 return 0;
859
860bad:
861 ubi_err("bad VID header");
862 ubi_dbg_dump_vid_hdr(vid_hdr);
863 ubi_dbg_dump_stack();
864 return 1;
865}
866
867/**
868 * ubi_io_read_vid_hdr - read and check a volume identifier header.
869 * @ubi: UBI device description object
870 * @pnum: physical eraseblock number to read from
871 * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
872 * identifier header
873 * @verbose: be verbose if the header is corrupted or wasn't found
874 *
875 * This function reads the volume identifier header from physical eraseblock
876 * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
877 * volume identifier header. The following codes may be returned:
878 *
879 * o %0 if the CRC checksum is correct and the header was successfully read;
880 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
881 * and corrected by the flash driver; this is harmless but may indicate that
882 * this eraseblock may become bad soon;
883 * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC
884 * error detected);
885 * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
886 * header there);
887 * o a negative error code in case of failure.
888 */
889int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
890 struct ubi_vid_hdr *vid_hdr, int verbose)
891{
892 int err, read_err = 0;
893 uint32_t crc, magic, hdr_crc;
894 void *p;
895
896 dbg_io("read VID header from PEB %d", pnum);
897 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
898
899 p = (char *)vid_hdr - ubi->vid_hdr_shift;
900 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
901 ubi->vid_hdr_alsize);
902 if (err) {
903 if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
904 return err;
905
906 /*
907		 * We read all the data, but either a correctable bit-flip
908		 * occurred, or MTD reported a data integrity error, such as
909		 * an ECC error in case of NAND. The former is harmless, the
910		 * latter may mean the read data is corrupted. But we have the
911		 * CRC checksum and will detect this. If the VID header is
912		 * still OK, we just report that a bit-flip occurred.
913 */
914 read_err = err;
915 }
916
917 magic = ubi32_to_cpu(vid_hdr->magic);
918 if (magic != UBI_VID_HDR_MAGIC) {
919 /*
920 * If we have read all 0xFF bytes, the VID header probably does
921 * not exist and the physical eraseblock is assumed to be free.
922 *
923 * But if there was a read error, we do not test the data for
924 * 0xFFs. Even if it does contain all 0xFFs, this error
925 * indicates that something is still wrong with this physical
926 * eraseblock and it cannot be regarded as free.
927 */
928 if (read_err != -EBADMSG &&
929 check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
930 /* The physical eraseblock is supposedly free */
931
932 /*
933 * The below is just a paranoid check, it has to be
934 * compiled out if paranoid checks are disabled.
935 */
936 err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
937 ubi->leb_size);
938 if (err)
939 return err > 0 ? UBI_IO_BAD_VID_HDR : err;
940
941 if (verbose)
942 ubi_warn("no VID header found at PEB %d, "
943 "only 0xFF bytes", pnum);
944 return UBI_IO_PEB_FREE;
945 }
946
947 /*
948 * This is not a valid VID header, and these are not 0xFF
949 * bytes. Report that the header is corrupted.
950 */
951 if (verbose) {
952 ubi_warn("bad magic number at PEB %d: %08x instead of "
953 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
954 ubi_dbg_dump_vid_hdr(vid_hdr);
955 }
956 return UBI_IO_BAD_VID_HDR;
957 }
958
959 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
960 hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
961
962 if (hdr_crc != crc) {
963 if (verbose) {
964 ubi_warn("bad CRC at PEB %d, calculated %#08x, "
965 "read %#08x", pnum, crc, hdr_crc);
966 ubi_dbg_dump_vid_hdr(vid_hdr);
967 }
968 return UBI_IO_BAD_VID_HDR;
969 }
970
971 /* Validate the VID header that we have just read */
972 err = validate_vid_hdr(ubi, vid_hdr);
973 if (err) {
974 ubi_err("validation failed for PEB %d", pnum);
975 return -EINVAL;
976 }
977
978 return read_err ? UBI_IO_BITFLIPS : 0;
979}
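
/*
 * Illustrative sketch, not part of this driver: how a caller might dispatch
 * on the ubi_io_read_vid_hdr() return codes documented above. The function
 * name and the error mapping are assumptions made for illustration only.
 */
static int __attribute__((unused))
example_handle_vid_hdr(const struct ubi_device *ubi, int pnum,
		       struct ubi_vid_hdr *vid_hdr)
{
	int err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);

	switch (err) {
	case 0:
		return 0;		/* valid VID header */
	case UBI_IO_BITFLIPS:
		return 0;		/* valid, but schedule scrubbing */
	case UBI_IO_PEB_FREE:
		return -ENOENT;		/* no VID header, the PEB is free */
	case UBI_IO_BAD_VID_HDR:
		return -EBADMSG;	/* corrupted VID header */
	default:
		return err;		/* negative I/O error code */
	}
}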
980
981/**
982 * ubi_io_write_vid_hdr - write a volume identifier header.
983 * @ubi: UBI device description object
984 * @pnum: the physical eraseblock number to write to
985 * @vid_hdr: the volume identifier header to write
986 *
987 * This function writes the volume identifier header described by @vid_hdr to
988 * physical eraseblock @pnum. This function automatically fills the
989 * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
990 * header CRC checksum and stores it at vid_hdr->hdr_crc.
991 *
992 * This function returns zero in case of success and a negative error code in
993 * case of failure. If %-EIO is returned, the physical eraseblock probably went
994 * bad.
995 */
996int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
997 struct ubi_vid_hdr *vid_hdr)
998{
999 int err;
1000 uint32_t crc;
1001 void *p;
1002
1003 dbg_io("write VID header to PEB %d", pnum);
1004 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
1005
1006 err = paranoid_check_peb_ec_hdr(ubi, pnum);
1007 if (err)
1008		return err > 0 ? -EINVAL : err;
1009
1010 vid_hdr->magic = cpu_to_ubi32(UBI_VID_HDR_MAGIC);
1011 vid_hdr->version = UBI_VERSION;
1012 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1013 vid_hdr->hdr_crc = cpu_to_ubi32(crc);
1014
1015 err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
1016 if (err)
1017 return -EINVAL;
1018
1019 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1020 err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
1021 ubi->vid_hdr_alsize);
1022 return err;
1023}
1024
1025#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1026
1027/**
1028 * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
1029 * @ubi: UBI device description object
1030 * @pnum: physical eraseblock number to check
1031 *
1032 * This function returns zero if the physical eraseblock is good, a positive
1033 * number if it is bad and a negative error code if an error occurred.
1034 */
1035static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
1036{
1037 int err;
1038
1039 err = ubi_io_is_bad(ubi, pnum);
1040 if (!err)
1041 return err;
1042
1043 ubi_err("paranoid check failed for PEB %d", pnum);
1044 ubi_dbg_dump_stack();
1045 return err;
1046}
1047
1048/**
1049 * paranoid_check_ec_hdr - check if an erase counter header is all right.
1050 * @ubi: UBI device description object
1051 * @pnum: physical eraseblock number the erase counter header belongs to
1052 * @ec_hdr: the erase counter header to check
1053 *
1054 * This function returns zero if the erase counter header contains valid
1055 * values, and %1 if not.
1056 */
1057static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1058 const struct ubi_ec_hdr *ec_hdr)
1059{
1060 int err;
1061 uint32_t magic;
1062
1063 magic = ubi32_to_cpu(ec_hdr->magic);
1064 if (magic != UBI_EC_HDR_MAGIC) {
1065 ubi_err("bad magic %#08x, must be %#08x",
1066 magic, UBI_EC_HDR_MAGIC);
1067 goto fail;
1068 }
1069
1070 err = validate_ec_hdr(ubi, ec_hdr);
1071 if (err) {
1072 ubi_err("paranoid check failed for PEB %d", pnum);
1073 goto fail;
1074 }
1075
1076 return 0;
1077
1078fail:
1079 ubi_dbg_dump_ec_hdr(ec_hdr);
1080 ubi_dbg_dump_stack();
1081 return 1;
1082}
1083
1084/**
1085 * paranoid_check_peb_ec_hdr - check that the erase counter header of a
1086 * physical eraseblock is in-place and is all right.
1087 * @ubi: UBI device description object
1088 * @pnum: the physical eraseblock number to check
1089 *
1090 * This function returns zero if the erase counter header is all right, %1 if
1091 * not, and a negative error code if an error occurred.
1092 */
1093static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
1094{
1095 int err;
1096 uint32_t crc, hdr_crc;
1097 struct ubi_ec_hdr *ec_hdr;
1098
1099 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1100 if (!ec_hdr)
1101 return -ENOMEM;
1102
1103 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
1104 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
1105 goto exit;
1106
1107 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
1108 hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
1109 if (hdr_crc != crc) {
1110 ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
1111 ubi_err("paranoid check failed for PEB %d", pnum);
1112 ubi_dbg_dump_ec_hdr(ec_hdr);
1113 ubi_dbg_dump_stack();
1114 err = 1;
1115 goto exit;
1116 }
1117
1118 err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
1119
1120exit:
1121 kfree(ec_hdr);
1122 return err;
1123}
1124
1125/**
1126 * paranoid_check_vid_hdr - check that a volume identifier header is all right.
1127 * @ubi: UBI device description object
1128 * @pnum: physical eraseblock number the volume identifier header belongs to
1129 * @vid_hdr: the volume identifier header to check
1130 *
1131 * This function returns zero if the volume identifier header is all right, and
1132 * %1 if not.
1133 */
1134static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1135 const struct ubi_vid_hdr *vid_hdr)
1136{
1137 int err;
1138 uint32_t magic;
1139
1140 magic = ubi32_to_cpu(vid_hdr->magic);
1141 if (magic != UBI_VID_HDR_MAGIC) {
1142 ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
1143 magic, pnum, UBI_VID_HDR_MAGIC);
1144 goto fail;
1145 }
1146
1147 err = validate_vid_hdr(ubi, vid_hdr);
1148 if (err) {
1149 ubi_err("paranoid check failed for PEB %d", pnum);
1150 goto fail;
1151 }
1152
1153 return err;
1154
1155fail:
1156 ubi_err("paranoid check failed for PEB %d", pnum);
1157 ubi_dbg_dump_vid_hdr(vid_hdr);
1158 ubi_dbg_dump_stack();
1159 return 1;
1160
1161}
1162
1163/**
1164 * paranoid_check_peb_vid_hdr - check that the volume identifier header of a
1165 * physical eraseblock is in-place and is all right.
1166 * @ubi: UBI device description object
1167 * @pnum: the physical eraseblock number to check
1168 *
1169 * This function returns zero if the volume identifier header is all right,
1170 * %1 if not, and a negative error code if an error occurred.
1171 */
1172static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1173{
1174 int err;
1175 uint32_t crc, hdr_crc;
1176 struct ubi_vid_hdr *vid_hdr;
1177 void *p;
1178
1179 vid_hdr = ubi_zalloc_vid_hdr(ubi);
1180 if (!vid_hdr)
1181 return -ENOMEM;
1182
1183 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1184 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1185 ubi->vid_hdr_alsize);
1186 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
1187 goto exit;
1188
1189	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1190 hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
1191 if (hdr_crc != crc) {
1192 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
1193 "read %#08x", pnum, crc, hdr_crc);
1194 ubi_err("paranoid check failed for PEB %d", pnum);
1195 ubi_dbg_dump_vid_hdr(vid_hdr);
1196 ubi_dbg_dump_stack();
1197 err = 1;
1198 goto exit;
1199 }
1200
1201 err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
1202
1203exit:
1204 ubi_free_vid_hdr(ubi, vid_hdr);
1205 return err;
1206}
1207
1208/**
1209 * paranoid_check_all_ff - check that a region of flash is empty.
1210 * @ubi: UBI device description object
1211 * @pnum: the physical eraseblock number to check
1212 * @offset: the starting offset within the physical eraseblock to check
1213 * @len: the length of the region to check
1214 *
1215 * This function returns zero if only 0xFF bytes are present at offset
1216 * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
1217 * code if an error occurred.
1218 */
1219static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
1220 int offset, int len)
1221{
1222 size_t read;
1223 int err;
1224 void *buf;
1225 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1226
1227 buf = kzalloc(len, GFP_KERNEL);
1228 if (!buf)
1229 return -ENOMEM;
1230
1231 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
1232 if (err && err != -EUCLEAN) {
1233 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
1234 "read %zd bytes", err, len, pnum, offset, read);
1235 goto error;
1236 }
1237
1238 err = check_pattern(buf, 0xFF, len);
1239 if (err == 0) {
1240 ubi_err("flash region at PEB %d:%d, length %d does not "
1241 "contain all 0xFF bytes", pnum, offset, len);
1242 goto fail;
1243 }
1244
1245 kfree(buf);
1246 return 0;
1247
1248fail:
1249 ubi_err("paranoid check failed for PEB %d", pnum);
1250 dbg_msg("hex dump of the %d-%d region", offset, offset + len);
1251 ubi_dbg_hexdump(buf, len);
1252 err = 1;
1253error:
1254 ubi_dbg_dump_stack();
1255 kfree(buf);
1256 return err;
1257}
1258
1259#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
new file mode 100644
index 000000000000..d352c4575c3d
--- /dev/null
+++ b/drivers/mtd/ubi/kapi.c
@@ -0,0 +1,575 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/* This file mostly implements UBI kernel API functions */
22
23#include <linux/module.h>
24#include <linux/err.h>
25#include <asm/div64.h>
26#include "ubi.h"
27
28/**
29 * ubi_get_device_info - get information about UBI device.
30 * @ubi_num: UBI device number
31 * @di: the information is stored here
32 *
33 * This function returns %0 in case of success and %-ENODEV if there is no
34 * such UBI device.
35 */
36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
37{
38 const struct ubi_device *ubi;
39
40 if (!try_module_get(THIS_MODULE))
41 return -ENODEV;
42
43 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
44 !ubi_devices[ubi_num]) {
45 module_put(THIS_MODULE);
46 return -ENODEV;
47 }
48
49 ubi = ubi_devices[ubi_num];
50 di->ubi_num = ubi->ubi_num;
51 di->leb_size = ubi->leb_size;
52 di->min_io_size = ubi->min_io_size;
53 di->ro_mode = ubi->ro_mode;
54 di->cdev = MKDEV(ubi->major, 0);
55 module_put(THIS_MODULE);
56 return 0;
57}
58EXPORT_SYMBOL_GPL(ubi_get_device_info);
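
/*
 * Illustrative sketch, not part of this file: a client module querying an
 * UBI device before using it. UBI device number 0 and the log message are
 * assumptions made for illustration only.
 */
static int __attribute__((unused)) example_query_device(void)
{
	struct ubi_device_info di;
	int err;

	err = ubi_get_device_info(0, &di);
	if (err)
		return err;	/* -ENODEV: UBI device 0 does not exist */

	printk(KERN_INFO "UBI device %d: LEB size %d, min. I/O unit %d\n",
	       di.ubi_num, di.leb_size, di.min_io_size);
	return 0;
}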
59
60/**
61 * ubi_get_volume_info - get information about UBI volume.
62 * @desc: volume descriptor
63 * @vi: the information is stored here
64 */
65void ubi_get_volume_info(struct ubi_volume_desc *desc,
66 struct ubi_volume_info *vi)
67{
68 const struct ubi_volume *vol = desc->vol;
69 const struct ubi_device *ubi = vol->ubi;
70
71 vi->vol_id = vol->vol_id;
72 vi->ubi_num = ubi->ubi_num;
73 vi->size = vol->reserved_pebs;
74 vi->used_bytes = vol->used_bytes;
75 vi->vol_type = vol->vol_type;
76 vi->corrupted = vol->corrupted;
77 vi->upd_marker = vol->upd_marker;
78 vi->alignment = vol->alignment;
79 vi->usable_leb_size = vol->usable_leb_size;
80 vi->name_len = vol->name_len;
81 vi->name = vol->name;
82 vi->cdev = MKDEV(ubi->major, vi->vol_id + 1);
83}
84EXPORT_SYMBOL_GPL(ubi_get_volume_info);
85
86/**
87 * ubi_open_volume - open UBI volume.
88 * @ubi_num: UBI device number
89 * @vol_id: volume ID
90 * @mode: open mode
91 *
92 * The @mode parameter specifies if the volume should be opened in read-only
93 * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
94 * nobody else will be able to open this volume. UBI allows a volume to have
95 * many readers and one writer at a time.
96 *
97 * If a static volume is being opened for the first time since boot, it will be
98 * checked by this function, which means it will be fully read and the CRC
99 * checksum of each logical eraseblock will be checked.
100 *
101 * This function returns volume descriptor in case of success and a negative
102 * error code in case of failure.
103 */
104struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
105{
106 int err;
107 struct ubi_volume_desc *desc;
108 struct ubi_device *ubi = ubi_devices[ubi_num];
109 struct ubi_volume *vol;
110
111 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
112
113 err = -ENODEV;
114 if (!try_module_get(THIS_MODULE))
115 return ERR_PTR(err);
116
117 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi)
118 goto out_put;
119
120 err = -EINVAL;
121 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
122 goto out_put;
123 if (mode != UBI_READONLY && mode != UBI_READWRITE &&
124 mode != UBI_EXCLUSIVE)
125 goto out_put;
126
127 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
128 if (!desc) {
129 err = -ENOMEM;
130 goto out_put;
131 }
132
133 spin_lock(&ubi->volumes_lock);
134 vol = ubi->volumes[vol_id];
135 if (!vol) {
136 err = -ENODEV;
137 goto out_unlock;
138 }
139
140 err = -EBUSY;
141 switch (mode) {
142 case UBI_READONLY:
143 if (vol->exclusive)
144 goto out_unlock;
145 vol->readers += 1;
146 break;
147
148 case UBI_READWRITE:
149 if (vol->exclusive || vol->writers > 0)
150 goto out_unlock;
151 vol->writers += 1;
152 break;
153
154 case UBI_EXCLUSIVE:
155 if (vol->exclusive || vol->writers || vol->readers)
156 goto out_unlock;
157 vol->exclusive = 1;
158 break;
159 }
160 spin_unlock(&ubi->volumes_lock);
161
162 desc->vol = vol;
163 desc->mode = mode;
164
165 /*
166 * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
167 * although it is not the purpose it was introduced for.
168 */
169 mutex_lock(&ubi->vtbl_mutex);
170 if (!vol->checked) {
171 /* This is the first open - check the volume */
172 err = ubi_check_volume(ubi, vol_id);
173 if (err < 0) {
174 mutex_unlock(&ubi->vtbl_mutex);
175 ubi_close_volume(desc);
176 return ERR_PTR(err);
177 }
178 if (err == 1) {
179 ubi_warn("volume %d on UBI device %d is corrupted",
180 vol_id, ubi->ubi_num);
181 vol->corrupted = 1;
182 }
183 vol->checked = 1;
184 }
185 mutex_unlock(&ubi->vtbl_mutex);
186 return desc;
187
188out_unlock:
189 spin_unlock(&ubi->volumes_lock);
190 kfree(desc);
191out_put:
192 module_put(THIS_MODULE);
193 return ERR_PTR(err);
194}
195EXPORT_SYMBOL_GPL(ubi_open_volume);
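
/*
 * Illustrative sketch, not part of this file: a client module opening a
 * volume read-only and closing it again, as described above. UBI device 0
 * and volume 0 are assumptions made for illustration only.
 */
static int __attribute__((unused)) example_open_close(void)
{
	struct ubi_volume_desc *desc;

	desc = ubi_open_volume(0, 0, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* -ENODEV, -EINVAL, -EBUSY, ... */

	/* ... read from the volume here ... */

	ubi_close_volume(desc);
	return 0;
}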
196
197/**
198 * ubi_open_volume_nm - open UBI volume by name.
199 * @ubi_num: UBI device number
200 * @name: volume name
201 * @mode: open mode
202 *
203 * This function is similar to 'ubi_open_volume()', but opens a volume by name.
204 */
205struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
206 int mode)
207{
208 int i, vol_id = -1, len;
209 struct ubi_volume_desc *ret;
210 struct ubi_device *ubi;
211
212 dbg_msg("open volume %s, mode %d", name, mode);
213
214 if (!name)
215 return ERR_PTR(-EINVAL);
216
217 len = strnlen(name, UBI_VOL_NAME_MAX + 1);
218 if (len > UBI_VOL_NAME_MAX)
219 return ERR_PTR(-EINVAL);
220
221 ret = ERR_PTR(-ENODEV);
222 if (!try_module_get(THIS_MODULE))
223 return ret;
224
225 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
226 goto out_put;
227
228 ubi = ubi_devices[ubi_num];
229
230 spin_lock(&ubi->volumes_lock);
231 /* Walk all volumes of this UBI device */
232 for (i = 0; i < ubi->vtbl_slots; i++) {
233 struct ubi_volume *vol = ubi->volumes[i];
234
235 if (vol && len == vol->name_len && !strcmp(name, vol->name)) {
236 vol_id = i;
237 break;
238 }
239 }
240 spin_unlock(&ubi->volumes_lock);
241
242 if (vol_id < 0)
243 goto out_put;
244
245 ret = ubi_open_volume(ubi_num, vol_id, mode);
246
247out_put:
248 module_put(THIS_MODULE);
249 return ret;
250}
251EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
252
253/**
254 * ubi_close_volume - close UBI volume.
255 * @desc: volume descriptor
256 */
257void ubi_close_volume(struct ubi_volume_desc *desc)
258{
259 struct ubi_volume *vol = desc->vol;
260
261 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
262
263 spin_lock(&vol->ubi->volumes_lock);
264 switch (desc->mode) {
265 case UBI_READONLY:
266 vol->readers -= 1;
267 break;
268 case UBI_READWRITE:
269 vol->writers -= 1;
270 break;
271 case UBI_EXCLUSIVE:
272 vol->exclusive = 0;
273 }
274 spin_unlock(&vol->ubi->volumes_lock);
275
276 kfree(desc);
277 module_put(THIS_MODULE);
278}
279EXPORT_SYMBOL_GPL(ubi_close_volume);
280
281/**
282 * ubi_leb_read - read data.
283 * @desc: volume descriptor
284 * @lnum: logical eraseblock number to read from
285 * @buf: buffer where to store the read data
286 * @offset: offset within the logical eraseblock to read from
287 * @len: how many bytes to read
288 * @check: whether UBI has to check the read data's CRC or not.
289 *
290 * This function reads data from offset @offset of logical eraseblock @lnum and
291 * stores the data at @buf. When reading from static volumes, @check specifies
292 * whether the data has to be checked or not. If yes, the whole logical
293 * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC
294 * checksum is per-eraseblock). So checking may substantially slow down the
295 * read speed. The @check argument is ignored for dynamic volumes.
296 *
297 * In case of success, this function returns zero. In case of failure, this
298 * function returns a negative error code.
299 *
300 * %-EBADMSG error code is returned:
301 * o for both static and dynamic volumes if MTD driver has detected a data
302 * integrity problem (unrecoverable ECC checksum mismatch in case of NAND);
303 * o for static volumes in case of data CRC mismatch.
304 *
305 * If the volume is damaged because of an interrupted update this function just
306 * returns immediately with %-EBADF error code.
307 */
308int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
309 int len, int check)
310{
311 struct ubi_volume *vol = desc->vol;
312 struct ubi_device *ubi = vol->ubi;
313 int err, vol_id = vol->vol_id;
314
315 dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
316
317 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
318 lnum >= vol->used_ebs || offset < 0 || len < 0 ||
319 offset + len > vol->usable_leb_size)
320 return -EINVAL;
321
322 if (vol->vol_type == UBI_STATIC_VOLUME && lnum == vol->used_ebs - 1 &&
323 offset + len > vol->last_eb_bytes)
324 return -EINVAL;
325
326 if (vol->upd_marker)
327 return -EBADF;
328 if (len == 0)
329 return 0;
330
331 err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check);
332 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
333 ubi_warn("mark volume %d as corrupted", vol_id);
334 vol->corrupted = 1;
335 }
336
337 return err;
338}
339EXPORT_SYMBOL_GPL(ubi_leb_read);
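
/*
 * Illustrative sketch, not part of this file: reading a whole LEB as
 * described above. The helper name is an assumption; @vi is expected to
 * come from ubi_get_volume_info() and @buf must be at least
 * usable_leb_size bytes long.
 */
static int __attribute__((unused))
example_read_leb(struct ubi_volume_desc *desc,
		 const struct ubi_volume_info *vi, int lnum, char *buf)
{
	int err;

	/* Ask UBI to verify the data CRC (meaningful for static volumes) */
	err = ubi_leb_read(desc, lnum, buf, 0, vi->usable_leb_size, 1);
	if (err == -EBADMSG)
		printk(KERN_WARNING "LEB %d contains corrupted data\n", lnum);
	return err;
}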
340
341/**
342 * ubi_leb_write - write data.
343 * @desc: volume descriptor
344 * @lnum: logical eraseblock number to write to
345 * @buf: data to write
346 * @offset: offset within the logical eraseblock where to write
347 * @len: how many bytes to write
348 * @dtype: expected data type
349 *
350 * This function writes @len bytes of data from @buf to offset @offset of
351 * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
352 * the data.
353 *
354 * This function takes care of physical eraseblock write failures. If a write
355 * to the physical eraseblock fails, the logical eraseblock is
356 * re-mapped to another physical eraseblock, the data is recovered, and the
357 * write finishes. UBI has a pool of reserved physical eraseblocks for this.
358 *
359 * If all the data were successfully written, zero is returned. If an error
360 * occurred and UBI has not been able to recover from it, this function returns
361 * a negative error code. Note, in case of an error, it is possible that
362 * something was still written to the flash media, but that may be some
363 * garbage.
364 *
365 * If the volume is damaged because of an interrupted update this function just
366 * returns immediately with %-EBADF code.
367 */
368int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
369 int offset, int len, int dtype)
370{
371 struct ubi_volume *vol = desc->vol;
372 struct ubi_device *ubi = vol->ubi;
373 int vol_id = vol->vol_id;
374
375 dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
376
377 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
378 return -EINVAL;
379
380 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
381 return -EROFS;
382
383 if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
384 offset + len > vol->usable_leb_size || offset % ubi->min_io_size ||
385 len % ubi->min_io_size)
386 return -EINVAL;
387
388 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
389 dtype != UBI_UNKNOWN)
390 return -EINVAL;
391
392 if (vol->upd_marker)
393 return -EBADF;
394
395 if (len == 0)
396 return 0;
397
398 return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype);
399}
400EXPORT_SYMBOL_GPL(ubi_leb_write);
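
/*
 * Illustrative sketch, not part of this file: writing to the beginning of a
 * LEB while honouring the alignment rules described above. The helper name
 * and the UBI_LONGTERM hint are assumptions made for illustration only.
 */
static int __attribute__((unused))
example_write_leb(struct ubi_volume_desc *desc, int lnum,
		  const void *data, int len)
{
	/*
	 * Both the offset (0 here) and @len must be multiples of the
	 * device's minimal I/O unit size, see ubi_get_device_info().
	 */
	return ubi_leb_write(desc, lnum, data, 0, len, UBI_LONGTERM);
}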
401
402/**
403 * ubi_leb_change - change logical eraseblock atomically.
404 * @desc: volume descriptor
405 * @lnum: logical eraseblock number to change
406 * @buf: data to write
407 * @len: how many bytes to write
408 * @dtype: expected data type
409 *
410 * This function changes the contents of a logical eraseblock atomically. @buf
411 * has to contain new logical eraseblock data, and @len - the length of the
412 * data, which has to be aligned. The length may be shorter than the logical
413 * eraseblock size, and the logical eraseblock may be appended to more times
414 * later on. This function guarantees that in case of an unclean reboot the old
415 * contents are preserved. Returns zero in case of success and a negative error
416 * code in case of failure.
417 */
418int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
419 int len, int dtype)
420{
421 struct ubi_volume *vol = desc->vol;
422 struct ubi_device *ubi = vol->ubi;
423 int vol_id = vol->vol_id;
424
425 dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
426
427 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
428 return -EINVAL;
429
430 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
431 return -EROFS;
432
433 if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
434 len > vol->usable_leb_size || len % ubi->min_io_size)
435 return -EINVAL;
436
437 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
438 dtype != UBI_UNKNOWN)
439 return -EINVAL;
440
441 if (vol->upd_marker)
442 return -EBADF;
443
444 if (len == 0)
445 return 0;
446
447 return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype);
448}
449EXPORT_SYMBOL_GPL(ubi_leb_change);
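
/*
 * Illustrative sketch, not part of this file: atomically replacing the
 * contents of a LEB as described above, so that after an unclean reboot
 * either the complete old data or the complete new data is seen, never a
 * mix. The helper name is an assumption made for illustration only.
 */
static int __attribute__((unused))
example_replace_leb(struct ubi_volume_desc *desc, int lnum,
		    const void *data, int len)
{
	/* @len must be aligned to the minimal I/O unit size */
	return ubi_leb_change(desc, lnum, data, len, UBI_UNKNOWN);
}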
450
451/**
452 * ubi_leb_erase - erase logical eraseblock.
453 * @desc: volume descriptor
454 * @lnum: logical eraseblock number
455 *
456 * This function un-maps logical eraseblock @lnum and synchronously erases the
457 * correspondent physical eraseblock. Returns zero in case of success and a
458 * negative error code in case of failure.
459 *
460 * If the volume is damaged because of an interrupted update this function just
461 * returns immediately with %-EBADF code.
462 */
463int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
464{
465 struct ubi_volume *vol = desc->vol;
466 struct ubi_device *ubi = vol->ubi;
467 int err, vol_id = vol->vol_id;
468
469 dbg_msg("erase LEB %d:%d", vol_id, lnum);
470
471 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
472 return -EROFS;
473
474 if (lnum < 0 || lnum >= vol->reserved_pebs)
475 return -EINVAL;
476
477 if (vol->upd_marker)
478 return -EBADF;
479
480 err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
481 if (err)
482 return err;
483
484 return ubi_wl_flush(ubi);
485}
486EXPORT_SYMBOL_GPL(ubi_leb_erase);
487
488/**
489 * ubi_leb_unmap - un-map logical eraseblock.
490 * @desc: volume descriptor
491 * @lnum: logical eraseblock number
492 *
493 * This function un-maps logical eraseblock @lnum and schedules the
494 * corresponding physical eraseblock for erasure, so that it will eventually be
495 * physically erased in background. This operation is much faster than the
496 * erase operation.
497 *
498 * Unlike erase, the un-map operation does not guarantee that the logical
499 * eraseblock will contain all 0xFF bytes when UBI is initialized again. For
500 * example, if several logical eraseblocks are un-mapped, and an unclean reboot
501 * happens after this, the logical eraseblocks will not necessarily be
502 * un-mapped again when this MTD device is attached. They may actually be
503 * mapped to the same physical eraseblocks again. So, this function has to be
504 * used with care.
505 *
506 * In other words, when un-mapping a logical eraseblock, UBI does not store
507 * any information about this on the flash media, it just marks the logical
508 * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical
509 * eraseblock is physically erased, it will be mapped again to the same logical
510 * eraseblock when the MTD device is attached again.
511 *
512 * The main and obvious use-case of this function is when the contents of a
513 * logical eraseblock has to be re-written. Then it is much more efficient to
514 * first un-map it, then write new data, rather than first erase it, then write
515 * new data. Note, once new data has been written to the logical eraseblock,
516 * UBI guarantees that the old contents are gone forever. In other words, if an
517 * unclean reboot happens after the logical eraseblock has been un-mapped and
518 * then written to, it will contain the last written data.
519 *
520 * This function returns zero in case of success and a negative error code in
521 * case of failure. If the volume is damaged because of an interrupted update
522 * this function just returns immediately with %-EBADF code.
523 */
524int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
525{
526 struct ubi_volume *vol = desc->vol;
527 struct ubi_device *ubi = vol->ubi;
528 int vol_id = vol->vol_id;
529
530 dbg_msg("unmap LEB %d:%d", vol_id, lnum);
531
532 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
533 return -EROFS;
534
535 if (lnum < 0 || lnum >= vol->reserved_pebs)
536 return -EINVAL;
537
538 if (vol->upd_marker)
539 return -EBADF;
540
541 return ubi_eba_unmap_leb(ubi, vol_id, lnum);
542}
543EXPORT_SYMBOL_GPL(ubi_leb_unmap);
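
/*
 * Illustrative sketch, not part of this file: the re-write pattern described
 * above - un-map the LEB and write fresh data instead of erasing it
 * synchronously. The helper name and the aligned write from offset 0 are
 * assumptions made for illustration only.
 */
static int __attribute__((unused))
example_rewrite_leb(struct ubi_volume_desc *desc, int lnum,
		    const void *data, int len)
{
	int err;

	err = ubi_leb_unmap(desc, lnum);
	if (err)
		return err;

	/* Once this write succeeds, the old LEB contents cannot come back */
	return ubi_leb_write(desc, lnum, data, 0, len, UBI_UNKNOWN);
}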
544
545/**
546 * ubi_is_mapped - check if logical eraseblock is mapped.
547 * @desc: volume descriptor
548 * @lnum: logical eraseblock number
549 *
550 * This function checks if logical eraseblock @lnum is mapped to a physical
551 * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily
552 * mean it will still be un-mapped after the UBI device is re-attached. The
553 * logical eraseblock may become mapped to the physical eraseblock it was last
554 * mapped to.
555 *
556 * This function returns %1 if the LEB is mapped, %0 if not, and a negative
557 * error code in case of failure. If the volume is damaged because of an
558 * interrupted update this function just returns immediately with %-EBADF error
559 * code.
560 */
561int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
562{
563 struct ubi_volume *vol = desc->vol;
564
565 dbg_msg("test LEB %d:%d", vol->vol_id, lnum);
566
567 if (lnum < 0 || lnum >= vol->reserved_pebs)
568 return -EINVAL;
569
570 if (vol->upd_marker)
571 return -EBADF;
572
573 return vol->eba_tbl[lnum] >= 0;
574}
575EXPORT_SYMBOL_GPL(ubi_is_mapped);
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
new file mode 100644
index 000000000000..38d4e6757dc7
--- /dev/null
+++ b/drivers/mtd/ubi/misc.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/* Here we keep miscellaneous functions which are used all over the UBI code */
22
23#include "ubi.h"
24
25/**
26 * ubi_calc_data_len - calculate how much real data is stored in a buffer.
27 * @ubi: UBI device description object
28 * @buf: a buffer with the contents of the physical eraseblock
29 * @length: the buffer length
30 *
31 * This function calculates how much "real data" is stored in @buf and returns
32 * the length. Trailing 0xFF bytes at the end of the buffer are not
33 * considered "real data".
34 */
35int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
36 int length)
37{
38 int i;
39
40 ubi_assert(length % ubi->min_io_size == 0);
41
42 for (i = length - 1; i >= 0; i--)
43 if (((const uint8_t *)buf)[i] != 0xFF)
44 break;
45
46 /* The resulting length must be aligned to the minimum flash I/O size */
47 length = ALIGN(i + 1, ubi->min_io_size);
48 return length;
49}
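
/*
 * Worked example (illustrative, the numbers are assumptions): with
 * min_io_size = 512 and a 2048-byte buffer whose last non-0xFF byte is at
 * index 700, the loop stops at i = 700 and ALIGN(701, 512) = 1024 is
 * returned, i.e. the data occupies two minimal I/O units.
 */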
50
51/**
52 * ubi_check_volume - check the contents of a static volume.
53 * @ubi: UBI device description object
54 * @vol_id: ID of the volume to check
55 *
56 * This function checks if static volume @vol_id is corrupted by fully reading
57 * it and checking data CRC. This function returns %0 if the volume is not
58 * corrupted, %1 if it is corrupted and a negative error code in case of
59 * failure. Dynamic volumes are not checked and zero is returned immediately.
60 */
61int ubi_check_volume(struct ubi_device *ubi, int vol_id)
62{
63 void *buf;
64 int err = 0, i;
65 struct ubi_volume *vol = ubi->volumes[vol_id];
66
67 if (vol->vol_type != UBI_STATIC_VOLUME)
68 return 0;
69
70 buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
71 if (!buf)
72 return -ENOMEM;
73
74 for (i = 0; i < vol->used_ebs; i++) {
75 int size;
76
77 if (i == vol->used_ebs - 1)
78 size = vol->last_eb_bytes;
79 else
80 size = vol->usable_leb_size;
81
82 err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1);
83 if (err) {
84 if (err == -EBADMSG)
85 err = 1;
86 break;
87 }
88 }
89
90 kfree(buf);
91 return err;
92}
93
94/**
95 * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
96 * eraseblock handling.
97 * @ubi: UBI device description object
98 */
99void ubi_calculate_reserved(struct ubi_device *ubi)
100{
101 ubi->beb_rsvd_level = ubi->good_peb_count/100;
102 ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
103 if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
104 ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
105}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
new file mode 100644
index 000000000000..473f3200b868
--- /dev/null
+++ b/drivers/mtd/ubi/scan.c
@@ -0,0 +1,1368 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * UBI scanning unit.
23 *
24 * This unit is responsible for scanning the flash media, checking UBI
25 * headers and providing complete information about the UBI flash image.
26 *
27 * The scanning information is represented by a &struct ubi_scan_info object.
28 * Information about found volumes is represented by &struct ubi_scan_volume
29 * objects which are kept in volume RB-tree with root at the @volumes field.
30 * The RB-tree is indexed by the volume ID.
31 *
32 * Found logical eraseblocks are represented by &struct ubi_scan_leb objects.
33 * These objects are kept in per-volume RB-trees with the root at the
34 * corresponding &struct ubi_scan_volume object. To put it differently, we keep
35 * an RB-tree of per-volume objects and each of these objects is the root of
36 * RB-tree of per-eraseblock objects.
37 *
38 * Corrupted physical eraseblocks are put to the @corr list, free physical
39 * eraseblocks are put to the @free list and physical eraseblocks to be
40 * erased are put to the @erase list.
41 */
42
43#include <linux/err.h>
44#include <linux/crc32.h>
45#include "ubi.h"
46
47#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
48static int paranoid_check_si(const struct ubi_device *ubi,
49 struct ubi_scan_info *si);
50#else
51#define paranoid_check_si(ubi, si) 0
52#endif
53
54/* Temporary variables used during scanning */
55static struct ubi_ec_hdr *ech;
56static struct ubi_vid_hdr *vidh;
57
58int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
59 struct list_head *list)
60{
61 struct ubi_scan_leb *seb;
62
63 if (list == &si->free)
64 dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
65 else if (list == &si->erase)
66 dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
67 else if (list == &si->corr)
68 dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
69 else if (list == &si->alien)
70 dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
71 else
72 BUG();
73
74 seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
75 if (!seb)
76 return -ENOMEM;
77
78 seb->pnum = pnum;
79 seb->ec = ec;
80 list_add_tail(&seb->u.list, list);
81 return 0;
82}
83
84/**
85 * commit_to_mean_value - commit intermediate results to the final mean erase
86 * counter value.
87 * @si: scanning information
88 *
89 * This is a helper function which calculates a partial erase counter mean
90 * value and adds it to the resulting mean value. As we can work only in
91 * integer arithmetic and we want to calculate the mean value of erase counter
92 * accurately, we first sum erase counter values in @si->ec_sum variable and
93 * count these components in @si->ec_count. If this temporary @si->ec_sum is
94 * going to overflow, we calculate the partial mean value
95 * (@si->ec_sum/@si->ec_count) and add it to @si->mean_ec.
96 */
97static void commit_to_mean_value(struct ubi_scan_info *si)
98{
99	if (si->ec_sum % si->ec_count >= si->ec_count / 2)
100		si->mean_ec += 1;
101	si->ec_sum /= si->ec_count;
102	si->mean_ec += si->ec_sum;
103}
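
/*
 * Worked example (illustrative, the numbers are assumptions): with
 * ec_sum = 1003 and ec_count = 4, the remainder 3 is >= 4 / 2, so the
 * rounded quotient 250 + 1 = 251 is added to mean_ec.
 */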
104
105/**
106 * validate_vid_hdr - check that volume identifier header is correct and
107 * consistent.
108 * @vid_hdr: the volume identifier header to check
109 * @sv: information about the volume this logical eraseblock belongs to
110 * @pnum: physical eraseblock number the VID header came from
111 *
112 * This function checks that data stored in @vid_hdr is consistent. Returns
113 * non-zero if an inconsistency was found and zero if not.
114 *
115 * Note, UBI sanity-checks everything it reads from the flash media. Most of
116 * the checks are done in the I/O unit. Here we check that the information in
117 * the VID header is consistent with the information in other VID headers of
118 * the same volume.
119 */
120static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
121 const struct ubi_scan_volume *sv, int pnum)
122{
123 int vol_type = vid_hdr->vol_type;
124 int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
125 int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
126 int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
127
128 if (sv->leb_count != 0) {
129 int sv_vol_type;
130
131 /*
132 * This is not the first logical eraseblock belonging to this
133 * volume. Ensure that the data in its VID header is consistent
134		 * with the data in previous logical eraseblock headers.
135 */
136
137 if (vol_id != sv->vol_id) {
138 dbg_err("inconsistent vol_id");
139 goto bad;
140 }
141
142 if (sv->vol_type == UBI_STATIC_VOLUME)
143 sv_vol_type = UBI_VID_STATIC;
144 else
145 sv_vol_type = UBI_VID_DYNAMIC;
146
147 if (vol_type != sv_vol_type) {
148 dbg_err("inconsistent vol_type");
149 goto bad;
150 }
151
152 if (used_ebs != sv->used_ebs) {
153 dbg_err("inconsistent used_ebs");
154 goto bad;
155 }
156
157 if (data_pad != sv->data_pad) {
158 dbg_err("inconsistent data_pad");
159 goto bad;
160 }
161 }
162
163 return 0;
164
165bad:
166 ubi_err("inconsistent VID header at PEB %d", pnum);
167 ubi_dbg_dump_vid_hdr(vid_hdr);
168 ubi_dbg_dump_sv(sv);
169 return -EINVAL;
170}
171
172/**
173 * add_volume - add volume to the scanning information.
174 * @si: scanning information
175 * @vol_id: ID of the volume to add
176 * @pnum: physical eraseblock number
177 * @vid_hdr: volume identifier header
178 *
179 * If the volume corresponding to the @vid_hdr logical eraseblock is already
180 * present in the scanning information, this function does nothing. Otherwise
181 * it adds corresponding volume to the scanning information. Returns a pointer
182 * to the scanning volume object in case of success and a negative error code
183 * in case of failure.
184 */
185static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
186 int pnum,
187 const struct ubi_vid_hdr *vid_hdr)
188{
189 struct ubi_scan_volume *sv;
190 struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
191
192 ubi_assert(vol_id == ubi32_to_cpu(vid_hdr->vol_id));
193
194 /* Walk the volume RB-tree to look if this volume is already present */
195 while (*p) {
196 parent = *p;
197 sv = rb_entry(parent, struct ubi_scan_volume, rb);
198
199 if (vol_id == sv->vol_id)
200 return sv;
201
202 if (vol_id > sv->vol_id)
203 p = &(*p)->rb_left;
204 else
205 p = &(*p)->rb_right;
206 }
207
208 /* The volume is absent - add it */
209 sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
210 if (!sv)
211 return ERR_PTR(-ENOMEM);
212
213 sv->highest_lnum = sv->leb_count = 0;
214 si->max_sqnum = 0;
215 sv->vol_id = vol_id;
216 sv->root = RB_ROOT;
217 sv->used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
218 sv->data_pad = ubi32_to_cpu(vid_hdr->data_pad);
219 sv->compat = vid_hdr->compat;
220 sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
221 : UBI_STATIC_VOLUME;
222 if (vol_id > si->highest_vol_id)
223 si->highest_vol_id = vol_id;
224
225 rb_link_node(&sv->rb, parent, p);
226 rb_insert_color(&sv->rb, &si->volumes);
227 si->vols_found += 1;
228 dbg_bld("added volume %d", vol_id);
229 return sv;
230}
231
232/**
233 * compare_lebs - find out which logical eraseblock is newer.
234 * @ubi: UBI device description object
235 * @seb: first logical eraseblock to compare
236 * @pnum: physical eraseblock number of the second logical eraseblock to
237 * compare
238 * @vid_hdr: volume identifier header of the second logical eraseblock
239 *
240 * This function compares two copies of a LEB and reports which one is newer.
241 * In case of success this function returns a positive value, and in case of
242 * failure a negative error code is returned. The success return codes use the
243 * following bits:
244 * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
245 * second PEB (described by @pnum and @vid_hdr);
246 * o bit 0 is set: the second PEB is newer;
247 * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
248 * o bit 1 is set: bit-flips were detected in the newer LEB;
249 * o bit 2 is cleared: the older LEB is not corrupted;
250 * o bit 2 is set: the older LEB is corrupted.
251 */
252static int compare_lebs(const struct ubi_device *ubi,
253 const struct ubi_scan_leb *seb, int pnum,
254 const struct ubi_vid_hdr *vid_hdr)
255{
256 void *buf;
257 int len, err, second_is_newer, bitflips = 0, corrupted = 0;
258 uint32_t data_crc, crc;
259 struct ubi_vid_hdr *vidh = NULL;
260 unsigned long long sqnum2 = ubi64_to_cpu(vid_hdr->sqnum);
261
262 if (seb->sqnum == 0 && sqnum2 == 0) {
263 long long abs, v1 = seb->leb_ver, v2 = ubi32_to_cpu(vid_hdr->leb_ver);
264
265 /*
266 * UBI constantly increases the logical eraseblock version
267 * number and it can overflow. Thus, we have to bear in mind
268 * that versions that are close to %0xFFFFFFFF are less then
269 * versions that are close to %0.
270 *
271 * The UBI WL unit guarantees that the number of pending tasks
272 * is not greater then %0x7FFFFFFF. So, if the difference
273 * between any two versions is greater or equivalent to
274 * %0x7FFFFFFF, there was an overflow and the logical
275 * eraseblock with lower version is actually newer then the one
276 * with higher version.
277 *
278 * FIXME: but this is anyway obsolete and will be removed at
279 * some point.
280 */
281
282 dbg_bld("using old crappy leb_ver stuff");
283
284 abs = v1 - v2;
285 if (abs < 0)
286 abs = -abs;
287
288 if (abs < 0x7FFFFFFF)
289 /* Non-overflow situation */
290 second_is_newer = (v2 > v1);
291 else
292 second_is_newer = (v2 < v1);
293 } else
294 /* Obviously the LEB with lower sequence counter is older */
295 second_is_newer = sqnum2 > seb->sqnum;
296
297 /*
298 * Now we know which copy is newer. If the copy flag of the PEB with
299 * newer version is not set, then we just return, otherwise we have to
300 * check data CRC. For the second PEB we already have the VID header,
301 * for the first one - we'll need to re-read it from flash.
302 *
303 * FIXME: this may be optimized so that we wouldn't read twice.
304 */
305
306 if (second_is_newer) {
307 if (!vid_hdr->copy_flag) {
308 /* It is not a copy, so it is newer */
309 dbg_bld("second PEB %d is newer, copy_flag is unset",
310 pnum);
311 return 1;
312 }
313 } else {
314 pnum = seb->pnum;
315
316 vidh = ubi_zalloc_vid_hdr(ubi);
317 if (!vidh)
318 return -ENOMEM;
319
320 err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
321 if (err) {
322 if (err == UBI_IO_BITFLIPS)
323 bitflips = 1;
324 else {
325 dbg_err("VID of PEB %d header is bad, but it "
326 "was OK earlier", pnum);
327 if (err > 0)
328 err = -EIO;
329
330 goto out_free_vidh;
331 }
332 }
333
334 if (!vidh->copy_flag) {
335 /* It is not a copy, so it is newer */
336 dbg_bld("first PEB %d is newer, copy_flag is unset",
337 pnum);
338 err = bitflips << 1;
339 goto out_free_vidh;
340 }
341
342 vid_hdr = vidh;
343 }
344
345 /* Read the data of the copy and check the CRC */
346
347 len = ubi32_to_cpu(vid_hdr->data_size);
348 buf = kmalloc(len, GFP_KERNEL);
349 if (!buf) {
350 err = -ENOMEM;
351 goto out_free_vidh;
352 }
353
354 err = ubi_io_read_data(ubi, buf, pnum, 0, len);
355 if (err && err != UBI_IO_BITFLIPS)
356 goto out_free_buf;
357
358 data_crc = ubi32_to_cpu(vid_hdr->data_crc);
359 crc = crc32(UBI_CRC32_INIT, buf, len);
360 if (crc != data_crc) {
361 dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
362 pnum, crc, data_crc);
363 corrupted = 1;
364 bitflips = 0;
365 second_is_newer = !second_is_newer;
366 } else {
367 dbg_bld("PEB %d CRC is OK", pnum);
368 bitflips = !!err;
369 }
370
371 kfree(buf);
372 ubi_free_vid_hdr(ubi, vidh);
373
374 if (second_is_newer)
375 dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
376 else
377 dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
378
379 return second_is_newer | (bitflips << 1) | (corrupted << 2);
380
381out_free_buf:
382 kfree(buf);
383out_free_vidh:
384 ubi_free_vid_hdr(ubi, vidh);
385 ubi_assert(err < 0);
386 return err;
387}
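
/*
 * Illustrative sketch, not part of this file: decoding the bit-encoded
 * result of compare_lebs() described above. The helper name is an
 * assumption made for illustration only.
 */
static void __attribute__((unused)) example_decode_cmp_res(int cmp_res)
{
	int second_is_newer = cmp_res & 1;	/* bit 0: second copy wins */
	int bitflips = cmp_res & 2;		/* bit 1: newer copy has bit-flips */
	int corrupted = cmp_res & 4;		/* bit 2: older copy is corrupted */

	dbg_bld("newer: %s, bitflips: %d, old copy corrupted: %d",
		second_is_newer ? "second" : "first", !!bitflips, !!corrupted);
}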
388
389/**
390 * ubi_scan_add_used - add information about a physical eraseblock to the
391 * scanning information.
392 * @ubi: UBI device description object
393 * @si: scanning information
394 * @pnum: the physical eraseblock number
395 * @ec: erase counter
396 * @vid_hdr: the volume identifier header
397 * @bitflips: if bit-flips were detected when this physical eraseblock was read
398 *
399 * This function returns zero in case of success and a negative error code in
400 * case of failure.
401 */
402int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
403 int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
404 int bitflips)
405{
406 int err, vol_id, lnum;
407 uint32_t leb_ver;
408 unsigned long long sqnum;
409 struct ubi_scan_volume *sv;
410 struct ubi_scan_leb *seb;
411 struct rb_node **p, *parent = NULL;
412
413 vol_id = ubi32_to_cpu(vid_hdr->vol_id);
414 lnum = ubi32_to_cpu(vid_hdr->lnum);
415 sqnum = ubi64_to_cpu(vid_hdr->sqnum);
416 leb_ver = ubi32_to_cpu(vid_hdr->leb_ver);
417
418 dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
419 pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
420
421 sv = add_volume(si, vol_id, pnum, vid_hdr);
422	if (IS_ERR(sv))
423 return PTR_ERR(sv);
424
425 /*
426 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
427 * if this is the first instance of this logical eraseblock or not.
428 */
429 p = &sv->root.rb_node;
430 while (*p) {
431 int cmp_res;
432
433 parent = *p;
434 seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
435 if (lnum != seb->lnum) {
436 if (lnum < seb->lnum)
437 p = &(*p)->rb_left;
438 else
439 p = &(*p)->rb_right;
440 continue;
441 }
442
443 /*
444 * There is already a physical eraseblock describing the same
445 * logical eraseblock present.
446 */
447
448 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
449 "LEB ver %u, EC %d", seb->pnum, seb->sqnum,
450 seb->leb_ver, seb->ec);
451
452 /*
453 * Make sure that the logical eraseblocks have different
454 * versions. Otherwise the image is bad.
455 */
456 if (seb->leb_ver == leb_ver && leb_ver != 0) {
457 ubi_err("two LEBs with same version %u", leb_ver);
458 ubi_dbg_dump_seb(seb, 0);
459 ubi_dbg_dump_vid_hdr(vid_hdr);
460 return -EINVAL;
461 }
462
463 /*
464 * Make sure that the logical eraseblocks have different
465 * sequence numbers. Otherwise the image is bad.
466 *
467 * FIXME: remove 'sqnum != 0' check when leb_ver is removed.
468 */
469 if (seb->sqnum == sqnum && sqnum != 0) {
470 ubi_err("two LEBs with same sequence number %llu",
471 sqnum);
472 ubi_dbg_dump_seb(seb, 0);
473 ubi_dbg_dump_vid_hdr(vid_hdr);
474 return -EINVAL;
475 }
476
477 /*
478 * Now we have to drop the older one and preserve the newer
479 * one.
480 */
481 cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
482 if (cmp_res < 0)
483 return cmp_res;
484
485 if (cmp_res & 1) {
486 /*
487			 * This logical eraseblock is newer than the one
488 * found earlier.
489 */
490 err = validate_vid_hdr(vid_hdr, sv, pnum);
491 if (err)
492 return err;
493
494 if (cmp_res & 4)
495 err = ubi_scan_add_to_list(si, seb->pnum,
496 seb->ec, &si->corr);
497 else
498 err = ubi_scan_add_to_list(si, seb->pnum,
499 seb->ec, &si->erase);
500 if (err)
501 return err;
502
503 seb->ec = ec;
504 seb->pnum = pnum;
505 seb->scrub = ((cmp_res & 2) || bitflips);
506 seb->sqnum = sqnum;
507 seb->leb_ver = leb_ver;
508
509 if (sv->highest_lnum == lnum)
510 sv->last_data_size =
511 ubi32_to_cpu(vid_hdr->data_size);
512
513 return 0;
514 } else {
515 /*
516			 * This logical eraseblock is older than the one found
517 * previously.
518 */
519 if (cmp_res & 4)
520 return ubi_scan_add_to_list(si, pnum, ec,
521 &si->corr);
522 else
523 return ubi_scan_add_to_list(si, pnum, ec,
524 &si->erase);
525 }
526 }
527
528 /*
529 * We've met this logical eraseblock for the first time, add it to the
530 * scanning information.
531 */
532
533 err = validate_vid_hdr(vid_hdr, sv, pnum);
534 if (err)
535 return err;
536
537 seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
538 if (!seb)
539 return -ENOMEM;
540
541 seb->ec = ec;
542 seb->pnum = pnum;
543 seb->lnum = lnum;
544 seb->sqnum = sqnum;
545 seb->scrub = bitflips;
546 seb->leb_ver = leb_ver;
547
548 if (sv->highest_lnum <= lnum) {
549 sv->highest_lnum = lnum;
550 sv->last_data_size = ubi32_to_cpu(vid_hdr->data_size);
551 }
552
553 if (si->max_sqnum < sqnum)
554 si->max_sqnum = sqnum;
555
556 sv->leb_count += 1;
557 rb_link_node(&seb->u.rb, parent, p);
558 rb_insert_color(&seb->u.rb, &sv->root);
559 return 0;
560}
561
562/**
563 * ubi_scan_find_sv - find information about a particular volume in the
564 * scanning information.
565 * @si: scanning information
566 * @vol_id: the requested volume ID
567 *
568 * This function returns a pointer to the volume description or %NULL if there
569 * are no data about this volume in the scanning information.
570 */
571struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
572 int vol_id)
573{
574 struct ubi_scan_volume *sv;
575 struct rb_node *p = si->volumes.rb_node;
576
577 while (p) {
578 sv = rb_entry(p, struct ubi_scan_volume, rb);
579
580 if (vol_id == sv->vol_id)
581 return sv;
582
583 if (vol_id > sv->vol_id)
584 p = p->rb_left;
585 else
586 p = p->rb_right;
587 }
588
589 return NULL;
590}
591
592/**
593 * ubi_scan_find_seb - find information about a particular logical
594 * eraseblock in the volume scanning information.
595 * @sv: a pointer to the volume scanning information
596 * @lnum: the requested logical eraseblock
597 *
598 * This function returns a pointer to the scanning logical eraseblock or %NULL
599 * if there are no data about it in the scanning volume information.
600 */
601struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
602 int lnum)
603{
604 struct ubi_scan_leb *seb;
605 struct rb_node *p = sv->root.rb_node;
606
607 while (p) {
608 seb = rb_entry(p, struct ubi_scan_leb, u.rb);
609
610 if (lnum == seb->lnum)
611 return seb;
612
613		if (lnum < seb->lnum)
614 p = p->rb_left;
615 else
616 p = p->rb_right;
617 }
618
619 return NULL;
620}
621
622/**
623 * ubi_scan_rm_volume - delete scanning information about a volume.
624 * @si: scanning information
625 * @sv: the volume scanning information to delete
626 */
627void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
628{
629 struct rb_node *rb;
630 struct ubi_scan_leb *seb;
631
632 dbg_bld("remove scanning information about volume %d", sv->vol_id);
633
634 while ((rb = rb_first(&sv->root))) {
635 seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
636 rb_erase(&seb->u.rb, &sv->root);
637 list_add_tail(&seb->u.list, &si->erase);
638 }
639
640 rb_erase(&sv->rb, &si->volumes);
641 kfree(sv);
642 si->vols_found -= 1;
643}
644
645/**
646 * ubi_scan_erase_peb - erase a physical eraseblock.
647 * @ubi: UBI device description object
648 * @si: scanning information
649 * @pnum: physical eraseblock number to erase;
650 * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
651 *
652 * This function erases physical eraseblock 'pnum', and writes the erase
653 * counter header to it. This function should only be used on UBI device
654 * initialization stages, when the EBA unit has not been initialized yet. This
655 * function returns zero in case of success and a negative error code in case
656 * of failure.
657 */
658int ubi_scan_erase_peb(const struct ubi_device *ubi,
659 const struct ubi_scan_info *si, int pnum, int ec)
660{
661 int err;
662 struct ubi_ec_hdr *ec_hdr;
663
664 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
665 if (!ec_hdr)
666 return -ENOMEM;
667
668 if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
669 /*
670 * Erase counter overflow. Upgrade UBI and use 64-bit
671 * erase counters internally.
672 */
673 ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
674		err = -EINVAL;
675		goto out_free;
676	}
676
677 ec_hdr->ec = cpu_to_ubi64(ec);
678
679 err = ubi_io_sync_erase(ubi, pnum, 0);
680 if (err < 0)
681 goto out_free;
682
683 err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
684
685out_free:
686 kfree(ec_hdr);
687 return err;
688}
689
690/**
691 * ubi_scan_get_free_peb - get a free physical eraseblock.
692 * @ubi: UBI device description object
693 * @si: scanning information
694 *
695 * This function returns a free physical eraseblock. It is supposed to be
696 * called on the UBI initialization stages when the wear-leveling unit is not
697 * initialized yet. This function picks a physical eraseblock from one of the
698 * lists, writes the EC header if it is needed, and removes it from the list.
699 *
700 * This function returns scanning physical eraseblock information in case of
701 * success and an error code in case of failure.
702 */
703struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi,
704 struct ubi_scan_info *si)
705{
706 int err = 0, i;
707 struct ubi_scan_leb *seb;
708
709 if (!list_empty(&si->free)) {
710 seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
711 list_del(&seb->u.list);
712 dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
713 return seb;
714 }
715
716 for (i = 0; i < 2; i++) {
717 struct list_head *head;
718 struct ubi_scan_leb *tmp_seb;
719
720 if (i == 0)
721 head = &si->erase;
722 else
723 head = &si->corr;
724
725 /*
726 * We try to erase the first physical eraseblock from the @head
727 * list and pick it if we succeed, or try to erase the
728 * next one if not, and so forth. We do not need to care about bad
729 * eraseblocks here - they will be handled later.
730 */
731 list_for_each_entry_safe(seb, tmp_seb, head, u.list) {
732 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
733 seb->ec = si->mean_ec;
734
735 err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
736 if (err)
737 continue;
738
739 seb->ec += 1;
740 list_del(&seb->u.list);
741 dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
742 return seb;
743 }
744 }
745
746 ubi_err("no eraseblocks found");
747 return ERR_PTR(-ENOSPC);
748}
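/*
 * Illustrative sketch of how initialization-time code (the volume table
 * unit, for instance) might use ubi_scan_get_free_peb(); 'ubi' and 'si' are
 * assumed to be in scope:
 *
 *	struct ubi_scan_leb *new_seb;
 *
 *	new_seb = ubi_scan_get_free_peb(ubi, si);
 *	if (IS_ERR(new_seb))
 *		return PTR_ERR(new_seb);
 *
 * On success the returned PEB has been erased and already carries a valid EC
 * header, so the caller may immediately write a VID header and data to it.
 */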
749
750/**
751 * process_eb - read UBI headers, check them and add corresponding data
752 * to the scanning information.
753 * @ubi: UBI device description object
754 * @si: scanning information
755 * @pnum: the physical eraseblock number
756 *
757 * This function returns zero if the physical eraseblock was successfully
758 * handled and a negative error code in case of failure.
759 */
760static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
761{
762 long long ec;
763 int err, bitflips = 0, vol_id, ec_corr = 0;
764
765 dbg_bld("scan PEB %d", pnum);
766
767 /* Skip bad physical eraseblocks */
768 err = ubi_io_is_bad(ubi, pnum);
769 if (err < 0)
770 return err;
771 else if (err) {
772 /*
773 * FIXME: it is actually the duty of the I/O unit to initialize
774 * this, but MTD does not provide enough information.
775 */
776 si->bad_peb_count += 1;
777 return 0;
778 }
779
780 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
781 if (err < 0)
782 return err;
783 else if (err == UBI_IO_BITFLIPS)
784 bitflips = 1;
785 else if (err == UBI_IO_PEB_EMPTY)
786 return ubi_scan_add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC,
787 &si->erase);
788 else if (err == UBI_IO_BAD_EC_HDR) {
789 /*
790 * We have to also look at the VID header, since it may not be
791 * corrupted. Set the %bitflips flag in order to make this PEB be
792 * moved and the EC header be re-created.
793 */
794 ec_corr = 1;
795 ec = UBI_SCAN_UNKNOWN_EC;
796 bitflips = 1;
797 }
798
799 si->is_empty = 0;
800
801 if (!ec_corr) {
802 /* Make sure UBI version is OK */
803 if (ech->version != UBI_VERSION) {
804 ubi_err("this UBI version is %d, image version is %d",
805 UBI_VERSION, (int)ech->version);
806 return -EINVAL;
807 }
808
809 ec = ubi64_to_cpu(ech->ec);
810 if (ec > UBI_MAX_ERASECOUNTER) {
811 /*
812 * Erase counter overflow. The EC headers have 64 bits
813 * reserved, but we anyway make use of only 31 bit
814 * values, as this seems to be enough for any existing
815 * flash. Upgrade UBI and use 64-bit erase counters
816 * internally.
817 */
818 ubi_err("erase counter overflow, max is %d",
819 UBI_MAX_ERASECOUNTER);
820 ubi_dbg_dump_ec_hdr(ech);
821 return -EINVAL;
822 }
823 }
824
825 /* OK, we've done with the EC header, let's look at the VID header */
826
827 err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
828 if (err < 0)
829 return err;
830 else if (err == UBI_IO_BITFLIPS)
831 bitflips = 1;
832 else if (err == UBI_IO_BAD_VID_HDR ||
833 (err == UBI_IO_PEB_FREE && ec_corr)) {
834 /* VID header is corrupted */
835 err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
836 if (err)
837 return err;
838 goto adjust_mean_ec;
839 } else if (err == UBI_IO_PEB_FREE) {
840 /* No VID header - the physical eraseblock is free */
841 err = ubi_scan_add_to_list(si, pnum, ec, &si->free);
842 if (err)
843 return err;
844 goto adjust_mean_ec;
845 }
846
847 vol_id = ubi32_to_cpu(vidh->vol_id);
848 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
849 int lnum = ubi32_to_cpu(vidh->lnum);
850
851 /* Unsupported internal volume */
852 switch (vidh->compat) {
853 case UBI_COMPAT_DELETE:
854 ubi_msg("\"delete\" compatible internal volume %d:%d"
855 " found, remove it", vol_id, lnum);
856 err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
857 if (err)
858 return err;
859 break;
860
861 case UBI_COMPAT_RO:
862 ubi_msg("read-only compatible internal volume %d:%d"
863 " found, switch to read-only mode",
864 vol_id, lnum);
865 ubi->ro_mode = 1;
866 break;
867
868 case UBI_COMPAT_PRESERVE:
869 ubi_msg("\"preserve\" compatible internal volume %d:%d"
870 " found", vol_id, lnum);
871 err = ubi_scan_add_to_list(si, pnum, ec, &si->alien);
872 if (err)
873 return err;
874 si->alien_peb_count += 1;
875 return 0;
876
877 case UBI_COMPAT_REJECT:
878 ubi_err("incompatible internal volume %d:%d found",
879 vol_id, lnum);
880 return -EINVAL;
881 }
882 }
883
884 /* Both UBI headers seem to be fine */
885 err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
886 if (err)
887 return err;
888
889adjust_mean_ec:
890 if (!ec_corr) {
891 if (si->ec_sum + ec < ec) {
892 commit_to_mean_value(si);
893 si->ec_sum = 0;
894 si->ec_count = 0;
895 } else {
896 si->ec_sum += ec;
897 si->ec_count += 1;
898 }
899
900 if (ec > si->max_ec)
901 si->max_ec = ec;
902 if (ec < si->min_ec)
903 si->min_ec = ec;
904 }
905
906 return 0;
907}
908
909/**
910 * ubi_scan - scan an MTD device.
911 * @ubi: UBI device description object
912 *
913 * This function does full scanning of an MTD device and returns complete
914 * information about it. In case of failure, an error code is returned.
915 */
916struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
917{
918 int err, pnum;
919 struct rb_node *rb1, *rb2;
920 struct ubi_scan_volume *sv;
921 struct ubi_scan_leb *seb;
922 struct ubi_scan_info *si;
923
924 si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
925 if (!si)
926 return ERR_PTR(-ENOMEM);
927
928 INIT_LIST_HEAD(&si->corr);
929 INIT_LIST_HEAD(&si->free);
930 INIT_LIST_HEAD(&si->erase);
931 INIT_LIST_HEAD(&si->alien);
932 si->volumes = RB_ROOT;
933 si->is_empty = 1;
934
935 err = -ENOMEM;
936 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
937 if (!ech)
938 goto out_si;
939
940 vidh = ubi_zalloc_vid_hdr(ubi);
941 if (!vidh)
942 goto out_ech;
943
944 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
945 cond_resched();
946
947 dbg_msg("process PEB %d", pnum);
948 err = process_eb(ubi, si, pnum);
949 if (err < 0)
950 goto out_vidh;
951 }
952
953 dbg_msg("scanning is finished");
954
955 /* Finish mean erase counter calculations */
956 if (si->ec_count)
957 commit_to_mean_value(si);
958
959 if (si->is_empty)
960 ubi_msg("empty MTD device detected");
961
962 /*
963 * In case of unknown erase counter we use the mean erase counter
964 * value.
965 */
966 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
967 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
968 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
969 seb->ec = si->mean_ec;
970 }
971
972 list_for_each_entry(seb, &si->free, u.list) {
973 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
974 seb->ec = si->mean_ec;
975 }
976
977 list_for_each_entry(seb, &si->corr, u.list)
978 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
979 seb->ec = si->mean_ec;
980
981 list_for_each_entry(seb, &si->erase, u.list)
982 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
983 seb->ec = si->mean_ec;
984
985 err = paranoid_check_si(ubi, si);
986 if (err) {
987 if (err > 0)
988 err = -EINVAL;
989 goto out_vidh;
990 }
991
992 ubi_free_vid_hdr(ubi, vidh);
993 kfree(ech);
994
995 return si;
996
997out_vidh:
998 ubi_free_vid_hdr(ubi, vidh);
999out_ech:
1000 kfree(ech);
1001out_si:
1002 ubi_scan_destroy_si(si);
1003 return ERR_PTR(err);
1004}
1005
1006/**
1007 * destroy_sv - free the scanning volume information
1008 * @sv: scanning volume information
1009 *
1010 * This function destroys the volume RB-tree (@sv->root) and the scanning
1011 * volume information.
1012 */
1013static void destroy_sv(struct ubi_scan_volume *sv)
1014{
1015 struct ubi_scan_leb *seb;
1016 struct rb_node *this = sv->root.rb_node;
1017
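	/*
	 * Free the RB-tree without recursion and without rb_erase(): walk
	 * down to a leaf node, free it, and clear the corresponding child
	 * pointer in its parent, so that the parent itself eventually
	 * becomes a leaf and is freed on a later iteration.
	 */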
1018 while (this) {
1019 if (this->rb_left)
1020 this = this->rb_left;
1021 else if (this->rb_right)
1022 this = this->rb_right;
1023 else {
1024 seb = rb_entry(this, struct ubi_scan_leb, u.rb);
1025 this = rb_parent(this);
1026 if (this) {
1027 if (this->rb_left == &seb->u.rb)
1028 this->rb_left = NULL;
1029 else
1030 this->rb_right = NULL;
1031 }
1032
1033 kfree(seb);
1034 }
1035 }
1036 kfree(sv);
1037}
1038
1039/**
1040 * ubi_scan_destroy_si - destroy scanning information.
1041 * @si: scanning information
1042 */
1043void ubi_scan_destroy_si(struct ubi_scan_info *si)
1044{
1045 struct ubi_scan_leb *seb, *seb_tmp;
1046 struct ubi_scan_volume *sv;
1047 struct rb_node *rb;
1048
1049 list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
1050 list_del(&seb->u.list);
1051 kfree(seb);
1052 }
1053 list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
1054 list_del(&seb->u.list);
1055 kfree(seb);
1056 }
1057 list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
1058 list_del(&seb->u.list);
1059 kfree(seb);
1060 }
1061 list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
1062 list_del(&seb->u.list);
1063 kfree(seb);
1064 }
1065
1066 /* Destroy the volume RB-tree */
1067 rb = si->volumes.rb_node;
1068 while (rb) {
1069 if (rb->rb_left)
1070 rb = rb->rb_left;
1071 else if (rb->rb_right)
1072 rb = rb->rb_right;
1073 else {
1074 sv = rb_entry(rb, struct ubi_scan_volume, rb);
1075
1076 rb = rb_parent(rb);
1077 if (rb) {
1078 if (rb->rb_left == &sv->rb)
1079 rb->rb_left = NULL;
1080 else
1081 rb->rb_right = NULL;
1082 }
1083
1084 destroy_sv(sv);
1085 }
1086 }
1087
1088 kfree(si);
1089}
1090
1091#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1092
1093/**
1094 * paranoid_check_si - check if the scanning information is correct and
1095 * consistent.
1096 * @ubi: UBI device description object
1097 * @si: scanning information
1098 *
1099 * This function returns zero if the scanning information is all right, %1 if
1100 * not and a negative error code if an error occurred.
1101 */
1102static int paranoid_check_si(const struct ubi_device *ubi,
1103 struct ubi_scan_info *si)
1104{
1105 int pnum, err, vols_found = 0;
1106 struct rb_node *rb1, *rb2;
1107 struct ubi_scan_volume *sv;
1108 struct ubi_scan_leb *seb, *last_seb;
1109 uint8_t *buf;
1110
1111 /*
1112 * At first, check that scanning information is ok.
1113 */
1114 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1115 int leb_count = 0;
1116
1117 cond_resched();
1118
1119 vols_found += 1;
1120
1121 if (si->is_empty) {
1122 ubi_err("bad is_empty flag");
1123 goto bad_sv;
1124 }
1125
1126 if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
1127 sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
1128 sv->data_pad < 0 || sv->last_data_size < 0) {
1129 ubi_err("negative values");
1130 goto bad_sv;
1131 }
1132
1133 if (sv->vol_id >= UBI_MAX_VOLUMES &&
1134 sv->vol_id < UBI_INTERNAL_VOL_START) {
1135 ubi_err("bad vol_id");
1136 goto bad_sv;
1137 }
1138
1139 if (sv->vol_id > si->highest_vol_id) {
1140 ubi_err("highest_vol_id is %d, but vol_id %d is there",
1141 si->highest_vol_id, sv->vol_id);
1142 goto out;
1143 }
1144
1145 if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
1146 sv->vol_type != UBI_STATIC_VOLUME) {
1147 ubi_err("bad vol_type");
1148 goto bad_sv;
1149 }
1150
1151 if (sv->data_pad > ubi->leb_size / 2) {
1152 ubi_err("bad data_pad");
1153 goto bad_sv;
1154 }
1155
1156 last_seb = NULL;
1157 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1158 cond_resched();
1159
1160 last_seb = seb;
1161 leb_count += 1;
1162
1163 if (seb->pnum < 0 || seb->ec < 0) {
1164 ubi_err("negative values");
1165 goto bad_seb;
1166 }
1167
1168 if (seb->ec < si->min_ec) {
1169 ubi_err("bad si->min_ec (%d), %d found",
1170 si->min_ec, seb->ec);
1171 goto bad_seb;
1172 }
1173
1174 if (seb->ec > si->max_ec) {
1175 ubi_err("bad si->max_ec (%d), %d found",
1176 si->max_ec, seb->ec);
1177 goto bad_seb;
1178 }
1179
1180 if (seb->pnum >= ubi->peb_count) {
1181 ubi_err("too high PEB number %d, total PEBs %d",
1182 seb->pnum, ubi->peb_count);
1183 goto bad_seb;
1184 }
1185
1186 if (sv->vol_type == UBI_STATIC_VOLUME) {
1187 if (seb->lnum >= sv->used_ebs) {
1188 ubi_err("bad lnum or used_ebs");
1189 goto bad_seb;
1190 }
1191 } else {
1192 if (sv->used_ebs != 0) {
1193 ubi_err("non-zero used_ebs");
1194 goto bad_seb;
1195 }
1196 }
1197
1198 if (seb->lnum > sv->highest_lnum) {
1199 ubi_err("incorrect highest_lnum or lnum");
1200 goto bad_seb;
1201 }
1202 }
1203
1204 if (sv->leb_count != leb_count) {
1205 ubi_err("bad leb_count, %d objects in the tree",
1206 leb_count);
1207 goto bad_sv;
1208 }
1209
1210 if (!last_seb)
1211 continue;
1212
1213 seb = last_seb;
1214
1215 if (seb->lnum != sv->highest_lnum) {
1216 ubi_err("bad highest_lnum");
1217 goto bad_seb;
1218 }
1219 }
1220
1221 if (vols_found != si->vols_found) {
1222 ubi_err("bad si->vols_found %d, should be %d",
1223 si->vols_found, vols_found);
1224 goto out;
1225 }
1226
1227 /* Check that scanning information is correct */
1228 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1229 last_seb = NULL;
1230 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1231 int vol_type;
1232
1233 cond_resched();
1234
1235 last_seb = seb;
1236
1237 err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
1238 if (err && err != UBI_IO_BITFLIPS) {
1239 ubi_err("VID header is not OK (%d)", err);
1240 if (err > 0)
1241 err = -EIO;
1242 return err;
1243 }
1244
1245 vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
1246 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
1247 if (sv->vol_type != vol_type) {
1248 ubi_err("bad vol_type");
1249 goto bad_vid_hdr;
1250 }
1251
1252 if (seb->sqnum != ubi64_to_cpu(vidh->sqnum)) {
1253 ubi_err("bad sqnum %llu", seb->sqnum);
1254 goto bad_vid_hdr;
1255 }
1256
1257 if (sv->vol_id != ubi32_to_cpu(vidh->vol_id)) {
1258 ubi_err("bad vol_id %d", sv->vol_id);
1259 goto bad_vid_hdr;
1260 }
1261
1262 if (sv->compat != vidh->compat) {
1263 ubi_err("bad compat %d", vidh->compat);
1264 goto bad_vid_hdr;
1265 }
1266
1267 if (seb->lnum != ubi32_to_cpu(vidh->lnum)) {
1268 ubi_err("bad lnum %d", seb->lnum);
1269 goto bad_vid_hdr;
1270 }
1271
1272 if (sv->used_ebs != ubi32_to_cpu(vidh->used_ebs)) {
1273 ubi_err("bad used_ebs %d", sv->used_ebs);
1274 goto bad_vid_hdr;
1275 }
1276
1277 if (sv->data_pad != ubi32_to_cpu(vidh->data_pad)) {
1278 ubi_err("bad data_pad %d", sv->data_pad);
1279 goto bad_vid_hdr;
1280 }
1281
1282 if (seb->leb_ver != ubi32_to_cpu(vidh->leb_ver)) {
1283 ubi_err("bad leb_ver %u", seb->leb_ver);
1284 goto bad_vid_hdr;
1285 }
1286 }
1287
1288 if (!last_seb)
1289 continue;
1290
1291 if (sv->highest_lnum != ubi32_to_cpu(vidh->lnum)) {
1292 ubi_err("bad highest_lnum %d", sv->highest_lnum);
1293 goto bad_vid_hdr;
1294 }
1295
1296 if (sv->last_data_size != ubi32_to_cpu(vidh->data_size)) {
1297 ubi_err("bad last_data_size %d", sv->last_data_size);
1298 goto bad_vid_hdr;
1299 }
1300 }
1301
1302 /*
1303 * Make sure that all the physical eraseblocks are in one of the lists
1304 * or trees.
1305 */
1306 buf = kmalloc(ubi->peb_count, GFP_KERNEL);
1307 if (!buf)
1308 return -ENOMEM;
1309
1310 memset(buf, 1, ubi->peb_count);
1311 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
1312 err = ubi_io_is_bad(ubi, pnum);
1313 if (err < 0)
1314 return err;
1315 else if (err)
1316 buf[pnum] = 0;
1317 }
1318
1319 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
1320 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
1321 buf[seb->pnum] = 0;
1322
1323 list_for_each_entry(seb, &si->free, u.list)
1324 buf[seb->pnum] = 0;
1325
1326 list_for_each_entry(seb, &si->corr, u.list)
1327 buf[seb->pnum] = 0;
1328
1329 list_for_each_entry(seb, &si->erase, u.list)
1330 buf[seb->pnum] = 0;
1331
1332 list_for_each_entry(seb, &si->alien, u.list)
1333 buf[seb->pnum] = 0;
1334
1335 err = 0;
1336 for (pnum = 0; pnum < ubi->peb_count; pnum++)
1337 if (buf[pnum]) {
1338 ubi_err("PEB %d is not referred", pnum);
1339 err = 1;
1340 }
1341
1342 kfree(buf);
1343 if (err)
1344 goto out;
1345 return 0;
1346
1347bad_seb:
1348 ubi_err("bad scanning information about LEB %d", seb->lnum);
1349 ubi_dbg_dump_seb(seb, 0);
1350 ubi_dbg_dump_sv(sv);
1351 goto out;
1352
1353bad_sv:
1354 ubi_err("bad scanning information about volume %d", sv->vol_id);
1355 ubi_dbg_dump_sv(sv);
1356 goto out;
1357
1358bad_vid_hdr:
1359 ubi_err("bad scanning information about volume %d", sv->vol_id);
1360 ubi_dbg_dump_sv(sv);
1361 ubi_dbg_dump_vid_hdr(vidh);
1362
1363out:
1364 ubi_dbg_dump_stack();
1365 return 1;
1366}
1367
1368#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
new file mode 100644
index 000000000000..3949f6192c76
--- /dev/null
+++ b/drivers/mtd/ubi/scan.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21#ifndef __UBI_SCAN_H__
22#define __UBI_SCAN_H__
23
24/* The erase counter value for this physical eraseblock is unknown */
25#define UBI_SCAN_UNKNOWN_EC (-1)
26
27/**
28 * struct ubi_scan_leb - scanning information about a physical eraseblock.
29 * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
30 * @pnum: physical eraseblock number
31 * @lnum: logical eraseblock number
32 * @scrub: if this physical eraseblock needs scrubbing
33 * @sqnum: sequence number
34 * @u: union of the RB-tree and list links
35 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
36 * @u.list: link in one of the eraseblock lists
37 * @leb_ver: logical eraseblock version (obsolete)
38 *
39 * One object of this type is allocated for each physical eraseblock during
40 * scanning.
41 */
42struct ubi_scan_leb {
43 int ec;
44 int pnum;
45 int lnum;
46 int scrub;
47 unsigned long long sqnum;
48 union {
49 struct rb_node rb;
50 struct list_head list;
51 } u;
52 uint32_t leb_ver;
53};
54
55/**
56 * struct ubi_scan_volume - scanning information about a volume.
57 * @vol_id: volume ID
58 * @highest_lnum: highest logical eraseblock number in this volume
59 * @leb_count: number of logical eraseblocks in this volume
60 * @vol_type: volume type
61 * @used_ebs: number of used logical eraseblocks in this volume (only for
62 * static volumes)
63 * @last_data_size: amount of data in the last logical eraseblock of this
64 * volume (always equivalent to the usable logical eraseblock size in case of
65 * dynamic volumes)
66 * @data_pad: how many bytes at the end of logical eraseblocks of this volume
67 * are not used (due to volume alignment)
68 * @compat: compatibility flags of this volume
69 * @rb: link in the volume RB-tree
70 * @root: root of the RB-tree containing all the eraseblocks belonging to this
71 * volume (&struct ubi_scan_leb objects)
72 *
73 * One object of this type is allocated for each volume during scanning.
74 */
75struct ubi_scan_volume {
76 int vol_id;
77 int highest_lnum;
78 int leb_count;
79 int vol_type;
80 int used_ebs;
81 int last_data_size;
82 int data_pad;
83 int compat;
84 struct rb_node rb;
85 struct rb_root root;
86};
87
88/**
89 * struct ubi_scan_info - UBI scanning information.
90 * @volumes: root of the volume RB-tree
91 * @corr: list of corrupted physical eraseblocks
92 * @free: list of free physical eraseblocks
93 * @erase: list of physical eraseblocks which have to be erased
94 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
95 * those belonging to "preserve"-compatible internal volumes)
96 * @bad_peb_count: count of bad physical eraseblocks
97 * @vols_found: number of volumes found during scanning
98 * @highest_vol_id: highest volume ID
99 * @alien_peb_count: count of physical eraseblocks in the @alien list
100 * @is_empty: flag indicating whether the MTD device is empty or not
101 * @min_ec: lowest erase counter value
102 * @max_ec: highest erase counter value
103 * @max_sqnum: highest sequence number value
104 * @mean_ec: mean erase counter value
105 * @ec_sum: a temporary variable used when calculating @mean_ec
106 * @ec_count: a temporary variable used when calculating @mean_ec
107 *
108 * This data structure contains the result of scanning and may be used by other
109 * UBI units to build the final UBI data structures, for further error recovery,
110 * and so on.
111 */
112struct ubi_scan_info {
113 struct rb_root volumes;
114 struct list_head corr;
115 struct list_head free;
116 struct list_head erase;
117 struct list_head alien;
118 int bad_peb_count;
119 int vols_found;
120 int highest_vol_id;
121 int alien_peb_count;
122 int is_empty;
123 int min_ec;
124 int max_ec;
125 unsigned long long max_sqnum;
126 int mean_ec;
127 int ec_sum;
128 int ec_count;
129};
130
131struct ubi_device;
132struct ubi_vid_hdr;
133
134/*
135 * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a
136 * list.
137 *
138 * @sv: volume scanning information
139 * @seb: scanning eraseblock information
140 * @list: the list to move to
141 */
142static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
143 struct ubi_scan_leb *seb,
144 struct list_head *list)
145{
146 rb_erase(&seb->u.rb, &sv->root);
147 list_add_tail(&seb->u.list, list);
148}
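/*
 * For example (illustrative), when the scanning code decides that a PEB
 * sitting in the per-volume tree is in fact corrupted, it can be moved to
 * the corrupted list like this:
 *
 *	ubi_scan_move_to_list(sv, seb, &si->corr);
 */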
149
150int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
151 struct list_head *list);
152int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
153 int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
154 int bitflips);
155struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
156 int vol_id);
157struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
158 int lnum);
159void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
160struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi,
161 struct ubi_scan_info *si);
162int ubi_scan_erase_peb(const struct ubi_device *ubi,
163 const struct ubi_scan_info *si, int pnum, int ec);
164struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
165void ubi_scan_destroy_si(struct ubi_scan_info *si);
166
167#endif /* !__UBI_SCAN_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
new file mode 100644
index 000000000000..feb647f108f0
--- /dev/null
+++ b/drivers/mtd/ubi/ubi.h
@@ -0,0 +1,535 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 * Copyright (c) Nokia Corporation, 2006, 2007
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * Author: Artem Bityutskiy (Битюцкий Артём)
20 */
21
22#ifndef __UBI_UBI_H__
23#define __UBI_UBI_H__
24
25#include <linux/init.h>
26#include <linux/types.h>
27#include <linux/list.h>
28#include <linux/rbtree.h>
29#include <linux/sched.h>
30#include <linux/wait.h>
31#include <linux/mutex.h>
32#include <linux/rwsem.h>
33#include <linux/spinlock.h>
34#include <linux/fs.h>
35#include <linux/cdev.h>
36#include <linux/device.h>
37#include <linux/string.h>
38#include <linux/mtd/mtd.h>
39
40#include <mtd/ubi-header.h>
41#include <linux/mtd/ubi.h>
42
43#include "scan.h"
44#include "debug.h"
45
46/* Maximum number of supported UBI devices */
47#define UBI_MAX_DEVICES 32
48
49/* UBI name used for character devices, sysfs, etc */
50#define UBI_NAME_STR "ubi"
51
52/* Normal UBI messages */
53#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
54/* UBI warning messages */
55#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
56 __FUNCTION__, ##__VA_ARGS__)
57/* UBI error messages */
58#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
59 __FUNCTION__, ##__VA_ARGS__)
60
61/* Lowest number of PEBs reserved for bad PEB handling */
62#define MIN_RESEVED_PEBS 2
63
64/* Background thread name pattern */
65#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
66
67/* This marker in the EBA table means that the LEB is un-mapped */
68#define UBI_LEB_UNMAPPED -1
69
70/*
71 * In case of errors, UBI tries to repeat the operation several times before
72 * returning error. The below constant defines how many times UBI re-tries.
73 */
74#define UBI_IO_RETRIES 3
75
76/*
77 * Error codes returned by the I/O unit.
78 *
79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
80 * 0xFF bytes
81 * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a
82 * valid erase counter header, and the rest are %0xFF bytes
83 * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC)
84 * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or
85 * CRC)
86 * UBI_IO_BITFLIPS: bit-flips were detected and corrected
87 */
88enum {
89 UBI_IO_PEB_EMPTY = 1,
90 UBI_IO_PEB_FREE,
91 UBI_IO_BAD_EC_HDR,
92 UBI_IO_BAD_VID_HDR,
93 UBI_IO_BITFLIPS
94};
95
96extern int ubi_devices_cnt;
97extern struct ubi_device *ubi_devices[];
98
99struct ubi_volume_desc;
100
101/**
102 * struct ubi_volume - UBI volume description data structure.
103 * @dev: device object to make use of the Linux device model
104 * @cdev: character device object to create character device
105 * @ubi: reference to the UBI device description object
106 * @vol_id: volume ID
107 * @readers: number of users holding this volume in read-only mode
108 * @writers: number of users holding this volume in read-write mode
109 * @exclusive: whether somebody holds this volume in exclusive mode
110 * @removed: if the volume was removed
111 * @checked: if this static volume was checked
112 *
113 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
114 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
115 * @usable_leb_size: logical eraseblock size without padding
116 * @used_ebs: how many logical eraseblocks in this volume contain data
117 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
118 * @used_bytes: how many bytes of data this volume contains
119 * @upd_marker: non-zero if the update marker is set for this volume
120 * @corrupted: non-zero if the volume is corrupted (static volumes only)
121 * @alignment: volume alignment
122 * @data_pad: how many bytes are not used at the end of physical eraseblocks to
123 * satisfy the requested alignment
124 * @name_len: volume name length
125 * @name: volume name
126 *
127 * @updating: whether the volume is being updated
128 * @upd_ebs: how many eraseblocks are expected to be updated
129 * @upd_bytes: how many bytes are expected to be received
130 * @upd_received: how many update bytes were already received
131 * @upd_buf: update buffer which is used to collect update data
132 *
133 * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
134 *
135 * @gluebi_desc: gluebi UBI volume descriptor
136 * @gluebi_refcount: reference count of the gluebi MTD device
137 * @gluebi_mtd: MTD device description object of the gluebi MTD device
138 *
139 * The @corrupted field indicates that the volume's contents are corrupted.
140 * Since UBI protects only static volumes, this field is not relevant to
141 * dynamic volumes - it is the user's responsibility to ensure their data
142 * integrity.
143 *
144 * The @upd_marker flag indicates that this volume is either being updated at
145 * the moment or is damaged because of an unclean reboot.
146 */
147struct ubi_volume {
148 struct device dev;
149 struct cdev cdev;
150 struct ubi_device *ubi;
151 int vol_id;
152 int readers;
153 int writers;
154 int exclusive;
155 int removed;
156 int checked;
157
158 int reserved_pebs;
159 int vol_type;
160 int usable_leb_size;
161 int used_ebs;
162 int last_eb_bytes;
163 long long used_bytes;
164 int upd_marker;
165 int corrupted;
166 int alignment;
167 int data_pad;
168 int name_len;
169 char name[UBI_VOL_NAME_MAX+1];
170
171 int updating;
172 int upd_ebs;
173 long long upd_bytes;
174 long long upd_received;
175 void *upd_buf;
176
177 int *eba_tbl;
178
179#ifdef CONFIG_MTD_UBI_GLUEBI
180 /* Gluebi-related stuff may be compiled out */
181 struct ubi_volume_desc *gluebi_desc;
182 int gluebi_refcount;
183 struct mtd_info gluebi_mtd;
184#endif
185};
186
187/**
188 * struct ubi_volume_desc - descriptor of the UBI volume returned when it is
189 * opened.
190 * @vol: reference to the corresponding volume description object
191 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
192 */
193struct ubi_volume_desc {
194 struct ubi_volume *vol;
195 int mode;
196};
197
198struct ubi_wl_entry;
199
200/**
201 * struct ubi_device - UBI device description structure
202 * @dev: class device object to use the Linux device model
203 * @cdev: character device object to create character device
204 * @ubi_num: UBI device number
205 * @ubi_name: UBI device name
206 * @major: character device major number
207 * @vol_count: number of volumes in this UBI device
208 * @volumes: volumes of this UBI device
209 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
210 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers,
211 * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and
212 * @vol->eba_tbl.
213 *
214 * @rsvd_pebs: count of reserved physical eraseblocks
215 * @avail_pebs: count of available physical eraseblocks
216 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
217 * handling
218 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
219 *
220 * @vtbl_slots: how many slots are available in the volume table
221 * @vtbl_size: size of the volume table in bytes
222 * @vtbl: in-RAM volume table copy
223 *
224 * @max_ec: current highest erase counter value
225 * @mean_ec: current mean erase counter value
226 *
227 * @global_sqnum: global sequence number
228 * @ltree_lock: protects the lock tree and @global_sqnum
229 * @ltree: the lock tree
230 * @vtbl_mutex: protects on-flash volume table
231 *
232 * @used: RB-tree of used physical eraseblocks
233 * @free: RB-tree of free physical eraseblocks
234 * @scrub: RB-tree of physical eraseblocks which need scrubbing
235 * @prot: protection trees
236 * @prot.pnum: protection tree indexed by physical eraseblock numbers
237 * @prot.aec: protection tree indexed by absolute erase counter value
238 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
239 * @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works
240 * fields
241 * @wl_scheduled: non-zero if the wear-leveling was scheduled
242 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
243 * physical eraseblock
244 * @abs_ec: absolute erase counter
245 * @move_from: physical eraseblock from where the data is being moved
246 * @move_to: physical eraseblock where the data is being moved to
247 * @move_from_put: if the "from" PEB was put
248 * @move_to_put: if the "to" PEB was put
249 * @works: list of pending works
250 * @works_count: count of pending works
251 * @bgt_thread: background thread description object
252 * @thread_enabled: if the background thread is enabled
253 * @bgt_name: background thread name
254 *
255 * @flash_size: underlying MTD device size (in bytes)
256 * @peb_count: count of physical eraseblocks on the MTD device
257 * @peb_size: physical eraseblock size
258 * @bad_peb_count: count of bad physical eraseblocks
259 * @good_peb_count: count of good physical eraseblocks
260 * @min_io_size: minimal input/output unit size of the underlying MTD device
261 * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
262 * @ro_mode: if the UBI device is in read-only mode
263 * @leb_size: logical eraseblock size
264 * @leb_start: starting offset of logical eraseblocks within physical
265 * eraseblocks
266 * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
267 * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
268 * @vid_hdr_offset: starting offset of the volume identifier header (might be
269 * unaligned)
270 * @vid_hdr_aloffset: starting offset of the VID header aligned to
271 * @hdrs_min_io_size
272 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
273 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
274 * not
275 * @mtd: MTD device descriptor
276 */
277struct ubi_device {
278 struct cdev cdev;
279 struct device dev;
280 int ubi_num;
281 char ubi_name[sizeof(UBI_NAME_STR)+5];
282 int major;
283 int vol_count;
284 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
285 spinlock_t volumes_lock;
286
287 int rsvd_pebs;
288 int avail_pebs;
289 int beb_rsvd_pebs;
290 int beb_rsvd_level;
291
292 int vtbl_slots;
293 int vtbl_size;
294 struct ubi_vtbl_record *vtbl;
295 struct mutex vtbl_mutex;
296
297 int max_ec;
298 int mean_ec;
299
300 /* EBA unit's stuff */
301 unsigned long long global_sqnum;
302 spinlock_t ltree_lock;
303 struct rb_root ltree;
304
305 /* Wear-leveling unit's stuff */
306 struct rb_root used;
307 struct rb_root free;
308 struct rb_root scrub;
309 struct {
310 struct rb_root pnum;
311 struct rb_root aec;
312 } prot;
313 spinlock_t wl_lock;
314 int wl_scheduled;
315 struct ubi_wl_entry **lookuptbl;
316 unsigned long long abs_ec;
317 struct ubi_wl_entry *move_from;
318 struct ubi_wl_entry *move_to;
319 int move_from_put;
320 int move_to_put;
321 struct list_head works;
322 int works_count;
323 struct task_struct *bgt_thread;
324 int thread_enabled;
325 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
326
327 /* I/O unit's stuff */
328 long long flash_size;
329 int peb_count;
330 int peb_size;
331 int bad_peb_count;
332 int good_peb_count;
333 int min_io_size;
334 int hdrs_min_io_size;
335 int ro_mode;
336 int leb_size;
337 int leb_start;
338 int ec_hdr_alsize;
339 int vid_hdr_alsize;
340 int vid_hdr_offset;
341 int vid_hdr_aloffset;
342 int vid_hdr_shift;
343 int bad_allowed;
344 struct mtd_info *mtd;
345};
346
347extern struct file_operations ubi_cdev_operations;
348extern struct file_operations ubi_vol_cdev_operations;
349extern struct class *ubi_class;
350
351/* vtbl.c */
352int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
353 struct ubi_vtbl_record *vtbl_rec);
354int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
355
356/* vmt.c */
357int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
358int ubi_remove_volume(struct ubi_volume_desc *desc);
359int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
360int ubi_add_volume(struct ubi_device *ubi, int vol_id);
361void ubi_free_volume(struct ubi_device *ubi, int vol_id);
362
363/* upd.c */
364int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes);
365int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
366 const void __user *buf, int count);
367
368/* misc.c */
369int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
370int ubi_check_volume(struct ubi_device *ubi, int vol_id);
371void ubi_calculate_reserved(struct ubi_device *ubi);
372
373/* gluebi.c */
374#ifdef CONFIG_MTD_UBI_GLUEBI
375int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
376int ubi_destroy_gluebi(struct ubi_volume *vol);
377#else
378#define ubi_create_gluebi(ubi, vol) 0
379#define ubi_destroy_gluebi(vol) 0
380#endif
381
382/* eba.c */
383int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum);
384int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
385 int offset, int len, int check);
386int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
387 const void *buf, int offset, int len, int dtype);
388int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
389 const void *buf, int len, int dtype,
390 int used_ebs);
391int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
392 const void *buf, int len, int dtype);
393int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
394 struct ubi_vid_hdr *vid_hdr);
395int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
396void ubi_eba_close(const struct ubi_device *ubi);
397
398/* wl.c */
399int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
400int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
401int ubi_wl_flush(struct ubi_device *ubi);
402int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
403int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
404void ubi_wl_close(struct ubi_device *ubi);
405
406/* io.c */
407int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
408 int len);
409int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum,
410 int offset, int len);
411int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture);
412int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
413int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
414int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
415 struct ubi_ec_hdr *ec_hdr, int verbose);
416int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum,
417 struct ubi_ec_hdr *ec_hdr);
418int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
419 struct ubi_vid_hdr *vid_hdr, int verbose);
420int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
421 struct ubi_vid_hdr *vid_hdr);
422
423/*
424 * ubi_rb_for_each_entry - walk an RB-tree.
425 * @rb: a pointer to type 'struct rb_node' to use as a loop counter
426 * @pos: a pointer to RB-tree entry type to use as a loop counter
427 * @root: RB-tree's root
428 * @member: the name of the 'struct rb_node' within the RB-tree entry
429 */
430#define ubi_rb_for_each_entry(rb, pos, root, member) \
431 for (rb = rb_first(root), \
432 pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
433 rb; \
434 rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member))
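/*
 * Example usage (illustrative), mirroring how scan.c walks the per-volume
 * tree of scanned eraseblocks; 'sv' is an assumed &struct ubi_scan_volume
 * pointer:
 *
 *	struct rb_node *rb;
 *	struct ubi_scan_leb *seb;
 *
 *	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb)
 *		dbg_msg("LEB %d is mapped to PEB %d", seb->lnum, seb->pnum);
 */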
435
436/**
437 * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
438 * @ubi: UBI device description object
439 *
440 * This function returns a pointer to the newly allocated and zero-filled
441 * volume identifier header object in case of success and %NULL in case of
442 * failure.
443 */
444static inline struct ubi_vid_hdr *ubi_zalloc_vid_hdr(const struct ubi_device *ubi)
445{
446 void *vid_hdr;
447
448 vid_hdr = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL);
449 if (!vid_hdr)
450 return NULL;
451
452 /*
453 * VID headers may be stored at un-aligned flash offsets, so we shift
454 * the pointer.
455 */
456 return vid_hdr + ubi->vid_hdr_shift;
457}
458
459/**
460 * ubi_free_vid_hdr - free a volume identifier header object.
461 * @ubi: UBI device description object
462 * @vid_hdr: the object to free
463 */
464static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
465 struct ubi_vid_hdr *vid_hdr)
466{
467 void *p = vid_hdr;
468
469 if (!p)
470 return;
471
472 kfree(p - ubi->vid_hdr_shift);
473}
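/*
 * Example (illustrative) of the allocate/read/free pattern used by the
 * scanning code; 'ubi', 'pnum' and 'err' are assumed to be in scope:
 *
 *	struct ubi_vid_hdr *vidh;
 *
 *	vidh = ubi_zalloc_vid_hdr(ubi);
 *	if (!vidh)
 *		return -ENOMEM;
 *	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
 *	...
 *	ubi_free_vid_hdr(ubi, vidh);
 *
 * Because the returned pointer is shifted by @vid_hdr_shift, it must only
 * ever be freed with ubi_free_vid_hdr(), never with plain kfree().
 */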
474
475/*
476 * This function is equivalent to 'ubi_io_read()', but @offset is relative to
477 * the beginning of the logical eraseblock, not to the beginning of the
478 * physical eraseblock.
479 */
480static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
481 int pnum, int offset, int len)
482{
483 ubi_assert(offset >= 0);
484 return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
485}
486
487/*
488 * This function is equivalent to 'ubi_io_write()', but @offset is relative to
489 * the beginning of the logical eraseblock, not to the beginning of the
490 * physical eraseblock.
491 */
492static inline int ubi_io_write_data(const struct ubi_device *ubi, const void *buf,
493 int pnum, int offset, int len)
494{
495 ubi_assert(offset >= 0);
496 return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
497}
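/*
 * For example, reading the first @len bytes of LEB data from PEB @pnum:
 *
 *	err = ubi_io_read_data(ubi, buf, pnum, 0, len);
 *
 * is equivalent to:
 *
 *	err = ubi_io_read(ubi, buf, pnum, ubi->leb_start, len);
 */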
498
499/**
500 * ubi_ro_mode - switch to read-only mode.
501 * @ubi: UBI device description object
502 */
503static inline void ubi_ro_mode(struct ubi_device *ubi)
504{
505 ubi->ro_mode = 1;
506 ubi_warn("switch to read-only mode");
507}
508
509/**
510 * vol_id2idx - get table index by volume ID.
511 * @ubi: UBI device description object
512 * @vol_id: volume ID
513 */
514static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id)
515{
516 if (vol_id >= UBI_INTERNAL_VOL_START)
517 return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots;
518 else
519 return vol_id;
520}
521
522/**
523 * idx2vol_id - get volume ID by table index.
524 * @ubi: UBI device description object
525 * @idx: table index
526 */
527static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
528{
529 if (idx >= ubi->vtbl_slots)
530 return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START;
531 else
532 return idx;
533}
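/*
 * Note that the two helpers above are inverses of each other, so for any
 * valid volume ID:
 *
 *	ubi_assert(idx2vol_id(ubi, vol_id2idx(ubi, vol_id)) == vol_id);
 */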
534
535#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
new file mode 100644
index 000000000000..8925b977e3dc
--- /dev/null
+++ b/drivers/mtd/ubi/upd.c
@@ -0,0 +1,348 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 * Copyright (c) Nokia Corporation, 2006
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * Author: Artem Bityutskiy (Битюцкий Артём)
20 *
21 * Jan 2007: Alexander Schmidt, hacked per-volume update.
22 */
23
24/*
25 * This file contains implementation of the volume update functionality.
26 *
27 * The update operation is based on the per-volume update marker which is
28 * stored in the volume table. The update marker is set before the update
29 * starts, and removed after the update has finished. So if the update was
30 * interrupted by an unclean re-boot or for some other reason, the update
31 * marker stays on the flash media and UBI finds it when it attaches the MTD
32 * device next time. If the update marker is set for a volume, the volume is
33 * treated as damaged and most I/O operations are prohibited. Only a new update
34 * operation is allowed.
35 *
36 * Note, in general it is possible to implement the update operation as a
37 * transaction with a roll-back capability.
38 */
39
40#include <linux/err.h>
41#include <asm/uaccess.h>
42#include <asm/div64.h>
43#include "ubi.h"
44
45/**
46 * set_update_marker - set update marker.
47 * @ubi: UBI device description object
48 * @vol_id: volume ID
49 *
50 * This function sets the update marker flag for volume @vol_id. Returns zero
51 * in case of success and a negative error code in case of failure.
52 */
53static int set_update_marker(struct ubi_device *ubi, int vol_id)
54{
55 int err;
56 struct ubi_vtbl_record vtbl_rec;
57 struct ubi_volume *vol = ubi->volumes[vol_id];
58
59 dbg_msg("set update marker for volume %d", vol_id);
60
61 if (vol->upd_marker) {
62 ubi_assert(ubi->vtbl[vol_id].upd_marker);
63 dbg_msg("already set");
64 return 0;
65 }
66
67 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
68 vtbl_rec.upd_marker = 1;
69
70 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
71 vol->upd_marker = 1;
72 return err;
73}
74
75/**
76 * clear_update_marker - clear update marker.
77 * @ubi: UBI device description object
78 * @vol_id: volume ID
79 * @bytes: new data size in bytes
80 *
81 * This function clears the update marker for volume @vol_id, sets the new volume
82 * data size, and clears the "corrupted" flag (static volumes only). Returns
83 * zero in case of success and a negative error code in case of failure.
84 */
85static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes)
86{
87 int err;
88 uint64_t tmp;
89 struct ubi_vtbl_record vtbl_rec;
90 struct ubi_volume *vol = ubi->volumes[vol_id];
91
92 dbg_msg("clear update marker for volume %d", vol_id);
93
94 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
95 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
96 vtbl_rec.upd_marker = 0;
97
98 if (vol->vol_type == UBI_STATIC_VOLUME) {
99 vol->corrupted = 0;
100 vol->used_bytes = tmp = bytes;
101 vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size);
102 vol->used_ebs = tmp;
103 if (vol->last_eb_bytes)
104 vol->used_ebs += 1;
105 else
106 vol->last_eb_bytes = vol->usable_leb_size;
107 }
108
109 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
110 vol->upd_marker = 0;
111 return err;
112}
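/*
 * Note: do_div() divides its 64-bit first argument in place and returns the
 * remainder. For example, for a static volume of 5 * usable_leb_size + 100
 * bytes the code above yields used_ebs = 6 and last_eb_bytes = 100; for an
 * exact multiple of usable_leb_size the last LEB is counted as fully used
 * (last_eb_bytes = usable_leb_size).
 */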
113
114/**
115 * ubi_start_update - start volume update.
116 * @ubi: UBI device description object
117 * @vol_id: volume ID
118 * @bytes: update bytes
119 *
120 * This function starts the volume update. If @bytes is zero, the volume
121 * is just wiped out. Returns zero in case of success and a negative error code
122 * in case of failure.
123 */
124int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
125{
126 int i, err;
127 uint64_t tmp;
128 struct ubi_volume *vol = ubi->volumes[vol_id];
129
130 dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes);
131 vol->updating = 1;
132
133 err = set_update_marker(ubi, vol_id);
134 if (err)
135 return err;
136
137 /* Before updating - wipe out the volume */
138 for (i = 0; i < vol->reserved_pebs; i++) {
139 err = ubi_eba_unmap_leb(ubi, vol_id, i);
140 if (err)
141 return err;
142 }
143
144 if (bytes == 0) {
145 err = clear_update_marker(ubi, vol_id, 0);
146 if (err)
147 return err;
148 err = ubi_wl_flush(ubi);
149 if (!err)
150 vol->updating = 0;
151 }
152
153 vol->upd_buf = kmalloc(ubi->leb_size, GFP_KERNEL);
154 if (!vol->upd_buf)
155 return -ENOMEM;
156
157 tmp = bytes;
158 vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size);
159 vol->upd_ebs += tmp;
160 vol->upd_bytes = bytes;
161 vol->upd_received = 0;
162 return 0;
163}
164
165/**
166 * write_leb - write update data.
167 * @ubi: UBI device description object
168 * @vol_id: volume ID
169 * @lnum: logical eraseblock number
170 * @buf: data to write
171 * @len: data size
172 * @used_ebs: how many logical eraseblocks will this volume contain (static
173 * volumes only)
174 *
175 * This function writes update data to corresponding logical eraseblock. In
176 * case of dynamic volume, this function checks if the data contains 0xFF bytes
177 * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole
178 * buffer contains only 0xFF bytes, the LEB is left unmapped.
179 *
180 * The reason why we skip the trailing 0xFF bytes in case of dynamic volume is
181 * that we want to make sure that more data may be appended to the logical
182 * eraseblock in the future. Indeed, writing 0xFF bytes may have side effects and
183 * this PEB won't be writable anymore. So if one writes the file-system image
184 * to the UBI volume where 0xFFs mean free space - UBI makes sure this free
185 * space is writable after the update.
186 *
187 * We do not do this for static volumes because they are read-only anyway. It
188 * also could not be done for them, because we have to store the per-LEB CRC
189 * and the correct data length.
190 *
191 * This function returns zero in case of success and a negative error code in
192 * case of failure.
193 */
194static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
195 int len, int used_ebs)
196{
197 int err, l;
198 struct ubi_volume *vol = ubi->volumes[vol_id];
199
200 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
201 l = ALIGN(len, ubi->min_io_size);
202 memset(buf + len, 0xFF, l - len);
203
204 l = ubi_calc_data_len(ubi, buf, l);
205 if (l == 0) {
206 dbg_msg("all %d bytes contain 0xFF - skip", len);
207 return 0;
208 }
209 if (len != l)
210 dbg_msg("skip last %d bytes (0xFF)", len - l);
211
212 err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l,
213 UBI_UNKNOWN);
214 } else {
215 /*
216 * When writing static volume, and this is the last logical
217 * eraseblock, the length (@len) does not have to be aligned to
218 * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()'
219 * function accepts exact (unaligned) length and stores it in
220 * the VID header. And it takes care of proper alignment by
221 * padding the buffer. Here we just make sure the padding will
222 * contain zeros, not random trash.
223 */
224 memset(buf + len, 0, vol->usable_leb_size - len);
225 err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len,
226 UBI_UNKNOWN, used_ebs);
227 }
228
229 return err;
230}
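/*
 * Illustrative example for the dynamic-volume case: if only the first 1000
 * bytes of @buf contain real data and the rest is 0xFF, ubi_calc_data_len()
 * is expected to trim the write down to those 1000 bytes (rounded up to
 * @min_io_size), so only
 *
 *	err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, UBI_UNKNOWN);
 *
 * with the reduced length 'l' is issued; if the buffer is all 0xFF, nothing
 * is written at all and the LEB stays unmapped.
 */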
231
232/**
233 * ubi_more_update_data - write more update data.
234 * @vol_id: ID of the volume being updated
235 * @buf: write data (user-space memory buffer)
236 * @count: how many bytes to write
237 *
238 * This function writes more data to the volume which is being updated. It may
239 * be called an arbitrary number of times until all of the update data arrive.
240 * This function returns %0 in case of success, number of bytes written during
241 * the last call if the whole volume update was successfully finished, and a
242 * negative error code in case of failure.
243 */
244int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
245 const void __user *buf, int count)
246{
247 uint64_t tmp;
248 struct ubi_volume *vol = ubi->volumes[vol_id];
249 int lnum, offs, err = 0, len, to_write = count;
250
251 dbg_msg("write %d of %lld bytes, %lld already passed",
252 count, vol->upd_bytes, vol->upd_received);
253
254 if (ubi->ro_mode)
255 return -EROFS;
256
257 tmp = vol->upd_received;
258 offs = do_div(tmp, vol->usable_leb_size);
259 lnum = tmp;
260
261 if (vol->upd_received + count > vol->upd_bytes)
262 to_write = count = vol->upd_bytes - vol->upd_received;
263
264 /*
265 * When updating volumes, we accumulate a whole logical eraseblock of
266 * data and write it at once.
267 */
268 if (offs != 0) {
269 /*
270 * This is a write to the middle of the logical eraseblock. We
271 * copy the data to our update buffer and wait for more data or
272 * flush it if the whole eraseblock is written or the update
273 * is finished.
274 */
275
276 len = vol->usable_leb_size - offs;
277 if (len > count)
278 len = count;
279
280 err = copy_from_user(vol->upd_buf + offs, buf, len);
281 if (err)
282 return -EFAULT;
283
284 if (offs + len == vol->usable_leb_size ||
285 vol->upd_received + len == vol->upd_bytes) {
286 int flush_len = offs + len;
287
288 /*
289 * OK, we gathered either the whole eraseblock or this
290 * is the last chunk, it's time to flush the buffer.
291 */
292 ubi_assert(flush_len <= vol->usable_leb_size);
293 err = write_leb(ubi, vol_id, lnum, vol->upd_buf,
294 flush_len, vol->upd_ebs);
295 if (err)
296 return err;
297 }
298
299 vol->upd_received += len;
300 count -= len;
301 buf += len;
302 lnum += 1;
303 }
304
305 /*
306 * If we've got more to write, let's continue. At this point we know we
307 * are starting from the beginning of an eraseblock.
308 */
309 while (count) {
310 if (count > vol->usable_leb_size)
311 len = vol->usable_leb_size;
312 else
313 len = count;
314
315 err = copy_from_user(vol->upd_buf, buf, len);
316 if (err)
317 return -EFAULT;
318
319 if (len == vol->usable_leb_size ||
320 vol->upd_received + len == vol->upd_bytes) {
321 err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len,
322 vol->upd_ebs);
323 if (err)
324 break;
325 }
326
327 vol->upd_received += len;
328 count -= len;
329 lnum += 1;
330 buf += len;
331 }
332
333 ubi_assert(vol->upd_received <= vol->upd_bytes);
334 if (vol->upd_received == vol->upd_bytes) {
335 /* The update is finished, clear the update marker */
336 err = clear_update_marker(ubi, vol_id, vol->upd_bytes);
337 if (err)
338 return err;
339 err = ubi_wl_flush(ubi);
340 if (err == 0) {
341 err = to_write;
342 kfree(vol->upd_buf);
343 vol->updating = 0;
344 }
345 }
346
347 return err;
348}
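/*
 * Illustrative sketch of the overall update sequence as driven by the UBI
 * character device code (error handling and the exact chunking are omitted;
 * 'image_bytes', 'user_buf' and 'chunk_len' are assumptions):
 *
 *	err = ubi_start_update(ubi, vol_id, image_bytes);
 *
 *	while (update data remains)
 *		err = ubi_more_update_data(ubi, vol_id, user_buf, chunk_len);
 *
 * A positive return value from the last ubi_more_update_data() call means
 * that the whole volume update has finished and the update marker has been
 * cleared.
 */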
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
new file mode 100644
index 000000000000..622d0d18952c
--- /dev/null
+++ b/drivers/mtd/ubi/vmt.c
@@ -0,0 +1,809 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * This file contains implementation of volume creation, deletion, updating and
23 * resizing.
24 */
25
26#include <linux/err.h>
27#include <asm/div64.h>
28#include "ubi.h"
29
30#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
31static void paranoid_check_volumes(struct ubi_device *ubi);
32#else
33#define paranoid_check_volumes(ubi)
34#endif
35
36static ssize_t vol_attribute_show(struct device *dev,
37 struct device_attribute *attr, char *buf);
38
39/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
40static struct device_attribute vol_reserved_ebs =
41 __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
42static struct device_attribute vol_type =
43 __ATTR(type, S_IRUGO, vol_attribute_show, NULL);
44static struct device_attribute vol_name =
45 __ATTR(name, S_IRUGO, vol_attribute_show, NULL);
46static struct device_attribute vol_corrupted =
47 __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
48static struct device_attribute vol_alignment =
49 __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
50static struct device_attribute vol_usable_eb_size =
51 __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
52static struct device_attribute vol_data_bytes =
53 __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
54static struct device_attribute vol_upd_marker =
55 __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
56
57/*
58 * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
59 *
60 * Consider a situation:
61 * A. process 1 opens a sysfs file related to volume Y, say
62 * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
63 * B. process 2 removes volume Y;
64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
65 *
66 * What we want to do in a situation like that is to return an error when the
67 * file is read. This is done by means of the 'removed' flag and the
68 * 'volumes_lock' of the UBI device description object.
69 */
70static ssize_t vol_attribute_show(struct device *dev,
71 struct device_attribute *attr, char *buf)
72{
73 int ret;
74 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
75
76 spin_lock(&vol->ubi->volumes_lock);
77 if (vol->removed) {
78 spin_unlock(&vol->ubi->volumes_lock);
79 return -ENODEV;
80 }
81 if (attr == &vol_reserved_ebs)
82 ret = sprintf(buf, "%d\n", vol->reserved_pebs);
83 else if (attr == &vol_type) {
84 const char *tp;
85 tp = vol->vol_type == UBI_DYNAMIC_VOLUME ? "dynamic" : "static";
86 ret = sprintf(buf, "%s\n", tp);
87 } else if (attr == &vol_name)
88 ret = sprintf(buf, "%s\n", vol->name);
89 else if (attr == &vol_corrupted)
90 ret = sprintf(buf, "%d\n", vol->corrupted);
91 else if (attr == &vol_alignment)
92 ret = sprintf(buf, "%d\n", vol->alignment);
93 else if (attr == &vol_usable_eb_size) {
94 ret = sprintf(buf, "%d\n", vol->usable_leb_size);
95 } else if (attr == &vol_data_bytes)
96 ret = sprintf(buf, "%lld\n", vol->used_bytes);
97 else if (attr == &vol_upd_marker)
98 ret = sprintf(buf, "%d\n", vol->upd_marker);
99 else
100 BUG();
101 spin_unlock(&vol->ubi->volumes_lock);
102 return ret;
103}
104
105/* Release method for volume devices */
106static void vol_release(struct device *dev)
107{
108 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
109 ubi_assert(vol->removed);
110 kfree(vol);
111}
112
113/**
114 * volume_sysfs_init - initialize sysfs for new volume.
115 * @ubi: UBI device description object
116 * @vol: volume description object
117 *
118 * This function returns zero in case of success and a negative error code in
119 * case of failure.
120 *
121 * Note, this function does not free allocated resources in case of failure -
122 * the caller does it. This is because freeing them here would trigger
123 * release() and the caller would then oops.
124 */
125static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
126{
127 int err;
128
129 err = device_create_file(&vol->dev, &vol_reserved_ebs);
130 if (err)
131 return err;
132 err = device_create_file(&vol->dev, &vol_type);
133 if (err)
134 return err;
135 err = device_create_file(&vol->dev, &vol_name);
136 if (err)
137 return err;
138 err = device_create_file(&vol->dev, &vol_corrupted);
139 if (err)
140 return err;
141 err = device_create_file(&vol->dev, &vol_alignment);
142 if (err)
143 return err;
144 err = device_create_file(&vol->dev, &vol_usable_eb_size);
145 if (err)
146 return err;
147 err = device_create_file(&vol->dev, &vol_data_bytes);
148 if (err)
149 return err;
150 err = device_create_file(&vol->dev, &vol_upd_marker);
151 if (err)
152 return err;
153 return 0;
154}
155
156/**
157 * volume_sysfs_close - close sysfs for a volume.
158 * @vol: volume description object
159 */
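/*
 * Note: device_unregister() below drops the reference to &vol->dev; once the
 * last reference is gone, vol_release() runs and frees @vol, so callers must
 * not touch @vol after calling volume_sysfs_close().
 */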
160static void volume_sysfs_close(struct ubi_volume *vol)
161{
162 device_remove_file(&vol->dev, &vol_upd_marker);
163 device_remove_file(&vol->dev, &vol_data_bytes);
164 device_remove_file(&vol->dev, &vol_usable_eb_size);
165 device_remove_file(&vol->dev, &vol_alignment);
166 device_remove_file(&vol->dev, &vol_corrupted);
167 device_remove_file(&vol->dev, &vol_name);
168 device_remove_file(&vol->dev, &vol_type);
169 device_remove_file(&vol->dev, &vol_reserved_ebs);
170 device_unregister(&vol->dev);
171}
172
173/**
174 * ubi_create_volume - create volume.
175 * @ubi: UBI device description object
176 * @req: volume creation request
177 *
178 * This function creates a volume described by @req. If @req->vol_id is
179 * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
180 * and saves it in @req->vol_id. Returns zero in case of success and a negative
181 * error code in case of failure.
182 */
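/*
 * Illustrative only: a caller (for example the volume management ioctl
 * handler) is expected to fill the creation request roughly like this
 * before invoking ubi_create_volume():
 *
 *	struct ubi_mkvol_req req = {
 *		.vol_id    = UBI_VOL_NUM_AUTO,
 *		.alignment = 1,
 *		.bytes     = 1024 * 1024,
 *		.vol_type  = UBI_DYNAMIC_VOLUME,
 *	};
 *	sprintf(req.name, "my_volume");
 *	req.name_len = strlen(req.name);
 */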
183int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
184{
185 int i, err, vol_id = req->vol_id;
186 struct ubi_volume *vol;
187 struct ubi_vtbl_record vtbl_rec;
188 uint64_t bytes;
189
190 if (ubi->ro_mode)
191 return -EROFS;
192
193 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
194 if (!vol)
195 return -ENOMEM;
196
197 spin_lock(&ubi->volumes_lock);
198
199 if (vol_id == UBI_VOL_NUM_AUTO) {
200 /* Find unused volume ID */
201 dbg_msg("search for vacant volume ID");
202 for (i = 0; i < ubi->vtbl_slots; i++)
203 if (!ubi->volumes[i]) {
204 vol_id = i;
205 break;
206 }
207
208 if (vol_id == UBI_VOL_NUM_AUTO) {
209 dbg_err("out of volume IDs");
210 err = -ENFILE;
211 goto out_unlock;
212 }
213 req->vol_id = vol_id;
214 }
215
216 dbg_msg("volume ID %d, %llu bytes, type %d, name %s",
217 vol_id, (unsigned long long)req->bytes,
218 (int)req->vol_type, req->name);
219
220 /* Ensure that this volume does not exist */
221 err = -EEXIST;
222 if (ubi->volumes[vol_id]) {
223 dbg_err("volume %d already exists", vol_id);
224 goto out_unlock;
225 }
226
227 /* Ensure that the name is unique */
228 for (i = 0; i < ubi->vtbl_slots; i++)
229 if (ubi->volumes[i] &&
230 ubi->volumes[i]->name_len == req->name_len &&
231 strcmp(ubi->volumes[i]->name, req->name) == 0) {
232 dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
233 goto out_unlock;
234 }
235
236 /* Calculate how many eraseblocks are requested */
237 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
238 bytes = req->bytes;
239 if (do_div(bytes, vol->usable_leb_size))
240 vol->reserved_pebs = 1;
241 vol->reserved_pebs += bytes;
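/*
 * Note: do_div() replaces 'bytes' with the quotient and returns the
 * remainder, so a request which does not fill its last LEB costs one
 * extra PEB. For example, with a 128 KiB usable LEB size, a 129 KiB
 * request reserves 2 PEBs.
 */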
242
243 /* Reserve physical eraseblocks */
244 if (vol->reserved_pebs > ubi->avail_pebs) {
245 dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
246 spin_unlock(&ubi->volumes_lock);
247 err = -ENOSPC;
248 goto out_unlock;
249 }
250 ubi->avail_pebs -= vol->reserved_pebs;
251 ubi->rsvd_pebs += vol->reserved_pebs;
252
253 vol->vol_id = vol_id;
254 vol->alignment = req->alignment;
255 vol->data_pad = ubi->leb_size % vol->alignment;
256 vol->vol_type = req->vol_type;
257 vol->name_len = req->name_len;
258 memcpy(vol->name, req->name, vol->name_len + 1);
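/*
 * Keep the volume marked as exclusively held so that it cannot be opened
 * while creation is still in progress; 'exclusive' is cleared further down
 * once the volume table record has been written.
 */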
259 vol->exclusive = 1;
260 vol->ubi = ubi;
261 ubi->volumes[vol_id] = vol;
262 spin_unlock(&ubi->volumes_lock);
263
264 /*
265 * Finish all pending erases because there may be some LEBs belonging
266 * to the same volume ID.
267 */
268 err = ubi_wl_flush(ubi);
269 if (err)
270 goto out_acc;
271
272 vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
273 if (!vol->eba_tbl) {
274 err = -ENOMEM;
275 goto out_acc;
276 }
277
278 for (i = 0; i < vol->reserved_pebs; i++)
279 vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
280
281 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
282 vol->used_ebs = vol->reserved_pebs;
283 vol->last_eb_bytes = vol->usable_leb_size;
284 vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
285 } else {
286 bytes = vol->used_bytes;
287 vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
288 vol->used_ebs = bytes;
289 if (vol->last_eb_bytes)
290 vol->used_ebs += 1;
291 else
292 vol->last_eb_bytes = vol->usable_leb_size;
293 }
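/*
 * Note: 'vol' was allocated with kzalloc(), so for a newly created static
 * volume 'used_bytes' is still zero at this point, which makes the branch
 * above yield used_ebs == 0 and last_eb_bytes == usable_leb_size until the
 * volume is updated with data.
 */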
294
295 /* Register character device for the volume */
296 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
297 vol->cdev.owner = THIS_MODULE;
298 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1);
299 if (err) {
300 ubi_err("cannot add character device for volume %d", vol_id);
301 goto out_mapping;
302 }
303
304 err = ubi_create_gluebi(ubi, vol);
305 if (err)
306 goto out_cdev;
307
308 vol->dev.release = vol_release;
309 vol->dev.parent = &ubi->dev;
310 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
311 vol->dev.class = ubi_class;
312 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
313 err = device_register(&vol->dev);
314 if (err)
315 goto out_gluebi;
316
317 err = volume_sysfs_init(ubi, vol);
318 if (err)
319 goto out_sysfs;
320
321 /* Fill volume table record */
322 memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
323 vtbl_rec.reserved_pebs = cpu_to_ubi32(vol->reserved_pebs);
324 vtbl_rec.alignment = cpu_to_ubi32(vol->alignment);
325 vtbl_rec.data_pad = cpu_to_ubi32(vol->data_pad);
326 vtbl_rec.name_len = cpu_to_ubi16(vol->name_len);
327 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
328 vtbl_rec.vol_type = UBI_VID_DYNAMIC;
329 else
330 vtbl_rec.vol_type = UBI_VID_STATIC;
331 memcpy(vtbl_rec.name, vol->name, vol->name_len + 1);
332
333 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
334 if (err)
335 goto out_sysfs;
336
337 spin_lock(&ubi->volumes_lock);
338 ubi->vol_count += 1;
339 vol->exclusive = 0;
340 spin_unlock(&ubi->volumes_lock);
341
342 paranoid_check_volumes(ubi);
343 return 0;
344
345out_gluebi:
346 ubi_destroy_gluebi(vol);
347out_cdev:
348 cdev_del(&vol->cdev);
349out_mapping:
350 kfree(vol->eba_tbl);
351out_acc:
352 spin_lock(&ubi->volumes_lock);
353 ubi->rsvd_pebs -= vol->reserved_pebs;
354 ubi->avail_pebs += vol->reserved_pebs;
355out_unlock:
356 spin_unlock(&ubi->volumes_lock);
357 kfree(vol);
358 return err;
359
360 /*
361 * We are registered, so @vol is destroyed in the release function and
362 * we have to de-initialize differently.
363 */
364out_sysfs:
365 ubi_destroy_gluebi(vol);
366 cdev_del(&vol->cdev);
367 kfree(vol->eba_tbl);
368 spin_lock(&ubi->volumes_lock);
369 ubi->rsvd_pebs -= vol->reserved_pebs;
370 ubi->avail_pebs += vol->reserved_pebs;
371 spin_unlock(&ubi->volumes_lock);
372 volume_sysfs_close(vol);
373 return err;
374}
375
376/**
377 * ubi_remove_volume - remove volume.
378 * @desc: volume descriptor
379 *
380 * This function removes volume described by @desc. The volume has to be opened
381 * in "exclusive" mode. Returns zero in case of success and a negative error
382 * code in case of failure.
383 */
384int ubi_remove_volume(struct ubi_volume_desc *desc)
385{
386 struct ubi_volume *vol = desc->vol;
387 struct ubi_device *ubi = vol->ubi;
388 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
389
390 dbg_msg("remove UBI volume %d", vol_id);
391 ubi_assert(desc->mode == UBI_EXCLUSIVE);
392 ubi_assert(vol == ubi->volumes[vol_id]);
393
394 if (ubi->ro_mode)
395 return -EROFS;
396
397 err = ubi_destroy_gluebi(vol);
398 if (err)
399 return err;
400
401 err = ubi_change_vtbl_record(ubi, vol_id, NULL);
402 if (err)
403 return err;
404
405 for (i = 0; i < vol->reserved_pebs; i++) {
406 err = ubi_eba_unmap_leb(ubi, vol_id, i);
407 if (err)
408 return err;
409 }
410
411 spin_lock(&ubi->volumes_lock);
412 vol->removed = 1;
413 ubi->volumes[vol_id] = NULL;
414 spin_unlock(&ubi->volumes_lock);
415
416 kfree(vol->eba_tbl);
417 vol->eba_tbl = NULL;
418 cdev_del(&vol->cdev);
419 volume_sysfs_close(vol);
420 kfree(desc);
421
422 spin_lock(&ubi->volumes_lock);
423 ubi->rsvd_pebs -= reserved_pebs;
424 ubi->avail_pebs += reserved_pebs;
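/*
 * If the reserve of PEBs for bad eraseblock handling is below its nominal
 * level, top it up from the PEBs which have just been freed.
 */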
425 i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
426 if (i > 0) {
427 i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
428 ubi->avail_pebs -= i;
429 ubi->rsvd_pebs += i;
430 ubi->beb_rsvd_pebs += i;
431 if (i > 0)
432 ubi_msg("reserve more %d PEBs", i);
433 }
434 ubi->vol_count -= 1;
435 spin_unlock(&ubi->volumes_lock);
436
437 paranoid_check_volumes(ubi);
438 module_put(THIS_MODULE);
439 return 0;
440}
441
442/**
443 * ubi_resize_volume - re-size volume.
444 * @desc: volume descriptor
445 * @reserved_pebs: new size in physical eraseblocks
446 *
447 * This function returns zero in case of success, and a negative error code in
448 * case of failure.
449 */
450int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
451{
452 int i, err, pebs, *new_mapping;
453 struct ubi_volume *vol = desc->vol;
454 struct ubi_device *ubi = vol->ubi;
455 struct ubi_vtbl_record vtbl_rec;
456 int vol_id = vol->vol_id;
457
458 if (ubi->ro_mode)
459 return -EROFS;
460
461 dbg_msg("re-size volume %d to from %d to %d PEBs",
462 vol_id, vol->reserved_pebs, reserved_pebs);
463 ubi_assert(desc->mode == UBI_EXCLUSIVE);
464 ubi_assert(vol == ubi->volumes[vol_id]);
465
466 if (vol->vol_type == UBI_STATIC_VOLUME &&
467 reserved_pebs < vol->used_ebs) {
468 dbg_err("too small size %d, %d LEBs contain data",
469 reserved_pebs, vol->used_ebs);
470 return -EINVAL;
471 }
472
473 /* If the size is the same, we have nothing to do */
474 if (reserved_pebs == vol->reserved_pebs)
475 return 0;
476
477 new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
478 if (!new_mapping)
479 return -ENOMEM;
480
481 for (i = 0; i < reserved_pebs; i++)
482 new_mapping[i] = UBI_LEB_UNMAPPED;
483
484 /* Reserve physical eraseblocks */
485 pebs = reserved_pebs - vol->reserved_pebs;
486 if (pebs > 0) {
487 spin_lock(&ubi->volumes_lock);
488 if (pebs > ubi->avail_pebs) {
489 dbg_err("not enough PEBs: requested %d, available %d",
490 pebs, ubi->avail_pebs);
491 spin_unlock(&ubi->volumes_lock);
492 err = -ENOSPC;
493 goto out_free;
494 }
495 ubi->avail_pebs -= pebs;
496 ubi->rsvd_pebs += pebs;
497 for (i = 0; i < vol->reserved_pebs; i++)
498 new_mapping[i] = vol->eba_tbl[i];
499 kfree(vol->eba_tbl);
500 vol->eba_tbl = new_mapping;
501 spin_unlock(&ubi->volumes_lock);
502 }
503
504 /* Change volume table record */
505 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
506 vtbl_rec.reserved_pebs = cpu_to_ubi32(reserved_pebs);
507 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
508 if (err)
509 goto out_acc;
510
511 if (pebs < 0) {
512 for (i = 0; i < -pebs; i++) {
513 err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i);
514 if (err)
515 goto out_acc;
516 }
517 spin_lock(&ubi->volumes_lock);
518 ubi->rsvd_pebs += pebs;
519 ubi->avail_pebs -= pebs;
520 pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
521 if (pebs > 0) {
522 pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
523 ubi->avail_pebs -= pebs;
524 ubi->rsvd_pebs += pebs;
525 ubi->beb_rsvd_pebs += pebs;
526 if (pebs > 0)
527 ubi_msg("reserve more %d PEBs", pebs);
528 }
529 for (i = 0; i < reserved_pebs; i++)
530 new_mapping[i] = vol->eba_tbl[i];
531 kfree(vol->eba_tbl);
532 vol->eba_tbl = new_mapping;
533 spin_unlock(&ubi->volumes_lock);
534 }
535
536 vol->reserved_pebs = reserved_pebs;
537 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
538 vol->used_ebs = reserved_pebs;
539 vol->last_eb_bytes = vol->usable_leb_size;
540 vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
541 }
542
543 paranoid_check_volumes(ubi);
544 return 0;
545
546out_acc:
547 if (pebs > 0) {
548 spin_lock(&ubi->volumes_lock);
549 ubi->rsvd_pebs -= pebs;
550 ubi->avail_pebs += pebs;
551 spin_unlock(&ubi->volumes_lock);
552 }
553out_free:
554 kfree(new_mapping);
555 return err;
556}
557
558/**
559 * ubi_add_volume - add volume.
560 * @ubi: UBI device description object
561 * @vol_id: volume ID
562 *
563 * This function adds an existing volume and initializes all its data
564 * structures. Returns zero in case of success and a negative error code in
565 * case of failure.
566 */
567int ubi_add_volume(struct ubi_device *ubi, int vol_id)
568{
569 int err;
570 struct ubi_volume *vol = ubi->volumes[vol_id];
571
572 dbg_msg("add volume %d", vol_id);
573 ubi_dbg_dump_vol_info(vol);
574 ubi_assert(vol);
575
576 /* Register character device for the volume */
577 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
578 vol->cdev.owner = THIS_MODULE;
579 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1);
580 if (err) {
581 ubi_err("cannot add character device for volume %d", vol_id);
582 return err;
583 }
584
585 err = ubi_create_gluebi(ubi, vol);
586 if (err)
587 goto out_cdev;
588
589 vol->dev.release = vol_release;
590 vol->dev.parent = &ubi->dev;
591 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
592 vol->dev.class = ubi_class;
593 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
594 err = device_register(&vol->dev);
595 if (err)
596 goto out_gluebi;
597
598 err = volume_sysfs_init(ubi, vol);
599 if (err) {
600 cdev_del(&vol->cdev);
601 ubi_destroy_gluebi(vol);
602 volume_sysfs_close(vol);
603 return err;
604 }
605
606 paranoid_check_volumes(ubi);
607 return 0;
608
609out_gluebi:
610 ubi_destroy_gluebi(vol);
611out_cdev:
612 cdev_del(&vol->cdev);
613 return err;
614}
615
616/**
617 * ubi_free_volume - free volume.
618 * @ubi: UBI device description object
619 * @vol_id: volume ID
620 *
621 * This function frees all resources for volume @vol_id but does not remove it.
622 * Used only when the UBI device is detached.
623 */
624void ubi_free_volume(struct ubi_device *ubi, int vol_id)
625{
626 int err;
627 struct ubi_volume *vol = ubi->volumes[vol_id];
628
629 dbg_msg("free volume %d", vol_id);
630 ubi_assert(vol);
631
632 vol->removed = 1;
633 err = ubi_destroy_gluebi(vol);
634 ubi->volumes[vol_id] = NULL;
635 cdev_del(&vol->cdev);
636 volume_sysfs_close(vol);
637}
638
639#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
640
641/**
642 * paranoid_check_volume - check volume information.
643 * @ubi: UBI device description object
644 * @vol_id: volume ID
645 */
646static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
647{
648 int idx = vol_id2idx(ubi, vol_id);
649 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
650 const struct ubi_volume *vol = ubi->volumes[idx];
651 long long n;
652 const char *name;
653
654 reserved_pebs = ubi32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
655
656 if (!vol) {
657 if (reserved_pebs) {
658 ubi_err("no volume info, but volume exists");
659 goto fail;
660 }
661 return;
662 }
663
664 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
665 vol->name_len < 0) {
666 ubi_err("negative values");
667 goto fail;
668 }
669 if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
670 ubi_err("bad alignment");
671 goto fail;
672 }
673
674 n = vol->alignment % ubi->min_io_size;
675 if (vol->alignment != 1 && n) {
676 ubi_err("alignment is not multiple of min I/O unit");
677 goto fail;
678 }
679
680 n = ubi->leb_size % vol->alignment;
681 if (vol->data_pad != n) {
682 ubi_err("bad data_pad, has to be %lld", n);
683 goto fail;
684 }
685
686 if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
687 vol->vol_type != UBI_STATIC_VOLUME) {
688 ubi_err("bad vol_type");
689 goto fail;
690 }
691
692 if (vol->upd_marker != 0 && vol->upd_marker != 1) {
693 ubi_err("bad upd_marker");
694 goto fail;
695 }
696
697 if (vol->upd_marker && vol->corrupted) {
698 dbg_err("update marker and corrupted simultaneously");
699 goto fail;
700 }
701
702 if (vol->reserved_pebs > ubi->good_peb_count) {
703 ubi_err("too large reserved_pebs");
704 goto fail;
705 }
706
707 n = ubi->leb_size - vol->data_pad;
708 if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
709 ubi_err("bad usable_leb_size, has to be %lld", n);
710 goto fail;
711 }
712
713 if (vol->name_len > UBI_VOL_NAME_MAX) {
714 ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX);
715 goto fail;
716 }
717
718 if (!vol->name) {
719 ubi_err("NULL volume name");
720 goto fail;
721 }
722
723 n = strnlen(vol->name, vol->name_len + 1);
724 if (n != vol->name_len) {
725 ubi_err("bad name_len %lld", n);
726 goto fail;
727 }
728
729 n = vol->used_ebs * vol->usable_leb_size;
730 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
731 if (vol->corrupted != 0) {
732 ubi_err("corrupted dynamic volume");
733 goto fail;
734 }
735 if (vol->used_ebs != vol->reserved_pebs) {
736 ubi_err("bad used_ebs");
737 goto fail;
738 }
739 if (vol->last_eb_bytes != vol->usable_leb_size) {
740 ubi_err("bad last_eb_bytes");
741 goto fail;
742 }
743 if (vol->used_bytes != n) {
744 ubi_err("bad used_bytes");
745 goto fail;
746 }
747 } else {
748 if (vol->corrupted != 0 && vol->corrupted != 1) {
749 ubi_err("bad corrupted");
750 goto fail;
751 }
752 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
753 ubi_err("bad used_ebs");
754 goto fail;
755 }
756 if (vol->last_eb_bytes < 0 ||
757 vol->last_eb_bytes > vol->usable_leb_size) {
758 ubi_err("bad last_eb_bytes");
759 goto fail;
760 }
761 if (vol->used_bytes < 0 || vol->used_bytes > n ||
762 vol->used_bytes < n - vol->usable_leb_size) {
763 ubi_err("bad used_bytes");
764 goto fail;
765 }
766 }
767
768 alignment = ubi32_to_cpu(ubi->vtbl[vol_id].alignment);
769 data_pad = ubi32_to_cpu(ubi->vtbl[vol_id].data_pad);
770 name_len = ubi16_to_cpu(ubi->vtbl[vol_id].name_len);
771 upd_marker = ubi->vtbl[vol_id].upd_marker;
772 name = &ubi->vtbl[vol_id].name[0];
773 if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
774 vol_type = UBI_DYNAMIC_VOLUME;
775 else
776 vol_type = UBI_STATIC_VOLUME;
777
778 if (alignment != vol->alignment || data_pad != vol->data_pad ||
779 upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
780 name_len!= vol->name_len || strncmp(name, vol->name, name_len)) {
781 ubi_err("volume info is different");
782 goto fail;
783 }
784
785 return;
786
787fail:
788 ubi_err("paranoid check failed");
789 ubi_dbg_dump_vol_info(vol);
790 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
791 BUG();
792}
793
794/**
795 * paranoid_check_volumes - check information about all volumes.
796 * @ubi: UBI device description object
797 */
798static void paranoid_check_volumes(struct ubi_device *ubi)
799{
800 int i;
801
802 mutex_lock(&ubi->vtbl_mutex);
803 spin_lock(&ubi->volumes_lock);
804 for (i = 0; i < ubi->vtbl_slots; i++)
805 paranoid_check_volume(ubi, i);
806 spin_unlock(&ubi->volumes_lock);
807 mutex_unlock(&ubi->vtbl_mutex);
808}
809#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
new file mode 100644
index 000000000000..b6fd6bbd941e
--- /dev/null
+++ b/drivers/mtd/ubi/vtbl.c
@@ -0,0 +1,809 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 * Copyright (c) Nokia Corporation, 2006, 2007
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * Author: Artem Bityutskiy (Битюцкий Артём)
20 */
21
22/*
23 * This file includes volume table manipulation code. The volume table is an
24 * on-flash table containing volume meta-data like name, number of reserved
25 * physical eraseblocks, type, etc. The volume table is stored in the so-called
26 * "layout volume".
27 *
28 * The layout volume is an internal volume which is organized as follows. It
29 * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
30 * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
31 * other. This redundancy guarantees robustness to unclean reboots. The volume
32 * table is basically an array of volume table records. Each record contains
33 * full information about the volume and protected by a CRC checksum.
34 *
35 * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
36 * erased, and the updated volume table is written back to LEB 0. The same is
37 * then done for LEB 1. This scheme guarantees recoverability from unclean reboots.
38 *
39 * In this UBI implementation the on-flash volume table does not contain any
40 * information about how much data static volumes contain. This information may
41 * be derived from the scanning data.
42 *
43 * But it would still be beneficial to store this information in the volume
44 * table. For example, suppose we have a static volume X, and all its physical
45 * eraseblocks became bad for some reason. Suppose we are attaching the
46 * corresponding MTD device and the scan finds no logical eraseblocks
47 * corresponding to volume X. According to the volume table, volume X does
48 * exist, but we don't know whether it is just empty or all its physical
49 * eraseblocks went bad, so we cannot warn the user about this corruption.
50 *
51 * The volume table also stores so-called "update marker", which is used for
52 * volume updates. Before updating the volume, the update marker is set, and
53 * after the update operation is finished, the update marker is cleared. So if
54 * the update operation was interrupted (e.g. by an unclean reboot) - the
55 * update marker is still there and we know that the volume's contents are
56 * damaged.
57 */
58
59#include <linux/crc32.h>
60#include <linux/err.h>
61#include <asm/div64.h>
62#include "ubi.h"
63
64#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
65static void paranoid_vtbl_check(const struct ubi_device *ubi);
66#else
67#define paranoid_vtbl_check(ubi)
68#endif
69
70/* Empty volume table record */
71static struct ubi_vtbl_record empty_vtbl_record;
72
73/**
74 * ubi_change_vtbl_record - change volume table record.
75 * @ubi: UBI device description object
76 * @idx: table index to change
77 * @vtbl_rec: new volume table record
78 *
79 * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty
80 * volume table record is written. The caller does not have to calculate CRC of
81 * the record as it is done by this function. Returns zero in case of success
82 * and a negative error code in case of failure.
83 */
84int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
85 struct ubi_vtbl_record *vtbl_rec)
86{
87 int i, err;
88 uint32_t crc;
89
90 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
91
92 if (!vtbl_rec)
93 vtbl_rec = &empty_vtbl_record;
94 else {
95 crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
96 vtbl_rec->crc = cpu_to_ubi32(crc);
97 }
98
99 dbg_msg("change record %d", idx);
100 ubi_dbg_dump_vtbl_record(vtbl_rec, idx);
101
102 mutex_lock(&ubi->vtbl_mutex);
103 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
104 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
105 err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
106 if (err) {
107 mutex_unlock(&ubi->vtbl_mutex);
108 return err;
109 }
110 err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
111 ubi->vtbl_size, UBI_LONGTERM);
112 if (err) {
113 mutex_unlock(&ubi->vtbl_mutex);
114 return err;
115 }
116 }
117
118 paranoid_vtbl_check(ubi);
119 mutex_unlock(&ubi->vtbl_mutex);
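/*
 * ubi_wl_flush() waits for the erasures scheduled by the unmap operations
 * above to finish before we report the volume table change as complete.
 */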
120 return ubi_wl_flush(ubi);
121}
122
123/**
124 * vtbl_check - check if the volume table is not corrupted and contains sensible
125 * data.
126 *
127 * @ubi: UBI device description object
128 * @vtbl: volume table
129 *
130 * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
131 * and %-EINVAL if it contains inconsistent data.
132 */
133static int vtbl_check(const struct ubi_device *ubi,
134 const struct ubi_vtbl_record *vtbl)
135{
136 int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
137 int upd_marker;
138 uint32_t crc;
139 const char *name;
140
141 for (i = 0; i < ubi->vtbl_slots; i++) {
142 cond_resched();
143
144 reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
145 alignment = ubi32_to_cpu(vtbl[i].alignment);
146 data_pad = ubi32_to_cpu(vtbl[i].data_pad);
147 upd_marker = vtbl[i].upd_marker;
148 vol_type = vtbl[i].vol_type;
149 name_len = ubi16_to_cpu(vtbl[i].name_len);
150 name = &vtbl[i].name[0];
151
152 crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
153 if (ubi32_to_cpu(vtbl[i].crc) != crc) {
154 ubi_err("bad CRC at record %u: %#08x, not %#08x",
155 i, crc, ubi32_to_cpu(vtbl[i].crc));
156 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
157 return 1;
158 }
159
160 if (reserved_pebs == 0) {
161 if (memcmp(&vtbl[i], &empty_vtbl_record,
162 UBI_VTBL_RECORD_SIZE)) {
163 dbg_err("bad empty record");
164 goto bad;
165 }
166 continue;
167 }
168
169 if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
170 name_len < 0) {
171 dbg_err("negative values");
172 goto bad;
173 }
174
175 if (alignment > ubi->leb_size || alignment == 0) {
176 dbg_err("bad alignment");
177 goto bad;
178 }
179
180 n = alignment % ubi->min_io_size;
181 if (alignment != 1 && n) {
182 dbg_err("alignment is not multiple of min I/O unit");
183 goto bad;
184 }
185
186 n = ubi->leb_size % alignment;
187 if (data_pad != n) {
188 dbg_err("bad data_pad, has to be %d", n);
189 goto bad;
190 }
191
192 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
193 dbg_err("bad vol_type");
194 goto bad;
195 }
196
197 if (upd_marker != 0 && upd_marker != 1) {
198 dbg_err("bad upd_marker");
199 goto bad;
200 }
201
202 if (reserved_pebs > ubi->good_peb_count) {
203 dbg_err("too large reserved_pebs, good PEBs %d",
204 ubi->good_peb_count);
205 goto bad;
206 }
207
208 if (name_len > UBI_VOL_NAME_MAX) {
209 dbg_err("too long volume name, max %d",
210 UBI_VOL_NAME_MAX);
211 goto bad;
212 }
213
214 if (name[0] == '\0') {
215 dbg_err("NULL volume name");
216 goto bad;
217 }
218
219 if (name_len != strnlen(name, name_len + 1)) {
220 dbg_err("bad name_len");
221 goto bad;
222 }
223 }
224
225 /* Checks that all names are unique */
226 for (i = 0; i < ubi->vtbl_slots - 1; i++) {
227 for (n = i + 1; n < ubi->vtbl_slots; n++) {
228 int len1 = ubi16_to_cpu(vtbl[i].name_len);
229 int len2 = ubi16_to_cpu(vtbl[n].name_len);
230
231 if (len1 > 0 && len1 == len2 &&
232 !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
233 ubi_err("volumes %d and %d have the same name"
234 " \"%s\"", i, n, vtbl[i].name);
235 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
236 ubi_dbg_dump_vtbl_record(&vtbl[n], n);
237 return -EINVAL;
238 }
239 }
240 }
241
242 return 0;
243
244bad:
245 ubi_err("volume table check failed, record %d", i);
246 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
247 return -EINVAL;
248}
249
250/**
251 * create_vtbl - create a copy of volume table.
252 * @ubi: UBI device description object
253 * @si: scanning information
254 * @copy: number of the volume table copy
255 * @vtbl: contents of the volume table
256 *
257 * This function returns zero in case of success and a negative error code in
258 * case of failure.
259 */
260static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si,
261 int copy, void *vtbl)
262{
263 int err, tries = 0;
264 struct ubi_vid_hdr *vid_hdr;
265 struct ubi_scan_volume *sv;
266 struct ubi_scan_leb *new_seb, *old_seb = NULL;
267
268 ubi_msg("create volume table (copy #%d)", copy + 1);
269
270 vid_hdr = ubi_zalloc_vid_hdr(ubi);
271 if (!vid_hdr)
272 return -ENOMEM;
273
274 /*
275 * Check whether a logical eraseblock which would have to contain this
276 * volume table copy was found during scanning. If so, it has to be
277 * wiped out.
278 */
279 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
280 if (sv)
281 old_seb = ubi_scan_find_seb(sv, copy);
282
283retry:
284 new_seb = ubi_scan_get_free_peb(ubi, si);
285 if (IS_ERR(new_seb)) {
286 err = PTR_ERR(new_seb);
287 goto out_free;
288 }
289
290 vid_hdr->vol_type = UBI_VID_DYNAMIC;
291 vid_hdr->vol_id = cpu_to_ubi32(UBI_LAYOUT_VOL_ID);
292 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
293 vid_hdr->data_size = vid_hdr->used_ebs =
294 vid_hdr->data_pad = cpu_to_ubi32(0);
295 vid_hdr->lnum = cpu_to_ubi32(copy);
296 vid_hdr->sqnum = cpu_to_ubi64(++si->max_sqnum);
297 vid_hdr->leb_ver = cpu_to_ubi32(old_seb ? old_seb->leb_ver + 1: 0);
298
299 /* The EC header is already there, write the VID header */
300 err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
301 if (err)
302 goto write_error;
303
304 /* Write the layout volume contents */
305 err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
306 if (err)
307 goto write_error;
308
309 /*
310 * And add it to the scanning information. Don't delete the old
311 * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
312 */
313 err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
314 vid_hdr, 0);
315 kfree(new_seb);
316 ubi_free_vid_hdr(ubi, vid_hdr);
317 return err;
318
319write_error:
320 /* Maybe this physical eraseblock went bad, try to pick another one */
321 if (++tries <= 5) {
322 err = ubi_scan_add_to_list(si, new_seb->pnum, new_seb->ec, &si->corr);
323 kfree(new_seb);
324 if (!err)
325 goto retry;
326 } else
327 kfree(new_seb);
328out_free:
329 ubi_free_vid_hdr(ubi, vid_hdr);
330 return err;
331
332}
333
334/**
335 * process_lvol - process the layout volume.
336 * @ubi: UBI device description object
337 * @si: scanning information
338 * @sv: layout volume scanning information
339 *
340 * This function is responsible for reading the layout volume, ensuring it is
341 * not corrupted, and recovering from corruptions if needed. Returns volume
342 * table in case of success and a negative error code in case of failure.
343 */
344static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
345 struct ubi_scan_info *si,
346 struct ubi_scan_volume *sv)
347{
348 int err;
349 struct rb_node *rb;
350 struct ubi_scan_leb *seb;
351 struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
352 int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
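/* Both copies are assumed corrupted until vtbl_check() proves otherwise */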
353
354 /*
355 * UBI goes through the following steps when it changes the layout
356 * volume:
357 * a. erase LEB 0;
358 * b. write new data to LEB 0;
359 * c. erase LEB 1;
360 * d. write new data to LEB 1.
361 *
362 * Before the change, both LEBs contain the same data.
363 *
364 * Due to unclean reboots, the contents of LEB 0 may be lost, but there
365 * should be LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not.
366 * Similarly, LEB 1 may be lost, but there should be LEB 0. And
367 * finally, unclean reboots may result in a situation when neither LEB
368 * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
369 * 0 contains more recent information.
370 *
371 * So the plan is to first check LEB 0. Then
372 * a. if LEB 0 is OK, it must contain the most recent data; then
373 * we compare it with LEB 1, and if they are different, we copy LEB
374 * 0 to LEB 1;
375 * b. if LEB 0 is corrupted, LEB 1 has to be OK; then we copy LEB 1
376 * to LEB 0.
377 */
378
379 dbg_msg("check layout volume");
380
381 /* Read both LEB 0 and LEB 1 into memory */
382 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
383 leb[seb->lnum] = kzalloc(ubi->vtbl_size, GFP_KERNEL);
384 if (!leb[seb->lnum]) {
385 err = -ENOMEM;
386 goto out_free;
387 }
388
389 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
390 ubi->vtbl_size);
391 if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
392 /* Scrub the PEB later */
393 seb->scrub = 1;
394 else if (err)
395 goto out_free;
396 }
397
398 err = -EINVAL;
399 if (leb[0]) {
400 leb_corrupted[0] = vtbl_check(ubi, leb[0]);
401 if (leb_corrupted[0] < 0)
402 goto out_free;
403 }
404
405 if (!leb_corrupted[0]) {
406 /* LEB 0 is OK */
407 if (leb[1])
408 leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size);
409 if (leb_corrupted[1]) {
410 ubi_warn("volume table copy #2 is corrupted");
411 err = create_vtbl(ubi, si, 1, leb[0]);
412 if (err)
413 goto out_free;
414 ubi_msg("volume table was restored");
415 }
416
417 /* Both LEB 0 and LEB 1 are OK and consistent */
418 kfree(leb[1]);
419 return leb[0];
420 } else {
421 /* LEB 0 is corrupted or does not exist */
422 if (leb[1]) {
423 leb_corrupted[1] = vtbl_check(ubi, leb[1]);
424 if (leb_corrupted[1] < 0)
425 goto out_free;
426 }
427 if (leb_corrupted[1]) {
428 /* Both LEB 0 and LEB 1 are corrupted */
429 ubi_err("both volume tables are corrupted");
430 goto out_free;
431 }
432
433 ubi_warn("volume table copy #1 is corrupted");
434 err = create_vtbl(ubi, si, 0, leb[1]);
435 if (err)
436 goto out_free;
437 ubi_msg("volume table was restored");
438
439 kfree(leb[0]);
440 return leb[1];
441 }
442
443out_free:
444 kfree(leb[0]);
445 kfree(leb[1]);
446 return ERR_PTR(err);
447}
448
449/**
450 * create_empty_lvol - create empty layout volume.
451 * @ubi: UBI device description object
452 * @si: scanning information
453 *
454 * This function returns volume table contents in case of success and a
455 * negative error code in case of failure.
456 */
457static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi,
458 struct ubi_scan_info *si)
459{
460 int i;
461 struct ubi_vtbl_record *vtbl;
462
463 vtbl = kzalloc(ubi->vtbl_size, GFP_KERNEL);
464 if (!vtbl)
465 return ERR_PTR(-ENOMEM);
466
467 for (i = 0; i < ubi->vtbl_slots; i++)
468 memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
469
470 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
471 int err;
472
473 err = create_vtbl(ubi, si, i, vtbl);
474 if (err) {
475 kfree(vtbl);
476 return ERR_PTR(err);
477 }
478 }
479
480 return vtbl;
481}
482
483/**
484 * init_volumes - initialize volume information for existing volumes.
485 * @ubi: UBI device description object
486 * @si: scanning information
487 * @vtbl: volume table
488 *
489 * This function allocates volume description objects for existing volumes.
490 * Returns zero in case of success and a negative error code in case of
491 * failure.
492 */
493static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
494 const struct ubi_vtbl_record *vtbl)
495{
496 int i, reserved_pebs = 0;
497 struct ubi_scan_volume *sv;
498 struct ubi_volume *vol;
499
500 for (i = 0; i < ubi->vtbl_slots; i++) {
501 cond_resched();
502
503 if (ubi32_to_cpu(vtbl[i].reserved_pebs) == 0)
504 continue; /* Empty record */
505
506 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
507 if (!vol)
508 return -ENOMEM;
509
510 vol->reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
511 vol->alignment = ubi32_to_cpu(vtbl[i].alignment);
512 vol->data_pad = ubi32_to_cpu(vtbl[i].data_pad);
513 vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
514 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
515 vol->name_len = ubi16_to_cpu(vtbl[i].name_len);
516 vol->usable_leb_size = ubi->leb_size - vol->data_pad;
517 memcpy(vol->name, vtbl[i].name, vol->name_len);
518 vol->name[vol->name_len] = '\0';
519 vol->vol_id = i;
520
521 ubi_assert(!ubi->volumes[i]);
522 ubi->volumes[i] = vol;
523 ubi->vol_count += 1;
524 vol->ubi = ubi;
525 reserved_pebs += vol->reserved_pebs;
526
527 /*
528 * In case of a dynamic volume, UBI knows nothing about how much
529 * data is stored there. So assume the whole volume is used.
530 */
531 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
532 vol->used_ebs = vol->reserved_pebs;
533 vol->last_eb_bytes = vol->usable_leb_size;
534 vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
535 continue;
536 }
537
538 /* Static volumes only */
539 sv = ubi_scan_find_sv(si, i);
540 if (!sv) {
541 /*
542 * No eraseblocks belonging to this volume were found. We
543 * don't actually know whether this static volume is
544 * completely corrupted or just contains no data. And
545 * we cannot know this as long as data size is not
546 * stored on flash. So we just assume the volume is
547 * empty. FIXME: this should be handled.
548 */
549 continue;
550 }
551
552 if (sv->leb_count != sv->used_ebs) {
553 /*
554 * We found a static volume which is missing several
555 * eraseblocks. Treat it as corrupted.
556 */
557 ubi_warn("static volume %d misses %d LEBs - corrupted",
558 sv->vol_id, sv->used_ebs - sv->leb_count);
559 vol->corrupted = 1;
560 continue;
561 }
562
563 vol->used_ebs = sv->used_ebs;
564 vol->used_bytes = (vol->used_ebs - 1) * vol->usable_leb_size;
565 vol->used_bytes += sv->last_data_size;
566 vol->last_eb_bytes = sv->last_data_size;
567 }
568
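/*
 * Finally, add the in-RAM description of the internal layout volume itself;
 * it is not described by a volume table record.
 */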
569 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
570 if (!vol)
571 return -ENOMEM;
572
573 vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
574 vol->alignment = 1;
575 vol->vol_type = UBI_DYNAMIC_VOLUME;
576 vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
577 memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
578 vol->usable_leb_size = ubi->leb_size;
579 vol->used_ebs = vol->reserved_pebs;
580 vol->last_eb_bytes = vol->reserved_pebs;
581 vol->used_bytes = vol->used_ebs * (ubi->leb_size - vol->data_pad);
582 vol->vol_id = UBI_LAYOUT_VOL_ID;
583
584 ubi_assert(!ubi->volumes[i]);
585 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
586 reserved_pebs += vol->reserved_pebs;
587 ubi->vol_count += 1;
588 vol->ubi = ubi;
589
590 if (reserved_pebs > ubi->avail_pebs)
591 ubi_err("not enough PEBs, required %d, available %d",
592 reserved_pebs, ubi->avail_pebs);
593 ubi->rsvd_pebs += reserved_pebs;
594 ubi->avail_pebs -= reserved_pebs;
595
596 return 0;
597}
598
599/**
600 * check_sv - check volume scanning information.
601 * @vol: UBI volume description object
602 * @sv: volume scanning information
603 *
604 * This function returns zero if the volume scanning information is consistent
605 * with the data read from the volume table, and %-EINVAL if not.
606 */
607static int check_sv(const struct ubi_volume *vol,
608 const struct ubi_scan_volume *sv)
609{
610 if (sv->highest_lnum >= vol->reserved_pebs) {
611 dbg_err("bad highest_lnum");
612 goto bad;
613 }
614 if (sv->leb_count > vol->reserved_pebs) {
615 dbg_err("bad leb_count");
616 goto bad;
617 }
618 if (sv->vol_type != vol->vol_type) {
619 dbg_err("bad vol_type");
620 goto bad;
621 }
622 if (sv->used_ebs > vol->reserved_pebs) {
623 dbg_err("bad used_ebs");
624 goto bad;
625 }
626 if (sv->data_pad != vol->data_pad) {
627 dbg_err("bad data_pad");
628 goto bad;
629 }
630 return 0;
631
632bad:
633 ubi_err("bad scanning information");
634 ubi_dbg_dump_sv(sv);
635 ubi_dbg_dump_vol_info(vol);
636 return -EINVAL;
637}
638
639/**
640 * check_scanning_info - check the scanning information.
641 * @ubi: UBI device description object
642 * @si: scanning information
643 *
644 * Even though we protect on-flash data by CRC checksums, we still don't trust
645 * the media. This function ensures that the scanning information is consistent
646 * with the information read from the volume table. Returns zero if the scanning
647 * information is OK and %-EINVAL if it is not.
648 */
649static int check_scanning_info(const struct ubi_device *ubi,
650 struct ubi_scan_info *si)
651{
652 int err, i;
653 struct ubi_scan_volume *sv;
654 struct ubi_volume *vol;
655
656 if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
657 ubi_err("scanning found %d volumes, maximum is %d + %d",
658 si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
659 return -EINVAL;
660 }
661
662 if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
663 si->highest_vol_id < UBI_INTERNAL_VOL_START) {
664 ubi_err("too large volume ID %d found by scanning",
665 si->highest_vol_id);
666 return -EINVAL;
667 }
668
669
670 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
671 cond_resched();
672
673 sv = ubi_scan_find_sv(si, i);
674 vol = ubi->volumes[i];
675 if (!vol) {
676 if (sv)
677 ubi_scan_rm_volume(si, sv);
678 continue;
679 }
680
681 if (vol->reserved_pebs == 0) {
682 ubi_assert(i < ubi->vtbl_slots);
683
684 if (!sv)
685 continue;
686
687 /*
688 * During scanning we found a volume which does not
689 * exist according to the information in the volume
690 * table. This must have happened due to an unclean
691 * reboot while the volume was being removed. Discard
692 * these eraseblocks.
693 */
694 ubi_msg("finish volume %d removal", sv->vol_id);
695 ubi_scan_rm_volume(si, sv);
696 } else if (sv) {
697 err = check_sv(vol, sv);
698 if (err)
699 return err;
700 }
701 }
702
703 return 0;
704}
705
706/**
707 * ubi_read_volume_table - read the volume table and initialize volume
708 * information.
709 * @ubi: UBI device description object
710 * @si: scanning information
711 *
712 * This function reads the volume table, checks it, recovers from errors if needed,
713 * or creates it if needed. Returns zero in case of success and a negative
714 * error code in case of failure.
715 */
716int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
717{
718 int i, err;
719 struct ubi_scan_volume *sv;
720
721 empty_vtbl_record.crc = cpu_to_ubi32(0xf116c36b);
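/*
 * 0xf116c36b is the CRC32 of an all-zeroes volume table record, so empty
 * records written from 'empty_vtbl_record' pass the CRC check in
 * vtbl_check().
 */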
722
723 /*
724 * The number of supported volumes is limited by the eraseblock size
725 * and by the UBI_MAX_VOLUMES constant.
726 */
727 ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
728 if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
729 ubi->vtbl_slots = UBI_MAX_VOLUMES;
730
731 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
732 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
733
734 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
735 if (!sv) {
736 /*
737 * No logical eraseblocks belonging to the layout volume were
738 * found. This could mean that the flash is just empty. In
739 * this case we create empty layout volume.
740 *
741 * But if flash is not empty this must be a corruption or the
742 * MTD device just contains garbage.
743 */
744 if (si->is_empty) {
745 ubi->vtbl = create_empty_lvol(ubi, si);
746 if (IS_ERR(ubi->vtbl))
747 return PTR_ERR(ubi->vtbl);
748 } else {
749 ubi_err("the layout volume was not found");
750 return -EINVAL;
751 }
752 } else {
753 if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
754 /* This must not happen with proper UBI images */
755 dbg_err("too many LEBs (%d) in layout volume",
756 sv->leb_count);
757 return -EINVAL;
758 }
759
760 ubi->vtbl = process_lvol(ubi, si, sv);
761 if (IS_ERR(ubi->vtbl))
762 return PTR_ERR(ubi->vtbl);
763 }
764
765 ubi->avail_pebs = ubi->good_peb_count;
766
767 /*
768 * The layout volume is OK, initialize the corresponding in-RAM data
769 * structures.
770 */
771 err = init_volumes(ubi, si, ubi->vtbl);
772 if (err)
773 goto out_free;
774
775 /*
776 * Make sure that the scanning information is consistent with the
777 * information stored in the volume table.
778 */
779 err = check_scanning_info(ubi, si);
780 if (err)
781 goto out_free;
782
783 return 0;
784
785out_free:
786 kfree(ubi->vtbl);
787 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
788 if (ubi->volumes[i]) {
789 kfree(ubi->volumes[i]);
790 ubi->volumes[i] = NULL;
791 }
792 return err;
793}
794
795#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
796
797/**
798 * paranoid_vtbl_check - check volume table.
799 * @ubi: UBI device description object
800 */
801static void paranoid_vtbl_check(const struct ubi_device *ubi)
802{
803 if (vtbl_check(ubi, ubi->vtbl)) {
804 ubi_err("paranoid check failed");
805 BUG();
806 }
807}
808
809#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
new file mode 100644
index 000000000000..9ecaf77eca9e
--- /dev/null
+++ b/drivers/mtd/ubi/wl.c
@@ -0,0 +1,1671 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
19 */
20
21/*
22 * UBI wear-leveling unit.
23 *
24 * This unit is responsible for wear-leveling. It works in terms of physical
25 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
26 * volumes, etc. From this unit's perspective all physical eraseblocks are of
27 * two types - used and free. Used physical eraseblocks are those that were
28 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
29 * those that were put by the 'ubi_wl_put_peb()' function.
30 *
31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter
32 * header. The rest of the physical eraseblock contains only 0xFF bytes.
33 *
34 * When physical eraseblocks are returned to the WL unit by means of the
35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
36 * done asynchronously in context of the per-UBI device background thread,
37 * which is also managed by the WL unit.
38 *
39 * The wear-leveling is ensured by means of moving the contents of used
40 * physical eraseblocks with low erase counter to free physical eraseblocks
41 * with high erase counter.
42 *
43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44 * an "optimal" physical eraseblock. For example, when it is known that the
45 * physical eraseblock will be "put" soon because it contains short-term data,
46 * the WL unit may pick a free physical eraseblock with low erase counter, and
47 * so forth.
48 *
49 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
50 *
51 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
52 * physical eraseblock, it has to be moved. Technically this is the same as
53 * moving it for wear-leveling reasons.
54 *
55 * As mentioned, for the WL unit all physical eraseblocks are either "free"
56 * or "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
57 * eraseblocks are kept in a set of different RB-trees: @wl->used,
58 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
59 *
60 * Note, in this implementation, we keep a small in-RAM object for each physical
61 * eraseblock. This is surely not a scalable solution. But it appears to be good
62 * enough for moderately large flashes and it is simple. In future, one may
63 * re-work this unit and make it more scalable.
64 *
65 * At the moment this unit does not utilize the sequence number, which was
66 * introduced relatively recently. But it would be wise to do this because the
67 * sequence number of a logical eraseblock characterizes how old it is. For
68 * example, when we move a PEB with low erase counter, and we need to pick the
69 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
70 * pick a target PEB with an average EC if our PEB is not very "old". This is
71 * room for future re-work of the WL unit.
72 *
73 * FIXME: looks too complex, should be simplified (later).
74 */
75
76#include <linux/slab.h>
77#include <linux/crc32.h>
78#include <linux/freezer.h>
79#include <linux/kthread.h>
80#include "ubi.h"
81
82/* Number of physical eraseblocks reserved for wear-leveling purposes */
83#define WL_RESERVED_PEBS 1
84
85/*
86 * For how many erase cycles short term, unknown, and long term physical
87 * eraseblocks are protected.
88 */
89#define ST_PROTECTION 16
90#define U_PROTECTION 10
91#define LT_PROTECTION 4
92
93/*
94 * Maximum difference between two erase counters. If this threshold is
95 * exceeded, the WL unit starts moving data from used physical eraseblocks with
96 * low erase counter to free physical eraseblocks with high erase counter.
97 */
98#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
99
100/*
101 * When a physical eraseblock is moved, the WL unit has to pick the target
102 * physical eraseblock to move to. The simplest way would be just to pick the
103 * one with the highest erase counter. But in certain workloads this could lead
104 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
105 * situation when the picked physical eraseblock is constantly erased after the
106 * data is written to it. So, we have a constant which limits the highest erase
107 * counter of the free physical eraseblock to pick. Namely, the WL unit does
108 * not pick eraseblocks with an erase counter greater than the lowest erase
109 * counter plus %WL_FREE_MAX_DIFF.
110 */
111#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
112
113/*
114 * Maximum number of consecutive background thread failures which is enough to
115 * switch to read-only mode.
116 */
117#define WL_MAX_FAILURES 32
118
119/**
120 * struct ubi_wl_entry - wear-leveling entry.
121 * @rb: link in the corresponding RB-tree
122 * @ec: erase counter
123 * @pnum: physical eraseblock number
124 *
125 * Each physical eraseblock has a corresponding &struct wl_entry object which
126 * may be kept in different RB-trees.
127 */
128struct ubi_wl_entry {
129 struct rb_node rb;
130 int ec;
131 int pnum;
132};
133
134/**
135 * struct ubi_wl_prot_entry - PEB protection entry.
136 * @rb_pnum: link in the @wl->prot.pnum RB-tree
137 * @rb_aec: link in the @wl->prot.aec RB-tree
138 * @abs_ec: the absolute erase counter value when the protection ends
139 * @e: the wear-leveling entry of the physical eraseblock under protection
140 *
141 * When the WL unit returns a physical eraseblock, the physical eraseblock is
142 * protected from being moved for some "time". For this reason, the physical
143 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
144 * tree. There is one more tree in between where this physical eraseblock is
145 * temporarily stored (@wl->prot).
146 *
147 * All this protection stuff is needed because:
148 * o we don't want to move physical eraseblocks just after we have given them
149 * to the user; instead, we first want to let users fill them up with data;
150 *
151 * o there is a chance that the user will put the physical eraseblock very
152 * soon, so it makes sense not to move it for some time, but wait; this is
153 * especially important in case of "short term" physical eraseblocks.
154 *
155 * Physical eraseblocks stay protected only for a limited time. But the "time" is
156 * measured in erase cycles in this case. This is implemented with the help of the
157 * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
158 * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
159 * the @wl->used tree.
160 *
161 * Protected physical eraseblocks are searched by physical eraseblock number
162 * (when they are put) and by the absolute erase counter (to check if it is
163 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
164 * storing the protected physical eraseblocks: @wl->prot.pnum and
165 * @wl->prot.aec. They are referred to as the "protection" trees. The
166 * first one is indexed by the physical eraseblock number. The second one is
167 * indexed by the absolute erase counter. Both trees store
168 * &struct ubi_wl_prot_entry objects.
169 *
170 * Each physical eraseblock has 2 main states: free and used. The former state
171 * corresponds to the @wl->free tree. The latter state is split into several
172 * sub-states:
173 * o the WL movement is allowed (@wl->used tree);
174 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
175 * @wl->prot.aec trees);
176 * o scrubbing is needed (@wl->scrub tree).
177 *
178 * Depending on the sub-state, wear-leveling entries of the used physical
179 * eraseblocks may be kept in one of those trees.
180 */
181struct ubi_wl_prot_entry {
182 struct rb_node rb_pnum;
183 struct rb_node rb_aec;
184 unsigned long long abs_ec;
185 struct ubi_wl_entry *e;
186};
187
188/**
189 * struct ubi_work - UBI work description data structure.
190 * @list: a link in the list of pending works
191 * @func: worker function
192 * @priv: private data of the worker function
193 *
194 * @e: physical eraseblock to erase
195 * @torture: if the physical eraseblock has to be tortured
196 *
197 * The @func pointer points to the worker function. If the @cancel argument is
198 * not zero, the worker has to free the resources and exit immediately. The
199 * worker has to return zero in case of success and a negative error code in
200 * case of failure.
201 */
202struct ubi_work {
203 struct list_head list;
204 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
205 /* The below fields are only relevant to erasure works */
206 struct ubi_wl_entry *e;
207 int torture;
208};
209
210#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
211static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
212static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
213 struct rb_root *root);
214#else
215#define paranoid_check_ec(ubi, pnum, ec) 0
216#define paranoid_check_in_wl_tree(e, root)
217#endif
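/*
 * When the paranoid checks are compiled out, paranoid_check_ec() expands to 0
 * ("check passed") so callers can still test its return value, while
 * paranoid_check_in_wl_tree() expands to nothing.
 */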
218
219/* Slab cache for wear-leveling entries */
220static struct kmem_cache *wl_entries_slab;
221
222/**
223 * tree_empty - a helper function to check if an RB-tree is empty.
224 * @root: the root of the tree
225 *
226 * This function returns non-zero if the RB-tree is empty and zero if not.
227 */
228static inline int tree_empty(struct rb_root *root)
229{
230 return root->rb_node == NULL;
231}
232
233/**
234 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
235 * @e: the wear-leveling entry to add
236 * @root: the root of the tree
237 *
238 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
239 * the @ubi->used and @ubi->free RB-trees.
240 */
241static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
242{
243 struct rb_node **p, *parent = NULL;
244
245 p = &root->rb_node;
246 while (*p) {
247 struct ubi_wl_entry *e1;
248
249 parent = *p;
250 e1 = rb_entry(parent, struct ubi_wl_entry, rb);
251
252 if (e->ec < e1->ec)
253 p = &(*p)->rb_left;
254 else if (e->ec > e1->ec)
255 p = &(*p)->rb_right;
256 else {
257 ubi_assert(e->pnum != e1->pnum);
258 if (e->pnum < e1->pnum)
259 p = &(*p)->rb_left;
260 else
261 p = &(*p)->rb_right;
262 }
263 }
264
265 rb_link_node(&e->rb, parent, p);
266 rb_insert_color(&e->rb, root);
267}
268
269
270/*
271 * Helper functions to add and delete wear-leveling entries from different
272 * trees.
273 */
274
275static inline void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
276{
277 wl_tree_add(e, &ubi->free);
278}
279static inline void used_tree_add(struct ubi_device *ubi,
280 struct ubi_wl_entry *e)
281{
282 wl_tree_add(e, &ubi->used);
283}
284static inline void scrub_tree_add(struct ubi_device *ubi,
285 struct ubi_wl_entry *e)
286{
287 wl_tree_add(e, &ubi->scrub);
288}
289static inline void free_tree_del(struct ubi_device *ubi,
290 struct ubi_wl_entry *e)
291{
292 paranoid_check_in_wl_tree(e, &ubi->free);
293 rb_erase(&e->rb, &ubi->free);
294}
295static inline void used_tree_del(struct ubi_device *ubi,
296 struct ubi_wl_entry *e)
297{
298 paranoid_check_in_wl_tree(e, &ubi->used);
299 rb_erase(&e->rb, &ubi->used);
300}
301static inline void scrub_tree_del(struct ubi_device *ubi,
302 struct ubi_wl_entry *e)
303{
304 paranoid_check_in_wl_tree(e, &ubi->scrub);
305 rb_erase(&e->rb, &ubi->scrub);
306}
307
308/**
309 * do_work - do one pending work.
310 * @ubi: UBI device description object
311 *
312 * This function returns zero in case of success and a negative error code in
313 * case of failure.
314 */
315static int do_work(struct ubi_device *ubi)
316{
317 int err;
318 struct ubi_work *wrk;
319
320 spin_lock(&ubi->wl_lock);
321
322 if (list_empty(&ubi->works)) {
323 spin_unlock(&ubi->wl_lock);
324 return 0;
325 }
326
327 wrk = list_entry(ubi->works.next, struct ubi_work, list);
328 list_del(&wrk->list);
329 spin_unlock(&ubi->wl_lock);
330
331 /*
332 * Call the worker function. Do not touch the work structure
333 * after this call as it will have been freed or reused by that
334 * time by the worker function.
335 */
336 err = wrk->func(ubi, wrk, 0);
337 if (err)
338 ubi_err("work failed with error code %d", err);
339
340 spin_lock(&ubi->wl_lock);
341 ubi->works_count -= 1;
342 ubi_assert(ubi->works_count >= 0);
343 spin_unlock(&ubi->wl_lock);
344 return err;
345}
346
347/**
348 * produce_free_peb - produce a free physical eraseblock.
349 * @ubi: UBI device description object
350 *
351 * This function tries to make a free PEB by means of synchronous execution of
 352 * pending works. This may be needed if, for example, the background thread is
353 * disabled. Returns zero in case of success and a negative error code in case
354 * of failure.
355 */
356static int produce_free_peb(struct ubi_device *ubi)
357{
358 int err;
359
360 spin_lock(&ubi->wl_lock);
361 while (tree_empty(&ubi->free)) {
362 spin_unlock(&ubi->wl_lock);
363
364 dbg_wl("do one work synchronously");
365 err = do_work(ubi);
366 if (err)
367 return err;
368
369 spin_lock(&ubi->wl_lock);
370 }
371 spin_unlock(&ubi->wl_lock);
372
373 return 0;
374}
375
376/**
377 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
378 * @e: the wear-leveling entry to check
379 * @root: the root of the tree
380 *
381 * This function returns non-zero if @e is in the @root RB-tree and zero if it
382 * is not.
383 */
384static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
385{
386 struct rb_node *p;
387
388 p = root->rb_node;
389 while (p) {
390 struct ubi_wl_entry *e1;
391
392 e1 = rb_entry(p, struct ubi_wl_entry, rb);
393
394 if (e->pnum == e1->pnum) {
395 ubi_assert(e == e1);
396 return 1;
397 }
398
399 if (e->ec < e1->ec)
400 p = p->rb_left;
401 else if (e->ec > e1->ec)
402 p = p->rb_right;
403 else {
404 ubi_assert(e->pnum != e1->pnum);
405 if (e->pnum < e1->pnum)
406 p = p->rb_left;
407 else
408 p = p->rb_right;
409 }
410 }
411
412 return 0;
413}
414
415/**
416 * prot_tree_add - add physical eraseblock to protection trees.
417 * @ubi: UBI device description object
418 * @e: the physical eraseblock to add
419 * @pe: protection entry object to use
420 * @abs_ec: absolute erase counter value when this physical eraseblock has
421 * to be removed from the protection trees.
422 *
 423 * @ubi->wl_lock has to be held.
424 */
425static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
426 struct ubi_wl_prot_entry *pe, int abs_ec)
427{
428 struct rb_node **p, *parent = NULL;
429 struct ubi_wl_prot_entry *pe1;
430
431 pe->e = e;
432 pe->abs_ec = ubi->abs_ec + abs_ec;
433
434 p = &ubi->prot.pnum.rb_node;
435 while (*p) {
436 parent = *p;
437 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
438
439 if (e->pnum < pe1->e->pnum)
440 p = &(*p)->rb_left;
441 else
442 p = &(*p)->rb_right;
443 }
444 rb_link_node(&pe->rb_pnum, parent, p);
445 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
446
447 p = &ubi->prot.aec.rb_node;
448 parent = NULL;
449 while (*p) {
450 parent = *p;
451 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
452
453 if (pe->abs_ec < pe1->abs_ec)
454 p = &(*p)->rb_left;
455 else
456 p = &(*p)->rb_right;
457 }
458 rb_link_node(&pe->rb_aec, parent, p);
459 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
460}
461
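/*
 * Illustration only, not part of the driver: protection is expressed in
 * absolute erase counter units. If the device-wide absolute erase counter is
 * currently A and a PEB is protected with the relative value P, prot_tree_add()
 * stores A + P, and check_protection_over() later releases the entry once the
 * absolute counter has caught up, i.e. after roughly P further erase
 * operations anywhere on the device. A stand-alone model of the expiry check
 * (protection_expired is an illustrative name):
 */
static int protection_expired(unsigned long long cur_abs_ec,
                              unsigned long long prot_abs_ec)
{
        /* check_protection_over() keeps an entry only while prot_abs_ec > cur_abs_ec */
        return prot_abs_ec <= cur_abs_ec;
}
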
462/**
463 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
464 * @root: the RB-tree where to look for
465 * @max: highest possible erase counter
466 *
 467 * This function looks for a wear-leveling entry with an erase counter closest
 468 * to, but less than, @max.
469 */
470static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
471{
472 struct rb_node *p;
473 struct ubi_wl_entry *e;
474
475 e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
476 max += e->ec;
477
478 p = root->rb_node;
479 while (p) {
480 struct ubi_wl_entry *e1;
481
482 e1 = rb_entry(p, struct ubi_wl_entry, rb);
483 if (e1->ec >= max)
484 p = p->rb_left;
485 else {
486 p = p->rb_right;
487 e = e1;
488 }
489 }
490
491 return e;
492}
493
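/*
 * Illustration only, not part of the driver: find_wl_entry() first turns the
 * relative bound @max into an absolute one by adding the lowest erase counter
 * in the tree, then walks down the tree keeping the right-most entry that
 * stays below that bound. For example, with free PEBs whose erase counters
 * are {100, 120, 180, 400} and max = 200, the bound is 100 + 200 = 300 and
 * the entry with erase counter 180 is picked. A stand-alone model of the same
 * selection over a sorted array (pick_below_bound is an illustrative name):
 */
static int pick_below_bound(const int *sorted_ec, int n, int max)
{
        int bound = sorted_ec[0] + max; /* lowest erase counter plus the allowed difference */
        int i, best = 0;

        for (i = 0; i < n; i++)
                if (sorted_ec[i] < bound)
                        best = i;       /* remember the right-most entry below the bound */
        return sorted_ec[best];
}
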
494/**
495 * ubi_wl_get_peb - get a physical eraseblock.
496 * @ubi: UBI device description object
497 * @dtype: type of data which will be stored in this physical eraseblock
498 *
 499 * This function returns a physical eraseblock number in case of success and a
500 * negative error code in case of failure. Might sleep.
501 */
502int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
503{
504 int err, protect, medium_ec;
505 struct ubi_wl_entry *e, *first, *last;
506 struct ubi_wl_prot_entry *pe;
507
508 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
509 dtype == UBI_UNKNOWN);
510
511 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
512 if (!pe)
513 return -ENOMEM;
514
515retry:
516 spin_lock(&ubi->wl_lock);
517 if (tree_empty(&ubi->free)) {
518 if (ubi->works_count == 0) {
519 ubi_assert(list_empty(&ubi->works));
520 ubi_err("no free eraseblocks");
521 spin_unlock(&ubi->wl_lock);
522 kfree(pe);
523 return -ENOSPC;
524 }
525 spin_unlock(&ubi->wl_lock);
526
527 err = produce_free_peb(ubi);
528 if (err < 0) {
529 kfree(pe);
530 return err;
531 }
532 goto retry;
533 }
534
535 switch (dtype) {
536 case UBI_LONGTERM:
537 /*
 538 * For long-term data we pick a physical eraseblock
 539 * with a high erase counter. But the highest erase
 540 * counter we can pick is bounded by the lowest
541 * erase counter plus %WL_FREE_MAX_DIFF.
542 */
543 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
544 protect = LT_PROTECTION;
545 break;
546 case UBI_UNKNOWN:
547 /*
 548 * For unknown data we pick a physical eraseblock with
 549 * a medium erase counter. But we must not pick a
 550 * physical eraseblock with an erase counter greater
 551 * than or equal to the lowest erase counter plus
 552 * %WL_FREE_MAX_DIFF.
553 */
554 first = rb_entry(rb_first(&ubi->free),
555 struct ubi_wl_entry, rb);
556 last = rb_entry(rb_last(&ubi->free),
557 struct ubi_wl_entry, rb);
558
559 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
560 e = rb_entry(ubi->free.rb_node,
561 struct ubi_wl_entry, rb);
562 else {
563 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
564 e = find_wl_entry(&ubi->free, medium_ec);
565 }
566 protect = U_PROTECTION;
567 break;
568 case UBI_SHORTTERM:
569 /*
 570 * For short-term data we pick a physical eraseblock
571 * with the lowest erase counter as we expect it will
572 * be erased soon.
573 */
574 e = rb_entry(rb_first(&ubi->free),
575 struct ubi_wl_entry, rb);
576 protect = ST_PROTECTION;
577 break;
578 default:
579 protect = 0;
580 e = NULL;
581 BUG();
582 }
583
584 /*
585 * Move the physical eraseblock to the protection trees where it will
586 * be protected from being moved for some time.
587 */
588 free_tree_del(ubi, e);
589 prot_tree_add(ubi, e, pe, protect);
590
591 dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
592 spin_unlock(&ubi->wl_lock);
593
594 return e->pnum;
595}
596
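/*
 * Minimal usage sketch, not part of the driver (assumes the UBI private
 * headers and a valid struct ubi_device; the actual data write is elided and
 * wl_get_put_example is an illustrative name): a caller asks the WL unit for
 * a PEB, uses it, and eventually hands it back for erasure.
 */
static int wl_get_put_example(struct ubi_device *ubi)
{
        int pnum;

        pnum = ubi_wl_get_peb(ubi, UBI_LONGTERM);
        if (pnum < 0)
                return pnum;    /* -ENOSPC, -ENOMEM, ... */

        /* ... write data to PEB 'pnum' through the I/O unit ... */

        /* Return the PEB to the WL unit; 0 means no torture test is requested */
        return ubi_wl_put_peb(ubi, pnum, 0);
}
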
597/**
598 * prot_tree_del - remove a physical eraseblock from the protection trees
599 * @ubi: UBI device description object
600 * @pnum: the physical eraseblock to remove
601 */
602static void prot_tree_del(struct ubi_device *ubi, int pnum)
603{
604 struct rb_node *p;
605 struct ubi_wl_prot_entry *pe = NULL;
606
607 p = ubi->prot.pnum.rb_node;
608 while (p) {
609
610 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
611
612 if (pnum == pe->e->pnum)
613 break;
614
615 if (pnum < pe->e->pnum)
616 p = p->rb_left;
617 else
618 p = p->rb_right;
619 }
620
621 ubi_assert(pe->e->pnum == pnum);
622 rb_erase(&pe->rb_aec, &ubi->prot.aec);
623 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
624 kfree(pe);
625}
626
627/**
628 * sync_erase - synchronously erase a physical eraseblock.
629 * @ubi: UBI device description object
 630 * @e: the physical eraseblock to erase
631 * @torture: if the physical eraseblock has to be tortured
632 *
633 * This function returns zero in case of success and a negative error code in
634 * case of failure.
635 */
636static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
637{
638 int err;
639 struct ubi_ec_hdr *ec_hdr;
640 unsigned long long ec = e->ec;
641
642 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
643
644 err = paranoid_check_ec(ubi, e->pnum, e->ec);
645 if (err > 0)
646 return -EINVAL;
647
648 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
649 if (!ec_hdr)
650 return -ENOMEM;
651
652 err = ubi_io_sync_erase(ubi, e->pnum, torture);
653 if (err < 0)
654 goto out_free;
655
656 ec += err;
657 if (ec > UBI_MAX_ERASECOUNTER) {
658 /*
659 * Erase counter overflow. Upgrade UBI and use 64-bit
660 * erase counters internally.
661 */
662 ubi_err("erase counter overflow at PEB %d, EC %llu",
663 e->pnum, ec);
664 err = -EINVAL;
665 goto out_free;
666 }
667
668 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
669
670 ec_hdr->ec = cpu_to_ubi64(ec);
671
672 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
673 if (err)
674 goto out_free;
675
676 e->ec = ec;
677 spin_lock(&ubi->wl_lock);
678 if (e->ec > ubi->max_ec)
679 ubi->max_ec = e->ec;
680 spin_unlock(&ubi->wl_lock);
681
682out_free:
683 kfree(ec_hdr);
684 return err;
685}
686
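/*
 * Illustration only, not part of the driver: ubi_io_sync_erase() returns the
 * number of erase operations it actually performed (which is why its return
 * value is added to the counter above rather than incrementing it by one),
 * and the resulting 64-bit value must stay within %UBI_MAX_ERASECOUNTER. A
 * stand-alone model of the update (bump_erase_counter is an illustrative
 * name):
 */
static long long bump_erase_counter(long long ec, int erasures_done,
                                    long long max_ec)
{
        ec += erasures_done;
        if (ec > max_ec)
                return -1;      /* would overflow the on-flash EC field */
        return ec;
}
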
687/**
688 * check_protection_over - check if it is time to stop protecting some
689 * physical eraseblocks.
690 * @ubi: UBI device description object
691 *
692 * This function is called after each erase operation, when the absolute erase
 693 * counter is incremented, to check whether some physical eraseblocks no
 694 * longer need to be protected. These physical eraseblocks are moved from the
695 * protection trees to the used tree.
696 */
697static void check_protection_over(struct ubi_device *ubi)
698{
699 struct ubi_wl_prot_entry *pe;
700
701 /*
 702 * There may be several protected physical eraseblocks to remove;
 703 * process them all.
704 */
705 while (1) {
706 spin_lock(&ubi->wl_lock);
707 if (tree_empty(&ubi->prot.aec)) {
708 spin_unlock(&ubi->wl_lock);
709 break;
710 }
711
712 pe = rb_entry(rb_first(&ubi->prot.aec),
713 struct ubi_wl_prot_entry, rb_aec);
714
715 if (pe->abs_ec > ubi->abs_ec) {
716 spin_unlock(&ubi->wl_lock);
717 break;
718 }
719
720 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
721 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
722 rb_erase(&pe->rb_aec, &ubi->prot.aec);
723 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
724 used_tree_add(ubi, pe->e);
725 spin_unlock(&ubi->wl_lock);
726
727 kfree(pe);
728 cond_resched();
729 }
730}
731
732/**
733 * schedule_ubi_work - schedule a work.
734 * @ubi: UBI device description object
735 * @wrk: the work to schedule
736 *
737 * This function enqueues a work defined by @wrk to the tail of the pending
738 * works list.
739 */
740static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
741{
742 spin_lock(&ubi->wl_lock);
743 list_add_tail(&wrk->list, &ubi->works);
744 ubi_assert(ubi->works_count >= 0);
745 ubi->works_count += 1;
746 if (ubi->thread_enabled)
747 wake_up_process(ubi->bgt_thread);
748 spin_unlock(&ubi->wl_lock);
749}
750
751static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
752 int cancel);
753
754/**
755 * schedule_erase - schedule an erase work.
756 * @ubi: UBI device description object
757 * @e: the WL entry of the physical eraseblock to erase
758 * @torture: if the physical eraseblock has to be tortured
759 *
 760 * This function returns zero in case of success and %-ENOMEM in case of
761 * failure.
762 */
763static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
764 int torture)
765{
766 struct ubi_work *wl_wrk;
767
768 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
769 e->pnum, e->ec, torture);
770
771 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
772 if (!wl_wrk)
773 return -ENOMEM;
774
775 wl_wrk->func = &erase_worker;
776 wl_wrk->e = e;
777 wl_wrk->torture = torture;
778
779 schedule_ubi_work(ubi, wl_wrk);
780 return 0;
781}
782
783/**
784 * wear_leveling_worker - wear-leveling worker function.
785 * @ubi: UBI device description object
786 * @wrk: the work object
787 * @cancel: non-zero if the worker has to free memory and exit
788 *
 789 * This function moves data from a less worn-out physical eraseblock to a more
 790 * worn-out one, so that the less worn-out eraseblock can be erased and re-used.
 791 * Returns zero in case of success and a negative error code in case of failure.
792 */
793static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
794 int cancel)
795{
796 int err, put = 0;
797 struct ubi_wl_entry *e1, *e2;
798 struct ubi_vid_hdr *vid_hdr;
799
800 kfree(wrk);
801
802 if (cancel)
803 return 0;
804
805 vid_hdr = ubi_zalloc_vid_hdr(ubi);
806 if (!vid_hdr)
807 return -ENOMEM;
808
809 spin_lock(&ubi->wl_lock);
810
811 /*
 812 * Only one WL worker at a time is supported in this implementation, so
813 * make sure a PEB is not being moved already.
814 */
815 if (ubi->move_to || tree_empty(&ubi->free) ||
816 (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
817 /*
 818 * Only one WL worker at a time is supported in this
819 * implementation, so if a LEB is already being moved, cancel.
820 *
821 * No free physical eraseblocks? Well, we cancel wear-leveling
822 * then. It will be triggered again when a free physical
823 * eraseblock appears.
824 *
825 * No used physical eraseblocks? They must be temporarily
826 * protected from being moved. They will be moved to the
827 * @ubi->used tree later and the wear-leveling will be
828 * triggered again.
829 */
830 dbg_wl("cancel WL, a list is empty: free %d, used %d",
831 tree_empty(&ubi->free), tree_empty(&ubi->used));
832 ubi->wl_scheduled = 0;
833 spin_unlock(&ubi->wl_lock);
834 ubi_free_vid_hdr(ubi, vid_hdr);
835 return 0;
836 }
837
838 if (tree_empty(&ubi->scrub)) {
839 /*
840 * Now pick the least worn-out used physical eraseblock and a
841 * highly worn-out free physical eraseblock. If the erase
 842 * counters differ by at least %UBI_WL_THRESHOLD, start wear-leveling.
843 */
844 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
845 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
846
847 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
848 dbg_wl("no WL needed: min used EC %d, max free EC %d",
849 e1->ec, e2->ec);
850 ubi->wl_scheduled = 0;
851 spin_unlock(&ubi->wl_lock);
852 ubi_free_vid_hdr(ubi, vid_hdr);
853 return 0;
854 }
855 used_tree_del(ubi, e1);
856 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
857 e1->pnum, e1->ec, e2->pnum, e2->ec);
858 } else {
859 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
860 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
861 scrub_tree_del(ubi, e1);
862 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
863 }
864
865 free_tree_del(ubi, e2);
866 ubi_assert(!ubi->move_from && !ubi->move_to);
867 ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
868 ubi->move_from = e1;
869 ubi->move_to = e2;
870 spin_unlock(&ubi->wl_lock);
871
872 /*
873 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
874 * We so far do not know which logical eraseblock our physical
875 * eraseblock (@e1) belongs to. We have to read the volume identifier
876 * header first.
877 */
878
879 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
880 if (err && err != UBI_IO_BITFLIPS) {
881 if (err == UBI_IO_PEB_FREE) {
882 /*
 883 * We are trying to move a PEB without a VID header. UBI
 884 * always writes VID headers shortly after the PEB was
 885 * given out, so probably the user did not have a chance
 886 * to write the header yet because it was preempted. Just
 887 * re-schedule the work, so that next time the VID header
 888 * will likely be in place.
889 */
890 dbg_wl("PEB %d has no VID header", e1->pnum);
891 err = 0;
892 } else {
893 ubi_err("error %d while reading VID header from PEB %d",
894 err, e1->pnum);
895 if (err > 0)
896 err = -EIO;
897 }
898 goto error;
899 }
900
901 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
902 if (err) {
903 if (err == UBI_IO_BITFLIPS)
904 err = 0;
905 goto error;
906 }
907
908 ubi_free_vid_hdr(ubi, vid_hdr);
909 spin_lock(&ubi->wl_lock);
910 if (!ubi->move_to_put)
911 used_tree_add(ubi, e2);
912 else
913 put = 1;
914 ubi->move_from = ubi->move_to = NULL;
915 ubi->move_from_put = ubi->move_to_put = 0;
916 ubi->wl_scheduled = 0;
917 spin_unlock(&ubi->wl_lock);
918
919 if (put) {
920 /*
921 * Well, the target PEB was put meanwhile, schedule it for
922 * erasure.
923 */
924 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
925 err = schedule_erase(ubi, e2, 0);
926 if (err) {
927 kmem_cache_free(wl_entries_slab, e2);
928 ubi_ro_mode(ubi);
929 }
930 }
931
932 err = schedule_erase(ubi, e1, 0);
933 if (err) {
934 kmem_cache_free(wl_entries_slab, e1);
935 ubi_ro_mode(ubi);
936 }
937
938 dbg_wl("done");
939 return err;
940
941 /*
 942 * Some error occurred. @e1 was not changed, so put it back. @e2
 943 * might have been changed, so schedule it for erasure.
944 */
945error:
946 if (err)
947 dbg_wl("error %d occurred, cancel operation", err);
948 ubi_assert(err <= 0);
949
950 ubi_free_vid_hdr(ubi, vid_hdr);
951 spin_lock(&ubi->wl_lock);
952 ubi->wl_scheduled = 0;
953 if (ubi->move_from_put)
954 put = 1;
955 else
956 used_tree_add(ubi, e1);
957 ubi->move_from = ubi->move_to = NULL;
958 ubi->move_from_put = ubi->move_to_put = 0;
959 spin_unlock(&ubi->wl_lock);
960
961 if (put) {
962 /*
963 * Well, the target PEB was put meanwhile, schedule it for
964 * erasure.
965 */
966 dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
967 err = schedule_erase(ubi, e1, 0);
968 if (err) {
969 kmem_cache_free(wl_entries_slab, e1);
970 ubi_ro_mode(ubi);
971 }
972 }
973
974 err = schedule_erase(ubi, e2, 0);
975 if (err) {
976 kmem_cache_free(wl_entries_slab, e2);
977 ubi_ro_mode(ubi);
978 }
979
980 yield();
981 return err;
982}
983
984/**
985 * ensure_wear_leveling - schedule wear-leveling if it is needed.
986 * @ubi: UBI device description object
987 *
988 * This function checks if it is time to start wear-leveling and schedules it
989 * if yes. This function returns zero in case of success and a negative error
990 * code in case of failure.
991 */
992static int ensure_wear_leveling(struct ubi_device *ubi)
993{
994 int err = 0;
995 struct ubi_wl_entry *e1;
996 struct ubi_wl_entry *e2;
997 struct ubi_work *wrk;
998
999 spin_lock(&ubi->wl_lock);
1000 if (ubi->wl_scheduled)
1001 /* Wear-leveling is already in the work queue */
1002 goto out_unlock;
1003
1004 /*
1005 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
 1006 * WL worker has to be scheduled anyway.
1007 */
1008 if (tree_empty(&ubi->scrub)) {
1009 if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
1010 /* No physical eraseblocks - no deal */
1011 goto out_unlock;
1012
1013 /*
1014 * We schedule wear-leveling only if the difference between the
1015 * lowest erase counter of used physical eraseblocks and a high
 1016 * erase counter of free physical eraseblocks is greater than
1017 * %UBI_WL_THRESHOLD.
1018 */
1019 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
1020 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
1021
1022 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1023 goto out_unlock;
1024 dbg_wl("schedule wear-leveling");
1025 } else
1026 dbg_wl("schedule scrubbing");
1027
1028 ubi->wl_scheduled = 1;
1029 spin_unlock(&ubi->wl_lock);
1030
1031 wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
1032 if (!wrk) {
1033 err = -ENOMEM;
1034 goto out_cancel;
1035 }
1036
1037 wrk->func = &wear_leveling_worker;
1038 schedule_ubi_work(ubi, wrk);
1039 return err;
1040
1041out_cancel:
1042 spin_lock(&ubi->wl_lock);
1043 ubi->wl_scheduled = 0;
1044out_unlock:
1045 spin_unlock(&ubi->wl_lock);
1046 return err;
1047}
1048
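/*
 * Illustration only, not part of the driver: wear-leveling is scheduled when
 * the most worn-out free PEB (within %WL_FREE_MAX_DIFF of the least worn-out
 * one) is ahead of the least worn-out used PEB by at least %UBI_WL_THRESHOLD
 * erase cycles. A stand-alone model of that decision (wl_needed is an
 * illustrative name):
 */
static int wl_needed(int min_used_ec, int max_free_ec, int threshold)
{
        /* mirrors the e2->ec - e1->ec >= UBI_WL_THRESHOLD check above */
        return max_free_ec - min_used_ec >= threshold;
}
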
1049/**
1050 * erase_worker - physical eraseblock erase worker function.
1051 * @ubi: UBI device description object
1052 * @wl_wrk: the work object
1053 * @cancel: non-zero if the worker has to free memory and exit
1054 *
 1055 * This function erases a physical eraseblock and performs torture testing if
 1056 * needed. It also takes care of marking the physical eraseblock bad if
1057 * needed. Returns zero in case of success and a negative error code in case of
1058 * failure.
1059 */
1060static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1061 int cancel)
1062{
1063 int err;
1064 struct ubi_wl_entry *e = wl_wrk->e;
1065 int pnum = e->pnum;
1066
1067 if (cancel) {
1068 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1069 kfree(wl_wrk);
1070 kmem_cache_free(wl_entries_slab, e);
1071 return 0;
1072 }
1073
1074 dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1075
1076 err = sync_erase(ubi, e, wl_wrk->torture);
1077 if (!err) {
1078 /* Fine, we've erased it successfully */
1079 kfree(wl_wrk);
1080
1081 spin_lock(&ubi->wl_lock);
1082 ubi->abs_ec += 1;
1083 free_tree_add(ubi, e);
1084 spin_unlock(&ubi->wl_lock);
1085
1086 /*
 1087 * One more erase operation has happened, so take care of protected
1088 * physical eraseblocks.
1089 */
1090 check_protection_over(ubi);
1091
1092 /* And take care about wear-leveling */
1093 err = ensure_wear_leveling(ubi);
1094 return err;
1095 }
1096
1097 kfree(wl_wrk);
1098 kmem_cache_free(wl_entries_slab, e);
1099
1100 if (err != -EIO) {
1101 /*
1102 * If this is not %-EIO, we have no idea what to do. Scheduling
1103 * this physical eraseblock for erasure again would cause
 1104 * errors again and again. Well, let's switch to RO mode.
1105 */
1106 ubi_ro_mode(ubi);
1107 return err;
1108 }
1109
1110 /* It is %-EIO, the PEB went bad */
1111
1112 if (!ubi->bad_allowed) {
1113 ubi_err("bad physical eraseblock %d detected", pnum);
1114 ubi_ro_mode(ubi);
1115 err = -EIO;
1116 } else {
1117 int need;
1118
1119 spin_lock(&ubi->volumes_lock);
1120 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1121 if (need > 0) {
1122 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1123 ubi->avail_pebs -= need;
1124 ubi->rsvd_pebs += need;
1125 ubi->beb_rsvd_pebs += need;
1126 if (need > 0)
1127 ubi_msg("reserve more %d PEBs", need);
1128 }
1129
1130 if (ubi->beb_rsvd_pebs == 0) {
1131 spin_unlock(&ubi->volumes_lock);
1132 ubi_err("no reserved physical eraseblocks");
1133 ubi_ro_mode(ubi);
1134 return -EIO;
1135 }
1136
1137 spin_unlock(&ubi->volumes_lock);
1138 ubi_msg("mark PEB %d as bad", pnum);
1139
1140 err = ubi_io_mark_bad(ubi, pnum);
1141 if (err) {
1142 ubi_ro_mode(ubi);
1143 return err;
1144 }
1145
1146 spin_lock(&ubi->volumes_lock);
1147 ubi->beb_rsvd_pebs -= 1;
1148 ubi->bad_peb_count += 1;
1149 ubi->good_peb_count -= 1;
1150 ubi_calculate_reserved(ubi);
1151 if (ubi->beb_rsvd_pebs == 0)
1152 ubi_warn("last PEB from the reserved pool was used");
1153 spin_unlock(&ubi->volumes_lock);
1154 }
1155
1156 return err;
1157}
1158
1159/**
1160 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
1161 * unit.
1162 * @ubi: UBI device description object
1163 * @pnum: physical eraseblock to return
1164 * @torture: if this physical eraseblock has to be tortured
1165 *
1166 * This function is called to return physical eraseblock @pnum to the pool of
1167 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 1168 * occurred on PEB @pnum and it has to be tested. This function returns zero
1169 * in case of success and a negative error code in case of failure.
1170 */
1171int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1172{
1173 int err;
1174 struct ubi_wl_entry *e;
1175
1176 dbg_wl("PEB %d", pnum);
1177 ubi_assert(pnum >= 0);
1178 ubi_assert(pnum < ubi->peb_count);
1179
1180 spin_lock(&ubi->wl_lock);
1181
1182 e = ubi->lookuptbl[pnum];
1183 if (e == ubi->move_from) {
1184 /*
1185 * User is putting the physical eraseblock which was selected to
1186 * be moved. It will be scheduled for erasure in the
1187 * wear-leveling worker.
1188 */
1189 dbg_wl("PEB %d is being moved", pnum);
1190 ubi_assert(!ubi->move_from_put);
1191 ubi->move_from_put = 1;
1192 spin_unlock(&ubi->wl_lock);
1193 return 0;
1194 } else if (e == ubi->move_to) {
1195 /*
1196 * User is putting the physical eraseblock which was selected
 1197 * as the target of data moving. This may happen if the EBA
 1198 * unit has already re-mapped the LEB but the WL unit has not
 1199 * yet put the PEB into the "used" tree.
1200 */
1201 dbg_wl("PEB %d is the target of data moving", pnum);
1202 ubi_assert(!ubi->move_to_put);
1203 ubi->move_to_put = 1;
1204 spin_unlock(&ubi->wl_lock);
1205 return 0;
1206 } else {
1207 if (in_wl_tree(e, &ubi->used))
1208 used_tree_del(ubi, e);
1209 else if (in_wl_tree(e, &ubi->scrub))
1210 scrub_tree_del(ubi, e);
1211 else
1212 prot_tree_del(ubi, e->pnum);
1213 }
1214 spin_unlock(&ubi->wl_lock);
1215
1216 err = schedule_erase(ubi, e, torture);
1217 if (err) {
1218 spin_lock(&ubi->wl_lock);
1219 used_tree_add(ubi, e);
1220 spin_unlock(&ubi->wl_lock);
1221 }
1222
1223 return err;
1224}
1225
1226/**
1227 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1228 * @ubi: UBI device description object
1229 * @pnum: the physical eraseblock to schedule
1230 *
1231 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1232 * needs scrubbing. This function schedules a physical eraseblock for
 1233 * scrubbing, which is done in the background. This function returns zero in case of
1234 * success and a negative error code in case of failure.
1235 */
1236int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1237{
1238 struct ubi_wl_entry *e;
1239
1240 ubi_msg("schedule PEB %d for scrubbing", pnum);
1241
1242retry:
1243 spin_lock(&ubi->wl_lock);
1244 e = ubi->lookuptbl[pnum];
1245 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1246 spin_unlock(&ubi->wl_lock);
1247 return 0;
1248 }
1249
1250 if (e == ubi->move_to) {
1251 /*
1252 * This physical eraseblock was used to move data to. The data
 1253 * was moved but the PEB has not yet been inserted into the proper
1254 * tree. We should just wait a little and let the WL worker
1255 * proceed.
1256 */
1257 spin_unlock(&ubi->wl_lock);
1258 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1259 yield();
1260 goto retry;
1261 }
1262
1263 if (in_wl_tree(e, &ubi->used))
1264 used_tree_del(ubi, e);
1265 else
1266 prot_tree_del(ubi, pnum);
1267
1268 scrub_tree_add(ubi, e);
1269 spin_unlock(&ubi->wl_lock);
1270
1271 /*
1272 * Technically scrubbing is the same as wear-leveling, so it is done
1273 * by the WL worker.
1274 */
1275 return ensure_wear_leveling(ubi);
1276}
1277
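/*
 * Minimal usage sketch, not part of the driver (assumes the UBI private
 * headers; maybe_scrub_after_read is an illustrative name): when a read of a
 * PEB reports correctable bit-flips (%UBI_IO_BITFLIPS), the data is still
 * valid but the eraseblock should be handed to the WL unit for background
 * scrubbing.
 */
static int maybe_scrub_after_read(struct ubi_device *ubi, int pnum, int read_err)
{
        if (read_err == UBI_IO_BITFLIPS)
                return ubi_wl_scrub_peb(ubi, pnum);     /* schedule background scrub */
        return read_err;
}
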
1278/**
1279 * ubi_wl_flush - flush all pending works.
1280 * @ubi: UBI device description object
1281 *
1282 * This function returns zero in case of success and a negative error code in
1283 * case of failure.
1284 */
1285int ubi_wl_flush(struct ubi_device *ubi)
1286{
1287 int err, pending_count;
1288
1289 pending_count = ubi->works_count;
1290
1291 dbg_wl("flush (%d pending works)", pending_count);
1292
1293 /*
 1294 * Erase while the pending works queue is not empty, but not more than
1295 * the number of currently pending works.
1296 */
1297 while (pending_count-- > 0) {
1298 err = do_work(ubi);
1299 if (err)
1300 return err;
1301 }
1302
1303 return 0;
1304}
1305
1306/**
1307 * tree_destroy - destroy an RB-tree.
1308 * @root: the root of the tree to destroy
1309 */
1310static void tree_destroy(struct rb_root *root)
1311{
1312 struct rb_node *rb;
1313 struct ubi_wl_entry *e;
1314
1315 rb = root->rb_node;
1316 while (rb) {
1317 if (rb->rb_left)
1318 rb = rb->rb_left;
1319 else if (rb->rb_right)
1320 rb = rb->rb_right;
1321 else {
1322 e = rb_entry(rb, struct ubi_wl_entry, rb);
1323
1324 rb = rb_parent(rb);
1325 if (rb) {
1326 if (rb->rb_left == &e->rb)
1327 rb->rb_left = NULL;
1328 else
1329 rb->rb_right = NULL;
1330 }
1331
1332 kmem_cache_free(wl_entries_slab, e);
1333 }
1334 }
1335}
1336
1337/**
1338 * ubi_thread - UBI background thread.
1339 * @u: the UBI device description object pointer
1340 */
1341static int ubi_thread(void *u)
1342{
1343 int failures = 0;
1344 struct ubi_device *ubi = u;
1345
1346 ubi_msg("background thread \"%s\" started, PID %d",
1347 ubi->bgt_name, current->pid);
1348
1349 for (;;) {
1350 int err;
1351
1352 if (kthread_should_stop())
1353 goto out;
1354
1355 if (try_to_freeze())
1356 continue;
1357
1358 spin_lock(&ubi->wl_lock);
1359 if (list_empty(&ubi->works) || ubi->ro_mode ||
1360 !ubi->thread_enabled) {
1361 set_current_state(TASK_INTERRUPTIBLE);
1362 spin_unlock(&ubi->wl_lock);
1363 schedule();
1364 continue;
1365 }
1366 spin_unlock(&ubi->wl_lock);
1367
1368 err = do_work(ubi);
1369 if (err) {
1370 ubi_err("%s: work failed with error code %d",
1371 ubi->bgt_name, err);
1372 if (failures++ > WL_MAX_FAILURES) {
1373 /*
1374 * Too many failures, disable the thread and
1375 * switch to read-only mode.
1376 */
1377 ubi_msg("%s: %d consecutive failures",
1378 ubi->bgt_name, WL_MAX_FAILURES);
1379 ubi_ro_mode(ubi);
1380 break;
1381 }
1382 } else
1383 failures = 0;
1384
1385 cond_resched();
1386 }
1387
1388out:
1389 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1390 return 0;
1391}
1392
1393/**
1394 * cancel_pending - cancel all pending works.
1395 * @ubi: UBI device description object
1396 */
1397static void cancel_pending(struct ubi_device *ubi)
1398{
1399 while (!list_empty(&ubi->works)) {
1400 struct ubi_work *wrk;
1401
1402 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1403 list_del(&wrk->list);
1404 wrk->func(ubi, wrk, 1);
1405 ubi->works_count -= 1;
1406 ubi_assert(ubi->works_count >= 0);
1407 }
1408}
1409
1410/**
1411 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
1412 * information.
1413 * @ubi: UBI device description object
1414 * @si: scanning information
1415 *
1416 * This function returns zero in case of success, and a negative error code in
1417 * case of failure.
1418 */
1419int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1420{
1421 int err;
1422 struct rb_node *rb1, *rb2;
1423 struct ubi_scan_volume *sv;
1424 struct ubi_scan_leb *seb, *tmp;
1425 struct ubi_wl_entry *e;
1426
1427
1428 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1429 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1430 spin_lock_init(&ubi->wl_lock);
1431 ubi->max_ec = si->max_ec;
1432 INIT_LIST_HEAD(&ubi->works);
1433
1434 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1435
1436 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
1437 if (IS_ERR(ubi->bgt_thread)) {
1438 err = PTR_ERR(ubi->bgt_thread);
1439 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
1440 err);
1441 return err;
1442 }
1443
1444 if (ubi_devices_cnt == 0) {
1445 wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
1446 sizeof(struct ubi_wl_entry),
1447 0, 0, NULL, NULL);
1448 if (!wl_entries_slab)
1449 return -ENOMEM;
1450 }
1451
1452 err = -ENOMEM;
1453 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1454 if (!ubi->lookuptbl)
1455 goto out_free;
1456
1457 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1458 cond_resched();
1459
1460 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1461 if (!e)
1462 goto out_free;
1463
1464 e->pnum = seb->pnum;
1465 e->ec = seb->ec;
1466 ubi->lookuptbl[e->pnum] = e;
1467 if (schedule_erase(ubi, e, 0)) {
1468 kmem_cache_free(wl_entries_slab, e);
1469 goto out_free;
1470 }
1471 }
1472
1473 list_for_each_entry(seb, &si->free, u.list) {
1474 cond_resched();
1475
1476 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1477 if (!e)
1478 goto out_free;
1479
1480 e->pnum = seb->pnum;
1481 e->ec = seb->ec;
1482 ubi_assert(e->ec >= 0);
1483 free_tree_add(ubi, e);
1484 ubi->lookuptbl[e->pnum] = e;
1485 }
1486
1487 list_for_each_entry(seb, &si->corr, u.list) {
1488 cond_resched();
1489
1490 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1491 if (!e)
1492 goto out_free;
1493
1494 e->pnum = seb->pnum;
1495 e->ec = seb->ec;
1496 ubi->lookuptbl[e->pnum] = e;
1497 if (schedule_erase(ubi, e, 0)) {
1498 kmem_cache_free(wl_entries_slab, e);
1499 goto out_free;
1500 }
1501 }
1502
1503 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1504 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1505 cond_resched();
1506
1507 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1508 if (!e)
1509 goto out_free;
1510
1511 e->pnum = seb->pnum;
1512 e->ec = seb->ec;
1513 ubi->lookuptbl[e->pnum] = e;
1514 if (!seb->scrub) {
1515 dbg_wl("add PEB %d EC %d to the used tree",
1516 e->pnum, e->ec);
1517 used_tree_add(ubi, e);
1518 } else {
1519 dbg_wl("add PEB %d EC %d to the scrub tree",
1520 e->pnum, e->ec);
1521 scrub_tree_add(ubi, e);
1522 }
1523 }
1524 }
1525
1526 if (WL_RESERVED_PEBS > ubi->avail_pebs) {
1527 ubi_err("no enough physical eraseblocks (%d, need %d)",
1528 ubi->avail_pebs, WL_RESERVED_PEBS);
1529 goto out_free;
1530 }
1531 ubi->avail_pebs -= WL_RESERVED_PEBS;
1532 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1533
1534 /* Schedule wear-leveling if needed */
1535 err = ensure_wear_leveling(ubi);
1536 if (err)
1537 goto out_free;
1538
1539 return 0;
1540
1541out_free:
1542 cancel_pending(ubi);
1543 tree_destroy(&ubi->used);
1544 tree_destroy(&ubi->free);
1545 tree_destroy(&ubi->scrub);
1546 kfree(ubi->lookuptbl);
1547 if (ubi_devices_cnt == 0)
1548 kmem_cache_destroy(wl_entries_slab);
1549 return err;
1550}
1551
1552/**
1553 * protection_trees_destroy - destroy the protection RB-trees.
1554 * @ubi: UBI device description object
1555 */
1556static void protection_trees_destroy(struct ubi_device *ubi)
1557{
1558 struct rb_node *rb;
1559 struct ubi_wl_prot_entry *pe;
1560
1561 rb = ubi->prot.aec.rb_node;
1562 while (rb) {
1563 if (rb->rb_left)
1564 rb = rb->rb_left;
1565 else if (rb->rb_right)
1566 rb = rb->rb_right;
1567 else {
1568 pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1569
1570 rb = rb_parent(rb);
1571 if (rb) {
1572 if (rb->rb_left == &pe->rb_aec)
1573 rb->rb_left = NULL;
1574 else
1575 rb->rb_right = NULL;
1576 }
1577
1578 kmem_cache_free(wl_entries_slab, pe->e);
1579 kfree(pe);
1580 }
1581 }
1582}
1583
1584/**
1585 * ubi_wl_close - close the wear-leveling unit.
1586 * @ubi: UBI device description object
1587 */
1588void ubi_wl_close(struct ubi_device *ubi)
1589{
1590 dbg_wl("disable \"%s\"", ubi->bgt_name);
1591 if (ubi->bgt_thread)
1592 kthread_stop(ubi->bgt_thread);
1593
1594 dbg_wl("close the UBI wear-leveling unit");
1595
1596 cancel_pending(ubi);
1597 protection_trees_destroy(ubi);
1598 tree_destroy(&ubi->used);
1599 tree_destroy(&ubi->free);
1600 tree_destroy(&ubi->scrub);
1601 kfree(ubi->lookuptbl);
1602 if (ubi_devices_cnt == 1)
1603 kmem_cache_destroy(wl_entries_slab);
1604}
1605
1606#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1607
1608/**
1609 * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
1610 * is correct.
1611 * @ubi: UBI device description object
1612 * @pnum: the physical eraseblock number to check
1613 * @ec: the erase counter to check
1614 *
1615 * This function returns zero if the erase counter of physical eraseblock @pnum
 1616 * is equal to @ec, %1 if not, and a negative error code if an error
1617 * occurred.
1618 */
1619static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
1620{
1621 int err;
1622 long long read_ec;
1623 struct ubi_ec_hdr *ec_hdr;
1624
1625 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1626 if (!ec_hdr)
1627 return -ENOMEM;
1628
1629 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1630 if (err && err != UBI_IO_BITFLIPS) {
1631 /* The header does not have to exist */
1632 err = 0;
1633 goto out_free;
1634 }
1635
1636 read_ec = ubi64_to_cpu(ec_hdr->ec);
1637 if (ec != read_ec) {
1638 ubi_err("paranoid check failed for PEB %d", pnum);
1639 ubi_err("read EC is %lld, should be %d", read_ec, ec);
1640 ubi_dbg_dump_stack();
1641 err = 1;
1642 } else
1643 err = 0;
1644
1645out_free:
1646 kfree(ec_hdr);
1647 return err;
1648}
1649
1650/**
1651 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
1652 * in a WL RB-tree.
1653 * @e: the wear-leveling entry to check
1654 * @root: the root of the tree
1655 *
1656 * This function returns zero if @e is in the @root RB-tree and %1 if it
1657 * is not.
1658 */
1659static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1660 struct rb_root *root)
1661{
1662 if (in_wl_tree(e, root))
1663 return 0;
1664
1665 ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1666 e->pnum, e->ec, root);
1667 ubi_dbg_dump_stack();
1668 return 1;
1669}
1670
1671#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 06e33786078d..4bee99ba7dbb 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -735,7 +735,6 @@ static void el_receive(struct net_device *dev)
735 else 735 else
736 { 736 {
737 skb_reserve(skb,2); /* Force 16 byte alignment */ 737 skb_reserve(skb,2); /* Force 16 byte alignment */
738 skb->dev = dev;
739 /* 738 /*
740 * The read increments through the bytes. The interrupt 739 * The read increments through the bytes. The interrupt
741 * handler will fix the pointer when it returns to 740 * handler will fix the pointer when it returns to
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 702bfb2a5e99..e985a85a5623 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -615,7 +615,6 @@ static void receive_packet(struct net_device *dev, int len)
615 if (test_and_set_bit(0, (void *) &adapter->dmaing)) 615 if (test_and_set_bit(0, (void *) &adapter->dmaing))
616 printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction); 616 printk(KERN_ERR "%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);
617 617
618 skb->dev = dev;
619 adapter->current_dma.direction = 0; 618 adapter->current_dma.direction = 0;
620 adapter->current_dma.length = rlen; 619 adapter->current_dma.length = rlen;
621 adapter->current_dma.skb = skb; 620 adapter->current_dma.skb = skb;
@@ -1026,7 +1025,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb)
1026 adapter->current_dma.start_time = jiffies; 1025 adapter->current_dma.start_time = jiffies;
1027 1026
1028 if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { 1027 if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
1029 memcpy(adapter->dma_buffer, skb->data, nlen); 1028 skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
1030 memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); 1029 memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
1031 target = isa_virt_to_bus(adapter->dma_buffer); 1030 target = isa_virt_to_bus(adapter->dma_buffer);
1032 } 1031 }
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 54e1d5aebed3..eed4299dc426 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -873,7 +873,6 @@ static void el16_rx(struct net_device *dev)
873 } 873 }
874 874
875 skb_reserve(skb,2); 875 skb_reserve(skb,2);
876 skb->dev = dev;
877 876
878 /* 'skb->data' points to the start of sk_buff data area. */ 877 /* 'skb->data' points to the start of sk_buff data area. */
879 memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); 878 memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index f791bf026e51..c7511c4d3b68 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1091,7 +1091,6 @@ el3_rx(struct net_device *dev)
1091 printk("Receiving packet size %d status %4.4x.\n", 1091 printk("Receiving packet size %d status %4.4x.\n",
1092 pkt_len, rx_status); 1092 pkt_len, rx_status);
1093 if (skb != NULL) { 1093 if (skb != NULL) {
1094 skb->dev = dev;
1095 skb_reserve(skb, 2); /* Align IP on 16 byte */ 1094 skb_reserve(skb, 2); /* Align IP on 16 byte */
1096 1095
1097 /* 'skb->data' points to the start of sk_buff data area. */ 1096 /* 'skb->data' points to the start of sk_buff data area. */
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index c307ce66145c..290166d5e7d1 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1292,7 +1292,6 @@ static int corkscrew_rx(struct net_device *dev)
1292 printk("Receiving packet size %d status %4.4x.\n", 1292 printk("Receiving packet size %d status %4.4x.\n",
1293 pkt_len, rx_status); 1293 pkt_len, rx_status);
1294 if (skb != NULL) { 1294 if (skb != NULL) {
1295 skb->dev = dev;
1296 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1295 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1297 /* 'skb_put()' points to the start of sk_buff data area. */ 1296 /* 'skb_put()' points to the start of sk_buff data area. */
1298 insl(ioaddr + RX_FIFO, 1297 insl(ioaddr + RX_FIFO,
@@ -1363,7 +1362,6 @@ static int boomerang_rx(struct net_device *dev)
1363 copying to a properly sized skbuff. */ 1362 copying to a properly sized skbuff. */
1364 if (pkt_len < rx_copybreak 1363 if (pkt_len < rx_copybreak
1365 && (skb = dev_alloc_skb(pkt_len + 4)) != 0) { 1364 && (skb = dev_alloc_skb(pkt_len + 4)) != 0) {
1366 skb->dev = dev;
1367 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1365 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1368 /* 'skb_put()' points to the start of sk_buff data area. */ 1366 /* 'skb_put()' points to the start of sk_buff data area. */
1369 memcpy(skb_put(skb, pkt_len), 1367 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 17d61eb0a7e5..da1a22c13865 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -988,7 +988,6 @@ static void elmc_rcv_int(struct net_device *dev)
988 rbd->status = 0; 988 rbd->status = 0;
989 skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); 989 skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
990 if (skb != NULL) { 990 if (skb != NULL) {
991 skb->dev = dev;
992 skb_reserve(skb, 2); /* 16 byte alignment */ 991 skb_reserve(skb, 2); /* 16 byte alignment */
993 skb_put(skb,totlen); 992 skb_put(skb,totlen);
994 eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); 993 eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
@@ -1146,7 +1145,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
1146 1145
1147 if (len != skb->len) 1146 if (len != skb->len)
1148 memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); 1147 memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
1149 memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len); 1148 skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
1150 1149
1151#if (NUM_XMIT_BUFFS == 1) 1150#if (NUM_XMIT_BUFFS == 1)
1152#ifdef NO_NOPCOMMANDS 1151#ifdef NO_NOPCOMMANDS
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6c7437e60bd2..c7b571be20e0 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1189,7 +1189,6 @@ static void mc32_rx_ring(struct net_device *dev)
1189 } 1189 }
1190 1190
1191 skb->protocol=eth_type_trans(skb,dev); 1191 skb->protocol=eth_type_trans(skb,dev);
1192 skb->dev=dev;
1193 dev->last_rx = jiffies; 1192 dev->last_rx = jiffies;
1194 lp->net_stats.rx_packets++; 1193 lp->net_stats.rx_packets++;
1195 lp->net_stats.rx_bytes += length; 1194 lp->net_stats.rx_bytes += length;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index b406ecfa7268..80924f76dee8 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2414,7 +2414,6 @@ static int vortex_rx(struct net_device *dev)
2414 printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", 2414 printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
2415 pkt_len, rx_status); 2415 pkt_len, rx_status);
2416 if (skb != NULL) { 2416 if (skb != NULL) {
2417 skb->dev = dev;
2418 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2417 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2419 /* 'skb_put()' points to the start of sk_buff data area. */ 2418 /* 'skb_put()' points to the start of sk_buff data area. */
2420 if (vp->bus_master && 2419 if (vp->bus_master &&
@@ -2491,7 +2490,6 @@ boomerang_rx(struct net_device *dev)
2491 /* Check if the packet is long enough to just accept without 2490 /* Check if the packet is long enough to just accept without
2492 copying to a properly sized skbuff. */ 2491 copying to a properly sized skbuff. */
2493 if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { 2492 if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
2494 skb->dev = dev;
2495 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2493 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2496 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2494 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2497 /* 'skb_put()' points to the start of sk_buff data area. */ 2495 /* 'skb_put()' points to the start of sk_buff data area. */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 1b3d11ed6cff..d396f996af57 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -331,7 +331,6 @@ static int lance_rx (struct net_device *dev)
331 return 0; 331 return 0;
332 } 332 }
333 333
334 skb->dev = dev;
335 skb_reserve (skb, 2); /* 16 byte align */ 334 skb_reserve (skb, 2); /* 16 byte align */
336 skb_put (skb, len); /* make room */ 335 skb_put (skb, len); /* make room */
337 eth_copy_and_sum(skb, 336 eth_copy_and_sum(skb,
@@ -568,7 +567,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
568 567
569 if (skb->len < ETH_ZLEN) 568 if (skb->len < ETH_ZLEN)
570 memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); 569 memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
571 memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); 570 skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen);
572 571
573 /* Now, give the packet to the lance */ 572 /* Now, give the packet to the lance */
574 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); 573 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 12c8453f44bc..e8c9f27817b0 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -573,7 +573,6 @@ rx_status_loop:
573 } 573 }
574 574
575 skb_reserve(new_skb, RX_OFFSET); 575 skb_reserve(new_skb, RX_OFFSET);
576 new_skb->dev = dev;
577 576
578 pci_unmap_single(cp->pdev, mapping, 577 pci_unmap_single(cp->pdev, mapping,
579 buflen, PCI_DMA_FROMDEVICE); 578 buflen, PCI_DMA_FROMDEVICE);
@@ -807,7 +806,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
807 if (mss) 806 if (mss)
808 flags |= LargeSend | ((mss & MSSMask) << MSSShift); 807 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
809 else if (skb->ip_summed == CHECKSUM_PARTIAL) { 808 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 const struct iphdr *ip = skb->nh.iph; 809 const struct iphdr *ip = ip_hdr(skb);
811 if (ip->protocol == IPPROTO_TCP) 810 if (ip->protocol == IPPROTO_TCP)
812 flags |= IPCS | TCPCS; 811 flags |= IPCS | TCPCS;
813 else if (ip->protocol == IPPROTO_UDP) 812 else if (ip->protocol == IPPROTO_UDP)
@@ -826,7 +825,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
826 u32 first_len, first_eor; 825 u32 first_len, first_eor;
827 dma_addr_t first_mapping; 826 dma_addr_t first_mapping;
828 int frag, first_entry = entry; 827 int frag, first_entry = entry;
829 const struct iphdr *ip = skb->nh.iph; 828 const struct iphdr *ip = ip_hdr(skb);
830 829
831 /* We must give this initial chunk to the device last. 830 /* We must give this initial chunk to the device last.
832 * Otherwise we could race with the device. 831 * Otherwise we could race with the device.
@@ -1082,7 +1081,6 @@ static int cp_refill_rx (struct cp_private *cp)
1082 if (!skb) 1081 if (!skb)
1083 goto err_out; 1082 goto err_out;
1084 1083
1085 skb->dev = cp->dev;
1086 skb_reserve(skb, RX_OFFSET); 1084 skb_reserve(skb, RX_OFFSET);
1087 1085
1088 mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, 1086 mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 99304b2aa86e..a844b1fe2dc4 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1904,10 +1904,10 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
1904 u32 left = RX_BUF_LEN - offset; 1904 u32 left = RX_BUF_LEN - offset;
1905 1905
1906 if (size > left) { 1906 if (size > left) {
1907 memcpy(skb->data, ring + offset, left); 1907 skb_copy_to_linear_data(skb, ring + offset, left);
1908 memcpy(skb->data+left, ring, size - left); 1908 skb_copy_to_linear_data_offset(skb, left, ring, size - left);
1909 } else 1909 } else
1910 memcpy(skb->data, ring + offset, size); 1910 skb_copy_to_linear_data(skb, ring + offset, size);
1911} 1911}
1912#endif 1912#endif
1913 1913
@@ -2013,7 +2013,6 @@ no_early_rx:
2013 2013
2014 skb = dev_alloc_skb (pkt_size + 2); 2014 skb = dev_alloc_skb (pkt_size + 2);
2015 if (likely(skb)) { 2015 if (likely(skb)) {
2016 skb->dev = dev;
2017 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 2016 skb_reserve (skb, 2); /* 16 byte align the IP fields. */
2018#if RX_BUF_IDX == 3 2017#if RX_BUF_IDX == 3
2019 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2018 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 640d7ca2ebcf..3ff1155459a3 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -830,7 +830,6 @@ memory_squeeze:
830 lp->stats.rx_dropped++; 830 lp->stats.rx_dropped++;
831 } 831 }
832 else { 832 else {
833 skb->dev = dev;
834 if (!rx_in_place) { 833 if (!rx_in_place) {
835 /* 16 byte align the data fields */ 834 /* 16 byte align the data fields */
836 skb_reserve(skb, 2); 835 skb_reserve(skb, 2);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 753695b624b3..890c85e2b4ae 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2263,6 +2263,7 @@ config GIANFAR
2263 tristate "Gianfar Ethernet" 2263 tristate "Gianfar Ethernet"
2264 depends on 85xx || 83xx || PPC_86xx 2264 depends on 85xx || 83xx || PPC_86xx
2265 select PHYLIB 2265 select PHYLIB
2266 select CRC32
2266 help 2267 help
2267 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, 2268 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
2268 and MPC86xx family of chips, and the FEC on the 8540. 2269 and MPC86xx family of chips, and the FEC on the 8540.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 33af833667da..58527322a39d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -206,7 +206,7 @@ obj-$(CONFIG_TR) += tokenring/
206obj-$(CONFIG_WAN) += wan/ 206obj-$(CONFIG_WAN) += wan/
207obj-$(CONFIG_ARCNET) += arcnet/ 207obj-$(CONFIG_ARCNET) += arcnet/
208obj-$(CONFIG_NET_PCMCIA) += pcmcia/ 208obj-$(CONFIG_NET_PCMCIA) += pcmcia/
209obj-$(CONFIG_NET_RADIO) += wireless/ 209obj-y += wireless/
210obj-$(CONFIG_NET_TULIP) += tulip/ 210obj-$(CONFIG_NET_TULIP) += tulip/
211obj-$(CONFIG_HAMRADIO) += hamradio/ 211obj-$(CONFIG_HAMRADIO) += hamradio/
212obj-$(CONFIG_IRDA) += irda/ 212obj-$(CONFIG_IRDA) += irda/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index d76548e75350..1226cbba0450 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -320,7 +320,6 @@ static int lance_rx (struct net_device *dev)
320 return 0; 320 return 0;
321 } 321 }
322 322
323 skb->dev = dev;
324 skb_reserve (skb, 2); /* 16 byte align */ 323 skb_reserve (skb, 2); /* 16 byte align */
325 skb_put (skb, len); /* make room */ 324 skb_put (skb, len); /* make room */
326 eth_copy_and_sum(skb, 325 eth_copy_and_sum(skb,
@@ -599,7 +598,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
599 ib->btx_ring [entry].length = (-len) | 0xf000; 598 ib->btx_ring [entry].length = (-len) | 0xf000;
600 ib->btx_ring [entry].misc = 0; 599 ib->btx_ring [entry].misc = 0;
601 600
602 memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); 601 skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
603 602
604 /* Clear the slack of the packet, do I need this? */ 603 /* Clear the slack of the packet, do I need this? */
605 if (len != skblen) 604 if (len != skblen)
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 7138e0e025bc..7122b7ba8d61 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2027,7 +2027,6 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2027 */ 2027 */
2028 csum = retdesc->tcp_udp_csum; 2028 csum = retdesc->tcp_udp_csum;
2029 2029
2030 skb->dev = dev;
2031 skb->protocol = eth_type_trans(skb, dev); 2030 skb->protocol = eth_type_trans(skb, dev);
2032 2031
2033 /* 2032 /*
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 962c954c2d56..675fe918421b 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -798,9 +798,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
798 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], 798 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
799 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); 799 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
800 skb_put(skb, pkt_len); 800 skb_put(skb, pkt_len);
801 skb->dev = dev;
802 lp->rx_skbuff[rx_index] = new_skb; 801 lp->rx_skbuff[rx_index] = new_skb;
803 new_skb->dev = dev;
804 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, 802 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
805 new_skb->data, 803 new_skb->data,
806 lp->rx_buff_len-2, 804 lp->rx_buff_len-2,
@@ -926,9 +924,7 @@ static int amd8111e_rx(struct net_device *dev)
926 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], 924 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
927 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); 925 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
928 skb_put(skb, pkt_len); 926 skb_put(skb, pkt_len);
929 skb->dev = dev;
930 lp->rx_skbuff[rx_index] = new_skb; 927 lp->rx_skbuff[rx_index] = new_skb;
931 new_skb->dev = dev;
932 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, 928 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
933 new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE); 929 new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
934 930
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index dba5e5165452..da6ffa8cd81e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -853,9 +853,9 @@ static void cops_rx(struct net_device *dev)
853 return; 853 return;
854 } 854 }
855 855
856 skb->mac.raw = skb->data; /* Point to entire packet. */ 856 skb_reset_mac_header(skb); /* Point to entire packet. */
857 skb_pull(skb,3); 857 skb_pull(skb,3);
858 skb->h.raw = skb->data; /* Point to data (Skip header). */ 858 skb_reset_transport_header(skb); /* Point to data (Skip header). */
859 859
860 /* Update the counters. */ 860 /* Update the counters. */
861 lp->stats.rx_packets++; 861 lp->stats.rx_packets++;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 2ea44ce49810..6a6cbd331a16 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -770,13 +770,13 @@ static int sendup_buffer (struct net_device *dev)
 	skb->data[0] = dnode;
 	skb->data[1] = snode;
 	skb->data[2] = llaptype;
-	skb->mac.raw = skb->data;	/* save pointer to llap header */
+	skb_reset_mac_header(skb);	/* save pointer to llap header */
 	skb_pull(skb,3);
 
 	/* copy ddp(s,e)hdr + contents */
-	memcpy(skb->data,(void*)ltdmabuf,len);
+	skb_copy_to_linear_data(skb, ltdmabuf, len);
 
-	skb->h.raw = skb->data;
+	skb_reset_transport_header(skb);
 
 	stats->rx_packets++;
 	stats->rx_bytes+=skb->len;
@@ -917,13 +917,14 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	int i;
 	struct lt_sendlap cbuf;
+	unsigned char *hdr;
 
 	cbuf.command = LT_SENDLAP;
 	cbuf.dnode = skb->data[0];
 	cbuf.laptype = skb->data[2];
 	skb_pull(skb,3);	/* skip past LLAP header */
 	cbuf.length = skb->len;	/* this is host order */
-	skb->h.raw=skb->data;
+	skb_reset_transport_header(skb);
 
 	if(debug & DEBUG_UPPER) {
 		printk("command ");
@@ -932,11 +933,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
 		printk("\n");
 	}
 
-	do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len);
+	hdr = skb_transport_header(skb);
+	do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
 
 	if(debug & DEBUG_UPPER) {
 		printk("sent %d ddp bytes\n",skb->len);
-		for(i=0;i<skb->len;i++) printk("%02x ",skb->h.raw[i]);
+		for (i = 0; i < skb->len; i++)
+			printk("%02x ", hdr[i]);
 		printk("\n");
 	}
 
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 6318814a11a8..e0a18e7c73cb 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -110,7 +110,7 @@ static void rx(struct net_device *dev, int bufnum,
 
 	pkt = (struct archdr *) skb->data;
 
-	skb->mac.raw = skb->data;
+	skb_reset_mac_header(skb);
 	skb_pull(skb, ARC_HDR_SIZE);
 
 	/* up to sizeof(pkt->soft) has already been copied from the card */
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 83004fdab0a4..681e20b8466f 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -519,9 +519,12 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
 		 * real header when we do rebuild_header.
 		 */
 		*(uint16_t *) skb_push(skb, 2) = type;
-		if (skb->nh.raw - skb->mac.raw != 2)
+		/*
+		 * XXX: Why not use skb->mac_len?
+		 */
+		if (skb->network_header - skb->mac_header != 2)
 			BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n",
-			       (int)(skb->nh.raw - skb->mac.raw));
+			       (int)(skb->network_header - skb->mac_header));
 		return -2;	/* return error -- can't transmit yet! */
 	}
 	else {
@@ -554,11 +557,13 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
 	unsigned short type;
 	uint8_t daddr=0;
 	struct ArcProto *proto;
-
-	if (skb->nh.raw - skb->mac.raw != 2) {
+	/*
+	 * XXX: Why not use skb->mac_len?
+	 */
+	if (skb->network_header - skb->mac_header != 2) {
 		BUGMSG(D_NORMAL,
 		       "rebuild_header: shouldn't be here! (hdrsize=%d)\n",
-		       (int)(skb->nh.raw - skb->mac.raw));
+		       (int)(skb->network_header - skb->mac_header));
 		return 0;
 	}
 	type = *(uint16_t *) skb_pull(skb, 2);
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 66485585ab39..cc4610db6395 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -122,10 +122,8 @@ static void rx(struct net_device *dev, int bufnum,
 	}
 	skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
 	skb->dev = dev;
-
-	pkt = (struct archdr *) skb->data;
-
-	skb->mac.raw = skb->data;
+	skb_reset_mac_header(skb);
+	pkt = (struct archdr *)skb_mac_header(skb);
 	skb_pull(skb, ARC_HDR_SIZE);
 
 	/* up to sizeof(pkt->soft) has already been copied from the card */
@@ -270,13 +268,13 @@ static int ack_tx(struct net_device *dev, int acked)
 	skb_put(ackskb, length + ARC_HDR_SIZE );
 	ackskb->dev = dev;
 
-	ackpkt = (struct archdr *) ackskb->data;
-
-	ackskb->mac.raw = ackskb->data;
+	skb_reset_mac_header(ackskb);
+	ackpkt = (struct archdr *)skb_mac_header(ackskb);
 	/* skb_pull(ackskb, ARC_HDR_SIZE); */
 
 
-	memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap));
+	skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
+				  ARC_HDR_SIZE + sizeof(struct arc_cap));
 	ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
 	ackpkt->soft.cap.mes.ack=acked;
 
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 6d6c69f036ef..2de8877ece29 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -94,7 +94,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev)
 	int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
 
 	/* Pull off the arcnet header. */
-	skb->mac.raw = skb->data;
+	skb_reset_mac_header(skb);
 	skb_pull(skb, hdr_size);
 
 	if (pkt->hard.dest == 0)
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index bee34226abfa..460a095000c2 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -96,7 +96,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev)
 	int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
 
 	/* Pull off the arcnet header. */
-	skb->mac.raw = skb->data;
+	skb_reset_mac_header(skb);
 	skb_pull(skb, hdr_size);
 
 	if (pkt->hard.dest == 0)
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 9dfc09b181c1..a0e68e718531 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -743,7 +743,6 @@ static int ariadne_rx(struct net_device *dev)
 	}
 
 
-	skb->dev = dev;
 	skb_reserve(skb,2);		/* 16 byte align */
 	skb_put(skb,pkt_len);		/* Make room */
 	eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index ddd12d44ff22..8f0d7ce503c9 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -526,7 +526,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
 		skb = dev_alloc_skb(len + 2);
 
 		if (skb) {
-			skb->dev = dev;
 			skb_reserve(skb, 2);
 
 			am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 1621b8fe35cf..152fa7a042b8 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -858,7 +858,6 @@ static void at91ether_rx(struct net_device *dev)
 		skb_reserve(skb, 2);
 		memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 
-		skb->dev = dev;
 		skb->protocol = eth_type_trans(skb, dev);
 		dev->last_rx = jiffies;
 		lp->stats.rx_bytes += pktlen;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index dd698b033a62..2438c5bff237 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -255,7 +255,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
 
 		skb = dev_alloc_skb(length + 2);
 		if (likely(skb != NULL)) {
-			skb->dev = dev;
 			skb_reserve(skb, 2);
 			dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
 						length, DMA_FROM_DEVICE);
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index a2921882eba8..f075cebe84ad 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -875,7 +875,6 @@ ether1_recv_done (struct net_device *dev)
 		skb = dev_alloc_skb (length + 2);
 
 		if (skb) {
-			skb->dev = dev;
 			skb_reserve (skb, 2);
 
 			ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 841178343a07..32da2eb9bcee 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -661,7 +661,6 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
 		if (skb) {
 			unsigned char *buf;
 
-			skb->dev = dev;
 			skb_reserve(skb, 2);
 			buf = skb_put(skb, length);
 			ether3_readbuffer(dev, buf + 12, length - 12);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 56ae8babd919..bed8e0ebaf19 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -768,7 +768,6 @@ net_rx(struct net_device *dev)
 			lp->stats.rx_dropped++;
 			break;
 		}
-		skb->dev = dev;
 		skb_reserve(skb,2);
 
 		insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c
index 4e3bf6a1f22c..3d87bd2b4194 100644
--- a/drivers/net/atari_bionet.c
+++ b/drivers/net/atari_bionet.c
@@ -453,7 +453,8 @@ bionet_send_packet(struct sk_buff *skb, struct net_device *dev) {
 	stdma_lock(bionet_intr, NULL);
 	local_irq_restore(flags);
 	if( !STRAM_ADDR(buf+length-1) ) {
-		memcpy(nic_packet->buffer, skb->data, length);
+		skb_copy_from_linear_data(skb, nic_packet->buffer,
+					  length);
 		buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer;
 	}
 
@@ -544,13 +545,13 @@ bionet_poll_rx(struct net_device *dev) {
 			break;
 		}
 
-		skb->dev = dev;
 		skb_reserve( skb, 2 );		/* 16 Byte align  */
 		skb_put( skb, pkt_len );	/* make room */
 
 		/* 'skb->data' points to the start of sk_buff data area.
 		 */
-		memcpy(skb->data, nic_packet->buffer, pkt_len);
+		skb_copy_to_linear_data(skb, nic_packet->buffer,
+					pkt_len);
 		skb->protocol = eth_type_trans( skb, dev );
 		netif_rx(skb);
 		dev->last_rx = jiffies;
diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c
index 3b5436149286..54714409a09b 100644
--- a/drivers/net/atari_pamsnet.c
+++ b/drivers/net/atari_pamsnet.c
@@ -717,7 +717,8 @@ pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) {
 
 	local_irq_restore(flags);
 	if( !STRAM_ADDR(buf+length-1) ) {
-		memcpy(nic_packet->buffer, skb->data, length);
+		skb_copy_from_linear_data(skb, nic_packet->buffer,
+					  length);
 		buf = (unsigned long)phys_nic_packet;
 	}
 
@@ -792,7 +793,8 @@ pamsnet_poll_rx(struct net_device *dev) {
 
 		/* 'skb->data' points to the start of sk_buff data area.
 		 */
-		memcpy(skb->data, nic_packet->buffer, pkt_len);
+		skb_copy_to_linear_data(skb, nic_packet->buffer,
+					pkt_len);
 		netif_rx(skb);
 		dev->last_rx = jiffies;
 		lp->stats.rx_packets++;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 7e37ac86a69a..dfa8b9ba4c80 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1047,7 +1047,6 @@ static int lance_rx( struct net_device *dev )
 					  pkt_len );
 			}
 
-			skb->dev = dev;
 			skb_reserve( skb, 2 );		/* 16 byte align */
 			skb_put( skb, pkt_len );	/* Make room */
 			lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 8606eac5bec8..4b1d4d153ecf 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -408,7 +408,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 {
 	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
-	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct page *page;
 	unsigned long offset;
@@ -444,7 +443,6 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 		 * the 14 byte MAC header is removed
 		 */
 		skb_reserve(skb, NET_IP_ALIGN);
-		skb->dev = netdev;
 
 		buffer_info->alloced = 1;
 		buffer_info->skb = skb;
@@ -1296,19 +1294,21 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
 	}
 
 	if (skb->protocol == ntohs(ETH_P_IP)) {
-		skb->nh.iph->tot_len = 0;
-		skb->nh.iph->check = 0;
-		skb->h.th->check =
-		    ~csum_tcpudp_magic(skb->nh.iph->saddr,
-		    skb->nh.iph->daddr, 0,
-		    IPPROTO_TCP, 0);
-		ipofst = skb->nh.raw - skb->data;
+		struct iphdr *iph = ip_hdr(skb);
+
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP,
+							 0);
+		ipofst = skb_network_offset(skb);
 		if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
 			tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
 
-		tso->tsopl |= (skb->nh.iph->ihl &
+		tso->tsopl |= (iph->ihl &
 			CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
-		tso->tsopl |= ((skb->h.th->doff << 2) &
+		tso->tsopl |= (tcp_hdrlen(skb) &
 			TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
 		tso->tsopl |= (skb_shinfo(skb)->gso_size &
 			TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
@@ -1327,8 +1327,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
 	u8 css, cso;
 
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		cso = skb->h.raw - skb->data;
-		css = (skb->h.raw + skb->csum_offset) - skb->data;
+		cso = skb_transport_offset(skb);
+		css = cso + skb->csum_offset;
 		if (unlikely(cso & 0x1)) {
 			printk(KERN_DEBUG "%s: payload offset != even number\n",
 				atl1_driver_name);
@@ -1370,8 +1370,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
 
 	if (tcp_seg) {
 		/* TSO/GSO */
-		proto_hdr_len =
-		    ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		buffer_info->length = proto_hdr_len;
 		page = virt_to_page(skb->data);
 		offset = (unsigned long)skb->data & ~PAGE_MASK;
@@ -1563,8 +1562,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
 		if (skb->protocol == htons(ETH_P_IP)) {
-			proto_hdr_len = ((skb->h.raw - skb->data) +
-					 (skb->h.th->doff << 2));
+			proto_hdr_len = (skb_transport_offset(skb) +
+					 tcp_hdrlen(skb));
 			if (unlikely(proto_hdr_len > len)) {
 				dev_kfree_skb_any(skb);
 				return NETDEV_TX_OK;
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 2d306fcb7f36..18aba838c1ff 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -793,7 +793,6 @@ static void net_rx(struct net_device *dev)
 		lp->stats.rx_dropped++;
 		goto done;
 	}
-	skb->dev = dev;
 
 	skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 	read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 69ae229b680e..d10fb80e9a63 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1125,7 +1125,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	pDB = aup->tx_db_inuse[aup->tx_head];
-	memcpy((void *)pDB->vaddr, skb->data, skb->len);
+	skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
 	if (skb->len < ETH_ZLEN) {
 		for (i=skb->len; i<ETH_ZLEN; i++) {
 			((char *)pDB->vaddr)[i] = 0;
@@ -1205,7 +1205,6 @@ static int au1000_rx(struct net_device *dev)
 			aup->stats.rx_dropped++;
 			continue;
 		}
-		skb->dev = dev;
 		skb_reserve(skb, 2);	/* 16 byte IP header align */
 		eth_copy_and_sum(skb,
 			(unsigned char *)pDB->vaddr, frmlen, 0);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index d742bfe24471..879a2fff474e 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -825,12 +825,11 @@ static int b44_rx(struct b44 *bp, int budget)
 		if (copy_skb == NULL)
 			goto drop_it_no_recycle;
 
-		copy_skb->dev = bp->dev;
 		skb_reserve(copy_skb, 2);
 		skb_put(copy_skb, len);
 		/* DMA sync done above, copy just the actual packet */
-		memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
-
+		skb_copy_from_linear_data_offset(skb, bp->rx_offset,
+						 copy_skb->data, len);
 		skb = copy_skb;
 	}
 	skb->ip_summed = CHECKSUM_NONE;
@@ -1007,7 +1006,8 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto err_out;
 		}
 
-		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
+		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
+					  skb->len);
 		dev_kfree_skb_any(skb);
 		skb = bounce_skb;
 	}
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index c143304dcff5..4612725965df 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -715,7 +715,6 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
 		if (skb != NULL) {
 			nb -= ETHERCRC;
 			skb_put(skb, nb);
-			skb->dev = dev;
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0b7aded8dcfd..f98a2205a090 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.5.7"
-#define DRV_MODULE_RELDATE	"March 29, 2007"
+#define DRV_MODULE_VERSION	"1.5.8"
+#define DRV_MODULE_RELDATE	"April 24, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -1884,10 +1884,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 				goto reuse_rx;
 
 			/* aligned copy */
-			memcpy(new_skb->data,
-				skb->data + bp->rx_offset - 2,
-				len + 2);
-
+			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
+							 new_skb->data, len + 2);
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
 
@@ -3421,6 +3419,9 @@ bnx2_init_chip(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
+	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
+		val |= BNX2_MQ_CONFIG_HALT_DIS;
+
 	REG_WR(bp, BNX2_MQ_CONFIG, val);
 
 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
@@ -4510,6 +4511,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((mss = skb_shinfo(skb)->gso_size) &&
 		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
 		u32 tcp_opt_len, ip_tcp_len;
+		struct iphdr *iph;
 
 		if (skb_header_cloned(skb) &&
 			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -4517,25 +4519,23 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			return NETDEV_TX_OK;
 		}
 
-		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
 
 		tcp_opt_len = 0;
-		if (skb->h.th->doff > 5) {
-			tcp_opt_len = (skb->h.th->doff - 5) << 2;
-		}
-		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
+		if (tcp_hdr(skb)->doff > 5)
+			tcp_opt_len = tcp_optlen(skb);
+
+		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
 
-		skb->nh.iph->check = 0;
-		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-		skb->h.th->check =
-			~csum_tcpudp_magic(skb->nh.iph->saddr,
-					   skb->nh.iph->daddr,
-					   0, IPPROTO_TCP, 0);
-
-		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
-			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
-				(tcp_opt_len >> 2)) << 8;
+		iph = ip_hdr(skb);
+		iph->check = 0;
+		iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP, 0);
+		if (tcp_opt_len || (iph->ihl > 5)) {
+			vlan_tag_flags |= ((iph->ihl - 5) +
+					   (tcp_opt_len >> 2)) << 8;
 		}
 	}
 	else
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index ccbdf81c6599..878eee58f12a 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6518,6 +6518,7 @@ struct bnx2 {
 #define CHIP_ID_5708_B0			0x57081000
 #define CHIP_ID_5708_B1			0x57081010
 #define CHIP_ID_5709_A0			0x57090000
+#define CHIP_ID_5709_A1			0x57090010
 
 #define CHIP_BOND_ID(bp)		(((bp)->chip_id) & 0xf)
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3fb354d9c515..7e03f41ae2c2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -884,8 +884,8 @@ static int ad_lacpdu_send(struct port *port)
 	}
 
 	skb->dev = slave->dev;
-	skb->mac.raw = skb->data;
-	skb->nh.raw = skb->data + ETH_HLEN;
+	skb_reset_mac_header(skb);
+	skb->network_header = skb->mac_header + ETH_HLEN;
 	skb->protocol = PKT_TYPE_LACPDU;
 	skb->priority = TC_PRIO_CONTROL;
 
@@ -928,8 +928,8 @@ static int ad_marker_send(struct port *port, struct marker *marker)
 	skb_reserve(skb, 16);
 
 	skb->dev = slave->dev;
-	skb->mac.raw = skb->data;
-	skb->nh.raw = skb->data + ETH_HLEN;
+	skb_reset_mac_header(skb);
+	skb->network_header = skb->mac_header + ETH_HLEN;
 	skb->protocol = PKT_TYPE_LACPDU;
 
 	marker_header = (struct marker_header *)skb_put(skb, length);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 217a2eedee0a..92c3b6f6a8e7 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -104,10 +104,15 @@ struct arp_pkt {
 };
 #pragma pack()
 
+static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
+{
+	return (struct arp_pkt *)skb_network_header(skb);
+}
+
 /* Forward declaration */
 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
 
-static inline u8 _simple_hash(u8 *hash_start, int hash_size)
+static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
 {
 	int i;
 	u8 hash = 0;
@@ -613,7 +618,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip)
 static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-	struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw;
+	struct arp_pkt *arp = arp_pkt(skb);
 	struct slave *assigned_slave;
 	struct rlb_client_info *client_info;
 	u32 hash_index = 0;
@@ -701,7 +706,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
  */
 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 {
-	struct arp_pkt *arp = (struct arp_pkt *)skb->nh.raw;
+	struct arp_pkt *arp = arp_pkt(skb);
 	struct slave *tx_slave = NULL;
 
 	if (arp->op_code == __constant_htons(ARPOP_REPLY)) {
@@ -890,8 +895,8 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 	data = skb_put(skb, size);
 	memcpy(data, &pkt, size);
 
-	skb->mac.raw = data;
-	skb->nh.raw = data + ETH_HLEN;
+	skb_reset_mac_header(skb);
+	skb->network_header = skb->mac_header + ETH_HLEN;
 	skb->protocol = pkt.type;
 	skb->priority = TC_PRIO_CONTROL;
 	skb->dev = slave->dev;
@@ -1263,10 +1268,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	int hash_size = 0;
 	int do_tx_balance = 1;
 	u32 hash_index = 0;
-	u8 *hash_start = NULL;
+	const u8 *hash_start = NULL;
 	int res = 1;
 
-	skb->mac.raw = (unsigned char *)skb->data;
+	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
 
 	/* make sure that the curr_active_slave and the slaves list do
@@ -1280,15 +1285,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	}
 
 	switch (ntohs(skb->protocol)) {
-	case ETH_P_IP:
+	case ETH_P_IP: {
+		const struct iphdr *iph = ip_hdr(skb);
+
 		if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) ||
-		    (skb->nh.iph->daddr == ip_bcast) ||
-		    (skb->nh.iph->protocol == IPPROTO_IGMP)) {
+		    (iph->daddr == ip_bcast) ||
+		    (iph->protocol == IPPROTO_IGMP)) {
 			do_tx_balance = 0;
 			break;
 		}
-		hash_start = (char*)&(skb->nh.iph->daddr);
-		hash_size = sizeof(skb->nh.iph->daddr);
+		hash_start = (char *)&(iph->daddr);
+		hash_size = sizeof(iph->daddr);
+	}
 		break;
 	case ETH_P_IPV6:
 		if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
@@ -1296,8 +1304,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		hash_start = (char*)&(skb->nh.ipv6h->daddr);
-		hash_size = sizeof(skb->nh.ipv6h->daddr);
+		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
+		hash_size = sizeof(ipv6_hdr(skb)->daddr);
 		break;
 	case ETH_P_IPX:
 		if (ipx_hdr(skb)->ipx_checksum !=
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e4724d874e7c..cea3783c92c5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2524,7 +2524,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
 				  (2 * sizeof(u32)))))
 		goto out_unlock;
 
-	arp = skb->nh.arph;
+	arp = arp_hdr(skb);
 	if (arp->ar_hln != dev->addr_len ||
 	    skb->pkt_type == PACKET_OTHERHOST ||
 	    skb->pkt_type == PACKET_LOOPBACK ||
@@ -3476,7 +3476,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
 				     struct net_device *bond_dev, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
-	struct iphdr *iph = skb->nh.iph;
+	struct iphdr *iph = ip_hdr(skb);
 	u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl);
 	int layer4_xor = 0;
 
@@ -3640,9 +3640,8 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
 	read_lock_bh(&bond->lock);
 
 	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev->get_stats) {
-			sstats = slave->dev->get_stats(slave->dev);
-
+		sstats = slave->dev->get_stats(slave->dev);
+		if (sstats) {
 			stats->rx_packets += sstats->rx_packets;
 			stats->rx_bytes += sstats->rx_bytes;
 			stats->rx_errors += sstats->rx_errors;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index c8126484c2be..4aec747d9e43 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -1995,7 +1995,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		return -1;
 
 	*skbref = skb;
-	skb->dev = cp->dev;
 	skb_reserve(skb, swivel);
 
 	p = skb->data;
@@ -2822,10 +2821,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 
 	ctrl = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		u64 csum_start_off, csum_stuff_off;
-
-		csum_start_off = (u64) (skb->h.raw - skb->data);
-		csum_stuff_off = csum_start_off + skb->csum_offset;
+		const u64 csum_start_off = skb_transport_offset(skb);
+		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
 
 		ctrl =  TX_DESC_CSUM_EN |
 			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
@@ -2849,8 +2846,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 			      ctrl | TX_DESC_SOF, 0);
 		entry = TX_DESC_NEXT(ring, entry);
 
-		memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
-		       len - tabort, tabort);
+		skb_copy_from_linear_data_offset(skb, len - tabort,
+						 tx_tiny_buf(cp, ring, entry), tabort);
 		mapping = tx_tiny_map(cp, ring, entry, tentry);
 		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
 			      (nr_frags == 0));
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 326d4a665123..e4f874a70fe5 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1062,7 +1062,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
 				    pci_unmap_addr(ce, dma_addr),
 				    pci_unmap_len(ce, dma_len),
 				    PCI_DMA_FROMDEVICE);
-		memcpy(skb->data, ce->skb->data, len);
+		skb_copy_from_linear_data(ce->skb, skb->data, len);
 		pci_dma_sync_single_for_device(pdev,
 					       pci_unmap_addr(ce, dma_addr),
 					       pci_unmap_len(ce, dma_len),
@@ -1379,12 +1379,11 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	}
 	__skb_pull(skb, sizeof(*p));
 
-	skb->dev = adapter->port[p->iff].dev;
 	skb->dev->last_rx = jiffies;
 	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
 	st->rx_packets++;
 
-	skb->protocol = eth_type_trans(skb, skb->dev);
+	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
 	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
 	    skb->protocol == htons(ETH_P_IP) &&
 	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
@@ -1866,14 +1865,14 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		++st->tx_tso;
 
-		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+		eth_type = skb_network_offset(skb) == ETH_HLEN ?
 			CPL_ETH_II : CPL_ETH_II_VLAN;
 
 		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
 		hdr->opcode = CPL_TX_PKT_LSO;
 		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
-		hdr->ip_hdr_words = skb->nh.iph->ihl;
-		hdr->tcp_hdr_words = skb->h.th->doff;
+		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
 							  skb_shinfo(skb)->gso_size));
 		hdr->len = htonl(skb->len - sizeof(*hdr));
@@ -1913,7 +1912,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
 	    skb->ip_summed == CHECKSUM_PARTIAL &&
-	    skb->nh.iph->protocol == IPPROTO_UDP) {
+	    ip_hdr(skb)->protocol == IPPROTO_UDP) {
 		if (unlikely(skb_checksum_help(skb))) {
 			pr_debug("%s: unable to do udp checksum\n", dev->name);
 			dev_kfree_skb_any(skb);
@@ -1926,7 +1925,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
 		if (skb->protocol == htons(ETH_P_ARP) &&
-		    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+		    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
 			adapter->sge->espibug_skb[dev->if_port] = skb;
 			/* We want to re-use this skb later. We
 			 * simply bump the reference count and it
@@ -2096,10 +2095,14 @@ static void espibug_workaround_t204(unsigned long data)
 				0x0, 0x7, 0x43, 0x0, 0x0, 0x0
 			};
 
-			memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-			       ch_mac_addr, ETH_ALEN);
-			memcpy(skb->data + skb->len - 10,
-			       ch_mac_addr, ETH_ALEN);
+			skb_copy_to_linear_data_offset(skb,
+						       sizeof(struct cpl_tx_pkt),
+						       ch_mac_addr,
+						       ETH_ALEN);
+			skb_copy_to_linear_data_offset(skb,
+						       skb->len - 10,
+						       ch_mac_addr,
+						       ETH_ALEN);
 			skb->cb[0] = 0xff;
 		}
 
@@ -2126,10 +2129,14 @@ static void espibug_workaround(unsigned long data)
 		if (!skb->cb[0]) {
 			u8 ch_mac_addr[ETH_ALEN] =
 			    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-			memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-			       ch_mac_addr, ETH_ALEN);
-			memcpy(skb->data + skb->len - 10, ch_mac_addr,
-			       ETH_ALEN);
+			skb_copy_to_linear_data_offset(skb,
+						       sizeof(struct cpl_tx_pkt),
+						       ch_mac_addr,
+						       ETH_ALEN);
+			skb_copy_to_linear_data_offset(skb,
+						       skb->len - 10,
+						       ch_mac_addr,
+						       ETH_ALEN);
 			skb->cb[0] = 0xff;
 		}
 
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 8eb571276000..5bdf5ca85a65 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1348,7 +1348,8 @@ e100_rx(struct net_device *dev)
 
 #ifdef ETHDEBUG
 		printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
-		       skb->head, skb->data, skb->tail, skb->end);
+		       skb->head, skb->data, skb_tail_pointer(skb),
+		       skb_end_pointer(skb));
 		printk("copying packet to 0x%x.\n", skb_data_ptr);
 #endif
 
@@ -1375,7 +1376,6 @@ e100_rx(struct net_device *dev)
 		myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
 	}
 
-	skb->dev = dev;
 	skb->protocol = eth_type_trans(skb, dev);
 
 	/* Send the packet to the upper layers */
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4612f71a7106..9774bb1b3e80 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1004,7 +1004,6 @@ skip_this_frame:
 		return;
 	}
 	skb_reserve(skb, 2);	/* longword align L3 header */
-	skb->dev = dev;
 
 	if (bp + length > lp->end_dma_buff) {
 		int semi_cnt = lp->end_dma_buff - bp;
@@ -1702,7 +1701,6 @@ net_rx(struct net_device *dev)
 		return;
 	}
 	skb_reserve(skb, 2);	/* longword align L3 header */
-	skb->dev = dev;
 
 	readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
 	if (length & 1)
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index e14862b43d17..483a594210a7 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t,
 static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
 					       unsigned int tid)
 {
-	return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+	struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+	    &(t->tid_tab[tid]) : NULL;
+
+	return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
 }
 
 /*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 48649244673e..ebcf35e4cf5b 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
 
 	spin_lock_bh(&td->tid_release_lock);
 	p->ctx = (void *)td->tid_release_list;
+	p->client = NULL;
 	td->tid_release_list = p;
 	if (!p->ctx)
 		schedule_work(&td->tid_release_task);
@@ -623,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+	    t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
 		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
 								    t3c_tid->
@@ -642,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode] (dev, skb,
 							     t3c_tid->ctx);
@@ -660,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode]
 		    (dev, skb, t3c_tid->ctx);
@@ -689,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
 	}
 }
 
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+					       int gfp)
+{
+	if (likely(!skb_cloned(skb))) {
+		BUG_ON(skb->len < len);
+		__skb_trim(skb, len);
+		skb_get(skb);
+	} else {
+		skb = alloc_skb(len, gfp);
+		if (skb)
+			__skb_put(skb, len);
+	}
+	return skb;
+}
+
 static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
 {
 	union opcode_tid *p = cplhdr(skb);
@@ -696,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[p->opcode]) {
 		return t3c_tid->client->handlers[p->opcode]
 		    (dev, skb, t3c_tid->ctx);
 	} else {
 		struct cpl_abort_req_rss *req = cplhdr(skb);
 		struct cpl_abort_rpl *rpl;
+		struct sk_buff *reply_skb;
+		unsigned int tid = GET_TID(req);
+		u8 cmd = req->status;
+
+		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+			goto out;
 
-		struct sk_buff *skb =
-		    alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
-		if (!skb) {
+		reply_skb = cxgb3_get_cpl_reply_skb(skb,
+						    sizeof(struct
+							   cpl_abort_rpl),
+						    GFP_ATOMIC);
+
+		if (!reply_skb) {
 			printk("do_abort_req_rss: couldn't get skb!\n");
 			goto out;
 		}
-		skb->priority = CPL_PRIORITY_DATA;
-		__skb_put(skb, sizeof(struct cpl_abort_rpl));
-		rpl = cplhdr(skb);
+		reply_skb->priority = CPL_PRIORITY_DATA;
+		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+		rpl = cplhdr(reply_skb);
 		rpl->wr.wr_hi =
 		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
-		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
-		OPCODE_TID(rpl) =
-		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
-		rpl->cmd = req->status;
-		cxgb3_ofld_send(dev, skb);
+		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+		rpl->cmd = cmd;
+		cxgb3_ofld_send(dev, reply_skb);
 out:
 		return CPL_RET_BUF_DONE;
 	}
@@ -732,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
 		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
 		    (dev, skb, t3c_tid->ctx);
@@ -750,7 +783,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
 	skb->protocol = htons(0xffff);
 	skb->dev = dev->lldev;
 	skb_pull(skb, sizeof(*p));
-	skb->mac.raw = skb->data;
+	skb_reset_mac_header(skb);
 	netif_receive_skb(skb);
 	return 0;
 }
@@ -762,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
 	struct t3c_tid_entry *t3c_tid;
 
 	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
-	if (t3c_tid->ctx && t3c_tid->client->handlers &&
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
 	    t3c_tid->client->handlers[opcode]) {
 		return t3c_tid->client->handlers[opcode] (dev, skb,
 							  t3c_tid->ctx);
@@ -961,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 	for (tid = 0; tid < ti->ntids; tid++) {
 		te = lookup_tid(ti, tid);
 		BUG_ON(!te);
-		if (te->ctx && te->client && te->client->redirect) {
+		if (te && te->ctx && te->client && te->client->redirect) {
 			update_tcb = te->client->redirect(te->ctx, old, new, e);
 			if (update_tcb) {
 				l2t_hold(L2DATA(tdev), e);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 027ab2c3825c..3666586a4831 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -661,7 +661,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
661 661
662 if (skb) { 662 if (skb) {
663 __skb_put(skb, IMMED_PKT_SIZE); 663 __skb_put(skb, IMMED_PKT_SIZE);
664 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE); 664 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
665 } 665 }
666 return skb; 666 return skb;
667} 667}
@@ -897,11 +897,11 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
897 d->flit[2] = 0; 897 d->flit[2] = 0;
898 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); 898 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
899 hdr->cntrl = htonl(cntrl); 899 hdr->cntrl = htonl(cntrl);
900 eth_type = skb->nh.raw - skb->data == ETH_HLEN ? 900 eth_type = skb_network_offset(skb) == ETH_HLEN ?
901 CPL_ETH_II : CPL_ETH_II_VLAN; 901 CPL_ETH_II : CPL_ETH_II_VLAN;
902 tso_info |= V_LSO_ETH_TYPE(eth_type) | 902 tso_info |= V_LSO_ETH_TYPE(eth_type) |
903 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) | 903 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
904 V_LSO_TCPHDR_WORDS(skb->h.th->doff); 904 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
905 hdr->lso_info = htonl(tso_info); 905 hdr->lso_info = htonl(tso_info);
906 flits = 3; 906 flits = 3;
907 } else { 907 } else {
@@ -913,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
913 if (skb->len <= WR_LEN - sizeof(*cpl)) { 913 if (skb->len <= WR_LEN - sizeof(*cpl)) {
914 q->sdesc[pidx].skb = NULL; 914 q->sdesc[pidx].skb = NULL;
915 if (!skb->data_len) 915 if (!skb->data_len)
916 memcpy(&d->flit[2], skb->data, skb->len); 916 skb_copy_from_linear_data(skb, &d->flit[2],
917 skb->len);
917 else 918 else
918 skb_copy_bits(skb, 0, &d->flit[2], skb->len); 919 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
919 920
@@ -1319,16 +1320,19 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1319 /* Only TX_DATA builds SGLs */ 1320 /* Only TX_DATA builds SGLs */
1320 1321
1321 from = (struct work_request_hdr *)skb->data; 1322 from = (struct work_request_hdr *)skb->data;
1322 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from)); 1323 memcpy(&d->flit[1], &from[1],
1324 skb_transport_offset(skb) - sizeof(*from));
1323 1325
1324 flits = (skb->h.raw - skb->data) / 8; 1326 flits = skb_transport_offset(skb) / 8;
1325 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1327 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1326 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw, 1328 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1329 skb->tail - skb->transport_header,
1327 adap->pdev); 1330 adap->pdev);
1328 if (need_skb_unmap()) { 1331 if (need_skb_unmap()) {
1329 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1332 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1330 skb->destructor = deferred_unmap_destructor; 1333 skb->destructor = deferred_unmap_destructor;
1331 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw; 1334 ((struct unmap_info *)skb->cb)->len = (skb->tail -
1335 skb->transport_header);
1332 } 1336 }
1333 1337
1334 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, 1338 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
@@ -1349,8 +1353,8 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1349 if (skb->len <= WR_LEN && cnt == 0) 1353 if (skb->len <= WR_LEN && cnt == 0)
1350 return 1; /* packet fits as immediate data */ 1354 return 1; /* packet fits as immediate data */
1351 1355
1352 flits = (skb->h.raw - skb->data) / 8; /* headers */ 1356 flits = skb_transport_offset(skb) / 8; /* headers */
1353 if (skb->tail != skb->h.raw) 1357 if (skb->tail != skb->transport_header)
1354 cnt++; 1358 cnt++;
1355 return flits_to_desc(flits + sgl_len(cnt)); 1359 return flits_to_desc(flits + sgl_len(cnt));
1356} 1360}
@@ -1620,7 +1624,9 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1620 unsigned int gather_idx) 1624 unsigned int gather_idx)
1621{ 1625{
1622 rq->offload_pkts++; 1626 rq->offload_pkts++;
1623 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data; 1627 skb_reset_mac_header(skb);
1628 skb_reset_network_header(skb);
1629 skb_reset_transport_header(skb);
1624 1630
1625 if (rq->polling) { 1631 if (rq->polling) {
1626 rx_gather[gather_idx++] = skb; 1632 rx_gather[gather_idx++] = skb;
@@ -1684,9 +1690,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1684 struct port_info *pi; 1690 struct port_info *pi;
1685 1691
1686 skb_pull(skb, sizeof(*p) + pad); 1692 skb_pull(skb, sizeof(*p) + pad);
1687 skb->dev = adap->port[p->iff];
1688 skb->dev->last_rx = jiffies; 1693 skb->dev->last_rx = jiffies;
1689 skb->protocol = eth_type_trans(skb, skb->dev); 1694 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1690 pi = netdev_priv(skb->dev); 1695 pi = netdev_priv(skb->dev);
1691 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && 1696 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1692 !p->fragment) { 1697 !p->fragment) {
@@ -1717,11 +1722,11 @@ static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1717{ 1722{
1718 skb->len = len; 1723 skb->len = len;
1719 if (len <= SKB_DATA_SIZE) { 1724 if (len <= SKB_DATA_SIZE) {
1720 memcpy(skb->data, p->va, len); 1725 skb_copy_to_linear_data(skb, p->va, len);
1721 skb->tail += len; 1726 skb->tail += len;
1722 put_page(p->frag.page); 1727 put_page(p->frag.page);
1723 } else { 1728 } else {
1724 memcpy(skb->data, p->va, SKB_DATA_SIZE); 1729 skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
1725 skb_shinfo(skb)->frags[0].page = p->frag.page; 1730 skb_shinfo(skb)->frags[0].page = p->frag.page;
1726 skb_shinfo(skb)->frags[0].page_offset = 1731 skb_shinfo(skb)->frags[0].page_offset =
1727 p->frag.page_offset + SKB_DATA_SIZE; 1732 p->frag.page_offset + SKB_DATA_SIZE;
@@ -1767,7 +1772,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1767 __skb_put(skb, len); 1772 __skb_put(skb, len);
1768 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, 1773 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1769 PCI_DMA_FROMDEVICE); 1774 PCI_DMA_FROMDEVICE);
1770 memcpy(skb->data, sd->t.skb->data, len); 1775 skb_copy_from_linear_data(sd->t.skb, skb->data, len);
1771 pci_dma_sync_single_for_device(adap->pdev, mapping, len, 1776 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1772 PCI_DMA_FROMDEVICE); 1777 PCI_DMA_FROMDEVICE);
1773 } else if (!drop_thres) 1778 } else if (!drop_thres)
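The sge.c hunks above swap direct pointer arithmetic on the old skb header unions (skb->h.raw, skb->nh.raw, skb->mac.raw) and raw memcpy()s on skb->data for the named skbuff helpers. A minimal sketch of the equivalences, assuming the 2.6.22-era API; cxgb3_sketch() is illustrative and not part of the driver:

        #include <linux/skbuff.h>

        static void cxgb3_sketch(struct sk_buff *skb, void *dst, unsigned int len)
        {
                /* old: skb->h.raw - skb->data */
                unsigned int l4_off = skb_transport_offset(skb);
                /* old: skb->h.raw */
                unsigned char *l4 = skb_transport_header(skb);

                /* old: memcpy(dst, skb->data, len) */
                skb_copy_from_linear_data(skb, dst, len);

                /* old: skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data */
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);

                (void)l4_off;
                (void)l4;
        }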
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index d83f075ef2d7..fb485d0a43d8 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1523,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1523 */ 1523 */
1524int t3_phy_intr_handler(struct adapter *adapter) 1524int t3_phy_intr_handler(struct adapter *adapter)
1525{ 1525{
1526 static const int intr_gpio_bits[] = { 8, 0x20 }; 1526 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1527
1528 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); 1527 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1529 1528
1530 for_each_port(adapter, i) { 1529 for_each_port(adapter, i) {
1531 if (cause & intr_gpio_bits[i]) { 1530 struct port_info *p = adap2pinfo(adapter, i);
1532 struct cphy *phy = &adap2pinfo(adapter, i)->phy; 1531
1533 int phy_cause = phy->ops->intr_handler(phy); 1532 mask = gpi - (gpi & (gpi - 1));
1533 gpi -= mask;
1534
1535 if (!(p->port_type->caps & SUPPORTED_IRQ))
1536 continue;
1537
1538 if (cause & mask) {
1539 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1534 1540
1535 if (phy_cause & cphy_cause_link_change) 1541 if (phy_cause & cphy_cause_link_change)
1536 t3_link_changed(adapter, i); 1542 t3_link_changed(adapter, i);
1537 if (phy_cause & cphy_cause_fifo_error) 1543 if (phy_cause & cphy_cause_fifo_error)
1538 phy->fifo_errors++; 1544 p->phy.fifo_errors++;
1539 } 1545 }
1540 } 1546 }
1541 1547
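The rewritten t3_phy_intr_handler() no longer indexes a fixed intr_gpio_bits[] table; it walks the board-specific gpio_intr mask by peeling off its lowest set bit each iteration with gpi - (gpi & (gpi - 1)). A standalone, user-space illustration of that idiom (not driver code):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t gpi = 0x28;    /* example GPIO interrupt mask: bits 3 and 5 */

                while (gpi) {
                        uint32_t mask = gpi - (gpi & (gpi - 1));  /* isolate lowest set bit */
                        gpi -= mask;                              /* clear it for next pass */
                        printf("next port's GPIO bit: 0x%x\n", mask);
                }
                return 0;
        }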
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index e547ce14eefe..dae97b860daa 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -359,7 +359,6 @@ static void de600_rx_intr(struct net_device *dev)
359 } 359 }
360 /* else */ 360 /* else */
361 361
362 skb->dev = dev;
363 skb_reserve(skb,2); /* Align */ 362 skb_reserve(skb,2); /* Align */
364 363
365 /* 'skb->data' points to the start of sk_buff data area. */ 364 /* 'skb->data' points to the start of sk_buff data area. */
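This and the many similar hunks below drop the explicit skb->dev = dev assignment on the receive path; in this kernel series eth_type_trans() (and the other *_type_trans helpers) store the device in skb->dev themselves. A minimal sketch of the resulting rx pattern, with a hypothetical demo_rx() standing in for the per-driver receive code:

        #include <linux/etherdevice.h>
        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static void demo_rx(struct net_device *dev, struct sk_buff *skb, unsigned int len)
        {
                skb_reserve(skb, 2);                       /* align the IP header     */
                skb_put(skb, len);                         /* payload already copied  */
                skb->protocol = eth_type_trans(skb, dev);  /* also sets skb->dev      */
                netif_rx(skb);
        }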
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index b6ad0cb50552..dc4892426174 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -697,7 +697,6 @@ static int de620_rx_intr(struct net_device *dev)
697 } 697 }
698 else { /* Yep! Go get it! */ 698 else { /* Yep! Go get it! */
699 skb_reserve(skb,2); /* Align */ 699 skb_reserve(skb,2); /* Align */
700 skb->dev = dev;
701 /* skb->data points to the start of sk_buff data area */ 700 /* skb->data points to the start of sk_buff data area */
702 buffer = skb_put(skb,size); 701 buffer = skb_put(skb,size);
703 /* copy the packet into the buffer */ 702 /* copy the packet into the buffer */
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 9f7e1db8ce62..95d854e2295c 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -616,7 +616,6 @@ static int lance_rx(struct net_device *dev)
616 } 616 }
617 lp->stats.rx_bytes += len; 617 lp->stats.rx_bytes += len;
618 618
619 skb->dev = dev;
620 skb_reserve(skb, 2); /* 16 byte align */ 619 skb_reserve(skb, 2); /* 16 byte align */
621 skb_put(skb, len); /* make room */ 620 skb_put(skb, len); /* make room */
622 621
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 07d2731c1aa8..571d82f8008c 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -3091,13 +3091,13 @@ static void dfx_rcv_queue_process(
3091 { 3091 {
3092 /* Receive buffer allocated, pass receive packet up */ 3092 /* Receive buffer allocated, pass receive packet up */
3093 3093
3094 memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3); 3094 skb_copy_to_linear_data(skb,
3095 p_buff + RCV_BUFF_K_PADDING,
3096 pkt_len + 3);
3095 } 3097 }
3096 3098
3097 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ 3099 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
3098 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ 3100 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
3099 skb->dev = bp->dev; /* pass up device pointer */
3100
3101 skb->protocol = fddi_type_trans(skb, bp->dev); 3101 skb->protocol = fddi_type_trans(skb, bp->dev);
3102 bp->rcv_total_bytes += skb->len; 3102 bp->rcv_total_bytes += skb->len;
3103 netif_rx(skb); 3103 netif_rx(skb);
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 5113eef755b9..183497020bfc 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1044,7 +1044,6 @@ static int depca_rx(struct net_device *dev)
1044 unsigned char *buf; 1044 unsigned char *buf;
1045 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1045 skb_reserve(skb, 2); /* 16 byte align the IP header */
1046 buf = skb_put(skb, pkt_len); 1046 buf = skb_put(skb, pkt_len);
1047 skb->dev = dev;
1048 if (entry < lp->rx_old) { /* Wrapped buffer */ 1047 if (entry < lp->rx_old) { /* Wrapped buffer */
1049 len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; 1048 len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
1050 memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); 1049 memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
@@ -1491,8 +1490,9 @@ static void __init depca_platform_probe (void)
1491 depca_io_ports[i].device = pldev; 1490 depca_io_ports[i].device = pldev;
1492 1491
1493 if (platform_device_add(pldev)) { 1492 if (platform_device_add(pldev)) {
1494 platform_device_put(pldev);
1495 depca_io_ports[i].device = NULL; 1493 depca_io_ports[i].device = NULL;
1494 pldev->dev.platform_data = NULL;
1495 platform_device_put(pldev);
1496 continue; 1496 continue;
1497 } 1497 }
1498 1498
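The depca hunk above reorders the platform_device_add() error path so that borrowed platform_data is detached before the final platform_device_put() drops the device. A rough sketch of that pattern, assuming a caller-owned data pointer; demo_register() is illustrative only:

        #include <linux/platform_device.h>

        static void demo_register(struct platform_device *pldev, void *borrowed_data)
        {
                pldev->dev.platform_data = borrowed_data;

                if (platform_device_add(pldev)) {
                        pldev->dev.platform_data = NULL;  /* don't free caller's data */
                        platform_device_put(pldev);       /* drop the last reference  */
                }
        }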
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c
index a79520295fd0..df62c0232f36 100644
--- a/drivers/net/dgrs.c
+++ b/drivers/net/dgrs.c
@@ -503,7 +503,6 @@ dgrs_rcv_frame(
503 /* discarding the frame */ 503 /* discarding the frame */
504 goto out; 504 goto out;
505 } 505 }
506 skb->dev = devN;
507 skb_reserve(skb, 2); /* Align IP header */ 506 skb_reserve(skb, 2); /* Align IP header */
508 507
509again: 508again:
@@ -742,7 +741,7 @@ static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN)
742 } 741 }
743 742
744 amt = min_t(unsigned int, len, rbdp->size - count); 743 amt = min_t(unsigned int, len, rbdp->size - count);
745 memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt); 744 skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt);
746 i += amt; 745 i += amt;
747 count += amt; 746 count += amt;
748 len -= amt; 747 len -= amt;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 9d446a0fe0bf..74ec64a1625d 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -504,7 +504,6 @@ rio_timer (unsigned long data)
504 break; 504 break;
505 } 505 }
506 np->rx_skbuff[entry] = skb; 506 np->rx_skbuff[entry] = skb;
507 skb->dev = dev;
508 /* 16 byte align the IP header */ 507 /* 16 byte align the IP header */
509 skb_reserve (skb, 2); 508 skb_reserve (skb, 2);
510 np->rx_ring[entry].fraginfo = 509 np->rx_ring[entry].fraginfo =
@@ -575,7 +574,6 @@ alloc_list (struct net_device *dev)
575 dev->name); 574 dev->name);
576 break; 575 break;
577 } 576 }
578 skb->dev = dev; /* Mark as being used by this device. */
579 skb_reserve (skb, 2); /* 16 byte align the IP header. */ 577 skb_reserve (skb, 2); /* 16 byte align the IP header. */
580 /* Rubicon now supports 40 bits of addressing space. */ 578 /* Rubicon now supports 40 bits of addressing space. */
581 np->rx_ring[i].fraginfo = 579 np->rx_ring[i].fraginfo =
@@ -866,7 +864,6 @@ receive_packet (struct net_device *dev)
866 DMA_48BIT_MASK, 864 DMA_48BIT_MASK,
867 np->rx_buf_sz, 865 np->rx_buf_sz,
868 PCI_DMA_FROMDEVICE); 866 PCI_DMA_FROMDEVICE);
869 skb->dev = dev;
870 /* 16 byte align the IP header */ 867 /* 16 byte align the IP header */
871 skb_reserve (skb, 2); 868 skb_reserve (skb, 2);
872 eth_copy_and_sum (skb, 869 eth_copy_and_sum (skb,
@@ -910,7 +907,6 @@ receive_packet (struct net_device *dev)
910 break; 907 break;
911 } 908 }
912 np->rx_skbuff[entry] = skb; 909 np->rx_skbuff[entry] = skb;
913 skb->dev = dev;
914 /* 16 byte align the IP header */ 910 /* 16 byte align the IP header */
915 skb_reserve (skb, 2); 911 skb_reserve (skb, 2);
916 np->rx_ring[entry].fraginfo = 912 np->rx_ring[entry].fraginfo =
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 615d2b14efa7..8cc1174e7f64 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -954,7 +954,6 @@ dm9000_rx(struct net_device *dev)
954 /* Move data from DM9000 */ 954 /* Move data from DM9000 */
955 if (GoodPacket 955 if (GoodPacket
956 && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) { 956 && ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
957 skb->dev = dev;
958 skb_reserve(skb, 2); 957 skb_reserve(skb, 2);
959 rdptr = (u8 *) skb_put(skb, RxLen - 4); 958 rdptr = (u8 *) skb_put(skb, RxLen - 4);
960 959
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 0cefef5e3f06..4d0e0aea72bf 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1769,7 +1769,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1769 1769
1770 /* Align, init, and map the RFD. */ 1770 /* Align, init, and map the RFD. */
1771 skb_reserve(rx->skb, NET_IP_ALIGN); 1771 skb_reserve(rx->skb, NET_IP_ALIGN);
1772 memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); 1772 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1773 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, 1773 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1774 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1774 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1775 1775
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 1d08e937af82..9267f16b1b32 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -409,25 +409,21 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
409{ 409{
410 uint32_t ctrl_ext; 410 uint32_t ctrl_ext;
411 uint32_t swsm; 411 uint32_t swsm;
412 uint32_t extcnf;
413 412
414 /* Let firmware taken over control of h/w */ 413 /* Let firmware taken over control of h/w */
415 switch (adapter->hw.mac_type) { 414 switch (adapter->hw.mac_type) {
416 case e1000_82571:
417 case e1000_82572:
418 case e1000_80003es2lan:
419 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
420 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
421 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
422 break;
423 case e1000_82573: 415 case e1000_82573:
424 swsm = E1000_READ_REG(&adapter->hw, SWSM); 416 swsm = E1000_READ_REG(&adapter->hw, SWSM);
425 E1000_WRITE_REG(&adapter->hw, SWSM, 417 E1000_WRITE_REG(&adapter->hw, SWSM,
426 swsm & ~E1000_SWSM_DRV_LOAD); 418 swsm & ~E1000_SWSM_DRV_LOAD);
419 break;
420 case e1000_82571:
421 case e1000_82572:
422 case e1000_80003es2lan:
427 case e1000_ich8lan: 423 case e1000_ich8lan:
428 extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT); 424 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
429 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 425 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
430 extcnf & ~E1000_CTRL_EXT_DRV_LOAD); 426 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
431 break; 427 break;
432 default: 428 default:
433 break; 429 break;
@@ -450,26 +446,21 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
450{ 446{
451 uint32_t ctrl_ext; 447 uint32_t ctrl_ext;
452 uint32_t swsm; 448 uint32_t swsm;
453 uint32_t extcnf;
454 449
455 /* Let firmware know the driver has taken over */ 450 /* Let firmware know the driver has taken over */
456 switch (adapter->hw.mac_type) { 451 switch (adapter->hw.mac_type) {
457 case e1000_82571:
458 case e1000_82572:
459 case e1000_80003es2lan:
460 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
461 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
462 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
463 break;
464 case e1000_82573: 452 case e1000_82573:
465 swsm = E1000_READ_REG(&adapter->hw, SWSM); 453 swsm = E1000_READ_REG(&adapter->hw, SWSM);
466 E1000_WRITE_REG(&adapter->hw, SWSM, 454 E1000_WRITE_REG(&adapter->hw, SWSM,
467 swsm | E1000_SWSM_DRV_LOAD); 455 swsm | E1000_SWSM_DRV_LOAD);
468 break; 456 break;
457 case e1000_82571:
458 case e1000_82572:
459 case e1000_80003es2lan:
469 case e1000_ich8lan: 460 case e1000_ich8lan:
470 extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL); 461 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
471 E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL, 462 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
472 extcnf | E1000_EXTCNF_CTRL_SWFLAG); 463 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
473 break; 464 break;
474 default: 465 default:
475 break; 466 break;
@@ -522,14 +513,15 @@ e1000_release_manageability(struct e1000_adapter *adapter)
522 } 513 }
523} 514}
524 515
525int 516/**
526e1000_up(struct e1000_adapter *adapter) 517 * e1000_configure - configure the hardware for RX and TX
518 * @adapter = private board structure
519 **/
520static void e1000_configure(struct e1000_adapter *adapter)
527{ 521{
528 struct net_device *netdev = adapter->netdev; 522 struct net_device *netdev = adapter->netdev;
529 int i; 523 int i;
530 524
531 /* hardware has been reset, we need to reload some things */
532
533 e1000_set_multi(netdev); 525 e1000_set_multi(netdev);
534 526
535 e1000_restore_vlan(adapter); 527 e1000_restore_vlan(adapter);
@@ -548,14 +540,20 @@ e1000_up(struct e1000_adapter *adapter)
548 } 540 }
549 541
550 adapter->tx_queue_len = netdev->tx_queue_len; 542 adapter->tx_queue_len = netdev->tx_queue_len;
543}
544
545int e1000_up(struct e1000_adapter *adapter)
546{
547 /* hardware has been reset, we need to reload some things */
548 e1000_configure(adapter);
549
550 clear_bit(__E1000_DOWN, &adapter->flags);
551 551
552#ifdef CONFIG_E1000_NAPI 552#ifdef CONFIG_E1000_NAPI
553 netif_poll_enable(netdev); 553 netif_poll_enable(adapter->netdev);
554#endif 554#endif
555 e1000_irq_enable(adapter); 555 e1000_irq_enable(adapter);
556 556
557 clear_bit(__E1000_DOWN, &adapter->flags);
558
559 /* fire a link change interrupt to start the watchdog */ 557 /* fire a link change interrupt to start the watchdog */
560 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); 558 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
561 return 0; 559 return 0;
@@ -640,15 +638,15 @@ e1000_down(struct e1000_adapter *adapter)
640 * reschedule our watchdog timer */ 638 * reschedule our watchdog timer */
641 set_bit(__E1000_DOWN, &adapter->flags); 639 set_bit(__E1000_DOWN, &adapter->flags);
642 640
641#ifdef CONFIG_E1000_NAPI
642 netif_poll_disable(netdev);
643#endif
643 e1000_irq_disable(adapter); 644 e1000_irq_disable(adapter);
644 645
645 del_timer_sync(&adapter->tx_fifo_stall_timer); 646 del_timer_sync(&adapter->tx_fifo_stall_timer);
646 del_timer_sync(&adapter->watchdog_timer); 647 del_timer_sync(&adapter->watchdog_timer);
647 del_timer_sync(&adapter->phy_info_timer); 648 del_timer_sync(&adapter->phy_info_timer);
648 649
649#ifdef CONFIG_E1000_NAPI
650 netif_poll_disable(netdev);
651#endif
652 netdev->tx_queue_len = adapter->tx_queue_len; 650 netdev->tx_queue_len = adapter->tx_queue_len;
653 adapter->link_speed = 0; 651 adapter->link_speed = 0;
654 adapter->link_duplex = 0; 652 adapter->link_duplex = 0;
@@ -1410,21 +1408,17 @@ e1000_open(struct net_device *netdev)
1410 return -EBUSY; 1408 return -EBUSY;
1411 1409
1412 /* allocate transmit descriptors */ 1410 /* allocate transmit descriptors */
1413 if ((err = e1000_setup_all_tx_resources(adapter))) 1411 err = e1000_setup_all_tx_resources(adapter);
1412 if (err)
1414 goto err_setup_tx; 1413 goto err_setup_tx;
1415 1414
1416 /* allocate receive descriptors */ 1415 /* allocate receive descriptors */
1417 if ((err = e1000_setup_all_rx_resources(adapter))) 1416 err = e1000_setup_all_rx_resources(adapter);
1418 goto err_setup_rx;
1419
1420 err = e1000_request_irq(adapter);
1421 if (err) 1417 if (err)
1422 goto err_req_irq; 1418 goto err_setup_rx;
1423 1419
1424 e1000_power_up_phy(adapter); 1420 e1000_power_up_phy(adapter);
1425 1421
1426 if ((err = e1000_up(adapter)))
1427 goto err_up;
1428 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1422 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1429 if ((adapter->hw.mng_cookie.status & 1423 if ((adapter->hw.mng_cookie.status &
1430 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1424 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
@@ -1437,12 +1431,33 @@ e1000_open(struct net_device *netdev)
1437 e1000_check_mng_mode(&adapter->hw)) 1431 e1000_check_mng_mode(&adapter->hw))
1438 e1000_get_hw_control(adapter); 1432 e1000_get_hw_control(adapter);
1439 1433
1434 /* before we allocate an interrupt, we must be ready to handle it.
1435 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1436 * as soon as we call pci_request_irq, so we have to setup our
1437 * clean_rx handler before we do so. */
1438 e1000_configure(adapter);
1439
1440 err = e1000_request_irq(adapter);
1441 if (err)
1442 goto err_req_irq;
1443
1444 /* From here on the code is the same as e1000_up() */
1445 clear_bit(__E1000_DOWN, &adapter->flags);
1446
1447#ifdef CONFIG_E1000_NAPI
1448 netif_poll_enable(netdev);
1449#endif
1450
1451 e1000_irq_enable(adapter);
1452
1453 /* fire a link status change interrupt to start the watchdog */
1454 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
1455
1440 return E1000_SUCCESS; 1456 return E1000_SUCCESS;
1441 1457
1442err_up:
1443 e1000_power_down_phy(adapter);
1444 e1000_free_irq(adapter);
1445err_req_irq: 1458err_req_irq:
1459 e1000_release_hw_control(adapter);
1460 e1000_power_down_phy(adapter);
1446 e1000_free_all_rx_resources(adapter); 1461 e1000_free_all_rx_resources(adapter);
1447err_setup_rx: 1462err_setup_rx:
1448 e1000_free_all_tx_resources(adapter); 1463 e1000_free_all_tx_resources(adapter);
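The reworked e1000_open() configures everything the interrupt handler touches before calling request_irq(), because with DEBUG_SHIRQ the handler can run immediately, and only then enables interrupts, mirroring e1000_up(). A compressed sketch of that ordering with hypothetical demo_* names (not the driver's own functions):

        #include <linux/interrupt.h>
        #include <linux/netdevice.h>

        struct demo_adapter { int irq; };                         /* illustrative */
        static irqreturn_t demo_intr(int irq, void *data) { return IRQ_HANDLED; }

        static int demo_open(struct net_device *netdev)
        {
                struct demo_adapter *adapter = netdev_priv(netdev);
                int err;

                /* 1. program rings, install the clean_rx handler, etc.        */
                /* 2. only then register the (possibly shared) interrupt line:
                 *    with DEBUG_SHIRQ the handler fires right here.           */
                err = request_irq(adapter->irq, demo_intr, IRQF_SHARED,
                                  netdev->name, netdev);
                if (err)
                        return err;

                /* 3. unmask interrupts in hardware and kick the watchdog.     */
                return 0;
        }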
@@ -2887,33 +2902,30 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2887 return err; 2902 return err;
2888 } 2903 }
2889 2904
2890 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2905 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2891 mss = skb_shinfo(skb)->gso_size; 2906 mss = skb_shinfo(skb)->gso_size;
2892 if (skb->protocol == htons(ETH_P_IP)) { 2907 if (skb->protocol == htons(ETH_P_IP)) {
2893 skb->nh.iph->tot_len = 0; 2908 struct iphdr *iph = ip_hdr(skb);
2894 skb->nh.iph->check = 0; 2909 iph->tot_len = 0;
2895 skb->h.th->check = 2910 iph->check = 0;
2896 ~csum_tcpudp_magic(skb->nh.iph->saddr, 2911 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2897 skb->nh.iph->daddr, 2912 iph->daddr, 0,
2898 0, 2913 IPPROTO_TCP,
2899 IPPROTO_TCP, 2914 0);
2900 0);
2901 cmd_length = E1000_TXD_CMD_IP; 2915 cmd_length = E1000_TXD_CMD_IP;
2902 ipcse = skb->h.raw - skb->data - 1; 2916 ipcse = skb_transport_offset(skb) - 1;
2903 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2917 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2904 skb->nh.ipv6h->payload_len = 0; 2918 ipv6_hdr(skb)->payload_len = 0;
2905 skb->h.th->check = 2919 tcp_hdr(skb)->check =
2906 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2920 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2907 &skb->nh.ipv6h->daddr, 2921 &ipv6_hdr(skb)->daddr,
2908 0, 2922 0, IPPROTO_TCP, 0);
2909 IPPROTO_TCP,
2910 0);
2911 ipcse = 0; 2923 ipcse = 0;
2912 } 2924 }
2913 ipcss = skb->nh.raw - skb->data; 2925 ipcss = skb_network_offset(skb);
2914 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 2926 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2915 tucss = skb->h.raw - skb->data; 2927 tucss = skb_transport_offset(skb);
2916 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 2928 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2917 tucse = 0; 2929 tucse = 0;
2918 2930
2919 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2931 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
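The converted e1000_tso() derives the TSO header length and checksum seeds from the generic accessors (skb_transport_offset(), tcp_hdrlen(), ip_hdr(), tcp_hdr()) instead of the old skb->h / skb->nh unions. A minimal IPv4-only sketch of the same bookkeeping; demo_tso_hdr_len() is illustrative, not driver code:

        #include <linux/in.h>
        #include <linux/ip.h>
        #include <linux/tcp.h>
        #include <linux/skbuff.h>
        #include <net/checksum.h>

        static unsigned int demo_tso_hdr_len(struct sk_buff *skb)
        {
                struct iphdr *iph = ip_hdr(skb);
                /* L2+L3 header bytes plus the TCP header (doff * 4) */
                unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

                /* seed the pseudo-header checksum for hardware TSO */
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                         0, IPPROTO_TCP, 0);
                return hdr_len;
        }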
@@ -2954,7 +2966,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2954 uint8_t css; 2966 uint8_t css;
2955 2967
2956 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2968 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2957 css = skb->h.raw - skb->data; 2969 css = skb_transport_offset(skb);
2958 2970
2959 i = tx_ring->next_to_use; 2971 i = tx_ring->next_to_use;
2960 buffer_info = &tx_ring->buffer_info[i]; 2972 buffer_info = &tx_ring->buffer_info[i];
@@ -2962,7 +2974,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2962 2974
2963 context_desc->lower_setup.ip_config = 0; 2975 context_desc->lower_setup.ip_config = 0;
2964 context_desc->upper_setup.tcp_fields.tucss = css; 2976 context_desc->upper_setup.tcp_fields.tucss = css;
2965 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; 2977 context_desc->upper_setup.tcp_fields.tucso =
2978 css + skb->csum_offset;
2966 context_desc->upper_setup.tcp_fields.tucse = 0; 2979 context_desc->upper_setup.tcp_fields.tucse = 0;
2967 context_desc->tcp_seg_setup.data = 0; 2980 context_desc->tcp_seg_setup.data = 0;
2968 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2981 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
@@ -3296,7 +3309,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3296 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data 3309 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3297 * points to just header, pull a few bytes of payload from 3310 * points to just header, pull a few bytes of payload from
3298 * frags into skb->data */ 3311 * frags into skb->data */
3299 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 3312 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3300 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { 3313 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
3301 switch (adapter->hw.mac_type) { 3314 switch (adapter->hw.mac_type) {
3302 unsigned int pull_size; 3315 unsigned int pull_size;
@@ -3307,7 +3320,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3307 * NOTE: this is a TSO only workaround 3320 * NOTE: this is a TSO only workaround
3308 * if end byte alignment not correct move us 3321 * if end byte alignment not correct move us
3309 * into the next dword */ 3322 * into the next dword */
3310 if ((unsigned long)(skb->tail - 1) & 4) 3323 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3311 break; 3324 break;
3312 /* fall through */ 3325 /* fall through */
3313 case e1000_82571: 3326 case e1000_82571:
@@ -3363,12 +3376,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3363 (adapter->hw.mac_type == e1000_82573)) 3376 (adapter->hw.mac_type == e1000_82573))
3364 e1000_transfer_dhcp_info(adapter, skb); 3377 e1000_transfer_dhcp_info(adapter, skb);
3365 3378
3366 local_irq_save(flags); 3379 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
3367 if (!spin_trylock(&tx_ring->tx_lock)) {
3368 /* Collision - tell upper layer to requeue */ 3380 /* Collision - tell upper layer to requeue */
3369 local_irq_restore(flags);
3370 return NETDEV_TX_LOCKED; 3381 return NETDEV_TX_LOCKED;
3371 }
3372 3382
3373 /* need: count + 2 desc gap to keep tail from touching 3383 /* need: count + 2 desc gap to keep tail from touching
3374 * head, otherwise try next time */ 3384 * head, otherwise try next time */
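The e1000_xmit_frame() hunk folds local_irq_save() plus spin_trylock() into spin_trylock_irqsave(), which restores the saved flags itself when the lock is contended. A small sketch of that locking shape with a hypothetical helper:

        #include <linux/spinlock.h>

        static int demo_try_tx_lock(spinlock_t *lock)
        {
                unsigned long flags;

                if (!spin_trylock_irqsave(lock, flags))
                        return -1;       /* contended: tell the caller to requeue */

                /* ... queue the frame under the lock ... */
                spin_unlock_irqrestore(lock, flags);
                return 0;
        }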
@@ -3796,7 +3806,7 @@ e1000_intr_msi(int irq, void *data)
3796 3806
3797 for (i = 0; i < E1000_MAX_INTR; i++) 3807 for (i = 0; i < E1000_MAX_INTR; i++)
3798 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3808 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3799 e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3809 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3800 break; 3810 break;
3801 3811
3802 if (likely(adapter->itr_setting & 3)) 3812 if (likely(adapter->itr_setting & 3))
@@ -3899,7 +3909,7 @@ e1000_intr(int irq, void *data)
3899 3909
3900 for (i = 0; i < E1000_MAX_INTR; i++) 3910 for (i = 0; i < E1000_MAX_INTR; i++)
3901 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3911 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3902 e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3912 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3903 break; 3913 break;
3904 3914
3905 if (likely(adapter->itr_setting & 3)) 3915 if (likely(adapter->itr_setting & 3))
@@ -3949,7 +3959,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3949 poll_dev->quota -= work_done; 3959 poll_dev->quota -= work_done;
3950 3960
3951 /* If no Tx and not enough Rx work done, exit the polling mode */ 3961 /* If no Tx and not enough Rx work done, exit the polling mode */
3952 if ((tx_cleaned && (work_done < work_to_do)) || 3962 if ((!tx_cleaned && (work_done == 0)) ||
3953 !netif_running(poll_dev)) { 3963 !netif_running(poll_dev)) {
3954quit_polling: 3964quit_polling:
3955 if (likely(adapter->itr_setting & 3)) 3965 if (likely(adapter->itr_setting & 3))
@@ -3979,7 +3989,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3979#ifdef CONFIG_E1000_NAPI 3989#ifdef CONFIG_E1000_NAPI
3980 unsigned int count = 0; 3990 unsigned int count = 0;
3981#endif 3991#endif
3982 boolean_t cleaned = TRUE; 3992 boolean_t cleaned = FALSE;
3983 unsigned int total_tx_bytes=0, total_tx_packets=0; 3993 unsigned int total_tx_bytes=0, total_tx_packets=0;
3984 3994
3985 i = tx_ring->next_to_clean; 3995 i = tx_ring->next_to_clean;
@@ -4013,10 +4023,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4013#ifdef CONFIG_E1000_NAPI 4023#ifdef CONFIG_E1000_NAPI
4014#define E1000_TX_WEIGHT 64 4024#define E1000_TX_WEIGHT 64
4015 /* weight of a sort for tx, to avoid endless transmit cleanup */ 4025 /* weight of a sort for tx, to avoid endless transmit cleanup */
4016 if (count++ == E1000_TX_WEIGHT) { 4026 if (count++ == E1000_TX_WEIGHT) break;
4017 cleaned = FALSE;
4018 break;
4019 }
4020#endif 4027#endif
4021 } 4028 }
4022 4029
@@ -4230,9 +4237,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4230 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 4237 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
4231 if (new_skb) { 4238 if (new_skb) {
4232 skb_reserve(new_skb, NET_IP_ALIGN); 4239 skb_reserve(new_skb, NET_IP_ALIGN);
4233 memcpy(new_skb->data - NET_IP_ALIGN, 4240 skb_copy_to_linear_data_offset(new_skb,
4234 skb->data - NET_IP_ALIGN, 4241 -NET_IP_ALIGN,
4235 length + NET_IP_ALIGN); 4242 (skb->data -
4243 NET_IP_ALIGN),
4244 (length +
4245 NET_IP_ALIGN));
4236 /* save the skb in buffer_info as good */ 4246 /* save the skb in buffer_info as good */
4237 buffer_info->skb = skb; 4247 buffer_info->skb = skb;
4238 skb = new_skb; 4248 skb = new_skb;
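The rx copybreak hunk above copies small packets, including the NET_IP_ALIGN pad in front of skb->data, into a freshly aligned skb via skb_copy_to_linear_data_offset() with a negative offset. A condensed sketch of that path; demo_copybreak() is illustrative only:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static struct sk_buff *demo_copybreak(struct net_device *netdev,
                                              struct sk_buff *skb, unsigned int length)
        {
                struct sk_buff *new_skb = netdev_alloc_skb(netdev, length + NET_IP_ALIGN);

                if (!new_skb)
                        return skb;                  /* keep the original buffer */

                skb_reserve(new_skb, NET_IP_ALIGN);
                /* offset -NET_IP_ALIGN reaches back into new_skb's headroom */
                skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
                                               skb->data - NET_IP_ALIGN,
                                               length + NET_IP_ALIGN);
                return new_skb;
        }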
@@ -4394,7 +4404,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4394 PCI_DMA_FROMDEVICE); 4404 PCI_DMA_FROMDEVICE);
4395 vaddr = kmap_atomic(ps_page->ps_page[0], 4405 vaddr = kmap_atomic(ps_page->ps_page[0],
4396 KM_SKB_DATA_SOFTIRQ); 4406 KM_SKB_DATA_SOFTIRQ);
4397 memcpy(skb->tail, vaddr, l1); 4407 memcpy(skb_tail_pointer(skb), vaddr, l1);
4398 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); 4408 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
4399 pci_dma_sync_single_for_device(pdev, 4409 pci_dma_sync_single_for_device(pdev,
4400 ps_page_dma->ps_page_dma[0], 4410 ps_page_dma->ps_page_dma[0],
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index b4463094c93a..39654e1e2bed 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1591,7 +1591,6 @@ eepro_rx(struct net_device *dev)
1591 1591
1592 break; 1592 break;
1593 } 1593 }
1594 skb->dev = dev;
1595 skb_reserve(skb,2); 1594 skb_reserve(skb,2);
1596 1595
1597 if (lp->version == LAN595) 1596 if (lp->version == LAN595)
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index e28bb1e38f8d..6c267c38df97 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1793,7 +1793,6 @@ speedo_rx(struct net_device *dev)
1793 copying to a properly sized skbuff. */ 1793 copying to a properly sized skbuff. */
1794 if (pkt_len < rx_copybreak 1794 if (pkt_len < rx_copybreak
1795 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { 1795 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1796 skb->dev = dev;
1797 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1796 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1798 /* 'skb_put()' points to the start of sk_buff data area. */ 1797 /* 'skb_put()' points to the start of sk_buff data area. */
1799 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry], 1798 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
@@ -1805,8 +1804,9 @@ speedo_rx(struct net_device *dev)
1805 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); 1804 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
1806 skb_put(skb, pkt_len); 1805 skb_put(skb, pkt_len);
1807#else 1806#else
1808 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data, 1807 skb_copy_from_linear_data(sp->rx_skbuff[entry],
1809 pkt_len); 1808 skb_put(skb, pkt_len),
1809 pkt_len);
1810#endif 1810#endif
1811 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry], 1811 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1812 sizeof(struct RxFD) + pkt_len, 1812 sizeof(struct RxFD) + pkt_len,
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 3868b8031266..8aaf5ec0c360 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -976,7 +976,6 @@ static void eexp_hw_rx_pio(struct net_device *dev)
976 lp->stats.rx_dropped++; 976 lp->stats.rx_dropped++;
977 break; 977 break;
978 } 978 }
979 skb->dev = dev;
980 skb_reserve(skb, 2); 979 skb_reserve(skb, 2);
981 outw(pbuf+10, ioaddr+READ_PTR); 980 outw(pbuf+10, ioaddr+READ_PTR);
982 insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); 981 insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 0e4042bc0a48..58364a0ff378 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -391,8 +391,8 @@ static int ehea_poll(struct net_device *dev, int *budget)
391 if (!skb) 391 if (!skb)
392 break; 392 break;
393 } 393 }
394 memcpy(skb->data, ((char*)cqe) + 64, 394 skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
395 cqe->num_bytes_transfered - 4); 395 cqe->num_bytes_transfered - 4);
396 ehea_fill_skb(dev, skb, cqe); 396 ehea_fill_skb(dev, skb, cqe);
397 } else if (rq == 2) { /* RQ2 */ 397 } else if (rq == 2) { /* RQ2 */
398 skb = get_skb_by_index(skb_arr_rq2, 398 skb = get_skb_by_index(skb_arr_rq2,
@@ -1262,8 +1262,8 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1262static inline void write_ip_start_end(struct ehea_swqe *swqe, 1262static inline void write_ip_start_end(struct ehea_swqe *swqe,
1263 const struct sk_buff *skb) 1263 const struct sk_buff *skb)
1264{ 1264{
1265 swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); 1265 swqe->ip_start = skb_network_offset(skb);
1266 swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); 1266 swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
1267} 1267}
1268 1268
1269static inline void write_tcp_offset_end(struct ehea_swqe *swqe, 1269static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
@@ -1300,13 +1300,13 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1300 /* copy only eth/ip/tcp headers to immediate data and 1300 /* copy only eth/ip/tcp headers to immediate data and
1301 * the rest of skb->data to sg1entry 1301 * the rest of skb->data to sg1entry
1302 */ 1302 */
1303 headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); 1303 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1304 1304
1305 skb_data_size = skb->len - skb->data_len; 1305 skb_data_size = skb->len - skb->data_len;
1306 1306
1307 if (skb_data_size >= headersize) { 1307 if (skb_data_size >= headersize) {
1308 /* copy immediate data */ 1308 /* copy immediate data */
1309 memcpy(imm_data, skb->data, headersize); 1309 skb_copy_from_linear_data(skb, imm_data, headersize);
1310 swqe->immediate_data_length = headersize; 1310 swqe->immediate_data_length = headersize;
1311 1311
1312 if (skb_data_size > headersize) { 1312 if (skb_data_size > headersize) {
@@ -1337,7 +1337,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
1337 */ 1337 */
1338 if (skb_data_size >= SWQE2_MAX_IMM) { 1338 if (skb_data_size >= SWQE2_MAX_IMM) {
1339 /* copy immediate data */ 1339 /* copy immediate data */
1340 memcpy(imm_data, skb->data, SWQE2_MAX_IMM); 1340 skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
1341 1341
1342 swqe->immediate_data_length = SWQE2_MAX_IMM; 1342 swqe->immediate_data_length = SWQE2_MAX_IMM;
1343 1343
@@ -1350,7 +1350,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
1350 swqe->descriptors++; 1350 swqe->descriptors++;
1351 } 1351 }
1352 } else { 1352 } else {
1353 memcpy(imm_data, skb->data, skb_data_size); 1353 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1354 swqe->immediate_data_length = skb_data_size; 1354 swqe->immediate_data_length = skb_data_size;
1355 } 1355 }
1356} 1356}
@@ -1688,6 +1688,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1688 struct ehea_swqe *swqe, u32 lkey) 1688 struct ehea_swqe *swqe, u32 lkey)
1689{ 1689{
1690 if (skb->protocol == htons(ETH_P_IP)) { 1690 if (skb->protocol == htons(ETH_P_IP)) {
1691 const struct iphdr *iph = ip_hdr(skb);
1691 /* IPv4 */ 1692 /* IPv4 */
1692 swqe->tx_control |= EHEA_SWQE_CRC 1693 swqe->tx_control |= EHEA_SWQE_CRC
1693 | EHEA_SWQE_IP_CHECKSUM 1694 | EHEA_SWQE_IP_CHECKSUM
@@ -1697,15 +1698,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1697 1698
1698 write_ip_start_end(swqe, skb); 1699 write_ip_start_end(swqe, skb);
1699 1700
1700 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1701 if (iph->protocol == IPPROTO_UDP) {
1701 if ((skb->nh.iph->frag_off & IP_MF) || 1702 if ((iph->frag_off & IP_MF) ||
1702 (skb->nh.iph->frag_off & IP_OFFSET)) 1703 (iph->frag_off & IP_OFFSET))
1703 /* IP fragment, so don't change cs */ 1704 /* IP fragment, so don't change cs */
1704 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; 1705 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
1705 else 1706 else
1706 write_udp_offset_end(swqe, skb); 1707 write_udp_offset_end(swqe, skb);
1707 1708
1708 } else if (skb->nh.iph->protocol == IPPROTO_TCP) { 1709 } else if (iph->protocol == IPPROTO_TCP) {
1709 write_tcp_offset_end(swqe, skb); 1710 write_tcp_offset_end(swqe, skb);
1710 } 1711 }
1711 1712
@@ -1731,10 +1732,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1731 int i; 1732 int i;
1732 1733
1733 if (skb->protocol == htons(ETH_P_IP)) { 1734 if (skb->protocol == htons(ETH_P_IP)) {
1735 const struct iphdr *iph = ip_hdr(skb);
1734 /* IPv4 */ 1736 /* IPv4 */
1735 write_ip_start_end(swqe, skb); 1737 write_ip_start_end(swqe, skb);
1736 1738
1737 if (skb->nh.iph->protocol == IPPROTO_TCP) { 1739 if (iph->protocol == IPPROTO_TCP) {
1738 swqe->tx_control |= EHEA_SWQE_CRC 1740 swqe->tx_control |= EHEA_SWQE_CRC
1739 | EHEA_SWQE_IP_CHECKSUM 1741 | EHEA_SWQE_IP_CHECKSUM
1740 | EHEA_SWQE_TCP_CHECKSUM 1742 | EHEA_SWQE_TCP_CHECKSUM
@@ -1742,9 +1744,9 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1742 1744
1743 write_tcp_offset_end(swqe, skb); 1745 write_tcp_offset_end(swqe, skb);
1744 1746
1745 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 1747 } else if (iph->protocol == IPPROTO_UDP) {
1746 if ((skb->nh.iph->frag_off & IP_MF) || 1748 if ((iph->frag_off & IP_MF) ||
1747 (skb->nh.iph->frag_off & IP_OFFSET)) 1749 (iph->frag_off & IP_OFFSET))
1748 /* IP fragment, so don't change cs */ 1750 /* IP fragment, so don't change cs */
1749 swqe->tx_control |= EHEA_SWQE_CRC 1751 swqe->tx_control |= EHEA_SWQE_CRC
1750 | EHEA_SWQE_IMM_DATA_PRESENT; 1752 | EHEA_SWQE_IMM_DATA_PRESENT;
@@ -1770,10 +1772,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1770 /* copy (immediate) data */ 1772 /* copy (immediate) data */
1771 if (nfrags == 0) { 1773 if (nfrags == 0) {
1772 /* data is in a single piece */ 1774 /* data is in a single piece */
1773 memcpy(imm_data, skb->data, skb->len); 1775 skb_copy_from_linear_data(skb, imm_data, skb->len);
1774 } else { 1776 } else {
1775 /* first copy data from the skb->data buffer ... */ 1777 /* first copy data from the skb->data buffer ... */
1776 memcpy(imm_data, skb->data, skb->len - skb->data_len); 1778 skb_copy_from_linear_data(skb, imm_data,
1779 skb->len - skb->data_len);
1777 imm_data += skb->len - skb->data_len; 1780 imm_data += skb->len - skb->data_len;
1778 1781
1779 /* ... then copy data from the fragments */ 1782 /* ... then copy data from the fragments */
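The ehea conversion computes the IP header span for the send WQE from skb_network_offset() and ip_hdrlen() rather than casting header pointers to u64 and subtracting. A minimal sketch of the arithmetic behind write_ip_start_end(); demo_ip_span() is not part of the driver:

        #include <linux/ip.h>
        #include <linux/skbuff.h>

        static void demo_ip_span(const struct sk_buff *skb, u8 *start, u8 *end)
        {
                *start = skb_network_offset(skb);        /* nh.iph - skb->data      */
                *end   = *start + ip_hdrlen(skb) - 1;    /* ihl * 4 bytes of header */
        }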
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 3a6a83d3ee1c..4e3f14c9c717 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -934,7 +934,6 @@ static void epic_init_ring(struct net_device *dev)
934 ep->rx_skbuff[i] = skb; 934 ep->rx_skbuff[i] = skb;
935 if (skb == NULL) 935 if (skb == NULL)
936 break; 936 break;
937 skb->dev = dev; /* Mark as being used by this device. */
938 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 937 skb_reserve(skb, 2); /* 16 byte align the IP header. */
939 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, 938 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
940 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); 939 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1199,7 +1198,6 @@ static int epic_rx(struct net_device *dev, int budget)
1199 to a minimally-sized skbuff. */ 1198 to a minimally-sized skbuff. */
1200 if (pkt_len < rx_copybreak 1199 if (pkt_len < rx_copybreak
1201 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1200 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1202 skb->dev = dev;
1203 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1201 skb_reserve(skb, 2); /* 16 byte align the IP header */
1204 pci_dma_sync_single_for_cpu(ep->pci_dev, 1202 pci_dma_sync_single_for_cpu(ep->pci_dev,
1205 ep->rx_ring[entry].bufaddr, 1203 ep->rx_ring[entry].bufaddr,
@@ -1236,7 +1234,6 @@ static int epic_rx(struct net_device *dev, int budget)
1236 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); 1234 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1237 if (skb == NULL) 1235 if (skb == NULL)
1238 break; 1236 break;
1239 skb->dev = dev; /* Mark as being used by this device. */
1240 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1237 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1241 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, 1238 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1242 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); 1239 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 93283e386f3a..04abf59e5007 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1175,7 +1175,6 @@ static void eth16i_rx(struct net_device *dev)
1175 break; 1175 break;
1176 } 1176 }
1177 1177
1178 skb->dev = dev;
1179 skb_reserve(skb,2); 1178 skb_reserve(skb,2);
1180 1179
1181 /* 1180 /*
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 714ea1176ec7..cb0792c187ba 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -993,7 +993,6 @@ static int ewrk3_rx(struct net_device *dev)
993 993
994 if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 994 if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
995 unsigned char *p; 995 unsigned char *p;
996 skb->dev = dev;
997 skb_reserve(skb, 2); /* Align to 16 bytes */ 996 skb_reserve(skb, 2); /* Align to 16 bytes */
998 p = skb_put(skb, pkt_len); 997 p = skb_put(skb, pkt_len);
999 998
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 38a13f440530..abe9b089c610 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1719,7 +1719,6 @@ static int netdev_rx(struct net_device *dev)
1719 to a minimally-sized skbuff. */ 1719 to a minimally-sized skbuff. */
1720 if (pkt_len < rx_copybreak && 1720 if (pkt_len < rx_copybreak &&
1721 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1721 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1722 skb->dev = dev;
1723 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1722 skb_reserve(skb, 2); /* 16 byte align the IP header */
1724 pci_dma_sync_single_for_cpu(np->pci_dev, 1723 pci_dma_sync_single_for_cpu(np->pci_dev,
1725 np->cur_rx->buffer, 1724 np->cur_rx->buffer,
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 6764281b4531..255b09124e11 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -647,7 +647,6 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
647 printk("%s: Memory squeeze, dropping packet.\n", dev->name); 647 printk("%s: Memory squeeze, dropping packet.\n", dev->name);
648 fep->stats.rx_dropped++; 648 fep->stats.rx_dropped++;
649 } else { 649 } else {
650 skb->dev = dev;
651 skb_put(skb,pkt_len-4); /* Make room */ 650 skb_put(skb,pkt_len-4); /* Make room */
652 eth_copy_and_sum(skb, data, pkt_len-4, 0); 651 eth_copy_and_sum(skb, data, pkt_len-4, 0);
653 skb->protocol=eth_type_trans(skb,dev); 652 skb->protocol=eth_type_trans(skb,dev);
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c
index 77f747a5afa7..e824d5d231af 100644
--- a/drivers/net/fec_8xx/fec_main.c
+++ b/drivers/net/fec_8xx/fec_main.c
@@ -551,7 +551,9 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget)
551 skbn = dev_alloc_skb(pkt_len + 2); 551 skbn = dev_alloc_skb(pkt_len + 2);
552 if (skbn != NULL) { 552 if (skbn != NULL) {
553 skb_reserve(skbn, 2); /* align IP header */ 553 skb_reserve(skbn, 2); /* align IP header */
 554 memcpy(skbn->data, skb->data, pkt_len); 554 skb_copy_from_linear_data(skb,
555 skbn->data,
556 pkt_len);
555 /* swap */ 557 /* swap */
556 skbt = skb; 558 skbt = skb;
557 skb = skbn; 559 skb = skbn;
@@ -561,7 +563,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget)
561 skbn = dev_alloc_skb(ENET_RX_FRSIZE); 563 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
562 564
563 if (skbn != NULL) { 565 if (skbn != NULL) {
564 skb->dev = dev;
565 skb_put(skb, pkt_len); /* Make room */ 566 skb_put(skb, pkt_len); /* Make room */
566 skb->protocol = eth_type_trans(skb, dev); 567 skb->protocol = eth_type_trans(skb, dev);
567 received++; 568 received++;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d04214e4e581..7a018027fcc0 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1385,11 +1385,12 @@ static int nv_alloc_rx(struct net_device *dev)
1385 while (np->put_rx.orig != less_rx) { 1385 while (np->put_rx.orig != less_rx) {
1386 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1386 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1387 if (skb) { 1387 if (skb) {
1388 skb->dev = dev;
1389 np->put_rx_ctx->skb = skb; 1388 np->put_rx_ctx->skb = skb;
1390 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, 1389 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1391 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1390 skb->data,
1392 np->put_rx_ctx->dma_len = skb->end-skb->data; 1391 skb_tailroom(skb),
1392 PCI_DMA_FROMDEVICE);
1393 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1393 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1394 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1394 wmb(); 1395 wmb();
1395 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1396 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
@@ -1416,11 +1417,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1416 while (np->put_rx.ex != less_rx) { 1417 while (np->put_rx.ex != less_rx) {
1417 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1418 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1418 if (skb) { 1419 if (skb) {
1419 skb->dev = dev;
1420 np->put_rx_ctx->skb = skb; 1420 np->put_rx_ctx->skb = skb;
1421 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, 1421 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1422 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1422 skb->data,
1423 np->put_rx_ctx->dma_len = skb->end-skb->data; 1423 skb_tailroom(skb),
1424 PCI_DMA_FROMDEVICE);
1425 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1424 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; 1426 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1425 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; 1427 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1426 wmb(); 1428 wmb();
@@ -1604,8 +1606,9 @@ static void nv_drain_rx(struct net_device *dev)
1604 wmb(); 1606 wmb();
1605 if (np->rx_skb[i].skb) { 1607 if (np->rx_skb[i].skb) {
1606 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1608 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1607 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data, 1609 (skb_end_pointer(np->rx_skb[i].skb) -
1608 PCI_DMA_FROMDEVICE); 1610 np->rx_skb[i].skb->data),
1611 PCI_DMA_FROMDEVICE);
1609 dev_kfree_skb(np->rx_skb[i].skb); 1612 dev_kfree_skb(np->rx_skb[i].skb);
1610 np->rx_skb[i].skb = NULL; 1613 np->rx_skb[i].skb = NULL;
1611 } 1614 }
@@ -4376,11 +4379,12 @@ static int nv_loopback_test(struct net_device *dev)
4376 ret = 0; 4379 ret = 0;
4377 goto out; 4380 goto out;
4378 } 4381 }
4382 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4383 skb_tailroom(tx_skb),
4384 PCI_DMA_FROMDEVICE);
4379 pkt_data = skb_put(tx_skb, pkt_len); 4385 pkt_data = skb_put(tx_skb, pkt_len);
4380 for (i = 0; i < pkt_len; i++) 4386 for (i = 0; i < pkt_len; i++)
4381 pkt_data[i] = (u8)(i & 0xff); 4387 pkt_data[i] = (u8)(i & 0xff);
4382 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4383 tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
4384 4388
4385 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4389 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4386 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4390 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
@@ -4437,7 +4441,7 @@ static int nv_loopback_test(struct net_device *dev)
4437 } 4441 }
4438 4442
4439 pci_unmap_page(np->pci_dev, test_dma_addr, 4443 pci_unmap_page(np->pci_dev, test_dma_addr,
4440 tx_skb->end-tx_skb->data, 4444 (skb_end_pointer(tx_skb) - tx_skb->data),
4441 PCI_DMA_TODEVICE); 4445 PCI_DMA_TODEVICE);
4442 dev_kfree_skb_any(tx_skb); 4446 dev_kfree_skb_any(tx_skb);
4443 out: 4447 out:
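The forcedeth hunks size the rx DMA mappings with skb_tailroom()/skb_end_pointer() instead of open-coded skb->end - skb->data, and in nv_loopback_test() the mapping is taken before skb_put() so the tailroom still covers the whole buffer. A small sketch of the mapping call, assuming the same PCI DMA API; demo_map_rx() is illustrative:

        #include <linux/pci.h>
        #include <linux/skbuff.h>

        static dma_addr_t demo_map_rx(struct pci_dev *pdev, struct sk_buff *skb)
        {
                /* before any skb_put(), tailroom spans the whole data area,
                 * i.e. skb_end_pointer(skb) - skb->data */
                return pci_map_single(pdev, skb->data, skb_tailroom(skb),
                                      PCI_DMA_FROMDEVICE);
        }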
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 4a05c14bf7ec..e2ddd617493a 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -160,7 +160,8 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
160 skbn = dev_alloc_skb(pkt_len + 2); 160 skbn = dev_alloc_skb(pkt_len + 2);
161 if (skbn != NULL) { 161 if (skbn != NULL) {
162 skb_reserve(skbn, 2); /* align IP header */ 162 skb_reserve(skbn, 2); /* align IP header */
163 memcpy(skbn->data, skb->data, pkt_len); 163 skb_copy_from_linear_data(skb,
164 skbn->data, pkt_len);
164 /* swap */ 165 /* swap */
165 skbt = skb; 166 skbt = skb;
166 skb = skbn; 167 skb = skbn;
@@ -170,7 +171,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
170 skbn = dev_alloc_skb(ENET_RX_FRSIZE); 171 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
171 172
172 if (skbn != NULL) { 173 if (skbn != NULL) {
173 skb->dev = dev;
174 skb_put(skb, pkt_len); /* Make room */ 174 skb_put(skb, pkt_len); /* Make room */
175 skb->protocol = eth_type_trans(skb, dev); 175 skb->protocol = eth_type_trans(skb, dev);
176 received++; 176 received++;
@@ -294,7 +294,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
294 skbn = dev_alloc_skb(pkt_len + 2); 294 skbn = dev_alloc_skb(pkt_len + 2);
295 if (skbn != NULL) { 295 if (skbn != NULL) {
296 skb_reserve(skbn, 2); /* align IP header */ 296 skb_reserve(skbn, 2); /* align IP header */
297 memcpy(skbn->data, skb->data, pkt_len); 297 skb_copy_from_linear_data(skb,
298 skbn->data, pkt_len);
298 /* swap */ 299 /* swap */
299 skbt = skb; 300 skbt = skb;
300 skb = skbn; 301 skb = skbn;
@@ -304,7 +305,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
304 skbn = dev_alloc_skb(ENET_RX_FRSIZE); 305 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
305 306
306 if (skbn != NULL) { 307 if (skbn != NULL) {
307 skb->dev = dev;
308 skb_put(skb, pkt_len); /* Make room */ 308 skb_put(skb, pkt_len); /* Make room */
309 skb->protocol = eth_type_trans(skb, dev); 309 skb->protocol = eth_type_trans(skb, dev);
310 received++; 310 received++;
@@ -516,7 +516,6 @@ void fs_init_bds(struct net_device *dev)
516 break; 516 break;
517 } 517 }
518 fep->rx_skbuff[i] = skb; 518 fep->rx_skbuff[i] = skb;
519 skb->dev = dev;
520 CBDW_BUFADDR(bdp, 519 CBDW_BUFADDR(bdp,
521 dma_map_single(fep->dev, skb->data, 520 dma_map_single(fep->dev, skb->data,
522 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 521 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d981d4c41dd3..b666a0cc0642 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -942,18 +942,18 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
942 942
943 /* Tell the controller what the protocol is */ 943 /* Tell the controller what the protocol is */
944 /* And provide the already calculated phcs */ 944 /* And provide the already calculated phcs */
945 if (skb->nh.iph->protocol == IPPROTO_UDP) { 945 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
946 flags |= TXFCB_UDP; 946 flags |= TXFCB_UDP;
947 fcb->phcs = skb->h.uh->check; 947 fcb->phcs = udp_hdr(skb)->check;
948 } else 948 } else
 949 fcb->phcs = skb->h.th->check; 949 fcb->phcs = tcp_hdr(skb)->check;
950 950
951 /* l3os is the distance between the start of the 951 /* l3os is the distance between the start of the
952 * frame (skb->data) and the start of the IP hdr. 952 * frame (skb->data) and the start of the IP hdr.
953 * l4os is the distance between the start of the 953 * l4os is the distance between the start of the
954 * l3 hdr and the l4 hdr */ 954 * l3 hdr and the l4 hdr */
955 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); 955 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
956 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); 956 fcb->l4os = skb_network_header_len(skb);
957 957
958 fcb->flags = flags; 958 fcb->flags = flags;
959} 959}
@@ -1295,8 +1295,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1295 */ 1295 */
1296 skb_reserve(skb, alignamount); 1296 skb_reserve(skb, alignamount);
1297 1297
1298 skb->dev = dev;
1299
1300 bdp->bufPtr = dma_map_single(NULL, skb->data, 1298 bdp->bufPtr = dma_map_single(NULL, skb->data,
1301 priv->rx_buffer_size, DMA_FROM_DEVICE); 1299 priv->rx_buffer_size, DMA_FROM_DEVICE);
1302 1300
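After the conversion, gfar_tx_checksum() fills the FCB offsets from skb_network_offset() and skb_network_header_len() rather than subtracting raw header pointers. A minimal sketch of those two fields; demo_csum_offsets() and its fcb_len parameter are illustrative stand-ins for the GMAC_FCB_LEN handling:

        #include <linux/skbuff.h>

        static void demo_csum_offsets(const struct sk_buff *skb, unsigned int fcb_len,
                                      u16 *l3os, u16 *l4os)
        {
                *l3os = skb_network_offset(skb) - fcb_len;  /* frame start to IP hdr  */
                *l4os = skb_network_header_len(skb);        /* IP hdr to L4 hdr       */
        }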
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index c3c0d67fc383..2521b111b3a5 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1568,7 +1568,6 @@ static int hamachi_rx(struct net_device *dev)
1568 printk(KERN_ERR "%s: rx_copybreak non-zero " 1568 printk(KERN_ERR "%s: rx_copybreak non-zero "
1569 "not good with RX_CHECKSUM\n", dev->name); 1569 "not good with RX_CHECKSUM\n", dev->name);
1570#endif 1570#endif
1571 skb->dev = dev;
1572 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1571 skb_reserve(skb, 2); /* 16 byte align the IP header */
1573 pci_dma_sync_single_for_cpu(hmp->pci_dev, 1572 pci_dma_sync_single_for_cpu(hmp->pci_dev,
1574 hmp->rx_ring[entry].addr, 1573 hmp->rx_ring[entry].addr,
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 59214e74b9cf..30baf6ecfc63 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -75,12 +75,14 @@
75#include <linux/ioport.h> 75#include <linux/ioport.h>
76#include <linux/string.h> 76#include <linux/string.h>
77#include <linux/init.h> 77#include <linux/init.h>
78#include <asm/uaccess.h>
79#include <asm/io.h>
80#include <linux/hdlcdrv.h> 78#include <linux/hdlcdrv.h>
81#include <linux/baycom.h> 79#include <linux/baycom.h>
82#include <linux/jiffies.h> 80#include <linux/jiffies.h>
83 81
82#include <asm/uaccess.h>
83#include <asm/io.h>
84#include <asm/irq.h>
85
84/* --------------------------------------------------------------------- */ 86/* --------------------------------------------------------------------- */
85 87
86#define BAYCOM_DEBUG 88#define BAYCOM_DEBUG
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d2542697e298..656f2789c9ba 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -282,7 +282,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
282 } 282 }
283 283
284 skb->protocol = ax25_type_trans(skb, dev); 284 skb->protocol = ax25_type_trans(skb, dev);
285 skb->nh.raw = skb->data; 285 skb_reset_network_header(skb);
286 dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); 286 dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
287 bpq->stats.tx_packets++; 287 bpq->stats.tx_packets++;
288 bpq->stats.tx_bytes+=skb->len; 288 bpq->stats.tx_bytes+=skb->len;
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 0fbb414b5a4d..3be8c5047599 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -930,7 +930,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
930 930
931 /* Transfer data to DMA buffer */ 931 /* Transfer data to DMA buffer */
932 i = priv->tx_head; 932 i = priv->tx_head;
933 memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1); 933 skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
934 priv->tx_len[i] = skb->len - 1; 934 priv->tx_len[i] = skb->len - 1;
935 935
936 /* Clear interrupts while we touch our circular buffers */ 936 /* Clear interrupts while we touch our circular buffers */
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index f5a17ad9d3d6..b33adc6a340b 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -317,7 +317,9 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
317 dev_kfree_skb_irq(skb); 317 dev_kfree_skb_irq(skb);
318 break; 318 break;
319 } 319 }
320 memcpy(s->hdlctx.buffer, skb->data+1, pkt_len); 320 skb_copy_from_linear_data_offset(skb, 1,
321 s->hdlctx.buffer,
322 pkt_len);
321 dev_kfree_skb_irq(skb); 323 dev_kfree_skb_irq(skb);
322 s->hdlctx.bp = s->hdlctx.buffer; 324 s->hdlctx.bp = s->hdlctx.buffer;
323 append_crc_ccitt(s->hdlctx.buffer, pkt_len); 325 append_crc_ccitt(s->hdlctx.buffer, pkt_len);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index ee3ea4fa729f..467559debfd6 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -638,7 +638,9 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
638 dev_kfree_skb_any(skb); 638 dev_kfree_skb_any(skb);
639 break; 639 break;
640 } 640 }
641 memcpy(yp->tx_buf, skb->data + 1, yp->tx_len); 641 skb_copy_from_linear_data_offset(skb, 1,
642 yp->tx_buf,
643 yp->tx_len);
642 dev_kfree_skb_any(skb); 644 dev_kfree_skb_any(skb);
643 yp->tx_count = 0; 645 yp->tx_count = 0;
644 yp->tx_crcl = 0x21; 646 yp->tx_crcl = 0x21;
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 7dc5185aa2c0..8118a6750b61 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1816,7 +1816,6 @@ static void hp100_rx(struct net_device *dev)
1816 u_char *ptr; 1816 u_char *ptr;
1817 1817
1818 skb_reserve(skb,2); 1818 skb_reserve(skb,2);
1819 skb->dev = dev;
1820 1819
1821 /* ptr to start of the sk_buff data area */ 1820 /* ptr to start of the sk_buff data area */
1822 skb_put(skb, pkt_len); 1821 skb_put(skb, pkt_len);
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index dd8ad8746825..3d82d46f4998 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1338,7 +1338,7 @@ static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1338 dev_kfree_skb(dev->rx_sg_skb); 1338 dev_kfree_skb(dev->rx_sg_skb);
1339 dev->rx_sg_skb = NULL; 1339 dev->rx_sg_skb = NULL;
1340 } else { 1340 } else {
1341 cacheable_memcpy(dev->rx_sg_skb->tail, 1341 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1342 dev->rx_skb[slot]->data, len); 1342 dev->rx_skb[slot]->data, len);
1343 skb_put(dev->rx_sg_skb, len); 1343 skb_put(dev->rx_sg_skb, len);
1344 emac_recycle_rx_skb(dev, slot, len); 1344 emac_recycle_rx_skb(dev, slot, len);
@@ -1398,7 +1398,6 @@ static int emac_poll_rx(void *param, int budget)
1398 1398
1399 skb_put(skb, len); 1399 skb_put(skb, len);
1400 push_packet: 1400 push_packet:
1401 skb->dev = dev->ndev;
1402 skb->protocol = eth_type_trans(skb, dev->ndev); 1401 skb->protocol = eth_type_trans(skb, dev->ndev);
1403 emac_rx_csum(dev, skb, ctrl); 1402 emac_rx_csum(dev, skb, ctrl);
1404 1403
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 3f946c811511..fe85d6fcba33 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -601,7 +601,6 @@ static void irqrx_handler(struct net_device *dev)
601 601
602 /* set up skb fields */ 602 /* set up skb fields */
603 603
604 skb->dev = dev;
605 skb->protocol = eth_type_trans(skb, dev); 604 skb->protocol = eth_type_trans(skb, dev);
606 skb->ip_summed = CHECKSUM_NONE; 605 skb->ip_summed = CHECKSUM_NONE;
607 606
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 458db0538a9a..0573fcfcb2c4 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -798,7 +798,6 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
798 798
799 skb_reserve(skb, offset); 799 skb_reserve(skb, offset);
800 skb_put(skb, length); 800 skb_put(skb, length);
801 skb->dev = netdev;
802 skb->protocol = eth_type_trans(skb, netdev); 801 skb->protocol = eth_type_trans(skb, netdev);
803 802
804 netif_receive_skb(skb); /* send it up */ 803 netif_receive_skb(skb); /* send it up */
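
The ibm_emac, ibmlana and ibmveth hunks above (and many similar deletions throughout this merge) simply drop "skb->dev = dev;" on the receive path. Where eth_type_trans() follows, the assignment is redundant because eth_type_trans() records the receiving device itself; abridged and simplified, with the protocol classification elided, it looks roughly like:

__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct ethhdr *eth;

        skb->dev = dev;                 /* this is what makes the deleted lines redundant */
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
        eth = eth_hdr(skb);

        /* multicast/broadcast handling and protocol classification elided;
         * the common Ethernet II case simply returns the frame type */
        return eth->h_proto;
}
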
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 4ad780719a84..f749e07c6425 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -633,8 +633,6 @@ static inline void ioc3_rx(struct ioc3_private *ip)
633 633
634 ip->rx_skbs[rx_entry] = NULL; /* Poison */ 634 ip->rx_skbs[rx_entry] = NULL; /* Poison */
635 635
636 new_skb->dev = priv_netdev(ip);
637
638 /* Because we reserve afterwards. */ 636 /* Because we reserve afterwards. */
639 skb_put(new_skb, (1664 + RX_OFFSET)); 637 skb_put(new_skb, (1664 + RX_OFFSET));
640 rxb = (struct ioc3_erxbuf *) new_skb->data; 638 rxb = (struct ioc3_erxbuf *) new_skb->data;
@@ -940,7 +938,6 @@ static void ioc3_alloc_rings(struct net_device *dev)
940 } 938 }
941 939
942 ip->rx_skbs[i] = skb; 940 ip->rx_skbs[i] = skb;
943 skb->dev = dev;
944 941
945 /* Because we reserve afterwards. */ 942 /* Because we reserve afterwards. */
946 skb_put(skb, (1664 + RX_OFFSET)); 943 skb_put(skb, (1664 + RX_OFFSET));
@@ -1396,9 +1393,9 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1396 * manually. 1393 * manually.
1397 */ 1394 */
1398 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1395 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1399 int proto = ntohs(skb->nh.iph->protocol); 1396 const struct iphdr *ih = ip_hdr(skb);
1397 const int proto = ntohs(ih->protocol);
1400 unsigned int csoff; 1398 unsigned int csoff;
1401 struct iphdr *ih = skb->nh.iph;
1402 uint32_t csum, ehsum; 1399 uint32_t csum, ehsum;
1403 uint16_t *eh; 1400 uint16_t *eh;
1404 1401
@@ -1425,11 +1422,11 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1425 csoff = ETH_HLEN + (ih->ihl << 2); 1422 csoff = ETH_HLEN + (ih->ihl << 2);
1426 if (proto == IPPROTO_UDP) { 1423 if (proto == IPPROTO_UDP) {
1427 csoff += offsetof(struct udphdr, check); 1424 csoff += offsetof(struct udphdr, check);
1428 skb->h.uh->check = csum; 1425 udp_hdr(skb)->check = csum;
1429 } 1426 }
1430 if (proto == IPPROTO_TCP) { 1427 if (proto == IPPROTO_TCP) {
1431 csoff += offsetof(struct tcphdr, check); 1428 csoff += offsetof(struct tcphdr, check);
1432 skb->h.th->check = csum; 1429 tcp_hdr(skb)->check = csum;
1433 } 1430 }
1434 1431
1435 w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); 1432 w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
@@ -1446,7 +1443,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1446 1443
1447 if (len <= 104) { 1444 if (len <= 104) {
1448 /* Short packet, let's copy it directly into the ring. */ 1445 /* Short packet, let's copy it directly into the ring. */
1449 memcpy(desc->data, skb->data, skb->len); 1446 skb_copy_from_linear_data(skb, desc->data, skb->len);
1450 if (len < ETH_ZLEN) { 1447 if (len < ETH_ZLEN) {
1451 /* Very short packet, pad with zeros at the end. */ 1448 /* Very short packet, pad with zeros at the end. */
1452 memset(desc->data + len, 0, ETH_ZLEN - len); 1449 memset(desc->data + len, 0, ETH_ZLEN - len);
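
The ioc3 checksum hunk above shows the second recurring pattern in this merge: the old anonymous unions (skb->nh.iph, skb->h.th, skb->h.uh) give way to typed accessors. For the pointer-based layout these accessors reduce, approximately, to casts of the stored header pointers:

static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
{
        return (struct iphdr *)skb_network_header(skb);         /* was skb->nh.iph */
}

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
        return (struct tcphdr *)skb_transport_header(skb);      /* was skb->h.th */
}

static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
        return (struct udphdr *)skb_transport_header(skb);      /* was skb->h.uh */
}
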
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index cebf8c374bc5..f9c889c0dd07 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1472,9 +1472,8 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1472 1472
1473 self->stats.tx_bytes += skb->len; 1473 self->stats.tx_bytes += skb->len;
1474 1474
1475 memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, 1475 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
1476 skb->len); 1476 skb->len);
1477
1478 self->tx_fifo.len++; 1477 self->tx_fifo.len++;
1479 self->tx_fifo.free++; 1478 self->tx_fifo.free++;
1480 1479
@@ -1924,7 +1923,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1924 1923
1925 /* Copy frame without CRC, CRC is removed by hardware*/ 1924 /* Copy frame without CRC, CRC is removed by hardware*/
1926 skb_put(skb, len); 1925 skb_put(skb, len);
1927 memcpy(skb->data, self->rx_buff.data, len); 1926 skb_copy_to_linear_data(skb, self->rx_buff.data, len);
1928 1927
1929 /* Move to next frame */ 1928 /* Move to next frame */
1930 self->rx_buff.data += len; 1929 self->rx_buff.data += len;
@@ -1932,7 +1931,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1932 self->stats.rx_packets++; 1931 self->stats.rx_packets++;
1933 1932
1934 skb->dev = self->netdev; 1933 skb->dev = self->netdev;
1935 skb->mac.raw = skb->data; 1934 skb_reset_mac_header(skb);
1936 skb->protocol = htons(ETH_P_IRDA); 1935 skb->protocol = htons(ETH_P_IRDA);
1937 netif_rx(skb); 1936 netif_rx(skb);
1938 self->netdev->last_rx = jiffies; 1937 self->netdev->last_rx = jiffies;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 37914dc5b90e..4dbdfaaf37bf 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -526,7 +526,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
526 526
527 if (aup->speed == 4000000) { 527 if (aup->speed == 4000000) {
528 /* FIR */ 528 /* FIR */
529 memcpy((void *)pDB->vaddr, skb->data, skb->len); 529 skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
530 ptxd->count_0 = skb->len & 0xff; 530 ptxd->count_0 = skb->len & 0xff;
531 ptxd->count_1 = (skb->len >> 8) & 0xff; 531 ptxd->count_1 = (skb->len >> 8) & 0xff;
532 532
@@ -604,9 +604,9 @@ static int au1k_irda_rx(struct net_device *dev)
604 skb_put(skb, count); 604 skb_put(skb, count);
605 else 605 else
606 skb_put(skb, count-2); 606 skb_put(skb, count-2);
607 memcpy(skb->data, (void *)pDB->vaddr, count-2); 607 skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
608 skb->dev = dev; 608 skb->dev = dev;
609 skb->mac.raw = skb->data; 609 skb_reset_mac_header(skb);
610 skb->protocol = htons(ETH_P_IRDA); 610 skb->protocol = htons(ETH_P_IRDA);
611 netif_rx(skb); 611 netif_rx(skb);
612 prxd->count_0 = 0; 612 prxd->count_0 = 0;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 11af0ae7510e..3ca47bf6dfec 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1119,7 +1119,7 @@ dumpbufs(skb->data,skb->len,'>');
1119 else 1119 else
1120 { 1120 {
1121 len = skb->len; 1121 len = skb->len;
1122 memcpy (self->tx_bufs[self->txs], skb->data, len); 1122 skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len);
1123 } 1123 }
1124 self->ring->tx[self->txs].len = len & 0x0fff; 1124 self->ring->tx[self->txs].len = len & 0x0fff;
1125 1125
@@ -1282,11 +1282,11 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1282 skb_reserve (skb, 1); 1282 skb_reserve (skb, 1);
1283 1283
1284 skb_put (skb, len); 1284 skb_put (skb, len);
1285 memcpy (skb->data, self->rx_bufs[self->rxs], len); 1285 skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
1286 1286 len);
1287 self->stats.rx_packets++; 1287 self->stats.rx_packets++;
1288 skb->dev = self->netdev; 1288 skb->dev = self->netdev;
1289 skb->mac.raw = skb->data; 1289 skb_reset_mac_header(skb);
1290 skb->protocol = htons (ETH_P_IRDA); 1290 skb->protocol = htons (ETH_P_IRDA);
1291 } 1291 }
1292 else 1292 else
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 1d510bdc9b84..0ac240ca905b 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -441,7 +441,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
441 goto drop; 441 goto drop;
442 } 442 }
443 443
444 memcpy(self->tx_buff + self->header_length, skb->data, skb->len); 444 skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len);
445 445
446 /* Change setting for next frame */ 446 /* Change setting for next frame */
447 if (self->capability & IUC_STIR421X) { 447 if (self->capability & IUC_STIR421X) {
@@ -902,7 +902,7 @@ static void irda_usb_receive(struct urb *urb)
902 902
903 if(docopy) { 903 if(docopy) {
904 /* Copy packet, so we can recycle the original */ 904 /* Copy packet, so we can recycle the original */
905 memcpy(newskb->data, skb->data, urb->actual_length); 905 skb_copy_from_linear_data(skb, newskb->data, urb->actual_length);
906 /* Deliver this new skb */ 906 /* Deliver this new skb */
907 dataskb = newskb; 907 dataskb = newskb;
908 /* And hook the old skb to the URB 908 /* And hook the old skb to the URB
@@ -921,7 +921,7 @@ static void irda_usb_receive(struct urb *urb)
921 921
922 /* Ask the networking layer to queue the packet for the IrDA stack */ 922 /* Ask the networking layer to queue the packet for the IrDA stack */
923 dataskb->dev = self->netdev; 923 dataskb->dev = self->netdev;
924 dataskb->mac.raw = dataskb->data; 924 skb_reset_mac_header(dataskb);
925 dataskb->protocol = htons(ETH_P_IRDA); 925 dataskb->protocol = htons(ETH_P_IRDA);
926 len = dataskb->len; 926 len = dataskb->len;
927 netif_rx(dataskb); 927 netif_rx(dataskb);
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index f0c61f3b2a82..0de867288a47 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -200,14 +200,14 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
200/* Setup a communication between mcs7780 and agilent chip. */ 200/* Setup a communication between mcs7780 and agilent chip. */
201static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs) 201static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
202{ 202{
203 IRDA_WARNING("This transceiver type is not supported yet."); 203 IRDA_WARNING("This transceiver type is not supported yet.\n");
204 return 1; 204 return 1;
205} 205}
206 206
207/* Setup a communication between mcs7780 and sharp chip. */ 207/* Setup a communication between mcs7780 and sharp chip. */
208static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs) 208static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
209{ 209{
210 IRDA_WARNING("This transceiver type is not supported yet."); 210 IRDA_WARNING("This transceiver type is not supported yet.\n");
211 return 1; 211 return 1;
212} 212}
213 213
@@ -279,7 +279,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
279 break; 279 break;
280 280
281 default: 281 default:
282 IRDA_WARNING("Unknown transceiver type: %d", 282 IRDA_WARNING("Unknown transceiver type: %d\n",
283 mcs->transceiver_type); 283 mcs->transceiver_type);
284 ret = 1; 284 ret = 1;
285 } 285 }
@@ -318,7 +318,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
318 return ret; 318 return ret;
319 319
320error: 320error:
321 IRDA_ERROR("%s", msg); 321 IRDA_ERROR("%s\n", msg);
322 return ret; 322 return ret;
323} 323}
324 324
@@ -353,7 +353,7 @@ static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
353 buf[0] = len & 0xff; 353 buf[0] = len & 0xff;
354 buf[1] = (len >> 8) & 0xff; 354 buf[1] = (len >> 8) & 0xff;
355 /* copy the data into the tx buffer. */ 355 /* copy the data into the tx buffer. */
356 memcpy(buf+2, skb->data, skb->len); 356 skb_copy_from_linear_data(skb, buf + 2, skb->len);
357 /* put the fcs in the last four bytes in little endian order. */ 357 /* put the fcs in the last four bytes in little endian order. */
358 buf[len - 4] = fcs & 0xff; 358 buf[len - 4] = fcs & 0xff;
359 buf[len - 3] = (fcs >> 8) & 0xff; 359 buf[len - 3] = (fcs >> 8) & 0xff;
@@ -377,7 +377,7 @@ static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
377 buf[0] = len & 0xff; 377 buf[0] = len & 0xff;
378 buf[1] = (len >> 8) & 0xff; 378 buf[1] = (len >> 8) & 0xff;
379 /* copy the data */ 379 /* copy the data */
380 memcpy(buf+2, skb->data, skb->len); 380 skb_copy_from_linear_data(skb, buf + 2, skb->len);
381 /* put the fcs in last two bytes in little endian order. */ 381 /* put the fcs in last two bytes in little endian order. */
382 buf[len - 2] = fcs & 0xff; 382 buf[len - 2] = fcs & 0xff;
383 buf[len - 1] = (fcs >> 8) & 0xff; 383 buf[len - 1] = (fcs >> 8) & 0xff;
@@ -426,9 +426,9 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
426 } 426 }
427 427
428 skb_reserve(skb, 1); 428 skb_reserve(skb, 1);
429 memcpy(skb->data, buf, new_len); 429 skb_copy_to_linear_data(skb, buf, new_len);
430 skb_put(skb, new_len); 430 skb_put(skb, new_len);
431 skb->mac.raw = skb->data; 431 skb_reset_mac_header(skb);
432 skb->protocol = htons(ETH_P_IRDA); 432 skb->protocol = htons(ETH_P_IRDA);
433 skb->dev = mcs->netdev; 433 skb->dev = mcs->netdev;
434 434
@@ -479,9 +479,9 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
479 } 479 }
480 480
481 skb_reserve(skb, 1); 481 skb_reserve(skb, 1);
482 memcpy(skb->data, buf, new_len); 482 skb_copy_to_linear_data(skb, buf, new_len);
483 skb_put(skb, new_len); 483 skb_put(skb, new_len);
484 skb->mac.raw = skb->data; 484 skb_reset_mac_header(skb);
485 skb->protocol = htons(ETH_P_IRDA); 485 skb->protocol = htons(ETH_P_IRDA);
486 skb->dev = mcs->netdev; 486 skb->dev = mcs->netdev;
487 487
@@ -587,7 +587,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
587 } while(cnt++ < 100 && (rval & MCS_IRINTX)); 587 } while(cnt++ < 100 && (rval & MCS_IRINTX));
588 588
589 if(cnt >= 100) { 589 if(cnt >= 100) {
590 IRDA_ERROR("unable to change speed"); 590 IRDA_ERROR("unable to change speed\n");
591 ret = -EIO; 591 ret = -EIO;
592 goto error; 592 goto error;
593 } 593 }
@@ -638,7 +638,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
638 638
639 default: 639 default:
640 ret = 1; 640 ret = 1;
641 IRDA_WARNING("Unknown transceiver type: %d", 641 IRDA_WARNING("Unknown transceiver type: %d\n",
642 mcs->transceiver_type); 642 mcs->transceiver_type);
643 } 643 }
644 if (unlikely(ret)) 644 if (unlikely(ret))
@@ -733,7 +733,7 @@ static int mcs_net_open(struct net_device *netdev)
733 sprintf(hwname, "usb#%d", mcs->usbdev->devnum); 733 sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
734 mcs->irlap = irlap_open(netdev, &mcs->qos, hwname); 734 mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
735 if (!mcs->irlap) { 735 if (!mcs->irlap) {
736 IRDA_ERROR("mcs7780: irlap_open failed"); 736 IRDA_ERROR("mcs7780: irlap_open failed\n");
737 goto error2; 737 goto error2;
738 } 738 }
739 739
@@ -862,7 +862,7 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
862 mcs->out_buf, wraplen, mcs_send_irq, mcs); 862 mcs->out_buf, wraplen, mcs_send_irq, mcs);
863 863
864 if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) { 864 if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
865 IRDA_ERROR("failed tx_urb: %d", ret); 865 IRDA_ERROR("failed tx_urb: %d\n", ret);
866 switch (ret) { 866 switch (ret) {
867 case -ENODEV: 867 case -ENODEV:
868 case -EPIPE: 868 case -EPIPE:
@@ -897,7 +897,7 @@ static int mcs_probe(struct usb_interface *intf,
897 if (!ndev) 897 if (!ndev)
898 goto error1; 898 goto error1;
899 899
900 IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.", udev->devnum); 900 IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
901 901
902 /* what is it realy for? */ 902 /* what is it realy for? */
903 SET_MODULE_OWNER(ndev); 903 SET_MODULE_OWNER(ndev);
@@ -905,7 +905,7 @@ static int mcs_probe(struct usb_interface *intf,
905 905
906 ret = usb_reset_configuration(udev); 906 ret = usb_reset_configuration(udev);
907 if (ret != 0) { 907 if (ret != 0) {
908 IRDA_ERROR("mcs7780: usb reset configuration failed"); 908 IRDA_ERROR("mcs7780: usb reset configuration failed\n");
909 goto error2; 909 goto error2;
910 } 910 }
911 911
@@ -950,7 +950,7 @@ static int mcs_probe(struct usb_interface *intf,
950 if (ret != 0) 950 if (ret != 0)
951 goto error2; 951 goto error2;
952 952
953 IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s", 953 IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n",
954 ndev->name); 954 ndev->name);
955 955
956 mcs->transceiver_type = transceiver_type; 956 mcs->transceiver_type = transceiver_type;
@@ -981,7 +981,7 @@ static void mcs_disconnect(struct usb_interface *intf)
981 free_netdev(mcs->netdev); 981 free_netdev(mcs->netdev);
982 982
983 usb_set_intfdata(intf, NULL); 983 usb_set_intfdata(intf, NULL);
984 IRDA_DEBUG(0, "MCS7780 now disconnected."); 984 IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
985} 985}
986 986
987/* Module insertion */ 987/* Module insertion */
@@ -992,7 +992,7 @@ static int __init mcs_init(void)
992 /* register this driver with the USB subsystem */ 992 /* register this driver with the USB subsystem */
993 result = usb_register(&mcs_driver); 993 result = usb_register(&mcs_driver);
994 if (result) 994 if (result)
995 IRDA_ERROR("usb_register failed. Error number %d", result); 995 IRDA_ERROR("usb_register failed. Error number %d\n", result);
996 996
997 return result; 997 return result;
998} 998}
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 29b5ccd29d0b..d96c89751a71 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1466,9 +1466,8 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1466 1466
1467 self->stats.tx_bytes += skb->len; 1467 self->stats.tx_bytes += skb->len;
1468 1468
1469 memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, 1469 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
1470 skb->len); 1470 skb->len);
1471
1472 self->tx_fifo.len++; 1471 self->tx_fifo.len++;
1473 self->tx_fifo.free++; 1472 self->tx_fifo.free++;
1474 1473
@@ -1869,10 +1868,14 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1869 /* Copy frame without CRC */ 1868 /* Copy frame without CRC */
1870 if (self->io.speed < 4000000) { 1869 if (self->io.speed < 4000000) {
1871 skb_put(skb, len-2); 1870 skb_put(skb, len-2);
1872 memcpy(skb->data, self->rx_buff.data, len-2); 1871 skb_copy_to_linear_data(skb,
1872 self->rx_buff.data,
1873 len - 2);
1873 } else { 1874 } else {
1874 skb_put(skb, len-4); 1875 skb_put(skb, len-4);
1875 memcpy(skb->data, self->rx_buff.data, len-4); 1876 skb_copy_to_linear_data(skb,
1877 self->rx_buff.data,
1878 len - 4);
1876 } 1879 }
1877 1880
1878 /* Move to next frame */ 1881 /* Move to next frame */
@@ -1881,7 +1884,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1881 self->stats.rx_packets++; 1884 self->stats.rx_packets++;
1882 1885
1883 skb->dev = self->netdev; 1886 skb->dev = self->netdev;
1884 skb->mac.raw = skb->data; 1887 skb_reset_mac_header(skb);
1885 skb->protocol = htons(ETH_P_IRDA); 1888 skb->protocol = htons(ETH_P_IRDA);
1886 netif_rx(skb); 1889 netif_rx(skb);
1887 self->netdev->last_rx = jiffies; 1890 self->netdev->last_rx = jiffies;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 2272156af31e..fb196fd91855 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -386,12 +386,12 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
386 386
387 /* Align IP header to 20 bytes */ 387 /* Align IP header to 20 bytes */
388 skb_reserve(skb, 1); 388 skb_reserve(skb, 1);
389 memcpy(skb->data, si->dma_rx_buff, len); 389 skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
390 skb_put(skb, len); 390 skb_put(skb, len);
391 391
392 /* Feed it to IrLAP */ 392 /* Feed it to IrLAP */
393 skb->dev = dev; 393 skb->dev = dev;
394 skb->mac.raw = skb->data; 394 skb_reset_mac_header(skb);
395 skb->protocol = htons(ETH_P_IRDA); 395 skb->protocol = htons(ETH_P_IRDA);
396 netif_rx(skb); 396 netif_rx(skb);
397 397
@@ -484,7 +484,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
484 unsigned long mtt = irda_get_mtt(skb); 484 unsigned long mtt = irda_get_mtt(skb);
485 485
486 si->dma_tx_buff_len = skb->len; 486 si->dma_tx_buff_len = skb->len;
487 memcpy(si->dma_tx_buff, skb->data, skb->len); 487 skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
488 488
489 if (mtt) 489 if (mtt)
490 while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) 490 while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 937372d00398..056639f72bec 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -504,7 +504,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
504 504
505 skb_put(skb, len); 505 skb_put(skb, len);
506 skb->dev = dev; 506 skb->dev = dev;
507 skb->mac.raw = skb->data; 507 skb_reset_mac_header(skb);
508 skb->protocol = htons(ETH_P_IRDA); 508 skb->protocol = htons(ETH_P_IRDA);
509 si->stats.rx_packets++; 509 si->stats.rx_packets++;
510 si->stats.rx_bytes += len; 510 si->stats.rx_bytes += len;
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 31c623381ea8..198bf3bfa70f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -315,6 +315,7 @@ static struct smsc_chip __initdata lpc_chips_flat[] =
315{ 315{
316 /* Base address 0x2E or 0x4E */ 316 /* Base address 0x2E or 0x4E */
317 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, 317 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
318 { "47N227", KEY55_1|FIR|SERx4, 0x7a, 0x00 },
318 { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 }, 319 { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 },
319 { NULL } 320 { NULL }
320}; 321};
@@ -1161,7 +1162,7 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1161 self->new_speed = speed; 1162 self->new_speed = speed;
1162 } 1163 }
1163 1164
1164 memcpy(self->tx_buff.head, skb->data, skb->len); 1165 skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len);
1165 1166
1166 self->tx_buff.len = skb->len; 1167 self->tx_buff.len = skb->len;
1167 self->tx_buff.data = self->tx_buff.head; 1168 self->tx_buff.data = self->tx_buff.head;
@@ -1412,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1412 self->stats.rx_bytes += len; 1413 self->stats.rx_bytes += len;
1413 1414
1414 skb->dev = self->netdev; 1415 skb->dev = self->netdev;
1415 skb->mac.raw = skb->data; 1416 skb_reset_mac_header(skb);
1416 skb->protocol = htons(ETH_P_IRDA); 1417 skb->protocol = htons(ETH_P_IRDA);
1417 netif_rx(skb); 1418 netif_rx(skb);
1418} 1419}
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 20d306fea4cb..755aa444a4dd 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -52,7 +52,6 @@
52#include <linux/kthread.h> 52#include <linux/kthread.h>
53#include <linux/freezer.h> 53#include <linux/freezer.h>
54#include <net/irda/irda.h> 54#include <net/irda/irda.h>
55#include <net/irda/irlap.h>
56#include <net/irda/irda_device.h> 55#include <net/irda/irda_device.h>
57#include <net/irda/wrapper.h> 56#include <net/irda/wrapper.h>
58#include <net/irda/crc.h> 57#include <net/irda/crc.h>
@@ -349,7 +348,7 @@ static void fir_eof(struct stir_cb *stir)
349 } 348 }
350 skb_reserve(nskb, 1); 349 skb_reserve(nskb, 1);
351 skb = nskb; 350 skb = nskb;
352 memcpy(nskb->data, rx_buff->data, len); 351 skb_copy_to_linear_data(nskb, rx_buff->data, len);
353 } else { 352 } else {
354 nskb = dev_alloc_skb(rx_buff->truesize); 353 nskb = dev_alloc_skb(rx_buff->truesize);
355 if (unlikely(!nskb)) { 354 if (unlikely(!nskb)) {
@@ -364,7 +363,7 @@ static void fir_eof(struct stir_cb *stir)
364 363
365 skb_put(skb, len); 364 skb_put(skb, len);
366 365
367 skb->mac.raw = skb->data; 366 skb_reset_mac_header(skb);
368 skb->protocol = htons(ETH_P_IRDA); 367 skb->protocol = htons(ETH_P_IRDA);
369 skb->dev = stir->netdev; 368 skb->dev = stir->netdev;
370 369
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index c3ed9b3067e5..ff5358574d0a 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -925,8 +925,8 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
925 925
926 self->tx_fifo.tail += skb->len; 926 self->tx_fifo.tail += skb->len;
927 self->stats.tx_bytes += skb->len; 927 self->stats.tx_bytes += skb->len;
928 memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, 928 skb_copy_from_linear_data(skb,
929 skb->len); 929 self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
930 self->tx_fifo.len++; 930 self->tx_fifo.len++;
931 self->tx_fifo.free++; 931 self->tx_fifo.free++;
932//F01 if (self->tx_fifo.len == 1) { 932//F01 if (self->tx_fifo.len == 1) {
@@ -1125,7 +1125,7 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1125 self->stats.rx_bytes += len; 1125 self->stats.rx_bytes += len;
1126 self->stats.rx_packets++; 1126 self->stats.rx_packets++;
1127 skb->dev = self->netdev; 1127 skb->dev = self->netdev;
1128 skb->mac.raw = skb->data; 1128 skb_reset_mac_header(skb);
1129 skb->protocol = htons(ETH_P_IRDA); 1129 skb->protocol = htons(ETH_P_IRDA);
1130 netif_rx(skb); 1130 netif_rx(skb);
1131 return TRUE; 1131 return TRUE;
@@ -1189,7 +1189,7 @@ F01_E */
1189 skb_reserve(skb, 1); 1189 skb_reserve(skb, 1);
1190 skb_put(skb, len - 4); 1190 skb_put(skb, len - 4);
1191 1191
1192 memcpy(skb->data, self->rx_buff.data, len - 4); 1192 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1193 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, 1193 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__,
1194 len - 4, self->rx_buff.data); 1194 len - 4, self->rx_buff.data);
1195 1195
@@ -1198,7 +1198,7 @@ F01_E */
1198 self->stats.rx_bytes += len; 1198 self->stats.rx_bytes += len;
1199 self->stats.rx_packets++; 1199 self->stats.rx_packets++;
1200 skb->dev = self->netdev; 1200 skb->dev = self->netdev;
1201 skb->mac.raw = skb->data; 1201 skb_reset_mac_header(skb);
1202 skb->protocol = htons(ETH_P_IRDA); 1202 skb->protocol = htons(ETH_P_IRDA);
1203 netif_rx(skb); 1203 netif_rx(skb);
1204 1204
@@ -1234,7 +1234,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1234 } 1234 }
1235 skb_reserve(skb, 1); 1235 skb_reserve(skb, 1);
1236 skb_put(skb, len - 4 + 1); 1236 skb_put(skb, len - 4 + 1);
1237 memcpy(skb->data, self->rx_buff.data, len - 4 + 1); 1237 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
1238 st_fifo->tail++; 1238 st_fifo->tail++;
1239 st_fifo->len++; 1239 st_fifo->len++;
1240 if (st_fifo->tail > MAX_RX_WINDOW) 1240 if (st_fifo->tail > MAX_RX_WINDOW)
@@ -1244,7 +1244,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1244 self->stats.rx_bytes += len; 1244 self->stats.rx_bytes += len;
1245 self->stats.rx_packets++; 1245 self->stats.rx_packets++;
1246 skb->dev = self->netdev; 1246 skb->dev = self->netdev;
1247 skb->mac.raw = skb->data; 1247 skb_reset_mac_header(skb);
1248 skb->protocol = htons(ETH_P_IRDA); 1248 skb->protocol = htons(ETH_P_IRDA);
1249 netif_rx(skb); 1249 netif_rx(skb);
1250 if (st_fifo->len < (MAX_RX_WINDOW + 2)) { 1250 if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
@@ -1303,7 +1303,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1303 } 1303 }
1304 skb_reserve(skb, 1); 1304 skb_reserve(skb, 1);
1305 skb_put(skb, len - 4); 1305 skb_put(skb, len - 4);
1306 memcpy(skb->data, self->rx_buff.data, len - 4); 1306 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1307 1307
1308 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, 1308 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__,
1309 len - 4, st_fifo->head); 1309 len - 4, st_fifo->head);
@@ -1313,7 +1313,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1313 self->stats.rx_bytes += len; 1313 self->stats.rx_bytes += len;
1314 self->stats.rx_packets++; 1314 self->stats.rx_packets++;
1315 skb->dev = self->netdev; 1315 skb->dev = self->netdev;
1316 skb->mac.raw = skb->data; 1316 skb_reset_mac_header(skb);
1317 skb->protocol = htons(ETH_P_IRDA); 1317 skb->protocol = htons(ETH_P_IRDA);
1318 netif_rx(skb); 1318 netif_rx(skb);
1319 } //while 1319 } //while
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 3457e9d8b667..c4be973867a6 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -595,7 +595,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
595 rd->skb = NULL; 595 rd->skb = NULL;
596 skb->dev = ndev; 596 skb->dev = ndev;
597 memcpy(skb_put(skb,len), rd->buf, len); 597 memcpy(skb_put(skb,len), rd->buf, len);
598 skb->mac.raw = skb->data; 598 skb_reset_mac_header(skb);
599 if (in_interrupt()) 599 if (in_interrupt())
600 netif_rx(skb); 600 netif_rx(skb);
601 else 601 else
@@ -993,7 +993,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
993 goto drop; 993 goto drop;
994 } 994 }
995 else 995 else
996 memcpy(rd->buf, skb->data, len); 996 skb_copy_from_linear_data(skb, rd->buf, len);
997 } 997 }
998 998
999 rd->skb = skb; /* remember skb for tx-complete stats */ 999 rd->skb = skb; /* remember skb for tx-complete stats */
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 4212657fa4f9..5182e800cc18 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -529,7 +529,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
529 /* Decide if we should use PIO or DMA transfer */ 529 /* Decide if we should use PIO or DMA transfer */
530 if (self->io.speed > PIO_MAX_SPEED) { 530 if (self->io.speed > PIO_MAX_SPEED) {
531 self->tx_buff.data = self->tx_buff.head; 531 self->tx_buff.data = self->tx_buff.head;
532 memcpy(self->tx_buff.data, skb->data, skb->len); 532 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
533 self->tx_buff.len = skb->len; 533 self->tx_buff.len = skb->len;
534 534
535 mtt = irda_get_mtt(skb); 535 mtt = irda_get_mtt(skb);
@@ -908,10 +908,14 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
908 /* Copy frame without CRC */ 908 /* Copy frame without CRC */
909 if (self->io.speed < 4000000) { 909 if (self->io.speed < 4000000) {
910 skb_put(skb, len-2); 910 skb_put(skb, len-2);
911 memcpy(skb->data, self->rx_buff.data, len-2); 911 skb_copy_to_linear_data(skb,
912 self->rx_buff.data,
913 len - 2);
912 } else { 914 } else {
913 skb_put(skb, len-4); 915 skb_put(skb, len-4);
914 memcpy(skb->data, self->rx_buff.data, len-4); 916 skb_copy_to_linear_data(skb,
917 self->rx_buff.data,
918 len - 4);
915 } 919 }
916 920
917 /* Move to next frame */ 921 /* Move to next frame */
@@ -919,7 +923,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
919 self->stats.rx_packets++; 923 self->stats.rx_packets++;
920 924
921 skb->dev = self->netdev; 925 skb->dev = self->netdev;
922 skb->mac.raw = skb->data; 926 skb_reset_mac_header(skb);
923 skb->protocol = htons(ETH_P_IRDA); 927 skb->protocol = htons(ETH_P_IRDA);
924 netif_rx(skb); 928 netif_rx(skb);
925 self->netdev->last_rx = jiffies; 929 self->netdev->last_rx = jiffies;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 0e9ba3c3faf7..347d50cd77d4 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1540,7 +1540,6 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1540 } 1540 }
1541 1541
1542 skb_put(skb, length); 1542 skb_put(skb, length);
1543 skb->dev = dev;
1544 skb->protocol = eth_type_trans(skb, dev); 1543 skb->protocol = eth_type_trans(skb, dev);
1545 skb->ip_summed = CHECKSUM_NONE; 1544 skb->ip_summed = CHECKSUM_NONE;
1546 netif_rx(skb); /* send it up */ 1545 netif_rx(skb); /* send it up */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index afc2ec72529e..dfde80e54aef 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1182,24 +1182,27 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1182 1182
1183 if (likely(skb_is_gso(skb))) { 1183 if (likely(skb_is_gso(skb))) {
1184 struct ixgb_buffer *buffer_info; 1184 struct ixgb_buffer *buffer_info;
1185 struct iphdr *iph;
1186
1185 if (skb_header_cloned(skb)) { 1187 if (skb_header_cloned(skb)) {
1186 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1188 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1187 if (err) 1189 if (err)
1188 return err; 1190 return err;
1189 } 1191 }
1190 1192
1191 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 1193 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1192 mss = skb_shinfo(skb)->gso_size; 1194 mss = skb_shinfo(skb)->gso_size;
1193 skb->nh.iph->tot_len = 0; 1195 iph = ip_hdr(skb);
1194 skb->nh.iph->check = 0; 1196 iph->tot_len = 0;
1195 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, 1197 iph->check = 0;
1196 skb->nh.iph->daddr, 1198 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1197 0, IPPROTO_TCP, 0); 1199 iph->daddr, 0,
1198 ipcss = skb->nh.raw - skb->data; 1200 IPPROTO_TCP, 0);
1199 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 1201 ipcss = skb_network_offset(skb);
1200 ipcse = skb->h.raw - skb->data - 1; 1202 ipcso = (void *)&(iph->check) - (void *)skb->data;
1201 tucss = skb->h.raw - skb->data; 1203 ipcse = skb_transport_offset(skb) - 1;
1202 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 1204 tucss = skb_transport_offset(skb);
1205 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1203 tucse = 0; 1206 tucse = 0;
1204 1207
1205 i = adapter->tx_ring.next_to_use; 1208 i = adapter->tx_ring.next_to_use;
@@ -1243,7 +1246,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1243 1246
1244 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1247 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1245 struct ixgb_buffer *buffer_info; 1248 struct ixgb_buffer *buffer_info;
1246 css = skb->h.raw - skb->data; 1249 css = skb_transport_offset(skb);
1247 cso = css + skb->csum_offset; 1250 cso = css + skb->csum_offset;
1248 1251
1249 i = adapter->tx_ring.next_to_use; 1252 i = adapter->tx_ring.next_to_use;
@@ -2014,9 +2017,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
2014 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 2017 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
2015 if (new_skb) { 2018 if (new_skb) {
2016 skb_reserve(new_skb, NET_IP_ALIGN); 2019 skb_reserve(new_skb, NET_IP_ALIGN);
2017 memcpy(new_skb->data - NET_IP_ALIGN, 2020 skb_copy_to_linear_data_offset(new_skb,
2018 skb->data - NET_IP_ALIGN, 2021 -NET_IP_ALIGN,
2019 length + NET_IP_ALIGN); 2022 (skb->data -
2023 NET_IP_ALIGN),
2024 (length +
2025 NET_IP_ALIGN));
2020 /* save the skb in buffer_info as good */ 2026 /* save the skb in buffer_info as good */
2021 buffer_info->skb = skb; 2027 buffer_info->skb = skb;
2022 skb = new_skb; 2028 skb = new_skb;
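
The ixgb TSO hunk above (and the myri10ge and netxen hunks further down) computes header lengths through the offset helpers instead of raw pointer arithmetic. Assuming the pointer-based layout again, the two forms are equivalent:

static inline int skb_network_offset(const struct sk_buff *skb)
{
        return skb_network_header(skb) - skb->data;     /* was skb->nh.raw - skb->data */
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
        return skb_transport_header(skb) - skb->data;   /* was skb->h.raw - skb->data */
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
        return tcp_hdr(skb)->doff * 4;                  /* was skb->h.th->doff << 2 */
}

so hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) matches the old
(skb->h.raw - skb->data) + (skb->h.th->doff << 2).
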
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index a4eccb11d677..6683afc02aaa 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -110,11 +110,10 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
110 110
111 skb = dev_alloc_skb(desc->pkt_length + 2); 111 skb = dev_alloc_skb(desc->pkt_length + 2);
112 if (likely(skb != NULL)) { 112 if (likely(skb != NULL)) {
113 skb->dev = nds[desc->channel];
114 skb_reserve(skb, 2); 113 skb_reserve(skb, 2);
115 eth_copy_and_sum(skb, buf, desc->pkt_length, 0); 114 eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
116 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
117 skb->protocol = eth_type_trans(skb, skb->dev); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
118 117
119 skb->dev->last_rx = jiffies; 118 skb->dev->last_rx = jiffies;
120 119
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index a3843320dbe1..0fe96c85828b 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -988,7 +988,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
988 if (lance_debug > 5) 988 if (lance_debug > 5)
989 printk("%s: bouncing a high-memory packet (%#x).\n", 989 printk("%s: bouncing a high-memory packet (%#x).\n",
990 dev->name, (u32)isa_virt_to_bus(skb->data)); 990 dev->name, (u32)isa_virt_to_bus(skb->data));
991 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); 991 skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
992 lp->tx_ring[entry].base = 992 lp->tx_ring[entry].base =
993 ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; 993 ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
994 dev_kfree_skb(skb); 994 dev_kfree_skb(skb);
@@ -1184,7 +1184,6 @@ lance_rx(struct net_device *dev)
1184 } 1184 }
1185 break; 1185 break;
1186 } 1186 }
1187 skb->dev = dev;
1188 skb_reserve(skb,2); /* 16 byte align */ 1187 skb_reserve(skb,2); /* 16 byte align */
1189 skb_put(skb,pkt_len); /* Make room */ 1188 skb_put(skb,pkt_len); /* Make room */
1190 eth_copy_and_sum(skb, 1189 eth_copy_and_sum(skb,
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 452863d5d498..0edcd125fd61 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -801,7 +801,6 @@ memory_squeeze:
801 lp->stats.rx_dropped++; 801 lp->stats.rx_dropped++;
802 } 802 }
803 else { 803 else {
804 skb->dev = dev;
805 if (!rx_in_place) { 804 if (!rx_in_place) {
806 /* 16 byte align the data fields */ 805 /* 16 byte align the data fields */
807 dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); 806 dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e726c06b8dc6..5c86e737f954 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -722,7 +722,6 @@ static void ei_receive(struct net_device *dev)
722 else 722 else
723 { 723 {
724 skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ 724 skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
725 skb->dev = dev;
726 skb_put(skb, pkt_len); /* Make room */ 725 skb_put(skb, pkt_len); /* Make room */
727 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); 726 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
728 skb->protocol=eth_type_trans(skb,dev); 727 skb->protocol=eth_type_trans(skb,dev);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2b739fd584f1..6ba6ed2b480a 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -75,8 +75,9 @@ static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats);
75#ifdef LOOPBACK_TSO 75#ifdef LOOPBACK_TSO
76static void emulate_large_send_offload(struct sk_buff *skb) 76static void emulate_large_send_offload(struct sk_buff *skb)
77{ 77{
78 struct iphdr *iph = skb->nh.iph; 78 struct iphdr *iph = ip_hdr(skb);
79 struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4)); 79 struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
80 (iph->ihl * 4));
80 unsigned int doffset = (iph->ihl + th->doff) * 4; 81 unsigned int doffset = (iph->ihl + th->doff) * 4;
81 unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; 82 unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
82 unsigned int offset = 0; 83 unsigned int offset = 0;
@@ -90,10 +91,11 @@ static void emulate_large_send_offload(struct sk_buff *skb)
90 if (!nskb) 91 if (!nskb)
91 break; 92 break;
92 skb_reserve(nskb, 32); 93 skb_reserve(nskb, 32);
93 nskb->mac.raw = nskb->data - 14; 94 skb_set_mac_header(nskb, -ETH_HLEN);
94 nskb->nh.raw = nskb->data; 95 skb_reset_network_header(nskb);
95 iph = nskb->nh.iph; 96 iph = ip_hdr(nskb);
96 memcpy(nskb->data, skb->nh.raw, doffset); 97 skb_copy_to_linear_data(nskb, skb_network_header(skb),
98 doffset);
97 if (skb_copy_bits(skb, 99 if (skb_copy_bits(skb,
98 doffset + offset, 100 doffset + offset,
99 nskb->data + doffset, 101 nskb->data + doffset,
@@ -108,7 +110,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
108 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 110 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
109 nskb->pkt_type = skb->pkt_type; 111 nskb->pkt_type = skb->pkt_type;
110 112
111 th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4); 113 th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
112 iph->tot_len = htons(frag_size + doffset); 114 iph->tot_len = htons(frag_size + doffset);
113 iph->id = htons(id); 115 iph->id = htons(id);
114 iph->check = 0; 116 iph->check = 0;
@@ -137,7 +139,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
137 skb_orphan(skb); 139 skb_orphan(skb);
138 140
139 skb->protocol = eth_type_trans(skb,dev); 141 skb->protocol = eth_type_trans(skb,dev);
140 skb->dev = dev;
141#ifndef LOOPBACK_MUST_CHECKSUM 142#ifndef LOOPBACK_MUST_CHECKSUM
142 skb->ip_summed = CHECKSUM_UNNECESSARY; 143 skb->ip_summed = CHECKSUM_UNNECESSARY;
143#endif 144#endif
@@ -145,7 +146,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
145#ifdef LOOPBACK_TSO 146#ifdef LOOPBACK_TSO
146 if (skb_is_gso(skb)) { 147 if (skb_is_gso(skb)) {
147 BUG_ON(skb->protocol != htons(ETH_P_IP)); 148 BUG_ON(skb->protocol != htons(ETH_P_IP));
148 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 149 BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP);
149 150
150 emulate_large_send_offload(skb); 151 emulate_large_send_offload(skb);
151 return 0; 152 return 0;
@@ -163,11 +164,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
163 return 0; 164 return 0;
164} 165}
165 166
166static struct net_device_stats loopback_stats;
167
168static struct net_device_stats *get_stats(struct net_device *dev) 167static struct net_device_stats *get_stats(struct net_device *dev)
169{ 168{
170 struct net_device_stats *stats = &loopback_stats; 169 struct net_device_stats *stats = &dev->stats;
171 unsigned long bytes = 0; 170 unsigned long bytes = 0;
172 unsigned long packets = 0; 171 unsigned long packets = 0;
173 int i; 172 int i;
@@ -207,7 +206,6 @@ static const struct ethtool_ops loopback_ethtool_ops = {
207struct net_device loopback_dev = { 206struct net_device loopback_dev = {
208 .name = "lo", 207 .name = "lo",
209 .get_stats = &get_stats, 208 .get_stats = &get_stats,
210 .priv = &loopback_stats,
211 .mtu = (16 * 1024) + 20 + 20 + 12, 209 .mtu = (16 * 1024) + 20 + 20 + 12,
212 .hard_start_xmit = loopback_xmit, 210 .hard_start_xmit = loopback_xmit,
213 .hard_header = eth_header, 211 .hard_header = eth_header,
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 177c502f7385..5fc18da1873d 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -676,7 +676,6 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp,
676 return 1; 676 return 1;
677 } 677 }
678 678
679 skb->dev = dev;
680 memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); 679 memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
681 680
682 skb->protocol = eth_type_trans(skb,dev); 681 skb->protocol = eth_type_trans(skb,dev);
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index e960138011c0..90e695d53266 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -530,7 +530,6 @@ net_rx(struct net_device *dev)
530 return; 530 return;
531 } 531 }
532 skb_put(skb, length); 532 skb_put(skb, length);
533 skb->dev = dev;
534 533
535 memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length); 534 memcpy_fromio(skb->data, dev->mem_start + PP_RxFrame, length);
536 535
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 2e9571bf0736..0e04f7ac3f2e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -357,7 +357,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
357 } 357 }
358 358
359 skb_reserve(skb, RX_OFFSET); 359 skb_reserve(skb, RX_OFFSET);
360 skb->dev = bp->dev;
361 skb->ip_summed = CHECKSUM_NONE; 360 skb->ip_summed = CHECKSUM_NONE;
362 skb_put(skb, len); 361 skb_put(skb, len);
363 362
@@ -368,9 +367,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
368 BUG_ON(frag != last_frag); 367 BUG_ON(frag != last_frag);
369 frag_len = len - offset; 368 frag_len = len - offset;
370 } 369 }
371 memcpy(skb->data + offset, 370 skb_copy_to_linear_data_offset(skb, offset,
372 bp->rx_buffers + (RX_BUFFER_SIZE * frag), 371 (bp->rx_buffers +
373 frag_len); 372 (RX_BUFFER_SIZE * frag)),
373 frag_len);
374 offset += RX_BUFFER_SIZE; 374 offset += RX_BUFFER_SIZE;
375 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 375 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
376 wmb(); 376 wmb();
@@ -576,7 +576,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
576 int i; 576 int i;
577 dev_dbg(&bp->pdev->dev, 577 dev_dbg(&bp->pdev->dev,
578 "start_xmit: len %u head %p data %p tail %p end %p\n", 578 "start_xmit: len %u head %p data %p tail %p end %p\n",
579 skb->len, skb->head, skb->data, skb->tail, skb->end); 579 skb->len, skb->head, skb->data,
580 skb_tail_pointer(skb), skb_end_pointer(skb));
580 dev_dbg(&bp->pdev->dev, 581 dev_dbg(&bp->pdev->dev,
581 "data:"); 582 "data:");
582 for (i = 0; i < 16; i++) 583 for (i = 0; i < 16; i++)
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 9ec24f0d5d68..b3bd62394958 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -939,7 +939,6 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
939 else /* Ethernet header; mace includes FCS */ 939 else /* Ethernet header; mace includes FCS */
940 nb -= 8; 940 nb -= 8;
941 skb_put(skb, nb); 941 skb_put(skb, nb);
942 skb->dev = dev;
943 skb->protocol = eth_type_trans(skb, dev); 942 skb->protocol = eth_type_trans(skb, dev);
944 mp->stats.rx_bytes += skb->len; 943 mp->stats.rx_bytes += skb->len;
945 netif_rx(skb); 944 netif_rx(skb);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 5d541e873041..27911c07558d 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -420,8 +420,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
420 mp->stats.tx_bytes += skb->len; 420 mp->stats.tx_bytes += skb->len;
421 421
422 /* We need to copy into our xmit buffer to take care of alignment and caching issues */ 422 /* We need to copy into our xmit buffer to take care of alignment and caching issues */
423 423 skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
424 memcpy((void *) mp->tx_ring, skb->data, skb->len);
425 424
426 /* load the Tx DMA and fire it off */ 425 /* load the Tx DMA and fire it off */
427 426
@@ -621,7 +620,6 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
621 skb_reserve(skb,2); 620 skb_reserve(skb,2);
622 memcpy(skb_put(skb, mf->len), mf->data, mf->len); 621 memcpy(skb_put(skb, mf->len), mf->data, mf->len);
623 622
624 skb->dev = dev;
625 skb->protocol = eth_type_trans(skb, dev); 623 skb->protocol = eth_type_trans(skb, dev);
626 netif_rx(skb); 624 netif_rx(skb);
627 dev->last_rx = jiffies; 625 dev->last_rx = jiffies;
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 7e69ca6edd91..0343ea12b299 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -421,7 +421,6 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
421 /* Write metadata, and then pass to the receive level */ 421 /* Write metadata, and then pass to the receive level */
422 skb_put(skb_c, len); 422 skb_put(skb_c, len);
423 priv->rx_skbs[priv->rx_write] = skb; 423 priv->rx_skbs[priv->rx_write] = skb;
424 skb_c->dev = dev;
425 skb_c->protocol = eth_type_trans(skb_c, dev); 424 skb_c->protocol = eth_type_trans(skb_c, dev);
426 dev->last_rx = jiffies; 425 dev->last_rx = jiffies;
427 priv->stats.rx_packets++; 426 priv->stats.rx_packets++;
@@ -609,7 +608,7 @@ static void meth_tx_short_prepare(struct meth_private *priv,
609 608
610 desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); 609 desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
611 /* maybe I should set whole thing to 0 first... */ 610 /* maybe I should set whole thing to 0 first... */
612 memcpy(desc->data.dt + (120 - len), skb->data, skb->len); 611 skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
613 if (skb->len < len) 612 if (skb->len < len)
614 memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); 613 memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
615} 614}
@@ -627,8 +626,8 @@ static void meth_tx_1page_prepare(struct meth_private *priv,
627 626
628 /* unaligned part */ 627 /* unaligned part */
629 if (unaligned_len) { 628 if (unaligned_len) {
630 memcpy(desc->data.dt + (120 - unaligned_len), 629 skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
631 skb->data, unaligned_len); 630 unaligned_len);
632 desc->header.raw |= (128 - unaligned_len) << 16; 631 desc->header.raw |= (128 - unaligned_len) << 16;
633 } 632 }
634 633
@@ -653,8 +652,8 @@ static void meth_tx_2page_prepare(struct meth_private *priv,
653 desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); 652 desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1);
654 /* unaligned part */ 653 /* unaligned part */
655 if (unaligned_len){ 654 if (unaligned_len){
656 memcpy(desc->data.dt + (120 - unaligned_len), 655 skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
657 skb->data, unaligned_len); 656 unaligned_len);
658 desc->header.raw |= (128 - unaligned_len) << 16; 657 desc->header.raw |= (128 - unaligned_len) << 16;
659 } 658 }
660 659
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index f42b9e201937..403f63afd201 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -101,7 +101,6 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
101 if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) 101 if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
102 return -EFAULT; 102 return -EFAULT;
103 103
104 skb->dev = dev;
105 skb->protocol = eth_type_trans(skb, dev); 104 skb->protocol = eth_type_trans(skb, dev);
106 skb->ip_summed = CHECKSUM_UNNECESSARY; 105 skb->ip_summed = CHECKSUM_UNNECESSARY;
107 106
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8015a7c5b0c9..ab15ecd4b3d6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -434,7 +434,6 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
434 * received packet 434 * received packet
435 */ 435 */
436 skb_put(skb, pkt_info.byte_cnt - 4); 436 skb_put(skb, pkt_info.byte_cnt - 4);
437 skb->dev = dev;
438 437
439 if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { 438 if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
440 skb->ip_summed = CHECKSUM_UNNECESSARY; 439 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1162,15 +1161,15 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1162 1161
1163 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | 1162 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
1164 ETH_GEN_IP_V_4_CHECKSUM | 1163 ETH_GEN_IP_V_4_CHECKSUM |
1165 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; 1164 ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT;
1166 1165
1167 switch (skb->nh.iph->protocol) { 1166 switch (ip_hdr(skb)->protocol) {
1168 case IPPROTO_UDP: 1167 case IPPROTO_UDP:
1169 cmd_sts |= ETH_UDP_FRAME; 1168 cmd_sts |= ETH_UDP_FRAME;
1170 desc->l4i_chk = skb->h.uh->check; 1169 desc->l4i_chk = udp_hdr(skb)->check;
1171 break; 1170 break;
1172 case IPPROTO_TCP: 1171 case IPPROTO_TCP:
1173 desc->l4i_chk = skb->h.th->check; 1172 desc->l4i_chk = tcp_hdr(skb)->check;
1174 break; 1173 break;
1175 default: 1174 default:
1176 BUG(); 1175 BUG();
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f8efe0e70a6b..16e3c4315e82 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -879,7 +879,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
879 * skb_pull() (for ether_pad and eth_type_trans()) requires 879 * skb_pull() (for ether_pad and eth_type_trans()) requires
880 * the beginning of the packet in skb_headlen(), move it 880 * the beginning of the packet in skb_headlen(), move it
881 * manually */ 881 * manually */
882 memcpy(skb->data, va, hlen); 882 skb_copy_to_linear_data(skb, va, hlen);
883 skb_shinfo(skb)->frags[0].page_offset += hlen; 883 skb_shinfo(skb)->frags[0].page_offset += hlen;
884 skb_shinfo(skb)->frags[0].size -= hlen; 884 skb_shinfo(skb)->frags[0].size -= hlen;
885 skb->data_len -= hlen; 885 skb->data_len -= hlen;
@@ -1020,7 +1020,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1020 skb_shinfo(skb)->nr_frags = 0; 1020 skb_shinfo(skb)->nr_frags = 0;
1021 } 1021 }
1022 skb->protocol = eth_type_trans(skb, dev); 1022 skb->protocol = eth_type_trans(skb, dev);
1023 skb->dev = dev;
1024 1023
1025 if (mgp->csum_flag) { 1024 if (mgp->csum_flag) {
1026 if ((skb->protocol == htons(ETH_P_IP)) || 1025 if ((skb->protocol == htons(ETH_P_IP)) ||
@@ -2030,7 +2029,7 @@ again:
2030 odd_flag = 0; 2029 odd_flag = 0;
2031 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); 2030 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
2032 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2031 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2033 cksum_offset = (skb->h.raw - skb->data); 2032 cksum_offset = skb_transport_offset(skb);
2034 pseudo_hdr_offset = cksum_offset + skb->csum_offset; 2033 pseudo_hdr_offset = cksum_offset + skb->csum_offset;
2035 /* If the headers are excessively large, then we must 2034 /* If the headers are excessively large, then we must
2036 * fall back to a software checksum */ 2035 * fall back to a software checksum */
@@ -2055,7 +2054,7 @@ again:
2055 * send loop that we are still in the 2054 * send loop that we are still in the
2056 * header portion of the TSO packet. 2055 * header portion of the TSO packet.
2057 * TSO header must be at most 134 bytes long */ 2056 * TSO header must be at most 134 bytes long */
2058 cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2057 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
2059 2058
2060 /* for TSO, pseudo_hdr_offset holds mss. 2059 /* for TSO, pseudo_hdr_offset holds mss.
2061 * The firmware figures out where to put 2060 * The firmware figures out where to put
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index ee26ef52289f..13444da93273 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -368,7 +368,7 @@ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
368 struct ethhdr *eth; 368 struct ethhdr *eth;
369 unsigned char *rawp; 369 unsigned char *rawp;
370 370
371 skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN); 371 skb_set_mac_header(skb, MYRI_PAD_LEN);
372 skb_pull(skb, dev->hard_header_len); 372 skb_pull(skb, dev->hard_header_len);
373 eth = eth_hdr(skb); 373 eth = eth_hdr(skb);
374 374
@@ -502,7 +502,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
502 copy_skb->dev = dev; 502 copy_skb->dev = dev;
503 DRX(("resv_and_put ")); 503 DRX(("resv_and_put "));
504 skb_put(copy_skb, len); 504 skb_put(copy_skb, len);
505 memcpy(copy_skb->data, skb->data, len); 505 skb_copy_from_linear_data(skb, copy_skb->data, len);
506 506
507 /* Reuse original ring buffer. */ 507 /* Reuse original ring buffer. */
508 DRX(("reuse ")); 508 DRX(("reuse "));
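
myri_sbus (and plip further down) makes the matching change for the link-layer header: instead of assigning skb->mac.raw directly, the new helpers record the MAC header position relative to skb->data. A sketch, again with illustrative wrapper names:

#include <linux/skbuff.h>

/* was: skb->mac.raw = ((unsigned char *)skb->data) + MYRI_PAD_LEN; */
static void mark_mac_header_at(struct sk_buff *skb, int offset)
{
        skb_set_mac_header(skb, offset);
}

/* was: skb->mac.raw = skb->data; */
static void mark_mac_header_here(struct sk_buff *skb)
{
        skb_reset_mac_header(skb);
}
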
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 349b96a3ec4c..a8d7ff2c96ac 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2289,7 +2289,6 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2289 * without copying to a minimally-sized skbuff. */ 2289 * without copying to a minimally-sized skbuff. */
2290 if (pkt_len < rx_copybreak 2290 if (pkt_len < rx_copybreak
2291 && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) { 2291 && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
2292 skb->dev = dev;
2293 /* 16 byte align the IP header */ 2292 /* 16 byte align the IP header */
2294 skb_reserve(skb, RX_OFFSET); 2293 skb_reserve(skb, RX_OFFSET);
2295 pci_dma_sync_single_for_cpu(np->pci_dev, 2294 pci_dma_sync_single_for_cpu(np->pci_dev,
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index a53644f6a29b..2b8da0a54998 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -168,7 +168,6 @@ static void netx_eth_receive(struct net_device *ndev)
168 FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); 168 FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
169 169
170 ndev->last_rx = jiffies; 170 ndev->last_rx = jiffies;
171 skb->dev = ndev;
172 skb->protocol = eth_type_trans(skb, ndev); 171 skb->protocol = eth_type_trans(skb, ndev);
173 netif_rx(skb); 172 netif_rx(skb);
174 priv->stats.rx_packets++; 173 priv->stats.rx_packets++;
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 6537574a9cda..0fba8f190762 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -35,6 +35,8 @@
35#include "netxen_nic_hw.h" 35#include "netxen_nic_hw.h"
36#include "netxen_nic_phan_reg.h" 36#include "netxen_nic_phan_reg.h"
37 37
38#include <net/ip.h>
39
38/* PCI Windowing for DDR regions. */ 40/* PCI Windowing for DDR regions. */
39 41
40#define ADDR_IN_RANGE(addr, low, high) \ 42#define ADDR_IN_RANGE(addr, low, high) \
@@ -371,22 +373,21 @@ void netxen_tso_check(struct netxen_adapter *adapter,
371 struct cmd_desc_type0 *desc, struct sk_buff *skb) 373 struct cmd_desc_type0 *desc, struct sk_buff *skb)
372{ 374{
373 if (desc->mss) { 375 if (desc->mss) {
374 desc->total_hdr_length = sizeof(struct ethhdr) + 376 desc->total_hdr_length = (sizeof(struct ethhdr) +
375 ((skb->nh.iph)->ihl * sizeof(u32)) + 377 ip_hdrlen(skb) + tcp_hdrlen(skb));
376 ((skb->h.th)->doff * sizeof(u32));
377 netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); 378 netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
378 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 379 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
379 if (skb->nh.iph->protocol == IPPROTO_TCP) { 380 if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
380 netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); 381 netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
381 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 382 } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
382 netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); 383 netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
383 } else { 384 } else {
384 return; 385 return;
385 } 386 }
386 } 387 }
387 adapter->stats.xmitcsummed++; 388 adapter->stats.xmitcsummed++;
388 desc->tcp_hdr_offset = skb->h.raw - skb->data; 389 desc->tcp_hdr_offset = skb_transport_offset(skb);
389 desc->ip_hdr_offset = skb->nh.raw - skb->data; 390 desc->ip_hdr_offset = skb_network_offset(skb);
390} 391}
391 392
392int netxen_is_flash_supported(struct netxen_adapter *adapter) 393int netxen_is_flash_supported(struct netxen_adapter *adapter)
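
The new <net/ip.h> include pulls in ip_hdrlen(), which together with tcp_hdrlen() expresses the LSO header-length arithmetic without touching the removed skb->nh.iph / skb->h.th fields. A sketch of the equivalence for an IPv4 TCP frame (the function name is illustrative):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <net/ip.h>

/* was: sizeof(struct ethhdr) + (skb->nh.iph)->ihl * sizeof(u32)
 *                            + (skb->h.th)->doff * sizeof(u32)
 */
static unsigned int lso_total_hdr_length(const struct sk_buff *skb)
{
        return sizeof(struct ethhdr) + ip_hdrlen(skb) + tcp_hdrlen(skb);
}
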
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index eff965dc5fff..5cd40562da7c 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1129,7 +1129,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
1129 port->stats.csummed++; 1129 port->stats.csummed++;
1130 skb->ip_summed = CHECKSUM_UNNECESSARY; 1130 skb->ip_summed = CHECKSUM_UNNECESSARY;
1131 } 1131 }
1132 skb->dev = netdev;
1133 if (desc_ctx == RCV_DESC_LRO_CTXID) { 1132 if (desc_ctx == RCV_DESC_LRO_CTXID) {
1134 /* True length was only available on the last pkt */ 1133 /* True length was only available on the last pkt */
1135 skb_put(skb, buffer->lro_length); 1134 skb_put(skb, buffer->lro_length);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 7d2525e76abb..ab25c225a07e 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,6 +41,7 @@
41 41
42#include <linux/dma-mapping.h> 42#include <linux/dma-mapping.h>
43#include <linux/vmalloc.h> 43#include <linux/vmalloc.h>
44#include <net/ip.h>
44 45
45MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); 46MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
46MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
@@ -778,9 +779,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
778 if (skb_shinfo(skb)->gso_size > 0) { 779 if (skb_shinfo(skb)->gso_size > 0) {
779 780
780 no_of_desc++; 781 no_of_desc++;
781 if (((skb->nh.iph)->ihl * sizeof(u32)) + 782 if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
782 ((skb->h.th)->doff * sizeof(u32)) + 783 sizeof(struct ethhdr)) >
783 sizeof(struct ethhdr) >
784 (sizeof(struct cmd_desc_type0) - 2)) { 784 (sizeof(struct cmd_desc_type0) - 2)) {
785 no_of_desc++; 785 no_of_desc++;
786 } 786 }
@@ -920,8 +920,10 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
920 /* copy the next 64 bytes - should be enough except 920 /* copy the next 64 bytes - should be enough except
921 * for pathological case 921 * for pathological case
922 */ 922 */
923 memcpy((void *)hwdesc, (void *)(skb->data) + 923 skb_copy_from_linear_data_offset(skb, first_hdr_len,
924 first_hdr_len, hdr_len - first_hdr_len); 924 hwdesc,
925 (hdr_len -
926 first_hdr_len));
925 producer = get_next_index(producer, max_tx_desc_count); 927 producer = get_next_index(producer, max_tx_desc_count);
926 } 928 }
927 } 929 }
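
One detail worth noting in the xmit hunk above: the new copy helper reverses the memcpy() argument order, taking the source skb and its offset first and the destination last. A sketch of the converted copy (illustrative wrapper, variable names taken from the hunk):

#include <linux/skbuff.h>

/* was: memcpy(hwdesc, skb->data + first_hdr_len, hdr_len - first_hdr_len); */
static void copy_remaining_headers(const struct sk_buff *skb, void *hwdesc,
                                   unsigned int first_hdr_len,
                                   unsigned int hdr_len)
{
        skb_copy_from_linear_data_offset(skb, first_hdr_len, hwdesc,
                                         hdr_len - first_hdr_len);
}
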
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 8be0d030d6f4..3d5b4232f65f 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -562,7 +562,6 @@ static void ni5010_rx(struct net_device *dev)
562 return; 562 return;
563 } 563 }
564 564
565 skb->dev = dev;
566 skb_reserve(skb, 2); 565 skb_reserve(skb, 2);
567 566
568 /* Read packet into buffer */ 567 /* Read packet into buffer */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a6f4b24b0176..8dbd6d1900b5 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -934,7 +934,6 @@ static void ni52_rcv_int(struct net_device *dev)
934 skb = (struct sk_buff *) dev_alloc_skb(totlen+2); 934 skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
935 if(skb != NULL) 935 if(skb != NULL)
936 { 936 {
937 skb->dev = dev;
938 skb_reserve(skb,2); 937 skb_reserve(skb,2);
939 skb_put(skb,totlen); 938 skb_put(skb,totlen);
940 eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); 939 eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
@@ -1183,7 +1182,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1183 else 1182 else
1184#endif 1183#endif
1185 { 1184 {
1186 memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); 1185 skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
1187 len = skb->len; 1186 len = skb->len;
1188 if (len < ETH_ZLEN) { 1187 if (len < ETH_ZLEN) {
1189 len = ETH_ZLEN; 1188 len = ETH_ZLEN;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 1578f4d98498..3818edf0ac18 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -610,7 +610,6 @@ static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
610 printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); 610 printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
611 return NULL; 611 return NULL;
612 } 612 }
613 skb->dev = dev;
614 skb_reserve(skb,2+16); 613 skb_reserve(skb,2+16);
615 skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ 614 skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
616 ptr = skb->data; 615 ptr = skb->data;
@@ -1094,7 +1093,6 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1094 if(skb) 1093 if(skb)
1095 { 1094 {
1096 skb_reserve(skb,2); 1095 skb_reserve(skb,2);
1097 skb->dev = dev;
1098#ifdef RCV_VIA_SKB 1096#ifdef RCV_VIA_SKB
1099 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { 1097 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
1100 skb_put(skb,len); 1098 skb_put(skb,len);
@@ -1178,8 +1176,9 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
1178 if( (unsigned long) (skb->data + skb->len) > 0x1000000) { 1176 if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
1179#endif 1177#endif
1180 1178
1181 memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data, 1179 skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
1182 (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len); 1180 skb->len > T_BUF_SIZE ? T_BUF_SIZE :
1181 skb->len);
1183 if (len > skb->len) 1182 if (len > skb->len)
1184 memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); 1183 memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
1185 dev_kfree_skb (skb); 1184 dev_kfree_skb (skb);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 9ec6e9e54f47..6a32338623f1 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -607,7 +607,6 @@ static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
607 res &= 0xf; 607 res &= 0xf;
608 skb_reserve(skb, res); 608 skb_reserve(skb, res);
609 609
610 skb->dev = ndev;
611 if (gfp != GFP_ATOMIC) 610 if (gfp != GFP_ATOMIC)
612 spin_lock_irqsave(&dev->rx_info.lock, flags); 611 spin_lock_irqsave(&dev->rx_info.lock, flags);
613 res = ns83820_add_rx_skb(dev, skb); 612 res = ns83820_add_rx_skb(dev, skb);
@@ -1157,9 +1156,9 @@ again:
1157 extsts = 0; 1156 extsts = 0;
1158 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1157 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1159 extsts |= EXTSTS_IPPKT; 1158 extsts |= EXTSTS_IPPKT;
1160 if (IPPROTO_TCP == skb->nh.iph->protocol) 1159 if (IPPROTO_TCP == ip_hdr(skb)->protocol)
1161 extsts |= EXTSTS_TCPPKT; 1160 extsts |= EXTSTS_TCPPKT;
1162 else if (IPPROTO_UDP == skb->nh.iph->protocol) 1161 else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
1163 extsts |= EXTSTS_UDPPKT; 1162 extsts |= EXTSTS_UDPPKT;
1164 } 1163 }
1165 1164
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index d670ac74824f..76fe9dd8e841 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -334,8 +334,6 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
334 break; 334 break;
335 } 335 }
336 336
337 skb->dev = dev;
338
339 dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, 337 dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
340 PCI_DMA_FROMDEVICE); 338 PCI_DMA_FROMDEVICE);
341 339
@@ -731,16 +729,18 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
731 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; 729 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
732 730
733 if (skb->ip_summed == CHECKSUM_PARTIAL) { 731 if (skb->ip_summed == CHECKSUM_PARTIAL) {
734 switch (skb->nh.iph->protocol) { 732 const unsigned char *nh = skb_network_header(skb);
733
734 switch (ip_hdr(skb)->protocol) {
735 case IPPROTO_TCP: 735 case IPPROTO_TCP:
736 dflags |= XCT_MACTX_CSUM_TCP; 736 dflags |= XCT_MACTX_CSUM_TCP;
737 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); 737 dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
738 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); 738 dflags |= XCT_MACTX_IPO(nh - skb->data);
739 break; 739 break;
740 case IPPROTO_UDP: 740 case IPPROTO_UDP:
741 dflags |= XCT_MACTX_CSUM_UDP; 741 dflags |= XCT_MACTX_CSUM_UDP;
742 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); 742 dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
743 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); 743 dflags |= XCT_MACTX_IPO(nh - skb->data);
744 break; 744 break;
745 } 745 }
746 } 746 }
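
pasemi_mac's checksum-offload fields show two more helpers used in this series: skb_network_header_len() for the IP header size and skb_network_offset() for its position (the hunk spells the latter as skb_network_header(skb) - skb->data, which is the same value). A sketch with illustrative output parameters:

#include <linux/skbuff.h>

/* was: XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2)
 *      XCT_MACTX_IPO(skb->nh.raw - skb->data)
 */
static void csum_offload_fields(const struct sk_buff *skb,
                                unsigned int *iph_words,
                                unsigned int *ip_offset)
{
        *iph_words = skb_network_header_len(skb) >> 2;  /* IP header in 32-bit words */
        *ip_offset = skb_network_offset(skb);           /* offset of the IP header   */
}
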
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 6ca4e4fa6b88..df8998b4f37e 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1344,7 +1344,7 @@ static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
1344 1344
1345 tp->tx_info[entry].skb = skb; 1345 tp->tx_info[entry].skb = skb;
1346 /* tp->tx_info[entry].mapping = 0; */ 1346 /* tp->tx_info[entry].mapping = 0; */
1347 memcpy (tp->tx_buf[entry], skb->data, skb->len); 1347 skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
1348 1348
1349 /* Note: the chip doesn't have auto-pad! */ 1349 /* Note: the chip doesn't have auto-pad! */
1350 NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), 1350 NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
@@ -1565,7 +1565,6 @@ static void netdrv_rx_interrupt (struct net_device *dev,
1565 1565
1566 skb = dev_alloc_skb (pkt_size + 2); 1566 skb = dev_alloc_skb (pkt_size + 2);
1567 if (skb) { 1567 if (skb) {
1568 skb->dev = dev;
1569 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 1568 skb_reserve (skb, 2); /* 16 byte align the IP fields. */
1570 1569
1571 eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 1570 eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index c7bd9c1c7f31..2b395ee21f75 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1056,7 +1056,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
1056 DEBUG(3, " Receiving packet size %d status %4.4x.\n", 1056 DEBUG(3, " Receiving packet size %d status %4.4x.\n",
1057 pkt_len, rx_status); 1057 pkt_len, rx_status);
1058 if (skb != NULL) { 1058 if (skb != NULL) {
1059 skb->dev = dev;
1060 skb_reserve(skb, 2); 1059 skb_reserve(skb, 2);
1061 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), 1060 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
1062 ((pkt_len+3)>>2)); 1061 ((pkt_len+3)>>2));
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 461e8274ef69..143ae2ff309e 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -883,7 +883,6 @@ static int el3_rx(struct net_device *dev)
883 DEBUG(3, " Receiving packet size %d status %4.4x.\n", 883 DEBUG(3, " Receiving packet size %d status %4.4x.\n",
884 pkt_len, rx_status); 884 pkt_len, rx_status);
885 if (skb != NULL) { 885 if (skb != NULL) {
886 skb->dev = dev;
887 skb_reserve(skb, 2); 886 skb_reserve(skb, 2);
888 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), 887 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
889 (pkt_len+3)>>2); 888 (pkt_len+3)>>2);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 6139048f8117..808fae1577e0 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1136,7 +1136,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
1136 ei_block_output(dev, length, skb->data, output_page); 1136 ei_block_output(dev, length, skb->data, output_page);
1137 else { 1137 else {
1138 memset(packet, 0, ETH_ZLEN); 1138 memset(packet, 0, ETH_ZLEN);
1139 memcpy(packet, skb->data, skb->len); 1139 skb_copy_from_linear_data(skb, packet, skb->len);
1140 ei_block_output(dev, length, packet, output_page); 1140 ei_block_output(dev, length, packet, output_page);
1141 } 1141 }
1142 1142
@@ -1496,7 +1496,6 @@ static void ei_receive(struct net_device *dev)
1496 else 1496 else
1497 { 1497 {
1498 skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ 1498 skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
1499 skb->dev = dev;
1500 skb_put(skb, pkt_len); /* Make room */ 1499 skb_put(skb, pkt_len); /* Make room */
1501 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); 1500 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
1502 skb->protocol=eth_type_trans(skb,dev); 1501 skb->protocol=eth_type_trans(skb,dev);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 0d7de617e535..3f93d4933235 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -999,7 +999,6 @@ static void fjn_rx(struct net_device *dev)
999 lp->stats.rx_dropped++; 999 lp->stats.rx_dropped++;
1000 break; 1000 break;
1001 } 1001 }
1002 skb->dev = dev;
1003 1002
1004 skb_reserve(skb, 2); 1003 skb_reserve(skb, 2);
1005 insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), 1004 insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 3b707747a811..73da611fd536 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1182,12 +1182,10 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1182 skb = dev_alloc_skb(pkt_len+2); 1182 skb = dev_alloc_skb(pkt_len+2);
1183 1183
1184 if (skb != NULL) { 1184 if (skb != NULL) {
1185 skb->dev = dev;
1186
1187 skb_reserve(skb, 2); 1185 skb_reserve(skb, 2);
1188 insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); 1186 insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
1189 if (pkt_len & 1) 1187 if (pkt_len & 1)
1190 *(skb->tail-1) = inb(ioaddr + AM2150_RCV); 1188 *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
1191 skb->protocol = eth_type_trans(skb, dev); 1189 skb->protocol = eth_type_trans(skb, dev);
1192 1190
1193 netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ 1191 netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 2561f76033ea..7912dbd14251 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1669,7 +1669,6 @@ static void smc_rx(struct net_device *dev)
1669 (packet_length+1)>>1); 1669 (packet_length+1)>>1);
1670 skb->protocol = eth_type_trans(skb, dev); 1670 skb->protocol = eth_type_trans(skb, dev);
1671 1671
1672 skb->dev = dev;
1673 netif_rx(skb); 1672 netif_rx(skb);
1674 dev->last_rx = jiffies; 1673 dev->last_rx = jiffies;
1675 smc->stats.rx_packets++; 1674 smc->stats.rx_packets++;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 5879e7c36988..809ec440b8eb 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1226,7 +1226,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
1226 (pktlen+1)>>1); 1226 (pktlen+1)>>1);
1227 } 1227 }
1228 skb->protocol = eth_type_trans(skb, dev); 1228 skb->protocol = eth_type_trans(skb, dev);
1229 skb->dev = dev;
1230 netif_rx(skb); 1229 netif_rx(skb);
1231 dev->last_rx = jiffies; 1230 dev->last_rx = jiffies;
1232 lp->stats.rx_packets++; 1231 lp->stats.rx_packets++;
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 4d94ba7899bf..0791360a6a66 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1206,7 +1206,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
1206 PCI_DMA_FROMDEVICE); 1206 PCI_DMA_FROMDEVICE);
1207 skb_put(skb, pkt_len); 1207 skb_put(skb, pkt_len);
1208 lp->rx_skbuff[entry] = newskb; 1208 lp->rx_skbuff[entry] = newskb;
1209 newskb->dev = dev;
1210 lp->rx_dma_addr[entry] = 1209 lp->rx_dma_addr[entry] =
1211 pci_map_single(lp->pci_dev, 1210 pci_map_single(lp->pci_dev,
1212 newskb->data, 1211 newskb->data,
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 66da91bb1388..68c99b4c5255 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -276,21 +276,15 @@ static int fixed_mdio_register_device(int number, int speed, int duplex)
276 artificially, we are binding the driver here by hand; 276 artificially, we are binding the driver here by hand;
277 it will be the same for all the fixed phys anyway. 277 it will be the same for all the fixed phys anyway.
278 */ 278 */
279 down_write(&phydev->dev.bus->subsys.rwsem);
280
281 phydev->dev.driver = &fixed_mdio_driver.driver; 279 phydev->dev.driver = &fixed_mdio_driver.driver;
282 280
283 err = phydev->dev.driver->probe(&phydev->dev); 281 err = phydev->dev.driver->probe(&phydev->dev);
284 if(err < 0) { 282 if(err < 0) {
285 printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); 283 printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id);
286 up_write(&phydev->dev.bus->subsys.rwsem);
287 goto probe_fail; 284 goto probe_fail;
288 } 285 }
289 286
290 err = device_bind_driver(&phydev->dev); 287 err = device_bind_driver(&phydev->dev);
291
292 up_write(&phydev->dev.bus->subsys.rwsem);
293
294 if (err) 288 if (err)
295 goto probe_fail; 289 goto probe_fail;
296 290
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7d5b6d1838c8..8f01952c4850 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -208,16 +208,12 @@ struct phy_device *phy_attach(struct net_device *dev,
208 * exist, and we should use the genphy driver. */ 208 * exist, and we should use the genphy driver. */
209 if (NULL == d->driver) { 209 if (NULL == d->driver) {
210 int err; 210 int err;
211 down_write(&d->bus->subsys.rwsem);
212 d->driver = &genphy_driver.driver; 211 d->driver = &genphy_driver.driver;
213 212
214 err = d->driver->probe(d); 213 err = d->driver->probe(d);
215
216 if (err >= 0) 214 if (err >= 0)
217 err = device_bind_driver(d); 215 err = device_bind_driver(d);
218 216
219 up_write(&d->bus->subsys.rwsem);
220
221 if (err) 217 if (err)
222 return ERR_PTR(err); 218 return ERR_PTR(err);
223 } 219 }
@@ -258,11 +254,8 @@ void phy_detach(struct phy_device *phydev)
258 * was using the generic driver), we unbind the device 254 * was using the generic driver), we unbind the device
259 * from the generic driver so that there's a chance a 255 * from the generic driver so that there's a chance a
260 * real driver could be loaded */ 256 * real driver could be loaded */
261 if (phydev->dev.driver == &genphy_driver.driver) { 257 if (phydev->dev.driver == &genphy_driver.driver)
262 down_write(&phydev->dev.bus->subsys.rwsem);
263 device_release_driver(&phydev->dev); 258 device_release_driver(&phydev->dev);
264 up_write(&phydev->dev.bus->subsys.rwsem);
265 }
266} 259}
267EXPORT_SYMBOL(phy_detach); 260EXPORT_SYMBOL(phy_detach);
268 261
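
Both fixed.c and phy_device.c drop the down_write()/up_write() pair on dev.bus->subsys.rwsem: the manual-bind path no longer takes the bus subsystem rwsem, and only the probe-then-bind sequence remains. Reduced to a skeleton (error handling trimmed; the wrapper name is illustrative, the calls mirror the new code above):

#include <linux/device.h>
#include <linux/phy.h>

/* Bind a fallback driver (e.g. the generic PHY driver) by hand. */
static int bind_fallback_driver(struct phy_device *phydev,
                                struct device_driver *drv)
{
        struct device *d = &phydev->dev;
        int err;

        d->driver = drv;                /* e.g. &genphy_driver.driver */

        err = d->driver->probe(d);      /* probe first ...                    */
        if (err >= 0)
                err = device_bind_driver(d);    /* ... then bind, no rwsem around it */

        return err;
}
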
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 6bb085f54437..8754cf3356b0 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -546,7 +546,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
546 struct ethhdr *eth; 546 struct ethhdr *eth;
547 unsigned char *rawp; 547 unsigned char *rawp;
548 548
549 skb->mac.raw=skb->data; 549 skb_reset_mac_header(skb);
550 skb_pull(skb,dev->hard_header_len); 550 skb_pull(skb,dev->hard_header_len);
551 eth = eth_hdr(skb); 551 eth = eth_hdr(skb);
552 552
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 933e2f3c77aa..caabbc408c34 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -802,9 +802,9 @@ process_input_packet(struct asyncppp *ap)
802 802
803 /* check for address/control and protocol compression */ 803 /* check for address/control and protocol compression */
804 p = skb->data; 804 p = skb->data;
805 if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { 805 if (p[0] == PPP_ALLSTATIONS) {
806 /* chop off address/control */ 806 /* chop off address/control */
807 if (skb->len < 3) 807 if (p[1] != PPP_UI || skb->len < 3)
808 goto err; 808 goto err;
809 p = skb_pull(skb, 2); 809 p = skb_pull(skb, 2);
810 } 810 }
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index ef58e4128782..6d596ca50cfd 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -88,8 +88,6 @@ struct ppp_file {
88#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) 88#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
89#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) 89#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
90 90
91#define ROUNDUP(n, x) (((n) + (x) - 1) / (x))
92
93/* 91/*
94 * Data structure describing one ppp unit. 92 * Data structure describing one ppp unit.
95 * A ppp unit corresponds to a ppp network interface device 93 * A ppp unit corresponds to a ppp network interface device
@@ -1297,7 +1295,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1297 */ 1295 */
1298 fragsize = len; 1296 fragsize = len;
1299 if (nfree > 1) 1297 if (nfree > 1)
1300 fragsize = ROUNDUP(fragsize, nfree); 1298 fragsize = DIV_ROUND_UP(fragsize, nfree);
1301 /* nbigger channels get fragsize bytes, the rest get fragsize-1, 1299 /* nbigger channels get fragsize bytes, the rest get fragsize-1,
1302 except if nbigger==0, then they all get fragsize. */ 1300 except if nbigger==0, then they all get fragsize. */
1303 nbigger = len % nfree; 1301 nbigger = len % nfree;
@@ -1685,7 +1683,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1685 skb_pull_rcsum(skb, 2); 1683 skb_pull_rcsum(skb, 2);
1686 skb->dev = ppp->dev; 1684 skb->dev = ppp->dev;
1687 skb->protocol = htons(npindex_to_ethertype[npi]); 1685 skb->protocol = htons(npindex_to_ethertype[npi]);
1688 skb->mac.raw = skb->data; 1686 skb_reset_mac_header(skb);
1689 netif_rx(skb); 1687 netif_rx(skb);
1690 ppp->dev->last_rx = jiffies; 1688 ppp->dev->last_rx = jiffies;
1691 } 1689 }
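
The local ROUNDUP(n, x) macro deleted at the top of the ppp_generic.c diff was an open-coded ceiling division, and DIV_ROUND_UP() from <linux/kernel.h> expands to exactly the same expression, so the fragment-size math is unchanged. A tiny standalone check of that arithmetic (plain user-space C, with the macro copied verbatim):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int len = 1400, nfree = 3;

        /* fragsize = DIV_ROUND_UP(len, nfree): each of the nfree channels
         * carries at most fragsize bytes, so the fragments cover the packet. */
        int fragsize = DIV_ROUND_UP(len, nfree);

        printf("len=%d nfree=%d fragsize=%d covers=%d\n",
               len, nfree, fragsize, fragsize * nfree >= len);
        return 0;
}
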
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index b6f0e9a25e26..5918fab38349 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -594,7 +594,8 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
594 return NULL; 594 return NULL;
595 } 595 }
596 skb_reserve(npkt,2); 596 skb_reserve(npkt,2);
597 memcpy(skb_put(npkt,skb->len), skb->data, skb->len); 597 skb_copy_from_linear_data(skb,
598 skb_put(npkt, skb->len), skb->len);
598 kfree_skb(skb); 599 kfree_skb(skb);
599 skb = npkt; 600 skb = npkt;
600 } 601 }
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index ebfa2967cd68..6f98834e6ace 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -207,7 +207,7 @@ static inline struct pppox_sock *get_item(unsigned long sid,
207 207
208static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) 208static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
209{ 209{
210 struct net_device *dev = NULL; 210 struct net_device *dev;
211 int ifindex; 211 int ifindex;
212 212
213 dev = dev_get_by_name(sp->sa_addr.pppoe.dev); 213 dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
@@ -218,20 +218,6 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
218 return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); 218 return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
219} 219}
220 220
221static inline int set_item(struct pppox_sock *po)
222{
223 int i;
224
225 if (!po)
226 return -EINVAL;
227
228 write_lock_bh(&pppoe_hash_lock);
229 i = __set_item(po);
230 write_unlock_bh(&pppoe_hash_lock);
231
232 return i;
233}
234
235static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) 221static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
236{ 222{
237 struct pppox_sock *ret; 223 struct pppox_sock *ret;
@@ -255,54 +241,53 @@ static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int
255static void pppoe_flush_dev(struct net_device *dev) 241static void pppoe_flush_dev(struct net_device *dev)
256{ 242{
257 int hash; 243 int hash;
258
259 BUG_ON(dev == NULL); 244 BUG_ON(dev == NULL);
260 245
261 read_lock_bh(&pppoe_hash_lock); 246 write_lock_bh(&pppoe_hash_lock);
262 for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) { 247 for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
263 struct pppox_sock *po = item_hash_table[hash]; 248 struct pppox_sock *po = item_hash_table[hash];
264 249
265 while (po != NULL) { 250 while (po != NULL) {
266 if (po->pppoe_dev == dev) { 251 struct sock *sk = sk_pppox(po);
267 struct sock *sk = sk_pppox(po); 252 if (po->pppoe_dev != dev) {
268 253 po = po->next;
269 sock_hold(sk); 254 continue;
270 po->pppoe_dev = NULL; 255 }
256 po->pppoe_dev = NULL;
257 dev_put(dev);
271 258
272 /* We hold a reference to SK, now drop the
273 * hash table lock so that we may attempt
274 * to lock the socket (which can sleep).
275 */
276 read_unlock_bh(&pppoe_hash_lock);
277 259
278 lock_sock(sk); 260 /* We always grab the socket lock, followed by the
261 * pppoe_hash_lock, in that order. Since we should
262 * hold the sock lock while doing any unbinding,
263 * we need to release the lock we're holding.
264 * Hold a reference to the sock so it doesn't disappear
265 * as we're jumping between locks.
266 */
279 267
280 if (sk->sk_state & 268 sock_hold(sk);
281 (PPPOX_CONNECTED | PPPOX_BOUND)) {
282 pppox_unbind_sock(sk);
283 dev_put(dev);
284 sk->sk_state = PPPOX_ZOMBIE;
285 sk->sk_state_change(sk);
286 }
287 269
288 release_sock(sk); 270 write_unlock_bh(&pppoe_hash_lock);
271 lock_sock(sk);
289 272
290 sock_put(sk); 273 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
274 pppox_unbind_sock(sk);
275 sk->sk_state = PPPOX_ZOMBIE;
276 sk->sk_state_change(sk);
277 }
291 278
292 read_lock_bh(&pppoe_hash_lock); 279 release_sock(sk);
280 sock_put(sk);
293 281
294 /* Now restart from the beginning of this 282 /* Restart scan at the beginning of this hash chain.
295 * hash chain. We always NULL out pppoe_dev 283 * While the lock was dropped the chain contents may
296 * so we are guaranteed to make forward 284 * have changed.
297 * progress. 285 */
298 */ 286 write_lock_bh(&pppoe_hash_lock);
299 po = item_hash_table[hash]; 287 po = item_hash_table[hash];
300 continue;
301 }
302 po = po->next;
303 } 288 }
304 } 289 }
305 read_unlock_bh(&pppoe_hash_lock); 290 write_unlock_bh(&pppoe_hash_lock);
306} 291}
307 292
308static int pppoe_device_event(struct notifier_block *this, 293static int pppoe_device_event(struct notifier_block *this,
@@ -344,10 +329,10 @@ static struct notifier_block pppoe_notifier = {
344static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) 329static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
345{ 330{
346 struct pppox_sock *po = pppox_sk(sk); 331 struct pppox_sock *po = pppox_sk(sk);
347 struct pppox_sock *relay_po = NULL; 332 struct pppox_sock *relay_po;
348 333
349 if (sk->sk_state & PPPOX_BOUND) { 334 if (sk->sk_state & PPPOX_BOUND) {
350 struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; 335 struct pppoe_hdr *ph = pppoe_hdr(skb);
351 int len = ntohs(ph->length); 336 int len = ntohs(ph->length);
352 skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); 337 skb_pull_rcsum(skb, sizeof(struct pppoe_hdr));
353 if (pskb_trim_rcsum(skb, len)) 338 if (pskb_trim_rcsum(skb, len))
@@ -401,7 +386,7 @@ static int pppoe_rcv(struct sk_buff *skb,
401 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 386 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
402 goto out; 387 goto out;
403 388
404 ph = (struct pppoe_hdr *) skb->nh.raw; 389 ph = pppoe_hdr(skb);
405 390
406 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 391 po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
407 if (po != NULL) 392 if (po != NULL)
@@ -433,7 +418,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
433 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 418 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
434 goto out; 419 goto out;
435 420
436 ph = (struct pppoe_hdr *) skb->nh.raw; 421 ph = pppoe_hdr(skb);
437 if (ph->code != PADT_CODE) 422 if (ph->code != PADT_CODE)
438 goto abort; 423 goto abort;
439 424
@@ -514,36 +499,49 @@ static int pppoe_release(struct socket *sock)
514{ 499{
515 struct sock *sk = sock->sk; 500 struct sock *sk = sock->sk;
516 struct pppox_sock *po; 501 struct pppox_sock *po;
517 int error = 0;
518 502
519 if (!sk) 503 if (!sk)
520 return 0; 504 return 0;
521 505
522 if (sock_flag(sk, SOCK_DEAD)) 506 lock_sock(sk);
507 if (sock_flag(sk, SOCK_DEAD)){
508 release_sock(sk);
523 return -EBADF; 509 return -EBADF;
510 }
524 511
525 pppox_unbind_sock(sk); 512 pppox_unbind_sock(sk);
526 513
527 /* Signal the death of the socket. */ 514 /* Signal the death of the socket. */
528 sk->sk_state = PPPOX_DEAD; 515 sk->sk_state = PPPOX_DEAD;
529 516
517
518 /* Write lock on hash lock protects the entire "po" struct from
519 * concurrent updates via pppoe_flush_dev. The "po" struct should
520 * be considered part of the hash table contents, thus protected
521 * by the hash table lock */
522 write_lock_bh(&pppoe_hash_lock);
523
530 po = pppox_sk(sk); 524 po = pppox_sk(sk);
531 if (po->pppoe_pa.sid) { 525 if (po->pppoe_pa.sid) {
532 delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); 526 __delete_item(po->pppoe_pa.sid,
527 po->pppoe_pa.remote, po->pppoe_ifindex);
533 } 528 }
534 529
535 if (po->pppoe_dev) 530 if (po->pppoe_dev) {
536 dev_put(po->pppoe_dev); 531 dev_put(po->pppoe_dev);
532 po->pppoe_dev = NULL;
533 }
537 534
538 po->pppoe_dev = NULL; 535 write_unlock_bh(&pppoe_hash_lock);
539 536
540 sock_orphan(sk); 537 sock_orphan(sk);
541 sock->sk = NULL; 538 sock->sk = NULL;
542 539
543 skb_queue_purge(&sk->sk_receive_queue); 540 skb_queue_purge(&sk->sk_receive_queue);
541 release_sock(sk);
544 sock_put(sk); 542 sock_put(sk);
545 543
546 return error; 544 return 0;
547} 545}
548 546
549 547
@@ -599,14 +597,18 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
599 po->pppoe_dev = dev; 597 po->pppoe_dev = dev;
600 po->pppoe_ifindex = dev->ifindex; 598 po->pppoe_ifindex = dev->ifindex;
601 599
602 if (!(dev->flags & IFF_UP)) 600 write_lock_bh(&pppoe_hash_lock);
601 if (!(dev->flags & IFF_UP)){
602 write_unlock_bh(&pppoe_hash_lock);
603 goto err_put; 603 goto err_put;
604 }
604 605
605 memcpy(&po->pppoe_pa, 606 memcpy(&po->pppoe_pa,
606 &sp->sa_addr.pppoe, 607 &sp->sa_addr.pppoe,
607 sizeof(struct pppoe_addr)); 608 sizeof(struct pppoe_addr));
608 609
609 error = set_item(po); 610 error = __set_item(po);
611 write_unlock_bh(&pppoe_hash_lock);
610 if (error < 0) 612 if (error < 0)
611 goto err_put; 613 goto err_put;
612 614
@@ -762,10 +764,10 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
762static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, 764static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
763 struct msghdr *m, size_t total_len) 765 struct msghdr *m, size_t total_len)
764{ 766{
765 struct sk_buff *skb = NULL; 767 struct sk_buff *skb;
766 struct sock *sk = sock->sk; 768 struct sock *sk = sock->sk;
767 struct pppox_sock *po = pppox_sk(sk); 769 struct pppox_sock *po = pppox_sk(sk);
768 int error = 0; 770 int error;
769 struct pppoe_hdr hdr; 771 struct pppoe_hdr hdr;
770 struct pppoe_hdr *ph; 772 struct pppoe_hdr *ph;
771 struct net_device *dev; 773 struct net_device *dev;
@@ -799,7 +801,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
799 801
800 /* Reserve space for headers. */ 802 /* Reserve space for headers. */
801 skb_reserve(skb, dev->hard_header_len); 803 skb_reserve(skb, dev->hard_header_len);
802 skb->nh.raw = skb->data; 804 skb_reset_network_header(skb);
803 805
804 skb->dev = dev; 806 skb->dev = dev;
805 807
@@ -869,7 +871,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
869 goto abort; 871 goto abort;
870 872
871 skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); 873 skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr));
872 memcpy(skb_put(skb2, skb->len), skb->data, skb->len); 874 skb_copy_from_linear_data(skb, skb_put(skb2, skb->len),
875 skb->len);
873 } else { 876 } else {
874 /* Make a clone so as to not disturb the original skb, 877 /* Make a clone so as to not disturb the original skb,
875 * give dev_queue_xmit something it can free. 878 * give dev_queue_xmit something it can free.
@@ -884,7 +887,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
884 memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); 887 memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
885 skb2->protocol = __constant_htons(ETH_P_PPP_SES); 888 skb2->protocol = __constant_htons(ETH_P_PPP_SES);
886 889
887 skb2->nh.raw = skb2->data; 890 skb_reset_network_header(skb2);
888 891
889 skb2->dev = dev; 892 skb2->dev = dev;
890 893
@@ -929,10 +932,8 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
929 struct msghdr *m, size_t total_len, int flags) 932 struct msghdr *m, size_t total_len, int flags)
930{ 933{
931 struct sock *sk = sock->sk; 934 struct sock *sk = sock->sk;
932 struct sk_buff *skb = NULL; 935 struct sk_buff *skb;
933 int error = 0; 936 int error = 0;
934 int len;
935 struct pppoe_hdr *ph = NULL;
936 937
937 if (sk->sk_state & PPPOX_BOUND) { 938 if (sk->sk_state & PPPOX_BOUND) {
938 error = -EIO; 939 error = -EIO;
@@ -942,26 +943,21 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
942 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 943 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
943 flags & MSG_DONTWAIT, &error); 944 flags & MSG_DONTWAIT, &error);
944 945
945 if (error < 0) { 946 if (error < 0)
946 goto end; 947 goto end;
947 }
948 948
949 m->msg_namelen = 0; 949 m->msg_namelen = 0;
950 950
951 if (skb) { 951 if (skb) {
952 error = 0; 952 struct pppoe_hdr *ph = pppoe_hdr(skb);
953 ph = (struct pppoe_hdr *) skb->nh.raw; 953 const int len = ntohs(ph->length);
954 len = ntohs(ph->length);
955 954
956 error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); 955 error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len);
957 if (error < 0) 956 if (error == 0)
958 goto do_skb_free; 957 error = len;
959 error = len;
960 } 958 }
961 959
962do_skb_free: 960 kfree_skb(skb);
963 if (skb)
964 kfree_skb(skb);
965end: 961end:
966 return error; 962 return error;
967} 963}
@@ -991,7 +987,7 @@ out:
991 987
992static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos) 988static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos)
993{ 989{
994 struct pppox_sock *po = NULL; 990 struct pppox_sock *po;
995 int i = 0; 991 int i = 0;
996 992
997 for (; i < PPPOE_HASH_SIZE; i++) { 993 for (; i < PPPOE_HASH_SIZE; i++) {
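
The rewritten pppoe_flush_dev() above enforces a fixed lock order, socket lock before pppoe_hash_lock, which is why the loop drops the hash lock, pins the socket with sock_hold(), locks it, and only then reacquires the hash lock and rescans the chain. Condensed to its skeleton (PPPOX state handling elided; this assumes pppoe.c's own item_hash_table, pppoe_hash_lock and PPPOE_HASH_SIZE statics):

#include <linux/if_pppox.h>
#include <linux/netdevice.h>
#include <net/sock.h>

static void flush_dev_skeleton(struct net_device *dev)
{
        int hash;

        write_lock_bh(&pppoe_hash_lock);
        for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
                struct pppox_sock *po = item_hash_table[hash];

                while (po != NULL) {
                        struct sock *sk = sk_pppox(po);

                        if (po->pppoe_dev != dev) {
                                po = po->next;
                                continue;
                        }
                        po->pppoe_dev = NULL;
                        dev_put(dev);

                        sock_hold(sk);                   /* keep sk alive across the unlock */
                        write_unlock_bh(&pppoe_hash_lock);
                        lock_sock(sk);                   /* sock lock first ...             */
                        /* ... unbind / mark PPPOX_ZOMBIE here ... */
                        release_sock(sk);
                        sock_put(sk);

                        write_lock_bh(&pppoe_hash_lock); /* ... hash lock second            */
                        po = item_hash_table[hash];      /* chain may have changed: rescan  */
                }
        }
        write_unlock_bh(&pppoe_hash_lock);
}
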
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 9315046b3f55..3f8115db4d54 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -58,7 +58,7 @@ void pppox_unbind_sock(struct sock *sk)
58{ 58{
59 /* Clear connection to ppp device, if attached. */ 59 /* Clear connection to ppp device, if attached. */
60 60
61 if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) { 61 if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) {
62 ppp_unregister_channel(&pppox_sk(sk)->chan); 62 ppp_unregister_channel(&pppox_sk(sk)->chan);
63 sk->sk_state = PPPOX_DEAD; 63 sk->sk_state = PPPOX_DEAD;
64 } 64 }
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index a8246eb2f8d9..7b80fb7a9d9b 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1873,7 +1873,6 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1873 pci_unmap_len(lrg_buf_cb2, maplen), 1873 pci_unmap_len(lrg_buf_cb2, maplen),
1874 PCI_DMA_FROMDEVICE); 1874 PCI_DMA_FROMDEVICE);
1875 prefetch(skb->data); 1875 prefetch(skb->data);
1876 skb->dev = qdev->ndev;
1877 skb->ip_summed = CHECKSUM_NONE; 1876 skb->ip_summed = CHECKSUM_NONE;
1878 skb->protocol = eth_type_trans(skb, qdev->ndev); 1877 skb->protocol = eth_type_trans(skb, qdev->ndev);
1879 1878
@@ -1928,7 +1927,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1928 * Copy the ethhdr from first buffer to second. This 1927 * Copy the ethhdr from first buffer to second. This
1929 * is necessary for 3022 IP completions. 1928 * is necessary for 3022 IP completions.
1930 */ 1929 */
1931 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); 1930 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
1931 skb_push(skb2, size), size);
1932 } else { 1932 } else {
1933 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 1933 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
1934 if (checksum & 1934 if (checksum &
@@ -1946,7 +1946,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1946 skb2->ip_summed = CHECKSUM_UNNECESSARY; 1946 skb2->ip_summed = CHECKSUM_UNNECESSARY;
1947 } 1947 }
1948 } 1948 }
1949 skb2->dev = qdev->ndev;
1950 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 1949 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1951 1950
1952 netif_receive_skb(skb2); 1951 netif_receive_skb(skb2);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 6a77b8a92245..45876a854f00 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2284,7 +2284,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
2284 return LargeSend | ((mss & MSSMask) << MSSShift); 2284 return LargeSend | ((mss & MSSMask) << MSSShift);
2285 } 2285 }
2286 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2286 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2287 const struct iphdr *ip = skb->nh.iph; 2287 const struct iphdr *ip = ip_hdr(skb);
2288 2288
2289 if (ip->protocol == IPPROTO_TCP) 2289 if (ip->protocol == IPPROTO_TCP)
2290 return IPCS | TCPCS; 2290 return IPCS | TCPCS;
@@ -2586,7 +2586,6 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2586 pci_action(tp->pci_dev, le64_to_cpu(desc->addr), 2586 pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
2587 tp->rx_buf_sz, PCI_DMA_FROMDEVICE); 2587 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
2588 2588
2589 skb->dev = dev;
2590 skb_put(skb, pkt_size); 2589 skb_put(skb, pkt_size);
2591 skb->protocol = eth_type_trans(skb, dev); 2590 skb->protocol = eth_type_trans(skb, dev);
2592 2591
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index b7ff484af3e1..df6b73872fdb 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -115,7 +115,6 @@ static int rionet_rx_clean(struct net_device *ndev)
115 115
116 rnet->rx_skb[i]->data = data; 116 rnet->rx_skb[i]->data = data;
117 skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); 117 skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
118 rnet->rx_skb[i]->dev = ndev;
119 rnet->rx_skb[i]->protocol = 118 rnet->rx_skb[i]->protocol =
120 eth_type_trans(rnet->rx_skb[i], ndev); 119 eth_type_trans(rnet->rx_skb[i], ndev);
121 error = netif_rx(rnet->rx_skb[i]); 120 error = netif_rx(rnet->rx_skb[i]);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index d81536f90df6..25c73d47daad 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1029,7 +1029,6 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
1029 goto defer; 1029 goto defer;
1030 } 1030 }
1031 } 1031 }
1032 skb->dev = dev;
1033 skb->protocol = hippi_type_trans(skb, dev); 1032 skb->protocol = hippi_type_trans(skb, dev);
1034 1033
1035 netif_rx(skb); /* send it up */ 1034 netif_rx(skb); /* send it up */
@@ -1452,7 +1451,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
1452 } 1451 }
1453 skb_reserve(new_skb, 8); 1452 skb_reserve(new_skb, 8);
1454 skb_put(new_skb, len); 1453 skb_put(new_skb, len);
1455 memcpy(new_skb->data, skb->data, len); 1454 skb_copy_from_linear_data(skb, new_skb->data, len);
1456 dev_kfree_skb(skb); 1455 dev_kfree_skb(skb);
1457 skb = new_skb; 1456 skb = new_skb;
1458 } 1457 }
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 46ebf141ee5a..600d3ff347fc 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2195,7 +2195,7 @@ static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2195 frag_list->next = NULL; 2195 frag_list->next = NULL;
2196 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); 2196 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2197 frag_list->data = tmp; 2197 frag_list->data = tmp;
2198 frag_list->tail = tmp; 2198 skb_reset_tail_pointer(frag_list);
2199 2199
2200 /* Buffer-2 receives L4 data payload */ 2200 /* Buffer-2 receives L4 data payload */
2201 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, 2201 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
@@ -2349,7 +2349,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2349 tmp += ALIGN_SIZE; 2349 tmp += ALIGN_SIZE;
2350 tmp &= ~ALIGN_SIZE; 2350 tmp &= ~ALIGN_SIZE;
2351 skb->data = (void *) (unsigned long)tmp; 2351 skb->data = (void *) (unsigned long)tmp;
2352 skb->tail = (void *) (unsigned long)tmp; 2352 skb_reset_tail_pointer(skb);
2353 2353
2354 if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) 2354 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2355 ((struct RxD3*)rxdp)->Buffer0_ptr = 2355 ((struct RxD3*)rxdp)->Buffer0_ptr =
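
s2io realigns skb->data by hand and previously patched skb->tail to match; skb_reset_tail_pointer() now does the equivalent, which matters because the tail is stored as an offset rather than a pointer on some configurations, so direct assignment is no longer portable. A sketch (illustrative wrapper name):

#include <linux/skbuff.h>

/* was: skb->tail = skb->data;   (after moving skb->data for alignment) */
static void resync_tail(struct sk_buff *skb)
{
        skb_reset_tail_pointer(skb);
}

The read-only counterpart, skb_tail_pointer(), appears in the nmclan_cs hunk earlier in this diff.
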
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index 143958f1ef0a..ad94358ece89 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -688,7 +688,6 @@ static int lan_saa9730_rx(struct net_device *dev)
688 } else { 688 } else {
689 lp->stats.rx_bytes += len; 689 lp->stats.rx_bytes += len;
690 lp->stats.rx_packets++; 690 lp->stats.rx_packets++;
691 skb->dev = dev;
692 skb_reserve(skb, 2); /* 16 byte align */ 691 skb_reserve(skb, 2); /* 16 byte align */
693 skb_put(skb, len); /* make room */ 692 skb_put(skb, len); /* make room */
694 eth_copy_and_sum(skb, 693 eth_copy_and_sum(skb,
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index b9fa4fbb1398..1de3eec1a792 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -834,7 +834,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3
834 goto dropped_frame; 834 goto dropped_frame;
835 } 835 }
836 skb->dev = dev; 836 skb->dev = dev;
837 skb->mac.raw = skb->data; 837 skb_reset_mac_header(skb);
838 skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; 838 skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
839 insw(ioaddr, skb_put(skb, NewDatagramDataSize), 839 insw(ioaddr, skb_put(skb, NewDatagramDataSize),
840 NewDatagramDataSize / 2); 840 NewDatagramDataSize / 2);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 103c3174ab54..0a3a379b634c 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -933,9 +933,6 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
933 } 933 }
934 934
935 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); 935 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
936
937 /* mark skbuff owned by our device */
938 sb_new->dev = d->sbdma_eth->sbm_dev;
939 } 936 }
940 else { 937 else {
941 sb_new = sb; 938 sb_new = sb;
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index c32c21af3fdd..5b7284c955dc 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -814,7 +814,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
814 memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); 814 memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
815 } 815 }
816 816
817 skb->dev = dev;
818 skb->protocol = eth_type_trans(skb, dev); 817 skb->protocol = eth_type_trans(skb, dev);
819 dev->last_rx = jiffies; 818 dev->last_rx = jiffies;
820 netif_rx(skb); 819 netif_rx(skb);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 0d6c95c7aedf..4bce7c4f373c 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -550,7 +550,6 @@ static void seeq8005_rx(struct net_device *dev)
550 lp->stats.rx_dropped++; 550 lp->stats.rx_dropped++;
551 break; 551 break;
552 } 552 }
553 skb->dev = dev;
554 skb_reserve(skb, 2); /* align data on 16 byte */ 553 skb_reserve(skb, 2); /* align data on 16 byte */
555 buf = skb_put(skb,pkt_len); 554 buf = skb_put(skb,pkt_len);
556 555
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 52ed522a234c..d8c9c5d66d4f 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -318,7 +318,6 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
318 skb = dev_alloc_skb(len + 2); 318 skb = dev_alloc_skb(len + 2);
319 319
320 if (skb) { 320 if (skb) {
321 skb->dev = dev;
322 skb_reserve(skb, 2); 321 skb_reserve(skb, 2);
323 skb_put(skb, len); 322 skb_put(skb, len);
324 323
@@ -535,7 +534,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
535 * entry and the HPC got to the end of the chain before we 534 * entry and the HPC got to the end of the chain before we
536 * added this new entry and restarted it. 535 * added this new entry and restarted it.
537 */ 536 */
538 memcpy((char *)(long)td->buf_vaddr, skb->data, skblen); 537 skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen);
539 if (len != skblen) 538 if (len != skblen)
540 memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen); 539 memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen);
541 td->tdma.cntinfo = (len & HPCDMA_BCNT) | 540 td->tdma.cntinfo = (len & HPCDMA_BCNT) |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 34463ce6f132..bc8de48da313 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -632,7 +632,6 @@ static int sis190_rx_interrupt(struct net_device *dev,
632 pci_action(tp->pci_dev, le32_to_cpu(desc->addr), 632 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
633 tp->rx_buf_sz, PCI_DMA_FROMDEVICE); 633 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
634 634
635 skb->dev = dev;
636 skb_put(skb, pkt_size); 635 skb_put(skb, pkt_size);
637 skb->protocol = eth_type_trans(skb, dev); 636 skb->protocol = eth_type_trans(skb, dev);
638 637
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index b3750f284279..2cb2e156c758 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1160,7 +1160,6 @@ sis900_init_rx_ring(struct net_device *net_dev)
1160 buffer */ 1160 buffer */
1161 break; 1161 break;
1162 } 1162 }
1163 skb->dev = net_dev;
1164 sis_priv->rx_skbuff[i] = skb; 1163 sis_priv->rx_skbuff[i] = skb;
1165 sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; 1164 sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
1166 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, 1165 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
@@ -1754,6 +1753,25 @@ static int sis900_rx(struct net_device *net_dev)
1754 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; 1753 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1755 } else { 1754 } else {
1756 struct sk_buff * skb; 1755 struct sk_buff * skb;
1756 struct sk_buff * rx_skb;
1757
1758 pci_unmap_single(sis_priv->pci_dev,
1759 sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
1760 PCI_DMA_FROMDEVICE);
1761
1762 /* refill the Rx buffer, what if there is not enought
1763 * memory for new socket buffer ?? */
1764 if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
1765 /*
1766 * Not enough memory to refill the buffer
1767 * so we need to recycle the old one so
1768 * as to avoid creating a memory hole
1769 * in the rx ring
1770 */
1771 skb = sis_priv->rx_skbuff[entry];
1772 sis_priv->stats.rx_dropped++;
1773 goto refill_rx_ring;
1774 }
1757 1775
1758 /* This situation should never happen, but due to 1776 /* This situation should never happen, but due to
1759 some unknow bugs, it is possible that 1777 some unknow bugs, it is possible that
@@ -1768,14 +1786,11 @@ static int sis900_rx(struct net_device *net_dev)
1768 break; 1786 break;
1769 } 1787 }
1770 1788
1771 pci_unmap_single(sis_priv->pci_dev,
1772 sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
1773 PCI_DMA_FROMDEVICE);
1774 /* give the socket buffer to upper layers */ 1789 /* give the socket buffer to upper layers */
1775 skb = sis_priv->rx_skbuff[entry]; 1790 rx_skb = sis_priv->rx_skbuff[entry];
1776 skb_put(skb, rx_size); 1791 skb_put(rx_skb, rx_size);
1777 skb->protocol = eth_type_trans(skb, net_dev); 1792 rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
1778 netif_rx(skb); 1793 netif_rx(rx_skb);
1779 1794
1780 /* some network statistics */ 1795 /* some network statistics */
1781 if ((rx_status & BCAST) == MCAST) 1796 if ((rx_status & BCAST) == MCAST)
@@ -1783,33 +1798,13 @@ static int sis900_rx(struct net_device *net_dev)
1783 net_dev->last_rx = jiffies; 1798 net_dev->last_rx = jiffies;
1784 sis_priv->stats.rx_bytes += rx_size; 1799 sis_priv->stats.rx_bytes += rx_size;
1785 sis_priv->stats.rx_packets++; 1800 sis_priv->stats.rx_packets++;
1786 1801 sis_priv->dirty_rx++;
1787 /* refill the Rx buffer, what if there is not enought 1802refill_rx_ring:
1788 * memory for new socket buffer ?? */
1789 if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
1790 /* not enough memory for skbuff, this makes a
1791 * "hole" on the buffer ring, it is not clear
1792 * how the hardware will react to this kind
1793 * of degenerated buffer */
1794 if (netif_msg_rx_status(sis_priv))
1795 printk(KERN_INFO "%s: Memory squeeze,"
1796 "deferring packet.\n",
1797 net_dev->name);
1798 sis_priv->rx_skbuff[entry] = NULL;
1799 /* reset buffer descriptor state */
1800 sis_priv->rx_ring[entry].cmdsts = 0;
1801 sis_priv->rx_ring[entry].bufptr = 0;
1802 sis_priv->stats.rx_dropped++;
1803 sis_priv->cur_rx++;
1804 break;
1805 }
1806 skb->dev = net_dev;
1807 sis_priv->rx_skbuff[entry] = skb; 1803 sis_priv->rx_skbuff[entry] = skb;
1808 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; 1804 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1809 sis_priv->rx_ring[entry].bufptr = 1805 sis_priv->rx_ring[entry].bufptr =
1810 pci_map_single(sis_priv->pci_dev, skb->data, 1806 pci_map_single(sis_priv->pci_dev, skb->data,
1811 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1807 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1812 sis_priv->dirty_rx++;
1813 } 1808 }
1814 sis_priv->cur_rx++; 1809 sis_priv->cur_rx++;
1815 entry = sis_priv->cur_rx % NUM_RX_DESC; 1810 entry = sis_priv->cur_rx % NUM_RX_DESC;
@@ -1836,7 +1831,6 @@ static int sis900_rx(struct net_device *net_dev)
1836 sis_priv->stats.rx_dropped++; 1831 sis_priv->stats.rx_dropped++;
1837 break; 1832 break;
1838 } 1833 }
1839 skb->dev = net_dev;
1840 sis_priv->rx_skbuff[entry] = skb; 1834 sis_priv->rx_skbuff[entry] = skb;
1841 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; 1835 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1842 sis_priv->rx_ring[entry].bufptr = 1836 sis_priv->rx_ring[entry].bufptr =
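
The sis900 change reorders receive processing so the replacement buffer is allocated before the received skb is handed to the stack; if the allocation fails, the just-received skb is recycled back into the ring (and counted as dropped) rather than leaving a hole in the descriptor ring. The decision reduces to a small helper like this (illustrative, not part of the patch):

#include <linux/skbuff.h>

/*
 * Return the skb to install in the rx ring slot: a fresh buffer if one
 * could be allocated, otherwise the old one, which is recycled so the
 * ring stays fully populated (the received frame is dropped in that case).
 */
static struct sk_buff *refill_or_recycle(struct sk_buff *old_skb,
                                         unsigned int buf_size,
                                         unsigned long *rx_dropped)
{
        struct sk_buff *skb = dev_alloc_skb(buf_size);

        if (!skb) {
                (*rx_dropped)++;
                return old_skb;
        }
        return skb;
}
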
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index e94ab256b540..e0a93005e6dc 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -1562,10 +1562,10 @@ struct sk_buff *pMessage) /* pointer to send-message */
1562 pTxd->pMBuf = pMessage; 1562 pTxd->pMBuf = pMessage;
1563 1563
1564 if (pMessage->ip_summed == CHECKSUM_PARTIAL) { 1564 if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
1565 u16 hdrlen = pMessage->h.raw - pMessage->data; 1565 u16 hdrlen = skb_transport_offset(pMessage);
1566 u16 offset = hdrlen + pMessage->csum_offset; 1566 u16 offset = hdrlen + pMessage->csum_offset;
1567 1567
1568 if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && 1568 if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) &&
1569 (pAC->GIni.GIChipRev == 0) && 1569 (pAC->GIni.GIChipRev == 0) &&
1570 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { 1570 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1571 pTxd->TBControl = BMU_TCP_CHECK; 1571 pTxd->TBControl = BMU_TCP_CHECK;
@@ -1681,7 +1681,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
1681 ** Does the HW need to evaluate checksum for TCP or UDP packets? 1681 ** Does the HW need to evaluate checksum for TCP or UDP packets?
1682 */ 1682 */
1683 if (pMessage->ip_summed == CHECKSUM_PARTIAL) { 1683 if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
1684 u16 hdrlen = pMessage->h.raw - pMessage->data; 1684 u16 hdrlen = skb_transport_offset(pMessage);
1685 u16 offset = hdrlen + pMessage->csum_offset; 1685 u16 offset = hdrlen + pMessage->csum_offset;
1686 1686
1687 Control = BMU_STFWD; 1687 Control = BMU_STFWD;
@@ -1691,7 +1691,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
1691 ** opcode for udp is not working in the hardware yet 1691 ** opcode for udp is not working in the hardware yet
1692 ** (Revision 2.0) 1692 ** (Revision 2.0)
1693 */ 1693 */
1694 if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && 1694 if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) &&
1695 (pAC->GIni.GIChipRev == 0) && 1695 (pAC->GIni.GIChipRev == 0) &&
1696 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { 1696 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1697 Control |= BMU_TCP_CHECK; 1697 Control |= BMU_TCP_CHECK;
@@ -2127,7 +2127,7 @@ rx_start:
2127 (dma_addr_t) PhysAddr, 2127 (dma_addr_t) PhysAddr,
2128 FrameLength, 2128 FrameLength,
2129 PCI_DMA_FROMDEVICE); 2129 PCI_DMA_FROMDEVICE);
2130 memcpy(pNewMsg->data, pMsg, FrameLength); 2130 skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength);
2131 2131
2132 pci_dma_sync_single_for_device(pAC->PciDev, 2132 pci_dma_sync_single_for_device(pAC->PciDev,
2133 (dma_addr_t) PhysAddr, 2133 (dma_addr_t) PhysAddr,
@@ -2193,7 +2193,6 @@ rx_start:
2193 SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC, 2193 SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,
2194 FrameLength, pRxPort->PortIndex); 2194 FrameLength, pRxPort->PortIndex);
2195 2195
2196 pMsg->dev = pAC->dev[pRxPort->PortIndex];
2197 pMsg->protocol = eth_type_trans(pMsg, 2196 pMsg->protocol = eth_type_trans(pMsg,
2198 pAC->dev[pRxPort->PortIndex]); 2197 pAC->dev[pRxPort->PortIndex]);
2199 netif_rx(pMsg); 2198 netif_rx(pMsg);
@@ -2246,7 +2245,6 @@ rx_start:
2246 (IFF_PROMISC | IFF_ALLMULTI)) != 0 || 2245 (IFF_PROMISC | IFF_ALLMULTI)) != 0 ||
2247 (ForRlmt & SK_RLMT_RX_PROTOCOL) == 2246 (ForRlmt & SK_RLMT_RX_PROTOCOL) ==
2248 SK_RLMT_RX_PROTOCOL) { 2247 SK_RLMT_RX_PROTOCOL) {
2249 pMsg->dev = pAC->dev[pRxPort->PortIndex];
2250 pMsg->protocol = eth_type_trans(pMsg, 2248 pMsg->protocol = eth_type_trans(pMsg,
2251 pAC->dev[pRxPort->PortIndex]); 2249 pAC->dev[pRxPort->PortIndex]);
2252 netif_rx(pMsg); 2250 netif_rx(pMsg);
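
A note on the pattern in the sk98lin hunks above: skb_transport_offset() replaces the old pMessage->h.raw - pMessage->data arithmetic, and eth_type_trans() already sets skb->dev, which is why the explicit pMsg->dev assignments are dropped. The following is a minimal sketch of the checksum-offset computation only; struct my_tx_desc and its field names are hypothetical, not sk98lin code.

#include <linux/skbuff.h>

struct my_tx_desc {
        u16 csum_start;         /* offset where hardware starts summing */
        u16 csum_write;         /* offset where hardware writes the result */
};

static void my_fill_csum_offsets(struct my_tx_desc *td,
                                 const struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u16 hdrlen = skb_transport_offset(skb); /* was h.raw - data */

                td->csum_start = hdrlen;
                td->csum_write = hdrlen + skb->csum_offset;
        }
}

These are the same two offsets the sk98lin and sky2 hunks pack into their descriptor words (start offset in the high half, start + csum_offset in the low half).
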
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 9733a11c6146..a7ef6c8b7721 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -1680,7 +1680,6 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1680 rxd->rxd_os.skb = NULL; 1680 rxd->rxd_os.skb = NULL;
1681 skb_trim(skb, len); 1681 skb_trim(skb, len);
1682 skb->protocol = fddi_type_trans(skb, bp->dev); 1682 skb->protocol = fddi_type_trans(skb, bp->dev);
1683 skb->dev = bp->dev; /* pass up device pointer */
1684 1683
1685 netif_rx(skb); 1684 netif_rx(skb);
1686 bp->dev->last_rx = jiffies; 1685 bp->dev->last_rx = jiffies;
@@ -1938,7 +1937,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1938 } 1937 }
1939 skb_reserve(skb, 3); 1938 skb_reserve(skb, 3);
1940 skb_put(skb, len); 1939 skb_put(skb, len);
1941 memcpy(skb->data, look_ahead, len); 1940 skb_copy_to_linear_data(skb, look_ahead, len);
1942 1941
1943 // deliver frame to system 1942 // deliver frame to system
1944 skb->protocol = fddi_type_trans(skb, smc->os.dev); 1943 skb->protocol = fddi_type_trans(skb, smc->os.dev);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index d476a3cc2e94..f1a0e6c0fbdd 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2654,12 +2654,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2654 td->dma_hi = map >> 32; 2654 td->dma_hi = map >> 32;
2655 2655
2656 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2656 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2657 int offset = skb->h.raw - skb->data; 2657 const int offset = skb_transport_offset(skb);
2658 2658
2659 /* This seems backwards, but it is what the sk98lin 2659 /* This seems backwards, but it is what the sk98lin
2660 * does. Looks like hardware is wrong? 2660 * does. Looks like hardware is wrong?
2661 */ 2661 */
2662 if (skb->h.ipiph->protocol == IPPROTO_UDP 2662 if (ipip_hdr(skb)->protocol == IPPROTO_UDP
2663 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) 2663 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2664 control = BMU_TCP_CHECK; 2664 control = BMU_TCP_CHECK;
2665 else 2665 else
@@ -2950,7 +2950,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
2950 pci_dma_sync_single_for_cpu(skge->hw->pdev, 2950 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2951 pci_unmap_addr(e, mapaddr), 2951 pci_unmap_addr(e, mapaddr),
2952 len, PCI_DMA_FROMDEVICE); 2952 len, PCI_DMA_FROMDEVICE);
2953 memcpy(skb->data, e->skb->data, len); 2953 skb_copy_from_linear_data(e->skb, skb->data, len);
2954 pci_dma_sync_single_for_device(skge->hw->pdev, 2954 pci_dma_sync_single_for_device(skge->hw->pdev,
2955 pci_unmap_addr(e, mapaddr), 2955 pci_unmap_addr(e, mapaddr),
2956 len, PCI_DMA_FROMDEVICE); 2956 len, PCI_DMA_FROMDEVICE);
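
The memcpy()-to-skb_copy_from_linear_data() conversions in skge (and later in sungem, sunhme, tg3 and others) keep the same byte copy but go through the skb accessor. Below is a hedged sketch of the copybreak-style receive path they sit in; my_rx_copybreak is hypothetical and the DMA sync calls of the real drivers are omitted.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Copy a short frame out of the ring buffer into a fresh, aligned skb so
 * the original ring skb can be reused.
 */
static struct sk_buff *my_rx_copybreak(struct net_device *dev,
                                       const struct sk_buff *ring_skb,
                                       unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + 2);

        if (!skb)
                return NULL;

        skb_reserve(skb, 2);                            /* align IP header */
        skb_copy_from_linear_data(ring_skb, skb_put(skb, len), len);
        skb->protocol = eth_type_trans(skb, dev);       /* also sets skb->dev */
        return skb;
}

eth_type_trans() setting skb->dev is also why the standalone skb->dev = dev assignments disappear from the receive paths throughout this series.
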
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 4a009b7b1777..238c2ca34da6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -32,6 +32,7 @@
32#include <linux/ethtool.h> 32#include <linux/ethtool.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/ip.h> 34#include <linux/ip.h>
35#include <net/ip.h>
35#include <linux/tcp.h> 36#include <linux/tcp.h>
36#include <linux/in.h> 37#include <linux/in.h>
37#include <linux/delay.h> 38#include <linux/delay.h>
@@ -49,7 +50,7 @@
49#include "sky2.h" 50#include "sky2.h"
50 51
51#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
52#define DRV_VERSION "1.13" 53#define DRV_VERSION "1.14"
53#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
54 55
55/* 56/*
@@ -123,7 +124,10 @@ static const struct pci_device_id sky2_id_table[] = {
123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ 124 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
124 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ 125 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
125 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ 126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
127#ifdef broken
128 /* This device causes data corruption problems that are not resolved */
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ 129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
130#endif
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ 131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ 132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ 133 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
@@ -740,12 +744,17 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
740 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { 744 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
741 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 745 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
742 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 746 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
743 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 747
744 /* set Tx GMAC FIFO Almost Empty Threshold */ 748 /* set Tx GMAC FIFO Almost Empty Threshold */
745 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180); 749 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
746 /* Disable Store & Forward mode for TX */ 750 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
747 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); 751
748 } 752 if (hw->dev[port]->mtu > ETH_DATA_LEN)
753 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
754 TX_JUMBO_ENA | TX_STFW_DIS);
755 else
756 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
757 TX_JUMBO_DIS | TX_STFW_ENA);
749 } 758 }
750 759
751} 760}
@@ -1278,7 +1287,7 @@ static int sky2_up(struct net_device *dev)
1278 /* Set almost empty threshold */ 1287 /* Set almost empty threshold */
1279 if (hw->chip_id == CHIP_ID_YUKON_EC_U 1288 if (hw->chip_id == CHIP_ID_YUKON_EC_U
1280 && hw->chip_rev == CHIP_REV_YU_EC_U_A0) 1289 && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
1281 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); 1290 sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
1282 1291
1283 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1292 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1284 TX_RING_SIZE - 1); 1293 TX_RING_SIZE - 1);
@@ -1383,8 +1392,8 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1383 /* Check for TCP Segmentation Offload */ 1392 /* Check for TCP Segmentation Offload */
1384 mss = skb_shinfo(skb)->gso_size; 1393 mss = skb_shinfo(skb)->gso_size;
1385 if (mss != 0) { 1394 if (mss != 0) {
1386 mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ 1395 mss += tcp_optlen(skb); /* TCP options */
1387 mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); 1396 mss += ip_hdrlen(skb) + sizeof(struct tcphdr);
1388 mss += ETH_HLEN; 1397 mss += ETH_HLEN;
1389 1398
1390 if (mss != sky2->tx_last_mss) { 1399 if (mss != sky2->tx_last_mss) {
@@ -1412,14 +1421,14 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1412 1421
1413 /* Handle TCP checksum offload */ 1422 /* Handle TCP checksum offload */
1414 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1423 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1415 unsigned offset = skb->h.raw - skb->data; 1424 const unsigned offset = skb_transport_offset(skb);
1416 u32 tcpsum; 1425 u32 tcpsum;
1417 1426
1418 tcpsum = offset << 16; /* sum start */ 1427 tcpsum = offset << 16; /* sum start */
1419 tcpsum |= offset + skb->csum_offset; /* sum write */ 1428 tcpsum |= offset + skb->csum_offset; /* sum write */
1420 1429
1421 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 1430 ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1422 if (skb->nh.iph->protocol == IPPROTO_UDP) 1431 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1423 ctrl |= UDPTCP; 1432 ctrl |= UDPTCP;
1424 1433
1425 if (tcpsum != sky2->tx_tcpsum) { 1434 if (tcpsum != sky2->tx_tcpsum) {
@@ -1584,13 +1593,6 @@ static int sky2_down(struct net_device *dev)
1584 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 1593 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1585 RB_RST_SET | RB_DIS_OP_MD); 1594 RB_RST_SET | RB_DIS_OP_MD);
1586 1595
1587 /* WA for dev. #4.209 */
1588 if (hw->chip_id == CHIP_ID_YUKON_EC_U
1589 && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1590 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1591 sky2->speed != SPEED_1000 ?
1592 TX_STFW_ENA : TX_STFW_DIS);
1593
1594 ctrl = gma_read16(hw, port, GM_GP_CTRL); 1596 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1595 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); 1597 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1596 gma_write16(hw, port, GM_GP_CTRL, ctrl); 1598 gma_write16(hw, port, GM_GP_CTRL, ctrl);
@@ -1890,6 +1892,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1890{ 1892{
1891 struct sky2_port *sky2 = netdev_priv(dev); 1893 struct sky2_port *sky2 = netdev_priv(dev);
1892 struct sky2_hw *hw = sky2->hw; 1894 struct sky2_hw *hw = sky2->hw;
1895 unsigned port = sky2->port;
1893 int err; 1896 int err;
1894 u16 ctl, mode; 1897 u16 ctl, mode;
1895 u32 imask; 1898 u32 imask;
@@ -1897,9 +1900,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1897 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 1900 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1898 return -EINVAL; 1901 return -EINVAL;
1899 1902
1900 /* TSO on Yukon Ultra and MTU > 1500 not supported */ 1903 if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE)
1901 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN) 1904 return -EINVAL;
1902 dev->features &= ~NETIF_F_TSO;
1903 1905
1904 if (!netif_running(dev)) { 1906 if (!netif_running(dev)) {
1905 dev->mtu = new_mtu; 1907 dev->mtu = new_mtu;
@@ -1915,8 +1917,18 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1915 1917
1916 synchronize_irq(hw->pdev->irq); 1918 synchronize_irq(hw->pdev->irq);
1917 1919
1918 ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); 1920 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
1919 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); 1921 if (new_mtu > ETH_DATA_LEN) {
1922 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1923 TX_JUMBO_ENA | TX_STFW_DIS);
1924 dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
1925 } else
1926 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1927 TX_JUMBO_DIS | TX_STFW_ENA);
1928 }
1929
1930 ctl = gma_read16(hw, port, GM_GP_CTRL);
1931 gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
1920 sky2_rx_stop(sky2); 1932 sky2_rx_stop(sky2);
1921 sky2_rx_clean(sky2); 1933 sky2_rx_clean(sky2);
1922 1934
@@ -1928,9 +1940,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1928 if (dev->mtu > ETH_DATA_LEN) 1940 if (dev->mtu > ETH_DATA_LEN)
1929 mode |= GM_SMOD_JUMBO_ENA; 1941 mode |= GM_SMOD_JUMBO_ENA;
1930 1942
1931 gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode); 1943 gma_write16(hw, port, GM_SERIAL_MODE, mode);
1932 1944
1933 sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); 1945 sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
1934 1946
1935 err = sky2_rx_start(sky2); 1947 err = sky2_rx_start(sky2);
1936 sky2_write32(hw, B0_IMSK, imask); 1948 sky2_write32(hw, B0_IMSK, imask);
@@ -1938,7 +1950,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1938 if (err) 1950 if (err)
1939 dev_close(dev); 1951 dev_close(dev);
1940 else { 1952 else {
1941 gma_write16(hw, sky2->port, GM_GP_CTRL, ctl); 1953 gma_write16(hw, port, GM_GP_CTRL, ctl);
1942 1954
1943 netif_poll_enable(hw->dev[0]); 1955 netif_poll_enable(hw->dev[0]);
1944 netif_wake_queue(dev); 1956 netif_wake_queue(dev);
@@ -1959,7 +1971,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
1959 skb_reserve(skb, 2); 1971 skb_reserve(skb, 2);
1960 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, 1972 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
1961 length, PCI_DMA_FROMDEVICE); 1973 length, PCI_DMA_FROMDEVICE);
1962 memcpy(skb->data, re->skb->data, length); 1974 skb_copy_from_linear_data(re->skb, skb->data, length);
1963 skb->ip_summed = re->skb->ip_summed; 1975 skb->ip_summed = re->skb->ip_summed;
1964 skb->csum = re->skb->csum; 1976 skb->csum = re->skb->csum;
1965 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, 1977 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
@@ -2340,26 +2352,22 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2340 } 2352 }
2341} 2353}
2342 2354
2343/* This should never happen it is a fatal situation */ 2355/* This should never happen it is a bug. */
2344static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, 2356static void sky2_le_error(struct sky2_hw *hw, unsigned port,
2345 const char *rxtx, u32 mask) 2357 u16 q, unsigned ring_size)
2346{ 2358{
2347 struct net_device *dev = hw->dev[port]; 2359 struct net_device *dev = hw->dev[port];
2348 struct sky2_port *sky2 = netdev_priv(dev); 2360 struct sky2_port *sky2 = netdev_priv(dev);
2349 u32 imask; 2361 unsigned idx;
2350 2362 const u64 *le = (q == Q_R1 || q == Q_R2)
2351 printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n", 2363 ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
2352 dev ? dev->name : "<not registered>", rxtx);
2353 2364
2354 imask = sky2_read32(hw, B0_IMSK); 2365 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
2355 imask &= ~mask; 2366 printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
2356 sky2_write32(hw, B0_IMSK, imask); 2367 dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
2368 (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
2357 2369
2358 if (dev) { 2370 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
2359 spin_lock(&sky2->phy_lock);
2360 sky2_link_down(sky2);
2361 spin_unlock(&sky2->phy_lock);
2362 }
2363} 2371}
2364 2372
2365/* If idle then force a fake soft NAPI poll once a second 2373/* If idle then force a fake soft NAPI poll once a second
@@ -2383,23 +2391,15 @@ static void sky2_idle(unsigned long arg)
2383 mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); 2391 mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
2384} 2392}
2385 2393
2386 2394/* Hardware/software error handling */
2387static int sky2_poll(struct net_device *dev0, int *budget) 2395static void sky2_err_intr(struct sky2_hw *hw, u32 status)
2388{ 2396{
2389 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; 2397 if (net_ratelimit())
2390 int work_limit = min(dev0->quota, *budget); 2398 dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
2391 int work_done = 0;
2392 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2393 2399
2394 if (status & Y2_IS_HW_ERR) 2400 if (status & Y2_IS_HW_ERR)
2395 sky2_hw_intr(hw); 2401 sky2_hw_intr(hw);
2396 2402
2397 if (status & Y2_IS_IRQ_PHY1)
2398 sky2_phy_intr(hw, 0);
2399
2400 if (status & Y2_IS_IRQ_PHY2)
2401 sky2_phy_intr(hw, 1);
2402
2403 if (status & Y2_IS_IRQ_MAC1) 2403 if (status & Y2_IS_IRQ_MAC1)
2404 sky2_mac_intr(hw, 0); 2404 sky2_mac_intr(hw, 0);
2405 2405
@@ -2407,16 +2407,33 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2407 sky2_mac_intr(hw, 1); 2407 sky2_mac_intr(hw, 1);
2408 2408
2409 if (status & Y2_IS_CHK_RX1) 2409 if (status & Y2_IS_CHK_RX1)
2410 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); 2410 sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
2411 2411
2412 if (status & Y2_IS_CHK_RX2) 2412 if (status & Y2_IS_CHK_RX2)
2413 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); 2413 sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
2414 2414
2415 if (status & Y2_IS_CHK_TXA1) 2415 if (status & Y2_IS_CHK_TXA1)
2416 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); 2416 sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
2417 2417
2418 if (status & Y2_IS_CHK_TXA2) 2418 if (status & Y2_IS_CHK_TXA2)
2419 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); 2419 sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
2420}
2421
2422static int sky2_poll(struct net_device *dev0, int *budget)
2423{
2424 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
2425 int work_limit = min(dev0->quota, *budget);
2426 int work_done = 0;
2427 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2428
2429 if (unlikely(status & Y2_IS_ERROR))
2430 sky2_err_intr(hw, status);
2431
2432 if (status & Y2_IS_IRQ_PHY1)
2433 sky2_phy_intr(hw, 0);
2434
2435 if (status & Y2_IS_IRQ_PHY2)
2436 sky2_phy_intr(hw, 1);
2420 2437
2421 work_done = sky2_status_intr(hw, work_limit); 2438 work_done = sky2_status_intr(hw, work_limit);
2422 if (work_done < work_limit) { 2439 if (work_done < work_limit) {
@@ -2534,16 +2551,14 @@ static void sky2_reset(struct sky2_hw *hw)
2534 int i; 2551 int i;
2535 2552
2536 /* disable ASF */ 2553 /* disable ASF */
2537 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2554 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2538 if (hw->chip_id == CHIP_ID_YUKON_EX) { 2555 status = sky2_read16(hw, HCU_CCSR);
2539 status = sky2_read16(hw, HCU_CCSR); 2556 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2540 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | 2557 HCU_CCSR_UC_STATE_MSK);
2541 HCU_CCSR_UC_STATE_MSK); 2558 sky2_write16(hw, HCU_CCSR, status);
2542 sky2_write16(hw, HCU_CCSR, status); 2559 } else
2543 } else 2560 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2544 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2561 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2545 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2546 }
2547 2562
2548 /* do a SW reset */ 2563 /* do a SW reset */
2549 sky2_write8(hw, B0_CTST, CS_RST_SET); 2564 sky2_write8(hw, B0_CTST, CS_RST_SET);
@@ -3328,6 +3343,36 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3328 regs->len - B3_RI_WTO_R1); 3343 regs->len - B3_RI_WTO_R1);
3329} 3344}
3330 3345
3346/* In order to do Jumbo packets on these chips, need to turn off the
3347 * transmit store/forward. Therefore checksum offload won't work.
3348 */
3349static int no_tx_offload(struct net_device *dev)
3350{
3351 const struct sky2_port *sky2 = netdev_priv(dev);
3352 const struct sky2_hw *hw = sky2->hw;
3353
3354 return dev->mtu > ETH_DATA_LEN &&
3355 (hw->chip_id == CHIP_ID_YUKON_EX
3356 || hw->chip_id == CHIP_ID_YUKON_EC_U);
3357}
3358
3359static int sky2_set_tx_csum(struct net_device *dev, u32 data)
3360{
3361 if (data && no_tx_offload(dev))
3362 return -EINVAL;
3363
3364 return ethtool_op_set_tx_csum(dev, data);
3365}
3366
3367
3368static int sky2_set_tso(struct net_device *dev, u32 data)
3369{
3370 if (data && no_tx_offload(dev))
3371 return -EINVAL;
3372
3373 return ethtool_op_set_tso(dev, data);
3374}
3375
3331static const struct ethtool_ops sky2_ethtool_ops = { 3376static const struct ethtool_ops sky2_ethtool_ops = {
3332 .get_settings = sky2_get_settings, 3377 .get_settings = sky2_get_settings,
3333 .set_settings = sky2_set_settings, 3378 .set_settings = sky2_set_settings,
@@ -3343,9 +3388,9 @@ static const struct ethtool_ops sky2_ethtool_ops = {
3343 .get_sg = ethtool_op_get_sg, 3388 .get_sg = ethtool_op_get_sg,
3344 .set_sg = ethtool_op_set_sg, 3389 .set_sg = ethtool_op_set_sg,
3345 .get_tx_csum = ethtool_op_get_tx_csum, 3390 .get_tx_csum = ethtool_op_get_tx_csum,
3346 .set_tx_csum = ethtool_op_set_tx_csum, 3391 .set_tx_csum = sky2_set_tx_csum,
3347 .get_tso = ethtool_op_get_tso, 3392 .get_tso = ethtool_op_get_tso,
3348 .set_tso = ethtool_op_set_tso, 3393 .set_tso = sky2_set_tso,
3349 .get_rx_csum = sky2_get_rx_csum, 3394 .get_rx_csum = sky2_get_rx_csum,
3350 .set_rx_csum = sky2_set_rx_csum, 3395 .set_rx_csum = sky2_set_rx_csum,
3351 .get_strings = sky2_get_strings, 3396 .get_strings = sky2_get_strings,
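
The new sky2_set_tx_csum()/sky2_set_tso() ethtool handlers above gate the generic helpers on a device constraint. The same shape, reduced to a generic sketch; my_offload_forbidden, its MTU test and the ops wiring are placeholders, not the sky2 logic itself.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Return nonzero when transmit offload must stay disabled, e.g. because
 * jumbo frames force store-and-forward off on this hardware.
 */
static int my_offload_forbidden(const struct net_device *dev)
{
        return dev->mtu > ETH_DATA_LEN;         /* placeholder condition */
}

static int my_set_tx_csum(struct net_device *dev, u32 data)
{
        if (data && my_offload_forbidden(dev))
                return -EINVAL;                 /* refuse to turn it on */
        return ethtool_op_set_tx_csum(dev, data);
}

static const struct ethtool_ops my_ethtool_ops = {
        .get_tx_csum    = ethtool_op_get_tx_csum,
        .set_tx_csum    = my_set_tx_csum,
};
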
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ac24bdc42976..5efb5afc45ba 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -288,6 +288,9 @@ enum {
288 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, 288 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
289 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 289 Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
290 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, 290 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
291 Y2_IS_ERROR = Y2_IS_HW_ERR |
292 Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
293 Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
291}; 294};
292 295
293/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ 296/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
@@ -738,6 +741,11 @@ enum {
738 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ 741 TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
739 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ 742 TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
740 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ 743 TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
744
745 /* Threshold values for Yukon-EC Ultra and Extreme */
746 ECU_AE_THR = 0x0070, /* Almost Empty Threshold */
747 ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */
748 ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */
741}; 749};
742 750
743/* Descriptor Poll Timer Registers */ 751/* Descriptor Poll Timer Registers */
@@ -1631,6 +1639,9 @@ enum {
1631 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ 1639 TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
1632 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ 1640 TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
1633 1641
1642 TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
1643 TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
1644
1634 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ 1645 GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1635 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ 1646 GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1636 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ 1647 GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 2f4b1de7a2b4..65bd20fac820 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -363,7 +363,7 @@ sl_bump(struct slip *sl)
363 } 363 }
364 skb->dev = sl->dev; 364 skb->dev = sl->dev;
365 memcpy(skb_put(skb,count), sl->rbuff, count); 365 memcpy(skb_put(skb,count), sl->rbuff, count);
366 skb->mac.raw=skb->data; 366 skb_reset_mac_header(skb);
367 skb->protocol=htons(ETH_P_IP); 367 skb->protocol=htons(ETH_P_IP);
368 netif_rx(skb); 368 netif_rx(skb);
369 sl->dev->last_rx = jiffies; 369 sl->dev->last_rx = jiffies;
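
skb_reset_mac_header() in the slip hunk is the accessor form of the old skb->mac.raw = skb->data assignment. For context, a hedged sketch of the delivery sequence it lives in; my_deliver_ip_frame is a hypothetical stand-in for sl_bump().

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/jiffies.h>

/* Push a raw IP datagram, already copied into skb, up the stack. */
static void my_deliver_ip_frame(struct net_device *dev, struct sk_buff *skb)
{
        skb->dev = dev;
        skb_reset_mac_header(skb);              /* mac header starts at data */
        skb->protocol = htons(ETH_P_IP);
        netif_rx(skb);
        dev->last_rx = jiffies;
}
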
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index c95614131980..8a2109a913b6 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -502,7 +502,6 @@ static inline void smc911x_rcv(struct net_device *dev)
502 DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,); 502 DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,);
503 PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); 503 PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
504 dev->last_rx = jiffies; 504 dev->last_rx = jiffies;
505 skb->dev = dev;
506 skb->protocol = eth_type_trans(skb, dev); 505 skb->protocol = eth_type_trans(skb, dev);
507 netif_rx(skb); 506 netif_rx(skb);
508 lp->stats.rx_packets++; 507 lp->stats.rx_packets++;
@@ -1307,7 +1306,6 @@ smc911x_rx_dma_irq(int dma, void *data)
1307 lp->current_rx_skb = NULL; 1306 lp->current_rx_skb = NULL;
1308 PRINT_PKT(skb->data, skb->len); 1307 PRINT_PKT(skb->data, skb->len);
1309 dev->last_rx = jiffies; 1308 dev->last_rx = jiffies;
1310 skb->dev = dev;
1311 skb->protocol = eth_type_trans(skb, dev); 1309 skb->protocol = eth_type_trans(skb, dev);
1312 netif_rx(skb); 1310 netif_rx(skb);
1313 lp->stats.rx_packets++; 1311 lp->stats.rx_packets++;
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index bd6e84506c29..36c1ebadbf20 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1262,7 +1262,6 @@ static void smc_rcv(struct net_device *dev)
1262 1262
1263 skb_reserve( skb, 2 ); /* 16 bit alignment */ 1263 skb_reserve( skb, 2 ); /* 16 bit alignment */
1264 1264
1265 skb->dev = dev;
1266 data = skb_put( skb, packet_length); 1265 data = skb_put( skb, packet_length);
1267 1266
1268#ifdef USE_32_BIT 1267#ifdef USE_32_BIT
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 49f4b7712ebf..01cc3c742c38 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -568,7 +568,6 @@ static inline void smc_rcv(struct net_device *dev)
568 PRINT_PKT(data, packet_len - 4); 568 PRINT_PKT(data, packet_len - 4);
569 569
570 dev->last_rx = jiffies; 570 dev->last_rx = jiffies;
571 skb->dev = dev;
572 skb->protocol = eth_type_trans(skb, dev); 571 skb->protocol = eth_type_trans(skb, dev);
573 netif_rx(skb); 572 netif_rx(skb);
574 lp->stats.rx_packets++; 573 lp->stats.rx_packets++;
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index ed7aa0a5acca..c6320c719931 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -85,7 +85,6 @@ static int sonic_open(struct net_device *dev)
85 dev->name); 85 dev->name);
86 return -ENOMEM; 86 return -ENOMEM;
87 } 87 }
88 skb->dev = dev;
89 /* align IP header unless DMA requires otherwise */ 88 /* align IP header unless DMA requires otherwise */
90 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) 89 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
91 skb_reserve(skb, 2); 90 skb_reserve(skb, 2);
@@ -451,7 +450,6 @@ static void sonic_rx(struct net_device *dev)
451 lp->stats.rx_dropped++; 450 lp->stats.rx_dropped++;
452 break; 451 break;
453 } 452 }
454 new_skb->dev = dev;
455 /* provide 16 byte IP header alignment unless DMA requires otherwise */ 453 /* provide 16 byte IP header alignment unless DMA requires otherwise */
456 if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) 454 if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
457 skb_reserve(new_skb, 2); 455 skb_reserve(new_skb, 2);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 3b91af89e4c7..230da14b1b68 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -719,8 +719,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
719 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 719 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
720 spin_unlock_irqrestore(&chain->lock, flags); 720 spin_unlock_irqrestore(&chain->lock, flags);
721 721
722 if (skb->protocol == htons(ETH_P_IP)) 722 if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL)
723 switch (skb->nh.iph->protocol) { 723 switch (ip_hdr(skb)->protocol) {
724 case IPPROTO_TCP: 724 case IPPROTO_TCP:
725 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; 725 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
726 break; 726 break;
@@ -990,7 +990,6 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
990 netdev = card->netdev; 990 netdev = card->netdev;
991 991
992 skb = descr->skb; 992 skb = descr->skb;
993 skb->dev = netdev;
994 skb_put(skb, hwdescr->valid_size); 993 skb_put(skb, hwdescr->valid_size);
995 994
996 /* the card seems to add 2 bytes of junk in front 995 /* the card seems to add 2 bytes of junk in front
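
The spider_net hunk above now also requires CHECKSUM_PARTIAL before picking a TCP/UDP checksum opcode from ip_hdr(skb)->protocol. A generic sketch of that selection follows; the MY_DMAC_* command bits are hypothetical placeholders for the device's descriptor flags.

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>

#define MY_DMAC_TCP     0x00020000      /* hypothetical descriptor bits */
#define MY_DMAC_UDP     0x00030000

static u32 my_csum_cmd_bits(const struct sk_buff *skb)
{
        /* Only ask the hardware to checksum when the stack requested it. */
        if (skb->protocol != htons(ETH_P_IP) ||
            skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        switch (ip_hdr(skb)->protocol) {
        case IPPROTO_TCP:
                return MY_DMAC_TCP;
        case IPPROTO_UDP:
                return MY_DMAC_UDP;
        default:
                return 0;
        }
}
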
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 8bba2e3da7e1..9d6e454a8f98 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1452,7 +1452,6 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1452 to a minimally-sized skbuff. */ 1452 to a minimally-sized skbuff. */
1453 if (pkt_len < rx_copybreak 1453 if (pkt_len < rx_copybreak
1454 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1454 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1455 skb->dev = dev;
1456 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1455 skb_reserve(skb, 2); /* 16 byte align the IP header */
1457 pci_dma_sync_single_for_cpu(np->pci_dev, 1456 pci_dma_sync_single_for_cpu(np->pci_dev,
1458 np->rx_info[entry].mapping, 1457 np->rx_info[entry].mapping,
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 4757aa647c7a..396c3d961f88 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -775,7 +775,6 @@ static void sun3_82586_rcv_int(struct net_device *dev)
775 skb = (struct sk_buff *) dev_alloc_skb(totlen+2); 775 skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
776 if(skb != NULL) 776 if(skb != NULL)
777 { 777 {
778 skb->dev = dev;
779 skb_reserve(skb,2); 778 skb_reserve(skb,2);
780 skb_put(skb,totlen); 779 skb_put(skb,totlen);
781 eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); 780 eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
@@ -1027,7 +1026,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1027 memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); 1026 memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
1028 len = ETH_ZLEN; 1027 len = ETH_ZLEN;
1029 } 1028 }
1030 memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); 1029 skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len);
1031 1030
1032#if (NUM_XMIT_BUFFS == 1) 1031#if (NUM_XMIT_BUFFS == 1)
1033# ifdef NO_NOPCOMMANDS 1032# ifdef NO_NOPCOMMANDS
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 7bee45b42a2c..791e081fdc15 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -629,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
629 head->length = (-len) | 0xf000; 629 head->length = (-len) | 0xf000;
630 head->misc = 0; 630 head->misc = 0;
631 631
632 memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); 632 skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
633 if (len != skb->len) 633 if (len != skb->len)
634 memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); 634 memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
635 635
@@ -851,10 +851,9 @@ static int lance_rx( struct net_device *dev )
851 } 851 }
852 852
853 853
854 skb->dev = dev;
855 skb_reserve( skb, 2 ); /* 16 byte align */ 854 skb_reserve( skb, 2 ); /* 16 byte align */
856 skb_put( skb, pkt_len ); /* Make room */ 855 skb_put( skb, pkt_len ); /* Make room */
857// memcpy( skb->data, PKTBUF_ADDR(head), pkt_len ); 856// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len);
858 eth_copy_and_sum(skb, 857 eth_copy_and_sum(skb,
859 PKTBUF_ADDR(head), 858 PKTBUF_ADDR(head),
860 pkt_len, 0); 859 pkt_len, 0);
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 18f88853e1e5..2ad8d58dee3b 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -855,7 +855,6 @@ static void bigmac_rx(struct bigmac *bp)
855 drops++; 855 drops++;
856 goto drop_it; 856 goto drop_it;
857 } 857 }
858 copy_skb->dev = bp->dev;
859 skb_reserve(copy_skb, 2); 858 skb_reserve(copy_skb, 2);
860 skb_put(copy_skb, len); 859 skb_put(copy_skb, len);
861 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, 860 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index c06ecc8002b9..f51ba31970aa 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1308,7 +1308,6 @@ static void rx_poll(unsigned long data)
1308 to a minimally-sized skbuff. */ 1308 to a minimally-sized skbuff. */
1309 if (pkt_len < rx_copybreak 1309 if (pkt_len < rx_copybreak
1310 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1310 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1311 skb->dev = dev;
1312 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1311 skb_reserve(skb, 2); /* 16 byte align the IP header */
1313 pci_dma_sync_single_for_cpu(np->pci_dev, 1312 pci_dma_sync_single_for_cpu(np->pci_dev,
1314 desc->frag[0].addr, 1313 desc->frag[0].addr,
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 08ea61db46fe..5da73212ac91 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -64,11 +64,9 @@
64#include <asm/uaccess.h> 64#include <asm/uaccess.h>
65#include <asm/irq.h> 65#include <asm/irq.h>
66 66
67#ifdef __sparc__ 67#ifdef CONFIG_SPARC
68#include <asm/idprom.h> 68#include <asm/idprom.h>
69#include <asm/openprom.h> 69#include <asm/prom.h>
70#include <asm/oplib.h>
71#include <asm/pbm.h>
72#endif 70#endif
73 71
74#ifdef CONFIG_PPC_PMAC 72#ifdef CONFIG_PPC_PMAC
@@ -845,11 +843,10 @@ static int gem_rx(struct gem *gp, int work_to_do)
845 goto drop_it; 843 goto drop_it;
846 } 844 }
847 845
848 copy_skb->dev = gp->dev;
849 skb_reserve(copy_skb, 2); 846 skb_reserve(copy_skb, 2);
850 skb_put(copy_skb, len); 847 skb_put(copy_skb, len);
851 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 848 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
852 memcpy(copy_skb->data, skb->data, len); 849 skb_copy_from_linear_data(skb, copy_skb->data, len);
853 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 850 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
854 851
855 /* We'll reuse the original ring buffer. */ 852 /* We'll reuse the original ring buffer. */
@@ -1029,10 +1026,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
1029 1026
1030 ctrl = 0; 1027 ctrl = 0;
1031 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1028 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1032 u64 csum_start_off, csum_stuff_off; 1029 const u64 csum_start_off = skb_transport_offset(skb);
1033 1030 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
1034 csum_start_off = (u64) (skb->h.raw - skb->data);
1035 csum_stuff_off = csum_start_off + skb->csum_offset;
1036 1031
1037 ctrl = (TXDCTRL_CENAB | 1032 ctrl = (TXDCTRL_CENAB |
1038 (csum_start_off << 15) | 1033 (csum_start_off << 15) |
@@ -2849,7 +2844,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2849 return rc; 2844 return rc;
2850} 2845}
2851 2846
2852#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) 2847#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
2853/* Fetch MAC address from vital product data of PCI ROM. */ 2848/* Fetch MAC address from vital product data of PCI ROM. */
2854static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) 2849static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
2855{ 2850{
@@ -2904,36 +2899,19 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2904 2899
2905static int __devinit gem_get_device_address(struct gem *gp) 2900static int __devinit gem_get_device_address(struct gem *gp)
2906{ 2901{
2907#if defined(__sparc__) || defined(CONFIG_PPC_PMAC) 2902#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
2908 struct net_device *dev = gp->dev; 2903 struct net_device *dev = gp->dev;
2909#endif
2910
2911#if defined(__sparc__)
2912 struct pci_dev *pdev = gp->pdev;
2913 struct pcidev_cookie *pcp = pdev->sysdata;
2914 int use_idprom = 1;
2915
2916 if (pcp != NULL) {
2917 unsigned char *addr;
2918 int len;
2919
2920 addr = of_get_property(pcp->prom_node, "local-mac-address",
2921 &len);
2922 if (addr && len == 6) {
2923 use_idprom = 0;
2924 memcpy(dev->dev_addr, addr, 6);
2925 }
2926 }
2927 if (use_idprom)
2928 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2929#elif defined(CONFIG_PPC_PMAC)
2930 const unsigned char *addr; 2904 const unsigned char *addr;
2931 2905
2932 addr = get_property(gp->of_node, "local-mac-address", NULL); 2906 addr = get_property(gp->of_node, "local-mac-address", NULL);
2933 if (addr == NULL) { 2907 if (addr == NULL) {
2908#ifdef CONFIG_SPARC
2909 addr = idprom->id_ethaddr;
2910#else
2934 printk("\n"); 2911 printk("\n");
2935 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2912 printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
2936 return -1; 2913 return -1;
2914#endif
2937 } 2915 }
2938 memcpy(dev->dev_addr, addr, 6); 2916 memcpy(dev->dev_addr, addr, 6);
2939#else 2917#else
@@ -3091,7 +3069,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3091 /* On Apple, we want a reference to the Open Firmware device-tree 3069 /* On Apple, we want a reference to the Open Firmware device-tree
3092 * node. We use it for clock control. 3070 * node. We use it for clock control.
3093 */ 3071 */
3094#ifdef CONFIG_PPC_PMAC 3072#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
3095 gp->of_node = pci_device_to_OF_node(pdev); 3073 gp->of_node = pci_device_to_OF_node(pdev);
3096#endif 3074#endif
3097 3075
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index a70067c85cc9..58cf87c5751e 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1025,7 +1025,7 @@ struct gem {
1025 1025
1026 struct pci_dev *pdev; 1026 struct pci_dev *pdev;
1027 struct net_device *dev; 1027 struct net_device *dev;
1028#ifdef CONFIG_PPC_PMAC 1028#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
1029 struct device_node *of_node; 1029 struct device_node *of_node;
1030#endif 1030#endif
1031}; 1031};
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index ef671739cfea..51c3fe2108a3 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -55,9 +55,6 @@
55 55
56#ifdef CONFIG_PCI 56#ifdef CONFIG_PCI
57#include <linux/pci.h> 57#include <linux/pci.h>
58#ifdef CONFIG_SPARC
59#include <asm/pbm.h>
60#endif
61#endif 58#endif
62 59
63#include "sunhme.h" 60#include "sunhme.h"
@@ -2058,11 +2055,10 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2058 goto drop_it; 2055 goto drop_it;
2059 } 2056 }
2060 2057
2061 copy_skb->dev = dev;
2062 skb_reserve(copy_skb, 2); 2058 skb_reserve(copy_skb, 2);
2063 skb_put(copy_skb, len); 2059 skb_put(copy_skb, len);
2064 hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); 2060 hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
2065 memcpy(copy_skb->data, skb->data, len); 2061 skb_copy_from_linear_data(skb, copy_skb->data, len);
2066 hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); 2062 hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);
2067 2063
2068 /* Reuse original ring buffer. */ 2064 /* Reuse original ring buffer. */
@@ -2270,10 +2266,8 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
2270 2266
2271 tx_flags = TXFLAG_OWN; 2267 tx_flags = TXFLAG_OWN;
2272 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2268 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2273 u32 csum_start_off, csum_stuff_off; 2269 const u32 csum_start_off = skb_transport_offset(skb);
2274 2270 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2275 csum_start_off = (u32) (skb->h.raw - skb->data);
2276 csum_stuff_off = csum_start_off + skb->csum_offset;
2277 2271
2278 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | 2272 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
2279 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | 2273 ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
@@ -2704,7 +2698,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
2704 dev->dev_addr[i] = macaddr[i]; 2698 dev->dev_addr[i] = macaddr[i];
2705 macaddr[5]++; 2699 macaddr[5]++;
2706 } else { 2700 } else {
2707 unsigned char *addr; 2701 const unsigned char *addr;
2708 int len; 2702 int len;
2709 2703
2710 addr = of_get_property(dp, "local-mac-address", &len); 2704 addr = of_get_property(dp, "local-mac-address", &len);
@@ -2986,7 +2980,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2986{ 2980{
2987 struct quattro *qp = NULL; 2981 struct quattro *qp = NULL;
2988#ifdef CONFIG_SPARC 2982#ifdef CONFIG_SPARC
2989 struct pcidev_cookie *pcp; 2983 struct device_node *dp;
2990#endif 2984#endif
2991 struct happy_meal *hp; 2985 struct happy_meal *hp;
2992 struct net_device *dev; 2986 struct net_device *dev;
@@ -2998,13 +2992,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2998 2992
2999 /* Now make sure pci_dev cookie is there. */ 2993 /* Now make sure pci_dev cookie is there. */
3000#ifdef CONFIG_SPARC 2994#ifdef CONFIG_SPARC
3001 pcp = pdev->sysdata; 2995 dp = pci_device_to_OF_node(pdev);
3002 if (pcp == NULL) { 2996 strcpy(prom_name, dp->name);
3003 printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n");
3004 return -ENODEV;
3005 }
3006
3007 strcpy(prom_name, pcp->prom_node->name);
3008#else 2997#else
3009 if (is_quattro_p(pdev)) 2998 if (is_quattro_p(pdev))
3010 strcpy(prom_name, "SUNW,qfe"); 2999 strcpy(prom_name, "SUNW,qfe");
@@ -3081,11 +3070,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3081 macaddr[5]++; 3070 macaddr[5]++;
3082 } else { 3071 } else {
3083#ifdef CONFIG_SPARC 3072#ifdef CONFIG_SPARC
3084 unsigned char *addr; 3073 const unsigned char *addr;
3085 int len; 3074 int len;
3086 3075
3087 if (qfe_slot != -1 && 3076 if (qfe_slot != -1 &&
3088 (addr = of_get_property(pcp->prom_node, 3077 (addr = of_get_property(dp,
3089 "local-mac-address", &len)) != NULL 3078 "local-mac-address", &len)) != NULL
3090 && len == 6) { 3079 && len == 6) {
3091 memcpy(dev->dev_addr, addr, 6); 3080 memcpy(dev->dev_addr, addr, 6);
@@ -3105,7 +3094,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3105 hp->tcvregs = (hpreg_base + 0x7000UL); 3094 hp->tcvregs = (hpreg_base + 0x7000UL);
3106 3095
3107#ifdef CONFIG_SPARC 3096#ifdef CONFIG_SPARC
3108 hp->hm_revision = of_getintprop_default(pcp->prom_node, "hm-rev", 0xff); 3097 hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
3109 if (hp->hm_revision == 0xff) { 3098 if (hp->hm_revision == 0xff) {
3110 unsigned char prev; 3099 unsigned char prev;
3111 3100
@@ -3300,7 +3289,7 @@ static int __devinit hme_sbus_probe(struct of_device *dev, const struct of_devic
3300{ 3289{
3301 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 3290 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
3302 struct device_node *dp = dev->node; 3291 struct device_node *dp = dev->node;
3303 char *model = of_get_property(dp, "model", NULL); 3292 const char *model = of_get_property(dp, "model", NULL);
3304 int is_qfe = (match->data != NULL); 3293 int is_qfe = (match->data != NULL);
3305 3294
3306 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) 3295 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
@@ -3314,7 +3303,7 @@ static int __devexit hme_sbus_remove(struct of_device *dev)
3314 struct happy_meal *hp = dev_get_drvdata(&dev->dev); 3303 struct happy_meal *hp = dev_get_drvdata(&dev->dev);
3315 struct net_device *net_dev = hp->dev; 3304 struct net_device *net_dev = hp->dev;
3316 3305
3317 unregister_netdevice(net_dev); 3306 unregister_netdev(net_dev);
3318 3307
3319 /* XXX qfe parent interrupt... */ 3308 /* XXX qfe parent interrupt... */
3320 3309
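
The sungem and sunhme hunks above (and the tg3 one further down) all fetch the MAC address from the PCI device's OF node via pci_device_to_OF_node() and of_get_property() instead of digging through the old pcidev_cookie. A hedged sketch of that lookup with the IDPROM fallback sungem uses; my_get_mac_from_of is hypothetical and assumes the node exists, as the hunks above do.

#ifdef CONFIG_SPARC
#include <linux/pci.h>
#include <linux/string.h>
#include <asm/prom.h>
#include <asm/idprom.h>

static void my_get_mac_from_of(struct pci_dev *pdev, unsigned char *dev_addr)
{
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const unsigned char *addr;
        int len;

        addr = of_get_property(dp, "local-mac-address", &len);
        if (addr && len == 6)
                memcpy(dev_addr, addr, 6);
        else
                memcpy(dev_addr, idprom->id_ethaddr, 6); /* IDPROM fallback */
}
#endif
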
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 5b00d79b5573..42722530ab24 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -547,7 +547,6 @@ static void lance_rx_dvma(struct net_device *dev)
547 547
548 lp->stats.rx_bytes += len; 548 lp->stats.rx_bytes += len;
549 549
550 skb->dev = dev;
551 skb_reserve(skb, 2); /* 16 byte align */ 550 skb_reserve(skb, 2); /* 16 byte align */
552 skb_put(skb, len); /* make room */ 551 skb_put(skb, len); /* make room */
553 eth_copy_and_sum(skb, 552 eth_copy_and_sum(skb,
@@ -721,7 +720,6 @@ static void lance_rx_pio(struct net_device *dev)
721 720
722 lp->stats.rx_bytes += len; 721 lp->stats.rx_bytes += len;
723 722
724 skb->dev = dev;
725 skb_reserve (skb, 2); /* 16 byte align */ 723 skb_reserve (skb, 2); /* 16 byte align */
726 skb_put(skb, len); /* make room */ 724 skb_put(skb, len); /* make room */
727 lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); 725 lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
@@ -1145,7 +1143,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1145 struct lance_init_block *ib = lp->init_block_mem; 1143 struct lance_init_block *ib = lp->init_block_mem;
1146 ib->btx_ring [entry].length = (-len) | 0xf000; 1144 ib->btx_ring [entry].length = (-len) | 0xf000;
1147 ib->btx_ring [entry].misc = 0; 1145 ib->btx_ring [entry].misc = 0;
1148 memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen); 1146 skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
1149 if (len != skblen) 1147 if (len != skblen)
1150 memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); 1148 memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
1151 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); 1149 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
@@ -1550,7 +1548,7 @@ static int __exit sunlance_sun4_remove(void)
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); 1548 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev; 1549 struct net_device *net_dev = lp->dev;
1552 1550
1553 unregister_netdevice(net_dev); 1551 unregister_netdev(net_dev);
1554 1552
1555 lance_free_hwresources(lp); 1553 lance_free_hwresources(lp);
1556 1554
@@ -1590,7 +1588,7 @@ static int __devexit sunlance_sbus_remove(struct of_device *dev)
1590 struct lance_private *lp = dev_get_drvdata(&dev->dev); 1588 struct lance_private *lp = dev_get_drvdata(&dev->dev);
1591 struct net_device *net_dev = lp->dev; 1589 struct net_device *net_dev = lp->dev;
1592 1590
1593 unregister_netdevice(net_dev); 1591 unregister_netdev(net_dev);
1594 1592
1595 lance_free_hwresources(lp); 1593 lance_free_hwresources(lp);
1596 1594
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 7874eb1ef043..fa70e0b78af7 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -437,7 +437,6 @@ static void qe_rx(struct sunqe *qep)
437 drops++; 437 drops++;
438 qep->net_stats.rx_dropped++; 438 qep->net_stats.rx_dropped++;
439 } else { 439 } else {
440 skb->dev = qep->dev;
441 skb_reserve(skb, 2); 440 skb_reserve(skb, 2);
442 skb_put(skb, len); 441 skb_put(skb, len);
443 eth_copy_and_sum(skb, (unsigned char *) this_qbuf, 442 eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
@@ -593,7 +592,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
593 /* Avoid a race... */ 592 /* Avoid a race... */
594 qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; 593 qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
595 594
596 memcpy(txbuf, skb->data, len); 595 skb_copy_from_linear_data(skb, txbuf, len);
597 596
598 qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; 597 qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
599 qep->qe_block->qe_txd[entry].tx_flags = 598 qep->qe_block->qe_txd[entry].tx_flags =
@@ -845,6 +844,8 @@ static int __init qec_ether_init(struct sbus_dev *sdev)
845 if (!dev) 844 if (!dev)
846 return -ENOMEM; 845 return -ENOMEM;
847 846
847 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
848
848 qe = netdev_priv(dev); 849 qe = netdev_priv(dev);
849 850
850 i = of_getintprop_default(sdev->ofdev.node, "channel#", -1); 851 i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
@@ -960,7 +961,7 @@ static int __devexit qec_sbus_remove(struct of_device *dev)
960 struct sunqe *qp = dev_get_drvdata(&dev->dev); 961 struct sunqe *qp = dev_get_drvdata(&dev->dev);
961 struct net_device *net_dev = qp->dev; 962 struct net_device *net_dev = qp->dev;
962 963
963 unregister_netdevice(net_dev); 964 unregister_netdev(net_dev);
964 965
965 sbus_iounmap(qp->qcregs, CREG_REG_SIZE); 966 sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
966 sbus_iounmap(qp->mregs, MREGS_REG_SIZE); 967 sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
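
Several Sun driver remove paths above (sunhme, sunlance, sunqe) switch from unregister_netdevice() to unregister_netdev(); the latter takes the RTNL lock itself, which is what a remove handler called without the lock held needs. A minimal, hypothetical teardown sketch:

#include <linux/netdevice.h>

/* Teardown called without rtnl_lock() held: use the locked wrapper. */
static void my_teardown(struct net_device *net_dev)
{
        unregister_netdev(net_dev);     /* grabs and drops RTNL internally */
        free_netdev(net_dev);
}
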
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index e3a7e3ceab77..d7741e23f8de 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1145,7 +1145,6 @@ tc35815_rx(struct net_device *dev)
1145 break; 1145 break;
1146 } 1146 }
1147 skb_reserve(skb, 2); /* 16 bit alignment */ 1147 skb_reserve(skb, 2); /* 16 bit alignment */
1148 skb->dev = dev;
1149 1148
1150 data = skb_put(skb, pkt_len); 1149 data = skb_put(skb, pkt_len);
1151 1150
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 256969e1300c..9488f49ea569 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -40,16 +40,16 @@
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41 41
42#include <net/checksum.h> 42#include <net/checksum.h>
43#include <net/ip.h>
43 44
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/io.h> 46#include <asm/io.h>
46#include <asm/byteorder.h> 47#include <asm/byteorder.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
49#ifdef CONFIG_SPARC64 50#ifdef CONFIG_SPARC
50#include <asm/idprom.h> 51#include <asm/idprom.h>
51#include <asm/oplib.h> 52#include <asm/prom.h>
52#include <asm/pbm.h>
53#endif 53#endif
54 54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -3349,7 +3349,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
3349 skb_reserve(copy_skb, 2); 3349 skb_reserve(copy_skb, 2);
3350 skb_put(copy_skb, len); 3350 skb_put(copy_skb, len);
3351 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 3351 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3352 memcpy(copy_skb->data, skb->data, len); 3352 skb_copy_from_linear_data(skb, copy_skb->data, len);
3353 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 3353 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3354 3354
3355 /* We'll reuse the original ring buffer. */ 3355 /* We'll reuse the original ring buffer. */
@@ -3908,20 +3908,20 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3908 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 3908 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3909 mss |= (skb_headlen(skb) - ETH_HLEN) << 9; 3909 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3910 else { 3910 else {
3911 tcp_opt_len = ((skb->h.th->doff - 5) * 4); 3911 struct iphdr *iph = ip_hdr(skb);
3912 ip_tcp_len = (skb->nh.iph->ihl * 4) + 3912
3913 sizeof(struct tcphdr); 3913 tcp_opt_len = tcp_optlen(skb);
3914 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
3914 3915
3915 skb->nh.iph->check = 0; 3916 iph->check = 0;
3916 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + 3917 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3917 tcp_opt_len);
3918 mss |= (ip_tcp_len + tcp_opt_len) << 9; 3918 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3919 } 3919 }
3920 3920
3921 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 3921 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3922 TXD_FLAG_CPU_POST_DMA); 3922 TXD_FLAG_CPU_POST_DMA);
3923 3923
3924 skb->h.th->check = 0; 3924 tcp_hdr(skb)->check = 0;
3925 3925
3926 } 3926 }
3927 else if (skb->ip_summed == CHECKSUM_PARTIAL) 3927 else if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -4055,6 +4055,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4055 mss = 0; 4055 mss = 0;
4056 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 4056 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4057 (mss = skb_shinfo(skb)->gso_size) != 0) { 4057 (mss = skb_shinfo(skb)->gso_size) != 0) {
4058 struct iphdr *iph;
4058 int tcp_opt_len, ip_tcp_len, hdr_len; 4059 int tcp_opt_len, ip_tcp_len, hdr_len;
4059 4060
4060 if (skb_header_cloned(skb) && 4061 if (skb_header_cloned(skb) &&
@@ -4063,8 +4064,8 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4063 goto out_unlock; 4064 goto out_unlock;
4064 } 4065 }
4065 4066
4066 tcp_opt_len = ((skb->h.th->doff - 5) * 4); 4067 tcp_opt_len = tcp_optlen(skb);
4067 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); 4068 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4068 4069
4069 hdr_len = ip_tcp_len + tcp_opt_len; 4070 hdr_len = ip_tcp_len + tcp_opt_len;
4070 if (unlikely((ETH_HLEN + hdr_len) > 80) && 4071 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
@@ -4074,34 +4075,31 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4074 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 4075 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4075 TXD_FLAG_CPU_POST_DMA); 4076 TXD_FLAG_CPU_POST_DMA);
4076 4077
4077 skb->nh.iph->check = 0; 4078 iph = ip_hdr(skb);
4078 skb->nh.iph->tot_len = htons(mss + hdr_len); 4079 iph->check = 0;
4080 iph->tot_len = htons(mss + hdr_len);
4079 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 4081 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4080 skb->h.th->check = 0; 4082 tcp_hdr(skb)->check = 0;
4081 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 4083 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4082 } 4084 } else
4083 else { 4085 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4084 skb->h.th->check = 4086 iph->daddr, 0,
4085 ~csum_tcpudp_magic(skb->nh.iph->saddr, 4087 IPPROTO_TCP,
4086 skb->nh.iph->daddr, 4088 0);
4087 0, IPPROTO_TCP, 0);
4088 }
4089 4089
4090 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || 4090 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4091 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { 4091 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4092 if (tcp_opt_len || skb->nh.iph->ihl > 5) { 4092 if (tcp_opt_len || iph->ihl > 5) {
4093 int tsflags; 4093 int tsflags;
4094 4094
4095 tsflags = ((skb->nh.iph->ihl - 5) + 4095 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4096 (tcp_opt_len >> 2));
4097 mss |= (tsflags << 11); 4096 mss |= (tsflags << 11);
4098 } 4097 }
4099 } else { 4098 } else {
4100 if (tcp_opt_len || skb->nh.iph->ihl > 5) { 4099 if (tcp_opt_len || iph->ihl > 5) {
4101 int tsflags; 4100 int tsflags;
4102 4101
4103 tsflags = ((skb->nh.iph->ihl - 5) + 4102 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4104 (tcp_opt_len >> 2));
4105 base_flags |= tsflags << 12; 4103 base_flags |= tsflags << 12;
4106 } 4104 }
4107 } 4105 }
@@ -10988,24 +10986,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10988 return err; 10986 return err;
10989} 10987}
10990 10988
10991#ifdef CONFIG_SPARC64 10989#ifdef CONFIG_SPARC
10992static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) 10990static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10993{ 10991{
10994 struct net_device *dev = tp->dev; 10992 struct net_device *dev = tp->dev;
10995 struct pci_dev *pdev = tp->pdev; 10993 struct pci_dev *pdev = tp->pdev;
10996 struct pcidev_cookie *pcp = pdev->sysdata; 10994 struct device_node *dp = pci_device_to_OF_node(pdev);
10997 10995 const unsigned char *addr;
10998 if (pcp != NULL) { 10996 int len;
10999 unsigned char *addr; 10997
11000 int len; 10998 addr = of_get_property(dp, "local-mac-address", &len);
11001 10999 if (addr && len == 6) {
11002 addr = of_get_property(pcp->prom_node, "local-mac-address", 11000 memcpy(dev->dev_addr, addr, 6);
11003 &len); 11001 memcpy(dev->perm_addr, dev->dev_addr, 6);
11004 if (addr && len == 6) { 11002 return 0;
11005 memcpy(dev->dev_addr, addr, 6);
11006 memcpy(dev->perm_addr, dev->dev_addr, 6);
11007 return 0;
11008 }
11009 } 11003 }
11010 return -ENODEV; 11004 return -ENODEV;
11011} 11005}
@@ -11026,7 +11020,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
11026 u32 hi, lo, mac_offset; 11020 u32 hi, lo, mac_offset;
11027 int addr_ok = 0; 11021 int addr_ok = 0;
11028 11022
11029#ifdef CONFIG_SPARC64 11023#ifdef CONFIG_SPARC
11030 if (!tg3_get_macaddr_sparc(tp)) 11024 if (!tg3_get_macaddr_sparc(tp))
11031 return 0; 11025 return 0;
11032#endif 11026#endif
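
The tg3 TSO hunks above replace open-coded header arithmetic with ip_hdr(), tcp_hdr(), ip_hdrlen() and tcp_optlen(), and seed the TCP checksum with the pseudo-header when the hardware cannot compute it itself. Here is a hedged sketch of that header fix-up in isolation; my_prepare_tso_headers is hypothetical and omits the descriptor-flag handling of the real driver.

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/checksum.h>

/* Zero the IP checksum, set tot_len to cover one full segment, and preload
 * the TCP checksum with the pseudo-header sum.
 */
static void my_prepare_tso_headers(struct sk_buff *skb, unsigned int mss)
{
        struct iphdr *iph = ip_hdr(skb);
        unsigned int hdr_len = ip_hdrlen(skb) + sizeof(struct tcphdr) +
                               tcp_optlen(skb);

        iph->check = 0;
        iph->tot_len = htons(mss + hdr_len);
        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
}
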
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f85f00251123..106dc1ef0acb 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1112,7 +1112,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1112 1112
1113 if ( bbuf ) { 1113 if ( bbuf ) {
1114 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1114 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
1115 memcpy( tail_buffer, skb->data, skb->len ); 1115 skb_copy_from_linear_data(skb, tail_buffer, skb->len);
1116 } else { 1116 } else {
1117 tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); 1117 tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE);
1118 TLan_StoreSKB(tail_list, skb); 1118 TLan_StoreSKB(tail_list, skb);
@@ -1577,7 +1577,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1577 printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); 1577 printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n");
1578 else { 1578 else {
1579 head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); 1579 head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
1580 skb->dev = dev;
1581 skb_reserve(skb, 2); 1580 skb_reserve(skb, 2);
1582 t = (void *) skb_put(skb, frameSize); 1581 t = (void *) skb_put(skb, frameSize);
1583 1582
@@ -1608,7 +1607,6 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1608 skb->protocol = eth_type_trans( skb, dev ); 1607 skb->protocol = eth_type_trans( skb, dev );
1609 netif_rx( skb ); 1608 netif_rx( skb );
1610 1609
1611 new_skb->dev = dev;
1612 skb_reserve( new_skb, 2 ); 1610 skb_reserve( new_skb, 2 );
1613 t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); 1611 t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
1614 head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1612 head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
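
The tlan change above is the recurring pattern in this series: memcpy() to or
from skb->data becomes skb_copy_from_linear_data() / skb_copy_to_linear_data(),
which name the skb side of the copy explicitly. A small sketch of both
directions (function and buffer names are illustrative):

	#include <linux/skbuff.h>

	/* skb_copy_from_linear_data(skb, to, len) is equivalent to
	 * memcpy(to, skb->data, len); the _to_ variant copies into the
	 * skb's linear area instead. */
	static void example_bounce_copies(struct sk_buff *tx_skb, void *tx_bounce,
					  struct sk_buff *rx_skb, const void *rx_buf,
					  unsigned int rx_len)
	{
		/* transmit: device bounce buffer <- skb payload */
		skb_copy_from_linear_data(tx_skb, tx_bounce, tx_skb->len);

		/* receive: skb payload <- device buffer */
		skb_copy_to_linear_data(rx_skb, rx_buf, rx_len);
	}
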
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7580bdeacadc..e22a3f5333ef 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -933,20 +933,21 @@ static void xl_rx(struct net_device *dev)
933 return ; 933 return ;
934 } 934 }
935 935
936 skb->dev = dev ;
937
938 while (xl_priv->rx_ring_tail != temp_ring_loc) { 936 while (xl_priv->rx_ring_tail != temp_ring_loc) {
939 copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; 937 copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ;
940 frame_length -= copy_len ; 938 frame_length -= copy_len ;
941 pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 939 pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
942 memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ; 940 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
941 skb_put(skb, copy_len),
942 copy_len);
943 pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 943 pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
944 adv_rx_ring(dev) ; 944 adv_rx_ring(dev) ;
945 } 945 }
946 946
947 /* Now we have found the last fragment */ 947 /* Now we have found the last fragment */
948 pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 948 pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
949 memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ; 949 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
950 skb_put(skb,copy_len), frame_length);
950/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ 951/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
951 pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 952 pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
952 adv_rx_ring(dev) ; 953 adv_rx_ring(dev) ;
@@ -967,8 +968,6 @@ static void xl_rx(struct net_device *dev)
967 return ; 968 return ;
968 } 969 }
969 970
970 skb->dev = dev ;
971
972 skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; 971 skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
973 pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 972 pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
974 skb_put(skb2, frame_length) ; 973 skb_put(skb2, frame_length) ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 01d55315ee8c..1e8958ee2d0a 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1771,7 +1771,6 @@ static void tr_rx(struct net_device *dev)
1771 /*BMS again, if she comes in with few but leaves with many */ 1771 /*BMS again, if she comes in with few but leaves with many */
1772 skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len); 1772 skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
1773 skb_put(skb, length); 1773 skb_put(skb, length);
1774 skb->dev = dev;
1775 data = skb->data; 1774 data = skb->data;
1776 rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len))); 1775 rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
1777 rbufdata = rbuf + offsetof(struct rec_buf, data); 1776 rbufdata = rbuf + offsetof(struct rec_buf, data);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index e999feb8c0bb..5d849c089a3b 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -944,8 +944,6 @@ static void streamer_rx(struct net_device *dev)
944 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); 944 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
945 streamer_priv->streamer_stats.rx_dropped++; 945 streamer_priv->streamer_stats.rx_dropped++;
946 } else { /* we allocated an skb OK */ 946 } else { /* we allocated an skb OK */
947 skb->dev = dev;
948
949 if (buffer_cnt == 1) { 947 if (buffer_cnt == 1) {
950 /* release the DMA mapping */ 948 /* release the DMA mapping */
951 pci_unmap_single(streamer_priv->pci_dev, 949 pci_unmap_single(streamer_priv->pci_dev,
@@ -1607,10 +1605,11 @@ static void streamer_arb_cmd(struct net_device *dev)
1607 frame_data, buffer_len); 1605 frame_data, buffer_len);
1608 } while (next_ptr && (buff_off = next_ptr)); 1606 } while (next_ptr && (buff_off = next_ptr));
1609 1607
1608 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1610#if STREAMER_NETWORK_MONITOR 1609#if STREAMER_NETWORK_MONITOR
1611 printk(KERN_WARNING "%s: Received MAC Frame, details: \n", 1610 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
1612 dev->name); 1611 dev->name);
1613 mac_hdr = (struct trh_hdr *) mac_frame->data; 1612 mac_hdr = tr_hdr(mac_frame);
1614 printk(KERN_WARNING 1613 printk(KERN_WARNING
1615 "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", 1614 "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n",
1616 dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], 1615 dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1],
@@ -1622,8 +1621,6 @@ static void streamer_arb_cmd(struct net_device *dev)
1622 mac_hdr->saddr[2], mac_hdr->saddr[3], 1621 mac_hdr->saddr[2], mac_hdr->saddr[3],
1623 mac_hdr->saddr[4], mac_hdr->saddr[5]); 1622 mac_hdr->saddr[4], mac_hdr->saddr[5]);
1624#endif 1623#endif
1625 mac_frame->dev = dev;
1626 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1627 netif_rx(mac_frame); 1624 netif_rx(mac_frame);
1628 1625
1629 /* Now tell the card we have dealt with the received frame */ 1626 /* Now tell the card we have dealt with the received frame */
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 8f4ecc1109cb..09b3cfb8e809 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -814,8 +814,6 @@ static void olympic_rx(struct net_device *dev)
814 olympic_priv->rx_ring_last_received += i ; 814 olympic_priv->rx_ring_last_received += i ;
815 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 815 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
816 } else { 816 } else {
817 skb->dev = dev ;
818
819 /* Optimise based upon number of buffers used. 817 /* Optimise based upon number of buffers used.
820 If only one buffer is used we can simply swap the buffers around. 818 If only one buffer is used we can simply swap the buffers around.
821 If more than one then we must use the new buffer and copy the information 819 If more than one then we must use the new buffer and copy the information
@@ -847,7 +845,9 @@ static void olympic_rx(struct net_device *dev)
847 pci_dma_sync_single_for_cpu(olympic_priv->pdev, 845 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
848 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), 846 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
849 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 847 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
850 memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ; 848 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
849 skb_put(skb,length - 4),
850 length - 4);
851 pci_dma_sync_single_for_device(olympic_priv->pdev, 851 pci_dma_sync_single_for_device(olympic_priv->pdev,
852 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), 852 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
853 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 853 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
@@ -864,7 +864,9 @@ static void olympic_rx(struct net_device *dev)
864 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 864 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
865 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); 865 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
866 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); 866 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
867 memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ; 867 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
868 skb_put(skb, cpy_length),
869 cpy_length);
868 pci_dma_sync_single_for_device(olympic_priv->pdev, 870 pci_dma_sync_single_for_device(olympic_priv->pdev,
869 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), 871 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
870 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 872 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
@@ -1440,16 +1442,16 @@ static void olympic_arb_cmd(struct net_device *dev)
1440 next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); 1442 next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1441 } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr))); 1443 } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
1442 1444
1445 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1446
1443 if (olympic_priv->olympic_network_monitor) { 1447 if (olympic_priv->olympic_network_monitor) {
1444 struct trh_hdr *mac_hdr ; 1448 struct trh_hdr *mac_hdr ;
1445 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; 1449 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
1446 mac_hdr = (struct trh_hdr *)mac_frame->data ; 1450 mac_hdr = tr_hdr(mac_frame);
1447 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; 1451 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
1448 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; 1452 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
1449 } 1453 }
1450 mac_frame->dev = dev ; 1454 netif_rx(mac_frame);
1451 mac_frame->protocol = tr_type_trans(mac_frame,dev);
1452 netif_rx(mac_frame) ;
1453 dev->last_rx = jiffies; 1455 dev->last_rx = jiffies;
1454 1456
1455drop_frame: 1457drop_frame:
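
In the lanstreamer and olympic hunks above, tr_type_trans() is now called
before the monitoring printk and the explicit mac_frame->dev assignment is
gone; like eth_type_trans(), tr_type_trans() sets skb->dev and the MAC header
itself, and tr_hdr() replaces the cast of skb->data. A sketch of the resulting
delivery path (hypothetical helper):

	#include <linux/kernel.h>
	#include <linux/jiffies.h>
	#include <linux/if_tr.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void example_deliver_mac_frame(struct sk_buff *mac_frame,
					      struct net_device *dev)
	{
		struct trh_hdr *mac_hdr;

		/* sets mac_frame->dev and the MAC header offset */
		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		mac_hdr = tr_hdr(mac_frame);
		printk(KERN_DEBUG "%s: MAC frame from %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name, mac_hdr->saddr[0], mac_hdr->saddr[1],
		       mac_hdr->saddr[2], mac_hdr->saddr[3],
		       mac_hdr->saddr[4], mac_hdr->saddr[5]);

		netif_rx(mac_frame);
		dev->last_rx = jiffies;
	}
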
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index cec282a6f62d..9bbea5c8acf4 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3889,14 +3889,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3889 3889
3890 /* Slide data into a sleek skb. */ 3890 /* Slide data into a sleek skb. */
3891 skb_put(skb, skb->len); 3891 skb_put(skb, skb->len);
3892 memcpy(skb->data, rmf, skb->len); 3892 skb_copy_to_linear_data(skb, rmf, skb->len);
3893 3893
3894 /* Update Counters */ 3894 /* Update Counters */
3895 tp->MacStat.rx_packets++; 3895 tp->MacStat.rx_packets++;
3896 tp->MacStat.rx_bytes += skb->len; 3896 tp->MacStat.rx_bytes += skb->len;
3897 3897
3898 /* Kick the packet on up. */ 3898 /* Kick the packet on up. */
3899 skb->dev = dev;
3900 skb->protocol = tr_type_trans(skb, dev); 3899 skb->protocol = tr_type_trans(skb, dev);
3901 netif_rx(skb); 3900 netif_rx(skb);
3902 dev->last_rx = jiffies; 3901 dev->last_rx = jiffies;
@@ -4476,14 +4475,13 @@ static int smctr_rx_frame(struct net_device *dev)
4476 if (skb) { 4475 if (skb) {
4477 skb_put(skb, rx_size); 4476 skb_put(skb, rx_size);
4478 4477
4479 memcpy(skb->data, pbuff, rx_size); 4478 skb_copy_to_linear_data(skb, pbuff, rx_size);
4480 4479
4481 /* Update Counters */ 4480 /* Update Counters */
4482 tp->MacStat.rx_packets++; 4481 tp->MacStat.rx_packets++;
4483 tp->MacStat.rx_bytes += skb->len; 4482 tp->MacStat.rx_bytes += skb->len;
4484 4483
4485 /* Kick the packet on up. */ 4484 /* Kick the packet on up. */
4486 skb->dev = dev;
4487 skb->protocol = tr_type_trans(skb, dev); 4485 skb->protocol = tr_type_trans(skb, dev);
4488 netif_rx(skb); 4486 netif_rx(skb);
4489 dev->last_rx = jiffies; 4487 dev->last_rx = jiffies;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index ea797ca2b988..12bd294045a7 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -644,7 +644,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
644 dmabuf = 0; 644 dmabuf = 0;
645 i = tp->TplFree->TPLIndex; 645 i = tp->TplFree->TPLIndex;
646 buf = tp->LocalTxBuffers[i]; 646 buf = tp->LocalTxBuffers[i];
647 memcpy(buf, skb->data, length); 647 skb_copy_from_linear_data(skb, buf, length);
648 newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; 648 newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer;
649 } 649 }
650 else { 650 else {
@@ -2168,7 +2168,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2168 } 2168 }
2169 else 2169 else
2170 { 2170 {
2171 skb->dev = dev;
2172 skb_put(skb, tp->MaxPacketSize); 2171 skb_put(skb, tp->MaxPacketSize);
2173 rpl->SkbStat = SKB_DATA_COPY; 2172 rpl->SkbStat = SKB_DATA_COPY;
2174 ReceiveDataPtr = rpl->MData; 2173 ReceiveDataPtr = rpl->MData;
@@ -2179,7 +2178,8 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2179 || rpl->SkbStat == SKB_DMA_DIRECT)) 2178 || rpl->SkbStat == SKB_DMA_DIRECT))
2180 { 2179 {
2181 if(rpl->SkbStat == SKB_DATA_COPY) 2180 if(rpl->SkbStat == SKB_DATA_COPY)
2182 memcpy(skb->data, ReceiveDataPtr, Length); 2181 skb_copy_to_linear_data(skb, ReceiveDataPtr,
2182 Length);
2183 2183
2184 /* Deliver frame to system */ 2184 /* Deliver frame to system */
2185 rpl->Skb = NULL; 2185 rpl->Skb = NULL;
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index d92c5c597e16..0bfc2c9c1c08 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -788,7 +788,6 @@ static int tsi108_complete_rx(struct net_device *dev, int budget)
788 printk(".\n"); 788 printk(".\n");
789 } 789 }
790 790
791 skb->dev = dev;
792 skb_put(skb, data->rxring[rx].len); 791 skb_put(skb, data->rxring[rx].len);
793 skb->protocol = eth_type_trans(skb, dev); 792 skb->protocol = eth_type_trans(skb, dev);
794 netif_receive_skb(skb); 793 netif_receive_skb(skb);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c82befa209a2..861729806dc1 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -63,7 +63,7 @@ MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
63 63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ 64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ 65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
66 || defined(__sparc__) || defined(__ia64__) \ 66 || defined(CONFIG_SPARC) || defined(__ia64__) \
67 || defined(__sh__) || defined(__mips__) 67 || defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518; 68static int rx_copybreak = 1518;
69#else 69#else
@@ -435,7 +435,6 @@ static void de_rx (struct de_private *de)
435 rx_work = 100; 435 rx_work = 100;
436 goto rx_next; 436 goto rx_next;
437 } 437 }
438 copy_skb->dev = de->dev;
439 438
440 if (!copying_skb) { 439 if (!copying_skb) {
441 pci_unmap_single(de->pdev, mapping, 440 pci_unmap_single(de->pdev, mapping,
@@ -450,8 +449,8 @@ static void de_rx (struct de_private *de)
450 } else { 449 } else {
451 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); 450 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
452 skb_reserve(copy_skb, RX_OFFSET); 451 skb_reserve(copy_skb, RX_OFFSET);
453 memcpy(skb_put(copy_skb, len), skb->data, len); 452 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
454 453 len);
455 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); 454 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
456 455
457 /* We'll reuse the original ring buffer. */ 456 /* We'll reuse the original ring buffer. */
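
The de2104x, tulip and winbond-840 receive paths in this stretch all drop the
skb->dev assignment on the copy-break path. Roughly, the pattern now looks like
this (standalone sketch; the real drivers also resync the DMA mapping around
the copy):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	/* Copy a small received frame into a tight skb.  eth_type_trans()
	 * fills in skb->dev, so the removed "skb->dev = dev" was redundant. */
	static void example_rx_copybreak(struct net_device *dev,
					 struct sk_buff *rx_skb, unsigned int len)
	{
		struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

		if (!copy_skb)
			return;		/* real drivers fall back to reusing rx_skb */

		skb_reserve(copy_skb, 2);	/* 16-byte align the IP header */
		skb_copy_from_linear_data(rx_skb, skb_put(copy_skb, len), len);

		copy_skb->protocol = eth_type_trans(copy_skb, dev);
		netif_rx(copy_skb);
		dev->last_rx = jiffies;
	}
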
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 4b3cd3d8b62a..62143f92c231 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1160,7 +1160,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1160 sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); 1160 sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id);
1161 1161
1162 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); 1162 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1163#if defined(__alpha__) || defined(__powerpc__) || defined(__sparc_v9__) || defined(DE4X5_DO_MEMCPY) 1163#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1164 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; 1164 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1165#endif 1165#endif
1166 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, 1166 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
@@ -1175,7 +1175,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1175 ** Set up the RX descriptor ring (Intels) 1175 ** Set up the RX descriptor ring (Intels)
1176 ** Allocate contiguous receive buffers, long word aligned (Alphas) 1176 ** Allocate contiguous receive buffers, long word aligned (Alphas)
1177 */ 1177 */
1178#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) 1178#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1179 for (i=0; i<NUM_RX_DESC; i++) { 1179 for (i=0; i<NUM_RX_DESC; i++) {
1180 lp->rx_ring[i].status = 0; 1180 lp->rx_ring[i].status = 0;
1181 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); 1181 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
@@ -1252,11 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1252 mii_get_phy(dev); 1252 mii_get_phy(dev);
1253 } 1253 }
1254 1254
1255#ifndef __sparc_v9__
1256 printk(" and requires IRQ%d (provided by %s).\n", dev->irq, 1255 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1257#else
1258 printk(" and requires IRQ%x (provided by %s).\n", dev->irq,
1259#endif
1260 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); 1256 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1261 } 1257 }
1262 1258
@@ -3627,14 +3623,13 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3627 struct de4x5_private *lp = netdev_priv(dev); 3623 struct de4x5_private *lp = netdev_priv(dev);
3628 struct sk_buff *p; 3624 struct sk_buff *p;
3629 3625
3630#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) 3626#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3631 struct sk_buff *ret; 3627 struct sk_buff *ret;
3632 u_long i=0, tmp; 3628 u_long i=0, tmp;
3633 3629
3634 p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2); 3630 p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
3635 if (!p) return NULL; 3631 if (!p) return NULL;
3636 3632
3637 p->dev = dev;
3638 tmp = virt_to_bus(p->data); 3633 tmp = virt_to_bus(p->data);
3639 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; 3634 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3640 skb_reserve(p, i); 3635 skb_reserve(p, i);
@@ -3655,7 +3650,6 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3655 p = dev_alloc_skb(len + 2); 3650 p = dev_alloc_skb(len + 2);
3656 if (!p) return NULL; 3651 if (!p) return NULL;
3657 3652
3658 p->dev = dev;
3659 skb_reserve(p, 2); /* Align */ 3653 skb_reserve(p, 2); /* Align */
3660 if (index < lp->rx_old) { /* Wrapped buffer */ 3654 if (index < lp->rx_old) { /* Wrapped buffer */
3661 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; 3655 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
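
The de4x5 and de2104x edits above (and the tulip_core ones below) replace the
__sparc__ / __sparc_v9__ compiler tests with the Kconfig symbol, so 32-bit and
64-bit sparc share one path. Consolidated, the copy-break guard now reads:

	/* one guard for every sparc flavour, instead of __sparc__ / __sparc_v9__ */
	#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
		|| defined(CONFIG_SPARC) || defined(__ia64__) \
		|| defined(__sh__) || defined(__mips__)
	static int rx_copybreak = 1518;	/* copy every frame so the IP header can be realigned */
	#else
	static int rx_copybreak = 100;	/* copy only small frames */
	#endif
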
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9aeac76184f3..b3a64ca98634 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
682 682
683 /* transmit this packet */ 683 /* transmit this packet */
684 txptr = db->tx_insert_ptr; 684 txptr = db->tx_insert_ptr;
685 memcpy(txptr->tx_buf_ptr, skb->data, skb->len); 685 skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
686 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); 686 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
687 687
688 /* Point to next transmit free descriptor */ 688 /* Point to next transmit free descriptor */
@@ -988,14 +988,14 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
988 988
989 skb = newskb; 989 skb = newskb;
990 /* size less than COPY_SIZE, allocate a rxlen SKB */ 990 /* size less than COPY_SIZE, allocate a rxlen SKB */
991 skb->dev = dev;
992 skb_reserve(skb, 2); /* 16byte align */ 991 skb_reserve(skb, 2); /* 16byte align */
993 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen); 992 skb_copy_from_linear_data(rxptr->rx_skb_ptr,
993 skb_put(skb, rxlen),
994 rxlen);
994 dmfe_reuse_skb(db, rxptr->rx_skb_ptr); 995 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
995 } else { 996 } else
996 skb->dev = dev;
997 skb_put(skb, rxlen); 997 skb_put(skb, rxlen);
998 } 998
999 skb->protocol = eth_type_trans(skb, dev); 999 skb->protocol = eth_type_trans(skb, dev);
1000 netif_rx(skb); 1000 netif_rx(skb);
1001 dev->last_rx = jiffies; 1001 dev->last_rx = jiffies;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index e3488d7b8ede..e86df07769a1 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -192,7 +192,6 @@ int tulip_poll(struct net_device *dev, int *budget)
192 to a minimally-sized skbuff. */ 192 to a minimally-sized skbuff. */
193 if (pkt_len < tulip_rx_copybreak 193 if (pkt_len < tulip_rx_copybreak
194 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 194 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
195 skb->dev = dev;
196 skb_reserve(skb, 2); /* 16 byte align the IP header */ 195 skb_reserve(skb, 2); /* 16 byte align the IP header */
197 pci_dma_sync_single_for_cpu(tp->pdev, 196 pci_dma_sync_single_for_cpu(tp->pdev,
198 tp->rx_buffers[entry].mapping, 197 tp->rx_buffers[entry].mapping,
@@ -416,7 +415,6 @@ static int tulip_rx(struct net_device *dev)
416 to a minimally-sized skbuff. */ 415 to a minimally-sized skbuff. */
417 if (pkt_len < tulip_rx_copybreak 416 if (pkt_len < tulip_rx_copybreak
418 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 417 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
419 skb->dev = dev;
420 skb_reserve(skb, 2); /* 16 byte align the IP header */ 418 skb_reserve(skb, 2); /* 16 byte align the IP header */
421 pci_dma_sync_single_for_cpu(tp->pdev, 419 pci_dma_sync_single_for_cpu(tp->pdev,
422 tp->rx_buffers[entry].mapping, 420 tp->rx_buffers[entry].mapping,
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index e3774a522372..e9bf526ec534 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -36,8 +36,8 @@
36#include <asm/unaligned.h> 36#include <asm/unaligned.h>
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#ifdef __sparc__ 39#ifdef CONFIG_SPARC
40#include <asm/pbm.h> 40#include <asm/prom.h>
41#endif 41#endif
42 42
43static char version[] __devinitdata = 43static char version[] __devinitdata =
@@ -67,7 +67,7 @@ const char * const medianame[32] = {
67 67
68/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ 68/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
69#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ 69#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
70 || defined(__sparc__) || defined(__ia64__) \ 70 || defined(CONFIG_SPARC) || defined(__ia64__) \
71 || defined(__sh__) || defined(__mips__) 71 || defined(__sh__) || defined(__mips__)
72static int rx_copybreak = 1518; 72static int rx_copybreak = 1518;
73#else 73#else
@@ -91,7 +91,7 @@ static int rx_copybreak = 100;
91static int csr0 = 0x01A00000 | 0xE000; 91static int csr0 = 0x01A00000 | 0xE000;
92#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) 92#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
93static int csr0 = 0x01A00000 | 0x8000; 93static int csr0 = 0x01A00000 | 0x8000;
94#elif defined(__sparc__) || defined(__hppa__) 94#elif defined(CONFIG_SPARC) || defined(__hppa__)
95/* The UltraSparc PCI controllers will disconnect at every 64-byte 95/* The UltraSparc PCI controllers will disconnect at every 64-byte
96 * crossing anyways so it makes no sense to tell Tulip to burst 96 * crossing anyways so it makes no sense to tell Tulip to burst
97 * any more than that. 97 * any more than that.
@@ -1315,7 +1315,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1315 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ 1315 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1316 if (tulip_uli_dm_quirk(pdev)) { 1316 if (tulip_uli_dm_quirk(pdev)) {
1317 csr0 &= ~0x01f100ff; 1317 csr0 &= ~0x01f100ff;
1318#if defined(__sparc__) 1318#if defined(CONFIG_SPARC)
1319 csr0 = (csr0 & ~0xff00) | 0xe000; 1319 csr0 = (csr0 & ~0xff00) | 0xe000;
1320#endif 1320#endif
1321 } 1321 }
@@ -1535,23 +1535,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1535 Many PCI BIOSes also incorrectly report the IRQ line, so we correct 1535 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1536 that here as well. */ 1536 that here as well. */
1537 if (sum == 0 || sum == 6*0xff) { 1537 if (sum == 0 || sum == 6*0xff) {
1538#if defined(__sparc__) 1538#if defined(CONFIG_SPARC)
1539 struct pcidev_cookie *pcp = pdev->sysdata; 1539 struct device_node *dp = pci_device_to_OF_node(pdev);
1540 const unsigned char *addr;
1541 int len;
1540#endif 1542#endif
1541 eeprom_missing = 1; 1543 eeprom_missing = 1;
1542 for (i = 0; i < 5; i++) 1544 for (i = 0; i < 5; i++)
1543 dev->dev_addr[i] = last_phys_addr[i]; 1545 dev->dev_addr[i] = last_phys_addr[i];
1544 dev->dev_addr[i] = last_phys_addr[i] + 1; 1546 dev->dev_addr[i] = last_phys_addr[i] + 1;
1545#if defined(__sparc__) 1547#if defined(CONFIG_SPARC)
1546 if (pcp) { 1548 addr = of_get_property(dp, "local-mac-address", &len);
1547 unsigned char *addr; 1549 if (addr && len == 6)
1548 int len; 1550 memcpy(dev->dev_addr, addr, 6);
1549
1550 addr = of_get_property(pcp->prom_node,
1551 "local-mac-address", &len);
1552 if (addr && len == 6)
1553 memcpy(dev->dev_addr, addr, 6);
1554 }
1555#endif 1551#endif
1556#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ 1552#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1557 if (last_irq) 1553 if (last_irq)
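
Both the tg3 and tulip_core hunks above read the MAC address on sparc through
the PCI device's Open Firmware node (pci_device_to_OF_node() plus
of_get_property(), from <asm/prom.h>) instead of the old pcidev_cookie /
prom_node pair. Condensed into a standalone helper (name illustrative):

	#ifdef CONFIG_SPARC
	#include <linux/pci.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>
	#include <asm/prom.h>

	static int example_mac_from_firmware(struct net_device *dev,
					     struct pci_dev *pdev)
	{
		struct device_node *dp = pci_device_to_OF_node(pdev);
		const unsigned char *addr;
		int len;

		addr = of_get_property(dp, "local-mac-address", &len);
		if (addr && len == 6) {
			memcpy(dev->dev_addr, addr, 6);
			return 0;
		}
		return -ENODEV;	/* fall back to the EEPROM/guess paths */
	}
	#endif
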
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 229158e8e4be..ca2548eb7d63 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
583 583
584 /* transmit this packet */ 584 /* transmit this packet */
585 txptr = db->tx_insert_ptr; 585 txptr = db->tx_insert_ptr;
586 memcpy(txptr->tx_buf_ptr, skb->data, skb->len); 586 skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
587 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); 587 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
588 588
589 /* Point to next transmit free descriptor */ 589 /* Point to next transmit free descriptor */
@@ -828,14 +828,14 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
828 ( (skb = dev_alloc_skb(rxlen + 2) ) 828 ( (skb = dev_alloc_skb(rxlen + 2) )
829 != NULL) ) { 829 != NULL) ) {
830 /* size less than COPY_SIZE, allocate a rxlen SKB */ 830 /* size less than COPY_SIZE, allocate a rxlen SKB */
831 skb->dev = dev;
832 skb_reserve(skb, 2); /* 16byte align */ 831 skb_reserve(skb, 2); /* 16byte align */
833 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); 832 memcpy(skb_put(skb, rxlen),
833 skb_tail_pointer(rxptr->rx_skb_ptr),
834 rxlen);
834 uli526x_reuse_skb(db, rxptr->rx_skb_ptr); 835 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
835 } else { 836 } else
836 skb->dev = dev;
837 skb_put(skb, rxlen); 837 skb_put(skb, rxlen);
838 } 838
839 skb->protocol = eth_type_trans(skb, dev); 839 skb->protocol = eth_type_trans(skb, dev);
840 netif_rx(skb); 840 netif_rx(skb);
841 dev->last_rx = jiffies; 841 dev->last_rx = jiffies;
@@ -1177,7 +1177,10 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
1177 1177
1178 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { 1178 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1179 rxptr->rx_skb_ptr = skb; 1179 rxptr->rx_skb_ptr = skb;
1180 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1180 rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
1181 skb_tail_pointer(skb),
1182 RX_ALLOC_SIZE,
1183 PCI_DMA_FROMDEVICE));
1181 wmb(); 1184 wmb();
1182 rxptr->rdes0 = cpu_to_le32(0x80000000); 1185 rxptr->rdes0 = cpu_to_le32(0x80000000);
1183 db->rx_avail_cnt++; 1186 db->rx_avail_cnt++;
@@ -1341,7 +1344,10 @@ static void allocate_rx_buffer(struct uli526x_board_info *db)
1341 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) 1344 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1342 break; 1345 break;
1343 rxptr->rx_skb_ptr = skb; /* FIXME (?) */ 1346 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1344 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1347 rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
1348 skb_tail_pointer(skb),
1349 RX_ALLOC_SIZE,
1350 PCI_DMA_FROMDEVICE));
1345 wmb(); 1351 wmb();
1346 rxptr->rdes0 = cpu_to_le32(0x80000000); 1352 rxptr->rdes0 = cpu_to_le32(0x80000000);
1347 rxptr = rxptr->next_rx_desc; 1353 rxptr = rxptr->next_rx_desc;
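
The uli526x mapping calls above go through skb_tail_pointer() rather than
dereferencing skb->tail directly; with the reworked sk_buff layout the tail
field may be stored as an offset, so the helper is the portable way to get the
address. A reduced sketch:

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	/* Map the writable area of a fresh receive skb for DMA. */
	static dma_addr_t example_map_rx(struct pci_dev *pdev, struct sk_buff *skb,
					 unsigned int size)
	{
		return pci_map_single(pdev, skb_tail_pointer(skb), size,
				      PCI_DMA_FROMDEVICE);
	}
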
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 002a05e0722f..5b71ac78bca2 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -813,7 +813,6 @@ static void init_rxtx_rings(struct net_device *dev)
813 np->rx_skbuff[i] = skb; 813 np->rx_skbuff[i] = skb;
814 if (skb == NULL) 814 if (skb == NULL)
815 break; 815 break;
816 skb->dev = dev; /* Mark as being used by this device. */
817 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, 816 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
818 np->rx_buf_sz,PCI_DMA_FROMDEVICE); 817 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
819 818
@@ -903,7 +902,7 @@ static void init_registers(struct net_device *dev)
903 } 902 }
904#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) 903#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
905 i |= 0xE000; 904 i |= 0xE000;
906#elif defined(__sparc__) || defined (CONFIG_PARISC) 905#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
907 i |= 0x4800; 906 i |= 0x4800;
908#else 907#else
909#warning Processor architecture undefined 908#warning Processor architecture undefined
@@ -1229,7 +1228,6 @@ static int netdev_rx(struct net_device *dev)
1229 to a minimally-sized skbuff. */ 1228 to a minimally-sized skbuff. */
1230 if (pkt_len < rx_copybreak 1229 if (pkt_len < rx_copybreak
1231 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1230 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1232 skb->dev = dev;
1233 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1231 skb_reserve(skb, 2); /* 16 byte align the IP header */
1234 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], 1232 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1235 np->rx_skbuff[entry]->len, 1233 np->rx_skbuff[entry]->len,
@@ -1278,7 +1276,6 @@ static int netdev_rx(struct net_device *dev)
1278 np->rx_skbuff[entry] = skb; 1276 np->rx_skbuff[entry] = skb;
1279 if (skb == NULL) 1277 if (skb == NULL)
1280 break; /* Better luck next round. */ 1278 break; /* Better luck next round. */
1281 skb->dev = dev; /* Mark as being used by this device. */
1282 np->rx_addr[entry] = pci_map_single(np->pci_dev, 1279 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1283 skb->data, 1280 skb->data,
1284 np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1281 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 61d313049dd0..985a1810ca59 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
411 sometimes sends more than you ask it to. */ 411 sometimes sends more than you ask it to. */
412 412
413 memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); 413 memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
414 memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); 414 skb_copy_from_linear_data(skb,
415 415 &(card->tx_buffer[bufferoffsets[desc] / 4]),
416 416 skb->len);
417 /* FIXME: The specification tells us that the length we send HAS to be a multiple of 417 /* FIXME: The specification tells us that the length we send HAS to be a multiple of
418 4 bytes. */ 418 4 bytes. */
419 419
@@ -1207,7 +1207,6 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1207 card->stats.rx_dropped++; 1207 card->stats.rx_dropped++;
1208 goto out; 1208 goto out;
1209 } 1209 }
1210 skb->dev = dev;
1211 skb_reserve(skb, 2); 1210 skb_reserve(skb, 2);
1212 eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); 1211 eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
1213 skb_put(skb, pkt_len); 1212 skb_put(skb, pkt_len);
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index a998c5d0ae9c..f64172927377 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -65,7 +65,7 @@ static int rx_copybreak = 100;
65static int csr0 = 0x01A00000 | 0xE000; 65static int csr0 = 0x01A00000 | 0xE000;
66#elif defined(__powerpc__) 66#elif defined(__powerpc__)
67static int csr0 = 0x01B00000 | 0x8000; 67static int csr0 = 0x01B00000 | 0x8000;
68#elif defined(__sparc__) 68#elif defined(CONFIG_SPARC)
69static int csr0 = 0x01B00080 | 0x8000; 69static int csr0 = 0x01B00080 | 0x8000;
70#elif defined(__i386__) 70#elif defined(__i386__)
71static int csr0 = 0x01A00000 | 0x8000; 71static int csr0 = 0x01A00000 | 0x8000;
@@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
915 915
916 tp->tx_skbuff[entry] = skb; 916 tp->tx_skbuff[entry] = skb;
917 if (tp->chip_id == X3201_3) { 917 if (tp->chip_id == X3201_3) {
918 memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); 918 skb_copy_from_linear_data(skb,
919 tp->tx_aligned_skbuff[entry]->data,
920 skb->len);
919 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); 921 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
920 } else 922 } else
921 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); 923 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
@@ -1238,7 +1240,6 @@ xircom_rx(struct net_device *dev)
1238 to a minimally-sized skbuff. */ 1240 to a minimally-sized skbuff. */
1239 if (pkt_len < rx_copybreak 1241 if (pkt_len < rx_copybreak
1240 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1242 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1241 skb->dev = dev;
1242 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1243 skb_reserve(skb, 2); /* 16 byte align the IP header */
1243#if ! defined(__alpha__) 1244#if ! defined(__alpha__)
1244 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), 1245 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5643d1e84ed6..a2c6caaaae93 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -18,6 +18,10 @@
18/* 18/*
19 * Changes: 19 * Changes:
20 * 20 *
21 * Brian Braunstein <linuxkernel@bristyle.com> 2007/03/23
22 * Fixed hw address handling. Now net_device.dev_addr is kept consistent
23 * with tun.dev_addr when the address is set by this module.
24 *
21 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 25 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22 * Add TUNSETLINK ioctl to set the link encapsulation 26 * Add TUNSETLINK ioctl to set the link encapsulation
23 * 27 *
@@ -196,7 +200,10 @@ static void tun_net_init(struct net_device *dev)
196 dev->set_multicast_list = tun_net_mclist; 200 dev->set_multicast_list = tun_net_mclist;
197 201
198 ether_setup(dev); 202 ether_setup(dev);
199 random_ether_addr(dev->dev_addr); 203
204 /* random address already created for us by tun_set_iff, use it */
205 memcpy(dev->dev_addr, tun->dev_addr, min(sizeof(tun->dev_addr), sizeof(dev->dev_addr)) );
206
200 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ 207 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
201 break; 208 break;
202 } 209 }
@@ -254,11 +261,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
254 return -EFAULT; 261 return -EFAULT;
255 } 262 }
256 263
257 skb->dev = tun->dev;
258 switch (tun->flags & TUN_TYPE_MASK) { 264 switch (tun->flags & TUN_TYPE_MASK) {
259 case TUN_TUN_DEV: 265 case TUN_TUN_DEV:
260 skb->mac.raw = skb->data; 266 skb_reset_mac_header(skb);
261 skb->protocol = pi.proto; 267 skb->protocol = pi.proto;
268 skb->dev = tun->dev;
262 break; 269 break;
263 case TUN_TAP_DEV: 270 case TUN_TAP_DEV:
264 skb->protocol = eth_type_trans(skb, tun->dev); 271 skb->protocol = eth_type_trans(skb, tun->dev);
@@ -386,8 +393,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
386 * - we are multicast promiscous. 393 * - we are multicast promiscous.
387 * - we belong to the multicast group. 394 * - we belong to the multicast group.
388 */ 395 */
389 memcpy(addr, skb->data, 396 skb_copy_from_linear_data(skb, addr, min_t(size_t, sizeof addr,
390 min_t(size_t, sizeof addr, skb->len)); 397 skb->len));
391 bit_nr = ether_crc(sizeof addr, addr) >> 26; 398 bit_nr = ether_crc(sizeof addr, addr) >> 26;
392 if ((tun->if_flags & IFF_PROMISC) || 399 if ((tun->if_flags & IFF_PROMISC) ||
393 memcmp(addr, tun->dev_addr, sizeof addr) == 0 || 400 memcmp(addr, tun->dev_addr, sizeof addr) == 0 ||
@@ -636,6 +643,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
636 return 0; 643 return 0;
637 644
638 case SIOCGIFHWADDR: 645 case SIOCGIFHWADDR:
646 /* Note: the actual net device's address may be different */
639 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr, 647 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr,
640 min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); 648 min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
641 if (copy_to_user( argp, &ifr, sizeof ifr)) 649 if (copy_to_user( argp, &ifr, sizeof ifr))
@@ -643,16 +651,24 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
643 return 0; 651 return 0;
644 652
645 case SIOCSIFHWADDR: 653 case SIOCSIFHWADDR:
646 /** Set the character device's hardware address. This is used when 654 {
647 * filtering packets being sent from the network device to the character 655 /* try to set the actual net device's hw address */
648 * device. */ 656 int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
649 memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data, 657
650 min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr)); 658 if (ret == 0) {
651 DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n", 659 /** Set the character device's hardware address. This is used when
652 tun->dev->name, 660 * filtering packets being sent from the network device to the character
653 tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2], 661 * device. */
654 tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]); 662 memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data,
655 return 0; 663 min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
664 DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n",
665 tun->dev->name,
666 tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2],
667 tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]);
668 }
669
670 return ret;
671 }
656 672
657 case SIOCADDMULTI: 673 case SIOCADDMULTI:
658 /** Add the specified group to the character device's multicast filter 674 /** Add the specified group to the character device's multicast filter
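
The SIOCSIFHWADDR rework above makes the tun character device and its
net_device agree on the hardware address: the request is applied to the real
device first via dev_set_mac_address(), and the character-side filter copy is
updated only if that succeeds. Reduced to its core (reusing the driver's
tun_struct fields shown above; the helper itself is illustrative):

	static int example_set_hwaddr(struct tun_struct *tun, struct ifreq *ifr)
	{
		/* let the net device (and its notifiers) see the change first */
		int ret = dev_set_mac_address(tun->dev, &ifr->ifr_hwaddr);

		if (ret == 0)
			/* keep the character device's filter address in sync */
			memcpy(tun->dev_addr, ifr->ifr_hwaddr.sa_data,
			       min(sizeof(ifr->ifr_hwaddr.sa_data),
				   sizeof(tun->dev_addr)));
		return ret;
	}
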
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 0d91d094edd9..f2dd7763cd0b 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1708,7 +1708,6 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1708 1708
1709 if(pkt_len < rx_copybreak && 1709 if(pkt_len < rx_copybreak &&
1710 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1710 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1711 new_skb->dev = tp->dev;
1712 skb_reserve(new_skb, 2); 1711 skb_reserve(new_skb, 2);
1713 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, 1712 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1714 PKT_BUF_SZ, 1713 PKT_BUF_SZ,
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index f3a972e74e9a..adea290a9d5e 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1486,7 +1486,6 @@ static int rhine_rx(struct net_device *dev, int limit)
1486 copying to a minimally-sized skbuff. */ 1486 copying to a minimally-sized skbuff. */
1487 if (pkt_len < rx_copybreak && 1487 if (pkt_len < rx_copybreak &&
1488 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1488 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1489 skb->dev = dev;
1490 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1489 skb_reserve(skb, 2); /* 16 byte align the IP header */
1491 pci_dma_sync_single_for_cpu(rp->pdev, 1490 pci_dma_sync_single_for_cpu(rp->pdev,
1492 rp->rx_skbuff_dma[entry], 1491 rp->rx_skbuff_dma[entry],
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 8e5d82051bd4..25b75b615188 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1339,7 +1339,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1339 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) 1339 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
1340 skb_reserve(new_skb, 2); 1340 skb_reserve(new_skb, 2);
1341 1341
1342 memcpy(new_skb->data, rx_skb[0]->data, pkt_size); 1342 skb_copy_from_linear_data(rx_skb[0], new_skb->data,
1343 pkt_size);
1343 *rx_skb = new_skb; 1344 *rx_skb = new_skb;
1344 ret = 0; 1345 ret = 0;
1345 } 1346 }
@@ -1398,7 +1399,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1398 vptr->stats.multicast++; 1399 vptr->stats.multicast++;
1399 1400
1400 skb = rd_info->skb; 1401 skb = rd_info->skb;
1401 skb->dev = vptr->dev;
1402 1402
1403 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 1403 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
1404 vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1404 vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
@@ -1428,7 +1428,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1428 PCI_DMA_FROMDEVICE); 1428 PCI_DMA_FROMDEVICE);
1429 1429
1430 skb_put(skb, pkt_len - 4); 1430 skb_put(skb, pkt_len - 4);
1431 skb->protocol = eth_type_trans(skb, skb->dev); 1431 skb->protocol = eth_type_trans(skb, vptr->dev);
1432 1432
1433 stats->rx_bytes += pkt_len; 1433 stats->rx_bytes += pkt_len;
1434 netif_rx(skb); 1434 netif_rx(skb);
@@ -1928,7 +1928,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
1928 if (pktlen < ETH_ZLEN) { 1928 if (pktlen < ETH_ZLEN) {
1929 /* Cannot occur until ZC support */ 1929 /* Cannot occur until ZC support */
1930 pktlen = ETH_ZLEN; 1930 pktlen = ETH_ZLEN;
1931 memcpy(tdinfo->buf, skb->data, skb->len); 1931 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
1932 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); 1932 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
1933 tdinfo->skb = skb; 1933 tdinfo->skb = skb;
1934 tdinfo->skb_dma[0] = tdinfo->buf_dma; 1934 tdinfo->skb_dma[0] = tdinfo->buf_dma;
@@ -1944,7 +1944,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
1944 int nfrags = skb_shinfo(skb)->nr_frags; 1944 int nfrags = skb_shinfo(skb)->nr_frags;
1945 tdinfo->skb = skb; 1945 tdinfo->skb = skb;
1946 if (nfrags > 6) { 1946 if (nfrags > 6) {
1947 memcpy(tdinfo->buf, skb->data, skb->len); 1947 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
1948 tdinfo->skb_dma[0] = tdinfo->buf_dma; 1948 tdinfo->skb_dma[0] = tdinfo->buf_dma;
1949 td_ptr->tdesc0.pktsize = 1949 td_ptr->tdesc0.pktsize =
1950 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 1950 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
@@ -2007,7 +2007,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2007 */ 2007 */
2008 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) 2008 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2009 && (skb->ip_summed == CHECKSUM_PARTIAL)) { 2009 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2010 struct iphdr *ip = skb->nh.iph; 2010 const struct iphdr *ip = ip_hdr(skb);
2011 if (ip->protocol == IPPROTO_TCP) 2011 if (ip->protocol == IPPROTO_TCP)
2012 td_ptr->tdesc1.TCR |= TCR0_TCPCK; 2012 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2013 else if (ip->protocol == IPPROTO_UDP) 2013 else if (ip->protocol == IPPROTO_UDP)
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 5b82e4fd0d73..23464735fa88 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -773,7 +773,7 @@ static int sppp_rx_done(struct channel_data *chan)
773 } 773 }
774 chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); 774 chan->rx_skb->protocol = htons(ETH_P_WAN_PPP);
775 chan->rx_skb->dev = chan->pppdev.dev; 775 chan->rx_skb->dev = chan->pppdev.dev;
776 chan->rx_skb->mac.raw = chan->rx_skb->data; 776 skb_reset_mac_header(chan->rx_skb);
777 chan->stats.rx_packets++; 777 chan->stats.rx_packets++;
778 chan->stats.rx_bytes += chan->cosa->rxsize; 778 chan->stats.rx_bytes += chan->cosa->rxsize;
779 netif_rx(chan->rx_skb); 779 netif_rx(chan->rx_skb);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index a631d1c2fa14..016b3ff3ea5e 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -834,7 +834,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
834 ++chan->ifstats.rx_packets; 834 ++chan->ifstats.rx_packets;
835 chan->ifstats.rx_bytes += pktlen; 835 chan->ifstats.rx_bytes += pktlen;
836 836
837 skb->mac.raw = skb->data; 837 skb_reset_mac_header(skb);
838 netif_rx(skb); 838 netif_rx(skb);
839 dev->last_rx = jiffies; /* timestamp */ 839 dev->last_rx = jiffies; /* timestamp */
840} 840}
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 736987559432..66be20c292b6 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -176,7 +176,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
176 if (process) 176 if (process)
177 { 177 {
178 /* we've set up the protocol, so discard the header */ 178 /* we've set up the protocol, so discard the header */
179 skb->mac.raw = skb->data; 179 skb_reset_mac_header(skb);
180 skb_pull(skb, header); 180 skb_pull(skb, header);
181 dlp->stats.rx_bytes += skb->len; 181 dlp->stats.rx_bytes += skb->len;
182 netif_rx(skb); 182 netif_rx(skb);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 25021a7992a9..dca024471455 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1904,7 +1904,8 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
1904 struct TxFD *tx_fd = dpriv->tx_fd + last; 1904 struct TxFD *tx_fd = dpriv->tx_fd + last;
1905 1905
1906 skb->len = DUMMY_SKB_SIZE; 1906 skb->len = DUMMY_SKB_SIZE;
1907 memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE); 1907 skb_copy_to_linear_data(skb, version,
1908 strlen(version) % DUMMY_SKB_SIZE);
1908 tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); 1909 tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
1909 tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, 1910 tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
1910 DUMMY_SKB_SIZE, PCI_DMA_TODEVICE); 1911 DUMMY_SKB_SIZE, PCI_DMA_TODEVICE);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index c45d6a83339d..58a53b6d9b42 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -864,7 +864,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
864static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) 864static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
865{ 865{
866 skb->dev = dev; 866 skb->dev = dev;
867 skb->mac.raw = skb->data; 867 skb_reset_mac_header(skb);
868 skb->pkt_type = PACKET_HOST; 868 skb->pkt_type = PACKET_HOST;
869 return htons(ETH_P_CUST); 869 return htons(ETH_P_CUST);
870} 870}
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index c9664fd8a917..00e0aaadabcc 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -124,7 +124,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
124 skb_put(skb, sizeof(struct cisco_packet)); 124 skb_put(skb, sizeof(struct cisco_packet));
125 skb->priority = TC_PRIO_CONTROL; 125 skb->priority = TC_PRIO_CONTROL;
126 skb->dev = dev; 126 skb->dev = dev;
127 skb->nh.raw = skb->data; 127 skb_reset_network_header(skb);
128 128
129 dev_queue_xmit(skb); 129 dev_queue_xmit(skb);
130} 130}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index c6c3c757d6f1..aeb2789adf26 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -533,7 +533,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
533 skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); 533 skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
534 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); 534 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
535 } 535 }
536 data = skb->tail; 536 data = skb_tail_pointer(skb);
537 data[i++] = LMI_CALLREF; 537 data[i++] = LMI_CALLREF;
538 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; 538 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
539 if (lmi == LMI_ANSI) 539 if (lmi == LMI_ANSI)
@@ -590,7 +590,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
590 skb_put(skb, i); 590 skb_put(skb, i);
591 skb->priority = TC_PRIO_CONTROL; 591 skb->priority = TC_PRIO_CONTROL;
592 skb->dev = dev; 592 skb->dev = dev;
593 skb->nh.raw = skb->data; 593 skb_reset_network_header(skb);
594 594
595 dev_queue_xmit(skb); 595 dev_queue_xmit(skb);
596} 596}
@@ -1011,7 +1011,6 @@ static int fr_rx(struct sk_buff *skb)
1011 stats->rx_bytes += skb->len; 1011 stats->rx_bytes += skb->len;
1012 if (pvc->state.becn) 1012 if (pvc->state.becn)
1013 stats->rx_compressed++; 1013 stats->rx_compressed++;
1014 skb->dev = dev;
1015 netif_rx(skb); 1014 netif_rx(skb);
1016 return NET_RX_SUCCESS; 1015 return NET_RX_SUCCESS;
1017 } else { 1016 } else {
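
The WAN drivers in this stretch (cosa, farsync, the hdlc_* protocols, and more
below) all trade the open-coded "skb->mac.raw = skb->data" for
skb_reset_mac_header(), and "skb->nh.raw = skb->data" for
skb_reset_network_header(). A sketch of a typical receive hand-off after the
conversion (hypothetical helper):

	#include <linux/jiffies.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void example_wan_rx(struct sk_buff *skb, struct net_device *dev,
				   __be16 proto)
	{
		skb->protocol = proto;
		skb->dev = dev;
		skb_reset_mac_header(skb);	/* MAC header starts at skb->data */
		netif_rx(skb);
		dev->last_rx = jiffies;
	}
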
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index a02c5fb40567..9ba3e4ee6ec7 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -59,7 +59,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
60 skb_trim(skb, skb->len-2); 60 skb_trim(skb, skb->len-2);
61 skb->protocol=__constant_htons(ETH_P_WAN_PPP); 61 skb->protocol=__constant_htons(ETH_P_WAN_PPP);
62 skb->mac.raw=skb->data; 62 skb_reset_mac_header(skb);
63 skb->dev=c->netdevice; 63 skb->dev=c->netdevice;
64 /* 64 /*
65 * Send it to the PPP layer. We don't have time to process 65 * Send it to the PPP layer. We don't have time to process
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 2b54f1bc3a0d..ae132c1c5459 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1636,7 +1636,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1636 if (nsb) { 1636 if (nsb) {
1637 sc->lmc_rxq[i] = nsb; 1637 sc->lmc_rxq[i] = nsb;
1638 nsb->dev = dev; 1638 nsb->dev = dev;
1639 sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); 1639 sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1640 } 1640 }
1641 sc->failed_recv_alloc = 1; 1641 sc->failed_recv_alloc = 1;
1642 goto skip_packet; 1642 goto skip_packet;
@@ -1667,8 +1667,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1667 skb_put (skb, len); 1667 skb_put (skb, len);
1668 skb->protocol = lmc_proto_type(sc, skb); 1668 skb->protocol = lmc_proto_type(sc, skb);
1669 skb->protocol = htons(ETH_P_WAN_PPP); 1669 skb->protocol = htons(ETH_P_WAN_PPP);
1670 skb->mac.raw = skb->data; 1670 skb_reset_mac_header(skb);
1671// skb->nh.raw = skb->data; 1671 /* skb_reset_network_header(skb); */
1672 skb->dev = dev; 1672 skb->dev = dev;
1673 lmc_proto_netif(sc, skb); 1673 lmc_proto_netif(sc, skb);
1674 1674
@@ -1679,7 +1679,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1679 if (nsb) { 1679 if (nsb) {
1680 sc->lmc_rxq[i] = nsb; 1680 sc->lmc_rxq[i] = nsb;
1681 nsb->dev = dev; 1681 nsb->dev = dev;
1682 sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); 1682 sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1683 /* Transferred to 21140 below */ 1683 /* Transferred to 21140 below */
1684 } 1684 }
1685 else { 1685 else {
@@ -1702,11 +1702,11 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1702 if(!nsb) { 1702 if(!nsb) {
1703 goto give_it_anyways; 1703 goto give_it_anyways;
1704 } 1704 }
1705 memcpy(skb_put(nsb, len), skb->data, len); 1705 skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
1706 1706
1707 nsb->protocol = lmc_proto_type(sc, skb); 1707 nsb->protocol = lmc_proto_type(sc, skb);
1708 nsb->mac.raw = nsb->data; 1708 skb_reset_mac_header(nsb);
1709// nsb->nh.raw = nsb->data; 1709 /* skb_reset_network_header(nsb); */
1710 nsb->dev = dev; 1710 nsb->dev = dev;
1711 lmc_proto_netif(sc, nsb); 1711 lmc_proto_netif(sc, nsb);
1712 } 1712 }
@@ -1932,7 +1932,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1932 sc->lmc_rxring[i].status = 0x80000000; 1932 sc->lmc_rxring[i].status = 0x80000000;
1933 1933
1934 /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ 1934 /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
1935 sc->lmc_rxring[i].length = skb->end - skb->data; 1935 sc->lmc_rxring[i].length = skb_tailroom(skb);
1936 1936
1937 /* use to be tail which is dumb since you're thinking why write 1937 /* use to be tail which is dumb since you're thinking why write
1938 * to the end of the packj,et but since there's nothing there tail == data 1938 * to the end of the packj,et but since there's nothing there tail == data
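Sketch (not part of the patch): the lmc_main.c hunks above trade direct sk_buff field access for the new accessor helpers. The following fragment restates the substitutions in one place; struct rx_desc_sketch and refill_rx_desc_sketch() are hypothetical, only the skb helpers are the real interfaces the hunks switch to.

#include <linux/skbuff.h>
#include <asm/io.h>		/* virt_to_bus(), as in lmc_main.c */

struct rx_desc_sketch {		/* hypothetical DMA descriptor */
	unsigned long buffer1;
	int length;
};

static void refill_rx_desc_sketch(struct rx_desc_sketch *d, struct sk_buff *skb)
{
	/* was: d->buffer1 = virt_to_bus(skb->tail); */
	d->buffer1 = virt_to_bus(skb_tail_pointer(skb));

	/* was: d->length = skb->end - skb->data;
	 * equivalent here because the skb is freshly allocated, so tail == data */
	d->length = skb_tailroom(skb);

	/* was: skb->mac.raw = skb->data; */
	skb_reset_mac_header(skb);
}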
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 62184dee377c..999bf71937ca 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1755,17 +1755,17 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
1755 1755
1756 skb->dev = dev; 1756 skb->dev = dev;
1757 skb->protocol = htons(ETH_P_CUST); 1757 skb->protocol = htons(ETH_P_CUST);
1758 skb->mac.raw = skb->data; 1758 skb_reset_mac_header(skb);
1759 skb->pkt_type = PACKET_HOST; 1759 skb->pkt_type = PACKET_HOST;
1760 skb->len = 10 + skb_main->len; 1760 skb->len = 10 + skb_main->len;
1761 1761
1762 memcpy(skb->data, dev->name, 5); 1762 skb_copy_to_linear_data(skb, dev->name, 5);
1763 skb->data[5] = '['; 1763 skb->data[5] = '[';
1764 skb->data[6] = rx_tx; 1764 skb->data[6] = rx_tx;
1765 skb->data[7] = ']'; 1765 skb->data[7] = ']';
1766 skb->data[8] = ':'; 1766 skb->data[8] = ':';
1767 skb->data[9] = ' '; 1767 skb->data[9] = ' ';
1768 memcpy(&skb->data[10], skb_main->data, skb_main->len); 1768 skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len);
1769 1769
1770 netif_rx(skb); 1770 netif_rx(skb);
1771} 1771}
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 5873c346e7e9..07dbdfbfc15d 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -1003,17 +1003,17 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx)
1003 skb_put (skb, 10 + len); 1003 skb_put (skb, 10 + len);
1004 skb->dev = dev->dev; 1004 skb->dev = dev->dev;
1005 skb->protocol = htons(ETH_P_CUST); 1005 skb->protocol = htons(ETH_P_CUST);
1006 skb->mac.raw = skb->data; 1006 skb_reset_mac_header(skb);
1007 skb->pkt_type = PACKET_HOST; 1007 skb->pkt_type = PACKET_HOST;
1008 skb->len = 10 + len; 1008 skb->len = 10 + len;
1009 1009
1010 memcpy(skb->data,dev->dev->name,5); 1010 skb_copy_to_linear_data(skb, dev->dev->name, 5);
1011 skb->data[5] = '['; 1011 skb->data[5] = '[';
1012 skb->data[6] = rxtx; 1012 skb->data[6] = rxtx;
1013 skb->data[7] = ']'; 1013 skb->data[7] = ']';
1014 skb->data[8] = ':'; 1014 skb->data[8] = ':';
1015 skb->data[9] = ' '; 1015 skb->data[9] = ' ';
1016 memcpy(&skb->data[10], buf, len); 1016 skb_copy_to_linear_data_offset(skb, 10, buf, len);
1017 netif_rx(skb); 1017 netif_rx(skb);
1018} 1018}
1019 1019
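Sketch (not part of the patch): the pc300 trace hunks above replace memcpy() into and out of skb->data with the linear-data copy helpers. A rough one-function summary of the mapping; trace_fill_sketch() and its arguments are hypothetical, the skb_copy_to/from_linear_data*() calls are the real helpers used in the hunks.

#include <linux/skbuff.h>
#include <linux/string.h>

static void trace_fill_sketch(struct sk_buff *skb, const char *name,
			      const void *payload, int len)
{
	/* was: memcpy(skb->data, name, 5); */
	skb_copy_to_linear_data(skb, name, 5);

	/* was: memcpy(&skb->data[10], payload, len); */
	skb_copy_to_linear_data_offset(skb, 10, payload, len);

	/* reading out of the linear area follows the same pattern, e.g.
	 * was: memcpy(buf, skb->data + 10, len);
	 * now: skb_copy_from_linear_data_offset(skb, 10, buf, len); */
}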
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index fc5c0c611ffd..35eded7ffb2d 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -999,11 +999,6 @@ get_rx_buf( struct net_device *dev )
999 if( !skb ) 999 if( !skb )
1000 return NULL; 1000 return NULL;
1001 1001
1002#ifdef CONFIG_SBNI_MULTILINE
1003 skb->dev = ((struct net_local *) dev->priv)->master;
1004#else
1005 skb->dev = dev;
1006#endif
1007 skb_reserve( skb, 2 ); /* Align IP on longword boundaries */ 1002 skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
1008 return skb; 1003 return skb;
1009} 1004}
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 70fb1b98b1dd..131358108c5a 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -61,7 +61,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
61 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 61 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
62 skb_trim(skb, skb->len-2); 62 skb_trim(skb, skb->len-2);
63 skb->protocol=htons(ETH_P_WAN_PPP); 63 skb->protocol=htons(ETH_P_WAN_PPP);
64 skb->mac.raw=skb->data; 64 skb_reset_mac_header(skb);
65 skb->dev=c->netdevice; 65 skb->dev=c->netdevice;
66 /* 66 /*
67 * Send it to the PPP layer. We don't have time to process 67 * Send it to the PPP layer. We don't have time to process
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 218f7b574ab3..67fc67cfd452 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -227,7 +227,7 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb)
227 unsigned long flags; 227 unsigned long flags;
228 228
229 skb->dev=dev; 229 skb->dev=dev;
230 skb->mac.raw=skb->data; 230 skb_reset_mac_header(skb);
231 231
232 if (dev->flags & IFF_RUNNING) 232 if (dev->flags & IFF_RUNNING)
233 { 233 {
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 8b4540bfc1b0..98ef400908b8 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1656,7 +1656,7 @@ static void z8530_rx_done(struct z8530_channel *c)
1656 else 1656 else
1657 { 1657 {
1658 skb_put(skb, ct); 1658 skb_put(skb, ct);
1659 memcpy(skb->data, rxb, ct); 1659 skb_copy_to_linear_data(skb, rxb, ct);
1660 c->stats.rx_packets++; 1660 c->stats.rx_packets++;
1661 c->stats.rx_bytes+=ct; 1661 c->stats.rx_bytes+=ct;
1662 } 1662 }
@@ -1782,7 +1782,7 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1782 */ 1782 */
1783 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; 1783 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1784 c->tx_dma_used^=1; /* Flip temp buffer */ 1784 c->tx_dma_used^=1; /* Flip temp buffer */
1785 memcpy(c->tx_next_ptr, skb->data, skb->len); 1785 skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1786 } 1786 }
1787 else 1787 else
1788 c->tx_next_ptr=skb->data; 1788 c->tx_next_ptr=skb->data;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index ece3d9c2dc61..4426841b2be6 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -2,47 +2,21 @@
2# Wireless LAN device configuration 2# Wireless LAN device configuration
3# 3#
4 4
5menu "Wireless LAN (non-hamradio)" 5menu "Wireless LAN"
6 depends on NETDEVICES
7
8config NET_RADIO
9 bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions"
10 select WIRELESS_EXT
11 ---help---
12 Support for wireless LANs and everything having to do with radio,
13 but not with amateur radio or FM broadcasting.
14
15 Saying Y here also enables the Wireless Extensions (creates
16 /proc/net/wireless and enables iwconfig access). The Wireless
17 Extension is a generic API allowing a driver to expose to the user
18 space configuration and statistics specific to common Wireless LANs.
19 The beauty of it is that a single set of tool can support all the
20 variations of Wireless LANs, regardless of their type (as long as
21 the driver supports Wireless Extension). Another advantage is that
22 these parameters may be changed on the fly without restarting the
23 driver (or Linux). If you wish to use Wireless Extensions with
24 wireless PCMCIA (PC-) cards, you need to say Y here; you can fetch
25 the tools from
26 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
27 6
28config NET_WIRELESS_RTNETLINK 7config WLAN_PRE80211
29 bool "Wireless Extension API over RtNetlink" 8 bool "Wireless LAN (pre-802.11)"
30 depends on NET_RADIO 9 depends on NETDEVICES
31 ---help--- 10 ---help---
32 Support the Wireless Extension API over the RtNetlink socket 11 Say Y if you have any pre-802.11 wireless LAN hardware.
33 in addition to the traditional ioctl interface (selected above).
34 12
35 For now, few tools use this facility, but it might grow in the 13 This option does not affect the kernel build, it only
36 future. The only downside is that it adds 4.5 kB to your kernel. 14 lets you choose drivers.
37
38# Note : the cards are obsolete (can't buy them anymore), but the drivers
39# are not, as people are still using them...
40comment "Obsolete Wireless cards support (pre-802.11)"
41 depends on NET_RADIO && (INET || ISA || PCMCIA)
42 15
43config STRIP 16config STRIP
44 tristate "STRIP (Metricom starmode radio IP)" 17 tristate "STRIP (Metricom starmode radio IP)"
45 depends on NET_RADIO && INET 18 depends on INET && WLAN_PRE80211
19 select WIRELESS_EXT
46 ---help--- 20 ---help---
47 Say Y if you have a Metricom radio and intend to use Starmode Radio 21 Say Y if you have a Metricom radio and intend to use Starmode Radio
48 IP. STRIP is a radio protocol developed for the MosquitoNet project 22 IP. STRIP is a radio protocol developed for the MosquitoNet project
@@ -65,7 +39,8 @@ config STRIP
65 39
66config ARLAN 40config ARLAN
67 tristate "Aironet Arlan 655 & IC2200 DS support" 41 tristate "Aironet Arlan 655 & IC2200 DS support"
68 depends on NET_RADIO && ISA && !64BIT 42 depends on ISA && !64BIT && WLAN_PRE80211
43 select WIRELESS_EXT
69 ---help--- 44 ---help---
70 Aironet makes Arlan, a class of wireless LAN adapters. These use the 45 Aironet makes Arlan, a class of wireless LAN adapters. These use the
71 www.Telxon.com chip, which is also used on several similar cards. 46 www.Telxon.com chip, which is also used on several similar cards.
@@ -80,7 +55,8 @@ config ARLAN
80 55
81config WAVELAN 56config WAVELAN
82 tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support" 57 tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
83 depends on NET_RADIO && ISA 58 depends on ISA && WLAN_PRE80211
59 select WIRELESS_EXT
84 ---help--- 60 ---help---
85 The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is 61 The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
86 a Radio LAN (wireless Ethernet-like Local Area Network) using the 62 a Radio LAN (wireless Ethernet-like Local Area Network) using the
@@ -107,7 +83,8 @@ config WAVELAN
107 83
108config PCMCIA_WAVELAN 84config PCMCIA_WAVELAN
109 tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support" 85 tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
110 depends on NET_RADIO && PCMCIA 86 depends on PCMCIA && WLAN_PRE80211
87 select WIRELESS_EXT
111 help 88 help
112 Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA 89 Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
113 (PC-card) wireless Ethernet networking card to your computer. This 90 (PC-card) wireless Ethernet networking card to your computer. This
@@ -118,7 +95,8 @@ config PCMCIA_WAVELAN
118 95
119config PCMCIA_NETWAVE 96config PCMCIA_NETWAVE
120 tristate "Xircom Netwave AirSurfer Pcmcia wireless support" 97 tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
121 depends on NET_RADIO && PCMCIA 98 depends on PCMCIA && WLAN_PRE80211
99 select WIRELESS_EXT
122 help 100 help
123 Say Y here if you intend to attach this type of PCMCIA (PC-card) 101 Say Y here if you intend to attach this type of PCMCIA (PC-card)
124 wireless Ethernet networking card to your computer. 102 wireless Ethernet networking card to your computer.
@@ -126,12 +104,20 @@ config PCMCIA_NETWAVE
126 To compile this driver as a module, choose M here: the module will be 104 To compile this driver as a module, choose M here: the module will be
127 called netwave_cs. If unsure, say N. 105 called netwave_cs. If unsure, say N.
128 106
129comment "Wireless 802.11 Frequency Hopping cards support" 107
130 depends on NET_RADIO && PCMCIA 108config WLAN_80211
109 bool "Wireless LAN (IEEE 802.11)"
110 depends on NETDEVICES
111 ---help---
112 Say Y if you have any 802.11 wireless LAN hardware.
113
114 This option does not affect the kernel build, it only
115 lets you choose drivers.
131 116
132config PCMCIA_RAYCS 117config PCMCIA_RAYCS
133 tristate "Aviator/Raytheon 2.4MHz wireless support" 118 tristate "Aviator/Raytheon 2.4MHz wireless support"
134 depends on NET_RADIO && PCMCIA 119 depends on PCMCIA && WLAN_80211
120 select WIRELESS_EXT
135 ---help--- 121 ---help---
136 Say Y here if you intend to attach an Aviator/Raytheon PCMCIA 122 Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
137 (PC-card) wireless Ethernet networking card to your computer. 123 (PC-card) wireless Ethernet networking card to your computer.
@@ -141,12 +127,10 @@ config PCMCIA_RAYCS
141 To compile this driver as a module, choose M here: the module will be 127 To compile this driver as a module, choose M here: the module will be
142 called ray_cs. If unsure, say N. 128 called ray_cs. If unsure, say N.
143 129
144comment "Wireless 802.11b ISA/PCI cards support"
145 depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
146
147config IPW2100 130config IPW2100
148 tristate "Intel PRO/Wireless 2100 Network Connection" 131 tristate "Intel PRO/Wireless 2100 Network Connection"
149 depends on NET_RADIO && PCI 132 depends on PCI && WLAN_80211
133 select WIRELESS_EXT
150 select FW_LOADER 134 select FW_LOADER
151 select IEEE80211 135 select IEEE80211
152 ---help--- 136 ---help---
@@ -200,7 +184,8 @@ config IPW2100_DEBUG
200 184
201config IPW2200 185config IPW2200
202 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" 186 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
203 depends on NET_RADIO && PCI 187 depends on PCI && WLAN_80211
188 select WIRELESS_EXT
204 select FW_LOADER 189 select FW_LOADER
205 select IEEE80211 190 select IEEE80211
206 ---help--- 191 ---help---
@@ -282,7 +267,8 @@ config IPW2200_DEBUG
282 267
283config AIRO 268config AIRO
284 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 269 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
285 depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) 270 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
271 select WIRELESS_EXT
286 select CRYPTO 272 select CRYPTO
287 ---help--- 273 ---help---
288 This is the standard Linux driver to support Cisco/Aironet ISA and 274 This is the standard Linux driver to support Cisco/Aironet ISA and
@@ -299,7 +285,8 @@ config AIRO
299 285
300config HERMES 286config HERMES
301 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" 287 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
302 depends on NET_RADIO && (PPC_PMAC || PCI || PCMCIA) 288 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
289 select WIRELESS_EXT
303 ---help--- 290 ---help---
304 A driver for 802.11b wireless cards based on the "Hermes" or 291 A driver for 802.11b wireless cards based on the "Hermes" or
305 Intersil HFA384x (Prism 2) MAC controller. This includes the vast 292 Intersil HFA384x (Prism 2) MAC controller. This includes the vast
@@ -373,7 +360,8 @@ config PCI_HERMES
373 360
374config ATMEL 361config ATMEL
375 tristate "Atmel at76c50x chipset 802.11b support" 362 tristate "Atmel at76c50x chipset 802.11b support"
376 depends on NET_RADIO && (PCI || PCMCIA) 363 depends on (PCI || PCMCIA) && WLAN_80211
364 select WIRELESS_EXT
377 select FW_LOADER 365 select FW_LOADER
378 select CRC32 366 select CRC32
379 ---help--- 367 ---help---
@@ -394,13 +382,9 @@ config PCI_ATMEL
394 Enable support for PCI and mini-PCI cards containing the 382 Enable support for PCI and mini-PCI cards containing the
395 Atmel at76c506 chip. 383 Atmel at76c506 chip.
396 384
397# If Pcmcia is compiled in, offer Pcmcia cards...
398comment "Wireless 802.11b Pcmcia/Cardbus cards support"
399 depends on NET_RADIO && PCMCIA
400
401config PCMCIA_HERMES 385config PCMCIA_HERMES
402 tristate "Hermes PCMCIA card support" 386 tristate "Hermes PCMCIA card support"
403 depends on NET_RADIO && PCMCIA && HERMES 387 depends on PCMCIA && HERMES
404 ---help--- 388 ---help---
405 A driver for "Hermes" chipset based PCMCIA wireless adaptors, such 389 A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
406 as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ 390 as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
@@ -420,7 +404,7 @@ config PCMCIA_HERMES
420 404
421config PCMCIA_SPECTRUM 405config PCMCIA_SPECTRUM
422 tristate "Symbol Spectrum24 Trilogy PCMCIA card support" 406 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
423 depends on NET_RADIO && PCMCIA && HERMES 407 depends on PCMCIA && HERMES
424 select FW_LOADER 408 select FW_LOADER
425 ---help--- 409 ---help---
426 410
@@ -434,7 +418,8 @@ config PCMCIA_SPECTRUM
434 418
435config AIRO_CS 419config AIRO_CS
436 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 420 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
437 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 421 depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211
422 select WIRELESS_EXT
438 select CRYPTO 423 select CRYPTO
439 select CRYPTO_AES 424 select CRYPTO_AES
440 ---help--- 425 ---help---
@@ -458,7 +443,8 @@ config AIRO_CS
458 443
459config PCMCIA_ATMEL 444config PCMCIA_ATMEL
460 tristate "Atmel at76c502/at76c504 PCMCIA cards" 445 tristate "Atmel at76c502/at76c504 PCMCIA cards"
461 depends on NET_RADIO && ATMEL && PCMCIA 446 depends on ATMEL && PCMCIA
447 select WIRELESS_EXT
462 select FW_LOADER 448 select FW_LOADER
463 select CRC32 449 select CRC32
464 ---help--- 450 ---help---
@@ -467,17 +453,17 @@ config PCMCIA_ATMEL
467 453
468config PCMCIA_WL3501 454config PCMCIA_WL3501
469 tristate "Planet WL3501 PCMCIA cards" 455 tristate "Planet WL3501 PCMCIA cards"
470 depends on NET_RADIO && EXPERIMENTAL && PCMCIA 456 depends on EXPERIMENTAL && PCMCIA && WLAN_80211
457 select WIRELESS_EXT
471 ---help--- 458 ---help---
472 A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. 459 A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
473 It has basic support for Linux wireless extensions and initial 460 It has basic support for Linux wireless extensions and initial
474 micro support for ethtool. 461 micro support for ethtool.
475 462
476comment "Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support"
477 depends on NET_RADIO && PCI
478config PRISM54 463config PRISM54
479 tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus' 464 tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus'
480 depends on PCI && NET_RADIO && EXPERIMENTAL 465 depends on PCI && EXPERIMENTAL && WLAN_80211
466 select WIRELESS_EXT
481 select FW_LOADER 467 select FW_LOADER
482 ---help--- 468 ---help---
483 Enable PCI and Cardbus support for the following chipset based cards: 469 Enable PCI and Cardbus support for the following chipset based cards:
@@ -523,7 +509,8 @@ config PRISM54
523 509
524config USB_ZD1201 510config USB_ZD1201
525 tristate "USB ZD1201 based Wireless device support" 511 tristate "USB ZD1201 based Wireless device support"
526 depends on USB && NET_RADIO 512 depends on USB && WLAN_80211
513 select WIRELESS_EXT
527 select FW_LOADER 514 select FW_LOADER
528 ---help--- 515 ---help---
529 Say Y if you want to use wireless LAN adapters based on the ZyDAS 516 Say Y if you want to use wireless LAN adapters based on the ZyDAS
@@ -542,11 +529,4 @@ source "drivers/net/wireless/hostap/Kconfig"
542source "drivers/net/wireless/bcm43xx/Kconfig" 529source "drivers/net/wireless/bcm43xx/Kconfig"
543source "drivers/net/wireless/zd1211rw/Kconfig" 530source "drivers/net/wireless/zd1211rw/Kconfig"
544 531
545# yes, this works even when no drivers are selected
546config NET_WIRELESS
547 bool
548 depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
549 default y
550
551endmenu 532endmenu
552
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2ada76a93cb6..7fe0a61091a6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2444,7 +2444,7 @@ static int add_airo_dev( struct net_device *dev );
2444 2444
2445static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) 2445static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
2446{ 2446{
2447 memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); 2447 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
2448 return ETH_ALEN; 2448 return ETH_ALEN;
2449} 2449}
2450 2450
@@ -3411,14 +3411,12 @@ badrx:
3411 OUT4500( apriv, EVACK, EV_RX); 3411 OUT4500( apriv, EVACK, EV_RX);
3412 3412
3413 if (test_bit(FLAG_802_11, &apriv->flags)) { 3413 if (test_bit(FLAG_802_11, &apriv->flags)) {
3414 skb->mac.raw = skb->data; 3414 skb_reset_mac_header(skb);
3415 skb->pkt_type = PACKET_OTHERHOST; 3415 skb->pkt_type = PACKET_OTHERHOST;
3416 skb->dev = apriv->wifidev; 3416 skb->dev = apriv->wifidev;
3417 skb->protocol = htons(ETH_P_802_2); 3417 skb->protocol = htons(ETH_P_802_2);
3418 } else { 3418 } else
3419 skb->dev = dev;
3420 skb->protocol = eth_type_trans(skb,dev); 3419 skb->protocol = eth_type_trans(skb,dev);
3421 }
3422 skb->dev->last_rx = jiffies; 3420 skb->dev->last_rx = jiffies;
3423 skb->ip_summed = CHECKSUM_NONE; 3421 skb->ip_summed = CHECKSUM_NONE;
3424 3422
@@ -3641,7 +3639,6 @@ badmic:
3641 } 3639 }
3642#endif /* WIRELESS_SPY */ 3640#endif /* WIRELESS_SPY */
3643 3641
3644 skb->dev = ai->dev;
3645 skb->ip_summed = CHECKSUM_NONE; 3642 skb->ip_summed = CHECKSUM_NONE;
3646 skb->protocol = eth_type_trans(skb, ai->dev); 3643 skb->protocol = eth_type_trans(skb, ai->dev);
3647 skb->dev->last_rx = jiffies; 3644 skb->dev->last_rx = jiffies;
@@ -3749,7 +3746,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
3749 wireless_spy_update(ai->dev, sa, &wstats); 3746 wireless_spy_update(ai->dev, sa, &wstats);
3750 } 3747 }
3751#endif /* IW_WIRELESS_SPY */ 3748#endif /* IW_WIRELESS_SPY */
3752 skb->mac.raw = skb->data; 3749 skb_reset_mac_header(skb);
3753 skb->pkt_type = PACKET_OTHERHOST; 3750 skb->pkt_type = PACKET_OTHERHOST;
3754 skb->dev = ai->wifidev; 3751 skb->dev = ai->wifidev;
3755 skb->protocol = htons(ETH_P_802_2); 3752 skb->protocol = htons(ETH_P_802_2);
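Sketch (not part of the patch): the airo.c hunks above show the two receive delivery styles touched throughout this series. A minimal, hypothetical rx_deliver_sketch() restating them; note that eth_type_trans() stores its dev argument in skb->dev, which appears to be why the explicit "skb->dev = dev;" assignments are being dropped on the Ethernet paths.

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static void rx_deliver_sketch(struct sk_buff *skb, struct net_device *dev,
			      struct net_device *wifidev, bool raw_802_11)
{
	if (raw_802_11) {
		/* was: skb->mac.raw = skb->data; */
		skb_reset_mac_header(skb);
		skb->pkt_type = PACKET_OTHERHOST;
		skb->dev = wifidev;
		skb->protocol = htons(ETH_P_802_2);
	} else {
		/* eth_type_trans() already sets skb->dev = dev,
		 * so the old explicit assignment before it is redundant */
		skb->protocol = eth_type_trans(skb, dev);
	}
	netif_rx(skb);
}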
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index 4688e56b69c7..498e8486d125 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1500,7 +1500,6 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1500 break; 1500 break;
1501 } 1501 }
1502 skb_reserve(skb, 2); 1502 skb_reserve(skb, 2);
1503 skb->dev = dev;
1504 skbtmp = skb_put(skb, pkt_len); 1503 skbtmp = skb_put(skb, pkt_len);
1505 1504
1506 memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN); 1505 memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN);
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 23eba698aec5..51a7db53afa5 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -827,14 +827,14 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
827 if (priv->wep_is_on) 827 if (priv->wep_is_on)
828 frame_ctl |= IEEE80211_FCTL_PROTECTED; 828 frame_ctl |= IEEE80211_FCTL_PROTECTED;
829 if (priv->operating_mode == IW_MODE_ADHOC) { 829 if (priv->operating_mode == IW_MODE_ADHOC) {
830 memcpy(&header.addr1, skb->data, 6); 830 skb_copy_from_linear_data(skb, &header.addr1, 6);
831 memcpy(&header.addr2, dev->dev_addr, 6); 831 memcpy(&header.addr2, dev->dev_addr, 6);
832 memcpy(&header.addr3, priv->BSSID, 6); 832 memcpy(&header.addr3, priv->BSSID, 6);
833 } else { 833 } else {
834 frame_ctl |= IEEE80211_FCTL_TODS; 834 frame_ctl |= IEEE80211_FCTL_TODS;
835 memcpy(&header.addr1, priv->CurrentBSSID, 6); 835 memcpy(&header.addr1, priv->CurrentBSSID, 6);
836 memcpy(&header.addr2, dev->dev_addr, 6); 836 memcpy(&header.addr2, dev->dev_addr, 6);
837 memcpy(&header.addr3, skb->data, 6); 837 skb_copy_from_linear_data(skb, &header.addr3, 6);
838 } 838 }
839 839
840 if (priv->use_wpa) 840 if (priv->use_wpa)
@@ -920,7 +920,6 @@ static void fast_rx_path(struct atmel_private *priv,
920 memcpy(&skbp[6], header->addr2, 6); /* source address */ 920 memcpy(&skbp[6], header->addr2, 6); /* source address */
921 921
922 priv->dev->last_rx = jiffies; 922 priv->dev->last_rx = jiffies;
923 skb->dev = priv->dev;
924 skb->protocol = eth_type_trans(skb, priv->dev); 923 skb->protocol = eth_type_trans(skb, priv->dev);
925 skb->ip_summed = CHECKSUM_NONE; 924 skb->ip_summed = CHECKSUM_NONE;
926 netif_rx(skb); 925 netif_rx(skb);
@@ -1028,7 +1027,6 @@ static void frag_rx_path(struct atmel_private *priv,
1028 priv->rx_buf, 1027 priv->rx_buf,
1029 priv->frag_len + 12); 1028 priv->frag_len + 12);
1030 priv->dev->last_rx = jiffies; 1029 priv->dev->last_rx = jiffies;
1031 skb->dev = priv->dev;
1032 skb->protocol = eth_type_trans(skb, priv->dev); 1030 skb->protocol = eth_type_trans(skb, priv->dev);
1033 skb->ip_summed = CHECKSUM_NONE; 1031 skb->ip_summed = CHECKSUM_NONE;
1034 netif_rx(skb); 1032 netif_rx(skb);
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig
index 533993f538fc..ce397e4284f4 100644
--- a/drivers/net/wireless/bcm43xx/Kconfig
+++ b/drivers/net/wireless/bcm43xx/Kconfig
@@ -1,6 +1,7 @@
1config BCM43XX 1config BCM43XX
2 tristate "Broadcom BCM43xx wireless support" 2 tristate "Broadcom BCM43xx wireless support"
3 depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL 3 depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL
4 select WIRELESS_EXT
4 select FW_LOADER 5 select FW_LOADER
5 select HW_RANDOM 6 select HW_RANDOM
6 ---help--- 7 ---help---
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index 6e0dc76400e5..e3d2e61a31ee 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -998,7 +998,8 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
998 assert(0); 998 assert(0);
999 return; 999 return;
1000 } 1000 }
1001 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); 1001 skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
1002 skb->len);
1002 dev_kfree_skb_any(skb); 1003 dev_kfree_skb_any(skb);
1003 skb = bounce_skb; 1004 skb = bounce_skb;
1004 } 1005 }
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 308f773ad566..1fef33169fdd 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -1,6 +1,7 @@
1config HOSTAP 1config HOSTAP
2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" 2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
3 depends on NET_RADIO 3 depends on WLAN_80211
4 select WIRELESS_EXT
4 select IEEE80211 5 select IEEE80211
5 select IEEE80211_CRYPT_WEP 6 select IEEE80211_CRYPT_WEP
6 ---help--- 7 ---help---
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index 7e04dc94b3bc..cbedc9ee740a 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -167,7 +167,7 @@ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
167 167
168 ret = skb->len - phdrlen; 168 ret = skb->len - phdrlen;
169 skb->dev = dev; 169 skb->dev = dev;
170 skb->mac.raw = skb->data; 170 skb_reset_mac_header(skb);
171 skb_pull(skb, hdrlen); 171 skb_pull(skb, hdrlen);
172 if (prism_header) 172 if (prism_header)
173 skb_pull(skb, phdrlen); 173 skb_pull(skb, phdrlen);
@@ -933,12 +933,14 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
933 if (frag == 0) { 933 if (frag == 0) {
934 /* copy first fragment (including full headers) into 934 /* copy first fragment (including full headers) into
935 * beginning of the fragment cache skb */ 935 * beginning of the fragment cache skb */
936 memcpy(skb_put(frag_skb, flen), skb->data, flen); 936 skb_copy_from_linear_data(skb, skb_put(frag_skb, flen),
937 flen);
937 } else { 938 } else {
938 /* append frame payload to the end of the fragment 939 /* append frame payload to the end of the fragment
939 * cache skb */ 940 * cache skb */
940 memcpy(skb_put(frag_skb, flen), skb->data + hdrlen, 941 skb_copy_from_linear_data_offset(skb, hdrlen,
941 flen); 942 skb_put(frag_skb,
943 flen), flen);
942 } 944 }
943 dev_kfree_skb(skb); 945 dev_kfree_skb(skb);
944 skb = NULL; 946 skb = NULL;
@@ -1044,8 +1046,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
1044 skb->len >= ETH_HLEN + ETH_ALEN) { 1046 skb->len >= ETH_HLEN + ETH_ALEN) {
1045 /* Non-standard frame: get addr4 from its bogus location after 1047 /* Non-standard frame: get addr4 from its bogus location after
1046 * the payload */ 1048 * the payload */
1047 memcpy(skb->data + ETH_ALEN, 1049 skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN,
1048 skb->data + skb->len - ETH_ALEN, ETH_ALEN); 1050 skb->data + ETH_ALEN,
1051 ETH_ALEN);
1049 skb_trim(skb, skb->len - ETH_ALEN); 1052 skb_trim(skb, skb->len - ETH_ALEN);
1050 } 1053 }
1051 1054
@@ -1073,17 +1076,17 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
1073 1076
1074 if (skb2 != NULL) { 1077 if (skb2 != NULL) {
1075 /* send to wireless media */ 1078 /* send to wireless media */
1076 skb2->protocol = __constant_htons(ETH_P_802_3);
1077 skb2->mac.raw = skb2->nh.raw = skb2->data;
1078 /* skb2->nh.raw = skb2->data + ETH_HLEN; */
1079 skb2->dev = dev; 1079 skb2->dev = dev;
1080 skb2->protocol = __constant_htons(ETH_P_802_3);
1081 skb_reset_mac_header(skb2);
1082 skb_reset_network_header(skb2);
1083 /* skb2->network_header += ETH_HLEN; */
1080 dev_queue_xmit(skb2); 1084 dev_queue_xmit(skb2);
1081 } 1085 }
1082 1086
1083 if (skb) { 1087 if (skb) {
1084 skb->protocol = eth_type_trans(skb, dev); 1088 skb->protocol = eth_type_trans(skb, dev);
1085 memset(skb->cb, 0, sizeof(skb->cb)); 1089 memset(skb->cb, 0, sizeof(skb->cb));
1086 skb->dev = dev;
1087 netif_rx(skb); 1090 netif_rx(skb);
1088 } 1091 }
1089 1092
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 4a5be70c0419..246fac0e8001 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -146,7 +146,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
146 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; 146 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
147 /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, 147 /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
148 * Addr4 = SA */ 148 * Addr4 = SA */
149 memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 149 skb_copy_from_linear_data_offset(skb, ETH_ALEN,
150 &hdr.addr4, ETH_ALEN);
150 hdr_len += ETH_ALEN; 151 hdr_len += ETH_ALEN;
151 } else { 152 } else {
152 /* bogus 4-addr format to workaround Prism2 station 153 /* bogus 4-addr format to workaround Prism2 station
@@ -159,7 +160,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
159 /* SA from skb->data + ETH_ALEN will be added after 160 /* SA from skb->data + ETH_ALEN will be added after
160 * frame payload; use hdr.addr4 as a temporary buffer 161 * frame payload; use hdr.addr4 as a temporary buffer
161 */ 162 */
162 memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 163 skb_copy_from_linear_data_offset(skb, ETH_ALEN,
164 &hdr.addr4, ETH_ALEN);
163 need_tailroom += ETH_ALEN; 165 need_tailroom += ETH_ALEN;
164 } 166 }
165 167
@@ -174,24 +176,27 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
174 else 176 else
175 memcpy(&hdr.addr1, local->bssid, ETH_ALEN); 177 memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
176 memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); 178 memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
177 memcpy(&hdr.addr3, skb->data, ETH_ALEN); 179 skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
178 } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { 180 } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
179 fc |= IEEE80211_FCTL_FROMDS; 181 fc |= IEEE80211_FCTL_FROMDS;
180 /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ 182 /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
181 memcpy(&hdr.addr1, skb->data, ETH_ALEN); 183 skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
182 memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); 184 memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
183 memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 185 skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
186 ETH_ALEN);
184 } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { 187 } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
185 fc |= IEEE80211_FCTL_TODS; 188 fc |= IEEE80211_FCTL_TODS;
186 /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ 189 /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
187 memcpy(&hdr.addr1, to_assoc_ap ? 190 memcpy(&hdr.addr1, to_assoc_ap ?
188 local->assoc_ap_addr : local->bssid, ETH_ALEN); 191 local->assoc_ap_addr : local->bssid, ETH_ALEN);
189 memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 192 skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
190 memcpy(&hdr.addr3, skb->data, ETH_ALEN); 193 ETH_ALEN);
194 skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
191 } else if (local->iw_mode == IW_MODE_ADHOC) { 195 } else if (local->iw_mode == IW_MODE_ADHOC) {
192 /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ 196 /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
193 memcpy(&hdr.addr1, skb->data, ETH_ALEN); 197 skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
194 memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 198 skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
199 ETH_ALEN);
195 memcpy(&hdr.addr3, local->bssid, ETH_ALEN); 200 memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
196 } 201 }
197 202
@@ -237,7 +242,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
237 iface->stats.tx_packets++; 242 iface->stats.tx_packets++;
238 iface->stats.tx_bytes += skb->len; 243 iface->stats.tx_bytes += skb->len;
239 244
240 skb->mac.raw = skb->data; 245 skb_reset_mac_header(skb);
241 meta = (struct hostap_skb_tx_data *) skb->cb; 246 meta = (struct hostap_skb_tx_data *) skb->cb;
242 memset(meta, 0, sizeof(*meta)); 247 memset(meta, 0, sizeof(*meta));
243 meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; 248 meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index efb8cf3bd8ad..4ca8a27b8c55 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -982,7 +982,8 @@ static void prism2_send_mgmt(struct net_device *dev,
982 meta->tx_cb_idx = tx_cb_idx; 982 meta->tx_cb_idx = tx_cb_idx;
983 983
984 skb->dev = dev; 984 skb->dev = dev;
985 skb->mac.raw = skb->nh.raw = skb->data; 985 skb_reset_mac_header(skb);
986 skb_reset_network_header(skb);
986 dev_queue_xmit(skb); 987 dev_queue_xmit(skb);
987} 988}
988#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 989#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
@@ -1276,8 +1277,8 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
1276 return NULL; 1277 return NULL;
1277 } 1278 }
1278 1279
1279 memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len, 1280 skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len,
1280 WLAN_AUTH_CHALLENGE_LEN); 1281 tmpbuf, WLAN_AUTH_CHALLENGE_LEN);
1281 dev_kfree_skb(skb); 1282 dev_kfree_skb(skb);
1282 1283
1283 return tmpbuf; 1284 return tmpbuf;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 3079378fb8cd..fb01fb95a9f0 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1838,13 +1838,14 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
1838 1838
1839 /* skb->data starts with txdesc->frame_control */ 1839 /* skb->data starts with txdesc->frame_control */
1840 hdr_len = 24; 1840 hdr_len = 24;
1841 memcpy(&txdesc.frame_control, skb->data, hdr_len); 1841 skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
1842 fc = le16_to_cpu(txdesc.frame_control); 1842 fc = le16_to_cpu(txdesc.frame_control);
1843 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && 1843 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
1844 (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) && 1844 (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) &&
1845 skb->len >= 30) { 1845 skb->len >= 30) {
1846 /* Addr4 */ 1846 /* Addr4 */
1847 memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN); 1847 skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4,
1848 ETH_ALEN);
1848 hdr_len += ETH_ALEN; 1849 hdr_len += ETH_ALEN;
1849 } 1850 }
1850 1851
@@ -2217,7 +2218,7 @@ static void hostap_tx_callback(local_info_t *local,
2217 memcpy(skb_put(skb, len), payload, len); 2218 memcpy(skb_put(skb, len), payload, len);
2218 2219
2219 skb->dev = local->dev; 2220 skb->dev = local->dev;
2220 skb->mac.raw = skb->data; 2221 skb_reset_mac_header(skb);
2221 2222
2222 cb->func(skb, ok, cb->data); 2223 cb->func(skb, ok, cb->data);
2223} 2224}
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 9077e6edde34..1f9edd91565d 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -590,20 +590,20 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
590 590
591int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) 591int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr)
592{ 592{
593 memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */ 593 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
594 return ETH_ALEN; 594 return ETH_ALEN;
595} 595}
596 596
597 597
598int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) 598int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr)
599{ 599{
600 if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) { 600 const unsigned char *mac = skb_mac_header(skb);
601 memcpy(haddr, skb->mac.raw + 601
602 sizeof(struct linux_wlan_ng_prism_hdr) + 10, 602 if (*(u32 *)mac == LWNG_CAP_DID_BASE) {
603 memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10,
603 ETH_ALEN); /* addr2 */ 604 ETH_ALEN); /* addr2 */
604 } else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */ 605 } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */
605 memcpy(haddr, skb->mac.raw + 606 memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10,
606 sizeof(struct linux_wlan_ng_cap_hdr) + 10,
607 ETH_ALEN); /* addr2 */ 607 ETH_ALEN); /* addr2 */
608 } 608 }
609 return ETH_ALEN; 609 return ETH_ALEN;
@@ -1063,7 +1063,8 @@ int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
1063 meta->iface = netdev_priv(dev); 1063 meta->iface = netdev_priv(dev);
1064 1064
1065 skb->dev = dev; 1065 skb->dev = dev;
1066 skb->mac.raw = skb->nh.raw = skb->data; 1066 skb_reset_mac_header(skb);
1067 skb_reset_network_header(skb);
1067 dev_queue_xmit(skb); 1068 dev_queue_xmit(skb);
1068 1069
1069 return 0; 1070 return 0;
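Sketch (not part of the patch): the hostap_main.c hunks above switch read-only access to the 802.11 header from skb->mac.raw to skb_mac_header(). A small hypothetical header-parse helper in the same style; only skb_mac_header() is the real accessor being introduced.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static int sketch_header_parse(struct sk_buff *skb, unsigned char *haddr)
{
	/* was: memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); */
	const unsigned char *mac = skb_mac_header(skb);

	memcpy(haddr, mac + 10, ETH_ALEN);	/* addr2 of the 802.11 header */
	return ETH_ALEN;
}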
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index ad6e4a428355..9137a4dd02eb 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -2416,8 +2416,9 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
2416#ifdef IPW2100_RX_DEBUG 2416#ifdef IPW2100_RX_DEBUG
2417 /* Make a copy of the frame so we can dump it to the logs if 2417 /* Make a copy of the frame so we can dump it to the logs if
2418 * ieee80211_rx fails */ 2418 * ieee80211_rx fails */
2419 memcpy(packet_data, packet->skb->data, 2419 skb_copy_from_linear_data(packet->skb, packet_data,
2420 min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH)); 2420 min_t(u32, status->frame_size,
2421 IPW_RX_NIC_BUFFER_LENGTH));
2421#endif 2422#endif
2422 2423
2423 if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { 2424 if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index c878a2f3239c..4839a45098cb 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -8133,7 +8133,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8133 skb->dev = priv->ieee->dev; 8133 skb->dev = priv->ieee->dev;
8134 8134
8135 /* Point raw at the ieee80211_stats */ 8135 /* Point raw at the ieee80211_stats */
8136 skb->mac.raw = skb->data; 8136 skb_reset_mac_header(skb);
8137 8137
8138 skb->pkt_type = PACKET_OTHERHOST; 8138 skb->pkt_type = PACKET_OTHERHOST;
8139 skb->protocol = __constant_htons(ETH_P_80211_STATS); 8139 skb->protocol = __constant_htons(ETH_P_80211_STATS);
@@ -10355,7 +10355,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10355 10355
10356 rt_hdr->it_len = dst->len; 10356 rt_hdr->it_len = dst->len;
10357 10357
10358 memcpy(skb_put(dst, len), src->data, len); 10358 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10359 10359
10360 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) 10360 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10361 dev_kfree_skb_any(dst); 10361 dev_kfree_skb_any(dst);
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index a009ab517710..45b00e13ab2b 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -1283,7 +1283,6 @@ static int netwave_rx(struct net_device *dev)
1283 1283
1284 skb_reserve( skb, 2); /* Align IP on 16 byte */ 1284 skb_reserve( skb, 2); /* Align IP on 16 byte */
1285 skb_put( skb, rcvLen); 1285 skb_put( skb, rcvLen);
1286 skb->dev = dev;
1287 1286
1288 /* Copy packet fragments to the skb data area */ 1287 /* Copy packet fragments to the skb data area */
1289 ptr = (u_char*) skb->data; 1288 ptr = (u_char*) skb->data;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 4e7f6cf51436..062286dc8e15 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -689,7 +689,7 @@ static void orinoco_stat_gather(struct net_device *dev,
689 /* Note : gcc will optimise the whole section away if 689 /* Note : gcc will optimise the whole section away if
690 * WIRELESS_SPY is not defined... - Jean II */ 690 * WIRELESS_SPY is not defined... - Jean II */
691 if (SPY_NUMBER(priv)) { 691 if (SPY_NUMBER(priv)) {
692 orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN, 692 orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
693 desc->signal, desc->silence); 693 desc->signal, desc->silence);
694 } 694 }
695} 695}
@@ -770,7 +770,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
770 770
771 /* Copy the 802.11 header to the skb */ 771 /* Copy the 802.11 header to the skb */
772 memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen); 772 memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen);
773 skb->mac.raw = skb->data; 773 skb_reset_mac_header(skb);
774 774
775 /* If any, copy the data from the card to the skb */ 775 /* If any, copy the data from the card to the skb */
776 if (datalen > 0) { 776 if (datalen > 0) {
@@ -915,7 +915,6 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
915 memcpy(hdr->h_source, desc.addr2, ETH_ALEN); 915 memcpy(hdr->h_source, desc.addr2, ETH_ALEN);
916 916
917 dev->last_rx = jiffies; 917 dev->last_rx = jiffies;
918 skb->dev = dev;
919 skb->protocol = eth_type_trans(skb, dev); 918 skb->protocol = eth_type_trans(skb, dev);
920 skb->ip_summed = CHECKSUM_NONE; 919 skb->ip_summed = CHECKSUM_NONE;
921 if (fc & IEEE80211_FCTL_TODS) 920 if (fc & IEEE80211_FCTL_TODS)
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index b1122912ee2d..dd070cccf324 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -136,7 +136,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
136 printk("islpci_eth_transmit:wds_mac\n"); 136 printk("islpci_eth_transmit:wds_mac\n");
137#endif 137#endif
138 memmove(skb->data + 6, src, skb->len); 138 memmove(skb->data + 6, src, skb->len);
139 memcpy(skb->data, wds_mac, 6); 139 skb_copy_to_linear_data(skb, wds_mac, 6);
140 } else { 140 } else {
141 memmove(skb->data, src, skb->len); 141 memmove(skb->data, src, skb->len);
142 } 142 }
@@ -162,13 +162,16 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
162 162
163 skb_put(newskb, init_wds ? skb->len + 6 : skb->len); 163 skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
164 if (init_wds) { 164 if (init_wds) {
165 memcpy(newskb->data + 6, skb->data, skb->len); 165 skb_copy_from_linear_data(skb,
166 memcpy(newskb->data, wds_mac, 6); 166 newskb->data + 6,
167 skb->len);
168 skb_copy_to_linear_data(newskb, wds_mac, 6);
167#ifdef ISLPCI_ETH_DEBUG 169#ifdef ISLPCI_ETH_DEBUG
168 printk("islpci_eth_transmit:wds_mac\n"); 170 printk("islpci_eth_transmit:wds_mac\n");
169#endif 171#endif
170 } else 172 } else
171 memcpy(newskb->data, skb->data, skb->len); 173 skb_copy_from_linear_data(skb, newskb->data,
174 skb->len);
172 175
173#if VERBOSE > SHOW_ERROR_MESSAGES 176#if VERBOSE > SHOW_ERROR_MESSAGES
174 DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", 177 DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
@@ -303,7 +306,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
303 skb_pull(*skb, sizeof (struct rfmon_header)); 306 skb_pull(*skb, sizeof (struct rfmon_header));
304 307
305 (*skb)->protocol = htons(ETH_P_802_2); 308 (*skb)->protocol = htons(ETH_P_802_2);
306 (*skb)->mac.raw = (*skb)->data; 309 skb_reset_mac_header(*skb);
307 (*skb)->pkt_type = PACKET_OTHERHOST; 310 (*skb)->pkt_type = PACKET_OTHERHOST;
308 311
309 return 0; 312 return 0;
@@ -374,10 +377,6 @@ islpci_eth_receive(islpci_private *priv)
374 DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); 377 DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
375 display_buffer((char *) skb->data, skb->len); 378 display_buffer((char *) skb->data, skb->len);
376#endif 379#endif
377
378 /* do some additional sk_buff and network layer parameters */
379 skb->dev = ndev;
380
381 /* take care of monitor mode and spy monitoring. */ 380 /* take care of monitor mode and spy monitoring. */
382 if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) 381 if (unlikely(priv->iw_mode == IW_MODE_MONITOR))
383 discard = islpci_monitor_rx(priv, &skb); 382 discard = islpci_monitor_rx(priv, &skb);
@@ -398,8 +397,10 @@ islpci_eth_receive(islpci_private *priv)
398 /* Update spy records */ 397 /* Update spy records */
399 wireless_spy_update(ndev, annex->addr2, &wstats); 398 wireless_spy_update(ndev, annex->addr2, &wstats);
400 399
401 memcpy(skb->data + sizeof (struct rfmon_header), 400 skb_copy_from_linear_data(skb,
402 skb->data, 2 * ETH_ALEN); 401 (skb->data +
402 sizeof(struct rfmon_header)),
403 2 * ETH_ALEN);
403 skb_pull(skb, sizeof (struct rfmon_header)); 404 skb_pull(skb, sizeof (struct rfmon_header));
404 } 405 }
405 skb->protocol = eth_type_trans(skb, ndev); 406 skb->protocol = eth_type_trans(skb, ndev);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 47b2ccb6a633..3be624295a1f 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2232,7 +2232,6 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
2232 return; 2232 return;
2233 } 2233 }
2234 skb_reserve( skb, 2); /* Align IP on 16 byte (TBD check this)*/ 2234 skb_reserve( skb, 2); /* Align IP on 16 byte (TBD check this)*/
2235 skb->dev = dev;
2236 2235
2237 DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len); 2236 DEBUG(4,"ray_cs rx_data total_len = %x, rx_len = %x\n",total_len,rx_len);
2238 2237
@@ -2243,7 +2242,8 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
2243 rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len); 2242 rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len);
2244 /* Get source address */ 2243 /* Get source address */
2245#ifdef WIRELESS_SPY 2244#ifdef WIRELESS_SPY
2246 memcpy(linksrcaddr, ((struct mac_header *)skb->data)->addr_2, ETH_ALEN); 2245 skb_copy_from_linear_data_offset(skb, offsetof(struct mac_header, addr_2),
2246 linksrcaddr, ETH_ALEN);
2247#endif 2247#endif
2248 /* Now, deal with encapsulation/translation/sniffer */ 2248 /* Now, deal with encapsulation/translation/sniffer */
2249 if (!sniffer) { 2249 if (!sniffer) {
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index f5ce1c6063d8..2a299a0676a6 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -2009,7 +2009,7 @@ static void deliver_packet(struct strip *strip_info, STRIP_Header * header,
2009 packetlen); 2009 packetlen);
2010 skb->dev = get_strip_dev(strip_info); 2010 skb->dev = get_strip_dev(strip_info);
2011 skb->protocol = header->protocol; 2011 skb->protocol = header->protocol;
2012 skb->mac.raw = skb->data; 2012 skb_reset_mac_header(skb);
2013 2013
2014 /* Having put a fake header on the front of the sk_buff for the */ 2014 /* Having put a fake header on the front of the sk_buff for the */
2015 /* benefit of tools like tcpdump, skb_pull now 'consumes' that */ 2015 /* benefit of tools like tcpdump, skb_pull now 'consumes' that */
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 2aa3c761dd83..1cf090d60edc 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -2512,14 +2512,13 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2512 return; 2512 return;
2513 } 2513 }
2514 2514
2515 skb->dev = dev;
2516
2517 /* Copy the packet to the buffer. */ 2515 /* Copy the packet to the buffer. */
2518 obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize); 2516 obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize);
2519 skb->protocol = eth_type_trans(skb, dev); 2517 skb->protocol = eth_type_trans(skb, dev);
2520 2518
2521#ifdef DEBUG_RX_INFO 2519#ifdef DEBUG_RX_INFO
2522 wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); 2520 wv_packet_info(skb_mac_header(skb), sksize, dev->name,
2521 "wv_packet_read");
2523#endif /* DEBUG_RX_INFO */ 2522#endif /* DEBUG_RX_INFO */
2524 2523
2525 /* Statistics-gathering and associated stuff. 2524 /* Statistics-gathering and associated stuff.
@@ -2555,7 +2554,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2555 2554
2556 /* Spying stuff */ 2555 /* Spying stuff */
2557#ifdef IW_WIRELESS_SPY 2556#ifdef IW_WIRELESS_SPY
2558 wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, 2557 wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE,
2559 stats); 2558 stats);
2560#endif /* IW_WIRELESS_SPY */ 2559#endif /* IW_WIRELESS_SPY */
2561#ifdef HISTOGRAM 2560#ifdef HISTOGRAM
@@ -2939,7 +2938,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
2939 * need to pad. Jean II */ 2938 * need to pad. Jean II */
2940 if (skb->len < ETH_ZLEN) { 2939 if (skb->len < ETH_ZLEN) {
2941 memset(data, 0, ETH_ZLEN); 2940 memset(data, 0, ETH_ZLEN);
2942 memcpy(data, skb->data, skb->len); 2941 skb_copy_from_linear_data(skb, data, skb->len);
2943 /* Write packet on the card */ 2942 /* Write packet on the card */
2944 if(wv_packet_write(dev, data, ETH_ZLEN)) 2943 if(wv_packet_write(dev, data, ETH_ZLEN))
2945 return 1; /* We failed */ 2944 return 1; /* We failed */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index b04239792f63..67b867f837ca 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -2884,14 +2884,12 @@ wv_packet_read(struct net_device * dev,
2884 return; 2884 return;
2885 } 2885 }
2886 2886
2887 skb->dev = dev;
2888
2889 skb_reserve(skb, 2); 2887 skb_reserve(skb, 2);
2890 fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize); 2888 fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize);
2891 skb->protocol = eth_type_trans(skb, dev); 2889 skb->protocol = eth_type_trans(skb, dev);
2892 2890
2893#ifdef DEBUG_RX_INFO 2891#ifdef DEBUG_RX_INFO
2894 wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); 2892 wv_packet_info(skb_mac_header(skb), sksize, dev->name, "wv_packet_read");
2895#endif /* DEBUG_RX_INFO */ 2893#endif /* DEBUG_RX_INFO */
2896 2894
2897 /* Statistics gathering & stuff associated. 2895 /* Statistics gathering & stuff associated.
@@ -2925,7 +2923,7 @@ wv_packet_read(struct net_device * dev,
2925#endif /* WAVELAN_ROAMING */ 2923#endif /* WAVELAN_ROAMING */
2926 2924
2927#ifdef WIRELESS_SPY 2925#ifdef WIRELESS_SPY
2928 wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats); 2926 wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats);
2929#endif /* WIRELESS_SPY */ 2927#endif /* WIRELESS_SPY */
2930#ifdef HISTOGRAM 2928#ifdef HISTOGRAM
2931 wl_his_gather(dev, stats); 2929 wl_his_gather(dev, stats);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 6cb66a356c96..935b144d9b56 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -327,7 +327,6 @@ static void zd1201_usbrx(struct urb *urb)
327 memcpy(skb_put(skb, 6), &data[datalen-8], 6); 327 memcpy(skb_put(skb, 6), &data[datalen-8], 6);
328 memcpy(skb_put(skb, 2), &data[datalen-24], 2); 328 memcpy(skb_put(skb, 2), &data[datalen-24], 2);
329 memcpy(skb_put(skb, len), data, len); 329 memcpy(skb_put(skb, len), data, len);
330 skb->dev = zd->dev;
331 skb->dev->last_rx = jiffies; 330 skb->dev->last_rx = jiffies;
332 skb->protocol = eth_type_trans(skb, zd->dev); 331 skb->protocol = eth_type_trans(skb, zd->dev);
333 zd->stats.rx_packets++; 332 zd->stats.rx_packets++;
@@ -385,7 +384,6 @@ static void zd1201_usbrx(struct urb *urb)
385 memcpy(skb_put(skb, 2), &data[6], 2); 384 memcpy(skb_put(skb, 2), &data[6], 2);
386 memcpy(skb_put(skb, len), data+8, len); 385 memcpy(skb_put(skb, len), data+8, len);
387 } 386 }
388 skb->dev = zd->dev;
389 skb->dev->last_rx = jiffies; 387 skb->dev->last_rx = jiffies;
390 skb->protocol = eth_type_trans(skb, zd->dev); 388 skb->protocol = eth_type_trans(skb, zd->dev);
391 zd->stats.rx_packets++; 389 zd->stats.rx_packets++;
@@ -809,10 +807,10 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
809 txbuf[4] = 0x00; 807 txbuf[4] = 0x00;
810 txbuf[5] = 0x00; 808 txbuf[5] = 0x00;
811 809
812 memcpy(txbuf+6, skb->data+12, skb->len-12); 810 skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12);
813 if (pad) 811 if (pad)
814 txbuf[skb->len-12+6]=0; 812 txbuf[skb->len-12+6]=0;
815 memcpy(txbuf+skb->len-12+6+pad, skb->data, 12); 813 skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12);
816 *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); 814 *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6);
817 txbuf[txbuflen-1] = 0; 815 txbuf[txbuflen-1] = 0;
818 816
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
index 66ed55bc5460..d1ab24a95630 100644
--- a/drivers/net/wireless/zd1211rw/Kconfig
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -1,6 +1,7 @@
1config ZD1211RW 1config ZD1211RW
2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" 2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
3 depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL 3 depends on USB && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL
4 select WIRELESS_EXT
4 select FW_LOADER 5 select FW_LOADER
5 ---help--- 6 ---help---
6 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless 7 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index aac8a1c5ba08..edaaad2f648b 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
62 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, 62 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
63 { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B }, 63 { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B }, 64 { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B },
65 /* "Driverless" devices that need ejecting */ 66 /* "Driverless" devices that need ejecting */
66 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 67 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
67 {} 68 {}
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 2412ce4917f2..3f4a7cf9efea 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1137,7 +1137,6 @@ static int yellowfin_rx(struct net_device *dev)
1137 skb = dev_alloc_skb(pkt_len + 2); 1137 skb = dev_alloc_skb(pkt_len + 2);
1138 if (skb == NULL) 1138 if (skb == NULL)
1139 break; 1139 break;
1140 skb->dev = dev;
1141 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1140 skb_reserve(skb, 2); /* 16 byte align the IP header */
1142 eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); 1141 eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
1143 skb_put(skb, pkt_len); 1142 skb_put(skb, pkt_len);
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index b24b0727108c..4032e9f6f9b0 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -774,7 +774,6 @@ static void znet_rx(struct net_device *dev)
774 znet->stats.rx_dropped++; 774 znet->stats.rx_dropped++;
775 break; 775 break;
776 } 776 }
777 skb->dev = dev;
778 777
779 if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { 778 if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
780 int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; 779 int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index d190c05d87ed..453e6829756c 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -372,9 +372,9 @@ static __inline__ int led_get_net_activity(void)
372 continue; 372 continue;
373 if (LOOPBACK(in_dev->ifa_list->ifa_local)) 373 if (LOOPBACK(in_dev->ifa_list->ifa_local))
374 continue; 374 continue;
375 if (!dev->get_stats)
376 continue;
377 stats = dev->get_stats(dev); 375 stats = dev->get_stats(dev);
376 if (!stats)
377 continue;
378 rx_total += stats->rx_packets; 378 rx_total += stats->rx_packets;
379 tx_total += stats->tx_packets; 379 tx_total += stats->tx_packets;
380 } 380 }
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 9793533276ec..400bb90084cf 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -126,7 +126,7 @@ static unsigned char status_sunbpp_to_pc(struct parport *p)
126 if (!(value_tcr & P_TCR_BUSY)) 126 if (!(value_tcr & P_TCR_BUSY))
127 bits |= PARPORT_STATUS_BUSY; 127 bits |= PARPORT_STATUS_BUSY;
128 128
129 dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", regs->p_tcr, regs->p_ir)); 129 dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir));
130 dprintk((KERN_DEBUG "read status 0x%x\n", bits)); 130 dprintk((KERN_DEBUG "read status 0x%x\n", bits));
131 return bits; 131 return bits;
132} 132}
@@ -147,7 +147,7 @@ static unsigned char control_sunbpp_to_pc(struct parport *p)
147 if (value_or & P_OR_SLCT_IN) 147 if (value_or & P_OR_SLCT_IN)
148 bits |= PARPORT_CONTROL_SELECT; 148 bits |= PARPORT_CONTROL_SELECT;
149 149
150 dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or)); 150 dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or));
151 dprintk((KERN_DEBUG "read control 0x%x\n", bits)); 151 dprintk((KERN_DEBUG "read control 0x%x\n", bits));
152 return bits; 152 return bits;
153} 153}
@@ -165,7 +165,8 @@ static unsigned char parport_sunbpp_frob_control(struct parport *p,
165 unsigned char value_tcr = sbus_readb(&regs->p_tcr); 165 unsigned char value_tcr = sbus_readb(&regs->p_tcr);
166 unsigned char value_or = sbus_readb(&regs->p_or); 166 unsigned char value_or = sbus_readb(&regs->p_or);
167 167
168 dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or)); 168 dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n",
169 value_tcr, value_or));
169 if (mask & PARPORT_CONTROL_STROBE) { 170 if (mask & PARPORT_CONTROL_STROBE) {
170 if (val & PARPORT_CONTROL_STROBE) { 171 if (val & PARPORT_CONTROL_STROBE) {
171 value_tcr &= ~P_TCR_DS; 172 value_tcr &= ~P_TCR_DS;
@@ -197,7 +198,8 @@ static unsigned char parport_sunbpp_frob_control(struct parport *p,
197 198
198 sbus_writeb(value_or, &regs->p_or); 199 sbus_writeb(value_or, &regs->p_or);
199 sbus_writeb(value_tcr, &regs->p_tcr); 200 sbus_writeb(value_tcr, &regs->p_tcr);
200 dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", regs->p_tcr, regs->p_or)); 201 dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n",
202 value_tcr, value_or));
201 return parport_sunbpp_read_control(p); 203 return parport_sunbpp_read_control(p);
202} 204}
203 205
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a3c1755b2f28..39e80fcef4b3 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -434,11 +434,6 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
434 drv->driver.mod_name = mod_name; 434 drv->driver.mod_name = mod_name;
435 drv->driver.kobj.ktype = &pci_driver_kobj_type; 435 drv->driver.kobj.ktype = &pci_driver_kobj_type;
436 436
437 if (pci_multithread_probe)
438 drv->driver.multithread_probe = pci_multithread_probe;
439 else
440 drv->driver.multithread_probe = drv->multithread_probe;
441
442 spin_lock_init(&drv->dynids.lock); 437 spin_lock_init(&drv->dynids.lock);
443 INIT_LIST_HEAD(&drv->dynids.list); 438 INIT_LIST_HEAD(&drv->dynids.list);
444 439
@@ -574,6 +569,7 @@ struct bus_type pci_bus_type = {
574 569
575static int __init pci_driver_init(void) 570static int __init pci_driver_init(void)
576{ 571{
572 pci_bus_type.multithread_probe = pci_multithread_probe;
577 return bus_register(&pci_bus_type); 573 return bus_register(&pci_bus_type);
578} 574}
579 575
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d3eab057b2d3..2a458279327a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/pm.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
18#include <linux/string.h> 19#include <linux/string.h>
@@ -891,31 +892,48 @@ pci_disable_device(struct pci_dev *dev)
891} 892}
892 893
893/** 894/**
894 * pci_enable_wake - enable device to generate PME# when suspended 895 * pci_enable_wake - enable PCI device as wakeup event source
895 * @dev: - PCI device to operate on 896 * @dev: PCI device affected
896 * @state: - Current state of device. 897 * @state: PCI state from which device will issue wakeup events
897 * @enable: - Flag to enable or disable generation 898 * @enable: True to enable event generation; false to disable
898 *
899 * Set the bits in the device's PM Capabilities to generate PME# when
900 * the system is suspended.
901 * 899 *
902 * -EIO is returned if device doesn't have PM Capabilities. 900 * This enables the device as a wakeup event source, or disables it.
 903 * -EINVAL is returned if device supports it, but can't generate wake events. 901 * When such events involve platform-specific hooks, those hooks are
904 * 0 if operation is successful. 902 * called automatically by this routine.
905 * 903 *
904 * Devices with legacy power management (no standard PCI PM capabilities)
905 * always require such platform hooks. Depending on the platform, devices
906 * supporting the standard PCI PME# signal may require such platform hooks;
907 * they always update bits in config space to allow PME# generation.
908 *
909 * -EIO is returned if the device can't ever be a wakeup event source.
910 * -EINVAL is returned if the device can't generate wakeup events from
911 * the specified PCI state. Returns zero if the operation is successful.
906 */ 912 */
907int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 913int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
908{ 914{
909 int pm; 915 int pm;
916 int status;
910 u16 value; 917 u16 value;
911 918
919 /* Note that drivers should verify device_may_wakeup(&dev->dev)
920 * before calling this function. Platform code should report
921 * errors when drivers try to enable wakeup on devices that
922 * can't issue wakeups, or on which wakeups were disabled by
923 * userspace updating the /sys/devices.../power/wakeup file.
924 */
925
926 status = call_platform_enable_wakeup(&dev->dev, enable);
927
912 /* find PCI PM capability in list */ 928 /* find PCI PM capability in list */
913 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 929 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
914 930
915 /* If device doesn't support PM Capabilities, but request is to disable 931 /* If device doesn't support PM Capabilities, but caller wants to
916 * wake events, it's a nop; otherwise fail */ 932 * disable wake events, it's a NOP. Otherwise fail unless the
917 if (!pm) 933 * platform hooks handled this legacy device already.
918 return enable ? -EIO : 0; 934 */
935 if (!pm)
936 return enable ? status : 0;
919 937
920 /* Check device's ability to generate PME# */ 938 /* Check device's ability to generate PME# */
921 pci_read_config_word(dev,pm+PCI_PM_PMC,&value); 939 pci_read_config_word(dev,pm+PCI_PM_PMC,&value);
@@ -924,8 +942,14 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
924 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ 942 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */
925 943
926 /* Check if it can generate PME# from requested state. */ 944 /* Check if it can generate PME# from requested state. */
927 if (!value || !(value & (1 << state))) 945 if (!value || !(value & (1 << state))) {
946 /* if it can't, revert what the platform hook changed,
947 * always reporting the base "EINVAL, can't PME#" error
948 */
949 if (enable)
950 call_platform_enable_wakeup(&dev->dev, 0);
928 return enable ? -EINVAL : 0; 951 return enable ? -EINVAL : 0;
952 }
929 953
930 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); 954 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);
931 955
@@ -936,7 +960,7 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
936 value &= ~PCI_PM_CTRL_PME_ENABLE; 960 value &= ~PCI_PM_CTRL_PME_ENABLE;
937 961
938 pci_write_config_word(dev, pm + PCI_PM_CTRL, value); 962 pci_write_config_word(dev, pm + PCI_PM_CTRL, value);
939 963
940 return 0; 964 return 0;
941} 965}
942 966
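
As the new kernel-doc and the in-line comment note, drivers are expected to check device_may_wakeup() before asking for wakeup, and the platform hook is now invoked from pci_enable_wake() itself. A minimal sketch of a driver suspend hook using the reworked call; the driver name and the surrounding suspend steps are illustrative assumptions, not part of this patch:

#include <linux/pci.h>
#include <linux/pm.h>

/* Hypothetical PCI driver suspend hook (sketch only). */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_power_t target = pci_choose_state(pdev, state);

	pci_save_state(pdev);

	/* Only request PME#/platform wakeup if userspace allows it. */
	pci_enable_wake(pdev, target, device_may_wakeup(&pdev->dev));

	pci_set_power_state(pdev, target);
	return 0;
}
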
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a4a96826d9e0..2fe1d690eb13 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -682,34 +682,7 @@ static void pci_read_irq(struct pci_dev *dev)
682 dev->irq = irq; 682 dev->irq = irq;
683} 683}
684 684
685static void change_legacy_io_resource(struct pci_dev * dev, unsigned index, 685#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
686 unsigned start, unsigned end)
687{
688 unsigned base = start & PCI_BASE_ADDRESS_IO_MASK;
689 unsigned len = (end | ~PCI_BASE_ADDRESS_IO_MASK) - base + 1;
690
691 /*
692 * Some X versions get confused when the BARs reported through
693 * /sys or /proc differ from those seen in config space, thus
694 * try to update the config space values, too.
695 */
696 if (!(pci_resource_flags(dev, index) & IORESOURCE_IO))
697 printk(KERN_WARNING "%s: cannot adjust BAR%u (not I/O)\n",
698 pci_name(dev), index);
699 else if (pci_resource_len(dev, index) != len)
700 printk(KERN_WARNING "%s: cannot adjust BAR%u (size %04X)\n",
701 pci_name(dev), index, (unsigned)pci_resource_len(dev, index));
702 else {
703 printk(KERN_INFO "%s: trying to change BAR%u from %04X to %04X\n",
704 pci_name(dev), index,
705 (unsigned)pci_resource_start(dev, index), base);
706 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + index * 4, base);
707 }
708 pci_resource_start(dev, index) = start;
709 pci_resource_end(dev, index) = end;
710 pci_resource_flags(dev, index) =
711 IORESOURCE_IO | IORESOURCE_PCI_FIXED | PCI_BASE_ADDRESS_SPACE_IO;
712}
713 686
714/** 687/**
715 * pci_setup_device - fill in class and map information of a device 688 * pci_setup_device - fill in class and map information of a device
@@ -762,12 +735,20 @@ static int pci_setup_device(struct pci_dev * dev)
762 u8 progif; 735 u8 progif;
763 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); 736 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
764 if ((progif & 1) == 0) { 737 if ((progif & 1) == 0) {
765 change_legacy_io_resource(dev, 0, 0x1F0, 0x1F7); 738 dev->resource[0].start = 0x1F0;
766 change_legacy_io_resource(dev, 1, 0x3F6, 0x3F6); 739 dev->resource[0].end = 0x1F7;
740 dev->resource[0].flags = LEGACY_IO_RESOURCE;
741 dev->resource[1].start = 0x3F6;
742 dev->resource[1].end = 0x3F6;
743 dev->resource[1].flags = LEGACY_IO_RESOURCE;
767 } 744 }
768 if ((progif & 4) == 0) { 745 if ((progif & 4) == 0) {
769 change_legacy_io_resource(dev, 2, 0x170, 0x177); 746 dev->resource[2].start = 0x170;
770 change_legacy_io_resource(dev, 3, 0x376, 0x376); 747 dev->resource[2].end = 0x177;
748 dev->resource[2].flags = LEGACY_IO_RESOURCE;
749 dev->resource[3].start = 0x376;
750 dev->resource[3].end = 0x376;
751 dev->resource[3].flags = LEGACY_IO_RESOURCE;
771 } 752 }
772 } 753 }
773 break; 754 break;
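
The removed change_legacy_io_resource() also tried to rewrite the BARs in config space; the replacement simply records the classic IDE ports as fixed I/O resources and leaves config space alone. A short sketch of the pattern, with a hypothetical helper name that is not part of the patch:

#include <linux/ioport.h>
#include <linux/pci.h>

/* Hypothetical helper mirroring the assignments in the hunk above. */
static void mark_legacy_ide_port(struct pci_dev *dev, int bar,
				 resource_size_t start, resource_size_t end)
{
	dev->resource[bar].start = start;
	dev->resource[bar].end   = end;
	/* fixed legacy I/O: never reassigned by PCI resource allocation */
	dev->resource[bar].flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
}
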
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 91c047a7e635..dd6384b1efce 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -311,7 +311,6 @@ done:
311 return NULL; 311 return NULL;
312 312
313found: 313found:
314 down_write(&dev->dev.bus->subsys.rwsem);
315 dev->card_link = clink; 314 dev->card_link = clink;
316 dev->dev.driver = &drv->link.driver; 315 dev->dev.driver = &drv->link.driver;
317 if (pnp_bus_type.probe(&dev->dev)) 316 if (pnp_bus_type.probe(&dev->dev))
@@ -319,14 +318,11 @@ found:
319 if (device_bind_driver(&dev->dev)) 318 if (device_bind_driver(&dev->dev))
320 goto err_out; 319 goto err_out;
321 320
322 up_write(&dev->dev.bus->subsys.rwsem);
323
324 return dev; 321 return dev;
325 322
326err_out: 323err_out:
327 dev->dev.driver = NULL; 324 dev->dev.driver = NULL;
328 dev->card_link = NULL; 325 dev->card_link = NULL;
329 up_write(&dev->dev.bus->subsys.rwsem);
330 return NULL; 326 return NULL;
331} 327}
332 328
@@ -340,11 +336,9 @@ void pnp_release_card_device(struct pnp_dev * dev)
340 struct pnp_card_driver * drv = dev->card_link->driver; 336 struct pnp_card_driver * drv = dev->card_link->driver;
341 if (!drv) 337 if (!drv)
342 return; 338 return;
343 down_write(&dev->dev.bus->subsys.rwsem);
344 drv->link.remove = &card_remove; 339 drv->link.remove = &card_remove;
345 device_release_driver(&dev->dev); 340 device_release_driver(&dev->dev);
346 drv->link.remove = &card_remove_first; 341 drv->link.remove = &card_remove_first;
347 up_write(&dev->dev.bus->subsys.rwsem);
348} 342}
349 343
350/* 344/*
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb5dc62f0d9c..e71929db8b06 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device)
398 398
399 if (device->state == device->target) 399 if (device->state == device->target)
400 wake_up(&dasd_init_waitq); 400 wake_up(&dasd_init_waitq);
401
402 /* let user-space know that the device status changed */
403 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
401} 404}
402 405
403/* 406/*
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index ed70852cc915..6a89cefe99bb 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/debug.h> 20#include <asm/debug.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/ipl.h>
22 23
23/* This is ugly... */ 24/* This is ugly... */
24#define PRINTK_HEADER "dasd_devmap:" 25#define PRINTK_HEADER "dasd_devmap:"
@@ -133,6 +134,8 @@ dasd_call_setup(char *str)
133__setup ("dasd=", dasd_call_setup); 134__setup ("dasd=", dasd_call_setup);
134#endif /* #ifndef MODULE */ 135#endif /* #ifndef MODULE */
135 136
137#define DASD_IPLDEV "ipldev"
138
136/* 139/*
137 * Read a device busid/devno from a string. 140 * Read a device busid/devno from a string.
138 */ 141 */
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 144{
142 int val, old_style; 145 int val, old_style;
143 146
147 /* Interpret ipldev busid */
148 if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
149 if (ipl_info.type != IPL_TYPE_CCW) {
150 MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
151 "device");
152 return -EINVAL;
153 }
154 *id0 = 0;
155 *id1 = ipl_info.data.ccw.dev_id.ssid;
156 *devno = ipl_info.data.ccw.dev_id.devno;
157 *str += strlen(DASD_IPLDEV);
158
159 return 0;
160 }
144 /* check for leading '0x' */ 161 /* check for leading '0x' */
145 old_style = 0; 162 old_style = 0;
146 if ((*str)[0] == '0' && (*str)[1] == 'x') { 163 if ((*str)[0] == '0' && (*str)[1] == 'x') {
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
829static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); 846static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
830 847
831static ssize_t 848static ssize_t
849dasd_device_status_show(struct device *dev, struct device_attribute *attr,
850 char *buf)
851{
852 struct dasd_device *device;
853 ssize_t len;
854
855 device = dasd_device_from_cdev(to_ccwdev(dev));
856 if (!IS_ERR(device)) {
857 switch (device->state) {
858 case DASD_STATE_NEW:
859 len = snprintf(buf, PAGE_SIZE, "new\n");
860 break;
861 case DASD_STATE_KNOWN:
862 len = snprintf(buf, PAGE_SIZE, "detected\n");
863 break;
864 case DASD_STATE_BASIC:
865 len = snprintf(buf, PAGE_SIZE, "basic\n");
866 break;
867 case DASD_STATE_UNFMT:
868 len = snprintf(buf, PAGE_SIZE, "unformatted\n");
869 break;
870 case DASD_STATE_READY:
871 len = snprintf(buf, PAGE_SIZE, "ready\n");
872 break;
873 case DASD_STATE_ONLINE:
874 len = snprintf(buf, PAGE_SIZE, "online\n");
875 break;
876 default:
877 len = snprintf(buf, PAGE_SIZE, "no stat\n");
878 break;
879 }
880 dasd_put_device(device);
881 } else
882 len = snprintf(buf, PAGE_SIZE, "unknown\n");
883 return len;
884}
885
886static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
887
888static ssize_t
832dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) 889dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
833{ 890{
834 struct dasd_devmap *devmap; 891 struct dasd_devmap *devmap;
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
939static struct attribute * dasd_attrs[] = { 996static struct attribute * dasd_attrs[] = {
940 &dev_attr_readonly.attr, 997 &dev_attr_readonly.attr,
941 &dev_attr_discipline.attr, 998 &dev_attr_discipline.attr,
999 &dev_attr_status.attr,
942 &dev_attr_alias.attr, 1000 &dev_attr_alias.attr,
943 &dev_attr_vendor.attr, 1001 &dev_attr_vendor.attr,
944 &dev_attr_uid.attr, 1002 &dev_attr_uid.attr,
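
Together with the KOBJ_CHANGE uevent added in dasd.c above, the new "status" attribute lets userspace follow DASD state transitions through sysfs. A hedged userspace sketch reading it; the bus path and the example busid are assumptions for illustration:

#include <stdio.h>

/* Sketch only: read the new DASD "status" attribute for one device. */
int main(void)
{
	char buf[32];
	/* example busid; pick a real one from /sys/bus/ccw/devices */
	FILE *f = fopen("/sys/bus/ccw/devices/0.0.4711/status", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("dasd state: %s", buf);
	fclose(f);
	return 0;
}
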
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 293e667b50f2..c210784bdf46 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o 6 sclp_info.o sclp_config.o sclp_chp.o
7 7
8obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o 29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
30obj-$(CONFIG_MONREADER) += monreader.o 30obj-$(CONFIG_MONREADER) += monreader.o
31obj-$(CONFIG_MONWRITER) += monwriter.o 31obj-$(CONFIG_MONWRITER) += monwriter.o
32
33zcore_mod-objs := sclp_sdias.o zcore.o
34obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9a328f14a641..6000bdee4082 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -813,12 +813,6 @@ con3215_unblank(void)
813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
814} 814}
815 815
816static int __init
817con3215_consetup(struct console *co, char *options)
818{
819 return 0;
820}
821
822/* 816/*
823 * The console structure for the 3215 console 817 * The console structure for the 3215 console
824 */ 818 */
@@ -827,7 +821,6 @@ static struct console con3215 = {
827 .write = con3215_write, 821 .write = con3215_write,
828 .device = con3215_device, 822 .device = con3215_device,
829 .unblank = con3215_unblank, 823 .unblank = con3215_unblank,
830 .setup = con3215_consetup,
831 .flags = CON_PRINTBUFFER, 824 .flags = CON_PRINTBUFFER,
832}; 825};
833 826
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 8e7f2d7633d6..fd3479119eb4 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -555,12 +555,6 @@ con3270_unblank(void)
555 spin_unlock_irqrestore(&cp->view.lock, flags); 555 spin_unlock_irqrestore(&cp->view.lock, flags);
556} 556}
557 557
558static int __init
559con3270_consetup(struct console *co, char *options)
560{
561 return 0;
562}
563
564/* 558/*
565 * The console structure for the 3270 console 559 * The console structure for the 3270 console
566 */ 560 */
@@ -569,7 +563,6 @@ static struct console con3270 = {
569 .write = con3270_write, 563 .write = con3270_write,
570 .device = con3270_device, 564 .device = con3270_device,
571 .unblank = con3270_unblank, 565 .unblank = con3270_unblank,
572 .setup = con3270_consetup,
573 .flags = CON_PRINTBUFFER, 566 .flags = CON_PRINTBUFFER,
574}; 567};
575 568
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f171de3b0b11..fa62e6944057 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -15,6 +15,7 @@
15#include <linux/timer.h> 15#include <linux/timer.h>
16#include <linux/reboot.h> 16#include <linux/reboot.h>
17#include <linux/jiffies.h> 17#include <linux/jiffies.h>
18#include <linux/init.h>
18#include <asm/types.h> 19#include <asm/types.h>
19#include <asm/s390_ext.h> 20#include <asm/s390_ext.h>
20 21
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
510} 511}
511 512
512static struct sclp_register sclp_state_change_event = { 513static struct sclp_register sclp_state_change_event = {
513 .receive_mask = EvTyp_StateChange_Mask, 514 .receive_mask = EVTYP_STATECHANGE_MASK,
514 .receiver_fn = sclp_state_change_cb 515 .receiver_fn = sclp_state_change_cb
515}; 516};
516 517
@@ -930,3 +931,10 @@ sclp_init(void)
930 sclp_init_mask(1); 931 sclp_init_mask(1);
931 return 0; 932 return 0;
932} 933}
934
935static __init int sclp_initcall(void)
936{
937 return sclp_init();
938}
939
940arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7d29ab45a6ed..87ac4a3ad49d 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,33 +19,37 @@
19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) 19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
20#define MAX_CONSOLE_PAGES 4 20#define MAX_CONSOLE_PAGES 4
21 21
22#define EvTyp_OpCmd 0x01 22#define EVTYP_OPCMD 0x01
23#define EvTyp_Msg 0x02 23#define EVTYP_MSG 0x02
24#define EvTyp_StateChange 0x08 24#define EVTYP_STATECHANGE 0x08
25#define EvTyp_PMsgCmd 0x09 25#define EVTYP_PMSGCMD 0x09
26#define EvTyp_CntlProgOpCmd 0x20 26#define EVTYP_CNTLPROGOPCMD 0x20
27#define EvTyp_CntlProgIdent 0x0B 27#define EVTYP_CNTLPROGIDENT 0x0B
28#define EvTyp_SigQuiesce 0x1D 28#define EVTYP_SIGQUIESCE 0x1D
29#define EvTyp_VT220Msg 0x1A 29#define EVTYP_VT220MSG 0x1A
30 30#define EVTYP_CONFMGMDATA 0x04
31#define EvTyp_OpCmd_Mask 0x80000000 31#define EVTYP_SDIAS 0x1C
32#define EvTyp_Msg_Mask 0x40000000 32
33#define EvTyp_StateChange_Mask 0x01000000 33#define EVTYP_OPCMD_MASK 0x80000000
34#define EvTyp_PMsgCmd_Mask 0x00800000 34#define EVTYP_MSG_MASK 0x40000000
35#define EvTyp_CtlProgOpCmd_Mask 0x00000001 35#define EVTYP_STATECHANGE_MASK 0x01000000
36#define EvTyp_CtlProgIdent_Mask 0x00200000 36#define EVTYP_PMSGCMD_MASK 0x00800000
37#define EvTyp_SigQuiesce_Mask 0x00000008 37#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
38#define EvTyp_VT220Msg_Mask 0x00000040 38#define EVTYP_CTLPROGIDENT_MASK 0x00200000
39 39#define EVTYP_SIGQUIESCE_MASK 0x00000008
40#define GnrlMsgFlgs_DOM 0x8000 40#define EVTYP_VT220MSG_MASK 0x00000040
41#define GnrlMsgFlgs_SndAlrm 0x4000 41#define EVTYP_CONFMGMDATA_MASK 0x10000000
42#define GnrlMsgFlgs_HoldMsg 0x2000 42#define EVTYP_SDIAS_MASK 0x00000010
43 43
44#define LnTpFlgs_CntlText 0x8000 44#define GNRLMSGFLGS_DOM 0x8000
45#define LnTpFlgs_LabelText 0x4000 45#define GNRLMSGFLGS_SNDALRM 0x4000
46#define LnTpFlgs_DataText 0x2000 46#define GNRLMSGFLGS_HOLDMSG 0x2000
47#define LnTpFlgs_EndText 0x1000 47
48#define LnTpFlgs_PromptText 0x0800 48#define LNTPFLGS_CNTLTEXT 0x8000
49#define LNTPFLGS_LABELTEXT 0x4000
50#define LNTPFLGS_DATATEXT 0x2000
51#define LNTPFLGS_ENDTEXT 0x1000
52#define LNTPFLGS_PROMPTTEXT 0x0800
49 53
50typedef unsigned int sclp_cmdw_t; 54typedef unsigned int sclp_cmdw_t;
51 55
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t;
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 60#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
57 61
58#define GDS_ID_MDSMU 0x1310 62#define GDS_ID_MDSMU 0x1310
59#define GDS_ID_MDSRouteInfo 0x1311 63#define GDS_ID_MDSROUTEINFO 0x1311
60#define GDS_ID_AgUnWrkCorr 0x1549 64#define GDS_ID_AGUNWRKCORR 0x1549
61#define GDS_ID_SNACondReport 0x1532 65#define GDS_ID_SNACONDREPORT 0x1532
62#define GDS_ID_CPMSU 0x1212 66#define GDS_ID_CPMSU 0x1212
63#define GDS_ID_RoutTargInstr 0x154D 67#define GDS_ID_ROUTTARGINSTR 0x154D
64#define GDS_ID_OpReq 0x8070 68#define GDS_ID_OPREQ 0x8070
65#define GDS_ID_TextCmd 0x1320 69#define GDS_ID_TEXTCMD 0x1320
66 70
67#define GDS_KEY_SelfDefTextMsg 0x31 71#define GDS_KEY_SELFDEFTEXTMSG 0x31
68 72
69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 73typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
70 74
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
new file mode 100644
index 000000000000..a66b914519b5
--- /dev/null
+++ b/drivers/s390/char/sclp_chp.c
@@ -0,0 +1,196 @@
1/*
2 * drivers/s390/char/sclp_chp.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/gfp.h>
10#include <linux/errno.h>
11#include <linux/completion.h>
12#include <asm/sclp.h>
13#include <asm/chpid.h>
14
15#include "sclp.h"
16
17#define TAG "sclp_chp: "
18
19#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001
20#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001
21#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001
22
23static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
24{
25 return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
26}
27
28static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
29{
30 return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
31}
32
33static void chp_callback(struct sclp_req *req, void *data)
34{
35 struct completion *completion = data;
36
37 complete(completion);
38}
39
40struct chp_cfg_sccb {
41 struct sccb_header header;
42 u8 ccm;
43 u8 reserved[6];
44 u8 cssid;
45} __attribute__((packed));
46
47struct chp_cfg_data {
48 struct chp_cfg_sccb sccb;
49 struct sclp_req req;
50 struct completion completion;
51} __attribute__((packed));
52
53static int do_configure(sclp_cmdw_t cmd)
54{
55 struct chp_cfg_data *data;
56 int rc;
57
58 /* Prepare sccb. */
59 data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
60 if (!data)
61 return -ENOMEM;
62 data->sccb.header.length = sizeof(struct chp_cfg_sccb);
63 data->req.command = cmd;
64 data->req.sccb = &(data->sccb);
65 data->req.status = SCLP_REQ_FILLED;
66 data->req.callback = chp_callback;
67 data->req.callback_data = &(data->completion);
68 init_completion(&data->completion);
69
70 /* Perform sclp request. */
71 rc = sclp_add_request(&(data->req));
72 if (rc)
73 goto out;
74 wait_for_completion(&data->completion);
75
 76 /* Check response. */
77 if (data->req.status != SCLP_REQ_DONE) {
78 printk(KERN_WARNING TAG "configure channel-path request failed "
79 "(status=0x%02x)\n", data->req.status);
80 rc = -EIO;
81 goto out;
82 }
83 switch (data->sccb.header.response_code) {
84 case 0x0020:
85 case 0x0120:
86 case 0x0440:
87 case 0x0450:
88 break;
89 default:
90 printk(KERN_WARNING TAG "configure channel-path failed "
91 "(cmd=0x%08x, response=0x%04x)\n", cmd,
92 data->sccb.header.response_code);
93 rc = -EIO;
94 break;
95 }
96out:
97 free_page((unsigned long) data);
98
99 return rc;
100}
101
102/**
103 * sclp_chp_configure - perform configure channel-path sclp command
104 * @chpid: channel-path ID
105 *
 106 * Perform the configure channel-path sclp command for the specified chpid.
107 * Return 0 after command successfully finished, non-zero otherwise.
108 */
109int sclp_chp_configure(struct chp_id chpid)
110{
111 return do_configure(get_configure_cmdw(chpid));
112}
113
114/**
115 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
116 * @chpid: channel-path ID
117 *
 118 * Perform the deconfigure channel-path sclp command for the specified chpid
119 * and wait for completion. On success return 0. Return non-zero otherwise.
120 */
121int sclp_chp_deconfigure(struct chp_id chpid)
122{
123 return do_configure(get_deconfigure_cmdw(chpid));
124}
125
126struct chp_info_sccb {
127 struct sccb_header header;
128 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
129 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
130 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
131 u8 ccm;
132 u8 reserved[6];
133 u8 cssid;
134} __attribute__((packed));
135
136struct chp_info_data {
137 struct chp_info_sccb sccb;
138 struct sclp_req req;
139 struct completion completion;
140} __attribute__((packed));
141
142/**
143 * sclp_chp_read_info - perform read channel-path information sclp command
144 * @info: resulting channel-path information data
145 *
146 * Perform read channel-path information sclp command and wait for completion.
147 * On success, store channel-path information in @info and return 0. Return
148 * non-zero otherwise.
149 */
150int sclp_chp_read_info(struct sclp_chp_info *info)
151{
152 struct chp_info_data *data;
153 int rc;
154
155 /* Prepare sccb. */
156 data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
157 if (!data)
158 return -ENOMEM;
159 data->sccb.header.length = sizeof(struct chp_info_sccb);
160 data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
161 data->req.sccb = &(data->sccb);
162 data->req.status = SCLP_REQ_FILLED;
163 data->req.callback = chp_callback;
164 data->req.callback_data = &(data->completion);
165 init_completion(&data->completion);
166
167 /* Perform sclp request. */
168 rc = sclp_add_request(&(data->req));
169 if (rc)
170 goto out;
171 wait_for_completion(&data->completion);
172
 173 /* Check response. */
174 if (data->req.status != SCLP_REQ_DONE) {
175 printk(KERN_WARNING TAG "read channel-path info request failed "
176 "(status=0x%02x)\n", data->req.status);
177 rc = -EIO;
178 goto out;
179 }
180 if (data->sccb.header.response_code != 0x0010) {
181 printk(KERN_WARNING TAG "read channel-path info failed "
182 "(response=0x%04x)\n", data->sccb.header.response_code);
183 rc = -EIO;
184 goto out;
185 }
186 memcpy(info->recognized, data->sccb.recognized,
187 SCLP_CHP_INFO_MASK_SIZE);
188 memcpy(info->standby, data->sccb.standby,
189 SCLP_CHP_INFO_MASK_SIZE);
190 memcpy(info->configured, data->sccb.configured,
191 SCLP_CHP_INFO_MASK_SIZE);
192out:
193 free_page((unsigned long) data);
194
195 return rc;
196}
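
A minimal sketch of how a caller might drive the new channel-path interface; the chp_id field layout and the error handling here are assumptions for illustration, not code from this patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/sclp.h>
#include <asm/chpid.h>

/* Hypothetical caller (sketch only). */
static int sketch_configure_chp(u8 cssid, u8 id)
{
	struct sclp_chp_info info;
	struct chp_id chpid = { .cssid = cssid, .id = id };	/* assumed layout */
	int rc;

	/* query the recognized/standby/configured masks first */
	rc = sclp_chp_read_info(&info);
	if (rc)
		return rc;
	/* 0 on success, -EIO/-ENOMEM otherwise (see do_configure() above) */
	return sclp_chp_configure(chpid);
}
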
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644
index 000000000000..5322e5e54a98
--- /dev/null
+++ b/drivers/s390/char/sclp_config.c
@@ -0,0 +1,75 @@
1/*
2 * drivers/s390/char/sclp_config.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/cpu.h>
11#include <linux/sysdev.h>
12#include <linux/workqueue.h>
13#include "sclp.h"
14
15#define TAG "sclp_config: "
16
17struct conf_mgm_data {
18 u8 reserved;
19 u8 ev_qualifier;
20} __attribute__((packed));
21
22#define EV_QUAL_CAP_CHANGE 3
23
24static struct work_struct sclp_cpu_capability_work;
25
26static void sclp_cpu_capability_notify(struct work_struct *work)
27{
28 int cpu;
29 struct sys_device *sysdev;
30
31 printk(KERN_WARNING TAG "cpu capability changed.\n");
32 lock_cpu_hotplug();
33 for_each_online_cpu(cpu) {
34 sysdev = get_cpu_sysdev(cpu);
35 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
36 }
37 unlock_cpu_hotplug();
38}
39
40static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
41{
42 struct conf_mgm_data *cdata;
43
44 cdata = (struct conf_mgm_data *)(evbuf + 1);
45 if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
46 schedule_work(&sclp_cpu_capability_work);
47}
48
49static struct sclp_register sclp_conf_register =
50{
51 .receive_mask = EVTYP_CONFMGMDATA_MASK,
52 .receiver_fn = sclp_conf_receiver_fn,
53};
54
55static int __init sclp_conf_init(void)
56{
57 int rc;
58
59 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
60
61 rc = sclp_register(&sclp_conf_register);
62 if (rc) {
63 printk(KERN_ERR TAG "failed to register (%d).\n", rc);
64 return rc;
65 }
66
67 if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
68 printk(KERN_WARNING TAG "no configuration management.\n");
69 sclp_unregister(&sclp_conf_register);
70 rc = -ENOSYS;
71 }
72 return rc;
73}
74
75__initcall(sclp_conf_init);
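
sclp_config.c follows the same SCLP listener pattern used by the sclp_cpi.c and sclp_quiesce.c hunks below: declare a struct sclp_register with the event mask and callback, register it, then check that the mask was actually granted. A compressed sketch, using EVTYP_OPCMD_MASK purely as an example mask; the listener itself is hypothetical:

#include <linux/init.h>
#include <linux/errno.h>
#include "sclp.h"

/* Hypothetical event listener (sketch only). */
static void sketch_receiver(struct evbuf_header *evbuf)
{
	/* event-specific payload starts right after the header */
	void *payload = evbuf + 1;
	(void) payload;
}

static struct sclp_register sketch_register = {
	.receive_mask = EVTYP_OPCMD_MASK,
	.receiver_fn = sketch_receiver,
};

static int __init sketch_init(void)
{
	int rc = sclp_register(&sketch_register);

	if (rc)
		return rc;
	/* the service element may grant fewer event types than requested */
	if (!(sketch_register.sclp_receive_mask & EVTYP_OPCMD_MASK)) {
		sclp_unregister(&sketch_register);
		return -ENOSYS;
	}
	return 0;
}
__initcall(sketch_init);
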
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 65aa2c85737f..29fe2a5ec2fe 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -46,7 +46,7 @@ struct cpi_sccb {
46/* Event type structure for write message and write priority message */ 46/* Event type structure for write message and write priority message */
47static struct sclp_register sclp_cpi_event = 47static struct sclp_register sclp_cpi_event =
48{ 48{
49 .send_mask = EvTyp_CtlProgIdent_Mask 49 .send_mask = EVTYP_CTLPROGIDENT_MASK
50}; 50};
51 51
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
@@ -201,7 +201,7 @@ cpi_module_init(void)
201 "console.\n"); 201 "console.\n");
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { 204 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
205 printk(KERN_WARNING "cpi: no control program identification " 205 printk(KERN_WARNING "cpi: no control program identification "
206 "support\n"); 206 "support\n");
207 sclp_unregister(&sclp_cpi_event); 207 sclp_unregister(&sclp_cpi_event);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index baa8fe669ed2..45ff25e787cb 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf)
43} 43}
44 44
45static struct sclp_register sclp_quiesce_event = { 45static struct sclp_register sclp_quiesce_event = {
46 .receive_mask = EvTyp_SigQuiesce_Mask, 46 .receive_mask = EVTYP_SIGQUIESCE_MASK,
47 .receiver_fn = sclp_quiesce_handler 47 .receiver_fn = sclp_quiesce_handler
48}; 48};
49 49
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 2486783ea58e..bbd5b8b66f42 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -30,7 +30,7 @@
30 30
31/* Event type structure for write message and write priority message */ 31/* Event type structure for write message and write priority message */
32static struct sclp_register sclp_rw_event = { 32static struct sclp_register sclp_rw_event = {
33 .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask 33 .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
34}; 34};
35 35
36/* 36/*
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
64 memset(sccb, 0, sizeof(struct write_sccb)); 64 memset(sccb, 0, sizeof(struct write_sccb));
65 sccb->header.length = sizeof(struct write_sccb); 65 sccb->header.length = sizeof(struct write_sccb);
66 sccb->msg_buf.header.length = sizeof(struct msg_buf); 66 sccb->msg_buf.header.length = sizeof(struct msg_buf);
67 sccb->msg_buf.header.type = EvTyp_Msg; 67 sccb->msg_buf.header.type = EVTYP_MSG;
68 sccb->msg_buf.mdb.header.length = sizeof(struct mdb); 68 sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
69 sccb->msg_buf.mdb.header.type = 1; 69 sccb->msg_buf.mdb.header.type = 1;
70 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ 70 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
114 memset(mto, 0, sizeof(struct mto)); 114 memset(mto, 0, sizeof(struct mto));
115 mto->length = sizeof(struct mto); 115 mto->length = sizeof(struct mto);
116 mto->type = 4; /* message text object */ 116 mto->type = 4; /* message text object */
117 mto->line_type_flags = LnTpFlgs_EndText; /* end text */ 117 mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
118 118
119 /* set pointer to first byte after struct mto. */ 119 /* set pointer to first byte after struct mto. */
120 buffer->current_line = (char *) (mto + 1); 120 buffer->current_line = (char *) (mto + 1);
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
215 case '\a': /* bell, one for several times */ 215 case '\a': /* bell, one for several times */
216 /* set SCLP sound alarm bit in General Object */ 216 /* set SCLP sound alarm bit in General Object */
217 buffer->sccb->msg_buf.mdb.go.general_msg_flags |= 217 buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
218 GnrlMsgFlgs_SndAlrm; 218 GNRLMSGFLGS_SNDALRM;
219 break; 219 break;
220 case '\t': /* horizontal tabulator */ 220 case '\t': /* horizontal tabulator */
221 /* check if new mto needs to be created */ 221 /* check if new mto needs to be created */
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
452 return -EIO; 452 return -EIO;
453 453
454 sccb = buffer->sccb; 454 sccb = buffer->sccb;
455 if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) 455 if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
456 /* Use normal write message */ 456 /* Use normal write message */
457 sccb->msg_buf.header.type = EvTyp_Msg; 457 sccb->msg_buf.header.type = EVTYP_MSG;
458 else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) 458 else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
459 /* Use write priority message */ 459 /* Use write priority message */
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 000000000000..52283daddaef
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,255 @@
1/*
 2 * SCLP "store data in absolute storage"
3 *
4 * Copyright IBM Corp. 2003,2007
5 * Author(s): Michael Holzheu
6 */
7
8#include <linux/sched.h>
9#include <asm/sclp.h>
10#include <asm/debug.h>
11#include <asm/ipl.h>
12#include "sclp.h"
13#include "sclp_rw.h"
14
15#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
16#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x )
17
18#define SDIAS_RETRIES 300
19#define SDIAS_SLEEP_TICKS 50
20
21#define EQ_STORE_DATA 0x0
22#define EQ_SIZE 0x1
23#define DI_FCP_DUMP 0x0
24#define ASA_SIZE_32 0x0
25#define ASA_SIZE_64 0x1
26#define EVSTATE_ALL_STORED 0x0
27#define EVSTATE_NO_DATA 0x3
28#define EVSTATE_PART_STORED 0x10
29
30static struct debug_info *sdias_dbf;
31
32static struct sclp_register sclp_sdias_register = {
33 .send_mask = EVTYP_SDIAS_MASK,
34};
35
36struct sdias_evbuf {
37 struct evbuf_header hdr;
38 u8 event_qual;
39 u8 data_id;
40 u64 reserved2;
41 u32 event_id;
42 u16 reserved3;
43 u8 asa_size;
44 u8 event_status;
45 u32 reserved4;
46 u32 blk_cnt;
47 u64 asa;
48 u32 reserved5;
49 u32 fbn;
50 u32 reserved6;
51 u32 lbn;
52 u16 reserved7;
53 u16 dbs;
54} __attribute__((packed));
55
56struct sdias_sccb {
57 struct sccb_header hdr;
58 struct sdias_evbuf evbuf;
59} __attribute__((packed));
60
61static struct sdias_sccb sccb __attribute__((aligned(4096)));
62
63static int sclp_req_done;
64static wait_queue_head_t sdias_wq;
65static DEFINE_MUTEX(sdias_mutex);
66
67static void sdias_callback(struct sclp_req *request, void *data)
68{
69 struct sdias_sccb *sccb;
70
71 sccb = (struct sdias_sccb *) request->sccb;
72 sclp_req_done = 1;
 73 wake_up(&sdias_wq); /* Inform caller that request is complete */
74 TRACE("callback done\n");
75}
76
77static int sdias_sclp_send(struct sclp_req *req)
78{
79 int retries;
80 int rc;
81
82 for (retries = SDIAS_RETRIES; retries; retries--) {
83 sclp_req_done = 0;
84 TRACE("add request\n");
85 rc = sclp_add_request(req);
86 if (rc) {
87 /* not initiated, wait some time and retry */
88 set_current_state(TASK_INTERRUPTIBLE);
89 TRACE("add request failed: rc = %i\n",rc);
90 schedule_timeout(SDIAS_SLEEP_TICKS);
91 continue;
92 }
93 /* initiated, wait for completion of service call */
94 wait_event(sdias_wq, (sclp_req_done == 1));
95 if (req->status == SCLP_REQ_FAILED) {
96 TRACE("sclp request failed\n");
97 rc = -EIO;
98 continue;
99 }
100 TRACE("request done\n");
101 break;
102 }
103 return rc;
104}
105
106/*
107 * Get number of blocks (4K) available in the HSA
108 */
109int sclp_sdias_blk_count(void)
110{
111 struct sclp_req request;
112 int rc;
113
114 mutex_lock(&sdias_mutex);
115
116 memset(&sccb, 0, sizeof(sccb));
117 memset(&request, 0, sizeof(request));
118
119 sccb.hdr.length = sizeof(sccb);
120 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
121 sccb.evbuf.hdr.type = EVTYP_SDIAS;
122 sccb.evbuf.event_qual = EQ_SIZE;
123 sccb.evbuf.data_id = DI_FCP_DUMP;
124 sccb.evbuf.event_id = 4712;
125 sccb.evbuf.dbs = 1;
126
127 request.sccb = &sccb;
128 request.command = SCLP_CMDW_WRITE_EVENT_DATA;
129 request.status = SCLP_REQ_FILLED;
130 request.callback = sdias_callback;
131
132 rc = sdias_sclp_send(&request);
133 if (rc) {
134 ERROR_MSG("sclp_send failed for get_nr_blocks\n");
135 goto out;
136 }
137 if (sccb.hdr.response_code != 0x0020) {
138 TRACE("send failed: %x\n", sccb.hdr.response_code);
139 rc = -EIO;
140 goto out;
141 }
142
143 switch (sccb.evbuf.event_status) {
144 case 0:
145 rc = sccb.evbuf.blk_cnt;
146 break;
147 default:
148 ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
149 rc = -EIO;
150 goto out;
151 }
152 TRACE("%i blocks\n", rc);
153out:
154 mutex_unlock(&sdias_mutex);
155 return rc;
156}
157
158/*
159 * Copy from HSA to absolute storage (not reentrant):
160 *
161 * @dest : Address of buffer where data should be copied
162 * @start_blk: Start Block (beginning with 1)
163 * @nr_blks : Number of 4K blocks to copy
164 *
165 * Return Value: 0 : Requested 'number' of blocks of data copied
166 * <0: ERROR - negative event status
167 */
168int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
169{
170 struct sclp_req request;
171 int rc;
172
173 mutex_lock(&sdias_mutex);
174
175 memset(&sccb, 0, sizeof(sccb));
176 memset(&request, 0, sizeof(request));
177
178 sccb.hdr.length = sizeof(sccb);
179 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
180 sccb.evbuf.hdr.type = EVTYP_SDIAS;
181 sccb.evbuf.hdr.flags = 0;
182 sccb.evbuf.event_qual = EQ_STORE_DATA;
183 sccb.evbuf.data_id = DI_FCP_DUMP;
184 sccb.evbuf.event_id = 4712;
185#ifdef __s390x__
186 sccb.evbuf.asa_size = ASA_SIZE_64;
187#else
188 sccb.evbuf.asa_size = ASA_SIZE_32;
189#endif
190 sccb.evbuf.event_status = 0;
191 sccb.evbuf.blk_cnt = nr_blks;
192 sccb.evbuf.asa = (unsigned long)dest;
193 sccb.evbuf.fbn = start_blk;
194 sccb.evbuf.lbn = 0;
195 sccb.evbuf.dbs = 1;
196
197 request.sccb = &sccb;
198 request.command = SCLP_CMDW_WRITE_EVENT_DATA;
199 request.status = SCLP_REQ_FILLED;
200 request.callback = sdias_callback;
201
202 rc = sdias_sclp_send(&request);
203 if (rc) {
204 ERROR_MSG("sclp_send failed: %x\n", rc);
205 goto out;
206 }
207 if (sccb.hdr.response_code != 0x0020) {
208 TRACE("copy failed: %x\n", sccb.hdr.response_code);
209 rc = -EIO;
210 goto out;
211 }
212
213 switch (sccb.evbuf.event_status) {
214 case EVSTATE_ALL_STORED:
215 TRACE("all stored\n");
216 case EVSTATE_PART_STORED:
217 TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
218 break;
219 case EVSTATE_NO_DATA:
220 TRACE("no data\n");
221 default:
222 ERROR_MSG("Error from SCLP while copying hsa. "
223 "Event status = %x\n",
224 sccb.evbuf.event_status);
225 rc = -EIO;
226 }
227out:
228 mutex_unlock(&sdias_mutex);
229 return rc;
230}
231
232int __init sdias_init(void)
233{
234 int rc;
235
236 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
237 return 0;
238 sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
239 debug_register_view(sdias_dbf, &debug_sprintf_view);
240 debug_set_level(sdias_dbf, 6);
241 rc = sclp_register(&sclp_sdias_register);
242 if (rc) {
243 ERROR_MSG("sclp register failed\n");
244 return rc;
245 }
246 init_waitqueue_head(&sdias_wq);
247 TRACE("init done\n");
248 return 0;
249}
250
251void __exit sdias_exit(void)
252{
253 debug_unregister(sdias_dbf);
254 sclp_unregister(&sclp_sdias_register);
255}
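
These two helpers are the building blocks zcore.c uses below: query how many 4K blocks the HSA holds, then pull blocks into a buffer one at a time. A minimal sketch of a caller, assuming the prototypes are exported through asm/sclp.h as zcore.c's includes suggest; the buffer handling is illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/sclp.h>

/* Hypothetical caller (sketch only): copy the first HSA block into a page. */
static int sketch_read_first_hsa_block(void)
{
	void *page = (void *) get_zeroed_page(GFP_KERNEL);
	int blocks, rc;

	if (!page)
		return -ENOMEM;
	blocks = sclp_sdias_blk_count();	/* number of 4K blocks in the HSA */
	if (blocks <= 0) {
		rc = blocks < 0 ? blocks : -ENODEV;
		goto out;
	}
	rc = sclp_sdias_copy(page, 1, 1);	/* block numbering starts at 1 */
out:
	free_page((unsigned long) page);
	return rc;
}
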
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 076816b9d528..e3b3d390b4a3 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start,
648 subvec = start; 648 subvec = start;
649 while (subvec < end) { 649 while (subvec < end) {
650 subvec = find_gds_subvector(subvec, end, 650 subvec = find_gds_subvector(subvec, end,
651 GDS_KEY_SelfDefTextMsg); 651 GDS_KEY_SELFDEFTEXTMSG);
652 if (!subvec) 652 if (!subvec)
653 break; 653 break;
654 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), 654 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
664 664
665 vec = start; 665 vec = start;
666 while (vec < end) { 666 while (vec < end) {
667 vec = find_gds_vector(vec, end, GDS_ID_TextCmd); 667 vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
668 if (!vec) 668 if (!vec)
669 break; 669 break;
670 sclp_eval_textcmd((struct gds_subvector *)(vec + 1), 670 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg)
703 703
704static struct sclp_register sclp_input_event = 704static struct sclp_register sclp_input_event =
705{ 705{
706 .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, 706 .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
707 .state_change_fn = sclp_tty_state_change, 707 .state_change_fn = sclp_tty_state_change,
708 .receiver_fn = sclp_tty_receiver 708 .receiver_fn = sclp_tty_receiver
709}; 709};
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f77dc33b5f8d..726334757bbf 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void);
99 99
100/* Registration structure for our interest in SCLP event buffers */ 100/* Registration structure for our interest in SCLP event buffers */
101static struct sclp_register sclp_vt220_register = { 101static struct sclp_register sclp_vt220_register = {
102 .send_mask = EvTyp_VT220Msg_Mask, 102 .send_mask = EVTYP_VT220MSG_MASK,
103 .receive_mask = EvTyp_VT220Msg_Mask, 103 .receive_mask = EVTYP_VT220MSG_MASK,
104 .state_change_fn = NULL, 104 .state_change_fn = NULL,
105 .receiver_fn = sclp_vt220_receiver_fn 105 .receiver_fn = sclp_vt220_receiver_fn
106}; 106};
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
202static int 202static int
203__sclp_vt220_emit(struct sclp_vt220_request *request) 203__sclp_vt220_emit(struct sclp_vt220_request *request)
204{ 204{
205 if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) { 205 if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
206 request->sclp_req.status = SCLP_REQ_FAILED; 206 request->sclp_req.status = SCLP_REQ_FAILED;
207 return -EIO; 207 return -EIO;
208 } 208 }
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page)
284 sccb->header.length = sizeof(struct sclp_vt220_sccb); 284 sccb->header.length = sizeof(struct sclp_vt220_sccb);
285 sccb->header.function_code = SCLP_NORMAL_WRITE; 285 sccb->header.function_code = SCLP_NORMAL_WRITE;
286 sccb->header.response_code = 0x0000; 286 sccb->header.response_code = 0x0000;
287 sccb->evbuf.type = EvTyp_VT220Msg; 287 sccb->evbuf.type = EVTYP_VT220MSG;
288 sccb->evbuf.length = sizeof(struct evbuf_header); 288 sccb->evbuf.length = sizeof(struct evbuf_header);
289 289
290 return request; 290 return request;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b87d3b019936..a5a00e9ae4d0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
125 .recording_name = "EREP", 125 .recording_name = "EREP",
126 .minor_num = 0, 126 .minor_num = 0,
127 .buffer_free = 1, 127 .buffer_free = 1,
128 .priv_lock = SPIN_LOCK_UNLOCKED, 128 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
129 .autorecording = 1, 129 .autorecording = 1,
130 .autopurge = 1, 130 .autopurge = 1,
131 }, 131 },
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
134 .recording_name = "ACCOUNT", 134 .recording_name = "ACCOUNT",
135 .minor_num = 1, 135 .minor_num = 1,
136 .buffer_free = 1, 136 .buffer_free = 1,
137 .priv_lock = SPIN_LOCK_UNLOCKED, 137 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
138 .autorecording = 1, 138 .autorecording = 1,
139 .autopurge = 1, 139 .autopurge = 1,
140 }, 140 },
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
143 .recording_name = "SYMPTOM", 143 .recording_name = "SYMPTOM",
144 .minor_num = 2, 144 .minor_num = 2,
145 .buffer_free = 1, 145 .buffer_free = 1,
146 .priv_lock = SPIN_LOCK_UNLOCKED, 146 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
147 .autorecording = 1, 147 .autorecording = 1,
148 .autopurge = 1, 148 .autopurge = 1,
149 } 149 }
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
385 385
386 struct vmlogrdr_priv_t * logptr = filp->private_data; 386 struct vmlogrdr_priv_t * logptr = filp->private_data;
387 387
388 iucv_path_sever(logptr->path, NULL);
389 kfree(logptr->path);
390 logptr->path = NULL;
388 if (logptr->autorecording) { 391 if (logptr->autorecording) {
389 ret = vmlogrdr_recording(logptr,0,logptr->autopurge); 392 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
390 if (ret) 393 if (ret)
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644
index 000000000000..89d439316a53
--- /dev/null
+++ b/drivers/s390/char/zcore.c
@@ -0,0 +1,651 @@
1/*
2 * zcore module to export memory content and register sets for creating system
3 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
4 * dump format as s390 standalone dumps.
5 *
6 * For more information please refer to Documentation/s390/zfcpdump.txt
7 *
8 * Copyright IBM Corp. 2003,2007
9 * Author(s): Michael Holzheu
10 */
11
12#include <linux/init.h>
13#include <linux/miscdevice.h>
14#include <linux/utsname.h>
15#include <linux/debugfs.h>
16#include <asm/ipl.h>
17#include <asm/sclp.h>
18#include <asm/setup.h>
19#include <asm/sigp.h>
20#include <asm/uaccess.h>
21#include <asm/debug.h>
22#include <asm/processor.h>
23#include <asm/irqflags.h>
24
25#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
26#define MSG(x...) printk( KERN_ALERT x )
27#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
28
29#define TO_USER 0
30#define TO_KERNEL 1
31
32enum arch_id {
33 ARCH_S390 = 0,
34 ARCH_S390X = 1,
35};
36
37/* dump system info */
38
39struct sys_info {
40 enum arch_id arch;
41 unsigned long sa_base;
42 u32 sa_size;
43 int cpu_map[NR_CPUS];
44 unsigned long mem_size;
45 union save_area lc_mask;
46};
47
48static struct sys_info sys_info;
49static struct debug_info *zcore_dbf;
50static int hsa_available;
51static struct dentry *zcore_dir;
52static struct dentry *zcore_file;
53
54/*
55 * Copy memory from HSA to kernel or user memory (not reentrant):
56 *
57 * @dest: Kernel or user buffer where memory should be copied to
58 * @src: Start address within HSA where data should be copied
59 * @count: Size of buffer, which should be copied
60 * @mode: Either TO_KERNEL or TO_USER
61 */
62static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
63{
64 int offs, blk_num;
65 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
66
67 if (count == 0)
68 return 0;
69
70 /* copy first block */
71 offs = 0;
72 if ((src % PAGE_SIZE) != 0) {
73 blk_num = src / PAGE_SIZE + 2;
74 if (sclp_sdias_copy(buf, blk_num, 1)) {
75 TRACE("sclp_sdias_copy() failed\n");
76 return -EIO;
77 }
78 offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
79 if (mode == TO_USER) {
80 if (copy_to_user((__force __user void*) dest,
81 buf + (src % PAGE_SIZE), offs))
82 return -EFAULT;
83 } else
84 memcpy(dest, buf + (src % PAGE_SIZE), offs);
85 }
86 if (offs == count)
87 goto out;
88
89 /* copy middle */
90 for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
91 blk_num = (src + offs) / PAGE_SIZE + 2;
92 if (sclp_sdias_copy(buf, blk_num, 1)) {
93 TRACE("sclp_sdias_copy() failed\n");
94 return -EIO;
95 }
96 if (mode == TO_USER) {
97 if (copy_to_user((__force __user void*) dest + offs,
98 buf, PAGE_SIZE))
99 return -EFAULT;
100 } else
101 memcpy(dest + offs, buf, PAGE_SIZE);
102 }
103 if (offs == count)
104 goto out;
105
106 /* copy last block */
107 blk_num = (src + offs) / PAGE_SIZE + 2;
108 if (sclp_sdias_copy(buf, blk_num, 1)) {
109 TRACE("sclp_sdias_copy() failed\n");
110 return -EIO;
111 }
112 if (mode == TO_USER) {
113 if (copy_to_user((__force __user void*) dest + offs, buf,
114 PAGE_SIZE))
115 return -EFAULT;
116 } else
117 memcpy(dest + offs, buf, count - offs);
118out:
119 return 0;
120}
121
122static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
123{
124 return memcpy_hsa((void __force *) dest, src, count, TO_USER);
125}
126
127static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
128{
129 return memcpy_hsa(dest, src, count, TO_KERNEL);
130}
131
132static int memcpy_real(void *dest, unsigned long src, size_t count)
133{
134 unsigned long flags;
135 int rc = -EFAULT;
136 register unsigned long _dest asm("2") = (unsigned long) dest;
137 register unsigned long _len1 asm("3") = (unsigned long) count;
138 register unsigned long _src asm("4") = src;
139 register unsigned long _len2 asm("5") = (unsigned long) count;
140
141 if (count == 0)
142 return 0;
143 flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
144 asm volatile (
145 "0: mvcle %1,%2,0x0\n"
146 "1: jo 0b\n"
147 " lhi %0,0x0\n"
148 "2:\n"
149 EX_TABLE(1b,2b)
150 : "+d" (rc)
151 : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
152 : "cc", "memory");
153 __raw_local_irq_ssm(flags);
154
155 return rc;
156}
157
158static int memcpy_real_user(__user void *dest, unsigned long src, size_t count)
159{
160 static char buf[4096];
161 int offs = 0, size;
162
163 while (offs < count) {
164 size = min(sizeof(buf), count - offs);
165 if (memcpy_real(buf, src + offs, size))
166 return -EFAULT;
167 if (copy_to_user(dest + offs, buf, size))
168 return -EFAULT;
169 offs += size;
170 }
171 return 0;
172}
173
174#ifdef __s390x__
175/*
176 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
177 */
178static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
179 int cpu)
180{
181 int i;
182
183 for (i = 0; i < 16; i++) {
184 out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
185 out->s390.acc_regs[i] = in->s390x.acc_regs[i];
186 out->s390.ctrl_regs[i] =
187 in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
188 }
189 /* locore for 31 bit has only space for fpregs 0,2,4,6 */
190 out->s390.fp_regs[0] = in->s390x.fp_regs[0];
191 out->s390.fp_regs[1] = in->s390x.fp_regs[2];
192 out->s390.fp_regs[2] = in->s390x.fp_regs[4];
193 out->s390.fp_regs[3] = in->s390x.fp_regs[6];
194 memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
195 out->s390.psw[1] |= 0x8; /* set bit 12 */
196 memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
197 out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
198 out->s390.pref_reg = in->s390x.pref_reg;
199 out->s390.timer = in->s390x.timer;
200 out->s390.clk_cmp = in->s390x.clk_cmp;
201}
202
203static void __init s390x_to_s390_save_areas(void)
204{
205 int i = 1;
206 static union save_area tmp;
207
208 while (zfcpdump_save_areas[i]) {
209 s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
210 memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
211 i++;
212 }
213}
214
215#endif /* __s390x__ */
216
217static int __init init_cpu_info(enum arch_id arch)
218{
219 union save_area *sa;
220
221 /* get info for boot cpu from lowcore, stored in the HSA */
222
223 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
224 if (!sa) {
225 ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
226 return -ENOMEM;
227 }
228 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
229 ERROR_MSG("could not copy from HSA\n");
230 kfree(sa);
231 return -EIO;
232 }
233 zfcpdump_save_areas[0] = sa;
234
235#ifdef __s390x__
236 /* convert s390x regs to s390, if we are dumping an s390 Linux */
237
238 if (arch == ARCH_S390)
239 s390x_to_s390_save_areas();
240#endif
241
242 return 0;
243}
244
245static DEFINE_MUTEX(zcore_mutex);
246
247#define DUMP_VERSION 0x3
248#define DUMP_MAGIC 0xa8190173618f23fdULL
249#define DUMP_ARCH_S390X 2
250#define DUMP_ARCH_S390 1
251#define HEADER_SIZE 4096
252
253/* dump header, laid out according to the s390 crash dump format */
254
255struct zcore_header {
256 u64 magic;
257 u32 version;
258 u32 header_size;
259 u32 dump_level;
260 u32 page_size;
261 u64 mem_size;
262 u64 mem_start;
263 u64 mem_end;
264 u32 num_pages;
265 u32 pad1;
266 u64 tod;
267 cpuid_t cpu_id;
268 u32 arch_id;
269 u32 build_arch;
270 char pad2[4016];
271} __attribute__((packed,__aligned__(16)));
272
273static struct zcore_header zcore_header = {
274 .magic = DUMP_MAGIC,
275 .version = DUMP_VERSION,
276 .header_size = 4096,
277 .dump_level = 0,
278 .page_size = PAGE_SIZE,
279 .mem_start = 0,
280#ifdef __s390x__
281 .build_arch = DUMP_ARCH_S390X,
282#else
283 .build_arch = DUMP_ARCH_S390,
284#endif
285};
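/*
 * The remaining header fields (mem_size, mem_end, num_pages, tod, cpu_id
 * and arch_id) are filled in at run time by zcore_header_init().
 */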
286
287/*
288 * Copy lowcore info to buffer. Use map in order to copy only register parts.
289 *
290 * @buf: User buffer
291 * @sa: Pointer to save area
292 * @sa_off: Offset in save area to copy
293 * @len: Number of bytes to copy
294 */
295static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
296{
297 int i;
298 char *lc_mask = (char*)&sys_info.lc_mask;
299
300 for (i = 0; i < len; i++) {
301 if (!lc_mask[i + sa_off])
302 continue;
303 if (copy_to_user(buf + i, sa + sa_off + i, 1))
304 return -EFAULT;
305 }
306 return 0;
307}
308
309/*
310 * Copy lowcore info to memory, if necessary
311 *
312 * @buf: User buffer
313 * @start: Start address of buffer in dump memory
314 * @count: Size of buffer
315 */
316static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
317{
318 unsigned long end;
319 int i = 0;
320
321 if (count == 0)
322 return 0;
323
324 end = start + count;
325 while (zfcpdump_save_areas[i]) {
326 unsigned long cp_start, cp_end; /* copy range */
327 unsigned long sa_start, sa_end; /* save area range */
328 unsigned long prefix;
329 unsigned long sa_off, len, buf_off;
330
331 if (sys_info.arch == ARCH_S390)
332 prefix = zfcpdump_save_areas[i]->s390.pref_reg;
333 else
334 prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
335
336 sa_start = prefix + sys_info.sa_base;
337 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
338
339 if ((end < sa_start) || (start > sa_end))
340 goto next;
341 cp_start = max(start, sa_start);
342 cp_end = min(end, sa_end);
343
344 buf_off = cp_start - start;
345 sa_off = cp_start - sa_start;
346 len = cp_end - cp_start;
347
348 TRACE("copy_lc for: %lx\n", start);
349 if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
350 return -EFAULT;
351next:
352 i++;
353 }
354 return 0;
355}
356
357/*
358 * Read routine for the zcore character device:
359 * - the first 4K are the dump header
360 * - the next 32 MB are read from HSA memory
361 * - the rest is read from absolute memory
362 */
363static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
364 loff_t *ppos)
365{
366 unsigned long mem_start; /* Start address in memory */
367 size_t mem_offs; /* Offset in dump memory */
368 size_t hdr_count; /* Size of header part of output buffer */
369 size_t size;
370 int rc;
371
372 mutex_lock(&zcore_mutex);
373
374 if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
375 rc = -EINVAL;
376 goto fail;
377 }
378
379 count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
380
381 /* Copy dump header */
382 if (*ppos < HEADER_SIZE) {
383 size = min(count, (size_t) (HEADER_SIZE - *ppos));
384 if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
385 rc = -EFAULT;
386 goto fail;
387 }
388 hdr_count = size;
389 mem_start = 0;
390 } else {
391 hdr_count = 0;
392 mem_start = *ppos - HEADER_SIZE;
393 }
394
395 mem_offs = 0;
396
397 /* Copy from HSA data */
398 if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
399 size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
400 - mem_start));
401 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
402 if (rc)
403 goto fail;
404
405 mem_offs += size;
406 }
407
408 /* Copy from real mem */
409 size = count - mem_offs - hdr_count;
410 rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
411 size);
412 if (rc)
413 goto fail;
414
415 /*
416 * Since s390 dump analysis tools like lcrash or crash
417 * expect register sets in the prefix pages of the cpus,
418 * we copy them into the read buffer, if necessary.
419 * buf + hdr_count: Start of memory part of output buffer
420 * mem_start: Start memory address to copy from
421 * count - hdr_count: Size of memory area to copy
422 */
423 if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
424 rc = -EFAULT;
425 goto fail;
426 }
427 *ppos += count;
428fail:
429 mutex_unlock(&zcore_mutex);
430 return (rc < 0) ? rc : count;
431}
432
433static int zcore_open(struct inode *inode, struct file *filp)
434{
435 if (!hsa_available)
436 return -ENODATA;
437 else
438 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
439}
440
441static int zcore_release(struct inode *inode, struct file *filep)
442{
443 diag308(DIAG308_REL_HSA, NULL);
444 hsa_available = 0;
445 return 0;
446}
447
448static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
449{
450 loff_t rc;
451
452 mutex_lock(&zcore_mutex);
453 switch (orig) {
454 case 0:
455 file->f_pos = offset;
456 rc = file->f_pos;
457 break;
458 case 1:
459 file->f_pos += offset;
460 rc = file->f_pos;
461 break;
462 default:
463 rc = -EINVAL;
464 }
465 mutex_unlock(&zcore_mutex);
466 return rc;
467}
468
469static struct file_operations zcore_fops = {
470 .owner = THIS_MODULE,
471 .llseek = zcore_lseek,
472 .read = zcore_read,
473 .open = zcore_open,
474 .release = zcore_release,
475};
476
477
478static void __init set_s390_lc_mask(union save_area *map)
479{
480 memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
481 memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
482 memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
483 memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
484 memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
485 memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
486 memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
487 memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
488 memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
489}
490
491static void __init set_s390x_lc_mask(union save_area *map)
492{
493 memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
494 memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
495 memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
496 memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
497 memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
498 memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
499 memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
500 memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
501 memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
502 memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
503}
504
505/*
506 * Initialize dump globals for a given architecture
507 */
508static int __init sys_info_init(enum arch_id arch)
509{
510 switch (arch) {
511 case ARCH_S390X:
512 MSG("DETECTED 'S390X (64 bit) OS'\n");
513 sys_info.sa_base = SAVE_AREA_BASE_S390X;
514 sys_info.sa_size = sizeof(struct save_area_s390x);
515 set_s390x_lc_mask(&sys_info.lc_mask);
516 break;
517 case ARCH_S390:
518 MSG("DETECTED 'S390 (32 bit) OS'\n");
519 sys_info.sa_base = SAVE_AREA_BASE_S390;
520 sys_info.sa_size = sizeof(struct save_area_s390);
521 set_s390_lc_mask(&sys_info.lc_mask);
522 break;
523 default:
524 ERROR_MSG("unknown architecture 0x%x.\n",arch);
525 return -EINVAL;
526 }
527 sys_info.arch = arch;
528 if (init_cpu_info(arch)) {
529 ERROR_MSG("get cpu info failed\n");
530 return -ENOMEM;
531 }
532 sys_info.mem_size = real_memory_size;
533
534 return 0;
535}
536
537static int __init check_sdias(void)
538{
539 int rc, act_hsa_size;
540
541 rc = sclp_sdias_blk_count();
542 if (rc < 0) {
543 ERROR_MSG("Could not determine HSA size\n");
544 return rc;
545 }
546 act_hsa_size = (rc - 1) * PAGE_SIZE;
547 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
548 ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
549 return -EINVAL;
550 }
551 return 0;
552}
553
554static void __init zcore_header_init(int arch, struct zcore_header *hdr)
555{
556 if (arch == ARCH_S390X)
557 hdr->arch_id = DUMP_ARCH_S390X;
558 else
559 hdr->arch_id = DUMP_ARCH_S390;
560 hdr->mem_size = sys_info.mem_size;
561 hdr->mem_end = sys_info.mem_size;
562 hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
563 hdr->tod = get_clock();
564 get_cpu_id(&hdr->cpu_id);
565}
566
567extern int sdias_init(void);
568
569static int __init zcore_init(void)
570{
571 unsigned char arch;
572 int rc;
573
574 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
575 return -ENODATA;
576
577 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
578 debug_register_view(zcore_dbf, &debug_sprintf_view);
579 debug_set_level(zcore_dbf, 6);
580
581 TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
582 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
583 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
584
585 rc = sdias_init();
586 if (rc)
587 goto fail;
588
589 rc = check_sdias();
590 if (rc) {
591 ERROR_MSG("Dump initialization failed\n");
592 goto fail;
593 }
594
595 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
596 if (rc) {
597 ERROR_MSG("sdial memcpy for arch id failed\n");
598 goto fail;
599 }
600
601#ifndef __s390x__
602 if (arch == ARCH_S390X) {
603 ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
604 rc = -EINVAL;
605 goto fail;
606 }
607#endif
608
609 rc = sys_info_init(arch);
610 if (rc) {
611 ERROR_MSG("arch init failed\n");
612 goto fail;
613 }
614
615 zcore_header_init(arch, &zcore_header);
616
617 zcore_dir = debugfs_create_dir("zcore" , NULL);
618 if (!zcore_dir) {
619 rc = -ENOMEM;
620 goto fail;
621 }
622 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
623 &zcore_fops);
624 if (!zcore_file) {
625 debugfs_remove(zcore_dir);
626 rc = -ENOMEM;
627 goto fail;
628 }
629 hsa_available = 1;
630 return 0;
631
632fail:
633 diag308(DIAG308_REL_HSA, NULL);
634 return rc;
635}
636
637extern void sdias_exit(void);
638
639static void __exit zcore_exit(void)
640{
641 debug_unregister(zcore_dbf);
642 sdias_exit();
643 diag308(DIAG308_REL_HSA, NULL);
644}
645
646MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
647MODULE_DESCRIPTION("zcore module for zfcpdump support");
648MODULE_LICENSE("GPL");
649
650subsys_initcall(zcore_init);
651module_exit(zcore_exit);
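/*
 * Illustrative sketch, not part of this patch: a minimal user-space reader
 * for the debugfs file registered above.  It assumes debugfs is mounted at
 * /sys/kernel/debug (so the dump appears as /sys/kernel/debug/zcore/mem)
 * and that the reader runs with CAP_SYS_RAWIO, as required by zcore_open().
 * The first 4096 bytes are the struct zcore_header defined above; in the
 * packed layout, magic is at offset 0 and mem_size at offset 24.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char hdr[4096];
	uint64_t magic, mem_size;
	int fd = open("/sys/kernel/debug/zcore/mem", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, hdr, sizeof(hdr)) != (ssize_t) sizeof(hdr)) {
		perror("read");
		close(fd);
		return 1;
	}
	memcpy(&magic, hdr, sizeof(magic));            /* u64 magic    */
	memcpy(&mem_size, hdr + 24, sizeof(mem_size)); /* u64 mem_size */
	printf("magic=0x%016llx mem_size=%llu\n",
	       (unsigned long long) magic, (unsigned long long) mem_size);
	close(fd);
	return 0;
}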
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index c490c2a1c2fc..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 common i/o drivers 2# Makefile for the S/390 common i/o drivers
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
6ccw_device-objs += device.o device_fsm.o device_ops.o 6ccw_device-objs += device.o device_fsm.o device_ops.o
7ccw_device-objs += device_id.o device_pgid.o device_status.o 7ccw_device-objs += device_id.o device_pgid.o device_status.o
8obj-y += ccw_device.o cmf.o 8obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5aeb68e732b0..e5ccda63e883 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
75{ 75{
76 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 76 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
77 77
78 mutex_lock(&gdev->reg_mutex);
78 __ccwgroup_remove_symlinks(gdev); 79 __ccwgroup_remove_symlinks(gdev);
79 device_unregister(dev); 80 device_unregister(dev);
81 mutex_unlock(&gdev->reg_mutex);
80} 82}
81 83
82static ssize_t 84static ssize_t
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
173 return -ENOMEM; 175 return -ENOMEM;
174 176
175 atomic_set(&gdev->onoff, 0); 177 atomic_set(&gdev->onoff, 0);
176 178 mutex_init(&gdev->reg_mutex);
179 mutex_lock(&gdev->reg_mutex);
177 for (i = 0; i < argc; i++) { 180 for (i = 0; i < argc; i++) {
178 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 181 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
179 182
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
183 || gdev->cdev[i]->id.driver_info != 186 || gdev->cdev[i]->id.driver_info !=
184 gdev->cdev[0]->id.driver_info) { 187 gdev->cdev[0]->id.driver_info) {
185 rc = -EINVAL; 188 rc = -EINVAL;
186 goto free_dev; 189 goto error;
187 } 190 }
188 /* Don't allow a device to belong to more than one group. */ 191 /* Don't allow a device to belong to more than one group. */
189 if (gdev->cdev[i]->dev.driver_data) { 192 if (gdev->cdev[i]->dev.driver_data) {
190 rc = -EINVAL; 193 rc = -EINVAL;
191 goto free_dev; 194 goto error;
192 } 195 }
193 gdev->cdev[i]->dev.driver_data = gdev; 196 gdev->cdev[i]->dev.driver_data = gdev;
194 } 197 }
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
203 gdev->cdev[0]->dev.bus_id); 206 gdev->cdev[0]->dev.bus_id);
204 207
205 rc = device_register(&gdev->dev); 208 rc = device_register(&gdev->dev);
206
207 if (rc) 209 if (rc)
208 goto free_dev; 210 goto error;
209 get_device(&gdev->dev); 211 get_device(&gdev->dev);
210 rc = device_create_file(&gdev->dev, &dev_attr_ungroup); 212 rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
211 213
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root,
216 218
217 rc = __ccwgroup_create_symlinks(gdev); 219 rc = __ccwgroup_create_symlinks(gdev);
218 if (!rc) { 220 if (!rc) {
221 mutex_unlock(&gdev->reg_mutex);
219 put_device(&gdev->dev); 222 put_device(&gdev->dev);
220 return 0; 223 return 0;
221 } 224 }
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root,
224error: 227error:
225 for (i = 0; i < argc; i++) 228 for (i = 0; i < argc; i++)
226 if (gdev->cdev[i]) { 229 if (gdev->cdev[i]) {
227 put_device(&gdev->cdev[i]->dev);
228 gdev->cdev[i]->dev.driver_data = NULL;
229 }
230 put_device(&gdev->dev);
231 return rc;
232free_dev:
233 for (i = 0; i < argc; i++)
234 if (gdev->cdev[i]) {
235 if (gdev->cdev[i]->dev.driver_data == gdev) 230 if (gdev->cdev[i]->dev.driver_data == gdev)
236 gdev->cdev[i]->dev.driver_data = NULL; 231 gdev->cdev[i]->dev.driver_data = NULL;
237 put_device(&gdev->cdev[i]->dev); 232 put_device(&gdev->cdev[i]->dev);
238 } 233 }
239 kfree(gdev); 234 mutex_unlock(&gdev->reg_mutex);
235 put_device(&gdev->dev);
240 return rc; 236 return rc;
241} 237}
242 238
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
422 get_driver(&cdriver->driver); 418 get_driver(&cdriver->driver);
423 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, 419 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
424 __ccwgroup_match_all))) { 420 __ccwgroup_match_all))) {
425 __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); 421 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
422
423 mutex_lock(&gdev->reg_mutex);
424 __ccwgroup_remove_symlinks(gdev);
426 device_unregister(dev); 425 device_unregister(dev);
426 mutex_unlock(&gdev->reg_mutex);
427 put_device(dev); 427 put_device(dev);
428 } 428 }
429 put_driver(&cdriver->driver); 429 put_driver(&cdriver->driver);
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
444 if (cdev->dev.driver_data) { 444 if (cdev->dev.driver_data) {
445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
446 if (get_device(&gdev->dev)) { 446 if (get_device(&gdev->dev)) {
447 mutex_lock(&gdev->reg_mutex);
447 if (device_is_registered(&gdev->dev)) 448 if (device_is_registered(&gdev->dev))
448 return gdev; 449 return gdev;
450 mutex_unlock(&gdev->reg_mutex);
449 put_device(&gdev->dev); 451 put_device(&gdev->dev);
450 } 452 }
451 return NULL; 453 return NULL;
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
465 if (gdev) { 467 if (gdev) {
466 __ccwgroup_remove_symlinks(gdev); 468 __ccwgroup_remove_symlinks(gdev);
467 device_unregister(&gdev->dev); 469 device_unregister(&gdev->dev);
470 mutex_unlock(&gdev->reg_mutex);
468 put_device(&gdev->dev); 471 put_device(&gdev->dev);
469 } 472 }
470} 473}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 000000000000..ac289e6eadfe
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,683 @@
1/*
2 * drivers/s390/cio/chp.c
3 *
4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
6 * Arnd Bergmann (arndb@de.ibm.com)
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 */
9
10#include <linux/bug.h>
11#include <linux/workqueue.h>
12#include <linux/spinlock.h>
13#include <linux/init.h>
14#include <linux/jiffies.h>
15#include <linux/wait.h>
16#include <linux/mutex.h>
17#include <asm/errno.h>
18#include <asm/chpid.h>
19#include <asm/sclp.h>
20
21#include "cio.h"
22#include "css.h"
23#include "ioasm.h"
24#include "cio_debug.h"
25#include "chp.h"
26
27#define to_channelpath(device) container_of(device, struct channel_path, dev)
28#define CHP_INFO_UPDATE_INTERVAL (1 * HZ)
29
30enum cfg_task_t {
31 cfg_none,
32 cfg_configure,
33 cfg_deconfigure
34};
35
36/* Map for pending configure tasks. */
37static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
38static DEFINE_MUTEX(cfg_lock);
39static int cfg_busy;
40
41/* Map for channel-path status. */
42static struct sclp_chp_info chp_info;
43static DEFINE_MUTEX(info_lock);
44
45/* Time after which channel-path status may be outdated. */
46static unsigned long chp_info_expires;
47
48/* Workqueue to perform pending configure tasks. */
49static struct workqueue_struct *chp_wq;
50static struct work_struct cfg_work;
51
52/* Wait queue for configure completion events. */
53static wait_queue_head_t cfg_wait_queue;
54
55/* Return channel_path struct for given chpid. */
56static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
57{
58 return css[chpid.cssid]->chps[chpid.id];
59}
60
61/* Set vary state for given chpid. */
62static void set_chp_logically_online(struct chp_id chpid, int onoff)
63{
64 chpid_to_chp(chpid)->state = onoff;
65}
66
67/* On success return 0 if channel-path is varied offline, 1 if it is varied
68 * online. Return -ENODEV if channel-path is not registered. */
69int chp_get_status(struct chp_id chpid)
70{
71 return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
72}
73
74/**
75 * chp_get_sch_opm - return opm for subchannel
76 * @sch: subchannel
77 *
78 * Calculate and return the operational path mask (opm) based on the chpids
79 * used by the subchannel and the status of the associated channel-paths.
80 */
81u8 chp_get_sch_opm(struct subchannel *sch)
82{
83 struct chp_id chpid;
84 int opm;
85 int i;
86
87 opm = 0;
88 chp_id_init(&chpid);
89 for (i = 0; i < 8; i++) {
90 opm <<= 1;
91 chpid.id = sch->schib.pmcw.chpid[i];
92 if (chp_get_status(chpid) != 0)
93 opm |= 1;
94 }
95 return opm;
96}
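/*
 * Note: the mask above is built MSB-first, i.e. schib.pmcw.chpid[0]
 * corresponds to bit 0x80 of the returned opm, which matches the
 * "0x80 >> i" convention used for path masks elsewhere in cio.
 */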
97
98/**
99 * chp_is_registered - check if a channel-path is registered
100 * @chpid: channel-path ID
101 *
102 * Return non-zero if a channel-path with the given chpid is registered,
103 * zero otherwise.
104 */
105int chp_is_registered(struct chp_id chpid)
106{
107 return chpid_to_chp(chpid) != NULL;
108}
109
110/*
111 * Function: s390_vary_chpid
112 * Varies the specified chpid online or offline
113 */
114static int s390_vary_chpid(struct chp_id chpid, int on)
115{
116 char dbf_text[15];
117 int status;
118
119 sprintf(dbf_text, on ? "varyon%x.%02x" : "varyoff%x.%02x", chpid.cssid,
120 chpid.id);
121 CIO_TRACE_EVENT(2, dbf_text);
122
123 status = chp_get_status(chpid);
124 if (status < 0) {
125 printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
126 chpid.cssid, chpid.id);
127 return -EINVAL;
128 }
129
130 if (!on && !status) {
131 printk(KERN_ERR "chpid %x.%02x is already offline\n",
132 chpid.cssid, chpid.id);
133 return -EINVAL;
134 }
135
136 set_chp_logically_online(chpid, on);
137 chsc_chp_vary(chpid, on);
138 return 0;
139}
140
141/*
142 * Channel measurement related functions
143 */
144static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
145 loff_t off, size_t count)
146{
147 struct channel_path *chp;
148 unsigned int size;
149
150 chp = to_channelpath(container_of(kobj, struct device, kobj));
151 if (!chp->cmg_chars)
152 return 0;
153
154 size = sizeof(struct cmg_chars);
155
156 if (off > size)
157 return 0;
158 if (off + count > size)
159 count = size - off;
160 memcpy(buf, chp->cmg_chars + off, count);
161 return count;
162}
163
164static struct bin_attribute chp_measurement_chars_attr = {
165 .attr = {
166 .name = "measurement_chars",
167 .mode = S_IRUSR,
168 .owner = THIS_MODULE,
169 },
170 .size = sizeof(struct cmg_chars),
171 .read = chp_measurement_chars_read,
172};
173
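/*
 * The measurement block can be updated by the channel subsystem while it
 * is being copied, so chp_measurement_copy_block() copies the entry twice
 * and repeats until both copies agree in values[0], which presumably acts
 * as an update counter, yielding a consistent snapshot.
 */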
174static void chp_measurement_copy_block(struct cmg_entry *buf,
175 struct channel_subsystem *css,
176 struct chp_id chpid)
177{
178 void *area;
179 struct cmg_entry *entry, reference_buf;
180 int idx;
181
182 if (chpid.id < 128) {
183 area = css->cub_addr1;
184 idx = chpid.id;
185 } else {
186 area = css->cub_addr2;
187 idx = chpid.id - 128;
188 }
189 entry = area + (idx * sizeof(struct cmg_entry));
190 do {
191 memcpy(buf, entry, sizeof(*entry));
192 memcpy(&reference_buf, entry, sizeof(*entry));
193 } while (reference_buf.values[0] != buf->values[0]);
194}
195
196static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
197 loff_t off, size_t count)
198{
199 struct channel_path *chp;
200 struct channel_subsystem *css;
201 unsigned int size;
202
203 chp = to_channelpath(container_of(kobj, struct device, kobj));
204 css = to_css(chp->dev.parent);
205
206 size = sizeof(struct cmg_entry);
207
208 /* Only allow single reads. */
209 if (off || count < size)
210 return 0;
211 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
212 count = size;
213 return count;
214}
215
216static struct bin_attribute chp_measurement_attr = {
217 .attr = {
218 .name = "measurement",
219 .mode = S_IRUSR,
220 .owner = THIS_MODULE,
221 },
222 .size = sizeof(struct cmg_entry),
223 .read = chp_measurement_read,
224};
225
226void chp_remove_cmg_attr(struct channel_path *chp)
227{
228 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
229 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
230}
231
232int chp_add_cmg_attr(struct channel_path *chp)
233{
234 int ret;
235
236 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
237 if (ret)
238 return ret;
239 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
240 if (ret)
241 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
242 return ret;
243}
244
245/*
246 * Files for the channel path entries.
247 */
248static ssize_t chp_status_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct channel_path *chp = container_of(dev, struct channel_path, dev);
252
253 if (!chp)
254 return 0;
255 return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
256 sprintf(buf, "offline\n"));
257}
258
259static ssize_t chp_status_write(struct device *dev,
260 struct device_attribute *attr,
261 const char *buf, size_t count)
262{
263 struct channel_path *cp = container_of(dev, struct channel_path, dev);
264 char cmd[10];
265 int num_args;
266 int error;
267
268 num_args = sscanf(buf, "%5s", cmd);
269 if (!num_args)
270 return count;
271
272 if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
273 error = s390_vary_chpid(cp->chpid, 1);
274 else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
275 error = s390_vary_chpid(cp->chpid, 0);
276 else
277 error = -EINVAL;
278
279 return error < 0 ? error : count;
280
281}
282
283static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
284
285static ssize_t chp_configure_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 struct channel_path *cp;
289 int status;
290
291 cp = container_of(dev, struct channel_path, dev);
292 status = chp_info_get_status(cp->chpid);
293 if (status < 0)
294 return status;
295
296 return snprintf(buf, PAGE_SIZE, "%d\n", status);
297}
298
299static int cfg_wait_idle(void);
300
301static ssize_t chp_configure_write(struct device *dev,
302 struct device_attribute *attr,
303 const char *buf, size_t count)
304{
305 struct channel_path *cp;
306 int val;
307 char delim;
308
309 if (sscanf(buf, "%d %c", &val, &delim) != 1)
310 return -EINVAL;
311 if (val != 0 && val != 1)
312 return -EINVAL;
313 cp = container_of(dev, struct channel_path, dev);
314 chp_cfg_schedule(cp->chpid, val);
315 cfg_wait_idle();
316
317 return count;
318}
319
320static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
321
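/*
 * Usage sketch (sysfs path assumed, typically /sys/devices/css0/chp0.<id>/):
 * writing "1" or "0" to the "configure" attribute schedules a configure or
 * deconfigure request via chp_cfg_schedule() and then blocks in
 * cfg_wait_idle() until the work queue has drained.  Reading the attribute
 * returns the state reported by chp_info_get_status() (0 standby,
 * 1 configured, 2 reserved, 3 not recognized).
 */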
322static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
323 char *buf)
324{
325 struct channel_path *chp = container_of(dev, struct channel_path, dev);
326
327 if (!chp)
328 return 0;
329 return sprintf(buf, "%x\n", chp->desc.desc);
330}
331
332static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
333
334static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
335 char *buf)
336{
337 struct channel_path *chp = to_channelpath(dev);
338
339 if (!chp)
340 return 0;
341 if (chp->cmg == -1) /* channel measurements not available */
342 return sprintf(buf, "unknown\n");
343 return sprintf(buf, "%x\n", chp->cmg);
344}
345
346static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
347
348static ssize_t chp_shared_show(struct device *dev,
349 struct device_attribute *attr, char *buf)
350{
351 struct channel_path *chp = to_channelpath(dev);
352
353 if (!chp)
354 return 0;
355 if (chp->shared == -1) /* channel measurements not available */
356 return sprintf(buf, "unknown\n");
357 return sprintf(buf, "%x\n", chp->shared);
358}
359
360static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
361
362static struct attribute * chp_attrs[] = {
363 &dev_attr_status.attr,
364 &dev_attr_configure.attr,
365 &dev_attr_type.attr,
366 &dev_attr_cmg.attr,
367 &dev_attr_shared.attr,
368 NULL,
369};
370
371static struct attribute_group chp_attr_group = {
372 .attrs = chp_attrs,
373};
374
375static void chp_release(struct device *dev)
376{
377 struct channel_path *cp;
378
379 cp = container_of(dev, struct channel_path, dev);
380 kfree(cp);
381}
382
383/**
384 * chp_new - register a new channel-path
385 * @chpid: channel-path ID
386 *
387 * Create and register data structure representing new channel-path. Return
388 * zero on success, non-zero otherwise.
389 */
390int chp_new(struct chp_id chpid)
391{
392 struct channel_path *chp;
393 int ret;
394
395 if (chp_is_registered(chpid))
396 return 0;
397 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
398 if (!chp)
399 return -ENOMEM;
400
401 /* fill in status, etc. */
402 chp->chpid = chpid;
403 chp->state = 1;
404 chp->dev.parent = &css[chpid.cssid]->device;
405 chp->dev.release = chp_release;
406 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
407 chpid.id);
408
409 /* Obtain channel path description and fill it in. */
410 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
411 if (ret)
412 goto out_free;
413 if ((chp->desc.flags & 0x80) == 0) {
414 ret = -ENODEV;
415 goto out_free;
416 }
417 /* Get channel-measurement characteristics. */
418 if (css_characteristics_avail && css_chsc_characteristics.scmc
419 && css_chsc_characteristics.secm) {
420 ret = chsc_get_channel_measurement_chars(chp);
421 if (ret)
422 goto out_free;
423 } else {
424 static int msg_done;
425
426 if (!msg_done) {
427 printk(KERN_WARNING "cio: Channel measurements not "
428 "available, continuing.\n");
429 msg_done = 1;
430 }
431 chp->cmg = -1;
432 }
433
434 /* make it known to the system */
435 ret = device_register(&chp->dev);
436 if (ret) {
437 printk(KERN_WARNING "%s: could not register %x.%02x\n",
438 __func__, chpid.cssid, chpid.id);
439 goto out_free;
440 }
441 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
442 if (ret) {
443 device_unregister(&chp->dev);
444 goto out_free;
445 }
446 mutex_lock(&css[chpid.cssid]->mutex);
447 if (css[chpid.cssid]->cm_enabled) {
448 ret = chp_add_cmg_attr(chp);
449 if (ret) {
450 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
451 device_unregister(&chp->dev);
452 mutex_unlock(&css[chpid.cssid]->mutex);
453 goto out_free;
454 }
455 }
456 css[chpid.cssid]->chps[chpid.id] = chp;
457 mutex_unlock(&css[chpid.cssid]->mutex);
458 return ret;
459out_free:
460 kfree(chp);
461 return ret;
462}
463
464/**
465 * chp_get_chp_desc - return newly allocated channel-path description
466 * @chpid: channel-path ID
467 *
468 * On success return a newly allocated copy of the channel-path description
469 * data associated with the given channel-path ID. Return %NULL on error.
470 */
471void *chp_get_chp_desc(struct chp_id chpid)
472{
473 struct channel_path *chp;
474 struct channel_path_desc *desc;
475
476 chp = chpid_to_chp(chpid);
477 if (!chp)
478 return NULL;
479 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
480 if (!desc)
481 return NULL;
482 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
483 return desc;
484}
485
486/**
487 * chp_process_crw - process channel-path status change
488 * @id: channel-path ID number
489 * @status: non-zero if channel-path has become available, zero otherwise
490 *
491 * Handle channel-report-words indicating that the status of a channel-path
492 * has changed.
493 */
494void chp_process_crw(int id, int status)
495{
496 struct chp_id chpid;
497
498 chp_id_init(&chpid);
499 chpid.id = id;
500 if (status) {
501 if (!chp_is_registered(chpid))
502 chp_new(chpid);
503 chsc_chp_online(chpid);
504 } else
505 chsc_chp_offline(chpid);
506}
507
508static inline int info_bit_num(struct chp_id id)
509{
510 return id.id + id.cssid * (__MAX_CHPID + 1);
511}
512
513/* Force chp_info refresh on next call to info_validate(). */
514static void info_expire(void)
515{
516 mutex_lock(&info_lock);
517 chp_info_expires = jiffies - 1;
518 mutex_unlock(&info_lock);
519}
520
521/* Ensure that chp_info is up-to-date. */
522static int info_update(void)
523{
524 int rc;
525
526 mutex_lock(&info_lock);
527 rc = 0;
528 if (time_after(jiffies, chp_info_expires)) {
529 /* Data is too old, update. */
530 rc = sclp_chp_read_info(&chp_info);
531 chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
532 }
533 mutex_unlock(&info_lock);
534
535 return rc;
536}
537
538/**
539 * chp_info_get_status - retrieve configure status of a channel-path
540 * @chpid: channel-path ID
541 *
542 * On success, return 0 for standby, 1 for configured, 2 for reserved,
543 * 3 for not recognized. Return negative error code on error.
544 */
545int chp_info_get_status(struct chp_id chpid)
546{
547 int rc;
548 int bit;
549
550 rc = info_update();
551 if (rc)
552 return rc;
553
554 bit = info_bit_num(chpid);
555 mutex_lock(&info_lock);
556 if (!chp_test_bit(chp_info.recognized, bit))
557 rc = CHP_STATUS_NOT_RECOGNIZED;
558 else if (chp_test_bit(chp_info.configured, bit))
559 rc = CHP_STATUS_CONFIGURED;
560 else if (chp_test_bit(chp_info.standby, bit))
561 rc = CHP_STATUS_STANDBY;
562 else
563 rc = CHP_STATUS_RESERVED;
564 mutex_unlock(&info_lock);
565
566 return rc;
567}
568
569/* Return configure task for chpid. */
570static enum cfg_task_t cfg_get_task(struct chp_id chpid)
571{
572 return chp_cfg_task[chpid.cssid][chpid.id];
573}
574
575/* Set configure task for chpid. */
576static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
577{
578 chp_cfg_task[chpid.cssid][chpid.id] = cfg;
579}
580
581/* Perform one configure/deconfigure request. Reschedule work function until
582 * last request. */
583static void cfg_func(struct work_struct *work)
584{
585 struct chp_id chpid;
586 enum cfg_task_t t;
587
588 mutex_lock(&cfg_lock);
589 t = cfg_none;
590 chp_id_for_each(&chpid) {
591 t = cfg_get_task(chpid);
592 if (t != cfg_none) {
593 cfg_set_task(chpid, cfg_none);
594 break;
595 }
596 }
597 mutex_unlock(&cfg_lock);
598
599 switch (t) {
600 case cfg_configure:
601 sclp_chp_configure(chpid);
602 info_expire();
603 chsc_chp_online(chpid);
604 break;
605 case cfg_deconfigure:
606 sclp_chp_deconfigure(chpid);
607 info_expire();
608 chsc_chp_offline(chpid);
609 break;
610 case cfg_none:
611 /* Get updated information after last change. */
612 info_update();
613 mutex_lock(&cfg_lock);
614 cfg_busy = 0;
615 mutex_unlock(&cfg_lock);
616 wake_up_interruptible(&cfg_wait_queue);
617 return;
618 }
619 queue_work(chp_wq, &cfg_work);
620}
621
622/**
623 * chp_cfg_schedule - schedule chpid configuration request
624 * @chpid: channel-path ID
625 * @configure: Non-zero for configure, zero for deconfigure
626 *
627 * Schedule a channel-path configuration/deconfiguration request.
628 */
629void chp_cfg_schedule(struct chp_id chpid, int configure)
630{
631 CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
632 configure);
633 mutex_lock(&cfg_lock);
634 cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
635 cfg_busy = 1;
636 mutex_unlock(&cfg_lock);
637 queue_work(chp_wq, &cfg_work);
638}
639
640/**
641 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
642 * @chpid: channel-path ID
643 *
644 * Cancel an active channel-path deconfiguration request if it has not yet
645 * been performed.
646 */
647void chp_cfg_cancel_deconfigure(struct chp_id chpid)
648{
649 CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
650 mutex_lock(&cfg_lock);
651 if (cfg_get_task(chpid) == cfg_deconfigure)
652 cfg_set_task(chpid, cfg_none);
653 mutex_unlock(&cfg_lock);
654}
655
656static int cfg_wait_idle(void)
657{
658 if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
659 return -ERESTARTSYS;
660 return 0;
661}
662
663static int __init chp_init(void)
664{
665 struct chp_id chpid;
666
667 chp_wq = create_singlethread_workqueue("cio_chp");
668 if (!chp_wq)
669 return -ENOMEM;
670 INIT_WORK(&cfg_work, cfg_func);
671 init_waitqueue_head(&cfg_wait_queue);
672 if (info_update())
673 return 0;
674 /* Register available channel-paths. */
675 chp_id_for_each(&chpid) {
676 if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
677 chp_new(chpid);
678 }
679
680 return 0;
681}
682
683subsys_initcall(chp_init);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 000000000000..65286563c592
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/cio/chp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_CHP_H
9#define S390_CHP_H S390_CHP_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/chpid.h>
14#include "chsc.h"
15
16#define CHP_STATUS_STANDBY 0
17#define CHP_STATUS_CONFIGURED 1
18#define CHP_STATUS_RESERVED 2
19#define CHP_STATUS_NOT_RECOGNIZED 3
20
21static inline int chp_test_bit(u8 *bitmap, int num)
22{
23 int byte = num >> 3;
24 int mask = 128 >> (num & 7);
25
26 return (bitmap[byte] & mask) ? 1 : 0;
27}
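/*
 * Example: bits are numbered MSB-first, so chp_test_bit(bitmap, 0) tests
 * bitmap[0] & 0x80 and chp_test_bit(bitmap, 9) tests bitmap[1] & 0x40.
 * This matches the recognized/configured/standby masks of struct
 * sclp_chp_info as consumed by chp_info_get_status() in chp.c.
 */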
28
29
30struct channel_path {
31 struct chp_id chpid;
32 int state;
33 struct channel_path_desc desc;
34 /* Channel-measurement related stuff: */
35 int cmg;
36 int shared;
37 void *cmg_chars;
38 struct device dev;
39};
40
41int chp_get_status(struct chp_id chpid);
42u8 chp_get_sch_opm(struct subchannel *sch);
43int chp_is_registered(struct chp_id chpid);
44void *chp_get_chp_desc(struct chp_id chpid);
45void chp_process_crw(int id, int available);
46void chp_remove_cmg_attr(struct channel_path *chp);
47int chp_add_cmg_attr(struct channel_path *chp);
48int chp_new(struct chp_id chpid);
49void chp_cfg_schedule(struct chp_id chpid, int configure);
50void chp_cfg_cancel_deconfigure(struct chp_id chpid);
51int chp_info_get_status(struct chp_id chpid);
52
53#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e3817..ea92ac4d6577 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
15#include <linux/device.h> 15#include <linux/device.h>
16 16
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "css.h" 20#include "css.h"
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "ioasm.h" 23#include "ioasm.h"
24#include "chp.h"
23#include "chsc.h" 25#include "chsc.h"
24 26
25static void *sei_page; 27static void *sei_page;
26 28
27static int new_channel_path(int chpid); 29struct chsc_ssd_area {
28 30 struct chsc_header request;
29static inline void 31 u16 :10;
30set_chp_logically_online(int chp, int onoff) 32 u16 ssid:2;
31{ 33 u16 :4;
32 css[0]->chps[chp]->state = onoff; 34 u16 f_sch; /* first subchannel */
33} 35 u16 :16;
34 36 u16 l_sch; /* last subchannel */
35static int 37 u32 :32;
36get_chp_status(int chp) 38 struct chsc_header response;
37{ 39 u32 :32;
38 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); 40 u8 sch_valid : 1;
39} 41 u8 dev_valid : 1;
40 42 u8 st : 3; /* subchannel type */
41void 43 u8 zeroes : 3;
42chsc_validate_chpids(struct subchannel *sch) 44 u8 unit_addr; /* unit address */
43{ 45 u16 devno; /* device number */
44 int mask, chp; 46 u8 path_mask;
45 47 u8 fla_valid_mask;
46 for (chp = 0; chp <= 7; chp++) { 48 u16 sch; /* subchannel */
47 mask = 0x80 >> chp; 49 u8 chpid[8]; /* chpids 0-7 */
48 if (!get_chp_status(sch->schib.pmcw.chpid[chp])) 50 u16 fla[8]; /* full link addresses 0-7 */
49 /* disable using this path */ 51} __attribute__ ((packed));
50 sch->opm &= ~mask;
51 }
52}
53
54void
55chpid_is_actually_online(int chp)
56{
57 int state;
58
59 state = get_chp_status(chp);
60 if (state < 0) {
61 need_rescan = 1;
62 queue_work(slow_path_wq, &slow_path_work);
63 } else
64 WARN_ON(!state);
65}
66 52
67/* FIXME: this is _always_ called for every subchannel. shouldn't we 53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
68 * process more than one at a time? */
69static int
70chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
71{ 54{
72 int ccode, j; 55 unsigned long page;
73 56 struct chsc_ssd_area *ssd_area;
74 struct { 57 int ccode;
75 struct chsc_header request; 58 int ret;
76 u16 reserved1a:10; 59 int i;
77 u16 ssid:2; 60 int mask;
78 u16 reserved1b:4;
79 u16 f_sch; /* first subchannel */
80 u16 reserved2;
81 u16 l_sch; /* last subchannel */
82 u32 reserved3;
83 struct chsc_header response;
84 u32 reserved4;
85 u8 sch_valid : 1;
86 u8 dev_valid : 1;
87 u8 st : 3; /* subchannel type */
88 u8 zeroes : 3;
89 u8 unit_addr; /* unit address */
90 u16 devno; /* device number */
91 u8 path_mask;
92 u8 fla_valid_mask;
93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */
96 } __attribute__ ((packed)) *ssd_area;
97
98 ssd_area = page;
99 61
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63 if (!page)
64 return -ENOMEM;
65 ssd_area = (struct chsc_ssd_area *) page;
100 ssd_area->request.length = 0x0010; 66 ssd_area->request.length = 0x0010;
101 ssd_area->request.code = 0x0004; 67 ssd_area->request.code = 0x0004;
102 68 ssd_area->ssid = schid.ssid;
103 ssd_area->ssid = sch->schid.ssid; 69 ssd_area->f_sch = schid.sch_no;
104 ssd_area->f_sch = sch->schid.sch_no; 70 ssd_area->l_sch = schid.sch_no;
105 ssd_area->l_sch = sch->schid.sch_no;
106 71
107 ccode = chsc(ssd_area); 72 ccode = chsc(ssd_area);
73 /* Check response. */
108 if (ccode > 0) { 74 if (ccode > 0) {
109 pr_debug("chsc returned with ccode = %d\n", ccode); 75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
110 return (ccode == 3) ? -ENODEV : -EBUSY; 76 goto out_free;
111 } 77 }
112 78 if (ssd_area->response.code != 0x0001) {
113 switch (ssd_area->response.code) { 79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
114 case 0x0001: /* everything ok */ 80 schid.ssid, schid.sch_no,
115 break;
116 case 0x0002:
117 CIO_CRW_EVENT(2, "Invalid command!\n");
118 return -EINVAL;
119 case 0x0003:
120 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
121 return -EINVAL;
122 case 0x0004:
123 CIO_CRW_EVENT(2, "Model does not provide ssd\n");
124 return -EOPNOTSUPP;
125 default:
126 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
127 ssd_area->response.code); 81 ssd_area->response.code);
128 return -EIO; 82 ret = -EIO;
83 goto out_free;
129 } 84 }
130 85 if (!ssd_area->sch_valid) {
131 /* 86 ret = -ENODEV;
132 * ssd_area->st stores the type of the detected 87 goto out_free;
133 * subchannel, with the following definitions:
134 *
135 * 0: I/O subchannel: All fields have meaning
136 * 1: CHSC subchannel: Only sch_val, st and sch
137 * have meaning
138 * 2: Message subchannel: All fields except unit_addr
139 * have meaning
140 * 3: ADM subchannel: Only sch_val, st and sch
141 * have meaning
142 *
143 * Other types are currently undefined.
144 */
145 if (ssd_area->st > 3) { /* uhm, that looks strange... */
146 CIO_CRW_EVENT(0, "Strange subchannel type %d"
147 " for sch 0.%x.%04x\n", ssd_area->st,
148 sch->schid.ssid, sch->schid.sch_no);
149 /*
150 * There may have been a new subchannel type defined in the
151 * time since this code was written; since we don't know which
152 * fields have meaning and what to do with it we just jump out
153 */
154 return 0;
155 } else {
156 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
157 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
158 sch->schid.ssid, sch->schid.sch_no,
159 type[ssd_area->st]);
160
161 sch->ssd_info.valid = 1;
162 sch->ssd_info.type = ssd_area->st;
163 } 88 }
164 89 /* Copy data */
165 if (ssd_area->st == 0 || ssd_area->st == 2) { 90 ret = 0;
166 for (j = 0; j < 8; j++) { 91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
167 if (!((0x80 >> j) & ssd_area->path_mask & 92 if ((ssd_area->st != 0) && (ssd_area->st != 2))
168 ssd_area->fla_valid_mask)) 93 goto out_free;
169 continue; 94 ssd->path_mask = ssd_area->path_mask;
170 sch->ssd_info.chpid[j] = ssd_area->chpid[j]; 95 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
171 sch->ssd_info.fla[j] = ssd_area->fla[j]; 96 for (i = 0; i < 8; i++) {
97 mask = 0x80 >> i;
98 if (ssd_area->path_mask & mask) {
99 chp_id_init(&ssd->chpid[i]);
100 ssd->chpid[i].id = ssd_area->chpid[i];
172 } 101 }
102 if (ssd_area->fla_valid_mask & mask)
103 ssd->fla[i] = ssd_area->fla[i];
173 } 104 }
174 return 0; 105out_free:
106 free_page(page);
107 return ret;
175} 108}
176 109
177int 110static int check_for_io_on_path(struct subchannel *sch, int mask)
178css_get_ssd_info(struct subchannel *sch)
179{ 111{
180 int ret; 112 int cc;
181 void *page;
182 113
183 page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 114 cc = stsch(sch->schid, &sch->schib);
184 if (!page) 115 if (cc)
185 return -ENOMEM; 116 return 0;
186 spin_lock_irq(sch->lock); 117 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
187 ret = chsc_get_sch_desc_irq(sch, page); 118 return 1;
188 if (ret) { 119 return 0;
189 static int cio_chsc_err_msg; 120}
190 121
191 if (!cio_chsc_err_msg) { 122static void terminate_internal_io(struct subchannel *sch)
192 printk(KERN_ERR 123{
193 "chsc_get_sch_descriptions:" 124 if (cio_clear(sch)) {
194 " Error %d while doing chsc; " 125 /* Recheck device in case clear failed. */
195 "processing some machine checks may " 126 sch->lpm = 0;
196 "not work\n", ret); 127 if (device_trigger_verify(sch) != 0)
197 cio_chsc_err_msg = 1; 128 css_schedule_eval(sch->schid);
198 } 129 return;
199 }
200 spin_unlock_irq(sch->lock);
201 free_page((unsigned long)page);
202 if (!ret) {
203 int j, chpid, mask;
204 /* Allocate channel path structures, if needed. */
205 for (j = 0; j < 8; j++) {
206 mask = 0x80 >> j;
207 chpid = sch->ssd_info.chpid[j];
208 if ((sch->schib.pmcw.pim & mask) &&
209 (get_chp_status(chpid) < 0))
210 new_channel_path(chpid);
211 }
212 } 130 }
213 return ret; 131 /* Request retry of internal operation. */
132 device_set_intretry(sch);
133 /* Call handler. */
134 if (sch->driver && sch->driver->termination)
135 sch->driver->termination(&sch->dev);
214} 136}
215 137
216static int 138static int
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
219 int j; 141 int j;
220 int mask; 142 int mask;
221 struct subchannel *sch; 143 struct subchannel *sch;
222 struct channel_path *chpid; 144 struct chp_id *chpid;
223 struct schib schib; 145 struct schib schib;
224 146
225 sch = to_subchannel(dev); 147 sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
243 if (sch->schib.pmcw.pim == 0x80) 165 if (sch->schib.pmcw.pim == 0x80)
244 goto out_unreg; 166 goto out_unreg;
245 167
246 if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && 168 if (check_for_io_on_path(sch, mask)) {
247 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && 169 if (device_is_online(sch))
248 (sch->schib.pmcw.lpum == mask)) { 170 device_kill_io(sch);
249 int cc; 171 else {
250 172 terminate_internal_io(sch);
251 cc = cio_clear(sch); 173 /* Re-start path verification. */
252 if (cc == -ENODEV) 174 if (sch->driver && sch->driver->verify)
175 sch->driver->verify(&sch->dev);
176 }
177 } else {
178 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify)
180 sch->driver->verify(&sch->dev);
181 else if (sch->lpm == mask)
253 goto out_unreg; 182 goto out_unreg;
254 /* Request retry of internal operation. */
255 device_set_intretry(sch);
256 /* Call handler. */
257 if (sch->driver && sch->driver->termination)
258 sch->driver->termination(&sch->dev);
259 goto out_unlock;
260 } 183 }
261 184
262 /* trigger path verification. */
263 if (sch->driver && sch->driver->verify)
264 sch->driver->verify(&sch->dev);
265 else if (sch->lpm == mask)
266 goto out_unreg;
267out_unlock:
268 spin_unlock_irq(sch->lock); 185 spin_unlock_irq(sch->lock);
269 return 0; 186 return 0;
187
270out_unreg: 188out_unreg:
271 spin_unlock_irq(sch->lock);
272 sch->lpm = 0; 189 sch->lpm = 0;
273 if (css_enqueue_subchannel_slow(sch->schid)) { 190 spin_unlock_irq(sch->lock);
274 css_clear_subchannel_slow_list(); 191 css_schedule_eval(sch->schid);
275 need_rescan = 1;
276 }
277 return 0; 192 return 0;
278} 193}
279 194
280static void 195void chsc_chp_offline(struct chp_id chpid)
281s390_set_chpid_offline( __u8 chpid)
282{ 196{
283 char dbf_txt[15]; 197 char dbf_txt[15];
284 struct device *dev;
285 198
286 sprintf(dbf_txt, "chpr%x", chpid); 199 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
287 CIO_TRACE_EVENT(2, dbf_txt); 200 CIO_TRACE_EVENT(2, dbf_txt);
288 201
289 if (get_chp_status(chpid) <= 0) 202 if (chp_get_status(chpid) <= 0)
290 return; 203 return;
291 dev = get_device(&css[0]->chps[chpid]->dev); 204 bus_for_each_dev(&css_bus_type, NULL, &chpid,
292 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
293 s390_subchannel_remove_chpid); 205 s390_subchannel_remove_chpid);
294
295 if (need_rescan || css_slow_subchannels_exist())
296 queue_work(slow_path_wq, &slow_path_work);
297 put_device(dev);
298}
299
300struct res_acc_data {
301 struct channel_path *chp;
302 u32 fla_mask;
303 u16 fla;
304};
305
306static int
307s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
308{
309 int found;
310 int chp;
311 int ccode;
312
313 found = 0;
314 for (chp = 0; chp <= 7; chp++)
315 /*
316 * check if chpid is in information updated by ssd
317 */
318 if (sch->ssd_info.valid &&
319 sch->ssd_info.chpid[chp] == res_data->chp->id &&
320 (sch->ssd_info.fla[chp] & res_data->fla_mask)
321 == res_data->fla) {
322 found = 1;
323 break;
324 }
325
326 if (found == 0)
327 return 0;
328
329 /*
330 * Do a stsch to update our subchannel structure with the
331 * new path information and eventually check for logically
332 * offline chpids.
333 */
334 ccode = stsch(sch->schid, &sch->schib);
335 if (ccode > 0)
336 return 0;
337
338 return 0x80 >> chp;
339} 206}
340 207
341static int 208static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 209s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 210{
344 struct schib schib; 211 struct schib schib;
345 int ret;
346 /* 212 /*
347 * We don't know the device yet, but since a path 213 * We don't know the device yet, but since a path
348 * may be available now to the device we'll have 214 * may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
353 */ 219 */
354 if (stsch_err(schid, &schib)) 220 if (stsch_err(schid, &schib))
355 /* We're through */ 221 /* We're through */
356 return need_rescan ? -EAGAIN : -ENXIO; 222 return -ENXIO;
357 223
358 /* Put it on the slow path. */ 224 /* Put it on the slow path. */
359 ret = css_enqueue_subchannel_slow(schid); 225 css_schedule_eval(schid);
360 if (ret) { 226 return 0;
361 css_clear_subchannel_slow_list(); 227}
362 need_rescan = 1; 228
363 return -EAGAIN; 229struct res_acc_data {
230 struct chp_id chpid;
231 u32 fla_mask;
232 u16 fla;
233};
234
235static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
236 struct res_acc_data *data)
237{
238 int i;
239 int mask;
240
241 for (i = 0; i < 8; i++) {
242 mask = 0x80 >> i;
243 if (!(ssd->path_mask & mask))
244 continue;
245 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
246 continue;
247 if ((ssd->fla_valid_mask & mask) &&
248 ((ssd->fla[i] & data->fla_mask) != data->fla))
249 continue;
250 return mask;
364 } 251 }
365 return 0; 252 return 0;
366} 253}
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
379 return s390_process_res_acc_new_sch(schid); 266 return s390_process_res_acc_new_sch(schid);
380 267
381 spin_lock_irq(sch->lock); 268 spin_lock_irq(sch->lock);
382 269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
383 chp_mask = s390_process_res_acc_sch(res_data, sch); 270 if (chp_mask == 0)
384 271 goto out;
385 if (chp_mask == 0) { 272 if (stsch(sch->schid, &sch->schib))
386 spin_unlock_irq(sch->lock); 273 goto out;
387 put_device(&sch->dev);
388 return 0;
389 }
390 old_lpm = sch->lpm; 274 old_lpm = sch->lpm;
391 sch->lpm = ((sch->schib.pmcw.pim & 275 sch->lpm = ((sch->schib.pmcw.pim &
392 sch->schib.pmcw.pam & 276 sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
396 device_trigger_reprobe(sch); 280 device_trigger_reprobe(sch);
397 else if (sch->driver && sch->driver->verify) 281 else if (sch->driver && sch->driver->verify)
398 sch->driver->verify(&sch->dev); 282 sch->driver->verify(&sch->dev);
399 283out:
400 spin_unlock_irq(sch->lock); 284 spin_unlock_irq(sch->lock);
401 put_device(&sch->dev); 285 put_device(&sch->dev);
402 return 0; 286 return 0;
403} 287}
404 288
405 289static void s390_process_res_acc (struct res_acc_data *res_data)
406static int
407s390_process_res_acc (struct res_acc_data *res_data)
408{ 290{
409 int rc;
410 char dbf_txt[15]; 291 char dbf_txt[15];
411 292
412 sprintf(dbf_txt, "accpr%x", res_data->chp->id); 293 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
294 res_data->chpid.id);
413 CIO_TRACE_EVENT( 2, dbf_txt); 295 CIO_TRACE_EVENT( 2, dbf_txt);
414 if (res_data->fla != 0) { 296 if (res_data->fla != 0) {
415 sprintf(dbf_txt, "fla%x", res_data->fla); 297 sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
423 * The more information we have (info), the less scanning 305 * The more information we have (info), the less scanning
424 * will we have to do. 306 * will we have to do.
425 */ 307 */
426 rc = for_each_subchannel(__s390_process_res_acc, res_data); 308 for_each_subchannel(__s390_process_res_acc, res_data);
427 if (css_slow_subchannels_exist())
428 rc = -EAGAIN;
429 else if (rc != -EAGAIN)
430 rc = 0;
431 return rc;
432} 309}
433 310
434static int 311static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
480 /* ccdf has to be big enough for a link-incident record */ 357 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed)); 358} __attribute__ ((packed));
482 359
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) 360static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{ 361{
485 int chpid; 362 struct chp_id chpid;
363 int id;
486 364
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", 365 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid); 366 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4) 367 if (sei_area->rs != 4)
490 return 0; 368 return;
491 chpid = __get_chpid_from_lir(sei_area->ccdf); 369 id = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0) 370 if (id < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); 371 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else 372 else {
495 s390_set_chpid_offline(chpid); 373 chp_id_init(&chpid);
496 374 chpid.id = id;
497 return 0; 375 chsc_chp_offline(chpid);
376 }
498} 377}
499 378
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 379static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
501{ 380{
502 struct res_acc_data res_data; 381 struct res_acc_data res_data;
503 struct device *dev; 382 struct chp_id chpid;
504 int status; 383 int status;
505 int rc;
506 384
507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " 385 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); 386 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
509 if (sei_area->rs != 4) 387 if (sei_area->rs != 4)
510 return 0; 388 return;
389 chp_id_init(&chpid);
390 chpid.id = sei_area->rsid;
511 /* allocate a new channel path structure, if needed */ 391 /* allocate a new channel path structure, if needed */
512 status = get_chp_status(sei_area->rsid); 392 status = chp_get_status(chpid);
513 if (status < 0) 393 if (status < 0)
514 new_channel_path(sei_area->rsid); 394 chp_new(chpid);
515 else if (!status) 395 else if (!status)
516 return 0; 396 return;
517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
518 memset(&res_data, 0, sizeof(struct res_acc_data)); 397 memset(&res_data, 0, sizeof(struct res_acc_data));
519 res_data.chp = to_channelpath(dev); 398 res_data.chpid = chpid;
520 if ((sei_area->vf & 0xc0) != 0) { 399 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla; 400 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0) 401 if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
526 /* link address */ 405 /* link address */
527 res_data.fla_mask = 0xff00; 406 res_data.fla_mask = 0xff00;
528 } 407 }
529 rc = s390_process_res_acc(&res_data); 408 s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533} 409}
534 410
535static int chsc_process_sei(struct chsc_sei_area *sei_area) 411struct chp_config_data {
412 u8 map[32];
413 u8 op;
414 u8 pc;
415};
416
417static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
536{ 418{
537 int rc; 419 struct chp_config_data *data;
420 struct chp_id chpid;
421 int num;
422
423 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
424 if (sei_area->rs != 0)
425 return;
426 data = (struct chp_config_data *) &(sei_area->ccdf);
427 chp_id_init(&chpid);
428 for (num = 0; num <= __MAX_CHPID; num++) {
429 if (!chp_test_bit(data->map, num))
430 continue;
431 chpid.id = num;
432 printk(KERN_WARNING "cio: processing configure event %d for "
433 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
434 switch (data->op) {
435 case 0:
436 chp_cfg_schedule(chpid, 1);
437 break;
438 case 1:
439 chp_cfg_schedule(chpid, 0);
440 break;
441 case 2:
442 chp_cfg_cancel_deconfigure(chpid);
443 break;
444 }
445 }
446}
538 447
448static void chsc_process_sei(struct chsc_sei_area *sei_area)
449{
539 /* Check if we might have lost some information. */ 450 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40) 451 if (sei_area->flags & 0x40) {
541 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 452 CIO_CRW_EVENT(2, "chsc: event overflow\n");
453 css_schedule_eval_all();
454 }
542 /* which kind of information was stored? */ 455 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) { 456 switch (sei_area->cc) {
545 case 1: /* link incident*/ 457 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area); 458 chsc_process_sei_link_incident(sei_area);
547 break; 459 break;
548 case 2: /* i/o resource accessibiliy */ 460 case 2: /* i/o resource accessibiliy */
549 rc = chsc_process_sei_res_acc(sei_area); 461 chsc_process_sei_res_acc(sei_area);
462 break;
463 case 8: /* channel-path-configuration notification */
464 chsc_process_sei_chp_config(sei_area);
550 break; 465 break;
551 default: /* other stuff */ 466 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 467 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc); 468 sei_area->cc);
554 break; 469 break;
555 } 470 }
556
557 return rc;
558} 471}
559 472
560int chsc_process_crw(void) 473void chsc_process_crw(void)
561{ 474{
562 struct chsc_sei_area *sei_area; 475 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
565 476
566 if (!sei_page) 477 if (!sei_page)
567 return 0; 478 return;
568 /* Access to sei_page is serialized through machine check handler 479 /* Access to sei_page is serialized through machine check handler
569 * thread, so no need for locking. */ 480 * thread, so no need for locking. */
570 sei_area = sei_page; 481 sei_area = sei_page;
571 482
572 CIO_TRACE_EVENT( 2, "prcss"); 483 CIO_TRACE_EVENT( 2, "prcss");
573 ret = 0;
574 do { 484 do {
575 memset(sei_area, 0, sizeof(*sei_area)); 485 memset(sei_area, 0, sizeof(*sei_area));
576 sei_area->request.length = 0x0010; 486 sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
580 490
581 if (sei_area->response.code == 0x0001) { 491 if (sei_area->response.code == 0x0001) {
582 CIO_CRW_EVENT(4, "chsc: sei successful\n"); 492 CIO_CRW_EVENT(4, "chsc: sei successful\n");
583 rc = chsc_process_sei(sei_area); 493 chsc_process_sei(sei_area);
584 if (rc)
585 ret = rc;
586 } else { 494 } else {
587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 495 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
588 sei_area->response.code); 496 sei_area->response.code);
589 ret = 0;
590 break; 497 break;
591 } 498 }
592 } while (sei_area->flags & 0x80); 499 } while (sei_area->flags & 0x80);
593
594 return ret;
595} 500}
596 501
597static int 502static int
598__chp_add_new_sch(struct subchannel_id schid) 503__chp_add_new_sch(struct subchannel_id schid)
599{ 504{
600 struct schib schib; 505 struct schib schib;
601 int ret;
602 506
603 if (stsch_err(schid, &schib)) 507 if (stsch_err(schid, &schib))
604 /* We're through */ 508 /* We're through */
605 return need_rescan ? -EAGAIN : -ENXIO; 509 return -ENXIO;
606 510
607 /* Put it on the slow path. */ 511 /* Put it on the slow path. */
608 ret = css_enqueue_subchannel_slow(schid); 512 css_schedule_eval(schid);
609 if (ret) {
610 css_clear_subchannel_slow_list();
611 need_rescan = 1;
612 return -EAGAIN;
613 }
614 return 0; 513 return 0;
615} 514}
616 515
@@ -619,10 +518,10 @@ static int
619__chp_add(struct subchannel_id schid, void *data) 518__chp_add(struct subchannel_id schid, void *data)
620{ 519{
621 int i, mask; 520 int i, mask;
622 struct channel_path *chp; 521 struct chp_id *chpid;
623 struct subchannel *sch; 522 struct subchannel *sch;
624 523
625 chp = data; 524 chpid = data;
626 sch = get_subchannel_by_schid(schid); 525 sch = get_subchannel_by_schid(schid);
627 if (!sch) 526 if (!sch)
628 /* Check if the subchannel is now available. */ 527 /* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
631 for (i=0; i<8; i++) { 530 for (i=0; i<8; i++) {
632 mask = 0x80 >> i; 531 mask = 0x80 >> i;
633 if ((sch->schib.pmcw.pim & mask) && 532 if ((sch->schib.pmcw.pim & mask) &&
634 (sch->schib.pmcw.chpid[i] == chp->id)) { 533 (sch->schib.pmcw.chpid[i] == chpid->id)) {
635 if (stsch(sch->schid, &sch->schib) != 0) { 534 if (stsch(sch->schid, &sch->schib) != 0) {
636 /* Endgame. */ 535 /* Endgame. */
637 spin_unlock_irq(sch->lock); 536 spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
657 return 0; 556 return 0;
658} 557}
659 558
660static int 559void chsc_chp_online(struct chp_id chpid)
661chp_add(int chpid)
662{ 560{
663 int rc;
664 char dbf_txt[15]; 561 char dbf_txt[15];
665 struct device *dev;
666 562
667 if (!get_chp_status(chpid)) 563 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
668 return 0; /* no need to do the rest */
669
670 sprintf(dbf_txt, "cadd%x", chpid);
671 CIO_TRACE_EVENT(2, dbf_txt); 564 CIO_TRACE_EVENT(2, dbf_txt);
672 565
673 dev = get_device(&css[0]->chps[chpid]->dev); 566 if (chp_get_status(chpid) != 0)
674 rc = for_each_subchannel(__chp_add, to_channelpath(dev)); 567 for_each_subchannel(__chp_add, &chpid);
675 if (css_slow_subchannels_exist())
676 rc = -EAGAIN;
677 if (rc != -EAGAIN)
678 rc = 0;
679 put_device(dev);
680 return rc;
681} 568}
682 569
683/* 570static void __s390_subchannel_vary_chpid(struct subchannel *sch,
684 * Handling of crw machine checks with channel path source. 571 struct chp_id chpid, int on)
685 */
686int
687chp_process_crw(int chpid, int on)
688{
689 if (on == 0) {
690 /* Path has gone. We use the link incident routine.*/
691 s390_set_chpid_offline(chpid);
692 return 0; /* De-register is async anyway. */
693 }
694 /*
695 * Path has come. Allocate a new channel path structure,
696 * if needed.
697 */
698 if (get_chp_status(chpid) < 0)
699 new_channel_path(chpid);
700 /* Avoid the extra overhead in process_rec_acc. */
701 return chp_add(chpid);
702}
703
704static int check_for_io_on_path(struct subchannel *sch, int index)
705{
706 int cc;
707
708 cc = stsch(sch->schid, &sch->schib);
709 if (cc)
710 return 0;
711 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
712 return 1;
713 return 0;
714}
715
716static void terminate_internal_io(struct subchannel *sch)
717{
718 if (cio_clear(sch)) {
719 /* Recheck device in case clear failed. */
720 sch->lpm = 0;
721 if (device_trigger_verify(sch) != 0) {
722 if(css_enqueue_subchannel_slow(sch->schid)) {
723 css_clear_subchannel_slow_list();
724 need_rescan = 1;
725 }
726 }
727 return;
728 }
729 /* Request retry of internal operation. */
730 device_set_intretry(sch);
731 /* Call handler. */
732 if (sch->driver && sch->driver->termination)
733 sch->driver->termination(&sch->dev);
734}
735
736static void
737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
738{ 572{
739 int chp, old_lpm; 573 int chp, old_lpm;
574 int mask;
740 unsigned long flags; 575 unsigned long flags;
741 576
742 if (!sch->ssd_info.valid)
743 return;
744
745 spin_lock_irqsave(sch->lock, flags); 577 spin_lock_irqsave(sch->lock, flags);
746 old_lpm = sch->lpm; 578 old_lpm = sch->lpm;
747 for (chp = 0; chp < 8; chp++) { 579 for (chp = 0; chp < 8; chp++) {
748 if (sch->ssd_info.chpid[chp] != chpid) 580 mask = 0x80 >> chp;
581 if (!(sch->ssd_info.path_mask & mask))
582 continue;
583 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
749 continue; 584 continue;
750 585
751 if (on) { 586 if (on) {
752 sch->opm |= (0x80 >> chp); 587 sch->opm |= mask;
753 sch->lpm |= (0x80 >> chp); 588 sch->lpm |= mask;
754 if (!old_lpm) 589 if (!old_lpm)
755 device_trigger_reprobe(sch); 590 device_trigger_reprobe(sch);
756 else if (sch->driver && sch->driver->verify) 591 else if (sch->driver && sch->driver->verify)
757 sch->driver->verify(&sch->dev); 592 sch->driver->verify(&sch->dev);
758 break; 593 break;
759 } 594 }
760 sch->opm &= ~(0x80 >> chp); 595 sch->opm &= ~mask;
761 sch->lpm &= ~(0x80 >> chp); 596 sch->lpm &= ~mask;
762 if (check_for_io_on_path(sch, chp)) { 597 if (check_for_io_on_path(sch, mask)) {
763 if (device_is_online(sch)) 598 if (device_is_online(sch))
764 /* Path verification is done after killing. */ 599 /* Path verification is done after killing. */
765 device_kill_io(sch); 600 device_kill_io(sch);
766 else 601 else {
767 /* Kill and retry internal I/O. */ 602 /* Kill and retry internal I/O. */
768 terminate_internal_io(sch); 603 terminate_internal_io(sch);
769 } else if (!sch->lpm) { 604 /* Re-start path verification. */
770 if (device_trigger_verify(sch) != 0) { 605 if (sch->driver && sch->driver->verify)
771 if (css_enqueue_subchannel_slow(sch->schid)) { 606 sch->driver->verify(&sch->dev);
772 css_clear_subchannel_slow_list();
773 need_rescan = 1;
774 }
775 } 607 }
608 } else if (!sch->lpm) {
609 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid);
776 } else if (sch->driver && sch->driver->verify) 611 } else if (sch->driver && sch->driver->verify)
777 sch->driver->verify(&sch->dev); 612 sch->driver->verify(&sch->dev);
778 break; 613 break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
780 spin_unlock_irqrestore(sch->lock, flags); 615 spin_unlock_irqrestore(sch->lock, flags);
781} 616}
782 617
783static int 618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
784s390_subchannel_vary_chpid_off(struct device *dev, void *data)
785{ 619{
786 struct subchannel *sch; 620 struct subchannel *sch;
787 __u8 *chpid; 621 struct chp_id *chpid;
788 622
789 sch = to_subchannel(dev); 623 sch = to_subchannel(dev);
790 chpid = data; 624 chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
793 return 0; 627 return 0;
794} 628}
795 629
796static int 630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
797s390_subchannel_vary_chpid_on(struct device *dev, void *data)
798{ 631{
799 struct subchannel *sch; 632 struct subchannel *sch;
800 __u8 *chpid; 633 struct chp_id *chpid;
801 634
802 sch = to_subchannel(dev); 635 sch = to_subchannel(dev);
803 chpid = data; 636 chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
821 /* We're through */ 654 /* We're through */
822 return -ENXIO; 655 return -ENXIO;
823 /* Put it on the slow path. */ 656 /* Put it on the slow path. */
824 if (css_enqueue_subchannel_slow(schid)) { 657 css_schedule_eval(schid);
825 css_clear_subchannel_slow_list();
826 need_rescan = 1;
827 return -EAGAIN;
828 }
829 return 0; 658 return 0;
830} 659}
831 660
832/* 661/**
833 * Function: s390_vary_chpid 662 * chsc_chp_vary - propagate channel-path vary operation to subchannels
834 * Varies the specified chpid online or offline 663 * @chpid: channel-path ID
664 * @on: non-zero for vary online, zero for vary offline
835 */ 665 */
836static int 666int chsc_chp_vary(struct chp_id chpid, int on)
837s390_vary_chpid( __u8 chpid, int on)
838{ 667{
839 char dbf_text[15];
840 int status;
841
842 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
843 CIO_TRACE_EVENT( 2, dbf_text);
844
845 status = get_chp_status(chpid);
846 if (status < 0) {
847 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
848 return -EINVAL;
849 }
850
851 if (!on && !status) {
852 printk(KERN_ERR "chpid %x is already offline\n", chpid);
853 return -EINVAL;
854 }
855
856 set_chp_logically_online(chpid, on);
857
858 /* 668 /*
859 * Redo PathVerification on the devices the chpid connects to 669 * Redo PathVerification on the devices the chpid connects to
860 */ 670 */
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
865 if (on) 675 if (on)
866 /* Scan for new devices on varied on path. */ 676 /* Scan for new devices on varied on path. */
867 for_each_subchannel(__s390_vary_chpid_on, NULL); 677 for_each_subchannel(__s390_vary_chpid_on, NULL);
868 if (need_rescan || css_slow_subchannels_exist())
869 queue_work(slow_path_wq, &slow_path_work);
870 return 0; 678 return 0;
871} 679}
872 680
873/*
874 * Channel measurement related functions
875 */
876static ssize_t
877chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
878 size_t count)
879{
880 struct channel_path *chp;
881 unsigned int size;
882
883 chp = to_channelpath(container_of(kobj, struct device, kobj));
884 if (!chp->cmg_chars)
885 return 0;
886
887 size = sizeof(struct cmg_chars);
888
889 if (off > size)
890 return 0;
891 if (off + count > size)
892 count = size - off;
893 memcpy(buf, chp->cmg_chars + off, count);
894 return count;
895}
896
897static struct bin_attribute chp_measurement_chars_attr = {
898 .attr = {
899 .name = "measurement_chars",
900 .mode = S_IRUSR,
901 .owner = THIS_MODULE,
902 },
903 .size = sizeof(struct cmg_chars),
904 .read = chp_measurement_chars_read,
905};
906
907static void
908chp_measurement_copy_block(struct cmg_entry *buf,
909 struct channel_subsystem *css, int chpid)
910{
911 void *area;
912 struct cmg_entry *entry, reference_buf;
913 int idx;
914
915 if (chpid < 128) {
916 area = css->cub_addr1;
917 idx = chpid;
918 } else {
919 area = css->cub_addr2;
920 idx = chpid - 128;
921 }
922 entry = area + (idx * sizeof(struct cmg_entry));
923 do {
924 memcpy(buf, entry, sizeof(*entry));
925 memcpy(&reference_buf, entry, sizeof(*entry));
926 } while (reference_buf.values[0] != buf->values[0]);
927}
928
929static ssize_t
930chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
931{
932 struct channel_path *chp;
933 struct channel_subsystem *css;
934 unsigned int size;
935
936 chp = to_channelpath(container_of(kobj, struct device, kobj));
937 css = to_css(chp->dev.parent);
938
939 size = sizeof(struct cmg_entry);
940
941 /* Only allow single reads. */
942 if (off || count < size)
943 return 0;
944 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
945 count = size;
946 return count;
947}
948
949static struct bin_attribute chp_measurement_attr = {
950 .attr = {
951 .name = "measurement",
952 .mode = S_IRUSR,
953 .owner = THIS_MODULE,
954 },
955 .size = sizeof(struct cmg_entry),
956 .read = chp_measurement_read,
957};
958
959static void
960chsc_remove_chp_cmg_attr(struct channel_path *chp)
961{
962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
964}
965
966static int
967chsc_add_chp_cmg_attr(struct channel_path *chp)
968{
969 int ret;
970
971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
972 if (ret)
973 return ret;
974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
975 if (ret)
976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
977 return ret;
978}
979
980static void 681static void
981chsc_remove_cmg_attr(struct channel_subsystem *css) 682chsc_remove_cmg_attr(struct channel_subsystem *css)
982{ 683{
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
985 for (i = 0; i <= __MAX_CHPID; i++) { 686 for (i = 0; i <= __MAX_CHPID; i++) {
986 if (!css->chps[i]) 687 if (!css->chps[i])
987 continue; 688 continue;
988 chsc_remove_chp_cmg_attr(css->chps[i]); 689 chp_remove_cmg_attr(css->chps[i]);
989 } 690 }
990} 691}
991 692
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
998 for (i = 0; i <= __MAX_CHPID; i++) { 699 for (i = 0; i <= __MAX_CHPID; i++) {
999 if (!css->chps[i]) 700 if (!css->chps[i])
1000 continue; 701 continue;
1001 ret = chsc_add_chp_cmg_attr(css->chps[i]); 702 ret = chp_add_cmg_attr(css->chps[i]);
1002 if (ret) 703 if (ret)
1003 goto cleanup; 704 goto cleanup;
1004 } 705 }
@@ -1007,12 +708,11 @@ cleanup:
1007 for (--i; i >= 0; i--) { 708 for (--i; i >= 0; i--) {
1008 if (!css->chps[i]) 709 if (!css->chps[i])
1009 continue; 710 continue;
1010 chsc_remove_chp_cmg_attr(css->chps[i]); 711 chp_remove_cmg_attr(css->chps[i]);
1011 } 712 }
1012 return ret; 713 return ret;
1013} 714}
1014 715
1015
1016static int 716static int
1017__chsc_do_secm(struct channel_subsystem *css, int enable, void *page) 717__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1018{ 718{
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
1118 } else 818 } else
1119 chsc_remove_cmg_attr(css); 819 chsc_remove_cmg_attr(css);
1120 } 820 }
1121 if (enable && !css->cm_enabled) { 821 if (!css->cm_enabled) {
1122 free_page((unsigned long)css->cub_addr1); 822 free_page((unsigned long)css->cub_addr1);
1123 free_page((unsigned long)css->cub_addr2); 823 free_page((unsigned long)css->cub_addr2);
1124 } 824 }
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
1127 return ret; 827 return ret;
1128} 828}
1129 829
1130/* 830int chsc_determine_channel_path_description(struct chp_id chpid,
1131 * Files for the channel path entries. 831 struct channel_path_desc *desc)
1132 */
1133static ssize_t
1134chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
1135{
1136 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1137
1138 if (!chp)
1139 return 0;
1140 return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
1141 sprintf(buf, "offline\n"));
1142}
1143
1144static ssize_t
1145chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1146{
1147 struct channel_path *cp = container_of(dev, struct channel_path, dev);
1148 char cmd[10];
1149 int num_args;
1150 int error;
1151
1152 num_args = sscanf(buf, "%5s", cmd);
1153 if (!num_args)
1154 return count;
1155
1156 if (!strnicmp(cmd, "on", 2))
1157 error = s390_vary_chpid(cp->id, 1);
1158 else if (!strnicmp(cmd, "off", 3))
1159 error = s390_vary_chpid(cp->id, 0);
1160 else
1161 error = -EINVAL;
1162
1163 return error < 0 ? error : count;
1164
1165}
1166
1167static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
1168
1169static ssize_t
1170chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1171{
1172 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1173
1174 if (!chp)
1175 return 0;
1176 return sprintf(buf, "%x\n", chp->desc.desc);
1177}
1178
1179static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
1180
1181static ssize_t
1182chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
1183{
1184 struct channel_path *chp = to_channelpath(dev);
1185
1186 if (!chp)
1187 return 0;
1188 if (chp->cmg == -1) /* channel measurements not available */
1189 return sprintf(buf, "unknown\n");
1190 return sprintf(buf, "%x\n", chp->cmg);
1191}
1192
1193static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
1194
1195static ssize_t
1196chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
1197{
1198 struct channel_path *chp = to_channelpath(dev);
1199
1200 if (!chp)
1201 return 0;
1202 if (chp->shared == -1) /* channel measurements not available */
1203 return sprintf(buf, "unknown\n");
1204 return sprintf(buf, "%x\n", chp->shared);
1205}
1206
1207static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
1208
1209static struct attribute * chp_attrs[] = {
1210 &dev_attr_status.attr,
1211 &dev_attr_type.attr,
1212 &dev_attr_cmg.attr,
1213 &dev_attr_shared.attr,
1214 NULL,
1215};
1216
1217static struct attribute_group chp_attr_group = {
1218 .attrs = chp_attrs,
1219};
1220
1221static void
1222chp_release(struct device *dev)
1223{
1224 struct channel_path *cp;
1225
1226 cp = container_of(dev, struct channel_path, dev);
1227 kfree(cp);
1228}
1229
1230static int
1231chsc_determine_channel_path_description(int chpid,
1232 struct channel_path_desc *desc)
1233{ 832{
1234 int ccode, ret; 833 int ccode, ret;
1235 834
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
1252 scpd_area->request.length = 0x0010; 851 scpd_area->request.length = 0x0010;
1253 scpd_area->request.code = 0x0002; 852 scpd_area->request.code = 0x0002;
1254 853
1255 scpd_area->first_chpid = chpid; 854 scpd_area->first_chpid = chpid.id;
1256 scpd_area->last_chpid = chpid; 855 scpd_area->last_chpid = chpid.id;
1257 856
1258 ccode = chsc(scpd_area); 857 ccode = chsc(scpd_area);
1259 if (ccode > 0) { 858 if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
1316 } 915 }
1317} 916}
1318 917
1319static int 918int chsc_get_channel_measurement_chars(struct channel_path *chp)
1320chsc_get_channel_measurement_chars(struct channel_path *chp)
1321{ 919{
1322 int ccode, ret; 920 int ccode, ret;
1323 921
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1349 scmc_area->request.length = 0x0010; 947 scmc_area->request.length = 0x0010;
1350 scmc_area->request.code = 0x0022; 948 scmc_area->request.code = 0x0022;
1351 949
1352 scmc_area->first_chpid = chp->id; 950 scmc_area->first_chpid = chp->chpid.id;
1353 scmc_area->last_chpid = chp->id; 951 scmc_area->last_chpid = chp->chpid.id;
1354 952
1355 ccode = chsc(scmc_area); 953 ccode = chsc(scmc_area);
1356 if (ccode > 0) { 954 if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
1392 return ret; 990 return ret;
1393} 991}
1394 992
1395/*
1396 * Entries for chpids on the system bus.
1397 * This replaces /proc/chpids.
1398 */
1399static int
1400new_channel_path(int chpid)
1401{
1402 struct channel_path *chp;
1403 int ret;
1404
1405 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
1406 if (!chp)
1407 return -ENOMEM;
1408
1409 /* fill in status, etc. */
1410 chp->id = chpid;
1411 chp->state = 1;
1412 chp->dev.parent = &css[0]->device;
1413 chp->dev.release = chp_release;
1414 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
1415
1416 /* Obtain channel path description and fill it in. */
1417 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
1418 if (ret)
1419 goto out_free;
1420 /* Get channel-measurement characteristics. */
1421 if (css_characteristics_avail && css_chsc_characteristics.scmc
1422 && css_chsc_characteristics.secm) {
1423 ret = chsc_get_channel_measurement_chars(chp);
1424 if (ret)
1425 goto out_free;
1426 } else {
1427 static int msg_done;
1428
1429 if (!msg_done) {
1430 printk(KERN_WARNING "cio: Channel measurements not "
1431 "available, continuing.\n");
1432 msg_done = 1;
1433 }
1434 chp->cmg = -1;
1435 }
1436
1437 /* make it known to the system */
1438 ret = device_register(&chp->dev);
1439 if (ret) {
1440 printk(KERN_WARNING "%s: could not register %02x\n",
1441 __func__, chpid);
1442 goto out_free;
1443 }
1444 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
1445 if (ret) {
1446 device_unregister(&chp->dev);
1447 goto out_free;
1448 }
1449 mutex_lock(&css[0]->mutex);
1450 if (css[0]->cm_enabled) {
1451 ret = chsc_add_chp_cmg_attr(chp);
1452 if (ret) {
1453 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
1454 device_unregister(&chp->dev);
1455 mutex_unlock(&css[0]->mutex);
1456 goto out_free;
1457 }
1458 }
1459 css[0]->chps[chpid] = chp;
1460 mutex_unlock(&css[0]->mutex);
1461 return ret;
1462out_free:
1463 kfree(chp);
1464 return ret;
1465}
1466
1467void *
1468chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1469{
1470 struct channel_path *chp;
1471 struct channel_path_desc *desc;
1472
1473 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1474 if (!chp)
1475 return NULL;
1476 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
1477 if (!desc)
1478 return NULL;
1479 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
1480 return desc;
1481}
1482
1483static int __init 993static int __init
1484chsc_alloc_sei_area(void) 994chsc_alloc_sei_area(void)
1485{ 995{
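
The chsc.c changes above replace bare integer chpids with the struct chp_id abstraction from <asm/chpid.h>, which is not part of this diff. Below is a minimal sketch of what that header is expected to provide, inferred from the cssid/id fields and the chp_id_init()/chp_id_is_equal() helpers used in the hunks above; the exact field layout is an assumption, not taken from the patch.

/* Sketch only: assumed layout of struct chp_id and its inline helpers,
 * based on how the code above uses them. The real definitions live in
 * include/asm-s390/chpid.h. */
#include <linux/types.h>
#include <linux/string.h>

struct chp_id {
	u8 reserved1;
	u8 cssid;
	u8 reserved2;
	u8 id;
} __attribute__ ((packed));

static inline void chp_id_init(struct chp_id *chpid)
{
	memset(chpid, 0, sizeof(struct chp_id));
}

static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
{
	return (a->id == b->id) && (a->cssid == b->cssid);
}
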
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0fb2b024208f..2ad81d11cf7b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,9 +1,10 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define CHSC_SEI_ACC_CHPID 1 4#include <linux/types.h>
5#define CHSC_SEI_ACC_LINKADDR 2 5#include <linux/device.h>
6#define CHSC_SEI_ACC_FULLLINKADDR 3 6#include <asm/chpid.h>
7#include "schid.h"
7 8
8#define CHSC_SDA_OC_MSS 0x2 9#define CHSC_SDA_OC_MSS 0x2
9 10
@@ -33,23 +34,9 @@ struct channel_path_desc {
33 u8 chpp; 34 u8 chpp;
34} __attribute__ ((packed)); 35} __attribute__ ((packed));
35 36
36struct channel_path { 37struct channel_path;
37 int id;
38 int state;
39 struct channel_path_desc desc;
40 /* Channel-measurement related stuff: */
41 int cmg;
42 int shared;
43 void *cmg_chars;
44 struct device dev;
45};
46 38
47extern void s390_process_css( void ); 39extern void chsc_process_crw(void);
48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
53 40
54struct css_general_char { 41struct css_general_char {
55 u64 : 41; 42 u64 : 41;
@@ -82,15 +69,26 @@ struct css_chsc_char {
82extern struct css_general_char css_general_characteristics; 69extern struct css_general_char css_general_characteristics;
83extern struct css_chsc_char css_chsc_characteristics; 70extern struct css_chsc_char css_chsc_characteristics;
84 71
72struct chsc_ssd_info {
73 u8 path_mask;
74 u8 fla_valid_mask;
75 struct chp_id chpid[8];
76 u16 fla[8];
77};
78extern int chsc_get_ssd_info(struct subchannel_id schid,
79 struct chsc_ssd_info *ssd);
85extern int chsc_determine_css_characteristics(void); 80extern int chsc_determine_css_characteristics(void);
86extern int css_characteristics_avail; 81extern int css_characteristics_avail;
87 82
88extern void *chsc_get_chp_desc(struct subchannel*, int);
89
90extern int chsc_enable_facility(int); 83extern int chsc_enable_facility(int);
91struct channel_subsystem; 84struct channel_subsystem;
92extern int chsc_secm(struct channel_subsystem *, int); 85extern int chsc_secm(struct channel_subsystem *, int);
93 86
94#define to_channelpath(device) container_of(device, struct channel_path, dev) 87int chsc_chp_vary(struct chp_id chpid, int on);
88int chsc_determine_channel_path_description(struct chp_id chpid,
89 struct channel_path_desc *desc);
90void chsc_chp_online(struct chp_id chpid);
91void chsc_chp_offline(struct chp_id chpid);
92int chsc_get_channel_measurement_chars(struct channel_path *chp);
95 93
96#endif 94#endif
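
The new chsc_ssd_info above keeps a path_mask (and fla_valid_mask) instead of a per-entry valid flag: bit 0x80 >> i marks entry i as populated, mirroring the PIM/PAM bit order used elsewhere in the patch. A small illustrative helper, not part of the patch, that walks such a structure:

/* Illustration only: iterate the valid entries of a chsc_ssd_info using
 * the 0x80 >> i bit convention used throughout the patch. */
#include <linux/kernel.h>
#include <asm/chpid.h>
#include "chsc.h"

static void show_ssd_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		printk(KERN_INFO "cio: path %d: chpid %x.%02x%s\n", i,
		       ssd->chpid[i].cssid, ssd->chpid[i].id,
		       (ssd->fla_valid_mask & mask) ? " (fla valid)" : "");
	}
}
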
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 9cb129ab5be5..ea1defba5693 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -22,6 +22,7 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23#include <asm/reset.h> 23#include <asm/reset.h>
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h>
25#include "airq.h" 26#include "airq.h"
26#include "cio.h" 27#include "cio.h"
27#include "css.h" 28#include "css.h"
@@ -29,6 +30,7 @@
29#include "ioasm.h" 30#include "ioasm.h"
30#include "blacklist.h" 31#include "blacklist.h"
31#include "cio_debug.h" 32#include "cio_debug.h"
33#include "chp.h"
32#include "../s390mach.h" 34#include "../s390mach.h"
33 35
34debug_info_t *cio_debug_msg_id; 36debug_info_t *cio_debug_msg_id;
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
592 err = -ENODEV; 594 err = -ENODEV;
593 goto out; 595 goto out;
594 } 596 }
595 sch->opm = 0xff; 597 if (cio_is_console(sch->schid))
596 if (!cio_is_console(sch->schid)) 598 sch->opm = 0xff;
597 chsc_validate_chpids(sch); 599 else
600 sch->opm = chp_get_sch_opm(sch);
598 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
599 602
600 CIO_DEBUG(KERN_INFO, 0, 603 CIO_DEBUG(KERN_INFO, 0,
@@ -954,6 +957,7 @@ static void css_reset(void)
954{ 957{
955 int i, ret; 958 int i, ret;
956 unsigned long long timeout; 959 unsigned long long timeout;
960 struct chp_id chpid;
957 961
958 /* Reset subchannels. */ 962 /* Reset subchannels. */
959 for_each_subchannel(__shutdown_subchannel_easy, NULL); 963 for_each_subchannel(__shutdown_subchannel_easy, NULL);
@@ -963,8 +967,10 @@ static void css_reset(void)
963 __ctl_set_bit(14, 28); 967 __ctl_set_bit(14, 28);
964 /* Temporarily reenable machine checks. */ 968 /* Temporarily reenable machine checks. */
965 local_mcck_enable(); 969 local_mcck_enable();
970 chp_id_init(&chpid);
966 for (i = 0; i <= __MAX_CHPID; i++) { 971 for (i = 0; i <= __MAX_CHPID; i++) {
967 ret = rchp(i); 972 chpid.id = i;
973 ret = rchp(chpid);
968 if ((ret == 0) || (ret == 2)) 974 if ((ret == 0) || (ret == 2))
969 /* 975 /*
970 * rchp either succeeded, or another rchp is already 976 * rchp either succeeded, or another rchp is already
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
1048 do_reipl_asm(*((__u32*)&schid)); 1054 do_reipl_asm(*((__u32*)&schid));
1049} 1055}
1050 1056
1051static struct schib __initdata ipl_schib; 1057int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1052
1053/*
1054 * ipl_save_parameters gets called very early. It is not allowed to access
1055 * anything in the bss section at all. The bss section is not cleared yet,
1056 * but may contain some ipl parameters written by the firmware.
1057 * These parameters (if present) are copied to 0x2000.
1058 * To avoid corruption of the ipl parameters, all variables used by this
1059 * function must reside on the stack or in the data section.
1060 */
1061void ipl_save_parameters(void)
1062{ 1058{
1063 struct subchannel_id schid; 1059 struct subchannel_id schid;
1064 unsigned int *ipl_ptr; 1060 struct schib schib;
1065 void *src, *dst;
1066 1061
1067 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1062 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
1068 if (!schid.one) 1063 if (!schid.one)
1069 return; 1064 return -ENODEV;
1070 if (stsch(schid, &ipl_schib)) 1065 if (stsch(schid, &schib))
1071 return; 1066 return -ENODEV;
1072 if (!ipl_schib.pmcw.dnv) 1067 if (!schib.pmcw.dnv)
1073 return; 1068 return -ENODEV;
1074 ipl_devno = ipl_schib.pmcw.dev; 1069 iplinfo->devno = schib.pmcw.dev;
1075 ipl_flags |= IPL_DEVNO_VALID; 1070 iplinfo->is_qdio = schib.pmcw.qf;
1076 if (!ipl_schib.pmcw.qf) 1071 return 0;
1077 return;
1078 ipl_flags |= IPL_PARMBLOCK_VALID;
1079 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
1080 src = (void *)(unsigned long)*ipl_ptr;
1081 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1082 memmove(dst, src, PAGE_SIZE);
1083 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
1084} 1072}
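
cio_get_iplinfo() above replaces the early ipl_save_parameters() logic with a plain query interface that returns -ENODEV when no valid IPL subchannel exists. A hedged sketch of a hypothetical caller follows; struct cio_iplinfo is assumed to be visible via <asm/cio.h> and to carry at least the devno and is_qdio fields filled in above.

/* Illustration only: a hypothetical consumer of cio_get_iplinfo(). */
#include <linux/kernel.h>
#include <asm/cio.h>

static void report_ipl_device(void)
{
	struct cio_iplinfo iplinfo;

	if (cio_get_iplinfo(&iplinfo)) {
		printk(KERN_INFO "cio: no valid IPL device information\n");
		return;
	}
	printk(KERN_INFO "cio: IPL device number %04x%s\n", iplinfo.devno,
	       iplinfo.is_qdio ? " (QDIO)" : "");
}
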
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 35154a210357..7446c39951a7 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,18 +1,11 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5#include <linux/mutex.h> 4#include <linux/mutex.h>
6 5#include <linux/device.h>
7/* 6#include <asm/chpid.h>
8 * where we put the ssd info 7#include "chsc.h"
9 */ 8#include "schid.h"
10struct ssd_info {
11 __u8 valid:1;
12 __u8 type:7; /* subchannel type */
13 __u8 chpid[8]; /* chpids */
14 __u16 fla[8]; /* full link addresses */
15} __attribute__ ((packed));
16 9
17/* 10/*
18 * path management control word 11 * path management control word
@@ -108,7 +101,7 @@ struct subchannel {
108 struct schib schib; /* subchannel information block */ 101 struct schib schib; /* subchannel information block */
109 struct orb orb; /* operation request block */ 102 struct orb orb; /* operation request block */
110 struct ccw1 sense_ccw; /* static ccw for sense command */ 103 struct ccw1 sense_ccw; /* static ccw for sense command */
111 struct ssd_info ssd_info; /* subchannel description */ 104 struct chsc_ssd_info ssd_info; /* subchannel description */
112 struct device dev; /* entry in device tree */ 105 struct device dev; /* entry in device tree */
113 struct css_driver *driver; 106 struct css_driver *driver;
114} __attribute__ ((aligned(8))); 107} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 90b22faabbf7..28abd697be1a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -476,7 +476,7 @@ struct cmb_area {
476}; 476};
477 477
478static struct cmb_area cmb_area = { 478static struct cmb_area cmb_area = {
479 .lock = SPIN_LOCK_UNLOCKED, 479 .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
480 .list = LIST_HEAD_INIT(cmb_area.list), 480 .list = LIST_HEAD_INIT(cmb_area.list),
481 .num_channels = 1024, 481 .num_channels = 1024,
482}; 482};
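
The one-line cmf.c change swaps the anonymous SPIN_LOCK_UNLOCKED initializer for __SPIN_LOCK_UNLOCKED(cmb_area.lock), which names the lock so lockdep can give each statically initialized lock its own class. The same pattern in miniature, shown for hypothetical objects:

#include <linux/spinlock.h>

/* A standalone static lock gets DEFINE_SPINLOCK()... */
static DEFINE_SPINLOCK(example_lock);

/* ...while a lock embedded in a statically initialized object names the
 * field explicitly, as cmb_area.lock does above. */
static struct {
	spinlock_t lock;
	int count;
} example_area = {
	.lock = __SPIN_LOCK_UNLOCKED(example_area.lock),
	.count = 0,
};
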
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..27c6d9e55b23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,9 @@
20#include "ioasm.h" 20#include "ioasm.h"
21#include "chsc.h" 21#include "chsc.h"
22#include "device.h" 22#include "device.h"
23#include "idset.h"
24#include "chp.h"
23 25
24int need_rescan = 0;
25int css_init_done = 0; 26int css_init_done = 0;
26static int need_reprobe = 0; 27static int need_reprobe = 0;
27static int max_ssid = 0; 28static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
125 mutex_unlock(&sch->reg_mutex); 126 mutex_unlock(&sch->reg_mutex);
126} 127}
127 128
128static int 129static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
129css_register_subchannel(struct subchannel *sch) 130{
131 int i;
132 int mask;
133
134 memset(ssd, 0, sizeof(struct chsc_ssd_info));
135 ssd->path_mask = pmcw->pim;
136 for (i = 0; i < 8; i++) {
137 mask = 0x80 >> i;
138 if (pmcw->pim & mask) {
139 chp_id_init(&ssd->chpid[i]);
140 ssd->chpid[i].id = pmcw->chpid[i];
141 }
142 }
143}
144
145static void ssd_register_chpids(struct chsc_ssd_info *ssd)
146{
147 int i;
148 int mask;
149
150 for (i = 0; i < 8; i++) {
151 mask = 0x80 >> i;
152 if (ssd->path_mask & mask)
153 if (!chp_is_registered(ssd->chpid[i]))
154 chp_new(ssd->chpid[i]);
155 }
156}
157
158void css_update_ssd_info(struct subchannel *sch)
159{
160 int ret;
161
162 if (cio_is_console(sch->schid)) {
163 /* Console is initialized too early for functions requiring
164 * memory allocation. */
165 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
166 } else {
167 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
168 if (ret)
169 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
170 ssd_register_chpids(&sch->ssd_info);
171 }
172}
173
174static int css_register_subchannel(struct subchannel *sch)
130{ 175{
131 int ret; 176 int ret;
132 177
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
135 sch->dev.bus = &css_bus_type; 180 sch->dev.bus = &css_bus_type;
136 sch->dev.release = &css_subchannel_release; 181 sch->dev.release = &css_subchannel_release;
137 sch->dev.groups = subch_attr_groups; 182 sch->dev.groups = subch_attr_groups;
138 183 css_update_ssd_info(sch);
139 css_get_ssd_info(sch);
140
141 /* make it known to the system */ 184 /* make it known to the system */
142 ret = css_sch_device_register(sch); 185 ret = css_sch_device_register(sch);
143 if (ret) { 186 if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
306 return css_probe_device(schid); 349 return css_probe_device(schid);
307} 350}
308 351
309static int css_evaluate_subchannel(struct subchannel_id schid, int slow) 352static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
310{ 353{
311 struct subchannel *sch; 354 struct subchannel *sch;
312 int ret; 355 int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
317 put_device(&sch->dev); 360 put_device(&sch->dev);
318 } else 361 } else
319 ret = css_evaluate_new_subchannel(schid, slow); 362 ret = css_evaluate_new_subchannel(schid, slow);
320 363 if (ret == -EAGAIN)
321 return ret; 364 css_schedule_eval(schid);
322} 365}
323 366
324static int 367static struct idset *slow_subchannel_set;
325css_rescan_devices(struct subchannel_id schid, void *data) 368static spinlock_t slow_subchannel_lock;
369
370static int __init slow_subchannel_init(void)
326{ 371{
327 return css_evaluate_subchannel(schid, 1); 372 spin_lock_init(&slow_subchannel_lock);
373 slow_subchannel_set = idset_sch_new();
374 if (!slow_subchannel_set) {
375 printk(KERN_WARNING "cio: could not allocate slow subchannel "
376 "set\n");
377 return -ENOMEM;
378 }
379 return 0;
328} 380}
329 381
330struct slow_subchannel { 382subsys_initcall(slow_subchannel_init);
331 struct list_head slow_list;
332 struct subchannel_id schid;
333};
334
335static LIST_HEAD(slow_subchannels_head);
336static DEFINE_SPINLOCK(slow_subchannel_lock);
337 383
338static void 384static void css_slow_path_func(struct work_struct *unused)
339css_trigger_slow_path(struct work_struct *unused)
340{ 385{
341 CIO_TRACE_EVENT(4, "slowpath"); 386 struct subchannel_id schid;
342
343 if (need_rescan) {
344 need_rescan = 0;
345 for_each_subchannel(css_rescan_devices, NULL);
346 return;
347 }
348 387
388 CIO_TRACE_EVENT(4, "slowpath");
349 spin_lock_irq(&slow_subchannel_lock); 389 spin_lock_irq(&slow_subchannel_lock);
350 while (!list_empty(&slow_subchannels_head)) { 390 init_subchannel_id(&schid);
351 struct slow_subchannel *slow_sch = 391 while (idset_sch_get_first(slow_subchannel_set, &schid)) {
352 list_entry(slow_subchannels_head.next, 392 idset_sch_del(slow_subchannel_set, schid);
353 struct slow_subchannel, slow_list);
354
355 list_del_init(slow_subchannels_head.next);
356 spin_unlock_irq(&slow_subchannel_lock); 393 spin_unlock_irq(&slow_subchannel_lock);
357 css_evaluate_subchannel(slow_sch->schid, 1); 394 css_evaluate_subchannel(schid, 1);
358 spin_lock_irq(&slow_subchannel_lock); 395 spin_lock_irq(&slow_subchannel_lock);
359 kfree(slow_sch);
360 } 396 }
361 spin_unlock_irq(&slow_subchannel_lock); 397 spin_unlock_irq(&slow_subchannel_lock);
362} 398}
363 399
364DECLARE_WORK(slow_path_work, css_trigger_slow_path); 400static DECLARE_WORK(slow_path_work, css_slow_path_func);
365struct workqueue_struct *slow_path_wq; 401struct workqueue_struct *slow_path_wq;
366 402
403void css_schedule_eval(struct subchannel_id schid)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&slow_subchannel_lock, flags);
408 idset_sch_add(slow_subchannel_set, schid);
409 queue_work(slow_path_wq, &slow_path_work);
410 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
411}
412
413void css_schedule_eval_all(void)
414{
415 unsigned long flags;
416
417 spin_lock_irqsave(&slow_subchannel_lock, flags);
418 idset_fill(slow_subchannel_set);
419 queue_work(slow_path_wq, &slow_path_work);
420 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
421}
422
367/* Reprobe subchannel if unregistered. */ 423/* Reprobe subchannel if unregistered. */
368static int reprobe_subchannel(struct subchannel_id schid, void *data) 424static int reprobe_subchannel(struct subchannel_id schid, void *data)
369{ 425{
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void)
426EXPORT_SYMBOL_GPL(css_schedule_reprobe); 482EXPORT_SYMBOL_GPL(css_schedule_reprobe);
427 483
428/* 484/*
429 * Rescan for new devices. FIXME: This is slow.
430 * This function is called when we have lost CRWs due to overflows and we have
431 * to do subchannel housekeeping.
432 */
433void
434css_reiterate_subchannels(void)
435{
436 css_clear_subchannel_slow_list();
437 need_rescan = 1;
438}
439
440/*
441 * Called from the machine check handler for subchannel report words. 485 * Called from the machine check handler for subchannel report words.
442 */ 486 */
443int 487void css_process_crw(int rsid1, int rsid2)
444css_process_crw(int rsid1, int rsid2)
445{ 488{
446 int ret;
447 struct subchannel_id mchk_schid; 489 struct subchannel_id mchk_schid;
448 490
449 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 491 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
450 rsid1, rsid2); 492 rsid1, rsid2);
451
452 if (need_rescan)
453 /* We need to iterate all subchannels anyway. */
454 return -EAGAIN;
455
456 init_subchannel_id(&mchk_schid); 493 init_subchannel_id(&mchk_schid);
457 mchk_schid.sch_no = rsid1; 494 mchk_schid.sch_no = rsid1;
458 if (rsid2 != 0) 495 if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
463 * use stsch() to find out if the subchannel in question has come 500 * use stsch() to find out if the subchannel in question has come
464 * or gone. 501 * or gone.
465 */ 502 */
466 ret = css_evaluate_subchannel(mchk_schid, 0); 503 css_evaluate_subchannel(mchk_schid, 0);
467 if (ret == -EAGAIN) {
468 if (css_enqueue_subchannel_slow(mchk_schid)) {
469 css_clear_subchannel_slow_list();
470 need_rescan = 1;
471 }
472 }
473 return ret;
474} 504}
475 505
476static int __init 506static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
745 775
746subsys_initcall(init_channel_subsystem); 776subsys_initcall(init_channel_subsystem);
747 777
748int
749css_enqueue_subchannel_slow(struct subchannel_id schid)
750{
751 struct slow_subchannel *new_slow_sch;
752 unsigned long flags;
753
754 new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
755 if (!new_slow_sch)
756 return -ENOMEM;
757 new_slow_sch->schid = schid;
758 spin_lock_irqsave(&slow_subchannel_lock, flags);
759 list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
760 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
761 return 0;
762}
763
764void
765css_clear_subchannel_slow_list(void)
766{
767 unsigned long flags;
768
769 spin_lock_irqsave(&slow_subchannel_lock, flags);
770 while (!list_empty(&slow_subchannels_head)) {
771 struct slow_subchannel *slow_sch =
772 list_entry(slow_subchannels_head.next,
773 struct slow_subchannel, slow_list);
774
775 list_del_init(slow_subchannels_head.next);
776 kfree(slow_sch);
777 }
778 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
779}
780
781
782
783int
784css_slow_subchannels_exist(void)
785{
786 return (!list_empty(&slow_subchannels_head));
787}
788
789MODULE_LICENSE("GPL"); 778MODULE_LICENSE("GPL");
790EXPORT_SYMBOL(css_bus_type); 779EXPORT_SYMBOL(css_bus_type);
791EXPORT_SYMBOL_GPL(css_characteristics_avail); 780EXPORT_SYMBOL_GPL(css_characteristics_avail);
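
The css.c rework above drops the kmalloc-per-entry slow list and the global need_rescan flag in favour of an idset bitmap: css_schedule_eval() marks one subchannel and queues slow_path_work, while css_schedule_eval_all() marks every subchannel. A trivial sketch of how a caller defers work with the new interface (hypothetical caller, not from the patch):

#include "css.h"

/* Illustration only: defer subchannel re-evaluation to the slow path. */
static void defer_evaluation(struct subchannel_id schid, int overflow)
{
	if (overflow)
		/* CRWs were lost; re-evaluate everything. */
		css_schedule_eval_all();
	else
		css_schedule_eval(schid);
}
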
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ca2bab932a8a..71fcfdc42800 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -4,8 +4,11 @@
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/wait.h> 5#include <linux/wait.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/device.h>
8#include <linux/types.h>
7 9
8#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h>
9 12
10#include "schid.h" 13#include "schid.h"
11 14
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 146extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 147extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 148extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int); 149extern void css_process_crw(int, int);
147extern void css_reiterate_subchannels(void); 150extern void css_reiterate_subchannels(void);
151void css_update_ssd_info(struct subchannel *sch);
148 152
149#define __MAX_SUBCHANNEL 65535 153#define __MAX_SUBCHANNEL 65535
150#define __MAX_SSID 3 154#define __MAX_SSID 3
151#define __MAX_CHPID 255
152#define __MAX_CSSID 0
153 155
154struct channel_subsystem { 156struct channel_subsystem {
155 u8 cssid; 157 u8 cssid;
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch);
185void device_kill_pending_timer(struct subchannel *); 187void device_kill_pending_timer(struct subchannel *);
186 188
187/* Helper functions to build lists for the slow path. */ 189/* Helper functions to build lists for the slow path. */
188extern int css_enqueue_subchannel_slow(struct subchannel_id schid); 190void css_schedule_eval(struct subchannel_id schid);
189void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 191void css_schedule_eval_all(void);
190void css_clear_subchannel_slow_list(void);
191int css_slow_subchannels_exist(void);
192extern int need_rescan;
193 192
194int sch_is_pseudo_sch(struct subchannel *); 193int sch_is_pseudo_sch(struct subchannel *);
195 194
196extern struct workqueue_struct *slow_path_wq; 195extern struct workqueue_struct *slow_path_wq;
197extern struct work_struct slow_path_work;
198 196
199int subchannel_add_files (struct device *); 197int subchannel_add_files (struct device *);
200extern struct attribute_group *subch_attr_groups[]; 198extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e322111fb369..a23ff582db9d 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
56/* Store modalias string delimited by prefix/suffix string into buffer with 56/* Store modalias string delimited by prefix/suffix string into buffer with
57 * specified size. Return length of resulting string (excluding trailing '\0') 57 * specified size. Return length of resulting string (excluding trailing '\0')
58 * even if string doesn't fit buffer (snprintf semantics). */ 58 * even if string doesn't fit buffer (snprintf semantics). */
59static int snprint_alias(char *buf, size_t size, const char *prefix, 59static int snprint_alias(char *buf, size_t size,
60 struct ccw_device_id *id, const char *suffix) 60 struct ccw_device_id *id, const char *suffix)
61{ 61{
62 int len; 62 int len;
63 63
64 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, 64 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
65 id->cu_model);
66 if (len > size) 65 if (len > size)
67 return len; 66 return len;
68 buf += len; 67 buf += len;
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp,
85 struct ccw_device *cdev = to_ccwdev(dev); 84 struct ccw_device *cdev = to_ccwdev(dev);
86 struct ccw_device_id *id = &(cdev->id); 85 struct ccw_device_id *id = &(cdev->id);
87 int i = 0; 86 int i = 0;
88 int len; 87 int len = 0;
88 int ret;
89 char modalias_buf[30];
89 90
90 /* CU_TYPE= */ 91 /* CU_TYPE= */
91 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; 92 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
92 if (len > buffer_size || i >= num_envp) 93 "CU_TYPE=%04X", id->cu_type);
93 return -ENOMEM; 94 if (ret)
94 envp[i++] = buffer; 95 return ret;
95 buffer += len;
96 buffer_size -= len;
97 96
98 /* CU_MODEL= */ 97 /* CU_MODEL= */
99 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; 98 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
100 if (len > buffer_size || i >= num_envp) 99 "CU_MODEL=%02X", id->cu_model);
101 return -ENOMEM; 100 if (ret)
102 envp[i++] = buffer; 101 return ret;
103 buffer += len;
104 buffer_size -= len;
105 102
106 /* The next two can be zero, that's ok for us */ 103 /* The next two can be zero, that's ok for us */
107 /* DEV_TYPE= */ 104 /* DEV_TYPE= */
108 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; 105 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
109 if (len > buffer_size || i >= num_envp) 106 "DEV_TYPE=%04X", id->dev_type);
110 return -ENOMEM; 107 if (ret)
111 envp[i++] = buffer; 108 return ret;
112 buffer += len;
113 buffer_size -= len;
114 109
115 /* DEV_MODEL= */ 110 /* DEV_MODEL= */
116 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", 111 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
117 (unsigned char) id->dev_model) + 1; 112 "DEV_MODEL=%02X", id->dev_model);
118 if (len > buffer_size || i >= num_envp) 113 if (ret)
119 return -ENOMEM; 114 return ret;
120 envp[i++] = buffer;
121 buffer += len;
122 buffer_size -= len;
123 115
124 /* MODALIAS= */ 116 /* MODALIAS= */
125 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; 117 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
126 if (len > buffer_size || i >= num_envp) 118 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
127 return -ENOMEM; 119 "MODALIAS=%s", modalias_buf);
128 envp[i++] = buffer; 120 return ret;
129 buffer += len;
130 buffer_size -= len;
131
132 envp[i] = NULL;
133
134 return 0;
135} 121}
136 122
137struct bus_type ccw_bus_type; 123struct bus_type ccw_bus_type;
@@ -230,12 +216,18 @@ static ssize_t
230chpids_show (struct device * dev, struct device_attribute *attr, char * buf) 216chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
231{ 217{
232 struct subchannel *sch = to_subchannel(dev); 218 struct subchannel *sch = to_subchannel(dev);
233 struct ssd_info *ssd = &sch->ssd_info; 219 struct chsc_ssd_info *ssd = &sch->ssd_info;
234 ssize_t ret = 0; 220 ssize_t ret = 0;
235 int chp; 221 int chp;
222 int mask;
236 223
237 for (chp = 0; chp < 8; chp++) 224 for (chp = 0; chp < 8; chp++) {
238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); 225 mask = 0x80 >> chp;
226 if (ssd->path_mask & mask)
227 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
228 else
229 ret += sprintf(buf + ret, "00 ");
230 }
239 ret += sprintf (buf+ret, "\n"); 231 ret += sprintf (buf+ret, "\n");
240 return min((ssize_t)PAGE_SIZE, ret); 232 return min((ssize_t)PAGE_SIZE, ret);
241} 233}
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
280 struct ccw_device_id *id = &(cdev->id); 272 struct ccw_device_id *id = &(cdev->id);
281 int len; 273 int len;
282 274
283 len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; 275 len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
284 276
285 return len > PAGE_SIZE ? PAGE_SIZE : len; 277 return len > PAGE_SIZE ? PAGE_SIZE : len;
286} 278}
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
298 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); 290 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
299} 291}
300 292
301static void ccw_device_unregister(struct work_struct *work) 293static void ccw_device_unregister(struct ccw_device *cdev)
302{ 294{
303 struct ccw_device_private *priv;
304 struct ccw_device *cdev;
305
306 priv = container_of(work, struct ccw_device_private, kick_work);
307 cdev = priv->cdev;
308 if (test_and_clear_bit(1, &cdev->private->registered)) 295 if (test_and_clear_bit(1, &cdev->private->registered))
309 device_unregister(&cdev->dev); 296 device_del(&cdev->dev);
310 put_device(&cdev->dev);
311} 297}
312 298
313static void 299static void
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
324 spin_lock_irqsave(cdev->ccwlock, flags); 310 spin_lock_irqsave(cdev->ccwlock, flags);
325 cdev->private->state = DEV_STATE_NOT_OPER; 311 cdev->private->state = DEV_STATE_NOT_OPER;
326 spin_unlock_irqrestore(cdev->ccwlock, flags); 312 spin_unlock_irqrestore(cdev->ccwlock, flags);
327 if (get_device(&cdev->dev)) { 313 ccw_device_unregister(cdev);
328 PREPARE_WORK(&cdev->private->kick_work, 314 put_device(&cdev->dev);
329 ccw_device_unregister);
330 queue_work(ccw_device_work, &cdev->private->kick_work);
331 }
332 return ; 315 return ;
333 } 316 }
334 sch = to_subchannel(cdev->dev.parent); 317 sch = to_subchannel(cdev->dev.parent);
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev)
413 return (ret == 0) ? -ENODEV : ret; 396 return (ret == 0) ? -ENODEV : ret;
414} 397}
415 398
416static ssize_t 399static void online_store_handle_offline(struct ccw_device *cdev)
417online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 400{
401 if (cdev->private->state == DEV_STATE_DISCONNECTED)
402 ccw_device_remove_disconnected(cdev);
403 else if (cdev->drv && cdev->drv->set_offline)
404 ccw_device_set_offline(cdev);
405}
406
407static int online_store_recog_and_online(struct ccw_device *cdev)
408{
409 int ret;
410
411 /* Do device recognition, if needed. */
412 if (cdev->id.cu_type == 0) {
413 ret = ccw_device_recognition(cdev);
414 if (ret) {
415 printk(KERN_WARNING"Couldn't start recognition "
416 "for device %s (ret=%d)\n",
417 cdev->dev.bus_id, ret);
418 return ret;
419 }
420 wait_event(cdev->private->wait_q,
421 cdev->private->flags.recog_done);
422 }
423 if (cdev->drv && cdev->drv->set_online)
424 ccw_device_set_online(cdev);
425 return 0;
426}
427static void online_store_handle_online(struct ccw_device *cdev, int force)
428{
429 int ret;
430
431 ret = online_store_recog_and_online(cdev);
432 if (ret)
433 return;
434 if (force && cdev->private->state == DEV_STATE_BOXED) {
435 ret = ccw_device_stlck(cdev);
436 if (ret) {
437 printk(KERN_WARNING"ccw_device_stlck for device %s "
438 "returned %d!\n", cdev->dev.bus_id, ret);
439 return;
440 }
441 if (cdev->id.cu_type == 0)
442 cdev->private->state = DEV_STATE_NOT_OPER;
443 online_store_recog_and_online(cdev);
444 }
445
446}
447
448static ssize_t online_store (struct device *dev, struct device_attribute *attr,
449 const char *buf, size_t count)
418{ 450{
419 struct ccw_device *cdev = to_ccwdev(dev); 451 struct ccw_device *cdev = to_ccwdev(dev);
420 int i, force, ret; 452 int i, force;
421 char *tmp; 453 char *tmp;
422 454
423 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 455 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
434 force = 0; 466 force = 0;
435 i = simple_strtoul(buf, &tmp, 16); 467 i = simple_strtoul(buf, &tmp, 16);
436 } 468 }
437 if (i == 1) { 469
438 /* Do device recognition, if needed. */ 470 switch (i) {
439 if (cdev->id.cu_type == 0) { 471 case 0:
440 ret = ccw_device_recognition(cdev); 472 online_store_handle_offline(cdev);
441 if (ret) { 473 break;
442 printk(KERN_WARNING"Couldn't start recognition " 474 case 1:
443 "for device %s (ret=%d)\n", 475 online_store_handle_online(cdev, force);
444 cdev->dev.bus_id, ret); 476 break;
445 goto out; 477 default:
446 } 478 count = -EINVAL;
447 wait_event(cdev->private->wait_q,
448 cdev->private->flags.recog_done);
449 }
450 if (cdev->drv && cdev->drv->set_online)
451 ccw_device_set_online(cdev);
452 } else if (i == 0) {
453 if (cdev->private->state == DEV_STATE_DISCONNECTED)
454 ccw_device_remove_disconnected(cdev);
455 else if (cdev->drv && cdev->drv->set_offline)
456 ccw_device_set_offline(cdev);
457 }
458 if (force && cdev->private->state == DEV_STATE_BOXED) {
459 ret = ccw_device_stlck(cdev);
460 if (ret) {
461 printk(KERN_WARNING"ccw_device_stlck for device %s "
462 "returned %d!\n", cdev->dev.bus_id, ret);
463 goto out;
464 }
465 /* Do device recognition, if needed. */
466 if (cdev->id.cu_type == 0) {
467 cdev->private->state = DEV_STATE_NOT_OPER;
468 ret = ccw_device_recognition(cdev);
469 if (ret) {
470 printk(KERN_WARNING"Couldn't start recognition "
471 "for device %s (ret=%d)\n",
472 cdev->dev.bus_id, ret);
473 goto out;
474 }
475 wait_event(cdev->private->wait_q,
476 cdev->private->flags.recog_done);
477 }
478 if (cdev->drv && cdev->drv->set_online)
479 ccw_device_set_online(cdev);
480 } 479 }
481 out:
482 if (cdev->drv) 480 if (cdev->drv)
483 module_put(cdev->drv->owner); 481 module_put(cdev->drv->owner);
484 atomic_set(&cdev->private->onoff, 0); 482 atomic_set(&cdev->private->onoff, 0);
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = {
548 .attrs = ccwdev_attrs, 546 .attrs = ccwdev_attrs,
549}; 547};
550 548
551static int 549struct attribute_group *ccwdev_attr_groups[] = {
552device_add_files (struct device *dev) 550 &ccwdev_attr_group,
553{ 551 NULL,
554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 552};
555}
556
557static void
558device_remove_files(struct device *dev)
559{
560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
561}
562 553
563/* this is a simple abstraction for device_register that sets the 554/* this is a simple abstraction for device_register that sets the
564 * correct bus type and adds the bus specific files */ 555 * correct bus type and adds the bus specific files */
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev)
573 return ret; 564 return ret;
574 565
575 set_bit(1, &cdev->private->registered); 566 set_bit(1, &cdev->private->registered);
576 if ((ret = device_add_files(dev))) {
577 if (test_and_clear_bit(1, &cdev->private->registered))
578 device_del(dev);
579 }
580 return ret; 567 return ret;
581} 568}
582 569
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work)
648 return; 635 return;
649 } 636 }
650 set_bit(1, &cdev->private->registered); 637 set_bit(1, &cdev->private->registered);
651 if (device_add_files(&cdev->dev)) {
652 if (test_and_clear_bit(1, &cdev->private->registered))
653 device_unregister(&cdev->dev);
654 }
655} 638}
656 639
657void ccw_device_do_unreg_rereg(struct work_struct *work) 640void ccw_device_do_unreg_rereg(struct work_struct *work)
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work)
664 cdev = priv->cdev; 647 cdev = priv->cdev;
665 sch = to_subchannel(cdev->dev.parent); 648 sch = to_subchannel(cdev->dev.parent);
666 649
667 device_remove_files(&cdev->dev); 650 ccw_device_unregister(cdev);
668 if (test_and_clear_bit(1, &cdev->private->registered))
669 device_del(&cdev->dev);
670 PREPARE_WORK(&cdev->private->kick_work, 651 PREPARE_WORK(&cdev->private->kick_work,
671 ccw_device_add_changed); 652 ccw_device_add_changed);
672 queue_work(ccw_device_work, &cdev->private->kick_work); 653 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
705 cdev->dev.parent = &sch->dev; 686 cdev->dev.parent = &sch->dev;
706 cdev->dev.release = ccw_device_release; 687 cdev->dev.release = ccw_device_release;
707 INIT_LIST_HEAD(&cdev->private->kick_work.entry); 688 INIT_LIST_HEAD(&cdev->private->kick_work.entry);
689 cdev->dev.groups = ccwdev_attr_groups;
708 /* Do first half of device_register. */ 690 /* Do first half of device_register. */
709 device_initialize(&cdev->dev); 691 device_initialize(&cdev->dev);
710 if (!get_device(&sch->dev)) { 692 if (!get_device(&sch->dev)) {
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
736static void sch_attach_device(struct subchannel *sch, 718static void sch_attach_device(struct subchannel *sch,
737 struct ccw_device *cdev) 719 struct ccw_device *cdev)
738{ 720{
721 css_update_ssd_info(sch);
739 spin_lock_irq(sch->lock); 722 spin_lock_irq(sch->lock);
740 sch->dev.driver_data = cdev; 723 sch->dev.driver_data = cdev;
741 cdev->private->schid = sch->schid; 724 cdev->private->schid = sch->schid;
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work)
871 priv = container_of(work, struct ccw_device_private, kick_work); 854 priv = container_of(work, struct ccw_device_private, kick_work);
872 cdev = priv->cdev; 855 cdev = priv->cdev;
873 sch = to_subchannel(cdev->dev.parent); 856 sch = to_subchannel(cdev->dev.parent);
874 857 css_update_ssd_info(sch);
875 /* 858 /*
876 * io_subchannel_register() will also be called after device 859 * io_subchannel_register() will also be called after device
877 * recognition has been done for a boxed device (which will already 860 * recognition has been done for a boxed device (which will already
@@ -888,6 +871,12 @@ io_subchannel_register(struct work_struct *work)
888 } 871 }
889 goto out; 872 goto out;
890 } 873 }
874 /*
875 * Now we know this subchannel will stay, we can throw
876 * our delayed uevent.
877 */
878 sch->dev.uevent_suppress = 0;
879 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
891 /* make it known to the system */ 880 /* make it known to the system */
892 ret = ccw_device_register(cdev); 881 ret = ccw_device_register(cdev);
893 if (ret) { 882 if (ret) {
@@ -1133,15 +1122,8 @@ io_subchannel_remove (struct subchannel *sch)
1133 sch->dev.driver_data = NULL; 1122 sch->dev.driver_data = NULL;
1134 cdev->private->state = DEV_STATE_NOT_OPER; 1123 cdev->private->state = DEV_STATE_NOT_OPER;
1135 spin_unlock_irqrestore(cdev->ccwlock, flags); 1124 spin_unlock_irqrestore(cdev->ccwlock, flags);
1136 /* 1125 ccw_device_unregister(cdev);
1137 * Put unregistration on workqueue to avoid livelocks on the css bus 1126 put_device(&cdev->dev);
1138 * semaphore.
1139 */
1140 if (get_device(&cdev->dev)) {
1141 PREPARE_WORK(&cdev->private->kick_work,
1142 ccw_device_unregister);
1143 queue_work(ccw_device_work, &cdev->private->kick_work);
1144 }
1145 return 0; 1127 return 0;
1146} 1128}
1147 1129
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 089a3ddd6265..898ec3b2bebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -22,6 +23,7 @@
22#include "device.h" 23#include "device.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "ioasm.h" 25#include "ioasm.h"
26#include "chp.h"
25 27
26int 28int
27device_is_online(struct subchannel *sch) 29device_is_online(struct subchannel *sch)
@@ -210,14 +212,18 @@ static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 212__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 213{
212 int mask, i; 214 int mask, i;
215 struct chp_id chpid;
213 216
217 chp_id_init(&chpid);
214 for (i = 0; i<8; i++) { 218 for (i = 0; i<8; i++) {
215 mask = 0x80 >> i; 219 mask = 0x80 >> i;
216 if (!(sch->lpm & mask)) 220 if (!(sch->lpm & mask))
217 continue; 221 continue;
218 if (old_lpm & mask) 222 if (old_lpm & mask)
219 continue; 223 continue;
220 chpid_is_actually_online(sch->schib.pmcw.chpid[i]); 224 chpid.id = sch->schib.pmcw.chpid[i];
225 if (!chp_is_registered(chpid))
226 css_schedule_eval_all();
221 } 227 }
222} 228}
223 229
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7c7775aae38a..16f59fcb66b1 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -16,12 +16,14 @@
16 16
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h>
19 20
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "css.h" 23#include "css.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "device.h" 25#include "device.h"
26#include "chp.h"
25 27
26int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) 28int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
27{ 29{
@@ -606,9 +608,12 @@ void *
606ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 608ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
607{ 609{
608 struct subchannel *sch; 610 struct subchannel *sch;
611 struct chp_id chpid;
609 612
610 sch = to_subchannel(cdev->dev.parent); 613 sch = to_subchannel(cdev->dev.parent);
611 return chsc_get_chp_desc(sch, chp_no); 614 chp_id_init(&chpid);
615 chpid.id = sch->schib.pmcw.chpid[chp_no];
616 return chp_get_chp_desc(chpid);
612} 617}
613 618
614// FIXME: these have to go: 619// FIXME: these have to go:
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000000..16ea828e99f7
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
1/*
2 * drivers/s390/cio/idset.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/slab.h>
9#include <asm/bitops.h>
10#include "idset.h"
11#include "css.h"
12
13struct idset {
14 int num_ssid;
15 int num_id;
16 unsigned long bitmap[0];
17};
18
19static inline unsigned long bitmap_size(int num_ssid, int num_id)
20{
21 return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
22}
23
24static struct idset *idset_new(int num_ssid, int num_id)
25{
26 struct idset *set;
27
28 set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
29 GFP_KERNEL);
30 if (set) {
31 set->num_ssid = num_ssid;
32 set->num_id = num_id;
33 }
34 return set;
35}
36
37void idset_free(struct idset *set)
38{
39 kfree(set);
40}
41
42void idset_clear(struct idset *set)
43{
44 memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
45}
46
47void idset_fill(struct idset *set)
48{
49 memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
50}
51
52static inline void idset_add(struct idset *set, int ssid, int id)
53{
54 set_bit(ssid * set->num_id + id, set->bitmap);
55}
56
57static inline void idset_del(struct idset *set, int ssid, int id)
58{
59 clear_bit(ssid * set->num_id + id, set->bitmap);
60}
61
62static inline int idset_contains(struct idset *set, int ssid, int id)
63{
64 return test_bit(ssid * set->num_id + id, set->bitmap);
65}
66
67static inline int idset_get_first(struct idset *set, int *ssid, int *id)
68{
69 int bitnum;
70
71 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
72 if (bitnum >= set->num_ssid * set->num_id)
73 return 0;
74 *ssid = bitnum / set->num_id;
75 *id = bitnum % set->num_id;
76 return 1;
77}
78
79struct idset *idset_sch_new(void)
80{
81 return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
82}
83
84void idset_sch_add(struct idset *set, struct subchannel_id schid)
85{
86 idset_add(set, schid.ssid, schid.sch_no);
87}
88
89void idset_sch_del(struct idset *set, struct subchannel_id schid)
90{
91 idset_del(set, schid.ssid, schid.sch_no);
92}
93
94int idset_sch_contains(struct idset *set, struct subchannel_id schid)
95{
96 return idset_contains(set, schid.ssid, schid.sch_no);
97}
98
99int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
100{
101 int ssid = 0;
102 int id = 0;
103 int rc;
104
105 rc = idset_get_first(set, &ssid, &id);
106 if (rc) {
107 init_subchannel_id(schid);
108 schid->ssid = ssid;
109 schid->sch_no = id;
110 }
111 return rc;
112}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000000..144466ab8c15
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
1/*
2 * drivers/s390/cio/idset.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H
10
11#include "schid.h"
12
13struct idset;
14
15void idset_free(struct idset *set);
16void idset_clear(struct idset *set);
17void idset_fill(struct idset *set);
18
19struct idset *idset_sch_new(void);
20void idset_sch_add(struct idset *set, struct subchannel_id id);
21void idset_sch_del(struct idset *set, struct subchannel_id id);
22int idset_sch_contains(struct idset *set, struct subchannel_id id);
23int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
24
25#endif /* S390_IDSET_H */
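
[Editorial note] The new idset is a flat bitmap indexed by ssid * num_id + id and sized for every possible subchannel, so membership tests and updates are single bit operations. A hedged usage sketch built only from the functions declared in idset.h; the caller shown here is invented.

#include <linux/errno.h>
#include "idset.h"

/* Track subchannels that still need evaluation (illustrative only). */
static int example_mark_and_fetch(struct subchannel_id schid)
{
	struct idset *todo;
	struct subchannel_id next;

	todo = idset_sch_new();
	if (!todo)
		return -ENOMEM;
	idset_sch_add(todo, schid);
	if (idset_sch_get_first(todo, &next))
		idset_sch_del(todo, next);	/* next == schid here */
	idset_free(todo);
	return 0;
}
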
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index ad6d82940069..7153dd959082 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,6 +1,7 @@
1#ifndef S390_CIO_IOASM_H 1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h>
4#include "schid.h" 5#include "schid.h"
5 6
6/* 7/*
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area)
189 return cc; 190 return cc;
190} 191}
191 192
192static inline int rchp(int chpid) 193static inline int rchp(struct chp_id chpid)
193{ 194{
194 register unsigned int reg1 asm ("1") = chpid; 195 register struct chp_id reg1 asm ("1") = chpid;
195 int ccode; 196 int ccode;
196 197
197 asm volatile( 198 asm volatile(
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index bf37cdf43fae..5aac0ec36368 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -423,27 +423,25 @@ static int ap_uevent (struct device *dev, char **envp, int num_envp,
423 char *buffer, int buffer_size) 423 char *buffer, int buffer_size)
424{ 424{
425 struct ap_device *ap_dev = to_ap_dev(dev); 425 struct ap_device *ap_dev = to_ap_dev(dev);
426 int length; 426 int retval = 0, length = 0, i = 0;
427 427
428 if (!ap_dev) 428 if (!ap_dev)
429 return -ENODEV; 429 return -ENODEV;
430 430
431 /* Set up DEV_TYPE environment variable. */ 431 /* Set up DEV_TYPE environment variable. */
432 envp[0] = buffer; 432 retval = add_uevent_var(envp, num_envp, &i,
433 length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X", 433 buffer, buffer_size, &length,
434 ap_dev->device_type); 434 "DEV_TYPE=%04X", ap_dev->device_type);
435 if (buffer_size - length <= 0) 435 if (retval)
436 return -ENOMEM; 436 return retval;
437 buffer += length; 437
438 buffer_size -= length;
439 /* Add MODALIAS= */ 438 /* Add MODALIAS= */
440 envp[1] = buffer; 439 retval = add_uevent_var(envp, num_envp, &i,
441 length = scnprintf(buffer, buffer_size, "MODALIAS=ap:t%02X", 440 buffer, buffer_size, &length,
442 ap_dev->device_type); 441 "MODALIAS=ap:t%02X", ap_dev->device_type);
443 if (buffer_size - length <= 0) 442
444 return -ENOMEM; 443 envp[i] = NULL;
445 envp[2] = NULL; 444 return retval;
446 return 0;
447} 445}
448 446
449static struct bus_type ap_bus_type = { 447static struct bus_type ap_bus_type = {
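
[Editorial note] The ap_bus hunk swaps manual scnprintf() bookkeeping for add_uevent_var(), which appends one "KEY=value" string per call and tracks the envp index and buffer offset itself (this is the 2.6.21-era seven-argument form used above, not the later kobj_uevent_env variant). A hedged sketch of a bus uevent callback using it; the example_* names and the sample value are assumptions.

#include <linux/device.h>

static int example_uevent(struct device *dev, char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0, length = 0, retval;

	retval = add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
				&length, "EXAMPLE_TYPE=%04X", 0x1234);
	if (retval)
		return retval;
	envp[i] = NULL;		/* terminate the environment list */
	return 0;
}
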
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 7809a79feec7..6dd64d0c8d45 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3525,8 +3525,8 @@ unpack_next:
3525 memcpy(skb_put(skb,len_of_data), 3525 memcpy(skb_put(skb,len_of_data),
3526 privptr->p_mtc_envelope, 3526 privptr->p_mtc_envelope,
3527 len_of_data); 3527 len_of_data);
3528 skb->mac.raw=skb->data;
3529 skb->dev=dev; 3528 skb->dev=dev;
3529 skb_reset_mac_header(skb);
3530 skb->protocol=htons(ETH_P_IP); 3530 skb->protocol=htons(ETH_P_IP);
3531 skb->ip_summed=CHECKSUM_UNNECESSARY; 3531 skb->ip_summed=CHECKSUM_UNNECESSARY;
3532 privptr->stats.rx_packets++; 3532 privptr->stats.rx_packets++;
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 0d6d5fcc128b..b20fd0681733 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -455,7 +455,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
455 return; 455 return;
456 } 456 }
457 skb_put(pskb, header->length); 457 skb_put(pskb, header->length);
458 pskb->mac.raw = pskb->data; 458 skb_reset_mac_header(pskb);
459 len -= header->length; 459 len -= header->length;
460 skb = dev_alloc_skb(pskb->len); 460 skb = dev_alloc_skb(pskb->len);
461 if (!skb) { 461 if (!skb) {
@@ -472,8 +472,9 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
472 privptr->stats.rx_dropped++; 472 privptr->stats.rx_dropped++;
473 return; 473 return;
474 } 474 }
475 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); 475 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
476 skb->mac.raw = skb->data; 476 pskb->len);
477 skb_reset_mac_header(skb);
477 skb->dev = pskb->dev; 478 skb->dev = pskb->dev;
478 skb->protocol = pskb->protocol; 479 skb->protocol = pskb->protocol;
479 pskb->ip_summed = CHECKSUM_UNNECESSARY; 480 pskb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -706,7 +707,8 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
706 spin_unlock(&ch->collect_lock); 707 spin_unlock(&ch->collect_lock);
707 return; 708 return;
708 } 709 }
709 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data; 710 ch->trans_skb->data = ch->trans_skb_data;
711 skb_reset_tail_pointer(ch->trans_skb);
710 ch->trans_skb->len = 0; 712 ch->trans_skb->len = 0;
711 if (ch->prof.maxmulti < (ch->collect_len + 2)) 713 if (ch->prof.maxmulti < (ch->collect_len + 2))
712 ch->prof.maxmulti = ch->collect_len + 2; 714 ch->prof.maxmulti = ch->collect_len + 2;
@@ -715,8 +717,9 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
715 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; 717 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
716 i = 0; 718 i = 0;
717 while ((skb = skb_dequeue(&ch->collect_queue))) { 719 while ((skb = skb_dequeue(&ch->collect_queue))) {
718 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, 720 skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
719 skb->len); 721 skb->len),
722 skb->len);
720 privptr->stats.tx_packets++; 723 privptr->stats.tx_packets++;
721 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; 724 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
722 atomic_dec(&skb->users); 725 atomic_dec(&skb->users);
@@ -831,7 +834,8 @@ ch_action_rx(fsm_instance * fi, int event, void *arg)
831 ctc_unpack_skb(ch, skb); 834 ctc_unpack_skb(ch, skb);
832 } 835 }
833 again: 836 again:
834 skb->data = skb->tail = ch->trans_skb_data; 837 skb->data = ch->trans_skb_data;
838 skb_reset_tail_pointer(skb);
835 skb->len = 0; 839 skb->len = 0;
836 if (ctc_checkalloc_buffer(ch, 1)) 840 if (ctc_checkalloc_buffer(ch, 1))
837 return; 841 return;
@@ -1638,21 +1642,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1638 struct channel *ch; 1642 struct channel *ch;
1639 1643
1640 DBF_TEXT(trace, 2, __FUNCTION__); 1644 DBF_TEXT(trace, 2, __FUNCTION__);
1641 if ((ch = 1645 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1642 (struct channel *) kmalloc(sizeof (struct channel), 1646 if (!ch) {
1643 GFP_KERNEL)) == NULL) {
1644 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1647 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1645 return -1; 1648 return -1;
1646 } 1649 }
1647 memset(ch, 0, sizeof (struct channel)); 1650 /* assure all flags and counters are reset */
1648 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), 1651 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1649 GFP_KERNEL | GFP_DMA)) == NULL) { 1652 if (!ch->ccw) {
1650 kfree(ch); 1653 kfree(ch);
1651 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1654 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1652 return -1; 1655 return -1;
1653 } 1656 }
1654 1657
1655 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
1656 1658
1657 /** 1659 /**
1658 * "static" ccws are used in the following way: 1660 * "static" ccws are used in the following way:
@@ -1692,15 +1694,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1692 return -1; 1694 return -1;
1693 } 1695 }
1694 fsm_newstate(ch->fsm, CH_STATE_IDLE); 1696 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1695 if ((ch->irb = kmalloc(sizeof (struct irb), 1697 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1696 GFP_KERNEL)) == NULL) { 1698 if (!ch->irb) {
1697 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1699 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1698 kfree_fsm(ch->fsm); 1700 kfree_fsm(ch->fsm);
1699 kfree(ch->ccw); 1701 kfree(ch->ccw);
1700 kfree(ch); 1702 kfree(ch);
1701 return -1; 1703 return -1;
1702 } 1704 }
1703 memset(ch->irb, 0, sizeof (struct irb));
1704 while (*c && less_than((*c)->id, ch->id)) 1705 while (*c && less_than((*c)->id, ch->id))
1705 c = &(*c)->next; 1706 c = &(*c)->next;
1706 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { 1707 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
@@ -2226,7 +2227,8 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2226 * IDAL support in CTC is broken, so we have to 2227 * IDAL support in CTC is broken, so we have to
2227 * care about skb's above 2G ourselves. 2228 * care about skb's above 2G ourselves.
2228 */ 2229 */
2229 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31; 2230 hi = ((unsigned long)skb_tail_pointer(skb) +
2231 LL_HEADER_LENGTH) >> 31;
2230 if (hi) { 2232 if (hi) {
2231 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 2233 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2232 if (!nskb) { 2234 if (!nskb) {
@@ -2262,11 +2264,12 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2262 return -EBUSY; 2264 return -EBUSY;
2263 } 2265 }
2264 2266
2265 ch->trans_skb->tail = ch->trans_skb->data; 2267 skb_reset_tail_pointer(ch->trans_skb);
2266 ch->trans_skb->len = 0; 2268 ch->trans_skb->len = 0;
2267 ch->ccw[1].count = skb->len; 2269 ch->ccw[1].count = skb->len;
2268 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, 2270 skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
2269 skb->len); 2271 skb->len),
2272 skb->len);
2270 atomic_dec(&skb->users); 2273 atomic_dec(&skb->users);
2271 dev_kfree_skb_irq(skb); 2274 dev_kfree_skb_irq(skb);
2272 ccw_idx = 0; 2275 ccw_idx = 0;
@@ -2745,14 +2748,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
2745 if (!get_device(&cgdev->dev)) 2748 if (!get_device(&cgdev->dev))
2746 return -ENODEV; 2749 return -ENODEV;
2747 2750
2748 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); 2751 priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2749 if (!priv) { 2752 if (!priv) {
2750 ctc_pr_err("%s: Out of memory\n", __func__); 2753 ctc_pr_err("%s: Out of memory\n", __func__);
2751 put_device(&cgdev->dev); 2754 put_device(&cgdev->dev);
2752 return -ENOMEM; 2755 return -ENOMEM;
2753 } 2756 }
2754 2757
2755 memset(priv, 0, sizeof (struct ctc_priv));
2756 rc = ctc_add_files(&cgdev->dev); 2758 rc = ctc_add_files(&cgdev->dev);
2757 if (rc) { 2759 if (rc) {
2758 kfree(priv); 2760 kfree(priv);
@@ -2793,10 +2795,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
2793 DBF_TEXT(setup, 3, __FUNCTION__); 2795 DBF_TEXT(setup, 3, __FUNCTION__);
2794 2796
2795 if (alloc_device) { 2797 if (alloc_device) {
2796 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); 2798 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
2797 if (!dev) 2799 if (!dev)
2798 return NULL; 2800 return NULL;
2799 memset(dev, 0, sizeof (struct net_device));
2800 } 2801 }
2801 2802
2802 dev->priv = privptr; 2803 dev->priv = privptr;
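
[Editorial note] The claw and ctc hunks above replace direct pokes at skb->mac.raw, skb->tail and open-coded memcpy() with the sk_buff accessors introduced for 2.6.22 (skb_reset_mac_header(), skb_reset_tail_pointer(), skb_copy_from_linear_data(), skb_tail_pointer()), plus kzalloc() instead of kmalloc()+memset(). A hedged sketch of the receive-side pattern those conversions follow; the helper and the surrounding driver state are invented.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Copy a received frame out of a staging skb into a fresh one. */
static struct sk_buff *example_unpack(struct net_device *dev,
				      struct sk_buff *pskb)
{
	struct sk_buff *skb;

	skb_reset_mac_header(pskb);	/* was: pskb->mac.raw = pskb->data */
	skb = dev_alloc_skb(pskb->len);
	if (!skb)
		return NULL;
	skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), pskb->len);
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);
	return skb;
}
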
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index ecca1046714e..08a994fdd1a4 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1576,7 +1576,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1576 header->offset = card->tx_buffer->count; 1576 header->offset = card->tx_buffer->count;
1577 header->type = card->lan_type; 1577 header->type = card->lan_type;
1578 header->slot = card->portno; 1578 header->slot = card->portno;
1579 memcpy(header + 1, skb->data, skb->len); 1579 skb_copy_from_linear_data(skb, header + 1, skb->len);
1580 spin_unlock(&card->lock); 1580 spin_unlock(&card->lock);
1581 card->stats.tx_bytes += skb->len; 1581 card->stats.tx_bytes += skb->len;
1582 card->stats.tx_packets++; 1582 card->stats.tx_packets++;
@@ -1784,7 +1784,6 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1784 card->stats.rx_dropped++; 1784 card->stats.rx_dropped++;
1785 return; 1785 return;
1786 } 1786 }
1787 skb->dev = card->dev;
1788 memcpy(skb_put(skb, skb_len), skb_data, skb_len); 1787 memcpy(skb_put(skb, skb_len), skb_data, skb_len);
1789 skb->protocol = card->lan_type_trans(skb, card->dev); 1788 skb->protocol = card->lan_type_trans(skb, card->dev);
1790 card->stats.rx_bytes += skb_len; 1789 card->stats.rx_bytes += skb_len;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 594320ca1b7c..e10e85e85c84 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
635 return; 635 return;
636 } 636 }
637 skb_put(pskb, header->next); 637 skb_put(pskb, header->next);
638 pskb->mac.raw = pskb->data; 638 skb_reset_mac_header(pskb);
639 skb = dev_alloc_skb(pskb->len); 639 skb = dev_alloc_skb(pskb->len);
640 if (!skb) { 640 if (!skb) {
641 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", 641 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
@@ -645,8 +645,9 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
645 privptr->stats.rx_dropped++; 645 privptr->stats.rx_dropped++;
646 return; 646 return;
647 } 647 }
648 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); 648 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
649 skb->mac.raw = skb->data; 649 pskb->len);
650 skb_reset_mac_header(skb);
650 skb->dev = pskb->dev; 651 skb->dev = pskb->dev;
651 skb->protocol = pskb->protocol; 652 skb->protocol = pskb->protocol;
652 pskb->ip_summed = CHECKSUM_UNNECESSARY; 653 pskb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -689,7 +690,8 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
689 msg->length, conn->max_buffsize); 690 msg->length, conn->max_buffsize);
690 return; 691 return;
691 } 692 }
692 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; 693 conn->rx_buff->data = conn->rx_buff->head;
694 skb_reset_tail_pointer(conn->rx_buff);
693 conn->rx_buff->len = 0; 695 conn->rx_buff->len = 0;
694 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, 696 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
695 msg->length, NULL); 697 msg->length, NULL);
@@ -735,14 +737,17 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
735 } 737 }
736 } 738 }
737 } 739 }
738 conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head; 740 conn->tx_buff->data = conn->tx_buff->head;
741 skb_reset_tail_pointer(conn->tx_buff);
739 conn->tx_buff->len = 0; 742 conn->tx_buff->len = 0;
740 spin_lock_irqsave(&conn->collect_lock, saveflags); 743 spin_lock_irqsave(&conn->collect_lock, saveflags);
741 while ((skb = skb_dequeue(&conn->collect_queue))) { 744 while ((skb = skb_dequeue(&conn->collect_queue))) {
742 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; 745 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
743 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, 746 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
744 NETIUCV_HDRLEN); 747 NETIUCV_HDRLEN);
745 memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len); 748 skb_copy_from_linear_data(skb,
749 skb_put(conn->tx_buff, skb->len),
750 skb->len);
746 txbytes += skb->len; 751 txbytes += skb->len;
747 txpackets++; 752 txpackets++;
748 stat_maxcq++; 753 stat_maxcq++;
@@ -1164,8 +1169,8 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1164 * Copy the skb to a new allocated skb in lowmem only if the 1169 * Copy the skb to a new allocated skb in lowmem only if the
1165 * data is located above 2G in memory or tailroom is < 2. 1170 * data is located above 2G in memory or tailroom is < 2.
1166 */ 1171 */
1167 unsigned long hi = 1172 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1168 ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31; 1173 NETIUCV_HDRLEN)) >> 31;
1169 int copied = 0; 1174 int copied = 0;
1170 if (hi || (skb_tailroom(skb) < 2)) { 1175 if (hi || (skb_tailroom(skb) < 2)) {
1171 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + 1176 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 7c735e1fe063..dd7034fbfff8 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -267,7 +267,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
267 267
268 QETH_DBF_TEXT(trace, 5, "eddpcdtc"); 268 QETH_DBF_TEXT(trace, 5, "eddpcdtc");
269 if (skb_shinfo(eddp->skb)->nr_frags == 0) { 269 if (skb_shinfo(eddp->skb)->nr_frags == 0) {
270 memcpy(dst, eddp->skb->data + eddp->skb_offset, len); 270 skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
271 dst, len);
271 *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, 272 *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
272 *hcsum); 273 *hcsum);
273 eddp->skb_offset += len; 274 eddp->skb_offset += len;
@@ -416,7 +417,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
416 eddp->skb_offset += VLAN_HLEN; 417 eddp->skb_offset += VLAN_HLEN;
417#endif /* CONFIG_QETH_VLAN */ 418#endif /* CONFIG_QETH_VLAN */
418 } 419 }
419 tcph = eddp->skb->h.th; 420 tcph = tcp_hdr(eddp->skb);
420 while (eddp->skb_offset < eddp->skb->len) { 421 while (eddp->skb_offset < eddp->skb->len) {
421 data_len = min((int)skb_shinfo(eddp->skb)->gso_size, 422 data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
422 (int)(eddp->skb->len - eddp->skb_offset)); 423 (int)(eddp->skb->len - eddp->skb_offset));
@@ -473,20 +474,24 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
473 QETH_DBF_TEXT(trace, 5, "eddpficx"); 474 QETH_DBF_TEXT(trace, 5, "eddpficx");
474 /* create our segmentation headers and copy original headers */ 475 /* create our segmentation headers and copy original headers */
475 if (skb->protocol == htons(ETH_P_IP)) 476 if (skb->protocol == htons(ETH_P_IP))
476 eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph, 477 eddp = qeth_eddp_create_eddp_data(qhdr,
477 skb->nh.iph->ihl*4, 478 skb_network_header(skb),
478 (u8 *)skb->h.th, skb->h.th->doff*4); 479 ip_hdrlen(skb),
480 skb_transport_header(skb),
481 tcp_hdrlen(skb));
479 else 482 else
480 eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h, 483 eddp = qeth_eddp_create_eddp_data(qhdr,
481 sizeof(struct ipv6hdr), 484 skb_network_header(skb),
482 (u8 *)skb->h.th, skb->h.th->doff*4); 485 sizeof(struct ipv6hdr),
486 skb_transport_header(skb),
487 tcp_hdrlen(skb));
483 488
484 if (eddp == NULL) { 489 if (eddp == NULL) {
485 QETH_DBF_TEXT(trace, 2, "eddpfcnm"); 490 QETH_DBF_TEXT(trace, 2, "eddpfcnm");
486 return -ENOMEM; 491 return -ENOMEM;
487 } 492 }
488 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 493 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
489 skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr); 494 skb_set_mac_header(skb, sizeof(struct qeth_hdr));
490 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); 495 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
491#ifdef CONFIG_QETH_VLAN 496#ifdef CONFIG_QETH_VLAN
492 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { 497 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
@@ -590,12 +595,13 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
590 QETH_DBF_TEXT(trace, 5, "creddpct"); 595 QETH_DBF_TEXT(trace, 5, "creddpct");
591 if (skb->protocol == htons(ETH_P_IP)) 596 if (skb->protocol == htons(ETH_P_IP))
592 ctx = qeth_eddp_create_context_generic(card, skb, 597 ctx = qeth_eddp_create_context_generic(card, skb,
593 sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 + 598 (sizeof(struct qeth_hdr) +
594 skb->h.th->doff*4); 599 ip_hdrlen(skb) +
600 tcp_hdrlen(skb)));
595 else if (skb->protocol == htons(ETH_P_IPV6)) 601 else if (skb->protocol == htons(ETH_P_IPV6))
596 ctx = qeth_eddp_create_context_generic(card, skb, 602 ctx = qeth_eddp_create_context_generic(card, skb,
597 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + 603 sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
598 skb->h.th->doff*4); 604 tcp_hdrlen(skb));
599 else 605 else
600 QETH_DBF_TEXT(trace, 2, "cetcpinv"); 606 QETH_DBF_TEXT(trace, 2, "cetcpinv");
601 607
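
[Editorial note] Throughout the qeth_eddp changes, the old skb->nh.iph->ihl*4 and skb->h.th->doff*4 computations become ip_hdrlen() and tcp_hdrlen() on top of the typed header accessors. A small hedged sketch of that calculation; the exact header locations of these inlines are assumed.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ip.h>

/* IPv4 + TCP header bytes of a linear TCP-over-IPv4 skb, as used above. */
static unsigned int example_l3l4_hdrlen(const struct sk_buff *skb)
{
	/* ip_hdrlen() == ip_hdr(skb)->ihl * 4, tcp_hdrlen() == tcp_hdr(skb)->doff * 4 */
	return ip_hdrlen(skb) + tcp_hdrlen(skb);
}
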
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index d8a86f5af379..ad7792dc1a04 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2278,7 +2278,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2278 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) 2278 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
2279 return tr_type_trans(skb,dev); 2279 return tr_type_trans(skb,dev);
2280#endif /* CONFIG_TR */ 2280#endif /* CONFIG_TR */
2281 skb->mac.raw = skb->data; 2281 skb_reset_mac_header(skb);
2282 skb_pull(skb, ETH_HLEN ); 2282 skb_pull(skb, ETH_HLEN );
2283 eth = eth_hdr(skb); 2283 eth = eth_hdr(skb);
2284 2284
@@ -2306,9 +2306,9 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2306 struct iphdr *ip_hdr; 2306 struct iphdr *ip_hdr;
2307 2307
2308 QETH_DBF_TEXT(trace,5,"skbfktr"); 2308 QETH_DBF_TEXT(trace,5,"skbfktr");
2309 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR; 2309 skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
2310 /* this is a fake ethernet header */ 2310 /* this is a fake ethernet header */
2311 fake_hdr = (struct trh_hdr *) skb->mac.raw; 2311 fake_hdr = tr_hdr(skb);
2312 2312
2313 /* the destination MAC address */ 2313 /* the destination MAC address */
2314 switch (skb->pkt_type){ 2314 switch (skb->pkt_type){
@@ -2359,9 +2359,9 @@ qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2359 struct iphdr *ip_hdr; 2359 struct iphdr *ip_hdr;
2360 2360
2361 QETH_DBF_TEXT(trace,5,"skbfketh"); 2361 QETH_DBF_TEXT(trace,5,"skbfketh");
2362 skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH; 2362 skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH);
2363 /* this is a fake ethernet header */ 2363 /* this is a fake ethernet header */
2364 fake_hdr = (struct ethhdr *) skb->mac.raw; 2364 fake_hdr = eth_hdr(skb);
2365 2365
2366 /* the destination MAC address */ 2366 /* the destination MAC address */
2367 switch (skb->pkt_type){ 2367 switch (skb->pkt_type){
@@ -2461,7 +2461,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2461 if (card->options.fake_ll) 2461 if (card->options.fake_ll)
2462 qeth_rebuild_skb_fake_ll(card, skb, hdr); 2462 qeth_rebuild_skb_fake_ll(card, skb, hdr);
2463 else 2463 else
2464 skb->mac.raw = skb->data; 2464 skb_reset_mac_header(skb);
2465 skb->ip_summed = card->options.checksum_type; 2465 skb->ip_summed = card->options.checksum_type;
2466 if (card->options.checksum_type == HW_CHECKSUMMING){ 2466 if (card->options.checksum_type == HW_CHECKSUMMING){
2467 if ( (hdr->hdr.l3.ext_flags & 2467 if ( (hdr->hdr.l3.ext_flags &
@@ -2501,7 +2501,8 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2501 vlan_tag = qeth_rebuild_skb(card, skb, hdr); 2501 vlan_tag = qeth_rebuild_skb(card, skb, hdr);
2502 else { /*in case of OSN*/ 2502 else { /*in case of OSN*/
2503 skb_push(skb, sizeof(struct qeth_hdr)); 2503 skb_push(skb, sizeof(struct qeth_hdr));
2504 memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); 2504 skb_copy_to_linear_data(skb, hdr,
2505 sizeof(struct qeth_hdr));
2505 } 2506 }
2506 /* is device UP ? */ 2507 /* is device UP ? */
2507 if (!(card->dev->flags & IFF_UP)){ 2508 if (!(card->dev->flags & IFF_UP)){
@@ -3778,9 +3779,11 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3778 } 3779 }
3779 /* try something else */ 3780 /* try something else */
3780 if (skb->protocol == ETH_P_IPV6) 3781 if (skb->protocol == ETH_P_IPV6)
3781 return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0; 3782 return (skb_network_header(skb)[24] == 0xff) ?
3783 RTN_MULTICAST : 0;
3782 else if (skb->protocol == ETH_P_IP) 3784 else if (skb->protocol == ETH_P_IP)
3783 return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; 3785 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
3786 RTN_MULTICAST : 0;
3784 /* ... */ 3787 /* ... */
3785 if (!memcmp(skb->data, skb->dev->broadcast, 6)) 3788 if (!memcmp(skb->data, skb->dev->broadcast, 6))
3786 return RTN_BROADCAST; 3789 return RTN_BROADCAST;
@@ -3818,18 +3821,20 @@ qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3818 return card->info.is_multicast_different & 3821 return card->info.is_multicast_different &
3819 (card->qdio.no_out_queues - 1); 3822 (card->qdio.no_out_queues - 1);
3820 if (card->qdio.do_prio_queueing && (ipv == 4)) { 3823 if (card->qdio.do_prio_queueing && (ipv == 4)) {
3824 const u8 tos = ip_hdr(skb)->tos;
3825
3821 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ 3826 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){
3822 if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT) 3827 if (tos & IP_TOS_NOTIMPORTANT)
3823 return 3; 3828 return 3;
3824 if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY) 3829 if (tos & IP_TOS_HIGHRELIABILITY)
3825 return 2; 3830 return 2;
3826 if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT) 3831 if (tos & IP_TOS_HIGHTHROUGHPUT)
3827 return 1; 3832 return 1;
3828 if (skb->nh.iph->tos & IP_TOS_LOWDELAY) 3833 if (tos & IP_TOS_LOWDELAY)
3829 return 0; 3834 return 0;
3830 } 3835 }
3831 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) 3836 if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC)
3832 return 3 - (skb->nh.iph->tos >> 6); 3837 return 3 - (tos >> 6);
3833 } else if (card->qdio.do_prio_queueing && (ipv == 6)) { 3838 } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
3834 /* TODO: IPv6!!! */ 3839 /* TODO: IPv6!!! */
3835 } 3840 }
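
[Editorial note] In the QETH_PRIO_Q_ING_PREC branch the cached TOS byte now drives the queue choice directly: the top two TOS bits select one of four outbound queues, with higher values mapped to lower (higher-priority) queue numbers. A standalone illustration of that arithmetic; everything outside the "3 - (tos >> 6)" expression is invented.

#include <stdio.h>

/* Mirrors "3 - (tos >> 6)" from the hunk above. */
static int prec_queue(unsigned char tos)
{
	return 3 - (tos >> 6);
}

int main(void)
{
	const unsigned char samples[] = { 0x00, 0x40, 0x80, 0xc0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("tos 0x%02x -> queue %d\n", samples[i], prec_queue(samples[i]));
	return 0;
}
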
@@ -3866,9 +3871,9 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3866 * memcpys instead of one memmove to save cycles. 3871 * memcpys instead of one memmove to save cycles.
3867 */ 3872 */
3868 skb_push(skb, VLAN_HLEN); 3873 skb_push(skb, VLAN_HLEN);
3869 memcpy(skb->data, skb->data + 4, 4); 3874 skb_copy_to_linear_data(skb, skb->data + 4, 4);
3870 memcpy(skb->data + 4, skb->data + 8, 4); 3875 skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4);
3871 memcpy(skb->data + 8, skb->data + 12, 4); 3876 skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4);
3872 tag = (u16 *)(skb->data + 12); 3877 tag = (u16 *)(skb->data + 12);
3873 /* 3878 /*
3874 * first two bytes = ETH_P_8021Q (0x8100) 3879 * first two bytes = ETH_P_8021Q (0x8100)
@@ -4039,7 +4044,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4039 *((u32 *) skb->dst->neighbour->primary_key); 4044 *((u32 *) skb->dst->neighbour->primary_key);
4040 } else { 4045 } else {
4041 /* fill in destination address used in ip header */ 4046 /* fill in destination address used in ip header */
4042 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr; 4047 *((u32 *)(&hdr->hdr.l3.dest_addr[12])) =
4048 ip_hdr(skb)->daddr;
4043 } 4049 }
4044 } else if (ipv == 6) { /* IPv6 or passthru */ 4050 } else if (ipv == 6) { /* IPv6 or passthru */
4045 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); 4051 hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type);
@@ -4048,7 +4054,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4048 skb->dst->neighbour->primary_key, 16); 4054 skb->dst->neighbour->primary_key, 16);
4049 } else { 4055 } else {
4050 /* fill in destination address used in ip header */ 4056 /* fill in destination address used in ip header */
4051 memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16); 4057 memcpy(hdr->hdr.l3.dest_addr,
4058 &ipv6_hdr(skb)->daddr, 16);
4052 } 4059 }
4053 } else { /* passthrough */ 4060 } else { /* passthrough */
4054 if((skb->dev->type == ARPHRD_IEEE802_TR) && 4061 if((skb->dev->type == ARPHRD_IEEE802_TR) &&
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
index 81f805cc5ee7..89d56c8ecdd2 100644
--- a/drivers/s390/net/qeth_proc.c
+++ b/drivers/s390/net/qeth_proc.c
@@ -37,7 +37,6 @@ qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
37 struct device *dev = NULL; 37 struct device *dev = NULL;
38 loff_t nr = 0; 38 loff_t nr = 0;
39 39
40 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
41 if (*offset == 0) 40 if (*offset == 0)
42 return SEQ_START_TOKEN; 41 return SEQ_START_TOKEN;
43 while (1) { 42 while (1) {
@@ -53,7 +52,6 @@ qeth_procfile_seq_start(struct seq_file *s, loff_t *offset)
53static void 52static void
54qeth_procfile_seq_stop(struct seq_file *s, void* it) 53qeth_procfile_seq_stop(struct seq_file *s, void* it)
55{ 54{
56 up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
57} 55}
58 56
59static void * 57static void *
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
index 14504afb044e..c20e923cf9ad 100644
--- a/drivers/s390/net/qeth_tso.h
+++ b/drivers/s390/net/qeth_tso.h
@@ -40,8 +40,8 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
40 QETH_DBF_TEXT(trace, 5, "tsofhdr"); 40 QETH_DBF_TEXT(trace, 5, "tsofhdr");
41 41
42 hdr = (struct qeth_hdr_tso *) skb->data; 42 hdr = (struct qeth_hdr_tso *) skb->data;
43 iph = skb->nh.iph; 43 iph = ip_hdr(skb);
44 tcph = skb->h.th; 44 tcph = tcp_hdr(skb);
45 /*fix header to TSO values ...*/ 45 /*fix header to TSO values ...*/
46 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 46 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
47 /*set values which are fix for the first approach ...*/ 47 /*set values which are fix for the first approach ...*/
@@ -63,13 +63,9 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
63static inline void 63static inline void
64qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) 64qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
65{ 65{
66 struct iphdr *iph; 66 struct iphdr *iph = ip_hdr(skb);
67 struct ipv6hdr *ip6h; 67 struct ipv6hdr *ip6h = ipv6_hdr(skb);
68 struct tcphdr *tcph; 68 struct tcphdr *tcph = tcp_hdr(skb);
69
70 iph = skb->nh.iph;
71 ip6h = skb->nh.ipv6h;
72 tcph = skb->h.th;
73 69
74 tcph->check = 0; 70 tcph->check = 0;
75 if (skb->protocol == ETH_P_IPV6) { 71 if (skb->protocol == ETH_P_IPV6) {
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 806bb1a921eb..644a06eba828 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -21,6 +21,7 @@
21#include "cio/cio.h" 21#include "cio/cio.h"
22#include "cio/chsc.h" 22#include "cio/chsc.h"
23#include "cio/css.h" 23#include "cio/css.h"
24#include "cio/chp.h"
24#include "s390mach.h" 25#include "s390mach.h"
25 26
26static struct semaphore m_sem; 27static struct semaphore m_sem;
@@ -44,14 +45,13 @@ static int
44s390_collect_crw_info(void *param) 45s390_collect_crw_info(void *param)
45{ 46{
46 struct crw crw[2]; 47 struct crw crw[2];
47 int ccode, ret, slow; 48 int ccode;
48 struct semaphore *sem; 49 struct semaphore *sem;
49 unsigned int chain; 50 unsigned int chain;
50 51
51 sem = (struct semaphore *)param; 52 sem = (struct semaphore *)param;
52repeat: 53repeat:
53 down_interruptible(sem); 54 down_interruptible(sem);
54 slow = 0;
55 chain = 0; 55 chain = 0;
56 while (1) { 56 while (1) {
57 if (unlikely(chain > 1)) { 57 if (unlikely(chain > 1)) {
@@ -84,9 +84,8 @@ repeat:
84 /* Check for overflows. */ 84 /* Check for overflows. */
85 if (crw[chain].oflw) { 85 if (crw[chain].oflw) {
86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
87 css_reiterate_subchannels(); 87 css_schedule_eval_all();
88 chain = 0; 88 chain = 0;
89 slow = 1;
90 continue; 89 continue;
91 } 90 }
92 switch (crw[chain].rsc) { 91 switch (crw[chain].rsc) {
@@ -94,10 +93,7 @@ repeat:
94 if (crw[0].chn && !chain) 93 if (crw[0].chn && !chain)
95 break; 94 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid); 95 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 ret = css_process_crw (crw[0].rsid, 96 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 chain ? crw[1].rsid : 0);
99 if (ret == -EAGAIN)
100 slow = 1;
101 break; 97 break;
102 case CRW_RSC_MONITOR: 98 case CRW_RSC_MONITOR:
103 pr_debug("source is monitoring facility\n"); 99 pr_debug("source is monitoring facility\n");
@@ -116,28 +112,23 @@ repeat:
116 } 112 }
117 switch (crw[0].erc) { 113 switch (crw[0].erc) {
118 case CRW_ERC_IPARM: /* Path has come. */ 114 case CRW_ERC_IPARM: /* Path has come. */
119 ret = chp_process_crw(crw[0].rsid, 1); 115 chp_process_crw(crw[0].rsid, 1);
120 break; 116 break;
121 case CRW_ERC_PERRI: /* Path has gone. */ 117 case CRW_ERC_PERRI: /* Path has gone. */
122 case CRW_ERC_PERRN: 118 case CRW_ERC_PERRN:
123 ret = chp_process_crw(crw[0].rsid, 0); 119 chp_process_crw(crw[0].rsid, 0);
124 break; 120 break;
125 default: 121 default:
126 pr_debug("Don't know how to handle erc=%x\n", 122 pr_debug("Don't know how to handle erc=%x\n",
127 crw[0].erc); 123 crw[0].erc);
128 ret = 0;
129 } 124 }
130 if (ret == -EAGAIN)
131 slow = 1;
132 break; 125 break;
133 case CRW_RSC_CONFIG: 126 case CRW_RSC_CONFIG:
134 pr_debug("source is configuration-alert facility\n"); 127 pr_debug("source is configuration-alert facility\n");
135 break; 128 break;
136 case CRW_RSC_CSS: 129 case CRW_RSC_CSS:
137 pr_debug("source is channel subsystem\n"); 130 pr_debug("source is channel subsystem\n");
138 ret = chsc_process_crw(); 131 chsc_process_crw();
139 if (ret == -EAGAIN)
140 slow = 1;
141 break; 132 break;
142 default: 133 default:
143 pr_debug("unknown source\n"); 134 pr_debug("unknown source\n");
@@ -146,8 +137,6 @@ repeat:
146 /* chain is always 0 or 1 here. */ 137 /* chain is always 0 or 1 here. */
147 chain = crw[chain].chn ? chain + 1 : 0; 138 chain = crw[chain].chn ? chain + 1 : 0;
148 } 139 }
149 if (slow)
150 queue_work(slow_path_wq, &slow_path_work);
151 goto repeat; 140 goto repeat;
152 return 0; 141 return 0;
153} 142}
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 090743d2f914..19343f9675c3 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void)
357 357
358__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
359 359
360int get_cpu_capability(unsigned int *capability)
361{
362 struct sysinfo_1_2_2 *info;
363 int rc;
364
365 info = (void *) get_zeroed_page(GFP_KERNEL);
366 if (!info)
367 return -ENOMEM;
368 rc = stsi(info, 1, 2, 2);
369 if (rc == -ENOSYS)
370 goto out;
371 rc = 0;
372 *capability = info->capability;
373out:
374 free_page((unsigned long) info);
375 return rc;
376}
377
360/* 378/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 379 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */ 380 */
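
[Editorial note] The new get_cpu_capability() reads the machine's capability rating via STSI 1.2.2 into a zeroed page and hands back info->capability. A hedged sketch of a caller; the prototype is repeated here only for the sketch (the real declaration lives in a header not shown in this diff), and the log message is illustrative.

#include <linux/kernel.h>

int get_cpu_capability(unsigned int *capability);	/* defined in sysinfo.c above */

static void example_report_capability(void)
{
	unsigned int cap;

	if (get_cpu_capability(&cap) == 0)
		printk(KERN_INFO "cpu capability rating: %u\n", cap);
}
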
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 2cea4f5d2084..f2be2ead8742 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -726,7 +726,7 @@ static struct miscdevice envctrl_dev = {
726 * Return: None. 726 * Return: None.
727 */ 727 */
728static void envctrl_set_mon(struct i2c_child_t *pchild, 728static void envctrl_set_mon(struct i2c_child_t *pchild,
729 char *chnl_desc, 729 const char *chnl_desc,
730 int chnl_no) 730 int chnl_no)
731{ 731{
732 /* Firmware only has temperature type. It does not distinguish 732 /* Firmware only has temperature type. It does not distinguish
@@ -763,8 +763,8 @@ static void envctrl_set_mon(struct i2c_child_t *pchild,
763static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) 763static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp)
764{ 764{
765 int i = 0, len; 765 int i = 0, len;
766 char *pos; 766 const char *pos;
767 unsigned int *pval; 767 const unsigned int *pval;
768 768
769 /* Firmware describe channels into a stream separated by a '\0'. */ 769 /* Firmware describe channels into a stream separated by a '\0'. */
770 pos = of_get_property(dp, "channels-description", &len); 770 pos = of_get_property(dp, "channels-description", &len);
@@ -859,7 +859,7 @@ static void envctrl_init_i2c_child(struct linux_ebus_child *edev_child,
859{ 859{
860 int len, i, tbls_size = 0; 860 int len, i, tbls_size = 0;
861 struct device_node *dp = edev_child->prom_node; 861 struct device_node *dp = edev_child->prom_node;
862 void *pval; 862 const void *pval;
863 863
864 /* Get device address. */ 864 /* Get device address. */
865 pval = of_get_property(dp, "reg", &len); 865 pval = of_get_property(dp, "reg", &len);
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 6e99507aeb12..262f01e68592 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -190,7 +190,7 @@ static int __init flash_init(void)
190 } 190 }
191 if (!sdev) { 191 if (!sdev) {
192#ifdef CONFIG_PCI 192#ifdef CONFIG_PCI
193 struct linux_prom_registers *ebus_regs; 193 const struct linux_prom_registers *ebus_regs;
194 194
195 for_each_ebus(ebus) { 195 for_each_ebus(ebus) {
196 for_each_ebusdev(edev, ebus) { 196 for_each_ebusdev(edev, ebus) {
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index eec28c142a59..fbfeb89a6f32 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -44,7 +44,6 @@
44#include <asm/openpromio.h> 44#include <asm/openpromio.h>
45#ifdef CONFIG_PCI 45#ifdef CONFIG_PCI
46#include <linux/pci.h> 46#include <linux/pci.h>
47#include <asm/pbm.h>
48#endif 47#endif
49 48
50MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)"); 49MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
@@ -141,7 +140,7 @@ static int copyout(void __user *info, struct openpromio *opp, int len)
141 140
142static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize) 141static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
143{ 142{
144 void *pval; 143 const void *pval;
145 int len; 144 int len;
146 145
147 if (!dp || 146 if (!dp ||
@@ -248,18 +247,18 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp
248 if (bufsize >= 2*sizeof(int)) { 247 if (bufsize >= 2*sizeof(int)) {
249#ifdef CONFIG_PCI 248#ifdef CONFIG_PCI
250 struct pci_dev *pdev; 249 struct pci_dev *pdev;
251 struct pcidev_cookie *pcp; 250 struct device_node *dp;
252 pdev = pci_find_slot (((int *) op->oprom_array)[0], 251
252 pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0],
253 ((int *) op->oprom_array)[1]); 253 ((int *) op->oprom_array)[1]);
254 254
255 pcp = pdev->sysdata; 255 dp = pci_device_to_OF_node(pdev);
256 if (pcp != NULL) { 256 data->current_node = dp;
257 dp = pcp->prom_node; 257 *((int *)op->oprom_array) = dp->node;
258 data->current_node = dp; 258 op->oprom_size = sizeof(int);
259 *((int *)op->oprom_array) = dp->node; 259 err = copyout(argp, op, bufsize + sizeof(int));
260 op->oprom_size = sizeof(int); 260
261 err = copyout(argp, op, bufsize + sizeof(int)); 261 pci_dev_put(pdev);
262 }
263#endif 262#endif
264 } 263 }
265 264
@@ -409,7 +408,7 @@ static int opiocget(void __user *argp, DATA *data)
409 struct opiocdesc op; 408 struct opiocdesc op;
410 struct device_node *dp; 409 struct device_node *dp;
411 char *str; 410 char *str;
412 void *pval; 411 const void *pval;
413 int err, len; 412 int err, len;
414 413
415 if (copy_from_user(&op, argp, sizeof(op))) 414 if (copy_from_user(&op, argp, sizeof(op)))
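
[Editorial note] The openprom hunk moves from pci_find_slot() plus the sparc pcidev_cookie to pci_get_bus_and_slot() and pci_device_to_OF_node(); the _get_ variant takes a reference that must be dropped with pci_dev_put(). A hedged sketch of that lookup pattern; the NULL check, the helper name and the header carrying pci_device_to_OF_node() are assumptions.

#include <linux/pci.h>
#include <asm/prom.h>	/* assumed location of pci_device_to_OF_node() on sparc */

static int example_pci_to_node(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *pdev;
	struct device_node *dp;
	int node = -1;

	pdev = pci_get_bus_and_slot(bus, devfn);
	if (!pdev)
		return -1;
	dp = pci_device_to_OF_node(pdev);
	if (dp)
		node = dp->node;
	pci_dev_put(pdev);	/* drop the reference taken by pci_get_* */
	return node;
}
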
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index 8bfb67ccdcd4..c3135e2fbd5a 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -259,11 +259,10 @@ static int vfc_debug(struct vfc_dev *dev, int cmd, void __user *argp)
259 if (copy_from_user(&inout, argp, sizeof(inout))) 259 if (copy_from_user(&inout, argp, sizeof(inout)))
260 return -EFAULT; 260 return -EFAULT;
261 261
262 buffer = kmalloc(inout.len, GFP_KERNEL); 262 buffer = kzalloc(inout.len, GFP_KERNEL);
263 if (buffer == NULL) 263 if (buffer == NULL)
264 return -ENOMEM; 264 return -ENOMEM;
265 265
266 memset(buffer,0,inout.len);
267 vfc_lock_device(dev); 266 vfc_lock_device(dev);
268 inout.ret= 267 inout.ret=
269 vfc_i2c_recvbuf(dev,inout.addr & 0xff 268 vfc_i2c_recvbuf(dev,inout.addr & 0xff
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 6349dd617f85..eee590a51d8a 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -35,7 +35,7 @@ struct sbus_bus *sbus_root;
35static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev) 35static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev)
36{ 36{
37 unsigned long base; 37 unsigned long base;
38 void *pval; 38 const void *pval;
39 int len, err; 39 int len, err;
40 40
41 sdev->prom_node = dp->node; 41 sdev->prom_node = dp->node;
@@ -86,7 +86,7 @@ static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sde
86 86
87static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus) 87static void __init sbus_bus_ranges_init(struct device_node *dp, struct sbus_bus *sbus)
88{ 88{
89 void *pval; 89 const void *pval;
90 int len; 90 int len;
91 91
92 pval = of_get_property(dp, "ranges", &len); 92 pval = of_get_property(dp, "ranges", &len);
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index bf5d63e1beee..656bdb1352d8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1864,10 +1864,17 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
1864/* This function will handle the request sense scsi command */ 1864/* This function will handle the request sense scsi command */
1865static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id) 1865static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id)
1866{ 1866{
1867 char request_buffer[18];
1868
1867 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_request_sense()\n"); 1869 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_request_sense()\n");
1868 1870
1869 /* For now we just zero the request buffer */ 1871 memset(request_buffer, 0, sizeof(request_buffer));
1870 memset(tw_dev->srb[request_id]->request_buffer, 0, tw_dev->srb[request_id]->request_bufflen); 1872 request_buffer[0] = 0x70; /* Immediate fixed format */
1873 request_buffer[7] = 10; /* minimum size per SPC: 18 bytes */
1874 /* leave all other fields zero, giving effectively NO_SENSE return */
1875 tw_transfer_internal(tw_dev, request_id, request_buffer,
1876 sizeof(request_buffer));
1877
1871 tw_dev->state[request_id] = TW_S_COMPLETED; 1878 tw_dev->state[request_id] = TW_S_COMPLETED;
1872 tw_state_request_finish(tw_dev, request_id); 1879 tw_state_request_finish(tw_dev, request_id);
1873 1880
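
[Editorial note] The 3w-xxxx hunk now returns a minimal fixed-format sense buffer (response code 0x70, additional length 10, everything else zero, i.e. effectively NO SENSE) instead of zeroing the caller's request buffer. A standalone sketch of building that 18-byte buffer; tw_transfer_internal() is the driver's own helper and is not reproduced here.

#include <string.h>

/* Build the same "no sense" fixed-format sense data as the hunk above. */
static void build_no_sense(unsigned char sense[18])
{
	memset(sense, 0, 18);
	sense[0] = 0x70;	/* current error, fixed format */
	sense[7] = 10;		/* additional sense length: 18 bytes total */
}
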
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4cd280e86966..fcc4cb6c7f46 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1763,9 +1763,15 @@ config SUN3X_ESP
1763 The ESP was an on-board SCSI controller used on Sun 3/80 1763 The ESP was an on-board SCSI controller used on Sun 3/80
1764 machines. Say Y here to compile in support for it. 1764 machines. Say Y here to compile in support for it.
1765 1765
1766config SCSI_ESP_CORE
1767 tristate "ESP Scsi Driver Core"
1768 depends on SCSI
1769 select SCSI_SPI_ATTRS
1770
1766config SCSI_SUNESP 1771config SCSI_SUNESP
1767 tristate "Sparc ESP Scsi Driver" 1772 tristate "Sparc ESP Scsi Driver"
1768 depends on SBUS && SCSI 1773 depends on SBUS && SCSI
1774 select SCSI_ESP_CORE
1769 help 1775 help
1770 This is the driver for the Sun ESP SCSI host adapter. The ESP 1776 This is the driver for the Sun ESP SCSI host adapter. The ESP
1771 chipset is present in most SPARC SBUS-based computers. 1777 chipset is present in most SPARC SBUS-based computers.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 79ecf4ebe6eb..70cff4c599d7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
106obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 106obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
107obj-$(CONFIG_MEGARAID_SAS) += megaraid/ 107obj-$(CONFIG_MEGARAID_SAS) += megaraid/
108obj-$(CONFIG_SCSI_ACARD) += atp870u.o 108obj-$(CONFIG_SCSI_ACARD) += atp870u.o
109obj-$(CONFIG_SCSI_SUNESP) += esp.o 109obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o
110obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o
110obj-$(CONFIG_SCSI_GDTH) += gdth.o 111obj-$(CONFIG_SCSI_GDTH) += gdth.o
111obj-$(CONFIG_SCSI_INITIO) += initio.o 112obj-$(CONFIG_SCSI_INITIO) += initio.o
112obj-$(CONFIG_SCSI_INIA100) += a100u2w.o 113obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
deleted file mode 100644
index 2c2fe80bc42a..000000000000
--- a/drivers/scsi/esp.c
+++ /dev/null
@@ -1,4394 +0,0 @@
1/* esp.c: ESP Sun SCSI driver.
2 *
3 * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net)
4 */
5
6/* TODO:
7 *
8 * 1) Maybe disable parity checking in config register one for SCSI1
9 * targets. (Gilmore says parity error on the SBus can lock up
10 * old sun4c's)
11 * 2) Add support for DMA2 pipelining.
12 * 3) Add tagged queueing.
13 */
14
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/types.h>
18#include <linux/string.h>
19#include <linux/slab.h>
20#include <linux/blkdev.h>
21#include <linux/proc_fs.h>
22#include <linux/stat.h>
23#include <linux/init.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/module.h>
27
28#include "esp.h"
29
30#include <asm/sbus.h>
31#include <asm/dma.h>
32#include <asm/system.h>
33#include <asm/ptrace.h>
34#include <asm/pgtable.h>
35#include <asm/oplib.h>
36#include <asm/io.h>
37#include <asm/irq.h>
38#ifndef __sparc_v9__
39#include <asm/machines.h>
40#include <asm/idprom.h>
41#endif
42
43#include <scsi/scsi.h>
44#include <scsi/scsi_cmnd.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_eh.h>
47#include <scsi/scsi_host.h>
48#include <scsi/scsi_tcq.h>
49
50#define DRV_VERSION "1.101"
51
52#define DEBUG_ESP
53/* #define DEBUG_ESP_HME */
54/* #define DEBUG_ESP_DATA */
55/* #define DEBUG_ESP_QUEUE */
56/* #define DEBUG_ESP_DISCONNECT */
57/* #define DEBUG_ESP_STATUS */
58/* #define DEBUG_ESP_PHASES */
59/* #define DEBUG_ESP_WORKBUS */
60/* #define DEBUG_STATE_MACHINE */
61/* #define DEBUG_ESP_CMDS */
62/* #define DEBUG_ESP_IRQS */
63/* #define DEBUG_SDTR */
64/* #define DEBUG_ESP_SG */
65
66/* Use the following to sprinkle debugging messages in a way which
67 * suits you if combinations of the above become too verbose when
68 * trying to track down a specific problem.
69 */
70/* #define DEBUG_ESP_MISC */
71
72#if defined(DEBUG_ESP)
73#define ESPLOG(foo) printk foo
74#else
75#define ESPLOG(foo)
76#endif /* (DEBUG_ESP) */
77
78#if defined(DEBUG_ESP_HME)
79#define ESPHME(foo) printk foo
80#else
81#define ESPHME(foo)
82#endif
83
84#if defined(DEBUG_ESP_DATA)
85#define ESPDATA(foo) printk foo
86#else
87#define ESPDATA(foo)
88#endif
89
90#if defined(DEBUG_ESP_QUEUE)
91#define ESPQUEUE(foo) printk foo
92#else
93#define ESPQUEUE(foo)
94#endif
95
96#if defined(DEBUG_ESP_DISCONNECT)
97#define ESPDISC(foo) printk foo
98#else
99#define ESPDISC(foo)
100#endif
101
102#if defined(DEBUG_ESP_STATUS)
103#define ESPSTAT(foo) printk foo
104#else
105#define ESPSTAT(foo)
106#endif
107
108#if defined(DEBUG_ESP_PHASES)
109#define ESPPHASE(foo) printk foo
110#else
111#define ESPPHASE(foo)
112#endif
113
114#if defined(DEBUG_ESP_WORKBUS)
115#define ESPBUS(foo) printk foo
116#else
117#define ESPBUS(foo)
118#endif
119
120#if defined(DEBUG_ESP_IRQS)
121#define ESPIRQ(foo) printk foo
122#else
123#define ESPIRQ(foo)
124#endif
125
126#if defined(DEBUG_SDTR)
127#define ESPSDTR(foo) printk foo
128#else
129#define ESPSDTR(foo)
130#endif
131
132#if defined(DEBUG_ESP_MISC)
133#define ESPMISC(foo) printk foo
134#else
135#define ESPMISC(foo)
136#endif
137
138/* Command phase enumeration. */
139enum {
140 not_issued = 0x00, /* Still in the issue_SC queue. */
141
142 /* Various forms of selecting a target. */
143#define in_slct_mask 0x10
144 in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */
145 in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */
146 in_slct_msg = 0x12, /* select, then send a message */
147 in_slct_tag = 0x13, /* select and send tagged queue msg */
148 in_slct_sneg = 0x14, /* select and acquire sync capabilities */
149
150 /* Any post selection activity. */
151#define in_phases_mask 0x20
152 in_datain = 0x20, /* Data is transferring from the bus */
153 in_dataout = 0x21, /* Data is transferring to the bus */
154 in_data_done = 0x22, /* Last DMA data operation done (maybe) */
155 in_msgin = 0x23, /* Eating message from target */
156 in_msgincont = 0x24, /* Eating more msg bytes from target */
157 in_msgindone = 0x25, /* Decide what to do with what we got */
158 in_msgout = 0x26, /* Sending message to target */
159 in_msgoutdone = 0x27, /* Done sending msg out */
160 in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */
161 in_cmdend = 0x29, /* Done sending slow cmd */
162 in_status = 0x2a, /* Was in status phase, finishing cmd */
163 in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */
164 in_the_dark = 0x2c, /* Don't know what bus phase we are in */
165
166 /* Special states, ie. not normal bus transitions... */
167#define in_spec_mask 0x80
168 in_abortone = 0x80, /* Aborting one command currently */
169 in_abortall = 0x81, /* Blowing away all commands we have */
170 in_resetdev = 0x82, /* SCSI target reset in progress */
171 in_resetbus = 0x83, /* SCSI bus reset in progress */
172 in_tgterror = 0x84, /* Target did something stupid */
173};
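/* With the encodings above, phase classification is a single AND:
 * (phase & in_slct_mask) is non-zero only for the in_slct_* selection
 * states, (phase & in_phases_mask) only for the post-selection bus
 * phases 0x20-0x2c, and (phase & in_spec_mask) only for the special
 * abort/reset states.  An illustrative (hypothetical) check:
 *
 *	if (sp->SCp.phase & in_phases_mask)
 *		...	the command has made it past selection
 */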
174
175enum {
176 /* Zero has special meaning, see skipahead[12]. */
177/*0*/ do_never,
178
179/*1*/ do_phase_determine,
180/*2*/ do_reset_bus,
181/*3*/ do_reset_complete,
182/*4*/ do_work_bus,
183/*5*/ do_intr_end
184};
185
186/* Forward declarations. */
187static irqreturn_t esp_intr(int irq, void *dev_id);
188
189/* Debugging routines */
190struct esp_cmdstrings {
191 u8 cmdchar;
192 char *text;
193} esp_cmd_strings[] = {
194 /* Miscellaneous */
195 { ESP_CMD_NULL, "ESP_NOP", },
196 { ESP_CMD_FLUSH, "FIFO_FLUSH", },
197 { ESP_CMD_RC, "RSTESP", },
198 { ESP_CMD_RS, "RSTSCSI", },
199 /* Disconnected State Group */
200 { ESP_CMD_RSEL, "RESLCTSEQ", },
201 { ESP_CMD_SEL, "SLCTNATN", },
202 { ESP_CMD_SELA, "SLCTATN", },
203 { ESP_CMD_SELAS, "SLCTATNSTOP", },
204 { ESP_CMD_ESEL, "ENSLCTRESEL", },
205 { ESP_CMD_DSEL, "DISSELRESEL", },
206 { ESP_CMD_SA3, "SLCTATN3", },
207 { ESP_CMD_RSEL3, "RESLCTSEQ", },
208 /* Target State Group */
209 { ESP_CMD_SMSG, "SNDMSG", },
210 { ESP_CMD_SSTAT, "SNDSTATUS", },
211 { ESP_CMD_SDATA, "SNDDATA", },
212 { ESP_CMD_DSEQ, "DISCSEQ", },
213 { ESP_CMD_TSEQ, "TERMSEQ", },
214 { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", },
215 { ESP_CMD_DCNCT, "DISC", },
216 { ESP_CMD_RMSG, "RCVMSG", },
217 { ESP_CMD_RCMD, "RCVCMD", },
218 { ESP_CMD_RDATA, "RCVDATA", },
219 { ESP_CMD_RCSEQ, "RCVCMDSEQ", },
220 /* Initiator State Group */
221 { ESP_CMD_TI, "TRANSINFO", },
222 { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", },
223 { ESP_CMD_MOK, "MSGACCEPTED", },
224 { ESP_CMD_TPAD, "TPAD", },
225 { ESP_CMD_SATN, "SATN", },
226 { ESP_CMD_RATN, "RATN", },
227};
228#define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings)))
229
230/* Print textual representation of an ESP command */
231static inline void esp_print_cmd(u8 espcmd)
232{
233 u8 dma_bit = espcmd & ESP_CMD_DMA;
234 int i;
235
236 espcmd &= ~dma_bit;
237 for (i = 0; i < NUM_ESP_COMMANDS; i++)
238 if (esp_cmd_strings[i].cmdchar == espcmd)
239 break;
240 if (i == NUM_ESP_COMMANDS)
241 printk("ESP_Unknown");
242 else
243 printk("%s%s", esp_cmd_strings[i].text,
244 ((dma_bit) ? "+DMA" : ""));
245}
246
247/* Print the status register's value */
248static inline void esp_print_statreg(u8 statreg)
249{
250 u8 phase;
251
252 printk("STATUS<");
253 phase = statreg & ESP_STAT_PMASK;
254 printk("%s,", (phase == ESP_DOP ? "DATA-OUT" :
255 (phase == ESP_DIP ? "DATA-IN" :
256 (phase == ESP_CMDP ? "COMMAND" :
257 (phase == ESP_STATP ? "STATUS" :
258 (phase == ESP_MOP ? "MSG-OUT" :
259 (phase == ESP_MIP ? "MSG_IN" :
260 "unknown")))))));
261 if (statreg & ESP_STAT_TDONE)
262 printk("TRANS_DONE,");
263 if (statreg & ESP_STAT_TCNT)
264 printk("TCOUNT_ZERO,");
265 if (statreg & ESP_STAT_PERR)
266 printk("P_ERROR,");
267 if (statreg & ESP_STAT_SPAM)
268 printk("SPAM,");
269 if (statreg & ESP_STAT_INTR)
270 printk("IRQ,");
271 printk(">");
272}
273
274/* Print the interrupt register's value */
275static inline void esp_print_ireg(u8 intreg)
276{
277 printk("INTREG< ");
278 if (intreg & ESP_INTR_S)
279 printk("SLCT_NATN ");
280 if (intreg & ESP_INTR_SATN)
281 printk("SLCT_ATN ");
282 if (intreg & ESP_INTR_RSEL)
283 printk("RSLCT ");
284 if (intreg & ESP_INTR_FDONE)
285 printk("FDONE ");
286 if (intreg & ESP_INTR_BSERV)
287 printk("BSERV ");
288 if (intreg & ESP_INTR_DC)
289 printk("DISCNCT ");
290 if (intreg & ESP_INTR_IC)
291 printk("ILL_CMD ");
292 if (intreg & ESP_INTR_SR)
293 printk("SCSI_BUS_RESET ");
294 printk(">");
295}
296
297/* Print the sequence step registers contents */
298static inline void esp_print_seqreg(u8 stepreg)
299{
300 stepreg &= ESP_STEP_VBITS;
301 printk("STEP<%s>",
302 (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" :
303 (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" :
304 (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" :
305 (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" :
306 (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" :
307 "UNKNOWN"))))));
308}
309
310static char *phase_string(int phase)
311{
312 switch (phase) {
313 case not_issued:
314 return "UNISSUED";
315 case in_slct_norm:
316 return "SLCTNORM";
317 case in_slct_stop:
318 return "SLCTSTOP";
319 case in_slct_msg:
320 return "SLCTMSG";
321 case in_slct_tag:
322 return "SLCTTAG";
323 case in_slct_sneg:
324 return "SLCTSNEG";
325 case in_datain:
326 return "DATAIN";
327 case in_dataout:
328 return "DATAOUT";
329 case in_data_done:
330 return "DATADONE";
331 case in_msgin:
332 return "MSGIN";
333 case in_msgincont:
334 return "MSGINCONT";
335 case in_msgindone:
336 return "MSGINDONE";
337 case in_msgout:
338 return "MSGOUT";
339 case in_msgoutdone:
340 return "MSGOUTDONE";
341 case in_cmdbegin:
342 return "CMDBEGIN";
343 case in_cmdend:
344 return "CMDEND";
345 case in_status:
346 return "STATUS";
347 case in_freeing:
348 return "FREEING";
349 case in_the_dark:
350 return "CLUELESS";
351 case in_abortone:
352 return "ABORTONE";
353 case in_abortall:
354 return "ABORTALL";
355 case in_resetdev:
356 return "RESETDEV";
357 case in_resetbus:
358 return "RESETBUS";
359 case in_tgterror:
360 return "TGTERROR";
361 default:
362 return "UNKNOWN";
363 };
364}
365
366#ifdef DEBUG_STATE_MACHINE
367static inline void esp_advance_phase(struct scsi_cmnd *s, int newphase)
368{
369 ESPLOG(("<%s>", phase_string(newphase)));
370 s->SCp.sent_command = s->SCp.phase;
371 s->SCp.phase = newphase;
372}
373#else
374#define esp_advance_phase(__s, __newphase) \
375 (__s)->SCp.sent_command = (__s)->SCp.phase; \
376 (__s)->SCp.phase = (__newphase);
377#endif
378
379#ifdef DEBUG_ESP_CMDS
380static inline void esp_cmd(struct esp *esp, u8 cmd)
381{
382 esp->espcmdlog[esp->espcmdent] = cmd;
383 esp->espcmdent = (esp->espcmdent + 1) & 31;
384 sbus_writeb(cmd, esp->eregs + ESP_CMD);
385}
386#else
387#define esp_cmd(__esp, __cmd) \
388 sbus_writeb((__cmd), ((__esp)->eregs) + ESP_CMD)
389#endif
390
391#define ESP_INTSOFF(__dregs) \
392 sbus_writel(sbus_readl((__dregs)+DMA_CSR)&~(DMA_INT_ENAB), (__dregs)+DMA_CSR)
393#define ESP_INTSON(__dregs) \
394 sbus_writel(sbus_readl((__dregs)+DMA_CSR)|DMA_INT_ENAB, (__dregs)+DMA_CSR)
395#define ESP_IRQ_P(__dregs) \
396 (sbus_readl((__dregs)+DMA_CSR) & (DMA_HNDL_INTR|DMA_HNDL_ERROR))
397
398/* How we use the various Linux SCSI data structures for operation.
399 *
400 * struct scsi_cmnd:
401 *
402 * We keep track of the synchronous capabilities of a target
403 * in the device member, using sync_min_period and
404 * sync_max_offset. These are the values we directly write
405 * into the ESP registers while running a command. If offset
406 * is zero the ESP will use asynchronous transfers.
407 * If the borken flag is set we assume we shouldn't even bother
408 * trying to negotiate for synchronous transfer as this target
409 * is really stupid. If we notice the target is dropping the
410 * bus, and we have been allowing it to disconnect, we clear
411 * the disconnect flag.
412 */
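/* For example, when esp_exec_cmd() below decides a target must not be
 * negotiated with at all, it forces asynchronous transfers simply by
 * zeroing both values:
 *
 *	esp_dev->sync_max_offset = 0;
 *	esp_dev->sync_min_period = 0;
 */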
413
414
415/* Manipulation of the ESP command queues. Thanks to the aha152x driver
416 * and its author, Juergen E. Fischer, for the methods used here.
417 * Note that these are per-ESP queues, not global queues like
418 * the aha152x driver uses.
419 */
420static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
421{
422 struct scsi_cmnd *end;
423
424 new_SC->host_scribble = (unsigned char *) NULL;
425 if (!*SC)
426 *SC = new_SC;
427 else {
428 for (end=*SC;end->host_scribble;end=(struct scsi_cmnd *)end->host_scribble)
429 ;
430 end->host_scribble = (unsigned char *) new_SC;
431 }
432}
433
434static inline void prepend_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
435{
436 new_SC->host_scribble = (unsigned char *) *SC;
437 *SC = new_SC;
438}
439
440static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd **SC)
441{
442 struct scsi_cmnd *ptr;
443 ptr = *SC;
444 if (ptr)
445 *SC = (struct scsi_cmnd *) (*SC)->host_scribble;
446 return ptr;
447}
448
449static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, int target, int lun)
450{
451 struct scsi_cmnd *ptr, *prev;
452
453 for (ptr = *SC, prev = NULL;
454 ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
455 prev = ptr, ptr = (struct scsi_cmnd *) ptr->host_scribble)
456 ;
457 if (ptr) {
458 if (prev)
459 prev->host_scribble=ptr->host_scribble;
460 else
461 *SC=(struct scsi_cmnd *)ptr->host_scribble;
462 }
463 return ptr;
464}
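/* How these helpers are used further down: esp_queue() append_SC()s a new
 * command onto esp->issue_SC (or prepend_SC()s it for REQUEST_SENSE),
 * esp_exec_cmd() pulls the next one off with
 * remove_first_SC(&esp->issue_SC), and remove_SC() fishes out the entry
 * matching a given target and lun.
 */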
465
466/* Resetting various pieces of the ESP scsi driver chipset/buses. */
467static void esp_reset_dma(struct esp *esp)
468{
469 int can_do_burst16, can_do_burst32, can_do_burst64;
470 int can_do_sbus64;
471 u32 tmp;
472
473 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
474 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
475 can_do_burst64 = 0;
476 can_do_sbus64 = 0;
477 if (sbus_can_dma_64bit(esp->sdev))
478 can_do_sbus64 = 1;
479 if (sbus_can_burst64(esp->sdev))
480 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
481
482 /* Punt the DVMA into a known state. */
483 if (esp->dma->revision != dvmahme) {
484 tmp = sbus_readl(esp->dregs + DMA_CSR);
485 sbus_writel(tmp | DMA_RST_SCSI, esp->dregs + DMA_CSR);
486 sbus_writel(tmp & ~DMA_RST_SCSI, esp->dregs + DMA_CSR);
487 }
488 switch (esp->dma->revision) {
489 case dvmahme:
490 /* This is the HME DVMA gate array. */
491
492 sbus_writel(DMA_RESET_FAS366, esp->dregs + DMA_CSR);
493 sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR);
494
495 esp->prev_hme_dmacsr = (DMA_PARITY_OFF|DMA_2CLKS|DMA_SCSI_DISAB|DMA_INT_ENAB);
496 esp->prev_hme_dmacsr &= ~(DMA_ENABLE|DMA_ST_WRITE|DMA_BRST_SZ);
497
498 if (can_do_burst64)
499 esp->prev_hme_dmacsr |= DMA_BRST64;
500 else if (can_do_burst32)
501 esp->prev_hme_dmacsr |= DMA_BRST32;
502
503 if (can_do_sbus64) {
504 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
505 sbus_set_sbus64(esp->sdev, esp->bursts);
506 }
507
508 /* This chip is horrible. */
509 while (sbus_readl(esp->dregs + DMA_CSR) & DMA_PEND_READ)
510 udelay(1);
511
512 sbus_writel(0, esp->dregs + DMA_CSR);
513 sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
514
515 /* This is necessary to avoid having the SCSI channel
516 * engine lock up on us.
517 */
518 sbus_writel(0, esp->dregs + DMA_ADDR);
519
520 break;
521 case dvmarev2:
522 /* This is the gate array found in the sun4m
523 * NCR SBUS I/O subsystem.
524 */
525 if (esp->erev != esp100) {
526 tmp = sbus_readl(esp->dregs + DMA_CSR);
527 sbus_writel(tmp | DMA_3CLKS, esp->dregs + DMA_CSR);
528 }
529 break;
530 case dvmarev3:
531 tmp = sbus_readl(esp->dregs + DMA_CSR);
532 tmp &= ~DMA_3CLKS;
533 tmp |= DMA_2CLKS;
534 if (can_do_burst32) {
535 tmp &= ~DMA_BRST_SZ;
536 tmp |= DMA_BRST32;
537 }
538 sbus_writel(tmp, esp->dregs + DMA_CSR);
539 break;
540 case dvmaesc1:
541 /* This is the DMA unit found on SCSI/Ether cards. */
542 tmp = sbus_readl(esp->dregs + DMA_CSR);
543 tmp |= DMA_ADD_ENABLE;
544 tmp &= ~DMA_BCNT_ENAB;
545 if (!can_do_burst32 && can_do_burst16) {
546 tmp |= DMA_ESC_BURST;
547 } else {
548 tmp &= ~(DMA_ESC_BURST);
549 }
550 sbus_writel(tmp, esp->dregs + DMA_CSR);
551 break;
552 default:
553 break;
554 };
555 ESP_INTSON(esp->dregs);
556}
557
558/* Reset the ESP chip, _not_ the SCSI bus. */
559static void __init esp_reset_esp(struct esp *esp)
560{
561 u8 family_code, version;
562 int i;
563
564 /* Now reset the ESP chip */
565 esp_cmd(esp, ESP_CMD_RC);
566 esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
567 esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
568
569 /* Reload the configuration registers */
570 sbus_writeb(esp->cfact, esp->eregs + ESP_CFACT);
571 esp->prev_stp = 0;
572 sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
573 esp->prev_soff = 0;
574 sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
575 sbus_writeb(esp->neg_defp, esp->eregs + ESP_TIMEO);
576
577 /* This is the only point at which it is reliable to read
578	 * the ID-code of the fast ESP chip variants.
579 */
580 esp->max_period = ((35 * esp->ccycle) / 1000);
581 if (esp->erev == fast) {
582 version = sbus_readb(esp->eregs + ESP_UID);
583 family_code = (version & 0xf8) >> 3;
584 if (family_code == 0x02)
585 esp->erev = fas236;
586 else if (family_code == 0x0a)
587 esp->erev = fashme; /* Version is usually '5'. */
588 else
589 esp->erev = fas100a;
590 ESPMISC(("esp%d: FAST chip is %s (family=%d, version=%d)\n",
591 esp->esp_id,
592 (esp->erev == fas236) ? "fas236" :
593 ((esp->erev == fas100a) ? "fas100a" :
594 "fasHME"), family_code, (version & 7)));
595
596 esp->min_period = ((4 * esp->ccycle) / 1000);
597 } else {
598 esp->min_period = ((5 * esp->ccycle) / 1000);
599 }
600 esp->max_period = (esp->max_period + 3)>>2;
601 esp->min_period = (esp->min_period + 3)>>2;
602
603 sbus_writeb(esp->config1, esp->eregs + ESP_CFG1);
604 switch (esp->erev) {
605 case esp100:
606 /* nothing to do */
607 break;
608 case esp100a:
609 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
610 break;
611 case esp236:
612 /* Slow 236 */
613 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
614 esp->prev_cfg3 = esp->config3[0];
615 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
616 break;
617 case fashme:
618 esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
619 /* fallthrough... */
620 case fas236:
621 /* Fast 236 or HME */
622 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
623 for (i = 0; i < 16; i++) {
624 if (esp->erev == fashme) {
625 u8 cfg3;
626
627 cfg3 = ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
628 if (esp->scsi_id >= 8)
629 cfg3 |= ESP_CONFIG3_IDBIT3;
630 esp->config3[i] |= cfg3;
631 } else {
632 esp->config3[i] |= ESP_CONFIG3_FCLK;
633 }
634 }
635 esp->prev_cfg3 = esp->config3[0];
636 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
637 if (esp->erev == fashme) {
638 esp->radelay = 80;
639 } else {
640 if (esp->diff)
641 esp->radelay = 0;
642 else
643 esp->radelay = 96;
644 }
645 break;
646 case fas100a:
647 /* Fast 100a */
648 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
649 for (i = 0; i < 16; i++)
650 esp->config3[i] |= ESP_CONFIG3_FCLOCK;
651 esp->prev_cfg3 = esp->config3[0];
652 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
653 esp->radelay = 32;
654 break;
655 default:
656 panic("esp: what could it be... I wonder...");
657 break;
658 };
659
660 /* Eat any bitrot in the chip */
661 sbus_readb(esp->eregs + ESP_INTRPT);
662 udelay(100);
663}
664
665/* This places the ESP into a known state at boot time. */
666static void __init esp_bootup_reset(struct esp *esp)
667{
668 u8 tmp;
669
670 /* Reset the DMA */
671 esp_reset_dma(esp);
672
673 /* Reset the ESP */
674 esp_reset_esp(esp);
675
676 /* Reset the SCSI bus, but tell ESP not to generate an irq */
677 tmp = sbus_readb(esp->eregs + ESP_CFG1);
678 tmp |= ESP_CONFIG1_SRRDISAB;
679 sbus_writeb(tmp, esp->eregs + ESP_CFG1);
680
681 esp_cmd(esp, ESP_CMD_RS);
682 udelay(400);
683
684 sbus_writeb(esp->config1, esp->eregs + ESP_CFG1);
685
686 /* Eat any bitrot in the chip and we are done... */
687 sbus_readb(esp->eregs + ESP_INTRPT);
688}
689
690static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev)
691{
692 struct sbus_dev *sdev = esp->sdev;
693 struct sbus_dma *dma;
694
695 if (dma_sdev != NULL) {
696 for_each_dvma(dma) {
697 if (dma->sdev == dma_sdev)
698 break;
699 }
700 } else {
701 for_each_dvma(dma) {
702 /* If allocated already, can't use it. */
703 if (dma->allocated)
704 continue;
705
706 if (dma->sdev == NULL)
707 break;
708
709 /* If bus + slot are the same and it has the
710 * correct OBP name, it's ours.
711 */
712 if (sdev->bus == dma->sdev->bus &&
713 sdev->slot == dma->sdev->slot &&
714 (!strcmp(dma->sdev->prom_name, "dma") ||
715 !strcmp(dma->sdev->prom_name, "espdma")))
716 break;
717 }
718 }
719
720 /* If we don't know how to handle the dvma,
721 * do not use this device.
722 */
723 if (dma == NULL) {
724 printk("Cannot find dvma for ESP%d's SCSI\n", esp->esp_id);
725 return -1;
726 }
727 if (dma->allocated) {
728 printk("esp%d: can't use my espdma\n", esp->esp_id);
729 return -1;
730 }
731 dma->allocated = 1;
732 esp->dma = dma;
733 esp->dregs = dma->regs;
734
735 return 0;
736}
737
738static int __init esp_map_regs(struct esp *esp, int hme)
739{
740 struct sbus_dev *sdev = esp->sdev;
741 struct resource *res;
742
743	/* On HME, two register sets exist: the first is the DVMA,
744	 * the second is the ESP registers.
745 */
746 if (hme)
747 res = &sdev->resource[1];
748 else
749 res = &sdev->resource[0];
750
751 esp->eregs = sbus_ioremap(res, 0, ESP_REG_SIZE, "ESP Registers");
752
753 if (esp->eregs == 0)
754 return -1;
755 return 0;
756}
757
758static int __init esp_map_cmdarea(struct esp *esp)
759{
760 struct sbus_dev *sdev = esp->sdev;
761
762 esp->esp_command = sbus_alloc_consistent(sdev, 16,
763 &esp->esp_command_dvma);
764 if (esp->esp_command == NULL ||
765 esp->esp_command_dvma == 0)
766 return -1;
767 return 0;
768}
769
770static int __init esp_register_irq(struct esp *esp)
771{
772 esp->ehost->irq = esp->irq = esp->sdev->irqs[0];
773
774 /* We used to try various overly-clever things to
775 * reduce the interrupt processing overhead on
776 * sun4c/sun4m when multiple ESP's shared the
777 * same IRQ. It was too complex and messy to
778 * sanely maintain.
779 */
780 if (request_irq(esp->ehost->irq, esp_intr,
781 IRQF_SHARED, "ESP SCSI", esp)) {
782 printk("esp%d: Cannot acquire irq line\n",
783 esp->esp_id);
784 return -1;
785 }
786
787 printk("esp%d: IRQ %d ", esp->esp_id,
788 esp->ehost->irq);
789
790 return 0;
791}
792
793static void __init esp_get_scsi_id(struct esp *esp)
794{
795 struct sbus_dev *sdev = esp->sdev;
796 struct device_node *dp = sdev->ofdev.node;
797
798 esp->scsi_id = of_getintprop_default(dp,
799 "initiator-id",
800 -1);
801 if (esp->scsi_id == -1)
802 esp->scsi_id = of_getintprop_default(dp,
803 "scsi-initiator-id",
804 -1);
805 if (esp->scsi_id == -1)
806 esp->scsi_id = (sdev->bus == NULL) ? 7 :
807 of_getintprop_default(sdev->bus->ofdev.node,
808 "scsi-initiator-id",
809 7);
810 esp->ehost->this_id = esp->scsi_id;
811 esp->scsi_id_mask = (1 << esp->scsi_id);
812
813}
814
815static void __init esp_get_clock_params(struct esp *esp)
816{
817 struct sbus_dev *sdev = esp->sdev;
818 int prom_node = esp->prom_node;
819 int sbus_prom_node;
820 unsigned int fmhz;
821 u8 ccf;
822
823 if (sdev != NULL && sdev->bus != NULL)
824 sbus_prom_node = sdev->bus->prom_node;
825 else
826 sbus_prom_node = 0;
827
828 /* This is getting messy but it has to be done
829 * correctly or else you get weird behavior all
830 * over the place. We are trying to basically
831 * figure out three pieces of information.
832 *
833 * a) Clock Conversion Factor
834 *
835 * This is a representation of the input
836 * crystal clock frequency going into the
837 * ESP on this machine. Any operation whose
838 * timing is longer than 400ns depends on this
839 * value being correct. For example, you'll
840 * get blips for arbitration/selection during
841 * high load or with multiple targets if this
842 * is not set correctly.
843 *
844 * b) Selection Time-Out
845 *
846 * The ESP isn't very bright and will arbitrate
847 * for the bus and try to select a target
848 * forever if you let it. This value tells
849 * the ESP when it has taken too long to
850 * negotiate and that it should interrupt
851 * the CPU so we can see what happened.
852 * The value is computed as follows (from
853 * NCR/Symbios chip docs).
854 *
855 * (Time Out Period) * (Input Clock)
856 * STO = ----------------------------------
857 * (8192) * (Clock Conversion Factor)
858 *
859 * You usually want the time out period to be
860	 * around 250ms; I think we'll set it a little
861	 * bit higher to account for fully loaded SCSI
862	 * buses and slow devices that don't respond so
863	 * quickly to selection attempts. (Yeah, I know
864	 * this is out of spec, but there are a lot of
865	 * buggy pieces of firmware out there, so bite me.)
866 *
867	 * c) Empirical constants for synchronous offset
868 * and transfer period register values
869 *
870 * This entails the smallest and largest sync
871 * period we could ever handle on this ESP.
872 */
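	/* A worked example using the numbers mentioned above: the code
	 * below computes CCF as the crystal frequency divided by 5MHz,
	 * rounded up, so a 20MHz sun4c crystal gives CCF = 4.  A 250ms
	 * selection timeout then comes out to roughly
	 *
	 *	STO = (0.250 * 20,000,000) / (8192 * 4) ~= 153
	 *
	 * and the slightly longer default actually programmed via
	 * ESP_NEG_DEFP() below lands a bit above that.
	 */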
873
874 fmhz = prom_getintdefault(prom_node, "clock-frequency", -1);
875 if (fmhz == -1)
876 fmhz = (!sbus_prom_node) ? 0 :
877 prom_getintdefault(sbus_prom_node, "clock-frequency", -1);
878
879 if (fmhz <= (5000000))
880 ccf = 0;
881 else
882 ccf = (((5000000 - 1) + (fmhz))/(5000000));
883
884 if (!ccf || ccf > 8) {
885 /* If we can't find anything reasonable,
886	 * just assume 20 MHz. This is the clock
887 * frequency of the older sun4c's where I've
888 * been unable to find the clock-frequency
889 * PROM property. All other machines provide
890 * useful values it seems.
891 */
892 ccf = ESP_CCF_F4;
893 fmhz = (20000000);
894 }
895
896 if (ccf == (ESP_CCF_F7 + 1))
897 esp->cfact = ESP_CCF_F0;
898 else if (ccf == ESP_CCF_NEVER)
899 esp->cfact = ESP_CCF_F2;
900 else
901 esp->cfact = ccf;
902 esp->raw_cfact = ccf;
903
904 esp->cfreq = fmhz;
905 esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
906 esp->ctick = ESP_TICK(ccf, esp->ccycle);
907 esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
908 esp->sync_defp = SYNC_DEFP_SLOW;
909
910 printk("SCSI ID %d Clk %dMHz CCYC=%d CCF=%d TOut %d ",
911 esp->scsi_id, (fmhz / 1000000),
912 (int)esp->ccycle, (int)ccf, (int) esp->neg_defp);
913}
914
915static void __init esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
916{
917 struct sbus_dev *sdev = esp->sdev;
918 u8 bursts;
919
920 bursts = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff);
921
922 if (dma) {
923 u8 tmp = prom_getintdefault(dma->prom_node,
924 "burst-sizes", 0xff);
925 if (tmp != 0xff)
926 bursts &= tmp;
927 }
928
929 if (sdev->bus) {
930 u8 tmp = prom_getintdefault(sdev->bus->prom_node,
931 "burst-sizes", 0xff);
932 if (tmp != 0xff)
933 bursts &= tmp;
934 }
935
936 if (bursts == 0xff ||
937 (bursts & DMA_BURST16) == 0 ||
938 (bursts & DMA_BURST32) == 0)
939 bursts = (DMA_BURST32 - 1);
940
941 esp->bursts = bursts;
942}
943
944static void __init esp_get_revision(struct esp *esp)
945{
946 u8 tmp;
947
948 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
949 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
950 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
951
952 tmp = sbus_readb(esp->eregs + ESP_CFG2);
953 tmp &= ~ESP_CONFIG2_MAGIC;
954 if (tmp != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
955 /* If what we write to cfg2 does not come back, cfg2
956	 * is not implemented; therefore this must be a plain
957 * esp100.
958 */
959 esp->erev = esp100;
960 printk("NCR53C90(esp100)\n");
961 } else {
962 esp->config2 = 0;
963 esp->prev_cfg3 = esp->config3[0] = 5;
964 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
965 sbus_writeb(0, esp->eregs + ESP_CFG3);
966 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
967
968 tmp = sbus_readb(esp->eregs + ESP_CFG3);
969 if (tmp != 5) {
970	 /* The cfg2 register is implemented; however,
971	 * cfg3 is not, so this must be an esp100a.
972 */
973 esp->erev = esp100a;
974 printk("NCR53C90A(esp100a)\n");
975 } else {
976 int target;
977
978 for (target = 0; target < 16; target++)
979 esp->config3[target] = 0;
980 esp->prev_cfg3 = 0;
981 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
982
983 /* All of cfg{1,2,3} implemented, must be one of
984 * the fas variants, figure out which one.
985 */
986 if (esp->raw_cfact > ESP_CCF_F5) {
987 esp->erev = fast;
988 esp->sync_defp = SYNC_DEFP_FAST;
989 printk("NCR53C9XF(espfast)\n");
990 } else {
991 esp->erev = esp236;
992 printk("NCR53C9x(esp236)\n");
993 }
994 esp->config2 = 0;
995 sbus_writeb(esp->config2, esp->eregs + ESP_CFG2);
996 }
997 }
998}
999
1000static void __init esp_init_swstate(struct esp *esp)
1001{
1002 int i;
1003
1004 /* Command queues... */
1005 esp->current_SC = NULL;
1006 esp->disconnected_SC = NULL;
1007 esp->issue_SC = NULL;
1008
1009 /* Target and current command state... */
1010 esp->targets_present = 0;
1011 esp->resetting_bus = 0;
1012 esp->snip = 0;
1013
1014 init_waitqueue_head(&esp->reset_queue);
1015
1016 /* Debugging... */
1017 for(i = 0; i < 32; i++)
1018 esp->espcmdlog[i] = 0;
1019 esp->espcmdent = 0;
1020
1021 /* MSG phase state... */
1022 for(i = 0; i < 16; i++) {
1023 esp->cur_msgout[i] = 0;
1024 esp->cur_msgin[i] = 0;
1025 }
1026 esp->prevmsgout = esp->prevmsgin = 0;
1027 esp->msgout_len = esp->msgin_len = 0;
1028
1029	 /* Clear the one-behind caches to hold unmatchable values. */
1030 esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff;
1031 esp->prev_hme_dmacsr = 0xffffffff;
1032}
1033
1034static int __init detect_one_esp(struct scsi_host_template *tpnt,
1035 struct device *dev,
1036 struct sbus_dev *esp_dev,
1037 struct sbus_dev *espdma,
1038 struct sbus_bus *sbus,
1039 int hme)
1040{
1041 static int instance;
1042 struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp));
1043 struct esp *esp;
1044
1045 if (!esp_host)
1046 return -ENOMEM;
1047
1048 if (hme)
1049 esp_host->max_id = 16;
1050 esp = (struct esp *) esp_host->hostdata;
1051 esp->ehost = esp_host;
1052 esp->sdev = esp_dev;
1053 esp->esp_id = instance;
1054 esp->prom_node = esp_dev->prom_node;
1055 prom_getstring(esp->prom_node, "name", esp->prom_name,
1056 sizeof(esp->prom_name));
1057
1058 if (esp_find_dvma(esp, espdma) < 0)
1059 goto fail_unlink;
1060 if (esp_map_regs(esp, hme) < 0) {
1061 printk("ESP registers unmappable");
1062 goto fail_dvma_release;
1063 }
1064 if (esp_map_cmdarea(esp) < 0) {
1065 printk("ESP DVMA transport area unmappable");
1066 goto fail_unmap_regs;
1067 }
1068 if (esp_register_irq(esp) < 0)
1069 goto fail_unmap_cmdarea;
1070
1071 esp_get_scsi_id(esp);
1072
1073 esp->diff = prom_getbool(esp->prom_node, "differential");
1074 if (esp->diff)
1075 printk("Differential ");
1076
1077 esp_get_clock_params(esp);
1078 esp_get_bursts(esp, espdma);
1079 esp_get_revision(esp);
1080 esp_init_swstate(esp);
1081
1082 esp_bootup_reset(esp);
1083
1084 if (scsi_add_host(esp_host, dev))
1085 goto fail_free_irq;
1086
1087 dev_set_drvdata(&esp_dev->ofdev.dev, esp);
1088
1089 scsi_scan_host(esp_host);
1090 instance++;
1091
1092 return 0;
1093
1094fail_free_irq:
1095 free_irq(esp->ehost->irq, esp);
1096
1097fail_unmap_cmdarea:
1098 sbus_free_consistent(esp->sdev, 16,
1099 (void *) esp->esp_command,
1100 esp->esp_command_dvma);
1101
1102fail_unmap_regs:
1103 sbus_iounmap(esp->eregs, ESP_REG_SIZE);
1104
1105fail_dvma_release:
1106 esp->dma->allocated = 0;
1107
1108fail_unlink:
1109 scsi_host_put(esp_host);
1110 return -1;
1111}
1112
1113/* Detecting ESP chips on the machine. This is the simple and easy
1114 * version.
1115 */
1116static int __devexit esp_remove_common(struct esp *esp)
1117{
1118 unsigned int irq = esp->ehost->irq;
1119
1120 scsi_remove_host(esp->ehost);
1121
1122 ESP_INTSOFF(esp->dregs);
1123#if 0
1124 esp_reset_dma(esp);
1125 esp_reset_esp(esp);
1126#endif
1127
1128 free_irq(irq, esp);
1129 sbus_free_consistent(esp->sdev, 16,
1130 (void *) esp->esp_command, esp->esp_command_dvma);
1131 sbus_iounmap(esp->eregs, ESP_REG_SIZE);
1132 esp->dma->allocated = 0;
1133
1134 scsi_host_put(esp->ehost);
1135
1136 return 0;
1137}
1138
1139
1140#ifdef CONFIG_SUN4
1141
1142#include <asm/sun4paddr.h>
1143
1144static struct sbus_dev sun4_esp_dev;
1145
1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1147{
1148 if (sun4_esp_physaddr) {
1149 memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
1151 sun4_esp_dev.irqs[0] = 4;
1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
1153 sun4_esp_dev.resource[0].end =
1154 sun4_esp_physaddr + ESP_REG_SIZE - 1;
1155 sun4_esp_dev.resource[0].flags = IORESOURCE_IO;
1156
1157 return detect_one_esp(tpnt, NULL,
1158 &sun4_esp_dev, NULL, NULL, 0);
1159 }
1160 return 0;
1161}
1162
1163static int __devexit esp_sun4_remove(void)
1164{
1165 struct of_device *dev = &sun4_esp_dev.ofdev;
1166 struct esp *esp = dev_get_drvdata(&dev->dev);
1167
1168 return esp_remove_common(esp);
1169}
1170
1171#else /* !CONFIG_SUN4 */
1172
1173static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
1174{
1175 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1176 struct device_node *dp = dev->node;
1177 struct sbus_dev *dma_sdev = NULL;
1178 int hme = 0;
1179
1180 if (dp->parent &&
1181 (!strcmp(dp->parent->name, "espdma") ||
1182 !strcmp(dp->parent->name, "dma")))
1183 dma_sdev = sdev->parent;
1184 else if (!strcmp(dp->name, "SUNW,fas")) {
1185 dma_sdev = sdev;
1186 hme = 1;
1187 }
1188
1189 return detect_one_esp(match->data, &dev->dev,
1190 sdev, dma_sdev, sdev->bus, hme);
1191}
1192
1193static int __devexit esp_sbus_remove(struct of_device *dev)
1194{
1195 struct esp *esp = dev_get_drvdata(&dev->dev);
1196
1197 return esp_remove_common(esp);
1198}
1199
1200#endif /* !CONFIG_SUN4 */
1201
1202/* The info function will return whatever useful
1203 * information the developer sees fit to provide. If none is
1204 * provided, the name field will be used instead.
1205 */
1206static const char *esp_info(struct Scsi_Host *host)
1207{
1208 struct esp *esp;
1209
1210 esp = (struct esp *) host->hostdata;
1211 switch (esp->erev) {
1212 case esp100:
1213 return "Sparc ESP100 (NCR53C90)";
1214 case esp100a:
1215 return "Sparc ESP100A (NCR53C90A)";
1216 case esp236:
1217 return "Sparc ESP236";
1218 case fas236:
1219 return "Sparc ESP236-FAST";
1220 case fashme:
1221 return "Sparc ESP366-HME";
1222 case fas100a:
1223 return "Sparc ESP100A-FAST";
1224 default:
1225 return "Bogon ESP revision";
1226 };
1227}
1228
1229/* From Wolfgang Stanglmeier's NCR scsi driver. */
1230struct info_str
1231{
1232 char *buffer;
1233 int length;
1234 int offset;
1235 int pos;
1236};
1237
1238static void copy_mem_info(struct info_str *info, char *data, int len)
1239{
1240 if (info->pos + len > info->length)
1241 len = info->length - info->pos;
1242
1243 if (info->pos + len < info->offset) {
1244 info->pos += len;
1245 return;
1246 }
1247 if (info->pos < info->offset) {
1248 data += (info->offset - info->pos);
1249 len -= (info->offset - info->pos);
1250 }
1251
1252 if (len > 0) {
1253 memcpy(info->buffer + info->pos, data, len);
1254 info->pos += len;
1255 }
1256}
1257
1258static int copy_info(struct info_str *info, char *fmt, ...)
1259{
1260 va_list args;
1261 char buf[81];
1262 int len;
1263
1264 va_start(args, fmt);
1265 len = vsprintf(buf, fmt, args);
1266 va_end(args);
1267
1268 copy_mem_info(info, buf, len);
1269 return len;
1270}
1271
1272static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len)
1273{
1274 struct scsi_device *sdev;
1275 struct info_str info;
1276 int i;
1277
1278 info.buffer = ptr;
1279 info.length = len;
1280 info.offset = offset;
1281 info.pos = 0;
1282
1283 copy_info(&info, "Sparc ESP Host Adapter:\n");
1284 copy_info(&info, "\tPROM node\t\t%08x\n", (unsigned int) esp->prom_node);
1285 copy_info(&info, "\tPROM name\t\t%s\n", esp->prom_name);
1286 copy_info(&info, "\tESP Model\t\t");
1287 switch (esp->erev) {
1288 case esp100:
1289 copy_info(&info, "ESP100\n");
1290 break;
1291 case esp100a:
1292 copy_info(&info, "ESP100A\n");
1293 break;
1294 case esp236:
1295 copy_info(&info, "ESP236\n");
1296 break;
1297 case fas236:
1298 copy_info(&info, "FAS236\n");
1299 break;
1300 case fas100a:
1301 copy_info(&info, "FAS100A\n");
1302 break;
1303 case fast:
1304 copy_info(&info, "FAST\n");
1305 break;
1306 case fashme:
1307 copy_info(&info, "Happy Meal FAS\n");
1308 break;
1309 case espunknown:
1310 default:
1311 copy_info(&info, "Unknown!\n");
1312 break;
1313 };
1314 copy_info(&info, "\tDMA Revision\t\t");
1315 switch (esp->dma->revision) {
1316 case dvmarev0:
1317 copy_info(&info, "Rev 0\n");
1318 break;
1319 case dvmaesc1:
1320 copy_info(&info, "ESC Rev 1\n");
1321 break;
1322 case dvmarev1:
1323 copy_info(&info, "Rev 1\n");
1324 break;
1325 case dvmarev2:
1326 copy_info(&info, "Rev 2\n");
1327 break;
1328 case dvmarev3:
1329 copy_info(&info, "Rev 3\n");
1330 break;
1331 case dvmarevplus:
1332 copy_info(&info, "Rev 1+\n");
1333 break;
1334 case dvmahme:
1335 copy_info(&info, "Rev HME/FAS\n");
1336 break;
1337 default:
1338 copy_info(&info, "Unknown!\n");
1339 break;
1340 };
1341 copy_info(&info, "\tLive Targets\t\t[ ");
1342 for (i = 0; i < 15; i++) {
1343 if (esp->targets_present & (1 << i))
1344 copy_info(&info, "%d ", i);
1345 }
1346 copy_info(&info, "]\n\n");
1347
1348 /* Now describe the state of each existing target. */
1349 copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\tWide\n");
1350
1351 shost_for_each_device(sdev, esp->ehost) {
1352 struct esp_device *esp_dev = sdev->hostdata;
1353 uint id = sdev->id;
1354
1355 if (!(esp->targets_present & (1 << id)))
1356 continue;
1357
1358 copy_info(&info, "%d\t\t", id);
1359 copy_info(&info, "%08lx\t", esp->config3[id]);
1360 copy_info(&info, "[%02lx,%02lx]\t\t\t",
1361 esp_dev->sync_max_offset,
1362 esp_dev->sync_min_period);
1363 copy_info(&info, "%s\t\t",
1364 esp_dev->disconnect ? "yes" : "no");
1365 copy_info(&info, "%s\n",
1366 (esp->config3[id] & ESP_CONFIG3_EWIDE) ? "yes" : "no");
1367 }
1368 return info.pos > info.offset? info.pos - info.offset : 0;
1369}
1370
1371/* ESP proc filesystem code. */
1372static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
1373 int length, int inout)
1374{
1375 struct esp *esp = (struct esp *) host->hostdata;
1376
1377 if (inout)
1378 return -EINVAL; /* not yet */
1379
1380 if (start)
1381 *start = buffer;
1382
1383 return esp_host_info(esp, buffer, offset, length);
1384}
1385
1386static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1387{
1388 if (sp->use_sg == 0) {
1389 sp->SCp.this_residual = sp->request_bufflen;
1390 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
1391 sp->SCp.buffers_residual = 0;
1392 if (sp->request_bufflen) {
1393 sp->SCp.have_data_in = sbus_map_single(esp->sdev, sp->SCp.buffer,
1394 sp->SCp.this_residual,
1395 sp->sc_data_direction);
1396 sp->SCp.ptr = (char *) ((unsigned long)sp->SCp.have_data_in);
1397 } else {
1398 sp->SCp.ptr = NULL;
1399 }
1400 } else {
1401 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
1402 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
1403 sp->SCp.buffer,
1404 sp->use_sg,
1405 sp->sc_data_direction);
1406 sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer);
1407 sp->SCp.ptr = (char *) ((unsigned long)sg_dma_address(sp->SCp.buffer));
1408 }
1409}
1410
1411static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1412{
1413 if (sp->use_sg) {
1414 sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
1415 sp->sc_data_direction);
1416 } else if (sp->request_bufflen) {
1417 sbus_unmap_single(esp->sdev,
1418 sp->SCp.have_data_in,
1419 sp->request_bufflen,
1420 sp->sc_data_direction);
1421 }
1422}
1423
1424static void esp_restore_pointers(struct esp *esp, struct scsi_cmnd *sp)
1425{
1426 struct esp_pointers *ep = &esp->data_pointers[sp->device->id];
1427
1428 sp->SCp.ptr = ep->saved_ptr;
1429 sp->SCp.buffer = ep->saved_buffer;
1430 sp->SCp.this_residual = ep->saved_this_residual;
1431 sp->SCp.buffers_residual = ep->saved_buffers_residual;
1432}
1433
1434static void esp_save_pointers(struct esp *esp, struct scsi_cmnd *sp)
1435{
1436 struct esp_pointers *ep = &esp->data_pointers[sp->device->id];
1437
1438 ep->saved_ptr = sp->SCp.ptr;
1439 ep->saved_buffer = sp->SCp.buffer;
1440 ep->saved_this_residual = sp->SCp.this_residual;
1441 ep->saved_buffers_residual = sp->SCp.buffers_residual;
1442}
1443
1444/* Some rules:
1445 *
1446 * 1) Never ever panic while something is live on the bus.
1447 * If there is to be any chance of syncing the disks this
1448 * rule is to be obeyed.
1449 *
1450 * 2) Any target that causes a foul condition will no longer
1451 * have synchronous transfers done to it, no questions
1452 * asked.
1453 *
1454 * 3) Keep register accesses to a minimum. Think about some
1455 * day when we have Xbus machines this is running on and
1456 * the ESP chip is on the other end of the machine on a
1457 * different board from the cpu where this is running.
1458 */
1459
1460/* Fire off a command. We assume the bus is free and that the only
1461 * case where we could see an interrupt is where we have disconnected
1462 * commands active and they are trying to reselect us.
1463 */
1464static inline void esp_check_cmd(struct esp *esp, struct scsi_cmnd *sp)
1465{
1466 switch (sp->cmd_len) {
1467 case 6:
1468 case 10:
1469 case 12:
1470 esp->esp_slowcmd = 0;
1471 break;
1472
1473 default:
1474 esp->esp_slowcmd = 1;
1475 esp->esp_scmdleft = sp->cmd_len;
1476 esp->esp_scmdp = &sp->cmnd[0];
1477 break;
1478 };
1479}
1480
1481static inline void build_sync_nego_msg(struct esp *esp, int period, int offset)
1482{
1483 esp->cur_msgout[0] = EXTENDED_MESSAGE;
1484 esp->cur_msgout[1] = 3;
1485 esp->cur_msgout[2] = EXTENDED_SDTR;
1486 esp->cur_msgout[3] = period;
1487 esp->cur_msgout[4] = offset;
1488 esp->msgout_len = 5;
1489}
1490
1491/* SIZE is in bits; currently the HME only supports 16-bit wide transfers. */
1492static inline void build_wide_nego_msg(struct esp *esp, int size)
1493{
1494 esp->cur_msgout[0] = EXTENDED_MESSAGE;
1495 esp->cur_msgout[1] = 2;
1496 esp->cur_msgout[2] = EXTENDED_WDTR;
1497 switch (size) {
1498 case 32:
1499 esp->cur_msgout[3] = 2;
1500 break;
1501 case 16:
1502 esp->cur_msgout[3] = 1;
1503 break;
1504 case 8:
1505 default:
1506 esp->cur_msgout[3] = 0;
1507 break;
1508 };
1509
1510 esp->msgout_len = 4;
1511}
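/* For reference, the messages built above follow the standard SCSI-2
 * extended-message layout (byte values per the usual <scsi/scsi.h>
 * definitions):
 *
 *	SDTR: 0x01 (EXTENDED_MESSAGE), 0x03, 0x01 (EXTENDED_SDTR),
 *	      period, offset
 *	WDTR: 0x01 (EXTENDED_MESSAGE), 0x02, 0x03 (EXTENDED_WDTR),
 *	      width exponent (0 = 8-bit, 1 = 16-bit, 2 = 32-bit)
 */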
1512
1513static void esp_exec_cmd(struct esp *esp)
1514{
1515 struct scsi_cmnd *SCptr;
1516 struct scsi_device *SDptr;
1517 struct esp_device *esp_dev;
1518 volatile u8 *cmdp = esp->esp_command;
1519 u8 the_esp_command;
1520 int lun, target;
1521 int i;
1522
1523 /* Hold off if we have disconnected commands and
1524 * an IRQ is showing...
1525 */
1526 if (esp->disconnected_SC && ESP_IRQ_P(esp->dregs))
1527 return;
1528
1529 /* Grab first member of the issue queue. */
1530 SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC);
1531
1532 /* Safe to panic here because current_SC is null. */
1533 if (!SCptr)
1534 panic("esp: esp_exec_cmd and issue queue is NULL");
1535
1536 SDptr = SCptr->device;
1537 esp_dev = SDptr->hostdata;
1538 lun = SCptr->device->lun;
1539 target = SCptr->device->id;
1540
1541 esp->snip = 0;
1542 esp->msgout_len = 0;
1543
1544 /* Send it out whole, or piece by piece? The ESP
1545 * only knows how to automatically send out 6, 10,
1546 * and 12 byte commands. I used to think that the
1547 * Linux SCSI code would never throw anything other
1548 * than that to us, but then again there is the
1549 * SCSI generic driver which can send us anything.
1550 */
1551 esp_check_cmd(esp, SCptr);
1552
1553 /* If arbitration/selection is successful, the ESP will leave
1554 * ATN asserted, causing the target to go into message out
1555 * phase. The ESP will feed the target the identify and then
1556 * the target can only legally go to one of command,
1557 * datain/out, status, or message in phase, or stay in message
1558 * out phase (should we be trying to send a sync negotiation
1559 * message after the identify). It is not allowed to drop
1560 * BSY, but some buggy targets do and we check for this
1561 * condition in the selection complete code. Most of the time
1562 * we'll make the command bytes available to the ESP and it
1563	 * will not interrupt us until it finishes command phase; we
1564	 * cannot do this for command sizes the ESP does not
1565	 * understand, and in that case we'll get interrupted right
1566 * when the target goes into command phase.
1567 *
1568 * It is absolutely _illegal_ in the presence of SCSI-2 devices
1569 * to use the ESP select w/o ATN command. When SCSI-2 devices are
1570 * present on the bus we _must_ always go straight to message out
1571	 * phase with an identify message for the target. Since
1572	 * selection w/o ATN was merely an option in SCSI-1, doing SCSI-2
1573	 * selections this way should not confuse SCSI-1 devices, we hope.
1574 */
1575
1576 if (esp_dev->sync) {
1577 /* this targets sync is known */
1578#ifndef __sparc_v9__
1579do_sync_known:
1580#endif
1581 if (esp_dev->disconnect)
1582 *cmdp++ = IDENTIFY(1, lun);
1583 else
1584 *cmdp++ = IDENTIFY(0, lun);
1585
1586 if (esp->esp_slowcmd) {
1587 the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
1588 esp_advance_phase(SCptr, in_slct_stop);
1589 } else {
1590 the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
1591 esp_advance_phase(SCptr, in_slct_norm);
1592 }
1593 } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
1594 /* After the bootup SCSI code sends both the
1595 * TEST_UNIT_READY and INQUIRY commands we want
1596 * to at least attempt allowing the device to
1597 * disconnect.
1598 */
1599 ESPMISC(("esp: Selecting device for first time. target=%d "
1600 "lun=%d\n", target, SCptr->device->lun));
1601 if (!SDptr->borken && !esp_dev->disconnect)
1602 esp_dev->disconnect = 1;
1603
1604 *cmdp++ = IDENTIFY(0, lun);
1605 esp->prevmsgout = NOP;
1606 esp_advance_phase(SCptr, in_slct_norm);
1607 the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
1608
1609 /* Take no chances... */
1610 esp_dev->sync_max_offset = 0;
1611 esp_dev->sync_min_period = 0;
1612 } else {
1613 /* Sorry, I have had way too many problems with
1614 * various CDROM devices on ESP. -DaveM
1615 */
1616 int cdrom_hwbug_wkaround = 0;
1617
1618#ifndef __sparc_v9__
1619 /* Never allow disconnects or synchronous transfers on
1620 * SparcStation1 and SparcStation1+. Allowing those
1621 * to be enabled seems to lockup the machine completely.
1622 */
1623 if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
1624 (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
1625 /* But we are nice and allow tapes and removable
1626 * disks (but not CDROMs) to disconnect.
1627 */
1628 if(SDptr->type == TYPE_TAPE ||
1629 (SDptr->type != TYPE_ROM && SDptr->removable))
1630 esp_dev->disconnect = 1;
1631 else
1632 esp_dev->disconnect = 0;
1633 esp_dev->sync_max_offset = 0;
1634 esp_dev->sync_min_period = 0;
1635 esp_dev->sync = 1;
1636 esp->snip = 0;
1637 goto do_sync_known;
1638 }
1639#endif /* !(__sparc_v9__) */
1640
1641 /* We've talked to this guy before,
1642	 * but never negotiated. Let's try; we
1643	 * need to attempt WIDE first, before
1644	 * sync nego, as per the SCSI-2 standard.
1645 */
1646 if (esp->erev == fashme && !esp_dev->wide) {
1647 if (!SDptr->borken &&
1648 SDptr->type != TYPE_ROM &&
1649 SDptr->removable == 0) {
1650 build_wide_nego_msg(esp, 16);
1651 esp_dev->wide = 1;
1652 esp->wnip = 1;
1653 goto after_nego_msg_built;
1654 } else {
1655 esp_dev->wide = 1;
1656 /* Fall through and try sync. */
1657 }
1658 }
1659
1660 if (!SDptr->borken) {
1661 if ((SDptr->type == TYPE_ROM)) {
1662 /* Nice try sucker... */
1663 ESPMISC(("esp%d: Disabling sync for buggy "
1664 "CDROM.\n", esp->esp_id));
1665 cdrom_hwbug_wkaround = 1;
1666 build_sync_nego_msg(esp, 0, 0);
1667 } else if (SDptr->removable != 0) {
1668 ESPMISC(("esp%d: Not negotiating sync/wide but "
1669 "allowing disconnect for removable media.\n",
1670 esp->esp_id));
1671 build_sync_nego_msg(esp, 0, 0);
1672 } else {
1673 build_sync_nego_msg(esp, esp->sync_defp, 15);
1674 }
1675 } else {
1676 build_sync_nego_msg(esp, 0, 0);
1677 }
1678 esp_dev->sync = 1;
1679 esp->snip = 1;
1680
1681after_nego_msg_built:
1682	 /* A fix for broken SCSI1 targets: when they disconnect
1683	 * they lock up the bus and confuse the ESP. So disallow
1684 * disconnects for SCSI1 targets for now until we
1685 * find a better fix.
1686 *
1687	 * Addendum: This is funny; I figured out what was going
1688	 * on. The blotzed SCSI1 target would disconnect, and
1689	 * one or both of the other SCSI2 targets would be
1690 * disconnected as well. The SCSI1 target would
1691 * stay disconnected long enough that we start
1692 * up a command on one of the SCSI2 targets. As
1693 * the ESP is arbitrating for the bus the SCSI1
1694 * target begins to arbitrate as well to reselect
1695	 * the ESP. The SCSI1 target refuses to drop its
1696 * ID bit on the data bus even though the ESP is
1697 * at ID 7 and is the obvious winner for any
1698 * arbitration. The ESP is a poor sport and refuses
1699	 * to lose arbitration; it will continue indefinitely
1700 * trying to arbitrate for the bus and can only be
1701 * stopped via a chip reset or SCSI bus reset.
1702 * Therefore _no_ disconnects for SCSI1 targets
1703 * thank you very much. ;-)
1704 */
1705 if(((SDptr->scsi_level < 3) &&
1706 (SDptr->type != TYPE_TAPE) &&
1707 SDptr->removable == 0) ||
1708 cdrom_hwbug_wkaround || SDptr->borken) {
1709 ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
1710 "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
1711 esp_dev->disconnect = 0;
1712 *cmdp++ = IDENTIFY(0, lun);
1713 } else {
1714 *cmdp++ = IDENTIFY(1, lun);
1715 }
1716
1717 /* ESP fifo is only so big...
1718 * Make this look like a slow command.
1719 */
1720 esp->esp_slowcmd = 1;
1721 esp->esp_scmdleft = SCptr->cmd_len;
1722 esp->esp_scmdp = &SCptr->cmnd[0];
1723
1724 the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
1725 esp_advance_phase(SCptr, in_slct_msg);
1726 }
1727
1728 if (!esp->esp_slowcmd)
1729 for (i = 0; i < SCptr->cmd_len; i++)
1730 *cmdp++ = SCptr->cmnd[i];
1731
1732 /* HME sucks... */
1733 if (esp->erev == fashme)
1734 sbus_writeb((target & 0xf) | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT),
1735 esp->eregs + ESP_BUSID);
1736 else
1737 sbus_writeb(target & 7, esp->eregs + ESP_BUSID);
1738 if (esp->prev_soff != esp_dev->sync_max_offset ||
1739 esp->prev_stp != esp_dev->sync_min_period ||
1740 (esp->erev > esp100a &&
1741 esp->prev_cfg3 != esp->config3[target])) {
1742 esp->prev_soff = esp_dev->sync_max_offset;
1743 esp->prev_stp = esp_dev->sync_min_period;
1744 sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
1745 sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
1746 if (esp->erev > esp100a) {
1747 esp->prev_cfg3 = esp->config3[target];
1748 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
1749 }
1750 }
1751 i = (cmdp - esp->esp_command);
1752
1753 if (esp->erev == fashme) {
1754 esp_cmd(esp, ESP_CMD_FLUSH); /* Grrr! */
1755
1756 /* Set up the DMA and HME counters */
1757 sbus_writeb(i, esp->eregs + ESP_TCLOW);
1758 sbus_writeb(0, esp->eregs + ESP_TCMED);
1759 sbus_writeb(0, esp->eregs + FAS_RLO);
1760 sbus_writeb(0, esp->eregs + FAS_RHI);
1761 esp_cmd(esp, the_esp_command);
1762
1763 /* Talk about touchy hardware... */
1764 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
1765 (DMA_SCSI_DISAB | DMA_ENABLE)) &
1766 ~(DMA_ST_WRITE));
1767 sbus_writel(16, esp->dregs + DMA_COUNT);
1768 sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
1769 sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
1770 } else {
1771 u32 tmp;
1772
1773 /* Set up the DMA and ESP counters */
1774 sbus_writeb(i, esp->eregs + ESP_TCLOW);
1775 sbus_writeb(0, esp->eregs + ESP_TCMED);
1776 tmp = sbus_readl(esp->dregs + DMA_CSR);
1777 tmp &= ~DMA_ST_WRITE;
1778 tmp |= DMA_ENABLE;
1779 sbus_writel(tmp, esp->dregs + DMA_CSR);
1780 if (esp->dma->revision == dvmaesc1) {
1781 if (i) /* Workaround ESC gate array SBUS rerun bug. */
1782 sbus_writel(PAGE_SIZE, esp->dregs + DMA_COUNT);
1783 }
1784 sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
1785
1786 /* Tell ESP to "go". */
1787 esp_cmd(esp, the_esp_command);
1788 }
1789}
1790
1791/* Queue a SCSI command delivered from the mid-level Linux SCSI code. */
1792static int esp_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1793{
1794 struct esp *esp;
1795
1796 /* Set up func ptr and initial driver cmd-phase. */
1797 SCpnt->scsi_done = done;
1798 SCpnt->SCp.phase = not_issued;
1799
1800 /* We use the scratch area. */
1801 ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->device->lun));
1802 ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->device->lun));
1803
1804 esp = (struct esp *) SCpnt->device->host->hostdata;
1805 esp_get_dmabufs(esp, SCpnt);
1806 esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */
1807
1808 SCpnt->SCp.Status = CHECK_CONDITION;
1809 SCpnt->SCp.Message = 0xff;
1810 SCpnt->SCp.sent_command = 0;
1811
1812 /* Place into our queue. */
1813 if (SCpnt->cmnd[0] == REQUEST_SENSE) {
1814 ESPQUEUE(("RQSENSE\n"));
1815 prepend_SC(&esp->issue_SC, SCpnt);
1816 } else {
1817 ESPQUEUE(("\n"));
1818 append_SC(&esp->issue_SC, SCpnt);
1819 }
1820
1821 /* Run it now if we can. */
1822 if (!esp->current_SC && !esp->resetting_bus)
1823 esp_exec_cmd(esp);
1824
1825 return 0;
1826}
1827
1828/* Dump driver state. */
1829static void esp_dump_cmd(struct scsi_cmnd *SCptr)
1830{
1831 ESPLOG(("[tgt<%02x> lun<%02x> "
1832 "pphase<%s> cphase<%s>]",
1833 SCptr->device->id, SCptr->device->lun,
1834 phase_string(SCptr->SCp.sent_command),
1835 phase_string(SCptr->SCp.phase)));
1836}
1837
1838static void esp_dump_state(struct esp *esp)
1839{
1840 struct scsi_cmnd *SCptr = esp->current_SC;
1841#ifdef DEBUG_ESP_CMDS
1842 int i;
1843#endif
1844
1845 ESPLOG(("esp%d: dumping state\n", esp->esp_id));
1846 ESPLOG(("esp%d: dma -- cond_reg<%08x> addr<%08x>\n",
1847 esp->esp_id,
1848 sbus_readl(esp->dregs + DMA_CSR),
1849 sbus_readl(esp->dregs + DMA_ADDR)));
1850 ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
1851 esp->esp_id, esp->sreg, esp->seqreg, esp->ireg));
1852 ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
1853 esp->esp_id,
1854 sbus_readb(esp->eregs + ESP_STATUS),
1855 sbus_readb(esp->eregs + ESP_SSTEP),
1856 sbus_readb(esp->eregs + ESP_INTRPT)));
1857#ifdef DEBUG_ESP_CMDS
1858 printk("esp%d: last ESP cmds [", esp->esp_id);
1859 i = (esp->espcmdent - 1) & 31;
1860 printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
1861 i = (i - 1) & 31;
1862 printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
1863 i = (i - 1) & 31;
1864 printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
1865 i = (i - 1) & 31;
1866 printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">");
1867 printk("]\n");
1868#endif /* (DEBUG_ESP_CMDS) */
1869
1870 if (SCptr) {
1871 ESPLOG(("esp%d: current command ", esp->esp_id));
1872 esp_dump_cmd(SCptr);
1873 }
1874 ESPLOG(("\n"));
1875 SCptr = esp->disconnected_SC;
1876 ESPLOG(("esp%d: disconnected ", esp->esp_id));
1877 while (SCptr) {
1878 esp_dump_cmd(SCptr);
1879 SCptr = (struct scsi_cmnd *) SCptr->host_scribble;
1880 }
1881 ESPLOG(("\n"));
1882}
1883
1884/* Abort a command. The host_lock is acquired by caller. */
1885static int esp_abort(struct scsi_cmnd *SCptr)
1886{
1887 struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
1888 int don;
1889
1890 ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
1891 esp_dump_state(esp);
1892
1893 /* Wheee, if this is the current command on the bus, the
1894 * best we can do is assert ATN and wait for msgout phase.
1895 * This should even fix a hung SCSI bus when we lose state
1896 * in the driver and timeout because the eventual phase change
1897 * will cause the ESP to (eventually) give an interrupt.
1898 */
1899 if (esp->current_SC == SCptr) {
1900 esp->cur_msgout[0] = ABORT;
1901 esp->msgout_len = 1;
1902 esp->msgout_ctr = 0;
1903 esp_cmd(esp, ESP_CMD_SATN);
1904 return SUCCESS;
1905 }
1906
1907 /* If it is still in the issue queue then we can safely
1908 * call the completion routine and report abort success.
1909 */
1910 don = (sbus_readl(esp->dregs + DMA_CSR) & DMA_INT_ENAB);
1911 if (don) {
1912 ESP_INTSOFF(esp->dregs);
1913 }
1914 if (esp->issue_SC) {
1915 struct scsi_cmnd **prev, *this;
1916 for (prev = (&esp->issue_SC), this = esp->issue_SC;
1917 this != NULL;
1918 prev = (struct scsi_cmnd **) &(this->host_scribble),
1919 this = (struct scsi_cmnd *) this->host_scribble) {
1920
1921 if (this == SCptr) {
1922 *prev = (struct scsi_cmnd *) this->host_scribble;
1923 this->host_scribble = NULL;
1924
1925 esp_release_dmabufs(esp, this);
1926 this->result = DID_ABORT << 16;
1927 this->scsi_done(this);
1928
1929 if (don)
1930 ESP_INTSON(esp->dregs);
1931
1932 return SUCCESS;
1933 }
1934 }
1935 }
1936
1937	 /* Yuck, the command to abort is disconnected; it is not
1938 * worth trying to abort it now if something else is live
1939 * on the bus at this time. So, we let the SCSI code wait
1940 * a little bit and try again later.
1941 */
1942 if (esp->current_SC) {
1943 if (don)
1944 ESP_INTSON(esp->dregs);
1945 return FAILED;
1946 }
1947
1948	 /* It's disconnected; we would have to reconnect to re-establish
1949 * the nexus and tell the device to abort. However, we really
1950 * cannot 'reconnect' per se. Don't try to be fancy, just
1951 * indicate failure, which causes our caller to reset the whole
1952 * bus.
1953 */
1954
1955 if (don)
1956 ESP_INTSON(esp->dregs);
1957
1958 return FAILED;
1959}
1960
1961/* We've sent ESP_CMD_RS to the ESP and the interrupt has just
1962 * arrived indicating the end of the SCSI bus reset. Our job
1963 * is to clean out the command queues and begin re-execution
1964 * of SCSI commands once more.
1965 */
1966static int esp_finish_reset(struct esp *esp)
1967{
1968 struct scsi_cmnd *sp = esp->current_SC;
1969
1970 /* Clean up currently executing command, if any. */
1971 if (sp != NULL) {
1972 esp->current_SC = NULL;
1973
1974 esp_release_dmabufs(esp, sp);
1975 sp->result = (DID_RESET << 16);
1976
1977 sp->scsi_done(sp);
1978 }
1979
1980	 /* Clean up the disconnected queue; these commands have been invalidated
1981 * by the bus reset.
1982 */
1983 if (esp->disconnected_SC) {
1984 while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
1985 esp_release_dmabufs(esp, sp);
1986 sp->result = (DID_RESET << 16);
1987
1988 sp->scsi_done(sp);
1989 }
1990 }
1991
1992 /* SCSI bus reset is complete. */
1993 esp->resetting_bus = 0;
1994 wake_up(&esp->reset_queue);
1995
1996 /* Ok, now it is safe to get commands going once more. */
1997 if (esp->issue_SC)
1998 esp_exec_cmd(esp);
1999
2000 return do_intr_end;
2001}
2002
2003static int esp_do_resetbus(struct esp *esp)
2004{
2005 ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id));
2006 esp->resetting_bus = 1;
2007 esp_cmd(esp, ESP_CMD_RS);
2008
2009 return do_intr_end;
2010}
2011
2012/* Reset ESP chip, reset hanging bus, then kill active and
2013 * disconnected commands for targets without soft reset.
2014 *
2015 * The host_lock is acquired by caller.
2016 */
2017static int esp_reset(struct scsi_cmnd *SCptr)
2018{
2019 struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
2020
2021 spin_lock_irq(esp->ehost->host_lock);
2022 (void) esp_do_resetbus(esp);
2023 spin_unlock_irq(esp->ehost->host_lock);
2024
2025 wait_event(esp->reset_queue, (esp->resetting_bus == 0));
2026
2027 return SUCCESS;
2028}
2029
2030/* Internal ESP done function. */
2031static void esp_done(struct esp *esp, int error)
2032{
2033 struct scsi_cmnd *done_SC = esp->current_SC;
2034
2035 esp->current_SC = NULL;
2036
2037 esp_release_dmabufs(esp, done_SC);
2038 done_SC->result = error;
2039
2040 done_SC->scsi_done(done_SC);
2041
2042 /* Bus is free, issue any commands in the queue. */
2043 if (esp->issue_SC && !esp->current_SC)
2044 esp_exec_cmd(esp);
2045
2046}
2047
2048/* Wheee, ESP interrupt engine. */
2049
2050/* Forward declarations. */
2051static int esp_do_phase_determine(struct esp *esp);
2052static int esp_do_data_finale(struct esp *esp);
2053static int esp_select_complete(struct esp *esp);
2054static int esp_do_status(struct esp *esp);
2055static int esp_do_msgin(struct esp *esp);
2056static int esp_do_msgindone(struct esp *esp);
2057static int esp_do_msgout(struct esp *esp);
2058static int esp_do_cmdbegin(struct esp *esp);
2059
2060#define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP)
2061#define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP)
2062
2063/* Read any bytes found in the FAS366 fifo, storing them into
2064 * the ESP driver software state structure.
2065 */
2066static void hme_fifo_read(struct esp *esp)
2067{
2068 u8 count = 0;
2069 u8 status = esp->sreg;
2070
2071	/* Cannot safely frob the fifo in the following cases, but
2072 * we must always read the fifo when the reselect interrupt
2073 * is pending.
2074 */
2075 if (((esp->ireg & ESP_INTR_RSEL) == 0) &&
2076 (sreg_datainp(status) ||
2077 sreg_dataoutp(status) ||
2078 (esp->current_SC &&
2079 esp->current_SC->SCp.phase == in_data_done))) {
2080 ESPHME(("<wkaround_skipped>"));
2081 } else {
2082 unsigned long fcnt = sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES;
2083
2084 /* The HME stores bytes in multiples of 2 in the fifo. */
2085 ESPHME(("hme_fifo[fcnt=%d", (int)fcnt));
2086 while (fcnt) {
2087 esp->hme_fifo_workaround_buffer[count++] =
2088 sbus_readb(esp->eregs + ESP_FDATA);
2089 esp->hme_fifo_workaround_buffer[count++] =
2090 sbus_readb(esp->eregs + ESP_FDATA);
2091 ESPHME(("<%02x,%02x>", esp->hme_fifo_workaround_buffer[count-2], esp->hme_fifo_workaround_buffer[count-1]));
2092 fcnt--;
2093 }
2094 if (sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_F1BYTE) {
2095 ESPHME(("<poke_byte>"));
2096 sbus_writeb(0, esp->eregs + ESP_FDATA);
2097 esp->hme_fifo_workaround_buffer[count++] =
2098 sbus_readb(esp->eregs + ESP_FDATA);
2099 ESPHME(("<%02x,0x00>", esp->hme_fifo_workaround_buffer[count-1]));
2100 ESPHME(("CMD_FLUSH"));
2101 esp_cmd(esp, ESP_CMD_FLUSH);
2102 } else {
2103 ESPHME(("no_xtra_byte"));
2104 }
2105 }
2106 ESPHME(("wkarnd_cnt=%d]", (int)count));
2107 esp->hme_fifo_workaround_count = count;
2108}
2109
2110static inline void hme_fifo_push(struct esp *esp, u8 *bytes, u8 count)
2111{
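	/* The HME fifo apparently moves data 16 bits at a time (see the
	 * note in hme_fifo_read above), so pad each hand-loaded byte
	 * with a zero upper byte.
	 */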
2112 esp_cmd(esp, ESP_CMD_FLUSH);
2113 while (count) {
2114 u8 tmp = *bytes++;
2115 sbus_writeb(tmp, esp->eregs + ESP_FDATA);
2116 sbus_writeb(0, esp->eregs + ESP_FDATA);
2117 count--;
2118 }
2119}
2120
2121/* We try to avoid some interrupts by jumping ahead to see if the ESP
2122 * has gotten far enough yet. Hence the following.
2123 */
2124static inline int skipahead1(struct esp *esp, struct scsi_cmnd *scp,
2125 int prev_phase, int new_phase)
2126{
2127 if (scp->SCp.sent_command != prev_phase)
2128 return 0;
2129 if (ESP_IRQ_P(esp->dregs)) {
2130 /* Yes, we are able to save an interrupt. */
2131 if (esp->erev == fashme)
2132 esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2);
2133 esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR));
2134 esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);
2135 if (esp->erev == fashme) {
2136 /* This chip is really losing. */
2137 ESPHME(("HME["));
2138 /* Must latch fifo before reading the interrupt
2139 * register else garbage ends up in the FIFO
2140 * which confuses the driver utterly.
2141 * Happy Meal indeed....
2142 */
2143 ESPHME(("fifo_workaround]"));
2144 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2145 (esp->sreg2 & ESP_STAT2_F1BYTE))
2146 hme_fifo_read(esp);
2147 }
2148 if (!(esp->ireg & ESP_INTR_SR))
2149 return 0;
2150 else
2151 return do_reset_complete;
2152 }
2153 /* Ho hum, target is taking forever... */
2154 scp->SCp.sent_command = new_phase; /* so we don't recurse... */
2155 return do_intr_end;
2156}
2157
2158static inline int skipahead2(struct esp *esp, struct scsi_cmnd *scp,
2159 int prev_phase1, int prev_phase2, int new_phase)
2160{
2161 if (scp->SCp.sent_command != prev_phase1 &&
2162 scp->SCp.sent_command != prev_phase2)
2163 return 0;
2164 if (ESP_IRQ_P(esp->dregs)) {
2165 /* Yes, we are able to save an interrupt. */
2166 if (esp->erev == fashme)
2167 esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2);
2168 esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR));
2169 esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);
2170 if (esp->erev == fashme) {
2171 /* This chip is really losing. */
2172 ESPHME(("HME["));
2173
2174 /* Must latch fifo before reading the interrupt
2175 * register else garbage ends up in the FIFO
2176 * which confuses the driver utterly.
2177 * Happy Meal indeed....
2178 */
2179 ESPHME(("fifo_workaround]"));
2180 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2181 (esp->sreg2 & ESP_STAT2_F1BYTE))
2182 hme_fifo_read(esp);
2183 }
2184 if (!(esp->ireg & ESP_INTR_SR))
2185 return 0;
2186 else
2187 return do_reset_complete;
2188 }
2189 /* Ho hum, target is taking forever... */
2190 scp->SCp.sent_command = new_phase; /* so we don't recurse... */
2191 return do_intr_end;
2192}
2193
2194/* Now some dma helpers. */
2195static void dma_setup(struct esp *esp, __u32 addr, int count, int write)
2196{
2197 u32 nreg = sbus_readl(esp->dregs + DMA_CSR);
2198
2199 if (write)
2200 nreg |= DMA_ST_WRITE;
2201 else
2202 nreg &= ~(DMA_ST_WRITE);
2203 nreg |= DMA_ENABLE;
2204 sbus_writel(nreg, esp->dregs + DMA_CSR);
2205 if (esp->dma->revision == dvmaesc1) {
2206 /* This ESC gate array sucks! */
2207 __u32 src = addr;
2208 __u32 dest = src + count;
2209
2210 if (dest & (PAGE_SIZE - 1))
2211 count = PAGE_ALIGN(count);
2212 sbus_writel(count, esp->dregs + DMA_COUNT);
2213 }
2214 sbus_writel(addr, esp->dregs + DMA_ADDR);
2215}
2216
2217static void dma_drain(struct esp *esp)
2218{
2219 u32 tmp;
2220
2221 if (esp->dma->revision == dvmahme)
2222 return;
2223 if ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_FIFO_ISDRAIN) {
2224 switch (esp->dma->revision) {
2225 default:
2226 tmp |= DMA_FIFO_STDRAIN;
2227 sbus_writel(tmp, esp->dregs + DMA_CSR);
2228
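			/* fall through -- all revisions now wait for the drain to complete */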
2229 case dvmarev3:
2230 case dvmaesc1:
2231 while (sbus_readl(esp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
2232 udelay(1);
2233		}
2234 }
2235}
2236
2237static void dma_invalidate(struct esp *esp)
2238{
2239 u32 tmp;
2240
2241 if (esp->dma->revision == dvmahme) {
2242 sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR);
2243
2244 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
2245 (DMA_PARITY_OFF | DMA_2CLKS |
2246 DMA_SCSI_DISAB | DMA_INT_ENAB)) &
2247 ~(DMA_ST_WRITE | DMA_ENABLE));
2248
2249 sbus_writel(0, esp->dregs + DMA_CSR);
2250 sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
2251
2252 /* This is necessary to avoid having the SCSI channel
2253 * engine lock up on us.
2254 */
2255 sbus_writel(0, esp->dregs + DMA_ADDR);
2256 } else {
2257 while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ)
2258 udelay(1);
2259
2260 tmp &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
2261 tmp |= DMA_FIFO_INV;
2262 sbus_writel(tmp, esp->dregs + DMA_CSR);
2263 tmp &= ~DMA_FIFO_INV;
2264 sbus_writel(tmp, esp->dregs + DMA_CSR);
2265 }
2266}
2267
2268static inline void dma_flashclear(struct esp *esp)
2269{
2270 dma_drain(esp);
2271 dma_invalidate(esp);
2272}
2273
2274static int dma_can_transfer(struct esp *esp, struct scsi_cmnd *sp)
2275{
2276 __u32 base, end, sz;
2277
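	/* Clamp the transfer to what the DVMA engine can presumably handle:
	 * rev3 caps the size at 16MB, while the other revisions must not
	 * cross a 16MB boundary from the starting address.
	 */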
2278 if (esp->dma->revision == dvmarev3) {
2279 sz = sp->SCp.this_residual;
2280 if (sz > 0x1000000)
2281 sz = 0x1000000;
2282 } else {
2283 base = ((__u32)((unsigned long)sp->SCp.ptr));
2284 base &= (0x1000000 - 1);
2285 end = (base + sp->SCp.this_residual);
2286 if (end > 0x1000000)
2287 end = 0x1000000;
2288 sz = (end - base);
2289 }
2290 return sz;
2291}
2292
2293/* Misc. esp helper macros. */
2294#define esp_setcount(__eregs, __cnt, __hme) do { \
2295	sbus_writeb(((__cnt)&0xff), (__eregs) + ESP_TCLOW); \
2296	sbus_writeb((((__cnt)>>8)&0xff), (__eregs) + ESP_TCMED); \
2297	if (__hme) { \
2298		sbus_writeb((((__cnt)>>16)&0xff), (__eregs) + FAS_RLO); \
2299		sbus_writeb(0, (__eregs) + FAS_RHI); \
2300	} } while (0)
2301
2302#define esp_getcount(__eregs, __hme) \
2303 ((sbus_readb((__eregs) + ESP_TCLOW)&0xff) | \
2304 ((sbus_readb((__eregs) + ESP_TCMED)&0xff) << 8) | \
2305 ((__hme) ? sbus_readb((__eregs) + FAS_RLO) << 16 : 0))
2306
2307#define fcount(__esp) \
2308 (((__esp)->erev == fashme) ? \
2309 (__esp)->hme_fifo_workaround_count : \
2310 sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_FBYTES)
2311
2312#define fnzero(__esp) \
2313 (((__esp)->erev == fashme) ? 0 : \
2314 sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_ONOTZERO)
2315
2316/* XXX speculative nops unnecessary when continuing amidst a data phase
2317 * XXX even on esp100!!! another case of flooding the bus with I/O reg
2318 * XXX writes...
2319 */
2320#define esp_maybe_nop(__esp) do { \
2321	if ((__esp)->erev == esp100) \
2322		esp_cmd((__esp), ESP_CMD_NULL); } while (0)
2323
2324#define sreg_to_dataphase(__sreg) \
2325 ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain)
2326
2327/* The ESP100, when in synchronous data phase, can mistake a long final
2328 * REQ pulse from the target for an extra byte; it places whatever is on
2329 * the data lines into the fifo.  For now, we will assume when this
2330 * happens that the target is a bit quirky and we don't want to
2331 * be talking synchronously to it anyway.  Regardless, we need to
2332 * tell the ESP to eat the extraneous byte so that we can proceed
2333 * to the next phase.
2334 */
2335static int esp100_sync_hwbug(struct esp *esp, struct scsi_cmnd *sp, int fifocnt)
2336{
2337 /* Do not touch this piece of code. */
2338 if ((!(esp->erev == esp100)) ||
2339 (!(sreg_datainp((esp->sreg = sbus_readb(esp->eregs + ESP_STATUS))) &&
2340 !fifocnt) &&
2341 !(sreg_dataoutp(esp->sreg) && !fnzero(esp)))) {
2342 if (sp->SCp.phase == in_dataout)
2343 esp_cmd(esp, ESP_CMD_FLUSH);
2344 return 0;
2345 } else {
2346 /* Async mode for this guy. */
2347 build_sync_nego_msg(esp, 0, 0);
2348
2349 /* Ack the bogus byte, but set ATN first. */
2350 esp_cmd(esp, ESP_CMD_SATN);
2351 esp_cmd(esp, ESP_CMD_MOK);
2352 return 1;
2353 }
2354}
2355
2356/* This closes the window during a selection with a reselect pending.  Because
2357 * we use DMA for the selection process, the FIFO should hold the correct
2358 * contents if we get reselected during this process, so we just need to
2359 * ack the possible illegal cmd interrupt pending on the esp100.
2360 */
2361static inline int esp100_reconnect_hwbug(struct esp *esp)
2362{
2363 u8 tmp;
2364
2365 if (esp->erev != esp100)
2366 return 0;
2367 tmp = sbus_readb(esp->eregs + ESP_INTRPT);
2368 if (tmp & ESP_INTR_SR)
2369 return 1;
2370 return 0;
2371}
2372
2373/* This verifies the BUSID bits during a reselection so that we know which
2374 * target is talking to us.
2375 */
2376static inline int reconnect_target(struct esp *esp)
2377{
2378 int it, me = esp->scsi_id_mask, targ = 0;
2379
2380 if (2 != fcount(esp))
2381 return -1;
2382 if (esp->erev == fashme) {
2383		/* HME does not latch its own BUS ID bits during
2384		 * a reselection.  Also the target number is given
2385		 * as an unsigned char, not as a sole bit number
2386		 * like the other ESPs do.
2387 * Happy Meal indeed....
2388 */
2389 targ = esp->hme_fifo_workaround_buffer[0];
2390 } else {
2391 it = sbus_readb(esp->eregs + ESP_FDATA);
2392 if (!(it & me))
2393 return -1;
2394 it &= ~me;
2395 if (it & (it - 1))
2396 return -1;
2397 while (!(it & 1))
2398 targ++, it >>= 1;
2399 }
2400 return targ;
2401}
2402
2403/* This verifies the identify from the target so that we know which lun is
2404 * being reconnected.
2405 */
2406static inline int reconnect_lun(struct esp *esp)
2407{
2408 int lun;
2409
2410 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)
2411 return -1;
2412 if (esp->erev == fashme)
2413 lun = esp->hme_fifo_workaround_buffer[1];
2414 else
2415 lun = sbus_readb(esp->eregs + ESP_FDATA);
2416
2417	/* Yes, you read this correctly.  We report a lun of zero
2418	 * if we see a parity error.  The ESP reports a parity error for
2419	 * the lun byte, and this is the only way we can hope to recover
2420	 * because the target is connected.
2421 */
2422 if (esp->sreg & ESP_STAT_PERR)
2423 return 0;
2424
2425 /* Check for illegal bits being set in the lun. */
2426 if ((lun & 0x40) || !(lun & 0x80))
2427 return -1;
2428
2429 return lun & 7;
2430}
2431
2432/* This puts the driver in a state where it can revitalize a command that
2433 * is being continued due to reselection.
2434 */
2435static inline void esp_connect(struct esp *esp, struct scsi_cmnd *sp)
2436{
2437 struct esp_device *esp_dev = sp->device->hostdata;
2438
2439 if (esp->prev_soff != esp_dev->sync_max_offset ||
2440 esp->prev_stp != esp_dev->sync_min_period ||
2441 (esp->erev > esp100a &&
2442 esp->prev_cfg3 != esp->config3[sp->device->id])) {
2443 esp->prev_soff = esp_dev->sync_max_offset;
2444 esp->prev_stp = esp_dev->sync_min_period;
2445 sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
2446 sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
2447 if (esp->erev > esp100a) {
2448 esp->prev_cfg3 = esp->config3[sp->device->id];
2449 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
2450 }
2451 }
2452 esp->current_SC = sp;
2453}
2454
2455/* This will place the current working command back into the issue queue
2456 * if we are to receive a reselection amidst a selection attempt.
2457 */
2458static inline void esp_reconnect(struct esp *esp, struct scsi_cmnd *sp)
2459{
2460 if (!esp->disconnected_SC)
2461 ESPLOG(("esp%d: Weird, being reselected but disconnected "
2462 "command queue is empty.\n", esp->esp_id));
2463 esp->snip = 0;
2464 esp->current_SC = NULL;
2465 sp->SCp.phase = not_issued;
2466 append_SC(&esp->issue_SC, sp);
2467}
2468
2469/* Begin message in phase. */
2470static int esp_do_msgin(struct esp *esp)
2471{
2472 /* Must be very careful with the fifo on the HME */
2473 if ((esp->erev != fashme) ||
2474 !(sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_FEMPTY))
2475 esp_cmd(esp, ESP_CMD_FLUSH);
2476 esp_maybe_nop(esp);
2477 esp_cmd(esp, ESP_CMD_TI);
2478 esp->msgin_len = 1;
2479 esp->msgin_ctr = 0;
2480 esp_advance_phase(esp->current_SC, in_msgindone);
2481 return do_work_bus;
2482}
2483
2484/* This uses various DMA csr fields and the fifo flags count value to
2485 * determine how many bytes were successfully sent/received by the ESP.
2486 */
2487static inline int esp_bytes_sent(struct esp *esp, int fifo_count)
2488{
2489 int rval = sbus_readl(esp->dregs + DMA_ADDR) - esp->esp_command_dvma;
2490
2491 if (esp->dma->revision == dvmarev1)
2492 rval -= (4 - ((sbus_readl(esp->dregs + DMA_CSR) & DMA_READ_AHEAD)>>11));
2493 return rval - fifo_count;
2494}
2495
2496static inline void advance_sg(struct scsi_cmnd *sp)
2497{
2498 ++sp->SCp.buffer;
2499 --sp->SCp.buffers_residual;
2500 sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer);
2501 sp->SCp.ptr = (char *)((unsigned long)sg_dma_address(sp->SCp.buffer));
2502}
2503
2504/* Please note that the way I've coded these routines is that I _always_
2505 * check for a disconnect during any and all information transfer
2506 * phases. The SCSI standard states that the target _can_ cause a BUS
2507 * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note
2508 * that during information transfer phases the target controls every
2509 * change in phase; the only thing the initiator can do is "ask" for
2510 * a message out phase by driving ATN true. The target can, and sometimes
2511 * will, completely ignore this request so we cannot assume anything when
2512 * we try to force a message out phase to abort/reset a target. Most of
2513 * the time the target will eventually be nice and go to message out, so
2514 * we may have to hold on to our state about what we want to tell the target
2515 * for some period of time.
2516 */
2517
2518/* I think I have things working here correctly. Even partial transfers
2519 * within a buffer or sub-buffer should not upset us at all no matter
2520 * how bad the target and/or ESP fucks things up.
2521 */
2522static int esp_do_data(struct esp *esp)
2523{
2524 struct scsi_cmnd *SCptr = esp->current_SC;
2525 int thisphase, hmuch;
2526
2527 ESPDATA(("esp_do_data: "));
2528 esp_maybe_nop(esp);
2529 thisphase = sreg_to_dataphase(esp->sreg);
2530 esp_advance_phase(SCptr, thisphase);
2531 ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT"));
2532 hmuch = dma_can_transfer(esp, SCptr);
2533 if (hmuch > (64 * 1024) && (esp->erev != fashme))
2534 hmuch = (64 * 1024);
2535 ESPDATA(("hmuch<%d> ", hmuch));
2536 esp->current_transfer_size = hmuch;
2537
2538 if (esp->erev == fashme) {
2539 u32 tmp = esp->prev_hme_dmacsr;
2540
2541 /* Always set the ESP count registers first. */
2542 esp_setcount(esp->eregs, hmuch, 1);
2543
2544 /* Get the DMA csr computed. */
2545 tmp |= (DMA_SCSI_DISAB | DMA_ENABLE);
2546 if (thisphase == in_datain)
2547 tmp |= DMA_ST_WRITE;
2548 else
2549 tmp &= ~(DMA_ST_WRITE);
2550 esp->prev_hme_dmacsr = tmp;
2551
2552 ESPDATA(("DMA|TI --> do_intr_end\n"));
2553 if (thisphase == in_datain) {
2554 sbus_writel(hmuch, esp->dregs + DMA_COUNT);
2555 esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
2556 } else {
2557 esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
2558 sbus_writel(hmuch, esp->dregs + DMA_COUNT);
2559 }
2560 sbus_writel((__u32)((unsigned long)SCptr->SCp.ptr), esp->dregs+DMA_ADDR);
2561 sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR);
2562 } else {
2563 esp_setcount(esp->eregs, hmuch, 0);
2564 dma_setup(esp, ((__u32)((unsigned long)SCptr->SCp.ptr)),
2565 hmuch, (thisphase == in_datain));
2566 ESPDATA(("DMA|TI --> do_intr_end\n"));
2567 esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
2568 }
2569 return do_intr_end;
2570}
2571
2572/* See how successful the data transfer was. */
2573static int esp_do_data_finale(struct esp *esp)
2574{
2575 struct scsi_cmnd *SCptr = esp->current_SC;
2576 struct esp_device *esp_dev = SCptr->device->hostdata;
2577 int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
2578
2579 ESPDATA(("esp_do_data_finale: "));
2580
2581 if (SCptr->SCp.phase == in_datain) {
2582 if (esp->sreg & ESP_STAT_PERR) {
2583 /* Yuck, parity error. The ESP asserts ATN
2584 * so that we can go to message out phase
2585 * immediately and inform the target that
2586 * something bad happened.
2587 */
2588 ESPLOG(("esp%d: data bad parity detected.\n",
2589 esp->esp_id));
2590 esp->cur_msgout[0] = INITIATOR_ERROR;
2591 esp->msgout_len = 1;
2592 }
2593 dma_drain(esp);
2594 }
2595 dma_invalidate(esp);
2596
2597 /* This could happen for the above parity error case. */
2598 if (esp->ireg != ESP_INTR_BSERV) {
2599 /* Please go to msgout phase, please please please... */
2600 ESPLOG(("esp%d: !BSERV after data, probably to msgout\n",
2601 esp->esp_id));
2602 return esp_do_phase_determine(esp);
2603 }
2604
2605 /* Check for partial transfers and other horrible events.
2606 * Note, here we read the real fifo flags register even
2607 * on HME broken adapters because we skip the HME fifo
2608 * workaround code in esp_handle() if we are doing data
2609 * phase things. We don't want to fuck directly with
2610 * the fifo like that, especially if doing synchronous
2611	 * transfers!  Also, we will need to double the count on
2612 * HME if we are doing wide transfers, as the HME fifo
2613 * will move and count 16-bit quantities during wide data.
2614 * SMCC _and_ Qlogic can both bite me.
2615 */
2616 fifocnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES);
2617 if (esp->erev != fashme)
2618 ecount = esp_getcount(esp->eregs, 0);
2619 bytes_sent = esp->current_transfer_size;
2620
2621 ESPDATA(("trans_sz(%d), ", bytes_sent));
2622 if (esp->erev == fashme) {
2623 if (!(esp->sreg & ESP_STAT_TCNT)) {
2624 ecount = esp_getcount(esp->eregs, 1);
2625 bytes_sent -= ecount;
2626 }
2627
2628 /* Always subtract any cruft remaining in the FIFO. */
2629 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
2630 fifocnt <<= 1;
2631 if (SCptr->SCp.phase == in_dataout)
2632 bytes_sent -= fifocnt;
2633
2634 /* I have an IBM disk which exhibits the following
2635		 * behavior during writes to it.  It disconnects in
2636		 * the middle of a partial transfer: the current sglist
2637		 * buffer is 1024 bytes, and the disk stops the data
2638		 * transfer at 512 bytes.
2639 *
2640 * However the FAS366 reports that 32 more bytes were
2641 * transferred than really were. This is precisely
2642 * the size of a fully loaded FIFO in wide scsi mode.
2643 * The FIFO state recorded indicates that it is empty.
2644 *
2645 * I have no idea if this is a bug in the FAS366 chip
2646 * or a bug in the firmware on this IBM disk. In any
2647 * event the following seems to be a good workaround. -DaveM
2648 */
2649 if (bytes_sent != esp->current_transfer_size &&
2650 SCptr->SCp.phase == in_dataout) {
2651 int mask = (64 - 1);
2652
2653 if ((esp->prev_cfg3 & ESP_CONFIG3_EWIDE) == 0)
2654 mask >>= 1;
2655
2656 if (bytes_sent & mask)
2657 bytes_sent -= (bytes_sent & mask);
2658 }
2659 } else {
2660 if (!(esp->sreg & ESP_STAT_TCNT))
2661 bytes_sent -= ecount;
2662 if (SCptr->SCp.phase == in_dataout)
2663 bytes_sent -= fifocnt;
2664 }
2665
2666 ESPDATA(("bytes_sent(%d), ", bytes_sent));
2667
2668 /* If we were in synchronous mode, check for peculiarities. */
2669 if (esp->erev == fashme) {
2670 if (esp_dev->sync_max_offset) {
2671 if (SCptr->SCp.phase == in_dataout)
2672 esp_cmd(esp, ESP_CMD_FLUSH);
2673 } else {
2674 esp_cmd(esp, ESP_CMD_FLUSH);
2675 }
2676 } else {
2677 if (esp_dev->sync_max_offset)
2678 bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt);
2679 else
2680 esp_cmd(esp, ESP_CMD_FLUSH);
2681 }
2682
2683 /* Until we are sure of what has happened, we are certainly
2684 * in the dark.
2685 */
2686 esp_advance_phase(SCptr, in_the_dark);
2687
2688 if (bytes_sent < 0) {
2689 /* I've seen this happen due to lost state in this
2690 * driver. No idea why it happened, but allowing
2691 * this value to be negative caused things to
2692		 * lock up.  This allows a greater chance of recovery.
2693 * In fact every time I've seen this, it has been
2694 * a driver bug without question.
2695 */
2696 ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id));
2697 ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n",
2698 esp->esp_id,
2699 esp->current_transfer_size, fifocnt, ecount));
2700 ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n",
2701 esp->esp_id,
2702 SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual));
2703 ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id,
2704 SCptr->device->id));
2705 SCptr->device->borken = 1;
2706 esp_dev->sync = 0;
2707 bytes_sent = 0;
2708 }
2709
2710 /* Update the state of our transfer. */
2711 SCptr->SCp.ptr += bytes_sent;
2712 SCptr->SCp.this_residual -= bytes_sent;
2713 if (SCptr->SCp.this_residual < 0) {
2714 /* shit */
2715 ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id));
2716 SCptr->SCp.this_residual = 0;
2717 }
2718
2719 /* Maybe continue. */
2720 if (!bogus_data) {
2721 ESPDATA(("!bogus_data, "));
2722
2723		/* NO MATTER WHAT, we advance the scatterlist;
2724		 * if the target should decide to disconnect
2725		 * in between scatter chunks (which is common)
2726		 * we could otherwise die horribly!  I used to have
2727		 * the sg advance occur only if we were going back
2728		 * into (or staying in) a data phase, and you can
2729		 * imagine the hell I went through trying to
2730		 * figure this out.
2731		 */
2732 if (SCptr->use_sg && !SCptr->SCp.this_residual)
2733 advance_sg(SCptr);
2734 if (sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) {
2735 ESPDATA(("to more data\n"));
2736 return esp_do_data(esp);
2737 }
2738 ESPDATA(("to new phase\n"));
2739 return esp_do_phase_determine(esp);
2740 }
2741 /* Bogus data, just wait for next interrupt. */
2742 ESPLOG(("esp%d: bogus_data during end of data phase\n",
2743 esp->esp_id));
2744 return do_intr_end;
2745}
2746
2747/* We received a non-good status return at the end of
2748 * running a SCSI command. This is used to decide if
2749 * we should clear our synchronous transfer state for
2750 * such a device when that happens.
2751 *
2752 * The idea is that when spinning up a disk or rewinding
2753 * a tape, we don't want to go into a loop re-negotiating
2754 * synchronous capabilities over and over.
2755 */
2756static int esp_should_clear_sync(struct scsi_cmnd *sp)
2757{
2758 u8 cmd = sp->cmnd[0];
2759
2760 /* These cases are for spinning up a disk and
2761 * waiting for that spinup to complete.
2762 */
2763 if (cmd == START_STOP)
2764 return 0;
2765
2766 if (cmd == TEST_UNIT_READY)
2767 return 0;
2768
2769 /* One more special case for SCSI tape drives,
2770 * this is what is used to probe the device for
2771 * completion of a rewind or tape load operation.
2772 */
2773 if (sp->device->type == TYPE_TAPE) {
2774 if (cmd == MODE_SENSE)
2775 return 0;
2776 }
2777
2778 return 1;
2779}
2780
2781/* Either a command is completing or a target is dropping off the bus
2782 * to continue the command in the background so we can do other work.
2783 */
2784static int esp_do_freebus(struct esp *esp)
2785{
2786 struct scsi_cmnd *SCptr = esp->current_SC;
2787 struct esp_device *esp_dev = SCptr->device->hostdata;
2788 int rval;
2789
2790 rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing);
2791 if (rval)
2792 return rval;
2793 if (esp->ireg != ESP_INTR_DC) {
2794 ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id));
2795 return do_reset_bus; /* target will not drop BSY... */
2796 }
2797 esp->msgout_len = 0;
2798 esp->prevmsgout = NOP;
2799 if (esp->prevmsgin == COMMAND_COMPLETE) {
2800 /* Normal end of nexus. */
2801 if (esp->disconnected_SC || (esp->erev == fashme))
2802 esp_cmd(esp, ESP_CMD_ESEL);
2803
2804 if (SCptr->SCp.Status != GOOD &&
2805 SCptr->SCp.Status != CONDITION_GOOD &&
2806 ((1<<SCptr->device->id) & esp->targets_present) &&
2807 esp_dev->sync &&
2808 esp_dev->sync_max_offset) {
2809 /* SCSI standard says that the synchronous capabilities
2810 * should be renegotiated at this point. Most likely
2811 * we are about to request sense from this target
2812 * in which case we want to avoid using sync
2813 * transfers until we are sure of the current target
2814 * state.
2815 */
2816 ESPMISC(("esp: Status <%d> for target %d lun %d\n",
2817 SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun));
2818
2819 /* But don't do this when spinning up a disk at
2820 * boot time while we poll for completion as it
2821 * fills up the console with messages. Also, tapes
2822 * can report not ready many times right after
2823 * loading up a tape.
2824 */
2825 if (esp_should_clear_sync(SCptr) != 0)
2826 esp_dev->sync = 0;
2827 }
2828 ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
2829 esp_done(esp, ((SCptr->SCp.Status & 0xff) |
2830 ((SCptr->SCp.Message & 0xff)<<8) |
2831 (DID_OK << 16)));
2832 } else if (esp->prevmsgin == DISCONNECT) {
2833 /* Normal disconnect. */
2834 esp_cmd(esp, ESP_CMD_ESEL);
2835 ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
2836 append_SC(&esp->disconnected_SC, SCptr);
2837 esp->current_SC = NULL;
2838 if (esp->issue_SC)
2839 esp_exec_cmd(esp);
2840 } else {
2841 /* Driver bug, we do not expect a disconnect here
2842 * and should not have advanced the state engine
2843 * to in_freeing.
2844 */
2845 ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n",
2846 esp->esp_id));
2847 return do_reset_bus;
2848 }
2849 return do_intr_end;
2850}
2851
2852/* When a reselect occurs, and we cannot find the command to
2853 * reconnect to in our queues, we do this.
2854 */
2855static int esp_bad_reconnect(struct esp *esp)
2856{
2857 struct scsi_cmnd *sp;
2858
2859 ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n",
2860 esp->esp_id));
2861 ESPLOG(("QUEUE DUMP\n"));
2862 sp = esp->issue_SC;
2863 ESPLOG(("esp%d: issue_SC[", esp->esp_id));
2864 while (sp) {
2865 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2866 sp = (struct scsi_cmnd *) sp->host_scribble;
2867 }
2868 ESPLOG(("]\n"));
2869 sp = esp->current_SC;
2870 ESPLOG(("esp%d: current_SC[", esp->esp_id));
2871 if (sp)
2872 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2873 else
2874 ESPLOG(("<NULL>"));
2875 ESPLOG(("]\n"));
2876 sp = esp->disconnected_SC;
2877 ESPLOG(("esp%d: disconnected_SC[", esp->esp_id));
2878 while (sp) {
2879 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2880 sp = (struct scsi_cmnd *) sp->host_scribble;
2881 }
2882 ESPLOG(("]\n"));
2883 return do_reset_bus;
2884}
2885
2886/* Do what is needed when a target tries to reconnect to us. */
2887static int esp_do_reconnect(struct esp *esp)
2888{
2889 int lun, target;
2890 struct scsi_cmnd *SCptr;
2891
2892 /* Check for all bogus conditions first. */
2893 target = reconnect_target(esp);
2894 if (target < 0) {
2895 ESPDISC(("bad bus bits\n"));
2896 return do_reset_bus;
2897 }
2898 lun = reconnect_lun(esp);
2899 if (lun < 0) {
2900 ESPDISC(("target=%2x, bad identify msg\n", target));
2901 return do_reset_bus;
2902 }
2903
2904 /* Things look ok... */
2905 ESPDISC(("R<%02x,%02x>", target, lun));
2906
2907 /* Must not flush FIFO or DVMA on HME. */
2908 if (esp->erev != fashme) {
2909 esp_cmd(esp, ESP_CMD_FLUSH);
2910 if (esp100_reconnect_hwbug(esp))
2911 return do_reset_bus;
2912 esp_cmd(esp, ESP_CMD_NULL);
2913 }
2914
2915 SCptr = remove_SC(&esp->disconnected_SC, (u8) target, (u8) lun);
2916 if (!SCptr)
2917 return esp_bad_reconnect(esp);
2918
2919 esp_connect(esp, SCptr);
2920 esp_cmd(esp, ESP_CMD_MOK);
2921
2922 if (esp->erev == fashme)
2923 sbus_writeb(((SCptr->device->id & 0xf) |
2924 (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT)),
2925 esp->eregs + ESP_BUSID);
2926
2927 /* Reconnect implies a restore pointers operation. */
2928 esp_restore_pointers(esp, SCptr);
2929
2930 esp->snip = 0;
2931 esp_advance_phase(SCptr, in_the_dark);
2932 return do_intr_end;
2933}
2934
2935/* End of NEXUS (hopefully), pick up status + message byte then leave if
2936 * all goes well.
2937 */
2938static int esp_do_status(struct esp *esp)
2939{
2940 struct scsi_cmnd *SCptr = esp->current_SC;
2941 int intr, rval;
2942
2943 rval = skipahead1(esp, SCptr, in_the_dark, in_status);
2944 if (rval)
2945 return rval;
2946 intr = esp->ireg;
2947 ESPSTAT(("esp_do_status: "));
2948 if (intr != ESP_INTR_DC) {
2949 int message_out = 0; /* for parity problems */
2950
2951 /* Ack the message. */
2952 ESPSTAT(("ack msg, "));
2953 esp_cmd(esp, ESP_CMD_MOK);
2954
2955 if (esp->erev != fashme) {
2956 dma_flashclear(esp);
2957
2958			/* Wait until DMA overwrites the 0xff sentinels loaded by esp_enter_status(). */
2959 while (esp->esp_command[0] == 0xff)
2960 udelay(1);
2961 } else {
2962 esp->esp_command[0] = esp->hme_fifo_workaround_buffer[0];
2963 esp->esp_command[1] = esp->hme_fifo_workaround_buffer[1];
2964 }
2965
2966 ESPSTAT(("got something, "));
2967 /* ESP chimes in with one of
2968 *
2969 * 1) function done interrupt:
2970 * both status and message in bytes
2971 * are available
2972 *
2973 * 2) bus service interrupt:
2974 * only status byte was acquired
2975 *
2976 * 3) Anything else:
2977 * can't happen, but we test for it
2978 * anyways
2979 *
2980 * ALSO: If bad parity was detected on either
2981 * the status _or_ the message byte then
2982 * the ESP has asserted ATN on the bus
2983 * and we must therefore wait for the
2984 * next phase change.
2985 */
2986 if (intr & ESP_INTR_FDONE) {
2987			/* We got it all, hallelujah. */
2988 ESPSTAT(("got both, "));
2989 SCptr->SCp.Status = esp->esp_command[0];
2990 SCptr->SCp.Message = esp->esp_command[1];
2991 esp->prevmsgin = SCptr->SCp.Message;
2992 esp->cur_msgin[0] = SCptr->SCp.Message;
2993 if (esp->sreg & ESP_STAT_PERR) {
2994 /* There was bad parity for the
2995 * message byte, the status byte
2996 * was ok.
2997 */
2998 message_out = MSG_PARITY_ERROR;
2999 }
3000 } else if (intr == ESP_INTR_BSERV) {
3001 /* Only got status byte. */
3002 ESPLOG(("esp%d: got status only, ", esp->esp_id));
3003 if (!(esp->sreg & ESP_STAT_PERR)) {
3004 SCptr->SCp.Status = esp->esp_command[0];
3005 SCptr->SCp.Message = 0xff;
3006 } else {
3007 /* The status byte had bad parity.
3008 * we leave the scsi_pointer Status
3009				 * We leave the scsi_pointer Status
3010 * of CHECK_CONDITION in esp_queue.
3011 */
3012 message_out = INITIATOR_ERROR;
3013 }
3014 } else {
3015 /* This shouldn't happen ever. */
3016 ESPSTAT(("got bolixed\n"));
3017 esp_advance_phase(SCptr, in_the_dark);
3018 return esp_do_phase_determine(esp);
3019 }
3020
3021 if (!message_out) {
3022 ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status,
3023 SCptr->SCp.Message));
3024 if (SCptr->SCp.Message == COMMAND_COMPLETE) {
3025 ESPSTAT(("and was COMMAND_COMPLETE\n"));
3026 esp_advance_phase(SCptr, in_freeing);
3027 return esp_do_freebus(esp);
3028 } else {
3029 ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n",
3030 esp->esp_id));
3031 esp->msgin_len = esp->msgin_ctr = 1;
3032 esp_advance_phase(SCptr, in_msgindone);
3033 return esp_do_msgindone(esp);
3034 }
3035 } else {
3036 /* With luck we'll be able to let the target
3037			 * know that bad parity happened; it will know
3038 * which byte caused the problems and send it
3039 * again. For the case where the status byte
3040 * receives bad parity, I do not believe most
3041 * targets recover very well. We'll see.
3042 */
3043 ESPLOG(("esp%d: bad parity somewhere mout=%2x\n",
3044 esp->esp_id, message_out));
3045 esp->cur_msgout[0] = message_out;
3046 esp->msgout_len = esp->msgout_ctr = 1;
3047 esp_advance_phase(SCptr, in_the_dark);
3048 return esp_do_phase_determine(esp);
3049 }
3050 } else {
3051 /* If we disconnect now, all hell breaks loose. */
3052 ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id));
3053 esp_advance_phase(SCptr, in_the_dark);
3054 return esp_do_phase_determine(esp);
3055 }
3056}
3057
3058static int esp_enter_status(struct esp *esp)
3059{
3060 u8 thecmd = ESP_CMD_ICCSEQ;
3061
3062 esp_cmd(esp, ESP_CMD_FLUSH);
3063 if (esp->erev != fashme) {
3064 u32 tmp;
3065
3066 esp->esp_command[0] = esp->esp_command[1] = 0xff;
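		/* Set up a two-byte DMA transfer (status + message byte) into
		 * esp_command[]; the 0xff sentinels above let esp_do_status()
		 * spin until those bytes have actually landed.
		 */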
3067 sbus_writeb(2, esp->eregs + ESP_TCLOW);
3068 sbus_writeb(0, esp->eregs + ESP_TCMED);
3069 tmp = sbus_readl(esp->dregs + DMA_CSR);
3070 tmp |= (DMA_ST_WRITE | DMA_ENABLE);
3071 sbus_writel(tmp, esp->dregs + DMA_CSR);
3072 if (esp->dma->revision == dvmaesc1)
3073 sbus_writel(0x100, esp->dregs + DMA_COUNT);
3074 sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
3075 thecmd |= ESP_CMD_DMA;
3076 }
3077 esp_cmd(esp, thecmd);
3078 esp_advance_phase(esp->current_SC, in_status);
3079
3080 return esp_do_status(esp);
3081}
3082
3083static int esp_disconnect_amidst_phases(struct esp *esp)
3084{
3085 struct scsi_cmnd *sp = esp->current_SC;
3086 struct esp_device *esp_dev = sp->device->hostdata;
3087
3088 /* This means real problems if we see this
3089	 * here, unless we were actually trying
3090 * to force the device to abort/reset.
3091 */
3092 ESPLOG(("esp%d Disconnect amidst phases, ", esp->esp_id));
3093 ESPLOG(("pphase<%s> cphase<%s>, ",
3094 phase_string(sp->SCp.phase),
3095 phase_string(sp->SCp.sent_command)));
3096
3097 if (esp->disconnected_SC != NULL || (esp->erev == fashme))
3098 esp_cmd(esp, ESP_CMD_ESEL);
3099
3100 switch (esp->cur_msgout[0]) {
3101 default:
3102 /* We didn't expect this to happen at all. */
3103 ESPLOG(("device is bolixed\n"));
3104 esp_advance_phase(sp, in_tgterror);
3105 esp_done(esp, (DID_ERROR << 16));
3106 break;
3107
3108 case BUS_DEVICE_RESET:
3109 ESPLOG(("device reset successful\n"));
3110 esp_dev->sync_max_offset = 0;
3111 esp_dev->sync_min_period = 0;
3112 esp_dev->sync = 0;
3113 esp_advance_phase(sp, in_resetdev);
3114 esp_done(esp, (DID_RESET << 16));
3115 break;
3116
3117 case ABORT:
3118 ESPLOG(("device abort successful\n"));
3119 esp_advance_phase(sp, in_abortone);
3120 esp_done(esp, (DID_ABORT << 16));
3121 break;
3122
3123	}
3124 return do_intr_end;
3125}
3126
3127static int esp_enter_msgout(struct esp *esp)
3128{
3129 esp_advance_phase(esp->current_SC, in_msgout);
3130 return esp_do_msgout(esp);
3131}
3132
3133static int esp_enter_msgin(struct esp *esp)
3134{
3135 esp_advance_phase(esp->current_SC, in_msgin);
3136 return esp_do_msgin(esp);
3137}
3138
3139static int esp_enter_cmd(struct esp *esp)
3140{
3141 esp_advance_phase(esp->current_SC, in_cmdbegin);
3142 return esp_do_cmdbegin(esp);
3143}
3144
3145static int esp_enter_badphase(struct esp *esp)
3146{
3147 ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id,
3148 esp->sreg & ESP_STAT_PMASK));
3149 return do_reset_bus;
3150}
3151
3152typedef int (*espfunc_t)(struct esp *);
3153
3154static espfunc_t phase_vector[] = {
3155 esp_do_data, /* ESP_DOP */
3156 esp_do_data, /* ESP_DIP */
3157 esp_enter_cmd, /* ESP_CMDP */
3158 esp_enter_status, /* ESP_STATP */
3159 esp_enter_badphase, /* ESP_STAT_PMSG */
3160 esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */
3161 esp_enter_msgout, /* ESP_MOP */
3162 esp_enter_msgin, /* ESP_MIP */
3163};
3164
3165/* The target has control of the bus and we have to see where it has
3166 * taken us.
3167 */
3168static int esp_do_phase_determine(struct esp *esp)
3169{
3170 if ((esp->ireg & ESP_INTR_DC) != 0)
3171 return esp_disconnect_amidst_phases(esp);
3172 return phase_vector[esp->sreg & ESP_STAT_PMASK](esp);
3173}
3174
3175/* First interrupt after exec'ing a cmd comes here. */
3176static int esp_select_complete(struct esp *esp)
3177{
3178 struct scsi_cmnd *SCptr = esp->current_SC;
3179 struct esp_device *esp_dev = SCptr->device->hostdata;
3180 int cmd_bytes_sent, fcnt;
3181
3182 if (esp->erev != fashme)
3183 esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS);
3184
3185 if (esp->erev == fashme)
3186 fcnt = esp->hme_fifo_workaround_count;
3187 else
3188 fcnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES);
3189
3190 cmd_bytes_sent = esp_bytes_sent(esp, fcnt);
3191 dma_invalidate(esp);
3192
3193 /* Let's check to see if a reselect happened
3194	 * while we were trying to select.  This must
3195 * be checked first.
3196 */
3197 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
3198 esp_reconnect(esp, SCptr);
3199 return esp_do_reconnect(esp);
3200 }
3201
3202	/* Looks like things worked; we should see a bus service &
3203 * a function complete interrupt at this point. Note we
3204 * are doing a direct comparison because we don't want to
3205 * be fooled into thinking selection was successful if
3206 * ESP_INTR_DC is set, see below.
3207 */
3208 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
3209 /* target speaks... */
3210 esp->targets_present |= (1<<SCptr->device->id);
3211
3212 /* What if the target ignores the sdtr? */
3213 if (esp->snip)
3214 esp_dev->sync = 1;
3215
3216 /* See how far, if at all, we got in getting
3217 * the information out to the target.
3218 */
3219 switch (esp->seqreg) {
3220 default:
3221
3222 case ESP_STEP_ASEL:
3223 /* Arbitration won, target selected, but
3224 * we are in some phase which is not command
3225 * phase nor is it message out phase.
3226 *
3227 * XXX We've confused the target, obviously.
3228			 * XXX So clear its state, but we also end
3229			 * XXX up clearing everyone else's.  That isn't
3230			 * XXX so nice.  I'd like to just reset this
3231			 * XXX target, but if I cannot even get its
3232 * XXX attention and finish selection to talk
3233 * XXX to it, there is not much more I can do.
3234 * XXX If we have a loaded bus we're going to
3235 * XXX spend the next second or so renegotiating
3236 * XXX for synchronous transfers.
3237 */
3238 ESPLOG(("esp%d: STEP_ASEL for tgt %d\n",
3239 esp->esp_id, SCptr->device->id));
3240
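			/* fall through */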
3241 case ESP_STEP_SID:
3242 /* Arbitration won, target selected, went
3243 * to message out phase, sent one message
3244 * byte, then we stopped. ATN is asserted
3245 * on the SCSI bus and the target is still
3246 * there hanging on. This is a legal
3247 * sequence step if we gave the ESP a select
3248 * and stop command.
3249 *
3250 * XXX See above, I could set the borken flag
3251 * XXX in the device struct and retry the
3252 * XXX command. But would that help for
3253 * XXX tagged capable targets?
3254 */
3255
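			/* fall through */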
3256 case ESP_STEP_NCMD:
3257 /* Arbitration won, target selected, maybe
3258 * sent the one message byte in message out
3259 * phase, but we did not go to command phase
3260 * in the end. Actually, we could have sent
3261 * only some of the message bytes if we tried
3262 * to send out the entire identify and tag
3263 * message using ESP_CMD_SA3.
3264 */
3265 cmd_bytes_sent = 0;
3266 break;
3267
3268 case ESP_STEP_PPC:
3269 /* No, not the powerPC pinhead. Arbitration
3270 * won, all message bytes sent if we went to
3271 * message out phase, went to command phase
3272 * but only part of the command was sent.
3273 *
3274 * XXX I've seen this, but usually in conjunction
3275 * XXX with a gross error which appears to have
3276 * XXX occurred between the time I told the
3277 * XXX ESP to arbitrate and when I got the
3278 * XXX interrupt. Could I have misloaded the
3279 * XXX command bytes into the fifo? Actually,
3280 * XXX I most likely missed a phase, and therefore
3281 * XXX went into never never land and didn't even
3282 * XXX know it. That was the old driver though.
3283 * XXX What is even more peculiar is that the ESP
3284 * XXX showed the proper function complete and
3285 * XXX bus service bits in the interrupt register.
3286 */
3287
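			/* fall through */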
3288 case ESP_STEP_FINI4:
3289 case ESP_STEP_FINI5:
3290 case ESP_STEP_FINI6:
3291 case ESP_STEP_FINI7:
3292 /* Account for the identify message */
3293 if (SCptr->SCp.phase == in_slct_norm)
3294 cmd_bytes_sent -= 1;
3295		}
3296
3297 if (esp->erev != fashme)
3298 esp_cmd(esp, ESP_CMD_NULL);
3299
3300 /* Be careful, we could really get fucked during synchronous
3301 * data transfers if we try to flush the fifo now.
3302 */
3303 if ((esp->erev != fashme) && /* not a Happy Meal and... */
3304 !fcnt && /* Fifo is empty and... */
3305 /* either we are not doing synchronous transfers or... */
3306 (!esp_dev->sync_max_offset ||
3307 /* We are not going into data in phase. */
3308 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
3309 esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */
3310
3311 /* See how far we got if this is not a slow command. */
3312 if (!esp->esp_slowcmd) {
3313 if (cmd_bytes_sent < 0)
3314 cmd_bytes_sent = 0;
3315 if (cmd_bytes_sent != SCptr->cmd_len) {
3316 /* Crapola, mark it as a slowcmd
3317 * so that we have some chance of
3318 * keeping the command alive with
3319 * good luck.
3320 *
3321 * XXX Actually, if we didn't send it all
3322 * XXX this means either we didn't set things
3323 * XXX up properly (driver bug) or the target
3324 * XXX or the ESP detected parity on one of
3325 * XXX the command bytes. This makes much
3326 * XXX more sense, and therefore this code
3327 * XXX should be changed to send out a
3328 * XXX parity error message or if the status
3329 * XXX register shows no parity error then
3330 * XXX just expect the target to bring the
3331 * XXX bus into message in phase so that it
3332 * XXX can send us the parity error message.
3333 * XXX SCSI sucks...
3334 */
3335 esp->esp_slowcmd = 1;
3336 esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]);
3337 esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent);
3338 }
3339 }
3340
3341 /* Now figure out where we went. */
3342 esp_advance_phase(SCptr, in_the_dark);
3343 return esp_do_phase_determine(esp);
3344 }
3345
3346 /* Did the target even make it? */
3347 if (esp->ireg == ESP_INTR_DC) {
3348 /* wheee... nobody there or they didn't like
3349 * what we told it to do, clean up.
3350 */
3351
3352 /* If anyone is off the bus, but working on
3353 * a command in the background for us, tell
3354 * the ESP to listen for them.
3355 */
3356 if (esp->disconnected_SC)
3357 esp_cmd(esp, ESP_CMD_ESEL);
3358
3359 if (((1<<SCptr->device->id) & esp->targets_present) &&
3360 esp->seqreg != 0 &&
3361 (esp->cur_msgout[0] == EXTENDED_MESSAGE) &&
3362 (SCptr->SCp.phase == in_slct_msg ||
3363 SCptr->SCp.phase == in_slct_stop)) {
3364 /* shit */
3365 esp->snip = 0;
3366 ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
3367 "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
3368 esp_dev->sync_max_offset = 0;
3369 esp_dev->sync_min_period = 0;
3370 esp_dev->sync = 1; /* so we don't negotiate again */
3371
3372 /* Run the command again, this time though we
3373 * won't try to negotiate for synchronous transfers.
3374 *
3375 * XXX I'd like to do something like send an
3376 * XXX INITIATOR_ERROR or ABORT message to the
3377 * XXX target to tell it, "Sorry I confused you,
3378 * XXX please come back and I will be nicer next
3379 * XXX time". But that requires having the target
3380 * XXX on the bus, and it has dropped BSY on us.
3381 */
3382 esp->current_SC = NULL;
3383 esp_advance_phase(SCptr, not_issued);
3384 prepend_SC(&esp->issue_SC, SCptr);
3385 esp_exec_cmd(esp);
3386 return do_intr_end;
3387 }
3388
3389 /* Ok, this is normal, this is what we see during boot
3390		 * or whenever we are scanning the bus for targets.
3391 * But first make sure that is really what is happening.
3392 */
3393 if (((1<<SCptr->device->id) & esp->targets_present)) {
3394 ESPLOG(("esp%d: Warning, live target %d not responding to "
3395 "selection.\n", esp->esp_id, SCptr->device->id));
3396
3397 /* This _CAN_ happen. The SCSI standard states that
3398 * the target is to _not_ respond to selection if
3399 * _it_ detects bad parity on the bus for any reason.
3400 * Therefore, we assume that if we've talked successfully
3401 * to this target before, bad parity is the problem.
3402 */
3403 esp_done(esp, (DID_PARITY << 16));
3404 } else {
3405 /* Else, there really isn't anyone there. */
3406 ESPMISC(("esp: selection failure, maybe nobody there?\n"));
3407 ESPMISC(("esp: target %d lun %d\n",
3408 SCptr->device->id, SCptr->device->lun));
3409 esp_done(esp, (DID_BAD_TARGET << 16));
3410 }
3411 return do_intr_end;
3412 }
3413
3414 ESPLOG(("esp%d: Selection failure.\n", esp->esp_id));
3415 printk("esp%d: Currently -- ", esp->esp_id);
3416 esp_print_ireg(esp->ireg); printk(" ");
3417 esp_print_statreg(esp->sreg); printk(" ");
3418 esp_print_seqreg(esp->seqreg); printk("\n");
3419 printk("esp%d: New -- ", esp->esp_id);
3420 esp->sreg = sbus_readb(esp->eregs + ESP_STATUS);
3421 esp->seqreg = sbus_readb(esp->eregs + ESP_SSTEP);
3422 esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT);
3423 esp_print_ireg(esp->ireg); printk(" ");
3424 esp_print_statreg(esp->sreg); printk(" ");
3425 esp_print_seqreg(esp->seqreg); printk("\n");
3426 ESPLOG(("esp%d: resetting bus\n", esp->esp_id));
3427 return do_reset_bus; /* ugh... */
3428}
3429
3430/* Continue reading bytes for msgin phase. */
3431static int esp_do_msgincont(struct esp *esp)
3432{
3433 if (esp->ireg & ESP_INTR_BSERV) {
3434 /* in the right phase too? */
3435 if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) {
3436 /* phew... */
3437 esp_cmd(esp, ESP_CMD_TI);
3438 esp_advance_phase(esp->current_SC, in_msgindone);
3439 return do_intr_end;
3440 }
3441
3442		/* We changed phase but the ESP shows bus service;
3443		 * in this case it is most likely that we, the
3444		 * hacker who has been up for 20hrs straight
3445		 * staring at the screen, drowned in coffee,
3446		 * smelling like wretched cigarette ashes,
3447		 * have miscoded something... so try to
3448		 * recover as best we can.
3449 */
3450 ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id));
3451 }
3452 esp_advance_phase(esp->current_SC, in_the_dark);
3453 return do_phase_determine;
3454}
3455
3456static int check_singlebyte_msg(struct esp *esp)
3457{
3458 esp->prevmsgin = esp->cur_msgin[0];
3459 if (esp->cur_msgin[0] & 0x80) {
3460 /* wheee... */
3461 ESPLOG(("esp%d: target sends identify amidst phases\n",
3462 esp->esp_id));
3463 esp_advance_phase(esp->current_SC, in_the_dark);
3464 return 0;
3465 } else if (((esp->cur_msgin[0] & 0xf0) == 0x20) ||
3466 (esp->cur_msgin[0] == EXTENDED_MESSAGE)) {
3467 esp->msgin_len = 2;
3468 esp_advance_phase(esp->current_SC, in_msgincont);
3469 return 0;
3470 }
3471 esp_advance_phase(esp->current_SC, in_the_dark);
3472 switch (esp->cur_msgin[0]) {
3473 default:
3474 /* We don't want to hear about it. */
3475 ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id,
3476 esp->cur_msgin[0]));
3477 return MESSAGE_REJECT;
3478
3479 case NOP:
3480 ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id,
3481 esp->current_SC->device->id));
3482 return 0;
3483
3484 case RESTORE_POINTERS:
3485 /* In this case we might also have to backup the
3486 * "slow command" pointer. It is rare to get such
3487 * a save/restore pointer sequence so early in the
3488 * bus transition sequences, but cover it.
3489 */
3490 if (esp->esp_slowcmd) {
3491 esp->esp_scmdleft = esp->current_SC->cmd_len;
3492 esp->esp_scmdp = &esp->current_SC->cmnd[0];
3493 }
3494 esp_restore_pointers(esp, esp->current_SC);
3495 return 0;
3496
3497 case SAVE_POINTERS:
3498 esp_save_pointers(esp, esp->current_SC);
3499 return 0;
3500
3501 case COMMAND_COMPLETE:
3502 case DISCONNECT:
3503 /* Freeing the bus, let it go. */
3504 esp->current_SC->SCp.phase = in_freeing;
3505 return 0;
3506
3507 case MESSAGE_REJECT:
3508 ESPMISC(("msg reject, "));
3509 if (esp->prevmsgout == EXTENDED_MESSAGE) {
3510 struct esp_device *esp_dev = esp->current_SC->device->hostdata;
3511
3512 /* Doesn't look like this target can
3513 * do synchronous or WIDE transfers.
3514 */
3515 ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
3516 esp_dev->sync = 1;
3517 esp_dev->wide = 1;
3518 esp_dev->sync_min_period = 0;
3519 esp_dev->sync_max_offset = 0;
3520 return 0;
3521 } else {
3522 ESPMISC(("not sync nego, sending ABORT\n"));
3523 return ABORT;
3524 }
3525	}
3526}
3527
3528/* Target negotiates for synchronous transfers before we do; this
3529 * is legal although very strange. What is even funnier is that
3530 * the SCSI2 standard specifically recommends against targets doing
3531 * this because so many initiators cannot cope with this occurring.
3532 */
3533static int target_with_ants_in_pants(struct esp *esp,
3534 struct scsi_cmnd *SCptr,
3535 struct esp_device *esp_dev)
3536{
3537 if (esp_dev->sync || SCptr->device->borken) {
3538 /* sorry, no can do */
3539 ESPSDTR(("forcing to async, "));
3540 build_sync_nego_msg(esp, 0, 0);
3541 esp_dev->sync = 1;
3542 esp->snip = 1;
3543 ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
3544 esp_advance_phase(SCptr, in_the_dark);
3545 return EXTENDED_MESSAGE;
3546 }
3547
3548 /* Ok, we'll check them out... */
3549 return 0;
3550}
3551
3552static void sync_report(struct esp *esp)
3553{
3554 int msg3, msg4;
3555 char *type;
3556
3557 msg3 = esp->cur_msgin[3];
3558 msg4 = esp->cur_msgin[4];
3559 if (msg4) {
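		/* The SDTR period factor (msg3) is in 4ns units, so the transfer
		 * rate is 10^9 / (msg3 * 4) Hz; split it into whole MHz plus two
		 * fractional digits for the report below.
		 */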
3560 int hz = 1000000000 / (msg3 * 4);
3561 int integer = hz / 1000000;
3562 int fraction = (hz - (integer * 1000000)) / 10000;
3563 if ((esp->erev == fashme) &&
3564 (esp->config3[esp->current_SC->device->id] & ESP_CONFIG3_EWIDE)) {
3565 type = "FAST-WIDE";
3566 integer <<= 1;
3567 fraction <<= 1;
3568 } else if ((msg3 * 4) < 200) {
3569 type = "FAST";
3570 } else {
3571 type = "synchronous";
3572 }
3573
3574 /* Do not transform this back into one big printk
3575		 * again; it triggers a bug in our sparc64-gcc272
3576 * sibling call optimization. -DaveM
3577 */
3578 ESPLOG((KERN_INFO "esp%d: target %d ",
3579 esp->esp_id, esp->current_SC->device->id));
3580 ESPLOG(("[period %dns offset %d %d.%02dMHz ",
3581 (int) msg3 * 4, (int) msg4,
3582 integer, fraction));
3583 ESPLOG(("%s SCSI%s]\n", type,
3584 (((msg3 * 4) < 200) ? "-II" : "")));
3585 } else {
3586 ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n",
3587 esp->esp_id, esp->current_SC->device->id));
3588 }
3589}
3590
3591static int check_multibyte_msg(struct esp *esp)
3592{
3593 struct scsi_cmnd *SCptr = esp->current_SC;
3594 struct esp_device *esp_dev = SCptr->device->hostdata;
3595 u8 regval = 0;
3596 int message_out = 0;
3597
3598 ESPSDTR(("chk multibyte msg: "));
3599 if (esp->cur_msgin[2] == EXTENDED_SDTR) {
3600 int period = esp->cur_msgin[3];
3601 int offset = esp->cur_msgin[4];
3602
3603 ESPSDTR(("is sync nego response, "));
3604 if (!esp->snip) {
3605 int rval;
3606
3607 /* Target negotiates first! */
3608 ESPSDTR(("target jumps the gun, "));
3609 message_out = EXTENDED_MESSAGE; /* we must respond */
3610 rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
3611 if (rval)
3612 return rval;
3613 }
3614
3615 ESPSDTR(("examining sdtr, "));
3616
3617 /* Offset cannot be larger than ESP fifo size. */
3618 if (offset > 15) {
3619 ESPSDTR(("offset too big %2x, ", offset));
3620 offset = 15;
3621 ESPSDTR(("sending back new offset\n"));
3622 build_sync_nego_msg(esp, period, offset);
3623 return EXTENDED_MESSAGE;
3624 }
3625
3626 if (offset && period > esp->max_period) {
3627 /* Yeee, async for this slow device. */
3628 ESPSDTR(("period too long %2x, ", period));
3629 build_sync_nego_msg(esp, 0, 0);
3630 ESPSDTR(("hoping for msgout\n"));
3631 esp_advance_phase(esp->current_SC, in_the_dark);
3632 return EXTENDED_MESSAGE;
3633 } else if (offset && period < esp->min_period) {
3634 ESPSDTR(("period too short %2x, ", period));
3635 period = esp->min_period;
3636 if (esp->erev > esp236)
3637 regval = 4;
3638 else
3639 regval = 5;
3640 } else if (offset) {
3641 int tmp;
3642
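			/* Convert the period (4ns units) into ESP clock ticks,
			 * rounding up; esp->ccycle is presumably the chip clock
			 * period in picoseconds, so ccycle/1000 gives ns per tick.
			 */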
3643 ESPSDTR(("period is ok, "));
3644 tmp = esp->ccycle / 1000;
3645 regval = (((period << 2) + tmp - 1) / tmp);
3646 if (regval && ((esp->erev == fas100a ||
3647 esp->erev == fas236 ||
3648 esp->erev == fashme))) {
3649 if (period >= 50)
3650 regval--;
3651 }
3652 }
3653
3654 if (offset) {
3655 u8 bit;
3656
3657 esp_dev->sync_min_period = (regval & 0x1f);
3658 esp_dev->sync_max_offset = (offset | esp->radelay);
3659 if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) {
3660 if ((esp->erev == fas100a) || (esp->erev == fashme))
3661 bit = ESP_CONFIG3_FAST;
3662 else
3663 bit = ESP_CONFIG3_FSCSI;
3664 if (period < 50) {
3665 /* On FAS366, if using fast-20 synchronous transfers
3666 * we need to make sure the REQ/ACK assert/deassert
3667 * control bits are clear.
3668 */
3669 if (esp->erev == fashme)
3670 esp_dev->sync_max_offset &= ~esp->radelay;
3671 esp->config3[SCptr->device->id] |= bit;
3672 } else {
3673 esp->config3[SCptr->device->id] &= ~bit;
3674 }
3675 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3676 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
3677 }
3678 esp->prev_soff = esp_dev->sync_max_offset;
3679 esp->prev_stp = esp_dev->sync_min_period;
3680 sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
3681 sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
3682 ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
3683 esp_dev->sync_max_offset,
3684 esp_dev->sync_min_period,
3685 esp->config3[SCptr->device->id]));
3686
3687 esp->snip = 0;
3688 } else if (esp_dev->sync_max_offset) {
3689 u8 bit;
3690
3691 /* back to async mode */
3692			ESPSDTR(("unacceptable sync nego, forcing async\n"));
3693 esp_dev->sync_max_offset = 0;
3694 esp_dev->sync_min_period = 0;
3695 esp->prev_soff = 0;
3696 esp->prev_stp = 0;
3697 sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
3698 sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
3699 if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) {
3700 if ((esp->erev == fas100a) || (esp->erev == fashme))
3701 bit = ESP_CONFIG3_FAST;
3702 else
3703 bit = ESP_CONFIG3_FSCSI;
3704 esp->config3[SCptr->device->id] &= ~bit;
3705 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3706 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
3707 }
3708 }
3709
3710 sync_report(esp);
3711
3712 ESPSDTR(("chk multibyte msg: sync is known, "));
3713 esp_dev->sync = 1;
3714
3715 if (message_out) {
3716 ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
3717 esp->esp_id));
3718 build_sync_nego_msg(esp, period, offset);
3719 esp_advance_phase(SCptr, in_the_dark);
3720 return EXTENDED_MESSAGE;
3721 }
3722
3723 ESPSDTR(("returning zero\n"));
3724 esp_advance_phase(SCptr, in_the_dark); /* ...or else! */
3725 return 0;
3726 } else if (esp->cur_msgin[2] == EXTENDED_WDTR) {
3727 int size = 8 << esp->cur_msgin[3];
3728
3729 esp->wnip = 0;
3730 if (esp->erev != fashme) {
3731 ESPLOG(("esp%d: AIEEE wide msg received and not HME.\n",
3732 esp->esp_id));
3733 message_out = MESSAGE_REJECT;
3734 } else if (size > 16) {
3735 ESPLOG(("esp%d: AIEEE wide transfer for %d size "
3736 "not supported.\n", esp->esp_id, size));
3737 message_out = MESSAGE_REJECT;
3738 } else {
3739 /* Things look good; let's see what we got. */
3740 if (size == 16) {
3741 /* Set config 3 register for this target. */
3742 esp->config3[SCptr->device->id] |= ESP_CONFIG3_EWIDE;
3743 } else {
3744 /* Just make sure it was one byte sized. */
3745 if (size != 8) {
3746 ESPLOG(("esp%d: Aieee, wide nego of %d size.\n",
3747 esp->esp_id, size));
3748 message_out = MESSAGE_REJECT;
3749 goto finish;
3750 }
3751 /* Pure paranoia. */
3752 esp->config3[SCptr->device->id] &= ~(ESP_CONFIG3_EWIDE);
3753 }
3754 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3755 sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
3756
3757 /* Regardless, next try for sync transfers. */
3758 build_sync_nego_msg(esp, esp->sync_defp, 15);
3759 esp_dev->sync = 1;
3760 esp->snip = 1;
3761 message_out = EXTENDED_MESSAGE;
3762 }
3763 } else if (esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) {
3764 ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id));
3765 message_out = MESSAGE_REJECT;
3766 }
3767finish:
3768 esp_advance_phase(SCptr, in_the_dark);
3769 return message_out;
3770}
3771
3772static int esp_do_msgindone(struct esp *esp)
3773{
3774 struct scsi_cmnd *SCptr = esp->current_SC;
3775 int message_out = 0, it = 0, rval;
3776
3777 rval = skipahead1(esp, SCptr, in_msgin, in_msgindone);
3778 if (rval)
3779 return rval;
3780 if (SCptr->SCp.sent_command != in_status) {
3781 if (!(esp->ireg & ESP_INTR_DC)) {
3782 if (esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) {
3783 message_out = MSG_PARITY_ERROR;
3784 esp_cmd(esp, ESP_CMD_FLUSH);
3785 } else if (esp->erev != fashme &&
3786 (it = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES)) != 1) {
3787 /* We certainly dropped the ball somewhere. */
3788 message_out = INITIATOR_ERROR;
3789 esp_cmd(esp, ESP_CMD_FLUSH);
3790 } else if (!esp->msgin_len) {
3791 if (esp->erev == fashme)
3792 it = esp->hme_fifo_workaround_buffer[0];
3793 else
3794 it = sbus_readb(esp->eregs + ESP_FDATA);
3795 esp_advance_phase(SCptr, in_msgincont);
3796 } else {
3797 /* it is ok and we want it */
3798 if (esp->erev == fashme)
3799 it = esp->cur_msgin[esp->msgin_ctr] =
3800 esp->hme_fifo_workaround_buffer[0];
3801 else
3802 it = esp->cur_msgin[esp->msgin_ctr] =
3803 sbus_readb(esp->eregs + ESP_FDATA);
3804 esp->msgin_ctr++;
3805 }
3806 } else {
3807 esp_advance_phase(SCptr, in_the_dark);
3808 return do_work_bus;
3809 }
3810 } else {
3811 it = esp->cur_msgin[0];
3812 }
3813 if (!message_out && esp->msgin_len) {
3814 if (esp->msgin_ctr < esp->msgin_len) {
3815 esp_advance_phase(SCptr, in_msgincont);
3816 } else if (esp->msgin_len == 1) {
3817 message_out = check_singlebyte_msg(esp);
3818 } else if (esp->msgin_len == 2) {
3819 if (esp->cur_msgin[0] == EXTENDED_MESSAGE) {
3820 if ((it + 2) >= 15) {
3821 message_out = MESSAGE_REJECT;
3822 } else {
3823 esp->msgin_len = (it + 2);
3824 esp_advance_phase(SCptr, in_msgincont);
3825 }
3826 } else {
3827 message_out = MESSAGE_REJECT; /* foo on you */
3828 }
3829 } else {
3830 message_out = check_multibyte_msg(esp);
3831 }
3832 }
3833 if (message_out < 0) {
3834 return -message_out;
3835 } else if (message_out) {
3836 if (((message_out != 1) &&
3837 ((message_out < 0x20) || (message_out & 0x80))))
3838 esp->msgout_len = 1;
3839 esp->cur_msgout[0] = message_out;
3840 esp_cmd(esp, ESP_CMD_SATN);
3841 esp_advance_phase(SCptr, in_the_dark);
3842 esp->msgin_len = 0;
3843 }
3844 esp->sreg = sbus_readb(esp->eregs + ESP_STATUS);
3845 esp->sreg &= ~(ESP_STAT_INTR);
3846 if ((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD))
3847 esp_cmd(esp, ESP_CMD_MOK);
3848 if ((SCptr->SCp.sent_command == in_msgindone) &&
3849 (SCptr->SCp.phase == in_freeing))
3850 return esp_do_freebus(esp);
3851 return do_intr_end;
3852}
3853
3854static int esp_do_cmdbegin(struct esp *esp)
3855{
3856 struct scsi_cmnd *SCptr = esp->current_SC;
3857
3858 esp_advance_phase(SCptr, in_cmdend);
3859 if (esp->erev == fashme) {
3860 u32 tmp = sbus_readl(esp->dregs + DMA_CSR);
3861 int i;
3862
3863 for (i = 0; i < esp->esp_scmdleft; i++)
3864 esp->esp_command[i] = *esp->esp_scmdp++;
3865 esp->esp_scmdleft = 0;
3866 esp_cmd(esp, ESP_CMD_FLUSH);
3867 esp_setcount(esp->eregs, i, 1);
3868 esp_cmd(esp, (ESP_CMD_DMA | ESP_CMD_TI));
3869 tmp |= (DMA_SCSI_DISAB | DMA_ENABLE);
3870 tmp &= ~(DMA_ST_WRITE);
3871 sbus_writel(i, esp->dregs + DMA_COUNT);
3872 sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR);
3873 sbus_writel(tmp, esp->dregs + DMA_CSR);
3874 } else {
3875 u8 tmp;
3876
3877 esp_cmd(esp, ESP_CMD_FLUSH);
3878 tmp = *esp->esp_scmdp++;
3879 esp->esp_scmdleft--;
3880 sbus_writeb(tmp, esp->eregs + ESP_FDATA);
3881 esp_cmd(esp, ESP_CMD_TI);
3882 }
3883 return do_intr_end;
3884}
3885
3886static int esp_do_cmddone(struct esp *esp)
3887{
3888 if (esp->erev == fashme)
3889 dma_invalidate(esp);
3890 else
3891 esp_cmd(esp, ESP_CMD_NULL);
3892
3893 if (esp->ireg & ESP_INTR_BSERV) {
3894 esp_advance_phase(esp->current_SC, in_the_dark);
3895 return esp_do_phase_determine(esp);
3896 }
3897
3898 ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n",
3899 esp->esp_id));
3900 return do_reset_bus;
3901}
3902
3903static int esp_do_msgout(struct esp *esp)
3904{
3905 esp_cmd(esp, ESP_CMD_FLUSH);
3906 switch (esp->msgout_len) {
3907 case 1:
3908 if (esp->erev == fashme)
3909 hme_fifo_push(esp, &esp->cur_msgout[0], 1);
3910 else
3911 sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA);
3912
3913 esp_cmd(esp, ESP_CMD_TI);
3914 break;
3915
3916 case 2:
3917 esp->esp_command[0] = esp->cur_msgout[0];
3918 esp->esp_command[1] = esp->cur_msgout[1];
3919
3920 if (esp->erev == fashme) {
3921 hme_fifo_push(esp, &esp->cur_msgout[0], 2);
3922 esp_cmd(esp, ESP_CMD_TI);
3923 } else {
3924 dma_setup(esp, esp->esp_command_dvma, 2, 0);
3925 esp_setcount(esp->eregs, 2, 0);
3926 esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
3927 }
3928 break;
3929
3930 case 4:
3931 esp->esp_command[0] = esp->cur_msgout[0];
3932 esp->esp_command[1] = esp->cur_msgout[1];
3933 esp->esp_command[2] = esp->cur_msgout[2];
3934 esp->esp_command[3] = esp->cur_msgout[3];
3935 esp->snip = 1;
3936
3937 if (esp->erev == fashme) {
3938 hme_fifo_push(esp, &esp->cur_msgout[0], 4);
3939 esp_cmd(esp, ESP_CMD_TI);
3940 } else {
3941 dma_setup(esp, esp->esp_command_dvma, 4, 0);
3942 esp_setcount(esp->eregs, 4, 0);
3943 esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
3944 }
3945 break;
3946
3947 case 5:
3948 esp->esp_command[0] = esp->cur_msgout[0];
3949 esp->esp_command[1] = esp->cur_msgout[1];
3950 esp->esp_command[2] = esp->cur_msgout[2];
3951 esp->esp_command[3] = esp->cur_msgout[3];
3952 esp->esp_command[4] = esp->cur_msgout[4];
3953 esp->snip = 1;
3954
3955 if (esp->erev == fashme) {
3956 hme_fifo_push(esp, &esp->cur_msgout[0], 5);
3957 esp_cmd(esp, ESP_CMD_TI);
3958 } else {
3959 dma_setup(esp, esp->esp_command_dvma, 5, 0);
3960 esp_setcount(esp->eregs, 5, 0);
3961 esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI);
3962 }
3963 break;
3964
3965 default:
3966 /* whoops */
3967 ESPMISC(("bogus msgout sending NOP\n"));
3968 esp->cur_msgout[0] = NOP;
3969
3970 if (esp->erev == fashme) {
3971 hme_fifo_push(esp, &esp->cur_msgout[0], 1);
3972 } else {
3973 sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA);
3974 }
3975
3976 esp->msgout_len = 1;
3977 esp_cmd(esp, ESP_CMD_TI);
3978 break;
3979 	}
3980
3981 esp_advance_phase(esp->current_SC, in_msgoutdone);
3982 return do_intr_end;
3983}
3984
3985static int esp_do_msgoutdone(struct esp *esp)
3986{
3987 if (esp->msgout_len > 1) {
3988 /* XXX HME/FAS ATN deassert workaround required,
3989 * XXX no DMA flushing, only possible ESP_CMD_FLUSH
3990 * XXX to kill the fifo.
3991 */
3992 if (esp->erev != fashme) {
3993 u32 tmp;
3994
3995 while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ)
3996 udelay(1);
3997 tmp &= ~DMA_ENABLE;
3998 sbus_writel(tmp, esp->dregs + DMA_CSR);
3999 dma_invalidate(esp);
4000 } else {
4001 esp_cmd(esp, ESP_CMD_FLUSH);
4002 }
4003 }
4004 if (!(esp->ireg & ESP_INTR_DC)) {
4005 if (esp->erev != fashme)
4006 esp_cmd(esp, ESP_CMD_NULL);
4007 switch (esp->sreg & ESP_STAT_PMASK) {
4008 case ESP_MOP:
4009 /* whoops, parity error */
4010 ESPLOG(("esp%d: still in msgout, parity error assumed\n",
4011 esp->esp_id));
4012 if (esp->msgout_len > 1)
4013 esp_cmd(esp, ESP_CMD_SATN);
4014 esp_advance_phase(esp->current_SC, in_msgout);
4015 return do_work_bus;
4016
4017 case ESP_DIP:
4018 break;
4019
4020 default:
4021 /* Happy Meal fifo is touchy... */
4022 if ((esp->erev != fashme) &&
4023 !fcount(esp) &&
4024 !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
4025 esp_cmd(esp, ESP_CMD_FLUSH);
4026 break;
4027
4028 		}
4029 } else {
4030 ESPLOG(("esp%d: disconnect, resetting bus\n", esp->esp_id));
4031 return do_reset_bus;
4032 }
4033
4034 /* If we sent out a synchronous negotiation message, update
4035 * our state.
4036 */
4037 if (esp->cur_msgout[2] == EXTENDED_MESSAGE &&
4038 esp->cur_msgout[4] == EXTENDED_SDTR) {
4039 esp->snip = 1; /* anal retentiveness... */
4040 }
4041
4042 esp->prevmsgout = esp->cur_msgout[0];
4043 esp->msgout_len = 0;
4044 esp_advance_phase(esp->current_SC, in_the_dark);
4045 return esp_do_phase_determine(esp);
4046}
4047
4048static int esp_bus_unexpected(struct esp *esp)
4049{
4050 ESPLOG(("esp%d: command in weird state %2x\n",
4051 esp->esp_id, esp->current_SC->SCp.phase));
4052 return do_reset_bus;
4053}
4054
4055static espfunc_t bus_vector[] = {
4056 esp_do_data_finale,
4057 esp_do_data_finale,
4058 esp_bus_unexpected,
4059 esp_do_msgin,
4060 esp_do_msgincont,
4061 esp_do_msgindone,
4062 esp_do_msgout,
4063 esp_do_msgoutdone,
4064 esp_do_cmdbegin,
4065 esp_do_cmddone,
4066 esp_do_status,
4067 esp_do_freebus,
4068 esp_do_phase_determine,
4069 esp_bus_unexpected,
4070 esp_bus_unexpected,
4071 esp_bus_unexpected,
4072};
4073
4074/* This is the second tier in our dual-level SCSI state machine. */
4075static int esp_work_bus(struct esp *esp)
4076{
4077 struct scsi_cmnd *SCptr = esp->current_SC;
4078 unsigned int phase;
4079
4080 ESPBUS(("esp_work_bus: "));
4081 if (!SCptr) {
4082 ESPBUS(("reconnect\n"));
4083 return esp_do_reconnect(esp);
4084 }
4085 phase = SCptr->SCp.phase;
4086 if ((phase & 0xf0) == in_phases_mask)
4087 return bus_vector[(phase & 0x0f)](esp);
4088 else if ((phase & 0xf0) == in_slct_mask)
4089 return esp_select_complete(esp);
4090 else
4091 return esp_bus_unexpected(esp);
4092}
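/* Illustrative note (an assumption, not stated in the original source):
 * the SCp.phase values are taken to encode their state-machine tier in
 * the high nibble and a handler index in the low nibble, which is why
 * the dispatch above masks with 0xf0 to recognize the tier and with
 * 0x0f to index bus_vector[].
 */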
4093
4094static espfunc_t isvc_vector[] = {
4095 NULL,
4096 esp_do_phase_determine,
4097 esp_do_resetbus,
4098 esp_finish_reset,
4099 esp_work_bus
4100};
4101
4102/* Main interrupt handler for an esp adapter. */
4103static void esp_handle(struct esp *esp)
4104{
4105 struct scsi_cmnd *SCptr;
4106 int what_next = do_intr_end;
4107
4108 SCptr = esp->current_SC;
4109
4110 /* Check for errors. */
4111 esp->sreg = sbus_readb(esp->eregs + ESP_STATUS);
4112 esp->sreg &= (~ESP_STAT_INTR);
4113 if (esp->erev == fashme) {
4114 esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2);
4115 esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS);
4116 }
4117
4118 if (esp->sreg & (ESP_STAT_SPAM)) {
4119 /* Gross error, could be due to one of:
4120 *
4121 * - top of fifo overwritten, could be because
4122 * we tried to do a synchronous transfer with
4123 * an offset greater than ESP fifo size
4124 *
4125 * - top of command register overwritten
4126 *
4127 * - DMA setup to go in one direction, SCSI
4128 * bus points in the other, whoops
4129 *
4130 * - weird phase change during asynchronous
4131 * data phase while we are initiator
4132 */
4133 ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg));
4134
4135 /* If a command is live on the bus we cannot safely
4136 * reset the bus, so we'll just let the pieces fall
4137 * where they may. Here we are hoping that the
4138 * target will be able to cleanly go away soon
4139 * so we can safely reset things.
4140 */
4141 if (!SCptr) {
4142 ESPLOG(("esp%d: No current cmd during gross error, "
4143 "resetting bus\n", esp->esp_id));
4144 what_next = do_reset_bus;
4145 goto state_machine;
4146 }
4147 }
4148
4149 if (sbus_readl(esp->dregs + DMA_CSR) & DMA_HNDL_ERROR) {
4150 /* A DMA gate array error. Here we must
4151 * be seeing one of two things. Either the
4152 * virtual to physical address translation
4153 * on the SBUS could not occur, else the
4154 * translation it did get pointed to a bogus
4155 * page. Ho hum...
4156 */
4157 ESPLOG(("esp%d: DMA error %08x\n", esp->esp_id,
4158 sbus_readl(esp->dregs + DMA_CSR)));
4159
4160 /* DMA gate array itself must be reset to clear the
4161 * error condition.
4162 */
4163 esp_reset_dma(esp);
4164
4165 what_next = do_reset_bus;
4166 goto state_machine;
4167 }
4168
4169 esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); /* Unlatch intr reg */
4170
4171 if (esp->erev == fashme) {
4172 /* This chip is really losing. */
4173 ESPHME(("HME["));
4174
4175 ESPHME(("sreg2=%02x,", esp->sreg2));
4176 /* Must latch fifo before reading the interrupt
4177 * register else garbage ends up in the FIFO
4178 * which confuses the driver utterly.
4179 */
4180 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
4181 (esp->sreg2 & ESP_STAT2_F1BYTE)) {
4182 ESPHME(("fifo_workaround]"));
4183 hme_fifo_read(esp);
4184 } else {
4185 ESPHME(("no_fifo_workaround]"));
4186 }
4187 }
4188
4189 	/* Having no current cmd is only valid at this point when there are
4190 * commands off the bus or we are trying a reset.
4191 */
4192 if (!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) {
4193 /* Panic is safe, since current_SC is null. */
4194 ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id));
4195 panic("esp_handle: current_SC == penguin within interrupt!");
4196 }
4197
4198 if (esp->ireg & (ESP_INTR_IC)) {
4199 /* Illegal command fed to ESP. Outside of obvious
4200 * software bugs that could cause this, there is
4201 * a condition with esp100 where we can confuse the
4202 * ESP into an erroneous illegal command interrupt
4203 * because it does not scrape the FIFO properly
4204 * for reselection. See esp100_reconnect_hwbug()
4205 * to see how we try very hard to avoid this.
4206 */
4207 ESPLOG(("esp%d: invalid command\n", esp->esp_id));
4208
4209 esp_dump_state(esp);
4210
4211 if (SCptr != NULL) {
4212 /* Devices with very buggy firmware can drop BSY
4213 * during a scatter list interrupt when using sync
4214 * mode transfers. We continue the transfer as
4215 * expected, the target drops the bus, the ESP
4216 			 * gets confused, and we get an illegal command
4217 * interrupt because the bus is in the disconnected
4218 * state now and ESP_CMD_TI is only allowed when
4219 * a nexus is alive on the bus.
4220 */
4221 ESPLOG(("esp%d: Forcing async and disabling disconnect for "
4222 "target %d\n", esp->esp_id, SCptr->device->id));
4223 SCptr->device->borken = 1; /* foo on you */
4224 }
4225
4226 what_next = do_reset_bus;
4227 } else if (!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) {
4228 if (SCptr) {
4229 unsigned int phase = SCptr->SCp.phase;
4230
4231 if (phase & in_phases_mask) {
4232 what_next = esp_work_bus(esp);
4233 } else if (phase & in_slct_mask) {
4234 what_next = esp_select_complete(esp);
4235 } else {
4236 ESPLOG(("esp%d: interrupt for no good reason...\n",
4237 esp->esp_id));
4238 what_next = do_intr_end;
4239 }
4240 } else {
4241 ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n",
4242 esp->esp_id));
4243 what_next = do_reset_bus;
4244 }
4245 } else if (esp->ireg & ESP_INTR_SR) {
4246 ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id));
4247 what_next = do_reset_complete;
4248 } else if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) {
4249 ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n",
4250 esp->esp_id));
4251 what_next = do_reset_bus;
4252 } else if (esp->ireg & ESP_INTR_RSEL) {
4253 if (SCptr == NULL) {
4254 /* This is ok. */
4255 what_next = esp_do_reconnect(esp);
4256 } else if (SCptr->SCp.phase & in_slct_mask) {
4257 /* Only selection code knows how to clean
4258 * up properly.
4259 */
4260 ESPDISC(("Reselected during selection attempt\n"));
4261 what_next = esp_select_complete(esp);
4262 } else {
4263 ESPLOG(("esp%d: Reselected while bus is busy\n",
4264 esp->esp_id));
4265 what_next = do_reset_bus;
4266 }
4267 }
4268
4269 /* This is tier-one in our dual level SCSI state machine. */
4270state_machine:
4271 while (what_next != do_intr_end) {
4272 if (what_next >= do_phase_determine &&
4273 what_next < do_intr_end) {
4274 what_next = isvc_vector[what_next](esp);
4275 } else {
4276 /* state is completely lost ;-( */
4277 ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n",
4278 esp->esp_id));
4279 what_next = do_reset_bus;
4280 }
4281 }
4282}
4283
4284/* Service only the ESP described by dev_id. */
4285static irqreturn_t esp_intr(int irq, void *dev_id)
4286{
4287 struct esp *esp = dev_id;
4288 unsigned long flags;
4289
4290 spin_lock_irqsave(esp->ehost->host_lock, flags);
4291 if (ESP_IRQ_P(esp->dregs)) {
4292 ESP_INTSOFF(esp->dregs);
4293
4294 ESPIRQ(("I[%d:%d](", smp_processor_id(), esp->esp_id));
4295 esp_handle(esp);
4296 ESPIRQ((")"));
4297
4298 ESP_INTSON(esp->dregs);
4299 }
4300 spin_unlock_irqrestore(esp->ehost->host_lock, flags);
4301
4302 return IRQ_HANDLED;
4303}
4304
4305static int esp_slave_alloc(struct scsi_device *SDptr)
4306{
4307 struct esp_device *esp_dev =
4308 kmalloc(sizeof(struct esp_device), GFP_ATOMIC);
4309
4310 if (!esp_dev)
4311 return -ENOMEM;
4312 memset(esp_dev, 0, sizeof(struct esp_device));
4313 SDptr->hostdata = esp_dev;
4314 return 0;
4315}
4316
4317static void esp_slave_destroy(struct scsi_device *SDptr)
4318{
4319 struct esp *esp = (struct esp *) SDptr->host->hostdata;
4320
4321 esp->targets_present &= ~(1 << SDptr->id);
4322 kfree(SDptr->hostdata);
4323 SDptr->hostdata = NULL;
4324}
4325
4326static struct scsi_host_template esp_template = {
4327 .module = THIS_MODULE,
4328 .name = "esp",
4329 .info = esp_info,
4330 .slave_alloc = esp_slave_alloc,
4331 .slave_destroy = esp_slave_destroy,
4332 .queuecommand = esp_queue,
4333 .eh_abort_handler = esp_abort,
4334 .eh_bus_reset_handler = esp_reset,
4335 .can_queue = 7,
4336 .this_id = 7,
4337 .sg_tablesize = SG_ALL,
4338 .cmd_per_lun = 1,
4339 .use_clustering = ENABLE_CLUSTERING,
4340 .proc_name = "esp",
4341 .proc_info = esp_proc_info,
4342};
4343
4344#ifndef CONFIG_SUN4
4345static struct of_device_id esp_match[] = {
4346 {
4347 .name = "SUNW,esp",
4348 .data = &esp_template,
4349 },
4350 {
4351 .name = "SUNW,fas",
4352 .data = &esp_template,
4353 },
4354 {
4355 .name = "esp",
4356 .data = &esp_template,
4357 },
4358 {},
4359};
4360MODULE_DEVICE_TABLE(of, esp_match);
4361
4362static struct of_platform_driver esp_sbus_driver = {
4363 .name = "esp",
4364 .match_table = esp_match,
4365 .probe = esp_sbus_probe,
4366 .remove = __devexit_p(esp_sbus_remove),
4367};
4368#endif
4369
4370static int __init esp_init(void)
4371{
4372#ifdef CONFIG_SUN4
4373 return esp_sun4_probe(&esp_template);
4374#else
4375 return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
4376#endif
4377}
4378
4379static void __exit esp_exit(void)
4380{
4381#ifdef CONFIG_SUN4
4382 esp_sun4_remove();
4383#else
4384 of_unregister_driver(&esp_sbus_driver);
4385#endif
4386}
4387
4388MODULE_DESCRIPTION("ESP Sun SCSI driver");
4389MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
4390MODULE_LICENSE("GPL");
4391MODULE_VERSION(DRV_VERSION);
4392
4393module_init(esp_init);
4394module_exit(esp_exit);
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h
deleted file mode 100644
index a98cda9121fc..000000000000
--- a/drivers/scsi/esp.h
+++ /dev/null
@@ -1,406 +0,0 @@
1/* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $
2 * esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI
3 * Processor) driver under Linux.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef _SPARC_ESP_H
9#define _SPARC_ESP_H
10
11/* For dvma controller register definitions. */
12#include <asm/dma.h>
13
14/* The ESP SCSI controllers have their register sets in three
15 * "classes":
16 *
17 * 1) Registers which are both read and write.
18 * 2) Registers which are read only.
19 * 3) Registers which are write only.
20 *
21 * Yet, they all live within the same IO space.
22 */
23
24/* All the ESP registers are one byte each and are accessed longwords
25 * apart with a big-endian ordering to the bytes.
26 */
27 /* Access Description Offset */
28#define ESP_TCLOW 0x00UL /* rw Low bits of the transfer count 0x00 */
29#define ESP_TCMED 0x04UL /* rw Mid bits of the transfer count 0x04 */
30#define ESP_FDATA 0x08UL /* rw FIFO data bits 0x08 */
31#define ESP_CMD 0x0cUL /* rw SCSI command bits 0x0c */
32#define ESP_STATUS 0x10UL /* ro ESP status register 0x10 */
33#define ESP_BUSID ESP_STATUS /* wo Bus ID for select/reselect 0x10 */
34#define ESP_INTRPT 0x14UL /* ro Kind of interrupt 0x14 */
35#define ESP_TIMEO ESP_INTRPT /* wo Timeout value for select/resel 0x14 */
36#define ESP_SSTEP 0x18UL /* ro Sequence step register 0x18 */
37#define ESP_STP ESP_SSTEP /* wo Transfer period per sync 0x18 */
38#define ESP_FFLAGS 0x1cUL /* ro Bits of current FIFO info 0x1c */
39#define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */
40#define ESP_CFG1 0x20UL /* rw First configuration register 0x20 */
41#define ESP_CFACT 0x24UL /* wo Clock conversion factor 0x24 */
42#define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */
43#define ESP_CTEST 0x28UL /* wo Chip test register 0x28 */
44#define ESP_CFG2 0x2cUL /* rw Second configuration register 0x2c */
45#define ESP_CFG3 0x30UL /* rw Third configuration register 0x30 */
46#define ESP_TCHI 0x38UL /* rw High bits of transfer count 0x38 */
47#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
48#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
49#define ESP_FGRND 0x3cUL /* rw Data base for fifo 0x3c */
50#define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */
51#define ESP_REG_SIZE 0x40UL
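/* Example (illustrative): with the 4-byte register spacing above, a
 * register is accessed as a single byte at the mapped base plus its
 * offset, e.g.
 *
 *	u8 intr = sbus_readb(esp->eregs + ESP_INTRPT);
 *	sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
 *
 * exactly as the driver code does throughout esp.c.
 */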
52
53/* Various revisions of the ESP board. */
54enum esp_rev {
55 esp100 = 0x00, /* NCR53C90 - very broken */
56 esp100a = 0x01, /* NCR53C90A */
57 esp236 = 0x02,
58 fas236 = 0x03,
59 fas100a = 0x04,
60 fast = 0x05,
61 fashme = 0x06,
62 espunknown = 0x07
63};
64
65/* We allocate one of these for each scsi device and attach it to
66 * SDptr->hostdata for use in the driver
67 */
68struct esp_device {
69 unsigned char sync_min_period;
70 unsigned char sync_max_offset;
71 unsigned sync:1;
72 unsigned wide:1;
73 unsigned disconnect:1;
74};
75
76struct scsi_cmnd;
77
78/* We get one of these for each ESP probed. */
79struct esp {
80 void __iomem *eregs; /* ESP controller registers */
81 void __iomem *dregs; /* DMA controller registers */
82 struct sbus_dma *dma; /* DMA controller sw state */
83 struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
84 struct sbus_dev *sdev; /* Pointer to SBus entry */
85
86 /* ESP Configuration Registers */
87 u8 config1; /* Copy of the 1st config register */
88 u8 config2; /* Copy of the 2nd config register */
89 u8 config3[16]; /* Copy of the 3rd config register */
90
91 /* The current command we are sending to the ESP chip. This esp_command
92 * ptr needs to be mapped in DVMA area so we can send commands and read
93 * from the ESP fifo without burning precious CPU cycles. Programmed I/O
94 * sucks when we have the DVMA to do it for us. The ESP is stupid and will
  95	 * only send out 6, 10, and 12 byte SCSI commands; others we need to send
96 * one byte at a time. esp_slowcmd being set says that we are doing one
97 * of the command types ESP doesn't understand, esp_scmdp keeps track of
98 * which byte we are sending, esp_scmdleft says how many bytes to go.
99 */
100 volatile u8 *esp_command; /* Location of command (CPU view) */
101 __u32 esp_command_dvma;/* Location of command (DVMA view) */
102 unsigned char esp_clen; /* Length of this command */
103 unsigned char esp_slowcmd;
104 unsigned char *esp_scmdp;
105 unsigned char esp_scmdleft;
106
107 /* The following are used to determine the cause of an IRQ. Upon every
108 * IRQ entry we synchronize these with the hardware registers.
109 */
110 u8 ireg; /* Copy of ESP interrupt register */
111 u8 sreg; /* Copy of ESP status register */
112 u8 seqreg; /* Copy of ESP sequence step register */
113 u8 sreg2; /* Copy of HME status2 register */
114
115 /* To save register writes to the ESP, which can be expensive, we
116 * keep track of the previous value that various registers had for
117 * the last target we connected to. If they are the same for the
118 * current target, we skip the register writes as they are not needed.
119 */
120 u8 prev_soff, prev_stp;
121 u8 prev_cfg3, __cache_pad;
122
123 /* We also keep a cache of the previous FAS/HME DMA CSR register value. */
124 u32 prev_hme_dmacsr;
125
126 /* The HME is the biggest piece of shit I have ever seen. */
127 u8 hme_fifo_workaround_buffer[16 * 2];
128 u8 hme_fifo_workaround_count;
129
130 /* For each target we keep track of save/restore data
131 * pointer information. This needs to be updated majorly
132 * when we add support for tagged queueing. -DaveM
133 */
134 struct esp_pointers {
135 char *saved_ptr;
136 struct scatterlist *saved_buffer;
137 int saved_this_residual;
138 int saved_buffers_residual;
139 } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/;
140
141 /* Clock periods, frequencies, synchronization, etc. */
142 unsigned int cfreq; /* Clock frequency in HZ */
143 unsigned int cfact; /* Clock conversion factor */
144 unsigned int raw_cfact; /* Raw copy from probing */
145 unsigned int ccycle; /* One ESP clock cycle */
146 unsigned int ctick; /* One ESP clock time */
147 unsigned int radelay; /* FAST chip req/ack delay */
148 unsigned int neg_defp; /* Default negotiation period */
149 unsigned int sync_defp; /* Default sync transfer period */
150 unsigned int max_period; /* longest our period can be */
151 unsigned int min_period; /* shortest period we can withstand */
152
153 struct esp *next; /* Next ESP we probed or NULL */
154 char prom_name[64]; /* Name of ESP device from prom */
155 int prom_node; /* Prom node where ESP found */
156 int esp_id; /* Unique per-ESP ID number */
157
158 /* For slow to medium speed input clock rates we shoot for 5mb/s,
159 * but for high input clock rates we try to do 10mb/s although I
160 * don't think a transfer can even run that fast with an ESP even
161 * with DMA2 scatter gather pipelining.
162 */
163#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
164#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
165
166 unsigned int snip; /* Sync. negotiation in progress */
167 unsigned int wnip; /* WIDE negotiation in progress */
168 unsigned int targets_present;/* targets spoken to before */
169
170 int current_transfer_size; /* Set at beginning of data dma */
171
172 u8 espcmdlog[32]; /* Log of current esp cmds sent. */
173 u8 espcmdent; /* Current entry in esp cmd log. */
174
175 /* Misc. info about this ESP */
176 enum esp_rev erev; /* ESP revision */
177 int irq; /* SBus IRQ for this ESP */
178 int scsi_id; /* Who am I as initiator? */
179 int scsi_id_mask; /* Bitmask of 'me'. */
180 int diff; /* Differential SCSI bus? */
181 int bursts; /* Burst sizes our DVMA supports */
182
183 /* Our command queues, only one cmd lives in the current_SC queue. */
184 struct scsi_cmnd *issue_SC; /* Commands to be issued */
185 struct scsi_cmnd *current_SC; /* Who is currently working the bus */
186 struct scsi_cmnd *disconnected_SC;/* Commands disconnected from the bus */
187
188 /* Message goo */
189 u8 cur_msgout[16];
190 u8 cur_msgin[16];
191 u8 prevmsgout, prevmsgin;
192 u8 msgout_len, msgin_len;
193 u8 msgout_ctr, msgin_ctr;
194
195 /* States that we cannot keep in the per cmd structure because they
 196	 * cannot be associated with any specific command.
197 */
198 u8 resetting_bus;
199 wait_queue_head_t reset_queue;
200};
201
202/* Bitfield meanings for the above registers. */
203
204/* ESP config reg 1, read-write, found on all ESP chips */
205#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
206#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
207#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
208#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
209#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
210#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
211
212/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
213#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */
214#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */
215#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
216#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */
217#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
218#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
219#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */
220#define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */
221#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */
222#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
223#define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */
224#define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */
225#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
226
227/* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */
228#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */
229#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */
230#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */
231#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */
232#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */
233#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */
234#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */
235#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */
236#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */
237#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */
238#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */
239#define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */
240#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */
241#define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */
242#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
243#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
244
245/* ESP command register read-write */
246/* Group 1 commands: These may be sent at any point in time to the ESP
247 * chip. None of them can generate interrupts 'cept
248 * the "SCSI bus reset" command if you have not disabled
249 * SCSI reset interrupts in the config1 ESP register.
250 */
251#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
252#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
253#define ESP_CMD_RC 0x02 /* Chip reset */
254#define ESP_CMD_RS 0x03 /* SCSI bus reset */
255
256/* Group 2 commands: ESP must be an initiator and connected to a target
257 * for these commands to work.
258 */
259#define ESP_CMD_TI 0x10 /* Transfer Information */
260#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
261#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
262#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
263#define ESP_CMD_SATN 0x1a /* Set ATN */
264#define ESP_CMD_RATN 0x1b /* De-assert ATN */
265
266/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
267 * to a target as the initiator for these commands to work.
268 */
269#define ESP_CMD_SMSG 0x20 /* Send message */
270#define ESP_CMD_SSTAT 0x21 /* Send status */
271#define ESP_CMD_SDATA 0x22 /* Send data */
272#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
273#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
274#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
275#define ESP_CMD_DCNCT 0x27 /* Disconnect */
276#define ESP_CMD_RMSG 0x28 /* Receive Message */
277#define ESP_CMD_RCMD 0x29 /* Receive Command */
278#define ESP_CMD_RDATA 0x2a /* Receive Data */
279#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
280
281/* Group 4 commands: The ESP must be in the disconnected state and must
282 * not be connected to any targets as initiator for
283 * these commands to work.
284 */
285#define ESP_CMD_RSEL 0x40 /* Reselect */
286#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
287#define ESP_CMD_SELA 0x42 /* Select w/ATN */
288#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
289#define ESP_CMD_ESEL 0x44 /* Enable selection */
290#define ESP_CMD_DSEL 0x45 /* Disable selections */
291#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
292#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
293
294/* This bit enables the ESP's DMA on the SBus */
295#define ESP_CMD_DMA 0x80 /* Do DMA? */
296
297
298/* ESP status register read-only */
299#define ESP_STAT_PIO 0x01 /* IO phase bit */
300#define ESP_STAT_PCD 0x02 /* CD phase bit */
301#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
302#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
303#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
304#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
305#define ESP_STAT_PERR 0x20 /* Parity error */
306#define ESP_STAT_SPAM 0x40 /* Real bad error */
307/* This indicates the 'interrupt pending' condition on esp236, it is a reserved
308 * bit on other revs of the ESP.
309 */
310#define ESP_STAT_INTR 0x80 /* Interrupt */
311
312/* HME only: status 2 register */
313#define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */
314#define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */
315#define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */
316#define ESP_STAT2_CREGA 0x08 /* The command reg is active now */
317#define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */
318#define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */
319#define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */
320#define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */
321
322/* The status register can be masked with ESP_STAT_PMASK and compared
323 * with the following values to determine the current phase the ESP
324 * (at least thinks it) is in. For our purposes we also add our own
325 * software 'done' bit for our phase management engine.
326 */
327#define ESP_DOP (0) /* Data Out */
328#define ESP_DIP (ESP_STAT_PIO) /* Data In */
329#define ESP_CMDP (ESP_STAT_PCD) /* Command */
330#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
331#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
332#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
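/* Example (illustrative): a phase test in the driver is simply the
 * masked status compared against one of the values above, e.g.
 *
 *	if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP)
 *		... message-in bytes are waiting ...
 */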
333
334/* ESP interrupt register read-only */
335#define ESP_INTR_S 0x01 /* Select w/o ATN */
336#define ESP_INTR_SATN 0x02 /* Select w/ATN */
337#define ESP_INTR_RSEL 0x04 /* Reselected */
338#define ESP_INTR_FDONE 0x08 /* Function done */
339#define ESP_INTR_BSERV 0x10 /* Bus service */
340#define ESP_INTR_DC 0x20 /* Disconnect */
341#define ESP_INTR_IC 0x40 /* Illegal command given */
342#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
343
344/* Interrupt status macros */
345#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
346#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
347#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
348#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
349#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
350 (ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
351#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
352
353/* ESP sequence step register read-only */
354#define ESP_STEP_VBITS 0x07 /* Valid bits */
355#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
356#define ESP_STEP_SID 0x01 /* One msg byte sent */
357#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
358#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
359 * bytes to be lost
360 */
361#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
362
363/* Ho hum, some ESP's set the step register to this as well... */
364#define ESP_STEP_FINI5 0x05
365#define ESP_STEP_FINI6 0x06
366#define ESP_STEP_FINI7 0x07
367
368/* ESP chip-test register read-write */
369#define ESP_TEST_TARG 0x01 /* Target test mode */
370#define ESP_TEST_INI 0x02 /* Initiator test mode */
371#define ESP_TEST_TS 0x04 /* Tristate test mode */
372
373/* ESP unique ID register read-only, found on fas236+fas100a only */
374#define ESP_UID_F100A 0x00 /* ESP FAS100A */
375#define ESP_UID_F236 0x02 /* ESP FAS236 */
376#define ESP_UID_REV 0x07 /* ESP revision */
377#define ESP_UID_FAM 0xf8 /* ESP family */
378
379/* ESP fifo flags register read-only */
380/* Note that the following implies a 16 byte FIFO on the ESP. */
381#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
382#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */
383#define ESP_FF_SSTEP 0xe0 /* Sequence step */
384
385/* ESP clock conversion factor register write-only */
386#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
387#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
388#define ESP_CCF_F2 0x02 /* 10MHz */
389#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
390#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
391#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
392#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
393#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
394
395/* HME only... */
396#define ESP_BUSID_RESELID 0x10
397#define ESP_BUSID_CTR32BIT 0x40
398
399#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
400#define ESP_TIMEO_CONST 8192
401#define ESP_NEG_DEFP(mhz, cfact) \
402 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
403#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
404#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
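/* Worked example (illustrative; assumes cfreq is given in Hz as the
 * struct esp comment above says): for a 40MHz clock and a clock
 * conversion factor of 8,
 *
 *	ESP_NEG_DEFP(40000000, 8)
 *		= (275 * 40000) / (8192 * 8)
 *		= 11000000 / 65536
 *		= 167
 *
 * which is the kind of value written into ESP_TIMEO to set the
 * select/reselect timeout.
 */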
405
406#endif /* !(_SPARC_ESP_H) */
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
new file mode 100644
index 000000000000..99ce03331b64
--- /dev/null
+++ b/drivers/scsi/esp_scsi.c
@@ -0,0 +1,2711 @@
1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/irqreturn.h>
17
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
37/* SCSI bus reset settle time in seconds. */
38static int esp_bus_reset_settle = 3;
39
40static u32 esp_debug;
41#define ESP_DEBUG_INTR 0x00000001
42#define ESP_DEBUG_SCSICMD 0x00000002
43#define ESP_DEBUG_RESET 0x00000004
44#define ESP_DEBUG_MSGIN 0x00000008
45#define ESP_DEBUG_MSGOUT 0x00000010
46#define ESP_DEBUG_CMDDONE 0x00000020
47#define ESP_DEBUG_DISCONNECT 0x00000040
48#define ESP_DEBUG_DATASTART 0x00000080
49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400
52
53#define esp_log_intr(f, a...) \
54do { if (esp_debug & ESP_DEBUG_INTR) \
55 printk(f, ## a); \
56} while (0)
57
58#define esp_log_reset(f, a...) \
59do { if (esp_debug & ESP_DEBUG_RESET) \
60 printk(f, ## a); \
61} while (0)
62
63#define esp_log_msgin(f, a...) \
64do { if (esp_debug & ESP_DEBUG_MSGIN) \
65 printk(f, ## a); \
66} while (0)
67
68#define esp_log_msgout(f, a...) \
69do { if (esp_debug & ESP_DEBUG_MSGOUT) \
70 printk(f, ## a); \
71} while (0)
72
73#define esp_log_cmddone(f, a...) \
74do { if (esp_debug & ESP_DEBUG_CMDDONE) \
75 printk(f, ## a); \
76} while (0)
77
78#define esp_log_disconnect(f, a...) \
79do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
80 printk(f, ## a); \
81} while (0)
82
83#define esp_log_datastart(f, a...) \
84do { if (esp_debug & ESP_DEBUG_DATASTART) \
85 printk(f, ## a); \
86} while (0)
87
88#define esp_log_datadone(f, a...) \
89do { if (esp_debug & ESP_DEBUG_DATADONE) \
90 printk(f, ## a); \
91} while (0)
92
93#define esp_log_reconnect(f, a...) \
94do { if (esp_debug & ESP_DEBUG_RECONNECT) \
95 printk(f, ## a); \
96} while (0)
97
98#define esp_log_autosense(f, a...) \
99do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
100 printk(f, ## a); \
101} while (0)
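/* Illustrative usage: several debug categories can be enabled at once by
 * OR-ing the ESP_DEBUG_* flags into esp_debug, e.g.
 *
 *	esp_debug = ESP_DEBUG_INTR | ESP_DEBUG_RESET;
 *
 * after which esp_log_intr() and esp_log_reset() print and the other
 * esp_log_*() macros stay quiet.
 */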
102
103#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
104#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
105
106static void esp_log_fill_regs(struct esp *esp,
107 struct esp_event_ent *p)
108{
109 p->sreg = esp->sreg;
110 p->seqreg = esp->seqreg;
111 p->sreg2 = esp->sreg2;
112 p->ireg = esp->ireg;
113 p->select_state = esp->select_state;
114 p->event = esp->event;
115}
116
117void scsi_esp_cmd(struct esp *esp, u8 val)
118{
119 struct esp_event_ent *p;
120 int idx = esp->esp_event_cur;
121
122 p = &esp->esp_event_log[idx];
123 p->type = ESP_EVENT_TYPE_CMD;
124 p->val = val;
125 esp_log_fill_regs(esp, p);
126
127 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128
129 esp_write8(val, ESP_CMD);
130}
131EXPORT_SYMBOL(scsi_esp_cmd);
132
133static void esp_event(struct esp *esp, u8 val)
134{
135 struct esp_event_ent *p;
136 int idx = esp->esp_event_cur;
137
138 p = &esp->esp_event_log[idx];
139 p->type = ESP_EVENT_TYPE_EVENT;
140 p->val = val;
141 esp_log_fill_regs(esp, p);
142
143 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
144
145 esp->event = val;
146}
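/* Note (an assumption, not stated here): the "(idx + 1) & (ESP_EVENT_LOG_SZ - 1)"
 * wrap in scsi_esp_cmd() and esp_event() only works as a ring-buffer index
 * if ESP_EVENT_LOG_SZ is a power of two, in which case the AND is a cheap
 * modulo.
 */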
147
148static void esp_dump_cmd_log(struct esp *esp)
149{
150 int idx = esp->esp_event_cur;
151 int stop = idx;
152
153 printk(KERN_INFO PFX "esp%d: Dumping command log\n",
154 esp->host->unique_id);
155 do {
156 struct esp_event_ent *p = &esp->esp_event_log[idx];
157
158 printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
159 esp->host->unique_id, idx,
160 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
161
162 printk("val[%02x] sreg[%02x] seqreg[%02x] "
163 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
164 p->val, p->sreg, p->seqreg,
165 p->sreg2, p->ireg, p->select_state, p->event);
166
167 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
168 } while (idx != stop);
169}
170
171static void esp_flush_fifo(struct esp *esp)
172{
173 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
174 if (esp->rev == ESP236) {
175 int lim = 1000;
176
177 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
178 if (--lim == 0) {
 179				printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
180 "will not clear!\n",
181 esp->host->unique_id);
182 break;
183 }
184 udelay(1);
185 }
186 }
187}
188
189static void hme_read_fifo(struct esp *esp)
190{
191 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
192 int idx = 0;
193
194 while (fcnt--) {
195 esp->fifo[idx++] = esp_read8(ESP_FDATA);
196 esp->fifo[idx++] = esp_read8(ESP_FDATA);
197 }
198 if (esp->sreg2 & ESP_STAT2_F1BYTE) {
199 esp_write8(0, ESP_FDATA);
200 esp->fifo[idx++] = esp_read8(ESP_FDATA);
201 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
202 }
203 esp->fifo_cnt = idx;
204}
205
206static void esp_set_all_config3(struct esp *esp, u8 val)
207{
208 int i;
209
210 for (i = 0; i < ESP_MAX_TARGET; i++)
211 esp->target[i].esp_config3 = val;
212}
213
214/* Reset the ESP chip, _not_ the SCSI bus. */
215static void esp_reset_esp(struct esp *esp)
216{
217 u8 family_code, version;
218
219 /* Now reset the ESP chip */
220 scsi_esp_cmd(esp, ESP_CMD_RC);
221 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
222 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
223
224 /* Reload the configuration registers */
225 esp_write8(esp->cfact, ESP_CFACT);
226
227 esp->prev_stp = 0;
228 esp_write8(esp->prev_stp, ESP_STP);
229
230 esp->prev_soff = 0;
231 esp_write8(esp->prev_soff, ESP_SOFF);
232
233 esp_write8(esp->neg_defp, ESP_TIMEO);
234
235 /* This is the only point at which it is reliable to read
 236	 * the ID-code for the fast ESP chip variants.
237 */
238 esp->max_period = ((35 * esp->ccycle) / 1000);
239 if (esp->rev == FAST) {
240 version = esp_read8(ESP_UID);
241 family_code = (version & 0xf8) >> 3;
242 if (family_code == 0x02)
243 esp->rev = FAS236;
244 else if (family_code == 0x0a)
245 esp->rev = FASHME; /* Version is usually '5'. */
246 else
247 esp->rev = FAS100A;
248 esp->min_period = ((4 * esp->ccycle) / 1000);
249 } else {
250 esp->min_period = ((5 * esp->ccycle) / 1000);
251 }
252 esp->max_period = (esp->max_period + 3)>>2;
253 esp->min_period = (esp->min_period + 3)>>2;
254
255 esp_write8(esp->config1, ESP_CFG1);
256 switch (esp->rev) {
257 case ESP100:
258 /* nothing to do */
259 break;
260
261 case ESP100A:
262 esp_write8(esp->config2, ESP_CFG2);
263 break;
264
265 case ESP236:
266 /* Slow 236 */
267 esp_write8(esp->config2, ESP_CFG2);
268 esp->prev_cfg3 = esp->target[0].esp_config3;
269 esp_write8(esp->prev_cfg3, ESP_CFG3);
270 break;
271
272 case FASHME:
273 esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
274 /* fallthrough... */
275
276 case FAS236:
277 /* Fast 236 or HME */
278 esp_write8(esp->config2, ESP_CFG2);
279 if (esp->rev == FASHME) {
280 u8 cfg3 = esp->target[0].esp_config3;
281
282 cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
283 if (esp->scsi_id >= 8)
284 cfg3 |= ESP_CONFIG3_IDBIT3;
285 esp_set_all_config3(esp, cfg3);
286 } else {
287 u32 cfg3 = esp->target[0].esp_config3;
288
289 cfg3 |= ESP_CONFIG3_FCLK;
290 esp_set_all_config3(esp, cfg3);
291 }
292 esp->prev_cfg3 = esp->target[0].esp_config3;
293 esp_write8(esp->prev_cfg3, ESP_CFG3);
294 if (esp->rev == FASHME) {
295 esp->radelay = 80;
296 } else {
297 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
298 esp->radelay = 0;
299 else
300 esp->radelay = 96;
301 }
302 break;
303
304 case FAS100A:
305 /* Fast 100a */
306 esp_write8(esp->config2, ESP_CFG2);
307 esp_set_all_config3(esp,
308 (esp->target[0].esp_config3 |
309 ESP_CONFIG3_FCLOCK));
310 esp->prev_cfg3 = esp->target[0].esp_config3;
311 esp_write8(esp->prev_cfg3, ESP_CFG3);
312 esp->radelay = 32;
313 break;
314
315 default:
316 break;
317 }
318
319 /* Eat any bitrot in the chip */
320 esp_read8(ESP_INTRPT);
321 udelay(100);
322}
323
324static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
325{
326 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
327 struct scatterlist *sg = cmd->request_buffer;
328 int dir = cmd->sc_data_direction;
329 int total, i;
330
331 if (dir == DMA_NONE)
332 return;
333
334 BUG_ON(cmd->use_sg == 0);
335
336 spriv->u.num_sg = esp->ops->map_sg(esp, sg,
337 cmd->use_sg, dir);
338 spriv->cur_residue = sg_dma_len(sg);
339 spriv->cur_sg = sg;
340
341 total = 0;
342 for (i = 0; i < spriv->u.num_sg; i++)
343 total += sg_dma_len(&sg[i]);
344 spriv->tot_residue = total;
345}
346
347static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
348 struct scsi_cmnd *cmd)
349{
350 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
351
352 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
353 return ent->sense_dma +
354 (ent->sense_ptr - cmd->sense_buffer);
355 }
356
357 return sg_dma_address(p->cur_sg) +
358 (sg_dma_len(p->cur_sg) -
359 p->cur_residue);
360}
361
362static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
363 struct scsi_cmnd *cmd)
364{
365 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
366
367 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
368 return SCSI_SENSE_BUFFERSIZE -
369 (ent->sense_ptr - cmd->sense_buffer);
370 }
371 return p->cur_residue;
372}
373
374static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
375 struct scsi_cmnd *cmd, unsigned int len)
376{
377 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
378
379 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
380 ent->sense_ptr += len;
381 return;
382 }
383
384 p->cur_residue -= len;
385 p->tot_residue -= len;
386 if (p->cur_residue < 0 || p->tot_residue < 0) {
387 printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
388 esp->host->unique_id);
389 printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
390 "len[%u]\n",
391 esp->host->unique_id,
392 p->cur_residue, p->tot_residue, len);
393 p->cur_residue = 0;
394 p->tot_residue = 0;
395 }
396 if (!p->cur_residue && p->tot_residue) {
397 p->cur_sg++;
398 p->cur_residue = sg_dma_len(p->cur_sg);
399 }
400}
401
402static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
403{
404 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
405 int dir = cmd->sc_data_direction;
406
407 if (dir == DMA_NONE)
408 return;
409
410 esp->ops->unmap_sg(esp, cmd->request_buffer,
411 spriv->u.num_sg, dir);
412}
413
414static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
415{
416 struct scsi_cmnd *cmd = ent->cmd;
417 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
418
419 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
420 ent->saved_sense_ptr = ent->sense_ptr;
421 return;
422 }
423 ent->saved_cur_residue = spriv->cur_residue;
424 ent->saved_cur_sg = spriv->cur_sg;
425 ent->saved_tot_residue = spriv->tot_residue;
426}
427
428static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
429{
430 struct scsi_cmnd *cmd = ent->cmd;
431 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
432
433 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
434 ent->sense_ptr = ent->saved_sense_ptr;
435 return;
436 }
437 spriv->cur_residue = ent->saved_cur_residue;
438 spriv->cur_sg = ent->saved_cur_sg;
439 spriv->tot_residue = ent->saved_tot_residue;
440}
441
442static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
443{
444 if (cmd->cmd_len == 6 ||
445 cmd->cmd_len == 10 ||
446 cmd->cmd_len == 12) {
447 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
448 } else {
449 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
450 }
451}
452
453static void esp_write_tgt_config3(struct esp *esp, int tgt)
454{
455 if (esp->rev > ESP100A) {
456 u8 val = esp->target[tgt].esp_config3;
457
458 if (val != esp->prev_cfg3) {
459 esp->prev_cfg3 = val;
460 esp_write8(val, ESP_CFG3);
461 }
462 }
463}
464
465static void esp_write_tgt_sync(struct esp *esp, int tgt)
466{
467 u8 off = esp->target[tgt].esp_offset;
468 u8 per = esp->target[tgt].esp_period;
469
470 if (off != esp->prev_soff) {
471 esp->prev_soff = off;
472 esp_write8(off, ESP_SOFF);
473 }
474 if (per != esp->prev_stp) {
475 esp->prev_stp = per;
476 esp_write8(per, ESP_STP);
477 }
478}
479
480static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
481{
482 if (esp->rev == FASHME) {
483 /* Arbitrary segment boundaries, 24-bit counts. */
484 if (dma_len > (1U << 24))
485 dma_len = (1U << 24);
486 } else {
487 u32 base, end;
488
 489		/* The other ESP chip variants are limited to a 16-bit transfer
 490		 * count.  Actually on FAS100A and FAS236 we could get
491 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
492 * in the ESP_CFG2 register but that causes other unwanted
493 * changes so we don't use it currently.
494 */
495 if (dma_len > (1U << 16))
496 dma_len = (1U << 16);
497
498 /* All of the DMA variants hooked up to these chips
499 * cannot handle crossing a 24-bit address boundary.
500 */
501 base = dma_addr & ((1U << 24) - 1U);
502 end = base + dma_len;
503 if (end > (1U << 24))
 504			end = (1U << 24);
505 dma_len = end - base;
506 }
507 return dma_len;
508}
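/* Worked example (illustrative): on a non-FASHME chip, dma_addr = 0x00ffff00
 * with dma_len = 0x400 gives base = 0xffff00 and end = 0x1000300, which
 * crosses the 24-bit boundary; end is clamped to 0x1000000 and the function
 * returns 0x100 bytes, presumably leaving the remainder to be moved by a
 * later transfer via the residue tracking above.
 */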
509
510static int esp_need_to_nego_wide(struct esp_target_data *tp)
511{
512 struct scsi_target *target = tp->starget;
513
514 return spi_width(target) != tp->nego_goal_width;
515}
516
517static int esp_need_to_nego_sync(struct esp_target_data *tp)
518{
519 struct scsi_target *target = tp->starget;
520
521 /* When offset is zero, period is "don't care". */
522 if (!spi_offset(target) && !tp->nego_goal_offset)
523 return 0;
524
525 if (spi_offset(target) == tp->nego_goal_offset &&
526 spi_period(target) == tp->nego_goal_period)
527 return 0;
528
529 return 1;
530}
531
532static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
533 struct esp_lun_data *lp)
534{
535 if (!ent->tag[0]) {
536 /* Non-tagged, slot already taken? */
537 if (lp->non_tagged_cmd)
538 return -EBUSY;
539
540 if (lp->hold) {
541 /* We are being held by active tagged
542 * commands.
543 */
544 if (lp->num_tagged)
545 return -EBUSY;
546
547 /* Tagged commands completed, we can unplug
548 * the queue and run this untagged command.
549 */
550 lp->hold = 0;
551 } else if (lp->num_tagged) {
552 /* Plug the queue until num_tagged decreases
553 * to zero in esp_free_lun_tag.
554 */
555 lp->hold = 1;
556 return -EBUSY;
557 }
558
559 lp->non_tagged_cmd = ent;
560 return 0;
561 } else {
562 /* Tagged command, see if blocked by a
563 * non-tagged one.
564 */
565 if (lp->non_tagged_cmd || lp->hold)
566 return -EBUSY;
567 }
568
569 BUG_ON(lp->tagged_cmds[ent->tag[1]]);
570
571 lp->tagged_cmds[ent->tag[1]] = ent;
572 lp->num_tagged++;
573
574 return 0;
575}
576
577static void esp_free_lun_tag(struct esp_cmd_entry *ent,
578 struct esp_lun_data *lp)
579{
580 if (ent->tag[0]) {
581 BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
582 lp->tagged_cmds[ent->tag[1]] = NULL;
583 lp->num_tagged--;
584 } else {
585 BUG_ON(lp->non_tagged_cmd != ent);
586 lp->non_tagged_cmd = NULL;
587 }
588}
589
 590/* When a contingent allegiance condition is created, we force feed a
591 * REQUEST_SENSE command to the device to fetch the sense data. I
592 * tried many other schemes, relying on the scsi error handling layer
593 * to send out the REQUEST_SENSE automatically, but this was difficult
594 * to get right especially in the presence of applications like smartd
595 * which use SG_IO to send out their own REQUEST_SENSE commands.
596 */
597static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
598{
599 struct scsi_cmnd *cmd = ent->cmd;
600 struct scsi_device *dev = cmd->device;
601 int tgt, lun;
602 u8 *p, val;
603
604 tgt = dev->id;
605 lun = dev->lun;
606
607
608 if (!ent->sense_ptr) {
609 esp_log_autosense("esp%d: Doing auto-sense for "
610 "tgt[%d] lun[%d]\n",
611 esp->host->unique_id, tgt, lun);
612
613 ent->sense_ptr = cmd->sense_buffer;
614 ent->sense_dma = esp->ops->map_single(esp,
615 ent->sense_ptr,
616 SCSI_SENSE_BUFFERSIZE,
617 DMA_FROM_DEVICE);
618 }
619 ent->saved_sense_ptr = ent->sense_ptr;
620
621 esp->active_cmd = ent;
622
623 p = esp->command_block;
624 esp->msg_out_len = 0;
625
626 *p++ = IDENTIFY(0, lun);
627 *p++ = REQUEST_SENSE;
628 *p++ = ((dev->scsi_level <= SCSI_2) ?
629 (lun << 5) : 0);
630 *p++ = 0;
631 *p++ = 0;
632 *p++ = SCSI_SENSE_BUFFERSIZE;
633 *p++ = 0;
634
635 esp->select_state = ESP_SELECT_BASIC;
636
637 val = tgt;
638 if (esp->rev == FASHME)
639 val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
640 esp_write8(val, ESP_BUSID);
641
642 esp_write_tgt_sync(esp, tgt);
643 esp_write_tgt_config3(esp, tgt);
644
645 val = (p - esp->command_block);
646
647 if (esp->rev == FASHME)
648 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
649 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
650 val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
651}
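/* Note (illustrative, based on the standard SCSI-2 CDB format): the bytes
 * assembled above are an IDENTIFY message followed by a 6-byte
 * REQUEST_SENSE CDB:
 *
 *	IDENTIFY(0, lun)
 *	REQUEST_SENSE opcode (0x03)
 *	LUN in bits 5-7 for SCSI-2 and older targets, else 0
 *	0x00, 0x00 (reserved)
 *	SCSI_SENSE_BUFFERSIZE (allocation length)
 *	0x00 (control byte)
 */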
652
653static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
654{
655 struct esp_cmd_entry *ent;
656
657 list_for_each_entry(ent, &esp->queued_cmds, list) {
658 struct scsi_cmnd *cmd = ent->cmd;
659 struct scsi_device *dev = cmd->device;
660 struct esp_lun_data *lp = dev->hostdata;
661
662 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
663 ent->tag[0] = 0;
664 ent->tag[1] = 0;
665 return ent;
666 }
667
668 if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
669 ent->tag[0] = 0;
670 ent->tag[1] = 0;
671 }
672
673 if (esp_alloc_lun_tag(ent, lp) < 0)
674 continue;
675
676 return ent;
677 }
678
679 return NULL;
680}
681
682static void esp_maybe_execute_command(struct esp *esp)
683{
684 struct esp_target_data *tp;
685 struct esp_lun_data *lp;
686 struct scsi_device *dev;
687 struct scsi_cmnd *cmd;
688 struct esp_cmd_entry *ent;
689 int tgt, lun, i;
690 u32 val, start_cmd;
691 u8 *p;
692
693 if (esp->active_cmd ||
694 (esp->flags & ESP_FLAG_RESETTING))
695 return;
696
697 ent = find_and_prep_issuable_command(esp);
698 if (!ent)
699 return;
700
701 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
702 esp_autosense(esp, ent);
703 return;
704 }
705
706 cmd = ent->cmd;
707 dev = cmd->device;
708 tgt = dev->id;
709 lun = dev->lun;
710 tp = &esp->target[tgt];
711 lp = dev->hostdata;
712
713 list_del(&ent->list);
714 list_add(&ent->list, &esp->active_cmds);
715
716 esp->active_cmd = ent;
717
718 esp_map_dma(esp, cmd);
719 esp_save_pointers(esp, ent);
720
721 esp_check_command_len(esp, cmd);
722
723 p = esp->command_block;
724
725 esp->msg_out_len = 0;
726 if (tp->flags & ESP_TGT_CHECK_NEGO) {
727 /* Need to negotiate. If the target is broken
728 * go for synchronous transfers and non-wide.
729 */
730 if (tp->flags & ESP_TGT_BROKEN) {
731 tp->flags &= ~ESP_TGT_DISCONNECT;
732 tp->nego_goal_period = 0;
733 tp->nego_goal_offset = 0;
734 tp->nego_goal_width = 0;
735 tp->nego_goal_tags = 0;
736 }
737
738 /* If the settings are not changing, skip this. */
739 if (spi_width(tp->starget) == tp->nego_goal_width &&
740 spi_period(tp->starget) == tp->nego_goal_period &&
741 spi_offset(tp->starget) == tp->nego_goal_offset) {
742 tp->flags &= ~ESP_TGT_CHECK_NEGO;
743 goto build_identify;
744 }
745
746 if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
747 esp->msg_out_len =
748 spi_populate_width_msg(&esp->msg_out[0],
749 (tp->nego_goal_width ?
750 1 : 0));
751 tp->flags |= ESP_TGT_NEGO_WIDE;
752 } else if (esp_need_to_nego_sync(tp)) {
753 esp->msg_out_len =
754 spi_populate_sync_msg(&esp->msg_out[0],
755 tp->nego_goal_period,
756 tp->nego_goal_offset);
757 tp->flags |= ESP_TGT_NEGO_SYNC;
758 } else {
759 tp->flags &= ~ESP_TGT_CHECK_NEGO;
760 }
761
762 /* Process it like a slow command. */
763 if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
764 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
765 }
766
767build_identify:
768 /* If we don't have a lun-data struct yet, we're probing
769 * so do not disconnect. Also, do not disconnect unless
770 * we have a tag on this command.
771 */
772 if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
773 *p++ = IDENTIFY(1, lun);
774 else
775 *p++ = IDENTIFY(0, lun);
776
777 if (ent->tag[0] && esp->rev == ESP100) {
778 /* ESP100 lacks select w/atn3 command, use select
779 * and stop instead.
780 */
781 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
782 }
783
784 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
785 start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
786 if (ent->tag[0]) {
787 *p++ = ent->tag[0];
788 *p++ = ent->tag[1];
789
790 start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
791 }
792
793 for (i = 0; i < cmd->cmd_len; i++)
794 *p++ = cmd->cmnd[i];
795
796 esp->select_state = ESP_SELECT_BASIC;
797 } else {
798 esp->cmd_bytes_left = cmd->cmd_len;
799 esp->cmd_bytes_ptr = &cmd->cmnd[0];
800
801 if (ent->tag[0]) {
802 for (i = esp->msg_out_len - 1;
803 i >= 0; i--)
804 esp->msg_out[i + 2] = esp->msg_out[i];
805 esp->msg_out[0] = ent->tag[0];
806 esp->msg_out[1] = ent->tag[1];
807 esp->msg_out_len += 2;
808 }
809
810 start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
811 esp->select_state = ESP_SELECT_MSGOUT;
812 }
813 val = tgt;
814 if (esp->rev == FASHME)
815 val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
816 esp_write8(val, ESP_BUSID);
817
818 esp_write_tgt_sync(esp, tgt);
819 esp_write_tgt_config3(esp, tgt);
820
821 val = (p - esp->command_block);
822
823 if (esp_debug & ESP_DEBUG_SCSICMD) {
824 printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
825 for (i = 0; i < cmd->cmd_len; i++)
826 printk("%02x ", cmd->cmnd[i]);
827 printk("]\n");
828 }
829
830 if (esp->rev == FASHME)
831 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
832 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
833 val, 16, 0, start_cmd);
834}
835
836static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
837{
838 struct list_head *head = &esp->esp_cmd_pool;
839 struct esp_cmd_entry *ret;
840
841 if (list_empty(head)) {
842 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
843 } else {
844 ret = list_entry(head->next, struct esp_cmd_entry, list);
845 list_del(&ret->list);
846 memset(ret, 0, sizeof(*ret));
847 }
848 return ret;
849}
850
851static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
852{
853 list_add(&ent->list, &esp->esp_cmd_pool);
854}
855
856static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
857 struct scsi_cmnd *cmd, unsigned int result)
858{
859 struct scsi_device *dev = cmd->device;
860 int tgt = dev->id;
861 int lun = dev->lun;
862
863 esp->active_cmd = NULL;
864 esp_unmap_dma(esp, cmd);
865 esp_free_lun_tag(ent, dev->hostdata);
866 cmd->result = result;
867
868 if (ent->eh_done) {
869 complete(ent->eh_done);
870 ent->eh_done = NULL;
871 }
872
873 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
874 esp->ops->unmap_single(esp, ent->sense_dma,
875 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
876 ent->sense_ptr = NULL;
877
878 /* Restore the message/status bytes to what we actually
879 * saw originally. Also, report that we are providing
880 * the sense data.
881 */
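		/* For illustration: the SCSI mid-layer result word is laid
		 * out as (driver_byte << 24) | (host_byte << 16) |
		 * (msg_byte << 8) | status_byte, so the value built below
		 * combines DRIVER_SENSE, DID_OK, COMMAND_COMPLETE and
		 * CHECK CONDITION (roughly 0x08000002 with the usual
		 * constants).
		 */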
882 cmd->result = ((DRIVER_SENSE << 24) |
883 (DID_OK << 16) |
884 (COMMAND_COMPLETE << 8) |
885 (SAM_STAT_CHECK_CONDITION << 0));
886
887 ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
888 if (esp_debug & ESP_DEBUG_AUTOSENSE) {
889 int i;
890
891 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
892 esp->host->unique_id, tgt, lun);
893 for (i = 0; i < 18; i++)
894 printk("%02x ", cmd->sense_buffer[i]);
895 printk("]\n");
896 }
897 }
898
899 cmd->scsi_done(cmd);
900
901 list_del(&ent->list);
902 esp_put_ent(esp, ent);
903
904 esp_maybe_execute_command(esp);
905}
906
907static unsigned int compose_result(unsigned int status, unsigned int message,
908 unsigned int driver_code)
909{
910 return (status | (message << 8) | (driver_code << 16));
911}
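/* Usage sketch: a cleanly completed command ends up reported as
 * compose_result(ent->status, ent->message, DID_OK), i.e. the SAM status
 * in the low byte, the SCSI message byte in bits 8-15 and the host code
 * in bits 16-23.
 */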
912
913static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
914{
915 struct scsi_device *dev = ent->cmd->device;
916 struct esp_lun_data *lp = dev->hostdata;
917
918 scsi_track_queue_full(dev, lp->num_tagged - 1);
919}
920
921static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
922{
923 struct scsi_device *dev = cmd->device;
924 struct esp *esp = host_to_esp(dev->host);
925 struct esp_cmd_priv *spriv;
926 struct esp_cmd_entry *ent;
927
928 ent = esp_get_ent(esp);
929 if (!ent)
930 return SCSI_MLQUEUE_HOST_BUSY;
931
932 ent->cmd = cmd;
933
934 cmd->scsi_done = done;
935
936 spriv = ESP_CMD_PRIV(cmd);
937 spriv->u.dma_addr = ~(dma_addr_t)0x0;
938
939 list_add_tail(&ent->list, &esp->queued_cmds);
940
941 esp_maybe_execute_command(esp);
942
943 return 0;
944}
945
946static int esp_check_gross_error(struct esp *esp)
947{
948 if (esp->sreg & ESP_STAT_SPAM) {
949 /* Gross Error, could be one of:
950 * - top of fifo overwritten
951 * - top of command register overwritten
952 * - DMA programmed with wrong direction
953 * - improper phase change
954 */
955 printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
956 esp->host->unique_id, esp->sreg);
957 /* XXX Reset the chip. XXX */
958 return 1;
959 }
960 return 0;
961}
962
963static int esp_check_spur_intr(struct esp *esp)
964{
965 switch (esp->rev) {
966 case ESP100:
967 case ESP100A:
968 /* The interrupt pending bit of the status register cannot
969 * be trusted on these revisions.
970 */
971 esp->sreg &= ~ESP_STAT_INTR;
972 break;
973
974 default:
975 if (!(esp->sreg & ESP_STAT_INTR)) {
976 esp->ireg = esp_read8(ESP_INTRPT);
977 if (esp->ireg & ESP_INTR_SR)
978 return 1;
979
980 /* If the DMA is indicating interrupt pending and the
981 * ESP is not, the only possibility is a DMA error.
982 */
983 if (!esp->ops->dma_error(esp)) {
984 printk(KERN_ERR PFX "esp%d: Spurious irq, "
985 "sreg=%x.\n",
986 esp->host->unique_id, esp->sreg);
987 return -1;
988 }
989
990 printk(KERN_ERR PFX "esp%d: DMA error\n",
991 esp->host->unique_id);
992
993 /* XXX Reset the chip. XXX */
994 return -1;
995 }
996 break;
997 }
998
999 return 0;
1000}
1001
1002static void esp_schedule_reset(struct esp *esp)
1003{
1004 esp_log_reset("ESP: esp_schedule_reset() from %p\n",
1005 __builtin_return_address(0));
1006 esp->flags |= ESP_FLAG_RESETTING;
1007 esp_event(esp, ESP_EVENT_RESET);
1008}
1009
1010/* In order to avoid having to add a special half-reconnected state
1011 * into the driver we just sit here and poll through the rest of
1012 * the reselection process to get the tag message bytes.
1013 */
1014static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1015 struct esp_lun_data *lp)
1016{
1017 struct esp_cmd_entry *ent;
1018 int i;
1019
1020 if (!lp->num_tagged) {
1021 printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
1022 esp->host->unique_id);
1023 return NULL;
1024 }
1025
1026 esp_log_reconnect("ESP: reconnect tag, ");
1027
1028 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1029 if (esp->ops->irq_pending(esp))
1030 break;
1031 }
1032 if (i == ESP_QUICKIRQ_LIMIT) {
1033 printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
1034 esp->host->unique_id);
1035 return NULL;
1036 }
1037
1038 esp->sreg = esp_read8(ESP_STATUS);
1039 esp->ireg = esp_read8(ESP_INTRPT);
1040
1041 esp_log_reconnect("IRQ(%d:%x:%x), ",
1042 i, esp->ireg, esp->sreg);
1043
1044 if (esp->ireg & ESP_INTR_DC) {
1045 printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
1046 esp->host->unique_id);
1047 return NULL;
1048 }
1049
1050 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1051 printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
1052 esp->host->unique_id, esp->sreg);
1053 return NULL;
1054 }
1055
1056 /* DMA in the tag bytes... */
1057 esp->command_block[0] = 0xff;
1058 esp->command_block[1] = 0xff;
1059 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1060 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
1061
 1062 /* ACK the message. */
1063 scsi_esp_cmd(esp, ESP_CMD_MOK);
1064
1065 for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
1066 if (esp->ops->irq_pending(esp)) {
1067 esp->sreg = esp_read8(ESP_STATUS);
1068 esp->ireg = esp_read8(ESP_INTRPT);
1069 if (esp->ireg & ESP_INTR_FDONE)
1070 break;
1071 }
1072 udelay(1);
1073 }
1074 if (i == ESP_RESELECT_TAG_LIMIT) {
1075 printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
1076 esp->host->unique_id);
1077 return NULL;
1078 }
1079 esp->ops->dma_drain(esp);
1080 esp->ops->dma_invalidate(esp);
1081
1082 esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
1083 i, esp->ireg, esp->sreg,
1084 esp->command_block[0],
1085 esp->command_block[1]);
1086
1087 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1088 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1089 printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
1090 "type %02x.\n",
1091 esp->host->unique_id, esp->command_block[0]);
1092 return NULL;
1093 }
1094
1095 ent = lp->tagged_cmds[esp->command_block[1]];
1096 if (!ent) {
1097 printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
1098 "tag %02x.\n",
1099 esp->host->unique_id, esp->command_block[1]);
1100 return NULL;
1101 }
1102
1103 return ent;
1104}
1105
1106static int esp_reconnect(struct esp *esp)
1107{
1108 struct esp_cmd_entry *ent;
1109 struct esp_target_data *tp;
1110 struct esp_lun_data *lp;
1111 struct scsi_device *dev;
1112 int target, lun;
1113
1114 BUG_ON(esp->active_cmd);
1115 if (esp->rev == FASHME) {
1116 /* FASHME puts the target and lun numbers directly
1117 * into the fifo.
1118 */
1119 target = esp->fifo[0];
1120 lun = esp->fifo[1] & 0x7;
1121 } else {
1122 u8 bits = esp_read8(ESP_FDATA);
1123
1124 /* Older chips put the lun directly into the fifo, but
1125 * the target is given as a sample of the arbitration
1126 * lines on the bus at reselection time. So we should
1127 * see the ID of the ESP and the one reconnecting target
1128 * set in the bitmap.
1129 */
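		/* Worked example (illustrative): with this host at SCSI ID 7
		 * (scsi_id_mask 0x80) and target 3 reselecting us, the
		 * sampled byte is 0x88.  Our own bit must be present, and
		 * exactly one other bit may remain after masking it off
		 * ((bits & (bits - 1)) is the usual more-than-one-bit-set
		 * test); ffs(0x08) - 1 then recovers target 3.
		 */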
1130 if (!(bits & esp->scsi_id_mask))
1131 goto do_reset;
1132 bits &= ~esp->scsi_id_mask;
1133 if (!bits || (bits & (bits - 1)))
1134 goto do_reset;
1135
1136 target = ffs(bits) - 1;
1137 lun = (esp_read8(ESP_FDATA) & 0x7);
1138
1139 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1140 if (esp->rev == ESP100) {
1141 u8 ireg = esp_read8(ESP_INTRPT);
1142 /* This chip has a bug during reselection that can
1143 * cause a spurious illegal-command interrupt, which
1144 * we simply ACK here. Another possibility is a bus
1145 * reset so we must check for that.
1146 */
1147 if (ireg & ESP_INTR_SR)
1148 goto do_reset;
1149 }
1150 scsi_esp_cmd(esp, ESP_CMD_NULL);
1151 }
1152
1153 esp_write_tgt_sync(esp, target);
1154 esp_write_tgt_config3(esp, target);
1155
1156 scsi_esp_cmd(esp, ESP_CMD_MOK);
1157
1158 if (esp->rev == FASHME)
1159 esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
1160 ESP_BUSID);
1161
1162 tp = &esp->target[target];
1163 dev = __scsi_device_lookup_by_target(tp->starget, lun);
1164 if (!dev) {
1165 printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
1166 "tgt[%u] lun[%u]\n",
1167 esp->host->unique_id, target, lun);
1168 goto do_reset;
1169 }
1170 lp = dev->hostdata;
1171
1172 ent = lp->non_tagged_cmd;
1173 if (!ent) {
1174 ent = esp_reconnect_with_tag(esp, lp);
1175 if (!ent)
1176 goto do_reset;
1177 }
1178
1179 esp->active_cmd = ent;
1180
1181 if (ent->flags & ESP_CMD_FLAG_ABORT) {
1182 esp->msg_out[0] = ABORT_TASK_SET;
1183 esp->msg_out_len = 1;
1184 scsi_esp_cmd(esp, ESP_CMD_SATN);
1185 }
1186
1187 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1188 esp_restore_pointers(esp, ent);
1189 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1190 return 1;
1191
1192do_reset:
1193 esp_schedule_reset(esp);
1194 return 0;
1195}
1196
1197static int esp_finish_select(struct esp *esp)
1198{
1199 struct esp_cmd_entry *ent;
1200 struct scsi_cmnd *cmd;
1201 u8 orig_select_state;
1202
1203 orig_select_state = esp->select_state;
1204
1205 /* No longer selecting. */
1206 esp->select_state = ESP_SELECT_NONE;
1207
1208 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1209 ent = esp->active_cmd;
1210 cmd = ent->cmd;
1211
1212 if (esp->ops->dma_error(esp)) {
1213 /* If we see a DMA error during or as a result of selection,
1214 * all bets are off.
1215 */
1216 esp_schedule_reset(esp);
1217 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1218 return 0;
1219 }
1220
1221 esp->ops->dma_invalidate(esp);
1222
1223 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1224 struct esp_target_data *tp = &esp->target[cmd->device->id];
1225
1226 /* Carefully back out of the selection attempt. Release
1227 * resources (such as DMA mapping & TAG) and reset state (such
1228 * as message out and command delivery variables).
1229 */
1230 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1231 esp_unmap_dma(esp, cmd);
1232 esp_free_lun_tag(ent, cmd->device->hostdata);
1233 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1234 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1235 esp->cmd_bytes_ptr = NULL;
1236 esp->cmd_bytes_left = 0;
1237 } else {
1238 esp->ops->unmap_single(esp, ent->sense_dma,
1239 SCSI_SENSE_BUFFERSIZE,
1240 DMA_FROM_DEVICE);
1241 ent->sense_ptr = NULL;
1242 }
1243
1244 /* Now that the state is unwound properly, put back onto
1245 * the issue queue. This command is no longer active.
1246 */
1247 list_del(&ent->list);
1248 list_add(&ent->list, &esp->queued_cmds);
1249 esp->active_cmd = NULL;
1250
1251 /* Return value ignored by caller, it directly invokes
1252 * esp_reconnect().
1253 */
1254 return 0;
1255 }
1256
1257 if (esp->ireg == ESP_INTR_DC) {
1258 struct scsi_device *dev = cmd->device;
1259
1260 /* Disconnect. Make sure we re-negotiate sync and
1261 * wide parameters if this target starts responding
1262 * again in the future.
1263 */
1264 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1265
1266 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1267 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1268 return 1;
1269 }
1270
1271 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1272 /* Selection successful. On pre-FAST chips we have
1273 * to do a NOP and possibly clean out the FIFO.
1274 */
1275 if (esp->rev <= ESP236) {
1276 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1277
1278 scsi_esp_cmd(esp, ESP_CMD_NULL);
1279
1280 if (!fcnt &&
1281 (!esp->prev_soff ||
1282 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1283 esp_flush_fifo(esp);
1284 }
1285
1286 /* If we are doing a slow command, negotiation, etc.
1287 * we'll do the right thing as we transition to the
1288 * next phase.
1289 */
1290 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1291 return 0;
1292 }
1293
1294 printk("ESP: Unexpected selection completion ireg[%x].\n",
1295 esp->ireg);
1296 esp_schedule_reset(esp);
1297 return 0;
1298}
1299
1300static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1301 struct scsi_cmnd *cmd)
1302{
1303 int fifo_cnt, ecount, bytes_sent, flush_fifo;
1304
1305 fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1306 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1307 fifo_cnt <<= 1;
1308
1309 ecount = 0;
1310 if (!(esp->sreg & ESP_STAT_TCNT)) {
1311 ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
1312 (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1313 if (esp->rev == FASHME)
1314 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1315 }
1316
1317 bytes_sent = esp->data_dma_len;
1318 bytes_sent -= ecount;
1319
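	/* Note (illustrative): ESP_CMD_FLAG_WRITE means data-in.  On a
	 * data-out transfer, bytes still sitting in the chip FIFO were
	 * fetched from memory by DMA but never made it onto the bus, so
	 * they are not counted as sent either.
	 */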
1320 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1321 bytes_sent -= fifo_cnt;
1322
1323 flush_fifo = 0;
1324 if (!esp->prev_soff) {
1325 /* Synchronous data transfer, always flush fifo. */
1326 flush_fifo = 1;
1327 } else {
1328 if (esp->rev == ESP100) {
1329 u32 fflags, phase;
1330
1331 /* ESP100 has a chip bug where in the synchronous data
1332 * phase it can mistake a final long REQ pulse from the
1333 * target as an extra data byte. Fun.
1334 *
1335 * To detect this case we resample the status register
1336 * and fifo flags. If we're still in a data phase and
1337 * we see spurious chunks in the fifo, we return error
1338 * to the caller which should reset and set things up
1339 * such that we only try future transfers to this
1340 * target in synchronous mode.
1341 */
1342 esp->sreg = esp_read8(ESP_STATUS);
1343 phase = esp->sreg & ESP_STAT_PMASK;
1344 fflags = esp_read8(ESP_FFLAGS);
1345
1346 if ((phase == ESP_DOP &&
1347 (fflags & ESP_FF_ONOTZERO)) ||
1348 (phase == ESP_DIP &&
1349 (fflags & ESP_FF_FBYTES)))
1350 return -1;
1351 }
1352 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1353 flush_fifo = 1;
1354 }
1355
1356 if (flush_fifo)
1357 esp_flush_fifo(esp);
1358
1359 return bytes_sent;
1360}
1361
1362static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1363 u8 scsi_period, u8 scsi_offset,
1364 u8 esp_stp, u8 esp_soff)
1365{
1366 spi_period(tp->starget) = scsi_period;
1367 spi_offset(tp->starget) = scsi_offset;
1368 spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
1369
1370 if (esp_soff) {
1371 esp_stp &= 0x1f;
1372 esp_soff |= esp->radelay;
1373 if (esp->rev >= FAS236) {
1374 u8 bit = ESP_CONFIG3_FSCSI;
1375 if (esp->rev >= FAS100A)
1376 bit = ESP_CONFIG3_FAST;
1377
1378 if (scsi_period < 50) {
1379 if (esp->rev == FASHME)
1380 esp_soff &= ~esp->radelay;
1381 tp->esp_config3 |= bit;
1382 } else {
1383 tp->esp_config3 &= ~bit;
1384 }
1385 esp->prev_cfg3 = tp->esp_config3;
1386 esp_write8(esp->prev_cfg3, ESP_CFG3);
1387 }
1388 }
1389
1390 tp->esp_period = esp->prev_stp = esp_stp;
1391 tp->esp_offset = esp->prev_soff = esp_soff;
1392
1393 esp_write8(esp_soff, ESP_SOFF);
1394 esp_write8(esp_stp, ESP_STP);
1395
1396 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1397
1398 spi_display_xfer_agreement(tp->starget);
1399}
1400
1401static void esp_msgin_reject(struct esp *esp)
1402{
1403 struct esp_cmd_entry *ent = esp->active_cmd;
1404 struct scsi_cmnd *cmd = ent->cmd;
1405 struct esp_target_data *tp;
1406 int tgt;
1407
1408 tgt = cmd->device->id;
1409 tp = &esp->target[tgt];
1410
1411 if (tp->flags & ESP_TGT_NEGO_WIDE) {
1412 tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
1413
1414 if (!esp_need_to_nego_sync(tp)) {
1415 tp->flags &= ~ESP_TGT_CHECK_NEGO;
1416 scsi_esp_cmd(esp, ESP_CMD_RATN);
1417 } else {
1418 esp->msg_out_len =
1419 spi_populate_sync_msg(&esp->msg_out[0],
1420 tp->nego_goal_period,
1421 tp->nego_goal_offset);
1422 tp->flags |= ESP_TGT_NEGO_SYNC;
1423 scsi_esp_cmd(esp, ESP_CMD_SATN);
1424 }
1425 return;
1426 }
1427
1428 if (tp->flags & ESP_TGT_NEGO_SYNC) {
1429 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1430 tp->esp_period = 0;
1431 tp->esp_offset = 0;
1432 esp_setsync(esp, tp, 0, 0, 0, 0);
1433 scsi_esp_cmd(esp, ESP_CMD_RATN);
1434 return;
1435 }
1436
1437 esp->msg_out[0] = ABORT_TASK_SET;
1438 esp->msg_out_len = 1;
1439 scsi_esp_cmd(esp, ESP_CMD_SATN);
1440}
1441
1442static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1443{
1444 u8 period = esp->msg_in[3];
1445 u8 offset = esp->msg_in[4];
1446 u8 stp;
1447
1448 if (!(tp->flags & ESP_TGT_NEGO_SYNC))
1449 goto do_reject;
1450
1451 if (offset > 15)
1452 goto do_reject;
1453
1454 if (offset) {
1455 int rounded_up, one_clock;
1456
1457 if (period > esp->max_period) {
1458 period = offset = 0;
1459 goto do_sdtr;
1460 }
1461 if (period < esp->min_period)
1462 goto do_reject;
1463
1464 one_clock = esp->ccycle / 1000;
1465 rounded_up = (period << 2);
1466 rounded_up = (rounded_up + one_clock - 1) / one_clock;
1467 stp = rounded_up;
1468 if (stp && esp->rev >= FAS236) {
1469 if (stp >= 50)
1470 stp--;
1471 }
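		/* Illustrative arithmetic, assuming ccycle/1000 is the clock
		 * period in ns: the SDTR period byte is in 4ns units, so
		 * with a 20MHz clock (50ns per tick) a requested period of
		 * 100 (400ns) rounds up to 400/50 = 8 clocks in the transfer
		 * period register.
		 */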
1472 } else {
1473 stp = 0;
1474 }
1475
1476 esp_setsync(esp, tp, period, offset, stp, offset);
1477 return;
1478
1479do_reject:
1480 esp->msg_out[0] = MESSAGE_REJECT;
1481 esp->msg_out_len = 1;
1482 scsi_esp_cmd(esp, ESP_CMD_SATN);
1483 return;
1484
1485do_sdtr:
1486 tp->nego_goal_period = period;
1487 tp->nego_goal_offset = offset;
1488 esp->msg_out_len =
1489 spi_populate_sync_msg(&esp->msg_out[0],
1490 tp->nego_goal_period,
1491 tp->nego_goal_offset);
1492 scsi_esp_cmd(esp, ESP_CMD_SATN);
1493}
1494
1495static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1496{
1497 int size = 8 << esp->msg_in[3];
1498 u8 cfg3;
1499
1500 if (esp->rev != FASHME)
1501 goto do_reject;
1502
1503 if (size != 8 && size != 16)
1504 goto do_reject;
1505
1506 if (!(tp->flags & ESP_TGT_NEGO_WIDE))
1507 goto do_reject;
1508
1509 cfg3 = tp->esp_config3;
1510 if (size == 16) {
1511 tp->flags |= ESP_TGT_WIDE;
1512 cfg3 |= ESP_CONFIG3_EWIDE;
1513 } else {
1514 tp->flags &= ~ESP_TGT_WIDE;
1515 cfg3 &= ~ESP_CONFIG3_EWIDE;
1516 }
1517 tp->esp_config3 = cfg3;
1518 esp->prev_cfg3 = cfg3;
1519 esp_write8(cfg3, ESP_CFG3);
1520
1521 tp->flags &= ~ESP_TGT_NEGO_WIDE;
1522
1523 spi_period(tp->starget) = 0;
1524 spi_offset(tp->starget) = 0;
1525 if (!esp_need_to_nego_sync(tp)) {
1526 tp->flags &= ~ESP_TGT_CHECK_NEGO;
1527 scsi_esp_cmd(esp, ESP_CMD_RATN);
1528 } else {
1529 esp->msg_out_len =
1530 spi_populate_sync_msg(&esp->msg_out[0],
1531 tp->nego_goal_period,
1532 tp->nego_goal_offset);
1533 tp->flags |= ESP_TGT_NEGO_SYNC;
1534 scsi_esp_cmd(esp, ESP_CMD_SATN);
1535 }
1536 return;
1537
1538do_reject:
1539 esp->msg_out[0] = MESSAGE_REJECT;
1540 esp->msg_out_len = 1;
1541 scsi_esp_cmd(esp, ESP_CMD_SATN);
1542}
1543
1544static void esp_msgin_extended(struct esp *esp)
1545{
1546 struct esp_cmd_entry *ent = esp->active_cmd;
1547 struct scsi_cmnd *cmd = ent->cmd;
1548 struct esp_target_data *tp;
1549 int tgt = cmd->device->id;
1550
1551 tp = &esp->target[tgt];
1552 if (esp->msg_in[2] == EXTENDED_SDTR) {
1553 esp_msgin_sdtr(esp, tp);
1554 return;
1555 }
1556 if (esp->msg_in[2] == EXTENDED_WDTR) {
1557 esp_msgin_wdtr(esp, tp);
1558 return;
1559 }
1560
1561 printk("ESP: Unexpected extended msg type %x\n",
1562 esp->msg_in[2]);
1563
1564 esp->msg_out[0] = ABORT_TASK_SET;
1565 esp->msg_out_len = 1;
1566 scsi_esp_cmd(esp, ESP_CMD_SATN);
1567}
1568
1569/* Analyze msgin bytes received from target so far. Return non-zero
1570 * if there are more bytes needed to complete the message.
1571 */
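/* For example: an SDTR is the extended message
 * { 0x01, 0x03, 0x01, period, offset }, so after the first byte we keep
 * returning 1 until len reaches msg_in[1] + 2 == 5 and only then hand the
 * complete message to esp_msgin_extended().
 */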
1572static int esp_msgin_process(struct esp *esp)
1573{
1574 u8 msg0 = esp->msg_in[0];
1575 int len = esp->msg_in_len;
1576
1577 if (msg0 & 0x80) {
1578 /* Identify */
1579 printk("ESP: Unexpected msgin identify\n");
1580 return 0;
1581 }
1582
1583 switch (msg0) {
1584 case EXTENDED_MESSAGE:
1585 if (len == 1)
1586 return 1;
1587 if (len < esp->msg_in[1] + 2)
1588 return 1;
1589 esp_msgin_extended(esp);
1590 return 0;
1591
1592 case IGNORE_WIDE_RESIDUE: {
1593 struct esp_cmd_entry *ent;
1594 struct esp_cmd_priv *spriv;
1595 if (len == 1)
1596 return 1;
1597
1598 if (esp->msg_in[1] != 1)
1599 goto do_reject;
1600
1601 ent = esp->active_cmd;
1602 spriv = ESP_CMD_PRIV(ent->cmd);
1603
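		/* Descriptive note: IGNORE WIDE RESIDUE tells us the final
		 * byte of the last wide transfer was a pad byte, so one byte
		 * is given back to the residue accounting; if the current SG
		 * element is still untouched we step back into the previous
		 * one instead.
		 */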
1604 if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
1605 spriv->cur_sg--;
1606 spriv->cur_residue = 1;
1607 } else
1608 spriv->cur_residue++;
1609 spriv->tot_residue++;
1610 return 0;
1611 }
1612 case NOP:
1613 return 0;
1614 case RESTORE_POINTERS:
1615 esp_restore_pointers(esp, esp->active_cmd);
1616 return 0;
1617 case SAVE_POINTERS:
1618 esp_save_pointers(esp, esp->active_cmd);
1619 return 0;
1620
1621 case COMMAND_COMPLETE:
1622 case DISCONNECT: {
1623 struct esp_cmd_entry *ent = esp->active_cmd;
1624
1625 ent->message = msg0;
1626 esp_event(esp, ESP_EVENT_FREE_BUS);
1627 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1628 return 0;
1629 }
1630 case MESSAGE_REJECT:
1631 esp_msgin_reject(esp);
1632 return 0;
1633
1634 default:
1635 do_reject:
1636 esp->msg_out[0] = MESSAGE_REJECT;
1637 esp->msg_out_len = 1;
1638 scsi_esp_cmd(esp, ESP_CMD_SATN);
1639 return 0;
1640 }
1641}
1642
1643static int esp_process_event(struct esp *esp)
1644{
1645 int write;
1646
1647again:
1648 write = 0;
1649 switch (esp->event) {
1650 case ESP_EVENT_CHECK_PHASE:
1651 switch (esp->sreg & ESP_STAT_PMASK) {
1652 case ESP_DOP:
1653 esp_event(esp, ESP_EVENT_DATA_OUT);
1654 break;
1655 case ESP_DIP:
1656 esp_event(esp, ESP_EVENT_DATA_IN);
1657 break;
1658 case ESP_STATP:
1659 esp_flush_fifo(esp);
1660 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1661 esp_event(esp, ESP_EVENT_STATUS);
1662 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1663 return 1;
1664
1665 case ESP_MOP:
1666 esp_event(esp, ESP_EVENT_MSGOUT);
1667 break;
1668
1669 case ESP_MIP:
1670 esp_event(esp, ESP_EVENT_MSGIN);
1671 break;
1672
1673 case ESP_CMDP:
1674 esp_event(esp, ESP_EVENT_CMD_START);
1675 break;
1676
1677 default:
1678 printk("ESP: Unexpected phase, sreg=%02x\n",
1679 esp->sreg);
1680 esp_schedule_reset(esp);
1681 return 0;
1682 }
1683 goto again;
1684 break;
1685
1686 case ESP_EVENT_DATA_IN:
1687 write = 1;
1688 /* fallthru */
1689
1690 case ESP_EVENT_DATA_OUT: {
1691 struct esp_cmd_entry *ent = esp->active_cmd;
1692 struct scsi_cmnd *cmd = ent->cmd;
1693 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1694 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1695
1696 if (esp->rev == ESP100)
1697 scsi_esp_cmd(esp, ESP_CMD_NULL);
1698
1699 if (write)
1700 ent->flags |= ESP_CMD_FLAG_WRITE;
1701 else
1702 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1703
1704 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1705 esp->data_dma_len = dma_len;
1706
1707 if (!dma_len) {
1708 printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1709 esp->host->unique_id);
1710 printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
1711 esp->host->unique_id,
1712 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1713 esp_cur_dma_len(ent, cmd));
1714 esp_schedule_reset(esp);
1715 return 0;
1716 }
1717
1718 esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
1719 "write(%d)\n",
1720 (unsigned long long)dma_addr, dma_len, write);
1721
1722 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1723 write, ESP_CMD_DMA | ESP_CMD_TI);
1724 esp_event(esp, ESP_EVENT_DATA_DONE);
1725 break;
1726 }
1727 case ESP_EVENT_DATA_DONE: {
1728 struct esp_cmd_entry *ent = esp->active_cmd;
1729 struct scsi_cmnd *cmd = ent->cmd;
1730 int bytes_sent;
1731
1732 if (esp->ops->dma_error(esp)) {
1733 printk("ESP: data done, DMA error, resetting\n");
1734 esp_schedule_reset(esp);
1735 return 0;
1736 }
1737
1738 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1739 /* XXX parity errors, etc. XXX */
1740
1741 esp->ops->dma_drain(esp);
1742 }
1743 esp->ops->dma_invalidate(esp);
1744
1745 if (esp->ireg != ESP_INTR_BSERV) {
1746 /* We should always see exactly a bus-service
1747 * interrupt at the end of a successful transfer.
1748 */
1749 printk("ESP: data done, not BSERV, resetting\n");
1750 esp_schedule_reset(esp);
1751 return 0;
1752 }
1753
1754 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1755
1756 esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1757 ent->flags, bytes_sent);
1758
1759 if (bytes_sent < 0) {
1760 /* XXX force sync mode for this target XXX */
1761 esp_schedule_reset(esp);
1762 return 0;
1763 }
1764
1765 esp_advance_dma(esp, ent, cmd, bytes_sent);
1766 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1767 goto again;
1768 break;
1769 }
1770
1771 case ESP_EVENT_STATUS: {
1772 struct esp_cmd_entry *ent = esp->active_cmd;
1773
1774 if (esp->ireg & ESP_INTR_FDONE) {
1775 ent->status = esp_read8(ESP_FDATA);
1776 ent->message = esp_read8(ESP_FDATA);
1777 scsi_esp_cmd(esp, ESP_CMD_MOK);
1778 } else if (esp->ireg == ESP_INTR_BSERV) {
1779 ent->status = esp_read8(ESP_FDATA);
1780 ent->message = 0xff;
1781 esp_event(esp, ESP_EVENT_MSGIN);
1782 return 0;
1783 }
1784
1785 if (ent->message != COMMAND_COMPLETE) {
1786 printk("ESP: Unexpected message %x in status\n",
1787 ent->message);
1788 esp_schedule_reset(esp);
1789 return 0;
1790 }
1791
1792 esp_event(esp, ESP_EVENT_FREE_BUS);
1793 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1794 break;
1795 }
1796 case ESP_EVENT_FREE_BUS: {
1797 struct esp_cmd_entry *ent = esp->active_cmd;
1798 struct scsi_cmnd *cmd = ent->cmd;
1799
1800 if (ent->message == COMMAND_COMPLETE ||
1801 ent->message == DISCONNECT)
1802 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1803
1804 if (ent->message == COMMAND_COMPLETE) {
1805 esp_log_cmddone("ESP: Command done status[%x] "
1806 "message[%x]\n",
1807 ent->status, ent->message);
1808 if (ent->status == SAM_STAT_TASK_SET_FULL)
1809 esp_event_queue_full(esp, ent);
1810
1811 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1812 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1813 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1814 esp_autosense(esp, ent);
1815 } else {
1816 esp_cmd_is_done(esp, ent, cmd,
1817 compose_result(ent->status,
1818 ent->message,
1819 DID_OK));
1820 }
1821 } else if (ent->message == DISCONNECT) {
1822 esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1823 "tag[%x:%x]\n",
1824 cmd->device->id,
1825 ent->tag[0], ent->tag[1]);
1826
1827 esp->active_cmd = NULL;
1828 esp_maybe_execute_command(esp);
1829 } else {
1830 printk("ESP: Unexpected message %x in freebus\n",
1831 ent->message);
1832 esp_schedule_reset(esp);
1833 return 0;
1834 }
1835 if (esp->active_cmd)
1836 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1837 break;
1838 }
1839 case ESP_EVENT_MSGOUT: {
1840 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1841
1842 if (esp_debug & ESP_DEBUG_MSGOUT) {
1843 int i;
1844 printk("ESP: Sending message [ ");
1845 for (i = 0; i < esp->msg_out_len; i++)
1846 printk("%02x ", esp->msg_out[i]);
1847 printk("]\n");
1848 }
1849
1850 if (esp->rev == FASHME) {
1851 int i;
1852
1853 /* Always use the fifo. */
1854 for (i = 0; i < esp->msg_out_len; i++) {
1855 esp_write8(esp->msg_out[i], ESP_FDATA);
1856 esp_write8(0, ESP_FDATA);
1857 }
1858 scsi_esp_cmd(esp, ESP_CMD_TI);
1859 } else {
1860 if (esp->msg_out_len == 1) {
1861 esp_write8(esp->msg_out[0], ESP_FDATA);
1862 scsi_esp_cmd(esp, ESP_CMD_TI);
1863 } else {
1864 /* Use DMA. */
1865 memcpy(esp->command_block,
1866 esp->msg_out,
1867 esp->msg_out_len);
1868
1869 esp->ops->send_dma_cmd(esp,
1870 esp->command_block_dma,
1871 esp->msg_out_len,
1872 esp->msg_out_len,
1873 0,
1874 ESP_CMD_DMA|ESP_CMD_TI);
1875 }
1876 }
1877 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1878 break;
1879 }
1880 case ESP_EVENT_MSGOUT_DONE:
1881 if (esp->rev == FASHME) {
1882 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1883 } else {
1884 if (esp->msg_out_len > 1)
1885 esp->ops->dma_invalidate(esp);
1886 }
1887
1888 if (!(esp->ireg & ESP_INTR_DC)) {
1889 if (esp->rev != FASHME)
1890 scsi_esp_cmd(esp, ESP_CMD_NULL);
1891 }
1892 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1893 goto again;
1894 case ESP_EVENT_MSGIN:
1895 if (esp->ireg & ESP_INTR_BSERV) {
1896 if (esp->rev == FASHME) {
1897 if (!(esp_read8(ESP_STATUS2) &
1898 ESP_STAT2_FEMPTY))
1899 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1900 } else {
1901 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1902 if (esp->rev == ESP100)
1903 scsi_esp_cmd(esp, ESP_CMD_NULL);
1904 }
1905 scsi_esp_cmd(esp, ESP_CMD_TI);
1906 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1907 return 1;
1908 }
1909 if (esp->ireg & ESP_INTR_FDONE) {
1910 u8 val;
1911
1912 if (esp->rev == FASHME)
1913 val = esp->fifo[0];
1914 else
1915 val = esp_read8(ESP_FDATA);
1916 esp->msg_in[esp->msg_in_len++] = val;
1917
1918 esp_log_msgin("ESP: Got msgin byte %x\n", val);
1919
1920 if (!esp_msgin_process(esp))
1921 esp->msg_in_len = 0;
1922
1923 if (esp->rev == FASHME)
1924 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1925
1926 scsi_esp_cmd(esp, ESP_CMD_MOK);
1927
1928 if (esp->event != ESP_EVENT_FREE_BUS)
1929 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1930 } else {
 1931 printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
1932 esp_schedule_reset(esp);
1933 return 0;
1934 }
1935 break;
1936 case ESP_EVENT_CMD_START:
1937 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1938 esp->cmd_bytes_left);
1939 if (esp->rev == FASHME)
1940 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1941 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1942 esp->cmd_bytes_left, 16, 0,
1943 ESP_CMD_DMA | ESP_CMD_TI);
1944 esp_event(esp, ESP_EVENT_CMD_DONE);
1945 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1946 break;
1947 case ESP_EVENT_CMD_DONE:
1948 esp->ops->dma_invalidate(esp);
1949 if (esp->ireg & ESP_INTR_BSERV) {
1950 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1951 goto again;
1952 }
1953 esp_schedule_reset(esp);
1954 return 0;
1955 break;
1956
1957 case ESP_EVENT_RESET:
1958 scsi_esp_cmd(esp, ESP_CMD_RS);
1959 break;
1960
1961 default:
1962 printk("ESP: Unexpected event %x, resetting\n",
1963 esp->event);
1964 esp_schedule_reset(esp);
1965 return 0;
1966 break;
1967 }
1968 return 1;
1969}
1970
1971static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1972{
1973 struct scsi_cmnd *cmd = ent->cmd;
1974
1975 esp_unmap_dma(esp, cmd);
1976 esp_free_lun_tag(ent, cmd->device->hostdata);
1977 cmd->result = DID_RESET << 16;
1978
1979 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
1980 esp->ops->unmap_single(esp, ent->sense_dma,
1981 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1982 ent->sense_ptr = NULL;
1983 }
1984
1985 cmd->scsi_done(cmd);
1986 list_del(&ent->list);
1987 esp_put_ent(esp, ent);
1988}
1989
1990static void esp_clear_hold(struct scsi_device *dev, void *data)
1991{
1992 struct esp_lun_data *lp = dev->hostdata;
1993
1994 BUG_ON(lp->num_tagged);
1995 lp->hold = 0;
1996}
1997
1998static void esp_reset_cleanup(struct esp *esp)
1999{
2000 struct esp_cmd_entry *ent, *tmp;
2001 int i;
2002
2003 list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2004 struct scsi_cmnd *cmd = ent->cmd;
2005
2006 list_del(&ent->list);
2007 cmd->result = DID_RESET << 16;
2008 cmd->scsi_done(cmd);
2009 esp_put_ent(esp, ent);
2010 }
2011
2012 list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2013 if (ent == esp->active_cmd)
2014 esp->active_cmd = NULL;
2015 esp_reset_cleanup_one(esp, ent);
2016 }
2017
2018 BUG_ON(esp->active_cmd != NULL);
2019
2020 /* Force renegotiation of sync/wide transfers. */
2021 for (i = 0; i < ESP_MAX_TARGET; i++) {
2022 struct esp_target_data *tp = &esp->target[i];
2023
2024 tp->esp_period = 0;
2025 tp->esp_offset = 0;
2026 tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2027 ESP_CONFIG3_FSCSI |
2028 ESP_CONFIG3_FAST);
2029 tp->flags &= ~ESP_TGT_WIDE;
2030 tp->flags |= ESP_TGT_CHECK_NEGO;
2031
2032 if (tp->starget)
2033 starget_for_each_device(tp->starget, NULL,
2034 esp_clear_hold);
2035 }
2036}
2037
2038/* Runs under host->lock */
2039static void __esp_interrupt(struct esp *esp)
2040{
2041 int finish_reset, intr_done;
2042 u8 phase;
2043
2044 esp->sreg = esp_read8(ESP_STATUS);
2045
2046 if (esp->flags & ESP_FLAG_RESETTING) {
2047 finish_reset = 1;
2048 } else {
2049 if (esp_check_gross_error(esp))
2050 return;
2051
2052 finish_reset = esp_check_spur_intr(esp);
2053 if (finish_reset < 0)
2054 return;
2055 }
2056
2057 esp->ireg = esp_read8(ESP_INTRPT);
2058
2059 if (esp->ireg & ESP_INTR_SR)
2060 finish_reset = 1;
2061
2062 if (finish_reset) {
2063 esp_reset_cleanup(esp);
2064 if (esp->eh_reset) {
2065 complete(esp->eh_reset);
2066 esp->eh_reset = NULL;
2067 }
2068 return;
2069 }
2070
2071 phase = (esp->sreg & ESP_STAT_PMASK);
2072 if (esp->rev == FASHME) {
2073 if (((phase != ESP_DIP && phase != ESP_DOP) &&
2074 esp->select_state == ESP_SELECT_NONE &&
2075 esp->event != ESP_EVENT_STATUS &&
2076 esp->event != ESP_EVENT_DATA_DONE) ||
2077 (esp->ireg & ESP_INTR_RSEL)) {
2078 esp->sreg2 = esp_read8(ESP_STATUS2);
2079 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2080 (esp->sreg2 & ESP_STAT2_F1BYTE))
2081 hme_read_fifo(esp);
2082 }
2083 }
2084
2085 esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
2086 "sreg2[%02x] ireg[%02x]\n",
2087 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2088
2089 intr_done = 0;
2090
2091 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2092 printk("ESP: unexpected IREG %02x\n", esp->ireg);
2093 if (esp->ireg & ESP_INTR_IC)
2094 esp_dump_cmd_log(esp);
2095
2096 esp_schedule_reset(esp);
2097 } else {
2098 if (!(esp->ireg & ESP_INTR_RSEL)) {
2099 /* Some combination of FDONE, BSERV, DC. */
2100 if (esp->select_state != ESP_SELECT_NONE)
2101 intr_done = esp_finish_select(esp);
2102 } else if (esp->ireg & ESP_INTR_RSEL) {
2103 if (esp->active_cmd)
2104 (void) esp_finish_select(esp);
2105 intr_done = esp_reconnect(esp);
2106 }
2107 }
2108 while (!intr_done)
2109 intr_done = esp_process_event(esp);
2110}
2111
2112irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2113{
2114 struct esp *esp = dev_id;
2115 unsigned long flags;
2116 irqreturn_t ret;
2117
2118 spin_lock_irqsave(esp->host->host_lock, flags);
2119 ret = IRQ_NONE;
2120 if (esp->ops->irq_pending(esp)) {
2121 ret = IRQ_HANDLED;
2122 for (;;) {
2123 int i;
2124
2125 __esp_interrupt(esp);
2126 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2127 break;
2128 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2129
2130 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2131 if (esp->ops->irq_pending(esp))
2132 break;
2133 }
2134 if (i == ESP_QUICKIRQ_LIMIT)
2135 break;
2136 }
2137 }
2138 spin_unlock_irqrestore(esp->host->host_lock, flags);
2139
2140 return ret;
2141}
2142EXPORT_SYMBOL(scsi_esp_intr);
2143
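/* Revision probing, in brief (a summary of the logic below): if CFG2 does
 * not read back the chip is an ESP100; if CFG2 works but CFG3 does not it
 * is an ESP100A; if all three config registers are implemented it is one
 * of the FAS variants, told apart here by the clock conversion factor.
 */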
2144static void __devinit esp_get_revision(struct esp *esp)
2145{
2146 u8 val;
2147
2148 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2149 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2150 esp_write8(esp->config2, ESP_CFG2);
2151
2152 val = esp_read8(ESP_CFG2);
2153 val &= ~ESP_CONFIG2_MAGIC;
2154 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2155 /* If what we write to cfg2 does not come back, cfg2 is not
2156 * implemented, therefore this must be a plain esp100.
2157 */
2158 esp->rev = ESP100;
2159 } else {
2160 esp->config2 = 0;
2161 esp_set_all_config3(esp, 5);
2162 esp->prev_cfg3 = 5;
2163 esp_write8(esp->config2, ESP_CFG2);
2164 esp_write8(0, ESP_CFG3);
2165 esp_write8(esp->prev_cfg3, ESP_CFG3);
2166
2167 val = esp_read8(ESP_CFG3);
2168 if (val != 5) {
2169 /* The cfg2 register is implemented, however
2170 * cfg3 is not, must be esp100a.
2171 */
2172 esp->rev = ESP100A;
2173 } else {
2174 esp_set_all_config3(esp, 0);
2175 esp->prev_cfg3 = 0;
2176 esp_write8(esp->prev_cfg3, ESP_CFG3);
2177
2178 /* All of cfg{1,2,3} implemented, must be one of
2179 * the fas variants, figure out which one.
2180 */
2181 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2182 esp->rev = FAST;
2183 esp->sync_defp = SYNC_DEFP_FAST;
2184 } else {
2185 esp->rev = ESP236;
2186 }
2187 esp->config2 = 0;
2188 esp_write8(esp->config2, ESP_CFG2);
2189 }
2190 }
2191}
2192
2193static void __devinit esp_init_swstate(struct esp *esp)
2194{
2195 int i;
2196
2197 INIT_LIST_HEAD(&esp->queued_cmds);
2198 INIT_LIST_HEAD(&esp->active_cmds);
2199 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2200
2201 /* Start with a clear state, domain validation (via ->slave_configure,
2202 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2203 * commands.
2204 */
2205 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2206 esp->target[i].flags = 0;
2207 esp->target[i].nego_goal_period = 0;
2208 esp->target[i].nego_goal_offset = 0;
2209 esp->target[i].nego_goal_width = 0;
2210 esp->target[i].nego_goal_tags = 0;
2211 }
2212}
2213
2214/* This places the ESP into a known state at boot time. */
2215static void __devinit esp_bootup_reset(struct esp *esp)
2216{
2217 u8 val;
2218
2219 /* Reset the DMA */
2220 esp->ops->reset_dma(esp);
2221
2222 /* Reset the ESP */
2223 esp_reset_esp(esp);
2224
2225 /* Reset the SCSI bus, but tell ESP not to generate an irq */
2226 val = esp_read8(ESP_CFG1);
2227 val |= ESP_CONFIG1_SRRDISAB;
2228 esp_write8(val, ESP_CFG1);
2229
2230 scsi_esp_cmd(esp, ESP_CMD_RS);
2231 udelay(400);
2232
2233 esp_write8(esp->config1, ESP_CFG1);
2234
2235 /* Eat any bitrot in the chip and we are done... */
2236 esp_read8(ESP_INTRPT);
2237}
2238
2239static void __devinit esp_set_clock_params(struct esp *esp)
2240{
2241 int fmhz;
2242 u8 ccf;
2243
2244 /* This is getting messy but it has to be done correctly or else
2245 * you get weird behavior all over the place. We are trying to
2246 * basically figure out three pieces of information.
2247 *
2248 * a) Clock Conversion Factor
2249 *
2250 * This is a representation of the input crystal clock frequency
2251 * going into the ESP on this machine. Any operation whose timing
2252 * is longer than 400ns depends on this value being correct. For
2253 * example, you'll get blips for arbitration/selection during high
2254 * load or with multiple targets if this is not set correctly.
2255 *
2256 * b) Selection Time-Out
2257 *
2258 * The ESP isn't very bright and will arbitrate for the bus and try
2259 * to select a target forever if you let it. This value tells the
2260 * ESP when it has taken too long to negotiate and that it should
2261 * interrupt the CPU so we can see what happened. The value is
2262 * computed as follows (from NCR/Symbios chip docs).
2263 *
2264 * (Time Out Period) * (Input Clock)
2265 * STO = ----------------------------------
2266 * (8192) * (Clock Conversion Factor)
2267 *
2268 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2269 *
 2270 * c) Empirical constants for synchronous offset and transfer period
2271 * register values
2272 *
2273 * This entails the smallest and largest sync period we could ever
2274 * handle on this ESP.
2275 */
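	/* Worked example (illustrative): with a 20MHz crystal and CCF 4, a
	 * 250ms selection time-out gives STO = (0.25 * 20,000,000) /
	 * (8192 * 4), roughly 153.  The conversion factor itself is just
	 * the clock in MHz divided by 5 and rounded up (25MHz -> 5), and
	 * the chip encodes a CCF of 8 as 0, which is why cfact is forced
	 * to 0 below for that case.
	 */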
2276 fmhz = esp->cfreq;
2277
2278 ccf = ((fmhz / 1000000) + 4) / 5;
2279 if (ccf == 1)
2280 ccf = 2;
2281
2282 /* If we can't find anything reasonable, just assume 20MHZ.
2283 * This is the clock frequency of the older sun4c's where I've
2284 * been unable to find the clock-frequency PROM property. All
2285 * other machines provide useful values it seems.
2286 */
2287 if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
2288 fmhz = 20000000;
2289 ccf = 4;
2290 }
2291
2292 esp->cfact = (ccf == 8 ? 0 : ccf);
2293 esp->cfreq = fmhz;
2294 esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
2295 esp->ctick = ESP_TICK(ccf, esp->ccycle);
2296 esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
2297 esp->sync_defp = SYNC_DEFP_SLOW;
2298}
2299
2300static const char *esp_chip_names[] = {
2301 "ESP100",
2302 "ESP100A",
2303 "ESP236",
2304 "FAS236",
2305 "FAS100A",
2306 "FAST",
2307 "FASHME",
2308};
2309
2310static struct scsi_transport_template *esp_transport_template;
2311
2312int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2313{
2314 static int instance;
2315 int err;
2316
2317 esp->host->transportt = esp_transport_template;
2318 esp->host->max_lun = ESP_MAX_LUN;
2319 esp->host->cmd_per_lun = 2;
2320
2321 esp_set_clock_params(esp);
2322
2323 esp_get_revision(esp);
2324
2325 esp_init_swstate(esp);
2326
2327 esp_bootup_reset(esp);
2328
2329 printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2330 esp->host->unique_id, esp->regs, esp->dma_regs,
2331 esp->host->irq);
2332 printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2333 esp->host->unique_id, esp_chip_names[esp->rev],
2334 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2335
2336 /* Let the SCSI bus reset settle. */
2337 ssleep(esp_bus_reset_settle);
2338
2339 err = scsi_add_host(esp->host, dev);
2340 if (err)
2341 return err;
2342
2343 esp->host->unique_id = instance++;
2344
2345 scsi_scan_host(esp->host);
2346
2347 return 0;
2348}
2349EXPORT_SYMBOL(scsi_esp_register);
2350
2351void __devexit scsi_esp_unregister(struct esp *esp)
2352{
2353 scsi_remove_host(esp->host);
2354}
2355EXPORT_SYMBOL(scsi_esp_unregister);
2356
2357static int esp_slave_alloc(struct scsi_device *dev)
2358{
2359 struct esp *esp = host_to_esp(dev->host);
2360 struct esp_target_data *tp = &esp->target[dev->id];
2361 struct esp_lun_data *lp;
2362
2363 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2364 if (!lp)
2365 return -ENOMEM;
2366 dev->hostdata = lp;
2367
2368 tp->starget = dev->sdev_target;
2369
2370 spi_min_period(tp->starget) = esp->min_period;
2371 spi_max_offset(tp->starget) = 15;
2372
2373 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2374 spi_max_width(tp->starget) = 1;
2375 else
2376 spi_max_width(tp->starget) = 0;
2377
2378 return 0;
2379}
2380
2381static int esp_slave_configure(struct scsi_device *dev)
2382{
2383 struct esp *esp = host_to_esp(dev->host);
2384 struct esp_target_data *tp = &esp->target[dev->id];
2385 int goal_tags, queue_depth;
2386
2387 goal_tags = 0;
2388
2389 if (dev->tagged_supported) {
2390 /* XXX make this configurable somehow XXX */
2391 goal_tags = ESP_DEFAULT_TAGS;
2392
2393 if (goal_tags > ESP_MAX_TAG)
2394 goal_tags = ESP_MAX_TAG;
2395 }
2396
2397 queue_depth = goal_tags;
2398 if (queue_depth < dev->host->cmd_per_lun)
2399 queue_depth = dev->host->cmd_per_lun;
2400
2401 if (goal_tags) {
2402 scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2403 scsi_activate_tcq(dev, queue_depth);
2404 } else {
2405 scsi_deactivate_tcq(dev, queue_depth);
2406 }
2407 tp->flags |= ESP_TGT_DISCONNECT;
2408
2409 if (!spi_initial_dv(dev->sdev_target))
2410 spi_dv_device(dev);
2411
2412 return 0;
2413}
2414
2415static void esp_slave_destroy(struct scsi_device *dev)
2416{
2417 struct esp_lun_data *lp = dev->hostdata;
2418
2419 kfree(lp);
2420 dev->hostdata = NULL;
2421}
2422
2423static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2424{
2425 struct esp *esp = host_to_esp(cmd->device->host);
2426 struct esp_cmd_entry *ent, *tmp;
2427 struct completion eh_done;
2428 unsigned long flags;
2429
2430 /* XXX This helps a lot with debugging but might be a bit
2431 * XXX much for the final driver.
2432 */
2433 spin_lock_irqsave(esp->host->host_lock, flags);
2434 printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
2435 esp->host->unique_id, cmd, cmd->cmnd[0]);
2436 ent = esp->active_cmd;
2437 if (ent)
2438 printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
2439 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2440 list_for_each_entry(ent, &esp->queued_cmds, list) {
2441 printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
2442 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2443 }
2444 list_for_each_entry(ent, &esp->active_cmds, list) {
2445 printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
2446 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2447 }
2448 esp_dump_cmd_log(esp);
2449 spin_unlock_irqrestore(esp->host->host_lock, flags);
2450
2451 spin_lock_irqsave(esp->host->host_lock, flags);
2452
2453 ent = NULL;
2454 list_for_each_entry(tmp, &esp->queued_cmds, list) {
2455 if (tmp->cmd == cmd) {
2456 ent = tmp;
2457 break;
2458 }
2459 }
2460
2461 if (ent) {
2462 /* Easiest case, we didn't even issue the command
2463 * yet so it is trivial to abort.
2464 */
2465 list_del(&ent->list);
2466
2467 cmd->result = DID_ABORT << 16;
2468 cmd->scsi_done(cmd);
2469
2470 esp_put_ent(esp, ent);
2471
2472 goto out_success;
2473 }
2474
2475 init_completion(&eh_done);
2476
2477 ent = esp->active_cmd;
2478 if (ent && ent->cmd == cmd) {
2479 /* Command is the currently active command on
2480 * the bus. If we already have an output message
2481 * pending, no dice.
2482 */
2483 if (esp->msg_out_len)
2484 goto out_failure;
2485
2486 /* Send out an abort, encouraging the target to
2487 * go to MSGOUT phase by asserting ATN.
2488 */
2489 esp->msg_out[0] = ABORT_TASK_SET;
2490 esp->msg_out_len = 1;
2491 ent->eh_done = &eh_done;
2492
2493 scsi_esp_cmd(esp, ESP_CMD_SATN);
2494 } else {
2495 /* The command is disconnected. This is not easy to
2496 * abort. For now we fail and let the scsi error
2497 * handling layer go try a scsi bus reset or host
2498 * reset.
2499 *
2500 * What we could do is put together a scsi command
2501 * solely for the purpose of sending an abort message
2502 * to the target. Coming up with all the code to
2503 * cook up scsi commands, special case them everywhere,
2504 * etc. is for questionable gain and it would be better
2505 * if the generic scsi error handling layer could do at
2506 * least some of that for us.
2507 *
2508 * Anyways this is an area for potential future improvement
2509 * in this driver.
2510 */
2511 goto out_failure;
2512 }
2513
2514 spin_unlock_irqrestore(esp->host->host_lock, flags);
2515
2516 if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2517 spin_lock_irqsave(esp->host->host_lock, flags);
2518 ent->eh_done = NULL;
2519 spin_unlock_irqrestore(esp->host->host_lock, flags);
2520
2521 return FAILED;
2522 }
2523
2524 return SUCCESS;
2525
2526out_success:
2527 spin_unlock_irqrestore(esp->host->host_lock, flags);
2528 return SUCCESS;
2529
2530out_failure:
2531 /* XXX This might be a good location to set ESP_TGT_BROKEN
2532 * XXX since we know which target/lun in particular is
2533 * XXX causing trouble.
2534 */
2535 spin_unlock_irqrestore(esp->host->host_lock, flags);
2536 return FAILED;
2537}
2538
2539static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2540{
2541 struct esp *esp = host_to_esp(cmd->device->host);
2542 struct completion eh_reset;
2543 unsigned long flags;
2544
2545 init_completion(&eh_reset);
2546
2547 spin_lock_irqsave(esp->host->host_lock, flags);
2548
2549 esp->eh_reset = &eh_reset;
2550
2551 /* XXX This is too simple... We should add lots of
2552 * XXX checks here so that if we find that the chip is
2553 * XXX very wedged we return failure immediately so
2554 * XXX that we can perform a full chip reset.
2555 */
2556 esp->flags |= ESP_FLAG_RESETTING;
2557 scsi_esp_cmd(esp, ESP_CMD_RS);
2558
2559 spin_unlock_irqrestore(esp->host->host_lock, flags);
2560
2561 ssleep(esp_bus_reset_settle);
2562
2563 if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2564 spin_lock_irqsave(esp->host->host_lock, flags);
2565 esp->eh_reset = NULL;
2566 spin_unlock_irqrestore(esp->host->host_lock, flags);
2567
2568 return FAILED;
2569 }
2570
2571 return SUCCESS;
2572}
2573
2574/* All bets are off, reset the entire device. */
2575static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2576{
2577 struct esp *esp = host_to_esp(cmd->device->host);
2578 unsigned long flags;
2579
2580 spin_lock_irqsave(esp->host->host_lock, flags);
2581 esp_bootup_reset(esp);
2582 esp_reset_cleanup(esp);
2583 spin_unlock_irqrestore(esp->host->host_lock, flags);
2584
2585 ssleep(esp_bus_reset_settle);
2586
2587 return SUCCESS;
2588}
2589
2590static const char *esp_info(struct Scsi_Host *host)
2591{
2592 return "esp";
2593}
2594
2595struct scsi_host_template scsi_esp_template = {
2596 .module = THIS_MODULE,
2597 .name = "esp",
2598 .info = esp_info,
2599 .queuecommand = esp_queuecommand,
2600 .slave_alloc = esp_slave_alloc,
2601 .slave_configure = esp_slave_configure,
2602 .slave_destroy = esp_slave_destroy,
2603 .eh_abort_handler = esp_eh_abort_handler,
2604 .eh_bus_reset_handler = esp_eh_bus_reset_handler,
2605 .eh_host_reset_handler = esp_eh_host_reset_handler,
2606 .can_queue = 7,
2607 .this_id = 7,
2608 .sg_tablesize = SG_ALL,
2609 .use_clustering = ENABLE_CLUSTERING,
2610 .max_sectors = 0xffff,
2611 .skip_settle_delay = 1,
2612};
2613EXPORT_SYMBOL(scsi_esp_template);
2614
2615static void esp_get_signalling(struct Scsi_Host *host)
2616{
2617 struct esp *esp = host_to_esp(host);
2618 enum spi_signal_type type;
2619
2620 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2621 type = SPI_SIGNAL_HVD;
2622 else
2623 type = SPI_SIGNAL_SE;
2624
2625 spi_signalling(host) = type;
2626}
2627
2628static void esp_set_offset(struct scsi_target *target, int offset)
2629{
2630 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2631 struct esp *esp = host_to_esp(host);
2632 struct esp_target_data *tp = &esp->target[target->id];
2633
2634 tp->nego_goal_offset = offset;
2635 tp->flags |= ESP_TGT_CHECK_NEGO;
2636}
2637
2638static void esp_set_period(struct scsi_target *target, int period)
2639{
2640 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2641 struct esp *esp = host_to_esp(host);
2642 struct esp_target_data *tp = &esp->target[target->id];
2643
2644 tp->nego_goal_period = period;
2645 tp->flags |= ESP_TGT_CHECK_NEGO;
2646}
2647
2648static void esp_set_width(struct scsi_target *target, int width)
2649{
2650 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2651 struct esp *esp = host_to_esp(host);
2652 struct esp_target_data *tp = &esp->target[target->id];
2653
2654 tp->nego_goal_width = (width ? 1 : 0);
2655 tp->flags |= ESP_TGT_CHECK_NEGO;
2656}
2657
2658static struct spi_function_template esp_transport_ops = {
2659 .set_offset = esp_set_offset,
2660 .show_offset = 1,
2661 .set_period = esp_set_period,
2662 .show_period = 1,
2663 .set_width = esp_set_width,
2664 .show_width = 1,
2665 .get_signalling = esp_get_signalling,
2666};
2667
2668static int __init esp_init(void)
2669{
2670 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2671 sizeof(struct esp_cmd_priv));
2672
2673 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2674 if (!esp_transport_template)
2675 return -ENODEV;
2676
2677 return 0;
2678}
2679
2680static void __exit esp_exit(void)
2681{
2682 spi_release_transport(esp_transport_template);
2683}
2684
2685MODULE_DESCRIPTION("ESP SCSI driver core");
2686MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2687MODULE_LICENSE("GPL");
2688MODULE_VERSION(DRV_VERSION);
2689
2690module_param(esp_bus_reset_settle, int, 0);
2691MODULE_PARM_DESC(esp_bus_reset_settle,
2692 "ESP scsi bus reset delay in seconds");
2693
2694module_param(esp_debug, int, 0);
2695MODULE_PARM_DESC(esp_debug,
2696"ESP bitmapped debugging message enable value:\n"
2697" 0x00000001 Log interrupt events\n"
2698" 0x00000002 Log scsi commands\n"
2699" 0x00000004 Log resets\n"
2700" 0x00000008 Log message in events\n"
2701" 0x00000010 Log message out events\n"
2702" 0x00000020 Log command completion\n"
2703" 0x00000040 Log disconnects\n"
2704" 0x00000080 Log data start\n"
2705" 0x00000100 Log data done\n"
2706" 0x00000200 Log reconnects\n"
2707" 0x00000400 Log auto-sense data\n"
2708);
2709
2710module_init(esp_init);
2711module_exit(esp_exit);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
new file mode 100644
index 000000000000..8d4a6690401f
--- /dev/null
+++ b/drivers/scsi/esp_scsi.h
@@ -0,0 +1,560 @@
 1/* esp_scsi.h: Defines and structures for the ESP driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _ESP_SCSI_H
7#define _ESP_SCSI_H
8
9 /* Access Description Offset */
10#define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */
11#define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */
12#define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */
13#define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */
14#define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */
15#define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */
16#define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */
17#define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */
18#define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */
19#define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */
20#define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */
21#define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */
22#define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */
23#define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */
24#define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */
25#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */
26#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */
27#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */
28#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */
29#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
30#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
31#define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */
32#define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */
33
34#define SBUS_ESP_REG_SIZE 0x40UL
35
36/* Bitfield meanings for the above registers. */
37
38/* ESP config reg 1, read-write, found on all ESP chips */
39#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
40#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
41#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
42#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
43#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
44#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
45
46/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
47#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */
48#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */
49#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
50#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */
51#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
52#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
53#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */
54#define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */
55#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */
56#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */
57#define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */
58#define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */
59#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
60
 61/* ESP config register 3 read-write, found only on esp236+fas236+fas100a+hme chips */
62#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */
63#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */
64#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */
65#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */
66#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */
67#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */
68#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */
69#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */
70#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */
71#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */
72#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */
73#define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */
74#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */
75#define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */
76#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
77#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
78
79/* ESP command register read-write */
80/* Group 1 commands: These may be sent at any point in time to the ESP
81 * chip. None of them can generate interrupts 'cept
82 * the "SCSI bus reset" command if you have not disabled
83 * SCSI reset interrupts in the config1 ESP register.
84 */
85#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
86#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
87#define ESP_CMD_RC 0x02 /* Chip reset */
88#define ESP_CMD_RS 0x03 /* SCSI bus reset */
89
90/* Group 2 commands: ESP must be an initiator and connected to a target
91 * for these commands to work.
92 */
93#define ESP_CMD_TI 0x10 /* Transfer Information */
94#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
95#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
96#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
97#define ESP_CMD_SATN 0x1a /* Set ATN */
98#define ESP_CMD_RATN 0x1b /* De-assert ATN */
99
100/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
101 * to a target as the initiator for these commands to work.
102 */
103#define ESP_CMD_SMSG 0x20 /* Send message */
104#define ESP_CMD_SSTAT 0x21 /* Send status */
105#define ESP_CMD_SDATA 0x22 /* Send data */
106#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
107#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
108#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
109#define ESP_CMD_DCNCT 0x27 /* Disconnect */
110#define ESP_CMD_RMSG 0x28 /* Receive Message */
111#define ESP_CMD_RCMD 0x29 /* Receive Command */
112#define ESP_CMD_RDATA 0x2a /* Receive Data */
113#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
114
115/* Group 4 commands: The ESP must be in the disconnected state and must
116 * not be connected to any targets as initiator for
117 * these commands to work.
118 */
119#define ESP_CMD_RSEL 0x40 /* Reselect */
120#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
121#define ESP_CMD_SELA 0x42 /* Select w/ATN */
122#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
123#define ESP_CMD_ESEL 0x44 /* Enable selection */
124#define ESP_CMD_DSEL 0x45 /* Disable selections */
125#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
126#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
127
128/* This bit enables the ESP's DMA on the SBus */
129#define ESP_CMD_DMA 0x80 /* Do DMA? */
130
131/* ESP status register read-only */
132#define ESP_STAT_PIO 0x01 /* IO phase bit */
133#define ESP_STAT_PCD 0x02 /* CD phase bit */
134#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
135#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
136#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
137#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
138#define ESP_STAT_PERR 0x20 /* Parity error */
139#define ESP_STAT_SPAM 0x40 /* Real bad error */
140/* This indicates the 'interrupt pending' condition on esp236; it is a reserved
141 * bit on other revs of the ESP.
142 */
143#define ESP_STAT_INTR 0x80 /* Interrupt */
144
145/* The status register can be masked with ESP_STAT_PMASK and compared
146 * with the following values to determine the current phase the ESP
147 * (at least thinks it) is in. For our purposes we also add our own
148 * software 'done' bit for our phase management engine.
149 */
150#define ESP_DOP (0) /* Data Out */
151#define ESP_DIP (ESP_STAT_PIO) /* Data In */
152#define ESP_CMDP (ESP_STAT_PCD) /* Command */
153#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
154#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
155#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
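/* A minimal usage sketch of the mask above: the core latches the hardware
 * status register into esp->sreg on every IRQ (see struct esp below), so a
 * phase check looks like this; the handler names here are hypothetical:
 *
 *	switch (esp->sreg & ESP_STAT_PMASK) {
 *	case ESP_MIP:
 *		handle_message_in(esp);
 *		break;
 *	case ESP_DIP:
 *		handle_data_in(esp);
 *		break;
 *	}
 */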
156
157/* HME only: status 2 register */
158#define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */
159#define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */
160#define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */
161#define ESP_STAT2_CREGA 0x08 /* The command reg is active now */
162#define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */
163#define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */
164#define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */
165#define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */
166
167/* ESP interrupt register read-only */
168#define ESP_INTR_S 0x01 /* Select w/o ATN */
169#define ESP_INTR_SATN 0x02 /* Select w/ATN */
170#define ESP_INTR_RSEL 0x04 /* Reselected */
171#define ESP_INTR_FDONE 0x08 /* Function done */
172#define ESP_INTR_BSERV 0x10 /* Bus service */
173#define ESP_INTR_DC 0x20 /* Disconnect */
174#define ESP_INTR_IC 0x40 /* Illegal command given */
175#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
176
177/* ESP sequence step register read-only */
178#define ESP_STEP_VBITS 0x07 /* Valid bits */
179#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
180#define ESP_STEP_SID 0x01 /* One msg byte sent */
181#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
182#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
183 * bytes to be lost
184 */
185#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
186
187/* Ho hum, some ESP's set the step register to this as well... */
188#define ESP_STEP_FINI5 0x05
189#define ESP_STEP_FINI6 0x06
190#define ESP_STEP_FINI7 0x07
191
192/* ESP chip-test register read-write */
193#define ESP_TEST_TARG 0x01 /* Target test mode */
194#define ESP_TEST_INI 0x02 /* Initiator test mode */
195#define ESP_TEST_TS 0x04 /* Tristate test mode */
196
197/* ESP unique ID register read-only, found on fas236+fas100a only */
198#define ESP_UID_F100A 0x00 /* ESP FAS100A */
199#define ESP_UID_F236 0x02 /* ESP FAS236 */
200#define ESP_UID_REV 0x07 /* ESP revision */
201#define ESP_UID_FAM 0xf8 /* ESP family */
202
203/* ESP fifo flags register read-only */
204/* Note that the following implies a 16 byte FIFO on the ESP. */
205#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
206#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */
207#define ESP_FF_SSTEP 0xe0 /* Sequence step */
208
209/* ESP clock conversion factor register write-only */
210#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
211#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
212#define ESP_CCF_F2 0x02 /* 10MHz */
213#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
214#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
215#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
216#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
217#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
218
219/* HME only... */
220#define ESP_BUSID_RESELID 0x10
221#define ESP_BUSID_CTR32BIT 0x40
222
223#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */
224#define ESP_TIMEO_CONST 8192
225#define ESP_NEG_DEFP(mhz, cfact) \
226 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
227#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
228#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
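/* Worked example (integer arithmetic only): with a 40 MHz input clock the
 * OBP "clock-frequency" property gives mhz = 40000000, so
 * ESP_MHZ_TO_CYCLE(40000000) = 1000000000 / 40000 = 25000.  Assuming the
 * core derives a clock conversion factor of 8 for that rate,
 * ESP_NEG_DEFP(40000000, 8) = (250 * 40000) / (8192 * 8) = 152 and
 * ESP_TICK(8, 25000) = 7682 * 8 * 25000 / 1000 = 1536400.
 */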
229
230/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
231 * input clock rates we try for 10mb/s, although I doubt a transfer can actually
232 * run that fast on an ESP even with DMA2 scatter-gather pipelining.
233 */
234#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
235#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
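/* Assuming these are standard SCSI synchronous period codes in units of
 * 4ns: 0x32 = 50 -> 200ns per transfer, i.e. 5mb/s on an 8-bit bus, and
 * 0x19 = 25 -> 100ns per transfer, i.e. 10mb/s.
 */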
236
237struct esp_cmd_priv {
238 union {
239 dma_addr_t dma_addr;
240 int num_sg;
241 } u;
242
243 unsigned int cur_residue;
244 struct scatterlist *cur_sg;
245 unsigned int tot_residue;
246};
247#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
248
249enum esp_rev {
250 ESP100 = 0x00, /* NCR53C90 - very broken */
251 ESP100A = 0x01, /* NCR53C90A */
252 ESP236 = 0x02,
253 FAS236 = 0x03,
254 FAS100A = 0x04,
255 FAST = 0x05,
256 FASHME = 0x06,
257};
258
259struct esp_cmd_entry {
260 struct list_head list;
261
262 struct scsi_cmnd *cmd;
263
264 unsigned int saved_cur_residue;
265 struct scatterlist *saved_cur_sg;
266 unsigned int saved_tot_residue;
267
268 u8 flags;
269#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */
270#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */
271#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
272
273 u8 tag[2];
274
275 u8 status;
276 u8 message;
277
278 unsigned char *sense_ptr;
279 unsigned char *saved_sense_ptr;
280 dma_addr_t sense_dma;
281
282 struct completion *eh_done;
283};
284
285/* XXX make this configurable somehow XXX */
286#define ESP_DEFAULT_TAGS 16
287
288#define ESP_MAX_TARGET 16
289#define ESP_MAX_LUN 8
290#define ESP_MAX_TAG 256
291
292struct esp_lun_data {
293 struct esp_cmd_entry *non_tagged_cmd;
294 int num_tagged;
295 int hold;
296 struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG];
297};
298
299struct esp_target_data {
300 /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which
301 * match the currently negotiated settings for this target. The SCSI
302 * protocol values are maintained in spi_{offset,period,wide}(starget).
303 */
304 u8 esp_period;
305 u8 esp_offset;
306 u8 esp_config3;
307
308 u8 flags;
309#define ESP_TGT_WIDE 0x01
310#define ESP_TGT_DISCONNECT 0x02
311#define ESP_TGT_NEGO_WIDE 0x04
312#define ESP_TGT_NEGO_SYNC 0x08
313#define ESP_TGT_CHECK_NEGO 0x40
314#define ESP_TGT_BROKEN 0x80
315
316 /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this
317 * device we will try to negotiate the following parameters.
318 */
319 u8 nego_goal_period;
320 u8 nego_goal_offset;
321 u8 nego_goal_width;
322 u8 nego_goal_tags;
323
324 struct scsi_target *starget;
325};
326
327struct esp_event_ent {
328 u8 type;
329#define ESP_EVENT_TYPE_EVENT 0x01
330#define ESP_EVENT_TYPE_CMD 0x02
331 u8 val;
332
333 u8 sreg;
334 u8 seqreg;
335 u8 sreg2;
336 u8 ireg;
337 u8 select_state;
338 u8 event;
339 u8 __pad;
340};
341
342struct esp;
343struct esp_driver_ops {
344 /* Read and write the ESP 8-bit registers. On some
345 * applications of the ESP chip the registers are at 4-byte
346 * instead of 1-byte intervals.
347 */
348 void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
349 u8 (*esp_read8)(struct esp *esp, unsigned long reg);
350
351 /* Map and unmap DMA memory. Eventually the driver will be
352 * converted to the generic DMA API as soon as SBUS is able to
353 * cope with that. At such time we can remove this.
354 */
355 dma_addr_t (*map_single)(struct esp *esp, void *buf,
356 size_t sz, int dir);
357 int (*map_sg)(struct esp *esp, struct scatterlist *sg,
358 int num_sg, int dir);
359 void (*unmap_single)(struct esp *esp, dma_addr_t addr,
360 size_t sz, int dir);
361 void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
362 int num_sg, int dir);
363
364 /* Return non-zero if there is an IRQ pending. Usually this
365 * status bit lives in the DMA controller sitting in front of
366 * the ESP. This has to be accurate or else the ESP interrupt
367 * handler will not run.
368 */
369 int (*irq_pending)(struct esp *esp);
370
371 /* Reset the DMA engine entirely. On return, ESP interrupts
372 * should be enabled. Often the interrupt enabling is
373 * controlled in the DMA engine.
374 */
375 void (*reset_dma)(struct esp *esp);
376
377 /* Drain any pending DMA in the DMA engine after a transfer.
378 * This is for writes to memory.
379 */
380 void (*dma_drain)(struct esp *esp);
381
382 /* Invalidate the DMA engine after a DMA transfer. */
383 void (*dma_invalidate)(struct esp *esp);
384
385 /* Set up an ESP command that will use a DMA transfer.
386 * The 'esp_count' specifies what transfer length should be
387 * programmed into the ESP transfer counter registers, whereas
388 * the 'dma_count' is the length that should be programmed into
389 * the DMA controller. Usually they are the same. If 'write'
390 * is non-zero, this transfer is a write into memory. 'cmd'
391 * holds the ESP command that should be issued by calling
392 * scsi_esp_cmd() at the appropriate time while programming
393 * the DMA hardware.
394 */
395 void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count,
396 u32 dma_count, int write, u8 cmd);
397
398 /* Return non-zero if the DMA engine is reporting an error
399 * currently.
400 */
401 int (*dma_error)(struct esp *esp);
402};
403
404#define ESP_MAX_MSG_SZ 8
405#define ESP_EVENT_LOG_SZ 32
406
407#define ESP_QUICKIRQ_LIMIT 100
408#define ESP_RESELECT_TAG_LIMIT 2500
409
410struct esp {
411 void __iomem *regs;
412 void __iomem *dma_regs;
413
414 const struct esp_driver_ops *ops;
415
416 struct Scsi_Host *host;
417 void *dev;
418
419 struct esp_cmd_entry *active_cmd;
420
421 struct list_head queued_cmds;
422 struct list_head active_cmds;
423
424 u8 *command_block;
425 dma_addr_t command_block_dma;
426
427 unsigned int data_dma_len;
428
429 /* The following are used to determine the cause of an IRQ. Upon every
430 * IRQ entry we synchronize these with the hardware registers.
431 */
432 u8 sreg;
433 u8 seqreg;
434 u8 sreg2;
435 u8 ireg;
436
437 u32 prev_hme_dmacsr;
438 u8 prev_soff;
439 u8 prev_stp;
440 u8 prev_cfg3;
441 u8 __pad;
442
443 struct list_head esp_cmd_pool;
444
445 struct esp_target_data target[ESP_MAX_TARGET];
446
447 int fifo_cnt;
448 u8 fifo[16];
449
450 struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ];
451 int esp_event_cur;
452
453 u8 msg_out[ESP_MAX_MSG_SZ];
454 int msg_out_len;
455
456 u8 msg_in[ESP_MAX_MSG_SZ];
457 int msg_in_len;
458
459 u8 bursts;
460 u8 config1;
461 u8 config2;
462
463 u8 scsi_id;
464 u32 scsi_id_mask;
465
466 enum esp_rev rev;
467
468 u32 flags;
469#define ESP_FLAG_DIFFERENTIAL 0x00000001
470#define ESP_FLAG_RESETTING 0x00000002
471#define ESP_FLAG_DOING_SLOWCMD 0x00000004
472#define ESP_FLAG_WIDE_CAPABLE 0x00000008
473#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
474
475 u8 select_state;
476#define ESP_SELECT_NONE 0x00 /* Not selecting */
477#define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */
478#define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */
479
480 /* When we are not selecting, we are expecting an event. */
481 u8 event;
482#define ESP_EVENT_NONE 0x00
483#define ESP_EVENT_CMD_START 0x01
484#define ESP_EVENT_CMD_DONE 0x02
485#define ESP_EVENT_DATA_IN 0x03
486#define ESP_EVENT_DATA_OUT 0x04
487#define ESP_EVENT_DATA_DONE 0x05
488#define ESP_EVENT_MSGIN 0x06
489#define ESP_EVENT_MSGIN_MORE 0x07
490#define ESP_EVENT_MSGIN_DONE 0x08
491#define ESP_EVENT_MSGOUT 0x09
492#define ESP_EVENT_MSGOUT_DONE 0x0a
493#define ESP_EVENT_STATUS 0x0b
494#define ESP_EVENT_FREE_BUS 0x0c
495#define ESP_EVENT_CHECK_PHASE 0x0d
496#define ESP_EVENT_RESET 0x10
497
498 /* Probed in esp_get_clock_params() */
499 u32 cfact;
500 u32 cfreq;
501 u32 ccycle;
502 u32 ctick;
503 u32 neg_defp;
504 u32 sync_defp;
505
506 /* Computed in esp_reset_esp() */
507 u32 max_period;
508 u32 min_period;
509 u32 radelay;
510
511 /* Slow command state. */
512 u8 *cmd_bytes_ptr;
513 int cmd_bytes_left;
514
515 struct completion *eh_reset;
516
517 struct sbus_dma *dma;
518};
519
520#define host_to_esp(host) ((struct esp *)(host)->hostdata)
521
522/* A front-end driver for the ESP chip should do the following in
523 * its device probe routine:
524 * 1) Allocate the host and private area using scsi_host_alloc()
525 * with size 'sizeof(struct esp)'. The first argument to
526 * scsi_host_alloc() should be &scsi_esp_template.
527 * 2) Set host->max_id as appropriate.
528 * 3) Set esp->host to the scsi_host itself, and esp->dev
529 * to the device object pointer.
530 * 4) Hook up esp->ops to the front-end implementation.
531 * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE
532 * in esp->flags.
533 * 6) Map the DMA and ESP chip registers.
534 * 7) DMA map the ESP command block, store the DMA address
535 * in esp->command_block_dma.
536 * 8) Register the scsi_esp_intr() interrupt handler.
537 * 9) Probe for and provide the following chip properties:
538 * esp->scsi_id (assign to esp->host->this_id too)
539 * esp->scsi_id_mask
540 * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL
541 * esp->cfreq
542 * DMA burst bit mask in esp->bursts, if necessary
543 * 10) Perform any actions necessary before the ESP device can
544 * be programmed for the first time. On some configs, for
545 * example, the DMA engine has to be reset before ESP can
546 * be programmed.
548 * 11) If necessary, call dev_set_drvdata().
548 * 12) Call scsi_esp_register() with prepared 'esp' structure
549 * and a device pointer if possible.
550 * 13) Check the scsi_esp_register() return value and release all
551 * resources if an error was returned.
552 */
553extern struct scsi_host_template scsi_esp_template;
554extern int scsi_esp_register(struct esp *, struct device *);
555
556extern void scsi_esp_unregister(struct esp *);
557extern irqreturn_t scsi_esp_intr(int, void *);
558extern void scsi_esp_cmd(struct esp *, u8);
559
560#endif /* !(_ESP_SCSI_H) */
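
As a rough illustration of the checklist above, a minimal front-end probe could be sketched as below. Everything prefixed my_ (the device type, the register and command-block helpers, the ops table) is a placeholder; the sun_esp.c front-end added later in this patch is the real reference implementation.

	static int my_esp_probe(struct my_device *dev)
	{
		struct Scsi_Host *host;
		struct esp *esp;
		int err;

		/* Steps 1-5: allocate the host against the shared template
		 * and fill in the basic fields.
		 */
		host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
		if (!host)
			return -ENOMEM;
		host->max_id = 8;

		esp = host_to_esp(host);
		esp->host = host;
		esp->dev = dev;
		esp->ops = &my_esp_ops;		/* a struct esp_driver_ops */

		/* Steps 6-8: map the chip registers, DMA-map the command
		 * block, hook up the interrupt handler.
		 */
		esp->regs = my_map_esp_regs(dev);
		esp->command_block = my_alloc_command_block(dev,
						&esp->command_block_dma);
		err = request_irq(dev->irq, scsi_esp_intr, IRQF_SHARED,
				  "ESP", esp);
		if (err)
			goto fail;

		/* Step 9: probe the chip properties. */
		esp->scsi_id = 7;
		esp->host->this_id = esp->scsi_id;
		esp->scsi_id_mask = 1 << esp->scsi_id;
		esp->cfreq = 40000000;

		/* Steps 12-13: hand the prepared esp to the core. */
		err = scsi_esp_register(esp, &dev->dev);
		if (err)
			goto fail;

		return 0;

	fail:
		/* Step 13: a real driver also frees the IRQ and unmaps
		 * everything that steps 6-8 set up before giving up the host.
		 */
		scsi_host_put(host);
		return err;
	}
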
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 38c3a291efac..bd8e7f323c69 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -435,7 +435,7 @@ struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
435 struct class_device *cdev; 435 struct class_device *cdev;
436 struct Scsi_Host *shost = ERR_PTR(-ENXIO), *p; 436 struct Scsi_Host *shost = ERR_PTR(-ENXIO), *p;
437 437
438 down_read(&class->subsys.rwsem); 438 down(&class->sem);
439 list_for_each_entry(cdev, &class->children, node) { 439 list_for_each_entry(cdev, &class->children, node) {
440 p = class_to_shost(cdev); 440 p = class_to_shost(cdev);
441 if (p->host_no == hostnum) { 441 if (p->host_no == hostnum) {
@@ -443,7 +443,7 @@ struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
443 break; 443 break;
444 } 444 }
445 } 445 }
446 up_read(&class->subsys.rwsem); 446 up(&class->sem);
447 447
448 return shost; 448 return shost;
449} 449}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9b827ceec501..c4195ea869e9 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1281,7 +1281,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1281 (struct scatterlist *)Cmnd->request_buffer, 1281 (struct scatterlist *)Cmnd->request_buffer,
1282 Cmnd->use_sg, 1282 Cmnd->use_sg,
1283 Cmnd->sc_data_direction); 1283 Cmnd->sc_data_direction);
1284 } else { 1284 } else if (Cmnd->request_bufflen) {
1285 sbus_unmap_single(qpti->sdev, 1285 sbus_unmap_single(qpti->sdev,
1286 (__u32)((unsigned long)Cmnd->SCp.ptr), 1286 (__u32)((unsigned long)Cmnd->SCp.ptr),
1287 Cmnd->request_bufflen, 1287 Cmnd->request_bufflen,
@@ -1403,7 +1403,7 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
1403 struct scsi_host_template *tpnt = match->data; 1403 struct scsi_host_template *tpnt = match->data;
1404 struct Scsi_Host *host; 1404 struct Scsi_Host *host;
1405 struct qlogicpti *qpti; 1405 struct qlogicpti *qpti;
1406 char *fcode; 1406 const char *fcode;
1407 1407
1408 /* Sometimes Antares cards come up not completely 1408 /* Sometimes Antares cards come up not completely
1409 * setup, and we get a report of a zero IRQ. 1409 * setup, and we get a report of a zero IRQ.
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 1b59b27e887f..4bf9aa547c78 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
50 while (skb->len >= NLMSG_SPACE(0)) { 50 while (skb->len >= NLMSG_SPACE(0)) {
51 err = 0; 51 err = 0;
52 52
53 nlh = (struct nlmsghdr *) skb->data; 53 nlh = nlmsg_hdr(skb);
54 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || 54 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
55 (skb->len < nlh->nlmsg_len)) { 55 (skb->len < nlh->nlmsg_len)) {
56 printk(KERN_WARNING "%s: discarding partial skb\n", 56 printk(KERN_WARNING "%s: discarding partial skb\n",
@@ -168,7 +168,8 @@ scsi_netlink_init(void)
168 } 168 }
169 169
170 scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, 170 scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT,
171 SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE); 171 SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL,
172 THIS_MODULE);
172 if (!scsi_nl_sock) { 173 if (!scsi_nl_sock) {
173 printk(KERN_ERR "%s: register of recieve handler failed\n", 174 printk(KERN_ERR "%s: register of recieve handler failed\n",
174 __FUNCTION__); 175 __FUNCTION__);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ce0d14af33c8..aabaa0576ab4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1081,7 +1081,7 @@ iscsi_if_rx(struct sock *sk, int len)
1081 struct nlmsghdr *nlh; 1081 struct nlmsghdr *nlh;
1082 struct iscsi_uevent *ev; 1082 struct iscsi_uevent *ev;
1083 1083
1084 nlh = (struct nlmsghdr *)skb->data; 1084 nlh = nlmsg_hdr(skb);
1085 if (nlh->nlmsg_len < sizeof(*nlh) || 1085 if (nlh->nlmsg_len < sizeof(*nlh) ||
1086 skb->len < nlh->nlmsg_len) { 1086 skb->len < nlh->nlmsg_len) {
1087 break; 1087 break;
@@ -1435,7 +1435,7 @@ static __init int iscsi_transport_init(void)
1435 if (err) 1435 if (err)
1436 goto unregister_conn_class; 1436 goto unregister_conn_class;
1437 1437
1438 nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, 1438 nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
1439 THIS_MODULE); 1439 THIS_MODULE);
1440 if (!nls) { 1440 if (!nls) {
1441 err = -ENOBUFS; 1441 err = -ENOBUFS;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
new file mode 100644
index 000000000000..8c766bcd1095
--- /dev/null
+++ b/drivers/scsi/sun_esp.c
@@ -0,0 +1,634 @@
1/* sun_esp.c: ESP front-end for Sparc SBUS systems.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/module.h>
9#include <linux/init.h>
10
11#include <asm/irq.h>
12#include <asm/io.h>
13#include <asm/dma.h>
14
15#include <asm/sbus.h>
16
17#include <scsi/scsi_host.h>
18
19#include "esp_scsi.h"
20
21#define DRV_MODULE_NAME "sun_esp"
22#define PFX DRV_MODULE_NAME ": "
23#define DRV_VERSION "1.000"
24#define DRV_MODULE_RELDATE "April 19, 2007"
25
26#define dma_read32(REG) \
27 sbus_readl(esp->dma_regs + (REG))
28#define dma_write32(VAL, REG) \
29 sbus_writel((VAL), esp->dma_regs + (REG))
30
31static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
32{
33 struct sbus_dev *sdev = esp->dev;
34 struct sbus_dma *dma;
35
36 if (dma_sdev != NULL) {
37 for_each_dvma(dma) {
38 if (dma->sdev == dma_sdev)
39 break;
40 }
41 } else {
42 for_each_dvma(dma) {
43 if (dma->sdev == NULL)
44 break;
45
46 /* If bus + slot are the same and it has the
47 * correct OBP name, it's ours.
48 */
49 if (sdev->bus == dma->sdev->bus &&
50 sdev->slot == dma->sdev->slot &&
51 (!strcmp(dma->sdev->prom_name, "dma") ||
52 !strcmp(dma->sdev->prom_name, "espdma")))
53 break;
54 }
55 }
56
57 if (dma == NULL) {
58 printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
59 sdev->ofdev.node->full_name);
60 return -ENODEV;
61 }
62 esp->dma = dma;
63 esp->dma_regs = dma->regs;
64
65 return 0;
66
67}
68
69static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
70{
71 struct sbus_dev *sdev = esp->dev;
72 struct resource *res;
73
74 /* On HME, two reg sets exist, first is DVMA,
75 * second is ESP registers.
76 */
77 if (hme)
78 res = &sdev->resource[1];
79 else
80 res = &sdev->resource[0];
81
82 esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
83 if (!esp->regs)
84 return -ENOMEM;
85
86 return 0;
87}
88
89static int __devinit esp_sbus_map_command_block(struct esp *esp)
90{
91 struct sbus_dev *sdev = esp->dev;
92
93 esp->command_block = sbus_alloc_consistent(sdev, 16,
94 &esp->command_block_dma);
95 if (!esp->command_block)
96 return -ENOMEM;
97 return 0;
98}
99
100static int __devinit esp_sbus_register_irq(struct esp *esp)
101{
102 struct Scsi_Host *host = esp->host;
103 struct sbus_dev *sdev = esp->dev;
104
105 host->irq = sdev->irqs[0];
106 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
107}
108
109static void __devinit esp_get_scsi_id(struct esp *esp)
110{
111 struct sbus_dev *sdev = esp->dev;
112 struct device_node *dp = sdev->ofdev.node;
113
114 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
115 if (esp->scsi_id != 0xff)
116 goto done;
117
118 esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
119 if (esp->scsi_id != 0xff)
120 goto done;
121
122 if (!sdev->bus) {
123 /* SUN4 */
124 esp->scsi_id = 7;
125 goto done;
126 }
127
128 esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
129 "scsi-initiator-id", 7);
130
131done:
132 esp->host->this_id = esp->scsi_id;
133 esp->scsi_id_mask = (1 << esp->scsi_id);
134}
135
136static void __devinit esp_get_differential(struct esp *esp)
137{
138 struct sbus_dev *sdev = esp->dev;
139 struct device_node *dp = sdev->ofdev.node;
140
141 if (of_find_property(dp, "differential", NULL))
142 esp->flags |= ESP_FLAG_DIFFERENTIAL;
143 else
144 esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
145}
146
147static void __devinit esp_get_clock_params(struct esp *esp)
148{
149 struct sbus_dev *sdev = esp->dev;
150 struct device_node *dp = sdev->ofdev.node;
151 struct device_node *bus_dp;
152 int fmhz;
153
154 bus_dp = NULL;
155 if (sdev != NULL && sdev->bus != NULL)
156 bus_dp = sdev->bus->ofdev.node;
157
158 fmhz = of_getintprop_default(dp, "clock-frequency", 0);
159 if (fmhz == 0)
160 fmhz = (!bus_dp) ? 0 :
161 of_getintprop_default(bus_dp, "clock-frequency", 0);
162
163 esp->cfreq = fmhz;
164}
165
166static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
167{
168 struct sbus_dev *sdev = esp->dev;
169 struct device_node *dp = sdev->ofdev.node;
170 u8 bursts;
171
172 bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
173 if (dma) {
174 struct device_node *dma_dp = dma->ofdev.node;
175 u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
176 if (val != 0xff)
177 bursts &= val;
178 }
179
180 if (sdev->bus) {
181 u8 val = of_getintprop_default(sdev->bus->ofdev.node,
182 "burst-sizes", 0xff);
183 if (val != 0xff)
184 bursts &= val;
185 }
186
187 if (bursts == 0xff ||
188 (bursts & DMA_BURST16) == 0 ||
189 (bursts & DMA_BURST32) == 0)
190 bursts = (DMA_BURST32 - 1);
191
192 esp->bursts = bursts;
193}
194
195static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
196{
197 esp_get_scsi_id(esp);
198 esp_get_differential(esp);
199 esp_get_clock_params(esp);
200 esp_get_bursts(esp, espdma);
201}
202
203static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
204{
205 sbus_writeb(val, esp->regs + (reg * 4UL));
206}
207
208static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
209{
210 return sbus_readb(esp->regs + (reg * 4UL));
211}
212
213static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
214 size_t sz, int dir)
215{
216 return sbus_map_single(esp->dev, buf, sz, dir);
217}
218
219static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
220 int num_sg, int dir)
221{
222 return sbus_map_sg(esp->dev, sg, num_sg, dir);
223}
224
225static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
226 size_t sz, int dir)
227{
228 sbus_unmap_single(esp->dev, addr, sz, dir);
229}
230
231static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
232 int num_sg, int dir)
233{
234 sbus_unmap_sg(esp->dev, sg, num_sg, dir);
235}
236
237static int sbus_esp_irq_pending(struct esp *esp)
238{
239 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
240 return 1;
241 return 0;
242}
243
244static void sbus_esp_reset_dma(struct esp *esp)
245{
246 int can_do_burst16, can_do_burst32, can_do_burst64;
247 int can_do_sbus64, lim;
248 u32 val;
249
250 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
251 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
252 can_do_burst64 = 0;
253 can_do_sbus64 = 0;
254 if (sbus_can_dma_64bit(esp->dev))
255 can_do_sbus64 = 1;
256 if (sbus_can_burst64(esp->sdev))
257 can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;
258
259 /* Put the DVMA into a known state. */
260 if (esp->dma->revision != dvmahme) {
261 val = dma_read32(DMA_CSR);
262 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
263 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
264 }
265 switch (esp->dma->revision) {
266 case dvmahme:
267 dma_write32(DMA_RESET_FAS366, DMA_CSR);
268 dma_write32(DMA_RST_SCSI, DMA_CSR);
269
270 esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
271 DMA_SCSI_DISAB | DMA_INT_ENAB);
272
273 esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
274 DMA_BRST_SZ);
275
276 if (can_do_burst64)
277 esp->prev_hme_dmacsr |= DMA_BRST64;
278 else if (can_do_burst32)
279 esp->prev_hme_dmacsr |= DMA_BRST32;
280
281 if (can_do_sbus64) {
282 esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
283 sbus_set_sbus64(esp->dev, esp->bursts);
284 }
285
286 lim = 1000;
287 while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
288 if (--lim == 0) {
289 printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
290 "will not clear!\n",
291 esp->host->unique_id);
292 break;
293 }
294 udelay(1);
295 }
296
297 dma_write32(0, DMA_CSR);
298 dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
299
300 dma_write32(0, DMA_ADDR);
301 break;
302
303 case dvmarev2:
304 if (esp->rev != ESP100) {
305 val = dma_read32(DMA_CSR);
306 dma_write32(val | DMA_3CLKS, DMA_CSR);
307 }
308 break;
309
310 case dvmarev3:
311 val = dma_read32(DMA_CSR);
312 val &= ~DMA_3CLKS;
313 val |= DMA_2CLKS;
314 if (can_do_burst32) {
315 val &= ~DMA_BRST_SZ;
316 val |= DMA_BRST32;
317 }
318 dma_write32(val, DMA_CSR);
319 break;
320
321 case dvmaesc1:
322 val = dma_read32(DMA_CSR);
323 val |= DMA_ADD_ENABLE;
324 val &= ~DMA_BCNT_ENAB;
325 if (!can_do_burst32 && can_do_burst16) {
326 val |= DMA_ESC_BURST;
327 } else {
328 val &= ~(DMA_ESC_BURST);
329 }
330 dma_write32(val, DMA_CSR);
331 break;
332
333 default:
334 break;
335 }
336
337 /* Enable interrupts. */
338 val = dma_read32(DMA_CSR);
339 dma_write32(val | DMA_INT_ENAB, DMA_CSR);
340}
341
342static void sbus_esp_dma_drain(struct esp *esp)
343{
344 u32 csr;
345 int lim;
346
347 if (esp->dma->revision == dvmahme)
348 return;
349
350 csr = dma_read32(DMA_CSR);
351 if (!(csr & DMA_FIFO_ISDRAIN))
352 return;
353
354 if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
355 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
356
357 lim = 1000;
358 while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
359 if (--lim == 0) {
360 printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
361 esp->host->unique_id);
362 break;
363 }
364 udelay(1);
365 }
366}
367
368static void sbus_esp_dma_invalidate(struct esp *esp)
369{
370 if (esp->dma->revision == dvmahme) {
371 dma_write32(DMA_RST_SCSI, DMA_CSR);
372
373 esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
374 (DMA_PARITY_OFF | DMA_2CLKS |
375 DMA_SCSI_DISAB | DMA_INT_ENAB)) &
376 ~(DMA_ST_WRITE | DMA_ENABLE));
377
378 dma_write32(0, DMA_CSR);
379 dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
380
381 /* This is necessary to avoid having the SCSI channel
382 * engine lock up on us.
383 */
384 dma_write32(0, DMA_ADDR);
385 } else {
386 u32 val;
387 int lim;
388
389 lim = 1000;
390 while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
391 if (--lim == 0) {
392 printk(KERN_ALERT PFX "esp%d: DMA will not "
393 "invalidate!\n", esp->host->unique_id);
394 break;
395 }
396 udelay(1);
397 }
398
399 val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
400 val |= DMA_FIFO_INV;
401 dma_write32(val, DMA_CSR);
402 val &= ~DMA_FIFO_INV;
403 dma_write32(val, DMA_CSR);
404 }
405}
406
407static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
408 u32 dma_count, int write, u8 cmd)
409{
410 u32 csr;
411
412 BUG_ON(!(cmd & ESP_CMD_DMA));
413
414 sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
415 sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
416 if (esp->rev == FASHME) {
417 sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
418 sbus_esp_write8(esp, 0, FAS_RHI);
419
420 scsi_esp_cmd(esp, cmd);
421
422 csr = esp->prev_hme_dmacsr;
423 csr |= DMA_SCSI_DISAB | DMA_ENABLE;
424 if (write)
425 csr |= DMA_ST_WRITE;
426 else
427 csr &= ~DMA_ST_WRITE;
428 esp->prev_hme_dmacsr = csr;
429
430 dma_write32(dma_count, DMA_COUNT);
431 dma_write32(addr, DMA_ADDR);
432 dma_write32(csr, DMA_CSR);
433 } else {
434 csr = dma_read32(DMA_CSR);
435 csr |= DMA_ENABLE;
436 if (write)
437 csr |= DMA_ST_WRITE;
438 else
439 csr &= ~DMA_ST_WRITE;
440 dma_write32(csr, DMA_CSR);
441 if (esp->dma->revision == dvmaesc1) {
442 u32 end = PAGE_ALIGN(addr + dma_count + 16U);
443 dma_write32(end - addr, DMA_COUNT);
444 }
445 dma_write32(addr, DMA_ADDR);
446
447 scsi_esp_cmd(esp, cmd);
448 }
449
450}
451
452static int sbus_esp_dma_error(struct esp *esp)
453{
454 u32 csr = dma_read32(DMA_CSR);
455
456 if (csr & DMA_HNDL_ERROR)
457 return 1;
458
459 return 0;
460}
461
462static const struct esp_driver_ops sbus_esp_ops = {
463 .esp_write8 = sbus_esp_write8,
464 .esp_read8 = sbus_esp_read8,
465 .map_single = sbus_esp_map_single,
466 .map_sg = sbus_esp_map_sg,
467 .unmap_single = sbus_esp_unmap_single,
468 .unmap_sg = sbus_esp_unmap_sg,
469 .irq_pending = sbus_esp_irq_pending,
470 .reset_dma = sbus_esp_reset_dma,
471 .dma_drain = sbus_esp_dma_drain,
472 .dma_invalidate = sbus_esp_dma_invalidate,
473 .send_dma_cmd = sbus_esp_send_dma_cmd,
474 .dma_error = sbus_esp_dma_error,
475};
476
477static int __devinit esp_sbus_probe_one(struct device *dev,
478 struct sbus_dev *esp_dev,
479 struct sbus_dev *espdma,
480 struct sbus_bus *sbus,
481 int hme)
482{
483 struct scsi_host_template *tpnt = &scsi_esp_template;
484 struct Scsi_Host *host;
485 struct esp *esp;
486 int err;
487
488 host = scsi_host_alloc(tpnt, sizeof(struct esp));
489
490 err = -ENOMEM;
491 if (!host)
492 goto fail;
493
494 host->max_id = (hme ? 16 : 8);
495 esp = host_to_esp(host);
496
497 esp->host = host;
498 esp->dev = esp_dev;
499 esp->ops = &sbus_esp_ops;
500
501 if (hme)
502 esp->flags |= ESP_FLAG_WIDE_CAPABLE;
503
504 err = esp_sbus_find_dma(esp, espdma);
505 if (err < 0)
506 goto fail_unlink;
507
508 err = esp_sbus_map_regs(esp, hme);
509 if (err < 0)
510 goto fail_unlink;
511
512 err = esp_sbus_map_command_block(esp);
513 if (err < 0)
514 goto fail_unmap_regs;
515
516 err = esp_sbus_register_irq(esp);
517 if (err < 0)
518 goto fail_unmap_command_block;
519
520 esp_sbus_get_props(esp, espdma);
521
522 /* Before we try to touch the ESP chip, ESC1 dma can
523 * come up with the reset bit set, so make sure that
524 * is clear first.
525 */
526 if (esp->dma->revision == dvmaesc1) {
527 u32 val = dma_read32(DMA_CSR);
528
529 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
530 }
531
532 dev_set_drvdata(&esp_dev->ofdev.dev, esp);
533
534 err = scsi_esp_register(esp, dev);
535 if (err)
536 goto fail_free_irq;
537
538 return 0;
539
540fail_free_irq:
541 free_irq(host->irq, esp);
542fail_unmap_command_block:
543 sbus_free_consistent(esp->dev, 16,
544 esp->command_block,
545 esp->command_block_dma);
546fail_unmap_regs:
547 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
548fail_unlink:
549 scsi_host_put(host);
550fail:
551 return err;
552}
553
554static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
555{
556 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
557 struct device_node *dp = dev->node;
558 struct sbus_dev *dma_sdev = NULL;
559 int hme = 0;
560
561 if (dp->parent &&
562 (!strcmp(dp->parent->name, "espdma") ||
563 !strcmp(dp->parent->name, "dma")))
564 dma_sdev = sdev->parent;
565 else if (!strcmp(dp->name, "SUNW,fas")) {
566 dma_sdev = sdev;
567 hme = 1;
568 }
569
570 return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
571 sdev->bus, hme);
572}
573
574static int __devexit esp_sbus_remove(struct of_device *dev)
575{
576 struct esp *esp = dev_get_drvdata(&dev->dev);
577 unsigned int irq = esp->host->irq;
578 u32 val;
579
580 scsi_esp_unregister(esp);
581
582 /* Disable interrupts. */
583 val = dma_read32(DMA_CSR);
584 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
585
586 free_irq(irq, esp);
587 sbus_free_consistent(esp->dev, 16,
588 esp->command_block,
589 esp->command_block_dma);
590 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
591
592 scsi_host_put(esp->host);
593
594 return 0;
595}
596
597static struct of_device_id esp_match[] = {
598 {
599 .name = "SUNW,esp",
600 },
601 {
602 .name = "SUNW,fas",
603 },
604 {
605 .name = "esp",
606 },
607 {},
608};
609MODULE_DEVICE_TABLE(of, esp_match);
610
611static struct of_platform_driver esp_sbus_driver = {
612 .name = "esp",
613 .match_table = esp_match,
614 .probe = esp_sbus_probe,
615 .remove = __devexit_p(esp_sbus_remove),
616};
617
618static int __init sunesp_init(void)
619{
620 return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
621}
622
623static void __exit sunesp_exit(void)
624{
625 of_unregister_driver(&esp_sbus_driver);
626}
627
628MODULE_DESCRIPTION("Sun ESP SCSI driver");
629MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
630MODULE_LICENSE("GPL");
631MODULE_VERSION(DRV_VERSION);
632
633module_init(sunesp_init);
634module_exit(sunesp_exit);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c129a0e8e807..90621c3312bc 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1310,7 +1310,8 @@ static unsigned int check_modem_status(struct uart_8250_port *up)
1310{ 1310{
1311 unsigned int status = serial_in(up, UART_MSR); 1311 unsigned int status = serial_in(up, UART_MSR);
1312 1312
1313 if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI) { 1313 if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
1314 up->port.info != NULL) {
1314 if (status & UART_MSR_TERI) 1315 if (status & UART_MSR_TERI)
1315 up->port.icount.rng++; 1316 up->port.icount.rng++;
1316 if (status & UART_MSR_DDSR) 1317 if (status & UART_MSR_DDSR)
@@ -1333,8 +1334,9 @@ static inline void
1333serial8250_handle_port(struct uart_8250_port *up) 1334serial8250_handle_port(struct uart_8250_port *up)
1334{ 1335{
1335 unsigned int status; 1336 unsigned int status;
1337 unsigned long flags;
1336 1338
1337 spin_lock(&up->port.lock); 1339 spin_lock_irqsave(&up->port.lock, flags);
1338 1340
1339 status = serial_inp(up, UART_LSR); 1341 status = serial_inp(up, UART_LSR);
1340 1342
@@ -1346,7 +1348,7 @@ serial8250_handle_port(struct uart_8250_port *up)
1346 if (status & UART_LSR_THRE) 1348 if (status & UART_LSR_THRE)
1347 transmit_chars(up); 1349 transmit_chars(up);
1348 1350
1349 spin_unlock(&up->port.lock); 1351 spin_unlock_irqrestore(&up->port.lock, flags);
1350} 1352}
1351 1353
1352/* 1354/*
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 41431d0d5512..246c5572667b 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -164,7 +164,7 @@ static void free_port_memory(struct icom_port *icom_port)
164 } 164 }
165} 165}
166 166
167static int __init get_port_memory(struct icom_port *icom_port) 167static int __devinit get_port_memory(struct icom_port *icom_port)
168{ 168{
169 int index; 169 int index;
170 unsigned long stgAddr; 170 unsigned long stgAddr;
@@ -1380,7 +1380,7 @@ static void icom_port_active(struct icom_port *icom_port, struct icom_adapter *i
1380 0x8024 + 2 - 2 * (icom_port->port - 2); 1380 0x8024 + 2 - 2 * (icom_port->port - 2);
1381 } 1381 }
1382} 1382}
1383static int __init icom_load_ports(struct icom_adapter *icom_adapter) 1383static int __devinit icom_load_ports(struct icom_adapter *icom_adapter)
1384{ 1384{
1385 struct icom_port *icom_port; 1385 struct icom_port *icom_port;
1386 int port_num; 1386 int port_num;
@@ -1473,7 +1473,7 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
1473 } 1473 }
1474 } 1474 }
1475 1475
1476 free_irq(icom_adapter->irq_number, (void *) icom_adapter); 1476 free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
1477 iounmap(icom_adapter->base_addr); 1477 iounmap(icom_adapter->base_addr);
1478 icom_free_adapter(icom_adapter); 1478 icom_free_adapter(icom_adapter);
1479 pci_release_regions(icom_adapter->pci_dev); 1479 pci_release_regions(icom_adapter->pci_dev);
@@ -1539,7 +1539,6 @@ static int __devinit icom_probe(struct pci_dev *dev,
1539 } 1539 }
1540 1540
1541 icom_adapter->base_addr_pci = pci_resource_start(dev, 0); 1541 icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
1542 icom_adapter->irq_number = dev->irq;
1543 icom_adapter->pci_dev = dev; 1542 icom_adapter->pci_dev = dev;
1544 icom_adapter->version = ent->driver_data; 1543 icom_adapter->version = ent->driver_data;
1545 icom_adapter->subsystem_id = ent->subdevice; 1544 icom_adapter->subsystem_id = ent->subdevice;
@@ -1570,7 +1569,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1570 icom_port = &icom_adapter->port_info[index]; 1569 icom_port = &icom_adapter->port_info[index];
1571 1570
1572 if (icom_port->status == ICOM_PORT_ACTIVE) { 1571 if (icom_port->status == ICOM_PORT_ACTIVE) {
1573 icom_port->uart_port.irq = icom_port->adapter->irq_number; 1572 icom_port->uart_port.irq = icom_port->adapter->pci_dev->irq;
1574 icom_port->uart_port.type = PORT_ICOM; 1573 icom_port->uart_port.type = PORT_ICOM;
1575 icom_port->uart_port.iotype = UPIO_MEM; 1574 icom_port->uart_port.iotype = UPIO_MEM;
1576 icom_port->uart_port.membase = 1575 icom_port->uart_port.membase =
diff --git a/drivers/serial/icom.h b/drivers/serial/icom.h
index 798f1ef23712..e8578d8cd35e 100644
--- a/drivers/serial/icom.h
+++ b/drivers/serial/icom.h
@@ -258,7 +258,6 @@ struct icom_port {
258struct icom_adapter { 258struct icom_adapter {
259 void __iomem * base_addr; 259 void __iomem * base_addr;
260 unsigned long base_addr_pci; 260 unsigned long base_addr_pci;
261 unsigned char irq_number;
262 struct pci_dev *pci_dev; 261 struct pci_dev *pci_dev;
263 struct icom_port port_info[4]; 262 struct icom_port port_info[4];
264 int index; 263 int index;
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 96a852aa1903..bfd44177a215 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1387,8 +1387,8 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
1387 struct device_node *ap = of_find_node_by_path("/aliases"); 1387 struct device_node *ap = of_find_node_by_path("/aliases");
1388 1388
1389 if (ap) { 1389 if (ap) {
1390 char *keyb = of_get_property(ap, "keyboard", NULL); 1390 const char *keyb = of_get_property(ap, "keyboard", NULL);
1391 char *ms = of_get_property(ap, "mouse", NULL); 1391 const char *ms = of_get_property(ap, "mouse", NULL);
1392 1392
1393 if (keyb) { 1393 if (keyb) {
1394 if (dp == of_find_node_by_path(keyb)) 1394 if (dp == of_find_node_by_path(keyb))
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 220abce63e4a..b10211c420ef 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -77,7 +77,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
77 77
78 switch (value) { 78 switch (value) {
79 case BITBANG_CS_INACTIVE: 79 case BITBANG_CS_INACTIVE:
80 hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol^1); 80 hw->set_cs(hw->pdata, spi->chip_select, cspol^1);
81 break; 81 break;
82 82
83 case BITBANG_CS_ACTIVE: 83 case BITBANG_CS_ACTIVE:
@@ -98,7 +98,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
98 /* write new configration */ 98 /* write new configration */
99 99
100 writeb(spcon, hw->regs + S3C2410_SPCON); 100 writeb(spcon, hw->regs + S3C2410_SPCON);
101 hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol); 101 hw->set_cs(hw->pdata, spi->chip_select, cspol);
102 102
103 break; 103 break;
104 } 104 }
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 8b7ff467d262..c1b0affae290 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_USB_OHCI_HCD) += host/
15obj-$(CONFIG_USB_UHCI_HCD) += host/ 15obj-$(CONFIG_USB_UHCI_HCD) += host/
16obj-$(CONFIG_USB_SL811_HCD) += host/ 16obj-$(CONFIG_USB_SL811_HCD) += host/
17obj-$(CONFIG_USB_U132_HCD) += host/ 17obj-$(CONFIG_USB_U132_HCD) += host/
18obj-$(CONFIG_ETRAX_USB_HOST) += host/
19obj-$(CONFIG_USB_OHCI_AT91) += host/ 18obj-$(CONFIG_USB_OHCI_AT91) += host/
20 19
21obj-$(CONFIG_USB_ACM) += class/ 20obj-$(CONFIG_USB_ACM) += class/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 3dfa3e40e148..30b7bfbc985a 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -4,6 +4,7 @@
4 * 4 *
5 * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan 5 * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
6 * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru) 6 * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
7 * Copyright (C) 2007 Simon Arlott
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 10 * under the terms of the GNU General Public License as published by the Free
@@ -34,14 +35,14 @@
34#include <linux/errno.h> 35#include <linux/errno.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/init.h> 37#include <linux/init.h>
37#include <linux/device.h> /* FIXME: linux/firmware.h should include it itself */ 38#include <linux/device.h>
38#include <linux/firmware.h> 39#include <linux/firmware.h>
39#include <linux/mutex.h> 40#include <linux/mutex.h>
40 41
41#include "usbatm.h" 42#include "usbatm.h"
42 43
43#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands" 44#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
44#define DRIVER_VERSION "0.2" 45#define DRIVER_VERSION "0.3"
45#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver" 46#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
46 47
47static const char cxacru_driver_name[] = "cxacru"; 48static const char cxacru_driver_name[] = "cxacru";
@@ -64,7 +65,7 @@ static const char cxacru_driver_name[] = "cxacru";
64#define SDRAM_ENA 0x1 65#define SDRAM_ENA 0x1
65 66
66#define CMD_TIMEOUT 2000 /* msecs */ 67#define CMD_TIMEOUT 2000 /* msecs */
67#define POLL_INTERVAL 5000 /* msecs */ 68#define POLL_INTERVAL 1 /* secs */
68 69
69/* commands for interaction with the modem through the control channel before 70/* commands for interaction with the modem through the control channel before
70 * firmware is loaded */ 71 * firmware is loaded */
@@ -146,6 +147,13 @@ enum cxacru_info_idx {
146 CXINF_MAX = 0x1c, 147 CXINF_MAX = 0x1c,
147}; 148};
148 149
150enum cxacru_poll_state {
151 CXPOLL_STOPPING,
152 CXPOLL_STOPPED,
153 CXPOLL_POLLING,
154 CXPOLL_SHUTDOWN
155};
156
149struct cxacru_modem_type { 157struct cxacru_modem_type {
150 u32 pll_f_clk; 158 u32 pll_f_clk;
151 u32 pll_b_clk; 159 u32 pll_b_clk;
@@ -158,7 +166,12 @@ struct cxacru_data {
158 const struct cxacru_modem_type *modem_type; 166 const struct cxacru_modem_type *modem_type;
159 167
160 int line_status; 168 int line_status;
169 struct mutex adsl_state_serialize;
170 int adsl_status;
161 struct delayed_work poll_work; 171 struct delayed_work poll_work;
172 u32 card_info[CXINF_MAX];
173 struct mutex poll_state_serialize;
174 int poll_state;
162 175
163 /* contol handles */ 176 /* contol handles */
164 struct mutex cm_serialize; 177 struct mutex cm_serialize;
@@ -170,6 +183,275 @@ struct cxacru_data {
170 struct completion snd_done; 183 struct completion snd_done;
171}; 184};
172 185
186static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
187 u8 *wdata, int wsize, u8 *rdata, int rsize);
188static void cxacru_poll_status(struct work_struct *work);
189
190/* Card info exported through sysfs */
191#define CXACRU__ATTR_INIT(_name) \
192static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)
193
194#define CXACRU_CMD_INIT(_name) \
195static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
196 cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
197
198#define CXACRU_ATTR_INIT(_value, _type, _name) \
199static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
200 struct device_attribute *attr, char *buf) \
201{ \
202 struct usb_interface *intf = to_usb_interface(dev); \
203 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \
204 struct cxacru_data *instance = usbatm_instance->driver_data; \
205 return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
206} \
207CXACRU__ATTR_INIT(_name)
208
209#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
210#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
211#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
212
213#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
214#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
215#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
216
217static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
218{
219 return snprintf(buf, PAGE_SIZE, "%u\n", value);
220}
221
222static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf)
223{
224 return snprintf(buf, PAGE_SIZE, "%d\n", value);
225}
226
227static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf)
228{
229 if (unlikely(value < 0)) {
230 return snprintf(buf, PAGE_SIZE, "%d.%02u\n",
231 value / 100, -value % 100);
232 } else {
233 return snprintf(buf, PAGE_SIZE, "%d.%02u\n",
234 value / 100, value % 100);
235 }
236}
237
238static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
239{
240 switch (value) {
241 case 0: return snprintf(buf, PAGE_SIZE, "no\n");
242 case 1: return snprintf(buf, PAGE_SIZE, "yes\n");
243 default: return 0;
244 }
245}
246
247static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
248{
249 switch (value) {
250 case 1: return snprintf(buf, PAGE_SIZE, "not connected\n");
251 case 2: return snprintf(buf, PAGE_SIZE, "connected\n");
252 case 3: return snprintf(buf, PAGE_SIZE, "lost\n");
253 default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
254 }
255}
256
257static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
258{
259 switch (value) {
260 case 0: return snprintf(buf, PAGE_SIZE, "down\n");
261 case 1: return snprintf(buf, PAGE_SIZE, "attempting to activate\n");
262 case 2: return snprintf(buf, PAGE_SIZE, "training\n");
263 case 3: return snprintf(buf, PAGE_SIZE, "channel analysis\n");
264 case 4: return snprintf(buf, PAGE_SIZE, "exchange\n");
265 case 5: return snprintf(buf, PAGE_SIZE, "up\n");
266 case 6: return snprintf(buf, PAGE_SIZE, "waiting\n");
267 case 7: return snprintf(buf, PAGE_SIZE, "initialising\n");
268 default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
269 }
270}
271
272static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
273{
274 switch (value) {
275 case 0: return 0;
276 case 1: return snprintf(buf, PAGE_SIZE, "ANSI T1.413\n");
277 case 2: return snprintf(buf, PAGE_SIZE, "ITU-T G.992.1 (G.DMT)\n");
278 case 3: return snprintf(buf, PAGE_SIZE, "ITU-T G.992.2 (G.LITE)\n");
279 default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
280 }
281}
282
283/*
284 * This could use MAC_ADDRESS_HIGH and MAC_ADDRESS_LOW, but since
285 * this data is already in atm_dev there's no point.
286 *
287 * MAC_ADDRESS_HIGH = 0x????5544
288 * MAC_ADDRESS_LOW = 0x33221100
289 * Where 00-55 are bytes 0-5 of the MAC.
290 */
291static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct usb_interface *intf = to_usb_interface(dev);
295 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
296 struct atm_dev *atm_dev = usbatm_instance->atm_dev;
297
298 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
299 atm_dev->esi[0], atm_dev->esi[1], atm_dev->esi[2],
300 atm_dev->esi[3], atm_dev->esi[4], atm_dev->esi[5]);
301}
302
303static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
304 struct device_attribute *attr, char *buf)
305{
306 struct usb_interface *intf = to_usb_interface(dev);
307 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
308 struct cxacru_data *instance = usbatm_instance->driver_data;
309 u32 value = instance->card_info[CXINF_LINE_STARTABLE];
310
311 switch (value) {
312 case 0: return snprintf(buf, PAGE_SIZE, "running\n");
313 case 1: return snprintf(buf, PAGE_SIZE, "stopped\n");
314 default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
315 }
316}
317
318static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
319 struct device_attribute *attr, const char *buf, size_t count)
320{
321 struct usb_interface *intf = to_usb_interface(dev);
322 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
323 struct cxacru_data *instance = usbatm_instance->driver_data;
324 int ret;
325 int poll = -1;
326 char str_cmd[8];
327 int len = strlen(buf);
328
329 if (!capable(CAP_NET_ADMIN))
330 return -EACCES;
331
332 ret = sscanf(buf, "%7s", str_cmd);
333 if (ret != 1)
334 return -EINVAL;
335 ret = 0;
336
337 if (mutex_lock_interruptible(&instance->adsl_state_serialize))
338 return -ERESTARTSYS;
339
340 if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
341 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
342 if (ret < 0) {
343 atm_err(usbatm_instance, "change adsl state:"
344 " CHIP_ADSL_LINE_STOP returned %d\n", ret);
345
346 ret = -EIO;
347 } else {
348 ret = len;
349 poll = CXPOLL_STOPPED;
350 }
351 }
352
353 /* Line status is only updated every second
354 * and the device appears to only react to
355 * START/STOP every second too. Wait 1.5s to
356 * be sure that restart will have an effect. */
357 if (!strcmp(str_cmd, "restart"))
358 msleep(1500);
359
360 if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
361 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
362 if (ret < 0) {
363 atm_err(usbatm_instance, "change adsl state:"
364 " CHIP_ADSL_LINE_START returned %d\n", ret);
365
366 ret = -EIO;
367 } else {
368 ret = len;
369 poll = CXPOLL_POLLING;
370 }
371 }
372
373 if (!strcmp(str_cmd, "poll")) {
374 ret = len;
375 poll = CXPOLL_POLLING;
376 }
377
378 if (ret == 0) {
379 ret = -EINVAL;
380 poll = -1;
381 }
382
383 if (poll == CXPOLL_POLLING) {
384 mutex_lock(&instance->poll_state_serialize);
385 switch (instance->poll_state) {
386 case CXPOLL_STOPPED:
387 /* start polling */
388 instance->poll_state = CXPOLL_POLLING;
389 break;
390
391 case CXPOLL_STOPPING:
392 /* abort stop request */
393 instance->poll_state = CXPOLL_POLLING;
394 case CXPOLL_POLLING:
395 case CXPOLL_SHUTDOWN:
396 /* don't start polling */
397 poll = -1;
398 }
399 mutex_unlock(&instance->poll_state_serialize);
400 } else if (poll == CXPOLL_STOPPED) {
401 mutex_lock(&instance->poll_state_serialize);
402 /* request stop */
403 if (instance->poll_state == CXPOLL_POLLING)
404 instance->poll_state = CXPOLL_STOPPING;
405 mutex_unlock(&instance->poll_state_serialize);
406 }
407
408 mutex_unlock(&instance->adsl_state_serialize);
409
410 if (poll == CXPOLL_POLLING)
411 cxacru_poll_status(&instance->poll_work.work);
412
413 return ret;
414}
415
416/*
417 * All device attributes are included in CXACRU_ALL_FILES
418 * so that the same list can be used multiple times:
419 * INIT (define the device attributes)
420 * CREATE (create all the device files)
421 * REMOVE (remove all the device files)
422 *
423 * With the last two being defined as needed in the functions
424 * they are used in before calling CXACRU_ALL_FILES()
425 */
426#define CXACRU_ALL_FILES(_action) \
427CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_RATE, u32, downstream_rate); \
428CXACRU_ATTR_##_action(CXINF_UPSTREAM_RATE, u32, upstream_rate); \
429CXACRU_ATTR_##_action(CXINF_LINK_STATUS, LINK, link_status); \
430CXACRU_ATTR_##_action(CXINF_LINE_STATUS, LINE, line_status); \
431CXACRU__ATTR_##_action( mac_address); \
432CXACRU_ATTR_##_action(CXINF_UPSTREAM_SNR_MARGIN, dB, upstream_snr_margin); \
433CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_SNR_MARGIN, dB, downstream_snr_margin); \
434CXACRU_ATTR_##_action(CXINF_UPSTREAM_ATTENUATION, dB, upstream_attenuation); \
435CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_ATTENUATION, dB, downstream_attenuation); \
436CXACRU_ATTR_##_action(CXINF_TRANSMITTER_POWER, s8, transmitter_power); \
437CXACRU_ATTR_##_action(CXINF_UPSTREAM_BITS_PER_FRAME, u32, upstream_bits_per_frame); \
438CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_BITS_PER_FRAME, u32, downstream_bits_per_frame); \
439CXACRU_ATTR_##_action(CXINF_STARTUP_ATTEMPTS, u32, startup_attempts); \
440CXACRU_ATTR_##_action(CXINF_UPSTREAM_CRC_ERRORS, u32, upstream_crc_errors); \
441CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_CRC_ERRORS, u32, downstream_crc_errors); \
442CXACRU_ATTR_##_action(CXINF_UPSTREAM_FEC_ERRORS, u32, upstream_fec_errors); \
443CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_FEC_ERRORS, u32, downstream_fec_errors); \
444CXACRU_ATTR_##_action(CXINF_UPSTREAM_HEC_ERRORS, u32, upstream_hec_errors); \
445CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_HEC_ERRORS, u32, downstream_hec_errors); \
446CXACRU_ATTR_##_action(CXINF_LINE_STARTABLE, bool, line_startable); \
447CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \
448CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
449CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
450CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
451CXACRU_CMD_##_action( adsl_state);
452
453CXACRU_ALL_FILES(INIT);
454
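CXACRU_ALL_FILES() is an X-macro: a single list of attributes expanded several different ways by redefining the per-entry helper before each expansion. The self-contained, compilable sketch below shows the same trick with invented names (nothing here comes from the driver):

    #include <stdio.h>

    #define DEMO_ALL_FIELDS(_action) \
            DEMO_FIELD_##_action(width)  \
            DEMO_FIELD_##_action(height) \
            DEMO_FIELD_##_action(depth)

    /* INIT: declare one variable per entry */
    #define DEMO_FIELD_INIT(_name) static int demo_##_name;
    DEMO_ALL_FIELDS(INIT)
    #undef DEMO_FIELD_INIT

    int main(void)
    {
            /* PRINT: emit one line per entry; defined only where it is needed */
    #define DEMO_FIELD_PRINT(_name) printf(#_name " = %d\n", demo_##_name);
            DEMO_ALL_FIELDS(PRINT)
    #undef DEMO_FIELD_PRINT
            return 0;
    }

The driver uses exactly this shape for INIT at file scope and for CREATE/REMOVE inside bind and unbind.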
173/* the following three functions are stolen from drivers/usb/core/message.c */ 455/* the following three functions are stolen from drivers/usb/core/message.c */
174static void cxacru_blocking_completion(struct urb *urb) 456static void cxacru_blocking_completion(struct urb *urb)
175{ 457{
@@ -347,8 +629,6 @@ static int cxacru_card_status(struct cxacru_data *instance)
347 return 0; 629 return 0;
348} 630}
349 631
350static void cxacru_poll_status(struct work_struct *work);
351
352static int cxacru_atm_start(struct usbatm_data *usbatm_instance, 632static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
353 struct atm_dev *atm_dev) 633 struct atm_dev *atm_dev)
354{ 634{
@@ -357,6 +637,7 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
357 struct atm_dev *atm_dev = usbatm_instance->atm_dev; 637 struct atm_dev *atm_dev = usbatm_instance->atm_dev;
358 */ 638 */
359 int ret; 639 int ret;
640 int start_polling = 1;
360 641
361 dbg("cxacru_atm_start"); 642 dbg("cxacru_atm_start");
362 643
@@ -369,14 +650,35 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
369 } 650 }
370 651
371 /* start ADSL */ 652 /* start ADSL */
653 mutex_lock(&instance->adsl_state_serialize);
372 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); 654 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
373 if (ret < 0) { 655 if (ret < 0) {
374 atm_err(usbatm_instance, "cxacru_atm_start: CHIP_ADSL_LINE_START returned %d\n", ret); 656 atm_err(usbatm_instance, "cxacru_atm_start: CHIP_ADSL_LINE_START returned %d\n", ret);
657 mutex_unlock(&instance->adsl_state_serialize);
375 return ret; 658 return ret;
376 } 659 }
377 660
378 /* Start status polling */ 661 /* Start status polling */
379 cxacru_poll_status(&instance->poll_work.work); 662 mutex_lock(&instance->poll_state_serialize);
663 switch (instance->poll_state) {
664 case CXPOLL_STOPPED:
665 /* start polling */
666 instance->poll_state = CXPOLL_POLLING;
667 break;
668
669 case CXPOLL_STOPPING:
670 /* abort stop request */
671 instance->poll_state = CXPOLL_POLLING;
672 case CXPOLL_POLLING:
673 case CXPOLL_SHUTDOWN:
674 /* don't start polling */
675 start_polling = 0;
676 }
677 mutex_unlock(&instance->poll_state_serialize);
678 mutex_unlock(&instance->adsl_state_serialize);
679
680 if (start_polling)
681 cxacru_poll_status(&instance->poll_work.work);
380 return 0; 682 return 0;
381} 683}
382 684
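The start path above takes adsl_state_serialize, then poll_state_serialize, and defers the actual poll until both locks are dropped. Here is a minimal pthread-based analogue of that locking shape, purely illustrative and with every name invented:

    #include <pthread.h>

    static pthread_mutex_t adsl_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;
    static int poll_state;                          /* 0 = stopped, 1 = polling */

    static void demo_start(void (*do_poll)(void))
    {
            int kick = 0;

            pthread_mutex_lock(&adsl_lock);
            /* ... the LINE_START command would be issued here ... */
            pthread_mutex_lock(&poll_lock);
            if (poll_state == 0) {
                    poll_state = 1;                 /* we own starting the poller */
                    kick = 1;
            }
            pthread_mutex_unlock(&poll_lock);
            pthread_mutex_unlock(&adsl_lock);

            if (kick)
                    do_poll();                      /* possibly slow: run outside both locks */
    }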
@@ -387,14 +689,46 @@ static void cxacru_poll_status(struct work_struct *work)
387 u32 buf[CXINF_MAX] = {}; 689 u32 buf[CXINF_MAX] = {};
388 struct usbatm_data *usbatm = instance->usbatm; 690 struct usbatm_data *usbatm = instance->usbatm;
389 struct atm_dev *atm_dev = usbatm->atm_dev; 691 struct atm_dev *atm_dev = usbatm->atm_dev;
692 int keep_polling = 1;
390 int ret; 693 int ret;
391 694
392 ret = cxacru_cm_get_array(instance, CM_REQUEST_CARD_INFO_GET, buf, CXINF_MAX); 695 ret = cxacru_cm_get_array(instance, CM_REQUEST_CARD_INFO_GET, buf, CXINF_MAX);
393 if (ret < 0) { 696 if (ret < 0) {
394 atm_warn(usbatm, "poll status: error %d\n", ret); 697 if (ret != -ESHUTDOWN)
698 atm_warn(usbatm, "poll status: error %d\n", ret);
699
700 mutex_lock(&instance->poll_state_serialize);
701 if (instance->poll_state != CXPOLL_SHUTDOWN) {
702 instance->poll_state = CXPOLL_STOPPED;
703
704 if (ret != -ESHUTDOWN)
705 atm_warn(usbatm, "polling disabled, set adsl_state"
706 " to 'start' or 'poll' to resume\n");
707 }
708 mutex_unlock(&instance->poll_state_serialize);
395 goto reschedule; 709 goto reschedule;
396 } 710 }
397 711
712 memcpy(instance->card_info, buf, sizeof(instance->card_info));
713
714 if (instance->adsl_status != buf[CXINF_LINE_STARTABLE]) {
715 instance->adsl_status = buf[CXINF_LINE_STARTABLE];
716
717 switch (instance->adsl_status) {
718 case 0:
719 atm_printk(KERN_INFO, usbatm, "ADSL state: running\n");
720 break;
721
722 case 1:
723 atm_printk(KERN_INFO, usbatm, "ADSL state: stopped\n");
724 break;
725
726 default:
727 atm_printk(KERN_INFO, usbatm, "Unknown adsl status %02x\n", instance->adsl_status);
728 break;
729 }
730 }
731
398 if (instance->line_status == buf[CXINF_LINE_STATUS]) 732 if (instance->line_status == buf[CXINF_LINE_STATUS])
399 goto reschedule; 733 goto reschedule;
400 734
@@ -449,7 +783,20 @@ static void cxacru_poll_status(struct work_struct *work)
449 break; 783 break;
450 } 784 }
451reschedule: 785reschedule:
452 schedule_delayed_work(&instance->poll_work, msecs_to_jiffies(POLL_INTERVAL)); 786
787 mutex_lock(&instance->poll_state_serialize);
788 if (instance->poll_state == CXPOLL_STOPPING &&
789 instance->adsl_status == 1 && /* stopped */
790 instance->line_status == 0) /* down */
791 instance->poll_state = CXPOLL_STOPPED;
792
793 if (instance->poll_state == CXPOLL_STOPPED)
794 keep_polling = 0;
795 mutex_unlock(&instance->poll_state_serialize);
796
797 if (keep_polling)
798 schedule_delayed_work(&instance->poll_work,
799 round_jiffies_relative(POLL_INTERVAL*HZ));
453} 800}
454 801
455static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw, 802static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
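The reschedule path above keeps rearming the delayed work and only honours a pending stop request once the hardware has caught up (modem reports stopped, line reports down). A tiny standalone sketch of that decision follows; the enum values mirror the driver's, but cx_keep_polling() itself is made up for illustration.

    enum cx_poll { CXPOLL_STOPPING, CXPOLL_STOPPED, CXPOLL_POLLING, CXPOLL_SHUTDOWN };

    /* Returns non-zero while the delayed work item should re-queue itself. */
    static int cx_keep_polling(enum cx_poll *state, int adsl_status, int line_status)
    {
            if (*state == CXPOLL_STOPPING &&
                adsl_status == 1 &&                 /* modem reports "stopped" */
                line_status == 0)                   /* line reports "down" */
                    *state = CXPOLL_STOPPED;

            return *state != CXPOLL_STOPPED;
    }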
@@ -684,6 +1031,14 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
684 1031
685 instance->usbatm = usbatm_instance; 1032 instance->usbatm = usbatm_instance;
686 instance->modem_type = (struct cxacru_modem_type *) id->driver_info; 1033 instance->modem_type = (struct cxacru_modem_type *) id->driver_info;
1034 memset(instance->card_info, 0, sizeof(instance->card_info));
1035
1036 mutex_init(&instance->poll_state_serialize);
1037 instance->poll_state = CXPOLL_STOPPED;
1038 instance->line_status = -1;
1039 instance->adsl_status = -1;
1040
1041 mutex_init(&instance->adsl_state_serialize);
687 1042
688 instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL); 1043 instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
689 if (!instance->rcv_buf) { 1044 if (!instance->rcv_buf) {
@@ -710,6 +1065,13 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
710 goto fail; 1065 goto fail;
711 } 1066 }
712 1067
1068 #define CXACRU_DEVICE_CREATE_FILE(_name) \
1069 ret = device_create_file(&intf->dev, &dev_attr_##_name); \
1070 if (unlikely(ret)) \
1071 goto fail_sysfs;
1072 CXACRU_ALL_FILES(CREATE);
1073 #undef CXACRU_DEVICE_CREATE_FILE
1074
713 usb_fill_int_urb(instance->rcv_urb, 1075 usb_fill_int_urb(instance->rcv_urb,
714 usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), 1076 usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
715 instance->rcv_buf, PAGE_SIZE, 1077 instance->rcv_buf, PAGE_SIZE,
@@ -730,6 +1092,14 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
730 1092
731 return 0; 1093 return 0;
732 1094
1095 fail_sysfs:
1096 dbg("cxacru_bind: device_create_file failed (%d)\n", ret);
1097
1098 #define CXACRU_DEVICE_REMOVE_FILE(_name) \
1099 device_remove_file(&intf->dev, &dev_attr_##_name);
1100 CXACRU_ALL_FILES(REMOVE);
1101 #undef CXACRU_DEVICE_REMOVE_FILE
1102
733 fail: 1103 fail:
734 free_page((unsigned long) instance->snd_buf); 1104 free_page((unsigned long) instance->snd_buf);
735 free_page((unsigned long) instance->rcv_buf); 1105 free_page((unsigned long) instance->rcv_buf);
@@ -744,6 +1114,7 @@ static void cxacru_unbind(struct usbatm_data *usbatm_instance,
744 struct usb_interface *intf) 1114 struct usb_interface *intf)
745{ 1115{
746 struct cxacru_data *instance = usbatm_instance->driver_data; 1116 struct cxacru_data *instance = usbatm_instance->driver_data;
1117 int is_polling = 1;
747 1118
748 dbg("cxacru_unbind entered"); 1119 dbg("cxacru_unbind entered");
749 1120
@@ -752,8 +1123,20 @@ static void cxacru_unbind(struct usbatm_data *usbatm_instance,
752 return; 1123 return;
753 } 1124 }
754 1125
755 while (!cancel_delayed_work(&instance->poll_work)) 1126 mutex_lock(&instance->poll_state_serialize);
756 flush_scheduled_work(); 1127 BUG_ON(instance->poll_state == CXPOLL_SHUTDOWN);
1128
1129 /* note whether status polling is still running,
1130 * so we know below whether there is anything to cancel */
1131 if (instance->poll_state == CXPOLL_STOPPED)
1132 is_polling = 0;
1133
1134 /* stop polling from being stopped or started */
1135 instance->poll_state = CXPOLL_SHUTDOWN;
1136 mutex_unlock(&instance->poll_state_serialize);
1137
1138 if (is_polling)
1139 cancel_rearming_delayed_work(&instance->poll_work);
757 1140
758 usb_kill_urb(instance->snd_urb); 1141 usb_kill_urb(instance->snd_urb);
759 usb_kill_urb(instance->rcv_urb); 1142 usb_kill_urb(instance->rcv_urb);
@@ -762,6 +1145,12 @@ static void cxacru_unbind(struct usbatm_data *usbatm_instance,
762 1145
763 free_page((unsigned long) instance->snd_buf); 1146 free_page((unsigned long) instance->snd_buf);
764 free_page((unsigned long) instance->rcv_buf); 1147 free_page((unsigned long) instance->rcv_buf);
1148
1149 #define CXACRU_DEVICE_REMOVE_FILE(_name) \
1150 device_remove_file(&intf->dev, &dev_attr_##_name);
1151 CXACRU_ALL_FILES(REMOVE);
1152 #undef CXACRU_DEVICE_REMOVE_FILE
1153
765 kfree(instance); 1154 kfree(instance);
766 1155
767 usbatm_instance->driver_data = NULL; 1156 usbatm_instance->driver_data = NULL;
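cxacru_unbind() publishes CXPOLL_SHUTDOWN before cancelling the work item, so nothing can re-arm polling in the window between the two steps. A pthread-flavoured sketch of that ordering, with invented names standing in for the driver's state and mutex:

    #include <pthread.h>

    enum { DEMO_STOPPED, DEMO_POLLING, DEMO_SHUTDOWN };

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static int demo_state = DEMO_POLLING;

    static void demo_unbind(void (*cancel_poller)(void))
    {
            int was_polling;

            pthread_mutex_lock(&state_lock);
            was_polling = (demo_state != DEMO_STOPPED);
            demo_state = DEMO_SHUTDOWN;             /* blocks any later start or stop */
            pthread_mutex_unlock(&state_lock);

            if (was_polling)
                    cancel_poller();                /* safe: nobody can re-arm it now */
    }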
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index ec63b0ee0743..b3f779f5933a 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -274,6 +274,9 @@ static void usbatm_complete(struct urb *urb)
274 (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) || 274 (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) ||
275 urb->status != -EILSEQ )) 275 urb->status != -EILSEQ ))
276 { 276 {
277 if (urb->status == -ESHUTDOWN)
278 return;
279
277 if (printk_ratelimit()) 280 if (printk_ratelimit())
278 atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n", 281 atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n",
279 __func__, urb, urb->status); 282 __func__, urb, urb->status);
@@ -343,7 +346,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
343 UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); 346 UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end);
344 } 347 }
345 348
346 memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); 349 memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
347 __skb_put(sarb, ATM_CELL_PAYLOAD); 350 __skb_put(sarb, ATM_CELL_PAYLOAD);
348 351
349 if (pti & 1) { 352 if (pti & 1) {
@@ -370,7 +373,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
370 goto out; 373 goto out;
371 } 374 }
372 375
373 if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) { 376 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
374 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", 377 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
375 __func__, vcc); 378 __func__, vcc);
376 atomic_inc(&vcc->stats->rx_err); 379 atomic_inc(&vcc->stats->rx_err);
@@ -396,7 +399,9 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
396 goto out; /* atm_charge increments rx_drop */ 399 goto out; /* atm_charge increments rx_drop */
397 } 400 }
398 401
399 memcpy(skb->data, sarb->tail - pdu_length, length); 402 skb_copy_to_linear_data(skb,
403 skb_tail_pointer(sarb) - pdu_length,
404 length);
400 __skb_put(skb, length); 405 __skb_put(skb, length);
401 406
402 vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", 407 vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u",
@@ -484,7 +489,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
484 ptr[4] = 0xec; 489 ptr[4] = 0xec;
485 ptr += ATM_CELL_HEADER; 490 ptr += ATM_CELL_HEADER;
486 491
487 memcpy(ptr, skb->data, data_len); 492 skb_copy_from_linear_data(skb, ptr, data_len);
488 ptr += data_len; 493 ptr += data_len;
489 __skb_pull(skb, data_len); 494 __skb_pull(skb, data_len);
490 495
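The usbatm hunks above switch from raw sarb->tail arithmetic to the skb_tail_pointer()/__skb_put() accessors, but the underlying discipline is unchanged: copy at the tail, then commit the length. A toy user-space analogue of that discipline (struct tbuf and its helpers are inventions for the demo, not kernel API):

    #include <assert.h>
    #include <string.h>

    struct tbuf {
            unsigned char data[2048];
            size_t len;                             /* bytes already committed */
    };

    static unsigned char *tbuf_tail(struct tbuf *b)
    {
            return b->data + b->len;                /* first free byte */
    }

    static void tbuf_put(struct tbuf *b, const void *src, size_t n)
    {
            assert(b->len + n <= sizeof(b->data));
            memcpy(tbuf_tail(b), src, n);           /* copy to the tail ...     */
            b->len += n;                            /* ... then grow the length */
    }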
@@ -966,6 +971,14 @@ static int usbatm_atm_init(struct usbatm_data *instance)
966 /* temp init ATM device, set to 128kbit */ 971 /* temp init ATM device, set to 128kbit */
967 atm_dev->link_rate = 128 * 1000 / 424; 972 atm_dev->link_rate = 128 * 1000 / 424;
968 973
974 ret = sysfs_create_link(&atm_dev->class_dev.kobj,
975 &instance->usb_intf->dev.kobj, "device");
976 if (ret) {
977 atm_err(instance, "%s: sysfs_create_link failed: %d\n",
978 __func__, ret);
979 goto fail_sysfs;
980 }
981
969 if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) { 982 if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) {
970 atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret); 983 atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret);
971 goto fail; 984 goto fail;
@@ -984,6 +997,8 @@ static int usbatm_atm_init(struct usbatm_data *instance)
984 return 0; 997 return 0;
985 998
986 fail: 999 fail:
1000 sysfs_remove_link(&atm_dev->class_dev.kobj, "device");
1001 fail_sysfs:
987 instance->atm_dev = NULL; 1002 instance->atm_dev = NULL;
988 atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */ 1003 atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */
989 return ret; 1004 return ret;
@@ -1316,8 +1331,10 @@ void usbatm_usb_disconnect(struct usb_interface *intf)
1316 kfree(instance->cell_buf); 1331 kfree(instance->cell_buf);
1317 1332
1318 /* ATM finalize */ 1333 /* ATM finalize */
1319 if (instance->atm_dev) 1334 if (instance->atm_dev) {
1335 sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
1320 atm_dev_deregister(instance->atm_dev); 1336 atm_dev_deregister(instance->atm_dev);
1337 }
1321 1338
1322 usbatm_put_instance(instance); /* taken in usbatm_usb_probe */ 1339 usbatm_put_instance(instance); /* taken in usbatm_usb_probe */
1323} 1340}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 31ae661e586a..14de3b1b6a20 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -212,7 +212,41 @@ static int acm_write_start(struct acm *acm)
212 } 212 }
213 return rc; 213 return rc;
214} 214}
215/*
216 * attributes exported through sysfs
217 */
218static ssize_t show_caps
219(struct device *dev, struct device_attribute *attr, char *buf)
220{
221 struct usb_interface *intf = to_usb_interface(dev);
222 struct acm *acm = usb_get_intfdata(intf);
223
224 return sprintf(buf, "%d", acm->ctrl_caps);
225}
226static DEVICE_ATTR(bmCapabilities, S_IRUGO, show_caps, NULL);
227
228static ssize_t show_country_codes
229(struct device *dev, struct device_attribute *attr, char *buf)
230{
231 struct usb_interface *intf = to_usb_interface(dev);
232 struct acm *acm = usb_get_intfdata(intf);
233
234 memcpy(buf, acm->country_codes, acm->country_code_size);
235 return acm->country_code_size;
236}
237
238static DEVICE_ATTR(wCountryCodes, S_IRUGO, show_country_codes, NULL);
239
240static ssize_t show_country_rel_date
241(struct device *dev, struct device_attribute *attr, char *buf)
242{
243 struct usb_interface *intf = to_usb_interface(dev);
244 struct acm *acm = usb_get_intfdata(intf);
245
246 return sprintf(buf, "%d", acm->country_rel_date);
247}
215 248
249static DEVICE_ATTR(iCountryCodeRelDate, S_IRUGO, show_country_rel_date, NULL);
216/* 250/*
217 * Interrupt handlers for various ACM device responses 251 * Interrupt handlers for various ACM device responses
218 */ 252 */
@@ -514,6 +548,7 @@ static void acm_tty_unregister(struct acm *acm)
514 usb_free_urb(acm->writeurb); 548 usb_free_urb(acm->writeurb);
515 for (i = 0; i < nr; i++) 549 for (i = 0; i < nr; i++)
516 usb_free_urb(acm->ru[i].urb); 550 usb_free_urb(acm->ru[i].urb);
551 kfree(acm->country_codes);
517 kfree(acm); 552 kfree(acm);
518} 553}
519 554
@@ -761,6 +796,7 @@ static int acm_probe (struct usb_interface *intf,
761 const struct usb_device_id *id) 796 const struct usb_device_id *id)
762{ 797{
763 struct usb_cdc_union_desc *union_header = NULL; 798 struct usb_cdc_union_desc *union_header = NULL;
799 struct usb_cdc_country_functional_desc *cfd = NULL;
764 char *buffer = intf->altsetting->extra; 800 char *buffer = intf->altsetting->extra;
765 int buflen = intf->altsetting->extralen; 801 int buflen = intf->altsetting->extralen;
766 struct usb_interface *control_interface; 802 struct usb_interface *control_interface;
@@ -824,8 +860,9 @@ static int acm_probe (struct usb_interface *intf,
824 union_header = (struct usb_cdc_union_desc *) 860 union_header = (struct usb_cdc_union_desc *)
825 buffer; 861 buffer;
826 break; 862 break;
827 case USB_CDC_COUNTRY_TYPE: /* maybe somehow export */ 863 case USB_CDC_COUNTRY_TYPE: /* export through sysfs */
828 break; /* for now we ignore it */ 864 cfd = (struct usb_cdc_country_functional_desc *)buffer;
865 break;
829 case USB_CDC_HEADER_TYPE: /* maybe check version */ 866 case USB_CDC_HEADER_TYPE: /* maybe check version */
830 break; /* for now we ignore it */ 867 break; /* for now we ignore it */
831 case USB_CDC_ACM_TYPE: 868 case USB_CDC_ACM_TYPE:
@@ -983,6 +1020,34 @@ skip_normal_probe:
983 goto alloc_fail7; 1020 goto alloc_fail7;
984 } 1021 }
985 1022
1023 usb_set_intfdata (intf, acm);
1024
1025 i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
1026 if (i < 0)
1027 goto alloc_fail8;
1028
1029 if (cfd) { /* export the country data */
1030 acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
1031 if (!acm->country_codes)
1032 goto skip_countries;
1033 acm->country_code_size = cfd->bLength - 4;
1034 memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0, cfd->bLength - 4);
1035 acm->country_rel_date = cfd->iCountryCodeRelDate;
1036
1037 i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
1038 if (i < 0) {
1039 kfree(acm->country_codes);
1040 goto skip_countries;
1041 }
1042
1043 i = device_create_file(&intf->dev, &dev_attr_iCountryCodeRelDate);
1044 if (i < 0) {
1045 kfree(acm->country_codes);
1046 goto skip_countries;
1047 }
1048 }
1049
1050skip_countries:
986 usb_fill_int_urb(acm->ctrlurb, usb_dev, usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress), 1051 usb_fill_int_urb(acm->ctrlurb, usb_dev, usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
987 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm, epctrl->bInterval); 1052 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm, epctrl->bInterval);
988 acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1053 acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
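acm_probe() treats the country data as optional: a failure there jumps to skip_countries and probing continues, while earlier failures unwind through the alloc_fail* ladder. A toy, compilable illustration of that split (all names invented):

    #include <stdio.h>
    #include <stdlib.h>

    static int demo_probe(void)
    {
            char *ctrl, *country = NULL;

            ctrl = malloc(64);                      /* mandatory resource */
            if (!ctrl)
                    goto fail;

            country = malloc(16);                   /* optional extra */
            if (!country)
                    goto skip_optional;

    skip_optional:
            printf("probe ok, optional data %savailable\n", country ? "" : "un");
            free(country);                          /* free(NULL) is a no-op */
            free(ctrl);
            return 0;

    fail:
            return -1;
    }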
@@ -1006,9 +1071,10 @@ skip_normal_probe:
1006 tty_register_device(acm_tty_driver, minor, &control_interface->dev); 1071 tty_register_device(acm_tty_driver, minor, &control_interface->dev);
1007 1072
1008 acm_table[minor] = acm; 1073 acm_table[minor] = acm;
1009 usb_set_intfdata (intf, acm);
1010 return 0;
1011 1074
1075 return 0;
1076alloc_fail8:
1077 usb_free_urb(acm->writeurb);
1012alloc_fail7: 1078alloc_fail7:
1013 for (i = 0; i < num_rx_buf; i++) 1079 for (i = 0; i < num_rx_buf; i++)
1014 usb_buffer_free(usb_dev, acm->readsize, acm->rb[i].base, acm->rb[i].dma); 1080 usb_buffer_free(usb_dev, acm->readsize, acm->rb[i].base, acm->rb[i].dma);
@@ -1027,7 +1093,7 @@ alloc_fail:
1027 1093
1028static void acm_disconnect(struct usb_interface *intf) 1094static void acm_disconnect(struct usb_interface *intf)
1029{ 1095{
1030 struct acm *acm = usb_get_intfdata (intf); 1096 struct acm *acm = usb_get_intfdata(intf);
1031 struct usb_device *usb_dev = interface_to_usbdev(intf); 1097 struct usb_device *usb_dev = interface_to_usbdev(intf);
1032 int i; 1098 int i;
1033 1099
@@ -1041,6 +1107,11 @@ static void acm_disconnect(struct usb_interface *intf)
1041 mutex_unlock(&open_mutex); 1107 mutex_unlock(&open_mutex);
1042 return; 1108 return;
1043 } 1109 }
1110 if (acm->country_codes) {
1111 device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
1112 device_remove_file(&intf->dev, &dev_attr_iCountryCodeRelDate);
1113 }
1114 device_remove_file(&intf->dev, &dev_attr_bmCapabilities);
1044 acm->dev = NULL; 1115 acm->dev = NULL;
1045 usb_set_intfdata(acm->control, NULL); 1116 usb_set_intfdata(acm->control, NULL);
1046 usb_set_intfdata(acm->data, NULL); 1117 usb_set_intfdata(acm->data, NULL);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1bcaea32cfc1..09f7765dbf8d 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -91,6 +91,9 @@ struct acm {
91 struct urb *ctrlurb, *writeurb; /* urbs */ 91 struct urb *ctrlurb, *writeurb; /* urbs */
92 u8 *ctrl_buffer; /* buffers of urbs */ 92 u8 *ctrl_buffer; /* buffers of urbs */
93 dma_addr_t ctrl_dma; /* dma handles of buffers */ 93 dma_addr_t ctrl_dma; /* dma handles of buffers */
94 u8 *country_codes; /* country codes from device */
95 unsigned int country_code_size; /* size of this buffer */
96 unsigned int country_rel_date; /* release date of version */
94 struct acm_wb wb[ACM_NW]; 97 struct acm_wb wb[ACM_NW];
95 struct acm_ru ru[ACM_NR]; 98 struct acm_ru ru[ACM_NR];
96 struct acm_rb rb[ACM_NR]; 99 struct acm_rb rb[ACM_NR];
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 2fc0f88a3d86..f493fb1eaa27 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -31,7 +31,30 @@ config USB_DEVICEFS
31 For the format of the various /proc/bus/usb/ files, please read 31 For the format of the various /proc/bus/usb/ files, please read
32 <file:Documentation/usb/proc_usb_info.txt>. 32 <file:Documentation/usb/proc_usb_info.txt>.
33 33
34 Most users want to say Y here. 34 Usbfs files can't handle Access Control Lists (ACL), which are the
35 default way to grant access to USB devices for untrusted users of a
36 desktop system. The usbfs functionality is replaced by real
37 device-nodes managed by udev. These nodes live in /dev/bus/usb and
38 are used by libusb.
39
40config USB_DEVICE_CLASS
41 bool "USB device class-devices (DEPRECATED)"
42 depends on USB
43 default n
44 ---help---
45 Userspace access to USB devices is granted by device-nodes exported
46 directly from the usbdev in sysfs. Old versions of the driver
47 core and udev needed additional class devices to export device nodes.
48
49 These additional devices are difficult to handle in userspace if
50 information about USB interfaces must be available: one device contains
51 the device node, while the other contains the interface data. Both
52 devices are at the same level in sysfs (siblings) and one can't access
53 the other. The device node created directly by the usbdev is the parent
54 device of the interface and therefore easily accessible from the interface
55 event.
56
57 This option provides backward compatibility if needed.
35 58
36config USB_DYNAMIC_MINORS 59config USB_DYNAMIC_MINORS
37 bool "Dynamic USB minor allocation (EXPERIMENTAL)" 60 bool "Dynamic USB minor allocation (EXPERIMENTAL)"
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index aefc7987120d..6753ca059ee4 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -246,7 +246,6 @@ static char *usb_dump_interface_descriptor(char *start, char *end,
246 246
247 if (start > end) 247 if (start > end)
248 return start; 248 return start;
249 down_read(&usb_bus_type.subsys.rwsem);
250 if (iface) { 249 if (iface) {
251 driver_name = (iface->dev.driver 250 driver_name = (iface->dev.driver
252 ? iface->dev.driver->name 251 ? iface->dev.driver->name
@@ -263,7 +262,6 @@ static char *usb_dump_interface_descriptor(char *start, char *end,
263 desc->bInterfaceSubClass, 262 desc->bInterfaceSubClass,
264 desc->bInterfaceProtocol, 263 desc->bInterfaceProtocol,
265 driver_name); 264 driver_name);
266 up_read(&usb_bus_type.subsys.rwsem);
267 return start; 265 return start;
268} 266}
269 267
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 36e7a843bf91..927a181120a9 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -57,7 +57,6 @@
57 57
58#define USB_MAXBUS 64 58#define USB_MAXBUS 64
59#define USB_DEVICE_MAX USB_MAXBUS * 128 59#define USB_DEVICE_MAX USB_MAXBUS * 128
60static struct class *usb_device_class;
61 60
62/* Mutual exclusion for removal, open, and release */ 61/* Mutual exclusion for removal, open, and release */
63DEFINE_MUTEX(usbfs_mutex); 62DEFINE_MUTEX(usbfs_mutex);
@@ -421,14 +420,11 @@ static int claimintf(struct dev_state *ps, unsigned int ifnum)
421 if (test_bit(ifnum, &ps->ifclaimed)) 420 if (test_bit(ifnum, &ps->ifclaimed))
422 return 0; 421 return 0;
423 422
424 /* lock against other changes to driver bindings */
425 down_write(&usb_bus_type.subsys.rwsem);
426 intf = usb_ifnum_to_if(dev, ifnum); 423 intf = usb_ifnum_to_if(dev, ifnum);
427 if (!intf) 424 if (!intf)
428 err = -ENOENT; 425 err = -ENOENT;
429 else 426 else
430 err = usb_driver_claim_interface(&usbfs_driver, intf, ps); 427 err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
431 up_write(&usb_bus_type.subsys.rwsem);
432 if (err == 0) 428 if (err == 0)
433 set_bit(ifnum, &ps->ifclaimed); 429 set_bit(ifnum, &ps->ifclaimed);
434 return err; 430 return err;
@@ -444,8 +440,6 @@ static int releaseintf(struct dev_state *ps, unsigned int ifnum)
444 if (ifnum >= 8*sizeof(ps->ifclaimed)) 440 if (ifnum >= 8*sizeof(ps->ifclaimed))
445 return err; 441 return err;
446 dev = ps->dev; 442 dev = ps->dev;
447 /* lock against other changes to driver bindings */
448 down_write(&usb_bus_type.subsys.rwsem);
449 intf = usb_ifnum_to_if(dev, ifnum); 443 intf = usb_ifnum_to_if(dev, ifnum);
450 if (!intf) 444 if (!intf)
451 err = -ENOENT; 445 err = -ENOENT;
@@ -453,7 +447,6 @@ static int releaseintf(struct dev_state *ps, unsigned int ifnum)
453 usb_driver_release_interface(&usbfs_driver, intf); 447 usb_driver_release_interface(&usbfs_driver, intf);
454 err = 0; 448 err = 0;
455 } 449 }
456 up_write(&usb_bus_type.subsys.rwsem);
457 return err; 450 return err;
458} 451}
459 452
@@ -520,22 +513,25 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, unsig
520 return ret; 513 return ret;
521} 514}
522 515
523static struct usb_device *usbdev_lookup_minor(int minor) 516static int __match_minor(struct device *dev, void *data)
524{ 517{
525 struct device *device; 518 int minor = *((int *)data);
526 struct usb_device *udev = NULL;
527 519
528 down(&usb_device_class->sem); 520 if (dev->devt == MKDEV(USB_DEVICE_MAJOR, minor))
529 list_for_each_entry(device, &usb_device_class->devices, node) { 521 return 1;
530 if (device->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { 522 return 0;
531 udev = device->platform_data; 523}
532 break;
533 }
534 }
535 up(&usb_device_class->sem);
536 524
537 return udev; 525static struct usb_device *usbdev_lookup_by_minor(int minor)
538}; 526{
527 struct device *dev;
528
529 dev = bus_find_device(&usb_bus_type, NULL, &minor, __match_minor);
530 if (!dev)
531 return NULL;
532 put_device(dev);
533 return container_of(dev, struct usb_device, dev);
534}
539 535
540/* 536/*
541 * file operations 537 * file operations
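usbdev_lookup_by_minor() now delegates the walk to bus_find_device() with a __match_minor predicate instead of iterating a private class list. The shape of that callback-driven search, reduced to a self-contained user-space sketch (struct demo_dev and both helpers are invented):

    #include <stddef.h>

    struct demo_dev {
            int major, minor;
    };

    /* Walk a collection with a caller-supplied predicate; return first match. */
    static struct demo_dev *find_dev(struct demo_dev *devs, size_t n,
                                     int (*match)(struct demo_dev *, void *),
                                     void *data)
    {
            for (size_t i = 0; i < n; i++)
                    if (match(&devs[i], data))
                            return &devs[i];
            return NULL;
    }

    static int match_minor(struct demo_dev *dev, void *data)
    {
            return dev->minor == *(int *)data;
    }

The kernel version additionally has to drop the reference bus_find_device() took, which is why the patch calls put_device() before returning the containing usb_device.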
@@ -554,11 +550,14 @@ static int usbdev_open(struct inode *inode, struct file *file)
554 goto out; 550 goto out;
555 551
556 ret = -ENOENT; 552 ret = -ENOENT;
557 /* check if we are called from a real node or usbfs */ 553 /* usbdev device-node */
558 if (imajor(inode) == USB_DEVICE_MAJOR) 554 if (imajor(inode) == USB_DEVICE_MAJOR)
559 dev = usbdev_lookup_minor(iminor(inode)); 555 dev = usbdev_lookup_by_minor(iminor(inode));
556#ifdef CONFIG_USB_DEVICEFS
557 /* procfs file */
560 if (!dev) 558 if (!dev)
561 dev = inode->i_private; 559 dev = inode->i_private;
560#endif
562 if (!dev) 561 if (!dev)
563 goto out; 562 goto out;
564 ret = usb_autoresume_device(dev); 563 ret = usb_autoresume_device(dev);
@@ -581,7 +580,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
581 ps->disccontext = NULL; 580 ps->disccontext = NULL;
582 ps->ifclaimed = 0; 581 ps->ifclaimed = 0;
583 security_task_getsecid(current, &ps->secid); 582 security_task_getsecid(current, &ps->secid);
584 wmb(); 583 smp_wmb();
585 list_add_tail(&ps->list, &dev->filelist); 584 list_add_tail(&ps->list, &dev->filelist);
586 file->private_data = ps; 585 file->private_data = ps;
587 out: 586 out:
@@ -813,7 +812,6 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
813 812
814 if (copy_from_user(&gd, arg, sizeof(gd))) 813 if (copy_from_user(&gd, arg, sizeof(gd)))
815 return -EFAULT; 814 return -EFAULT;
816 down_read(&usb_bus_type.subsys.rwsem);
817 intf = usb_ifnum_to_if(ps->dev, gd.interface); 815 intf = usb_ifnum_to_if(ps->dev, gd.interface);
818 if (!intf || !intf->dev.driver) 816 if (!intf || !intf->dev.driver)
819 ret = -ENODATA; 817 ret = -ENODATA;
@@ -822,7 +820,6 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
822 sizeof(gd.driver)); 820 sizeof(gd.driver));
823 ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0); 821 ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0);
824 } 822 }
825 up_read(&usb_bus_type.subsys.rwsem);
826 return ret; 823 return ret;
827} 824}
828 825
@@ -1351,15 +1348,12 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
1351 1348
1352 /* disconnect kernel driver from interface */ 1349 /* disconnect kernel driver from interface */
1353 case USBDEVFS_DISCONNECT: 1350 case USBDEVFS_DISCONNECT:
1354
1355 down_write(&usb_bus_type.subsys.rwsem);
1356 if (intf->dev.driver) { 1351 if (intf->dev.driver) {
1357 driver = to_usb_driver(intf->dev.driver); 1352 driver = to_usb_driver(intf->dev.driver);
1358 dev_dbg (&intf->dev, "disconnect by usbfs\n"); 1353 dev_dbg (&intf->dev, "disconnect by usbfs\n");
1359 usb_driver_release_interface(driver, intf); 1354 usb_driver_release_interface(driver, intf);
1360 } else 1355 } else
1361 retval = -ENODATA; 1356 retval = -ENODATA;
1362 up_write(&usb_bus_type.subsys.rwsem);
1363 break; 1357 break;
1364 1358
1365 /* let kernel drivers try to (re)bind to the interface */ 1359 /* let kernel drivers try to (re)bind to the interface */
@@ -1371,7 +1365,6 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
1371 1365
1372 /* talk directly to the interface's driver */ 1366 /* talk directly to the interface's driver */
1373 default: 1367 default:
1374 down_read(&usb_bus_type.subsys.rwsem);
1375 if (intf->dev.driver) 1368 if (intf->dev.driver)
1376 driver = to_usb_driver(intf->dev.driver); 1369 driver = to_usb_driver(intf->dev.driver);
1377 if (driver == NULL || driver->ioctl == NULL) { 1370 if (driver == NULL || driver->ioctl == NULL) {
@@ -1381,7 +1374,6 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
1381 if (retval == -ENOIOCTLCMD) 1374 if (retval == -ENOIOCTLCMD)
1382 retval = -ENOTTY; 1375 retval = -ENOTTY;
1383 } 1376 }
1384 up_read(&usb_bus_type.subsys.rwsem);
1385 } 1377 }
1386 1378
1387 /* cleanup and return */ 1379 /* cleanup and return */
@@ -1583,7 +1575,7 @@ static unsigned int usbdev_poll(struct file *file, struct poll_table_struct *wai
1583 return mask; 1575 return mask;
1584} 1576}
1585 1577
1586const struct file_operations usbfs_device_file_operations = { 1578const struct file_operations usbdev_file_operations = {
1587 .llseek = usbdev_lseek, 1579 .llseek = usbdev_lseek,
1588 .read = usbdev_read, 1580 .read = usbdev_read,
1589 .poll = usbdev_poll, 1581 .poll = usbdev_poll,
@@ -1592,50 +1584,53 @@ const struct file_operations usbfs_device_file_operations = {
1592 .release = usbdev_release, 1584 .release = usbdev_release,
1593}; 1585};
1594 1586
1595static int usbdev_add(struct usb_device *dev) 1587#ifdef CONFIG_USB_DEVICE_CLASS
1588static struct class *usb_classdev_class;
1589
1590static int usb_classdev_add(struct usb_device *dev)
1596{ 1591{
1597 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1); 1592 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1);
1598 1593
1599 dev->usbfs_dev = device_create(usb_device_class, &dev->dev, 1594 dev->usb_classdev = device_create(usb_classdev_class, &dev->dev,
1600 MKDEV(USB_DEVICE_MAJOR, minor), 1595 MKDEV(USB_DEVICE_MAJOR, minor),
1601 "usbdev%d.%d", dev->bus->busnum, dev->devnum); 1596 "usbdev%d.%d", dev->bus->busnum, dev->devnum);
1602 if (IS_ERR(dev->usbfs_dev)) 1597 if (IS_ERR(dev->usb_classdev))
1603 return PTR_ERR(dev->usbfs_dev); 1598 return PTR_ERR(dev->usb_classdev);
1604 1599
1605 dev->usbfs_dev->platform_data = dev;
1606 return 0; 1600 return 0;
1607} 1601}
1608 1602
1609static void usbdev_remove(struct usb_device *dev) 1603static void usb_classdev_remove(struct usb_device *dev)
1610{ 1604{
1611 device_unregister(dev->usbfs_dev); 1605 device_unregister(dev->usb_classdev);
1612} 1606}
1613 1607
1614static int usbdev_notify(struct notifier_block *self, unsigned long action, 1608static int usb_classdev_notify(struct notifier_block *self,
1615 void *dev) 1609 unsigned long action, void *dev)
1616{ 1610{
1617 switch (action) { 1611 switch (action) {
1618 case USB_DEVICE_ADD: 1612 case USB_DEVICE_ADD:
1619 if (usbdev_add(dev)) 1613 if (usb_classdev_add(dev))
1620 return NOTIFY_BAD; 1614 return NOTIFY_BAD;
1621 break; 1615 break;
1622 case USB_DEVICE_REMOVE: 1616 case USB_DEVICE_REMOVE:
1623 usbdev_remove(dev); 1617 usb_classdev_remove(dev);
1624 break; 1618 break;
1625 } 1619 }
1626 return NOTIFY_OK; 1620 return NOTIFY_OK;
1627} 1621}
1628 1622
1629static struct notifier_block usbdev_nb = { 1623static struct notifier_block usbdev_nb = {
1630 .notifier_call = usbdev_notify, 1624 .notifier_call = usb_classdev_notify,
1631}; 1625};
1626#endif
1632 1627
1633static struct cdev usb_device_cdev = { 1628static struct cdev usb_device_cdev = {
1634 .kobj = {.name = "usb_device", }, 1629 .kobj = {.name = "usb_device", },
1635 .owner = THIS_MODULE, 1630 .owner = THIS_MODULE,
1636}; 1631};
1637 1632
1638int __init usbdev_init(void) 1633int __init usb_devio_init(void)
1639{ 1634{
1640 int retval; 1635 int retval;
1641 1636
@@ -1645,38 +1640,38 @@ int __init usbdev_init(void)
1645 err("unable to register minors for usb_device"); 1640 err("unable to register minors for usb_device");
1646 goto out; 1641 goto out;
1647 } 1642 }
1648 cdev_init(&usb_device_cdev, &usbfs_device_file_operations); 1643 cdev_init(&usb_device_cdev, &usbdev_file_operations);
1649 retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX); 1644 retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX);
1650 if (retval) { 1645 if (retval) {
1651 err("unable to get usb_device major %d", USB_DEVICE_MAJOR); 1646 err("unable to get usb_device major %d", USB_DEVICE_MAJOR);
1652 goto error_cdev; 1647 goto error_cdev;
1653 } 1648 }
1654 usb_device_class = class_create(THIS_MODULE, "usb_device"); 1649#ifdef CONFIG_USB_DEVICE_CLASS
1655 if (IS_ERR(usb_device_class)) { 1650 usb_classdev_class = class_create(THIS_MODULE, "usb_device");
1651 if (IS_ERR(usb_classdev_class)) {
1656 err("unable to register usb_device class"); 1652 err("unable to register usb_device class");
1657 retval = PTR_ERR(usb_device_class); 1653 retval = PTR_ERR(usb_classdev_class);
1658 goto error_class; 1654 cdev_del(&usb_device_cdev);
1655 usb_classdev_class = NULL;
1656 goto out;
1659 } 1657 }
1660 1658
1661 usb_register_notify(&usbdev_nb); 1659 usb_register_notify(&usbdev_nb);
1662 1660#endif
1663out: 1661out:
1664 return retval; 1662 return retval;
1665 1663
1666error_class:
1667 usb_device_class = NULL;
1668 cdev_del(&usb_device_cdev);
1669
1670error_cdev: 1664error_cdev:
1671 unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); 1665 unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX);
1672 goto out; 1666 goto out;
1673} 1667}
1674 1668
1675void usbdev_cleanup(void) 1669void usb_devio_cleanup(void)
1676{ 1670{
1671#ifdef CONFIG_USB_DEVICE_CLASS
1677 usb_unregister_notify(&usbdev_nb); 1672 usb_unregister_notify(&usbdev_nb);
1678 class_destroy(usb_device_class); 1673 class_destroy(usb_classdev_class);
1674#endif
1679 cdev_del(&usb_device_cdev); 1675 cdev_del(&usb_device_cdev);
1680 unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); 1676 unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX);
1681} 1677}
1682
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 9e3e943f313c..b9f7f90aef82 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -287,9 +287,9 @@ static int usb_unbind_interface(struct device *dev)
287 * way to bind to an interface is to return the private data from 287 * way to bind to an interface is to return the private data from
288 * the driver's probe() method. 288 * the driver's probe() method.
289 * 289 *
290 * Callers must own the device lock and the driver model's usb_bus_type.subsys 290 * Callers must own the device lock, so driver probe() entries don't need
291 * writelock. So driver probe() entries don't need extra locking, 291 * extra locking, but other call contexts may need to explicitly claim that
292 * but other call contexts may need to explicitly claim those locks. 292 * lock.
293 */ 293 */
294int usb_driver_claim_interface(struct usb_driver *driver, 294int usb_driver_claim_interface(struct usb_driver *driver,
295 struct usb_interface *iface, void* priv) 295 struct usb_interface *iface, void* priv)
@@ -330,9 +330,9 @@ EXPORT_SYMBOL(usb_driver_claim_interface);
330 * also causes the driver disconnect() method to be called. 330 * also causes the driver disconnect() method to be called.
331 * 331 *
332 * This call is synchronous, and may not be used in an interrupt context. 332 * This call is synchronous, and may not be used in an interrupt context.
333 * Callers must own the device lock and the driver model's usb_bus_type.subsys 333 * Callers must own the device lock, so driver disconnect() entries don't
334 * writelock. So driver disconnect() entries don't need extra locking, 334 * need extra locking, but other call contexts may need to explicitly claim
335 * but other call contexts may need to explicitly claim those locks. 335 * that lock.
336 */ 336 */
337void usb_driver_release_interface(struct usb_driver *driver, 337void usb_driver_release_interface(struct usb_driver *driver,
338 struct usb_interface *iface) 338 struct usb_interface *iface)
@@ -574,23 +574,10 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
574} 574}
575 575
576#ifdef CONFIG_HOTPLUG 576#ifdef CONFIG_HOTPLUG
577
578/*
579 * This sends an uevent to userspace, typically helping to load driver
580 * or other modules, configure the device, and more. Drivers can provide
581 * a MODULE_DEVICE_TABLE to help with module loading subtasks.
582 *
583 * We're called either from khubd (the typical case) or from root hub
584 * (init, kapmd, modprobe, rmmod, etc), but the agents need to handle
585 * delays in event delivery. Use sysfs (and DEVPATH) to make sure the
586 * device (and this configuration!) are still present.
587 */
588static int usb_uevent(struct device *dev, char **envp, int num_envp, 577static int usb_uevent(struct device *dev, char **envp, int num_envp,
589 char *buffer, int buffer_size) 578 char *buffer, int buffer_size)
590{ 579{
591 struct usb_interface *intf;
592 struct usb_device *usb_dev; 580 struct usb_device *usb_dev;
593 struct usb_host_interface *alt;
594 int i = 0; 581 int i = 0;
595 int length = 0; 582 int length = 0;
596 583
@@ -600,13 +587,11 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp,
600 /* driver is often null here; dev_dbg() would oops */ 587 /* driver is often null here; dev_dbg() would oops */
601 pr_debug ("usb %s: uevent\n", dev->bus_id); 588 pr_debug ("usb %s: uevent\n", dev->bus_id);
602 589
603 if (is_usb_device(dev)) { 590 if (is_usb_device(dev))
604 usb_dev = to_usb_device(dev); 591 usb_dev = to_usb_device(dev);
605 alt = NULL; 592 else {
606 } else { 593 struct usb_interface *intf = to_usb_interface(dev);
607 intf = to_usb_interface(dev);
608 usb_dev = interface_to_usbdev(intf); 594 usb_dev = interface_to_usbdev(intf);
609 alt = intf->cur_altsetting;
610 } 595 }
611 596
612 if (usb_dev->devnum < 0) { 597 if (usb_dev->devnum < 0) {
@@ -621,9 +606,7 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp,
621#ifdef CONFIG_USB_DEVICEFS 606#ifdef CONFIG_USB_DEVICEFS
622 /* If this is available, userspace programs can directly read 607 /* If this is available, userspace programs can directly read
623 * all the device descriptors we don't tell them about. Or 608 * all the device descriptors we don't tell them about. Or
624 * even act as usermode drivers. 609 * act as usermode drivers.
625 *
626 * FIXME reduce hardwired intelligence here
627 */ 610 */
628 if (add_uevent_var(envp, num_envp, &i, 611 if (add_uevent_var(envp, num_envp, &i,
629 buffer, buffer_size, &length, 612 buffer, buffer_size, &length,
@@ -650,44 +633,29 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp,
650 usb_dev->descriptor.bDeviceProtocol)) 633 usb_dev->descriptor.bDeviceProtocol))
651 return -ENOMEM; 634 return -ENOMEM;
652 635
653 if (!is_usb_device(dev)) { 636 if (add_uevent_var(envp, num_envp, &i,
654
655 if (add_uevent_var(envp, num_envp, &i,
656 buffer, buffer_size, &length, 637 buffer, buffer_size, &length,
657 "INTERFACE=%d/%d/%d", 638 "BUSNUM=%03d",
658 alt->desc.bInterfaceClass, 639 usb_dev->bus->busnum))
659 alt->desc.bInterfaceSubClass, 640 return -ENOMEM;
660 alt->desc.bInterfaceProtocol))
661 return -ENOMEM;
662 641
663 if (add_uevent_var(envp, num_envp, &i, 642 if (add_uevent_var(envp, num_envp, &i,
664 buffer, buffer_size, &length, 643 buffer, buffer_size, &length,
665 "MODALIAS=usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X", 644 "DEVNUM=%03d",
666 le16_to_cpu(usb_dev->descriptor.idVendor), 645 usb_dev->devnum))
667 le16_to_cpu(usb_dev->descriptor.idProduct), 646 return -ENOMEM;
668 le16_to_cpu(usb_dev->descriptor.bcdDevice),
669 usb_dev->descriptor.bDeviceClass,
670 usb_dev->descriptor.bDeviceSubClass,
671 usb_dev->descriptor.bDeviceProtocol,
672 alt->desc.bInterfaceClass,
673 alt->desc.bInterfaceSubClass,
674 alt->desc.bInterfaceProtocol))
675 return -ENOMEM;
676 }
677 647
678 envp[i] = NULL; 648 envp[i] = NULL;
679
680 return 0; 649 return 0;
681} 650}
682 651
683#else 652#else
684 653
685static int usb_uevent(struct device *dev, char **envp, 654static int usb_uevent(struct device *dev, char **envp,
686 int num_envp, char *buffer, int buffer_size) 655 int num_envp, char *buffer, int buffer_size)
687{ 656{
688 return -ENODEV; 657 return -ENODEV;
689} 658}
690
691#endif /* CONFIG_HOTPLUG */ 659#endif /* CONFIG_HOTPLUG */
692 660
693/** 661/**
@@ -872,8 +840,10 @@ static int usb_resume_device(struct usb_device *udev)
872 840
873done: 841done:
874 // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status); 842 // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
875 if (status == 0) 843 if (status == 0) {
844 udev->autoresume_disabled = 0;
876 udev->dev.power.power_state.event = PM_EVENT_ON; 845 udev->dev.power.power_state.event = PM_EVENT_ON;
846 }
877 return status; 847 return status;
878} 848}
879 849
@@ -962,6 +932,7 @@ static int autosuspend_check(struct usb_device *udev)
962{ 932{
963 int i; 933 int i;
964 struct usb_interface *intf; 934 struct usb_interface *intf;
935 unsigned long suspend_time;
965 936
966 /* For autosuspend, fail fast if anything is in use or autosuspend 937 /* For autosuspend, fail fast if anything is in use or autosuspend
967 * is disabled. Also fail if any interfaces require remote wakeup 938 * is disabled. Also fail if any interfaces require remote wakeup
@@ -970,9 +941,10 @@ static int autosuspend_check(struct usb_device *udev)
970 udev->do_remote_wakeup = device_may_wakeup(&udev->dev); 941 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
971 if (udev->pm_usage_cnt > 0) 942 if (udev->pm_usage_cnt > 0)
972 return -EBUSY; 943 return -EBUSY;
973 if (!udev->autosuspend_delay) 944 if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled)
974 return -EPERM; 945 return -EPERM;
975 946
947 suspend_time = udev->last_busy + udev->autosuspend_delay;
976 if (udev->actconfig) { 948 if (udev->actconfig) {
977 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { 949 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
978 intf = udev->actconfig->interface[i]; 950 intf = udev->actconfig->interface[i];
@@ -988,6 +960,24 @@ static int autosuspend_check(struct usb_device *udev)
988 } 960 }
989 } 961 }
990 } 962 }
963
964 /* If everything is okay but the device hasn't been idle for long
965 * enough, queue a delayed autosuspend request.
966 */
967 if (time_after(suspend_time, jiffies)) {
968 if (!timer_pending(&udev->autosuspend.timer)) {
969
970 /* The value of jiffies may change between the
971 * time_after() comparison above and the subtraction
972 * below. That's okay; the system behaves sanely
973 * when a timer is registered for the present moment
974 * or for the past.
975 */
976 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
977 suspend_time - jiffies);
978 }
979 return -EAGAIN;
980 }
991 return 0; 981 return 0;
992} 982}
993 983
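autosuspend_check() compares jiffies against last_busy + autosuspend_delay with time_after(), which stays correct across counter wrap-around because the test is a signed view of an unsigned difference. A standalone restatement of that arithmetic (tick_t and both helpers are inventions for the demo, not kernel API):

    typedef unsigned long tick_t;

    /* True if a is later than b, even when the counter has wrapped. */
    static int ticks_after(tick_t a, tick_t b)
    {
            return (long)(b - a) < 0;
    }

    /* How long to wait before the idle deadline, or 0 if it has passed. */
    static tick_t remaining_delay(tick_t now, tick_t last_busy, tick_t delay)
    {
            tick_t deadline = last_busy + delay;

            return ticks_after(deadline, now) ? deadline - now : 0;
    }

That remaining value is what the patch hands to queue_delayed_work() when the device has not yet been idle long enough.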
@@ -1033,26 +1023,25 @@ static int autosuspend_check(struct usb_device *udev)
1033 * 1023 *
1034 * This routine can run only in process context. 1024 * This routine can run only in process context.
1035 */ 1025 */
1036int usb_suspend_both(struct usb_device *udev, pm_message_t msg) 1026static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1037{ 1027{
1038 int status = 0; 1028 int status = 0;
1039 int i = 0; 1029 int i = 0;
1040 struct usb_interface *intf; 1030 struct usb_interface *intf;
1041 struct usb_device *parent = udev->parent; 1031 struct usb_device *parent = udev->parent;
1042 1032
1043 cancel_delayed_work(&udev->autosuspend); 1033 if (udev->state == USB_STATE_NOTATTACHED ||
1044 if (udev->state == USB_STATE_NOTATTACHED) 1034 udev->state == USB_STATE_SUSPENDED)
1045 return 0; 1035 goto done;
1046 if (udev->state == USB_STATE_SUSPENDED)
1047 return 0;
1048 1036
1049 udev->do_remote_wakeup = device_may_wakeup(&udev->dev); 1037 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1050 1038
1051 if (udev->auto_pm) { 1039 if (udev->auto_pm) {
1052 status = autosuspend_check(udev); 1040 status = autosuspend_check(udev);
1053 if (status < 0) 1041 if (status < 0)
1054 return status; 1042 goto done;
1055 } 1043 }
1044 cancel_delayed_work(&udev->autosuspend);
1056 1045
1057 /* Suspend all the interfaces and then udev itself */ 1046 /* Suspend all the interfaces and then udev itself */
1058 if (udev->actconfig) { 1047 if (udev->actconfig) {
@@ -1077,6 +1066,7 @@ int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1077 } else if (parent) 1066 } else if (parent)
1078 usb_autosuspend_device(parent); 1067 usb_autosuspend_device(parent);
1079 1068
1069 done:
1080 // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status); 1070 // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
1081 return status; 1071 return status;
1082} 1072}
@@ -1109,7 +1099,7 @@ int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1109 * 1099 *
1110 * This routine can run only in process context. 1100 * This routine can run only in process context.
1111 */ 1101 */
1112int usb_resume_both(struct usb_device *udev) 1102static int usb_resume_both(struct usb_device *udev)
1113{ 1103{
1114 int status = 0; 1104 int status = 0;
1115 int i; 1105 int i;
@@ -1117,11 +1107,17 @@ int usb_resume_both(struct usb_device *udev)
1117 struct usb_device *parent = udev->parent; 1107 struct usb_device *parent = udev->parent;
1118 1108
1119 cancel_delayed_work(&udev->autosuspend); 1109 cancel_delayed_work(&udev->autosuspend);
1120 if (udev->state == USB_STATE_NOTATTACHED) 1110 if (udev->state == USB_STATE_NOTATTACHED) {
1121 return -ENODEV; 1111 status = -ENODEV;
1112 goto done;
1113 }
1122 1114
1123 /* Propagate the resume up the tree, if necessary */ 1115 /* Propagate the resume up the tree, if necessary */
1124 if (udev->state == USB_STATE_SUSPENDED) { 1116 if (udev->state == USB_STATE_SUSPENDED) {
1117 if (udev->auto_pm && udev->autoresume_disabled) {
1118 status = -EPERM;
1119 goto done;
1120 }
1125 if (parent) { 1121 if (parent) {
1126 status = usb_autoresume_device(parent); 1122 status = usb_autoresume_device(parent);
1127 if (status == 0) { 1123 if (status == 0) {
@@ -1167,6 +1163,7 @@ int usb_resume_both(struct usb_device *udev)
1167 } 1163 }
1168 } 1164 }
1169 1165
1166 done:
1170 // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status); 1167 // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
1171 return status; 1168 return status;
1172} 1169}
@@ -1181,20 +1178,34 @@ static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
1181 int status = 0; 1178 int status = 0;
1182 1179
1183 usb_pm_lock(udev); 1180 usb_pm_lock(udev);
1181 udev->auto_pm = 1;
1184 udev->pm_usage_cnt += inc_usage_cnt; 1182 udev->pm_usage_cnt += inc_usage_cnt;
1185 WARN_ON(udev->pm_usage_cnt < 0); 1183 WARN_ON(udev->pm_usage_cnt < 0);
1186 if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) { 1184 if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
1187 udev->auto_pm = 1; 1185 if (udev->state == USB_STATE_SUSPENDED)
1188 status = usb_resume_both(udev); 1186 status = usb_resume_both(udev);
1189 if (status != 0) 1187 if (status != 0)
1190 udev->pm_usage_cnt -= inc_usage_cnt; 1188 udev->pm_usage_cnt -= inc_usage_cnt;
1191 } else if (inc_usage_cnt <= 0 && autosuspend_check(udev) == 0) 1189 else if (inc_usage_cnt)
1192 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, 1190 udev->last_busy = jiffies;
1193 udev->autosuspend_delay); 1191 } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) {
1192 if (inc_usage_cnt)
1193 udev->last_busy = jiffies;
1194 status = usb_suspend_both(udev, PMSG_SUSPEND);
1195 }
1194 usb_pm_unlock(udev); 1196 usb_pm_unlock(udev);
1195 return status; 1197 return status;
1196} 1198}
1197 1199
1200/* usb_autosuspend_work - callback routine to autosuspend a USB device */
1201void usb_autosuspend_work(struct work_struct *work)
1202{
1203 struct usb_device *udev =
1204 container_of(work, struct usb_device, autosuspend.work);
1205
1206 usb_autopm_do_device(udev, 0);
1207}
1208
1198/** 1209/**
1199 * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces 1210 * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
1200 * @udev: the usb_device to autosuspend 1211 * @udev: the usb_device to autosuspend
@@ -1286,15 +1297,20 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
1286 if (intf->condition == USB_INTERFACE_UNBOUND) 1297 if (intf->condition == USB_INTERFACE_UNBOUND)
1287 status = -ENODEV; 1298 status = -ENODEV;
1288 else { 1299 else {
1300 udev->auto_pm = 1;
1289 intf->pm_usage_cnt += inc_usage_cnt; 1301 intf->pm_usage_cnt += inc_usage_cnt;
1290 if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) { 1302 if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) {
1291 udev->auto_pm = 1; 1303 if (udev->state == USB_STATE_SUSPENDED)
1292 status = usb_resume_both(udev); 1304 status = usb_resume_both(udev);
1293 if (status != 0) 1305 if (status != 0)
1294 intf->pm_usage_cnt -= inc_usage_cnt; 1306 intf->pm_usage_cnt -= inc_usage_cnt;
1295 } else if (inc_usage_cnt <= 0 && autosuspend_check(udev) == 0) 1307 else if (inc_usage_cnt)
1296 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, 1308 udev->last_busy = jiffies;
1297 udev->autosuspend_delay); 1309 } else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) {
1310 if (inc_usage_cnt)
1311 udev->last_busy = jiffies;
1312 status = usb_suspend_both(udev, PMSG_SUSPEND);
1313 }
1298 } 1314 }
1299 usb_pm_unlock(udev); 1315 usb_pm_unlock(udev);
1300 return status; 1316 return status;
@@ -1353,11 +1369,14 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1353 * or @intf is unbound. A typical example would be a character-device 1369 * or @intf is unbound. A typical example would be a character-device
1354 * driver when its device file is opened. 1370 * driver when its device file is opened.
1355 * 1371 *
1356 * The routine increments @intf's usage counter. So long as the counter 1372 *
1357 * is greater than 0, autosuspend will not be allowed for @intf or its 1373 * The routine increments @intf's usage counter. (However if the
1358 * usb_device. When the driver is finished using @intf it should call 1374 * autoresume fails then the counter is re-decremented.) So long as the
1359 * usb_autopm_put_interface() to decrement the usage counter and queue 1375 * counter is greater than 0, autosuspend will not be allowed for @intf
1360 * a delayed autosuspend request (if the counter is <= 0). 1376 * or its usb_device. When the driver is finished using @intf it should
1377 * call usb_autopm_put_interface() to decrement the usage counter and
1378 * queue a delayed autosuspend request (if the counter is <= 0).
1379 *
1361 * 1380 *
1362 * Note that @intf->pm_usage_cnt is owned by the interface driver. The 1381 * Note that @intf->pm_usage_cnt is owned by the interface driver. The
1363 * core will not change its value other than the increment and decrement 1382 * core will not change its value other than the increment and decrement
@@ -1405,50 +1424,96 @@ int usb_autopm_set_interface(struct usb_interface *intf)
1405} 1424}
1406EXPORT_SYMBOL_GPL(usb_autopm_set_interface); 1425EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
1407 1426
1427#else
1428
1429void usb_autosuspend_work(struct work_struct *work)
1430{}
1431
1408#endif /* CONFIG_USB_SUSPEND */ 1432#endif /* CONFIG_USB_SUSPEND */
1409 1433
1410static int usb_suspend(struct device *dev, pm_message_t message) 1434/**
1435 * usb_external_suspend_device - external suspend of a USB device and its interfaces
1436 * @udev: the usb_device to suspend
1437 * @msg: Power Management message describing this state transition
1438 *
1439 * This routine handles external suspend requests: ones not generated
1440 * internally by a USB driver (autosuspend) but rather coming from the user
1441 * (via sysfs) or the PM core (system sleep). The suspend will be carried
1442 * out regardless of @udev's usage counter or those of its interfaces,
1443 * and regardless of whether or not remote wakeup is enabled. Of course,
1444 * interface drivers still have the option of failing the suspend (if
1445 * there are unsuspended children, for example).
1446 *
1447 * The caller must hold @udev's device lock.
1448 */
1449int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
1411{ 1450{
1412 int status; 1451 int status;
1413 1452
1414 if (is_usb_device(dev)) { 1453 usb_pm_lock(udev);
1415 struct usb_device *udev = to_usb_device(dev); 1454 udev->auto_pm = 0;
1416 1455 status = usb_suspend_both(udev, msg);
1417 usb_pm_lock(udev); 1456 usb_pm_unlock(udev);
1418 udev->auto_pm = 0;
1419 status = usb_suspend_both(udev, message);
1420 usb_pm_unlock(udev);
1421 } else
1422 status = 0;
1423 return status; 1457 return status;
1424} 1458}
1425 1459
1426static int usb_resume(struct device *dev) 1460/**
1461 * usb_external_resume_device - external resume of a USB device and its interfaces
1462 * @udev: the usb_device to resume
1463 *
1464 * This routine handles external resume requests: ones not generated
1465 * internally by a USB driver (autoresume) but rather coming from the user
1466 * (via sysfs), the PM core (system resume), or the device itself (remote
1467 * wakeup). @udev's usage counter is unaffected.
1468 *
1469 * The caller must hold @udev's device lock.
1470 */
1471int usb_external_resume_device(struct usb_device *udev)
1427{ 1472{
1428 int status; 1473 int status;
1429 1474
1430 if (is_usb_device(dev)) { 1475 usb_pm_lock(udev);
1431 struct usb_device *udev = to_usb_device(dev); 1476 udev->auto_pm = 0;
1432 1477 status = usb_resume_both(udev);
1433 usb_pm_lock(udev); 1478 usb_pm_unlock(udev);
1434 udev->auto_pm = 0;
1435 status = usb_resume_both(udev);
1436 usb_pm_unlock(udev);
1437 1479
1438 /* Rebind drivers that had no suspend method? */ 1480 /* Now that the device is awake, we can start trying to autosuspend
1439 } else 1481 * it again. */
1440 status = 0; 1482 if (status == 0)
1483 usb_try_autosuspend_device(udev);
1441 return status; 1484 return status;
1442} 1485}
1443 1486
1487static int usb_suspend(struct device *dev, pm_message_t message)
1488{
1489 if (!is_usb_device(dev)) /* Ignore PM for interfaces */
1490 return 0;
1491 return usb_external_suspend_device(to_usb_device(dev), message);
1492}
1493
1494static int usb_resume(struct device *dev)
1495{
1496 struct usb_device *udev;
1497
1498 if (!is_usb_device(dev)) /* Ignore PM for interfaces */
1499 return 0;
1500 udev = to_usb_device(dev);
1501 if (udev->autoresume_disabled)
1502 return -EPERM;
1503 return usb_external_resume_device(udev);
1504}
1505
1506#else
1507
1508#define usb_suspend NULL
1509#define usb_resume NULL
1510
1444#endif /* CONFIG_PM */ 1511#endif /* CONFIG_PM */
1445 1512
1446struct bus_type usb_bus_type = { 1513struct bus_type usb_bus_type = {
1447 .name = "usb", 1514 .name = "usb",
1448 .match = usb_device_match, 1515 .match = usb_device_match,
1449 .uevent = usb_uevent, 1516 .uevent = usb_uevent,
1450#ifdef CONFIG_PM
1451 .suspend = usb_suspend, 1517 .suspend = usb_suspend,
1452 .resume = usb_resume, 1518 .resume = usb_resume,
1453#endif
1454}; 1519};
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index b26c19e8d19f..40cf882293e6 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -37,6 +37,7 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/byteorder.h> 38#include <asm/byteorder.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/workqueue.h>
40 41
41#include <linux/usb.h> 42#include <linux/usb.h>
42 43
@@ -544,6 +545,8 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
544 unsigned long flags; 545 unsigned long flags;
545 char buffer[4]; /* Any root hubs with > 31 ports? */ 546 char buffer[4]; /* Any root hubs with > 31 ports? */
546 547
548 if (unlikely(!hcd->rh_registered))
549 return;
547 if (!hcd->uses_new_polling && !hcd->status_urb) 550 if (!hcd->uses_new_polling && !hcd->status_urb)
548 return; 551 return;
549 552
@@ -1296,14 +1299,26 @@ int hcd_bus_resume (struct usb_bus *bus)
1296 return status; 1299 return status;
1297} 1300}
1298 1301
1302/* Workqueue routine for root-hub remote wakeup */
1303static void hcd_resume_work(struct work_struct *work)
1304{
1305 struct usb_hcd *hcd = container_of(work, struct usb_hcd, wakeup_work);
1306 struct usb_device *udev = hcd->self.root_hub;
1307
1308 usb_lock_device(udev);
1309 usb_mark_last_busy(udev);
1310 usb_external_resume_device(udev);
1311 usb_unlock_device(udev);
1312}
1313
1299/** 1314/**
1300 * usb_hcd_resume_root_hub - called by HCD to resume its root hub 1315 * usb_hcd_resume_root_hub - called by HCD to resume its root hub
1301 * @hcd: host controller for this root hub 1316 * @hcd: host controller for this root hub
1302 * 1317 *
1303 * The USB host controller calls this function when its root hub is 1318 * The USB host controller calls this function when its root hub is
1304 * suspended (with the remote wakeup feature enabled) and a remote 1319 * suspended (with the remote wakeup feature enabled) and a remote
1305 * wakeup request is received. It queues a request for khubd to 1320 * wakeup request is received. The routine submits a workqueue request
1306 * resume the root hub (that is, manage its downstream ports again). 1321 * to resume the root hub (that is, manage its downstream ports again).
1307 */ 1322 */
1308void usb_hcd_resume_root_hub (struct usb_hcd *hcd) 1323void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
1309{ 1324{
@@ -1311,7 +1326,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
1311 1326
1312 spin_lock_irqsave (&hcd_root_hub_lock, flags); 1327 spin_lock_irqsave (&hcd_root_hub_lock, flags);
1313 if (hcd->rh_registered) 1328 if (hcd->rh_registered)
1314 usb_resume_root_hub (hcd->self.root_hub); 1329 queue_work(ksuspend_usb_wq, &hcd->wakeup_work);
1315 spin_unlock_irqrestore (&hcd_root_hub_lock, flags); 1330 spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
1316} 1331}
1317EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub); 1332EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
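Because usb_hcd_resume_root_hub() runs in interrupt context under hcd_root_hub_lock, while resuming the root hub needs to sleep and take its device lock, the patch defers the work to ksuspend_usb_wq. A generic sketch of that deferral pattern, using only the standard workqueue API and placeholder names (demo_ctrl, demo_wakeup_fn, demo_resume_hardware, some_workqueue):

struct demo_ctrl {
	struct work_struct wakeup_work;
	/* ... controller state ... */
};

static void demo_wakeup_fn(struct work_struct *work)
{
	struct demo_ctrl *c = container_of(work, struct demo_ctrl, wakeup_work);

	demo_resume_hardware(c);	/* process context: may sleep, take locks */
}

/* setup:            INIT_WORK(&c->wakeup_work, demo_wakeup_fn);
 * from IRQ context: queue_work(some_workqueue, &c->wakeup_work);
 * before teardown:  flush_workqueue(some_workqueue);
 */

This is also why usb_remove_hcd() later in this diff gains a flush_workqueue(ksuspend_usb_wq) call before disconnecting the root hub.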
@@ -1500,6 +1515,9 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
1500 init_timer(&hcd->rh_timer); 1515 init_timer(&hcd->rh_timer);
1501 hcd->rh_timer.function = rh_timer_func; 1516 hcd->rh_timer.function = rh_timer_func;
1502 hcd->rh_timer.data = (unsigned long) hcd; 1517 hcd->rh_timer.data = (unsigned long) hcd;
1518#ifdef CONFIG_PM
1519 INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
1520#endif
1503 1521
1504 hcd->driver = driver; 1522 hcd->driver = driver;
1505 hcd->product_desc = (driver->product_desc) ? driver->product_desc : 1523 hcd->product_desc = (driver->product_desc) ? driver->product_desc :
@@ -1666,16 +1684,20 @@ void usb_remove_hcd(struct usb_hcd *hcd)
1666 hcd->rh_registered = 0; 1684 hcd->rh_registered = 0;
1667 spin_unlock_irq (&hcd_root_hub_lock); 1685 spin_unlock_irq (&hcd_root_hub_lock);
1668 1686
1687#ifdef CONFIG_PM
1688 flush_workqueue(ksuspend_usb_wq);
1689#endif
1690
1669 mutex_lock(&usb_bus_list_lock); 1691 mutex_lock(&usb_bus_list_lock);
1670 usb_disconnect(&hcd->self.root_hub); 1692 usb_disconnect(&hcd->self.root_hub);
1671 mutex_unlock(&usb_bus_list_lock); 1693 mutex_unlock(&usb_bus_list_lock);
1672 1694
1673 hcd->poll_rh = 0;
1674 del_timer_sync(&hcd->rh_timer);
1675
1676 hcd->driver->stop(hcd); 1695 hcd->driver->stop(hcd);
1677 hcd->state = HC_STATE_HALT; 1696 hcd->state = HC_STATE_HALT;
1678 1697
1698 hcd->poll_rh = 0;
1699 del_timer_sync(&hcd->rh_timer);
1700
1679 if (hcd->irq >= 0) 1701 if (hcd->irq >= 0)
1680 free_irq(hcd->irq, hcd); 1702 free_irq(hcd->irq, hcd);
1681 usb_deregister_bus(&hcd->self); 1703 usb_deregister_bus(&hcd->self);
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 2a269ca20517..ef50fa494e47 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -68,6 +68,9 @@ struct usb_hcd {
68 68
69 struct timer_list rh_timer; /* drives root-hub polling */ 69 struct timer_list rh_timer; /* drives root-hub polling */
70 struct urb *status_urb; /* the current status urb */ 70 struct urb *status_urb; /* the current status urb */
71#ifdef CONFIG_PM
72 struct work_struct wakeup_work; /* for remote wakeup */
73#endif
71 74
72 /* 75 /*
73 * hardware info/state 76 * hardware info/state
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b89a98e61323..bde29ab2b504 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -119,8 +119,7 @@ MODULE_PARM_DESC(use_both_schemes,
119 "first one fails"); 119 "first one fails");
120 120
121 121
122#ifdef DEBUG 122static inline char *portspeed(int portstatus)
123static inline char *portspeed (int portstatus)
124{ 123{
125 if (portstatus & (1 << USB_PORT_FEAT_HIGHSPEED)) 124 if (portstatus & (1 << USB_PORT_FEAT_HIGHSPEED))
126 return "480 Mb/s"; 125 return "480 Mb/s";
@@ -129,7 +128,6 @@ static inline char *portspeed (int portstatus)
129 else 128 else
130 return "12 Mb/s"; 129 return "12 Mb/s";
131} 130}
132#endif
133 131
134/* Note that hdev or one of its children must be locked! */ 132/* Note that hdev or one of its children must be locked! */
135static inline struct usb_hub *hdev_to_hub(struct usb_device *hdev) 133static inline struct usb_hub *hdev_to_hub(struct usb_device *hdev)
@@ -1369,11 +1367,15 @@ int usb_new_device(struct usb_device *udev)
1369 } 1367 }
1370#endif 1368#endif
1371 1369
1370 /* export the usbdev device-node for libusb */
1371 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
1372 (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
1373
1372 /* Register the device. The device driver is responsible 1374 /* Register the device. The device driver is responsible
1373 * for adding the device files to usbfs and sysfs and for 1375 * for adding the device files to sysfs and for configuring
1374 * configuring the device. 1376 * the device.
1375 */ 1377 */
1376 err = device_add (&udev->dev); 1378 err = device_add(&udev->dev);
1377 if (err) { 1379 if (err) {
1378 dev_err(&udev->dev, "can't device_add, error %d\n", err); 1380 dev_err(&udev->dev, "can't device_add, error %d\n", err);
1379 goto fail; 1381 goto fail;
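The devt value added above is what exposes the usbdev character node to libusb; a worked example of the minor-number encoding (values chosen purely for illustration):

/* bus 2, device 5:  minor = (2 - 1) * 128 + (5 - 1) = 132,
 * the node udev conventionally exposes as /dev/bus/usb/002/005 */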
@@ -1857,12 +1859,8 @@ static int remote_wakeup(struct usb_device *udev)
1857 usb_lock_device(udev); 1859 usb_lock_device(udev);
1858 if (udev->state == USB_STATE_SUSPENDED) { 1860 if (udev->state == USB_STATE_SUSPENDED) {
1859 dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); 1861 dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
1860 status = usb_autoresume_device(udev); 1862 usb_mark_last_busy(udev);
1861 1863 status = usb_external_resume_device(udev);
1862 /* Give the interface drivers a chance to do something,
1863 * then autosuspend the device again. */
1864 if (status == 0)
1865 usb_autosuspend_device(udev);
1866 } 1864 }
1867 usb_unlock_device(udev); 1865 usb_unlock_device(udev);
1868 return status; 1866 return status;
@@ -1986,13 +1984,6 @@ static inline int remote_wakeup(struct usb_device *udev)
1986#define hub_resume NULL 1984#define hub_resume NULL
1987#endif 1985#endif
1988 1986
1989void usb_resume_root_hub(struct usb_device *hdev)
1990{
1991 struct usb_hub *hub = hdev_to_hub(hdev);
1992
1993 kick_khubd(hub);
1994}
1995
1996 1987
1997/* USB 2.0 spec, 7.1.7.3 / fig 7-29: 1988/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
1998 * 1989 *
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 11dad22da41c..cddfc62c4611 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -662,7 +662,7 @@ static void usbfs_add_device(struct usb_device *dev)
662 sprintf (name, "%03d", dev->devnum); 662 sprintf (name, "%03d", dev->devnum);
663 dev->usbfs_dentry = fs_create_file (name, devmode | S_IFREG, 663 dev->usbfs_dentry = fs_create_file (name, devmode | S_IFREG,
664 dev->bus->usbfs_dentry, dev, 664 dev->bus->usbfs_dentry, dev,
665 &usbfs_device_file_operations, 665 &usbdev_file_operations,
666 devuid, devgid); 666 devuid, devgid);
667 if (dev->usbfs_dentry == NULL) { 667 if (dev->usbfs_dentry == NULL) {
668 err ("error creating usbfs device entry"); 668 err ("error creating usbfs device entry");
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 217a3d6d0a06..b7434787db5f 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -412,10 +412,24 @@ int usb_sg_init (
412 io->urbs [i]->status = -EINPROGRESS; 412 io->urbs [i]->status = -EINPROGRESS;
413 io->urbs [i]->actual_length = 0; 413 io->urbs [i]->actual_length = 0;
414 414
415 /*
416 * Some systems need to revert to PIO when DMA is temporarily
417 * unavailable. For their sakes, both transfer_buffer and
418 * transfer_dma are set when possible. However this can only
419 * work on systems without HIGHMEM, since DMA buffers located
420 * in high memory are not directly addressable by the CPU for
421 * PIO ... so when HIGHMEM is in use, transfer_buffer is NULL
422 * to prevent stale pointers and to help spot bugs.
423 */
415 if (dma) { 424 if (dma) {
416 /* hc may use _only_ transfer_dma */
417 io->urbs [i]->transfer_dma = sg_dma_address (sg + i); 425 io->urbs [i]->transfer_dma = sg_dma_address (sg + i);
418 len = sg_dma_len (sg + i); 426 len = sg_dma_len (sg + i);
427#ifdef CONFIG_HIGHMEM
428 io->urbs[i]->transfer_buffer = NULL;
429#else
430 io->urbs[i]->transfer_buffer =
431 page_address(sg[i].page) + sg[i].offset;
432#endif
419 } else { 433 } else {
420 /* hc may use _only_ transfer_buffer */ 434 /* hc may use _only_ transfer_buffer */
421 io->urbs [i]->transfer_buffer = 435 io->urbs [i]->transfer_buffer =
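To show the consumer side of that convention, here is a hypothetical host-controller submit path that prefers DMA but can fall back to PIO only when the core left transfer_buffer set; everything except the urb fields (demo_hc, demo_dma_usable, demo_start_dma, demo_start_pio) is a made-up placeholder:

static int demo_submit(struct demo_hc *hc, struct urb *urb)
{
	if (demo_dma_usable(hc))
		return demo_start_dma(hc, urb->transfer_dma,
				      urb->transfer_buffer_length);
	if (urb->transfer_buffer)		/* buffer is not in highmem */
		return demo_start_pio(hc, urb->transfer_buffer,
				      urb->transfer_buffer_length);
	return -ENXIO;				/* highmem buffer and no DMA */
}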
@@ -1305,7 +1319,7 @@ int usb_reset_configuration(struct usb_device *dev)
1305 return 0; 1319 return 0;
1306} 1320}
1307 1321
1308static void release_interface(struct device *dev) 1322void usb_release_interface(struct device *dev)
1309{ 1323{
1310 struct usb_interface *intf = to_usb_interface(dev); 1324 struct usb_interface *intf = to_usb_interface(dev);
1311 struct usb_interface_cache *intfc = 1325 struct usb_interface_cache *intfc =
@@ -1315,6 +1329,67 @@ static void release_interface(struct device *dev)
1315 kfree(intf); 1329 kfree(intf);
1316} 1330}
1317 1331
1332#ifdef CONFIG_HOTPLUG
1333static int usb_if_uevent(struct device *dev, char **envp, int num_envp,
1334 char *buffer, int buffer_size)
1335{
1336 struct usb_device *usb_dev;
1337 struct usb_interface *intf;
1338 struct usb_host_interface *alt;
1339 int i = 0;
1340 int length = 0;
1341
1342 if (!dev)
1343 return -ENODEV;
1344
1345 /* driver is often null here; dev_dbg() would oops */
1346 pr_debug ("usb %s: uevent\n", dev->bus_id);
1347
1348 intf = to_usb_interface(dev);
1349 usb_dev = interface_to_usbdev(intf);
1350 alt = intf->cur_altsetting;
1351
1352 if (add_uevent_var(envp, num_envp, &i,
1353 buffer, buffer_size, &length,
1354 "INTERFACE=%d/%d/%d",
1355 alt->desc.bInterfaceClass,
1356 alt->desc.bInterfaceSubClass,
1357 alt->desc.bInterfaceProtocol))
1358 return -ENOMEM;
1359
1360 if (add_uevent_var(envp, num_envp, &i,
1361 buffer, buffer_size, &length,
1362 "MODALIAS=usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X",
1363 le16_to_cpu(usb_dev->descriptor.idVendor),
1364 le16_to_cpu(usb_dev->descriptor.idProduct),
1365 le16_to_cpu(usb_dev->descriptor.bcdDevice),
1366 usb_dev->descriptor.bDeviceClass,
1367 usb_dev->descriptor.bDeviceSubClass,
1368 usb_dev->descriptor.bDeviceProtocol,
1369 alt->desc.bInterfaceClass,
1370 alt->desc.bInterfaceSubClass,
1371 alt->desc.bInterfaceProtocol))
1372 return -ENOMEM;
1373
1374 envp[i] = NULL;
1375 return 0;
1376}
1377
1378#else
1379
1380static int usb_if_uevent(struct device *dev, char **envp,
1381 int num_envp, char *buffer, int buffer_size)
1382{
1383 return -ENODEV;
1384}
1385#endif /* CONFIG_HOTPLUG */
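For concreteness, the two variables generated above look like this for a hypothetical HID boot-keyboard interface on a device with idVendor 0x046d, idProduct 0xc016, bcdDevice 0x0340 (all values invented for the example):

/* INTERFACE=3/1/1
 * MODALIAS=usb:v046DpC016d0340dc00dsc00dp00ic03isc01ip01
 */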
1386
1387struct device_type usb_if_device_type = {
1388 .name = "usb_interface",
1389 .release = usb_release_interface,
1390 .uevent = usb_if_uevent,
1391};
1392
1318/* 1393/*
1319 * usb_set_configuration - Makes a particular device setting be current 1394 * usb_set_configuration - Makes a particular device setting be current
1320 * @dev: the device whose configuration is being updated 1395 * @dev: the device whose configuration is being updated
@@ -1349,7 +1424,7 @@ static void release_interface(struct device *dev)
1349 * 1424 *
1350 * This call is synchronous. The calling context must be able to sleep, 1425 * This call is synchronous. The calling context must be able to sleep,
1351 * must own the device lock, and must not hold the driver model's USB 1426 * must own the device lock, and must not hold the driver model's USB
1352 * bus rwsem; usb device driver probe() methods cannot use this routine. 1427 * bus mutex; usb device driver probe() methods cannot use this routine.
1353 * 1428 *
1354 * Returns zero on success, or else the status code returned by the 1429 * Returns zero on success, or else the status code returned by the
1355 * underlying call that failed. On successful completion, each interface 1430 * underlying call that failed. On successful completion, each interface
@@ -1478,8 +1553,8 @@ free_interfaces:
1478 intf->dev.parent = &dev->dev; 1553 intf->dev.parent = &dev->dev;
1479 intf->dev.driver = NULL; 1554 intf->dev.driver = NULL;
1480 intf->dev.bus = &usb_bus_type; 1555 intf->dev.bus = &usb_bus_type;
1556 intf->dev.type = &usb_if_device_type;
1481 intf->dev.dma_mask = dev->dev.dma_mask; 1557 intf->dev.dma_mask = dev->dev.dma_mask;
1482 intf->dev.release = release_interface;
1483 device_initialize (&intf->dev); 1558 device_initialize (&intf->dev);
1484 mark_quiesced(intf); 1559 mark_quiesced(intf);
1485 sprintf (&intf->dev.bus_id[0], "%d-%s:%d.%d", 1560 sprintf (&intf->dev.bus_id[0], "%d-%s:%d.%d",
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f08ec85a6d64..739f520908aa 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -42,7 +42,7 @@ static void usb_autosuspend_quirk(struct usb_device *udev)
42{ 42{
43#ifdef CONFIG_USB_SUSPEND 43#ifdef CONFIG_USB_SUSPEND
44 /* disable autosuspend, but allow the user to re-enable it via sysfs */ 44 /* disable autosuspend, but allow the user to re-enable it via sysfs */
45 udev->autosuspend_delay = 0; 45 udev->autosuspend_disabled = 1;
46#endif 46#endif
47} 47}
48 48
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 311d5df80386..e7c982377488 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -11,6 +11,7 @@
11 11
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/string.h>
14#include <linux/usb.h> 15#include <linux/usb.h>
15#include "usb.h" 16#include "usb.h"
16 17
@@ -117,6 +118,16 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
117static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); 118static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL);
118 119
119static ssize_t 120static ssize_t
121show_busnum(struct device *dev, struct device_attribute *attr, char *buf)
122{
123 struct usb_device *udev;
124
125 udev = to_usb_device(dev);
126 return sprintf(buf, "%d\n", udev->bus->busnum);
127}
128static DEVICE_ATTR(busnum, S_IRUGO, show_busnum, NULL);
129
130static ssize_t
120show_devnum(struct device *dev, struct device_attribute *attr, char *buf) 131show_devnum(struct device *dev, struct device_attribute *attr, char *buf)
121{ 132{
122 struct usb_device *udev; 133 struct usb_device *udev;
@@ -165,7 +176,7 @@ show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf)
165{ 176{
166 struct usb_device *udev = to_usb_device(dev); 177 struct usb_device *udev = to_usb_device(dev);
167 178
168 return sprintf(buf, "%u\n", udev->autosuspend_delay / HZ); 179 return sprintf(buf, "%d\n", udev->autosuspend_delay / HZ);
169} 180}
170 181
171static ssize_t 182static ssize_t
@@ -173,39 +184,115 @@ set_autosuspend(struct device *dev, struct device_attribute *attr,
173 const char *buf, size_t count) 184 const char *buf, size_t count)
174{ 185{
175 struct usb_device *udev = to_usb_device(dev); 186 struct usb_device *udev = to_usb_device(dev);
176 unsigned value, old; 187 int value;
177 188
178 if (sscanf(buf, "%u", &value) != 1 || value >= INT_MAX/HZ) 189 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
190 value <= - INT_MAX/HZ)
179 return -EINVAL; 191 return -EINVAL;
180 value *= HZ; 192 value *= HZ;
181 193
182 old = udev->autosuspend_delay;
183 udev->autosuspend_delay = value; 194 udev->autosuspend_delay = value;
184 if (value > 0 && old == 0) 195 if (value >= 0)
185 usb_try_autosuspend_device(udev); 196 usb_try_autosuspend_device(udev);
186 197 else {
198 if (usb_autoresume_device(udev) == 0)
199 usb_autosuspend_device(udev);
200 }
187 return count; 201 return count;
188} 202}
189 203
190static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR, 204static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR,
191 show_autosuspend, set_autosuspend); 205 show_autosuspend, set_autosuspend);
192 206
207static const char on_string[] = "on";
208static const char auto_string[] = "auto";
209static const char suspend_string[] = "suspend";
210
211static ssize_t
212show_level(struct device *dev, struct device_attribute *attr, char *buf)
213{
214 struct usb_device *udev = to_usb_device(dev);
215 const char *p = auto_string;
216
217 if (udev->state == USB_STATE_SUSPENDED) {
218 if (udev->autoresume_disabled)
219 p = suspend_string;
220 } else {
221 if (udev->autosuspend_disabled)
222 p = on_string;
223 }
224 return sprintf(buf, "%s\n", p);
225}
226
227static ssize_t
228set_level(struct device *dev, struct device_attribute *attr,
229 const char *buf, size_t count)
230{
231 struct usb_device *udev = to_usb_device(dev);
232 int len = count;
233 char *cp;
234 int rc = 0;
235
236 cp = memchr(buf, '\n', count);
237 if (cp)
238 len = cp - buf;
239
240 usb_lock_device(udev);
241
 242 /* Setting the flags without calling usb_pm_lock is subject to
243 * races, but who cares...
244 */
245 if (len == sizeof on_string - 1 &&
246 strncmp(buf, on_string, len) == 0) {
247 udev->autosuspend_disabled = 1;
248 udev->autoresume_disabled = 0;
249 rc = usb_external_resume_device(udev);
250
251 } else if (len == sizeof auto_string - 1 &&
252 strncmp(buf, auto_string, len) == 0) {
253 udev->autosuspend_disabled = 0;
254 udev->autoresume_disabled = 0;
255 rc = usb_external_resume_device(udev);
256
257 } else if (len == sizeof suspend_string - 1 &&
258 strncmp(buf, suspend_string, len) == 0) {
259 udev->autosuspend_disabled = 0;
260 udev->autoresume_disabled = 1;
261 rc = usb_external_suspend_device(udev, PMSG_SUSPEND);
262
263 } else
264 rc = -EINVAL;
265
266 usb_unlock_device(udev);
267 return (rc < 0 ? rc : count);
268}
269
270static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
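Summarizing the flag combinations the new power/level attribute selects (derived directly from the handler above):

/* "on"      -> autosuspend_disabled = 1, autoresume_disabled = 0; device resumed
 * "auto"    -> both flags cleared; normal autosuspend/autoresume behavior
 * "suspend" -> autosuspend_disabled = 0, autoresume_disabled = 1; device suspended
 */

A device left at "suspend" stays suspended because usb_resume() in driver.c now returns -EPERM when autoresume_disabled is set.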
271
193static char power_group[] = "power"; 272static char power_group[] = "power";
194 273
195static int add_power_attributes(struct device *dev) 274static int add_power_attributes(struct device *dev)
196{ 275{
197 int rc = 0; 276 int rc = 0;
198 277
199 if (is_usb_device(dev)) 278 if (is_usb_device(dev)) {
200 rc = sysfs_add_file_to_group(&dev->kobj, 279 rc = sysfs_add_file_to_group(&dev->kobj,
201 &dev_attr_autosuspend.attr, 280 &dev_attr_autosuspend.attr,
202 power_group); 281 power_group);
282 if (rc == 0)
283 rc = sysfs_add_file_to_group(&dev->kobj,
284 &dev_attr_level.attr,
285 power_group);
286 }
203 return rc; 287 return rc;
204} 288}
205 289
206static void remove_power_attributes(struct device *dev) 290static void remove_power_attributes(struct device *dev)
207{ 291{
208 sysfs_remove_file_from_group(&dev->kobj, 292 sysfs_remove_file_from_group(&dev->kobj,
293 &dev_attr_level.attr,
294 power_group);
295 sysfs_remove_file_from_group(&dev->kobj,
209 &dev_attr_autosuspend.attr, 296 &dev_attr_autosuspend.attr,
210 power_group); 297 power_group);
211} 298}
@@ -270,6 +357,7 @@ static struct attribute *dev_attrs[] = {
270 &dev_attr_bNumConfigurations.attr, 357 &dev_attr_bNumConfigurations.attr,
271 &dev_attr_bMaxPacketSize0.attr, 358 &dev_attr_bMaxPacketSize0.attr,
272 &dev_attr_speed.attr, 359 &dev_attr_speed.attr,
360 &dev_attr_busnum.attr,
273 &dev_attr_devnum.attr, 361 &dev_attr_devnum.attr,
274 &dev_attr_version.attr, 362 &dev_attr_version.attr,
275 &dev_attr_maxchild.attr, 363 &dev_attr_maxchild.attr,
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 54b42ce311c1..dfd1b5c87ca3 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,12 +49,13 @@ const char *usbcore_name = "usbcore";
49 49
50static int nousb; /* Disable USB when built into kernel image */ 50static int nousb; /* Disable USB when built into kernel image */
51 51
52struct workqueue_struct *ksuspend_usb_wq; /* For autosuspend */ 52/* Workqueue for autosuspend and for remote wakeup of root hubs */
53struct workqueue_struct *ksuspend_usb_wq;
53 54
54#ifdef CONFIG_USB_SUSPEND 55#ifdef CONFIG_USB_SUSPEND
55static int usb_autosuspend_delay = 2; /* Default delay value, 56static int usb_autosuspend_delay = 2; /* Default delay value,
56 * in seconds */ 57 * in seconds */
57module_param_named(autosuspend, usb_autosuspend_delay, uint, 0644); 58module_param_named(autosuspend, usb_autosuspend_delay, int, 0644);
58MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); 59MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
59 60
60#else 61#else
@@ -196,6 +197,11 @@ static void usb_release_dev(struct device *dev)
196 kfree(udev); 197 kfree(udev);
197} 198}
198 199
200struct device_type usb_device_type = {
201 .name = "usb_device",
202 .release = usb_release_dev,
203};
204
199#ifdef CONFIG_PM 205#ifdef CONFIG_PM
200 206
201static int ksuspend_usb_init(void) 207static int ksuspend_usb_init(void)
@@ -211,27 +217,6 @@ static void ksuspend_usb_cleanup(void)
211 destroy_workqueue(ksuspend_usb_wq); 217 destroy_workqueue(ksuspend_usb_wq);
212} 218}
213 219
214#ifdef CONFIG_USB_SUSPEND
215
216/* usb_autosuspend_work - callback routine to autosuspend a USB device */
217static void usb_autosuspend_work(struct work_struct *work)
218{
219 struct usb_device *udev =
220 container_of(work, struct usb_device, autosuspend.work);
221
222 usb_pm_lock(udev);
223 udev->auto_pm = 1;
224 usb_suspend_both(udev, PMSG_SUSPEND);
225 usb_pm_unlock(udev);
226}
227
228#else
229
230static void usb_autosuspend_work(struct work_struct *work)
231{}
232
233#endif /* CONFIG_USB_SUSPEND */
234
235#else 220#else
236 221
237#define ksuspend_usb_init() 0 222#define ksuspend_usb_init() 0
@@ -267,13 +252,10 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
267 252
268 device_initialize(&dev->dev); 253 device_initialize(&dev->dev);
269 dev->dev.bus = &usb_bus_type; 254 dev->dev.bus = &usb_bus_type;
255 dev->dev.type = &usb_device_type;
270 dev->dev.dma_mask = bus->controller->dma_mask; 256 dev->dev.dma_mask = bus->controller->dma_mask;
271 dev->dev.release = usb_release_dev;
272 dev->state = USB_STATE_ATTACHED; 257 dev->state = USB_STATE_ATTACHED;
273 258
274 /* This magic assignment distinguishes devices from interfaces */
275 dev->dev.platform_data = &usb_generic_driver;
276
277 INIT_LIST_HEAD(&dev->ep0.urb_list); 259 INIT_LIST_HEAD(&dev->ep0.urb_list);
278 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; 260 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
279 dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; 261 dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
@@ -902,9 +884,9 @@ static int __init usb_init(void)
902 retval = usb_register(&usbfs_driver); 884 retval = usb_register(&usbfs_driver);
903 if (retval) 885 if (retval)
904 goto driver_register_failed; 886 goto driver_register_failed;
905 retval = usbdev_init(); 887 retval = usb_devio_init();
906 if (retval) 888 if (retval)
907 goto usbdevice_init_failed; 889 goto usb_devio_init_failed;
908 retval = usbfs_init(); 890 retval = usbfs_init();
909 if (retval) 891 if (retval)
910 goto fs_init_failed; 892 goto fs_init_failed;
@@ -919,8 +901,8 @@ static int __init usb_init(void)
919hub_init_failed: 901hub_init_failed:
920 usbfs_cleanup(); 902 usbfs_cleanup();
921fs_init_failed: 903fs_init_failed:
922 usbdev_cleanup(); 904 usb_devio_cleanup();
923usbdevice_init_failed: 905usb_devio_init_failed:
924 usb_deregister(&usbfs_driver); 906 usb_deregister(&usbfs_driver);
925driver_register_failed: 907driver_register_failed:
926 usb_major_cleanup(); 908 usb_major_cleanup();
@@ -947,7 +929,7 @@ static void __exit usb_exit(void)
947 usb_major_cleanup(); 929 usb_major_cleanup();
948 usbfs_cleanup(); 930 usbfs_cleanup();
949 usb_deregister(&usbfs_driver); 931 usb_deregister(&usbfs_driver);
950 usbdev_cleanup(); 932 usb_devio_cleanup();
951 usb_hub_cleanup(); 933 usb_hub_cleanup();
952 usb_host_cleanup(); 934 usb_host_cleanup();
953 bus_unregister(&usb_bus_type); 935 bus_unregister(&usb_bus_type);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 08b5a04e3755..bf2eb0dae2ec 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -21,7 +21,6 @@ extern char *usb_cache_string(struct usb_device *udev, int index);
21extern int usb_set_configuration(struct usb_device *dev, int configuration); 21extern int usb_set_configuration(struct usb_device *dev, int configuration);
22 22
23extern void usb_kick_khubd(struct usb_device *dev); 23extern void usb_kick_khubd(struct usb_device *dev);
24extern void usb_resume_root_hub(struct usb_device *dev);
25extern int usb_match_device(struct usb_device *dev, 24extern int usb_match_device(struct usb_device *dev,
26 const struct usb_device_id *id); 25 const struct usb_device_id *id);
27 26
@@ -34,10 +33,12 @@ extern void usb_host_cleanup(void);
34 33
35#ifdef CONFIG_PM 34#ifdef CONFIG_PM
36 35
37extern int usb_suspend_both(struct usb_device *udev, pm_message_t msg); 36extern void usb_autosuspend_work(struct work_struct *work);
38extern int usb_resume_both(struct usb_device *udev);
39extern int usb_port_suspend(struct usb_device *dev); 37extern int usb_port_suspend(struct usb_device *dev);
40extern int usb_port_resume(struct usb_device *dev); 38extern int usb_port_resume(struct usb_device *dev);
39extern int usb_external_suspend_device(struct usb_device *udev,
40 pm_message_t msg);
41extern int usb_external_resume_device(struct usb_device *udev);
41 42
42static inline void usb_pm_lock(struct usb_device *udev) 43static inline void usb_pm_lock(struct usb_device *udev)
43{ 44{
@@ -51,11 +52,6 @@ static inline void usb_pm_unlock(struct usb_device *udev)
51 52
52#else 53#else
53 54
54#define usb_suspend_both(udev, msg) 0
55static inline int usb_resume_both(struct usb_device *udev)
56{
57 return 0;
58}
59#define usb_port_suspend(dev) 0 55#define usb_port_suspend(dev) 0
60#define usb_port_resume(dev) 0 56#define usb_port_resume(dev) 0
61static inline void usb_pm_lock(struct usb_device *udev) {} 57static inline void usb_pm_lock(struct usb_device *udev) {}
@@ -82,15 +78,13 @@ static inline int usb_autoresume_device(struct usb_device *udev)
82 78
83extern struct workqueue_struct *ksuspend_usb_wq; 79extern struct workqueue_struct *ksuspend_usb_wq;
84extern struct bus_type usb_bus_type; 80extern struct bus_type usb_bus_type;
81extern struct device_type usb_device_type;
82extern struct device_type usb_if_device_type;
85extern struct usb_device_driver usb_generic_driver; 83extern struct usb_device_driver usb_generic_driver;
86 84
87/* Here's how we tell apart devices and interfaces. Luckily there's
88 * no such thing as a platform USB device, so we can steal the use
89 * of the platform_data field. */
90
91static inline int is_usb_device(const struct device *dev) 85static inline int is_usb_device(const struct device *dev)
92{ 86{
93 return dev->platform_data == &usb_generic_driver; 87 return dev->type == &usb_device_type;
94} 88}
95 89
96/* Do the same for device drivers and interface drivers. */ 90/* Do the same for device drivers and interface drivers. */
@@ -126,11 +120,11 @@ extern const char *usbcore_name;
126extern struct mutex usbfs_mutex; 120extern struct mutex usbfs_mutex;
127extern struct usb_driver usbfs_driver; 121extern struct usb_driver usbfs_driver;
128extern const struct file_operations usbfs_devices_fops; 122extern const struct file_operations usbfs_devices_fops;
129extern const struct file_operations usbfs_device_file_operations; 123extern const struct file_operations usbdev_file_operations;
130extern void usbfs_conn_disc_event(void); 124extern void usbfs_conn_disc_event(void);
131 125
132extern int usbdev_init(void); 126extern int usb_devio_init(void);
133extern void usbdev_cleanup(void); 127extern void usb_devio_cleanup(void);
134 128
135struct dev_state { 129struct dev_state {
136 struct list_head list; /* state list */ 130 struct list_head list; /* state list */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 4097a86c4b5e..8065f2b53701 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -68,6 +68,27 @@ choice
68 Many controller drivers are platform-specific; these 68 Many controller drivers are platform-specific; these
69 often need board-specific hooks. 69 often need board-specific hooks.
70 70
71config USB_GADGET_FSL_USB2
72 boolean "Freescale Highspeed USB DR Peripheral Controller"
73 depends on MPC834x || PPC_MPC831x
74 select USB_GADGET_DUALSPEED
75 help
 76 Some Freescale PowerPC processors have a High Speed
 77 Dual-Role (DR) USB controller, which supports device mode.
 78
 79 The number of programmable endpoints differs between
 80 SoC revisions.
81
82 Say "y" to link the driver statically, or "m" to build a
83 dynamically linked module called "fsl_usb2_udc" and force
84 all gadget drivers to also be dynamically linked.
85
86config USB_FSL_USB2
87 tristate
88 depends on USB_GADGET_FSL_USB2
89 default USB_GADGET
90 select USB_GADGET_SELECTED
91
71config USB_GADGET_NET2280 92config USB_GADGET_NET2280
72 boolean "NetChip 228x" 93 boolean "NetChip 228x"
73 depends on PCI 94 depends on PCI
@@ -370,6 +391,7 @@ config USB_GADGETFS
370 391
371config USB_FILE_STORAGE 392config USB_FILE_STORAGE
372 tristate "File-backed Storage Gadget" 393 tristate "File-backed Storage Gadget"
394 depends on BLOCK
373 help 395 help
374 The File-backed Storage Gadget acts as a USB Mass Storage 396 The File-backed Storage Gadget acts as a USB Mass Storage
375 disk drive. As its storage repository it can use a regular 397 disk drive. As its storage repository it can use a regular
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index e71e086a1cfa..5db19396631c 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GOKU) += goku_udc.o
8obj-$(CONFIG_USB_OMAP) += omap_udc.o 8obj-$(CONFIG_USB_OMAP) += omap_udc.o
9obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o 9obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
10obj-$(CONFIG_USB_AT91) += at91_udc.o 10obj-$(CONFIG_USB_AT91) += at91_udc.o
11obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
11 12
12# 13#
13# USB gadget drivers 14# USB gadget drivers
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 04e6b8508fb6..1dd8b57f4420 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -282,6 +282,9 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
282#define DEV_CONFIG_CDC 282#define DEV_CONFIG_CDC
283#endif 283#endif
284 284
285#ifdef CONFIG_USB_GADGET_FSL_USB2
286#define DEV_CONFIG_CDC
287#endif
285 288
286/* For CDC-incapable hardware, choose the simple cdc subset. 289/* For CDC-incapable hardware, choose the simple cdc subset.
287 * Anything that talks bulk (without notable bugs) can do this. 290 * Anything that talks bulk (without notable bugs) can do this.
@@ -1735,7 +1738,8 @@ enomem:
1735 defer_kevent (dev, WORK_RX_MEMORY); 1738 defer_kevent (dev, WORK_RX_MEMORY);
1736 if (retval) { 1739 if (retval) {
1737 DEBUG (dev, "rx submit --> %d\n", retval); 1740 DEBUG (dev, "rx submit --> %d\n", retval);
1738 dev_kfree_skb_any (skb); 1741 if (skb)
1742 dev_kfree_skb_any(skb);
1739 spin_lock(&dev->req_lock); 1743 spin_lock(&dev->req_lock);
1740 list_add (&req->list, &dev->rx_reqs); 1744 list_add (&req->list, &dev->rx_reqs);
1741 spin_unlock(&dev->req_lock); 1745 spin_unlock(&dev->req_lock);
@@ -1766,7 +1770,6 @@ static void rx_complete (struct usb_ep *ep, struct usb_request *req)
1766 break; 1770 break;
1767 } 1771 }
1768 1772
1769 skb->dev = dev->net;
1770 skb->protocol = eth_type_trans (skb, dev->net); 1773 skb->protocol = eth_type_trans (skb, dev->net);
1771 dev->stats.rx_packets++; 1774 dev->stats.rx_packets++;
1772 dev->stats.rx_bytes += skb->len; 1775 dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
new file mode 100644
index 000000000000..157054ea3978
--- /dev/null
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -0,0 +1,2500 @@
1/*
 2 * Copyright (C) 2004-2007 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Li Yang <leoli@freescale.com>
5 * Jiang Bo <tanya.jiang@freescale.com>
6 *
7 * Description:
8 * Freescale high-speed USB SOC DR module device controller driver.
9 * This can be found on MPC8349E/MPC8313E cpus.
10 * The driver is previously named as mpc_udc. Based on bare board
11 * code from Dave Liu and Shlomi Gridish.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19#undef VERBOSE
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/ioport.h>
24#include <linux/types.h>
25#include <linux/errno.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/init.h>
30#include <linux/timer.h>
31#include <linux/list.h>
32#include <linux/interrupt.h>
33#include <linux/proc_fs.h>
34#include <linux/mm.h>
35#include <linux/moduleparam.h>
36#include <linux/device.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb_gadget.h>
39#include <linux/usb/otg.h>
40#include <linux/dma-mapping.h>
41#include <linux/platform_device.h>
42#include <linux/fsl_devices.h>
43#include <linux/dmapool.h>
44
45#include <asm/byteorder.h>
46#include <asm/io.h>
47#include <asm/irq.h>
48#include <asm/system.h>
49#include <asm/unaligned.h>
50#include <asm/dma.h>
51#include <asm/cacheflush.h>
52
53#include "fsl_usb2_udc.h"
54
55#define DRIVER_DESC "Freescale High-Speed USB SOC Device Controller driver"
56#define DRIVER_AUTHOR "Li Yang/Jiang Bo"
57#define DRIVER_VERSION "Apr 20, 2007"
58
59#define DMA_ADDR_INVALID (~(dma_addr_t)0)
60
61static const char driver_name[] = "fsl-usb2-udc";
62static const char driver_desc[] = DRIVER_DESC;
63
 64static volatile struct usb_dr_device *dr_regs = NULL;
 65static volatile struct usb_sys_interface *usb_sys_regs = NULL;
66
67/* it is initialized in probe() */
68static struct fsl_udc *udc_controller = NULL;
69
70static const struct usb_endpoint_descriptor
71fsl_ep0_desc = {
72 .bLength = USB_DT_ENDPOINT_SIZE,
73 .bDescriptorType = USB_DT_ENDPOINT,
74 .bEndpointAddress = 0,
75 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
76 .wMaxPacketSize = USB_MAX_CTRL_PAYLOAD,
77};
78
79static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state);
80static int fsl_udc_resume(struct platform_device *pdev);
81static void fsl_ep_fifo_flush(struct usb_ep *_ep);
82
83#ifdef CONFIG_PPC32
84#define fsl_readl(addr) in_le32(addr)
 85#define fsl_writel(val32, addr) out_le32(addr, val32)
86#else
87#define fsl_readl(addr) readl(addr)
 88#define fsl_writel(val32, addr) writel(val32, addr)
89#endif
90
91/********************************************************************
92 * Internal Used Function
93********************************************************************/
94/*-----------------------------------------------------------------
 95 * done() - retire a request; caller must have blocked irqs
96 * @status : request status to be set, only works when
97 * request is still in progress.
98 *--------------------------------------------------------------*/
99static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
100{
101 struct fsl_udc *udc = NULL;
102 unsigned char stopped = ep->stopped;
103 struct ep_td_struct *curr_td, *next_td;
104 int j;
105
106 udc = (struct fsl_udc *)ep->udc;
107 /* Removed the req from fsl_ep->queue */
108 list_del_init(&req->queue);
109
110 /* req.status should be set as -EINPROGRESS in ep_queue() */
111 if (req->req.status == -EINPROGRESS)
112 req->req.status = status;
113 else
114 status = req->req.status;
115
116 /* Free dtd for the request */
117 next_td = req->head;
118 for (j = 0; j < req->dtd_count; j++) {
119 curr_td = next_td;
120 if (j != req->dtd_count - 1) {
121 next_td = curr_td->next_td_virt;
122 }
123 dma_pool_free(udc->td_pool, curr_td, curr_td->td_dma);
124 }
125
126 if (req->mapped) {
127 dma_unmap_single(ep->udc->gadget.dev.parent,
128 req->req.dma, req->req.length,
129 ep_is_in(ep)
130 ? DMA_TO_DEVICE
131 : DMA_FROM_DEVICE);
132 req->req.dma = DMA_ADDR_INVALID;
133 req->mapped = 0;
134 } else
135 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
136 req->req.dma, req->req.length,
137 ep_is_in(ep)
138 ? DMA_TO_DEVICE
139 : DMA_FROM_DEVICE);
140
141 if (status && (status != -ESHUTDOWN))
142 VDBG("complete %s req %p stat %d len %u/%u",
143 ep->ep.name, &req->req, status,
144 req->req.actual, req->req.length);
145
146 ep->stopped = 1;
147
148 spin_unlock(&ep->udc->lock);
149 /* complete() is from gadget layer,
150 * eg fsg->bulk_in_complete() */
151 if (req->req.complete)
152 req->req.complete(&ep->ep, &req->req);
153
154 spin_lock(&ep->udc->lock);
155 ep->stopped = stopped;
156}
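A note on the unlock/relock around req->req.complete() above: the completion callback runs in the gadget function driver and may itself call back into this UDC, for example usb_ep_queue() to submit the next request, so holding udc->lock across it would deadlock; ep->stopped is set for the duration of the callback and then restored to its previous value.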
157
158/*-----------------------------------------------------------------
159 * nuke(): delete all requests related to this ep
160 * called with spinlock held
161 *--------------------------------------------------------------*/
162static void nuke(struct fsl_ep *ep, int status)
163{
164 ep->stopped = 1;
165
166 /* Flush fifo */
167 fsl_ep_fifo_flush(&ep->ep);
168
 169 /* Retire any requests still linked to this ep */
170 while (!list_empty(&ep->queue)) {
171 struct fsl_req *req = NULL;
172
173 req = list_entry(ep->queue.next, struct fsl_req, queue);
174 done(ep, req, status);
175 }
176}
177
178/*------------------------------------------------------------------
179 Internal Hardware related function
180 ------------------------------------------------------------------*/
181
182static int dr_controller_setup(struct fsl_udc *udc)
183{
184 unsigned int tmp = 0, portctrl = 0, ctrl = 0;
185 unsigned long timeout;
186#define FSL_UDC_RESET_TIMEOUT 1000
187
188 /* before here, make sure dr_regs has been initialized */
189 if (!udc)
190 return -EINVAL;
191
192 /* Stop and reset the usb controller */
193 tmp = fsl_readl(&dr_regs->usbcmd);
194 tmp &= ~USB_CMD_RUN_STOP;
195 fsl_writel(tmp, &dr_regs->usbcmd);
196
197 tmp = fsl_readl(&dr_regs->usbcmd);
198 tmp |= USB_CMD_CTRL_RESET;
199 fsl_writel(tmp, &dr_regs->usbcmd);
200
201 /* Wait for reset to complete */
202 timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
203 while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
204 if (time_after(jiffies, timeout)) {
205 ERR("udc reset timeout! \n");
206 return -ETIMEDOUT;
207 }
208 cpu_relax();
209 }
210
211 /* Set the controller as device mode */
212 tmp = fsl_readl(&dr_regs->usbmode);
213 tmp |= USB_MODE_CTRL_MODE_DEVICE;
214 /* Disable Setup Lockout */
215 tmp |= USB_MODE_SETUP_LOCK_OFF;
216 fsl_writel(tmp, &dr_regs->usbmode);
217
218 /* Clear the setup status */
219 fsl_writel(0, &dr_regs->usbsts);
220
221 tmp = udc->ep_qh_dma;
222 tmp &= USB_EP_LIST_ADDRESS_MASK;
223 fsl_writel(tmp, &dr_regs->endpointlistaddr);
224
225 VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
226 (int)udc->ep_qh, (int)tmp,
227 fsl_readl(&dr_regs->endpointlistaddr));
228
229 /* Config PHY interface */
230 portctrl = fsl_readl(&dr_regs->portsc1);
231 portctrl &= ~PORTSCX_PHY_TYPE_SEL;
232 switch (udc->phy_mode) {
233 case FSL_USB2_PHY_ULPI:
234 portctrl |= PORTSCX_PTS_ULPI;
235 break;
236 case FSL_USB2_PHY_UTMI:
237 case FSL_USB2_PHY_UTMI_WIDE:
238 portctrl |= PORTSCX_PTS_UTMI;
239 break;
240 case FSL_USB2_PHY_SERIAL:
241 portctrl |= PORTSCX_PTS_FSLS;
242 break;
243 default:
244 return -EINVAL;
245 }
246 fsl_writel(portctrl, &dr_regs->portsc1);
247
248 /* Config control enable i/o output, cpu endian register */
249 ctrl = __raw_readl(&usb_sys_regs->control);
250 ctrl |= USB_CTRL_IOENB;
251 __raw_writel(ctrl, &usb_sys_regs->control);
252
253#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
254 /* Turn on cache snooping hardware, since some PowerPC platforms
 255 * rely entirely on hardware to maintain cache coherency. */
256
257 /* Setup Snooping for all the 4GB space */
258 tmp = SNOOP_SIZE_2GB; /* starts from 0x0, size 2G */
259 __raw_writel(tmp, &usb_sys_regs->snoop1);
260 tmp |= 0x80000000; /* starts from 0x8000000, size 2G */
261 __raw_writel(tmp, &usb_sys_regs->snoop2);
262#endif
263
264 return 0;
265}
266
267/* Enable DR irq and set controller to run state */
268static void dr_controller_run(struct fsl_udc *udc)
269{
270 u32 temp;
271
272 /* Enable DR irq reg */
273 temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
274 | USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
275 | USB_INTR_DEVICE_SUSPEND | USB_INTR_SYS_ERR_EN;
276
277 fsl_writel(temp, &dr_regs->usbintr);
278
279 /* Clear stopped bit */
280 udc->stopped = 0;
281
282 /* Set the controller as device mode */
283 temp = fsl_readl(&dr_regs->usbmode);
284 temp |= USB_MODE_CTRL_MODE_DEVICE;
285 fsl_writel(temp, &dr_regs->usbmode);
286
287 /* Set controller to Run */
288 temp = fsl_readl(&dr_regs->usbcmd);
289 temp |= USB_CMD_RUN_STOP;
290 fsl_writel(temp, &dr_regs->usbcmd);
291
292 return;
293}
294
295static void dr_controller_stop(struct fsl_udc *udc)
296{
297 unsigned int tmp;
298
299 /* disable all INTR */
300 fsl_writel(0, &dr_regs->usbintr);
301
302 /* Set stopped bit for isr */
303 udc->stopped = 1;
304
305 /* disable IO output */
306/* usb_sys_regs->control = 0; */
307
308 /* set controller to Stop */
309 tmp = fsl_readl(&dr_regs->usbcmd);
310 tmp &= ~USB_CMD_RUN_STOP;
311 fsl_writel(tmp, &dr_regs->usbcmd);
312
313 return;
314}
315
316void dr_ep_setup(unsigned char ep_num, unsigned char dir, unsigned char ep_type)
317{
318 unsigned int tmp_epctrl = 0;
319
320 tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
321 if (dir) {
322 if (ep_num)
323 tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
324 tmp_epctrl |= EPCTRL_TX_ENABLE;
325 tmp_epctrl |= ((unsigned int)(ep_type)
326 << EPCTRL_TX_EP_TYPE_SHIFT);
327 } else {
328 if (ep_num)
329 tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
330 tmp_epctrl |= EPCTRL_RX_ENABLE;
331 tmp_epctrl |= ((unsigned int)(ep_type)
332 << EPCTRL_RX_EP_TYPE_SHIFT);
333 }
334
335 fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
336}
337
338static void
339dr_ep_change_stall(unsigned char ep_num, unsigned char dir, int value)
340{
341 u32 tmp_epctrl = 0;
342
343 tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
344
345 if (value) {
346 /* set the stall bit */
347 if (dir)
348 tmp_epctrl |= EPCTRL_TX_EP_STALL;
349 else
350 tmp_epctrl |= EPCTRL_RX_EP_STALL;
351 } else {
352 /* clear the stall bit and reset data toggle */
353 if (dir) {
354 tmp_epctrl &= ~EPCTRL_TX_EP_STALL;
355 tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
356 } else {
357 tmp_epctrl &= ~EPCTRL_RX_EP_STALL;
358 tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
359 }
360 }
361 fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
362}
363
364/* Get stall status of a specific ep
365 Return: 0: not stalled; 1:stalled */
366static int dr_ep_get_stall(unsigned char ep_num, unsigned char dir)
367{
368 u32 epctrl;
369
370 epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
371 if (dir)
372 return (epctrl & EPCTRL_TX_EP_STALL) ? 1 : 0;
373 else
374 return (epctrl & EPCTRL_RX_EP_STALL) ? 1 : 0;
375}
376
377/********************************************************************
378 Internal Structure Build up functions
379********************************************************************/
380
381/*------------------------------------------------------------------
 382 * struct_ep_qh_setup(): set the Endpoint Capabilities field of QH
383 * @zlt: Zero Length Termination Select (1: disable; 0: enable)
384 * @mult: Mult field
385 ------------------------------------------------------------------*/
386static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
387 unsigned char dir, unsigned char ep_type,
388 unsigned int max_pkt_len,
389 unsigned int zlt, unsigned char mult)
390{
391 struct ep_queue_head *p_QH = &udc->ep_qh[2 * ep_num + dir];
392 unsigned int tmp = 0;
393
 394 /* set the Endpoint Capabilities in QH */
395 switch (ep_type) {
396 case USB_ENDPOINT_XFER_CONTROL:
397 /* Interrupt On Setup (IOS). for control ep */
398 tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
399 | EP_QUEUE_HEAD_IOS;
400 break;
401 case USB_ENDPOINT_XFER_ISOC:
402 tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
403 | (mult << EP_QUEUE_HEAD_MULT_POS);
404 break;
405 case USB_ENDPOINT_XFER_BULK:
406 case USB_ENDPOINT_XFER_INT:
407 tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
408 break;
409 default:
410 VDBG("error ep type is %d", ep_type);
411 return;
412 }
413 if (zlt)
414 tmp |= EP_QUEUE_HEAD_ZLT_SEL;
415 p_QH->max_pkt_length = cpu_to_le32(tmp);
416
417 return;
418}
419
420/* Setup qh structure and ep register for ep0. */
421static void ep0_setup(struct fsl_udc *udc)
422{
 423 /* the initialization of an ep includes: fields in QH, Regs,
424 * fsl_ep struct */
425 struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL,
426 USB_MAX_CTRL_PAYLOAD, 0, 0);
427 struct_ep_qh_setup(udc, 0, USB_SEND, USB_ENDPOINT_XFER_CONTROL,
428 USB_MAX_CTRL_PAYLOAD, 0, 0);
429 dr_ep_setup(0, USB_RECV, USB_ENDPOINT_XFER_CONTROL);
430 dr_ep_setup(0, USB_SEND, USB_ENDPOINT_XFER_CONTROL);
431
432 return;
433
434}
435
436/***********************************************************************
437 Endpoint Management Functions
438***********************************************************************/
439
440/*-------------------------------------------------------------------------
 441 * When a configuration is set, or when interface settings change
 442 * (for example via do_set_interface() in the gadget layer),
 443 * the gadget driver will enable or disable the relevant endpoints.
 444 * ep0 doesn't use this routine; it is always enabled.
445-------------------------------------------------------------------------*/
446static int fsl_ep_enable(struct usb_ep *_ep,
447 const struct usb_endpoint_descriptor *desc)
448{
449 struct fsl_udc *udc = NULL;
450 struct fsl_ep *ep = NULL;
451 unsigned short max = 0;
452 unsigned char mult = 0, zlt;
453 int retval = -EINVAL;
454 unsigned long flags = 0;
455
456 ep = container_of(_ep, struct fsl_ep, ep);
457
458 /* catch various bogus parameters */
459 if (!_ep || !desc || ep->desc
460 || (desc->bDescriptorType != USB_DT_ENDPOINT))
461 return -EINVAL;
462
463 udc = ep->udc;
464
465 if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
466 return -ESHUTDOWN;
467
468 max = le16_to_cpu(desc->wMaxPacketSize);
469
 470 /* Disable automatic ZLP generation. The gadget driver is responsible
 471 * for requesting a ZLP explicitly through req->req.zero. This is
 472 * needed to enable multi-td requests. */
473 zlt = 1;
474
475 /* Assume the max packet size from gadget is always correct */
476 switch (desc->bmAttributes & 0x03) {
477 case USB_ENDPOINT_XFER_CONTROL:
478 case USB_ENDPOINT_XFER_BULK:
479 case USB_ENDPOINT_XFER_INT:
480 /* mult = 0. Execute N Transactions as demonstrated by
481 * the USB variable length packet protocol where N is
482 * computed using the Maximum Packet Length (dQH) and
483 * the Total Bytes field (dTD) */
484 mult = 0;
485 break;
486 case USB_ENDPOINT_XFER_ISOC:
487 /* Calculate transactions needed for high bandwidth iso */
488 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
 489 max = max & 0x7ff; /* bits 0~10 */
490 /* 3 transactions at most */
491 if (mult > 3)
492 goto en_done;
493 break;
494 default:
495 goto en_done;
496 }
497
498 spin_lock_irqsave(&udc->lock, flags);
499 ep->ep.maxpacket = max;
500 ep->desc = desc;
501 ep->stopped = 0;
502
503 /* Controller related setup */
504 /* Init EPx Queue Head (Ep Capabilites field in QH
505 * according to max, zlt, mult) */
506 struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
507 (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
508 ? USB_SEND : USB_RECV),
509 (unsigned char) (desc->bmAttributes
510 & USB_ENDPOINT_XFERTYPE_MASK),
511 max, zlt, mult);
512
513 /* Init endpoint ctrl register */
514 dr_ep_setup((unsigned char) ep_index(ep),
515 (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
516 ? USB_SEND : USB_RECV),
517 (unsigned char) (desc->bmAttributes
518 & USB_ENDPOINT_XFERTYPE_MASK));
519
520 spin_unlock_irqrestore(&udc->lock, flags);
521 retval = 0;
522
523 VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
524 ep->desc->bEndpointAddress & 0x0f,
525 (desc->bEndpointAddress & USB_DIR_IN)
526 ? "in" : "out", max);
527en_done:
528 return retval;
529}
530
531/*---------------------------------------------------------------------
532 * @ep : the ep being unconfigured. May not be ep0
533 * Any pending and uncomplete req will complete with status (-ESHUTDOWN)
534*---------------------------------------------------------------------*/
535static int fsl_ep_disable(struct usb_ep *_ep)
536{
537 struct fsl_udc *udc = NULL;
538 struct fsl_ep *ep = NULL;
539 unsigned long flags = 0;
540 u32 epctrl;
541 int ep_num;
542
543 ep = container_of(_ep, struct fsl_ep, ep);
544 if (!_ep || !ep->desc) {
545 VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
546 return -EINVAL;
547 }
548
549 /* disable ep on controller */
550 ep_num = ep_index(ep);
551 epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
552 if (ep_is_in(ep))
553 epctrl &= ~EPCTRL_TX_ENABLE;
554 else
555 epctrl &= ~EPCTRL_RX_ENABLE;
556 fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
557
558 udc = (struct fsl_udc *)ep->udc;
559 spin_lock_irqsave(&udc->lock, flags);
560
561 /* nuke all pending requests (does flush) */
562 nuke(ep, -ESHUTDOWN);
563
564 ep->desc = 0;
565 ep->stopped = 1;
566 spin_unlock_irqrestore(&udc->lock, flags);
567
568 VDBG("disabled %s OK", _ep->name);
569 return 0;
570}
571
572/*---------------------------------------------------------------------
573 * allocate a request object used by this endpoint
574 * the main operation is to insert the req->queue to the eq->queue
575 * Returns the request, or null if one could not be allocated
576*---------------------------------------------------------------------*/
577static struct usb_request *
578fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
579{
580 struct fsl_req *req = NULL;
581
582 req = kzalloc(sizeof *req, gfp_flags);
583 if (!req)
584 return NULL;
585
586 req->req.dma = DMA_ADDR_INVALID;
587 INIT_LIST_HEAD(&req->queue);
588
589 return &req->req;
590}
591
592static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
593{
594 struct fsl_req *req = NULL;
595
596 req = container_of(_req, struct fsl_req, req);
597
598 if (_req)
599 kfree(req);
600}
601
602/*------------------------------------------------------------------
603 * Allocate an I/O buffer
604*---------------------------------------------------------------------*/
605static void *fsl_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
606 dma_addr_t *dma, gfp_t gfp_flags)
607{
608 struct fsl_ep *ep;
609
610 if (!_ep)
611 return NULL;
612
613 ep = container_of(_ep, struct fsl_ep, ep);
614
615 return dma_alloc_coherent(ep->udc->gadget.dev.parent,
616 bytes, dma, gfp_flags);
617}
618
619/*------------------------------------------------------------------
620 * frees an i/o buffer
621*---------------------------------------------------------------------*/
622static void fsl_free_buffer(struct usb_ep *_ep, void *buf,
623 dma_addr_t dma, unsigned bytes)
624{
625 struct fsl_ep *ep;
626
627 if (!_ep)
 628 return;
629
630 ep = container_of(_ep, struct fsl_ep, ep);
631
632 dma_free_coherent(ep->udc->gadget.dev.parent, bytes, buf, dma);
633}
634
635/*-------------------------------------------------------------------------*/
636static int fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
637{
638 int i = ep_index(ep) * 2 + ep_is_in(ep);
639 u32 temp, bitmask, tmp_stat;
640 struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
641
642 /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
643 VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
644
645 bitmask = ep_is_in(ep)
646 ? (1 << (ep_index(ep) + 16))
647 : (1 << (ep_index(ep)));
648
649 /* check if the pipe is empty */
650 if (!(list_empty(&ep->queue))) {
651 /* Add td to the end */
652 struct fsl_req *lastreq;
653 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
654 lastreq->tail->next_td_ptr =
655 cpu_to_le32(req->head->td_dma & DTD_ADDR_MASK);
656 /* Read prime bit, if 1 goto done */
657 if (fsl_readl(&dr_regs->endpointprime) & bitmask)
658 goto out;
659
660 do {
661 /* Set ATDTW bit in USBCMD */
662 temp = fsl_readl(&dr_regs->usbcmd);
663 fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);
664
665 /* Read correct status bit */
666 tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;
667
668 } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));
669
670 /* Write ATDTW bit to 0 */
671 temp = fsl_readl(&dr_regs->usbcmd);
672 fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
673
674 if (tmp_stat)
675 goto out;
676 }
677
678 /* Write dQH next pointer and terminate bit to 0 */
679 temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
680 dQH->next_dtd_ptr = cpu_to_le32(temp);
681
682 /* Clear active and halt bit */
683 temp = cpu_to_le32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
684 | EP_QUEUE_HEAD_STATUS_HALT));
685 dQH->size_ioc_int_sts &= temp;
686
687 /* Prime endpoint by writing 1 to ENDPTPRIME */
688 temp = ep_is_in(ep)
689 ? (1 << (ep_index(ep) + 16))
690 : (1 << (ep_index(ep)));
691 fsl_writel(temp, &dr_regs->endpointprime);
692out:
693 return 0;
694}
695
696/* Fill in the dTD structure
697 * @req: request that the transfer belongs to
 698 * @length: returns the actual data length of the dTD
699 * @dma: return dma address of the dTD
700 * @is_last: return flag if it is the last dTD of the request
701 * return: pointer to the built dTD */
702static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
703 dma_addr_t *dma, int *is_last)
704{
705 u32 swap_temp;
706 struct ep_td_struct *dtd;
707
708 /* how big will this transfer be? */
709 *length = min(req->req.length - req->req.actual,
710 (unsigned)EP_MAX_LENGTH_TRANSFER);
711
712 dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
713 if (dtd == NULL)
714 return dtd;
715
716 dtd->td_dma = *dma;
717 /* Clear reserved field */
718 swap_temp = cpu_to_le32(dtd->size_ioc_sts);
719 swap_temp &= ~DTD_RESERVED_FIELDS;
720 dtd->size_ioc_sts = cpu_to_le32(swap_temp);
721
722 /* Init all of buffer page pointers */
723 swap_temp = (u32) (req->req.dma + req->req.actual);
724 dtd->buff_ptr0 = cpu_to_le32(swap_temp);
725 dtd->buff_ptr1 = cpu_to_le32(swap_temp + 0x1000);
726 dtd->buff_ptr2 = cpu_to_le32(swap_temp + 0x2000);
727 dtd->buff_ptr3 = cpu_to_le32(swap_temp + 0x3000);
728 dtd->buff_ptr4 = cpu_to_le32(swap_temp + 0x4000);
729
730 req->req.actual += *length;
731
732 /* zlp is needed if req->req.zero is set */
733 if (req->req.zero) {
734 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
735 *is_last = 1;
736 else
737 *is_last = 0;
738 } else if (req->req.length == req->req.actual)
739 *is_last = 1;
740 else
741 *is_last = 0;
742
743 if ((*is_last) == 0)
744 VDBG("multi-dtd request!\n");
745 /* Fill in the transfer size; set active bit */
746 swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
747
748 /* Enable interrupt for the last dtd of a request */
749 if (*is_last && !req->req.no_interrupt)
750 swap_temp |= DTD_IOC;
751
752 dtd->size_ioc_sts = cpu_to_le32(swap_temp);
753
754 mb();
755
756 VDBG("length = %d address= 0x%x", *length, (int)*dma);
757
758 return dtd;
759}
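Each dTD covers at most EP_MAX_LENGTH_TRANSFER bytes spread across its five 4 KB buffer page pointers, so fsl_req_to_dtd() below keeps building dTDs until the whole request is covered. A minimal sketch of how many dTDs a request ends up needing, assuming EP_MAX_LENGTH_TRANSFER == 0x4000 and that req.zero appends one extra zero-length dTD when the length is an exact maxpacket multiple; dtds_needed() is a hypothetical helper, not part of the driver:

/* Hypothetical helper: how many dTDs fsl_req_to_dtd() would build for a
 * request, under the assumptions stated above. */
static unsigned dtds_needed(unsigned length, unsigned maxpacket, int zero)
{
	unsigned n = length ? (length + 0x3fff) / 0x4000 : 1;

	if (zero && length && (length % maxpacket) == 0)
		n++;			/* trailing ZLP dTD */
	return n;
}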
760
761/* Generate dtd chain for a request */
762static int fsl_req_to_dtd(struct fsl_req *req)
763{
764 unsigned count;
765 int is_last;
766	int is_first = 1;
767 struct ep_td_struct *last_dtd = NULL, *dtd;
768 dma_addr_t dma;
769
770 do {
771 dtd = fsl_build_dtd(req, &count, &dma, &is_last);
772 if (dtd == NULL)
773 return -ENOMEM;
774
775 if (is_first) {
776 is_first = 0;
777 req->head = dtd;
778 } else {
779 last_dtd->next_td_ptr = cpu_to_le32(dma);
780 last_dtd->next_td_virt = dtd;
781 }
782 last_dtd = dtd;
783
784 req->dtd_count++;
785 } while (!is_last);
786
787 dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE);
788
789 req->tail = dtd;
790
791 return 0;
792}
793
794/* queues (submits) an I/O request to an endpoint */
795static int
796fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
797{
798 struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
799 struct fsl_req *req = container_of(_req, struct fsl_req, req);
800 struct fsl_udc *udc;
801 unsigned long flags;
802 int is_iso = 0;
803
804 /* catch various bogus parameters */
805 if (!_req || !req->req.complete || !req->req.buf
806 || !list_empty(&req->queue)) {
807 VDBG("%s, bad params\n", __FUNCTION__);
808 return -EINVAL;
809 }
810 if (!_ep || (!ep->desc && ep_index(ep))) {
811 VDBG("%s, bad ep\n", __FUNCTION__);
812 return -EINVAL;
813 }
814 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
815 if (req->req.length > ep->ep.maxpacket)
816 return -EMSGSIZE;
817 is_iso = 1;
818 }
819
820 udc = ep->udc;
821 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
822 return -ESHUTDOWN;
823
824 req->ep = ep;
825
826 /* map virtual address to hardware */
827 if (req->req.dma == DMA_ADDR_INVALID) {
828 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
829 req->req.buf,
830 req->req.length, ep_is_in(ep)
831 ? DMA_TO_DEVICE
832 : DMA_FROM_DEVICE);
833 req->mapped = 1;
834 } else {
835 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
836 req->req.dma, req->req.length,
837 ep_is_in(ep)
838 ? DMA_TO_DEVICE
839 : DMA_FROM_DEVICE);
840 req->mapped = 0;
841 }
842
843 req->req.status = -EINPROGRESS;
844 req->req.actual = 0;
845 req->dtd_count = 0;
846
847 spin_lock_irqsave(&udc->lock, flags);
848
849 /* build dtds and push them to device queue */
850 if (!fsl_req_to_dtd(req)) {
851 fsl_queue_td(ep, req);
852 } else {
853 spin_unlock_irqrestore(&udc->lock, flags);
854 return -ENOMEM;
855 }
856
857 /* Update ep0 state */
858 if ((ep_index(ep) == 0))
859 udc->ep0_state = DATA_STATE_XMIT;
860
861 /* irq handler advances the queue */
862 if (req != NULL)
863 list_add_tail(&req->queue, &ep->queue);
864 spin_unlock_irqrestore(&udc->lock, flags);
865
866 return 0;
867}
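For context, a gadget (function) driver reaches fsl_ep_queue() through the generic gadget API rather than calling it directly. A minimal sketch, assuming an already-enabled endpoint and hypothetical my_complete()/my_submit() names; only standard usb_ep_* calls are used:

/* Sketch: submitting a transfer from a gadget driver.  'ep' must already be
 * enabled via usb_ep_enable(); my_complete() and my_submit() are hypothetical. */
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status: 0 on success, -ECONNRESET if dequeued, -ESHUTDOWN ... */
}

static int my_submit(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);	/* -> fsl_alloc_request() */
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = my_complete;	/* invoked from the dtd_complete_irq() path */

	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> fsl_ep_queue() */
}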
868
869/* dequeues (cancels, unlinks) an I/O request from an endpoint */
870static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
871{
872 struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
873 struct fsl_req *req;
874 unsigned long flags;
875 int ep_num, stopped, ret = 0;
876 u32 epctrl;
877
878 if (!_ep || !_req)
879 return -EINVAL;
880
881 spin_lock_irqsave(&ep->udc->lock, flags);
882 stopped = ep->stopped;
883
884 /* Stop the ep before we deal with the queue */
885 ep->stopped = 1;
886 ep_num = ep_index(ep);
887 epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
888 if (ep_is_in(ep))
889 epctrl &= ~EPCTRL_TX_ENABLE;
890 else
891 epctrl &= ~EPCTRL_RX_ENABLE;
892 fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
893
894 /* make sure it's actually queued on this endpoint */
895 list_for_each_entry(req, &ep->queue, queue) {
896 if (&req->req == _req)
897 break;
898 }
899 if (&req->req != _req) {
900 ret = -EINVAL;
901 goto out;
902 }
903
904 /* The request is in progress, or completed but not dequeued */
905 if (ep->queue.next == &req->queue) {
906 _req->status = -ECONNRESET;
907 fsl_ep_fifo_flush(_ep); /* flush current transfer */
908
909 /* The request isn't the last request in this ep queue */
910 if (req->queue.next != &ep->queue) {
911 struct ep_queue_head *qh;
912 struct fsl_req *next_req;
913
914 qh = ep->qh;
915 next_req = list_entry(req->queue.next, struct fsl_req,
916 queue);
917
918 /* Point the QH to the first TD of next request */
919 fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
920 }
921
922 /* The request hasn't been processed, patch up the TD chain */
923 } else {
924 struct fsl_req *prev_req;
925
926 prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
927 fsl_writel(fsl_readl(&req->tail->next_td_ptr),
928 &prev_req->tail->next_td_ptr);
929
930 }
931
932 done(ep, req, -ECONNRESET);
933
934 /* Enable EP */
935out: epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
936 if (ep_is_in(ep))
937 epctrl |= EPCTRL_TX_ENABLE;
938 else
939 epctrl |= EPCTRL_RX_ENABLE;
940 fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
941 ep->stopped = stopped;
942
943 spin_unlock_irqrestore(&ep->udc->lock, flags);
944 return ret;
945}
946
947/*-------------------------------------------------------------------------*/
948
949/*-----------------------------------------------------------------
950 * modify the endpoint halt feature
951 * @ep: the non-isochronous endpoint being stalled
952 * @value: 1--set halt 0--clear halt
953 * Returns zero, or a negative error code.
954*----------------------------------------------------------------*/
955static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
956{
957 struct fsl_ep *ep = NULL;
958 unsigned long flags = 0;
959 int status = -EOPNOTSUPP; /* operation not supported */
960 unsigned char ep_dir = 0, ep_num = 0;
961 struct fsl_udc *udc = NULL;
962
963 ep = container_of(_ep, struct fsl_ep, ep);
964 udc = ep->udc;
965 if (!_ep || !ep->desc) {
966 status = -EINVAL;
967 goto out;
968 }
969
970 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
971 status = -EOPNOTSUPP;
972 goto out;
973 }
974
975	/* Attempts to halt an IN ep will fail if any transfer requests
976	 * are still queued */
977 if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
978 status = -EAGAIN;
979 goto out;
980 }
981
982 status = 0;
983 ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
984 ep_num = (unsigned char)(ep_index(ep));
985 spin_lock_irqsave(&ep->udc->lock, flags);
986 dr_ep_change_stall(ep_num, ep_dir, value);
987 spin_unlock_irqrestore(&ep->udc->lock, flags);
988
989 if (ep_index(ep) == 0) {
990 udc->ep0_state = WAIT_FOR_SETUP;
991 udc->ep0_dir = 0;
992 }
993out:
994 VDBG(" %s %s halt stat %d", ep->ep.name,
995 value ? "set" : "clear", status);
996
997 return status;
998}
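Gadget drivers reach this through usb_ep_set_halt(); the matching CLEAR_FEATURE(ENDPOINT_HALT) from the host comes back in via setup_received_irq() below, which calls fsl_ep_set_halt() with value 0. A small sketch of the function-driver side, with a hypothetical endpoint pointer:

/* Sketch: stall a (non-isochronous) endpoint on a protocol error.  The halt
 * sticks until the host issues CLEAR_FEATURE(ENDPOINT_HALT); 'out_ep' is a
 * hypothetical, already-enabled endpoint. */
static void my_protocol_error(struct usb_ep *out_ep)
{
	int err = usb_ep_set_halt(out_ep);	/* -> fsl_ep_set_halt(_ep, 1) */

	if (err == -EAGAIN)
		;	/* IN ep still has queued requests; try again later */
}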
999
1000static void fsl_ep_fifo_flush(struct usb_ep *_ep)
1001{
1002 struct fsl_ep *ep;
1003 int ep_num, ep_dir;
1004 u32 bits;
1005 unsigned long timeout;
1006#define FSL_UDC_FLUSH_TIMEOUT 1000
1007
1008 if (!_ep) {
1009 return;
1010 } else {
1011 ep = container_of(_ep, struct fsl_ep, ep);
1012 if (!ep->desc)
1013 return;
1014 }
1015 ep_num = ep_index(ep);
1016 ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
1017
1018 if (ep_num == 0)
1019 bits = (1 << 16) | 1;
1020 else if (ep_dir == USB_SEND)
1021 bits = 1 << (16 + ep_num);
1022 else
1023 bits = 1 << ep_num;
1024
1025 timeout = jiffies + FSL_UDC_FLUSH_TIMEOUT;
1026 do {
1027 fsl_writel(bits, &dr_regs->endptflush);
1028
1029 /* Wait until flush complete */
1030 while (fsl_readl(&dr_regs->endptflush)) {
1031 if (time_after(jiffies, timeout)) {
1032 ERR("ep flush timeout\n");
1033 return;
1034 }
1035 cpu_relax();
1036 }
1037 /* See if we need to flush again */
1038 } while (fsl_readl(&dr_regs->endptstatus) & bits);
1039}
1040
1041static struct usb_ep_ops fsl_ep_ops = {
1042 .enable = fsl_ep_enable,
1043 .disable = fsl_ep_disable,
1044
1045 .alloc_request = fsl_alloc_request,
1046 .free_request = fsl_free_request,
1047
1048 .alloc_buffer = fsl_alloc_buffer,
1049 .free_buffer = fsl_free_buffer,
1050
1051 .queue = fsl_ep_queue,
1052 .dequeue = fsl_ep_dequeue,
1053
1054 .set_halt = fsl_ep_set_halt,
1055 .fifo_flush = fsl_ep_fifo_flush, /* flush fifo */
1056};
1057
1058/*-------------------------------------------------------------------------
1059 Gadget Driver Layer Operations
1060-------------------------------------------------------------------------*/
1061
1062/*----------------------------------------------------------------------
1063 * Get the current frame number (from DR frame_index Reg )
1064 *----------------------------------------------------------------------*/
1065static int fsl_get_frame(struct usb_gadget *gadget)
1066{
1067 return (int)(fsl_readl(&dr_regs->frindex) & USB_FRINDEX_MASKS);
1068}
1069
1070/*-----------------------------------------------------------------------
1071 * Tries to wake up the host connected to this gadget
1072 -----------------------------------------------------------------------*/
1073static int fsl_wakeup(struct usb_gadget *gadget)
1074{
1075 struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
1076 u32 portsc;
1077
1078 /* Remote wakeup feature not enabled by host */
1079 if (!udc->remote_wakeup)
1080 return -ENOTSUPP;
1081
1082 portsc = fsl_readl(&dr_regs->portsc1);
1083 /* not suspended? */
1084 if (!(portsc & PORTSCX_PORT_SUSPEND))
1085 return 0;
1086 /* trigger force resume */
1087 portsc |= PORTSCX_PORT_FORCE_RESUME;
1088 fsl_writel(portsc, &dr_regs->portsc1);
1089 return 0;
1090}
1091
1092static int can_pullup(struct fsl_udc *udc)
1093{
1094 return udc->driver && udc->softconnect && udc->vbus_active;
1095}
1096
1097/* Notify controller that VBUS is powered; called by whatever
1098 detects VBUS sessions */
1099static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
1100{
1101 struct fsl_udc *udc;
1102 unsigned long flags;
1103
1104 udc = container_of(gadget, struct fsl_udc, gadget);
1105 spin_lock_irqsave(&udc->lock, flags);
1106 VDBG("VBUS %s\n", is_active ? "on" : "off");
1107 udc->vbus_active = (is_active != 0);
1108 if (can_pullup(udc))
1109 fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
1110 &dr_regs->usbcmd);
1111 else
1112 fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
1113 &dr_regs->usbcmd);
1114 spin_unlock_irqrestore(&udc->lock, flags);
1115 return 0;
1116}
1117
1118/* constrain controller's VBUS power usage
1119 * This call is used by gadget drivers during SET_CONFIGURATION calls,
1120 * reporting how much power the device may consume. For example, this
1121 * could affect how quickly batteries are recharged.
1122 *
1123 * Returns zero on success, else negative errno.
1124 */
1125static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1126{
1127#ifdef CONFIG_USB_OTG
1128 struct fsl_udc *udc;
1129
1130 udc = container_of(gadget, struct fsl_udc, gadget);
1131
1132 if (udc->transceiver)
1133 return otg_set_power(udc->transceiver, mA);
1134#endif
1135 return -ENOTSUPP;
1136}
1137
1138/* Change Data+ pullup status
1139 * this func is used by usb_gadget_connect/disconnect
1140 */
1141static int fsl_pullup(struct usb_gadget *gadget, int is_on)
1142{
1143 struct fsl_udc *udc;
1144
1145 udc = container_of(gadget, struct fsl_udc, gadget);
1146 udc->softconnect = (is_on != 0);
1147 if (can_pullup(udc))
1148 fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
1149 &dr_regs->usbcmd);
1150 else
1151 fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
1152 &dr_regs->usbcmd);
1153
1154 return 0;
1155}
1156
1157/* defined in usb_gadget.h */
1158static struct usb_gadget_ops fsl_gadget_ops = {
1159 .get_frame = fsl_get_frame,
1160 .wakeup = fsl_wakeup,
1161/* .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
1162 .vbus_session = fsl_vbus_session,
1163 .vbus_draw = fsl_vbus_draw,
1164 .pullup = fsl_pullup,
1165};
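These ops back the usb_gadget_*() helpers declared in <linux/usb_gadget.h>: usb_gadget_connect()/usb_gadget_disconnect() end up in fsl_pullup(), usb_gadget_vbus_draw() in fsl_vbus_draw(), and usb_gadget_wakeup() in fsl_wakeup(). A short sketch from a gadget driver's point of view (the gadget pointer comes from its bind() callback; my_power_hooks() is hypothetical):

/* Sketch: calls a gadget driver might make against the ops above. */
static void my_power_hooks(struct usb_gadget *gadget)
{
	usb_gadget_vbus_draw(gadget, 100);	/* -> fsl_vbus_draw(), 100 mA     */
	usb_gadget_disconnect(gadget);		/* -> fsl_pullup(gadget, 0)       */
	usb_gadget_connect(gadget);		/* -> fsl_pullup(gadget, 1)       */
	usb_gadget_wakeup(gadget);		/* -> fsl_wakeup(), remote wakeup */
}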
1166
1167/* Set protocol stall on ep0, protocol stall will automatically be cleared
1168 on new transaction */
1169static void ep0stall(struct fsl_udc *udc)
1170{
1171 u32 tmp;
1172
1173 /* must set tx and rx to stall at the same time */
1174 tmp = fsl_readl(&dr_regs->endptctrl[0]);
1175 tmp |= EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL;
1176 fsl_writel(tmp, &dr_regs->endptctrl[0]);
1177 udc->ep0_state = WAIT_FOR_SETUP;
1178 udc->ep0_dir = 0;
1179}
1180
1181/* Prime a status phase for ep0 */
1182static int ep0_prime_status(struct fsl_udc *udc, int direction)
1183{
1184 struct fsl_req *req = udc->status_req;
1185 struct fsl_ep *ep;
1186 int status = 0;
1187
1188 if (direction == EP_DIR_IN)
1189 udc->ep0_dir = USB_DIR_IN;
1190 else
1191 udc->ep0_dir = USB_DIR_OUT;
1192
1193 ep = &udc->eps[0];
1194 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1195
1196 req->ep = ep;
1197 req->req.length = 0;
1198 req->req.status = -EINPROGRESS;
1199 req->req.actual = 0;
1200 req->req.complete = NULL;
1201 req->dtd_count = 0;
1202
1203 if (fsl_req_to_dtd(req) == 0)
1204 status = fsl_queue_td(ep, req);
1205 else
1206 return -ENOMEM;
1207
1208 if (status)
1209 ERR("Can't queue ep0 status request \n");
1210 list_add_tail(&req->queue, &ep->queue);
1211
1212 return status;
1213}
1214
1215static inline int udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
1216{
1217 struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
1218
1219 if (!ep->name)
1220 return 0;
1221
1222 nuke(ep, -ESHUTDOWN);
1223
1224 return 0;
1225}
1226
1227/*
1228 * ch9 Set address
1229 */
1230static void ch9setaddress(struct fsl_udc *udc, u16 value, u16 index, u16 length)
1231{
1232 /* Save the new address to device struct */
1233 udc->device_address = (u8) value;
1234 /* Update usb state */
1235 udc->usb_state = USB_STATE_ADDRESS;
1236 /* Status phase */
1237 if (ep0_prime_status(udc, EP_DIR_IN))
1238 ep0stall(udc);
1239}
1240
1241/*
1242 * ch9 Get status
1243 */
1244static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
1245 u16 index, u16 length)
1246{
1247 u16 tmp = 0; /* Status, cpu endian */
1248
1249 struct fsl_req *req;
1250 struct fsl_ep *ep;
1251 int status = 0;
1252
1253 ep = &udc->eps[0];
1254
1255 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1256 /* Get device status */
1257 tmp = 1 << USB_DEVICE_SELF_POWERED;
1258 tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1259 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1260 /* Get interface status */
1261 /* We don't have interface information in udc driver */
1262 tmp = 0;
1263 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1264 /* Get endpoint status */
1265 struct fsl_ep *target_ep;
1266
1267 target_ep = get_ep_by_pipe(udc, get_pipe_by_windex(index));
1268
1269 /* stall if endpoint doesn't exist */
1270 if (!target_ep->desc)
1271 goto stall;
1272 tmp = dr_ep_get_stall(ep_index(target_ep), ep_is_in(target_ep))
1273 << USB_ENDPOINT_HALT;
1274 }
1275
1276 udc->ep0_dir = USB_DIR_IN;
1277 /* Borrow the per device status_req */
1278 req = udc->status_req;
1279	/* Fill in the request structure */
1280 *((u16 *) req->req.buf) = cpu_to_le16(tmp);
1281 req->ep = ep;
1282 req->req.length = 2;
1283 req->req.status = -EINPROGRESS;
1284 req->req.actual = 0;
1285 req->req.complete = NULL;
1286 req->dtd_count = 0;
1287
1288 /* prime the data phase */
1289 if ((fsl_req_to_dtd(req) == 0))
1290 status = fsl_queue_td(ep, req);
1291 else /* no mem */
1292 goto stall;
1293
1294 if (status) {
1295 ERR("Can't respond to getstatus request \n");
1296 goto stall;
1297 }
1298 list_add_tail(&req->queue, &ep->queue);
1299 udc->ep0_state = DATA_STATE_XMIT;
1300 return;
1301stall:
1302 ep0stall(udc);
1303}
1304
1305static void setup_received_irq(struct fsl_udc *udc,
1306 struct usb_ctrlrequest *setup)
1307{
1308 u16 wValue = le16_to_cpu(setup->wValue);
1309 u16 wIndex = le16_to_cpu(setup->wIndex);
1310 u16 wLength = le16_to_cpu(setup->wLength);
1311
1312 udc_reset_ep_queue(udc, 0);
1313
1314 switch (setup->bRequest) {
1315	/* Requests that need a Data+Status phase from the udc */
1316 case USB_REQ_GET_STATUS:
1317 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_STANDARD))
1318 != (USB_DIR_IN | USB_TYPE_STANDARD))
1319 break;
1320 ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength);
1321 break;
1322
1323	/* Requests that need a Status phase from the udc */
1324 case USB_REQ_SET_ADDRESS:
1325 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
1326 | USB_RECIP_DEVICE))
1327 break;
1328 ch9setaddress(udc, wValue, wIndex, wLength);
1329 break;
1330
1331 /* Handled by udc, no data, status by udc */
1332 case USB_REQ_CLEAR_FEATURE:
1333 case USB_REQ_SET_FEATURE:
1334 { /* status transaction */
1335 int rc = -EOPNOTSUPP;
1336
1337 if ((setup->bRequestType & USB_RECIP_MASK)
1338 == USB_RECIP_ENDPOINT) {
1339 int pipe = get_pipe_by_windex(wIndex);
1340 struct fsl_ep *ep;
1341
1342 if (wValue != 0 || wLength != 0 || pipe > udc->max_ep)
1343 break;
1344 ep = get_ep_by_pipe(udc, pipe);
1345
1346 spin_unlock(&udc->lock);
1347 rc = fsl_ep_set_halt(&ep->ep,
1348 (setup->bRequest == USB_REQ_SET_FEATURE)
1349 ? 1 : 0);
1350 spin_lock(&udc->lock);
1351
1352 } else if ((setup->bRequestType & USB_RECIP_MASK)
1353 == USB_RECIP_DEVICE) {
1354			/* Note: The driver does not include OTG support yet.
1355 * This will be set when OTG support is added */
1356 if (!udc->gadget.is_otg)
1357 break;
1358 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
1359 udc->gadget.b_hnp_enable = 1;
1360 else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
1361 udc->gadget.a_hnp_support = 1;
1362 else if (setup->bRequest ==
1363 USB_DEVICE_A_ALT_HNP_SUPPORT)
1364 udc->gadget.a_alt_hnp_support = 1;
1365 rc = 0;
1366 }
1367 if (rc == 0) {
1368 if (ep0_prime_status(udc, EP_DIR_IN))
1369 ep0stall(udc);
1370 }
1371 break;
1372 }
1373 /* Requests handled by gadget */
1374 default:
1375 if (wLength) {
1376 /* Data phase from gadget, status phase from udc */
1377 udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1378 ? USB_DIR_IN : USB_DIR_OUT;
1379 spin_unlock(&udc->lock);
1380 if (udc->driver->setup(&udc->gadget,
1381 &udc->local_setup_buff) < 0)
1382 ep0stall(udc);
1383 spin_lock(&udc->lock);
1384 udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1385 ? DATA_STATE_XMIT : DATA_STATE_RECV;
1386
1387 } else {
1388 /* No data phase, IN status from gadget */
1389 udc->ep0_dir = USB_DIR_IN;
1390 spin_unlock(&udc->lock);
1391 if (udc->driver->setup(&udc->gadget,
1392 &udc->local_setup_buff) < 0)
1393 ep0stall(udc);
1394 spin_lock(&udc->lock);
1395 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1396 }
1397 break;
1398 }
1399}
1400
1401/* Process request for Data or Status phase of ep0
1402 * prime status phase if needed */
1403static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
1404 struct fsl_req *req)
1405{
1406 if (udc->usb_state == USB_STATE_ADDRESS) {
1407 /* Set the new address */
1408 u32 new_address = (u32) udc->device_address;
1409 fsl_writel(new_address << USB_DEVICE_ADDRESS_BIT_POS,
1410 &dr_regs->deviceaddr);
1411 }
1412
1413 done(ep0, req, 0);
1414
1415 switch (udc->ep0_state) {
1416 case DATA_STATE_XMIT:
1417 /* receive status phase */
1418 if (ep0_prime_status(udc, EP_DIR_OUT))
1419 ep0stall(udc);
1420 break;
1421 case DATA_STATE_RECV:
1422 /* send status phase */
1423 if (ep0_prime_status(udc, EP_DIR_IN))
1424 ep0stall(udc);
1425 break;
1426 case WAIT_FOR_OUT_STATUS:
1427 udc->ep0_state = WAIT_FOR_SETUP;
1428 break;
1429 case WAIT_FOR_SETUP:
1430		ERR("Unexpected ep0 packet\n");
1431 break;
1432 default:
1433 ep0stall(udc);
1434 break;
1435 }
1436}
1437
1438/* Tripwire mechanism to ensure a setup packet payload is extracted without
1439 * being corrupted by another incoming setup packet */
1440static void tripwire_handler(struct fsl_udc *udc, u8 ep_num, u8 *buffer_ptr)
1441{
1442 u32 temp;
1443 struct ep_queue_head *qh;
1444
1445 qh = &udc->ep_qh[ep_num * 2 + EP_DIR_OUT];
1446
1447 /* Clear bit in ENDPTSETUPSTAT */
1448 temp = fsl_readl(&dr_regs->endptsetupstat);
1449 fsl_writel(temp | (1 << ep_num), &dr_regs->endptsetupstat);
1450
1451	/* Repeat while a hazard exists, i.e. a new setup packet may arrive */
1452 do {
1453 /* Set Setup Tripwire */
1454 temp = fsl_readl(&dr_regs->usbcmd);
1455 fsl_writel(temp | USB_CMD_SUTW, &dr_regs->usbcmd);
1456
1457 /* Copy the setup packet to local buffer */
1458 memcpy(buffer_ptr, (u8 *) qh->setup_buffer, 8);
1459 } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_SUTW));
1460
1461 /* Clear Setup Tripwire */
1462 temp = fsl_readl(&dr_regs->usbcmd);
1463 fsl_writel(temp & ~USB_CMD_SUTW, &dr_regs->usbcmd);
1464}
1465
1466/* process_ep_req(): free the completed dTDs for this req */
1467static int process_ep_req(struct fsl_udc *udc, int pipe,
1468 struct fsl_req *curr_req)
1469{
1470 struct ep_td_struct *curr_td;
1471 int td_complete, actual, remaining_length, j, tmp;
1472 int status = 0;
1473 int errors = 0;
1474 struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
1475 int direction = pipe % 2;
1476
1477 curr_td = curr_req->head;
1478 td_complete = 0;
1479 actual = curr_req->req.length;
1480
1481 for (j = 0; j < curr_req->dtd_count; j++) {
1482 remaining_length = (le32_to_cpu(curr_td->size_ioc_sts)
1483 & DTD_PACKET_SIZE)
1484 >> DTD_LENGTH_BIT_POS;
1485 actual -= remaining_length;
1486
1487 if ((errors = le32_to_cpu(curr_td->size_ioc_sts) &
1488 DTD_ERROR_MASK)) {
1489 if (errors & DTD_STATUS_HALTED) {
1490 ERR("dTD error %08x QH=%d\n", errors, pipe);
1491 /* Clear the errors and Halt condition */
1492 tmp = le32_to_cpu(curr_qh->size_ioc_int_sts);
1493 tmp &= ~errors;
1494 curr_qh->size_ioc_int_sts = cpu_to_le32(tmp);
1495 status = -EPIPE;
1496 /* FIXME: continue with next queued TD? */
1497
1498 break;
1499 }
1500 if (errors & DTD_STATUS_DATA_BUFF_ERR) {
1501 VDBG("Transfer overflow");
1502 status = -EPROTO;
1503 break;
1504 } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
1505 VDBG("ISO error");
1506 status = -EILSEQ;
1507 break;
1508 } else
1509				ERR("Unknown error has occurred (0x%x)!\n",
1510 errors);
1511
1512 } else if (le32_to_cpu(curr_td->size_ioc_sts)
1513 & DTD_STATUS_ACTIVE) {
1514 VDBG("Request not complete");
1515 status = REQ_UNCOMPLETE;
1516 return status;
1517 } else if (remaining_length) {
1518 if (direction) {
1519 VDBG("Transmit dTD remaining length not zero");
1520 status = -EPROTO;
1521 break;
1522 } else {
1523 td_complete++;
1524 break;
1525 }
1526 } else {
1527 td_complete++;
1528			VDBG("dTD transmitted successfully");
1529 }
1530
1531 if (j != curr_req->dtd_count - 1)
1532 curr_td = (struct ep_td_struct *)curr_td->next_td_virt;
1533 }
1534
1535 if (status)
1536 return status;
1537
1538 curr_req->req.actual = actual;
1539
1540 return 0;
1541}
1542
1543/* Process a DTD completion interrupt */
1544static void dtd_complete_irq(struct fsl_udc *udc)
1545{
1546 u32 bit_pos;
1547 int i, ep_num, direction, bit_mask, status;
1548 struct fsl_ep *curr_ep;
1549 struct fsl_req *curr_req, *temp_req;
1550
1551 /* Clear the bits in the register */
1552 bit_pos = fsl_readl(&dr_regs->endptcomplete);
1553 fsl_writel(bit_pos, &dr_regs->endptcomplete);
1554
1555 if (!bit_pos)
1556 return;
1557
1558 for (i = 0; i < udc->max_ep * 2; i++) {
1559 ep_num = i >> 1;
1560 direction = i % 2;
1561
1562 bit_mask = 1 << (ep_num + 16 * direction);
1563
1564 if (!(bit_pos & bit_mask))
1565 continue;
1566
1567 curr_ep = get_ep_by_pipe(udc, i);
1568
1569		/* Skip the ep if it is not configured */
1570 if (curr_ep->name == NULL) {
1571 WARN("Invalid EP?");
1572 continue;
1573 }
1574
1575		/* process the req queue until an incomplete request */
1576 list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
1577 queue) {
1578 status = process_ep_req(udc, i, curr_req);
1579
1580 VDBG("status of process_ep_req= %d, ep = %d",
1581 status, ep_num);
1582 if (status == REQ_UNCOMPLETE)
1583 break;
1584 /* write back status to req */
1585 curr_req->req.status = status;
1586
1587 if (ep_num == 0) {
1588 ep0_req_complete(udc, curr_ep, curr_req);
1589 break;
1590 } else
1591 done(curr_ep, curr_req, status);
1592 }
1593 }
1594}
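Throughout the completion path a "pipe" index encodes endpoint number and direction as pipe = ep_num * 2 + direction (0 = OUT, 1 = IN), matching the layout of the ep_qh[] array. A tiny sketch of the conversions, with hypothetical helper names that are not part of the driver:

/* Hypothetical helpers mirroring the pipe = ep_num * 2 + direction encoding
 * used by dtd_complete_irq(), process_ep_req() and the ep_qh[] array. */
static inline int pipe_of(int ep_num, int is_in)	{ return ep_num * 2 + is_in; }
static inline int pipe_ep_num(int pipe)			{ return pipe >> 1; }
static inline int pipe_dir(int pipe)			{ return pipe & 1; }	/* 1 == IN */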
1595
1596/* Process a port change interrupt */
1597static void port_change_irq(struct fsl_udc *udc)
1598{
1599 u32 speed;
1600
1601 if (udc->bus_reset)
1602 udc->bus_reset = 0;
1603
1604 /* Bus resetting is finished */
1605 if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET)) {
1606 /* Get the speed */
1607 speed = (fsl_readl(&dr_regs->portsc1)
1608 & PORTSCX_PORT_SPEED_MASK);
1609 switch (speed) {
1610 case PORTSCX_PORT_SPEED_HIGH:
1611 udc->gadget.speed = USB_SPEED_HIGH;
1612 break;
1613 case PORTSCX_PORT_SPEED_FULL:
1614 udc->gadget.speed = USB_SPEED_FULL;
1615 break;
1616 case PORTSCX_PORT_SPEED_LOW:
1617 udc->gadget.speed = USB_SPEED_LOW;
1618 break;
1619 default:
1620 udc->gadget.speed = USB_SPEED_UNKNOWN;
1621 break;
1622 }
1623 }
1624
1625 /* Update USB state */
1626 if (!udc->resume_state)
1627 udc->usb_state = USB_STATE_DEFAULT;
1628}
1629
1630/* Process suspend interrupt */
1631static void suspend_irq(struct fsl_udc *udc)
1632{
1633 udc->resume_state = udc->usb_state;
1634 udc->usb_state = USB_STATE_SUSPENDED;
1635
1636 /* report suspend to the driver, serial.c does not support this */
1637 if (udc->driver->suspend)
1638 udc->driver->suspend(&udc->gadget);
1639}
1640
1641static void bus_resume(struct fsl_udc *udc)
1642{
1643 udc->usb_state = udc->resume_state;
1644 udc->resume_state = 0;
1645
1646 /* report resume to the driver, serial.c does not support this */
1647 if (udc->driver->resume)
1648 udc->driver->resume(&udc->gadget);
1649}
1650
1651/* Clear up all ep queues */
1652static int reset_queues(struct fsl_udc *udc)
1653{
1654 u8 pipe;
1655
1656 for (pipe = 0; pipe < udc->max_pipes; pipe++)
1657 udc_reset_ep_queue(udc, pipe);
1658
1659 /* report disconnect; the driver is already quiesced */
1660 udc->driver->disconnect(&udc->gadget);
1661
1662 return 0;
1663}
1664
1665/* Process reset interrupt */
1666static void reset_irq(struct fsl_udc *udc)
1667{
1668 u32 temp;
1669 unsigned long timeout;
1670
1671 /* Clear the device address */
1672 temp = fsl_readl(&dr_regs->deviceaddr);
1673 fsl_writel(temp & ~USB_DEVICE_ADDRESS_MASK, &dr_regs->deviceaddr);
1674
1675 udc->device_address = 0;
1676
1677 /* Clear usb state */
1678 udc->resume_state = 0;
1679 udc->ep0_dir = 0;
1680 udc->ep0_state = WAIT_FOR_SETUP;
1681 udc->remote_wakeup = 0; /* default to 0 on reset */
1682 udc->gadget.b_hnp_enable = 0;
1683 udc->gadget.a_hnp_support = 0;
1684 udc->gadget.a_alt_hnp_support = 0;
1685
1686 /* Clear all the setup token semaphores */
1687 temp = fsl_readl(&dr_regs->endptsetupstat);
1688 fsl_writel(temp, &dr_regs->endptsetupstat);
1689
1690 /* Clear all the endpoint complete status bits */
1691 temp = fsl_readl(&dr_regs->endptcomplete);
1692 fsl_writel(temp, &dr_regs->endptcomplete);
1693
1694 timeout = jiffies + 100;
1695 while (fsl_readl(&dr_regs->endpointprime)) {
1696 /* Wait until all endptprime bits cleared */
1697 if (time_after(jiffies, timeout)) {
1698 ERR("Timeout for reset\n");
1699 break;
1700 }
1701 cpu_relax();
1702 }
1703
1704 /* Write 1s to the flush register */
1705 fsl_writel(0xffffffff, &dr_regs->endptflush);
1706
1707 if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
1708 VDBG("Bus reset");
1709		/* Bus is resetting */
1710 udc->bus_reset = 1;
1711 /* Reset all the queues, include XD, dTD, EP queue
1712 * head and TR Queue */
1713 reset_queues(udc);
1714 udc->usb_state = USB_STATE_DEFAULT;
1715 } else {
1716 VDBG("Controller reset");
1717		/* initialize usb hw regs except for the EP regs; do not
1718		 * touch the usbintr reg */
1719 dr_controller_setup(udc);
1720
1721 /* Reset all internal used Queues */
1722 reset_queues(udc);
1723
1724 ep0_setup(udc);
1725
1726 /* Enable DR IRQ reg, Set Run bit, change udc state */
1727 dr_controller_run(udc);
1728 udc->usb_state = USB_STATE_ATTACHED;
1729 }
1730}
1731
1732/*
1733 * USB device controller interrupt handler
1734 */
1735static irqreturn_t fsl_udc_irq(int irq, void *_udc)
1736{
1737 struct fsl_udc *udc = _udc;
1738 u32 irq_src;
1739 irqreturn_t status = IRQ_NONE;
1740 unsigned long flags;
1741
1742 /* Disable ISR for OTG host mode */
1743 if (udc->stopped)
1744 return IRQ_NONE;
1745 spin_lock_irqsave(&udc->lock, flags);
1746 irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
1747 /* Clear notification bits */
1748 fsl_writel(irq_src, &dr_regs->usbsts);
1749
1750 /* VDBG("irq_src [0x%8x]", irq_src); */
1751
1752 /* Need to resume? */
1753 if (udc->usb_state == USB_STATE_SUSPENDED)
1754 if ((fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_SUSPEND) == 0)
1755 bus_resume(udc);
1756
1757 /* USB Interrupt */
1758 if (irq_src & USB_STS_INT) {
1759 VDBG("Packet int");
1760		/* Setup packet; we only support ep0 as the control ep */
1761 if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
1762 tripwire_handler(udc, 0,
1763 (u8 *) (&udc->local_setup_buff));
1764 setup_received_irq(udc, &udc->local_setup_buff);
1765 status = IRQ_HANDLED;
1766 }
1767
1768 /* completion of dtd */
1769 if (fsl_readl(&dr_regs->endptcomplete)) {
1770 dtd_complete_irq(udc);
1771 status = IRQ_HANDLED;
1772 }
1773 }
1774
1775 /* SOF (for ISO transfer) */
1776 if (irq_src & USB_STS_SOF) {
1777 status = IRQ_HANDLED;
1778 }
1779
1780 /* Port Change */
1781 if (irq_src & USB_STS_PORT_CHANGE) {
1782 port_change_irq(udc);
1783 status = IRQ_HANDLED;
1784 }
1785
1786 /* Reset Received */
1787 if (irq_src & USB_STS_RESET) {
1788 reset_irq(udc);
1789 status = IRQ_HANDLED;
1790 }
1791
1792 /* Sleep Enable (Suspend) */
1793 if (irq_src & USB_STS_SUSPEND) {
1794 suspend_irq(udc);
1795 status = IRQ_HANDLED;
1796 }
1797
1798 if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
1799 VDBG("Error IRQ %x ", irq_src);
1800 }
1801
1802 spin_unlock_irqrestore(&udc->lock, flags);
1803 return status;
1804}
1805
1806/*----------------------------------------------------------------*
1807 * Hook to gadget drivers
1808 * Called by initialization code of gadget drivers
1809*----------------------------------------------------------------*/
1810int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1811{
1812 int retval = -ENODEV;
1813 unsigned long flags = 0;
1814
1815 if (!udc_controller)
1816 return -ENODEV;
1817
1818 if (!driver || (driver->speed != USB_SPEED_FULL
1819 && driver->speed != USB_SPEED_HIGH)
1820 || !driver->bind || !driver->disconnect
1821 || !driver->setup)
1822 return -EINVAL;
1823
1824 if (udc_controller->driver)
1825 return -EBUSY;
1826
1827	/* a lock is needed, though it is unclear whether to use this lock or another */
1828 spin_lock_irqsave(&udc_controller->lock, flags);
1829
1830 driver->driver.bus = 0;
1831 /* hook up the driver */
1832 udc_controller->driver = driver;
1833 udc_controller->gadget.dev.driver = &driver->driver;
1834 spin_unlock_irqrestore(&udc_controller->lock, flags);
1835
1836 /* bind udc driver to gadget driver */
1837 retval = driver->bind(&udc_controller->gadget);
1838 if (retval) {
1839 VDBG("bind to %s --> %d", driver->driver.name, retval);
1840 udc_controller->gadget.dev.driver = 0;
1841 udc_controller->driver = 0;
1842 goto out;
1843 }
1844
1845 /* Enable DR IRQ reg and Set usbcmd reg Run bit */
1846 dr_controller_run(udc_controller);
1847 udc_controller->usb_state = USB_STATE_ATTACHED;
1848 udc_controller->ep0_state = WAIT_FOR_SETUP;
1849 udc_controller->ep0_dir = 0;
1850 printk(KERN_INFO "%s: bind to driver %s \n",
1851 udc_controller->gadget.name, driver->driver.name);
1852
1853out:
1854 if (retval)
1855 printk("retval %d \n", retval);
1856 return retval;
1857}
1858EXPORT_SYMBOL(usb_gadget_register_driver);
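For reference, a gadget driver binds to this controller by passing its usb_gadget_driver to the function above, typically from its module init; the -EINVAL/-EBUSY checks above explain the mandatory fields. A minimal sketch with hypothetical callbacks (stub bodies shown only to keep the example self-contained):

/* Sketch: the smallest usb_gadget_driver this UDC would accept. */
static int  my_bind(struct usb_gadget *g)        { return 0; }
static void my_unbind(struct usb_gadget *g)      { }
static void my_disconnect(struct usb_gadget *g)  { }
static int  my_setup(struct usb_gadget *g, const struct usb_ctrlrequest *c)
{
	return -EOPNOTSUPP;	/* a real driver answers ch9 requests here */
}

static struct usb_gadget_driver my_gadget_driver = {
	.speed		= USB_SPEED_HIGH,	/* or USB_SPEED_FULL */
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.driver		= { .name = "my_gadget" },
};

static int __init my_gadget_init(void)
{
	return usb_gadget_register_driver(&my_gadget_driver);
}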
1859
1860/* Disconnect from gadget driver */
1861int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1862{
1863 struct fsl_ep *loop_ep;
1864 unsigned long flags;
1865
1866 if (!udc_controller)
1867 return -ENODEV;
1868
1869 if (!driver || driver != udc_controller->driver || !driver->unbind)
1870 return -EINVAL;
1871
1872#ifdef CONFIG_USB_OTG
1873 if (udc_controller->transceiver)
1874 (void)otg_set_peripheral(udc_controller->transceiver, 0);
1875#endif
1876
1877 /* stop DR, disable intr */
1878 dr_controller_stop(udc_controller);
1879
1880	/* in fact, not needed */
1881 udc_controller->usb_state = USB_STATE_ATTACHED;
1882 udc_controller->ep0_state = WAIT_FOR_SETUP;
1883 udc_controller->ep0_dir = 0;
1884
1885	/* standard operation */
1886 spin_lock_irqsave(&udc_controller->lock, flags);
1887 udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
1888 nuke(&udc_controller->eps[0], -ESHUTDOWN);
1889 list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
1890 ep.ep_list)
1891 nuke(loop_ep, -ESHUTDOWN);
1892 spin_unlock_irqrestore(&udc_controller->lock, flags);
1893
1894 /* unbind gadget and unhook driver. */
1895 driver->unbind(&udc_controller->gadget);
1896 udc_controller->gadget.dev.driver = 0;
1897 udc_controller->driver = 0;
1898
1899 printk("unregistered gadget driver '%s'\r\n", driver->driver.name);
1900 return 0;
1901}
1902EXPORT_SYMBOL(usb_gadget_unregister_driver);
1903
1904/*-------------------------------------------------------------------------
1905 PROC File System Support
1906-------------------------------------------------------------------------*/
1907#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1908
1909#include <linux/seq_file.h>
1910
1911static const char proc_filename[] = "driver/fsl_usb2_udc";
1912
1913static int fsl_proc_read(char *page, char **start, off_t off, int count,
1914 int *eof, void *_dev)
1915{
1916 char *buf = page;
1917 char *next = buf;
1918 unsigned size = count;
1919 unsigned long flags;
1920 int t, i;
1921 u32 tmp_reg;
1922 struct fsl_ep *ep = NULL;
1923 struct fsl_req *req;
1924
1925 struct fsl_udc *udc = udc_controller;
1926 if (off != 0)
1927 return 0;
1928
1929 spin_lock_irqsave(&udc->lock, flags);
1930
1931	/* ------ basic driver information ------ */
1932 t = scnprintf(next, size,
1933 DRIVER_DESC "\n"
1934 "%s version: %s\n"
1935 "Gadget driver: %s\n\n",
1936 driver_name, DRIVER_VERSION,
1937 udc->driver ? udc->driver->driver.name : "(none)");
1938 size -= t;
1939 next += t;
1940
1941 /* ------ DR Registers ----- */
1942 tmp_reg = fsl_readl(&dr_regs->usbcmd);
1943 t = scnprintf(next, size,
1944 "USBCMD reg:\n"
1945 "SetupTW: %d\n"
1946 "Run/Stop: %s\n\n",
1947 (tmp_reg & USB_CMD_SUTW) ? 1 : 0,
1948 (tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
1949 size -= t;
1950 next += t;
1951
1952 tmp_reg = fsl_readl(&dr_regs->usbsts);
1953 t = scnprintf(next, size,
1954 "USB Status Reg:\n"
1955 "Dr Suspend: %d" "Reset Received: %d" "System Error: %s"
1956 "USB Error Interrupt: %s\n\n",
1957 (tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
1958 (tmp_reg & USB_STS_RESET) ? 1 : 0,
1959 (tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
1960 (tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
1961 size -= t;
1962 next += t;
1963
1964 tmp_reg = fsl_readl(&dr_regs->usbintr);
1965 t = scnprintf(next, size,
1966			"USB Interrupt Enable Reg:\n"
1967 "Sleep Enable: %d" "SOF Received Enable: %d"
1968 "Reset Enable: %d\n"
1969 "System Error Enable: %d"
1970			"Port Change Detected Enable: %d\n"
1971 "USB Error Intr Enable: %d" "USB Intr Enable: %d\n\n",
1972 (tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
1973 (tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
1974 (tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
1975 (tmp_reg & USB_INTR_SYS_ERR_EN) ? 1 : 0,
1976 (tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
1977 (tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
1978 (tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
1979 size -= t;
1980 next += t;
1981
1982 tmp_reg = fsl_readl(&dr_regs->frindex);
1983 t = scnprintf(next, size,
1984 "USB Frame Index Reg:" "Frame Number is 0x%x\n\n",
1985 (tmp_reg & USB_FRINDEX_MASKS));
1986 size -= t;
1987 next += t;
1988
1989 tmp_reg = fsl_readl(&dr_regs->deviceaddr);
1990 t = scnprintf(next, size,
1991 "USB Device Address Reg:" "Device Addr is 0x%x\n\n",
1992 (tmp_reg & USB_DEVICE_ADDRESS_MASK));
1993 size -= t;
1994 next += t;
1995
1996 tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
1997 t = scnprintf(next, size,
1998 "USB Endpoint List Address Reg:"
1999			"Endpoint List Addr is 0x%x\n\n",
2000 (tmp_reg & USB_EP_LIST_ADDRESS_MASK));
2001 size -= t;
2002 next += t;
2003
2004 tmp_reg = fsl_readl(&dr_regs->portsc1);
2005 t = scnprintf(next, size,
2006 "USB Port Status&Control Reg:\n"
2007 "Port Transceiver Type : %s" "Port Speed: %s \n"
2008 "PHY Low Power Suspend: %s" "Port Reset: %s"
2009 "Port Suspend Mode: %s \n" "Over-current Change: %s"
2010 "Port Enable/Disable Change: %s\n"
2011 "Port Enabled/Disabled: %s"
2012 "Current Connect Status: %s\n\n", ( {
2013 char *s;
2014 switch (tmp_reg & PORTSCX_PTS_FSLS) {
2015 case PORTSCX_PTS_UTMI:
2016 s = "UTMI"; break;
2017 case PORTSCX_PTS_ULPI:
2018 s = "ULPI "; break;
2019 case PORTSCX_PTS_FSLS:
2020 s = "FS/LS Serial"; break;
2021 default:
2022 s = "None"; break;
2023 }
2024 s;} ), ( {
2025 char *s;
2026 switch (tmp_reg & PORTSCX_PORT_SPEED_UNDEF) {
2027 case PORTSCX_PORT_SPEED_FULL:
2028 s = "Full Speed"; break;
2029 case PORTSCX_PORT_SPEED_LOW:
2030 s = "Low Speed"; break;
2031 case PORTSCX_PORT_SPEED_HIGH:
2032 s = "High Speed"; break;
2033 default:
2034 s = "Undefined"; break;
2035 }
2036 s;
2037 } ),
2038 (tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
2039 "Normal PHY mode" : "Low power mode",
2040 (tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
2041 "Not in Reset",
2042 (tmp_reg & PORTSCX_PORT_SUSPEND) ? "In " : "Not in",
2043			(tmp_reg & PORTSCX_OVER_CURRENT_CHG) ? "Detected" :
2044			"No",
2045			(tmp_reg & PORTSCX_PORT_EN_DIS_CHANGE) ? "Changed" :
2046			"Not changed",
2047			(tmp_reg & PORTSCX_PORT_ENABLE) ? "Enabled" :
2048			"Disabled",
2049 (tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
2050 "Attached" : "Not-Att");
2051 size -= t;
2052 next += t;
2053
2054 tmp_reg = fsl_readl(&dr_regs->usbmode);
2055 t = scnprintf(next, size,
2056 "USB Mode Reg:" "Controller Mode is : %s\n\n", ( {
2057 char *s;
2058 switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
2059 case USB_MODE_CTRL_MODE_IDLE:
2060 s = "Idle"; break;
2061 case USB_MODE_CTRL_MODE_DEVICE:
2062 s = "Device Controller"; break;
2063 case USB_MODE_CTRL_MODE_HOST:
2064 s = "Host Controller"; break;
2065 default:
2066 s = "None"; break;
2067 }
2068 s;
2069 } ));
2070 size -= t;
2071 next += t;
2072
2073 tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
2074 t = scnprintf(next, size,
2075 "Endpoint Setup Status Reg:" "SETUP on ep 0x%x\n\n",
2076 (tmp_reg & EP_SETUP_STATUS_MASK));
2077 size -= t;
2078 next += t;
2079
2080 for (i = 0; i < udc->max_ep / 2; i++) {
2081 tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
2082 t = scnprintf(next, size, "EP Ctrl Reg [0x%x]: = [0x%x]\n",
2083 i, tmp_reg);
2084 size -= t;
2085 next += t;
2086 }
2087 tmp_reg = fsl_readl(&dr_regs->endpointprime);
2088 t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n", tmp_reg);
2089 size -= t;
2090 next += t;
2091
2092 tmp_reg = usb_sys_regs->snoop1;
2093 t = scnprintf(next, size, "\nSnoop1 Reg : = [0x%x]\n\n", tmp_reg);
2094 size -= t;
2095 next += t;
2096
2097 tmp_reg = usb_sys_regs->control;
2098 t = scnprintf(next, size, "General Control Reg : = [0x%x]\n\n",
2099 tmp_reg);
2100 size -= t;
2101 next += t;
2102
2103 /* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
2104 ep = &udc->eps[0];
2105 t = scnprintf(next, size, "For %s Maxpkt is 0x%x index is 0x%x\n",
2106 ep->ep.name, ep_maxpacket(ep), ep_index(ep));
2107 size -= t;
2108 next += t;
2109
2110 if (list_empty(&ep->queue)) {
2111 t = scnprintf(next, size, "its req queue is empty\n\n");
2112 size -= t;
2113 next += t;
2114 } else {
2115 list_for_each_entry(req, &ep->queue, queue) {
2116 t = scnprintf(next, size,
2117 "req %p actual 0x%x length 0x%x buf %p\n",
2118 &req->req, req->req.actual,
2119 req->req.length, req->req.buf);
2120 size -= t;
2121 next += t;
2122 }
2123 }
2124 /* other gadget->eplist ep */
2125 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
2126 if (ep->desc) {
2127 t = scnprintf(next, size,
2128 "\nFor %s Maxpkt is 0x%x "
2129 "index is 0x%x\n",
2130 ep->ep.name, ep_maxpacket(ep),
2131 ep_index(ep));
2132 size -= t;
2133 next += t;
2134
2135 if (list_empty(&ep->queue)) {
2136 t = scnprintf(next, size,
2137 "its req queue is empty\n\n");
2138 size -= t;
2139 next += t;
2140 } else {
2141 list_for_each_entry(req, &ep->queue, queue) {
2142 t = scnprintf(next, size,
2143					"req %p actual 0x%x length "
2144 "0x%x buf %p\n",
2145 &req->req, req->req.actual,
2146 req->req.length, req->req.buf);
2147 size -= t;
2148 next += t;
2149 } /* end for each_entry of ep req */
2150 } /* end for else */
2151 } /* end for if(ep->queue) */
2152 } /* end (ep->desc) */
2153
2154 spin_unlock_irqrestore(&udc->lock, flags);
2155
2156 *eof = 1;
2157 return count - size;
2158}
2159
2160#define create_proc_file() create_proc_read_entry(proc_filename, \
2161 0, NULL, fsl_proc_read, NULL)
2162
2163#define remove_proc_file() remove_proc_entry(proc_filename, NULL)
2164
2165#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
2166
2167#define create_proc_file() do {} while (0)
2168#define remove_proc_file() do {} while (0)
2169
2170#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
2171
2172/*-------------------------------------------------------------------------*/
2173
2174/* Release udc structures */
2175static void fsl_udc_release(struct device *dev)
2176{
2177 complete(udc_controller->done);
2178 dma_free_coherent(dev, udc_controller->ep_qh_size,
2179 udc_controller->ep_qh, udc_controller->ep_qh_dma);
2180 kfree(udc_controller);
2181}
2182
2183/******************************************************************
2184 Internal structure setup functions
2185*******************************************************************/
2186/*------------------------------------------------------------------
2187 * init resources for the global controller
2188 * Return the udc handle on success or NULL on failure
2189 ------------------------------------------------------------------*/
2190static struct fsl_udc *__init struct_udc_setup(struct platform_device *pdev)
2191{
2192 struct fsl_udc *udc;
2193 struct fsl_usb2_platform_data *pdata;
2194 size_t size;
2195
2196 udc = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
2197 if (udc == NULL) {
2198 ERR("malloc udc failed\n");
2199 return NULL;
2200 }
2201
2202 pdata = pdev->dev.platform_data;
2203 udc->phy_mode = pdata->phy_mode;
2204	/* max_ep_nr counts bidirectional endpoints; max_ep counts each direction */
2205 udc->max_ep = pdata->max_ep_nr * 2;
2206
2207 udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
2208 if (!udc->eps) {
2209 ERR("malloc fsl_ep failed\n");
2210 goto cleanup;
2211 }
2212
2213	/* initialize QHs, taking care of alignment */
2214 size = udc->max_ep * sizeof(struct ep_queue_head);
2215 if (size < QH_ALIGNMENT)
2216 size = QH_ALIGNMENT;
2217 else if ((size % QH_ALIGNMENT) != 0) {
2218 size += QH_ALIGNMENT + 1;
2219 size &= ~(QH_ALIGNMENT - 1);
2220 }
2221 udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
2222 &udc->ep_qh_dma, GFP_KERNEL);
2223 if (!udc->ep_qh) {
2224 ERR("malloc QHs for udc failed\n");
2225 kfree(udc->eps);
2226 goto cleanup;
2227 }
2228
2229 udc->ep_qh_size = size;
2230
2231 /* Initialize ep0 status request structure */
2232 /* FIXME: fsl_alloc_request() ignores ep argument */
2233 udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
2234 struct fsl_req, req);
2235 /* allocate a small amount of memory to get valid address */
2236 udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
2237 udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
2238
2239 udc->resume_state = USB_STATE_NOTATTACHED;
2240 udc->usb_state = USB_STATE_POWERED;
2241 udc->ep0_dir = 0;
2242 udc->remote_wakeup = 0; /* default to 0 on reset */
2243 spin_lock_init(&udc->lock);
2244
2245 return udc;
2246
2247cleanup:
2248 kfree(udc);
2249 return NULL;
2250}
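The alignment fix-up above rounds the QH array size up to a QH_ALIGNMENT boundary; it never yields less than the conventional align-up expression, and at most one extra alignment unit (only when size % QH_ALIGNMENT == QH_ALIGNMENT - 1). Shown only for comparison, a sketch of the usual idiom:

/* Conventional align-up, for comparison with the rounding in
 * struct_udc_setup(); same result except for the rare extra unit noted above. */
size = (size + QH_ALIGNMENT - 1) & ~(QH_ALIGNMENT - 1);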
2251
2252/*----------------------------------------------------------------
2253 * Setup the fsl_ep struct for eps
2254 * Link fsl_ep->ep to gadget->ep_list
2255 * ep0out is not used, so do nothing for it here
2256 * ep0in should be taken care of separately
2257 *--------------------------------------------------------------*/
2258static int __init struct_ep_setup(struct fsl_udc *udc, unsigned char index,
2259 char *name, int link)
2260{
2261 struct fsl_ep *ep = &udc->eps[index];
2262
2263 ep->udc = udc;
2264 strcpy(ep->name, name);
2265 ep->ep.name = ep->name;
2266
2267 ep->ep.ops = &fsl_ep_ops;
2268 ep->stopped = 0;
2269
2270 /* for ep0: maxP defined in desc
2271 * for other eps, maxP is set by epautoconfig() called by gadget layer
2272 */
2273 ep->ep.maxpacket = (unsigned short) ~0;
2274
2275 /* the queue lists any req for this ep */
2276 INIT_LIST_HEAD(&ep->queue);
2277
2278	/* gadget.ep_list is used for ep_autoconfig, so it excludes ep0 */
2279 if (link)
2280 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2281 ep->gadget = &udc->gadget;
2282 ep->qh = &udc->ep_qh[index];
2283
2284 return 0;
2285}
2286
2287/* Driver probe function
2288 * all initialization operations are implemented here except enabling the usb_intr reg
2289 */
2290static int __init fsl_udc_probe(struct platform_device *pdev)
2291{
2292 struct resource *res;
2293 int ret = -ENODEV;
2294 unsigned int i;
2295
2296 if (strcmp(pdev->name, driver_name)) {
2297 VDBG("Wrong device\n");
2298 return -ENODEV;
2299 }
2300
2301 /* board setup should have been done in the platform code */
2302
2303 /* Initialize the udc structure including QH member and other member */
2304 udc_controller = struct_udc_setup(pdev);
2305 if (!udc_controller) {
2306 VDBG("udc_controller is NULL \n");
2307 return -ENOMEM;
2308 }
2309
2310 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2311 if (!res)
2312 return -ENXIO;
2313
2314 if (!request_mem_region(res->start, res->end - res->start + 1,
2315 driver_name)) {
2316 ERR("request mem region for %s failed \n", pdev->name);
2317 return -EBUSY;
2318 }
2319
2320 dr_regs = ioremap(res->start, res->end - res->start + 1);
2321 if (!dr_regs) {
2322 ret = -ENOMEM;
2323 goto err1;
2324 }
2325
2326 usb_sys_regs = (struct usb_sys_interface *)
2327 ((u32)dr_regs + USB_DR_SYS_OFFSET);
2328
2329 udc_controller->irq = platform_get_irq(pdev, 0);
2330 if (!udc_controller->irq) {
2331 ret = -ENODEV;
2332 goto err2;
2333 }
2334
2335 ret = request_irq(udc_controller->irq, fsl_udc_irq, SA_SHIRQ,
2336 driver_name, udc_controller);
2337 if (ret != 0) {
2338 ERR("cannot request irq %d err %d \n",
2339 udc_controller->irq, ret);
2340 goto err2;
2341 }
2342
2343 /* initialize usb hw reg except for regs for EP,
2344 * leave usbintr reg untouched */
2345 dr_controller_setup(udc_controller);
2346
2347 /* Setup gadget structure */
2348 udc_controller->gadget.ops = &fsl_gadget_ops;
2349 udc_controller->gadget.is_dualspeed = 1;
2350 udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
2351 INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
2352 udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
2353 udc_controller->gadget.name = driver_name;
2354
2355 /* Setup gadget.dev and register with kernel */
2356 strcpy(udc_controller->gadget.dev.bus_id, "gadget");
2357 udc_controller->gadget.dev.release = fsl_udc_release;
2358 udc_controller->gadget.dev.parent = &pdev->dev;
2359 ret = device_register(&udc_controller->gadget.dev);
2360 if (ret < 0)
2361 goto err3;
2362
2363 /* setup QH and epctrl for ep0 */
2364 ep0_setup(udc_controller);
2365
2366 /* setup udc->eps[] for ep0 */
2367 struct_ep_setup(udc_controller, 0, "ep0", 0);
2368	/* for ep0: the desc is defined here;
2369	 * for other eps, the gadget layer calls ep_enable with a defined desc
2370 */
2371 udc_controller->eps[0].desc = &fsl_ep0_desc;
2372 udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
2373
2374 /* setup the udc->eps[] for non-control endpoints and link
2375 * to gadget.ep_list */
2376 for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
2377 char name[14];
2378
2379 sprintf(name, "ep%dout", i);
2380 struct_ep_setup(udc_controller, i * 2, name, 1);
2381 sprintf(name, "ep%din", i);
2382 struct_ep_setup(udc_controller, i * 2 + 1, name, 1);
2383 }
2384
2385 /* use dma_pool for TD management */
2386 udc_controller->td_pool = dma_pool_create("udc_td", &pdev->dev,
2387 sizeof(struct ep_td_struct),
2388 DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
2389 if (udc_controller->td_pool == NULL) {
2390 ret = -ENOMEM;
2391 goto err4;
2392 }
2393 create_proc_file();
2394 return 0;
2395
2396err4:
2397 device_unregister(&udc_controller->gadget.dev);
2398err3:
2399 free_irq(udc_controller->irq, udc_controller);
2400err2:
2401 iounmap(dr_regs);
2402err1:
2403 release_mem_region(res->start, res->end - res->start + 1);
2404 return ret;
2405}
2406
2407/* Driver removal function
2408 * Free resources and finish pending transactions
2409 */
2410static int __exit fsl_udc_remove(struct platform_device *pdev)
2411{
2412 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2413
2414 DECLARE_COMPLETION(done);
2415
2416 if (!udc_controller)
2417 return -ENODEV;
2418 udc_controller->done = &done;
2419
2420 /* DR has been stopped in usb_gadget_unregister_driver() */
2421 remove_proc_file();
2422
2423 /* Free allocated memory */
2424 kfree(udc_controller->status_req->req.buf);
2425 kfree(udc_controller->status_req);
2426 kfree(udc_controller->eps);
2427
2428 dma_pool_destroy(udc_controller->td_pool);
2429 free_irq(udc_controller->irq, udc_controller);
2430 iounmap(dr_regs);
2431 release_mem_region(res->start, res->end - res->start + 1);
2432
2433 device_unregister(&udc_controller->gadget.dev);
2434	/* free udc -- wait for release() to finish */
2435 wait_for_completion(&done);
2436
2437 return 0;
2438}
2439
2440/*-----------------------------------------------------------------
2441 * Modify Power management attributes
2442 * Used by the OTG state machine to disable the gadget temporarily
2443 -----------------------------------------------------------------*/
2444static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
2445{
2446 dr_controller_stop(udc_controller);
2447 return 0;
2448}
2449
2450/*-----------------------------------------------------------------
2451 * Invoked on USB resume. May be called in_interrupt.
2452 * Here we start the DR controller and enable the irq
2453 *-----------------------------------------------------------------*/
2454static int fsl_udc_resume(struct platform_device *pdev)
2455{
2456 /* Enable DR irq reg and set controller Run */
2457 if (udc_controller->stopped) {
2458 dr_controller_setup(udc_controller);
2459 dr_controller_run(udc_controller);
2460 }
2461 udc_controller->usb_state = USB_STATE_ATTACHED;
2462 udc_controller->ep0_state = WAIT_FOR_SETUP;
2463 udc_controller->ep0_dir = 0;
2464 return 0;
2465}
2466
2467/*-------------------------------------------------------------------------
2468 Register entry point for the peripheral controller driver
2469--------------------------------------------------------------------------*/
2470
2471static struct platform_driver udc_driver = {
2472 .remove = __exit_p(fsl_udc_remove),
2473 /* these suspend and resume are not usb suspend and resume */
2474 .suspend = fsl_udc_suspend,
2475 .resume = fsl_udc_resume,
2476 .driver = {
2477 .name = (char *)driver_name,
2478 .owner = THIS_MODULE,
2479 },
2480};
2481
2482static int __init udc_init(void)
2483{
2484 printk(KERN_INFO "%s (%s)\n", driver_desc, DRIVER_VERSION);
2485 return platform_driver_probe(&udc_driver, fsl_udc_probe);
2486}
2487
2488module_init(udc_init);
2489
2490static void __exit udc_exit(void)
2491{
2492 platform_driver_unregister(&udc_driver);
2493 printk("%s unregistered \n", driver_desc);
2494}
2495
2496module_exit(udc_exit);
2497
2498MODULE_DESCRIPTION(DRIVER_DESC);
2499MODULE_AUTHOR(DRIVER_AUTHOR);
2500MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
new file mode 100644
index 000000000000..c6291e046507
--- /dev/null
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -0,0 +1,579 @@
1/*
2 * Freescale USB device/endpoint management registers
3 */
4#ifndef __FSL_USB2_UDC_H
5#define __FSL_USB2_UDC_H
6
7/* ### define USB registers here
8 */
9#define USB_MAX_CTRL_PAYLOAD 64
10#define USB_DR_SYS_OFFSET 0x400
11
12 /* USB DR device mode registers (Little Endian) */
13struct usb_dr_device {
14 /* Capability register */
15 u8 res1[256];
16 u16 caplength; /* Capability Register Length */
17 u16 hciversion; /* Host Controller Interface Version */
18	u32 hcsparams;		/* Host Controller Structural Parameters */
19 u32 hccparams; /* Host Controller Capability Parameters */
20 u8 res2[20];
21 u32 dciversion; /* Device Controller Interface Version */
22 u32 dccparams; /* Device Controller Capability Parameters */
23 u8 res3[24];
24 /* Operation register */
25 u32 usbcmd; /* USB Command Register */
26 u32 usbsts; /* USB Status Register */
27 u32 usbintr; /* USB Interrupt Enable Register */
28 u32 frindex; /* Frame Index Register */
29 u8 res4[4];
30 u32 deviceaddr; /* Device Address */
31 u32 endpointlistaddr; /* Endpoint List Address Register */
32 u8 res5[4];
33 u32 burstsize; /* Master Interface Data Burst Size Register */
34 u32 txttfilltuning; /* Transmit FIFO Tuning Controls Register */
35 u8 res6[24];
36 u32 configflag; /* Configure Flag Register */
37 u32 portsc1; /* Port 1 Status and Control Register */
38 u8 res7[28];
39 u32 otgsc; /* On-The-Go Status and Control */
40 u32 usbmode; /* USB Mode Register */
41 u32 endptsetupstat; /* Endpoint Setup Status Register */
42 u32 endpointprime; /* Endpoint Initialization Register */
43 u32 endptflush; /* Endpoint Flush Register */
44 u32 endptstatus; /* Endpoint Status Register */
45 u32 endptcomplete; /* Endpoint Complete Register */
46 u32 endptctrl[6]; /* Endpoint Control Registers */
47};
48
49 /* USB DR host mode registers (Little Endian) */
50struct usb_dr_host {
51 /* Capability register */
52 u8 res1[256];
53 u16 caplength; /* Capability Register Length */
54 u16 hciversion; /* Host Controller Interface Version */
55	u32 hcsparams;		/* Host Controller Structural Parameters */
56 u32 hccparams; /* Host Controller Capability Parameters */
57 u8 res2[20];
58 u32 dciversion; /* Device Controller Interface Version */
59 u32 dccparams; /* Device Controller Capability Parameters */
60 u8 res3[24];
61 /* Operation register */
62 u32 usbcmd; /* USB Command Register */
63 u32 usbsts; /* USB Status Register */
64 u32 usbintr; /* USB Interrupt Enable Register */
65 u32 frindex; /* Frame Index Register */
66 u8 res4[4];
67 u32 periodiclistbase; /* Periodic Frame List Base Address Register */
68 u32 asynclistaddr; /* Current Asynchronous List Address Register */
69 u8 res5[4];
70 u32 burstsize; /* Master Interface Data Burst Size Register */
71 u32 txttfilltuning; /* Transmit FIFO Tuning Controls Register */
72 u8 res6[24];
73 u32 configflag; /* Configure Flag Register */
74 u32 portsc1; /* Port 1 Status and Control Register */
75 u8 res7[28];
76 u32 otgsc; /* On-The-Go Status and Control */
77 u32 usbmode; /* USB Mode Register */
78 u32 endptsetupstat; /* Endpoint Setup Status Register */
79 u32 endpointprime; /* Endpoint Initialization Register */
80 u32 endptflush; /* Endpoint Flush Register */
81 u32 endptstatus; /* Endpoint Status Register */
82 u32 endptcomplete; /* Endpoint Complete Register */
83 u32 endptctrl[6]; /* Endpoint Control Registers */
84};
85
86 /* non-EHCI USB system interface registers (Big Endian) */
87struct usb_sys_interface {
88 u32 snoop1;
89 u32 snoop2;
90 u32 age_cnt_thresh; /* Age Count Threshold Register */
91 u32 pri_ctrl; /* Priority Control Register */
92 u32 si_ctrl; /* System Interface Control Register */
93 u8 res[236];
94 u32 control; /* General Purpose Control Register */
95};
96
97/* ep0 transfer state */
98#define WAIT_FOR_SETUP 0
99#define DATA_STATE_XMIT 1
100#define DATA_STATE_NEED_ZLP 2
101#define WAIT_FOR_OUT_STATUS 3
102#define DATA_STATE_RECV 4
103
104/* Frame Index Register Bit Masks */
105#define USB_FRINDEX_MASKS 0x3fff
106/* USB CMD Register Bit Masks */
107#define USB_CMD_RUN_STOP 0x00000001
108#define USB_CMD_CTRL_RESET 0x00000002
109#define USB_CMD_PERIODIC_SCHEDULE_EN 0x00000010
110#define USB_CMD_ASYNC_SCHEDULE_EN 0x00000020
111#define USB_CMD_INT_AA_DOORBELL 0x00000040
112#define USB_CMD_ASP 0x00000300
113#define USB_CMD_ASYNC_SCH_PARK_EN 0x00000800
114#define USB_CMD_SUTW 0x00002000
115#define USB_CMD_ATDTW 0x00004000
116#define USB_CMD_ITC 0x00FF0000
117
118/* bit 15,3,2 are frame list size */
119#define USB_CMD_FRAME_SIZE_1024 0x00000000
120#define USB_CMD_FRAME_SIZE_512 0x00000004
121#define USB_CMD_FRAME_SIZE_256 0x00000008
122#define USB_CMD_FRAME_SIZE_128 0x0000000C
123#define USB_CMD_FRAME_SIZE_64 0x00008000
124#define USB_CMD_FRAME_SIZE_32 0x00008004
125#define USB_CMD_FRAME_SIZE_16 0x00008008
126#define USB_CMD_FRAME_SIZE_8 0x0000800C
127
128/* bit 9-8 are async schedule park mode count */
129#define USB_CMD_ASP_00 0x00000000
130#define USB_CMD_ASP_01 0x00000100
131#define USB_CMD_ASP_10 0x00000200
132#define USB_CMD_ASP_11 0x00000300
133#define USB_CMD_ASP_BIT_POS 8
134
135/* bit 23-16 are interrupt threshold control */
136#define USB_CMD_ITC_NO_THRESHOLD 0x00000000
137#define USB_CMD_ITC_1_MICRO_FRM 0x00010000
138#define USB_CMD_ITC_2_MICRO_FRM 0x00020000
139#define USB_CMD_ITC_4_MICRO_FRM 0x00040000
140#define USB_CMD_ITC_8_MICRO_FRM 0x00080000
141#define USB_CMD_ITC_16_MICRO_FRM 0x00100000
142#define USB_CMD_ITC_32_MICRO_FRM 0x00200000
143#define USB_CMD_ITC_64_MICRO_FRM 0x00400000
144#define USB_CMD_ITC_BIT_POS 16
145
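/* Illustrative sketch only (not taken from the driver): selecting a
 * 512-entry periodic frame list and an 8-microframe interrupt threshold.
 * USB_CMD_FRAME_SIZE_8 sets all three frame-size bits (15, 3, 2), so it
 * doubles as a clear mask.  "dr_regs" is a hypothetical pointer to the
 * (little-endian) register block; real code would use the proper MMIO
 * accessors:
 *
 *	u32 cmd = le32_to_cpu(dr_regs->usbcmd);
 *	cmd &= ~(USB_CMD_ITC | USB_CMD_FRAME_SIZE_8);
 *	cmd |= USB_CMD_ITC_8_MICRO_FRM | USB_CMD_FRAME_SIZE_512;
 *	dr_regs->usbcmd = cpu_to_le32(cmd);
 */
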
146/* USB STS Register Bit Masks */
147#define USB_STS_INT 0x00000001
148#define USB_STS_ERR 0x00000002
149#define USB_STS_PORT_CHANGE 0x00000004
150#define USB_STS_FRM_LST_ROLL 0x00000008
151#define USB_STS_SYS_ERR 0x00000010
152#define USB_STS_IAA 0x00000020
153#define USB_STS_RESET 0x00000040
154#define USB_STS_SOF 0x00000080
155#define USB_STS_SUSPEND 0x00000100
156#define USB_STS_HC_HALTED 0x00001000
157#define USB_STS_RCL 0x00002000
158#define USB_STS_PERIODIC_SCHEDULE 0x00004000
159#define USB_STS_ASYNC_SCHEDULE 0x00008000
160
161/* USB INTR Register Bit Masks */
162#define USB_INTR_INT_EN 0x00000001
163#define USB_INTR_ERR_INT_EN 0x00000002
164#define USB_INTR_PTC_DETECT_EN 0x00000004
165#define USB_INTR_FRM_LST_ROLL_EN 0x00000008
166#define USB_INTR_SYS_ERR_EN 0x00000010
167#define USB_INTR_ASYN_ADV_EN 0x00000020
168#define USB_INTR_RESET_EN 0x00000040
169#define USB_INTR_SOF_EN 0x00000080
170#define USB_INTR_DEVICE_SUSPEND 0x00000100
171
172/* Device Address bit masks */
173#define USB_DEVICE_ADDRESS_MASK 0xFE000000
174#define USB_DEVICE_ADDRESS_BIT_POS 25
175
176/* endpoint list address bit masks */
177#define USB_EP_LIST_ADDRESS_MASK 0xfffff800
178
179/* PORTSCX Register Bit Masks */
180#define PORTSCX_CURRENT_CONNECT_STATUS 0x00000001
181#define PORTSCX_CONNECT_STATUS_CHANGE 0x00000002
182#define PORTSCX_PORT_ENABLE 0x00000004
183#define PORTSCX_PORT_EN_DIS_CHANGE 0x00000008
184#define PORTSCX_OVER_CURRENT_ACT 0x00000010
185#define PORTSCX_OVER_CURRENT_CHG 0x00000020
186#define PORTSCX_PORT_FORCE_RESUME 0x00000040
187#define PORTSCX_PORT_SUSPEND 0x00000080
188#define PORTSCX_PORT_RESET 0x00000100
189#define PORTSCX_LINE_STATUS_BITS 0x00000C00
190#define PORTSCX_PORT_POWER 0x00001000
191#define PORTSCX_PORT_INDICTOR_CTRL 0x0000C000
192#define PORTSCX_PORT_TEST_CTRL 0x000F0000
193#define PORTSCX_WAKE_ON_CONNECT_EN 0x00100000
194#define PORTSCX_WAKE_ON_CONNECT_DIS 0x00200000
195#define PORTSCX_WAKE_ON_OVER_CURRENT 0x00400000
196#define PORTSCX_PHY_LOW_POWER_SPD 0x00800000
197#define PORTSCX_PORT_FORCE_FULL_SPEED 0x01000000
198#define PORTSCX_PORT_SPEED_MASK 0x0C000000
199#define PORTSCX_PORT_WIDTH 0x10000000
200#define PORTSCX_PHY_TYPE_SEL 0xC0000000
201
202/* bits 11-10 are the line status */
203#define PORTSCX_LINE_STATUS_SE0 0x00000000
204#define PORTSCX_LINE_STATUS_JSTATE 0x00000400
205#define PORTSCX_LINE_STATUS_KSTATE 0x00000800
206#define PORTSCX_LINE_STATUS_UNDEF 0x00000C00
207#define PORTSCX_LINE_STATUS_BIT_POS 10
208
209/* bits 15-14 are the port indicator control */
210#define PORTSCX_PIC_OFF 0x00000000
211#define PORTSCX_PIC_AMBER 0x00004000
212#define PORTSCX_PIC_GREEN 0x00008000
213#define PORTSCX_PIC_UNDEF 0x0000C000
214#define PORTSCX_PIC_BIT_POS 14
215
216/* bits 19-16 are the port test control */
217#define PORTSCX_PTC_DISABLE 0x00000000
218#define PORTSCX_PTC_JSTATE 0x00010000
219#define PORTSCX_PTC_KSTATE 0x00020000
220#define PORTSCX_PTC_SEQNAK 0x00030000
221#define PORTSCX_PTC_PACKET 0x00040000
222#define PORTSCX_PTC_FORCE_EN 0x00050000
223#define PORTSCX_PTC_BIT_POS 16
224
225/* bits 27-26 are the port speed */
226#define PORTSCX_PORT_SPEED_FULL 0x00000000
227#define PORTSCX_PORT_SPEED_LOW 0x04000000
228#define PORTSCX_PORT_SPEED_HIGH 0x08000000
229#define PORTSCX_PORT_SPEED_UNDEF 0x0C000000
230#define PORTSCX_SPEED_BIT_POS 26
231
232/* bit 28 is parallel transceiver width for UTMI interface */
233#define PORTSCX_PTW 0x10000000
234#define PORTSCX_PTW_8BIT 0x00000000
235#define PORTSCX_PTW_16BIT 0x10000000
236
237/* bits 31-30 are the port transceiver select */
238#define PORTSCX_PTS_UTMI 0x00000000
239#define PORTSCX_PTS_ULPI 0x80000000
240#define PORTSCX_PTS_FSLS 0xC0000000
241#define PORTSCX_PTS_BIT_POS 30
242
243/* otgsc Register Bit Masks */
244#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
245#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
246#define OTGSC_CTRL_OTG_TERM 0x00000008
247#define OTGSC_CTRL_DATA_PULSING 0x00000010
248#define OTGSC_STS_USB_ID 0x00000100
249#define OTGSC_STS_A_VBUS_VALID 0x00000200
250#define OTGSC_STS_A_SESSION_VALID 0x00000400
251#define OTGSC_STS_B_SESSION_VALID 0x00000800
252#define OTGSC_STS_B_SESSION_END 0x00001000
253#define OTGSC_STS_1MS_TOGGLE 0x00002000
254#define OTGSC_STS_DATA_PULSING 0x00004000
255#define OTGSC_INTSTS_USB_ID 0x00010000
256#define OTGSC_INTSTS_A_VBUS_VALID 0x00020000
257#define OTGSC_INTSTS_A_SESSION_VALID 0x00040000
258#define OTGSC_INTSTS_B_SESSION_VALID 0x00080000
259#define OTGSC_INTSTS_B_SESSION_END 0x00100000
260#define OTGSC_INTSTS_1MS 0x00200000
261#define OTGSC_INTSTS_DATA_PULSING 0x00400000
262#define OTGSC_INTR_USB_ID 0x01000000
263#define OTGSC_INTR_A_VBUS_VALID 0x02000000
264#define OTGSC_INTR_A_SESSION_VALID 0x04000000
265#define OTGSC_INTR_B_SESSION_VALID 0x08000000
266#define OTGSC_INTR_B_SESSION_END 0x10000000
267#define OTGSC_INTR_1MS_TIMER 0x20000000
268#define OTGSC_INTR_DATA_PULSING 0x40000000
269
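/* Note on the bit layout above: the otgsc bits fall into four byte-wide
 * groups: control (OTGSC_CTRL_*, bits 7-0), raw status (OTGSC_STS_*, bits
 * 15-8), interrupt status (OTGSC_INTSTS_*, bits 23-16) and interrupt
 * enables (OTGSC_INTR_*, bits 31-24). */
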
270/* USB MODE Register Bit Masks */
271#define USB_MODE_CTRL_MODE_IDLE 0x00000000
272#define USB_MODE_CTRL_MODE_DEVICE 0x00000002
273#define USB_MODE_CTRL_MODE_HOST 0x00000003
274#define USB_MODE_CTRL_MODE_RSV 0x00000001
275#define USB_MODE_SETUP_LOCK_OFF 0x00000008
276#define USB_MODE_STREAM_DISABLE 0x00000010
277/* Endpoint Flush Register */
278#define EPFLUSH_TX_OFFSET 0x00010000
279#define EPFLUSH_RX_OFFSET 0x00000000
280
281/* Endpoint Setup Status bit masks */
282#define EP_SETUP_STATUS_MASK 0x0000003F
283#define EP_SETUP_STATUS_EP0 0x00000001
284
285/* ENDPOINTCTRLx Register Bit Masks */
286#define EPCTRL_TX_ENABLE 0x00800000
287#define EPCTRL_TX_DATA_TOGGLE_RST 0x00400000 /* Not EP0 */
288#define EPCTRL_TX_DATA_TOGGLE_INH 0x00200000 /* Not EP0 */
289#define EPCTRL_TX_TYPE 0x000C0000
290#define EPCTRL_TX_DATA_SOURCE 0x00020000 /* Not EP0 */
291#define EPCTRL_TX_EP_STALL 0x00010000
292#define EPCTRL_RX_ENABLE 0x00000080
293#define EPCTRL_RX_DATA_TOGGLE_RST 0x00000040 /* Not EP0 */
294#define EPCTRL_RX_DATA_TOGGLE_INH 0x00000020 /* Not EP0 */
295#define EPCTRL_RX_TYPE 0x0000000C
296#define EPCTRL_RX_DATA_SINK 0x00000002 /* Not EP0 */
297#define EPCTRL_RX_EP_STALL 0x00000001
298
299/* bits 19-18 and 3-2 are the endpoint type */
300#define EPCTRL_EP_TYPE_CONTROL 0
301#define EPCTRL_EP_TYPE_ISO 1
302#define EPCTRL_EP_TYPE_BULK 2
303#define EPCTRL_EP_TYPE_INTERRUPT 3
304#define EPCTRL_TX_EP_TYPE_SHIFT 18
305#define EPCTRL_RX_EP_TYPE_SHIFT 2
306
307/* SNOOPn Register Bit Masks */
308#define SNOOP_ADDRESS_MASK 0xFFFFF000
309#define SNOOP_SIZE_ZERO 0x00 /* snooping disable */
310#define SNOOP_SIZE_4KB 0x0B /* 4KB snoop size */
311#define SNOOP_SIZE_8KB 0x0C
312#define SNOOP_SIZE_16KB 0x0D
313#define SNOOP_SIZE_32KB 0x0E
314#define SNOOP_SIZE_64KB 0x0F
315#define SNOOP_SIZE_128KB 0x10
316#define SNOOP_SIZE_256KB 0x11
317#define SNOOP_SIZE_512KB 0x12
318#define SNOOP_SIZE_1MB 0x13
319#define SNOOP_SIZE_2MB 0x14
320#define SNOOP_SIZE_4MB 0x15
321#define SNOOP_SIZE_8MB 0x16
322#define SNOOP_SIZE_16MB 0x17
323#define SNOOP_SIZE_32MB 0x18
324#define SNOOP_SIZE_64MB 0x19
325#define SNOOP_SIZE_128MB 0x1A
326#define SNOOP_SIZE_256MB 0x1B
327#define SNOOP_SIZE_512MB 0x1C
328#define SNOOP_SIZE_1GB 0x1D
329#define SNOOP_SIZE_2GB 0x1E /* 2GB snoop size */
330
331/* pri_ctrl Register Bit Masks */
332#define PRI_CTRL_PRI_LVL1 0x0000000C
333#define PRI_CTRL_PRI_LVL0 0x00000003
334
335/* si_ctrl Register Bit Masks */
336#define SI_CTRL_ERR_DISABLE 0x00000010
337#define SI_CTRL_IDRC_DISABLE 0x00000008
338#define SI_CTRL_RD_SAFE_EN 0x00000004
339#define SI_CTRL_RD_PREFETCH_DISABLE 0x00000002
340#define SI_CTRL_RD_PREFEFETCH_VAL 0x00000001
341
342/* control Register Bit Masks */
343#define USB_CTRL_IOENB 0x00000004
344#define USB_CTRL_ULPI_INT0EN 0x00000001
345
346/* Endpoint Queue Head data struct
347 * Note: all the fields of the qh are little-endian, and
348 * NEXT_POINTER_MASK should be applied to a little-endian physical address
349 */
350struct ep_queue_head {
351 u32 max_pkt_length; /* Mult(31-30), Zlt(29), Max Pkt len
352 and IOS(15) */
353 u32 curr_dtd_ptr; /* Current dTD Pointer(31-5) */
354 u32 next_dtd_ptr; /* Next dTD Pointer(31-5), T(0) */
355 u32 size_ioc_int_sts; /* Total bytes (30-16), IOC (15),
356 MultO(11-10), STS (7-0) */
357 u32 buff_ptr0; /* Buffer pointer Page 0 (31-12) */
358 u32 buff_ptr1; /* Buffer pointer Page 1 (31-12) */
359 u32 buff_ptr2; /* Buffer pointer Page 2 (31-12) */
360 u32 buff_ptr3; /* Buffer pointer Page 3 (31-12) */
361 u32 buff_ptr4; /* Buffer pointer Page 4 (31-12) */
362 u32 res1;
363 u8 setup_buffer[8]; /* Setup data 8 bytes */
364 u32 res2[4];
365};
366
367/* Endpoint Queue Head Bit Masks */
368#define EP_QUEUE_HEAD_MULT_POS 30
369#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000
370#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16
371#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff)
372#define EP_QUEUE_HEAD_IOS 0x00008000
373#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001
374#define EP_QUEUE_HEAD_IOC 0x00008000
375#define EP_QUEUE_HEAD_MULTO 0x00000C00
376#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040
377#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080
378#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF
379#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0
380#define EP_QUEUE_FRINDEX_MASK 0x000007FF
381#define EP_MAX_LENGTH_TRANSFER 0x4000
382
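/* Illustrative sketch only: how the masks above combine when filling in a
 * queue head.  "qh", "maxp" and "zlt" are hypothetical locals; remember the
 * qh fields are little-endian (see the note above the struct):
 *
 *	u32 tmp = maxp << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
 *	if (zlt)
 *		tmp |= EP_QUEUE_HEAD_ZLT_SEL;
 *	qh->max_pkt_length = cpu_to_le32(tmp);
 *
 * and reading the packet length back out:
 *
 *	maxp = EP_QUEUE_HEAD_MAX_PKT_LEN(le32_to_cpu(qh->max_pkt_length));
 */
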
383/* Endpoint Transfer Descriptor data struct */
384/* Note: all the fields of the td are little-endian */
385struct ep_td_struct {
386 u32 next_td_ptr; /* Next TD pointer(31-5), T(0) set
387 indicate invalid */
388 u32 size_ioc_sts; /* Total bytes (30-16), IOC (15),
389 MultO(11-10), STS (7-0) */
390 u32 buff_ptr0; /* Buffer pointer Page 0 */
391 u32 buff_ptr1; /* Buffer pointer Page 1 */
392 u32 buff_ptr2; /* Buffer pointer Page 2 */
393 u32 buff_ptr3; /* Buffer pointer Page 3 */
394 u32 buff_ptr4; /* Buffer pointer Page 4 */
395 u32 res;
396 /* 32 bytes */
397 dma_addr_t td_dma; /* dma address for this td */
398 /* virtual address of next td specified in next_td_ptr */
399 struct ep_td_struct *next_td_virt;
400};
401
402/* Endpoint Transfer Descriptor bit Masks */
403#define DTD_NEXT_TERMINATE 0x00000001
404#define DTD_IOC 0x00008000
405#define DTD_STATUS_ACTIVE 0x00000080
406#define DTD_STATUS_HALTED 0x00000040
407#define DTD_STATUS_DATA_BUFF_ERR 0x00000020
408#define DTD_STATUS_TRANSACTION_ERR 0x00000008
409#define DTD_RESERVED_FIELDS 0x80007300
410#define DTD_ADDR_MASK 0xFFFFFFE0
411#define DTD_PACKET_SIZE 0x7FFF0000
412#define DTD_LENGTH_BIT_POS 16
413#define DTD_ERROR_MASK (DTD_STATUS_HALTED | \
414 DTD_STATUS_DATA_BUFF_ERR | \
415 DTD_STATUS_TRANSACTION_ERR)
416/* Alignment requirements; must be a power of two */
417#define DTD_ALIGNMENT 0x20
418#define QH_ALIGNMENT 2048
419
420/* Controller dma boundary */
421#define UDC_DMA_BOUNDARY 0x1000
422
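/* Illustrative sketch only: inspecting a retired dTD with the masks above.
 * "dtd" is a hypothetical struct ep_td_struct pointer; the hardware fields
 * are little-endian:
 *
 *	u32 sts = le32_to_cpu(dtd->size_ioc_sts);
 *	if (sts & DTD_ERROR_MASK)
 *		handle the halt / buffer / transaction error;
 *	else if (!(sts & DTD_STATUS_ACTIVE))
 *		the controller has retired this dTD successfully;
 */
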
423/* -----------------------------------------------------------------------*/
424/* ##### enum data
425*/
426typedef enum {
427 e_ULPI,
428 e_UTMI_8BIT,
429 e_UTMI_16BIT,
430 e_SERIAL
431} e_PhyInterface;
432
433/*-------------------------------------------------------------------------*/
434
435/* ### driver private data
436 */
437struct fsl_req {
438 struct usb_request req;
439 struct list_head queue;
440 /* ep_queue() adds request->queue to the
441 tail of the owning udc_ep->queue list */
442 struct fsl_ep *ep;
443 unsigned mapped:1;
444
445 struct ep_td_struct *head, *tail; /* dTD list for this request,
446 as CPU-endian virtual addresses */
447 unsigned int dtd_count;
448};
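/* Sketch of how these fields appear to fit together (not authoritative):
 * when a request is queued, a chain of dtd_count ep_td_struct dTDs is built
 * and tracked via head/tail, and the head's DMA address is what gets written
 * into the owning endpoint's queue head (next_dtd_ptr) so the controller can
 * walk the chain. */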
449
450#define REQ_UNCOMPLETE 1
451
452struct fsl_ep {
453 struct usb_ep ep;
454 struct list_head queue;
455 struct fsl_udc *udc;
456 struct ep_queue_head *qh;
457 const struct usb_endpoint_descriptor *desc;
458 struct usb_gadget *gadget;
459
460 char name[14];
461 unsigned stopped:1;
462};
463
464#define EP_DIR_IN 1
465#define EP_DIR_OUT 0
466
467struct fsl_udc {
468
469 struct usb_gadget gadget;
470 struct usb_gadget_driver *driver;
471 struct fsl_ep *eps;
472 unsigned int max_ep;
473 unsigned int irq;
474
475 struct usb_ctrlrequest local_setup_buff;
476 spinlock_t lock;
477 struct otg_transceiver *transceiver;
478 unsigned softconnect:1;
479 unsigned vbus_active:1;
480 unsigned stopped:1;
481 unsigned remote_wakeup:1;
482
483 struct ep_queue_head *ep_qh; /* Endpoints Queue-Head */
484 struct fsl_req *status_req; /* ep0 status request */
485 struct dma_pool *td_pool; /* dma pool for DTD */
486 enum fsl_usb2_phy_modes phy_mode;
487
488 size_t ep_qh_size; /* size after alignment adjustment */
489 dma_addr_t ep_qh_dma; /* dma address of QH */
490
491 u32 max_pipes; /* Device max pipes */
492 u32 max_use_endpts; /* Max endpoints to be used */
493 u32 bus_reset; /* Device is bus resetting */
494 u32 resume_state; /* USB state to resume */
495 u32 usb_state; /* USB current state */
496 u32 usb_next_state; /* USB next state */
497 u32 ep0_state; /* Endpoint zero state */
498 u32 ep0_dir; /* Endpoint zero direction: can be
499 USB_DIR_IN or USB_DIR_OUT */
500 u32 usb_sof_count; /* SOF count */
501 u32 errors; /* USB ERRORs count */
502 u8 device_address; /* Device USB address */
503
504 struct completion *done; /* to make sure release() is done */
505};
506
507/*-------------------------------------------------------------------------*/
508
509#ifdef DEBUG
510#define DBG(fmt, args...) printk(KERN_DEBUG "[%s] " fmt "\n", \
511 __FUNCTION__, ## args)
512#else
513#define DBG(fmt, args...) do{}while(0)
514#endif
515
516#if 0
517static void dump_msg(const char *label, const u8 * buf, unsigned int length)
518{
519 unsigned int start, num, i;
520 char line[52], *p;
521
522 if (length >= 512)
523 return;
524 DBG("%s, length %u:\n", label, length);
525 start = 0;
526 while (length > 0) {
527 num = min(length, 16u);
528 p = line;
529 for (i = 0; i < num; ++i) {
530 if (i == 8)
531 *p++ = ' ';
532 sprintf(p, " %02x", buf[i]);
533 p += 3;
534 }
535 *p = 0;
536 printk(KERN_DEBUG "%6x: %s\n", start, line);
537 buf += num;
538 start += num;
539 length -= num;
540 }
541}
542#endif
543
544#ifdef VERBOSE
545#define VDBG DBG
546#else
547#define VDBG(stuff...) do{}while(0)
548#endif
549
550#define ERR(stuff...) printk(KERN_ERR "udc: " stuff)
551#define WARN(stuff...) printk(KERN_WARNING "udc: " stuff)
552#define INFO(stuff...) printk(KERN_INFO "udc: " stuff)
553
554/*-------------------------------------------------------------------------*/
555
556/* ### Add board specific defines here
557 */
558
559/*
560 * ### pipe direction macros, as seen from the device
561 */
562#define USB_RECV 0 /* OUT EP */
563#define USB_SEND 1 /* IN EP */
564
565/*
566 * ### internally used helper routines.
567 */
568#define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF)
569#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
570#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
571 USB_DIR_IN ):((EP)->desc->bEndpointAddress \
572 & USB_DIR_IN)==USB_DIR_IN)
573#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \
574 &udc->eps[pipe])
575#define get_pipe_by_windex(windex) ((windex & USB_ENDPOINT_NUMBER_MASK) \
576 * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
577#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
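/* Pipe numbering, as implied by the helpers above: pipe = endpoint index * 2
 * + direction (0 = OUT, 1 = IN), so ep0-OUT is pipe 0, ep0-IN is pipe 1,
 * ep1-OUT is pipe 2, and so on.  get_ep_by_pipe() maps both pipe 0 and pipe 1
 * onto eps[0], since endpoint zero is handled by a single fsl_ep. */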
578
579#endif
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 2e3d6620d216..d041b919e7b8 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -99,6 +99,12 @@
99#define gadget_is_imx(g) 0 99#define gadget_is_imx(g) 0
100#endif 100#endif
101 101
102#ifdef CONFIG_USB_GADGET_FSL_USB2
103#define gadget_is_fsl_usb2(g) !strcmp("fsl-usb2-udc", (g)->name)
104#else
105#define gadget_is_fsl_usb2(g) 0
106#endif
107
102/* Mentor high speed function controller */ 108/* Mentor high speed function controller */
103#ifdef CONFIG_USB_GADGET_MUSBHSFC 109#ifdef CONFIG_USB_GADGET_MUSBHSFC
104#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name) 110#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
@@ -177,5 +183,7 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
177 return 0x17; 183 return 0x17;
178 else if (gadget_is_husb2dev(gadget)) 184 else if (gadget_is_husb2dev(gadget))
179 return 0x18; 185 return 0x18;
186 else if (gadget_is_fsl_usb2(gadget))
187 return 0x19;
180 return -ENOENT; 188 return -ENOENT;
181} 189}
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index f01890dc8751..2c043a1ea156 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -71,7 +71,7 @@
71 * by the host to interact with this device, and allocates endpoints to 71 * by the host to interact with this device, and allocates endpoints to
72 * the different protocol interfaces. The controller driver virtualizes 72 * the different protocol interfaces. The controller driver virtualizes
73 * usb hardware so that the gadget drivers will be more portable. 73 * usb hardware so that the gadget drivers will be more portable.
74 * 74 *
75 * This UDC hardware wants to implement a bit too much USB protocol, so 75 * This UDC hardware wants to implement a bit too much USB protocol, so
76 * it constrains the sorts of USB configuration change events that work. 76 * it constrains the sorts of USB configuration change events that work.
77 * The errata for these chips are misleading; some "fixed" bugs from 77 * The errata for these chips are misleading; some "fixed" bugs from
@@ -141,7 +141,7 @@ MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
141#endif 141#endif
142 142
143/* --------------------------------------------------------------------------- 143/* ---------------------------------------------------------------------------
144 * endpoint related parts of the api to the usb controller hardware, 144 * endpoint related parts of the api to the usb controller hardware,
145 * used by gadget driver; and the inner talker-to-hardware core. 145 * used by gadget driver; and the inner talker-to-hardware core.
146 * --------------------------------------------------------------------------- 146 * ---------------------------------------------------------------------------
147 */ 147 */
@@ -293,7 +293,7 @@ static int pxa2xx_ep_enable (struct usb_ep *_ep,
293 293
294#ifdef USE_DMA 294#ifdef USE_DMA
295 /* for (some) bulk and ISO endpoints, try to get a DMA channel and 295 /* for (some) bulk and ISO endpoints, try to get a DMA channel and
296 * bind it to the endpoint. otherwise use PIO. 296 * bind it to the endpoint. otherwise use PIO.
297 */ 297 */
298 switch (ep->bmAttributes) { 298 switch (ep->bmAttributes) {
299 case USB_ENDPOINT_XFER_ISOC: 299 case USB_ENDPOINT_XFER_ISOC:
@@ -304,7 +304,7 @@ static int pxa2xx_ep_enable (struct usb_ep *_ep,
304 if (!use_dma || !ep->reg_drcmr) 304 if (!use_dma || !ep->reg_drcmr)
305 break; 305 break;
306 ep->dma = pxa_request_dma ((char *)_ep->name, 306 ep->dma = pxa_request_dma ((char *)_ep->name,
307 (le16_to_cpu (desc->wMaxPacketSize) > 64) 307 (le16_to_cpu (desc->wMaxPacketSize) > 64)
308 ? DMA_PRIO_MEDIUM /* some iso */ 308 ? DMA_PRIO_MEDIUM /* some iso */
309 : DMA_PRIO_LOW, 309 : DMA_PRIO_LOW,
310 dma_nodesc_handler, ep); 310 dma_nodesc_handler, ep);
@@ -361,7 +361,7 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
361 */ 361 */
362 362
363/* 363/*
364 * pxa2xx_ep_alloc_request - allocate a request data structure 364 * pxa2xx_ep_alloc_request - allocate a request data structure
365 */ 365 */
366static struct usb_request * 366static struct usb_request *
367pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) 367pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
@@ -378,7 +378,7 @@ pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
378 378
379 379
380/* 380/*
381 * pxa2xx_ep_free_request - deallocate a request data structure 381 * pxa2xx_ep_free_request - deallocate a request data structure
382 */ 382 */
383static void 383static void
384pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req) 384pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
@@ -1031,7 +1031,7 @@ pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1031 1031
1032 1032
1033/* 1033/*
1034 * nuke - dequeue ALL requests 1034 * nuke - dequeue ALL requests
1035 */ 1035 */
1036static void nuke(struct pxa2xx_ep *ep, int status) 1036static void nuke(struct pxa2xx_ep *ep, int status)
1037{ 1037{
@@ -1136,16 +1136,16 @@ static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
1136 ep->dev->req_pending = 0; 1136 ep->dev->req_pending = 0;
1137 ep->dev->ep0state = EP0_STALL; 1137 ep->dev->ep0state = EP0_STALL;
1138 1138
1139 /* and bulk/intr endpoints like dropping stalls too */ 1139 /* and bulk/intr endpoints like dropping stalls too */
1140 } else { 1140 } else {
1141 unsigned i; 1141 unsigned i;
1142 for (i = 0; i < 1000; i += 20) { 1142 for (i = 0; i < 1000; i += 20) {
1143 if (*ep->reg_udccs & UDCCS_BI_SST) 1143 if (*ep->reg_udccs & UDCCS_BI_SST)
1144 break; 1144 break;
1145 udelay(20); 1145 udelay(20);
1146 } 1146 }
1147 } 1147 }
1148 local_irq_restore(flags); 1148 local_irq_restore(flags);
1149 1149
1150 DBG(DBG_VERBOSE, "%s halt\n", _ep->name); 1150 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1151 return 0; 1151 return 0;
@@ -1216,7 +1216,7 @@ static struct usb_ep_ops pxa2xx_ep_ops = {
1216 1216
1217 1217
1218/* --------------------------------------------------------------------------- 1218/* ---------------------------------------------------------------------------
1219 * device-scoped parts of the api to the usb controller hardware 1219 * device-scoped parts of the api to the usb controller hardware
1220 * --------------------------------------------------------------------------- 1220 * ---------------------------------------------------------------------------
1221 */ 1221 */
1222 1222
@@ -1239,7 +1239,7 @@ static void udc_enable (struct pxa2xx_udc *);
1239static void udc_disable(struct pxa2xx_udc *); 1239static void udc_disable(struct pxa2xx_udc *);
1240 1240
1241/* We disable the UDC -- and its 48 MHz clock -- whenever it's not 1241/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1242 * in active use. 1242 * in active use.
1243 */ 1243 */
1244static int pullup(struct pxa2xx_udc *udc, int is_active) 1244static int pullup(struct pxa2xx_udc *udc, int is_active)
1245{ 1245{
@@ -1464,24 +1464,10 @@ done:
1464 1464
1465#endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1465#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1466 1466
1467/* "function" sysfs attribute */
1468static ssize_t
1469show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1470{
1471 struct pxa2xx_udc *dev = dev_get_drvdata (_dev);
1472
1473 if (!dev->driver
1474 || !dev->driver->function
1475 || strlen (dev->driver->function) > PAGE_SIZE)
1476 return 0;
1477 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1478}
1479static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1480
1481/*-------------------------------------------------------------------------*/ 1467/*-------------------------------------------------------------------------*/
1482 1468
1483/* 1469/*
1484 * udc_disable - disable USB device controller 1470 * udc_disable - disable USB device controller
1485 */ 1471 */
1486static void udc_disable(struct pxa2xx_udc *dev) 1472static void udc_disable(struct pxa2xx_udc *dev)
1487{ 1473{
@@ -1507,7 +1493,7 @@ static void udc_disable(struct pxa2xx_udc *dev)
1507 1493
1508 1494
1509/* 1495/*
1510 * udc_reinit - initialize software state 1496 * udc_reinit - initialize software state
1511 */ 1497 */
1512static void udc_reinit(struct pxa2xx_udc *dev) 1498static void udc_reinit(struct pxa2xx_udc *dev)
1513{ 1499{
@@ -1635,18 +1621,20 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1635 dev->gadget.dev.driver = &driver->driver; 1621 dev->gadget.dev.driver = &driver->driver;
1636 dev->pullup = 1; 1622 dev->pullup = 1;
1637 1623
1638 device_add (&dev->gadget.dev); 1624 retval = device_add (&dev->gadget.dev);
1625 if (retval) {
1626fail:
1627 dev->driver = NULL;
1628 dev->gadget.dev.driver = NULL;
1629 return retval;
1630 }
1639 retval = driver->bind(&dev->gadget); 1631 retval = driver->bind(&dev->gadget);
1640 if (retval) { 1632 if (retval) {
1641 DMSG("bind to driver %s --> error %d\n", 1633 DMSG("bind to driver %s --> error %d\n",
1642 driver->driver.name, retval); 1634 driver->driver.name, retval);
1643 device_del (&dev->gadget.dev); 1635 device_del (&dev->gadget.dev);
1644 1636 goto fail;
1645 dev->driver = NULL;
1646 dev->gadget.dev.driver = NULL;
1647 return retval;
1648 } 1637 }
1649 device_create_file(dev->dev, &dev_attr_function);
1650 1638
1651 /* ... then enable host detection and ep0; and we're ready 1639 /* ... then enable host detection and ep0; and we're ready
1652 * for set_configuration as well as eventual disconnect. 1640 * for set_configuration as well as eventual disconnect.
@@ -1704,7 +1692,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1704 dev->driver = NULL; 1692 dev->driver = NULL;
1705 1693
1706 device_del (&dev->gadget.dev); 1694 device_del (&dev->gadget.dev);
1707 device_remove_file(dev->dev, &dev_attr_function);
1708 1695
1709 DMSG("unregistered gadget driver '%s'\n", driver->driver.name); 1696 DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
1710 dump_state(dev); 1697 dump_state(dev);
@@ -2474,12 +2461,12 @@ static struct pxa2xx_udc memory = {
2474#define IXP465_AD 0x00000200 2461#define IXP465_AD 0x00000200
2475 2462
2476/* 2463/*
2477 * probe - binds to the platform device 2464 * probe - binds to the platform device
2478 */ 2465 */
2479static int __init pxa2xx_udc_probe(struct platform_device *pdev) 2466static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2480{ 2467{
2481 struct pxa2xx_udc *dev = &memory; 2468 struct pxa2xx_udc *dev = &memory;
2482 int retval, out_dma = 1, vbus_irq; 2469 int retval, out_dma = 1, vbus_irq, irq;
2483 u32 chiprev; 2470 u32 chiprev;
2484 2471
2485 /* insist on Intel/ARM/XScale */ 2472 /* insist on Intel/ARM/XScale */
@@ -2522,7 +2509,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2522 return -ENODEV; 2509 return -ENODEV;
2523 } 2510 }
2524 2511
2525 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB, 2512 irq = platform_get_irq(pdev, 0);
2513 if (irq < 0)
2514 return -ENODEV;
2515
2516 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, irq,
2526 dev->has_cfr ? "" : " (!cfr)", 2517 dev->has_cfr ? "" : " (!cfr)",
2527 out_dma ? "" : " (broken dma-out)", 2518 out_dma ? "" : " (broken dma-out)",
2528 SIZE_STR DMASTR 2519 SIZE_STR DMASTR
@@ -2570,11 +2561,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2570 dev->vbus = is_vbus_present(); 2561 dev->vbus = is_vbus_present();
2571 2562
2572 /* irq setup after old hardware state is cleaned up */ 2563 /* irq setup after old hardware state is cleaned up */
2573 retval = request_irq(IRQ_USB, pxa2xx_udc_irq, 2564 retval = request_irq(irq, pxa2xx_udc_irq,
2574 IRQF_DISABLED, driver_name, dev); 2565 IRQF_DISABLED, driver_name, dev);
2575 if (retval != 0) { 2566 if (retval != 0) {
2576 printk(KERN_ERR "%s: can't get irq %i, err %d\n", 2567 printk(KERN_ERR "%s: can't get irq %d, err %d\n",
2577 driver_name, IRQ_USB, retval); 2568 driver_name, irq, retval);
2578 return -EBUSY; 2569 return -EBUSY;
2579 } 2570 }
2580 dev->got_irq = 1; 2571 dev->got_irq = 1;
@@ -2589,7 +2580,7 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2589 printk(KERN_ERR "%s: can't get irq %i, err %d\n", 2580 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2590 driver_name, LUBBOCK_USB_DISC_IRQ, retval); 2581 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2591lubbock_fail0: 2582lubbock_fail0:
2592 free_irq(IRQ_USB, dev); 2583 free_irq(irq, dev);
2593 return -EBUSY; 2584 return -EBUSY;
2594 } 2585 }
2595 retval = request_irq(LUBBOCK_USB_IRQ, 2586 retval = request_irq(LUBBOCK_USB_IRQ,
@@ -2616,7 +2607,7 @@ lubbock_fail0:
2616 if (retval != 0) { 2607 if (retval != 0) {
2617 printk(KERN_ERR "%s: can't get irq %i, err %d\n", 2608 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2618 driver_name, vbus_irq, retval); 2609 driver_name, vbus_irq, retval);
2619 free_irq(IRQ_USB, dev); 2610 free_irq(irq, dev);
2620 return -EBUSY; 2611 return -EBUSY;
2621 } 2612 }
2622 } 2613 }
@@ -2641,7 +2632,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2641 remove_proc_files(); 2632 remove_proc_files();
2642 2633
2643 if (dev->got_irq) { 2634 if (dev->got_irq) {
2644 free_irq(IRQ_USB, dev); 2635 free_irq(platform_get_irq(pdev, 0), dev);
2645 dev->got_irq = 0; 2636 dev->got_irq = 0;
2646 } 2637 }
2647#ifdef CONFIG_ARCH_LUBBOCK 2638#ifdef CONFIG_ARCH_LUBBOCK
@@ -2668,7 +2659,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2668 * 2659 *
2669 * For now, we punt and forcibly disconnect from the USB host when PXA 2660 * For now, we punt and forcibly disconnect from the USB host when PXA
2670 * enters any suspend state. While we're disconnected, we always disable 2661 * enters any suspend state. While we're disconnected, we always disable
2671 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states. 2662 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2672 * Boards without software pullup control shouldn't use those states. 2663 * Boards without software pullup control shouldn't use those states.
2673 * VBUS IRQs should probably be ignored so that the PXA device just acts 2664 * VBUS IRQs should probably be ignored so that the PXA device just acts
2674 * "dead" to USB hosts until system resume. 2665 * "dead" to USB hosts until system resume.
@@ -2701,7 +2692,6 @@ static int pxa2xx_udc_resume(struct platform_device *dev)
2701/*-------------------------------------------------------------------------*/ 2692/*-------------------------------------------------------------------------*/
2702 2693
2703static struct platform_driver udc_driver = { 2694static struct platform_driver udc_driver = {
2704 .probe = pxa2xx_udc_probe,
2705 .shutdown = pxa2xx_udc_shutdown, 2695 .shutdown = pxa2xx_udc_shutdown,
2706 .remove = __exit_p(pxa2xx_udc_remove), 2696 .remove = __exit_p(pxa2xx_udc_remove),
2707 .suspend = pxa2xx_udc_suspend, 2697 .suspend = pxa2xx_udc_suspend,
@@ -2715,7 +2705,7 @@ static struct platform_driver udc_driver = {
2715static int __init udc_init(void) 2705static int __init udc_init(void)
2716{ 2706{
2717 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION); 2707 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2718 return platform_driver_register(&udc_driver); 2708 return platform_driver_probe(&udc_driver, pxa2xx_udc_probe);
2719} 2709}
2720module_init(udc_init); 2710module_init(udc_init);
2721 2711
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index 4c3c7259f019..397b149f3ca7 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -195,7 +195,7 @@ struct rndis_packet_msg_type
195 __le32 PerPacketInfoLength; 195 __le32 PerPacketInfoLength;
196 __le32 VcHandle; 196 __le32 VcHandle;
197 __le32 Reserved; 197 __le32 Reserved;
198}; 198} __attribute__ ((packed));
199 199
200struct rndis_config_parameter 200struct rndis_config_parameter
201{ 201{
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index a2e58c86849f..2ff396bd180f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -15,4 +15,3 @@ obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
15obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o 15obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
16obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o 16obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
17obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 17obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
18obj-$(CONFIG_ETRAX_ARCH_V10) += hc_crisv10.o
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index caac0d1967d0..f28736a917e4 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -31,7 +31,7 @@
31#define FSL_SOC_USB_SNOOP1 0x400 /* NOTE: big-endian */ 31#define FSL_SOC_USB_SNOOP1 0x400 /* NOTE: big-endian */
32#define FSL_SOC_USB_SNOOP2 0x404 /* NOTE: big-endian */ 32#define FSL_SOC_USB_SNOOP2 0x404 /* NOTE: big-endian */
33#define FSL_SOC_USB_AGECNTTHRSH 0x408 /* NOTE: big-endian */ 33#define FSL_SOC_USB_AGECNTTHRSH 0x408 /* NOTE: big-endian */
34#define FSL_SOC_USB_SICTRL 0x40c /* NOTE: big-endian */ 34#define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */
35#define FSL_SOC_USB_PRICTRL 0x410 /* NOTE: big-endian */ 35#define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */
36#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */ 36#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */
37#endif /* _EHCI_FSL_H */ 37#endif /* _EHCI_FSL_H */
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1813b7cac294..f4d301bc83b9 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -136,6 +136,10 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
136 /* restore CMD_RUN, framelist size, and irq threshold */ 136 /* restore CMD_RUN, framelist size, and irq threshold */
137 ehci_writel(ehci, ehci->command, &ehci->regs->command); 137 ehci_writel(ehci, ehci->command, &ehci->regs->command);
138 138
139 /* Some controller/firmware combinations need a delay during which
140 * they set up the port statuses. See Bugzilla #8190. */
141 mdelay(8);
142
139 /* manually resume the ports we suspended during bus_suspend() */ 143 /* manually resume the ports we suspended during bus_suspend() */
140 i = HCS_N_PORTS (ehci->hcs_params); 144 i = HCS_N_PORTS (ehci->hcs_params);
141 while (i--) { 145 while (i--) {
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
deleted file mode 100644
index 32f7caf24747..000000000000
--- a/drivers/usb/host/hc_crisv10.c
+++ /dev/null
@@ -1,4550 +0,0 @@
1/*
2 * usb-host.c: ETRAX 100LX USB Host Controller Driver (HCD)
3 *
4 * Copyright (c) 2002, 2003 Axis Communications AB.
5 */
6
7#include <linux/kernel.h>
8#include <linux/delay.h>
9#include <linux/ioport.h>
10#include <linux/slab.h>
11#include <linux/errno.h>
12#include <linux/unistd.h>
13#include <linux/interrupt.h>
14#include <linux/init.h>
15#include <linux/list.h>
16#include <linux/spinlock.h>
17
18#include <asm/uaccess.h>
19#include <asm/io.h>
20#include <asm/irq.h>
21#include <asm/dma.h>
22#include <asm/system.h>
23#include <asm/arch/svinto.h>
24
25#include <linux/usb.h>
26/* Ugly include because we don't live with the other host drivers. */
27#include <../drivers/usb/core/hcd.h>
28#include <../drivers/usb/core/usb.h>
29
30#include "hc_crisv10.h"
31
32#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
33#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
34#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
35
36static const char *usb_hcd_version = "$Revision: 1.2 $";
37
38#undef KERN_DEBUG
39#define KERN_DEBUG ""
40
41
42#undef USB_DEBUG_RH
43#undef USB_DEBUG_EPID
44#undef USB_DEBUG_SB
45#undef USB_DEBUG_DESC
46#undef USB_DEBUG_URB
47#undef USB_DEBUG_TRACE
48#undef USB_DEBUG_BULK
49#undef USB_DEBUG_CTRL
50#undef USB_DEBUG_INTR
51#undef USB_DEBUG_ISOC
52
53#ifdef USB_DEBUG_RH
54#define dbg_rh(format, arg...) printk(KERN_DEBUG __FILE__ ": (RH) " format "\n" , ## arg)
55#else
56#define dbg_rh(format, arg...) do {} while (0)
57#endif
58
59#ifdef USB_DEBUG_EPID
60#define dbg_epid(format, arg...) printk(KERN_DEBUG __FILE__ ": (EPID) " format "\n" , ## arg)
61#else
62#define dbg_epid(format, arg...) do {} while (0)
63#endif
64
65#ifdef USB_DEBUG_SB
66#define dbg_sb(format, arg...) printk(KERN_DEBUG __FILE__ ": (SB) " format "\n" , ## arg)
67#else
68#define dbg_sb(format, arg...) do {} while (0)
69#endif
70
71#ifdef USB_DEBUG_CTRL
72#define dbg_ctrl(format, arg...) printk(KERN_DEBUG __FILE__ ": (CTRL) " format "\n" , ## arg)
73#else
74#define dbg_ctrl(format, arg...) do {} while (0)
75#endif
76
77#ifdef USB_DEBUG_BULK
78#define dbg_bulk(format, arg...) printk(KERN_DEBUG __FILE__ ": (BULK) " format "\n" , ## arg)
79#else
80#define dbg_bulk(format, arg...) do {} while (0)
81#endif
82
83#ifdef USB_DEBUG_INTR
84#define dbg_intr(format, arg...) printk(KERN_DEBUG __FILE__ ": (INTR) " format "\n" , ## arg)
85#else
86#define dbg_intr(format, arg...) do {} while (0)
87#endif
88
89#ifdef USB_DEBUG_ISOC
90#define dbg_isoc(format, arg...) printk(KERN_DEBUG __FILE__ ": (ISOC) " format "\n" , ## arg)
91#else
92#define dbg_isoc(format, arg...) do {} while (0)
93#endif
94
95#ifdef USB_DEBUG_TRACE
96#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
97#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
98#else
99#define DBFENTER do {} while (0)
100#define DBFEXIT do {} while (0)
101#endif
102
103#define usb_pipeslow(pipe) (((pipe) >> 26) & 1)
104
105/*-------------------------------------------------------------------
106 Virtual Root Hub
107 -------------------------------------------------------------------*/
108
109static __u8 root_hub_dev_des[] =
110{
111 0x12, /* __u8 bLength; */
112 0x01, /* __u8 bDescriptorType; Device */
113 0x00, /* __le16 bcdUSB; v1.0 */
114 0x01,
115 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
116 0x00, /* __u8 bDeviceSubClass; */
117 0x00, /* __u8 bDeviceProtocol; */
118 0x08, /* __u8 bMaxPacketSize0; 8 Bytes */
119 0x00, /* __le16 idVendor; */
120 0x00,
121 0x00, /* __le16 idProduct; */
122 0x00,
123 0x00, /* __le16 bcdDevice; */
124 0x00,
125 0x00, /* __u8 iManufacturer; */
126 0x02, /* __u8 iProduct; */
127 0x01, /* __u8 iSerialNumber; */
128 0x01 /* __u8 bNumConfigurations; */
129};
130
131/* Configuration descriptor */
132static __u8 root_hub_config_des[] =
133{
134 0x09, /* __u8 bLength; */
135 0x02, /* __u8 bDescriptorType; Configuration */
136 0x19, /* __le16 wTotalLength; */
137 0x00,
138 0x01, /* __u8 bNumInterfaces; */
139 0x01, /* __u8 bConfigurationValue; */
140 0x00, /* __u8 iConfiguration; */
141 0x40, /* __u8 bmAttributes; Bit 7: Bus-powered */
142 0x00, /* __u8 MaxPower; */
143
144 /* interface */
145 0x09, /* __u8 if_bLength; */
146 0x04, /* __u8 if_bDescriptorType; Interface */
147 0x00, /* __u8 if_bInterfaceNumber; */
148 0x00, /* __u8 if_bAlternateSetting; */
149 0x01, /* __u8 if_bNumEndpoints; */
150 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
151 0x00, /* __u8 if_bInterfaceSubClass; */
152 0x00, /* __u8 if_bInterfaceProtocol; */
153 0x00, /* __u8 if_iInterface; */
154
155 /* endpoint */
156 0x07, /* __u8 ep_bLength; */
157 0x05, /* __u8 ep_bDescriptorType; Endpoint */
158 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
159 0x03, /* __u8 ep_bmAttributes; Interrupt */
160 0x08, /* __le16 ep_wMaxPacketSize; 8 Bytes */
161 0x00,
162 0xff /* __u8 ep_bInterval; 255 ms */
163};
164
165static __u8 root_hub_hub_des[] =
166{
167 0x09, /* __u8 bLength; */
168 0x29, /* __u8 bDescriptorType; Hub-descriptor */
169 0x02, /* __u8 bNbrPorts; */
170 0x00, /* __u16 wHubCharacteristics; */
171 0x00,
172 0x01, /* __u8 bPwrOn2pwrGood; 2ms */
173 0x00, /* __u8 bHubContrCurrent; 0 mA */
174 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
175 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
176};
177
178static DEFINE_TIMER(bulk_start_timer, NULL, 0, 0);
179static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0);
180
181/* We want the start timer to expire before the eot timer, because the former might start
182 traffic, thus making it unnecessary for the latter to time out. */
183#define BULK_START_TIMER_INTERVAL (HZ/10) /* 100 ms */
184#define BULK_EOT_TIMER_INTERVAL (HZ/10+2) /* 120 ms */
185
186#define OK(x) len = (x); dbg_rh("OK(%d): line: %d", x, __LINE__); break
187#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
188{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
189
190#define SLAB_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
191#define KMALLOC_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
192
193/* Most helpful debugging aid */
194#define assert(expr) ((void) ((expr) ? 0 : (err("assert failed at line %d",__LINE__))))
195
196/* Alternative assert define which stops after a failed assert. */
197/*
198#define assert(expr) \
199{ \
200 if (!(expr)) { \
201 err("assert failed at line %d",__LINE__); \
202 while (1); \
203 } \
204}
205*/
206
207
208/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it dynamically?
209 To adjust it dynamically we would have to get an interrupt when we reach the end
210 of the rx descriptor list, or when we get close to the end, and then allocate more
211 descriptors. */
212
213#define NBR_OF_RX_DESC 512
214#define RX_DESC_BUF_SIZE 1024
215#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
216
217/* The number of epids is, among other things, used for pre-allocating
218 ctrl, bulk and isoc EP descriptors (one for each epid).
219 Assumed to be > 1 when initiating the DMA lists. */
220#define NBR_OF_EPIDS 32
221
222/* Support interrupt traffic intervals up to 128 ms. */
223#define MAX_INTR_INTERVAL 128
224
225/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP table
226 must be "invalid". By this we mean that we shouldn't care about epid attentions
227 for this epid, or at least handle them differently from epid attentions for "valid"
228 epids. This define determines which one to use (don't change it). */
229#define INVALID_EPID 31
230/* A special epid for the bulk dummies. */
231#define DUMMY_EPID 30
232
233/* This is just a software cache for the valid entries in R_USB_EPT_DATA. */
234static __u32 epid_usage_bitmask;
235
236/* A bitfield to keep information on in/out traffic is needed to uniquely identify
237 an endpoint on a device, since the most significant bit which indicates traffic
238 direction is lacking in the ep_id field (ETRAX epids can handle both in and
239 out traffic on endpoints that are otherwise identical). The USB framework, however,
240 relies on them to be handled separately. For example, bulk IN and OUT urbs cannot
241 be queued in the same list, since they would block each other. */
242static __u32 epid_out_traffic;
243
244/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
245 Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be cache aligned. */
246static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
247static volatile USB_IN_Desc_t RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
248
249/* Pointers into RxDescList. */
250static volatile USB_IN_Desc_t *myNextRxDesc;
251static volatile USB_IN_Desc_t *myLastRxDesc;
252static volatile USB_IN_Desc_t *myPrevRxDesc;
253
254/* EP descriptors must be 32-bit aligned. */
255static volatile USB_EP_Desc_t TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
256static volatile USB_EP_Desc_t TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
257/* After each enabled bulk EP (IN or OUT) we put two disabled EP descriptors with the eol flag set,
258 causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
259 gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
260 EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
261 in each frame. */
262static volatile USB_EP_Desc_t TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
263
264static volatile USB_EP_Desc_t TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
265static volatile USB_SB_Desc_t TxIsocSB_zout __attribute__ ((aligned (4)));
266
267static volatile USB_EP_Desc_t TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
268static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
269
270/* A zout transfer makes a memory access at the address of its buf pointer, which means that setting
271 this buf pointer to 0 will cause an access to the flash. In addition to this, setting sw_len to 0
272 results in a 16/32 bytes (depending on DMA burst size) transfer. Instead, we set it to 1, and point
273 it to this buffer. */
274static int zout_buffer[4] __attribute__ ((aligned (4)));
275
276/* Cache for allocating new EP and SB descriptors. */
277static struct kmem_cache *usb_desc_cache;
278
279/* Cache for the registers allocated in the top half. */
280static struct kmem_cache *top_half_reg_cache;
281
282/* Cache for the data allocated in the isoc descr top half. */
283static struct kmem_cache *isoc_compl_cache;
284
285static struct usb_bus *etrax_usb_bus;
286
287/* This is a circular (double-linked) list of the active urbs for each epid.
288 The head is never removed, and new urbs are linked onto the list as
289 urb_entry_t elements. Don't reference urb_list directly; use the wrapper
290 functions instead. Note that working with these lists might require spinlock
291 protection. */
292static struct list_head urb_list[NBR_OF_EPIDS];
293
294/* Read about the need and usage of this lock in submit_ctrl_urb. */
295static spinlock_t urb_list_lock;
296
297/* Used when unlinking asynchronously. */
298static struct list_head urb_unlink_list;
299
300/* for returning string descriptors in UTF-16LE */
301static int ascii2utf (char *ascii, __u8 *utf, int utfmax)
302{
303 int retval;
304
305 for (retval = 0; *ascii && utfmax > 1; utfmax -= 2, retval += 2) {
306 *utf++ = *ascii++ & 0x7f;
307 *utf++ = 0;
308 }
309 return retval;
310}
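/* For example (illustration only): ascii2utf("USB", data, 8) stores the
 * UTF-16LE units 'U',0,'S',0,'B',0 and returns 6. */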
311
312static int usb_root_hub_string (int id, int serial, char *type, __u8 *data, int len)
313{
314 char buf [30];
315
316 // assert (len > (2 * (sizeof (buf) + 1)));
317 // assert (strlen (type) <= 8);
318
319 // language ids
320 if (id == 0) {
321 *data++ = 4; *data++ = 3; /* 4 bytes data */
322 *data++ = 0; *data++ = 0; /* some language id */
323 return 4;
324
325 // serial number
326 } else if (id == 1) {
327 sprintf (buf, "%x", serial);
328
329 // product description
330 } else if (id == 2) {
331 sprintf (buf, "USB %s Root Hub", type);
332
333 // id 3 == vendor description
334
335 // unsupported IDs --> "stall"
336 } else
337 return 0;
338
339 data [0] = 2 + ascii2utf (buf, data + 2, len - 2);
340 data [1] = 3;
341 return data [0];
342}
343
344/* Wrappers around the list functions (include/linux/list.h). */
345
346static inline int urb_list_empty(int epid)
347{
348 return list_empty(&urb_list[epid]);
349}
350
351/* Returns first urb for this epid, or NULL if list is empty. */
352static inline struct urb *urb_list_first(int epid)
353{
354 struct urb *first_urb = 0;
355
356 if (!urb_list_empty(epid)) {
357 /* Get the first urb (i.e. head->next). */
358 urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
359 first_urb = urb_entry->urb;
360 }
361 return first_urb;
362}
363
364/* Adds an urb_entry last in the list for this epid. */
365static inline void urb_list_add(struct urb *urb, int epid)
366{
367 urb_entry_t *urb_entry = kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG);
368 assert(urb_entry);
369
370 urb_entry->urb = urb;
371 list_add_tail(&urb_entry->list, &urb_list[epid]);
372}
373
374/* Search through the list for an element that contains this urb. (The list
375 is expected to be short and the one we are about to delete will often be
376 the first in the list.) */
377static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid)
378{
379 struct list_head *entry;
380 struct list_head *tmp;
381 urb_entry_t *urb_entry;
382
383 list_for_each_safe(entry, tmp, &urb_list[epid]) {
384 urb_entry = list_entry(entry, urb_entry_t, list);
385 assert(urb_entry);
386 assert(urb_entry->urb);
387
388 if (urb_entry->urb == urb) {
389 return urb_entry;
390 }
391 }
392 return 0;
393}
394
395/* Delete an urb from the list. */
396static inline void urb_list_del(struct urb *urb, int epid)
397{
398 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
399 assert(urb_entry);
400
401 /* Delete entry and free. */
402 list_del(&urb_entry->list);
403 kfree(urb_entry);
404}
405
406/* Move an urb to the end of the list. */
407static inline void urb_list_move_last(struct urb *urb, int epid)
408{
409 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
410 assert(urb_entry);
411
412 list_move_tail(&urb_entry->list, &urb_list[epid]);
413}
414
415/* Get the next urb in the list. */
416static inline struct urb *urb_list_next(struct urb *urb, int epid)
417{
418 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
419
420 assert(urb_entry);
421
422 if (urb_entry->list.next != &urb_list[epid]) {
423 struct list_head *elem = urb_entry->list.next;
424 urb_entry = list_entry(elem, urb_entry_t, list);
425 return urb_entry->urb;
426 } else {
427 return NULL;
428 }
429}
430
431
432
433/* For debug purposes only. */
434static inline void urb_list_dump(int epid)
435{
436 struct list_head *entry;
437 struct list_head *tmp;
438 urb_entry_t *urb_entry;
439 int i = 0;
440
441 info("Dumping urb list for epid %d", epid);
442
443 list_for_each_safe(entry, tmp, &urb_list[epid]) {
444 urb_entry = list_entry(entry, urb_entry_t, list);
445 info(" entry %d, urb = 0x%lx", i, (unsigned long)urb_entry->urb);
446 }
447}
448
449static void init_rx_buffers(void);
450static int etrax_rh_unlink_urb(struct urb *urb);
451static void etrax_rh_send_irq(struct urb *urb);
452static void etrax_rh_init_int_timer(struct urb *urb);
453static void etrax_rh_int_timer_do(unsigned long ptr);
454
455static int etrax_usb_setup_epid(struct urb *urb);
456static int etrax_usb_lookup_epid(struct urb *urb);
457static int etrax_usb_allocate_epid(void);
458static void etrax_usb_free_epid(int epid);
459
460static int etrax_remove_from_sb_list(struct urb *urb);
461
462static void* etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size,
463 unsigned mem_flags, dma_addr_t *dma);
464static void etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma);
465
466static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid);
467static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid);
468static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid);
469static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid);
470
471static int etrax_usb_submit_bulk_urb(struct urb *urb);
472static int etrax_usb_submit_ctrl_urb(struct urb *urb);
473static int etrax_usb_submit_intr_urb(struct urb *urb);
474static int etrax_usb_submit_isoc_urb(struct urb *urb);
475
476static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags);
477static int etrax_usb_unlink_urb(struct urb *urb, int status);
478static int etrax_usb_get_frame_number(struct usb_device *usb_dev);
479
480static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc);
481static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc);
482static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc);
483static void etrax_usb_hc_interrupt_bottom_half(void *data);
484
485static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data);
486
487
488/* The following is a list of interrupt handlers for the host controller interrupts we use.
489 They are called from etrax_usb_hc_interrupt_bottom_half. */
490static void etrax_usb_hc_isoc_eof_interrupt(void);
491static void etrax_usb_hc_bulk_eot_interrupt(int timer_induced);
492static void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg);
493static void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg);
494static void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg);
495
496static int etrax_rh_submit_urb (struct urb *urb);
497
498/* Forward declaration needed because they are used in the rx interrupt routine. */
499static void etrax_usb_complete_urb(struct urb *urb, int status);
500static void etrax_usb_complete_bulk_urb(struct urb *urb, int status);
501static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status);
502static void etrax_usb_complete_intr_urb(struct urb *urb, int status);
503static void etrax_usb_complete_isoc_urb(struct urb *urb, int status);
504
505static int etrax_usb_hc_init(void);
506static void etrax_usb_hc_cleanup(void);
507
508static struct usb_operations etrax_usb_device_operations =
509{
510 .get_frame_number = etrax_usb_get_frame_number,
511 .submit_urb = etrax_usb_submit_urb,
512 .unlink_urb = etrax_usb_unlink_urb,
513 .buffer_alloc = etrax_usb_buffer_alloc,
514 .buffer_free = etrax_usb_buffer_free
515};
516
517/* Note that these functions are always available in their "__" variants, for use in
518 error situations. The "__" missing variants are controlled by the USB_DEBUG_DESC/
519 USB_DEBUG_URB macros. */
520static void __dump_urb(struct urb* purb)
521{
522 printk("\nurb :0x%08lx\n", (unsigned long)purb);
523 printk("dev :0x%08lx\n", (unsigned long)purb->dev);
524 printk("pipe :0x%08x\n", purb->pipe);
525 printk("status :%d\n", purb->status);
526 printk("transfer_flags :0x%08x\n", purb->transfer_flags);
527 printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
528 printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
529 printk("actual_length :%d\n", purb->actual_length);
530 printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
531 printk("start_frame :%d\n", purb->start_frame);
532 printk("number_of_packets :%d\n", purb->number_of_packets);
533 printk("interval :%d\n", purb->interval);
534 printk("error_count :%d\n", purb->error_count);
535 printk("context :0x%08lx\n", (unsigned long)purb->context);
536 printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
537}
538
539static void __dump_in_desc(volatile USB_IN_Desc_t *in)
540{
541 printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
542 printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
543 printk(" command : 0x%04x\n", in->command);
544 printk(" next : 0x%08lx\n", in->next);
545 printk(" buf : 0x%08lx\n", in->buf);
546 printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
547 printk(" status : 0x%04x\n\n", in->status);
548}
549
550static void __dump_sb_desc(volatile USB_SB_Desc_t *sb)
551{
552 char tt = (sb->command & 0x30) >> 4;
553 char *tt_string;
554
555 switch (tt) {
556 case 0:
557 tt_string = "zout";
558 break;
559 case 1:
560 tt_string = "in";
561 break;
562 case 2:
563 tt_string = "out";
564 break;
565 case 3:
566 tt_string = "setup";
567 break;
568 default:
569 tt_string = "unknown (weird)";
570 }
571
572 printk("\n USB_SB_Desc at 0x%08lx\n", (unsigned long)sb);
573 printk(" command : 0x%04x\n", sb->command);
574 printk(" rem : %d\n", (sb->command & 0x3f00) >> 8);
575 printk(" full : %d\n", (sb->command & 0x40) >> 6);
576 printk(" tt : %d (%s)\n", tt, tt_string);
577 printk(" intr : %d\n", (sb->command & 0x8) >> 3);
578 printk(" eot : %d\n", (sb->command & 0x2) >> 1);
579 printk(" eol : %d\n", sb->command & 0x1);
580 printk(" sw_len : 0x%04x (%d)\n", sb->sw_len, sb->sw_len);
581 printk(" next : 0x%08lx\n", sb->next);
582 printk(" buf : 0x%08lx\n\n", sb->buf);
583}
584
585
586static void __dump_ep_desc(volatile USB_EP_Desc_t *ep)
587{
588 printk("\nUSB_EP_Desc at 0x%08lx\n", (unsigned long)ep);
589 printk(" command : 0x%04x\n", ep->command);
590 printk(" ep_id : %d\n", (ep->command & 0x1f00) >> 8);
591 printk(" enable : %d\n", (ep->command & 0x10) >> 4);
592 printk(" intr : %d\n", (ep->command & 0x8) >> 3);
593 printk(" eof : %d\n", (ep->command & 0x2) >> 1);
594 printk(" eol : %d\n", ep->command & 0x1);
595 printk(" hw_len : 0x%04x (%d)\n", ep->hw_len, ep->hw_len);
596 printk(" next : 0x%08lx\n", ep->next);
597 printk(" sub : 0x%08lx\n\n", ep->sub);
598}
599
600static inline void __dump_ep_list(int pipe_type)
601{
602 volatile USB_EP_Desc_t *ep;
603 volatile USB_EP_Desc_t *first_ep;
604 volatile USB_SB_Desc_t *sb;
605
606 switch (pipe_type)
607 {
608 case PIPE_BULK:
609 first_ep = &TxBulkEPList[0];
610 break;
611 case PIPE_CONTROL:
612 first_ep = &TxCtrlEPList[0];
613 break;
614 case PIPE_INTERRUPT:
615 first_ep = &TxIntrEPList[0];
616 break;
617 case PIPE_ISOCHRONOUS:
618 first_ep = &TxIsocEPList[0];
619 break;
620 default:
621 warn("Cannot dump unknown traffic type");
622 return;
623 }
624 ep = first_ep;
625
626 printk("\n\nDumping EP list...\n\n");
627
628 do {
629 __dump_ep_desc(ep);
630 /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
631 sb = ep->sub ? phys_to_virt(ep->sub) : 0;
632 while (sb) {
633 __dump_sb_desc(sb);
634 sb = sb->next ? phys_to_virt(sb->next) : 0;
635 }
636 ep = (volatile USB_EP_Desc_t *)(phys_to_virt(ep->next));
637
638 } while (ep != first_ep);
639}
640
641static inline void __dump_ept_data(int epid)
642{
643 unsigned long flags;
644 __u32 r_usb_ept_data;
645
646 if (epid < 0 || epid > 31) {
647 printk("Cannot dump ept data for invalid epid %d\n", epid);
648 return;
649 }
650
651 save_flags(flags);
652 cli();
653 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
654 nop();
655 r_usb_ept_data = *R_USB_EPT_DATA;
656 restore_flags(flags);
657
658 printk("\nR_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
659 if (r_usb_ept_data == 0) {
660 /* No need for more detailed printing. */
661 return;
662 }
663 printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
664 printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
665 printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
666 printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
667 printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
668 printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
669 printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
670 printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
671 printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
672 printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
673 printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
674 printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
675}
676
677static inline void __dump_ept_data_list(void)
678{
679 int i;
680
681 printk("Dumping the whole R_USB_EPT_DATA list\n");
682
683 for (i = 0; i < 32; i++) {
684 __dump_ept_data(i);
685 }
686}
687#ifdef USB_DEBUG_DESC
688 #define dump_in_desc(...) __dump_in_desc(__VA_ARGS__)
689 #define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
690 #define dump_ep_desc(...) __dump_ep_desc(__VA_ARGS__)
691#else
692#define dump_in_desc(...) do {} while (0)
693#define dump_sb_desc(...) do {} while (0)
694#define dump_ep_desc(...) do {} while (0)
695#endif
696
697#ifdef USB_DEBUG_URB
698#define dump_urb(x) __dump_urb(x)
699#else
700#define dump_urb(x) do {} while (0)
701#endif
702
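/* Build the receive side: NBR_OF_RX_DESC IN descriptors, each with an
   RX_DESC_BUF_SIZE buffer carved out of RxBuf, linked into a ring (the last
   descriptor gets the eol flag and points back to the first). DMA channel 9
   is then pointed at the first descriptor and started. */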
703static void init_rx_buffers(void)
704{
705 int i;
706
707 DBFENTER;
708
709 for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
710 RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
711 RxDescList[i].command = 0;
712 RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
713 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
714 RxDescList[i].hw_len = 0;
715 RxDescList[i].status = 0;
716
717 /* DMA IN cache bug. Flush the DMA IN buffer from the cache. (struct etrax_dma_descr
718 has the same layout as USB_IN_Desc for the relevant fields.) */
719 prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
720
721 }
722
723 RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
724 RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
725 RxDescList[i].next = virt_to_phys(&RxDescList[0]);
726 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
727 RxDescList[i].hw_len = 0;
728 RxDescList[i].status = 0;
729
730 myNextRxDesc = &RxDescList[0];
731 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
732 myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
733
734 *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
735 *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
736
737 DBFEXIT;
738}
739
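/* Build the bulk EP descriptor list: one EP descriptor per epid, plus a
   two-entry dummy list per epid whose first EP carries the intr flag (so we
   get a descriptor interrupt when the DMA channel is about to become
   disabled). The list is circular; sub-channel 0 of DMA channel 8 is parked
   at the last (eol) EP and is only started once traffic is inserted. */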
740static void init_tx_bulk_ep(void)
741{
742 int i;
743
744 DBFENTER;
745
746 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
747 CHECK_ALIGN(&TxBulkEPList[i]);
748 TxBulkEPList[i].hw_len = 0;
749 TxBulkEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
750 TxBulkEPList[i].sub = 0;
751 TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[i + 1]);
752
753 /* Initiate two EPs, disabled and with the eol flag set. No need for any
754 preserved epid. */
755
756 /* The first one has the intr flag set so we get an interrupt when the DMA
757 channel is about to become disabled. */
758 CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
759 TxBulkDummyEPList[i][0].hw_len = 0;
760 TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
761 IO_STATE(USB_EP_command, eol, yes) |
762 IO_STATE(USB_EP_command, intr, yes));
763 TxBulkDummyEPList[i][0].sub = 0;
764 TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
765
766 /* The second one. */
767 CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
768 TxBulkDummyEPList[i][1].hw_len = 0;
769 TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
770 IO_STATE(USB_EP_command, eol, yes));
771 TxBulkDummyEPList[i][1].sub = 0;
772 /* The last dummy's next pointer is the same as the current EP's next pointer. */
773 TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
774 }
775
776 /* Configure the last one. */
777 CHECK_ALIGN(&TxBulkEPList[i]);
778 TxBulkEPList[i].hw_len = 0;
779 TxBulkEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
780 IO_FIELD(USB_EP_command, epid, i));
781 TxBulkEPList[i].sub = 0;
782 TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[0]);
783
784 /* No need to configure dummy EPs for the last one as it will never be used for
785 bulk traffic (i == INVALID_EPID at this point). */
786
787 /* Set up to start on the last EP so we will enable it when inserting traffic
788 for the first time (imitating the situation where the DMA has stopped
789 because there was no more traffic). */
790 *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
791 /* No point in starting the bulk channel yet.
792 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
793 DBFEXIT;
794}
795
796static void init_tx_ctrl_ep(void)
797{
798 int i;
799
800 DBFENTER;
801
802 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
803 CHECK_ALIGN(&TxCtrlEPList[i]);
804 TxCtrlEPList[i].hw_len = 0;
805 TxCtrlEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
806 TxCtrlEPList[i].sub = 0;
807 TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[i + 1]);
808 }
809
810 CHECK_ALIGN(&TxCtrlEPList[i]);
811 TxCtrlEPList[i].hw_len = 0;
812 TxCtrlEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
813 IO_FIELD(USB_EP_command, epid, i));
814
815 TxCtrlEPList[i].sub = 0;
816 TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[0]);
817
818 *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[0]);
819 *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
820
821 DBFEXIT;
822}
823
824
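/* Build the interrupt EP descriptor ring of MAX_INTR_INTERVAL entries,
   presumably one slot per frame of the polling cycle. Every slot is enabled,
   marked eof, and initially points at the shared zero-length-OUT SB
   (TxIntrSB_zout) under INVALID_EPID; real interrupt traffic is linked into
   these slots later. */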
825static void init_tx_intr_ep(void)
826{
827 int i;
828
829 DBFENTER;
830
831 /* Read the comment at the zout_buffer declaration for an explanation of this. */
832 TxIntrSB_zout.sw_len = 1;
833 TxIntrSB_zout.next = 0;
834 TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
835 TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
836 IO_STATE(USB_SB_command, tt, zout) |
837 IO_STATE(USB_SB_command, full, yes) |
838 IO_STATE(USB_SB_command, eot, yes) |
839 IO_STATE(USB_SB_command, eol, yes));
840
841 for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
842 CHECK_ALIGN(&TxIntrEPList[i]);
843 TxIntrEPList[i].hw_len = 0;
844 TxIntrEPList[i].command =
845 (IO_STATE(USB_EP_command, eof, yes) |
846 IO_STATE(USB_EP_command, enable, yes) |
847 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
848 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
849 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
850 }
851
852 CHECK_ALIGN(&TxIntrEPList[i]);
853 TxIntrEPList[i].hw_len = 0;
854 TxIntrEPList[i].command =
855 (IO_STATE(USB_EP_command, eof, yes) |
856 IO_STATE(USB_EP_command, eol, yes) |
857 IO_STATE(USB_EP_command, enable, yes) |
858 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
859 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
860 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
861
862 *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
863 *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
864 DBFEXIT;
865}
866
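/* Build the isochronous EP descriptor list: one EP per epid, ending in the
   dummy INVALID_EPID descriptor. The dummy is kept enabled (pointing at the
   zero-length-OUT SB) so that eof interrupts keep being generated even when
   there is no isochronous traffic. */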
867static void init_tx_isoc_ep(void)
868{
869 int i;
870
871 DBFENTER;
872
873 /* Read the comment at the zout_buffer declaration for an explanation of this. */
874 TxIsocSB_zout.sw_len = 1;
875 TxIsocSB_zout.next = 0;
876 TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
877 TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
878 IO_STATE(USB_SB_command, tt, zout) |
879 IO_STATE(USB_SB_command, full, yes) |
880 IO_STATE(USB_SB_command, eot, yes) |
881 IO_STATE(USB_SB_command, eol, yes));
882
883 /* The last isochronous EP descriptor is a dummy. */
884
885 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
886 CHECK_ALIGN(&TxIsocEPList[i]);
887 TxIsocEPList[i].hw_len = 0;
888 TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
889 TxIsocEPList[i].sub = 0;
890 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
891 }
892
893 CHECK_ALIGN(&TxIsocEPList[i]);
894 TxIsocEPList[i].hw_len = 0;
895
896 /* Must enable the last EP descr to get eof interrupt. */
897 TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
898 IO_STATE(USB_EP_command, eof, yes) |
899 IO_STATE(USB_EP_command, eol, yes) |
900 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
901 TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
902 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
903
904 *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
905 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
906
907 DBFEXIT;
908}
909
910static void etrax_usb_unlink_intr_urb(struct urb *urb)
911{
912 volatile USB_EP_Desc_t *first_ep; /* First EP in the list. */
913 volatile USB_EP_Desc_t *curr_ep; /* Current EP, the iterator. */
914 volatile USB_EP_Desc_t *next_ep; /* The EP after current. */
915 volatile USB_EP_Desc_t *unlink_ep; /* The one we should remove from the list. */
916
917 int epid;
918
919 /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the List". */
920
921 DBFENTER;
922
923 epid = ((etrax_urb_priv_t *)urb->hcpriv)->epid;
924
925 first_ep = &TxIntrEPList[0];
926 curr_ep = first_ep;
927
928
929 /* Note that this loop removes all EP descriptors with this epid. This assumes
930 that all EP descriptors belong to the one and only urb for this epid. */
931
932 do {
933 next_ep = (USB_EP_Desc_t *)phys_to_virt(curr_ep->next);
934
935 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
936
937 dbg_intr("Found EP to unlink for epid %d", epid);
938
939 /* This is the one we should unlink. */
940 unlink_ep = next_ep;
941
942 /* Actually unlink the EP from the DMA list. */
943 curr_ep->next = unlink_ep->next;
944
945 /* Wait until the DMA is no longer at this descriptor. */
946 while (*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep));
947
948 /* Now we are free to remove it and its SB descriptor.
949 Note that it is assumed here that there is only one sb in the
950 sb list for this ep. */
951 kmem_cache_free(usb_desc_cache, phys_to_virt(unlink_ep->sub));
952 kmem_cache_free(usb_desc_cache, (USB_EP_Desc_t *)unlink_ep);
953 }
954
955 curr_ep = phys_to_virt(curr_ep->next);
956
957 } while (curr_ep != first_ep);
958 urb->hcpriv = NULL;
959}
960
961void etrax_usb_do_intr_recover(int epid)
962{
963 USB_EP_Desc_t *first_ep, *tmp_ep;
964
965 DBFENTER;
966
967 first_ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB2_EP);
968 tmp_ep = first_ep;
969
970 /* What this does is simply to walk the list of interrupt
971 ep descriptors and enable those that are disabled. */
972
973 do {
974 if (IO_EXTRACT(USB_EP_command, epid, tmp_ep->command) == epid &&
975 !(tmp_ep->command & IO_MASK(USB_EP_command, enable))) {
976 tmp_ep->command |= IO_STATE(USB_EP_command, enable, yes);
977 }
978
979 tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
980
981 } while (tmp_ep != first_ep);
982
983
984 DBFEXIT;
985}
986
987static int etrax_rh_unlink_urb (struct urb *urb)
988{
989 etrax_hc_t *hc;
990
991 DBFENTER;
992
993 hc = urb->dev->bus->hcpriv;
994
995 if (hc->rh.urb == urb) {
996 hc->rh.send = 0;
997 del_timer(&hc->rh.rh_int_timer);
998 }
999
1000 DBFEXIT;
1001 return 0;
1002}
1003
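/* Build and deliver the virtual root hub's status-change report: bit 1 and
   bit 2 of the bitmap reflect wPortChange for port 1 and port 2 respectively,
   and the urb is completed only if the root hub has reporting enabled. */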
1004static void etrax_rh_send_irq(struct urb *urb)
1005{
1006 __u16 data = 0;
1007 etrax_hc_t *hc = urb->dev->bus->hcpriv;
1008 DBFENTER;
1009
1010/*
1011 dbg_rh("R_USB_FM_NUMBER : 0x%08X", *R_USB_FM_NUMBER);
1012 dbg_rh("R_USB_FM_REMAINING: 0x%08X", *R_USB_FM_REMAINING);
1013*/
1014
1015 data |= (hc->rh.wPortChange_1) ? (1 << 1) : 0;
1016 data |= (hc->rh.wPortChange_2) ? (1 << 2) : 0;
1017
1018 *((__u16 *)urb->transfer_buffer) = cpu_to_le16(data);
1019 /* FIXME: Why is actual_length set to 1 when data is 2 bytes?
1020 Since only 1 byte is used, why not declare data as __u8? */
1021 urb->actual_length = 1;
1022 urb->status = 0;
1023
1024 if (hc->rh.send && urb->complete) {
1025 dbg_rh("wPortChange_1: 0x%04X", hc->rh.wPortChange_1);
1026 dbg_rh("wPortChange_2: 0x%04X", hc->rh.wPortChange_2);
1027
1028 urb->complete(urb, NULL);
1029 }
1030
1031 DBFEXIT;
1032}
1033
1034static void etrax_rh_init_int_timer(struct urb *urb)
1035{
1036 etrax_hc_t *hc;
1037
1038 DBFENTER;
1039
1040 hc = urb->dev->bus->hcpriv;
1041 hc->rh.interval = urb->interval;
1042 init_timer(&hc->rh.rh_int_timer);
1043 hc->rh.rh_int_timer.function = etrax_rh_int_timer_do;
1044 hc->rh.rh_int_timer.data = (unsigned long)urb;
1045 /* FIXME: Is the jiffies resolution enough? All intervals < 10 ms will be mapped
1046 to 0, and the rest to the nearest lower 10 ms. */
1047 hc->rh.rh_int_timer.expires = jiffies + ((HZ * hc->rh.interval) / 1000);
1048 add_timer(&hc->rh.rh_int_timer);
1049
1050 DBFEXIT;
1051}
1052
1053static void etrax_rh_int_timer_do(unsigned long ptr)
1054{
1055 struct urb *urb;
1056 etrax_hc_t *hc;
1057
1058 DBFENTER;
1059
1060 urb = (struct urb*)ptr;
1061 hc = urb->dev->bus->hcpriv;
1062
1063 if (hc->rh.send) {
1064 etrax_rh_send_irq(urb);
1065 }
1066
1067 DBFEXIT;
1068}
1069
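/* An "epid" is an index into the controller's 32-entry endpoint table,
   addressed through R_USB_EPT_INDEX and read/written through R_USB_EPT_DATA
   (or R_USB_EPT_DATA_ISO for isochronous endpoints). The access pattern used
   throughout this file is roughly:

       save_flags(flags); cli();
       *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
       nop();                          (presumably to let the index write settle)
       ... read or write *R_USB_EPT_DATA ...
       restore_flags(flags);

   This function finds an existing table entry matching the urb, or allocates
   and programs a new one. Returns the epid, or -1 if the table is full. */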
1070static int etrax_usb_setup_epid(struct urb *urb)
1071{
1072 int epid;
1073 char devnum, endpoint, out_traffic, slow;
1074 int maxlen;
1075 unsigned long flags;
1076
1077 DBFENTER;
1078
1079 epid = etrax_usb_lookup_epid(urb);
1080 if ((epid != -1)){
1081 /* An epid that fits this urb has been found. */
1082 DBFEXIT;
1083 return epid;
1084 }
1085
1086 /* We must find and initiate a new epid for this urb. */
1087 epid = etrax_usb_allocate_epid();
1088
1089 if (epid == -1) {
1090 /* Failed to allocate a new epid. */
1091 DBFEXIT;
1092 return epid;
1093 }
1094
1095 /* We now have a new epid to use. Initiate it. */
1096 set_bit(epid, (void *)&epid_usage_bitmask);
1097
1098 devnum = usb_pipedevice(urb->pipe);
1099 endpoint = usb_pipeendpoint(urb->pipe);
1100 slow = usb_pipeslow(urb->pipe);
1101 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
1102 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1103 /* We want both IN and OUT control traffic to be put on the same EP/SB list. */
1104 out_traffic = 1;
1105 } else {
1106 out_traffic = usb_pipeout(urb->pipe);
1107 }
1108
1109 save_flags(flags);
1110 cli();
1111
1112 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
1113 nop();
1114
1115 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1116 *R_USB_EPT_DATA_ISO = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
1117 /* FIXME: Change any to the actual port? */
1118 IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
1119 IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
1120 IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
1121 IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
1122 } else {
1123 *R_USB_EPT_DATA = IO_STATE(R_USB_EPT_DATA, valid, yes) |
1124 IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
1125 /* FIXME: Change any to the actual port? */
1126 IO_STATE(R_USB_EPT_DATA, port, any) |
1127 IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
1128 IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
1129 IO_FIELD(R_USB_EPT_DATA, dev, devnum);
1130 }
1131
1132 restore_flags(flags);
1133
1134 if (out_traffic) {
1135 set_bit(epid, (void *)&epid_out_traffic);
1136 } else {
1137 clear_bit(epid, (void *)&epid_out_traffic);
1138 }
1139
1140 dbg_epid("Setting up epid %d with devnum %d, endpoint %d and max_len %d (%s)",
1141 epid, devnum, endpoint, maxlen, out_traffic ? "OUT" : "IN");
1142
1143 DBFEXIT;
1144 return epid;
1145}
1146
1147static void etrax_usb_free_epid(int epid)
1148{
1149 unsigned long flags;
1150
1151 DBFENTER;
1152
1153 if (!test_bit(epid, (void *)&epid_usage_bitmask)) {
1154 warn("Trying to free unused epid %d", epid);
1155 DBFEXIT;
1156 return;
1157 }
1158
1159 save_flags(flags);
1160 cli();
1161
1162 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
1163 nop();
1164 while (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold));
1165 /* This will, among other things, set the valid field to 0. */
1166 *R_USB_EPT_DATA = 0;
1167 restore_flags(flags);
1168
1169 clear_bit(epid, (void *)&epid_usage_bitmask);
1170
1171
1172 dbg_epid("Freed epid %d", epid);
1173
1174 DBFEXIT;
1175}
1176
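/* Search the endpoint table for an epid already set up for this urb. A match
   requires an entry that is in use and valid, the same traffic direction (per
   epid_out_traffic), and the same device number, endpoint number and maximum
   packet length; for non-isochronous pipes the speed must match as well.
   Returns the matching epid, or -1 if none is found. */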
1177static int etrax_usb_lookup_epid(struct urb *urb)
1178{
1179 int i;
1180 __u32 data;
1181 char devnum, endpoint, slow, out_traffic;
1182 int maxlen;
1183 unsigned long flags;
1184
1185 DBFENTER;
1186
1187 devnum = usb_pipedevice(urb->pipe);
1188 endpoint = usb_pipeendpoint(urb->pipe);
1189 slow = usb_pipeslow(urb->pipe);
1190 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
1191 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1192 /* We want both IN and OUT control traffic to be put on the same EP/SB list. */
1193 out_traffic = 1;
1194 } else {
1195 out_traffic = usb_pipeout(urb->pipe);
1196 }
1197
1198 /* Step through all epids. */
1199 for (i = 0; i < NBR_OF_EPIDS; i++) {
1200 if (test_bit(i, (void *)&epid_usage_bitmask) &&
1201 test_bit(i, (void *)&epid_out_traffic) == out_traffic) {
1202
1203 save_flags(flags);
1204 cli();
1205 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, i);
1206 nop();
1207
1208 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1209 data = *R_USB_EPT_DATA_ISO;
1210 restore_flags(flags);
1211
1212 if ((IO_MASK(R_USB_EPT_DATA_ISO, valid) & data) &&
1213 (IO_EXTRACT(R_USB_EPT_DATA_ISO, dev, data) == devnum) &&
1214 (IO_EXTRACT(R_USB_EPT_DATA_ISO, ep, data) == endpoint) &&
1215 (IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len, data) == maxlen)) {
1216 dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
1217 i, devnum, endpoint, out_traffic ? "OUT" : "IN");
1218 DBFEXIT;
1219 return i;
1220 }
1221 } else {
1222 data = *R_USB_EPT_DATA;
1223 restore_flags(flags);
1224
1225 if ((IO_MASK(R_USB_EPT_DATA, valid) & data) &&
1226 (IO_EXTRACT(R_USB_EPT_DATA, dev, data) == devnum) &&
1227 (IO_EXTRACT(R_USB_EPT_DATA, ep, data) == endpoint) &&
1228 (IO_EXTRACT(R_USB_EPT_DATA, low_speed, data) == slow) &&
1229 (IO_EXTRACT(R_USB_EPT_DATA, max_len, data) == maxlen)) {
1230 dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
1231 i, devnum, endpoint, out_traffic ? "OUT" : "IN");
1232 DBFEXIT;
1233 return i;
1234 }
1235 }
1236 }
1237 }
1238
1239 DBFEXIT;
1240 return -1;
1241}
1242
1243static int etrax_usb_allocate_epid(void)
1244{
1245 int i;
1246
1247 DBFENTER;
1248
1249 for (i = 0; i < NBR_OF_EPIDS; i++) {
1250 if (!test_bit(i, (void *)&epid_usage_bitmask)) {
1251 dbg_epid("Found free epid %d", i);
1252 DBFEXIT;
1253 return i;
1254 }
1255 }
1256
1257 dbg_epid("Found no free epids");
1258 DBFEXIT;
1259 return -1;
1260}
1261
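/* Top-level submit entry point. Requests addressed to the virtual root hub
   are handed to etrax_rh_submit_urb(); everything else is dispatched on pipe
   type. For interrupt and isochronous urbs the required bus bandwidth is
   checked and claimed here (and released again when the urb is unlinked). */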
1262static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags)
1263{
1264 etrax_hc_t *hc;
1265 int ret = -EINVAL;
1266
1267 DBFENTER;
1268
1269 if (!urb->dev || !urb->dev->bus) {
1270 return -ENODEV;
1271 }
1272 if (usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)) <= 0) {
1273 info("Submit urb to pipe with maxpacketlen 0, pipe 0x%X\n", urb->pipe);
1274 return -EMSGSIZE;
1275 }
1276
1277 if (urb->timeout) {
1278 /* FIXME. */
1279 warn("urb->timeout specified, ignoring.");
1280 }
1281
1282 hc = (etrax_hc_t*)urb->dev->bus->hcpriv;
1283
1284 if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
1285 /* This request is for the Virtual Root Hub. */
1286 ret = etrax_rh_submit_urb(urb);
1287
1288 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1289
1290 ret = etrax_usb_submit_bulk_urb(urb);
1291
1292 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1293
1294 ret = etrax_usb_submit_ctrl_urb(urb);
1295
1296 } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1297 int bustime;
1298
1299 if (urb->bandwidth == 0) {
1300 bustime = usb_check_bandwidth(urb->dev, urb);
1301 if (bustime < 0) {
1302 ret = bustime;
1303 } else {
1304 ret = etrax_usb_submit_intr_urb(urb);
1305 if (ret == 0)
1306 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1307 }
1308 } else {
1309 /* Bandwidth already set. */
1310 ret = etrax_usb_submit_intr_urb(urb);
1311 }
1312
1313 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1314 int bustime;
1315
1316 if (urb->bandwidth == 0) {
1317 bustime = usb_check_bandwidth(urb->dev, urb);
1318 if (bustime < 0) {
1319 ret = bustime;
1320 } else {
1321 ret = etrax_usb_submit_isoc_urb(urb);
1322 if (ret == 0)
1323 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1324 }
1325 } else {
1326 /* Bandwidth already set. */
1327 ret = etrax_usb_submit_isoc_urb(urb);
1328 }
1329 }
1330
1331 DBFEXIT;
1332
1333 if (ret != 0)
1334 printk("Submit URB error %d\n", ret);
1335
1336 return ret;
1337}
1338
1339static int etrax_usb_unlink_urb(struct urb *urb, int status)
1340{
1341 etrax_hc_t *hc;
1342 etrax_urb_priv_t *urb_priv;
1343 int epid;
1344 unsigned int flags;
1345
1346 DBFENTER;
1347
1348 if (!urb) {
1349 return -EINVAL;
1350 }
1351
1352 /* Disable interrupts here since a descriptor interrupt for the isoc epid
1353 will modify the sb list. This could possibly be done at a finer granularity, but
1354 unlink_urb should not be used frequently anyway.
1355 */
1356
1357 save_flags(flags);
1358 cli();
1359
1360 if (!urb->dev || !urb->dev->bus) {
1361 restore_flags(flags);
1362 return -ENODEV;
1363 }
1364 if (!urb->hcpriv) {
1365 /* This happens if a device driver calls unlink on an urb that
1366 was never submitted (lazy driver) or if the urb was completed
1367 while unlink was being called. */
1368 restore_flags(flags);
1369 return 0;
1370 }
1371 if (urb->transfer_flags & URB_ASYNC_UNLINK) {
1372 /* FIXME. */
1373 /* If URB_ASYNC_UNLINK is set:
1374 unlink
1375 move to a separate urb list
1376 call complete at next sof with ECONNRESET
1377
1378 If not:
1379 wait 1 ms
1380 unlink
1381 call complete with ENOENT
1382 */
1383 warn("URB_ASYNC_UNLINK set, ignoring.");
1384 }
1385
1386 /* One might think that urb->status = -EINPROGRESS would be a requirement for unlinking,
1387 but that doesn't work for interrupt and isochronous traffic since they are completed
1388 repeatedly, and urb->status is set then. That may in itself be a bug though. */
1389
1390 hc = urb->dev->bus->hcpriv;
1391 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
1392 epid = urb_priv->epid;
1393
1394 /* Set the urb status (synchronous unlink). */
1395 urb->status = -ENOENT;
1396 urb_priv->urb_state = UNLINK;
1397
1398 if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
1399 int ret;
1400 ret = etrax_rh_unlink_urb(urb);
1401 DBFEXIT;
1402 restore_flags(flags);
1403 return ret;
1404
1405 } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1406
1407 dbg_bulk("Unlink of bulk urb (0x%lx)", (unsigned long)urb);
1408
1409 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
1410 /* The EP was enabled, disable it and wait. */
1411 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
1412
1413 /* Ah, the luxury of busy-wait. */
1414 while (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[epid]));
1415 }
1416 /* Kicking dummy list out of the party. */
1417 TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
1418
1419 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1420
1421 dbg_ctrl("Unlink of ctrl urb (0x%lx)", (unsigned long)urb);
1422
1423 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
1424 /* The EP was enabled, disable it and wait. */
1425 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
1426
1427 /* Ah, the luxury of busy-wait. */
1428 while (*R_DMA_CH8_SUB1_EP == virt_to_phys(&TxCtrlEPList[epid]));
1429 }
1430
1431 } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1432
1433 dbg_intr("Unlink of intr urb (0x%lx)", (unsigned long)urb);
1434
1435 /* Separate function because it's a tad more complicated. */
1436 etrax_usb_unlink_intr_urb(urb);
1437
1438 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1439
1440 dbg_isoc("Unlink of isoc urb (0x%lx)", (unsigned long)urb);
1441
1442 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
1443 /* The EP was enabled, disable it and wait. */
1444 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
1445
1446 /* Ah, the luxury of busy-wait. */
1447 while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
1448 }
1449 }
1450
1451 /* Note that we need to remove the urb from the urb list *before* removing its SB
1452 descriptors. (This means that the isoc eof handler might get a null urb when we
1453 are unlinking the last urb.) */
1454
1455 if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1456
1457 urb_list_del(urb, epid);
1458 TxBulkEPList[epid].sub = 0;
1459 etrax_remove_from_sb_list(urb);
1460
1461 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1462
1463 urb_list_del(urb, epid);
1464 TxCtrlEPList[epid].sub = 0;
1465 etrax_remove_from_sb_list(urb);
1466
1467 } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1468
1469 urb_list_del(urb, epid);
1470 /* Sanity check (should never happen). */
1471 assert(urb_list_empty(epid));
1472
1473 /* Release allocated bandwidth. */
1474 usb_release_bandwidth(urb->dev, urb, 0);
1475
1476 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1477
1478 if (usb_pipeout(urb->pipe)) {
1479
1480 USB_SB_Desc_t *iter_sb, *prev_sb, *next_sb;
1481
1482 if (__urb_list_entry(urb, epid)) {
1483
1484 urb_list_del(urb, epid);
1485 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
1486 prev_sb = 0;
1487 while (iter_sb && (iter_sb != urb_priv->first_sb)) {
1488 prev_sb = iter_sb;
1489 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
1490 }
1491
1492 if (iter_sb == 0) {
1493 /* Unlink of the URB currently being transmitted. */
1494 prev_sb = 0;
1495 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
1496 }
1497
1498 while (iter_sb && (iter_sb != urb_priv->last_sb)) {
1499 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
1500 }
1501 if (iter_sb) {
1502 next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
1503 } else {
1504 /* This should only happen if the DMA has completed
1505 processing the SB list for this EP while interrupts
1506 are disabled. */
1507 dbg_isoc("Isoc urb not found, already sent?");
1508 next_sb = 0;
1509 }
1510 if (prev_sb) {
1511 prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
1512 } else {
1513 TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
1514 }
1515
1516 etrax_remove_from_sb_list(urb);
1517 if (urb_list_empty(epid)) {
1518 TxIsocEPList[epid].sub = 0;
1519 dbg_isoc("Last isoc out urb epid %d", epid);
1520 } else if (next_sb || prev_sb) {
1521 dbg_isoc("Re-enable isoc out epid %d", epid);
1522
1523 TxIsocEPList[epid].hw_len = 0;
1524 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
1525 } else {
1526 TxIsocEPList[epid].sub = 0;
1527 dbg_isoc("URB list non-empty and no SB list, EP disabled");
1528 }
1529 } else {
1530 dbg_isoc("Urb 0x%p not found, completed already?", urb);
1531 }
1532 } else {
1533
1534 urb_list_del(urb, epid);
1535
1536 /* For in traffic there is only one SB descriptor for each EP even
1537 though there may be several urbs (all urbs point at the same SB). */
1538 if (urb_list_empty(epid)) {
1539 /* No more urbs, remove the SB. */
1540 TxIsocEPList[epid].sub = 0;
1541 etrax_remove_from_sb_list(urb);
1542 } else {
1543 TxIsocEPList[epid].hw_len = 0;
1544 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
1545 }
1546 }
1547 /* Release allocated bandwidth. */
1548 usb_release_bandwidth(urb->dev, urb, 1);
1549 }
1550 /* Free the epid if urb list is empty. */
1551 if (urb_list_empty(epid)) {
1552 etrax_usb_free_epid(epid);
1553 }
1554 restore_flags(flags);
1555
1556 /* Must be done before calling completion handler. */
1557 kfree(urb_priv);
1558 urb->hcpriv = 0;
1559
1560 if (urb->complete) {
1561 urb->complete(urb, NULL);
1562 }
1563
1564 DBFEXIT;
1565 return 0;
1566}
1567
1568static int etrax_usb_get_frame_number(struct usb_device *usb_dev)
1569{
1570 DBFENTER;
1571 DBFEXIT;
1572 return (*R_USB_FM_NUMBER & 0x7ff);
1573}
1574
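/* Descriptor interrupt handler for the transmit side, i.e. the four
   sub-channels of DMA channel 8: sub0 = bulk, sub1 = control, sub2 = interrupt,
   sub3 = isochronous. Each pending sub-channel interrupt is acknowledged and
   handled in turn; completion of isochronous out urbs is deferred to a
   work-queue bottom half. */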
1575static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc)
1576{
1577 DBFENTER;
1578
1579 /* This interrupt handler could be used when unlinking EP descriptors. */
1580
1581 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
1582 USB_EP_Desc_t *ep;
1583
1584 //dbg_bulk("dma8_sub0_descr (BULK) intr.");
1585
1586 /* It should be safe clearing the interrupt here, since we don't expect to get a new
1587 one until we restart the bulk channel. */
1588 *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
1589
1590 /* Wait while the DMA is running (though we don't expect it to be). */
1591 while (*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd));
1592
1593 /* Advance the DMA to the next EP descriptor. */
1594 ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
1595
1596 //dbg_bulk("descr intr: DMA is at 0x%lx", (unsigned long)ep);
1597
1598 /* ep->next is already a physical address; no need for a virt_to_phys. */
1599 *R_DMA_CH8_SUB0_EP = ep->next;
1600
1601 /* Start the DMA bulk channel again. */
1602 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
1603 }
1604 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
1605 struct urb *urb;
1606 int epid;
1607 etrax_urb_priv_t *urb_priv;
1608 unsigned long int flags;
1609
1610 dbg_ctrl("dma8_sub1_descr (CTRL) intr.");
1611 *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
1612
1613 /* The complete callback gets called so we cli. */
1614 save_flags(flags);
1615 cli();
1616
1617 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
1618 if ((TxCtrlEPList[epid].sub == 0) ||
1619 (epid == DUMMY_EPID) ||
1620 (epid == INVALID_EPID)) {
1621 /* Nothing here to see. */
1622 continue;
1623 }
1624
1625 /* Get the first urb (if any). */
1626 urb = urb_list_first(epid);
1627
1628 if (urb) {
1629
1630 /* Sanity check. */
1631 assert(usb_pipetype(urb->pipe) == PIPE_CONTROL);
1632
1633 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
1634 assert(urb_priv);
1635
1636 if (urb_priv->urb_state == WAITING_FOR_DESCR_INTR) {
1637 assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
1638
1639 etrax_usb_complete_urb(urb, 0);
1640 }
1641 }
1642 }
1643 restore_flags(flags);
1644 }
1645 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
1646 dbg_intr("dma8_sub2_descr (INTR) intr.");
1647 *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
1648 }
1649 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
1650 struct urb *urb;
1651 int epid;
1652 int epid_done;
1653 etrax_urb_priv_t *urb_priv;
1654 USB_SB_Desc_t *sb_desc;
1655
1656 usb_isoc_complete_data_t *comp_data = NULL;
1657
1658 /* One or more isoc out transfers are done. */
1659 dbg_isoc("dma8_sub3_descr (ISOC) intr.");
1660
1661 /* For each isoc out EP search for the first sb_desc with the intr flag
1662 set. This descriptor must be the last packet from an URB. Then
1663 traverse the URB list for the EP until the URB with urb_priv->last_sb
1664 matching the intr-marked sb_desc is found. All URBs before this have
1665 been sent.
1666 */
1667
1668 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
1669 /* Skip past epids with no SB lists, epids used for in traffic,
1670 and special (dummy, invalid) epids. */
1671 if ((TxIsocEPList[epid].sub == 0) ||
1672 (test_bit(epid, (void *)&epid_out_traffic) == 0) ||
1673 (epid == DUMMY_EPID) ||
1674 (epid == INVALID_EPID)) {
1675 /* Nothing here to see. */
1676 continue;
1677 }
1678 sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
1679
1680 /* Find the last descriptor of the currently active URB for this ep.
1681 This is the first descriptor in the sub list marked for a descriptor
1682 interrupt. */
1683 while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
1684 sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
1685 }
1686 assert(sb_desc);
1687
1688 dbg_isoc("Check epid %d, sub 0x%p, SB 0x%p",
1689 epid,
1690 phys_to_virt(TxIsocEPList[epid].sub),
1691 sb_desc);
1692
1693 epid_done = 0;
1694
1695 /* Get the first urb (if any). */
1696 urb = urb_list_first(epid);
1697 assert(urb);
1698
1699 while (urb && !epid_done) {
1700
1701 /* Sanity check. */
1702 assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
1703
1704 if (!usb_pipeout(urb->pipe)) {
1705 /* descr interrupts are generated only for out pipes. */
1706 epid_done = 1;
1707 continue;
1708 }
1709
1710 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
1711 assert(urb_priv);
1712
1713 if (sb_desc != urb_priv->last_sb) {
1714
1715 /* This urb has been sent. */
1716 dbg_isoc("out URB 0x%p sent", urb);
1717
1718 urb_priv->urb_state = TRANSFER_DONE;
1719
1720 } else if ((sb_desc == urb_priv->last_sb) &&
1721 !(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
1722
1723 assert((sb_desc->command & IO_MASK(USB_SB_command, eol)) == IO_STATE(USB_SB_command, eol, yes));
1724 assert(sb_desc->next == 0);
1725
1726 dbg_isoc("out URB 0x%p last in list, epid disabled", urb);
1727 TxIsocEPList[epid].sub = 0;
1728 TxIsocEPList[epid].hw_len = 0;
1729 urb_priv->urb_state = TRANSFER_DONE;
1730
1731 epid_done = 1;
1732
1733 } else {
1734 epid_done = 1;
1735 }
1736 if (!epid_done) {
1737 urb = urb_list_next(urb, epid);
1738 }
1739 }
1740
1741 }
1742
1743 *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
1744
1745 comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
1746 assert(comp_data != NULL);
1747
1748 INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
1749 schedule_work(&comp_data->usb_bh);
1750 }
1751
1752 DBFEXIT;
1753 return IRQ_HANDLED;
1754}
1755
1756static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data)
1757{
1758 usb_isoc_complete_data_t *comp_data = (usb_isoc_complete_data_t*)data;
1759
1760 struct urb *urb;
1761 int epid;
1762 int epid_done;
1763 etrax_urb_priv_t *urb_priv;
1764
1765 DBFENTER;
1766
1767 dbg_isoc("dma8_sub3_descr (ISOC) bottom half.");
1768
1769 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
1770 unsigned long flags;
1771
1772 save_flags(flags);
1773 cli();
1774
1775 epid_done = 0;
1776
1777 /* The descriptor interrupt handler has marked all transmitted isochronous out
1778 URBs with TRANSFER_DONE. Now traverse all epids; for each one that carries
1779 isochronous out traffic, walk its URB list and complete the transmitted
1780 URBs.
1781 */
1782
1783 while (!epid_done) {
1784
1785 /* Get the first urb (if any). */
1786 urb = urb_list_first(epid);
1787 if (urb == 0) {
1788 epid_done = 1;
1789 continue;
1790 }
1791
1792 if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
1793 epid_done = 1;
1794 continue;
1795 }
1796
1797 if (!usb_pipeout(urb->pipe)) {
1798 /* descr interrupts are generated only for out pipes. */
1799 epid_done = 1;
1800 continue;
1801 }
1802
1803 dbg_isoc("Check epid %d, SB 0x%p", epid, (char*)TxIsocEPList[epid].sub);
1804
1805 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
1806 assert(urb_priv);
1807
1808 if (urb_priv->urb_state == TRANSFER_DONE) {
1809 int i;
1810 struct usb_iso_packet_descriptor *packet;
1811
1812 /* This urb has been sent. */
1813 dbg_isoc("Completing isoc out URB 0x%p", urb);
1814
1815 for (i = 0; i < urb->number_of_packets; i++) {
1816 packet = &urb->iso_frame_desc[i];
1817 packet->status = 0;
1818 packet->actual_length = packet->length;
1819 }
1820
1821 etrax_usb_complete_isoc_urb(urb, 0);
1822
1823 if (urb_list_empty(epid)) {
1824 etrax_usb_free_epid(epid);
1825 epid_done = 1;
1826 }
1827 } else {
1828 epid_done = 1;
1829 }
1830 }
1831 restore_flags(flags);
1832
1833 }
1834 kmem_cache_free(isoc_compl_cache, comp_data);
1835
1836 DBFEXIT;
1837}
1838
1839
1840
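/* End-of-packet interrupt for DMA channel 9 (the shared receive channel).
   Walk the RX descriptor ring: for each received packet, look up the first
   urb queued on the packet's epid, check the per-endpoint error status, copy
   the data into the urb's transfer buffer (or the current iso frame
   descriptor), and complete urbs as they finish. Each consumed descriptor is
   then recycled to the tail of the ring by moving the eol flag. */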
1841static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc)
1842{
1843 struct urb *urb;
1844 etrax_urb_priv_t *urb_priv;
1845 int epid = 0;
1846 unsigned long flags;
1847
1848 /* Isoc diagnostics. */
1849 static int curr_fm = 0;
1850 static int prev_fm = 0;
1851
1852 DBFENTER;
1853
1854 /* Clear this interrupt. */
1855 *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
1856
1857 /* Note that this while loop assumes that all packets span only
1858 one rx descriptor. */
1859
1860 /* The reason we cli here is that we call the driver's callback functions. */
1861 save_flags(flags);
1862 cli();
1863
1864 while (myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
1865
1866 epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
1867 urb = urb_list_first(epid);
1868
1869 //printk("eop for epid %d, first urb 0x%lx\n", epid, (unsigned long)urb);
1870
1871 if (!urb) {
1872 err("No urb for epid %d in rx interrupt", epid);
1873 __dump_ept_data(epid);
1874 goto skip_out;
1875 }
1876
1877 /* Note that we cannot indiscriminately assert(usb_pipein(urb->pipe)) since
1878 ctrl pipes are not IN-only. */
1879
1880 if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
1881 __u32 r_usb_ept_data;
1882 int no_error = 0;
1883
1884 assert(test_bit(epid, (void *)&epid_usage_bitmask));
1885
1886 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
1887 nop();
1888 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1889 r_usb_ept_data = *R_USB_EPT_DATA_ISO;
1890
1891 if ((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
1892 (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
1893 (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
1894 /* Not an error, just a failure to receive an expected iso
1895 in packet in this frame. This is not documented
1896 in the Designer's Reference.
1897 */
1898 no_error++;
1899 } else {
1900 warn("R_USB_EPT_DATA_ISO for epid %d = 0x%x", epid, r_usb_ept_data);
1901 }
1902 } else {
1903 r_usb_ept_data = *R_USB_EPT_DATA;
1904 warn("R_USB_EPT_DATA for epid %d = 0x%x", epid, r_usb_ept_data);
1905 }
1906
1907 if (!no_error){
1908 warn("error in rx desc->status, epid %d, first urb = 0x%lx",
1909 epid, (unsigned long)urb);
1910 __dump_in_desc(myNextRxDesc);
1911
1912 warn("R_USB_STATUS = 0x%x", *R_USB_STATUS);
1913
1914 /* Check that ept was disabled when error occurred. */
1915 switch (usb_pipetype(urb->pipe)) {
1916 case PIPE_BULK:
1917 assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
1918 break;
1919 case PIPE_CONTROL:
1920 assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
1921 break;
1922 case PIPE_INTERRUPT:
1923 assert(!(TxIntrEPList[epid].command & IO_MASK(USB_EP_command, enable)));
1924 break;
1925 case PIPE_ISOCHRONOUS:
1926 assert(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)));
1927 break;
1928 default:
1929 warn("etrax_usb_rx_interrupt: bad pipetype %d in urb 0x%p",
1930 usb_pipetype(urb->pipe),
1931 urb);
1932 }
1933 etrax_usb_complete_urb(urb, -EPROTO);
1934 goto skip_out;
1935 }
1936 }
1937
1938 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
1939 assert(urb_priv);
1940
1941 if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
1942 (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
1943 (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
1944
1945 if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
1946 /* We get nodata for empty data transactions, and the rx descriptor's
1947 hw_len field is not valid in that case. No data to copy in other
1948 words. */
1949 } else {
1950 /* Make sure the data fits in the buffer. */
1951 assert(urb_priv->rx_offset + myNextRxDesc->hw_len
1952 <= urb->transfer_buffer_length);
1953
1954 memcpy(urb->transfer_buffer + urb_priv->rx_offset,
1955 phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
1956 urb_priv->rx_offset += myNextRxDesc->hw_len;
1957 }
1958
1959 if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
1960 if ((usb_pipetype(urb->pipe) == PIPE_CONTROL) &&
1961 ((TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)) ==
1962 IO_STATE(USB_EP_command, enable, yes))) {
1963 /* The EP is still enabled, so the OUT packet used to ack
1964 the in data is probably not processed yet. If the EP
1965 sub pointer has not moved beyond urb_priv->last_sb mark
1966 it for a descriptor interrupt and complete the urb in
1967 the descriptor interrupt handler.
1968 */
1969 USB_SB_Desc_t *sub = TxCtrlEPList[urb_priv->epid].sub ? phys_to_virt(TxCtrlEPList[urb_priv->epid].sub) : 0;
1970
1971 while ((sub != NULL) && (sub != urb_priv->last_sb)) {
1972 sub = sub->next ? phys_to_virt(sub->next) : 0;
1973 }
1974 if (sub != NULL) {
1975 /* The urb has not been fully processed. */
1976 urb_priv->urb_state = WAITING_FOR_DESCR_INTR;
1977 } else {
1978 warn("(CTRL) epid enabled and urb (0x%p) processed, ep->sub=0x%p", urb, (char*)TxCtrlEPList[urb_priv->epid].sub);
1979 etrax_usb_complete_urb(urb, 0);
1980 }
1981 } else {
1982 etrax_usb_complete_urb(urb, 0);
1983 }
1984 }
1985
1986 } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1987
1988 struct usb_iso_packet_descriptor *packet;
1989
1990 if (urb_priv->urb_state == UNLINK) {
1991 info("Ignoring rx data for urb being unlinked.");
1992 goto skip_out;
1993 } else if (urb_priv->urb_state == NOT_STARTED) {
1994 info("What? Got rx data for urb that isn't started?");
1995 goto skip_out;
1996 }
1997
1998 packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
1999 packet->status = 0;
2000
2001 if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
2002 /* We get nodata for empty data transactions, and the rx descriptor's
2003 hw_len field is not valid in that case. We copy 0 bytes, however, to
2004 stay in sync. */
2005 packet->actual_length = 0;
2006 } else {
2007 packet->actual_length = myNextRxDesc->hw_len;
2008 /* Make sure the data fits in the buffer. */
2009 assert(packet->actual_length <= packet->length);
2010 memcpy(urb->transfer_buffer + packet->offset,
2011 phys_to_virt(myNextRxDesc->buf), packet->actual_length);
2012 }
2013
2014 /* Increment the packet counter. */
2015 urb_priv->isoc_packet_counter++;
2016
2017 /* Note that we don't care about the eot field in the rx descriptor's status.
2018 It will always be set for isoc traffic. */
2019 if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
2020
2021 /* Out-of-synch diagnostics. */
2022 curr_fm = (*R_USB_FM_NUMBER & 0x7ff);
2023 if (((prev_fm + urb_priv->isoc_packet_counter) % (0x7ff + 1)) != curr_fm) {
2024 /* This test is wrong, if there is more than one isoc
2025 in endpoint active it will always calculate wrong
2026 since prev_fm is shared by all endpoints.
2027
2028 FIXME Make this check per URB using urb->start_frame.
2029 */
2030 dbg_isoc("Out of synch? Previous frame = %d, current frame = %d",
2031 prev_fm, curr_fm);
2032
2033 }
2034 prev_fm = curr_fm;
2035
2036 /* Complete the urb with status OK. */
2037 etrax_usb_complete_isoc_urb(urb, 0);
2038 }
2039 }
2040
2041 skip_out:
2042
2043 /* DMA IN cache bug. Flush the DMA IN buffer from the cache. (struct etrax_dma_descr
2044 has the same layout as USB_IN_Desc for the relevant fields.) */
2045 prepare_rx_descriptor((struct etrax_dma_descr*)myNextRxDesc);
2046
2047 myPrevRxDesc = myNextRxDesc;
2048 myPrevRxDesc->command |= IO_MASK(USB_IN_command, eol);
2049 myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
2050 myLastRxDesc = myPrevRxDesc;
2051
2052 myNextRxDesc->status = 0;
2053 myNextRxDesc = phys_to_virt(myNextRxDesc->next);
2054 }
2055
2056 restore_flags(flags);
2057
2058 DBFEXIT;
2059
2060 return IRQ_HANDLED;
2061}
2062
2063
2064/* This function will unlink the SB descriptors associated with this urb. */
2065static int etrax_remove_from_sb_list(struct urb *urb)
2066{
2067 USB_SB_Desc_t *next_sb, *first_sb, *last_sb;
2068 etrax_urb_priv_t *urb_priv;
2069 int i = 0;
2070
2071 DBFENTER;
2072
2073 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2074 assert(urb_priv);
2075
2076 /* Just a sanity check. Since we don't fiddle with the DMA list the EP descriptor
2077 doesn't really need to be disabled, it's just that we expect it to be. */
2078 if (usb_pipetype(urb->pipe) == PIPE_BULK) {
2079 assert(!(TxBulkEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
2080 } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
2081 assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
2082 }
2083
2084 first_sb = urb_priv->first_sb;
2085 last_sb = urb_priv->last_sb;
2086
2087 assert(first_sb);
2088 assert(last_sb);
2089
2090 while (first_sb != last_sb) {
2091 next_sb = (USB_SB_Desc_t *)phys_to_virt(first_sb->next);
2092 kmem_cache_free(usb_desc_cache, first_sb);
2093 first_sb = next_sb;
2094 i++;
2095 }
2096 kmem_cache_free(usb_desc_cache, last_sb);
2097 i++;
2098 dbg_sb("%d SB descriptors freed", i);
2099 /* Compare i with urb->number_of_packets for Isoc traffic.
2100 Should be same when calling unlink_urb */
2101
2102 DBFEXIT;
2103
2104 return i;
2105}
2106
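/* Queue a bulk urb on its epid. If the urb list for the epid was empty the
   transfer is started right away by building its SB list; otherwise it is
   started later, from the completion of the urb ahead of it. */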
2107static int etrax_usb_submit_bulk_urb(struct urb *urb)
2108{
2109 int epid;
2110 int empty;
2111 unsigned long flags;
2112 etrax_urb_priv_t *urb_priv;
2113
2114 DBFENTER;
2115
2116 /* Epid allocation, empty check and list add must be protected.
2117 Read about this in etrax_usb_submit_ctrl_urb. */
2118
2119 spin_lock_irqsave(&urb_list_lock, flags);
2120 epid = etrax_usb_setup_epid(urb);
2121 if (epid == -1) {
2122 DBFEXIT;
2123 spin_unlock_irqrestore(&urb_list_lock, flags);
2124 return -ENOMEM;
2125 }
2126 empty = urb_list_empty(epid);
2127 urb_list_add(urb, epid);
2128 spin_unlock_irqrestore(&urb_list_lock, flags);
2129
2130 dbg_bulk("Adding bulk %s urb 0x%lx to %s list, epid %d",
2131 usb_pipein(urb->pipe) ? "IN" : "OUT", (unsigned long)urb, empty ? "empty" : "", epid);
2132
2133 /* Mark the urb as being in progress. */
2134 urb->status = -EINPROGRESS;
2135
2136 /* Setup the hcpriv data. */
2137 urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
2138 assert(urb_priv != NULL);
2139 /* This sets rx_offset to 0. */
2140 urb_priv->urb_state = NOT_STARTED;
2141 urb->hcpriv = urb_priv;
2142
2143 if (empty) {
2144 etrax_usb_add_to_bulk_sb_list(urb, epid);
2145 }
2146
2147 DBFEXIT;
2148
2149 return 0;
2150}
2151
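/* Build the single SB descriptor for a bulk urb and attach it to the EP. OUT
   transfers point the SB at the urb's buffer; IN transfers only program the
   expected number of packets (sw_len) and the remainder of the last packet
   (rem). The data toggle is preloaded from usbcore, the EP is enabled, and
   the bulk DMA channel is restarted if it had stopped. */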
2152static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid)
2153{
2154 USB_SB_Desc_t *sb_desc;
2155 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2156 unsigned long flags;
2157 char maxlen;
2158
2159 DBFENTER;
2160
2161 dbg_bulk("etrax_usb_add_to_bulk_sb_list, urb 0x%lx", (unsigned long)urb);
2162
2163 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
2164
2165 sb_desc = kmem_cache_zalloc(usb_desc_cache, SLAB_FLAG);
2166 assert(sb_desc != NULL);
2167
2168
2169 if (usb_pipeout(urb->pipe)) {
2170
2171 dbg_bulk("Grabbing bulk OUT, urb 0x%lx, epid %d", (unsigned long)urb, epid);
2172
2173 /* This is probably a sanity check that the bulk transaction length
2174 is not larger than 64 kB. */
2175 if (urb->transfer_buffer_length > 0xffff) {
2176 panic("urb->transfer_buffer_length > 0xffff");
2177 }
2178
2179 sb_desc->sw_len = urb->transfer_buffer_length;
2180
2181 /* The rem field is don't care if it's not a full-length transfer, so setting
2182 it shouldn't hurt. Also, rem isn't used for OUT traffic. */
2183 sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
2184 IO_STATE(USB_SB_command, tt, out) |
2185 IO_STATE(USB_SB_command, eot, yes) |
2186 IO_STATE(USB_SB_command, eol, yes));
2187
2188 /* The full field is set to yes, even if we don't actually check that this is
2189 a full-length transfer (i.e., that transfer_buffer_length % maxlen = 0).
2190 Setting full prevents the USB controller from sending an empty packet in
2191 that case. However, if URB_ZERO_PACKET was set we want that. */
2192 if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
2193 sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
2194 }
2195
2196 sb_desc->buf = virt_to_phys(urb->transfer_buffer);
2197 sb_desc->next = 0;
2198
2199 } else if (usb_pipein(urb->pipe)) {
2200
2201 dbg_bulk("Grabbing bulk IN, urb 0x%lx, epid %d", (unsigned long)urb, epid);
2202
2203 sb_desc->sw_len = urb->transfer_buffer_length ?
2204 (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
2205
2206 /* The rem field is don't care if it's not a full-length transfer, so setting
2207 it shouldn't hurt. */
2208 sb_desc->command =
2209 (IO_FIELD(USB_SB_command, rem,
2210 urb->transfer_buffer_length % maxlen) |
2211 IO_STATE(USB_SB_command, tt, in) |
2212 IO_STATE(USB_SB_command, eot, yes) |
2213 IO_STATE(USB_SB_command, eol, yes));
2214
2215 sb_desc->buf = 0;
2216 sb_desc->next = 0;
2217 }
2218
2219 urb_priv->first_sb = sb_desc;
2220 urb_priv->last_sb = sb_desc;
2221 urb_priv->epid = epid;
2222
2223 urb->hcpriv = urb_priv;
2224
2225 /* Reset toggle bits and reset error count. */
2226 save_flags(flags);
2227 cli();
2228
2229 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2230 nop();
2231
2232 /* FIXME: Is this a special case since the hold field is checked,
2233 or should we check hold in a lot of other cases as well? */
2234 if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
2235 panic("Hold was set in %s", __FUNCTION__);
2236 }
2237
2238 /* Reset error counters (regardless of which direction this traffic is). */
2239 *R_USB_EPT_DATA &=
2240 ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
2241 IO_MASK(R_USB_EPT_DATA, error_count_out));
2242
2243 /* Software must preset the toggle bits. */
2244 if (usb_pipeout(urb->pipe)) {
2245 char toggle =
2246 usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
2247 *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
2248 *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
2249 } else {
2250 char toggle =
2251 usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
2252 *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
2253 *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
2254 }
2255
2256 /* Assert that the EP descriptor is disabled. */
2257 assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
2258
2259 /* The reason we set the EP's sub pointer directly instead of
2260 walking the SB list and linking it last in the list is that we only
2261 have one active urb at a time (the rest are queued). */
2262
2263 /* Note that we cannot have interrupts running when we have set the SB descriptor
2264 but the EP is not yet enabled. If a bulk eot happens for another EP, we will
2265 find this EP disabled and with a SB != 0, which will make us think that it's done. */
2266 TxBulkEPList[epid].sub = virt_to_phys(sb_desc);
2267 TxBulkEPList[epid].hw_len = 0;
2268 /* Note that we don't have to fill in the ep_id field since this
2269 was done when we allocated the EP descriptors in init_tx_bulk_ep. */
2270
2271 /* Check if the dummy list is already with us (if several urbs were queued). */
2272 if (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0])) {
2273
2274 dbg_bulk("Inviting dummy list to the party for urb 0x%lx, epid %d",
2275 (unsigned long)urb, epid);
2276
2277 /* The last EP in the dummy list already has its next pointer set to
2278 TxBulkEPList[epid].next. */
2279
2280 /* We don't need to check if the DMA is at this EP or not before changing the
2281 next pointer, since we will do it in one 32-bit write (EP descriptors are
2282 32-bit aligned). */
2283 TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
2284 }
2285 /* Enable the EP descr. */
2286 dbg_bulk("Enabling bulk EP for urb 0x%lx, epid %d", (unsigned long)urb, epid);
2287 TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
2288
2289 /* Everything is set up, safe to enable interrupts again. */
2290 restore_flags(flags);
2291
2292 /* If the DMA bulk channel isn't running, we need to restart it if it
2293 has stopped at the last EP descriptor (DMA stopped because there was
2294 no more traffic) or if it has stopped at a dummy EP with the intr flag
2295 set (DMA stopped because we were too slow in inserting new traffic). */
2296 if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
2297
2298 USB_EP_Desc_t *ep;
2299 ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
2300 dbg_bulk("DMA channel not running in add");
2301 dbg_bulk("DMA is at 0x%lx", (unsigned long)ep);
2302
2303 if (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[NBR_OF_EPIDS - 1]) ||
2304 (ep->command & 0x8) >> 3) {
2305 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
2306 /* Update/restart the bulk start timer since we just started the channel. */
2307 mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
2308 /* Update/restart the bulk eot timer since we just inserted traffic. */
2309 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
2310 }
2311 }
2312
2313 DBFEXIT;
2314}
2315
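/* Finish a bulk urb: set actual_length, store the resulting data toggle back
   into usbcore, free the urb's SB descriptors, start the next queued urb for
   this epid (if any), and call the completion handler. When the urb list
   becomes empty the epid is freed and the dummy list is unlinked again. */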
2316static void etrax_usb_complete_bulk_urb(struct urb *urb, int status)
2317{
2318 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2319 int epid = urb_priv->epid;
2320 unsigned long flags;
2321
2322 DBFENTER;
2323
2324 if (status)
2325 warn("Completing bulk urb with status %d.", status);
2326
2327 dbg_bulk("Completing bulk urb 0x%lx for epid %d", (unsigned long)urb, epid);
2328
2329 /* Update the urb list. */
2330 urb_list_del(urb, epid);
2331
2332 /* For an IN pipe, we always set the actual length, regardless of whether there was
2333 an error or not (which means the device driver can use the data if it wants to). */
2334 if (usb_pipein(urb->pipe)) {
2335 urb->actual_length = urb_priv->rx_offset;
2336 } else {
2337 /* Set actual_length for OUT urbs also; the USB mass storage driver seems
2338 to want that. We wouldn't know of any partial writes if there was an error. */
2339 if (status == 0) {
2340 urb->actual_length = urb->transfer_buffer_length;
2341 } else {
2342 urb->actual_length = 0;
2343 }
2344 }
2345
2346 /* FIXME: Is there something of the things below we shouldn't do if there was an error?
2347 Like, maybe we shouldn't toggle the toggle bits, or maybe we shouldn't insert more traffic. */
2348
2349 save_flags(flags);
2350 cli();
2351
2352 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2353 nop();
2354
2355 /* We need to fiddle with the toggle bits because the hardware doesn't do it for us. */
2356 if (usb_pipeout(urb->pipe)) {
2357 char toggle =
2358 IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
2359 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
2360 usb_pipeout(urb->pipe), toggle);
2361 } else {
2362 char toggle =
2363 IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
2364 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
2365 usb_pipeout(urb->pipe), toggle);
2366 }
2367 restore_flags(flags);
2368
2369 /* Remember to free the SBs. */
2370 etrax_remove_from_sb_list(urb);
2371 kfree(urb_priv);
2372 urb->hcpriv = 0;
2373
2374 /* If there are any more urb's in the list we'd better start sending */
2375 if (!urb_list_empty(epid)) {
2376
2377 struct urb *new_urb;
2378
2379 /* Get the first urb. */
2380 new_urb = urb_list_first(epid);
2381 assert(new_urb);
2382
2383 dbg_bulk("More bulk for epid %d", epid);
2384
2385 etrax_usb_add_to_bulk_sb_list(new_urb, epid);
2386 }
2387
2388 urb->status = status;
2389
2390 /* We let any non-zero status from the layer above have precedence. */
2391 if (status == 0) {
2392 /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
2393 are to be treated as an error. */
2394 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
2395 if (usb_pipein(urb->pipe) &&
2396 (urb->actual_length !=
2397 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
2398 urb->status = -EREMOTEIO;
2399 }
2400 }
2401 }
2402
2403 if (urb->complete) {
2404 urb->complete(urb, NULL);
2405 }
2406
2407 if (urb_list_empty(epid)) {
2408 /* This means that this EP is now free, deconfigure it. */
2409 etrax_usb_free_epid(epid);
2410
2411 /* No more traffic; time to clean up.
2412 Must set sub pointer to 0, since we look at the sub pointer when handling
2413 the bulk eot interrupt. */
2414
2415 dbg_bulk("No bulk for epid %d", epid);
2416
2417 TxBulkEPList[epid].sub = 0;
2418
2419 /* Unlink the dummy list. */
2420
2421 dbg_bulk("Kicking dummy list out of party for urb 0x%lx, epid %d",
2422 (unsigned long)urb, epid);
2423
2424 /* No need to wait for the DMA before changing the next pointer.
2425 The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
2426 the last one (INVALID_EPID) for actual traffic. */
2427 TxBulkEPList[epid].next =
2428 virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
2429 }
2430
2431 DBFEXIT;
2432}
2433
2434static int etrax_usb_submit_ctrl_urb(struct urb *urb)
2435{
2436 int epid;
2437 int empty;
2438 unsigned long flags;
2439 etrax_urb_priv_t *urb_priv;
2440
2441 DBFENTER;
2442
2443 /* FIXME: Return -ENXIO if there is already a queued urb for this endpoint? */
2444
2445 /* Epid allocation, empty check and list add must be protected.
2446
2447 Epid allocation because if we find an existing epid for this endpoint an urb might be
2448 completed (emptying the list) before we add the new urb to the list, causing the epid
2449 to be de-allocated. We would then start the transfer with an invalid epid -> epid attn.
2450
2451 Empty check and add because otherwise we might conclude that the list is not empty,
2452 after which it becomes empty before we add the new urb to the list, causing us not to
2453 insert the new traffic into the SB list. */
2454
2455 spin_lock_irqsave(&urb_list_lock, flags);
2456 epid = etrax_usb_setup_epid(urb);
2457 if (epid == -1) {
2458 spin_unlock_irqrestore(&urb_list_lock, flags);
2459 DBFEXIT;
2460 return -ENOMEM;
2461 }
2462 empty = urb_list_empty(epid);
2463 urb_list_add(urb, epid);
2464 spin_unlock_irqrestore(&urb_list_lock, flags);
2465
2466 dbg_ctrl("Adding ctrl urb 0x%lx to %s list, epid %d",
2467 (unsigned long)urb, empty ? "empty" : "", epid);
2468
2469 /* Mark the urb as being in progress. */
2470 urb->status = -EINPROGRESS;
2471
2472 /* Setup the hcpriv data. */
2473 urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
2474 assert(urb_priv != NULL);
2475 /* This sets rx_offset to 0. */
2476 urb_priv->urb_state = NOT_STARTED;
2477 urb->hcpriv = urb_priv;
2478
2479 if (empty) {
2480 etrax_usb_add_to_ctrl_sb_list(urb, epid);
2481 }
2482
2483 DBFEXIT;
2484
2485 return 0;
2486}
2487
2488static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid)
2489{
2490 USB_SB_Desc_t *sb_desc_setup;
2491 USB_SB_Desc_t *sb_desc_data;
2492 USB_SB_Desc_t *sb_desc_status;
2493
2494 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2495
2496 unsigned long flags;
2497 char maxlen;
2498
2499 DBFENTER;
2500
2501 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
2502
2503 sb_desc_setup = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
2504 assert(sb_desc_setup != NULL);
2505 sb_desc_status = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
2506 assert(sb_desc_status != NULL);
2507
2508 /* Initialize the mandatory setup SB descriptor (used only in control transfers) */
2509 sb_desc_setup->sw_len = 8;
2510 sb_desc_setup->command = (IO_FIELD(USB_SB_command, rem, 0) |
2511 IO_STATE(USB_SB_command, tt, setup) |
2512 IO_STATE(USB_SB_command, full, yes) |
2513 IO_STATE(USB_SB_command, eot, yes));
2514
2515 sb_desc_setup->buf = virt_to_phys(urb->setup_packet);
2516
2517 if (usb_pipeout(urb->pipe)) {
2518 dbg_ctrl("Transfer for epid %d is OUT", epid);
2519
2520		/* If this control OUT transfer has an optional data stage, we add an OUT token
2521		   before the mandatory IN (status) token, hence the reordered SB list. */
2522
2523 sb_desc_setup->next = virt_to_phys(sb_desc_status);
2524 if (urb->transfer_buffer) {
2525
2526 dbg_ctrl("This OUT transfer has an extra data stage");
2527
2528 sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
2529 assert(sb_desc_data != NULL);
2530
2531 sb_desc_setup->next = virt_to_phys(sb_desc_data);
2532
2533 sb_desc_data->sw_len = urb->transfer_buffer_length;
2534 sb_desc_data->command = (IO_STATE(USB_SB_command, tt, out) |
2535 IO_STATE(USB_SB_command, full, yes) |
2536 IO_STATE(USB_SB_command, eot, yes));
2537 sb_desc_data->buf = virt_to_phys(urb->transfer_buffer);
2538 sb_desc_data->next = virt_to_phys(sb_desc_status);
2539 }
2540
2541 sb_desc_status->sw_len = 1;
2542 sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
2543 IO_STATE(USB_SB_command, tt, in) |
2544 IO_STATE(USB_SB_command, eot, yes) |
2545 IO_STATE(USB_SB_command, intr, yes) |
2546 IO_STATE(USB_SB_command, eol, yes));
2547
2548 sb_desc_status->buf = 0;
2549 sb_desc_status->next = 0;
2550
2551 } else if (usb_pipein(urb->pipe)) {
2552
2553 dbg_ctrl("Transfer for epid %d is IN", epid);
2554 dbg_ctrl("transfer_buffer_length = %d", urb->transfer_buffer_length);
2555 dbg_ctrl("rem is calculated to %d", urb->transfer_buffer_length % maxlen);
2556
2557 sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
2558 assert(sb_desc_data != NULL);
2559
2560 sb_desc_setup->next = virt_to_phys(sb_desc_data);
2561
2562 sb_desc_data->sw_len = urb->transfer_buffer_length ?
2563 (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
2564 dbg_ctrl("sw_len got %d", sb_desc_data->sw_len);
2565
2566 sb_desc_data->command =
2567 (IO_FIELD(USB_SB_command, rem,
2568 urb->transfer_buffer_length % maxlen) |
2569 IO_STATE(USB_SB_command, tt, in) |
2570 IO_STATE(USB_SB_command, eot, yes));
2571
2572 sb_desc_data->buf = 0;
2573 sb_desc_data->next = virt_to_phys(sb_desc_status);
2574
2575		/* Read the comment at the zout_buffer declaration for an explanation of this. */
2576 sb_desc_status->sw_len = 1;
2577 sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
2578 IO_STATE(USB_SB_command, tt, zout) |
2579 IO_STATE(USB_SB_command, full, yes) |
2580 IO_STATE(USB_SB_command, eot, yes) |
2581 IO_STATE(USB_SB_command, intr, yes) |
2582 IO_STATE(USB_SB_command, eol, yes));
2583
2584 sb_desc_status->buf = virt_to_phys(&zout_buffer[0]);
2585 sb_desc_status->next = 0;
2586 }
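	/* Summarising the chains built above (buffers omitted):

	       control OUT:  setup (8 bytes) -> data (OUT, only if transfer_buffer) -> status (IN, intr, eol)
	       control IN:   setup (8 bytes) -> data (IN) -> status (zout to zout_buffer, intr, eol)

	   i.e. the status stage always runs in the opposite direction of the data stage and is
	   the SB that carries the interrupt and end-of-list flags. */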
2587
2588 urb_priv->first_sb = sb_desc_setup;
2589 urb_priv->last_sb = sb_desc_status;
2590 urb_priv->epid = epid;
2591
2592 urb_priv->urb_state = STARTED;
2593
2594	/* Reset the toggle bits and the error counts; remember to disable (di) and re-enable (ei) interrupts. */
2595	/* Warning: it is possible that this locking doesn't work with bottom halves. */
2596
2597 save_flags(flags);
2598 cli();
2599
2600 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2601 nop();
2602 if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
2603 panic("Hold was set in %s", __FUNCTION__);
2604 }
2605
2606
2607	/* FIXME: Compare with etrax_usb_add_to_bulk_sb_list, where the toggle bits
2608	   are set to a specific value. Why the difference? Read "Transfer and Toggle Bits"
2609	   in the Designer's Reference, p. 8 - 11. */
2610 *R_USB_EPT_DATA &=
2611 ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
2612 IO_MASK(R_USB_EPT_DATA, error_count_out) |
2613 IO_MASK(R_USB_EPT_DATA, t_in) |
2614 IO_MASK(R_USB_EPT_DATA, t_out));
2615
2616 /* Since we use the rx interrupt to complete ctrl urbs, we can enable interrupts now
2617 (i.e. we don't check the sub pointer on an eot interrupt like we do for bulk traffic). */
2618 restore_flags(flags);
2619
2620 /* Assert that the EP descriptor is disabled. */
2621 assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
2622
2623 /* Set up and enable the EP descriptor. */
2624 TxCtrlEPList[epid].sub = virt_to_phys(sb_desc_setup);
2625 TxCtrlEPList[epid].hw_len = 0;
2626 TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
2627
2628 /* We start the DMA sub channel without checking if it's running or not, because:
2629 1) If it's already running, issuing the start command is a nop.
2630 2) We avoid a test-and-set race condition. */
2631 *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
2632
2633 DBFEXIT;
2634}
2635
2636static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status)
2637{
2638 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2639 int epid = urb_priv->epid;
2640
2641 DBFENTER;
2642
2643 if (status)
2644 warn("Completing ctrl urb with status %d.", status);
2645
2646 dbg_ctrl("Completing ctrl epid %d, urb 0x%lx", epid, (unsigned long)urb);
2647
2648 /* Remove this urb from the list. */
2649 urb_list_del(urb, epid);
2650
2651 /* For an IN pipe, we always set the actual length, regardless of whether there was
2652 an error or not (which means the device driver can use the data if it wants to). */
2653 if (usb_pipein(urb->pipe)) {
2654 urb->actual_length = urb_priv->rx_offset;
2655 }
2656
2657	/* FIXME: Are there things below that we shouldn't do if there was an error?
2658	   For example, maybe we shouldn't insert more traffic. */
2659
2660 /* Remember to free the SBs. */
2661 etrax_remove_from_sb_list(urb);
2662 kfree(urb_priv);
2663 urb->hcpriv = 0;
2664
2665 /* If there are any more urbs in the list we'd better start sending. */
2666 if (!urb_list_empty(epid)) {
2667 struct urb *new_urb;
2668
2669 /* Get the first urb. */
2670 new_urb = urb_list_first(epid);
2671 assert(new_urb);
2672
2673 dbg_ctrl("More ctrl for epid %d, first urb = 0x%lx", epid, (unsigned long)new_urb);
2674
2675 etrax_usb_add_to_ctrl_sb_list(new_urb, epid);
2676 }
2677
2678 urb->status = status;
2679
2680 /* We let any non-zero status from the layer above have precedence. */
2681 if (status == 0) {
2682		/* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
2683		   are to be treated as errors. */
2684 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
2685 if (usb_pipein(urb->pipe) &&
2686 (urb->actual_length !=
2687 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
2688 urb->status = -EREMOTEIO;
2689 }
2690 }
2691 }
2692
2693 if (urb->complete) {
2694 urb->complete(urb, NULL);
2695 }
2696
2697 if (urb_list_empty(epid)) {
2698 /* No more traffic. Time to clean up. */
2699 etrax_usb_free_epid(epid);
2700 /* Must set sub pointer to 0. */
2701 dbg_ctrl("No ctrl for epid %d", epid);
2702 TxCtrlEPList[epid].sub = 0;
2703 }
2704
2705 DBFEXIT;
2706}
2707
2708static int etrax_usb_submit_intr_urb(struct urb *urb)
2709{
2710
2711 int epid;
2712
2713 DBFENTER;
2714
2715 if (usb_pipeout(urb->pipe)) {
2716		/* Unsupported transfer type.
2717		   We don't support interrupt OUT traffic. (If we did, we couldn't support
2718		   intervals for either IN or OUT traffic, but would be forced to schedule all
2719		   interrupt traffic in one frame.) */
2720 return -EINVAL;
2721 }
2722
2723 epid = etrax_usb_setup_epid(urb);
2724 if (epid == -1) {
2725 DBFEXIT;
2726 return -ENOMEM;
2727 }
2728
2729 if (!urb_list_empty(epid)) {
2730 /* There is already a queued urb for this endpoint. */
2731 etrax_usb_free_epid(epid);
2732 return -ENXIO;
2733 }
2734
2735 urb->status = -EINPROGRESS;
2736
2737 dbg_intr("Add intr urb 0x%lx, to list, epid %d", (unsigned long)urb, epid);
2738
2739 urb_list_add(urb, epid);
2740 etrax_usb_add_to_intr_sb_list(urb, epid);
2741
2742	DBFEXIT;
2743
2744	return 0;
2745}
2746
2747static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid)
2748{
2749
2750 volatile USB_EP_Desc_t *tmp_ep;
2751 volatile USB_EP_Desc_t *first_ep;
2752
2753 char maxlen;
2754 int interval;
2755 int i;
2756
2757 etrax_urb_priv_t *urb_priv;
2758
2759 DBFENTER;
2760
2761 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
2762 interval = urb->interval;
2763
2764 urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
2765 assert(urb_priv != NULL);
2766 urb->hcpriv = urb_priv;
2767
2768 first_ep = &TxIntrEPList[0];
2769
2770	/* Round the interval down to a power of two (2^n). This obviously favours
2771	   smaller numbers, but that is actually a good thing. */
2772 /* FIXME: The "rounding error" for larger intervals will be quite
2773 large. For in traffic this shouldn't be a problem since it will only
2774 mean that we "poll" more often. */
2775 for (i = 0; interval; i++) {
2776 interval = interval >> 1;
2777 }
2778 interval = 1 << (i - 1);
2779
2780 dbg_intr("Interval rounded to %d", interval);
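	/* A quick worked example: with urb->interval = 10 the loop shifts 10 -> 5 -> 2 -> 1 -> 0,
	   giving i = 4, so interval becomes 1 << 3 = 8, i.e. we poll every 8 frames (rounded down
	   to the nearest power of two). */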
2781
2782 tmp_ep = first_ep;
2783 i = 0;
2784 do {
2785 if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
2786 if ((i % interval) == 0) {
2787 /* Insert the traffic ep after tmp_ep */
2788 USB_EP_Desc_t *ep_desc;
2789 USB_SB_Desc_t *sb_desc;
2790
2791 dbg_intr("Inserting EP for epid %d", epid);
2792
2793 ep_desc = (USB_EP_Desc_t *)
2794 kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
2795 sb_desc = (USB_SB_Desc_t *)
2796 kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
2797 assert(ep_desc != NULL);
2798 CHECK_ALIGN(ep_desc);
2799 assert(sb_desc != NULL);
2800
2801 ep_desc->sub = virt_to_phys(sb_desc);
2802 ep_desc->hw_len = 0;
2803 ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
2804 IO_STATE(USB_EP_command, enable, yes));
2805
2806
2807 /* Round upwards the number of packets of size maxlen
2808 that this SB descriptor should receive. */
2809 sb_desc->sw_len = urb->transfer_buffer_length ?
2810 (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
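				/* Plain ceiling division: e.g. a 100 byte transfer_buffer_length with
				   maxlen 64 gives (100 - 1) / 64 + 1 = 2 packets; zero length is
				   special-cased to 0. */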
2811 sb_desc->next = 0;
2812 sb_desc->buf = 0;
2813 sb_desc->command =
2814 (IO_FIELD(USB_SB_command, rem, urb->transfer_buffer_length % maxlen) |
2815 IO_STATE(USB_SB_command, tt, in) |
2816 IO_STATE(USB_SB_command, eot, yes) |
2817 IO_STATE(USB_SB_command, eol, yes));
2818
2819 ep_desc->next = tmp_ep->next;
2820 tmp_ep->next = virt_to_phys(ep_desc);
2821 }
2822 i++;
2823 }
2824 tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
2825 } while (tmp_ep != first_ep);
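	/* In other words: counting only the eof-marked slots, a fresh EP descriptor for this
	   epid is linked in at every interval-th slot (i = 0, interval, 2*interval, ...), which
	   is what gives the endpoint its (power-of-two) polling rate. */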
2826
2827
2828 /* Note that first_sb/last_sb doesn't apply to interrupt traffic. */
2829 urb_priv->epid = epid;
2830
2831 /* We start the DMA sub channel without checking if it's running or not, because:
2832 1) If it's already running, issuing the start command is a nop.
2833 2) We avoid a test-and-set race condition. */
2834 *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
2835
2836 DBFEXIT;
2837}
2838
2839
2840
2841static void etrax_usb_complete_intr_urb(struct urb *urb, int status)
2842{
2843 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
2844 int epid = urb_priv->epid;
2845
2846 DBFENTER;
2847
2848 if (status)
2849 warn("Completing intr urb with status %d.", status);
2850
2851 dbg_intr("Completing intr epid %d, urb 0x%lx", epid, (unsigned long)urb);
2852
2853 urb->status = status;
2854 urb->actual_length = urb_priv->rx_offset;
2855
2856 dbg_intr("interrupt urb->actual_length = %d", urb->actual_length);
2857
2858 /* We let any non-zero status from the layer above have precedence. */
2859 if (status == 0) {
2860		/* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
2861		   are to be treated as errors. */
2862 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
2863 if (urb->actual_length !=
2864 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
2865 urb->status = -EREMOTEIO;
2866 }
2867 }
2868 }
2869
2870 /* The driver will resubmit the URB so we need to remove it first */
2871 etrax_usb_unlink_urb(urb, 0);
2872 if (urb->complete) {
2873 urb->complete(urb, NULL);
2874 }
2875
2876 DBFEXIT;
2877}
2878
2879
2880static int etrax_usb_submit_isoc_urb(struct urb *urb)
2881{
2882 int epid;
2883 unsigned long flags;
2884
2885 DBFENTER;
2886
2887 dbg_isoc("Submitting isoc urb = 0x%lx", (unsigned long)urb);
2888
2889 /* Epid allocation, empty check and list add must be protected.
2890 Read about this in etrax_usb_submit_ctrl_urb. */
2891
2892 spin_lock_irqsave(&urb_list_lock, flags);
2893 /* Is there an active epid for this urb ? */
2894 epid = etrax_usb_setup_epid(urb);
2895 if (epid == -1) {
2896 DBFEXIT;
2897 spin_unlock_irqrestore(&urb_list_lock, flags);
2898 return -ENOMEM;
2899 }
2900
2901	/* OK, now we have a valid epid; let's insert some traffic. */
2902
2903 urb->status = -EINPROGRESS;
2904
2905	/* Find the last urb in the urb list and add this urb after it.
2906	   Also insert the traffic, i.e. do an etrax_usb_add_to_isoc_sb_list. It is
2907	   important to do this in "real time" since isochronous traffic is
2908	   time sensitive. */
2909
2910 dbg_isoc("Adding isoc urb to (possibly empty) list");
2911 urb_list_add(urb, epid);
2912 etrax_usb_add_to_isoc_sb_list(urb, epid);
2913 spin_unlock_irqrestore(&urb_list_lock, flags);
2914
2915 DBFEXIT;
2916
2917 return 0;
2918}
2919
2920static void etrax_usb_check_error_isoc_ep(const int epid)
2921{
2922 unsigned long int flags;
2923 int error_code;
2924 __u32 r_usb_ept_data;
2925
2926	/* We can't read R_USB_EPID_ATTN here since it would clear the iso_eof,
2927	   bulk_eot and epid_attn interrupts. So we just check the status of
2928	   the epid without testing for it in R_USB_EPID_ATTN. */
2929
2930
2931 save_flags(flags);
2932 cli();
2933 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2934 nop();
2935 /* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
2936 registers, they are located at the same address and are of the same size.
2937 In other words, this read should be ok for isoc also. */
2938 r_usb_ept_data = *R_USB_EPT_DATA;
2939 restore_flags(flags);
2940
2941 error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
2942
2943 if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
2944 warn("Hold was set for epid %d.", epid);
2945 return;
2946 }
2947
2948 if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, no_error)) {
2949
2950		/* This indicates that the SB list of the ept was completed before
2951		   new data was appended to it. This is not an error, but indicates
2952		   high system or USB load and could possibly cause trouble for
2953		   very timing-sensitive USB device drivers, so we log it.
2954		*/
2955 info("Isoc. epid %d disabled with no error", epid);
2956 return;
2957
2958 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, stall)) {
2959 /* Not really a protocol error, just says that the endpoint gave
2960 a stall response. Note that error_code cannot be stall for isoc. */
2961 panic("Isoc traffic cannot stall");
2962
2963 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, bus_error)) {
2964 /* Two devices responded to a transaction request. Must be resolved
2965 by software. FIXME: Reset ports? */
2966 panic("Bus error for epid %d."
2967 " Two devices responded to transaction request",
2968 epid);
2969
2970 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
2971 /* DMA overrun or underrun. */
2972 warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
2973
2974 /* It seems that error_code = buffer_error in
2975 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
2976 are the same error. */
2977 }
2978}
2979
2980
2981static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid)
2982{
2983
2984 int i = 0;
2985
2986 etrax_urb_priv_t *urb_priv;
2987 USB_SB_Desc_t *prev_sb_desc, *next_sb_desc, *temp_sb_desc;
2988
2989 DBFENTER;
2990
2991 prev_sb_desc = next_sb_desc = temp_sb_desc = NULL;
2992
2993 urb_priv = kzalloc(sizeof(etrax_urb_priv_t), GFP_ATOMIC);
2994 assert(urb_priv != NULL);
2995
2996 urb->hcpriv = urb_priv;
2997 urb_priv->epid = epid;
2998
2999 if (usb_pipeout(urb->pipe)) {
3000
3001 if (urb->number_of_packets == 0) panic("etrax_usb_add_to_isoc_sb_list 0 packets\n");
3002
3003 dbg_isoc("Transfer for epid %d is OUT", epid);
3004 dbg_isoc("%d packets in URB", urb->number_of_packets);
3005
3006 /* Create one SB descriptor for each packet and link them together. */
3007 for (i = 0; i < urb->number_of_packets; i++) {
3008 if (!urb->iso_frame_desc[i].length)
3009 continue;
3010
3011 next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
3012 assert(next_sb_desc != NULL);
3013
3014 if (urb->iso_frame_desc[i].length > 0) {
3015
3016 next_sb_desc->command = (IO_STATE(USB_SB_command, tt, out) |
3017 IO_STATE(USB_SB_command, eot, yes));
3018
3019 next_sb_desc->sw_len = urb->iso_frame_desc[i].length;
3020 next_sb_desc->buf = virt_to_phys((char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset);
3021
3022 /* Check if full length transfer. */
3023 if (urb->iso_frame_desc[i].length ==
3024 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
3025 next_sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
3026 }
3027 } else {
3028 dbg_isoc("zero len packet");
3029 next_sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
3030 IO_STATE(USB_SB_command, tt, zout) |
3031 IO_STATE(USB_SB_command, eot, yes) |
3032 IO_STATE(USB_SB_command, full, yes));
3033
3034 next_sb_desc->sw_len = 1;
3035 next_sb_desc->buf = virt_to_phys(&zout_buffer[0]);
3036 }
3037
3038 /* First SB descriptor that belongs to this urb */
3039 if (i == 0)
3040 urb_priv->first_sb = next_sb_desc;
3041 else
3042 prev_sb_desc->next = virt_to_phys(next_sb_desc);
3043
3044 prev_sb_desc = next_sb_desc;
3045 }
3046
3047 next_sb_desc->command |= (IO_STATE(USB_SB_command, intr, yes) |
3048 IO_STATE(USB_SB_command, eol, yes));
3049 next_sb_desc->next = 0;
3050 urb_priv->last_sb = next_sb_desc;
3051
3052 } else if (usb_pipein(urb->pipe)) {
3053
3054 dbg_isoc("Transfer for epid %d is IN", epid);
3055 dbg_isoc("transfer_buffer_length = %d", urb->transfer_buffer_length);
3056 dbg_isoc("rem is calculated to %d", urb->iso_frame_desc[urb->number_of_packets - 1].length);
3057
3058		/* Note that IN descriptors for periodic traffic are not consumed. This means that
3059		   the USB controller never advances through the SB list. In other words, if there already
3060		   is an SB descriptor in the list for this EP, we don't have to do anything. */
3061 if (TxIsocEPList[epid].sub == 0) {
3062 dbg_isoc("Isoc traffic not already running, allocating SB");
3063
3064 next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
3065 assert(next_sb_desc != NULL);
3066
3067 next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
3068 IO_STATE(USB_SB_command, eot, yes) |
3069 IO_STATE(USB_SB_command, eol, yes));
3070
3071 next_sb_desc->next = 0;
3072 next_sb_desc->sw_len = 1; /* Actual number of packets is not relevant
3073 for periodic in traffic as long as it is more
3074 than zero. Set to 1 always. */
3075 next_sb_desc->buf = 0;
3076
3077 /* The rem field is don't care for isoc traffic, so we don't set it. */
3078
3079 /* Only one SB descriptor that belongs to this urb. */
3080 urb_priv->first_sb = next_sb_desc;
3081 urb_priv->last_sb = next_sb_desc;
3082
3083 } else {
3084
3085 dbg_isoc("Isoc traffic already running, just setting first/last_sb");
3086
3087			/* Each isoc IN EP will have only one SB descriptor, set up when the first
3088			   (already active) urb was submitted. Note that even though we may have several first_sb/last_sb
3089			   pointing at the same SB descriptor, it is freed only once (when the list has
3090			   become empty). */
3091 urb_priv->first_sb = phys_to_virt(TxIsocEPList[epid].sub);
3092 urb_priv->last_sb = phys_to_virt(TxIsocEPList[epid].sub);
3093 return;
3094 }
3095
3096 }
3097
3098 /* Find the spot to insert this urb and add it. */
3099 if (TxIsocEPList[epid].sub == 0) {
3100 /* First SB descriptor inserted in this list (in or out). */
3101 dbg_isoc("Inserting SB desc first in list");
3102 TxIsocEPList[epid].hw_len = 0;
3103 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3104
3105 } else {
3106 /* Isochronous traffic is already running, insert new traffic last (only out). */
3107 dbg_isoc("Inserting SB desc last in list");
3108 temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
3109 while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
3110 IO_STATE(USB_SB_command, eol, yes)) {
3111 assert(temp_sb_desc->next);
3112 temp_sb_desc = phys_to_virt(temp_sb_desc->next);
3113 }
3114 dbg_isoc("Appending list on desc 0x%p", temp_sb_desc);
3115
3116 /* Next pointer must be set before eol is removed. */
3117 temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
3118		/* Clear the previous end-of-list flag, since there is a new one at the
3119		   end of the added SB descriptor list. */
3120 temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
3121
3122 if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
3123 /* 8.8.5 in Designer's Reference says we should check for and correct
3124 any errors in the EP here. That should not be necessary if epid_attn
3125 is handled correctly, so we assume all is ok. */
3126 dbg_isoc("EP disabled");
3127 etrax_usb_check_error_isoc_ep(epid);
3128
3129 /* The SB list was exhausted. */
3130 if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
3131 /* The new sublist did not get processed before the EP was
3132 disabled. Setup the EP again. */
3133 dbg_isoc("Set EP sub to new list");
3134 TxIsocEPList[epid].hw_len = 0;
3135 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3136 }
3137 }
3138 }
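	/* The append case in pictures (illustrative): before, ... -> old_last (eol set);
	   after, ... -> old_last -> first_sb -> ... -> last_sb (eol set). Writing the next
	   pointer before clearing eol means the DMA never sees the list continue while the
	   next pointer is still stale. */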
3139
3140 if (urb->transfer_flags & URB_ISO_ASAP) {
3141		/* The isoc transfer should be started as soon as possible. The start_frame
3142		   field is a return value if URB_ISO_ASAP was set. Comparing R_USB_FM_NUMBER
3143		   with a USB Chief trace shows that the first isoc IN token is sent 2 frames
3144		   later. I'm not sure how this affects the device driver's use of the start_frame
3145		   field, or how it affects things when URB_ISO_ASAP is not set, so there's
3146		   no compensation for the 2-frame "lag" here. */
3147 urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
3148 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3149 urb_priv->urb_state = STARTED;
3150 dbg_isoc("URB_ISO_ASAP set, urb->start_frame set to %d", urb->start_frame);
3151 } else {
3152 /* Not started yet. */
3153 urb_priv->urb_state = NOT_STARTED;
3154 dbg_isoc("urb_priv->urb_state set to NOT_STARTED");
3155 }
3156
3157 /* We start the DMA sub channel without checking if it's running or not, because:
3158 1) If it's already running, issuing the start command is a nop.
3159 2) We avoid a test-and-set race condition. */
3160 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
3161
3162 DBFEXIT;
3163}
3164
3165static void etrax_usb_complete_isoc_urb(struct urb *urb, int status)
3166{
3167 etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
3168 int epid = urb_priv->epid;
3169 int auto_resubmit = 0;
3170
3171 DBFENTER;
3172 dbg_isoc("complete urb 0x%p, status %d", urb, status);
3173
3174 if (status)
3175 warn("Completing isoc urb with status %d.", status);
3176
3177 if (usb_pipein(urb->pipe)) {
3178 int i;
3179
3180		/* Make sure that all isoc packets have status and length set before
3181		   completing the urb. */
3182 for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++) {
3183 urb->iso_frame_desc[i].actual_length = 0;
3184 urb->iso_frame_desc[i].status = -EPROTO;
3185 }
3186
3187 urb_list_del(urb, epid);
3188
3189 if (!list_empty(&urb_list[epid])) {
3190 ((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
3191 } else {
3192 unsigned long int flags;
3193 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
3194 /* The EP was enabled, disable it and wait. */
3195 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
3196
3197 /* Ah, the luxury of busy-wait. */
3198 while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
3199 }
3200
3201 etrax_remove_from_sb_list(urb);
3202 TxIsocEPList[epid].sub = 0;
3203 TxIsocEPList[epid].hw_len = 0;
3204
3205 save_flags(flags);
3206 cli();
3207 etrax_usb_free_epid(epid);
3208 restore_flags(flags);
3209 }
3210
3211 urb->hcpriv = 0;
3212 kfree(urb_priv);
3213
3214 /* Release allocated bandwidth. */
3215 usb_release_bandwidth(urb->dev, urb, 0);
3216 } else if (usb_pipeout(urb->pipe)) {
3217 int freed_descr;
3218
3219 dbg_isoc("Isoc out urb complete 0x%p", urb);
3220
3221 /* Update the urb list. */
3222 urb_list_del(urb, epid);
3223
3224 freed_descr = etrax_remove_from_sb_list(urb);
3225 dbg_isoc("freed %d descriptors of %d packets", freed_descr, urb->number_of_packets);
3226 assert(freed_descr == urb->number_of_packets);
3227 urb->hcpriv = 0;
3228 kfree(urb_priv);
3229
3230 /* Release allocated bandwidth. */
3231 usb_release_bandwidth(urb->dev, urb, 0);
3232 }
3233
3234 urb->status = status;
3235 if (urb->complete) {
3236 urb->complete(urb, NULL);
3237 }
3238
3239 if (auto_resubmit) {
3240 /* Check that urb was not unlinked by the complete callback. */
3241 if (__urb_list_entry(urb, epid)) {
3242 /* Move this one down the list. */
3243 urb_list_move_last(urb, epid);
3244
3245 /* Mark the now first urb as started (may already be). */
3246 ((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
3247
3248 /* Must set this to 0 since this urb is still active after
3249 completion. */
3250 urb_priv->isoc_packet_counter = 0;
3251 } else {
3252 warn("(ISOC) automatic resubmit urb 0x%p removed by complete.", urb);
3253 }
3254 }
3255
3256 DBFEXIT;
3257}
3258
3259static void etrax_usb_complete_urb(struct urb *urb, int status)
3260{
3261 switch (usb_pipetype(urb->pipe)) {
3262 case PIPE_BULK:
3263 etrax_usb_complete_bulk_urb(urb, status);
3264 break;
3265 case PIPE_CONTROL:
3266 etrax_usb_complete_ctrl_urb(urb, status);
3267 break;
3268 case PIPE_INTERRUPT:
3269 etrax_usb_complete_intr_urb(urb, status);
3270 break;
3271 case PIPE_ISOCHRONOUS:
3272 etrax_usb_complete_isoc_urb(urb, status);
3273 break;
3274 default:
3275 err("Unknown pipetype");
3276 }
3277}
3278
3279
3280
3281static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc)
3282{
3283 usb_interrupt_registers_t *reg;
3284 unsigned long flags;
3285 __u32 irq_mask;
3286 __u8 status;
3287 __u32 epid_attn;
3288 __u16 port_status_1;
3289 __u16 port_status_2;
3290 __u32 fm_number;
3291
3292 DBFENTER;
3293
3294	/* Read critical registers into local variables; do the allocation afterwards. */
3295 save_flags(flags);
3296 cli();
3297
3298 irq_mask = *R_USB_IRQ_MASK_READ;
3299 /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that R_USB_STATUS
3300 must be read before R_USB_EPID_ATTN since reading the latter clears the
3301 ourun and perror fields of R_USB_STATUS. */
3302 status = *R_USB_STATUS;
3303
3304 /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn interrupts. */
3305 epid_attn = *R_USB_EPID_ATTN;
3306
3307 /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
3308 port_status interrupt. */
3309 port_status_1 = *R_USB_RH_PORT_STATUS_1;
3310 port_status_2 = *R_USB_RH_PORT_STATUS_2;
3311
3312 /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
3313 /* Note: the lower 11 bits contain the actual frame number, sent with each sof. */
3314 fm_number = *R_USB_FM_NUMBER;
3315
3316 restore_flags(flags);
3317
3318 reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, GFP_ATOMIC);
3319
3320 assert(reg != NULL);
3321
3322 reg->hc = (etrax_hc_t *)vhc;
3323
3324	/* Now put the register values into the allocated area. */
3325 reg->r_usb_irq_mask_read = irq_mask;
3326 reg->r_usb_status = status;
3327 reg->r_usb_epid_attn = epid_attn;
3328 reg->r_usb_rh_port_status_1 = port_status_1;
3329 reg->r_usb_rh_port_status_2 = port_status_2;
3330 reg->r_usb_fm_number = fm_number;
3331
3332 INIT_WORK(&reg->usb_bh, etrax_usb_hc_interrupt_bottom_half, reg);
3333 schedule_work(&reg->usb_bh);
3334
3335 DBFEXIT;
3336
3337 return IRQ_HANDLED;
3338}
3339
3340static void etrax_usb_hc_interrupt_bottom_half(void *data)
3341{
3342 usb_interrupt_registers_t *reg = (usb_interrupt_registers_t *)data;
3343 __u32 irq_mask = reg->r_usb_irq_mask_read;
3344
3345 DBFENTER;
3346
3347 /* Interrupts are handled in order of priority. */
3348 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
3349 etrax_usb_hc_epid_attn_interrupt(reg);
3350 }
3351 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
3352 etrax_usb_hc_port_status_interrupt(reg);
3353 }
3354 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
3355 etrax_usb_hc_ctl_status_interrupt(reg);
3356 }
3357 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
3358 etrax_usb_hc_isoc_eof_interrupt();
3359 }
3360 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
3361 /* Update/restart the bulk start timer since obviously the channel is running. */
3362 mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
3363		/* Update/restart the bulk eot timer since we just received a bulk eot interrupt. */
3364 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
3365
3366 etrax_usb_hc_bulk_eot_interrupt(0);
3367 }
3368
3369 kmem_cache_free(top_half_reg_cache, reg);
3370
3371 DBFEXIT;
3372}
3373
3374
3375void etrax_usb_hc_isoc_eof_interrupt(void)
3376{
3377 struct urb *urb;
3378 etrax_urb_priv_t *urb_priv;
3379 int epid;
3380 unsigned long flags;
3381
3382 DBFENTER;
3383
3384 /* Do not check the invalid epid (it has a valid sub pointer). */
3385 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
3386
3387		/* Skip the dummy and invalid epids. */
3388 if ((epid == DUMMY_EPID) || (epid == INVALID_EPID))
3389 continue;
3390
3391 /* Disable interrupts to block the isoc out descriptor interrupt handler
3392 from being called while the isoc EPID list is being checked.
3393 */
3394 save_flags(flags);
3395 cli();
3396
3397 if (TxIsocEPList[epid].sub == 0) {
3398 /* Nothing here to see. */
3399 restore_flags(flags);
3400 continue;
3401 }
3402
3403 /* Get the first urb (if any). */
3404 urb = urb_list_first(epid);
3405 if (urb == 0) {
3406 warn("Ignoring NULL urb");
3407 restore_flags(flags);
3408 continue;
3409 }
3410 if (usb_pipein(urb->pipe)) {
3411
3412 /* Sanity check. */
3413 assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
3414
3415 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
3416 assert(urb_priv);
3417
3418 if (urb_priv->urb_state == NOT_STARTED) {
3419
3420 /* If ASAP is not set and urb->start_frame is the current frame,
3421 start the transfer. */
3422 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3423 (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
3424
3425 dbg_isoc("Enabling isoc IN EP descr for epid %d", epid);
3426 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3427
3428 /* This urb is now active. */
3429 urb_priv->urb_state = STARTED;
3430 continue;
3431 }
3432 }
3433 }
3434 restore_flags(flags);
3435 }
3436
3437 DBFEXIT;
3438
3439}
3440
3441void etrax_usb_hc_bulk_eot_interrupt(int timer_induced)
3442{
3443 int epid;
3444
3445 /* The technique is to run one urb at a time, wait for the eot interrupt at which
3446 point the EP descriptor has been disabled. */
3447
3448 DBFENTER;
3449 dbg_bulk("bulk eot%s", timer_induced ? ", called by timer" : "");
3450
3451 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
3452
3453 if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
3454 (TxBulkEPList[epid].sub != 0)) {
3455
3456 struct urb *urb;
3457 etrax_urb_priv_t *urb_priv;
3458 unsigned long flags;
3459 __u32 r_usb_ept_data;
3460
3461			/* Found a disabled EP descriptor which has a non-null sub pointer.
3462			   Verify that this bulk EP descriptor was disabled with no errors.
3463			   FIXME: Is it necessary to check error_code? */
3464 dbg_bulk("for epid %d?", epid);
3465
3466 /* Get the first urb. */
3467 urb = urb_list_first(epid);
3468
3469 /* FIXME: Could this happen for valid reasons? Why did it disappear? Because of
3470 wrong unlinking? */
3471 if (!urb) {
3472 warn("NULL urb for epid %d", epid);
3473 continue;
3474 }
3475
3476 assert(urb);
3477 urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
3478 assert(urb_priv);
3479
3480 /* Sanity checks. */
3481 assert(usb_pipetype(urb->pipe) == PIPE_BULK);
3482 if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
3483 err("bulk endpoint got disabled before reaching last sb");
3484 }
3485
3486			/* For bulk IN traffic, there seems to be a race condition between
3487			   the bulk eot and eop interrupts, or rather an uncertainty regarding
3488			   the order in which they happen. Normally we expect the eop interrupt from
3489			   DMA channel 9 to happen before the eot interrupt.
3490
3491			   Therefore, we complete the bulk IN urb in the rx interrupt handler instead. */
3492
3493 if (usb_pipein(urb->pipe)) {
3494 dbg_bulk("in urb, continuing");
3495 continue;
3496 }
3497
3498 save_flags(flags);
3499 cli();
3500 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
3501 nop();
3502 r_usb_ept_data = *R_USB_EPT_DATA;
3503 restore_flags(flags);
3504
3505 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data) ==
3506 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
3507 /* This means that the endpoint has no error, is disabled
3508 and had inserted traffic, i.e. transfer successfully completed. */
3509 etrax_usb_complete_bulk_urb(urb, 0);
3510 } else {
3511 /* Shouldn't happen. We expect errors to be caught by epid attention. */
3512 err("Found disabled bulk EP desc, error_code != no_error");
3513 }
3514 }
3515 }
3516
3517	/* Normally, we should find (at least) one disabled EP descriptor with a valid sub pointer.
3518	   However, because of the uncertainty in the delivery of the eop/eot interrupts, we may
3519	   not. Also, we might find two disabled EPs when handling an eot interrupt, and then find
3520	   none the next time. */
3521
3522 DBFEXIT;
3523
3524}
3525
3526void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg)
3527{
3528 /* This function handles the epid attention interrupt. There are a variety of reasons
3529 for this interrupt to happen (Designer's Reference, p. 8 - 22 for the details):
3530
3531 invalid ep_id - Invalid epid in an EP (EP disabled).
3532 stall - Not strictly an error condition (EP disabled).
3533 3rd error - Three successive transaction errors (EP disabled).
3534 buffer ourun - Buffer overrun or underrun (EP disabled).
3535 past eof1 - Intr or isoc transaction proceeds past EOF1.
3536 near eof - Intr or isoc transaction would not fit inside the frame.
3537 zout transfer - If zout transfer for a bulk endpoint (EP disabled).
3538 setup transfer - If setup transfer for a non-ctrl endpoint (EP disabled). */
3539
3540 int epid;
3541
3542
3543 DBFENTER;
3544
3545 assert(reg != NULL);
3546
3547 /* Note that we loop through all epids. We still want to catch errors for
3548 the invalid one, even though we might handle them differently. */
3549 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
3550
3551 if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
3552
3553 struct urb *urb;
3554 __u32 r_usb_ept_data;
3555 unsigned long flags;
3556 int error_code;
3557
3558 save_flags(flags);
3559 cli();
3560 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
3561 nop();
3562 /* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
3563 registers, they are located at the same address and are of the same size.
3564 In other words, this read should be ok for isoc also. */
3565 r_usb_ept_data = *R_USB_EPT_DATA;
3566 restore_flags(flags);
3567
3568 /* First some sanity checks. */
3569 if (epid == INVALID_EPID) {
3570 /* FIXME: What if it became disabled? Could seriously hurt interrupt
3571 traffic. (Use do_intr_recover.) */
3572 warn("Got epid_attn for INVALID_EPID (%d).", epid);
3573 err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
3574 err("R_USB_STATUS = 0x%x", reg->r_usb_status);
3575 continue;
3576 } else if (epid == DUMMY_EPID) {
3577 /* We definitely don't care about these ones. Besides, they are
3578 always disabled, so any possible disabling caused by the
3579 epid attention interrupt is irrelevant. */
3580 warn("Got epid_attn for DUMMY_EPID (%d).", epid);
3581 continue;
3582 }
3583
3584 /* Get the first urb in the urb list for this epid. We blatantly assume
3585 that only the first urb could have caused the epid attention.
3586 (For bulk and ctrl, only one urb is active at any one time. For intr
3587 and isoc we remove them once they are completed.) */
3588 urb = urb_list_first(epid);
3589
3590 if (urb == NULL) {
3591 err("Got epid_attn for epid %i with no urb.", epid);
3592 err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
3593 err("R_USB_STATUS = 0x%x", reg->r_usb_status);
3594 continue;
3595 }
3596
3597 switch (usb_pipetype(urb->pipe)) {
3598 case PIPE_BULK:
3599 warn("Got epid attn for bulk endpoint, epid %d", epid);
3600 break;
3601 case PIPE_CONTROL:
3602 warn("Got epid attn for control endpoint, epid %d", epid);
3603 break;
3604 case PIPE_INTERRUPT:
3605 warn("Got epid attn for interrupt endpoint, epid %d", epid);
3606 break;
3607 case PIPE_ISOCHRONOUS:
3608 warn("Got epid attn for isochronous endpoint, epid %d", epid);
3609 break;
3610 }
3611
3612 if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
3613 if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
3614 warn("Hold was set for epid %d.", epid);
3615 continue;
3616 }
3617 }
3618
3619			/* Even though error_code occupies bits 22 - 23 in both R_USB_EPT_DATA and
3620			   R_USB_EPT_DATA_ISO, we separate them here so we don't forget in other places. */
3621 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
3622 error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
3623 } else {
3624 error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data);
3625 }
3626
3627 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
3628 if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
3629
3630 /* Isoc traffic doesn't have error_count_in/error_count_out. */
3631 if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
3632 (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, r_usb_ept_data) == 3 ||
3633 IO_EXTRACT(R_USB_EPT_DATA, error_count_out, r_usb_ept_data) == 3)) {
3634 /* 3rd error. */
3635 warn("3rd error for epid %i", epid);
3636 etrax_usb_complete_urb(urb, -EPROTO);
3637
3638 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
3639
3640 warn("Perror for epid %d", epid);
3641
3642 if (!(r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
3643 /* invalid ep_id */
3644 panic("Perror because of invalid epid."
3645 " Deconfigured too early?");
3646 } else {
3647 /* past eof1, near eof, zout transfer, setup transfer */
3648
3649 /* Dump the urb and the relevant EP descriptor list. */
3650
3651 __dump_urb(urb);
3652 __dump_ept_data(epid);
3653 __dump_ep_list(usb_pipetype(urb->pipe));
3654
3655 panic("Something wrong with DMA descriptor contents."
3656 " Too much traffic inserted?");
3657 }
3658 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
3659 /* buffer ourun */
3660 panic("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
3661 }
3662
3663 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, stall)) {
3664 /* Not really a protocol error, just says that the endpoint gave
3665 a stall response. Note that error_code cannot be stall for isoc. */
3666 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
3667 panic("Isoc traffic cannot stall");
3668 }
3669
3670 warn("Stall for epid %d", epid);
3671 etrax_usb_complete_urb(urb, -EPIPE);
3672
3673 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, bus_error)) {
3674 /* Two devices responded to a transaction request. Must be resolved
3675 by software. FIXME: Reset ports? */
3676 panic("Bus error for epid %d."
3677 " Two devices responded to transaction request",
3678 epid);
3679
3680 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
3681 /* DMA overrun or underrun. */
3682 warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
3683
3684 /* It seems that error_code = buffer_error in
3685 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
3686 are the same error. */
3687 etrax_usb_complete_urb(urb, -EPROTO);
3688 }
3689 }
3690 }
3691
3692 DBFEXIT;
3693
3694}
3695
3696void etrax_usb_bulk_start_timer_func(unsigned long dummy)
3697{
3698
3699	/* We might enable an EP descriptor behind the current DMA position just as the DMA
3700	   decides that there is no more bulk traffic and stops the bulk channel.
3701	   Therefore we periodically check whether the bulk channel is stopped while there is an
3702	   enabled bulk EP descriptor, in which case we start the bulk channel. */
3703 dbg_bulk("bulk_start_timer timed out.");
3704
3705 if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
3706 int epid;
3707
3708 dbg_bulk("Bulk DMA channel not running.");
3709
3710 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
3711 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
3712 dbg_bulk("Found enabled EP for epid %d, starting bulk channel.\n",
3713 epid);
3714 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
3715
3716 /* Restart the bulk eot timer since we just started the bulk channel. */
3717 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
3718
3719 /* No need to search any further. */
3720 break;
3721 }
3722 }
3723 } else {
3724 dbg_bulk("Bulk DMA channel running.");
3725 }
3726}
3727
3728void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg)
3729{
3730 etrax_hc_t *hc = reg->hc;
3731 __u16 r_usb_rh_port_status_1 = reg->r_usb_rh_port_status_1;
3732 __u16 r_usb_rh_port_status_2 = reg->r_usb_rh_port_status_2;
3733
3734 DBFENTER;
3735
3736 /* The Etrax RH does not include a wPortChange register, so this has to be handled in software
3737 (by saving the old port status value for comparison when the port status interrupt happens).
3738 See section 11.16.2.6.2 in the USB 1.1 spec for details. */
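	/* For example: if the previous poll read CONNECTION = 0 and this one reads CONNECTION = 1,
	   the comparison below sets C_PORT_CONNECTION in wPortChange; ENABLE, SUSPEND and RESET
	   only get their change bits set on the one-to-zero transition, as noted per field below. */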
3739
3740 dbg_rh("hc->rh.prev_wPortStatus_1 = 0x%x", hc->rh.prev_wPortStatus_1);
3741 dbg_rh("hc->rh.prev_wPortStatus_2 = 0x%x", hc->rh.prev_wPortStatus_2);
3742 dbg_rh("r_usb_rh_port_status_1 = 0x%x", r_usb_rh_port_status_1);
3743 dbg_rh("r_usb_rh_port_status_2 = 0x%x", r_usb_rh_port_status_2);
3744
3745 /* C_PORT_CONNECTION is set on any transition. */
3746 hc->rh.wPortChange_1 |=
3747 ((r_usb_rh_port_status_1 & (1 << RH_PORT_CONNECTION)) !=
3748 (hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_CONNECTION))) ?
3749 (1 << RH_PORT_CONNECTION) : 0;
3750
3751 hc->rh.wPortChange_2 |=
3752 ((r_usb_rh_port_status_2 & (1 << RH_PORT_CONNECTION)) !=
3753 (hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_CONNECTION))) ?
3754 (1 << RH_PORT_CONNECTION) : 0;
3755
3756 /* C_PORT_ENABLE is _only_ set on a one to zero transition, i.e. when
3757 the port is disabled, not when it's enabled. */
3758 hc->rh.wPortChange_1 |=
3759 ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_ENABLE))
3760 && !(r_usb_rh_port_status_1 & (1 << RH_PORT_ENABLE))) ?
3761 (1 << RH_PORT_ENABLE) : 0;
3762
3763 hc->rh.wPortChange_2 |=
3764 ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_ENABLE))
3765 && !(r_usb_rh_port_status_2 & (1 << RH_PORT_ENABLE))) ?
3766 (1 << RH_PORT_ENABLE) : 0;
3767
3768 /* C_PORT_SUSPEND is set to one when the device has transitioned out
3769 of the suspended state, i.e. when suspend goes from one to zero. */
3770 hc->rh.wPortChange_1 |=
3771 ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_SUSPEND))
3772 && !(r_usb_rh_port_status_1 & (1 << RH_PORT_SUSPEND))) ?
3773 (1 << RH_PORT_SUSPEND) : 0;
3774
3775 hc->rh.wPortChange_2 |=
3776 ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_SUSPEND))
3777 && !(r_usb_rh_port_status_2 & (1 << RH_PORT_SUSPEND))) ?
3778 (1 << RH_PORT_SUSPEND) : 0;
3779
3780
3781 /* C_PORT_RESET is set when reset processing on this port is complete. */
3782 hc->rh.wPortChange_1 |=
3783 ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_RESET))
3784 && !(r_usb_rh_port_status_1 & (1 << RH_PORT_RESET))) ?
3785 (1 << RH_PORT_RESET) : 0;
3786
3787 hc->rh.wPortChange_2 |=
3788 ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_RESET))
3789 && !(r_usb_rh_port_status_2 & (1 << RH_PORT_RESET))) ?
3790 (1 << RH_PORT_RESET) : 0;
3791
3792 /* Save the new values for next port status change. */
3793 hc->rh.prev_wPortStatus_1 = r_usb_rh_port_status_1;
3794 hc->rh.prev_wPortStatus_2 = r_usb_rh_port_status_2;
3795
3796 dbg_rh("hc->rh.wPortChange_1 set to 0x%x", hc->rh.wPortChange_1);
3797 dbg_rh("hc->rh.wPortChange_2 set to 0x%x", hc->rh.wPortChange_2);
3798
3799 DBFEXIT;
3800
3801}
3802
3803void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg)
3804{
3805 DBFENTER;
3806
3807 /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
3808 list for the corresponding epid? */
3809 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
3810 panic("USB controller got ourun.");
3811 }
3812 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
3813
3814 /* Before, etrax_usb_do_intr_recover was called on this epid if it was
3815 an interrupt pipe. I don't see how re-enabling all EP descriptors
3816 will help if there was a programming error. */
3817 panic("USB controller got perror.");
3818 }
3819
3820 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
3821 /* We should never operate in device mode. */
3822 panic("USB controller in device mode.");
3823 }
3824
3825 /* These if-statements could probably be nested. */
3826 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, host_mode)) {
3827 info("USB controller in host mode.");
3828 }
3829 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, started)) {
3830 info("USB controller started.");
3831 }
3832 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, running)) {
3833 info("USB controller running.");
3834 }
3835
3836 DBFEXIT;
3837
3838}
3839
3840
3841static int etrax_rh_submit_urb(struct urb *urb)
3842{
3843 struct usb_device *usb_dev = urb->dev;
3844 etrax_hc_t *hc = usb_dev->bus->hcpriv;
3845 unsigned int pipe = urb->pipe;
3846 struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
3847 void *data = urb->transfer_buffer;
3848 int leni = urb->transfer_buffer_length;
3849 int len = 0;
3850 int stat = 0;
3851
3852 __u16 bmRType_bReq;
3853 __u16 wValue;
3854 __u16 wIndex;
3855 __u16 wLength;
3856
3857 DBFENTER;
3858
3859 /* FIXME: What is this interrupt urb that is sent to the root hub? */
3860 if (usb_pipetype (pipe) == PIPE_INTERRUPT) {
3861 dbg_rh("Root-Hub submit IRQ: every %d ms", urb->interval);
3862 hc->rh.urb = urb;
3863 hc->rh.send = 1;
3864 /* FIXME: We could probably remove this line since it's done
3865 in etrax_rh_init_int_timer. (Don't remove it from
3866 etrax_rh_init_int_timer though.) */
3867 hc->rh.interval = urb->interval;
3868 etrax_rh_init_int_timer(urb);
3869 DBFEXIT;
3870
3871 return 0;
3872 }
3873
3874 bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8);
3875 wValue = le16_to_cpu(cmd->wValue);
3876 wIndex = le16_to_cpu(cmd->wIndex);
3877 wLength = le16_to_cpu(cmd->wLength);
3878
3879 dbg_rh("bmRType_bReq : 0x%04x (%d)", bmRType_bReq, bmRType_bReq);
3880 dbg_rh("wValue : 0x%04x (%d)", wValue, wValue);
3881 dbg_rh("wIndex : 0x%04x (%d)", wIndex, wIndex);
3882 dbg_rh("wLength : 0x%04x (%d)", wLength, wLength);
3883
3884 switch (bmRType_bReq) {
3885
3886	/* Request destination:
3887	   without flags: device,
3888	   RH_INTERFACE: interface,
3889	   RH_ENDPOINT: endpoint,
3890	   RH_CLASS means HUB here,
3891	   RH_OTHER | RH_CLASS almost always means HUB_PORT here.
3892	*/
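	/* For example, RH_GET_STATUS | RH_OTHER | RH_CLASS below is the hub-class GetPortStatus
	   request, with wIndex selecting port 1 or 2. */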
3893
3894 case RH_GET_STATUS:
3895 *(__u16 *) data = cpu_to_le16 (1);
3896 OK (2);
3897
3898 case RH_GET_STATUS | RH_INTERFACE:
3899 *(__u16 *) data = cpu_to_le16 (0);
3900 OK (2);
3901
3902 case RH_GET_STATUS | RH_ENDPOINT:
3903 *(__u16 *) data = cpu_to_le16 (0);
3904 OK (2);
3905
3906 case RH_GET_STATUS | RH_CLASS:
3907 *(__u32 *) data = cpu_to_le32 (0);
3908 OK (4); /* hub power ** */
3909
3910 case RH_GET_STATUS | RH_OTHER | RH_CLASS:
3911 if (wIndex == 1) {
3912 *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_1);
3913 *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_1);
3914 } else if (wIndex == 2) {
3915 *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_2);
3916 *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_2);
3917 } else {
3918			dbg_rh("RH_GET_STATUS with invalid wIndex!");
3919 OK(0);
3920 }
3921
3922 OK(4);
3923
3924 case RH_CLEAR_FEATURE | RH_ENDPOINT:
3925 switch (wValue) {
3926 case (RH_ENDPOINT_STALL):
3927 OK (0);
3928 }
3929 break;
3930
3931 case RH_CLEAR_FEATURE | RH_CLASS:
3932 switch (wValue) {
3933 case (RH_C_HUB_OVER_CURRENT):
3934 OK (0); /* hub power over current ** */
3935 }
3936 break;
3937
3938 case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
3939 switch (wValue) {
3940 case (RH_PORT_ENABLE):
3941 if (wIndex == 1) {
3942
3943				dbg_rh("trying to disable port 1");
3944
3945 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
3946
3947 while (hc->rh.prev_wPortStatus_1 &
3948 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes));
3949 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
3950 dbg_rh("Port 1 is disabled");
3951
3952 } else if (wIndex == 2) {
3953
3954				dbg_rh("trying to disable port 2");
3955
3956 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
3957
3958 while (hc->rh.prev_wPortStatus_2 &
3959 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes));
3960 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
3961 dbg_rh("Port 2 is disabled");
3962
3963 } else {
3964 dbg_rh("RH_CLEAR_FEATURE->RH_PORT_ENABLE "
3965 "with invalid wIndex == %d!", wIndex);
3966 }
3967
3968 OK (0);
3969 case (RH_PORT_SUSPEND):
3970 /* Opposite to suspend should be resume, so we'll do a resume. */
3971 /* FIXME: USB 1.1, 11.16.2.2 says:
3972 "Clearing the PORT_SUSPEND feature causes a host-initiated resume
3973 on the specified port. If the port is not in the Suspended state,
3974 the hub should treat this request as a functional no-operation."
3975 Shouldn't we check if the port is in a suspended state before
3976 resuming? */
3977
3978 /* Make sure the controller isn't busy. */
3979 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
3980
3981 if (wIndex == 1) {
3982 *R_USB_COMMAND =
3983 IO_STATE(R_USB_COMMAND, port_sel, port1) |
3984 IO_STATE(R_USB_COMMAND, port_cmd, resume) |
3985 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
3986 } else if (wIndex == 2) {
3987 *R_USB_COMMAND =
3988 IO_STATE(R_USB_COMMAND, port_sel, port2) |
3989 IO_STATE(R_USB_COMMAND, port_cmd, resume) |
3990 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
3991 } else {
3992 dbg_rh("RH_CLEAR_FEATURE->RH_PORT_SUSPEND "
3993 "with invalid wIndex == %d!", wIndex);
3994 }
3995
3996 OK (0);
3997 case (RH_PORT_POWER):
3998 OK (0); /* port power ** */
3999 case (RH_C_PORT_CONNECTION):
4000 if (wIndex == 1) {
4001 hc->rh.wPortChange_1 &= ~(1 << RH_PORT_CONNECTION);
4002 } else if (wIndex == 2) {
4003 hc->rh.wPortChange_2 &= ~(1 << RH_PORT_CONNECTION);
4004 } else {
4005 dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_CONNECTION "
4006 "with invalid wIndex == %d!", wIndex);
4007 }
4008
4009 OK (0);
4010 case (RH_C_PORT_ENABLE):
4011 if (wIndex == 1) {
4012 hc->rh.wPortChange_1 &= ~(1 << RH_PORT_ENABLE);
4013 } else if (wIndex == 2) {
4014 hc->rh.wPortChange_2 &= ~(1 << RH_PORT_ENABLE);
4015 } else {
4016 dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_ENABLE "
4017 "with invalid wIndex == %d!", wIndex);
4018 }
4019 OK (0);
4020 case (RH_C_PORT_SUSPEND):
4021/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
4022 OK (0);
4023 case (RH_C_PORT_OVER_CURRENT):
4024 OK (0); /* port power over current ** */
4025 case (RH_C_PORT_RESET):
4026 if (wIndex == 1) {
4027 hc->rh.wPortChange_1 &= ~(1 << RH_PORT_RESET);
4028 } else if (wIndex == 2) {
4029 hc->rh.wPortChange_2 &= ~(1 << RH_PORT_RESET);
4030 } else {
4031 dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_RESET "
4032				       "with invalid wIndex == %d!", wIndex);
4033 }
4034
4035 OK (0);
4036
4037 }
4038 break;
4039
4040 case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
4041 switch (wValue) {
4042 case (RH_PORT_SUSPEND):
4043
4044 /* Make sure the controller isn't busy. */
4045 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4046
4047 if (wIndex == 1) {
4048 *R_USB_COMMAND =
4049 IO_STATE(R_USB_COMMAND, port_sel, port1) |
4050 IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
4051 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4052 } else if (wIndex == 2) {
4053 *R_USB_COMMAND =
4054 IO_STATE(R_USB_COMMAND, port_sel, port2) |
4055 IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
4056 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4057 } else {
4058 dbg_rh("RH_SET_FEATURE->RH_PORT_SUSPEND "
4059 "with invalid wIndex == %d!", wIndex);
4060 }
4061
4062 OK (0);
4063 case (RH_PORT_RESET):
4064 if (wIndex == 1) {
4065
4066 port_1_reset:
4067 dbg_rh("Doing reset of port 1");
4068
4069 /* Make sure the controller isn't busy. */
4070 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4071
4072 *R_USB_COMMAND =
4073 IO_STATE(R_USB_COMMAND, port_sel, port1) |
4074 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4075 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4076
4077 /* We must wait at least 10 ms for the device to recover.
4078 15 ms should be enough. */
4079 udelay(15000);
4080
4081 /* Wait for reset bit to go low (should be done by now). */
4082 while (hc->rh.prev_wPortStatus_1 &
4083 IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes));
4084
4085 /* If the port status is
4086 1) connected and enabled then there is a device and everything is fine
4087 2) neither connected nor enabled then there is no device, also fine
4088 3) connected and not enabled then we try again
4089 (Yes, there are other port status combinations besides these.) */
4090
4091 if ((hc->rh.prev_wPortStatus_1 &
4092 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
4093 (hc->rh.prev_wPortStatus_1 &
4094 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
4095 dbg_rh("Connected device on port 1, but port not enabled?"
4096 " Trying reset again.");
4097					goto port_1_reset;
4098 }
4099
4100 /* Diagnostic printouts. */
4101 if ((hc->rh.prev_wPortStatus_1 &
4102 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, no)) &&
4103 (hc->rh.prev_wPortStatus_1 &
4104 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
4105 dbg_rh("No connected device on port 1");
4106 } else if ((hc->rh.prev_wPortStatus_1 &
4107 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
4108 (hc->rh.prev_wPortStatus_1 &
4109 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))) {
4110 dbg_rh("Connected device on port 1, port 1 enabled");
4111 }
4112
4113 } else if (wIndex == 2) {
4114
4115 port_2_reset:
4116 dbg_rh("Doing reset of port 2");
4117
4118 /* Make sure the controller isn't busy. */
4119 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4120
4121 /* Issue the reset command. */
4122 *R_USB_COMMAND =
4123 IO_STATE(R_USB_COMMAND, port_sel, port2) |
4124 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4125 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
4126
4127 /* We must wait at least 10 ms for the device to recover.
4128 15 ms should be enough. */
4129 udelay(15000);
4130
4131 /* Wait for reset bit to go low (should be done by now). */
4132 while (hc->rh.prev_wPortStatus_2 &
4133 IO_STATE(R_USB_RH_PORT_STATUS_2, reset, yes));
4134
4135 /* If the port status is
4136 1) connected and enabled then there is a device and everything is fine
4137 2) neither connected nor enabled then there is no device, also fine
4138 3) connected and not enabled then we try again
4139 (Yes, there are other port status combinations besides these.) */
4140
4141 if ((hc->rh.prev_wPortStatus_2 &
4142 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
4143 (hc->rh.prev_wPortStatus_2 &
4144 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
4145 dbg_rh("Connected device on port 2, but port not enabled?"
4146 " Trying reset again.");
4147 goto port_2_reset;
4148 }
4149
4150 /* Diagnostic printouts. */
4151 if ((hc->rh.prev_wPortStatus_2 &
4152 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, no)) &&
4153 (hc->rh.prev_wPortStatus_2 &
4154 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
4155 dbg_rh("No connected device on port 2");
4156 } else if ((hc->rh.prev_wPortStatus_2 &
4157 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
4158 (hc->rh.prev_wPortStatus_2 &
4159 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes))) {
4160 dbg_rh("Connected device on port 2, port 2 enabled");
4161 }
4162
4163 } else {
4164 dbg_rh("RH_SET_FEATURE->RH_PORT_RESET with invalid wIndex = %d", wIndex);
4165 }
4166
4167 /* Make sure the controller isn't busy. */
4168 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4169
4170 /* If all enabled ports were disabled the host controller goes down into
4171 started mode, so we need to bring it back into the running state.
4172 (This is safe even if it's already in the running state.) */
4173 *R_USB_COMMAND =
4174 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4175 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4176 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
4177
4178 dbg_rh("...Done");
4179 OK(0);
4180
4181 case (RH_PORT_POWER):
4182 OK (0); /* port power ** */
4183 case (RH_PORT_ENABLE):
4184 /* There is no port enable command in the host controller, so if the
4185 port is already enabled, we do nothing. If not, we reset the port
4186 (with an ugly goto). */
4187
4188 if (wIndex == 1) {
4189 if (hc->rh.prev_wPortStatus_1 &
4190 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no)) {
4191 goto port_1_reset;
4192 }
4193 } else if (wIndex == 2) {
4194 if (hc->rh.prev_wPortStatus_2 &
4195 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no)) {
4196 goto port_2_reset;
4197 }
4198 } else {
4199 dbg_rh("RH_SET_FEATURE->RH_GET_STATUS with invalid wIndex = %d", wIndex);
4200 }
4201 OK (0);
4202 }
4203 break;
4204
4205 case RH_SET_ADDRESS:
4206 hc->rh.devnum = wValue;
4207 dbg_rh("RH address set to: %d", hc->rh.devnum);
4208 OK (0);
4209
4210 case RH_GET_DESCRIPTOR:
4211 switch ((wValue & 0xff00) >> 8) {
4212 case (0x01): /* device descriptor */
4213 len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_dev_des), wLength));
4214 memcpy (data, root_hub_dev_des, len);
4215 OK (len);
4216 case (0x02): /* configuration descriptor */
4217 len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_config_des), wLength));
4218 memcpy (data, root_hub_config_des, len);
4219 OK (len);
4220 case (0x03): /* string descriptors */
4221 len = usb_root_hub_string (wValue & 0xff,
4222 0xff, "ETRAX 100LX",
4223 data, wLength);
4224 if (len > 0) {
4225 OK(min(leni, len));
4226 } else {
4227 stat = -EPIPE;
4228 }
4229
4230 }
4231 break;
4232
4233 case RH_GET_DESCRIPTOR | RH_CLASS:
4234 root_hub_hub_des[2] = hc->rh.numports;
4235 len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
4236 memcpy (data, root_hub_hub_des, len);
4237 OK (len);
4238
4239 case RH_GET_CONFIGURATION:
4240 *(__u8 *) data = 0x01;
4241 OK (1);
4242
4243 case RH_SET_CONFIGURATION:
4244 OK (0);
4245
4246 default:
4247 stat = -EPIPE;
4248 }
4249
4250 urb->actual_length = len;
4251 urb->status = stat;
4252 urb->dev = NULL;
4253 if (urb->complete) {
4254 urb->complete(urb, NULL);
4255 }
4256 DBFEXIT;
4257
4258 return 0;
4259}
4260
4261static void
4262etrax_usb_bulk_eot_timer_func(unsigned long dummy)
4263{
4264 /* Because of a race condition in the top half, we might miss a bulk eot.
4265 This timer "simulates" a bulk eot if we don't get one for a while, hopefully
4266 correcting the situation. */
4267 dbg_bulk("bulk_eot_timer timed out.");
4268 etrax_usb_hc_bulk_eot_interrupt(1);
4269}
4270
4271static void*
4272etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size,
4273 unsigned mem_flags, dma_addr_t *dma)
4274{
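	/* Plain kmalloc(); note that the caller's dma handle is left untouched here. */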
4275 return kmalloc(size, mem_flags);
4276}
4277
4278static void
4279etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma)
4280{
4281 kfree(addr);
4282}
4283
4284
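/* Dummy parent device: initialized and registered in etrax_usb_hc_init() below and
   installed as hc->bus->controller before usb_register_bus() is called. */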
4285static struct device fake_device;
4286
4287static int __init etrax_usb_hc_init(void)
4288{
4289 static etrax_hc_t *hc;
4290 struct usb_bus *bus;
4291 struct usb_device *usb_rh;
4292 int i;
4293
4294 DBFENTER;
4295
4296 info("ETRAX 100LX USB-HCD %s (c) 2001-2003 Axis Communications AB\n", usb_hcd_version);
4297
4298 hc = kmalloc(sizeof(etrax_hc_t), GFP_KERNEL);
4299 assert(hc != NULL);
4300
4301 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
4302 /* Note that we specify sizeof(USB_EP_Desc_t) as the size, but also allocate
4303 SB descriptors from this cache. This is ok since sizeof(USB_EP_Desc_t) ==
4304 sizeof(USB_SB_Desc_t). */
4305
4306 usb_desc_cache = kmem_cache_create("usb_desc_cache", sizeof(USB_EP_Desc_t), 0,
4307 SLAB_HWCACHE_ALIGN, 0, 0);
4308 assert(usb_desc_cache != NULL);
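	/* The size assumption above could also be checked at compile time; a minimal
	   sketch, assuming BUILD_BUG_ON() from <linux/kernel.h> is available in this tree: */
	BUILD_BUG_ON(sizeof(USB_EP_Desc_t) != sizeof(USB_SB_Desc_t));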
4309
4310 top_half_reg_cache = kmem_cache_create("top_half_reg_cache",
4311 sizeof(usb_interrupt_registers_t),
4312 0, SLAB_HWCACHE_ALIGN, 0, 0);
4313 assert(top_half_reg_cache != NULL);
4314
4315 isoc_compl_cache = kmem_cache_create("isoc_compl_cache",
4316 sizeof(usb_isoc_complete_data_t),
4317 0, SLAB_HWCACHE_ALIGN, 0, 0);
4318 assert(isoc_compl_cache != NULL);
4319
4320 etrax_usb_bus = bus = usb_alloc_bus(&etrax_usb_device_operations);
4321 hc->bus = bus;
4322 bus->bus_name="ETRAX 100LX";
4323 bus->hcpriv = hc;
4324
4325 /* Initialize RH to the default address.
4326 And make sure that we have no status change indication */
4327 hc->rh.numports = 2; /* The RH has two ports */
4328 hc->rh.devnum = 1;
4329 hc->rh.wPortChange_1 = 0;
4330 hc->rh.wPortChange_2 = 0;
4331
4332 /* Also initialize the previous values to zero */
4333 hc->rh.prev_wPortStatus_1 = 0;
4334 hc->rh.prev_wPortStatus_2 = 0;
4335
4336 /* Initialize the intr-traffic flags */
4337 /* FIXME: This isn't used. (Besides, the error field isn't initialized.) */
4338 hc->intr.sleeping = 0;
4339 hc->intr.wq = NULL;
4340
4341 epid_usage_bitmask = 0;
4342 epid_out_traffic = 0;
4343
4344 /* Mark the invalid epid as being used. */
4345 set_bit(INVALID_EPID, (void *)&epid_usage_bitmask);
4346 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, INVALID_EPID);
4347 nop();
4348 /* The valid bit should still be set ('invalid' is in our world; not the hardware's). */
4349 *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, yes) |
4350 IO_FIELD(R_USB_EPT_DATA, max_len, 1));
4351
4352 /* Mark the dummy epid as being used. */
4353 set_bit(DUMMY_EPID, (void *)&epid_usage_bitmask);
4354 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, DUMMY_EPID);
4355 nop();
4356 *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, no) |
4357 IO_FIELD(R_USB_EPT_DATA, max_len, 1));
4358
4359 /* Initialize the urb list by initiating a head for each list. */
4360 for (i = 0; i < NBR_OF_EPIDS; i++) {
4361 INIT_LIST_HEAD(&urb_list[i]);
4362 }
4363 spin_lock_init(&urb_list_lock);
4364
4365 INIT_LIST_HEAD(&urb_unlink_list);
4366
4367
4368 /* Initiate the bulk start timer. */
4369 init_timer(&bulk_start_timer);
4370 bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
4371 bulk_start_timer.function = etrax_usb_bulk_start_timer_func;
4372 add_timer(&bulk_start_timer);
4373
4374
4375 /* Initiate the bulk eot timer. */
4376 init_timer(&bulk_eot_timer);
4377 bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
4378 bulk_eot_timer.function = etrax_usb_bulk_eot_timer_func;
4379 add_timer(&bulk_eot_timer);
4380
4381 /* Set up the data structures for USB traffic. Note that this must be done before
4382 any interrupt that relies on a sane DMA list occurs. */
4383 init_rx_buffers();
4384 init_tx_bulk_ep();
4385 init_tx_ctrl_ep();
4386 init_tx_intr_ep();
4387 init_tx_isoc_ep();
4388
4389 device_initialize(&fake_device);
4390 kobject_set_name(&fake_device.kobj, "etrax_usb");
4391 kobject_add(&fake_device.kobj);
4392 kobject_uevent(&fake_device.kobj, KOBJ_ADD);
4393 hc->bus->controller = &fake_device;
4394 usb_register_bus(hc->bus);
4395
4396 *R_IRQ_MASK2_SET =
4397 /* Note that these interrupts are not used. */
4398 IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
4399 /* Sub channel 1 (ctrl) descr. interrupts are used. */
4400 IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
4401 IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
4402 /* Sub channel 3 (isoc) descr. interrupts are used. */
4403 IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
4404
4405 /* Note that the dma9_descr interrupt is not used. */
4406 *R_IRQ_MASK2_SET =
4407 IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
4408 IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
4409
4410 /* FIXME: Enable iso_eof only when isoc traffic is running. */
4411 *R_USB_IRQ_MASK_SET =
4412 IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) |
4413 IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
4414 IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
4415 IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
4416 IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
4417
4418
4419 if (request_irq(ETRAX_USB_HC_IRQ, etrax_usb_hc_interrupt_top_half, 0,
4420 "ETRAX 100LX built-in USB (HC)", hc)) {
4421 err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
4422 etrax_usb_hc_cleanup();
4423 DBFEXIT;
4424 return -1;
4425 }
4426
4427 if (request_irq(ETRAX_USB_RX_IRQ, etrax_usb_rx_interrupt, 0,
4428 "ETRAX 100LX built-in USB (Rx)", hc)) {
4429 err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
4430 etrax_usb_hc_cleanup();
4431 DBFEXIT;
4432 return -1;
4433 }
4434
4435 if (request_irq(ETRAX_USB_TX_IRQ, etrax_usb_tx_interrupt, 0,
4436 "ETRAX 100LX built-in USB (Tx)", hc)) {
4437 err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
4438 etrax_usb_hc_cleanup();
4439 DBFEXIT;
4440 return -1;
4441 }
4442
4443 /* R_USB_COMMAND:
4444 USB commands in host mode. The fields in this register should all be
4445 written to in one write. Do not read-modify-write one field at a time. A
4446 write to this register will trigger events in the USB controller and an
4447 incomplete command may lead to unpredictable results, and in the worst case
4448 even to a deadlock in the controller.
4449 (Note however that the busy field is read-only, so no need to write to it.) */
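	/* The rule above could be captured in one small helper; a sketch (the helper name
	   is illustrative, not something this driver defines):

	       static inline void etrax_usb_command_write(unsigned long cmd)
	       {
	               while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy))
	                       ;                     // wait out the previous command
	               *R_USB_COMMAND = cmd;         // one full write, never read-modify-write
	       }
	*/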
4450
4451 /* Check the busy bit before writing to R_USB_COMMAND. */
4452
4453 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4454
4455 /* Reset the USB interface. */
4456 *R_USB_COMMAND =
4457 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4458 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4459 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
4460
4461 /* Designer's Reference, p. 8 - 10 says we should initialize R_USB_FM_PSTART to 0x2A30 (10800),
4462 to guarantee that control traffic gets 10% of the bandwidth, and periodic transfers may
4463 allocate the rest (90%). This doesn't work though. Read on for a lengthy explanation.
4464
4465 While there is a difference between rev. 2 and rev. 3 of the ETRAX 100LX regarding the NAK
4466 behaviour, it doesn't solve this problem. What happens is that a control transfer will not
4467 be interrupted in its data stage when PSTART happens (the point at which periodic traffic
4468 is started). Thus, if PSTART is set to 10800 and its IN or OUT token is NAKed until just before
4469 PSTART happens, it will continue the IN/OUT transfer as long as it's ACKed. After it's done,
4470 there may be too little time left for an isochronous transfer, causing an epid attention
4471 interrupt due to perror. The work-around for this is to let the control transfers run at the
4472 end of the frame instead of at the beginning; they will be interrupted just fine if they don't
4473 fit into the frame. However, since there will *always* be a control transfer at the beginning
4474 of the frame, regardless of what we set PSTART to, that transfer might be a 64-byte transfer
4475 which consumes up to 15% of the frame, leaving only 85% for periodic traffic. The solution to
4476 this would be to 'dummy allocate' 5% of the frame with the usb_claim_bandwidth function to make
4477 sure that the periodic transfers that are inserted will always fit in the frame.
4478
4479 It has been suggested that a control transfer could be split up into several 8-byte transfers,
4480 so that it would be interrupted by PSTART, but since this can't be done for an IN transfer this
4481 hasn't been implemented.
4482
4483 The value 11960 is chosen to be just after the SOF token, with a couple of bit times extra
4484 for possible bit stuffing. */
4485
4486 *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
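	/* Rough arithmetic behind the chosen value (assuming the usual 1 ms full-speed frame
	   of 12000 bit times): 12000 - 11960 = 40 bit times, i.e. roughly the 32-bit SOF token
	   plus EOP and the couple of bit times of stuffing margin mentioned above. */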
4487
4488#ifdef CONFIG_ETRAX_USB_HOST_PORT1
4489 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
4490#endif
4491
4492#ifdef CONFIG_ETRAX_USB_HOST_PORT2
4493 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
4494#endif
4495
4496 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4497
4498 /* Configure the USB interface as a host controller. */
4499 *R_USB_COMMAND =
4500 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4501 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4502 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
4503
4504 /* Note: Do not reset any ports here. Await the port status interrupts, to have a controlled
4505 sequence of resetting the ports. If we reset both ports now, and there are devices
4506 on both ports, we will get a bus error because both devices will answer the set address
4507 request. */
4508
4509 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4510
4511 /* Start processing of USB traffic. */
4512 *R_USB_COMMAND =
4513 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4514 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4515 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
4516
4517 while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
4518
4519 usb_rh = usb_alloc_dev(NULL, hc->bus, 0);
4520 hc->bus->root_hub = usb_rh;
4521 usb_rh->state = USB_STATE_ADDRESS;
4522 usb_rh->speed = USB_SPEED_FULL;
4523 usb_rh->devnum = 1;
4524 hc->bus->devnum_next = 2;
4525 usb_rh->ep0.desc.wMaxPacketSize = __const_cpu_to_le16(64);
4526 usb_get_device_descriptor(usb_rh, USB_DT_DEVICE_SIZE);
4527 usb_new_device(usb_rh);
4528
4529 DBFEXIT;
4530
4531 return 0;
4532}
4533
4534static void etrax_usb_hc_cleanup(void)
4535{
4536 DBFENTER;
4537
4538 free_irq(ETRAX_USB_HC_IRQ, NULL);
4539 free_irq(ETRAX_USB_RX_IRQ, NULL);
4540 free_irq(ETRAX_USB_TX_IRQ, NULL);
4541
4542 usb_deregister_bus(etrax_usb_bus);
4543
4544 /* FIXME: call kmem_cache_destroy here? */
4545
4546 DBFEXIT;
4547}
4548
4549module_init(etrax_usb_hc_init);
4550module_exit(etrax_usb_hc_cleanup);
diff --git a/drivers/usb/host/hc_crisv10.h b/drivers/usb/host/hc_crisv10.h
deleted file mode 100644
index 62f77111d418..000000000000
--- a/drivers/usb/host/hc_crisv10.h
+++ /dev/null
@@ -1,289 +0,0 @@
1#ifndef __LINUX_ETRAX_USB_H
2#define __LINUX_ETRAX_USB_H
3
4#include <linux/types.h>
5#include <linux/list.h>
6
7typedef struct USB_IN_Desc {
8 volatile __u16 sw_len;
9 volatile __u16 command;
10 volatile unsigned long next;
11 volatile unsigned long buf;
12 volatile __u16 hw_len;
13 volatile __u16 status;
14} USB_IN_Desc_t;
15
16typedef struct USB_SB_Desc {
17 volatile __u16 sw_len;
18 volatile __u16 command;
19 volatile unsigned long next;
20 volatile unsigned long buf;
21 __u32 dummy;
22} USB_SB_Desc_t;
23
24typedef struct USB_EP_Desc {
25 volatile __u16 hw_len;
26 volatile __u16 command;
27 volatile unsigned long sub;
28 volatile unsigned long next;
29 __u32 dummy;
30} USB_EP_Desc_t;
31
32struct virt_root_hub {
33 int devnum;
34 void *urb;
35 void *int_addr;
36 int send;
37 int interval;
38 int numports;
39 struct timer_list rh_int_timer;
40 volatile __u16 wPortChange_1;
41 volatile __u16 wPortChange_2;
42 volatile __u16 prev_wPortStatus_1;
43 volatile __u16 prev_wPortStatus_2;
44};
45
46struct etrax_usb_intr_traffic {
47 int sleeping;
48 int error;
49 struct wait_queue *wq;
50};
51
52typedef struct etrax_usb_hc {
53 struct usb_bus *bus;
54 struct virt_root_hub rh;
55 struct etrax_usb_intr_traffic intr;
56} etrax_hc_t;
57
58typedef enum {
59 STARTED,
60 NOT_STARTED,
61 UNLINK,
62 TRANSFER_DONE,
63 WAITING_FOR_DESCR_INTR
64} etrax_usb_urb_state_t;
65
66
67
68typedef struct etrax_usb_urb_priv {
69 /* The first_sb field is used for freeing all SB descriptors belonging
70 to an urb. The corresponding ep descriptor's sub pointer cannot be
71 used for this since the DMA advances the sub pointer as it processes
72 the sb list. */
73 USB_SB_Desc_t *first_sb;
 74 /* The last_sb field refers to the last SB descriptor that belongs to
75 this urb. This is important to know so we can free the SB descriptors
 76 that range between first_sb and last_sb. */
77 USB_SB_Desc_t *last_sb;
78
79 /* The rx_offset field is used in ctrl and bulk traffic to keep track
80 of the offset in the urb's transfer_buffer where incoming data should be
81 copied to. */
82 __u32 rx_offset;
83
84 /* Counter used in isochronous transfers to keep track of the
85 number of packets received/transmitted. */
86 __u32 isoc_packet_counter;
87
88 /* This field is used to pass information about the urb's current state between
89 the various interrupt handlers (thus marked volatile). */
90 volatile etrax_usb_urb_state_t urb_state;
91
92 /* Connection between the submitted urb and ETRAX epid number */
93 __u8 epid;
94
95 /* The rx_data_list field is used for periodic traffic, to hold
 96 received data for later processing in the complete_urb functions,
 97 where the data is copied to the urb's transfer_buffer. Basically, we
98 use this intermediate storage because we don't know when it's safe to
99 reuse the transfer_buffer (FIXME?). */
100 struct list_head rx_data_list;
101} etrax_urb_priv_t;
102
103/* This struct is for passing data from the top half to the bottom half. */
104typedef struct usb_interrupt_registers
105{
106 etrax_hc_t *hc;
107 __u32 r_usb_epid_attn;
108 __u8 r_usb_status;
109 __u16 r_usb_rh_port_status_1;
110 __u16 r_usb_rh_port_status_2;
111 __u32 r_usb_irq_mask_read;
112 __u32 r_usb_fm_number;
113 struct work_struct usb_bh;
114} usb_interrupt_registers_t;
115
116/* This struct is for passing data from the isoc top half to the isoc bottom half. */
117typedef struct usb_isoc_complete_data
118{
119 struct urb *urb;
120 struct work_struct usb_bh;
121} usb_isoc_complete_data_t;
122
123/* This struct holds data we get from the rx descriptors for DMA channel 9
124 for periodic traffic (intr and isoc). */
125typedef struct rx_data
126{
127 void *data;
128 int length;
129 struct list_head list;
130} rx_data_t;
131
132typedef struct urb_entry
133{
134 struct urb *urb;
135 struct list_head list;
136} urb_entry_t;
137
138/* ---------------------------------------------------------------------------
139 Virtual Root HUB
140 ------------------------------------------------------------------------- */
141/* destination of request */
142#define RH_INTERFACE 0x01
143#define RH_ENDPOINT 0x02
144#define RH_OTHER 0x03
145
146#define RH_CLASS 0x20
147#define RH_VENDOR 0x40
148
149/* Requests: bRequest << 8 | bmRequestType */
150#define RH_GET_STATUS 0x0080
151#define RH_CLEAR_FEATURE 0x0100
152#define RH_SET_FEATURE 0x0300
153#define RH_SET_ADDRESS 0x0500
154#define RH_GET_DESCRIPTOR 0x0680
155#define RH_SET_DESCRIPTOR 0x0700
156#define RH_GET_CONFIGURATION 0x0880
157#define RH_SET_CONFIGURATION 0x0900
158#define RH_GET_STATE 0x0280
159#define RH_GET_INTERFACE 0x0A80
160#define RH_SET_INTERFACE 0x0B00
161#define RH_SYNC_FRAME 0x0C80
162/* Our Vendor Specific Request */
163#define RH_SET_EP 0x2000
164
165
166/* Hub port features */
167#define RH_PORT_CONNECTION 0x00
168#define RH_PORT_ENABLE 0x01
169#define RH_PORT_SUSPEND 0x02
170#define RH_PORT_OVER_CURRENT 0x03
171#define RH_PORT_RESET 0x04
172#define RH_PORT_POWER 0x08
173#define RH_PORT_LOW_SPEED 0x09
174#define RH_C_PORT_CONNECTION 0x10
175#define RH_C_PORT_ENABLE 0x11
176#define RH_C_PORT_SUSPEND 0x12
177#define RH_C_PORT_OVER_CURRENT 0x13
178#define RH_C_PORT_RESET 0x14
179
180/* Hub features */
181#define RH_C_HUB_LOCAL_POWER 0x00
182#define RH_C_HUB_OVER_CURRENT 0x01
183
184#define RH_DEVICE_REMOTE_WAKEUP 0x00
185#define RH_ENDPOINT_STALL 0x01
186
187/* Our Vendor Specific feature */
188#define RH_REMOVE_EP 0x00
189
190
191#define RH_ACK 0x01
192#define RH_REQ_ERR -1
193#define RH_NACK 0x00
194
195 /* Field definitions for the USB_IN/USB_EP/USB_SB descriptor command and status words. */
196
197#define USB_IN_command__eol__BITNR 0 /* command macros */
198#define USB_IN_command__eol__WIDTH 1
199#define USB_IN_command__eol__no 0
200#define USB_IN_command__eol__yes 1
201
202#define USB_IN_command__intr__BITNR 3
203#define USB_IN_command__intr__WIDTH 1
204#define USB_IN_command__intr__no 0
205#define USB_IN_command__intr__yes 1
206
207#define USB_IN_status__eop__BITNR 1 /* status macros. */
208#define USB_IN_status__eop__WIDTH 1
209#define USB_IN_status__eop__no 0
210#define USB_IN_status__eop__yes 1
211
212#define USB_IN_status__eot__BITNR 5
213#define USB_IN_status__eot__WIDTH 1
214#define USB_IN_status__eot__no 0
215#define USB_IN_status__eot__yes 1
216
217#define USB_IN_status__error__BITNR 6
218#define USB_IN_status__error__WIDTH 1
219#define USB_IN_status__error__no 0
220#define USB_IN_status__error__yes 1
221
222#define USB_IN_status__nodata__BITNR 7
223#define USB_IN_status__nodata__WIDTH 1
224#define USB_IN_status__nodata__no 0
225#define USB_IN_status__nodata__yes 1
226
227#define USB_IN_status__epid__BITNR 8
228#define USB_IN_status__epid__WIDTH 5
229
230#define USB_EP_command__eol__BITNR 0
231#define USB_EP_command__eol__WIDTH 1
232#define USB_EP_command__eol__no 0
233#define USB_EP_command__eol__yes 1
234
235#define USB_EP_command__eof__BITNR 1
236#define USB_EP_command__eof__WIDTH 1
237#define USB_EP_command__eof__no 0
238#define USB_EP_command__eof__yes 1
239
240#define USB_EP_command__intr__BITNR 3
241#define USB_EP_command__intr__WIDTH 1
242#define USB_EP_command__intr__no 0
243#define USB_EP_command__intr__yes 1
244
245#define USB_EP_command__enable__BITNR 4
246#define USB_EP_command__enable__WIDTH 1
247#define USB_EP_command__enable__no 0
248#define USB_EP_command__enable__yes 1
249
250#define USB_EP_command__hw_valid__BITNR 5
251#define USB_EP_command__hw_valid__WIDTH 1
252#define USB_EP_command__hw_valid__no 0
253#define USB_EP_command__hw_valid__yes 1
254
255#define USB_EP_command__epid__BITNR 8
256#define USB_EP_command__epid__WIDTH 5
257
258#define USB_SB_command__eol__BITNR 0 /* command macros. */
259#define USB_SB_command__eol__WIDTH 1
260#define USB_SB_command__eol__no 0
261#define USB_SB_command__eol__yes 1
262
263#define USB_SB_command__eot__BITNR 1
264#define USB_SB_command__eot__WIDTH 1
265#define USB_SB_command__eot__no 0
266#define USB_SB_command__eot__yes 1
267
268#define USB_SB_command__intr__BITNR 3
269#define USB_SB_command__intr__WIDTH 1
270#define USB_SB_command__intr__no 0
271#define USB_SB_command__intr__yes 1
272
273#define USB_SB_command__tt__BITNR 4
274#define USB_SB_command__tt__WIDTH 2
275#define USB_SB_command__tt__zout 0
276#define USB_SB_command__tt__in 1
277#define USB_SB_command__tt__out 2
278#define USB_SB_command__tt__setup 3
279
280
281#define USB_SB_command__rem__BITNR 8
282#define USB_SB_command__rem__WIDTH 6
283
284#define USB_SB_command__full__BITNR 6
285#define USB_SB_command__full__WIDTH 1
286#define USB_SB_command__full__no 0
287#define USB_SB_command__full__yes 1
288
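/* These __BITNR/__WIDTH/value definitions appear to follow the same convention as the
   IO_STATE()/IO_FIELD() helpers used elsewhere in the driver; composing a word by hand
   would look like the following sketch (illustrative only):

       __u16 cmd = (USB_SB_command__tt__in    << USB_SB_command__tt__BITNR) |
                   (USB_SB_command__intr__yes << USB_SB_command__intr__BITNR) |
                   (USB_SB_command__eol__yes  << USB_SB_command__eol__BITNR);
*/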
289#endif
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index f0d29eda3c6d..e8bbe8bc2598 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -486,9 +486,6 @@ static int ohci_run (struct ohci_hcd *ohci)
486 * or if bus glue did the same (e.g. for PCI add-in cards with 486 * or if bus glue did the same (e.g. for PCI add-in cards with
487 * PCI PM support). 487 * PCI PM support).
488 */ 488 */
489 ohci_dbg (ohci, "resetting from state '%s', control = 0x%x\n",
490 hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
491 ohci_readl (ohci, &ohci->regs->control));
492 if ((ohci->hc_control & OHCI_CTRL_RWC) != 0 489 if ((ohci->hc_control & OHCI_CTRL_RWC) != 0
493 && !device_may_wakeup(hcd->self.controller)) 490 && !device_may_wakeup(hcd->self.controller))
494 device_init_wakeup(hcd->self.controller, 1); 491 device_init_wakeup(hcd->self.controller, 1);
@@ -744,9 +741,6 @@ static void ohci_stop (struct usb_hcd *hcd)
744{ 741{
745 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 742 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
746 743
747 ohci_dbg (ohci, "stop %s controller (state 0x%02x)\n",
748 hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
749 hcd->state);
750 ohci_dump (ohci, 1); 744 ohci_dump (ohci, 1);
751 745
752 flush_scheduled_work(); 746 flush_scheduled_work();
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index b331ac4d0d62..79705609fd0c 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -20,10 +20,16 @@
20 20
21/*-------------------------------------------------------------------------*/ 21/*-------------------------------------------------------------------------*/
22 22
23static int broken_suspend(struct usb_hcd *hcd)
24{
25 device_init_wakeup(&hcd->self.root_hub->dev, 0);
26 return 0;
27}
28
23/* AMD 756, for most chips (early revs), corrupts register 29/* AMD 756, for most chips (early revs), corrupts register
24 * values on read ... so enable the vendor workaround. 30 * values on read ... so enable the vendor workaround.
25 */ 31 */
26static int __devinit ohci_quirk_amd756(struct usb_hcd *hcd) 32static int ohci_quirk_amd756(struct usb_hcd *hcd)
27{ 33{
28 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 34 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
29 35
@@ -31,16 +37,14 @@ static int __devinit ohci_quirk_amd756(struct usb_hcd *hcd)
31 ohci_dbg (ohci, "AMD756 erratum 4 workaround\n"); 37 ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
32 38
33 /* also erratum 10 (suspend/resume issues) */ 39 /* also erratum 10 (suspend/resume issues) */
34 device_init_wakeup(&hcd->self.root_hub->dev, 0); 40 return broken_suspend(hcd);
35
36 return 0;
37} 41}
38 42
39/* Apple's OHCI driver has a lot of bizarre workarounds 43/* Apple's OHCI driver has a lot of bizarre workarounds
40 * for this chip. Evidently control and bulk lists 44 * for this chip. Evidently control and bulk lists
41 * can get confused. (B&W G3 models, and ...) 45 * can get confused. (B&W G3 models, and ...)
42 */ 46 */
43static int __devinit ohci_quirk_opti(struct usb_hcd *hcd) 47static int ohci_quirk_opti(struct usb_hcd *hcd)
44{ 48{
45 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 49 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
46 50
@@ -53,7 +57,7 @@ static int __devinit ohci_quirk_opti(struct usb_hcd *hcd)
53 * identify the USB (fn2). This quirk might apply to more or 57 * identify the USB (fn2). This quirk might apply to more or
54 * even all NSC stuff. 58 * even all NSC stuff.
55 */ 59 */
56static int __devinit ohci_quirk_ns(struct usb_hcd *hcd) 60static int ohci_quirk_ns(struct usb_hcd *hcd)
57{ 61{
58 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 62 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
59 struct pci_dev *b; 63 struct pci_dev *b;
@@ -75,7 +79,7 @@ static int __devinit ohci_quirk_ns(struct usb_hcd *hcd)
75 * delays before control or bulk queues get re-activated 79 * delays before control or bulk queues get re-activated
76 * in finish_unlinks() 80 * in finish_unlinks()
77 */ 81 */
78static int __devinit ohci_quirk_zfmicro(struct usb_hcd *hcd) 82static int ohci_quirk_zfmicro(struct usb_hcd *hcd)
79{ 83{
80 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 84 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
81 85
@@ -88,7 +92,7 @@ static int __devinit ohci_quirk_zfmicro(struct usb_hcd *hcd)
88/* Check for Toshiba SCC OHCI which has big endian registers 92/* Check for Toshiba SCC OHCI which has big endian registers
89 * and little endian in memory data structures 93 * and little endian in memory data structures
90 */ 94 */
91static int __devinit ohci_quirk_toshiba_scc(struct usb_hcd *hcd) 95static int ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
92{ 96{
93 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 97 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
94 98
@@ -129,6 +133,18 @@ static const struct pci_device_id ohci_pci_quirks[] = {
129 PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, 0x01b6), 133 PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, 0x01b6),
130 .driver_data = (unsigned long)ohci_quirk_toshiba_scc, 134 .driver_data = (unsigned long)ohci_quirk_toshiba_scc,
131 }, 135 },
136 {
137 /* Toshiba portege 4000 */
138 .vendor = PCI_VENDOR_ID_AL,
139 .device = 0x5237,
140 .subvendor = PCI_VENDOR_ID_TOSHIBA_2,
141 .subdevice = 0x0004,
142 .driver_data = (unsigned long) broken_suspend,
143 },
144 {
145 PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
146 .driver_data = (unsigned long) broken_suspend,
147 },
132 /* FIXME for some of the early AMD 760 southbridges, OHCI 148 /* FIXME for some of the early AMD 760 southbridges, OHCI
133 * won't work at all. blacklist them. 149 * won't work at all. blacklist them.
134 */ 150 */
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 19a0cc02b9a2..4aed305982ec 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -123,10 +123,14 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
123 123
124static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) 124static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
125{ 125{
126 if (!list_empty(&td->list)) 126 if (!list_empty(&td->list)) {
127 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td); 127 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
128 if (!list_empty(&td->fl_list)) 128 WARN_ON(1);
129 }
130 if (!list_empty(&td->fl_list)) {
129 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td); 131 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
132 WARN_ON(1);
133 }
130 134
131 dma_pool_free(uhci->td_pool, td, td->dma_handle); 135 dma_pool_free(uhci->td_pool, td, td->dma_handle);
132} 136}
@@ -291,8 +295,10 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
291static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) 295static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
292{ 296{
293 WARN_ON(qh->state != QH_STATE_IDLE && qh->udev); 297 WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
294 if (!list_empty(&qh->queue)) 298 if (!list_empty(&qh->queue)) {
295 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh); 299 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
300 WARN_ON(1);
301 }
296 302
297 list_del(&qh->node); 303 list_del(&qh->node);
298 if (qh->udev) { 304 if (qh->udev) {
@@ -740,9 +746,11 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
740{ 746{
741 struct uhci_td *td, *tmp; 747 struct uhci_td *td, *tmp;
742 748
743 if (!list_empty(&urbp->node)) 749 if (!list_empty(&urbp->node)) {
744 dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n", 750 dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
745 urbp->urb); 751 urbp->urb);
752 WARN_ON(1);
753 }
746 754
747 list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { 755 list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
748 uhci_remove_td_from_urbp(td); 756 uhci_remove_td_from_urbp(td);
diff --git a/drivers/usb/input/ati_remote2.c b/drivers/usb/input/ati_remote2.c
index 83f1f79db7c7..6459be90599c 100644
--- a/drivers/usb/input/ati_remote2.c
+++ b/drivers/usb/input/ati_remote2.c
@@ -2,6 +2,7 @@
2 * ati_remote2 - ATI/Philips USB RF remote driver 2 * ati_remote2 - ATI/Philips USB RF remote driver
3 * 3 *
4 * Copyright (C) 2005 Ville Syrjala <syrjala@sci.fi> 4 * Copyright (C) 2005 Ville Syrjala <syrjala@sci.fi>
5 * Copyright (C) 2007 Peter Stokes <linux@dadeos.freeserve.co.uk>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 8 * it under the terms of the GNU General Public License version 2
@@ -11,13 +12,29 @@
11#include <linux/usb/input.h> 12#include <linux/usb/input.h>
12 13
13#define DRIVER_DESC "ATI/Philips USB RF remote driver" 14#define DRIVER_DESC "ATI/Philips USB RF remote driver"
14#define DRIVER_VERSION "0.1" 15#define DRIVER_VERSION "0.2"
15 16
16MODULE_DESCRIPTION(DRIVER_DESC); 17MODULE_DESCRIPTION(DRIVER_DESC);
17MODULE_VERSION(DRIVER_VERSION); 18MODULE_VERSION(DRIVER_VERSION);
18MODULE_AUTHOR("Ville Syrjala <syrjala@sci.fi>"); 19MODULE_AUTHOR("Ville Syrjala <syrjala@sci.fi>");
19MODULE_LICENSE("GPL"); 20MODULE_LICENSE("GPL");
20 21
22/*
23 * ATI Remote Wonder II Channel Configuration
24 *
 25 * The remote control can be assigned one of sixteen "channels" in order to facilitate
26 * the use of multiple remote controls within range of each other.
27 * A remote's "channel" may be altered by pressing and holding the "PC" button for
28 * approximately 3 seconds, after which the button will slowly flash the count of the
 29 * currently configured "channel". Using the numeric keypad, enter a number between 1 and
 30 * 16 and then press the "PC" button again; the button will slowly flash the count of the
31 * newly configured "channel".
32 */
33
34static unsigned int channel_mask = 0xFFFF;
35module_param(channel_mask, uint, 0644);
36MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>");
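/* Worked example: channel_mask = 0x0001 accepts only remote channel 1, 0x0005 accepts
   channels 1 and 3, and the default 0xFFFF accepts all sixteen. When exactly one bit is
   set, ati_remote2_setup() further down also programs the receiver itself to listen only
   to that channel. */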
37
21static unsigned int mode_mask = 0x1F; 38static unsigned int mode_mask = 0x1F;
22module_param(mode_mask, uint, 0644); 39module_param(mode_mask, uint, 0644);
23MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); 40MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>");
@@ -146,15 +163,23 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2)
146{ 163{
147 struct input_dev *idev = ar2->idev; 164 struct input_dev *idev = ar2->idev;
148 u8 *data = ar2->buf[0]; 165 u8 *data = ar2->buf[0];
166 int channel, mode;
167
168 channel = data[0] >> 4;
169
170 if (!((1 << channel) & channel_mask))
171 return;
149 172
150 if (data[0] > 4) { 173 mode = data[0] & 0x0F;
174
175 if (mode > 4) {
151 dev_err(&ar2->intf[0]->dev, 176 dev_err(&ar2->intf[0]->dev,
152 "Unknown mode byte (%02x %02x %02x %02x)\n", 177 "Unknown mode byte (%02x %02x %02x %02x)\n",
153 data[3], data[2], data[1], data[0]); 178 data[3], data[2], data[1], data[0]);
154 return; 179 return;
155 } 180 }
156 181
157 if (!((1 << data[0]) & mode_mask)) 182 if (!((1 << mode) & mode_mask))
158 return; 183 return;
159 184
160 input_event(idev, EV_REL, REL_X, (s8) data[1]); 185 input_event(idev, EV_REL, REL_X, (s8) data[1]);
@@ -177,9 +202,16 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2)
177{ 202{
178 struct input_dev *idev = ar2->idev; 203 struct input_dev *idev = ar2->idev;
179 u8 *data = ar2->buf[1]; 204 u8 *data = ar2->buf[1];
180 int hw_code, index; 205 int channel, mode, hw_code, index;
206
207 channel = data[0] >> 4;
208
209 if (!((1 << channel) & channel_mask))
210 return;
181 211
182 if (data[0] > 4) { 212 mode = data[0] & 0x0F;
213
214 if (mode > 4) {
183 dev_err(&ar2->intf[1]->dev, 215 dev_err(&ar2->intf[1]->dev,
184 "Unknown mode byte (%02x %02x %02x %02x)\n", 216 "Unknown mode byte (%02x %02x %02x %02x)\n",
185 data[3], data[2], data[1], data[0]); 217 data[3], data[2], data[1], data[0]);
@@ -199,16 +231,16 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2)
199 * events for the mouse pad so we filter out any subsequent 231 * events for the mouse pad so we filter out any subsequent
200 * events from the same mode key. 232 * events from the same mode key.
201 */ 233 */
202 if (ar2->mode == data[0]) 234 if (ar2->mode == mode)
203 return; 235 return;
204 236
205 if (data[1] == 0) 237 if (data[1] == 0)
206 ar2->mode = data[0]; 238 ar2->mode = mode;
207 239
208 hw_code |= data[0] << 8; 240 hw_code |= mode << 8;
209 } 241 }
210 242
211 if (!((1 << data[0]) & mode_mask)) 243 if (!((1 << mode) & mode_mask))
212 return; 244 return;
213 245
214 index = ati_remote2_lookup(hw_code); 246 index = ati_remote2_lookup(hw_code);
@@ -379,6 +411,41 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2)
379 } 411 }
380} 412}
381 413
414static int ati_remote2_setup(struct ati_remote2 *ar2)
415{
416 int r, i, channel;
417
418 /*
419 * Configure receiver to only accept input from remote "channel"
420 * channel == 0 -> Accept input from any remote channel
421 * channel == 1 -> Only accept input from remote channel 1
422 * channel == 2 -> Only accept input from remote channel 2
423 * ...
424 * channel == 16 -> Only accept input from remote channel 16
425 */
426
427 channel = 0;
428 for (i = 0; i < 16; i++) {
429 if ((1 << i) & channel_mask) {
430 if (!(~(1 << i) & 0xFFFF & channel_mask))
431 channel = i + 1;
432 break;
433 }
434 }
435
436 r = usb_control_msg(ar2->udev, usb_sndctrlpipe(ar2->udev, 0),
437 0x20,
438 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
439 channel, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT);
440 if (r) {
441 dev_err(&ar2->udev->dev, "%s - failed to set channel due to error: %d\n",
442 __FUNCTION__, r);
443 return r;
444 }
445
446 return 0;
447}
448
382static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id) 449static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id)
383{ 450{
384 struct usb_device *udev = interface_to_usbdev(interface); 451 struct usb_device *udev = interface_to_usbdev(interface);
@@ -409,6 +476,10 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
409 if (r) 476 if (r)
410 goto fail2; 477 goto fail2;
411 478
479 r = ati_remote2_setup(ar2);
480 if (r)
481 goto fail2;
482
412 usb_make_path(udev, ar2->phys, sizeof(ar2->phys)); 483 usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
413 strlcat(ar2->phys, "/input0", sizeof(ar2->phys)); 484 strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
414 485
diff --git a/drivers/usb/input/gtco.c b/drivers/usb/input/gtco.c
index 203cdc1bbba4..ae756e0afc99 100644
--- a/drivers/usb/input/gtco.c
+++ b/drivers/usb/input/gtco.c
@@ -1047,13 +1047,10 @@ static void gtco_disconnect(struct usb_interface *interface)
1047 1047
1048 /* Grab private device ptr */ 1048 /* Grab private device ptr */
1049 struct gtco *device = usb_get_intfdata (interface); 1049 struct gtco *device = usb_get_intfdata (interface);
1050 struct input_dev *inputdev;
1051
1052 inputdev = device->inputdevice;
1053 1050
1054 /* Now reverse all the registration stuff */ 1051 /* Now reverse all the registration stuff */
1055 if (device) { 1052 if (device) {
1056 input_unregister_device(inputdev); 1053 input_unregister_device(device->inputdevice);
1057 usb_kill_urb(device->urbinfo); 1054 usb_kill_urb(device->urbinfo);
1058 usb_free_urb(device->urbinfo); 1055 usb_free_urb(device->urbinfo);
1059 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE, 1056 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 75bfab95ab3c..77145f9db043 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -285,23 +285,24 @@ static int adu_open(struct inode *inode, struct file *file)
285 /* save device in the file's private structure */ 285 /* save device in the file's private structure */
286 file->private_data = dev; 286 file->private_data = dev;
287 287
288 /* initialize in direction */ 288 if (dev->open_count == 1) {
289 dev->read_buffer_length = 0; 289 /* initialize in direction */
290 290 dev->read_buffer_length = 0;
291 /* fixup first read by having urb waiting for it */
292 usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
293 usb_rcvintpipe(dev->udev,
294 dev->interrupt_in_endpoint->bEndpointAddress),
295 dev->interrupt_in_buffer,
296 le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
297 adu_interrupt_in_callback, dev,
298 dev->interrupt_in_endpoint->bInterval);
299 /* dev->interrupt_in_urb->transfer_flags |= URB_ASYNC_UNLINK; */
300 dev->read_urb_finished = 0;
301 usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
302 /* we ignore failure */
303 /* end of fixup for first read */
304 291
292 /* fixup first read by having urb waiting for it */
293 usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
294 usb_rcvintpipe(dev->udev,
295 dev->interrupt_in_endpoint->bEndpointAddress),
296 dev->interrupt_in_buffer,
297 le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize),
298 adu_interrupt_in_callback, dev,
299 dev->interrupt_in_endpoint->bInterval);
300 /* dev->interrupt_in_urb->transfer_flags |= URB_ASYNC_UNLINK; */
301 dev->read_urb_finished = 0;
302 retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
303 if (retval)
304 --dev->open_count;
305 }
305 up(&dev->sem); 306 up(&dev->sem);
306 307
307exit_no_device: 308exit_no_device:
@@ -469,7 +470,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
469 adu_interrupt_in_callback, 470 adu_interrupt_in_callback,
470 dev, 471 dev,
471 dev->interrupt_in_endpoint->bInterval); 472 dev->interrupt_in_endpoint->bInterval);
472 retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL); 473 retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
473 if (!retval) { 474 if (!retval) {
474 spin_unlock_irqrestore(&dev->buflock, flags); 475 spin_unlock_irqrestore(&dev->buflock, flags);
475 dbg(2," %s : submitted OK", __FUNCTION__); 476 dbg(2," %s : submitted OK", __FUNCTION__);
@@ -539,7 +540,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
539 size_t bytes_written = 0; 540 size_t bytes_written = 0;
540 size_t bytes_to_write; 541 size_t bytes_to_write;
541 size_t buffer_size; 542 size_t buffer_size;
542 int retval = 0; 543 int retval;
543 int timeout = 0; 544 int timeout = 0;
544 545
545 dbg(2," %s : enter, count = %Zd", __FUNCTION__, count); 546 dbg(2," %s : enter, count = %Zd", __FUNCTION__, count);
@@ -547,7 +548,9 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
547 dev = file->private_data; 548 dev = file->private_data;
548 549
549 /* lock this object */ 550 /* lock this object */
550 down_interruptible(&dev->sem); 551 retval = down_interruptible(&dev->sem);
552 if (retval)
553 goto exit_nolock;
551 554
552 /* verify that the device wasn't unplugged */ 555 /* verify that the device wasn't unplugged */
553 if (dev->udev == NULL || dev->minor == 0) { 556 if (dev->udev == NULL || dev->minor == 0) {
@@ -575,7 +578,11 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
575 } 578 }
576 up(&dev->sem); 579 up(&dev->sem);
577 timeout = interruptible_sleep_on_timeout(&dev->write_wait, timeout); 580 timeout = interruptible_sleep_on_timeout(&dev->write_wait, timeout);
578 down_interruptible(&dev->sem); 581 retval = down_interruptible(&dev->sem);
582 if (retval) {
583 retval = bytes_written ? bytes_written : retval;
584 goto exit_nolock;
585 }
579 if (timeout > 0) { 586 if (timeout > 0) {
580 break; 587 break;
581 } 588 }
@@ -637,6 +644,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
637exit: 644exit:
638 /* unlock the device */ 645 /* unlock the device */
639 up(&dev->sem); 646 up(&dev->sem);
647exit_nolock:
640 648
641 dbg(2," %s : leave, return value %d", __FUNCTION__, retval); 649 dbg(2," %s : leave, return value %d", __FUNCTION__, retval);
642 650
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index b63b5f34b2aa..d721380b242d 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -246,11 +246,13 @@ static void cypress_disconnect(struct usb_interface *interface)
246 struct cypress *dev; 246 struct cypress *dev;
247 247
248 dev = usb_get_intfdata(interface); 248 dev = usb_get_intfdata(interface);
249 usb_set_intfdata(interface, NULL);
250 249
251 /* remove device attribute files */ 250 /* remove device attribute files */
252 device_remove_file(&interface->dev, &dev_attr_port0); 251 device_remove_file(&interface->dev, &dev_attr_port0);
253 device_remove_file(&interface->dev, &dev_attr_port1); 252 device_remove_file(&interface->dev, &dev_attr_port1);
253 /* the intfdata can be set to NULL only after the
254 * device files have been removed */
255 usb_set_intfdata(interface, NULL);
254 256
255 usb_put_dev(dev->udev); 257 usb_put_dev(dev->udev);
256 258
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index bc3327e3dd78..e2172e5cf152 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2304,7 +2304,6 @@ static int ftdi_elan_checkingPCI(struct usb_ftdi *ftdi)
2304#define OHCI_QUIRK_SUPERIO 0x02 2304#define OHCI_QUIRK_SUPERIO 0x02
2305#define OHCI_QUIRK_INITRESET 0x04 2305#define OHCI_QUIRK_INITRESET 0x04
2306#define OHCI_BIG_ENDIAN 0x08 2306#define OHCI_BIG_ENDIAN 0x08
2307#define OHCI_QUIRK_ZFMICRO 0x10
2308#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR 2307#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
2309#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \ 2308#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
2310 OHCI_INTR_WDH) 2309 OHCI_INTR_WDH)
@@ -2910,24 +2909,28 @@ static int __init ftdi_elan_init(void)
2910 INIT_LIST_HEAD(&ftdi_static_list); 2909 INIT_LIST_HEAD(&ftdi_static_list);
2911 status_queue = create_singlethread_workqueue("ftdi-status-control"); 2910 status_queue = create_singlethread_workqueue("ftdi-status-control");
2912 if (!status_queue) 2911 if (!status_queue)
2913 goto err1; 2912 goto err_status_queue;
2914 command_queue = create_singlethread_workqueue("ftdi-command-engine"); 2913 command_queue = create_singlethread_workqueue("ftdi-command-engine");
2915 if (!command_queue) 2914 if (!command_queue)
2916 goto err2; 2915 goto err_command_queue;
2917 respond_queue = create_singlethread_workqueue("ftdi-respond-engine"); 2916 respond_queue = create_singlethread_workqueue("ftdi-respond-engine");
2918 if (!respond_queue) 2917 if (!respond_queue)
2919 goto err3; 2918 goto err_respond_queue;
2920 result = usb_register(&ftdi_elan_driver); 2919 result = usb_register(&ftdi_elan_driver);
2921 if (result) 2920 if (result) {
2921 destroy_workqueue(status_queue);
2922 destroy_workqueue(command_queue);
2923 destroy_workqueue(respond_queue);
2922 printk(KERN_ERR "usb_register failed. Error number %d\n", 2924 printk(KERN_ERR "usb_register failed. Error number %d\n",
2923 result); 2925 result);
2926 }
2924 return result; 2927 return result;
2925 2928
2926 err3: 2929 err_respond_queue:
2927 destroy_workqueue(command_queue); 2930 destroy_workqueue(command_queue);
2928 err2: 2931 err_command_queue:
2929 destroy_workqueue(status_queue); 2932 destroy_workqueue(status_queue);
2930 err1: 2933 err_status_queue:
2931 printk(KERN_ERR "%s couldn't create workqueue\n", ftdi_elan_driver.name); 2934 printk(KERN_ERR "%s couldn't create workqueue\n", ftdi_elan_driver.name);
2932 return -ENOMEM; 2935 return -ENOMEM;
2933} 2936}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index d69665c8de02..fc51207b71b8 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -118,7 +118,7 @@ static int usb_get_report(struct usb_device *dev,
118 USB_DIR_IN | USB_TYPE_CLASS | 118 USB_DIR_IN | USB_TYPE_CLASS |
119 USB_RECIP_INTERFACE, (type << 8) + id, 119 USB_RECIP_INTERFACE, (type << 8) + id,
120 inter->desc.bInterfaceNumber, buf, size, 120 inter->desc.bInterfaceNumber, buf, size,
121 GET_TIMEOUT); 121 GET_TIMEOUT*HZ);
122} 122}
123//#endif 123//#endif
124 124
@@ -133,7 +133,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
133 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 133 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
134 (type << 8) + id, 134 (type << 8) + id,
135 intf->cur_altsetting->desc.bInterfaceNumber, buf, 135 intf->cur_altsetting->desc.bInterfaceNumber, buf,
136 size, 1); 136 size, HZ);
137} 137}
138 138
139/*---------------------*/ 139/*---------------------*/
@@ -417,14 +417,14 @@ static ssize_t iowarrior_write(struct file *file,
417 if (!int_out_urb) { 417 if (!int_out_urb) {
418 retval = -ENOMEM; 418 retval = -ENOMEM;
419 dbg("%s Unable to allocate urb ", __func__); 419 dbg("%s Unable to allocate urb ", __func__);
420 goto error; 420 goto error_no_urb;
421 } 421 }
422 buf = usb_buffer_alloc(dev->udev, dev->report_size, 422 buf = usb_buffer_alloc(dev->udev, dev->report_size,
423 GFP_KERNEL, &int_out_urb->transfer_dma); 423 GFP_KERNEL, &int_out_urb->transfer_dma);
424 if (!buf) { 424 if (!buf) {
425 retval = -ENOMEM; 425 retval = -ENOMEM;
426 dbg("%s Unable to allocate buffer ", __func__); 426 dbg("%s Unable to allocate buffer ", __func__);
427 goto error; 427 goto error_no_buffer;
428 } 428 }
429 usb_fill_int_urb(int_out_urb, dev->udev, 429 usb_fill_int_urb(int_out_urb, dev->udev,
430 usb_sndintpipe(dev->udev, 430 usb_sndintpipe(dev->udev,
@@ -459,7 +459,9 @@ static ssize_t iowarrior_write(struct file *file,
459error: 459error:
460 usb_buffer_free(dev->udev, dev->report_size, buf, 460 usb_buffer_free(dev->udev, dev->report_size, buf,
461 int_out_urb->transfer_dma); 461 int_out_urb->transfer_dma);
462error_no_buffer:
462 usb_free_urb(int_out_urb); 463 usb_free_urb(int_out_urb);
464error_no_urb:
463 atomic_dec(&dev->write_busy); 465 atomic_dec(&dev->write_busy);
464 wake_up_interruptible(&dev->write_wait); 466 wake_up_interruptible(&dev->write_wait);
465exit: 467exit:
@@ -748,7 +750,6 @@ static int iowarrior_probe(struct usb_interface *interface,
748 struct usb_endpoint_descriptor *endpoint; 750 struct usb_endpoint_descriptor *endpoint;
749 int i; 751 int i;
750 int retval = -ENOMEM; 752 int retval = -ENOMEM;
751 int idele = 0;
752 753
753 /* allocate memory for our device state and intialize it */ 754 /* allocate memory for our device state and intialize it */
754 dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL); 755 dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
@@ -824,11 +825,10 @@ static int iowarrior_probe(struct usb_interface *interface,
824 825
825 /* Set the idle timeout to 0, if this is interface 0 */ 826 /* Set the idle timeout to 0, if this is interface 0 */
826 if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { 827 if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
827 idele = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 828 usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
828 0x0A, 829 0x0A,
829 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 830 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
830 0, NULL, 0, USB_CTRL_SET_TIMEOUT); 831 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
831 dbg("idele = %d", idele);
832 } 832 }
833 /* allow device read and ioctl */ 833 /* allow device read and ioctl */
834 dev->present = 1; 834 dev->present = 1;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 788a11e6772f..11555bde655b 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -62,6 +62,8 @@
62#define USB_DEVICE_ID_VERNIER_SKIP 0x0003 62#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
63#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 63#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
64 64
65#define USB_VENDOR_ID_MICROCHIP 0x04d8
66#define USB_DEVICE_ID_PICDEM 0x000c
65 67
66#ifdef CONFIG_USB_DYNAMIC_MINORS 68#ifdef CONFIG_USB_DYNAMIC_MINORS
67#define USB_LD_MINOR_BASE 0 69#define USB_LD_MINOR_BASE 0
@@ -89,6 +91,7 @@ static struct usb_device_id ld_usb_table [] = {
89 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, 91 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
90 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, 92 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
91 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, 93 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
94 { USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICDEM) },
92 { } /* Terminating entry */ 95 { } /* Terminating entry */
93}; 96};
94MODULE_DEVICE_TABLE(usb, ld_usb_table); 97MODULE_DEVICE_TABLE(usb, ld_usb_table);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index ada2ebc464ae..887ef953f3d8 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -47,6 +47,7 @@ struct usb_lcd {
47#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref) 47#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
48 48
49static struct usb_driver lcd_driver; 49static struct usb_driver lcd_driver;
50static DEFINE_MUTEX(usb_lcd_open_mutex);
50 51
51 52
52static void lcd_delete(struct kref *kref) 53static void lcd_delete(struct kref *kref)
@@ -68,6 +69,7 @@ static int lcd_open(struct inode *inode, struct file *file)
68 69
69 subminor = iminor(inode); 70 subminor = iminor(inode);
70 71
72 mutex_lock(&usb_lcd_open_mutex);
71 interface = usb_find_interface(&lcd_driver, subminor); 73 interface = usb_find_interface(&lcd_driver, subminor);
72 if (!interface) { 74 if (!interface) {
73 err ("USBLCD: %s - error, can't find device for minor %d", 75 err ("USBLCD: %s - error, can't find device for minor %d",
@@ -89,6 +91,7 @@ static int lcd_open(struct inode *inode, struct file *file)
89 file->private_data = dev; 91 file->private_data = dev;
90 92
91exit: 93exit:
94 mutex_unlock(&usb_lcd_open_mutex);
92 return retval; 95 return retval;
93} 96}
94 97
@@ -347,7 +350,7 @@ static void lcd_disconnect(struct usb_interface *interface)
347 int minor = interface->minor; 350 int minor = interface->minor;
348 351
349 /* prevent skel_open() from racing skel_disconnect() */ 352 /* prevent skel_open() from racing skel_disconnect() */
350 lock_kernel(); 353 mutex_lock(&usb_lcd_open_mutex);
351 354
352 dev = usb_get_intfdata(interface); 355 dev = usb_get_intfdata(interface);
353 usb_set_intfdata(interface, NULL); 356 usb_set_intfdata(interface, NULL);
@@ -355,7 +358,7 @@ static void lcd_disconnect(struct usb_interface *interface)
355 /* give back our minor */ 358 /* give back our minor */
356 usb_deregister_dev(interface, &lcd_class); 359 usb_deregister_dev(interface, &lcd_class);
357 360
358 unlock_kernel(); 361 mutex_unlock(&usb_lcd_open_mutex);
359 362
360 /* decrement our usage count */ 363 /* decrement our usage count */
361 kref_put(&dev->kref, lcd_delete); 364 kref_put(&dev->kref, lcd_delete);
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index b2bedd974ac3..0af11a66207c 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -356,8 +356,10 @@ static inline char mon_bin_get_setup(unsigned char *setupb,
356 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S') 356 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
357 return '-'; 357 return '-';
358 358
359 if (urb->transfer_flags & URB_NO_SETUP_DMA_MAP) 359 if (urb->dev->bus->uses_dma &&
360 (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
360 return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN); 361 return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
362 }
361 if (urb->setup_packet == NULL) 363 if (urb->setup_packet == NULL)
362 return 'Z'; 364 return 'Z';
363 365
@@ -369,7 +371,8 @@ static char mon_bin_get_data(const struct mon_reader_bin *rp,
369 unsigned int offset, struct urb *urb, unsigned int length) 371 unsigned int offset, struct urb *urb, unsigned int length)
370{ 372{
371 373
372 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) { 374 if (urb->dev->bus->uses_dma &&
375 (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
373 mon_dmapeek_vec(rp, offset, urb->transfer_dma, length); 376 mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
374 return 0; 377 return 0;
375 } 378 }
@@ -440,7 +443,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
440 /* We use the fact that usb_pipein() returns 0x80 */ 443 /* We use the fact that usb_pipein() returns 0x80 */
441 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe); 444 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
442 ep->devnum = usb_pipedevice(urb->pipe); 445 ep->devnum = usb_pipedevice(urb->pipe);
443 ep->busnum = rp->r.m_bus->u_bus->busnum; 446 ep->busnum = urb->dev->bus->busnum;
444 ep->id = (unsigned long) urb; 447 ep->id = (unsigned long) urb;
445 ep->ts_sec = ts.tv_sec; 448 ep->ts_sec = ts.tv_sec;
446 ep->ts_usec = ts.tv_usec; 449 ep->ts_usec = ts.tv_usec;
@@ -500,7 +503,7 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
500 /* We use the fact that usb_pipein() returns 0x80 */ 503 /* We use the fact that usb_pipein() returns 0x80 */
501 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe); 504 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
502 ep->devnum = usb_pipedevice(urb->pipe); 505 ep->devnum = usb_pipedevice(urb->pipe);
503 ep->busnum = rp->r.m_bus->u_bus->busnum; 506 ep->busnum = urb->dev->bus->busnum;
504 ep->id = (unsigned long) urb; 507 ep->id = (unsigned long) urb;
505 ep->status = error; 508 ep->status = error;
506 509
@@ -515,7 +518,6 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
515static int mon_bin_open(struct inode *inode, struct file *file) 518static int mon_bin_open(struct inode *inode, struct file *file)
516{ 519{
517 struct mon_bus *mbus; 520 struct mon_bus *mbus;
518 struct usb_bus *ubus;
519 struct mon_reader_bin *rp; 521 struct mon_reader_bin *rp;
520 size_t size; 522 size_t size;
521 int rc; 523 int rc;
@@ -525,7 +527,7 @@ static int mon_bin_open(struct inode *inode, struct file *file)
525 mutex_unlock(&mon_lock); 527 mutex_unlock(&mon_lock);
526 return -ENODEV; 528 return -ENODEV;
527 } 529 }
528 if ((ubus = mbus->u_bus) == NULL) { 530 if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
529 printk(KERN_ERR TAG ": consistency error on open\n"); 531 printk(KERN_ERR TAG ": consistency error on open\n");
530 mutex_unlock(&mon_lock); 532 mutex_unlock(&mon_lock);
531 return -ENODEV; 533 return -ENODEV;
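With the cached mbus->uses_dma field removed (see the usb_mon.h and mon_bus_init() hunks later in this patch), the binary reader decides per URB whether DMA memory may be peeked at, using the bus the URB actually belongs to. Condensed from the hunk above:

/* Only follow urb->setup_dma when the host controller really mapped
 * the buffer for DMA; otherwise fall back to the virtual pointer. */
if (urb->dev->bus->uses_dma &&
    (urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
	return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
if (urb->setup_packet == NULL)
	return 'Z';			/* setup data is unavailable */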
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index c9739e7b35e5..8a1df2c9c73e 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -16,8 +16,6 @@
16#include "usb_mon.h" 16#include "usb_mon.h"
17#include "../core/hcd.h" 17#include "../core/hcd.h"
18 18
19static void mon_submit(struct usb_bus *ubus, struct urb *urb);
20static void mon_complete(struct usb_bus *ubus, struct urb *urb);
21static void mon_stop(struct mon_bus *mbus); 19static void mon_stop(struct mon_bus *mbus);
22static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus); 20static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
23static void mon_bus_drop(struct kref *r); 21static void mon_bus_drop(struct kref *r);
@@ -25,6 +23,7 @@ static void mon_bus_init(struct usb_bus *ubus);
25 23
26DEFINE_MUTEX(mon_lock); 24DEFINE_MUTEX(mon_lock);
27 25
26struct mon_bus mon_bus0; /* Pseudo bus meaning "all buses" */
28static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */ 27static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */
29 28
30/* 29/*
@@ -35,22 +34,19 @@ static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */
35void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r) 34void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r)
36{ 35{
37 unsigned long flags; 36 unsigned long flags;
38 struct usb_bus *ubus; 37 struct list_head *p;
39 38
40 spin_lock_irqsave(&mbus->lock, flags); 39 spin_lock_irqsave(&mbus->lock, flags);
41 if (mbus->nreaders == 0) { 40 if (mbus->nreaders == 0) {
42 ubus = mbus->u_bus; 41 if (mbus == &mon_bus0) {
43 if (ubus->monitored) { 42 list_for_each (p, &mon_buses) {
44 /* 43 struct mon_bus *m1;
45 * Something is really broken, refuse to go on and 44 m1 = list_entry(p, struct mon_bus, bus_link);
46 * possibly corrupt ops pointers or worse. 45 m1->u_bus->monitored = 1;
47 */ 46 }
48 printk(KERN_ERR TAG ": bus %d is already monitored\n", 47 } else {
49 ubus->busnum); 48 mbus->u_bus->monitored = 1;
50 spin_unlock_irqrestore(&mbus->lock, flags);
51 return;
52 } 49 }
53 ubus->monitored = 1;
54 } 50 }
55 mbus->nreaders++; 51 mbus->nreaders++;
56 list_add_tail(&r->r_link, &mbus->r_list); 52 list_add_tail(&r->r_link, &mbus->r_list);
@@ -80,77 +76,79 @@ void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r)
80 76
81/* 77/*
82 */ 78 */
83static void mon_submit(struct usb_bus *ubus, struct urb *urb) 79static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb)
84{ 80{
85 struct mon_bus *mbus;
86 unsigned long flags; 81 unsigned long flags;
87 struct list_head *pos; 82 struct list_head *pos;
88 struct mon_reader *r; 83 struct mon_reader *r;
89 84
90 mbus = ubus->mon_bus;
91 if (mbus == NULL)
92 goto out_unlocked;
93
94 spin_lock_irqsave(&mbus->lock, flags); 85 spin_lock_irqsave(&mbus->lock, flags);
95 if (mbus->nreaders == 0)
96 goto out_locked;
97
98 mbus->cnt_events++; 86 mbus->cnt_events++;
99 list_for_each (pos, &mbus->r_list) { 87 list_for_each (pos, &mbus->r_list) {
100 r = list_entry(pos, struct mon_reader, r_link); 88 r = list_entry(pos, struct mon_reader, r_link);
101 r->rnf_submit(r->r_data, urb); 89 r->rnf_submit(r->r_data, urb);
102 } 90 }
103
104 spin_unlock_irqrestore(&mbus->lock, flags); 91 spin_unlock_irqrestore(&mbus->lock, flags);
105 return; 92 return;
93}
106 94
107out_locked: 95static void mon_submit(struct usb_bus *ubus, struct urb *urb)
108 spin_unlock_irqrestore(&mbus->lock, flags); 96{
109out_unlocked: 97 struct mon_bus *mbus;
110 return; 98
99 if ((mbus = ubus->mon_bus) != NULL)
100 mon_bus_submit(mbus, urb);
101 mon_bus_submit(&mon_bus0, urb);
111} 102}
112 103
113/* 104/*
114 */ 105 */
115static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error) 106static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error)
116{ 107{
117 struct mon_bus *mbus;
118 unsigned long flags; 108 unsigned long flags;
119 struct list_head *pos; 109 struct list_head *pos;
120 struct mon_reader *r; 110 struct mon_reader *r;
121 111
122 mbus = ubus->mon_bus;
123 if (mbus == NULL)
124 goto out_unlocked;
125
126 spin_lock_irqsave(&mbus->lock, flags); 112 spin_lock_irqsave(&mbus->lock, flags);
127 if (mbus->nreaders == 0)
128 goto out_locked;
129
130 mbus->cnt_events++; 113 mbus->cnt_events++;
131 list_for_each (pos, &mbus->r_list) { 114 list_for_each (pos, &mbus->r_list) {
132 r = list_entry(pos, struct mon_reader, r_link); 115 r = list_entry(pos, struct mon_reader, r_link);
133 r->rnf_error(r->r_data, urb, error); 116 r->rnf_error(r->r_data, urb, error);
134 } 117 }
135
136 spin_unlock_irqrestore(&mbus->lock, flags); 118 spin_unlock_irqrestore(&mbus->lock, flags);
137 return; 119 return;
120}
138 121
139out_locked: 122static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
140 spin_unlock_irqrestore(&mbus->lock, flags); 123{
141out_unlocked: 124 struct mon_bus *mbus;
142 return; 125
126 if ((mbus = ubus->mon_bus) != NULL)
127 mon_bus_submit_error(mbus, urb, error);
128 mon_bus_submit_error(&mon_bus0, urb, error);
143} 129}
144 130
145/* 131/*
146 */ 132 */
147static void mon_complete(struct usb_bus *ubus, struct urb *urb) 133static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb)
148{ 134{
149 struct mon_bus *mbus;
150 unsigned long flags; 135 unsigned long flags;
151 struct list_head *pos; 136 struct list_head *pos;
152 struct mon_reader *r; 137 struct mon_reader *r;
153 138
139 spin_lock_irqsave(&mbus->lock, flags);
140 mbus->cnt_events++;
141 list_for_each (pos, &mbus->r_list) {
142 r = list_entry(pos, struct mon_reader, r_link);
143 r->rnf_complete(r->r_data, urb);
144 }
145 spin_unlock_irqrestore(&mbus->lock, flags);
146}
147
148static void mon_complete(struct usb_bus *ubus, struct urb *urb)
149{
150 struct mon_bus *mbus;
151
154 mbus = ubus->mon_bus; 152 mbus = ubus->mon_bus;
155 if (mbus == NULL) { 153 if (mbus == NULL) {
156 /* 154 /*
@@ -162,13 +160,8 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb)
162 return; 160 return;
163 } 161 }
164 162
165 spin_lock_irqsave(&mbus->lock, flags); 163 mon_bus_complete(mbus, urb);
166 mbus->cnt_events++; 164 mon_bus_complete(&mon_bus0, urb);
167 list_for_each (pos, &mbus->r_list) {
168 r = list_entry(pos, struct mon_reader, r_link);
169 r->rnf_complete(r->r_data, urb);
170 }
171 spin_unlock_irqrestore(&mbus->lock, flags);
172} 165}
173 166
174/* int (*unlink_urb) (struct urb *urb, int status); */ 167/* int (*unlink_urb) (struct urb *urb, int status); */
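After this refactoring each hook is a thin dispatcher over a per-bus worker, and the same event is also fed to the new mon_bus0 pseudo-bus, so a reader attached to bus number 0 sees traffic from every host controller. Each mon_bus keeps its own spinlock, so the two reader lists are walked independently; condensed from the hunk above:

static void mon_submit(struct usb_bus *ubus, struct urb *urb)
{
	struct mon_bus *mbus;

	/* Per-bus readers first, then the "all buses" pseudo bus. */
	if ((mbus = ubus->mon_bus) != NULL)
		mon_bus_submit(mbus, urb);
	mon_bus_submit(&mon_bus0, urb);
}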
@@ -179,14 +172,26 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb)
179static void mon_stop(struct mon_bus *mbus) 172static void mon_stop(struct mon_bus *mbus)
180{ 173{
181 struct usb_bus *ubus = mbus->u_bus; 174 struct usb_bus *ubus = mbus->u_bus;
175 struct list_head *p;
182 176
183 /* 177 if (mbus == &mon_bus0) {
184 * A stop can be called for a dissolved mon_bus in case of 178 list_for_each (p, &mon_buses) {
185 * a reader staying across an rmmod foo_hcd. 179 mbus = list_entry(p, struct mon_bus, bus_link);
186 */ 180 /*
187 if (ubus != NULL) { 181 * We do not change nreaders here, so rely on mon_lock.
188 ubus->monitored = 0; 182 */
189 mb(); 183 if (mbus->nreaders == 0 && (ubus = mbus->u_bus) != NULL)
184 ubus->monitored = 0;
185 }
186 } else {
187 /*
188 * A stop can be called for a dissolved mon_bus in case of
189 * a reader staying across an rmmod foo_hcd, so test ->u_bus.
190 */
191 if (mon_bus0.nreaders == 0 && (ubus = mbus->u_bus) != NULL) {
192 ubus->monitored = 0;
193 mb();
194 }
190 } 195 }
191} 196}
192 197
@@ -199,6 +204,10 @@ static void mon_stop(struct mon_bus *mbus)
199static void mon_bus_add(struct usb_bus *ubus) 204static void mon_bus_add(struct usb_bus *ubus)
200{ 205{
201 mon_bus_init(ubus); 206 mon_bus_init(ubus);
207 mutex_lock(&mon_lock);
208 if (mon_bus0.nreaders != 0)
209 ubus->monitored = 1;
210 mutex_unlock(&mon_lock);
202} 211}
203 212
204/* 213/*
@@ -250,12 +259,7 @@ static struct usb_mon_operations mon_ops_0 = {
250static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus) 259static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus)
251{ 260{
252 261
253 /*
254 * Never happens, but...
255 */
256 if (ubus->monitored) { 262 if (ubus->monitored) {
257 printk(KERN_ERR TAG ": bus %d is dissolved while monitored\n",
258 ubus->busnum);
259 ubus->monitored = 0; 263 ubus->monitored = 0;
260 mb(); 264 mb();
261 } 265 }
@@ -263,6 +267,8 @@ static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus)
263 ubus->mon_bus = NULL; 267 ubus->mon_bus = NULL;
264 mbus->u_bus = NULL; 268 mbus->u_bus = NULL;
265 mb(); 269 mb();
270
271 /* We want synchronize_irq() here, but that needs an argument. */
266} 272}
267 273
268/* 274/*
@@ -295,9 +301,8 @@ static void mon_bus_init(struct usb_bus *ubus)
295 */ 301 */
296 mbus->u_bus = ubus; 302 mbus->u_bus = ubus;
297 ubus->mon_bus = mbus; 303 ubus->mon_bus = mbus;
298 mbus->uses_dma = ubus->uses_dma;
299 304
300 mbus->text_inited = mon_text_add(mbus, ubus); 305 mbus->text_inited = mon_text_add(mbus, ubus->busnum);
301 // mon_bin_add(...) 306 // mon_bin_add(...)
302 307
303 mutex_lock(&mon_lock); 308 mutex_lock(&mon_lock);
@@ -309,6 +314,18 @@ err_alloc:
309 return; 314 return;
310} 315}
311 316
317static void mon_bus0_init(void)
318{
319 struct mon_bus *mbus = &mon_bus0;
320
321 kref_init(&mbus->ref);
322 spin_lock_init(&mbus->lock);
323 INIT_LIST_HEAD(&mbus->r_list);
324
325 mbus->text_inited = mon_text_add(mbus, 0);
326 // mbus->bin_inited = mon_bin_add(mbus, 0);
327}
328
312/* 329/*
313 * Search a USB bus by number. Notice that USB bus numbers start from one, 330 * Search a USB bus by number. Notice that USB bus numbers start from one,
314 * which we may later use to identify "all" with zero. 331 * which we may later use to identify "all" with zero.
@@ -322,6 +339,9 @@ struct mon_bus *mon_bus_lookup(unsigned int num)
322 struct list_head *p; 339 struct list_head *p;
323 struct mon_bus *mbus; 340 struct mon_bus *mbus;
324 341
342 if (num == 0) {
343 return &mon_bus0;
344 }
325 list_for_each (p, &mon_buses) { 345 list_for_each (p, &mon_buses) {
326 mbus = list_entry(p, struct mon_bus, bus_link); 346 mbus = list_entry(p, struct mon_bus, bus_link);
327 if (mbus->u_bus->busnum == num) { 347 if (mbus->u_bus->busnum == num) {
@@ -341,6 +361,8 @@ static int __init mon_init(void)
341 if ((rc = mon_bin_init()) != 0) 361 if ((rc = mon_bin_init()) != 0)
342 goto err_bin; 362 goto err_bin;
343 363
364 mon_bus0_init();
365
344 if (usb_mon_register(&mon_ops_0) != 0) { 366 if (usb_mon_register(&mon_ops_0) != 0) {
345 printk(KERN_NOTICE TAG ": unable to register with the core\n"); 367 printk(KERN_NOTICE TAG ": unable to register with the core\n");
346 rc = -ENODEV; 368 rc = -ENODEV;
@@ -374,6 +396,7 @@ static void __exit mon_exit(void)
374 usb_mon_deregister(); 396 usb_mon_deregister();
375 397
376 mutex_lock(&mon_lock); 398 mutex_lock(&mon_lock);
399
377 while (!list_empty(&mon_buses)) { 400 while (!list_empty(&mon_buses)) {
378 p = mon_buses.next; 401 p = mon_buses.next;
379 mbus = list_entry(p, struct mon_bus, bus_link); 402 mbus = list_entry(p, struct mon_bus, bus_link);
@@ -397,6 +420,11 @@ static void __exit mon_exit(void)
397 mon_dissolve(mbus, mbus->u_bus); 420 mon_dissolve(mbus, mbus->u_bus);
398 kref_put(&mbus->ref, mon_bus_drop); 421 kref_put(&mbus->ref, mon_bus_drop);
399 } 422 }
423
424 mbus = &mon_bus0;
425 if (mbus->text_inited)
426 mon_text_del(mbus);
427
400 mutex_unlock(&mon_lock); 428 mutex_unlock(&mon_lock);
401 429
402 mon_text_exit(); 430 mon_text_exit();
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 494ee3b9a226..ec0cc51e39ac 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -31,9 +31,21 @@
31 * to a local DoS. But we have to keep to root in order to prevent 31 * to a local DoS. But we have to keep to root in order to prevent
32 * password sniffing from HID devices. 32 * password sniffing from HID devices.
33 */ 33 */
34#define EVENT_MAX (2*PAGE_SIZE / sizeof(struct mon_event_text)) 34#define EVENT_MAX (4*PAGE_SIZE / sizeof(struct mon_event_text))
35 35
36#define PRINTF_DFL 160 36/*
37 * Potentially unlimited number; we limit it for similar allocations.
38 * The usbfs limits this to 128, but we're not quite as generous.
39 */
40#define ISODESC_MAX 5
41
42#define PRINTF_DFL 250 /* with 5 ISOs segs */
43
44struct mon_iso_desc {
45 int status;
46 unsigned int offset;
47 unsigned int length; /* Unsigned here, signed in URB. Historic. */
48};
37 49
38struct mon_event_text { 50struct mon_event_text {
39 struct list_head e_link; 51 struct list_head e_link;
@@ -41,10 +53,16 @@ struct mon_event_text {
41 unsigned int pipe; /* Pipe */ 53 unsigned int pipe; /* Pipe */
42 unsigned long id; /* From pointer, most of the time */ 54 unsigned long id; /* From pointer, most of the time */
43 unsigned int tstamp; 55 unsigned int tstamp;
56 int busnum;
44 int length; /* Depends on type: xfer length or act length */ 57 int length; /* Depends on type: xfer length or act length */
45 int status; 58 int status;
59 int interval;
60 int start_frame;
61 int error_count;
46 char setup_flag; 62 char setup_flag;
47 char data_flag; 63 char data_flag;
64 int numdesc; /* Full number */
65 struct mon_iso_desc isodesc[ISODESC_MAX];
48 unsigned char setup[SETUP_MAX]; 66 unsigned char setup[SETUP_MAX];
49 unsigned char data[DATA_MAX]; 67 unsigned char data[DATA_MAX];
50}; 68};
@@ -68,6 +86,28 @@ static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */
68 86
69static void mon_text_ctor(void *, struct kmem_cache *, unsigned long); 87static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
70 88
89struct mon_text_ptr {
90 int cnt, limit;
91 char *pbuf;
92};
93
94static struct mon_event_text *
95 mon_text_read_wait(struct mon_reader_text *rp, struct file *file);
96static void mon_text_read_head_t(struct mon_reader_text *rp,
97 struct mon_text_ptr *p, const struct mon_event_text *ep);
98static void mon_text_read_head_u(struct mon_reader_text *rp,
99 struct mon_text_ptr *p, const struct mon_event_text *ep);
100static void mon_text_read_statset(struct mon_reader_text *rp,
101 struct mon_text_ptr *p, const struct mon_event_text *ep);
102static void mon_text_read_intstat(struct mon_reader_text *rp,
103 struct mon_text_ptr *p, const struct mon_event_text *ep);
104static void mon_text_read_isostat(struct mon_reader_text *rp,
105 struct mon_text_ptr *p, const struct mon_event_text *ep);
106static void mon_text_read_isodesc(struct mon_reader_text *rp,
107 struct mon_text_ptr *p, const struct mon_event_text *ep);
108static void mon_text_read_data(struct mon_reader_text *rp,
109 struct mon_text_ptr *p, const struct mon_event_text *ep);
110
71/* 111/*
72 * mon_text_submit 112 * mon_text_submit
73 * mon_text_complete 113 * mon_text_complete
@@ -84,8 +124,10 @@ static inline char mon_text_get_setup(struct mon_event_text *ep,
84 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S') 124 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
85 return '-'; 125 return '-';
86 126
87 if (mbus->uses_dma && (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) 127 if (urb->dev->bus->uses_dma &&
128 (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
88 return mon_dmapeek(ep->setup, urb->setup_dma, SETUP_MAX); 129 return mon_dmapeek(ep->setup, urb->setup_dma, SETUP_MAX);
130 }
89 if (urb->setup_packet == NULL) 131 if (urb->setup_packet == NULL)
90 return 'Z'; /* '0' would be not as pretty. */ 132 return 'Z'; /* '0' would be not as pretty. */
91 133
@@ -104,10 +146,10 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
104 len = DATA_MAX; 146 len = DATA_MAX;
105 147
106 if (usb_pipein(pipe)) { 148 if (usb_pipein(pipe)) {
107 if (ev_type == 'S') 149 if (ev_type != 'C')
108 return '<'; 150 return '<';
109 } else { 151 } else {
110 if (ev_type == 'C') 152 if (ev_type != 'S')
111 return '>'; 153 return '>';
112 } 154 }
113 155
@@ -120,8 +162,10 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
120 * contain non-NULL garbage in case the upper level promised to 162 * contain non-NULL garbage in case the upper level promised to
121 * set DMA for the HCD. 163 * set DMA for the HCD.
122 */ 164 */
123 if (mbus->uses_dma && (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) 165 if (urb->dev->bus->uses_dma &&
166 (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
124 return mon_dmapeek(ep->data, urb->transfer_dma, len); 167 return mon_dmapeek(ep->data, urb->transfer_dma, len);
168 }
125 169
126 if (urb->transfer_buffer == NULL) 170 if (urb->transfer_buffer == NULL)
127 return 'Z'; /* '0' would be not as pretty. */ 171 return 'Z'; /* '0' would be not as pretty. */
@@ -146,6 +190,9 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
146{ 190{
147 struct mon_event_text *ep; 191 struct mon_event_text *ep;
148 unsigned int stamp; 192 unsigned int stamp;
193 struct usb_iso_packet_descriptor *fp;
194 struct mon_iso_desc *dp;
195 int i, ndesc;
149 196
150 stamp = mon_get_timestamp(); 197 stamp = mon_get_timestamp();
151 198
@@ -158,12 +205,36 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
158 ep->type = ev_type; 205 ep->type = ev_type;
159 ep->pipe = urb->pipe; 206 ep->pipe = urb->pipe;
160 ep->id = (unsigned long) urb; 207 ep->id = (unsigned long) urb;
208 ep->busnum = urb->dev->bus->busnum;
161 ep->tstamp = stamp; 209 ep->tstamp = stamp;
162 ep->length = (ev_type == 'S') ? 210 ep->length = (ev_type == 'S') ?
163 urb->transfer_buffer_length : urb->actual_length; 211 urb->transfer_buffer_length : urb->actual_length;
164 /* Collecting status makes debugging sense for submits, too */ 212 /* Collecting status makes debugging sense for submits, too */
165 ep->status = urb->status; 213 ep->status = urb->status;
166 214
215 if (usb_pipeint(urb->pipe)) {
216 ep->interval = urb->interval;
217 } else if (usb_pipeisoc(urb->pipe)) {
218 ep->interval = urb->interval;
219 ep->start_frame = urb->start_frame;
220 ep->error_count = urb->error_count;
221 }
222 ep->numdesc = urb->number_of_packets;
223 if (usb_pipeisoc(urb->pipe) && urb->number_of_packets > 0) {
224 if ((ndesc = urb->number_of_packets) > ISODESC_MAX)
225 ndesc = ISODESC_MAX;
226 fp = urb->iso_frame_desc;
227 dp = ep->isodesc;
228 for (i = 0; i < ndesc; i++) {
229 dp->status = fp->status;
230 dp->offset = fp->offset;
231 dp->length = (ev_type == 'S') ?
232 fp->length : fp->actual_length;
233 fp++;
234 dp++;
235 }
236 }
237
167 ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus); 238 ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus);
168 ep->data_flag = mon_text_get_data(ep, urb, ep->length, ev_type, 239 ep->data_flag = mon_text_get_data(ep, urb, ep->length, ev_type,
169 rp->r.m_bus); 240 rp->r.m_bus);
@@ -199,6 +270,7 @@ static void mon_text_error(void *data, struct urb *urb, int error)
199 ep->type = 'E'; 270 ep->type = 'E';
200 ep->pipe = urb->pipe; 271 ep->pipe = urb->pipe;
201 ep->id = (unsigned long) urb; 272 ep->id = (unsigned long) urb;
273 ep->busnum = 0;
202 ep->tstamp = 0; 274 ep->tstamp = 0;
203 ep->length = 0; 275 ep->length = 0;
204 ep->status = error; 276 ep->status = error;
@@ -237,13 +309,11 @@ static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp,
237static int mon_text_open(struct inode *inode, struct file *file) 309static int mon_text_open(struct inode *inode, struct file *file)
238{ 310{
239 struct mon_bus *mbus; 311 struct mon_bus *mbus;
240 struct usb_bus *ubus;
241 struct mon_reader_text *rp; 312 struct mon_reader_text *rp;
242 int rc; 313 int rc;
243 314
244 mutex_lock(&mon_lock); 315 mutex_lock(&mon_lock);
245 mbus = inode->i_private; 316 mbus = inode->i_private;
246 ubus = mbus->u_bus;
247 317
248 rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL); 318 rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL);
249 if (rp == NULL) { 319 if (rp == NULL) {
@@ -267,8 +337,7 @@ static int mon_text_open(struct inode *inode, struct file *file)
267 rp->r.rnf_error = mon_text_error; 337 rp->r.rnf_error = mon_text_error;
268 rp->r.rnf_complete = mon_text_complete; 338 rp->r.rnf_complete = mon_text_complete;
269 339
270 snprintf(rp->slab_name, SLAB_NAME_SZ, "mon%dt_%lx", ubus->busnum, 340 snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
271 (long)rp);
272 rp->e_slab = kmem_cache_create(rp->slab_name, 341 rp->e_slab = kmem_cache_create(rp->slab_name,
273 sizeof(struct mon_event_text), sizeof(long), 0, 342 sizeof(struct mon_event_text), sizeof(long), 0,
274 mon_text_ctor, NULL); 343 mon_text_ctor, NULL);
@@ -300,17 +369,75 @@ err_alloc:
300 * dd if=/dbg/usbmon/0t bs=10 369 * dd if=/dbg/usbmon/0t bs=10
301 * Also, we do not allow seeks and do not bother advancing the offset. 370 * Also, we do not allow seeks and do not bother advancing the offset.
302 */ 371 */
303static ssize_t mon_text_read(struct file *file, char __user *buf, 372static ssize_t mon_text_read_t(struct file *file, char __user *buf,
304 size_t nbytes, loff_t *ppos) 373 size_t nbytes, loff_t *ppos)
305{ 374{
306 struct mon_reader_text *rp = file->private_data; 375 struct mon_reader_text *rp = file->private_data;
376 struct mon_event_text *ep;
377 struct mon_text_ptr ptr;
378
379 if (IS_ERR(ep = mon_text_read_wait(rp, file)))
380 return PTR_ERR(ep);
381 mutex_lock(&rp->printf_lock);
382 ptr.cnt = 0;
383 ptr.pbuf = rp->printf_buf;
384 ptr.limit = rp->printf_size;
385
386 mon_text_read_head_t(rp, &ptr, ep);
387 mon_text_read_statset(rp, &ptr, ep);
388 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
389 " %d", ep->length);
390 mon_text_read_data(rp, &ptr, ep);
391
392 if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
393 ptr.cnt = -EFAULT;
394 mutex_unlock(&rp->printf_lock);
395 kmem_cache_free(rp->e_slab, ep);
396 return ptr.cnt;
397}
398
399static ssize_t mon_text_read_u(struct file *file, char __user *buf,
400 size_t nbytes, loff_t *ppos)
401{
402 struct mon_reader_text *rp = file->private_data;
403 struct mon_event_text *ep;
404 struct mon_text_ptr ptr;
405
406 if (IS_ERR(ep = mon_text_read_wait(rp, file)))
407 return PTR_ERR(ep);
408 mutex_lock(&rp->printf_lock);
409 ptr.cnt = 0;
410 ptr.pbuf = rp->printf_buf;
411 ptr.limit = rp->printf_size;
412
413 mon_text_read_head_u(rp, &ptr, ep);
414 if (ep->type == 'E') {
415 mon_text_read_statset(rp, &ptr, ep);
416 } else if (usb_pipeisoc(ep->pipe)) {
417 mon_text_read_isostat(rp, &ptr, ep);
418 mon_text_read_isodesc(rp, &ptr, ep);
419 } else if (usb_pipeint(ep->pipe)) {
420 mon_text_read_intstat(rp, &ptr, ep);
421 } else {
422 mon_text_read_statset(rp, &ptr, ep);
423 }
424 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
425 " %d", ep->length);
426 mon_text_read_data(rp, &ptr, ep);
427
428 if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
429 ptr.cnt = -EFAULT;
430 mutex_unlock(&rp->printf_lock);
431 kmem_cache_free(rp->e_slab, ep);
432 return ptr.cnt;
433}
434
435static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
436 struct file *file)
437{
307 struct mon_bus *mbus = rp->r.m_bus; 438 struct mon_bus *mbus = rp->r.m_bus;
308 DECLARE_WAITQUEUE(waita, current); 439 DECLARE_WAITQUEUE(waita, current);
309 struct mon_event_text *ep; 440 struct mon_event_text *ep;
310 int cnt, limit;
311 char *pbuf;
312 char udir, utype;
313 int data_len, i;
314 441
315 add_wait_queue(&rp->wait, &waita); 442 add_wait_queue(&rp->wait, &waita);
316 set_current_state(TASK_INTERRUPTIBLE); 443 set_current_state(TASK_INTERRUPTIBLE);
@@ -318,7 +445,7 @@ static ssize_t mon_text_read(struct file *file, char __user *buf,
318 if (file->f_flags & O_NONBLOCK) { 445 if (file->f_flags & O_NONBLOCK) {
319 set_current_state(TASK_RUNNING); 446 set_current_state(TASK_RUNNING);
320 remove_wait_queue(&rp->wait, &waita); 447 remove_wait_queue(&rp->wait, &waita);
321 return -EWOULDBLOCK; /* Same as EAGAIN in Linux */ 448 return ERR_PTR(-EWOULDBLOCK);
322 } 449 }
323 /* 450 /*
324 * We do not count nwaiters, because ->release is supposed 451 * We do not count nwaiters, because ->release is supposed
@@ -327,17 +454,19 @@ static ssize_t mon_text_read(struct file *file, char __user *buf,
327 schedule(); 454 schedule();
328 if (signal_pending(current)) { 455 if (signal_pending(current)) {
329 remove_wait_queue(&rp->wait, &waita); 456 remove_wait_queue(&rp->wait, &waita);
330 return -EINTR; 457 return ERR_PTR(-EINTR);
331 } 458 }
332 set_current_state(TASK_INTERRUPTIBLE); 459 set_current_state(TASK_INTERRUPTIBLE);
333 } 460 }
334 set_current_state(TASK_RUNNING); 461 set_current_state(TASK_RUNNING);
335 remove_wait_queue(&rp->wait, &waita); 462 remove_wait_queue(&rp->wait, &waita);
463 return ep;
464}
336 465
337 mutex_lock(&rp->printf_lock); 466static void mon_text_read_head_t(struct mon_reader_text *rp,
338 cnt = 0; 467 struct mon_text_ptr *p, const struct mon_event_text *ep)
339 pbuf = rp->printf_buf; 468{
340 limit = rp->printf_size; 469 char udir, utype;
341 470
342 udir = usb_pipein(ep->pipe) ? 'i' : 'o'; 471 udir = usb_pipein(ep->pipe) ? 'i' : 'o';
343 switch (usb_pipetype(ep->pipe)) { 472 switch (usb_pipetype(ep->pipe)) {
@@ -346,13 +475,38 @@ static ssize_t mon_text_read(struct file *file, char __user *buf,
346 case PIPE_CONTROL: utype = 'C'; break; 475 case PIPE_CONTROL: utype = 'C'; break;
347 default: /* PIPE_BULK */ utype = 'B'; 476 default: /* PIPE_BULK */ utype = 'B';
348 } 477 }
349 cnt += snprintf(pbuf + cnt, limit - cnt, 478 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
350 "%lx %u %c %c%c:%03u:%02u", 479 "%lx %u %c %c%c:%03u:%02u",
351 ep->id, ep->tstamp, ep->type, 480 ep->id, ep->tstamp, ep->type,
352 utype, udir, usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe)); 481 utype, udir,
482 usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe));
483}
484
485static void mon_text_read_head_u(struct mon_reader_text *rp,
486 struct mon_text_ptr *p, const struct mon_event_text *ep)
487{
488 char udir, utype;
489
490 udir = usb_pipein(ep->pipe) ? 'i' : 'o';
491 switch (usb_pipetype(ep->pipe)) {
492 case PIPE_ISOCHRONOUS: utype = 'Z'; break;
493 case PIPE_INTERRUPT: utype = 'I'; break;
494 case PIPE_CONTROL: utype = 'C'; break;
495 default: /* PIPE_BULK */ utype = 'B';
496 }
497 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
498 "%lx %u %c %c%c:%d:%03u:%u",
499 ep->id, ep->tstamp, ep->type,
500 utype, udir,
501 ep->busnum, usb_pipedevice(ep->pipe), usb_pipeendpoint(ep->pipe));
502}
503
504static void mon_text_read_statset(struct mon_reader_text *rp,
505 struct mon_text_ptr *p, const struct mon_event_text *ep)
506{
353 507
354 if (ep->setup_flag == 0) { /* Setup packet is present and captured */ 508 if (ep->setup_flag == 0) { /* Setup packet is present and captured */
355 cnt += snprintf(pbuf + cnt, limit - cnt, 509 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
356 " s %02x %02x %04x %04x %04x", 510 " s %02x %02x %04x %04x %04x",
357 ep->setup[0], 511 ep->setup[0],
358 ep->setup[1], 512 ep->setup[1],
@@ -360,40 +514,86 @@ static ssize_t mon_text_read(struct file *file, char __user *buf,
360 (ep->setup[5] << 8) | ep->setup[4], 514 (ep->setup[5] << 8) | ep->setup[4],
361 (ep->setup[7] << 8) | ep->setup[6]); 515 (ep->setup[7] << 8) | ep->setup[6]);
362 } else if (ep->setup_flag != '-') { /* Unable to capture setup packet */ 516 } else if (ep->setup_flag != '-') { /* Unable to capture setup packet */
363 cnt += snprintf(pbuf + cnt, limit - cnt, 517 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
364 " %c __ __ ____ ____ ____", ep->setup_flag); 518 " %c __ __ ____ ____ ____", ep->setup_flag);
365 } else { /* No setup for this kind of URB */ 519 } else { /* No setup for this kind of URB */
366 cnt += snprintf(pbuf + cnt, limit - cnt, " %d", ep->status); 520 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
521 " %d", ep->status);
367 } 522 }
368 cnt += snprintf(pbuf + cnt, limit - cnt, " %d", ep->length); 523}
524
525static void mon_text_read_intstat(struct mon_reader_text *rp,
526 struct mon_text_ptr *p, const struct mon_event_text *ep)
527{
528 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
529 " %d:%d", ep->status, ep->interval);
530}
531
532static void mon_text_read_isostat(struct mon_reader_text *rp,
533 struct mon_text_ptr *p, const struct mon_event_text *ep)
534{
535 if (ep->type == 'S') {
536 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
537 " %d:%d:%d", ep->status, ep->interval, ep->start_frame);
538 } else {
539 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
540 " %d:%d:%d:%d",
541 ep->status, ep->interval, ep->start_frame, ep->error_count);
542 }
543}
544
545static void mon_text_read_isodesc(struct mon_reader_text *rp,
546 struct mon_text_ptr *p, const struct mon_event_text *ep)
547{
548 int ndesc; /* Display this many */
549 int i;
550 const struct mon_iso_desc *dp;
551
552 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
553 " %d", ep->numdesc);
554 ndesc = ep->numdesc;
555 if (ndesc > ISODESC_MAX)
556 ndesc = ISODESC_MAX;
557 if (ndesc < 0)
558 ndesc = 0;
559 dp = ep->isodesc;
560 for (i = 0; i < ndesc; i++) {
561 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
562 " %d:%u:%u", dp->status, dp->offset, dp->length);
563 dp++;
564 }
565}
566
567static void mon_text_read_data(struct mon_reader_text *rp,
568 struct mon_text_ptr *p, const struct mon_event_text *ep)
569{
570 int data_len, i;
369 571
370 if ((data_len = ep->length) > 0) { 572 if ((data_len = ep->length) > 0) {
371 if (ep->data_flag == 0) { 573 if (ep->data_flag == 0) {
372 cnt += snprintf(pbuf + cnt, limit - cnt, " ="); 574 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
575 " =");
373 if (data_len >= DATA_MAX) 576 if (data_len >= DATA_MAX)
374 data_len = DATA_MAX; 577 data_len = DATA_MAX;
375 for (i = 0; i < data_len; i++) { 578 for (i = 0; i < data_len; i++) {
376 if (i % 4 == 0) { 579 if (i % 4 == 0) {
377 cnt += snprintf(pbuf + cnt, limit - cnt, 580 p->cnt += snprintf(p->pbuf + p->cnt,
581 p->limit - p->cnt,
378 " "); 582 " ");
379 } 583 }
380 cnt += snprintf(pbuf + cnt, limit - cnt, 584 p->cnt += snprintf(p->pbuf + p->cnt,
585 p->limit - p->cnt,
381 "%02x", ep->data[i]); 586 "%02x", ep->data[i]);
382 } 587 }
383 cnt += snprintf(pbuf + cnt, limit - cnt, "\n"); 588 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
589 "\n");
384 } else { 590 } else {
385 cnt += snprintf(pbuf + cnt, limit - cnt, 591 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
386 " %c\n", ep->data_flag); 592 " %c\n", ep->data_flag);
387 } 593 }
388 } else { 594 } else {
389 cnt += snprintf(pbuf + cnt, limit - cnt, "\n"); 595 p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n");
390 } 596 }
391
392 if (copy_to_user(buf, rp->printf_buf, cnt))
393 cnt = -EFAULT;
394 mutex_unlock(&rp->printf_lock);
395 kmem_cache_free(rp->e_slab, ep);
396 return cnt;
397} 597}
398 598
399static int mon_text_release(struct inode *inode, struct file *file) 599static int mon_text_release(struct inode *inode, struct file *file)
@@ -439,34 +639,46 @@ static int mon_text_release(struct inode *inode, struct file *file)
439 return 0; 639 return 0;
440} 640}
441 641
442static const struct file_operations mon_fops_text = { 642static const struct file_operations mon_fops_text_t = {
443 .owner = THIS_MODULE, 643 .owner = THIS_MODULE,
444 .open = mon_text_open, 644 .open = mon_text_open,
445 .llseek = no_llseek, 645 .llseek = no_llseek,
446 .read = mon_text_read, 646 .read = mon_text_read_t,
447 /* .write = mon_text_write, */
448 /* .poll = mon_text_poll, */
449 /* .ioctl = mon_text_ioctl, */
450 .release = mon_text_release, 647 .release = mon_text_release,
451}; 648};
452 649
453int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus) 650static const struct file_operations mon_fops_text_u = {
651 .owner = THIS_MODULE,
652 .open = mon_text_open,
653 .llseek = no_llseek,
654 .read = mon_text_read_u,
655 .release = mon_text_release,
656};
657
658int mon_text_add(struct mon_bus *mbus, int busnum)
454{ 659{
455 struct dentry *d; 660 struct dentry *d;
456 enum { NAMESZ = 10 }; 661 enum { NAMESZ = 10 };
457 char name[NAMESZ]; 662 char name[NAMESZ];
458 int rc; 663 int rc;
459 664
460 rc = snprintf(name, NAMESZ, "%dt", ubus->busnum); 665 rc = snprintf(name, NAMESZ, "%dt", busnum);
461 if (rc <= 0 || rc >= NAMESZ) 666 if (rc <= 0 || rc >= NAMESZ)
462 goto err_print_t; 667 goto err_print_t;
463 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text); 668 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text_t);
464 if (d == NULL) 669 if (d == NULL)
465 goto err_create_t; 670 goto err_create_t;
466 mbus->dent_t = d; 671 mbus->dent_t = d;
467 672
468 /* XXX The stats do not belong to here (text API), but oh well... */ 673 rc = snprintf(name, NAMESZ, "%du", busnum);
469 rc = snprintf(name, NAMESZ, "%ds", ubus->busnum); 674 if (rc <= 0 || rc >= NAMESZ)
675 goto err_print_u;
676 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text_u);
677 if (d == NULL)
678 goto err_create_u;
679 mbus->dent_u = d;
680
681 rc = snprintf(name, NAMESZ, "%ds", busnum);
470 if (rc <= 0 || rc >= NAMESZ) 682 if (rc <= 0 || rc >= NAMESZ)
471 goto err_print_s; 683 goto err_print_s;
472 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_stat); 684 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_stat);
@@ -478,6 +690,10 @@ int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
478 690
479err_create_s: 691err_create_s:
480err_print_s: 692err_print_s:
693 debugfs_remove(mbus->dent_u);
694 mbus->dent_u = NULL;
695err_create_u:
696err_print_u:
481 debugfs_remove(mbus->dent_t); 697 debugfs_remove(mbus->dent_t);
482 mbus->dent_t = NULL; 698 mbus->dent_t = NULL;
483err_create_t: 699err_create_t:
@@ -487,6 +703,7 @@ err_print_t:
487 703
488void mon_text_del(struct mon_bus *mbus) 704void mon_text_del(struct mon_bus *mbus)
489{ 705{
706 debugfs_remove(mbus->dent_u);
490 debugfs_remove(mbus->dent_t); 707 debugfs_remove(mbus->dent_t);
491 debugfs_remove(mbus->dent_s); 708 debugfs_remove(mbus->dent_s);
492} 709}
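With mon_bus0 registered, debugfs now also exposes a "0t"/"0u"/"0s" triple that aggregates every bus, and the new "u" format prefixes the device/endpoint address with the bus number ("%lx %u %c %c%c:%d:%03u:%u ..."). A throwaway userspace reader, assuming debugfs is mounted at the usual /sys/kernel/debug (see the mon_dir comment above):

#include <stdio.h>

int main(void)
{
	/* "0u" is the aggregate interface; a per-bus reader would open
	 * e.g. /sys/kernel/debug/usbmon/2u instead. Requires root. */
	FILE *f = fopen("/sys/kernel/debug/usbmon/0u", "r");
	char line[1024];

	if (f == NULL) {
		perror("usbmon 0u");
		return 1;
	}
	while (fgets(line, sizeof(line), f) != NULL)
		fputs(line, stdout);	/* one formatted event per read() */
	fclose(f);
	return 0;
}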
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index efdfd8993d9e..13d63255283e 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -22,7 +22,7 @@ struct mon_bus {
22 int text_inited; 22 int text_inited;
23 struct dentry *dent_s; /* Debugging file */ 23 struct dentry *dent_s; /* Debugging file */
24 struct dentry *dent_t; /* Text interface file */ 24 struct dentry *dent_t; /* Text interface file */
25 int uses_dma; 25 struct dentry *dent_u; /* Second text interface file */
26 26
27 /* Ref */ 27 /* Ref */
28 int nreaders; /* Under mon_lock AND mbus->lock */ 28 int nreaders; /* Under mon_lock AND mbus->lock */
@@ -52,7 +52,7 @@ void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r);
52 52
53struct mon_bus *mon_bus_lookup(unsigned int num); 53struct mon_bus *mon_bus_lookup(unsigned int num);
54 54
55int /*bool*/ mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus); 55int /*bool*/ mon_text_add(struct mon_bus *mbus, int busnum);
56void mon_text_del(struct mon_bus *mbus); 56void mon_text_del(struct mon_bus *mbus);
57// void mon_bin_add(struct mon_bus *); 57// void mon_bin_add(struct mon_bus *);
58 58
@@ -81,4 +81,6 @@ extern struct mutex mon_lock;
81 81
82extern const struct file_operations mon_fops_stat; 82extern const struct file_operations mon_fops_stat;
83 83
84extern struct mon_bus mon_bus0; /* Only for redundant checks */
85
84#endif /* __USB_MON_H */ 86#endif /* __USB_MON_H */
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 5808ea082459..d5ef97bc4d01 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -298,7 +298,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
298 if (ax_skb) { 298 if (ax_skb) {
299 ax_skb->len = size; 299 ax_skb->len = size;
300 ax_skb->data = packet; 300 ax_skb->data = packet;
301 ax_skb->tail = packet + size; 301 skb_set_tail_pointer(ax_skb, size);
302 usbnet_skb_return(dev, ax_skb); 302 usbnet_skb_return(dev, ax_skb);
303 } else { 303 } else {
304 return 0; 304 return 0;
@@ -338,7 +338,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
338 && ((headroom + tailroom) >= (4 + padlen))) { 338 && ((headroom + tailroom) >= (4 + padlen))) {
339 if ((headroom < 4) || (tailroom < padlen)) { 339 if ((headroom < 4) || (tailroom < padlen)) {
340 skb->data = memmove(skb->head + 4, skb->data, skb->len); 340 skb->data = memmove(skb->head + 4, skb->data, skb->len);
341 skb->tail = skb->data + skb->len; 341 skb_set_tail_pointer(skb, skb->len);
342 } 342 }
343 } else { 343 } else {
344 struct sk_buff *skb2; 344 struct sk_buff *skb2;
@@ -352,11 +352,11 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
352 skb_push(skb, 4); 352 skb_push(skb, 4);
353 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); 353 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
354 cpu_to_le32s(&packet_len); 354 cpu_to_le32s(&packet_len);
355 memcpy(skb->data, &packet_len, sizeof(packet_len)); 355 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
356 356
357 if ((skb->len % 512) == 0) { 357 if ((skb->len % 512) == 0) {
358 cpu_to_le32s(&padbytes); 358 cpu_to_le32s(&padbytes);
359 memcpy( skb->tail, &padbytes, sizeof(padbytes)); 359 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
360 skb_put(skb, sizeof(padbytes)); 360 skb_put(skb, sizeof(padbytes));
361 } 361 }
362 return skb; 362 return skb;
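The asix hunks above (and the matching gl620a, net1080 and rndis_host changes below) stop assigning skb->tail directly and use the sk_buff accessor helpers, which stay correct whether tail is stored as a pointer or as an offset. A hypothetical helper showing the idiom with the same calls:

#include <linux/skbuff.h>
#include <linux/string.h>

static void example_pad_frame(struct sk_buff *skb, unsigned int len, __le32 pad)
{
	skb_set_tail_pointer(skb, len);	/* was: skb->tail = skb->data + len */
	memcpy(skb_tail_pointer(skb), &pad, sizeof(pad));
	skb_put(skb, sizeof(pad));	/* account for the appended padding */
}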
diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c
index 4852012735f6..86e90c59d551 100644
--- a/drivers/usb/net/catc.c
+++ b/drivers/usb/net/catc.c
@@ -255,7 +255,6 @@ static void catc_rx_done(struct urb *urb)
255 if (!(skb = dev_alloc_skb(pkt_len))) 255 if (!(skb = dev_alloc_skb(pkt_len)))
256 return; 256 return;
257 257
258 skb->dev = catc->netdev;
259 eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); 258 eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
260 skb_put(skb, pkt_len); 259 skb_put(skb, pkt_len);
261 260
@@ -356,7 +355,7 @@ resubmit:
356 * Transmit routines. 355 * Transmit routines.
357 */ 356 */
358 357
359static void catc_tx_run(struct catc *catc) 358static int catc_tx_run(struct catc *catc)
360{ 359{
361 int status; 360 int status;
362 361
@@ -374,12 +373,14 @@ static void catc_tx_run(struct catc *catc)
374 catc->tx_ptr = 0; 373 catc->tx_ptr = 0;
375 374
376 catc->netdev->trans_start = jiffies; 375 catc->netdev->trans_start = jiffies;
376 return status;
377} 377}
378 378
379static void catc_tx_done(struct urb *urb) 379static void catc_tx_done(struct urb *urb)
380{ 380{
381 struct catc *catc = urb->context; 381 struct catc *catc = urb->context;
382 unsigned long flags; 382 unsigned long flags;
383 int r;
383 384
384 if (urb->status == -ECONNRESET) { 385 if (urb->status == -ECONNRESET) {
385 dbg("Tx Reset."); 386 dbg("Tx Reset.");
@@ -398,10 +399,13 @@ static void catc_tx_done(struct urb *urb)
398 399
399 spin_lock_irqsave(&catc->tx_lock, flags); 400 spin_lock_irqsave(&catc->tx_lock, flags);
400 401
401 if (catc->tx_ptr) 402 if (catc->tx_ptr) {
402 catc_tx_run(catc); 403 r = catc_tx_run(catc);
403 else 404 if (unlikely(r < 0))
405 clear_bit(TX_RUNNING, &catc->flags);
406 } else {
404 clear_bit(TX_RUNNING, &catc->flags); 407 clear_bit(TX_RUNNING, &catc->flags);
408 }
405 409
406 netif_wake_queue(catc->netdev); 410 netif_wake_queue(catc->netdev);
407 411
@@ -412,6 +416,7 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
412{ 416{
413 struct catc *catc = netdev_priv(netdev); 417 struct catc *catc = netdev_priv(netdev);
414 unsigned long flags; 418 unsigned long flags;
419 int r = 0;
415 char *tx_buf; 420 char *tx_buf;
416 421
417 spin_lock_irqsave(&catc->tx_lock, flags); 422 spin_lock_irqsave(&catc->tx_lock, flags);
@@ -419,11 +424,14 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
419 catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; 424 catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
420 tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; 425 tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
421 *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); 426 *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len);
422 memcpy(tx_buf + 2, skb->data, skb->len); 427 skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
423 catc->tx_ptr += skb->len + 2; 428 catc->tx_ptr += skb->len + 2;
424 429
425 if (!test_and_set_bit(TX_RUNNING, &catc->flags)) 430 if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
426 catc_tx_run(catc); 431 r = catc_tx_run(catc);
432 if (r < 0)
433 clear_bit(TX_RUNNING, &catc->flags);
434 }
427 435
428 if ((catc->is_f5u011 && catc->tx_ptr) 436 if ((catc->is_f5u011 && catc->tx_ptr)
429 || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) 437 || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
@@ -431,8 +439,10 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
431 439
432 spin_unlock_irqrestore(&catc->tx_lock, flags); 440 spin_unlock_irqrestore(&catc->tx_lock, flags);
433 441
434 catc->stats.tx_bytes += skb->len; 442 if (r >= 0) {
435 catc->stats.tx_packets++; 443 catc->stats.tx_bytes += skb->len;
444 catc->stats.tx_packets++;
445 }
436 446
437 dev_kfree_skb(skb); 447 dev_kfree_skb(skb);
438 448
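catc_tx_run() now returns the usb_submit_urb() status, and both call sites above clear TX_RUNNING again when the submit fails, so the transmit path cannot stall waiting for a completion that was never queued. The call-site shape, condensed:

if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
	r = catc_tx_run(catc);
	if (unlikely(r < 0))	/* submit failed: no completion will arrive */
		clear_bit(TX_RUNNING, &catc->flags);
}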
diff --git a/drivers/usb/net/dm9601.c b/drivers/usb/net/dm9601.c
index 5130cc7eb314..a67638601477 100644
--- a/drivers/usb/net/dm9601.c
+++ b/drivers/usb/net/dm9601.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/stddef.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/netdevice.h> 17#include <linux/netdevice.h>
17#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
@@ -85,7 +86,7 @@ static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
85 usb_sndctrlpipe(dev->udev, 0), 86 usb_sndctrlpipe(dev->udev, 0),
86 DM_WRITE_REG, 87 DM_WRITE_REG,
87 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE, 88 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
88 value, reg, 0, 0, USB_CTRL_SET_TIMEOUT); 89 value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
89} 90}
90 91
91static void dm_write_async_callback(struct urb *urb) 92static void dm_write_async_callback(struct urb *urb)
@@ -171,7 +172,7 @@ static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
171 172
172 usb_fill_control_urb(urb, dev->udev, 173 usb_fill_control_urb(urb, dev->udev,
173 usb_sndctrlpipe(dev->udev, 0), 174 usb_sndctrlpipe(dev->udev, 0),
174 (void *)req, 0, 0, dm_write_async_callback, req); 175 (void *)req, NULL, 0, dm_write_async_callback, req);
175 176
176 status = usb_submit_urb(urb, GFP_ATOMIC); 177 status = usb_submit_urb(urb, GFP_ATOMIC);
177 if (status < 0) { 178 if (status < 0) {
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c
index d257a8e026d6..031cf5ca4dbb 100644
--- a/drivers/usb/net/gl620a.c
+++ b/drivers/usb/net/gl620a.c
@@ -157,7 +157,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
157 if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { 157 if ((headroom < (4 + 4*1)) || (tailroom < padlen)) {
158 skb->data = memmove(skb->head + (4 + 4*1), 158 skb->data = memmove(skb->head + (4 + 4*1),
159 skb->data, skb->len); 159 skb->data, skb->len);
160 skb->tail = skb->data + skb->len; 160 skb_set_tail_pointer(skb, skb->len);
161 } 161 }
162 } else { 162 } else {
163 struct sk_buff *skb2; 163 struct sk_buff *skb2;
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index de95268ae4b8..a0cc05d21a6a 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -636,8 +636,6 @@ static void kaweth_usb_receive(struct urb *urb)
636 636
637 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 637 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
638 638
639 skb->dev = net;
640
641 eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); 639 eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
642 640
643 skb_put(skb, pkt_len); 641 skb_put(skb, pkt_len);
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c
index ccebfdef4751..19bf8dae70c9 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/usb/net/net1080.c
@@ -520,7 +520,7 @@ net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
520 skb->data = memmove(skb->head 520 skb->data = memmove(skb->head
521 + sizeof (struct nc_header), 521 + sizeof (struct nc_header),
522 skb->data, skb->len); 522 skb->data, skb->len);
523 skb->tail = skb->data + len; 523 skb_set_tail_pointer(skb, len);
524 goto encapsulate; 524 goto encapsulate;
525 } 525 }
526 } 526 }
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index d48c024cff59..a05fd97e5bc2 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -316,6 +316,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
316 return ret; 316 return ret;
317} 317}
318 318
319/* Returns 0 on success, error on failure */
319static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd) 320static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
320{ 321{
321 int i; 322 int i;
@@ -574,7 +575,6 @@ static void fill_skb_pool(pegasus_t * pegasus)
574 */ 575 */
575 if (pegasus->rx_pool[i] == NULL) 576 if (pegasus->rx_pool[i] == NULL)
576 return; 577 return;
577 pegasus->rx_pool[i]->dev = pegasus->net;
578 skb_reserve(pegasus->rx_pool[i], 2); 578 skb_reserve(pegasus->rx_pool[i], 2);
579 } 579 }
580} 580}
@@ -847,10 +847,6 @@ static void intr_callback(struct urb *urb)
847 * d[0].NO_CARRIER kicks in only with failed TX. 847 * d[0].NO_CARRIER kicks in only with failed TX.
848 * ... so monitoring with MII may be safest. 848 * ... so monitoring with MII may be safest.
849 */ 849 */
850 if (d[0] & NO_CARRIER)
851 netif_carrier_off(net);
852 else
853 netif_carrier_on(net);
854 850
855 /* bytes 3-4 == rx_lostpkt, reg 2E/2F */ 851 /* bytes 3-4 == rx_lostpkt, reg 2E/2F */
856 pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; 852 pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
@@ -883,7 +879,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net)
883 netif_stop_queue(net); 879 netif_stop_queue(net);
884 880
885 ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); 881 ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16);
886 memcpy(pegasus->tx_buff + 2, skb->data, skb->len); 882 skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len);
887 usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, 883 usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb,
888 usb_sndbulkpipe(pegasus->usb, 2), 884 usb_sndbulkpipe(pegasus->usb, 2),
889 pegasus->tx_buff, count, 885 pegasus->tx_buff, count,
@@ -950,7 +946,7 @@ static void set_carrier(struct net_device *net)
950 pegasus_t *pegasus = netdev_priv(net); 946 pegasus_t *pegasus = netdev_priv(net);
951 u16 tmp; 947 u16 tmp;
952 948
953 if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) 949 if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
954 return; 950 return;
955 951
956 if (tmp & BMSR_LSTATUS) 952 if (tmp & BMSR_LSTATUS)
@@ -1408,8 +1404,10 @@ static void pegasus_disconnect(struct usb_interface *intf)
1408 unlink_all_urbs(pegasus); 1404 unlink_all_urbs(pegasus);
1409 free_all_urbs(pegasus); 1405 free_all_urbs(pegasus);
1410 free_skb_pool(pegasus); 1406 free_skb_pool(pegasus);
1411 if (pegasus->rx_skb) 1407 if (pegasus->rx_skb != NULL) {
1412 dev_kfree_skb(pegasus->rx_skb); 1408 dev_kfree_skb(pegasus->rx_skb);
1409 pegasus->rx_skb = NULL;
1410 }
1413 free_netdev(pegasus->net); 1411 free_netdev(pegasus->net);
1414} 1412}
1415 1413
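read_mii_word() follows the convention documented by the new comment, 0 on success and an error otherwise, so callers must treat a nonzero return as failure; set_carrier() above is the call site whose test had to be flipped accordingly. A fragment reusing set_carrier()'s locals:

u16 bmsr;

if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &bmsr) != 0)
	return;				/* nonzero means the MII read failed */
if (bmsr & BMSR_LSTATUS)
	netif_carrier_on(net);
else
	netif_carrier_off(net);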
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index 39a21c74fdf4..980e4aaa97aa 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -253,6 +253,7 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
253 * of that mess as possible. 253 * of that mess as possible.
254 */ 254 */
255#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101) 255#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101)
256#define OID_GEN_MAXIMUM_FRAME_SIZE ccpu2(0x00010106)
256#define OID_GEN_CURRENT_PACKET_FILTER ccpu2(0x0001010e) 257#define OID_GEN_CURRENT_PACKET_FILTER ccpu2(0x0001010e)
257 258
258/* 259/*
@@ -349,7 +350,7 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
349 case RNDIS_MSG_INDICATE: { /* fault */ 350 case RNDIS_MSG_INDICATE: { /* fault */
350 // struct rndis_indicate *msg = (void *)buf; 351 // struct rndis_indicate *msg = (void *)buf;
351 dev_info(&info->control->dev, 352 dev_info(&info->control->dev,
352 "rndis fault indication\n"); 353 "rndis fault indication\n");
353 } 354 }
354 break; 355 break;
355 case RNDIS_MSG_KEEPALIVE: { /* ping */ 356 case RNDIS_MSG_KEEPALIVE: { /* ping */
@@ -387,6 +388,71 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
387 return -ETIMEDOUT; 388 return -ETIMEDOUT;
388} 389}
389 390
391/*
392 * rndis_query:
393 *
394 * Performs a query for @oid along with 0 or more bytes of payload as
395 * specified by @in_len. If @reply_len is not set to -1 then the reply
396 * length is checked against this value, resulting in an error if it
397 * doesn't match.
398 *
 399 * NOTE: Adding a payload equal to or greater than the size of the expected

400 * response payload is an evident requirement MSFT added for ActiveSync.
401 *
402 * The only exception is for OIDs that return a variably sized response,
403 * in which case no payload should be added. This undocumented (and
404 * nonsensical!) issue was found by sniffing protocol requests from the
405 * ActiveSync 4.1 Windows driver.
406 */
407static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
408 void *buf, u32 oid, u32 in_len,
409 void **reply, int *reply_len)
410{
411 int retval;
412 union {
413 void *buf;
414 struct rndis_msg_hdr *header;
415 struct rndis_query *get;
416 struct rndis_query_c *get_c;
417 } u;
418 u32 off, len;
419
420 u.buf = buf;
421
422 memset(u.get, 0, sizeof *u.get + in_len);
423 u.get->msg_type = RNDIS_MSG_QUERY;
424 u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
425 u.get->oid = oid;
426 u.get->len = cpu_to_le32(in_len);
427 u.get->offset = ccpu2(20);
428
429 retval = rndis_command(dev, u.header);
430 if (unlikely(retval < 0)) {
431 dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n",
432 oid, retval);
433 return retval;
434 }
435
436 off = le32_to_cpu(u.get_c->offset);
437 len = le32_to_cpu(u.get_c->len);
438 if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
439 goto response_error;
440
441 if (*reply_len != -1 && len != *reply_len)
442 goto response_error;
443
444 *reply = (unsigned char *) &u.get_c->request_id + off;
445 *reply_len = len;
446
447 return retval;
448
449response_error:
450 dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) "
451 "invalid response - off %d len %d\n",
452 oid, off, len);
453 return -EDOM;
454}
455
390static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) 456static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
391{ 457{
392 int retval; 458 int retval;
@@ -403,6 +469,8 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
403 struct rndis_set_c *set_c; 469 struct rndis_set_c *set_c;
404 } u; 470 } u;
405 u32 tmp; 471 u32 tmp;
472 int reply_len;
473 unsigned char *bp;
406 474
407 /* we can't rely on i/o from stack working, or stack allocation */ 475 /* we can't rely on i/o from stack working, or stack allocation */
408 u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); 476 u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
@@ -421,6 +489,12 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
421 * TX we'll stick to one Ethernet packet plus RNDIS framing. 489 * TX we'll stick to one Ethernet packet plus RNDIS framing.
422 * For RX we handle drivers that zero-pad to end-of-packet. 490 * For RX we handle drivers that zero-pad to end-of-packet.
423 * Don't let userspace change these settings. 491 * Don't let userspace change these settings.
492 *
 493 * NOTE: there still seems to be weirdness here, as if we need
494 * to do some more things to make sure WinCE targets accept this.
495 * They default to jumbograms of 8KB or 16KB, which is absurd
496 * for such low data rates and which is also more than Linux
497 * can usually expect to allocate for SKB data...
424 */ 498 */
425 net->hard_header_len += sizeof (struct rndis_data_hdr); 499 net->hard_header_len += sizeof (struct rndis_data_hdr);
426 dev->hard_mtu = net->mtu + net->hard_header_len; 500 dev->hard_mtu = net->mtu + net->hard_header_len;
@@ -434,7 +508,7 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
434 if (unlikely(retval < 0)) { 508 if (unlikely(retval < 0)) {
435 /* it might not even be an RNDIS device!! */ 509 /* it might not even be an RNDIS device!! */
436 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); 510 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
437 goto fail_and_release; 511 goto fail_and_release;
438 } 512 }
439 tmp = le32_to_cpu(u.init_c->max_transfer_size); 513 tmp = le32_to_cpu(u.init_c->max_transfer_size);
440 if (tmp < dev->hard_mtu) { 514 if (tmp < dev->hard_mtu) {
@@ -450,34 +524,15 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
450 dev->hard_mtu, tmp, dev->rx_urb_size, 524 dev->hard_mtu, tmp, dev->rx_urb_size,
451 1 << le32_to_cpu(u.init_c->packet_alignment)); 525 1 << le32_to_cpu(u.init_c->packet_alignment));
452 526
453 /* Get designated host ethernet address. 527 /* Get designated host ethernet address */
454 * 528 reply_len = ETH_ALEN;
455 * Adding a payload exactly the same size as the expected response 529 retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS,
456 * payload is an evident requirement MSFT added for ActiveSync. 530 48, (void **) &bp, &reply_len);
457 * This undocumented (and nonsensical) issue was found by sniffing 531 if (unlikely(retval < 0)) {
458 * protocol requests from the ActiveSync 4.1 Windows driver.
459 */
460 memset(u.get, 0, sizeof *u.get + 48);
461 u.get->msg_type = RNDIS_MSG_QUERY;
462 u.get->msg_len = ccpu2(sizeof *u.get + 48);
463 u.get->oid = OID_802_3_PERMANENT_ADDRESS;
464 u.get->len = ccpu2(48);
465 u.get->offset = ccpu2(20);
466
467 retval = rndis_command(dev, u.header);
468 if (unlikely(retval < 0)) {
469 dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); 532 dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
470 goto fail_and_release; 533 goto fail_and_release;
471 } 534 }
472 tmp = le32_to_cpu(u.get_c->offset); 535 memcpy(net->dev_addr, bp, ETH_ALEN);
473 if (unlikely((tmp + 8) > (CONTROL_BUFFER_SIZE - ETH_ALEN)
474 || u.get_c->len != ccpu2(ETH_ALEN))) {
475 dev_err(&intf->dev, "rndis ethaddr off %d len %d ?\n",
476 tmp, le32_to_cpu(u.get_c->len));
477 retval = -EDOM;
478 goto fail_and_release;
479 }
480 memcpy(net->dev_addr, tmp + (char *)&u.get_c->request_id, ETH_ALEN);
481 536
482 /* set a nonzero filter to enable data transfers */ 537 /* set a nonzero filter to enable data transfers */
483 memset(u.set, 0, sizeof *u.set); 538 memset(u.set, 0, sizeof *u.set);
@@ -502,6 +557,7 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
502fail_and_release: 557fail_and_release:
503 usb_set_intfdata(info->data, NULL); 558 usb_set_intfdata(info->data, NULL);
504 usb_driver_release_interface(driver_of(intf), info->data); 559 usb_driver_release_interface(driver_of(intf), info->data);
560 info->data = NULL;
505fail: 561fail:
506 kfree(u.buf); 562 kfree(u.buf);
507 return retval; 563 return retval;
@@ -588,7 +644,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
588 if (likely((sizeof *hdr) <= room)) { 644 if (likely((sizeof *hdr) <= room)) {
589 skb->data = memmove(skb->head + sizeof *hdr, 645 skb->data = memmove(skb->head + sizeof *hdr,
590 skb->data, len); 646 skb->data, len);
591 skb->tail = skb->data + len; 647 skb_set_tail_pointer(skb, len);
592 goto fill; 648 goto fill;
593 } 649 }
594 } 650 }
@@ -618,7 +674,7 @@ fill:
618 674
619static const struct driver_info rndis_info = { 675static const struct driver_info rndis_info = {
620 .description = "RNDIS device", 676 .description = "RNDIS device",
621 .flags = FLAG_ETHER | FLAG_FRAMING_RN, 677 .flags = FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT,
622 .bind = rndis_bind, 678 .bind = rndis_bind,
623 .unbind = rndis_unbind, 679 .unbind = rndis_unbind,
624 .status = rndis_status, 680 .status = rndis_status,
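Reviewer note: the hunk above replaces the open-coded RNDIS QUERY for OID_802_3_PERMANENT_ADDRESS (build the message, check the returned offset/len, point into the buffer) with a call to a shared rndis_query() helper that returns a validated pointer via bp. A minimal standalone sketch of the validation step that helper has to perform follows; the struct layout and function name are illustrative assumptions for this note, not the actual rndis_host.c interface.

    #include <stdint.h>
    #include <stddef.h>

    #define CONTROL_BUFFER_SIZE 1025        /* assumed for the sketch */

    struct query_c {        /* QUERY completion header, fields already in CPU order */
            uint32_t msg_type, msg_len, request_id, status;
            uint32_t len;           /* payload length */
            uint32_t offset;        /* payload offset, relative to request_id */
    };

    /* Return the payload if the reply is sane, NULL otherwise.  The struct is
     * assumed to overlay the start of the control buffer. */
    static const void *query_payload(const struct query_c *c, uint32_t want_len)
    {
            if (c->len != want_len)
                    return NULL;
            /* request_id sits 8 bytes into the message, so offset + 8 plus the
             * payload must still fit inside the control buffer. */
            if (c->offset + 8 + want_len > CONTROL_BUFFER_SIZE)
                    return NULL;
            return (const char *)&c->request_id + c->offset;
    }

The removed lines show the same check done inline with ccpu2()/le32_to_cpu() conversions; centralizing it in one helper keeps every OID query on the same validated path.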
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c
index ea153dc9b0ac..fa598f0340cf 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/usb/net/rtl8150.c
@@ -646,7 +646,6 @@ static void fill_skb_pool(rtl8150_t *dev)
646 if (!skb) { 646 if (!skb) {
647 return; 647 return;
648 } 648 }
649 skb->dev = dev->netdev;
650 skb_reserve(skb, 2); 649 skb_reserve(skb, 2);
651 dev->rx_skb_pool[i] = skb; 650 dev->rx_skb_pool[i] = skb;
652 } 651 }
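Reviewer note: this hunk and the usbnet_skb_return() change below both drop an explicit skb->dev assignment. That is safe because eth_type_trans() records the receiving net_device in skb->dev while classifying the frame, so pre-setting it on the RX path is redundant. Sketch of the resulting hand-off (not a literal excerpt):

    /* RX completion: eth_type_trans() fills in both skb->protocol and skb->dev. */
    skb->protocol = eth_type_trans(skb, net);       /* also does skb->dev = net */
    netif_rx(skb);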
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index de69b183bd2f..f9cd42d058b0 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -203,7 +203,6 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
203{ 203{
204 int status; 204 int status;
205 205
206 skb->dev = dev->net;
207 skb->protocol = eth_type_trans (skb, dev->net); 206 skb->protocol = eth_type_trans (skb, dev->net);
208 dev->stats.rx_packets++; 207 dev->stats.rx_packets++;
209 dev->stats.rx_bytes += skb->len; 208 dev->stats.rx_bytes += skb->len;
@@ -735,8 +734,7 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
735{ 734{
736 struct usbnet *dev = netdev_priv(net); 735 struct usbnet *dev = netdev_priv(net);
737 736
738 /* REVISIT don't always return "usbnet" */ 737 strncpy (info->driver, dev->driver_name, sizeof info->driver);
739 strncpy (info->driver, driver_name, sizeof info->driver);
740 strncpy (info->version, DRIVER_VERSION, sizeof info->version); 738 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
741 strncpy (info->fw_version, dev->driver_info->description, 739 strncpy (info->fw_version, dev->driver_info->description,
742 sizeof info->fw_version); 740 sizeof info->fw_version);
@@ -1116,10 +1114,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1116 struct driver_info *info; 1114 struct driver_info *info;
1117 struct usb_device *xdev; 1115 struct usb_device *xdev;
1118 int status; 1116 int status;
1117 const char *name;
1119 1118
1119 name = udev->dev.driver->name;
1120 info = (struct driver_info *) prod->driver_info; 1120 info = (struct driver_info *) prod->driver_info;
1121 if (!info) { 1121 if (!info) {
1122 dev_dbg (&udev->dev, "blacklisted by %s\n", driver_name); 1122 dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1123 return -ENODEV; 1123 return -ENODEV;
1124 } 1124 }
1125 xdev = interface_to_usbdev (udev); 1125 xdev = interface_to_usbdev (udev);
@@ -1139,6 +1139,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1139 dev = netdev_priv(net); 1139 dev = netdev_priv(net);
1140 dev->udev = xdev; 1140 dev->udev = xdev;
1141 dev->driver_info = info; 1141 dev->driver_info = info;
1142 dev->driver_name = name;
1142 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV 1143 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1143 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 1144 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1144 skb_queue_head_init (&dev->rxq); 1145 skb_queue_head_init (&dev->rxq);
diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h
index 07c70abbe0ec..cbb53e065d6c 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/usb/net/usbnet.h
@@ -29,6 +29,7 @@ struct usbnet {
29 /* housekeeping */ 29 /* housekeeping */
30 struct usb_device *udev; 30 struct usb_device *udev;
31 struct driver_info *driver_info; 31 struct driver_info *driver_info;
32 const char *driver_name;
32 wait_queue_head_t *wait; 33 wait_queue_head_t *wait;
33 struct mutex phy_mutex; 34 struct mutex phy_mutex;
34 35
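Reviewer note: usbnet is a shared core used by several USB networking minidrivers, so reporting a hard-coded "usbnet" from get_drvinfo was misleading (the old REVISIT comment already flagged this). The probe path now records the name of the usb_driver actually bound to the interface and get_drvinfo returns that. Roughly, as a sketch of the two sides:

    /* at probe time: remember which minidriver bound this interface */
    dev->driver_name = udev->dev.driver->name;      /* e.g. "rndis_host" */

    /* in usbnet_get_drvinfo(): */
    strncpy(info->driver, dev->driver_name, sizeof info->driver);

With this, ethtool -i on the interface is expected to show the bound minidriver (cdc_ether, rndis_host, ...) rather than the generic core name.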
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 2f4d303ee36f..ba5d1dc03036 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -423,11 +423,11 @@ config USB_SERIAL_MCT_U232
423 module will be called mct_u232. 423 module will be called mct_u232.
424 424
425config USB_SERIAL_MOS7720 425config USB_SERIAL_MOS7720
426 tristate "USB Moschip 7720 Single Port Serial Driver" 426 tristate "USB Moschip 7720 Serial Driver"
427 depends on USB_SERIAL 427 depends on USB_SERIAL
428 ---help--- 428 ---help---
429 Say Y here if you want to use a USB Serial single port adapter from 429 Say Y here if you want to use USB Serial single and double
430 Moschip Semiconductor Tech. 430 port adapters from Moschip Semiconductor Tech.
431 431
432 To compile this driver as a module, choose M here: the 432 To compile this driver as a module, choose M here: the
433 module will be called mos7720. 433 module will be called mos7720.
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 11dad42c3c60..b675735bfbee 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -209,6 +209,7 @@ static void aircable_send(struct usb_serial_port *port)
209 int count, result; 209 int count, result;
210 struct aircable_private *priv = usb_get_serial_port_data(port); 210 struct aircable_private *priv = usb_get_serial_port_data(port);
211 unsigned char* buf; 211 unsigned char* buf;
212 u16 *dbuf;
212 dbg("%s - port %d", __FUNCTION__, port->number); 213 dbg("%s - port %d", __FUNCTION__, port->number);
213 if (port->write_urb_busy) 214 if (port->write_urb_busy)
214 return; 215 return;
@@ -226,8 +227,8 @@ static void aircable_send(struct usb_serial_port *port)
226 227
227 buf[0] = TX_HEADER_0; 228 buf[0] = TX_HEADER_0;
228 buf[1] = TX_HEADER_1; 229 buf[1] = TX_HEADER_1;
229 buf[2] = (unsigned char)count; 230 dbuf = (u16 *)&buf[2];
230 buf[3] = (unsigned char)(count >> 8); 231 *dbuf = cpu_to_le16((u16)count);
231 serial_buf_get(priv->tx_buf,buf + HCI_HEADER_LENGTH, MAX_HCI_FRAMESIZE); 232 serial_buf_get(priv->tx_buf,buf + HCI_HEADER_LENGTH, MAX_HCI_FRAMESIZE);
232 233
233 memcpy(port->write_urb->transfer_buffer, buf, 234 memcpy(port->write_urb->transfer_buffer, buf,
@@ -434,7 +435,7 @@ static void aircable_write_bulk_callback(struct urb *urb)
434 __FUNCTION__, urb->status); 435 __FUNCTION__, urb->status);
435 port->write_urb->transfer_buffer_length = 1; 436 port->write_urb->transfer_buffer_length = 1;
436 port->write_urb->dev = port->serial->dev; 437 port->write_urb->dev = port->serial->dev;
437 result = usb_submit_urb(port->write_urb, GFP_KERNEL); 438 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
438 if (result) 439 if (result)
439 dev_err(&urb->dev->dev, 440 dev_err(&urb->dev->dev,
440 "%s - failed resubmitting write urb, error %d\n", 441 "%s - failed resubmitting write urb, error %d\n",
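Reviewer note: the aircable TX header carries the payload length as a 16-bit little-endian field. The old code built it byte by byte; the new code stores it through a u16 pointer converted with cpu_to_le16(), which states the intent directly (and the two-byte offset keeps the store naturally aligned, assuming the buffer itself is at least 2-byte aligned). For reference, a standalone, alignment- and endian-safe way to do the same packing outside the kernel:

    #include <stdint.h>

    /* Write a 16-bit value into two bytes in little-endian order,
     * independent of host endianness and pointer alignment. */
    static void put_le16(uint8_t *p, uint16_t val)
    {
            p[0] = (uint8_t)(val & 0xff);
            p[1] = (uint8_t)(val >> 8);
    }

    /* usage: put_le16(&buf[2], (uint16_t)count); */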
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index edd685791a6b..ea2175bb2274 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -341,7 +341,7 @@ static int ark3116_open(struct usb_serial_port *port, struct file *filp)
341 341
342 result = usb_serial_generic_open(port, filp); 342 result = usb_serial_generic_open(port, filp);
343 if (result) 343 if (result)
344 return result; 344 goto err_out;
345 345
346 /* open */ 346 /* open */
347 ARK3116_RCV(serial, 111, 0xFE, 0xC0, 0x0000, 0x0003, 0x02, buf); 347 ARK3116_RCV(serial, 111, 0xFE, 0xC0, 0x0000, 0x0003, 0x02, buf);
@@ -372,6 +372,7 @@ static int ark3116_open(struct usb_serial_port *port, struct file *filp)
372 if (port->tty) 372 if (port->tty)
373 ark3116_set_termios(port, &tmp_termios); 373 ark3116_set_termios(port, &tmp_termios);
374 374
375err_out:
375 kfree(buf); 376 kfree(buf);
376 377
377 return result; 378 return result;
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index d7d0ba986a80..e831cb7f64fd 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -58,9 +58,11 @@ static struct usb_device_id id_table [] = {
58 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 58 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
59 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ 59 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
60 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ 60 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
61 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
61 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 62 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
62 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 63 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
63 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ 64 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
65 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
64 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ 66 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
65 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ 67 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
66 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 68 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8ff9d54b21e6..95a1805b064f 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -342,6 +342,7 @@ static struct usb_device_id id_table_combined [] = {
342 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, 342 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
343 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, 343 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
344 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, 344 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
345 { USB_DEVICE(FTDI_VID, FTDI_USBX_707_PID) },
345 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) }, 346 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) },
346 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) }, 347 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) },
347 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) }, 348 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) },
@@ -1433,6 +1434,7 @@ static int ftdi_write (struct usb_serial_port *port,
1433 dbg("%s - write limit hit\n", __FUNCTION__); 1434 dbg("%s - write limit hit\n", __FUNCTION__);
1434 return 0; 1435 return 0;
1435 } 1436 }
1437 priv->tx_outstanding_urbs++;
1436 spin_unlock_irqrestore(&priv->tx_lock, flags); 1438 spin_unlock_irqrestore(&priv->tx_lock, flags);
1437 1439
1438 data_offset = priv->write_offset; 1440 data_offset = priv->write_offset;
@@ -1450,14 +1452,15 @@ static int ftdi_write (struct usb_serial_port *port,
1450 buffer = kmalloc (transfer_size, GFP_ATOMIC); 1452 buffer = kmalloc (transfer_size, GFP_ATOMIC);
1451 if (!buffer) { 1453 if (!buffer) {
1452 err("%s ran out of kernel memory for urb ...", __FUNCTION__); 1454 err("%s ran out of kernel memory for urb ...", __FUNCTION__);
1453 return -ENOMEM; 1455 count = -ENOMEM;
1456 goto error_no_buffer;
1454 } 1457 }
1455 1458
1456 urb = usb_alloc_urb(0, GFP_ATOMIC); 1459 urb = usb_alloc_urb(0, GFP_ATOMIC);
1457 if (!urb) { 1460 if (!urb) {
1458 err("%s - no more free urbs", __FUNCTION__); 1461 err("%s - no more free urbs", __FUNCTION__);
1459 kfree (buffer); 1462 count = -ENOMEM;
1460 return -ENOMEM; 1463 goto error_no_urb;
1461 } 1464 }
1462 1465
1463 /* Copy data */ 1466 /* Copy data */
@@ -1499,10 +1502,9 @@ static int ftdi_write (struct usb_serial_port *port,
1499 if (status) { 1502 if (status) {
1500 err("%s - failed submitting write urb, error %d", __FUNCTION__, status); 1503 err("%s - failed submitting write urb, error %d", __FUNCTION__, status);
1501 count = status; 1504 count = status;
1502 kfree (buffer); 1505 goto error;
1503 } else { 1506 } else {
1504 spin_lock_irqsave(&priv->tx_lock, flags); 1507 spin_lock_irqsave(&priv->tx_lock, flags);
1505 ++priv->tx_outstanding_urbs;
1506 priv->tx_outstanding_bytes += count; 1508 priv->tx_outstanding_bytes += count;
1507 priv->tx_bytes += count; 1509 priv->tx_bytes += count;
1508 spin_unlock_irqrestore(&priv->tx_lock, flags); 1510 spin_unlock_irqrestore(&priv->tx_lock, flags);
@@ -1510,10 +1512,19 @@ static int ftdi_write (struct usb_serial_port *port,
1510 1512
1511 /* we are done with this urb, so let the host driver 1513 /* we are done with this urb, so let the host driver
1512 * really free it when it is finished with it */ 1514 * really free it when it is finished with it */
1513 usb_free_urb (urb); 1515 usb_free_urb(urb);
1514 1516
1515 dbg("%s write returning: %d", __FUNCTION__, count); 1517 dbg("%s write returning: %d", __FUNCTION__, count);
1516 return count; 1518 return count;
1519error:
1520 usb_free_urb(urb);
1521error_no_urb:
1522 kfree (buffer);
1523error_no_buffer:
1524 spin_lock_irqsave(&priv->tx_lock, flags);
1525 priv->tx_outstanding_urbs--;
1526 spin_unlock_irqrestore(&priv->tx_lock, flags);
1527 return count;
1517} /* ftdi_write */ 1528} /* ftdi_write */
1518 1529
1519 1530
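Reviewer note: ftdi_write() now reserves its slot in tx_outstanding_urbs while still holding tx_lock, before any allocation can fail, and the error paths unwind in reverse order through labelled gotos, giving the slot back under the lock. This is the standard kernel pattern for multi-step setup with partial-failure cleanup. A hedged sketch of the shape (names are illustrative, not ftdi_private; needs <linux/usb.h>, <linux/slab.h>, <linux/spinlock.h>, <linux/string.h>; tx_done() is an assumed completion handler that frees the buffer and decrements the counter when the transfer finishes):

    struct tx_ctx {
            spinlock_t lock;
            int outstanding;
    };

    static int tx_one(struct tx_ctx *c, struct usb_device *udev,
                      unsigned int pipe, const void *src, size_t len)
    {
            unsigned long flags;
            unsigned char *buffer;
            struct urb *urb;
            int ret;

            spin_lock_irqsave(&c->lock, flags);
            c->outstanding++;                       /* reserve the slot up front */
            spin_unlock_irqrestore(&c->lock, flags);

            buffer = kmalloc(len, GFP_ATOMIC);
            if (!buffer) {
                    ret = -ENOMEM;
                    goto err_no_buffer;
            }
            urb = usb_alloc_urb(0, GFP_ATOMIC);
            if (!urb) {
                    ret = -ENOMEM;
                    goto err_no_urb;
            }
            memcpy(buffer, src, len);
            usb_fill_bulk_urb(urb, udev, pipe, buffer, len,
                              tx_done /* assumed completion */, c);
            ret = usb_submit_urb(urb, GFP_ATOMIC);
            if (ret)
                    goto err_submit;

            usb_free_urb(urb);      /* drop our ref; the core holds one in flight */
            return len;

    err_submit:
            usb_free_urb(urb);
    err_no_urb:
            kfree(buffer);
    err_no_buffer:
            spin_lock_irqsave(&c->lock, flags);
            c->outstanding--;                       /* give the slot back */
            spin_unlock_irqrestore(&c->lock, flags);
            return ret;
    }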
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 513cfe1b768b..77ad0a09b384 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -31,6 +31,7 @@
31#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */ 31#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
32#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */ 32#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
33#define FTDI_NF_RIC_PID 0x0001 /* Product Id */ 33#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
34#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */
34 35
35 36
36/* www.canusb.com Lawicel CANUSB device */ 37/* www.canusb.com Lawicel CANUSB device */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 6a26a2e683a6..18f74ac76565 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -111,7 +111,7 @@ struct edgeport_port {
111 111
112 struct TxFifo txfifo; /* transmit fifo -- size will be maxTxCredits */ 112 struct TxFifo txfifo; /* transmit fifo -- size will be maxTxCredits */
113 struct urb *write_urb; /* write URB for this port */ 113 struct urb *write_urb; /* write URB for this port */
114 char write_in_progress; /* TRUE while a write URB is outstanding */ 114 bool write_in_progress; /* 'true' while a write URB is outstanding */
115 spinlock_t ep_lock; 115 spinlock_t ep_lock;
116 116
117 __u8 shadowLCR; /* last LCR value received */ 117 __u8 shadowLCR; /* last LCR value received */
@@ -123,11 +123,11 @@ struct edgeport_port {
123 __u8 validDataMask; 123 __u8 validDataMask;
124 __u32 baudRate; 124 __u32 baudRate;
125 125
126 char open; 126 bool open;
127 char openPending; 127 bool openPending;
128 char commandPending; 128 bool commandPending;
129 char closePending; 129 bool closePending;
130 char chaseResponsePending; 130 bool chaseResponsePending;
131 131
132 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ 132 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
133 wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ 133 wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */
@@ -156,7 +156,7 @@ struct edgeport_serial {
156 __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ 156 __u8 bulk_in_endpoint; /* the bulk in endpoint handle */
157 unsigned char * bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ 157 unsigned char * bulk_in_buffer; /* the buffer we use for the bulk in endpoint */
158 struct urb * read_urb; /* our bulk read urb */ 158 struct urb * read_urb; /* our bulk read urb */
159 int read_in_progress; 159 bool read_in_progress;
160 spinlock_t es_lock; 160 spinlock_t es_lock;
161 161
162 __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ 162 __u8 bulk_out_endpoint; /* the bulk out endpoint handle */
@@ -212,7 +212,7 @@ static int debug;
212 212
213static int low_latency = 1; /* tty low latency flag, on by default */ 213static int low_latency = 1; /* tty low latency flag, on by default */
214 214
215static int CmdUrbs = 0; /* Number of outstanding Command Write Urbs */ 215static atomic_t CmdUrbs; /* Number of outstanding Command Write Urbs */
216 216
217 217
218/* local function prototypes */ 218/* local function prototypes */
@@ -631,14 +631,14 @@ static void edge_interrupt_callback (struct urb *urb)
631 if (edge_serial->rxBytesAvail > 0 && 631 if (edge_serial->rxBytesAvail > 0 &&
632 !edge_serial->read_in_progress) { 632 !edge_serial->read_in_progress) {
633 dbg("%s - posting a read", __FUNCTION__); 633 dbg("%s - posting a read", __FUNCTION__);
634 edge_serial->read_in_progress = TRUE; 634 edge_serial->read_in_progress = true;
635 635
636 /* we have pending bytes on the bulk in pipe, send a request */ 636 /* we have pending bytes on the bulk in pipe, send a request */
637 edge_serial->read_urb->dev = edge_serial->serial->dev; 637 edge_serial->read_urb->dev = edge_serial->serial->dev;
638 result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC); 638 result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
639 if (result) { 639 if (result) {
640 dev_err(&edge_serial->serial->dev->dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __FUNCTION__, result); 640 dev_err(&edge_serial->serial->dev->dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __FUNCTION__, result);
641 edge_serial->read_in_progress = FALSE; 641 edge_serial->read_in_progress = false;
642 } 642 }
643 } 643 }
644 spin_unlock(&edge_serial->es_lock); 644 spin_unlock(&edge_serial->es_lock);
@@ -695,13 +695,13 @@ static void edge_bulk_in_callback (struct urb *urb)
695 695
696 if (urb->status) { 696 if (urb->status) {
697 dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status); 697 dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
698 edge_serial->read_in_progress = FALSE; 698 edge_serial->read_in_progress = false;
699 return; 699 return;
700 } 700 }
701 701
702 if (urb->actual_length == 0) { 702 if (urb->actual_length == 0) {
703 dbg("%s - read bulk callback with no data", __FUNCTION__); 703 dbg("%s - read bulk callback with no data", __FUNCTION__);
704 edge_serial->read_in_progress = FALSE; 704 edge_serial->read_in_progress = false;
705 return; 705 return;
706 } 706 }
707 707
@@ -725,10 +725,10 @@ static void edge_bulk_in_callback (struct urb *urb)
725 status = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC); 725 status = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
726 if (status) { 726 if (status) {
727 dev_err(&urb->dev->dev, "%s - usb_submit_urb(read bulk) failed, status = %d\n", __FUNCTION__, status); 727 dev_err(&urb->dev->dev, "%s - usb_submit_urb(read bulk) failed, status = %d\n", __FUNCTION__, status);
728 edge_serial->read_in_progress = FALSE; 728 edge_serial->read_in_progress = false;
729 } 729 }
730 } else { 730 } else {
731 edge_serial->read_in_progress = FALSE; 731 edge_serial->read_in_progress = false;
732 } 732 }
733 733
734 spin_unlock(&edge_serial->es_lock); 734 spin_unlock(&edge_serial->es_lock);
@@ -759,7 +759,7 @@ static void edge_bulk_out_data_callback (struct urb *urb)
759 } 759 }
760 760
761 // Release the Write URB 761 // Release the Write URB
762 edge_port->write_in_progress = FALSE; 762 edge_port->write_in_progress = false;
763 763
764 // Check if more data needs to be sent 764 // Check if more data needs to be sent
765 send_more_port_data((struct edgeport_serial *)(usb_get_serial_data(edge_port->port->serial)), edge_port); 765 send_more_port_data((struct edgeport_serial *)(usb_get_serial_data(edge_port->port->serial)), edge_port);
@@ -779,8 +779,8 @@ static void edge_bulk_out_cmd_callback (struct urb *urb)
779 779
780 dbg("%s", __FUNCTION__); 780 dbg("%s", __FUNCTION__);
781 781
782 CmdUrbs--; 782 atomic_dec(&CmdUrbs);
783 dbg("%s - FREE URB %p (outstanding %d)", __FUNCTION__, urb, CmdUrbs); 783 dbg("%s - FREE URB %p (outstanding %d)", __FUNCTION__, urb, atomic_read(&CmdUrbs));
784 784
785 785
786 /* clean up the transfer buffer */ 786 /* clean up the transfer buffer */
@@ -802,7 +802,7 @@ static void edge_bulk_out_cmd_callback (struct urb *urb)
802 tty_wakeup(tty); 802 tty_wakeup(tty);
803 803
804 /* we have completed the command */ 804 /* we have completed the command */
805 edge_port->commandPending = FALSE; 805 edge_port->commandPending = false;
806 wake_up(&edge_port->wait_command); 806 wake_up(&edge_port->wait_command);
807} 807}
808 808
@@ -868,7 +868,7 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
868 port0->bulk_in_buffer, 868 port0->bulk_in_buffer,
869 edge_serial->read_urb->transfer_buffer_length, 869 edge_serial->read_urb->transfer_buffer_length,
870 edge_bulk_in_callback, edge_serial); 870 edge_bulk_in_callback, edge_serial);
871 edge_serial->read_in_progress = FALSE; 871 edge_serial->read_in_progress = false;
872 872
873 /* start interrupt read for this edgeport 873 /* start interrupt read for this edgeport
874 * this interrupt will continue as long as the edgeport is connected */ 874 * this interrupt will continue as long as the edgeport is connected */
@@ -890,26 +890,26 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
890 /* initialize our port settings */ 890 /* initialize our port settings */
891 edge_port->txCredits = 0; /* Can't send any data yet */ 891 edge_port->txCredits = 0; /* Can't send any data yet */
892 edge_port->shadowMCR = MCR_MASTER_IE; /* Must always set this bit to enable ints! */ 892 edge_port->shadowMCR = MCR_MASTER_IE; /* Must always set this bit to enable ints! */
893 edge_port->chaseResponsePending = FALSE; 893 edge_port->chaseResponsePending = false;
894 894
895 /* send a open port command */ 895 /* send a open port command */
896 edge_port->openPending = TRUE; 896 edge_port->openPending = true;
897 edge_port->open = FALSE; 897 edge_port->open = false;
898 response = send_iosp_ext_cmd (edge_port, IOSP_CMD_OPEN_PORT, 0); 898 response = send_iosp_ext_cmd (edge_port, IOSP_CMD_OPEN_PORT, 0);
899 899
900 if (response < 0) { 900 if (response < 0) {
901 dev_err(&port->dev, "%s - error sending open port command\n", __FUNCTION__); 901 dev_err(&port->dev, "%s - error sending open port command\n", __FUNCTION__);
902 edge_port->openPending = FALSE; 902 edge_port->openPending = false;
903 return -ENODEV; 903 return -ENODEV;
904 } 904 }
905 905
906 /* now wait for the port to be completely opened */ 906 /* now wait for the port to be completely opened */
907 wait_event_timeout(edge_port->wait_open, (edge_port->openPending != TRUE), OPEN_TIMEOUT); 907 wait_event_timeout(edge_port->wait_open, !edge_port->openPending, OPEN_TIMEOUT);
908 908
909 if (edge_port->open == FALSE) { 909 if (!edge_port->open) {
910 /* open timed out */ 910 /* open timed out */
911 dbg("%s - open timedout", __FUNCTION__); 911 dbg("%s - open timedout", __FUNCTION__);
912 edge_port->openPending = FALSE; 912 edge_port->openPending = false;
913 return -ENODEV; 913 return -ENODEV;
914 } 914 }
915 915
@@ -928,7 +928,7 @@ static int edge_open (struct usb_serial_port *port, struct file * filp)
928 928
929 /* Allocate a URB for the write */ 929 /* Allocate a URB for the write */
930 edge_port->write_urb = usb_alloc_urb (0, GFP_KERNEL); 930 edge_port->write_urb = usb_alloc_urb (0, GFP_KERNEL);
931 edge_port->write_in_progress = FALSE; 931 edge_port->write_in_progress = false;
932 932
933 if (!edge_port->write_urb) { 933 if (!edge_port->write_urb) {
934 dbg("%s - no memory", __FUNCTION__); 934 dbg("%s - no memory", __FUNCTION__);
@@ -966,7 +966,7 @@ static void block_until_chase_response(struct edgeport_port *edge_port)
966 lastCredits = edge_port->txCredits; 966 lastCredits = edge_port->txCredits;
967 967
968 // Did we get our Chase response 968 // Did we get our Chase response
969 if (edge_port->chaseResponsePending == FALSE) { 969 if (!edge_port->chaseResponsePending) {
970 dbg("%s - Got Chase Response", __FUNCTION__); 970 dbg("%s - Got Chase Response", __FUNCTION__);
971 971
972 // did we get all of our credit back? 972 // did we get all of our credit back?
@@ -985,7 +985,7 @@ static void block_until_chase_response(struct edgeport_port *edge_port)
985 // No activity.. count down. 985 // No activity.. count down.
986 loop--; 986 loop--;
987 if (loop == 0) { 987 if (loop == 0) {
988 edge_port->chaseResponsePending = FALSE; 988 edge_port->chaseResponsePending = false;
989 dbg("%s - Chase TIMEOUT", __FUNCTION__); 989 dbg("%s - Chase TIMEOUT", __FUNCTION__);
990 return; 990 return;
991 } 991 }
@@ -1068,13 +1068,13 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
1068 // block until tx is empty 1068 // block until tx is empty
1069 block_until_tx_empty(edge_port); 1069 block_until_tx_empty(edge_port);
1070 1070
1071 edge_port->closePending = TRUE; 1071 edge_port->closePending = true;
1072 1072
1073 if ((!edge_serial->is_epic) || 1073 if ((!edge_serial->is_epic) ||
1074 ((edge_serial->is_epic) && 1074 ((edge_serial->is_epic) &&
1075 (edge_serial->epic_descriptor.Supports.IOSPChase))) { 1075 (edge_serial->epic_descriptor.Supports.IOSPChase))) {
1076 /* flush and chase */ 1076 /* flush and chase */
1077 edge_port->chaseResponsePending = TRUE; 1077 edge_port->chaseResponsePending = true;
1078 1078
1079 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__); 1079 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__);
1080 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0); 1080 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
@@ -1082,7 +1082,7 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
1082 // block until chase finished 1082 // block until chase finished
1083 block_until_chase_response(edge_port); 1083 block_until_chase_response(edge_port);
1084 } else { 1084 } else {
1085 edge_port->chaseResponsePending = FALSE; 1085 edge_port->chaseResponsePending = false;
1086 } 1086 }
1087 } 1087 }
1088 1088
@@ -1094,10 +1094,10 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
1094 send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0); 1094 send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0);
1095 } 1095 }
1096 1096
1097 //port->close = TRUE; 1097 //port->close = true;
1098 edge_port->closePending = FALSE; 1098 edge_port->closePending = false;
1099 edge_port->open = FALSE; 1099 edge_port->open = false;
1100 edge_port->openPending = FALSE; 1100 edge_port->openPending = false;
1101 1101
1102 usb_kill_urb(edge_port->write_urb); 1102 usb_kill_urb(edge_port->write_urb);
1103 1103
@@ -1247,7 +1247,7 @@ static void send_more_port_data(struct edgeport_serial *edge_serial, struct edge
1247 } 1247 }
1248 1248
1249 // lock this write 1249 // lock this write
1250 edge_port->write_in_progress = TRUE; 1250 edge_port->write_in_progress = true;
1251 1251
1252 // get a pointer to the write_urb 1252 // get a pointer to the write_urb
1253 urb = edge_port->write_urb; 1253 urb = edge_port->write_urb;
@@ -1261,7 +1261,7 @@ static void send_more_port_data(struct edgeport_serial *edge_serial, struct edge
1261 buffer = kmalloc (count+2, GFP_ATOMIC); 1261 buffer = kmalloc (count+2, GFP_ATOMIC);
1262 if (buffer == NULL) { 1262 if (buffer == NULL) {
1263 dev_err(&edge_port->port->dev, "%s - no more kernel memory...\n", __FUNCTION__); 1263 dev_err(&edge_port->port->dev, "%s - no more kernel memory...\n", __FUNCTION__);
1264 edge_port->write_in_progress = FALSE; 1264 edge_port->write_in_progress = false;
1265 goto exit_send; 1265 goto exit_send;
1266 } 1266 }
1267 buffer[0] = IOSP_BUILD_DATA_HDR1 (edge_port->port->number - edge_port->port->serial->minor, count); 1267 buffer[0] = IOSP_BUILD_DATA_HDR1 (edge_port->port->number - edge_port->port->serial->minor, count);
@@ -1301,7 +1301,7 @@ static void send_more_port_data(struct edgeport_serial *edge_serial, struct edge
1301 if (status) { 1301 if (status) {
1302 /* something went wrong */ 1302 /* something went wrong */
1303 dev_err(&edge_port->port->dev, "%s - usb_submit_urb(write bulk) failed, status = %d, data lost\n", __FUNCTION__, status); 1303 dev_err(&edge_port->port->dev, "%s - usb_submit_urb(write bulk) failed, status = %d, data lost\n", __FUNCTION__, status);
1304 edge_port->write_in_progress = FALSE; 1304 edge_port->write_in_progress = false;
1305 1305
1306 /* revert the credits as something bad happened. */ 1306 /* revert the credits as something bad happened. */
1307 edge_port->txCredits += count; 1307 edge_port->txCredits += count;
@@ -1332,7 +1332,7 @@ static int edge_write_room (struct usb_serial_port *port)
1332 1332
1333 if (edge_port == NULL) 1333 if (edge_port == NULL)
1334 return -ENODEV; 1334 return -ENODEV;
1335 if (edge_port->closePending == TRUE) 1335 if (edge_port->closePending)
1336 return -ENODEV; 1336 return -ENODEV;
1337 1337
1338 dbg("%s - port %d", __FUNCTION__, port->number); 1338 dbg("%s - port %d", __FUNCTION__, port->number);
@@ -1371,7 +1371,7 @@ static int edge_chars_in_buffer (struct usb_serial_port *port)
1371 1371
1372 if (edge_port == NULL) 1372 if (edge_port == NULL)
1373 return -ENODEV; 1373 return -ENODEV;
1374 if (edge_port->closePending == TRUE) 1374 if (edge_port->closePending)
1375 return -ENODEV; 1375 return -ENODEV;
1376 1376
1377 if (!edge_port->open) { 1377 if (!edge_port->open) {
@@ -1762,7 +1762,7 @@ static void edge_break (struct usb_serial_port *port, int break_state)
1762 ((edge_serial->is_epic) && 1762 ((edge_serial->is_epic) &&
1763 (edge_serial->epic_descriptor.Supports.IOSPChase))) { 1763 (edge_serial->epic_descriptor.Supports.IOSPChase))) {
1764 /* flush and chase */ 1764 /* flush and chase */
1765 edge_port->chaseResponsePending = TRUE; 1765 edge_port->chaseResponsePending = true;
1766 1766
1767 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__); 1767 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__);
1768 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0); 1768 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
@@ -1770,7 +1770,7 @@ static void edge_break (struct usb_serial_port *port, int break_state)
1770 // block until chase finished 1770 // block until chase finished
1771 block_until_chase_response(edge_port); 1771 block_until_chase_response(edge_port);
1772 } else { 1772 } else {
1773 edge_port->chaseResponsePending = FALSE; 1773 edge_port->chaseResponsePending = false;
1774 } 1774 }
1775 } 1775 }
1776 1776
@@ -1952,13 +1952,13 @@ static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2
1952 // Also, we currently clear flag and close the port regardless of content of above's Byte3. 1952 // Also, we currently clear flag and close the port regardless of content of above's Byte3.
1953 // We could choose to do something else when Byte3 says Timeout on Chase from Edgeport, 1953 // We could choose to do something else when Byte3 says Timeout on Chase from Edgeport,
1954 // like wait longer in block_until_chase_response, but for now we don't. 1954 // like wait longer in block_until_chase_response, but for now we don't.
1955 edge_port->chaseResponsePending = FALSE; 1955 edge_port->chaseResponsePending = false;
1956 wake_up (&edge_port->wait_chase); 1956 wake_up (&edge_port->wait_chase);
1957 return; 1957 return;
1958 1958
1959 case IOSP_EXT_STATUS_RX_CHECK_RSP: 1959 case IOSP_EXT_STATUS_RX_CHECK_RSP:
1960 dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __FUNCTION__, edge_serial->rxPort, byte3 ); 1960 dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __FUNCTION__, edge_serial->rxPort, byte3 );
1961 //Port->RxCheckRsp = TRUE; 1961 //Port->RxCheckRsp = true;
1962 return; 1962 return;
1963 } 1963 }
1964 } 1964 }
@@ -1974,8 +1974,8 @@ static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2
1974 change_port_settings (edge_port, edge_port->port->tty->termios); 1974 change_port_settings (edge_port, edge_port->port->tty->termios);
1975 1975
1976 /* we have completed the open */ 1976 /* we have completed the open */
1977 edge_port->openPending = FALSE; 1977 edge_port->openPending = false;
1978 edge_port->open = TRUE; 1978 edge_port->open = true;
1979 wake_up(&edge_port->wait_open); 1979 wake_up(&edge_port->wait_open);
1980 return; 1980 return;
1981 } 1981 }
@@ -1983,7 +1983,7 @@ static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2
1983 // If port is closed, silently discard all rcvd status. We can 1983 // If port is closed, silently discard all rcvd status. We can
1984 // have cases where buffered status is received AFTER the close 1984 // have cases where buffered status is received AFTER the close
1985 // port command is sent to the Edgeport. 1985 // port command is sent to the Edgeport.
1986 if ((!edge_port->open ) || (edge_port->closePending)) { 1986 if (!edge_port->open || edge_port->closePending) {
1987 return; 1987 return;
1988 } 1988 }
1989 1989
@@ -1991,14 +1991,14 @@ static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2
1991 // Not currently sent by Edgeport 1991 // Not currently sent by Edgeport
1992 case IOSP_STATUS_LSR: 1992 case IOSP_STATUS_LSR:
1993 dbg("%s - Port %u LSR Status = %02x", __FUNCTION__, edge_serial->rxPort, byte2); 1993 dbg("%s - Port %u LSR Status = %02x", __FUNCTION__, edge_serial->rxPort, byte2);
1994 handle_new_lsr (edge_port, FALSE, byte2, 0); 1994 handle_new_lsr(edge_port, false, byte2, 0);
1995 break; 1995 break;
1996 1996
1997 case IOSP_STATUS_LSR_DATA: 1997 case IOSP_STATUS_LSR_DATA:
1998 dbg("%s - Port %u LSR Status = %02x, Data = %02x", __FUNCTION__, edge_serial->rxPort, byte2, byte3); 1998 dbg("%s - Port %u LSR Status = %02x, Data = %02x", __FUNCTION__, edge_serial->rxPort, byte2, byte3);
1999 // byte2 is LSR Register 1999 // byte2 is LSR Register
2000 // byte3 is broken data byte 2000 // byte3 is broken data byte
2001 handle_new_lsr (edge_port, TRUE, byte2, byte3); 2001 handle_new_lsr(edge_port, true, byte2, byte3);
2002 break; 2002 break;
2003 // 2003 //
2004 // case IOSP_EXT_4_STATUS: 2004 // case IOSP_EXT_4_STATUS:
@@ -2317,14 +2317,14 @@ static int write_cmd_usb (struct edgeport_port *edge_port, unsigned char *buffer
2317 if (!urb) 2317 if (!urb)
2318 return -ENOMEM; 2318 return -ENOMEM;
2319 2319
2320 CmdUrbs++; 2320 atomic_inc(&CmdUrbs);
2321 dbg("%s - ALLOCATE URB %p (outstanding %d)", __FUNCTION__, urb, CmdUrbs); 2321 dbg("%s - ALLOCATE URB %p (outstanding %d)", __FUNCTION__, urb, atomic_read(&CmdUrbs));
2322 2322
2323 usb_fill_bulk_urb (urb, edge_serial->serial->dev, 2323 usb_fill_bulk_urb (urb, edge_serial->serial->dev,
2324 usb_sndbulkpipe(edge_serial->serial->dev, edge_serial->bulk_out_endpoint), 2324 usb_sndbulkpipe(edge_serial->serial->dev, edge_serial->bulk_out_endpoint),
2325 buffer, length, edge_bulk_out_cmd_callback, edge_port); 2325 buffer, length, edge_bulk_out_cmd_callback, edge_port);
2326 2326
2327 edge_port->commandPending = TRUE; 2327 edge_port->commandPending = true;
2328 status = usb_submit_urb(urb, GFP_ATOMIC); 2328 status = usb_submit_urb(urb, GFP_ATOMIC);
2329 2329
2330 if (status) { 2330 if (status) {
@@ -2332,16 +2332,16 @@ static int write_cmd_usb (struct edgeport_port *edge_port, unsigned char *buffer
2332 dev_err(&edge_port->port->dev, "%s - usb_submit_urb(write command) failed, status = %d\n", __FUNCTION__, status); 2332 dev_err(&edge_port->port->dev, "%s - usb_submit_urb(write command) failed, status = %d\n", __FUNCTION__, status);
2333 usb_kill_urb(urb); 2333 usb_kill_urb(urb);
2334 usb_free_urb(urb); 2334 usb_free_urb(urb);
2335 CmdUrbs--; 2335 atomic_dec(&CmdUrbs);
2336 return status; 2336 return status;
2337 } 2337 }
2338 2338
2339 // wait for command to finish 2339 // wait for command to finish
2340 timeout = COMMAND_TIMEOUT; 2340 timeout = COMMAND_TIMEOUT;
2341#if 0 2341#if 0
2342 wait_event (&edge_port->wait_command, (edge_port->commandPending == FALSE)); 2342 wait_event (&edge_port->wait_command, !edge_port->commandPending);
2343 2343
2344 if (edge_port->commandPending == TRUE) { 2344 if (edge_port->commandPending) {
2345 /* command timed out */ 2345 /* command timed out */
2346 dbg("%s - command timed out", __FUNCTION__); 2346 dbg("%s - command timed out", __FUNCTION__);
2347 status = -EINVAL; 2347 status = -EINVAL;
@@ -2524,8 +2524,8 @@ static void change_port_settings (struct edgeport_port *edge_port, struct ktermi
2524 2524
2525 dbg("%s - port %d", __FUNCTION__, edge_port->port->number); 2525 dbg("%s - port %d", __FUNCTION__, edge_port->port->number);
2526 2526
2527 if ((!edge_port->open) && 2527 if (!edge_port->open &&
2528 (!edge_port->openPending)) { 2528 !edge_port->openPending) {
2529 dbg("%s - port not opened", __FUNCTION__); 2529 dbg("%s - port not opened", __FUNCTION__);
2530 return; 2530 return;
2531 } 2531 }
@@ -2836,9 +2836,9 @@ static int edge_startup (struct usb_serial *serial)
2836 struct usb_device *dev; 2836 struct usb_device *dev;
2837 int i, j; 2837 int i, j;
2838 int response; 2838 int response;
2839 int interrupt_in_found; 2839 bool interrupt_in_found;
2840 int bulk_in_found; 2840 bool bulk_in_found;
2841 int bulk_out_found; 2841 bool bulk_out_found;
2842 static __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0, 2842 static __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0,
2843 EDGE_COMPATIBILITY_MASK1, 2843 EDGE_COMPATIBILITY_MASK1,
2844 EDGE_COMPATIBILITY_MASK2 }; 2844 EDGE_COMPATIBILITY_MASK2 };
@@ -2936,14 +2936,14 @@ static int edge_startup (struct usb_serial *serial)
2936 if (edge_serial->is_epic) { 2936 if (edge_serial->is_epic) {
2937 /* EPIC thing, set up our interrupt polling now and our read urb, so 2937 /* EPIC thing, set up our interrupt polling now and our read urb, so
2938 * that the device knows it really is connected. */ 2938 * that the device knows it really is connected. */
2939 interrupt_in_found = bulk_in_found = bulk_out_found = FALSE; 2939 interrupt_in_found = bulk_in_found = bulk_out_found = false;
2940 for (i = 0; i < serial->interface->altsetting[0].desc.bNumEndpoints; ++i) { 2940 for (i = 0; i < serial->interface->altsetting[0].desc.bNumEndpoints; ++i) {
2941 struct usb_endpoint_descriptor *endpoint; 2941 struct usb_endpoint_descriptor *endpoint;
2942 int buffer_size; 2942 int buffer_size;
2943 2943
2944 endpoint = &serial->interface->altsetting[0].endpoint[i].desc; 2944 endpoint = &serial->interface->altsetting[0].endpoint[i].desc;
2945 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); 2945 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
2946 if ((!interrupt_in_found) && 2946 if (!interrupt_in_found &&
2947 (usb_endpoint_is_int_in(endpoint))) { 2947 (usb_endpoint_is_int_in(endpoint))) {
2948 /* we found a interrupt in endpoint */ 2948 /* we found a interrupt in endpoint */
2949 dbg("found interrupt in"); 2949 dbg("found interrupt in");
@@ -2972,10 +2972,10 @@ static int edge_startup (struct usb_serial *serial)
2972 edge_serial, 2972 edge_serial,
2973 endpoint->bInterval); 2973 endpoint->bInterval);
2974 2974
2975 interrupt_in_found = TRUE; 2975 interrupt_in_found = true;
2976 } 2976 }
2977 2977
2978 if ((!bulk_in_found) && 2978 if (!bulk_in_found &&
2979 (usb_endpoint_is_bulk_in(endpoint))) { 2979 (usb_endpoint_is_bulk_in(endpoint))) {
2980 /* we found a bulk in endpoint */ 2980 /* we found a bulk in endpoint */
2981 dbg("found bulk in"); 2981 dbg("found bulk in");
@@ -3001,19 +3001,19 @@ static int edge_startup (struct usb_serial *serial)
3001 endpoint->wMaxPacketSize, 3001 endpoint->wMaxPacketSize,
3002 edge_bulk_in_callback, 3002 edge_bulk_in_callback,
3003 edge_serial); 3003 edge_serial);
3004 bulk_in_found = TRUE; 3004 bulk_in_found = true;
3005 } 3005 }
3006 3006
3007 if ((!bulk_out_found) && 3007 if (!bulk_out_found &&
3008 (usb_endpoint_is_bulk_out(endpoint))) { 3008 (usb_endpoint_is_bulk_out(endpoint))) {
3009 /* we found a bulk out endpoint */ 3009 /* we found a bulk out endpoint */
3010 dbg("found bulk out"); 3010 dbg("found bulk out");
3011 edge_serial->bulk_out_endpoint = endpoint->bEndpointAddress; 3011 edge_serial->bulk_out_endpoint = endpoint->bEndpointAddress;
3012 bulk_out_found = TRUE; 3012 bulk_out_found = true;
3013 } 3013 }
3014 } 3014 }
3015 3015
3016 if ((!interrupt_in_found) || (!bulk_in_found) || (!bulk_out_found)) { 3016 if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
3017 err ("Error - the proper endpoints were not found!"); 3017 err ("Error - the proper endpoints were not found!");
3018 return -ENODEV; 3018 return -ENODEV;
3019 } 3019 }
@@ -3083,6 +3083,7 @@ static int __init edgeport_init(void)
3083 retval = usb_register(&io_driver); 3083 retval = usb_register(&io_driver);
3084 if (retval) 3084 if (retval)
3085 goto failed_usb_register; 3085 goto failed_usb_register;
3086 atomic_set(&CmdUrbs, 0);
3086 info(DRIVER_DESC " " DRIVER_VERSION); 3087 info(DRIVER_DESC " " DRIVER_VERSION);
3087 return 0; 3088 return 0;
3088 3089
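Reviewer note: CmdUrbs is bumped in write_cmd_usb() and dropped in edge_bulk_out_cmd_callback(), which runs in interrupt context, so a plain int counter was racy. Converting it to atomic_t makes the increment/decrement safe without putting a lock around a single counter. The pattern, reduced to a sketch:

    static atomic_t cmd_urbs = ATOMIC_INIT(0);      /* outstanding command URBs */

    /* submit path */
    atomic_inc(&cmd_urbs);
    /* ... on submit failure, or in the completion handler ... */
    atomic_dec(&cmd_urbs);

    /* diagnostics only: a snapshot, possibly stale by the time it prints */
    pr_debug("outstanding %d\n", atomic_read(&cmd_urbs));

Initializing with ATOMIC_INIT(0) at the definition, as above, would also make the atomic_set() added to edgeport_init() unnecessary.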
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index 29a913a6daca..cb201c1f67f9 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -19,12 +19,6 @@
19#define MAX_RS232_PORTS 8 /* Max # of RS-232 ports per device */ 19#define MAX_RS232_PORTS 8 /* Max # of RS-232 ports per device */
20 20
21/* typedefs that the insideout headers need */ 21/* typedefs that the insideout headers need */
22#ifndef TRUE
23 #define TRUE (1)
24#endif
25#ifndef FALSE
26 #define FALSE (0)
27#endif
28#ifndef LOW8 22#ifndef LOW8
29 #define LOW8(a) ((unsigned char)(a & 0xff)) 23 #define LOW8(a) ((unsigned char)(a & 0xff))
30#endif 24#endif
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d16e2e1764ad..4df0ec74e0b1 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -255,6 +255,7 @@ static struct usb_device_id ipaq_id_table [] = {
255 { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ 255 { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */
256 { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */ 256 { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */
257 { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */ 257 { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */
258 { USB_DEVICE(0x04DD, 0x9151) }, /* SHARP S01SH USB Modem */
258 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ 259 { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */
259 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */ 260 { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */
260 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ 261 { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index b2097c45a235..7b085f334ceb 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -238,7 +238,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
238 if (rc < 0) 238 if (rc < 0)
239 err("Reading line status failed (error = %d)", rc); 239 err("Reading line status failed (error = %d)", rc);
240 else { 240 else {
241 status = status_buf[0] + (status_buf[1]<<8); 241 status = le16_to_cpu(*(u16 *)status_buf);
242 242
243 info("%s - read status %x %x", __FUNCTION__, 243 info("%s - read status %x %x", __FUNCTION__,
244 status_buf[0], status_buf[1]); 244 status_buf[0], status_buf[1]);
@@ -257,7 +257,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
257static int klsi_105_startup (struct usb_serial *serial) 257static int klsi_105_startup (struct usb_serial *serial)
258{ 258{
259 struct klsi_105_private *priv; 259 struct klsi_105_private *priv;
260 int i; 260 int i, j;
261 261
262 /* check if we support the product id (see keyspan.c) 262 /* check if we support the product id (see keyspan.c)
263 * FIXME 263 * FIXME
@@ -265,12 +265,12 @@ static int klsi_105_startup (struct usb_serial *serial)
265 265
266 /* allocate the private data structure */ 266 /* allocate the private data structure */
267 for (i=0; i<serial->num_ports; i++) { 267 for (i=0; i<serial->num_ports; i++) {
268 int j;
269 priv = kmalloc(sizeof(struct klsi_105_private), 268 priv = kmalloc(sizeof(struct klsi_105_private),
270 GFP_KERNEL); 269 GFP_KERNEL);
271 if (!priv) { 270 if (!priv) {
272 dbg("%skmalloc for klsi_105_private failed.", __FUNCTION__); 271 dbg("%skmalloc for klsi_105_private failed.", __FUNCTION__);
273 return -ENOMEM; 272 i--;
273 goto err_cleanup;
274 } 274 }
275 /* set initial values for control structures */ 275 /* set initial values for control structures */
276 priv->cfg.pktlen = 5; 276 priv->cfg.pktlen = 5;
@@ -292,15 +292,14 @@ static int klsi_105_startup (struct usb_serial *serial)
292 priv->write_urb_pool[j] = urb; 292 priv->write_urb_pool[j] = urb;
293 if (urb == NULL) { 293 if (urb == NULL) {
294 err("No more urbs???"); 294 err("No more urbs???");
295 continue; 295 goto err_cleanup;
296 } 296 }
297 297
298 urb->transfer_buffer = NULL;
299 urb->transfer_buffer = kmalloc (URB_TRANSFER_BUFFER_SIZE, 298 urb->transfer_buffer = kmalloc (URB_TRANSFER_BUFFER_SIZE,
300 GFP_KERNEL); 299 GFP_KERNEL);
301 if (!urb->transfer_buffer) { 300 if (!urb->transfer_buffer) {
302 err("%s - out of memory for urb buffers.", __FUNCTION__); 301 err("%s - out of memory for urb buffers.", __FUNCTION__);
303 continue; 302 goto err_cleanup;
304 } 303 }
305 } 304 }
306 305
@@ -308,7 +307,20 @@ static int klsi_105_startup (struct usb_serial *serial)
308 init_waitqueue_head(&serial->port[i]->write_wait); 307 init_waitqueue_head(&serial->port[i]->write_wait);
309 } 308 }
310 309
311 return (0); 310 return 0;
311
312err_cleanup:
313 for (; i >= 0; i--) {
314 priv = usb_get_serial_port_data(serial->port[i]);
315 for (j=0; j < NUM_URBS; j++) {
316 if (priv->write_urb_pool[j]) {
317 kfree(priv->write_urb_pool[j]->transfer_buffer);
318 usb_free_urb(priv->write_urb_pool[j]);
319 }
320 }
321 usb_set_serial_port_data(serial->port[i], NULL);
322 }
323 return -ENOMEM;
312} /* klsi_105_startup */ 324} /* klsi_105_startup */
313 325
314 326
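Reviewer note: klsi_105_startup() used to return (or silently continue) when a later kmalloc or usb_alloc_urb failed, leaking everything already allocated and leaving holes in the write URB pool. The new err_cleanup label walks back over the ports initialized so far, freeing each pool entry's transfer buffer and URB and clearing the port data. A hedged sketch of that unwind (simplified from klsi_105_startup(); here priv is kzalloc'd, a deviation from the driver, so the NULL checks in the unwind are reliable even for the partially filled port):

    for (i = 0; i < serial->num_ports; i++) {
            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv) {
                    i--;                    /* nothing of port i to undo */
                    goto err_cleanup;
            }
            usb_set_serial_port_data(serial->port[i], priv);
            for (j = 0; j < NUM_URBS; j++) {
                    priv->write_urb_pool[j] = usb_alloc_urb(0, GFP_KERNEL);
                    if (!priv->write_urb_pool[j])
                            goto err_cleanup;
                    priv->write_urb_pool[j]->transfer_buffer =
                            kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
                    if (!priv->write_urb_pool[j]->transfer_buffer)
                            goto err_cleanup;
            }
    }
    return 0;

    err_cleanup:
    for (; i >= 0; i--) {
            priv = usb_get_serial_port_data(serial->port[i]);
            for (j = 0; j < NUM_URBS; j++) {
                    if (priv->write_urb_pool[j]) {
                            kfree(priv->write_urb_pool[j]->transfer_buffer);
                            usb_free_urb(priv->write_urb_pool[j]);
                    }
            }
            usb_set_serial_port_data(serial->port[i], NULL);
    }
    return -ENOMEM;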
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 4cd839b1407f..3db1adc25f84 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -438,17 +438,21 @@ static int mct_u232_open (struct usb_serial_port *port, struct file *filp)
438 if (retval) { 438 if (retval) {
439 err("usb_submit_urb(read bulk) failed pipe 0x%x err %d", 439 err("usb_submit_urb(read bulk) failed pipe 0x%x err %d",
440 port->read_urb->pipe, retval); 440 port->read_urb->pipe, retval);
441 goto exit; 441 goto error;
442 } 442 }
443 443
444 port->interrupt_in_urb->dev = port->serial->dev; 444 port->interrupt_in_urb->dev = port->serial->dev;
445 retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 445 retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
446 if (retval) 446 if (retval) {
447 usb_kill_urb(port->read_urb);
447 err(" usb_submit_urb(read int) failed pipe 0x%x err %d", 448 err(" usb_submit_urb(read int) failed pipe 0x%x err %d",
448 port->interrupt_in_urb->pipe, retval); 449 port->interrupt_in_urb->pipe, retval);
449 450 goto error;
450exit: 451 }
451 return 0; 452 return 0;
453
454error:
455 return retval;
452} /* mct_u232_open */ 456} /* mct_u232_open */
453 457
454 458
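Reviewer note: the old mct_u232_open() jumped to exit and returned 0 even when submitting the read bulk URB had failed, and left that URB running if the interrupt URB could not be submitted afterwards. The fix returns the real error and kills the already-submitted read URB first, so open() never half-succeeds. It is the same unwind idea as in ftdi_write() above, condensed:

    retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
    if (retval)
            goto error;                             /* nothing submitted yet */

    retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
    if (retval) {
            usb_kill_urb(port->read_urb);           /* undo step 1 */
            goto error;
    }
    return 0;

    error:
    return retval;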
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 19bf403f9db2..b563e2ad8728 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -103,11 +103,9 @@ static void mos7720_interrupt_callback(struct urb *urb)
103{ 103{
104 int result; 104 int result;
105 int length; 105 int length;
106 __u32 *data; 106 __u8 *data;
107 unsigned int status;
108 __u8 sp1; 107 __u8 sp1;
109 __u8 sp2; 108 __u8 sp2;
110 __u8 st;
111 109
112 dbg("%s"," : Entering\n"); 110 dbg("%s"," : Entering\n");
113 111
@@ -141,18 +139,19 @@ static void mos7720_interrupt_callback(struct urb *urb)
141 * Byte 2 IIR Port 2 (port.number is 1) 139 * Byte 2 IIR Port 2 (port.number is 1)
142 * Byte 3 -------------- 140 * Byte 3 --------------
143 * Byte 4 FIFO status for both */ 141 * Byte 4 FIFO status for both */
144 if (length && length > 4) { 142
143 /* the above description is inverted
144 * oneukum 2007-03-14 */
145
146 if (unlikely(length != 4)) {
145 dbg("Wrong data !!!"); 147 dbg("Wrong data !!!");
146 return; 148 return;
147 } 149 }
148 150
149 status = *data; 151 sp1 = data[3];
150 152 sp2 = data[2];
151 sp1 = (status & 0xff000000)>>24;
152 sp2 = (status & 0x00ff0000)>>16;
153 st = status & 0x000000ff;
154 153
155 if ((sp1 & 0x01) || (sp2 & 0x01)) { 154 if ((sp1 | sp2) & 0x01) {
156 /* No Interrupt Pending in both the ports */ 155 /* No Interrupt Pending in both the ports */
157 dbg("No Interrupt !!!"); 156 dbg("No Interrupt !!!");
158 } else { 157 } else {
@@ -333,6 +332,7 @@ static int mos7720_open(struct usb_serial_port *port, struct file * filp)
333 int response; 332 int response;
334 int port_number; 333 int port_number;
335 char data; 334 char data;
335 int allocated_urbs = 0;
336 int j; 336 int j;
337 337
338 serial = port->serial; 338 serial = port->serial;
@@ -353,7 +353,7 @@ static int mos7720_open(struct usb_serial_port *port, struct file * filp)
353 353
354 /* Initialising the write urb pool */ 354 /* Initialising the write urb pool */
355 for (j = 0; j < NUM_URBS; ++j) { 355 for (j = 0; j < NUM_URBS; ++j) {
356 urb = usb_alloc_urb(0,GFP_ATOMIC); 356 urb = usb_alloc_urb(0,GFP_KERNEL);
357 mos7720_port->write_urb_pool[j] = urb; 357 mos7720_port->write_urb_pool[j] = urb;
358 358
359 if (urb == NULL) { 359 if (urb == NULL) {
@@ -365,10 +365,16 @@ static int mos7720_open(struct usb_serial_port *port, struct file * filp)
365 GFP_KERNEL); 365 GFP_KERNEL);
366 if (!urb->transfer_buffer) { 366 if (!urb->transfer_buffer) {
367 err("%s-out of memory for urb buffers.", __FUNCTION__); 367 err("%s-out of memory for urb buffers.", __FUNCTION__);
368 usb_free_urb(mos7720_port->write_urb_pool[j]);
369 mos7720_port->write_urb_pool[j] = NULL;
368 continue; 370 continue;
369 } 371 }
372 allocated_urbs++;
370 } 373 }
371 374
375 if (!allocated_urbs)
376 return -ENOMEM;
377
372 /* Initialize MCS7720 -- Write Init values to corresponding Registers 378 /* Initialize MCS7720 -- Write Init values to corresponding Registers
373 * 379 *
374 * Register Index 380 * Register Index
@@ -526,7 +532,7 @@ static int mos7720_chars_in_buffer(struct usb_serial_port *port)
526 } 532 }
527 533
528 for (i = 0; i < NUM_URBS; ++i) { 534 for (i = 0; i < NUM_URBS; ++i) {
529 if (mos7720_port->write_urb_pool[i]->status == -EINPROGRESS) 535 if (mos7720_port->write_urb_pool[i] && mos7720_port->write_urb_pool[i]->status == -EINPROGRESS)
530 chars += URB_TRANSFER_BUFFER_SIZE; 536 chars += URB_TRANSFER_BUFFER_SIZE;
531 } 537 }
532 dbg("%s - returns %d", __FUNCTION__, chars); 538 dbg("%s - returns %d", __FUNCTION__, chars);
@@ -629,7 +635,7 @@ static int mos7720_write_room(struct usb_serial_port *port)
629 } 635 }
630 636
631 for (i = 0; i < NUM_URBS; ++i) { 637 for (i = 0; i < NUM_URBS; ++i) {
632 if (mos7720_port->write_urb_pool[i]->status != -EINPROGRESS) 638 if (mos7720_port->write_urb_pool[i] && mos7720_port->write_urb_pool[i]->status != -EINPROGRESS)
633 room += URB_TRANSFER_BUFFER_SIZE; 639 room += URB_TRANSFER_BUFFER_SIZE;
634 } 640 }
635 641
@@ -664,7 +670,7 @@ static int mos7720_write(struct usb_serial_port *port,
664 urb = NULL; 670 urb = NULL;
665 671
666 for (i = 0; i < NUM_URBS; ++i) { 672 for (i = 0; i < NUM_URBS; ++i) {
667 if (mos7720_port->write_urb_pool[i]->status != -EINPROGRESS) { 673 if (mos7720_port->write_urb_pool[i] && mos7720_port->write_urb_pool[i]->status != -EINPROGRESS) {
668 urb = mos7720_port->write_urb_pool[i]; 674 urb = mos7720_port->write_urb_pool[i];
669 dbg("URB:%d",i); 675 dbg("URB:%d",i);
670 break; 676 break;
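Reviewer note: two independent fixes in the mos7720 hunks. First, the interrupt callback stops casting the transfer buffer to __u32 and shifting, which made the extracted bytes depend on host endianness, and instead indexes the status bytes directly (with the layout inversion noted in the added comment). Second, every walk over write_urb_pool[] now tolerates NULL slots, since mos7720_open() may leave holes when a transfer-buffer allocation fails. Sketch of the endianness point:

    __u8 *data = urb->transfer_buffer;
    __u32 status;
    __u8 sp1;

    /* Endian-dependent (old): which byte lands in bits 31..24 differs
     * between little- and big-endian hosts. */
    status = *(__u32 *)data;
    sp1 = (status & 0xff000000) >> 24;

    /* Endian-independent (new): index the bytes of the transfer buffer. */
    sp1 = data[3];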
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index c6cca859af45..2366e7b63ece 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -176,9 +176,12 @@ struct moschip_port {
176 int port_num; /*Actual port number in the device(1,2,etc) */ 176 int port_num; /*Actual port number in the device(1,2,etc) */
177 struct urb *write_urb; /* write URB for this port */ 177 struct urb *write_urb; /* write URB for this port */
178 struct urb *read_urb; /* read URB for this port */ 178 struct urb *read_urb; /* read URB for this port */
179 struct urb *int_urb;
179 __u8 shadowLCR; /* last LCR value received */ 180 __u8 shadowLCR; /* last LCR value received */
180 __u8 shadowMCR; /* last MCR value received */ 181 __u8 shadowMCR; /* last MCR value received */
181 char open; 182 char open;
183 char open_ports;
184 char zombie;
182 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ 185 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
183 wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ 186 wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
184 int delta_msr_cond; 187 int delta_msr_cond;
@@ -191,17 +194,17 @@ struct moschip_port {
191 __u8 DcrRegOffset; 194 __u8 DcrRegOffset;
192 //for processing control URBS in interrupt context 195 //for processing control URBS in interrupt context
193 struct urb *control_urb; 196 struct urb *control_urb;
197 struct usb_ctrlrequest *dr;
194 char *ctrl_buf; 198 char *ctrl_buf;
195 int MsrLsr; 199 int MsrLsr;
196 200
201 spinlock_t pool_lock;
197 struct urb *write_urb_pool[NUM_URBS]; 202 struct urb *write_urb_pool[NUM_URBS];
203 char busy[NUM_URBS];
198}; 204};
199 205
200 206
201static int debug; 207static int debug;
202static int mos7840_num_ports; //this says the number of ports in the device
203static int mos7840_num_open_ports;
204
205 208
206/* 209/*
207 * mos7840_set_reg_sync 210 * mos7840_set_reg_sync
@@ -254,7 +257,7 @@ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
254 struct usb_device *dev = port->serial->dev; 257 struct usb_device *dev = port->serial->dev;
255 val = val & 0x00ff; 258 val = val & 0x00ff;
256 // For the UART control registers, the application number need to be Or'ed 259 // For the UART control registers, the application number need to be Or'ed
257 if (mos7840_num_ports == 4) { 260 if (port->serial->num_ports == 4) {
258 val |= 261 val |=
259 (((__u16) port->number - (__u16) (port->serial->minor)) + 262 (((__u16) port->number - (__u16) (port->serial->minor)) +
260 1) << 8; 263 1) << 8;
@@ -294,7 +297,7 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
294 297
295 //dbg("application number is %4x \n",(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); 298 //dbg("application number is %4x \n",(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8);
296 /*Wval is same as application number */ 299 /*Wval is same as application number */
297 if (mos7840_num_ports == 4) { 300 if (port->serial->num_ports == 4) {
298 Wval = 301 Wval =
299 (((__u16) port->number - (__u16) (port->serial->minor)) + 302 (((__u16) port->number - (__u16) (port->serial->minor)) +
300 1) << 8; 303 1) << 8;
@@ -352,7 +355,7 @@ static inline struct moschip_port *mos7840_get_port_private(struct
352 return (struct moschip_port *)usb_get_serial_port_data(port); 355 return (struct moschip_port *)usb_get_serial_port_data(port);
353} 356}
354 357
355static int mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr) 358static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
356{ 359{
357 struct moschip_port *mos7840_port; 360 struct moschip_port *mos7840_port;
358 struct async_icount *icount; 361 struct async_icount *icount;
@@ -366,22 +369,24 @@ static int mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
366 /* update input line counters */ 369 /* update input line counters */
367 if (new_msr & MOS_MSR_DELTA_CTS) { 370 if (new_msr & MOS_MSR_DELTA_CTS) {
368 icount->cts++; 371 icount->cts++;
372 smp_wmb();
369 } 373 }
370 if (new_msr & MOS_MSR_DELTA_DSR) { 374 if (new_msr & MOS_MSR_DELTA_DSR) {
371 icount->dsr++; 375 icount->dsr++;
376 smp_wmb();
372 } 377 }
373 if (new_msr & MOS_MSR_DELTA_CD) { 378 if (new_msr & MOS_MSR_DELTA_CD) {
374 icount->dcd++; 379 icount->dcd++;
380 smp_wmb();
375 } 381 }
376 if (new_msr & MOS_MSR_DELTA_RI) { 382 if (new_msr & MOS_MSR_DELTA_RI) {
377 icount->rng++; 383 icount->rng++;
384 smp_wmb();
378 } 385 }
379 } 386 }
380
381 return 0;
382} 387}
383 388
384static int mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr) 389static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
385{ 390{
386 struct async_icount *icount; 391 struct async_icount *icount;
387 392
@@ -400,18 +405,20 @@ static int mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
400 icount = &port->icount; 405 icount = &port->icount;
401 if (new_lsr & SERIAL_LSR_BI) { 406 if (new_lsr & SERIAL_LSR_BI) {
402 icount->brk++; 407 icount->brk++;
408 smp_wmb();
403 } 409 }
404 if (new_lsr & SERIAL_LSR_OE) { 410 if (new_lsr & SERIAL_LSR_OE) {
405 icount->overrun++; 411 icount->overrun++;
412 smp_wmb();
406 } 413 }
407 if (new_lsr & SERIAL_LSR_PE) { 414 if (new_lsr & SERIAL_LSR_PE) {
408 icount->parity++; 415 icount->parity++;
416 smp_wmb();
409 } 417 }
410 if (new_lsr & SERIAL_LSR_FE) { 418 if (new_lsr & SERIAL_LSR_FE) {
411 icount->frame++; 419 icount->frame++;
420 smp_wmb();
412 } 421 }
413
414 return 0;
415} 422}
416 423
417/************************************************************************/ 424/************************************************************************/
@@ -426,12 +433,15 @@ static void mos7840_control_callback(struct urb *urb)
426 unsigned char *data; 433 unsigned char *data;
427 struct moschip_port *mos7840_port; 434 struct moschip_port *mos7840_port;
428 __u8 regval = 0x0; 435 __u8 regval = 0x0;
436 int result = 0;
429 437
430 if (!urb) { 438 if (!urb) {
431 dbg("%s", "Invalid Pointer !!!!:\n"); 439 dbg("%s", "Invalid Pointer !!!!:\n");
432 return; 440 return;
433 } 441 }
434 442
443 mos7840_port = (struct moschip_port *)urb->context;
444
435 switch (urb->status) { 445 switch (urb->status) {
436 case 0: 446 case 0:
437 /* success */ 447 /* success */
@@ -449,8 +459,6 @@ static void mos7840_control_callback(struct urb *urb)
449 goto exit; 459 goto exit;
450 } 460 }
451 461
452 mos7840_port = (struct moschip_port *)urb->context;
453
454 dbg("%s urb buffer size is %d\n", __FUNCTION__, urb->actual_length); 462 dbg("%s urb buffer size is %d\n", __FUNCTION__, urb->actual_length);
455 dbg("%s mos7840_port->MsrLsr is %d port %d\n", __FUNCTION__, 463 dbg("%s mos7840_port->MsrLsr is %d port %d\n", __FUNCTION__,
456 mos7840_port->MsrLsr, mos7840_port->port_num); 464 mos7840_port->MsrLsr, mos7840_port->port_num);
@@ -462,21 +470,26 @@ static void mos7840_control_callback(struct urb *urb)
462 else if (mos7840_port->MsrLsr == 1) 470 else if (mos7840_port->MsrLsr == 1)
463 mos7840_handle_new_lsr(mos7840_port, regval); 471 mos7840_handle_new_lsr(mos7840_port, regval);
464 472
465 exit: 473exit:
466 return; 474 spin_lock(&mos7840_port->pool_lock);
475 if (!mos7840_port->zombie)
476 result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC);
477 spin_unlock(&mos7840_port->pool_lock);
478 if (result) {
479 dev_err(&urb->dev->dev,
480 "%s - Error %d submitting interrupt urb\n",
481 __FUNCTION__, result);
482 }
467} 483}
468 484
469static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, 485static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
470 __u16 * val) 486 __u16 * val)
471{ 487{
472 struct usb_device *dev = mcs->port->serial->dev; 488 struct usb_device *dev = mcs->port->serial->dev;
473 struct usb_ctrlrequest *dr = NULL; 489 struct usb_ctrlrequest *dr = mcs->dr;
474 unsigned char *buffer = NULL; 490 unsigned char *buffer = mcs->ctrl_buf;
475 int ret = 0; 491 int ret;
476 buffer = (__u8 *) mcs->ctrl_buf;
477 492
478// dr=(struct usb_ctrlrequest *)(buffer);
479 dr = (void *)(buffer + 2);
480 dr->bRequestType = MCS_RD_RTYPE; 493 dr->bRequestType = MCS_RD_RTYPE;
481 dr->bRequest = MCS_RDREQ; 494 dr->bRequest = MCS_RDREQ;
482 dr->wValue = cpu_to_le16(Wval); //0; 495 dr->wValue = cpu_to_le16(Wval); //0;
@@ -506,8 +519,8 @@ static void mos7840_interrupt_callback(struct urb *urb)
506 __u16 Data; 519 __u16 Data;
507 unsigned char *data; 520 unsigned char *data;
508 __u8 sp[5], st; 521 __u8 sp[5], st;
509 int i; 522 int i, rv = 0;
510 __u16 wval; 523 __u16 wval, wreg = 0;
511 524
512 dbg("%s", " : Entering\n"); 525 dbg("%s", " : Entering\n");
513 if (!urb) { 526 if (!urb) {
@@ -569,31 +582,34 @@ static void mos7840_interrupt_callback(struct urb *urb)
569 dbg("Serial Port %d: Receiver status error or ", i); 582 dbg("Serial Port %d: Receiver status error or ", i);
570 dbg("address bit detected in 9-bit mode\n"); 583 dbg("address bit detected in 9-bit mode\n");
571 mos7840_port->MsrLsr = 1; 584 mos7840_port->MsrLsr = 1;
572 mos7840_get_reg(mos7840_port, wval, 585 wreg = LINE_STATUS_REGISTER;
573 LINE_STATUS_REGISTER,
574 &Data);
575 break; 586 break;
576 case SERIAL_IIR_MS: 587 case SERIAL_IIR_MS:
577 dbg("Serial Port %d: Modem status change\n", i); 588 dbg("Serial Port %d: Modem status change\n", i);
578 mos7840_port->MsrLsr = 0; 589 mos7840_port->MsrLsr = 0;
579 mos7840_get_reg(mos7840_port, wval, 590 wreg = MODEM_STATUS_REGISTER;
580 MODEM_STATUS_REGISTER,
581 &Data);
582 break; 591 break;
583 } 592 }
593 spin_lock(&mos7840_port->pool_lock);
594 if (!mos7840_port->zombie) {
595 rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
596 } else {
597 spin_unlock(&mos7840_port->pool_lock);
598 return;
599 }
600 spin_unlock(&mos7840_port->pool_lock);
584 } 601 }
585 } 602 }
586 } 603 }
587 exit: 604 if (!(rv < 0)) /* the completion handler for the control urb will resubmit */
605 return;
606exit:
588 result = usb_submit_urb(urb, GFP_ATOMIC); 607 result = usb_submit_urb(urb, GFP_ATOMIC);
589 if (result) { 608 if (result) {
590 dev_err(&urb->dev->dev, 609 dev_err(&urb->dev->dev,
591 "%s - Error %d submitting interrupt urb\n", 610 "%s - Error %d submitting interrupt urb\n",
592 __FUNCTION__, result); 611 __FUNCTION__, result);
593 } 612 }
594
595 return;
596
597} 613}
598 614
599static int mos7840_port_paranoia_check(struct usb_serial_port *port, 615static int mos7840_port_paranoia_check(struct usb_serial_port *port,
@@ -634,7 +650,8 @@ static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
634 if (!port || 650 if (!port ||
635 mos7840_port_paranoia_check(port, function) || 651 mos7840_port_paranoia_check(port, function) ||
636 mos7840_serial_paranoia_check(port->serial, function)) { 652 mos7840_serial_paranoia_check(port->serial, function)) {
637 /* then say that we don't have a valid usb_serial thing, which will * end up genrating -ENODEV return values */ 653 /* then say that we don't have a valid usb_serial thing, which will
654 * end up genrating -ENODEV return values */
638 return NULL; 655 return NULL;
639 } 656 }
640 657
@@ -699,6 +716,7 @@ static void mos7840_bulk_in_callback(struct urb *urb)
699 tty_flip_buffer_push(tty); 716 tty_flip_buffer_push(tty);
700 } 717 }
701 mos7840_port->icount.rx += urb->actual_length; 718 mos7840_port->icount.rx += urb->actual_length;
719 smp_wmb();
702 dbg("mos7840_port->icount.rx is %d:\n", 720 dbg("mos7840_port->icount.rx is %d:\n",
703 mos7840_port->icount.rx); 721 mos7840_port->icount.rx);
704 } 722 }
@@ -708,15 +726,14 @@ static void mos7840_bulk_in_callback(struct urb *urb)
708 return; 726 return;
709 } 727 }
710 728
711 if (mos7840_port->read_urb->status != -EINPROGRESS) {
712 mos7840_port->read_urb->dev = serial->dev;
713 729
714 status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); 730 mos7840_port->read_urb->dev = serial->dev;
715 731
716 if (status) { 732 status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
717 dbg(" usb_submit_urb(read bulk) failed, status = %d", 733
718 status); 734 if (status) {
719 } 735 dbg(" usb_submit_urb(read bulk) failed, status = %d",
736 status);
720 } 737 }
721} 738}
722 739
@@ -730,17 +747,28 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
730{ 747{
731 struct moschip_port *mos7840_port; 748 struct moschip_port *mos7840_port;
732 struct tty_struct *tty; 749 struct tty_struct *tty;
750 int i;
751
733 if (!urb) { 752 if (!urb) {
734 dbg("%s", "Invalid Pointer !!!!:\n"); 753 dbg("%s", "Invalid Pointer !!!!:\n");
735 return; 754 return;
736 } 755 }
737 756
757 mos7840_port = (struct moschip_port *)urb->context;
758 spin_lock(&mos7840_port->pool_lock);
759 for (i = 0; i < NUM_URBS; i++) {
760 if (urb == mos7840_port->write_urb_pool[i]) {
761 mos7840_port->busy[i] = 0;
762 break;
763 }
764 }
765 spin_unlock(&mos7840_port->pool_lock);
766
738 if (urb->status) { 767 if (urb->status) {
739 dbg("nonzero write bulk status received:%d\n", urb->status); 768 dbg("nonzero write bulk status received:%d\n", urb->status);
740 return; 769 return;
741 } 770 }
742 771
743 mos7840_port = (struct moschip_port *)urb->context;
744 if (!mos7840_port) { 772 if (!mos7840_port) {
745 dbg("%s", "NULL mos7840_port pointer \n"); 773 dbg("%s", "NULL mos7840_port pointer \n");
746 return; 774 return;
@@ -792,13 +820,13 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
792 __u16 Data; 820 __u16 Data;
793 int status; 821 int status;
794 struct moschip_port *mos7840_port; 822 struct moschip_port *mos7840_port;
823 struct moschip_port *port0;
795 824
796 if (mos7840_port_paranoia_check(port, __FUNCTION__)) { 825 if (mos7840_port_paranoia_check(port, __FUNCTION__)) {
797 dbg("%s", "Port Paranoia failed \n"); 826 dbg("%s", "Port Paranoia failed \n");
798 return -ENODEV; 827 return -ENODEV;
799 } 828 }
800 829
801 mos7840_num_open_ports++;
802 serial = port->serial; 830 serial = port->serial;
803 831
804 if (mos7840_serial_paranoia_check(serial, __FUNCTION__)) { 832 if (mos7840_serial_paranoia_check(serial, __FUNCTION__)) {
@@ -807,16 +835,18 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
807 } 835 }
808 836
809 mos7840_port = mos7840_get_port_private(port); 837 mos7840_port = mos7840_get_port_private(port);
838 port0 = mos7840_get_port_private(serial->port[0]);
810 839
811 if (mos7840_port == NULL) 840 if (mos7840_port == NULL || port0 == NULL)
812 return -ENODEV; 841 return -ENODEV;
813 842
814 usb_clear_halt(serial->dev, port->write_urb->pipe); 843 usb_clear_halt(serial->dev, port->write_urb->pipe);
815 usb_clear_halt(serial->dev, port->read_urb->pipe); 844 usb_clear_halt(serial->dev, port->read_urb->pipe);
845 port0->open_ports++;
816 846
817 /* Initialising the write urb pool */ 847 /* Initialising the write urb pool */
818 for (j = 0; j < NUM_URBS; ++j) { 848 for (j = 0; j < NUM_URBS; ++j) {
819 urb = usb_alloc_urb(0, GFP_ATOMIC); 849 urb = usb_alloc_urb(0, GFP_KERNEL);
820 mos7840_port->write_urb_pool[j] = urb; 850 mos7840_port->write_urb_pool[j] = urb;
821 851
822 if (urb == NULL) { 852 if (urb == NULL) {
@@ -824,10 +854,10 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
824 continue; 854 continue;
825 } 855 }
826 856
827 urb->transfer_buffer = NULL; 857 urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
828 urb->transfer_buffer =
829 kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
830 if (!urb->transfer_buffer) { 858 if (!urb->transfer_buffer) {
859 usb_free_urb(urb);
860 mos7840_port->write_urb_pool[j] = NULL;
831 err("%s-out of memory for urb buffers.", __FUNCTION__); 861 err("%s-out of memory for urb buffers.", __FUNCTION__);
832 continue; 862 continue;
833 } 863 }
@@ -879,9 +909,7 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
879 } 909 }
880 Data |= 0x08; //Driver done bit 910 Data |= 0x08; //Driver done bit
881 Data |= 0x20; //rx_disable 911 Data |= 0x20; //rx_disable
882 status = 0; 912 status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data);
883 status =
884 mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data);
885 if (status < 0) { 913 if (status < 0) {
886 dbg("writing Controlreg failed\n"); 914 dbg("writing Controlreg failed\n");
887 return -1; 915 return -1;
@@ -893,7 +921,6 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
893 //////////////////////////////////// 921 ////////////////////////////////////
894 922
895 Data = 0x00; 923 Data = 0x00;
896 status = 0;
897 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); 924 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
898 if (status < 0) { 925 if (status < 0) {
899 dbg("disableing interrupts failed\n"); 926 dbg("disableing interrupts failed\n");
@@ -901,7 +928,6 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
901 } 928 }
902 // Set FIFO_CONTROL_REGISTER to the default value 929 // Set FIFO_CONTROL_REGISTER to the default value
903 Data = 0x00; 930 Data = 0x00;
904 status = 0;
905 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 931 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
906 if (status < 0) { 932 if (status < 0) {
907 dbg("Writing FIFO_CONTROL_REGISTER failed\n"); 933 dbg("Writing FIFO_CONTROL_REGISTER failed\n");
@@ -909,7 +935,6 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
909 } 935 }
910 936
911 Data = 0xcf; 937 Data = 0xcf;
912 status = 0;
913 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 938 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
914 if (status < 0) { 939 if (status < 0) {
915 dbg("Writing FIFO_CONTROL_REGISTER failed\n"); 940 dbg("Writing FIFO_CONTROL_REGISTER failed\n");
@@ -917,22 +942,18 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
917 } 942 }
918 943
919 Data = 0x03; 944 Data = 0x03;
920 status = 0;
921 status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); 945 status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
922 mos7840_port->shadowLCR = Data; 946 mos7840_port->shadowLCR = Data;
923 947
924 Data = 0x0b; 948 Data = 0x0b;
925 status = 0;
926 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); 949 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
927 mos7840_port->shadowMCR = Data; 950 mos7840_port->shadowMCR = Data;
928 951
929 Data = 0x00; 952 Data = 0x00;
930 status = 0;
931 status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); 953 status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
932 mos7840_port->shadowLCR = Data; 954 mos7840_port->shadowLCR = Data;
933 955
934 Data |= SERIAL_LCR_DLAB; //data latch enable in LCR 0x80 956 Data |= SERIAL_LCR_DLAB; //data latch enable in LCR 0x80
935 status = 0;
936 status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); 957 status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
937 958
938 Data = 0x0c; 959 Data = 0x0c;
@@ -999,7 +1020,7 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
999/* Check to see if we've set up our endpoint info yet * 1020/* Check to see if we've set up our endpoint info yet *
1000 * (can't set it up in mos7840_startup as the structures * 1021 * (can't set it up in mos7840_startup as the structures *
1001 * were not set up at that time.) */ 1022 * were not set up at that time.) */
1002 if (mos7840_num_open_ports == 1) { 1023 if (port0->open_ports == 1) {
1003 if (serial->port[0]->interrupt_in_buffer == NULL) { 1024 if (serial->port[0]->interrupt_in_buffer == NULL) {
1004 1025
1005 /* set up interrupt urb */ 1026 /* set up interrupt urb */
@@ -1097,6 +1118,7 @@ static int mos7840_chars_in_buffer(struct usb_serial_port *port)
1097{ 1118{
1098 int i; 1119 int i;
1099 int chars = 0; 1120 int chars = 0;
1121 unsigned long flags;
1100 struct moschip_port *mos7840_port; 1122 struct moschip_port *mos7840_port;
1101 1123
1102 dbg("%s \n", " mos7840_chars_in_buffer:entering ..........."); 1124 dbg("%s \n", " mos7840_chars_in_buffer:entering ...........");
@@ -1112,13 +1134,15 @@ static int mos7840_chars_in_buffer(struct usb_serial_port *port)
1112 return -1; 1134 return -1;
1113 } 1135 }
1114 1136
1137 spin_lock_irqsave(&mos7840_port->pool_lock,flags);
1115 for (i = 0; i < NUM_URBS; ++i) { 1138 for (i = 0; i < NUM_URBS; ++i) {
1116 if (mos7840_port->write_urb_pool[i]->status == -EINPROGRESS) { 1139 if (mos7840_port->busy[i]) {
1117 chars += URB_TRANSFER_BUFFER_SIZE; 1140 chars += URB_TRANSFER_BUFFER_SIZE;
1118 } 1141 }
1119 } 1142 }
1143 spin_unlock_irqrestore(&mos7840_port->pool_lock,flags);
1120 dbg("%s - returns %d", __FUNCTION__, chars); 1144 dbg("%s - returns %d", __FUNCTION__, chars);
1121 return (chars); 1145 return chars;
1122 1146
1123} 1147}
1124 1148
@@ -1172,6 +1196,7 @@ static void mos7840_close(struct usb_serial_port *port, struct file *filp)
1172{ 1196{
1173 struct usb_serial *serial; 1197 struct usb_serial *serial;
1174 struct moschip_port *mos7840_port; 1198 struct moschip_port *mos7840_port;
1199 struct moschip_port *port0;
1175 int j; 1200 int j;
1176 __u16 Data; 1201 __u16 Data;
1177 1202
@@ -1189,10 +1214,10 @@ static void mos7840_close(struct usb_serial_port *port, struct file *filp)
1189 } 1214 }
1190 1215
1191 mos7840_port = mos7840_get_port_private(port); 1216 mos7840_port = mos7840_get_port_private(port);
1217 port0 = mos7840_get_port_private(serial->port[0]);
1192 1218
1193 if (mos7840_port == NULL) { 1219 if (mos7840_port == NULL || port0 == NULL)
1194 return; 1220 return;
1195 }
1196 1221
1197 for (j = 0; j < NUM_URBS; ++j) 1222 for (j = 0; j < NUM_URBS; ++j)
1198 usb_kill_urb(mos7840_port->write_urb_pool[j]); 1223 usb_kill_urb(mos7840_port->write_urb_pool[j]);
@@ -1234,12 +1259,13 @@ static void mos7840_close(struct usb_serial_port *port, struct file *filp)
1234 } 1259 }
1235// if(mos7840_port->ctrl_buf != NULL) 1260// if(mos7840_port->ctrl_buf != NULL)
1236// kfree(mos7840_port->ctrl_buf); 1261// kfree(mos7840_port->ctrl_buf);
1237 mos7840_num_open_ports--; 1262 port0->open_ports--;
1238 dbg("mos7840_num_open_ports in close%d:in port%d\n", 1263 dbg("mos7840_num_open_ports in close%d:in port%d\n",
1239 mos7840_num_open_ports, port->number); 1264 port0->open_ports, port->number);
1240 if (mos7840_num_open_ports == 0) { 1265 if (port0->open_ports == 0) {
1241 if (serial->port[0]->interrupt_in_urb) { 1266 if (serial->port[0]->interrupt_in_urb) {
1242 dbg("%s", "Shutdown interrupt_in_urb\n"); 1267 dbg("%s", "Shutdown interrupt_in_urb\n");
1268 usb_kill_urb(serial->port[0]->interrupt_in_urb);
1243 } 1269 }
1244 } 1270 }
1245 1271
@@ -1368,6 +1394,7 @@ static int mos7840_write_room(struct usb_serial_port *port)
1368{ 1394{
1369 int i; 1395 int i;
1370 int room = 0; 1396 int room = 0;
1397 unsigned long flags;
1371 struct moschip_port *mos7840_port; 1398 struct moschip_port *mos7840_port;
1372 1399
1373 dbg("%s \n", " mos7840_write_room:entering ..........."); 1400 dbg("%s \n", " mos7840_write_room:entering ...........");
@@ -1384,14 +1411,17 @@ static int mos7840_write_room(struct usb_serial_port *port)
1384 return -1; 1411 return -1;
1385 } 1412 }
1386 1413
1414 spin_lock_irqsave(&mos7840_port->pool_lock, flags);
1387 for (i = 0; i < NUM_URBS; ++i) { 1415 for (i = 0; i < NUM_URBS; ++i) {
1388 if (mos7840_port->write_urb_pool[i]->status != -EINPROGRESS) { 1416 if (!mos7840_port->busy[i]) {
1389 room += URB_TRANSFER_BUFFER_SIZE; 1417 room += URB_TRANSFER_BUFFER_SIZE;
1390 } 1418 }
1391 } 1419 }
1420 spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
1392 1421
1422 room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1;
1393 dbg("%s - returns %d", __FUNCTION__, room); 1423 dbg("%s - returns %d", __FUNCTION__, room);
1394 return (room); 1424 return room;
1395 1425
1396} 1426}
1397 1427
@@ -1410,6 +1440,7 @@ static int mos7840_write(struct usb_serial_port *port,
1410 int i; 1440 int i;
1411 int bytes_sent = 0; 1441 int bytes_sent = 0;
1412 int transfer_size; 1442 int transfer_size;
1443 unsigned long flags;
1413 1444
1414 struct moschip_port *mos7840_port; 1445 struct moschip_port *mos7840_port;
1415 struct usb_serial *serial; 1446 struct usb_serial *serial;
@@ -1476,13 +1507,16 @@ static int mos7840_write(struct usb_serial_port *port,
1476 /* try to find a free urb in the list */ 1507 /* try to find a free urb in the list */
1477 urb = NULL; 1508 urb = NULL;
1478 1509
1510 spin_lock_irqsave(&mos7840_port->pool_lock, flags);
1479 for (i = 0; i < NUM_URBS; ++i) { 1511 for (i = 0; i < NUM_URBS; ++i) {
1480 if (mos7840_port->write_urb_pool[i]->status != -EINPROGRESS) { 1512 if (!mos7840_port->busy[i]) {
1513 mos7840_port->busy[i] = 1;
1481 urb = mos7840_port->write_urb_pool[i]; 1514 urb = mos7840_port->write_urb_pool[i];
1482 dbg("\nURB:%d", i); 1515 dbg("\nURB:%d", i);
1483 break; 1516 break;
1484 } 1517 }
1485 } 1518 }
1519 spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
1486 1520
1487 if (urb == NULL) { 1521 if (urb == NULL) {
1488 dbg("%s - no more free urbs", __FUNCTION__); 1522 dbg("%s - no more free urbs", __FUNCTION__);
@@ -1518,6 +1552,7 @@ static int mos7840_write(struct usb_serial_port *port,
1518 status = usb_submit_urb(urb, GFP_ATOMIC); 1552 status = usb_submit_urb(urb, GFP_ATOMIC);
1519 1553
1520 if (status) { 1554 if (status) {
1555 mos7840_port->busy[i] = 0;
1521 err("%s - usb_submit_urb(write bulk) failed with status = %d", 1556 err("%s - usb_submit_urb(write bulk) failed with status = %d",
1522 __FUNCTION__, status); 1557 __FUNCTION__, status);
1523 bytes_sent = status; 1558 bytes_sent = status;
@@ -1525,6 +1560,7 @@ static int mos7840_write(struct usb_serial_port *port,
1525 } 1560 }
1526 bytes_sent = transfer_size; 1561 bytes_sent = transfer_size;
1527 mos7840_port->icount.tx += transfer_size; 1562 mos7840_port->icount.tx += transfer_size;
1563 smp_wmb();
1528 dbg("mos7840_port->icount.tx is %d:\n", mos7840_port->icount.tx); 1564 dbg("mos7840_port->icount.tx is %d:\n", mos7840_port->icount.tx);
1529 exit: 1565 exit:
1530 1566
@@ -2490,6 +2526,7 @@ static int mos7840_ioctl(struct usb_serial_port *port, struct file *file,
2490 if (signal_pending(current)) 2526 if (signal_pending(current))
2491 return -ERESTARTSYS; 2527 return -ERESTARTSYS;
2492 cnow = mos7840_port->icount; 2528 cnow = mos7840_port->icount;
2529 smp_rmb();
2493 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2530 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2494 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) 2531 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
2495 return -EIO; /* no change => error */ 2532 return -EIO; /* no change => error */
@@ -2506,6 +2543,7 @@ static int mos7840_ioctl(struct usb_serial_port *port, struct file *file,
2506 2543
2507 case TIOCGICOUNT: 2544 case TIOCGICOUNT:
2508 cnow = mos7840_port->icount; 2545 cnow = mos7840_port->icount;
2546 smp_rmb();
2509 icount.cts = cnow.cts; 2547 icount.cts = cnow.cts;
2510 icount.dsr = cnow.dsr; 2548 icount.dsr = cnow.dsr;
2511 icount.rng = cnow.rng; 2549 icount.rng = cnow.rng;
@@ -2535,19 +2573,18 @@ static int mos7840_ioctl(struct usb_serial_port *port, struct file *file,
2535 2573
2536static int mos7840_calc_num_ports(struct usb_serial *serial) 2574static int mos7840_calc_num_ports(struct usb_serial *serial)
2537{ 2575{
2576 int mos7840_num_ports = 0;
2538 2577
2539 dbg("numberofendpoints: %d \n", 2578 dbg("numberofendpoints: %d \n",
2540 (int)serial->interface->cur_altsetting->desc.bNumEndpoints); 2579 (int)serial->interface->cur_altsetting->desc.bNumEndpoints);
2541 dbg("numberofendpoints: %d \n", 2580 dbg("numberofendpoints: %d \n",
2542 (int)serial->interface->altsetting->desc.bNumEndpoints); 2581 (int)serial->interface->altsetting->desc.bNumEndpoints);
2543 if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) { 2582 if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
2544 mos7840_num_ports = 2; 2583 mos7840_num_ports = serial->num_ports = 2;
2545 serial->type->num_ports = 2;
2546 } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) { 2584 } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) {
2547 mos7840_num_ports = 4; 2585 serial->num_bulk_in = 4;
2548 serial->type->num_bulk_in = 4; 2586 serial->num_bulk_out = 4;
2549 serial->type->num_bulk_out = 4; 2587 mos7840_num_ports = serial->num_ports = 4;
2550 serial->type->num_ports = 4;
2551 } 2588 }
2552 2589
2553 return mos7840_num_ports; 2590 return mos7840_num_ports;
@@ -2583,7 +2620,9 @@ static int mos7840_startup(struct usb_serial *serial)
2583 mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL); 2620 mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
2584 if (mos7840_port == NULL) { 2621 if (mos7840_port == NULL) {
2585 err("%s - Out of memory", __FUNCTION__); 2622 err("%s - Out of memory", __FUNCTION__);
2586 return -ENOMEM; 2623 status = -ENOMEM;
2624 i--; /* don't follow NULL pointer cleaning up */
2625 goto error;
2587 } 2626 }
2588 2627
2589 /* Initialize all port interrupt end point to port 0 int endpoint * 2628 /* Initialize all port interrupt end point to port 0 int endpoint *
@@ -2591,6 +2630,7 @@ static int mos7840_startup(struct usb_serial *serial)
2591 2630
2592 mos7840_port->port = serial->port[i]; 2631 mos7840_port->port = serial->port[i];
2593 mos7840_set_port_private(serial->port[i], mos7840_port); 2632 mos7840_set_port_private(serial->port[i], mos7840_port);
2633 spin_lock_init(&mos7840_port->pool_lock);
2594 2634
2595 mos7840_port->port_num = ((serial->port[i]->number - 2635 mos7840_port->port_num = ((serial->port[i]->number -
2596 (serial->port[i]->serial->minor)) + 2636 (serial->port[i]->serial->minor)) +
@@ -2601,22 +2641,22 @@ static int mos7840_startup(struct usb_serial *serial)
2601 mos7840_port->ControlRegOffset = 0x1; 2641 mos7840_port->ControlRegOffset = 0x1;
2602 mos7840_port->DcrRegOffset = 0x4; 2642 mos7840_port->DcrRegOffset = 0x4;
2603 } else if ((mos7840_port->port_num == 2) 2643 } else if ((mos7840_port->port_num == 2)
2604 && (mos7840_num_ports == 4)) { 2644 && (serial->num_ports == 4)) {
2605 mos7840_port->SpRegOffset = 0x8; 2645 mos7840_port->SpRegOffset = 0x8;
2606 mos7840_port->ControlRegOffset = 0x9; 2646 mos7840_port->ControlRegOffset = 0x9;
2607 mos7840_port->DcrRegOffset = 0x16; 2647 mos7840_port->DcrRegOffset = 0x16;
2608 } else if ((mos7840_port->port_num == 2) 2648 } else if ((mos7840_port->port_num == 2)
2609 && (mos7840_num_ports == 2)) { 2649 && (serial->num_ports == 2)) {
2610 mos7840_port->SpRegOffset = 0xa; 2650 mos7840_port->SpRegOffset = 0xa;
2611 mos7840_port->ControlRegOffset = 0xb; 2651 mos7840_port->ControlRegOffset = 0xb;
2612 mos7840_port->DcrRegOffset = 0x19; 2652 mos7840_port->DcrRegOffset = 0x19;
2613 } else if ((mos7840_port->port_num == 3) 2653 } else if ((mos7840_port->port_num == 3)
2614 && (mos7840_num_ports == 4)) { 2654 && (serial->num_ports == 4)) {
2615 mos7840_port->SpRegOffset = 0xa; 2655 mos7840_port->SpRegOffset = 0xa;
2616 mos7840_port->ControlRegOffset = 0xb; 2656 mos7840_port->ControlRegOffset = 0xb;
2617 mos7840_port->DcrRegOffset = 0x19; 2657 mos7840_port->DcrRegOffset = 0x19;
2618 } else if ((mos7840_port->port_num == 4) 2658 } else if ((mos7840_port->port_num == 4)
2619 && (mos7840_num_ports == 4)) { 2659 && (serial->num_ports == 4)) {
2620 mos7840_port->SpRegOffset = 0xc; 2660 mos7840_port->SpRegOffset = 0xc;
2621 mos7840_port->ControlRegOffset = 0xd; 2661 mos7840_port->ControlRegOffset = 0xd;
2622 mos7840_port->DcrRegOffset = 0x1c; 2662 mos7840_port->DcrRegOffset = 0x1c;
@@ -2701,21 +2741,19 @@ static int mos7840_startup(struct usb_serial *serial)
2701 dbg("CLK_START_VALUE_REGISTER Writing success status%d\n", status); 2741 dbg("CLK_START_VALUE_REGISTER Writing success status%d\n", status);
2702 2742
2703 Data = 0x20; 2743 Data = 0x20;
2704 status = 0;
2705 status = 2744 status =
2706 mos7840_set_reg_sync(serial->port[i], CLK_MULTI_REGISTER, 2745 mos7840_set_reg_sync(serial->port[i], CLK_MULTI_REGISTER,
2707 Data); 2746 Data);
2708 if (status < 0) { 2747 if (status < 0) {
2709 dbg("Writing CLK_MULTI_REGISTER failed status-0x%x\n", 2748 dbg("Writing CLK_MULTI_REGISTER failed status-0x%x\n",
2710 status); 2749 status);
2711 break; 2750 goto error;
2712 } else 2751 } else
2713 dbg("CLK_MULTI_REGISTER Writing success status%d\n", 2752 dbg("CLK_MULTI_REGISTER Writing success status%d\n",
2714 status); 2753 status);
2715 2754
2716 //write value 0x0 to scratchpad register 2755 //write value 0x0 to scratchpad register
2717 Data = 0x00; 2756 Data = 0x00;
2718 status = 0;
2719 status = 2757 status =
2720 mos7840_set_uart_reg(serial->port[i], SCRATCH_PAD_REGISTER, 2758 mos7840_set_uart_reg(serial->port[i], SCRATCH_PAD_REGISTER,
2721 Data); 2759 Data);
@@ -2729,7 +2767,7 @@ static int mos7840_startup(struct usb_serial *serial)
2729 2767
2730 //Zero Length flag register 2768 //Zero Length flag register
2731 if ((mos7840_port->port_num != 1) 2769 if ((mos7840_port->port_num != 1)
2732 && (mos7840_num_ports == 2)) { 2770 && (serial->num_ports == 2)) {
2733 2771
2734 Data = 0xff; 2772 Data = 0xff;
2735 status = 0; 2773 status = 0;
@@ -2770,14 +2808,17 @@ static int mos7840_startup(struct usb_serial *serial)
2770 i + 1, status); 2808 i + 1, status);
2771 2809
2772 } 2810 }
2773 mos7840_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC); 2811 mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
2774 mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL); 2812 mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
2775 2813 mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
2814 if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf || !mos7840_port->dr) {
2815 status = -ENOMEM;
2816 goto error;
2817 }
2776 } 2818 }
2777 2819
2778 //Zero Length flag enable 2820 //Zero Length flag enable
2779 Data = 0x0f; 2821 Data = 0x0f;
2780 status = 0;
2781 status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data); 2822 status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
2782 if (status < 0) { 2823 if (status < 0) {
2783 dbg("Writing ZLP_REG5 failed status-0x%x\n", status); 2824 dbg("Writing ZLP_REG5 failed status-0x%x\n", status);
@@ -2789,6 +2830,17 @@ static int mos7840_startup(struct usb_serial *serial)
2789 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 2830 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2790 (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); 2831 (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ);
2791 return 0; 2832 return 0;
2833error:
2834 for (/* nothing */; i >= 0; i--) {
2835 mos7840_port = mos7840_get_port_private(serial->port[i]);
2836
2837 kfree(mos7840_port->dr);
2838 kfree(mos7840_port->ctrl_buf);
2839 usb_free_urb(mos7840_port->control_urb);
2840 kfree(mos7840_port);
2841 serial->port[i] = NULL;
2842 }
2843 return status;
2792} 2844}
2793 2845
2794/**************************************************************************** 2846/****************************************************************************
@@ -2799,6 +2851,7 @@ static int mos7840_startup(struct usb_serial *serial)
2799static void mos7840_shutdown(struct usb_serial *serial) 2851static void mos7840_shutdown(struct usb_serial *serial)
2800{ 2852{
2801 int i; 2853 int i;
2854 unsigned long flags;
2802 struct moschip_port *mos7840_port; 2855 struct moschip_port *mos7840_port;
2803 dbg("%s \n", " shutdown :entering.........."); 2856 dbg("%s \n", " shutdown :entering..........");
2804 2857
@@ -2814,8 +2867,12 @@ static void mos7840_shutdown(struct usb_serial *serial)
2814 2867
2815 for (i = 0; i < serial->num_ports; ++i) { 2868 for (i = 0; i < serial->num_ports; ++i) {
2816 mos7840_port = mos7840_get_port_private(serial->port[i]); 2869 mos7840_port = mos7840_get_port_private(serial->port[i]);
2817 kfree(mos7840_port->ctrl_buf); 2870 spin_lock_irqsave(&mos7840_port->pool_lock, flags);
2871 mos7840_port->zombie = 1;
2872 spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
2818 usb_kill_urb(mos7840_port->control_urb); 2873 usb_kill_urb(mos7840_port->control_urb);
2874 kfree(mos7840_port->ctrl_buf);
2875 kfree(mos7840_port->dr);
2819 kfree(mos7840_port); 2876 kfree(mos7840_port);
2820 mos7840_set_port_private(serial->port[i], NULL); 2877 mos7840_set_port_private(serial->port[i], NULL);
2821 } 2878 }
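
The mos7840 changes above drop the driver-global open-port bookkeeping in favour of per-device state, and add a pool_lock plus a zombie flag so the interrupt and control completion handlers stop resubmitting URBs once shutdown has begun. Below is a minimal userspace sketch of that handoff; the names, and the pthread mutex standing in for the spinlock, are illustrative only and are not the driver's API.

/* Illustrative userspace model of the zombie-flag pattern: the
 * completion path checks the flag under a lock before requeueing
 * work, and teardown sets the flag under the same lock so nothing
 * is requeued after shutdown. All names here are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port_model {
        pthread_mutex_t pool_lock;   /* stands in for spin_lock(&pool_lock) */
        bool zombie;                 /* set once teardown has started */
        bool busy[4];                /* stands in for the write_urb_pool busy[] */
};

/* completion side: release the slot, requeue only while still alive */
static bool completion_requeue(struct port_model *p, int slot)
{
        bool requeued = false;

        pthread_mutex_lock(&p->pool_lock);
        p->busy[slot] = false;            /* mark the finished slot free */
        if (!p->zombie)
                requeued = true;          /* the driver would resubmit its URB here */
        pthread_mutex_unlock(&p->pool_lock);
        return requeued;
}

/* teardown side: flip the flag first, then cancel outstanding work */
static void shutdown_port(struct port_model *p)
{
        pthread_mutex_lock(&p->pool_lock);
        p->zombie = true;
        pthread_mutex_unlock(&p->pool_lock);
        /* the driver kills URBs and frees buffers after this point */
}

int main(void)
{
        struct port_model p = { .pool_lock = PTHREAD_MUTEX_INITIALIZER };

        printf("before shutdown: requeue=%d\n", completion_requeue(&p, 0));
        shutdown_port(&p);
        printf("after shutdown:  requeue=%d\n", completion_requeue(&p, 1));
        return 0;
}

The ordering is the point: teardown flips the flag under the same lock the completion path takes, so a completion racing with shutdown either sees the flag and backs off, or finishes before the URB is killed.
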
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 0216ac12a27d..4adfab988e86 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -69,6 +69,7 @@ static void omninet_write_bulk_callback (struct urb *urb);
69static int omninet_write (struct usb_serial_port *port, const unsigned char *buf, int count); 69static int omninet_write (struct usb_serial_port *port, const unsigned char *buf, int count);
70static int omninet_write_room (struct usb_serial_port *port); 70static int omninet_write_room (struct usb_serial_port *port);
71static void omninet_shutdown (struct usb_serial *serial); 71static void omninet_shutdown (struct usb_serial *serial);
72static int omninet_attach (struct usb_serial *serial);
72 73
73static struct usb_device_id id_table [] = { 74static struct usb_device_id id_table [] = {
74 { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, 75 { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
@@ -99,6 +100,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
99 .num_bulk_in = 1, 100 .num_bulk_in = 1,
100 .num_bulk_out = 2, 101 .num_bulk_out = 2,
101 .num_ports = 1, 102 .num_ports = 1,
103 .attach = omninet_attach,
102 .open = omninet_open, 104 .open = omninet_open,
103 .close = omninet_close, 105 .close = omninet_close,
104 .write = omninet_write, 106 .write = omninet_write,
@@ -145,22 +147,30 @@ struct omninet_data
145 __u8 od_outseq; // Sequence number for bulk_out URBs 147 __u8 od_outseq; // Sequence number for bulk_out URBs
146}; 148};
147 149
150static int omninet_attach (struct usb_serial *serial)
151{
152 struct omninet_data *od;
153 struct usb_serial_port *port = serial->port[0];
154
155 od = kmalloc( sizeof(struct omninet_data), GFP_KERNEL );
156 if( !od ) {
157 err("%s- kmalloc(%Zd) failed.", __FUNCTION__, sizeof(struct omninet_data));
158 return -ENOMEM;
159 }
160 usb_set_serial_port_data(port, od);
161 return 0;
162}
163
148static int omninet_open (struct usb_serial_port *port, struct file *filp) 164static int omninet_open (struct usb_serial_port *port, struct file *filp)
149{ 165{
150 struct usb_serial *serial = port->serial; 166 struct usb_serial *serial = port->serial;
151 struct usb_serial_port *wport; 167 struct usb_serial_port *wport;
152 struct omninet_data *od; 168 struct omninet_data *od = usb_get_serial_port_data(port);
153 int result = 0; 169 int result = 0;
154 170
155 dbg("%s - port %d", __FUNCTION__, port->number); 171 dbg("%s - port %d", __FUNCTION__, port->number);
156 172
157 od = kmalloc( sizeof(struct omninet_data), GFP_KERNEL ); 173 od = kmalloc( sizeof(struct omninet_data), GFP_KERNEL );
158 if( !od ) {
159 err("%s- kmalloc(%Zd) failed.", __FUNCTION__, sizeof(struct omninet_data));
160 return -ENOMEM;
161 }
162
163 usb_set_serial_port_data(port, od);
164 wport = serial->port[1]; 174 wport = serial->port[1];
165 wport->tty = port->tty; 175 wport->tty = port->tty;
166 176
@@ -170,24 +180,17 @@ static int omninet_open (struct usb_serial_port *port, struct file *filp)
170 port->read_urb->transfer_buffer, port->read_urb->transfer_buffer_length, 180 port->read_urb->transfer_buffer, port->read_urb->transfer_buffer_length,
171 omninet_read_bulk_callback, port); 181 omninet_read_bulk_callback, port);
172 result = usb_submit_urb(port->read_urb, GFP_KERNEL); 182 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
173 if (result) 183 if (result) {
174 err("%s - failed submitting read urb, error %d", __FUNCTION__, result); 184 err("%s - failed submitting read urb, error %d", __FUNCTION__, result);
185 }
175 186
176 return result; 187 return result;
177} 188}
178 189
179static void omninet_close (struct usb_serial_port *port, struct file * filp) 190static void omninet_close (struct usb_serial_port *port, struct file * filp)
180{ 191{
181 struct usb_serial *serial = port->serial;
182 struct usb_serial_port *wport;
183
184 dbg("%s - port %d", __FUNCTION__, port->number); 192 dbg("%s - port %d", __FUNCTION__, port->number);
185
186 wport = serial->port[1];
187 usb_kill_urb(wport->write_urb);
188 usb_kill_urb(port->read_urb); 193 usb_kill_urb(port->read_urb);
189
190 kfree(usb_get_serial_port_data(port));
191} 194}
192 195
193 196
@@ -326,7 +329,12 @@ static void omninet_write_bulk_callback (struct urb *urb)
326 329
327static void omninet_shutdown (struct usb_serial *serial) 330static void omninet_shutdown (struct usb_serial *serial)
328{ 331{
332 struct usb_serial_port *wport = serial->port[1];
333 struct usb_serial_port *port = serial->port[0];
329 dbg ("%s", __FUNCTION__); 334 dbg ("%s", __FUNCTION__);
335
336 usb_kill_urb(wport->write_urb);
337 kfree(usb_get_serial_port_data(port));
330} 338}
331 339
332 340
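
The omninet hunk moves the per-port allocation out of open() into a new attach() routine, with the matching kfree() in shutdown(), so repeated opens no longer allocate the state and close() no longer frees it. A rough userspace model of that lifecycle, with made-up names rather than the usb-serial API:

/* Allocate once at attach, look up in open, free once at shutdown. */
#include <stdio.h>
#include <stdlib.h>

struct port_state { unsigned char outseq; };

static struct port_state *port_private;   /* usb_set/get_serial_port_data() stand-in */

static int attach(void)                   /* attach() analogue: single allocation point */
{
        port_private = calloc(1, sizeof(*port_private));
        return port_private ? 0 : -1;
}

static struct port_state *open_port(void) /* open() just looks the state up */
{
        return port_private;
}

static void shutdown_dev(void)            /* shutdown() is the single free point */
{
        free(port_private);
        port_private = NULL;
}

int main(void)
{
        if (attach())
                return 1;
        struct port_state *od = open_port();
        od->outseq = 1;                   /* state persists across opens */
        printf("outseq=%u\n", od->outseq);
        shutdown_dev();
        return 0;
}
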
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e178e6f40319..8c3f55b080b4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -113,6 +113,12 @@ static int option_send_setup(struct usb_serial_port *port);
113#define ANYDATA_VENDOR_ID 0x16d5 113#define ANYDATA_VENDOR_ID 0x16d5
114#define ANYDATA_PRODUCT_ID 0x6501 114#define ANYDATA_PRODUCT_ID 0x6501
115 115
116#define BANDRICH_VENDOR_ID 0x1A8D
117#define BANDRICH_PRODUCT_C100_1 0x1002
118#define BANDRICH_PRODUCT_C100_2 0x1003
119
120#define DELL_VENDOR_ID 0x413C
121
116static struct usb_device_id option_ids[] = { 122static struct usb_device_id option_ids[] = {
117 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 123 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
118 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 124 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -165,6 +171,9 @@ static struct usb_device_id option_ids[] = {
165 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ 171 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */
166 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */ 172 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */
167 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ID) }, 173 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ID) },
174 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
175 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
176 { USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard */
168 { } /* Terminating entry */ 177 { } /* Terminating entry */
169}; 178};
170MODULE_DEVICE_TABLE(usb, option_ids); 179MODULE_DEVICE_TABLE(usb, option_ids);
@@ -591,12 +600,6 @@ static int option_open(struct usb_serial_port *port, struct file *filp)
591 return (0); 600 return (0);
592} 601}
593 602
594static inline void stop_urb(struct urb *urb)
595{
596 if (urb && urb->status == -EINPROGRESS)
597 usb_kill_urb(urb);
598}
599
600static void option_close(struct usb_serial_port *port, struct file *filp) 603static void option_close(struct usb_serial_port *port, struct file *filp)
601{ 604{
602 int i; 605 int i;
@@ -614,9 +617,9 @@ static void option_close(struct usb_serial_port *port, struct file *filp)
614 617
615 /* Stop reading/writing urbs */ 618 /* Stop reading/writing urbs */
616 for (i = 0; i < N_IN_URB; i++) 619 for (i = 0; i < N_IN_URB; i++)
617 stop_urb(portdata->in_urbs[i]); 620 usb_kill_urb(portdata->in_urbs[i]);
618 for (i = 0; i < N_OUT_URB; i++) 621 for (i = 0; i < N_OUT_URB; i++)
619 stop_urb(portdata->out_urbs[i]); 622 usb_kill_urb(portdata->out_urbs[i]);
620 } 623 }
621 port->tty = NULL; 624 port->tty = NULL;
622} 625}
@@ -747,9 +750,9 @@ static void option_shutdown(struct usb_serial *serial)
747 port = serial->port[i]; 750 port = serial->port[i];
748 portdata = usb_get_serial_port_data(port); 751 portdata = usb_get_serial_port_data(port);
749 for (j = 0; j < N_IN_URB; j++) 752 for (j = 0; j < N_IN_URB; j++)
750 stop_urb(portdata->in_urbs[j]); 753 usb_kill_urb(portdata->in_urbs[j]);
751 for (j = 0; j < N_OUT_URB; j++) 754 for (j = 0; j < N_OUT_URB; j++)
752 stop_urb(portdata->out_urbs[j]); 755 usb_kill_urb(portdata->out_urbs[j]);
753 } 756 }
754 757
755 /* Now free them */ 758 /* Now free them */
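
option.c drops its stop_urb() wrapper: cancelling an idle or already-completed URB is harmless and the unconditional kill also waits for the completion handler, so pre-checking the status field is needless and opens a check-then-act window. The toy comparison below uses userspace stand-ins, not the real URB API:

#include <stdbool.h>
#include <stdio.h>

struct fake_urb { bool in_progress; };

/* racy style: state can change between the test and the cancel */
static void stop_urb_racy(struct fake_urb *u)
{
        if (u && u->in_progress)
                u->in_progress = false;
}

/* idempotent style: always safe, no window between test and action */
static void kill_urb(struct fake_urb *u)
{
        if (!u)
                return;
        u->in_progress = false;   /* cancelling idle work is a no-op */
}

int main(void)
{
        struct fake_urb active = { .in_progress = true };
        struct fake_urb idle = { .in_progress = false };

        kill_urb(&active);
        kill_urb(&idle);          /* harmless on an idle URB */
        stop_urb_racy(&idle);     /* the extra test buys nothing and can race */
        printf("active=%d idle=%d\n", active.in_progress, idle.in_progress);
        return 0;
}
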
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ecedd833818d..644607de4c11 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -456,12 +456,6 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
456 return (0); 456 return (0);
457} 457}
458 458
459static inline void stop_urb(struct urb *urb)
460{
461 if (urb && urb->status == -EINPROGRESS)
462 usb_kill_urb(urb);
463}
464
465static void sierra_close(struct usb_serial_port *port, struct file *filp) 459static void sierra_close(struct usb_serial_port *port, struct file *filp)
466{ 460{
467 int i; 461 int i;
@@ -479,9 +473,9 @@ static void sierra_close(struct usb_serial_port *port, struct file *filp)
479 473
480 /* Stop reading/writing urbs */ 474 /* Stop reading/writing urbs */
481 for (i = 0; i < N_IN_URB; i++) 475 for (i = 0; i < N_IN_URB; i++)
482 stop_urb(portdata->in_urbs[i]); 476 usb_unlink_urb(portdata->in_urbs[i]);
483 for (i = 0; i < N_OUT_URB; i++) 477 for (i = 0; i < N_OUT_URB; i++)
484 stop_urb(portdata->out_urbs[i]); 478 usb_unlink_urb(portdata->out_urbs[i]);
485 } 479 }
486 port->tty = NULL; 480 port->tty = NULL;
487} 481}
@@ -583,17 +577,26 @@ static void sierra_shutdown(struct usb_serial *serial)
583 /* Stop reading/writing urbs */ 577 /* Stop reading/writing urbs */
584 for (i = 0; i < serial->num_ports; ++i) { 578 for (i = 0; i < serial->num_ports; ++i) {
585 port = serial->port[i]; 579 port = serial->port[i];
580 if (!port)
581 continue;
586 portdata = usb_get_serial_port_data(port); 582 portdata = usb_get_serial_port_data(port);
583 if (!portdata)
584 continue;
585
587 for (j = 0; j < N_IN_URB; j++) 586 for (j = 0; j < N_IN_URB; j++)
588 stop_urb(portdata->in_urbs[j]); 587 usb_unlink_urb(portdata->in_urbs[j]);
589 for (j = 0; j < N_OUT_URB; j++) 588 for (j = 0; j < N_OUT_URB; j++)
590 stop_urb(portdata->out_urbs[j]); 589 usb_unlink_urb(portdata->out_urbs[j]);
591 } 590 }
592 591
593 /* Now free them */ 592 /* Now free them */
594 for (i = 0; i < serial->num_ports; ++i) { 593 for (i = 0; i < serial->num_ports; ++i) {
595 port = serial->port[i]; 594 port = serial->port[i];
595 if (!port)
596 continue;
596 portdata = usb_get_serial_port_data(port); 597 portdata = usb_get_serial_port_data(port);
598 if (!portdata)
599 continue;
597 600
598 for (j = 0; j < N_IN_URB; j++) { 601 for (j = 0; j < N_IN_URB; j++) {
599 if (portdata->in_urbs[j]) { 602 if (portdata->in_urbs[j]) {
@@ -612,6 +615,8 @@ static void sierra_shutdown(struct usb_serial *serial)
612 /* Now free per port private data */ 615 /* Now free per port private data */
613 for (i = 0; i < serial->num_ports; i++) { 616 for (i = 0; i < serial->num_ports; i++) {
614 port = serial->port[i]; 617 port = serial->port[i];
618 if (!port)
619 continue;
615 kfree(usb_get_serial_port_data(port)); 620 kfree(usb_get_serial_port_data(port));
616 } 621 }
617} 622}
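
The sierra shutdown path now tolerates ports that were never fully set up, checking both the port pointer and its private data before touching them. A tiny illustrative model of that defensive teardown (names invented here, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct port { void *private_data; };

static void shutdown_all(struct port **ports, int n)
{
        for (int i = 0; i < n; i++) {
                if (!ports[i])                 /* port was never created */
                        continue;
                if (!ports[i]->private_data)   /* attach bailed before setup */
                        continue;
                free(ports[i]->private_data);
                ports[i]->private_data = NULL;
        }
}

int main(void)
{
        struct port fully_set_up = { .private_data = malloc(8) };
        struct port never_attached = { .private_data = NULL };
        struct port *ports[3] = { &fully_set_up, NULL, &never_attached };

        shutdown_all(ports, 3);   /* must not crash on the NULL or empty entries */
        printf("teardown survived partial setup\n");
        return 0;
}
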
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 2f59ff226e2c..ffbe601cde2a 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -384,19 +384,21 @@ static int visor_write (struct usb_serial_port *port, const unsigned char *buf,
384 dbg("%s - write limit hit\n", __FUNCTION__); 384 dbg("%s - write limit hit\n", __FUNCTION__);
385 return 0; 385 return 0;
386 } 386 }
387 priv->outstanding_urbs++;
387 spin_unlock_irqrestore(&priv->lock, flags); 388 spin_unlock_irqrestore(&priv->lock, flags);
388 389
389 buffer = kmalloc (count, GFP_ATOMIC); 390 buffer = kmalloc (count, GFP_ATOMIC);
390 if (!buffer) { 391 if (!buffer) {
391 dev_err(&port->dev, "out of memory\n"); 392 dev_err(&port->dev, "out of memory\n");
392 return -ENOMEM; 393 count = -ENOMEM;
394 goto error_no_buffer;
393 } 395 }
394 396
395 urb = usb_alloc_urb(0, GFP_ATOMIC); 397 urb = usb_alloc_urb(0, GFP_ATOMIC);
396 if (!urb) { 398 if (!urb) {
397 dev_err(&port->dev, "no more free urbs\n"); 399 dev_err(&port->dev, "no more free urbs\n");
398 kfree (buffer); 400 count = -ENOMEM;
399 return -ENOMEM; 401 goto error_no_urb;
400 } 402 }
401 403
402 memcpy (buffer, buf, count); 404 memcpy (buffer, buf, count);
@@ -415,19 +417,27 @@ static int visor_write (struct usb_serial_port *port, const unsigned char *buf,
415 dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", 417 dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n",
416 __FUNCTION__, status); 418 __FUNCTION__, status);
417 count = status; 419 count = status;
418 kfree (buffer); 420 goto error;
419 } else { 421 } else {
420 spin_lock_irqsave(&priv->lock, flags); 422 spin_lock_irqsave(&priv->lock, flags);
421 ++priv->outstanding_urbs;
422 priv->bytes_out += count; 423 priv->bytes_out += count;
423 spin_unlock_irqrestore(&priv->lock, flags); 424 spin_unlock_irqrestore(&priv->lock, flags);
424 } 425 }
425 426
426 /* we are done with this urb, so let the host driver 427 /* we are done with this urb, so let the host driver
427 * really free it when it is finished with it */ 428 * really free it when it is finished with it */
428 usb_free_urb (urb); 429 usb_free_urb(urb);
429 430
430 return count; 431 return count;
432error:
433 usb_free_urb(urb);
434error_no_urb:
435 kfree(buffer);
436error_no_buffer:
437 spin_lock_irqsave(&priv->lock, flags);
438 --priv->outstanding_urbs;
439 spin_unlock_irqrestore(&priv->lock, flags);
440 return count;
431} 441}
432 442
433 443
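
visor_write() now reserves its in-flight slot (outstanding_urbs++) before dropping the lock and unwinds through stacked error labels, giving the slot back on every failure path. A compilable userspace sketch of the same unwind shape, with hypothetical names and a fake submission call:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int outstanding;
static int device_gone;

static int submit(const void *buf, size_t len)  /* pretend USB submission */
{
        (void)buf; (void)len;
        return device_gone ? -1 : 0;
}

static void drop_slot(void)
{
        pthread_mutex_lock(&lock);
        outstanding--;
        pthread_mutex_unlock(&lock);
}

static int write_chunk(const char *data, size_t len)
{
        char *buffer;
        int ret = (int)len;

        pthread_mutex_lock(&lock);
        outstanding++;                   /* reserve the in-flight slot first */
        pthread_mutex_unlock(&lock);

        buffer = malloc(len);
        if (!buffer) {
                ret = -1;
                goto err_no_buffer;
        }
        memcpy(buffer, data, len);

        if (submit(buffer, len) < 0) {
                ret = -1;
                goto err_submitted;
        }

        /* success: in the driver the write completion callback frees the
         * buffer and releases the slot; the model does it inline */
        free(buffer);
        drop_slot();
        return ret;

err_submitted:
        free(buffer);
err_no_buffer:
        drop_slot();                     /* undo the early reservation */
        return ret;
}

int main(void)
{
        int ret = write_chunk("hi", 2);
        printf("ok:   ret=%d outstanding=%d\n", ret, outstanding);

        device_gone = 1;
        ret = write_chunk("hi", 2);
        printf("fail: ret=%d outstanding=%d\n", ret, outstanding);
        return 0;
}
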
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index bf16e9e1d84e..27c5f8f9a2d5 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -1109,7 +1109,7 @@ static int firm_send_command (struct usb_serial_port *port, __u8 command, __u8 *
1109 command_port = port->serial->port[COMMAND_PORT]; 1109 command_port = port->serial->port[COMMAND_PORT];
1110 command_info = usb_get_serial_port_data(command_port); 1110 command_info = usb_get_serial_port_data(command_port);
1111 spin_lock_irqsave(&command_info->lock, flags); 1111 spin_lock_irqsave(&command_info->lock, flags);
1112 command_info->command_finished = FALSE; 1112 command_info->command_finished = false;
1113 1113
1114 transfer_buffer = (__u8 *)command_port->write_urb->transfer_buffer; 1114 transfer_buffer = (__u8 *)command_port->write_urb->transfer_buffer;
1115 transfer_buffer[0] = command; 1115 transfer_buffer[0] = command;
@@ -1124,12 +1124,12 @@ static int firm_send_command (struct usb_serial_port *port, __u8 command, __u8 *
1124 spin_unlock_irqrestore(&command_info->lock, flags); 1124 spin_unlock_irqrestore(&command_info->lock, flags);
1125 1125
1126 /* wait for the command to complete */ 1126 /* wait for the command to complete */
1127 wait_event_interruptible_timeout(command_info->wait_command, 1127 wait_event_interruptible_timeout(command_info->wait_command,
1128 (command_info->command_finished != FALSE), COMMAND_TIMEOUT); 1128 (bool)command_info->command_finished, COMMAND_TIMEOUT);
1129 1129
1130 spin_lock_irqsave(&command_info->lock, flags); 1130 spin_lock_irqsave(&command_info->lock, flags);
1131 1131
1132 if (command_info->command_finished == FALSE) { 1132 if (command_info->command_finished == false) {
1133 dbg("%s - command timed out.", __FUNCTION__); 1133 dbg("%s - command timed out.", __FUNCTION__);
1134 retval = -ETIMEDOUT; 1134 retval = -ETIMEDOUT;
1135 goto exit; 1135 goto exit;
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index d714eff58dc0..f16079705664 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -20,10 +20,6 @@
20#define __LINUX_USB_SERIAL_WHITEHEAT_H 20#define __LINUX_USB_SERIAL_WHITEHEAT_H
21 21
22 22
23#define FALSE 0
24#define TRUE 1
25
26
27/* WhiteHEAT commands */ 23/* WhiteHEAT commands */
28#define WHITEHEAT_OPEN 1 /* open the port */ 24#define WHITEHEAT_OPEN 1 /* open the port */
29#define WHITEHEAT_CLOSE 2 /* close the port */ 25#define WHITEHEAT_CLOSE 2 /* close the port */
diff --git a/drivers/usb/storage/libusual.c b/drivers/usb/storage/libusual.c
index 599ad10a761b..06d1107dbd47 100644
--- a/drivers/usb/storage/libusual.c
+++ b/drivers/usb/storage/libusual.c
@@ -117,6 +117,7 @@ EXPORT_SYMBOL_GPL(usb_usual_check_type);
117static int usu_probe(struct usb_interface *intf, 117static int usu_probe(struct usb_interface *intf,
118 const struct usb_device_id *id) 118 const struct usb_device_id *id)
119{ 119{
120 int rc;
120 unsigned long type; 121 unsigned long type;
121 struct task_struct* task; 122 struct task_struct* task;
122 unsigned long flags; 123 unsigned long flags;
@@ -135,7 +136,7 @@ static int usu_probe(struct usb_interface *intf,
135 136
136 task = kthread_run(usu_probe_thread, (void*)type, "libusual_%d", type); 137 task = kthread_run(usu_probe_thread, (void*)type, "libusual_%d", type);
137 if (IS_ERR(task)) { 138 if (IS_ERR(task)) {
138 int rc = PTR_ERR(task); 139 rc = PTR_ERR(task);
139 printk(KERN_WARNING "libusual: " 140 printk(KERN_WARNING "libusual: "
140 "Unable to start the thread for %s: %d\n", 141 "Unable to start the thread for %s: %d\n",
141 bias_names[type], rc); 142 bias_names[type], rc);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4a9d0d5c7282..8b3145ab7757 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1371,15 +1371,6 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1371 US_SC_DEVICE, US_PR_DEVICE, NULL, 1371 US_SC_DEVICE, US_PR_DEVICE, NULL,
1372 US_FL_IGNORE_RESIDUE ), 1372 US_FL_IGNORE_RESIDUE ),
1373 1373
1374/* This prevents the kernel from detecting the virtual cd-drive with the
1375 * Windows drivers. <johann.wilhelm@student.tugraz.at>
1376*/
1377UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0xffff,
1378 "HUAWEI",
1379 "E220 USB-UMTS Install",
1380 US_SC_DEVICE, US_PR_DEVICE, NULL,
1381 US_FL_IGNORE_DEVICE),
1382
1383/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ 1374/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
1384UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001, 1375UNUSUAL_DEV( 0x132b, 0x000b, 0x0001, 0x0001,
1385 "Minolta", 1376 "Minolta",
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 46929a1b6f24..8432bf171d2e 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -34,18 +34,25 @@ static struct usb_device_id skel_table [] = {
34}; 34};
35MODULE_DEVICE_TABLE(usb, skel_table); 35MODULE_DEVICE_TABLE(usb, skel_table);
36 36
37/* to prevent a race between open and disconnect */
38static DEFINE_MUTEX(skel_open_lock);
39
37 40
38/* Get a minor range for your devices from the usb maintainer */ 41/* Get a minor range for your devices from the usb maintainer */
39#define USB_SKEL_MINOR_BASE 192 42#define USB_SKEL_MINOR_BASE 192
40 43
41/* our private defines. if this grows any larger, use your own .h file */ 44/* our private defines. if this grows any larger, use your own .h file */
42#define MAX_TRANSFER (PAGE_SIZE - 512) 45#define MAX_TRANSFER (PAGE_SIZE - 512)
46/* MAX_TRANSFER is chosen so that the VM is not stressed by
47 allocations > PAGE_SIZE and the number of packets in a page
48 is an integer 512 is the largest possible packet on EHCI */
43#define WRITES_IN_FLIGHT 8 49#define WRITES_IN_FLIGHT 8
50/* arbitrarily chosen */
44 51
45/* Structure to hold all of our device specific stuff */ 52/* Structure to hold all of our device specific stuff */
46struct usb_skel { 53struct usb_skel {
47 struct usb_device *dev; /* the usb device for this device */ 54 struct usb_device *udev; /* the usb device for this device */
48 struct usb_interface *interface; /* the interface for this device */ 55 struct usb_interface *interface; /* the interface for this device */
49 struct semaphore limit_sem; /* limiting the number of writes in progress */ 56 struct semaphore limit_sem; /* limiting the number of writes in progress */
50 unsigned char *bulk_in_buffer; /* the buffer to receive data */ 57 unsigned char *bulk_in_buffer; /* the buffer to receive data */
51 size_t bulk_in_size; /* the size of the receive buffer */ 58 size_t bulk_in_size; /* the size of the receive buffer */
@@ -76,8 +83,10 @@ static int skel_open(struct inode *inode, struct file *file)
76 83
77 subminor = iminor(inode); 84 subminor = iminor(inode);
78 85
86 mutex_lock(&skel_open_lock);
79 interface = usb_find_interface(&skel_driver, subminor); 87 interface = usb_find_interface(&skel_driver, subminor);
80 if (!interface) { 88 if (!interface) {
89 mutex_unlock(&skel_open_lock);
81 err ("%s - error, can't find device for minor %d", 90 err ("%s - error, can't find device for minor %d",
82 __FUNCTION__, subminor); 91 __FUNCTION__, subminor);
83 retval = -ENODEV; 92 retval = -ENODEV;
@@ -86,12 +95,15 @@ static int skel_open(struct inode *inode, struct file *file)
86 95
87 dev = usb_get_intfdata(interface); 96 dev = usb_get_intfdata(interface);
88 if (!dev) { 97 if (!dev) {
98 mutex_unlock(&skel_open_lock);
89 retval = -ENODEV; 99 retval = -ENODEV;
90 goto exit; 100 goto exit;
91 } 101 }
92 102
93 /* increment our usage count for the device */ 103 /* increment our usage count for the device */
94 kref_get(&dev->kref); 104 kref_get(&dev->kref);
105 /* now we can drop the lock */
106 mutex_unlock(&skel_open_lock);
95 107
96 /* prevent the device from being autosuspended */ 108 /* prevent the device from being autosuspended */
97 retval = usb_autopm_get_interface(interface); 109 retval = usb_autopm_get_interface(interface);
@@ -201,12 +213,6 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
201 goto exit; 213 goto exit;
202 } 214 }
203 215
204 mutex_lock(&dev->io_mutex);
205 if (!dev->interface) { /* disconnect() was called */
206 retval = -ENODEV;
207 goto error;
208 }
209
210 /* create a urb, and a buffer for it, and copy the data to the urb */ 216 /* create a urb, and a buffer for it, and copy the data to the urb */
211 urb = usb_alloc_urb(0, GFP_KERNEL); 217 urb = usb_alloc_urb(0, GFP_KERNEL);
212 if (!urb) { 218 if (!urb) {
@@ -225,6 +231,14 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
225 goto error; 231 goto error;
226 } 232 }
227 233
234 /* this lock makes sure we don't submit URBs to gone devices */
235 mutex_lock(&dev->io_mutex);
236 if (!dev->interface) { /* disconnect() was called */
237 mutex_unlock(&dev->io_mutex);
238 retval = -ENODEV;
239 goto error;
240 }
241
228 /* initialize the urb properly */ 242 /* initialize the urb properly */
229 usb_fill_bulk_urb(urb, dev->udev, 243 usb_fill_bulk_urb(urb, dev->udev,
230 usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr), 244 usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
@@ -233,6 +247,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
233 247
234 /* send the data out the bulk port */ 248 /* send the data out the bulk port */
235 retval = usb_submit_urb(urb, GFP_KERNEL); 249 retval = usb_submit_urb(urb, GFP_KERNEL);
250 mutex_unlock(&dev->io_mutex);
236 if (retval) { 251 if (retval) {
237 err("%s - failed submitting write urb, error %d", __FUNCTION__, retval); 252 err("%s - failed submitting write urb, error %d", __FUNCTION__, retval);
238 goto error; 253 goto error;
@@ -241,7 +256,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
241 /* release our reference to this urb, the USB core will eventually free it entirely */ 256 /* release our reference to this urb, the USB core will eventually free it entirely */
242 usb_free_urb(urb); 257 usb_free_urb(urb);
243 258
244 mutex_unlock(&dev->io_mutex); 259
245 return writesize; 260 return writesize;
246 261
247error: 262error:
@@ -249,7 +264,6 @@ error:
249 usb_buffer_free(dev->udev, writesize, buf, urb->transfer_dma); 264 usb_buffer_free(dev->udev, writesize, buf, urb->transfer_dma);
250 usb_free_urb(urb); 265 usb_free_urb(urb);
251 } 266 }
252 mutex_unlock(&dev->io_mutex);
253 up(&dev->limit_sem); 267 up(&dev->limit_sem);
254 268
255exit: 269exit:
@@ -344,6 +358,7 @@ static int skel_probe(struct usb_interface *interface, const struct usb_device_i
344 358
345error: 359error:
346 if (dev) 360 if (dev)
361 /* this frees allocated memory */
347 kref_put(&dev->kref, skel_delete); 362 kref_put(&dev->kref, skel_delete);
348 return retval; 363 return retval;
349} 364}
@@ -354,20 +369,21 @@ static void skel_disconnect(struct usb_interface *interface)
 	int minor = interface->minor;
 
 	/* prevent skel_open() from racing skel_disconnect() */
-	lock_kernel();
+	mutex_lock(&skel_open_lock);
 
 	dev = usb_get_intfdata(interface);
 	usb_set_intfdata(interface, NULL);
 
 	/* give back our minor */
 	usb_deregister_dev(interface, &skel_class);
+	mutex_unlock(&skel_open_lock);
 
 	/* prevent more I/O from starting */
 	mutex_lock(&dev->io_mutex);
 	dev->interface = NULL;
 	mutex_unlock(&dev->io_mutex);
 
-	unlock_kernel();
+
 
 	/* decrement our usage count */
 	kref_put(&dev->kref, skel_delete);
@@ -380,6 +396,7 @@ static struct usb_driver skel_driver = {
 	.probe = skel_probe,
 	.disconnect = skel_disconnect,
 	.id_table = skel_table,
+	.supports_autosuspend = 1,
 };
 
 static int __init usb_skel_init(void)
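
For readers following the locking changes above, the essential pattern skel_write() uses after this patch can be condensed as follows. This is an illustrative sketch only: the structure and field names are assumed to mirror usb-skeleton.c, error paths and buffer handling are trimmed, and skel_submit_guarded() is a hypothetical helper, not a function added by the patch.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/usb.h>

/* cut-down device state, assumed to mirror struct usb_skel */
struct skel_dev {
	struct usb_interface *interface;	/* set to NULL by disconnect() */
	struct mutex io_mutex;			/* serializes I/O against disconnect */
};

/* submit an already-filled URB, refusing once the device is gone */
static int skel_submit_guarded(struct skel_dev *dev, struct urb *urb)
{
	int retval;

	/* hold io_mutex only across the liveness check and the submission,
	 * exactly as the relocated mutex_lock()/mutex_unlock() above do */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		return -ENODEV;
	}
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);

	return retval;
}

Keeping the critical section this narrow is what lets skel_disconnect() take io_mutex and clear dev->interface without stalling behind URB allocation or the copy from user space.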
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 5af684b40496..b1cb72c3780f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -139,7 +139,7 @@ config FB_TILEBLITTING
 	  This is particularly important to one driver, matroxfb.  If
 	  unsure, say N.
 
-comment "Frambuffer hardware drivers"
+comment "Frame buffer hardware drivers"
 	depends on FB
 
 config FB_CIRRUS
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index d7627fc4f11e..8514f2a6f060 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2899,7 +2899,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
 			struct fb_info *info, unsigned long addr)
 {
 	struct atyfb_par *par = info->par;
-	struct pcidev_cookie *pcp;
+	struct device_node *dp;
 	char prop[128];
 	int node, len, i, j, ret;
 	u32 mem, chip_id;
@@ -3037,8 +3037,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
 		node = 0;
 	}
 
-	pcp = pdev->sysdata;
-	if (node == pcp->prom_node->node) {
+	dp = pci_device_to_OF_node(pdev);
+	if (node == dp->node) {
 		struct fb_var_screeninfo *var = &default_var;
 		unsigned int N, P, Q, M, T, R;
 		u32 v_total, h_total;
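
The two hunks above replace the sparc-only pcidev_cookie bookkeeping with the generic pci_device_to_OF_node() helper. A minimal sketch of the new lookup, assuming the struct device_node of this kernel release (where the PROM node handle is exposed as dp->node); the helper name is hypothetical and not part of the patch.

#include <linux/pci.h>
#include <asm/prom.h>

/* return nonzero when @pdev is the card whose PROM node handle is @node,
 * i.e. the comparison the patched atyfb_setup_sparc() now performs */
static int atyfb_pdev_matches_node(struct pci_dev *pdev, int node)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);

	return dp && dp->node == node;
}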
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 1bf6f42eb400..a4b3fd185de7 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -410,7 +410,7 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
 }
 #endif
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 /*
  * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
  * tree. Hopefully, ATI OF driver is kind enough to fill these
@@ -440,7 +440,7 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
 
 	return 0;
 }
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 
 /*
  * Read PLL infos from chip registers
@@ -645,7 +645,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
 	rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK;
 
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 	/*
 	 * Retrieve PLL infos from Open Firmware first
 	 */
@@ -653,7 +653,7 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
 		printk(KERN_INFO "radeonfb: Retrieved PLL infos from Open Firmware\n");
 		goto found;
 	}
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 
 	/*
 	 * Check out if we have an X86 which gave us some PLL informations
@@ -2231,7 +2231,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
 	    rinfo->family == CHIP_FAMILY_RS200)
 		rinfo->errata |= CHIP_ERRATA_PLL_DELAY;
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 	/* On PPC, we obtain the OF device-node pointer to the firmware
 	 * data for this chip
 	 */
@@ -2240,6 +2240,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
 		printk(KERN_WARNING "radeonfb (%s): Cannot match card to OF node !\n",
 		       pci_name(rinfo->pdev));
 
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
+#ifdef CONFIG_PPC_OF
 	/* On PPC, the firmware sets up a memory mapping that tends
 	 * to cause lockups when enabling the engine. We reconfigure
 	 * the card internal memory mappings properly
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index 38c7dbf8c151..737b5c09dbdb 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -52,7 +52,7 @@ static char *radeon_get_mon_name(int type)
 }
 
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 /*
  * Try to find monitor informations & EDID data out of the Open Firmware
  * device-tree. This also contains some "hacks" to work around a few machine
@@ -156,7 +156,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
 	}
 	return MT_NONE;
 }
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 
 
 static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
@@ -495,11 +495,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
 	 * Old single head cards
 	 */
 	if (!rinfo->has_CRTC2) {
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 		if (rinfo->mon1_type == MT_NONE)
 			rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
 								&rinfo->mon1_EDID);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 #ifdef CONFIG_FB_RADEON_I2C
 		if (rinfo->mon1_type == MT_NONE)
 			rinfo->mon1_type =
@@ -544,11 +544,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
 	/*
 	 * Probe primary head (DVI or laptop internal panel)
 	 */
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 	if (rinfo->mon1_type == MT_NONE)
 		rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
 							&rinfo->mon1_EDID);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
 #ifdef CONFIG_FB_RADEON_I2C
 	if (rinfo->mon1_type == MT_NONE)
 		rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi,
@@ -572,11 +572,11 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
 	/*
 	 * Probe secondary head (mostly VGA, can be DVI)
 	 */
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 	if (rinfo->mon2_type == MT_NONE)
 		rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1,
 							&rinfo->mon2_EDID);
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_OF || defined(CONFIG_SPARC) */
 #ifdef CONFIG_FB_RADEON_I2C
 	if (rinfo->mon2_type == MT_NONE)
 		rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga,
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index d5ff224a6258..319000360285 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -16,7 +16,7 @@
 
 #include <asm/io.h>
 
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 #include <asm/prom.h>
 #endif
 
@@ -292,7 +292,7 @@ struct radeonfb_info {
 	unsigned long fb_local_base;
 
 	struct pci_dev *pdev;
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
 	struct device_node *of_node;
 #endif
 
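
Since radeonfb_info.of_node is now compiled in on both PPC and sparc builds, any code touching it has to sit under the same combined guard. A hypothetical illustration (not part of the patch), assuming device_node exposes a name string as in this kernel release:

#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
/* report which Open Firmware node the framebuffer was matched to */
static const char *radeon_of_node_name(struct radeonfb_info *rinfo)
{
	return rinfo->of_node ? rinfo->of_node->name : "<no OF node>";
}
#endif /* CONFIG_PPC_OF || CONFIG_SPARC */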
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 767c850f8eb7..f042428a84f4 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -266,7 +266,7 @@ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes,
 static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var,
 					      struct device_node *dp)
 {
-	char *params;
+	const char *params;
 	char *p;
 	int ww, hh;
 
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 90592fb59156..eb1a4812ad1d 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -44,8 +44,8 @@
 
 #include <asm/io.h>
 
-#ifdef __sparc__
-#include <asm/pbm.h>
+#ifdef CONFIG_SPARC
+#include <asm/prom.h>
 #include <asm/pcic.h>
 #endif
 
@@ -96,7 +96,7 @@ struct fb_var_screeninfo default_var = {
 	.vmode = FB_VMODE_NONINTERLACED
 };
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 struct fb_var_screeninfo default_var_1024x768 __initdata = {
 	/* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
 	.xres = 1024,
@@ -188,7 +188,7 @@ static inline void iga_outb(struct iga_par *par, unsigned char val,
 	pci_outb(par, val, reg+1);
 }
 
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC */
 
 /*
  * Very important functionality for the JavaEngine1 computer:
@@ -217,7 +217,7 @@ static void iga_blank_border(struct iga_par *par)
 		iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i);
 }
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 static int igafb_mmap(struct fb_info *info,
 		      struct vm_area_struct *vma)
 {
@@ -271,7 +271,7 @@ static int igafb_mmap(struct fb_info *info,
 	vma->vm_flags |= VM_IO;
 	return 0;
 }
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC */
 
 static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green,
 			   unsigned blue, unsigned transp,
@@ -323,7 +323,7 @@ static struct fb_ops igafb_ops = {
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 	.fb_mmap = igafb_mmap,
 #endif
 };
@@ -424,7 +424,7 @@ int __init igafb_init(void)
 
 	par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 	/*
 	 * The following is sparc specific and this is why:
 	 *
@@ -477,8 +477,8 @@ int __init igafb_init(void)
 	 * Set default vmode and cmode from PROM properties.
 	 */
 	{
-		struct pcidev_cookie *cookie = pdev->sysdata;
-		int node = cookie->prom_node;
+		struct device_node *dp = pci_device_to_OF_node(pdev);
+		int node = dp->node;
 		int width = prom_getintdefault(node, "width", 1024);
 		int height = prom_getintdefault(node, "height", 768);
 		int depth = prom_getintdefault(node, "depth", 8);
@@ -534,7 +534,7 @@ int __init igafb_init(void)
 		kfree(info);
 	}
 
-#ifdef __sparc__
+#ifdef CONFIG_SPARC
 	/*
 	 * Add /dev/fb mmap values.
 	 */
@@ -552,7 +552,7 @@ int __init igafb_init(void)
 	par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */
 	par->mmap_map[1].prot_mask = SRMMU_CACHE;
 	par->mmap_map[1].prot_flag = SRMMU_WRITE;
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC */
 
 	return 0;
 }
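
The igafb conversion above ends by pulling the default mode out of PROM properties through the node returned by pci_device_to_OF_node(). A standalone sketch of that lookup, assuming the sparc prom_getintdefault() helper from asm/oplib.h and the same fallback values the driver uses; the function and struct names here are hypothetical, not part of the patch.

#include <linux/pci.h>
#include <asm/prom.h>
#include <asm/oplib.h>

struct iga_prom_mode {
	int width, height, depth;
};

/* read "width"/"height"/"depth" from the card's PROM node,
 * defaulting to 1024x768 at 8 bpp when a property is missing */
static void iga_read_prom_mode(struct pci_dev *pdev, struct iga_prom_mode *m)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);
	int node = dp ? dp->node : prom_root_node;

	m->width  = prom_getintdefault(node, "width", 1024);
	m->height = prom_getintdefault(node, "height", 768);
	m->depth  = prom_getintdefault(node, "depth", 8);
}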