author     Jeff Garzik <jgarzik@pobox.com>    2005-09-08 05:43:49 -0400
committer  Jeff Garzik <jgarzik@pobox.com>    2005-09-08 05:43:49 -0400
commit     1d6ae775d7a948c9575658eb41184fd2e506c0df (patch)
tree       8128a28e89d82f13bb8e3a2160382240c66e2816 /drivers
parent     739cdbf1d8f0739b80035b80d69d871e33749b86 (diff)
parent     caf39e87cc1182f7dae84eefc43ca14d54c78ef9 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/ambassador.c | 2
-rw-r--r--  drivers/atm/atmtcp.c | 2
-rw-r--r--  drivers/atm/eni.c | 2
-rw-r--r--  drivers/atm/firestream.c | 2
-rw-r--r--  drivers/atm/fore200e.c | 2
-rw-r--r--  drivers/atm/he.c | 2
-rw-r--r--  drivers/atm/horizon.c | 2
-rw-r--r--  drivers/atm/idt77252.c | 8
-rw-r--r--  drivers/atm/lanai.c | 2
-rw-r--r--  drivers/atm/nicstar.c | 167
-rw-r--r--  drivers/atm/nicstar.h | 16
-rw-r--r--  drivers/atm/zatm.c | 10
-rw-r--r--  drivers/base/attribute_container.c | 86
-rw-r--r--  drivers/base/bus.c | 42
-rw-r--r--  drivers/base/class.c | 39
-rw-r--r--  drivers/base/core.c | 19
-rw-r--r--  drivers/base/dd.c | 2
-rw-r--r--  drivers/base/driver.c | 15
-rw-r--r--  drivers/base/firmware_class.c | 79
-rw-r--r--  drivers/base/node.c | 24
-rw-r--r--  drivers/base/power/resume.c | 8
-rw-r--r--  drivers/base/power/runtime.c | 8
-rw-r--r--  drivers/base/power/suspend.c | 12
-rw-r--r--  drivers/base/power/sysfs.c | 8
-rw-r--r--  drivers/base/sys.c | 110
-rw-r--r--  drivers/base/transport_class.c | 19
-rw-r--r--  drivers/block/Kconfig | 46
-rw-r--r--  drivers/block/aoe/aoedev.c | 2
-rw-r--r--  drivers/block/aoe/aoenet.c | 2
-rw-r--r--  drivers/block/cfq-iosched.c | 3
-rw-r--r--  drivers/block/cryptoloop.c | 6
-rw-r--r--  drivers/block/deadline-iosched.c | 12
-rw-r--r--  drivers/block/floppy.c | 41
-rw-r--r--  drivers/block/genhd.c | 2
-rw-r--r--  drivers/block/ll_rw_blk.c | 196
-rw-r--r--  drivers/block/scsi_ioctl.c | 60
-rw-r--r--  drivers/block/viodasd.c | 2
-rw-r--r--  drivers/bluetooth/bfusb.c | 16
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 24
-rw-r--r--  drivers/bluetooth/bpa10x.c | 17
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 12
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 10
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 10
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 18
-rw-r--r--  drivers/bluetooth/hci_h4.c | 4
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 4
-rw-r--r--  drivers/bluetooth/hci_usb.c | 23
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 386
-rw-r--r--  drivers/bluetooth/hci_vhci.h | 50
-rw-r--r--  drivers/cdrom/cdrom.c | 15
-rw-r--r--  drivers/cdrom/viocd.c | 2
-rw-r--r--  drivers/char/Kconfig | 7
-rw-r--r--  drivers/char/digi1.h | 38
-rw-r--r--  drivers/char/digiFep1.h | 154
-rw-r--r--  drivers/char/drm/Kconfig | 16
-rw-r--r--  drivers/char/drm/Makefile | 7
-rw-r--r--  drivers/char/drm/drm.h | 8
-rw-r--r--  drivers/char/drm/drmP.h | 137
-rw-r--r--  drivers/char/drm/drm_agpsupport.c | 143
-rw-r--r--  drivers/char/drm/drm_bufs.c | 647
-rw-r--r--  drivers/char/drm/drm_context.c | 19
-rw-r--r--  drivers/char/drm/drm_drv.c | 79
-rw-r--r--  drivers/char/drm/drm_fops.c | 6
-rw-r--r--  drivers/char/drm/drm_ioctl.c | 2
-rw-r--r--  drivers/char/drm/drm_memory.c | 8
-rw-r--r--  drivers/char/drm/drm_pci.c | 55
-rw-r--r--  drivers/char/drm/drm_pciids.h | 79
-rw-r--r--  drivers/char/drm/drm_proc.c | 17
-rw-r--r--  drivers/char/drm/drm_scatter.c | 11
-rw-r--r--  drivers/char/drm/drm_stub.c | 8
-rw-r--r--  drivers/char/drm/drm_sysfs.c | 1
-rw-r--r--  drivers/char/drm/drm_vm.c | 92
-rw-r--r--  drivers/char/drm/ffb_drv.c | 5
-rw-r--r--  drivers/char/drm/gamma_context.h | 492
-rw-r--r--  drivers/char/drm/gamma_dma.c | 946
-rw-r--r--  drivers/char/drm/gamma_drm.h | 90
-rw-r--r--  drivers/char/drm/gamma_drv.c | 59
-rw-r--r--  drivers/char/drm/gamma_drv.h | 147
-rw-r--r--  drivers/char/drm/gamma_lists.h | 215
-rw-r--r--  drivers/char/drm/gamma_lock.h | 140
-rw-r--r--  drivers/char/drm/gamma_old_dma.h | 313
-rw-r--r--  drivers/char/drm/i810_dma.c | 22
-rw-r--r--  drivers/char/drm/i810_drv.c | 1
-rw-r--r--  drivers/char/drm/i810_drv.h | 1
-rw-r--r--  drivers/char/drm/i830_dma.c | 22
-rw-r--r--  drivers/char/drm/i830_drv.c | 1
-rw-r--r--  drivers/char/drm/i830_drv.h | 1
-rw-r--r--  drivers/char/drm/i915_dma.c | 31
-rw-r--r--  drivers/char/drm/i915_drv.c | 1
-rw-r--r--  drivers/char/drm/i915_drv.h | 4
-rw-r--r--  drivers/char/drm/mga_dma.c | 612
-rw-r--r--  drivers/char/drm/mga_drm.h | 98
-rw-r--r--  drivers/char/drm/mga_drv.c | 45
-rw-r--r--  drivers/char/drm/mga_drv.h | 94
-rw-r--r--  drivers/char/drm/mga_ioc32.c | 67
-rw-r--r--  drivers/char/drm/mga_irq.c | 72
-rw-r--r--  drivers/char/drm/mga_state.c | 158
-rw-r--r--  drivers/char/drm/mga_warp.c | 141
-rw-r--r--  drivers/char/drm/r128_cce.c | 6
-rw-r--r--  drivers/char/drm/r128_drm.h | 2
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c | 801
-rw-r--r--  drivers/char/drm/r300_reg.h | 1412
-rw-r--r--  drivers/char/drm/radeon_cp.c | 35
-rw-r--r--  drivers/char/drm/radeon_drm.h | 46
-rw-r--r--  drivers/char/drm/radeon_drv.c | 1
-rw-r--r--  drivers/char/drm/radeon_drv.h | 30
-rw-r--r--  drivers/char/drm/radeon_state.c | 75
-rw-r--r--  drivers/char/drm/savage_bci.c | 1096
-rw-r--r--  drivers/char/drm/savage_drm.h | 209
-rw-r--r--  drivers/char/drm/savage_drv.c | 112
-rw-r--r--  drivers/char/drm/savage_drv.h | 579
-rw-r--r--  drivers/char/drm/savage_state.c | 1146
-rw-r--r--  drivers/char/epca.c | 1588
-rw-r--r--  drivers/char/epca.h | 108
-rw-r--r--  drivers/char/hpet.c | 4
-rw-r--r--  drivers/char/hvc_vio.c | 2
-rw-r--r--  drivers/char/hvcs.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 69
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 101
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 336
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 168
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 395
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 46
-rw-r--r--  drivers/char/mbcs.c | 2
-rw-r--r--  drivers/char/mem.c | 4
-rw-r--r--  drivers/char/misc.c | 9
-rw-r--r--  drivers/char/moxa.c | 2
-rw-r--r--  drivers/char/mwave/mwavedd.c | 21
-rw-r--r--  drivers/char/mxser.c | 1
-rw-r--r--  drivers/char/random.c | 34
-rw-r--r--  drivers/char/rtc.c | 5
-rw-r--r--  drivers/char/snsc_event.c | 11
-rw-r--r--  drivers/char/sonypi.c | 118
-rw-r--r--  drivers/char/tpm/tpm_atmel.c | 3
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 76
-rw-r--r--  drivers/char/tty_io.c | 6
-rw-r--r--  drivers/char/viotape.c | 2
-rw-r--r--  drivers/char/vt.c | 21
-rw-r--r--  drivers/char/watchdog/Kconfig | 7
-rw-r--r--  drivers/char/watchdog/Makefile | 72
-rw-r--r--  drivers/char/watchdog/booke_wdt.c | 192
-rw-r--r--  drivers/char/watchdog/ixp2000_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/ixp4xx_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c | 87
-rw-r--r--  drivers/char/watchdog/scx200_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/softdog.c | 13
-rw-r--r--  drivers/char/watchdog/w83627hf_wdt.c | 6
-rw-r--r--  drivers/firmware/Kconfig | 27
-rw-r--r--  drivers/firmware/Makefile | 2
-rw-r--r--  drivers/firmware/dcdbas.c | 596
-rw-r--r--  drivers/firmware/dcdbas.h | 107
-rw-r--r--  drivers/firmware/dell_rbu.c | 634
-rw-r--r--  drivers/hwmon/Kconfig | 72
-rw-r--r--  drivers/hwmon/Makefile | 4
-rw-r--r--  drivers/hwmon/adm1021.c | 35
-rw-r--r--  drivers/hwmon/adm1025.c | 31
-rw-r--r--  drivers/hwmon/adm1026.c | 25
-rw-r--r--  drivers/hwmon/adm1031.c | 24
-rw-r--r--  drivers/hwmon/adm9240.c | 33
-rw-r--r--  drivers/hwmon/asb100.c | 56
-rw-r--r--  drivers/hwmon/atxp1.c | 26
-rw-r--r--  drivers/hwmon/ds1621.c | 29
-rw-r--r--  drivers/hwmon/fscher.c | 27
-rw-r--r--  drivers/hwmon/fscpos.c | 27
-rw-r--r--  drivers/hwmon/gl518sm.c | 28
-rw-r--r--  drivers/hwmon/gl520sm.c | 31
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 189
-rw-r--r--  drivers/hwmon/hwmon.c | 98
-rw-r--r--  drivers/hwmon/it87.c | 76
-rw-r--r--  drivers/hwmon/lm63.c | 27
-rw-r--r--  drivers/hwmon/lm75.c | 41
-rw-r--r--  drivers/hwmon/lm75.h | 2
-rw-r--r--  drivers/hwmon/lm77.c | 24
-rw-r--r--  drivers/hwmon/lm78.c | 84
-rw-r--r--  drivers/hwmon/lm80.c | 27
-rw-r--r--  drivers/hwmon/lm83.c | 27
-rw-r--r--  drivers/hwmon/lm85.c | 39
-rw-r--r--  drivers/hwmon/lm87.c | 31
-rw-r--r--  drivers/hwmon/lm90.c | 27
-rw-r--r--  drivers/hwmon/lm92.c | 28
-rw-r--r--  drivers/hwmon/max1619.c | 28
-rw-r--r--  drivers/hwmon/pc87360.c | 852
-rw-r--r--  drivers/hwmon/sis5595.c | 70
-rw-r--r--  drivers/hwmon/smsc47b397.c | 74
-rw-r--r--  drivers/hwmon/smsc47m1.c | 70
-rw-r--r--  drivers/hwmon/via686a.c | 76
-rw-r--r--  drivers/hwmon/w83627ehf.c | 64
-rw-r--r--  drivers/hwmon/w83627hf.c | 80
-rw-r--r--  drivers/hwmon/w83781d.c | 90
-rw-r--r--  drivers/hwmon/w83792d.c | 1649
-rw-r--r--  drivers/hwmon/w83l785ts.c | 27
-rw-r--r--  drivers/i2c/Makefile | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-bit.c | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-ite.c | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-pca.c | 16
-rw-r--r--  drivers/i2c/algos/i2c-algo-pcf.c | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-sgi.c | 5
-rw-r--r--  drivers/i2c/algos/i2c-algo-sibyte.c | 4
-rw-r--r--  drivers/i2c/busses/Kconfig | 8
-rw-r--r--  drivers/i2c/busses/i2c-ali1535.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-ali1563.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-ali15x3.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-au1550.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-ibm_iic.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-iop3xx.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-isa.c | 161
-rw-r--r--  drivers/i2c/busses/i2c-keywest.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-nforce2.c | 33
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sis5595.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis630.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis96x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-stub.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-viapro.c | 2
-rw-r--r--  drivers/i2c/busses/scx200_acb.c | 4
-rw-r--r--  drivers/i2c/chips/Kconfig | 10
-rw-r--r--  drivers/i2c/chips/ds1337.c | 11
-rw-r--r--  drivers/i2c/chips/ds1374.c | 3
-rw-r--r--  drivers/i2c/chips/eeprom.c | 17
-rw-r--r--  drivers/i2c/chips/isp1301_omap.c | 2
-rw-r--r--  drivers/i2c/chips/m41t00.c | 3
-rw-r--r--  drivers/i2c/chips/max6875.c | 462
-rw-r--r--  drivers/i2c/chips/pca9539.c | 12
-rw-r--r--  drivers/i2c/chips/pcf8574.c | 13
-rw-r--r--  drivers/i2c/chips/pcf8591.c | 13
-rw-r--r--  drivers/i2c/chips/rtc8564.c | 1
-rw-r--r--  drivers/i2c/i2c-core.c | 256
-rw-r--r--  drivers/i2c/i2c-dev.c | 5
-rw-r--r--  drivers/i2c/i2c-sensor-detect.c | 145
-rw-r--r--  drivers/i2c/i2c-sensor-vid.c | 98
-rw-r--r--  drivers/ide/ide-disk.c | 2
-rw-r--r--  drivers/ide/ide-io.c | 2
-rw-r--r--  drivers/ide/ide.c | 4
-rw-r--r--  drivers/ide/pci/sc1200.c | 12
-rw-r--r--  drivers/ide/ppc/pmac.c | 16
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 4
-rw-r--r--  drivers/ieee1394/nodemgr.c | 8
-rw-r--r--  drivers/ieee1394/pcilynx.c | 20
-rw-r--r--  drivers/infiniband/core/sysfs.c | 2
-rw-r--r--  drivers/input/evdev.c | 10
-rw-r--r--  drivers/input/gameport/emu10k1-gp.c | 2
-rw-r--r--  drivers/input/gameport/fm801-gp.c | 2
-rw-r--r--  drivers/input/gameport/ns558.c | 4
-rw-r--r--  drivers/input/input.c | 11
-rw-r--r--  drivers/input/joystick/a3d.c | 2
-rw-r--r--  drivers/input/joystick/adi.c | 2
-rw-r--r--  drivers/input/joystick/analog.c | 2
-rw-r--r--  drivers/input/joystick/cobra.c | 2
-rw-r--r--  drivers/input/joystick/db9.c | 2
-rw-r--r--  drivers/input/joystick/gamecon.c | 2
-rw-r--r--  drivers/input/joystick/gf2k.c | 2
-rw-r--r--  drivers/input/joystick/grip.c | 2
-rw-r--r--  drivers/input/joystick/grip_mp.c | 2
-rw-r--r--  drivers/input/joystick/guillemot.c | 2
-rw-r--r--  drivers/input/joystick/interact.c | 2
-rw-r--r--  drivers/input/joystick/sidewinder.c | 2
-rw-r--r--  drivers/input/joystick/tmdc.c | 2
-rw-r--r--  drivers/input/joystick/turbografx.c | 2
-rw-r--r--  drivers/input/keyboard/corgikbd.c | 104
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 2
-rw-r--r--  drivers/input/serio/serport.c | 4
-rw-r--r--  drivers/input/touchscreen/corgi_ts.c | 51
-rw-r--r--  drivers/isdn/act2000/capi.c | 2
-rw-r--r--  drivers/isdn/hisax/hisax.h | 3
-rw-r--r--  drivers/isdn/i4l/isdn_net.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_v110.c | 4
-rw-r--r--  drivers/macintosh/mediabay.c | 6
-rw-r--r--  drivers/macintosh/via-pmu.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 7
-rw-r--r--  drivers/md/dm-io.c | 6
-rw-r--r--  drivers/md/dm.c | 6
-rw-r--r--  drivers/media/common/saa7146_i2c.c | 4
-rw-r--r--  drivers/media/dvb/b2c2/flexcop-i2c.c | 3
-rw-r--r--  drivers/media/dvb/bt8xx/Kconfig | 6
-rw-r--r--  drivers/media/dvb/cinergyT2/cinergyT2.c | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/cxusb.c | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/dibusb-common.c | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/digitv.c | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/dvb-usb-i2c.c | 1
-rw-r--r--  drivers/media/dvb/frontends/lgdt330x.c | 50
-rw-r--r--  drivers/media/dvb/pluto2/pluto2.c | 1
-rw-r--r--  drivers/media/dvb/ttpci/Kconfig | 3
-rw-r--r--  drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c | 3
-rw-r--r--  drivers/media/video/Kconfig | 1
-rw-r--r--  drivers/media/video/Makefile | 2
-rw-r--r--  drivers/media/video/adv7170.c | 2
-rw-r--r--  drivers/media/video/adv7175.c | 2
-rw-r--r--  drivers/media/video/bt819.c | 2
-rw-r--r--  drivers/media/video/bt832.c | 4
-rw-r--r--  drivers/media/video/bt856.c | 2
-rw-r--r--  drivers/media/video/bttv-driver.c | 1
-rw-r--r--  drivers/media/video/bttv-i2c.c | 12
-rw-r--r--  drivers/media/video/cx88/cx88-i2c.c | 8
-rw-r--r--  drivers/media/video/indycam.c | 412
-rw-r--r--  drivers/media/video/indycam.h | 112
-rw-r--r--  drivers/media/video/ir-kbd-i2c.c | 6
-rw-r--r--  drivers/media/video/meye.c | 3
-rw-r--r--  drivers/media/video/msp3400.c | 8
-rw-r--r--  drivers/media/video/ovcamchip/ov6x20.c | 6
-rw-r--r--  drivers/media/video/ovcamchip/ov6x30.c | 4
-rw-r--r--  drivers/media/video/ovcamchip/ovcamchip_core.c | 14
-rw-r--r--  drivers/media/video/saa7110.c | 1
-rw-r--r--  drivers/media/video/saa7111.c | 2
-rw-r--r--  drivers/media/video/saa7114.c | 2
-rw-r--r--  drivers/media/video/saa7134/saa6752hs.c | 2
-rw-r--r--  drivers/media/video/saa7134/saa7134-i2c.c | 10
-rw-r--r--  drivers/media/video/saa7185.c | 2
-rw-r--r--  drivers/media/video/saa7191.c | 512
-rw-r--r--  drivers/media/video/saa7191.h | 139
-rw-r--r--  drivers/media/video/tda7432.c | 4
-rw-r--r--  drivers/media/video/tda9840.c | 4
-rw-r--r--  drivers/media/video/tda9875.c | 4
-rw-r--r--  drivers/media/video/tda9887.c | 10
-rw-r--r--  drivers/media/video/tea6415c.c | 4
-rw-r--r--  drivers/media/video/tea6420.c | 4
-rw-r--r--  drivers/media/video/tuner-3036.c | 3
-rw-r--r--  drivers/media/video/tuner-core.c | 4
-rw-r--r--  drivers/media/video/tvaudio.c | 51
-rw-r--r--  drivers/media/video/tveeprom.c | 2
-rw-r--r--  drivers/media/video/tvmixer.c | 14
-rw-r--r--  drivers/media/video/vino.c | 4273
-rw-r--r--  drivers/media/video/vino.h | 61
-rw-r--r--  drivers/media/video/vpx3220.c | 1
-rw-r--r--  drivers/media/video/zoran_card.c | 2
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 19
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 85
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 67
-rw-r--r--  drivers/message/fusion/lsi/mpi_init.h | 203
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 11
-rw-r--r--  drivers/message/fusion/lsi/mpi_targ.h | 74
-rw-r--r--  drivers/message/fusion/mptbase.c | 321
-rw-r--r--  drivers/message/fusion/mptbase.h | 5
-rw-r--r--  drivers/message/fusion/mptctl.c | 14
-rw-r--r--  drivers/message/fusion/mptscsih.c | 140
-rw-r--r--  drivers/message/fusion/mptspi.c | 6
-rw-r--r--  drivers/misc/ibmasm/uart.c | 20
-rw-r--r--  drivers/mmc/mmc.c | 530
-rw-r--r--  drivers/mmc/mmc_block.c | 9
-rw-r--r--  drivers/mmc/mmc_sysfs.c | 21
-rw-r--r--  drivers/mmc/pxamci.c | 11
-rw-r--r--  drivers/mmc/wbsd.c | 118
-rw-r--r--  drivers/mmc/wbsd.h | 6
-rw-r--r--  drivers/mtd/nand/nand_base.c | 1
-rw-r--r--  drivers/net/3c59x.c | 19
-rw-r--r--  drivers/net/Kconfig | 44
-rw-r--r--  drivers/net/Makefile | 4
-rw-r--r--  drivers/net/ac3200.c | 2
-rw-r--r--  drivers/net/arcnet/arcnet.c | 25
-rw-r--r--  drivers/net/atarilance.c | 2
-rw-r--r--  drivers/net/bnx2.c | 247
-rw-r--r--  drivers/net/bnx2.h | 10
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 11
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 2
-rw-r--r--  drivers/net/bonding/bond_alb.c | 5
-rw-r--r--  drivers/net/chelsio/Makefile | 11
-rw-r--r--  drivers/net/chelsio/common.h | 314
-rw-r--r--  drivers/net/chelsio/cphy.h | 148
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h | 145
-rw-r--r--  drivers/net/chelsio/cxgb2.c | 1256
-rw-r--r--  drivers/net/chelsio/elmer0.h | 151
-rw-r--r--  drivers/net/chelsio/espi.c | 346
-rw-r--r--  drivers/net/chelsio/espi.h | 68
-rw-r--r--  drivers/net/chelsio/gmac.h | 134
-rw-r--r--  drivers/net/chelsio/mv88x201x.c | 252
-rw-r--r--  drivers/net/chelsio/pm3393.c | 826
-rw-r--r--  drivers/net/chelsio/regs.h | 468
-rw-r--r--  drivers/net/chelsio/sge.c | 1684
-rw-r--r--  drivers/net/chelsio/sge.h | 105
-rw-r--r--  drivers/net/chelsio/subr.c | 812
-rw-r--r--  drivers/net/chelsio/suni1x10gexp_regs.h | 213
-rw-r--r--  drivers/net/dm9000.c | 2
-rw-r--r--  drivers/net/e100.c | 241
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 14
-rw-r--r--  drivers/net/forcedeth.c | 4
-rw-r--r--  drivers/net/hamachi.c | 4
-rw-r--r--  drivers/net/hamradio/bpqether.c | 4
-rw-r--r--  drivers/net/ibmveth.c | 2
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 1179
-rw-r--r--  drivers/net/irda/smsc-ircc2.h | 50
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 21
-rw-r--r--  drivers/net/iseries_veth.c | 872
-rw-r--r--  drivers/net/iseries_veth.h | 46
-rw-r--r--  drivers/net/mv643xx_eth.c | 139
-rw-r--r--  drivers/net/mv643xx_eth.h | 4
-rw-r--r--  drivers/net/ne3210.c | 9
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/ppp_generic.c | 1
-rw-r--r--  drivers/net/pppoe.c | 6
-rw-r--r--  drivers/net/rrunner.c | 3
-rw-r--r--  drivers/net/s2io-regs.h | 13
-rw-r--r--  drivers/net/s2io.c | 122
-rw-r--r--  drivers/net/s2io.h | 9
-rw-r--r--  drivers/net/shaper.c | 50
-rw-r--r--  drivers/net/sis190.c | 1884
-rw-r--r--  drivers/net/smc91x.h | 2
-rw-r--r--  drivers/net/spider_net.c | 2334
-rw-r--r--  drivers/net/spider_net.h | 469
-rw-r--r--  drivers/net/spider_net_ethtool.c | 126
-rw-r--r--  drivers/net/sun3lance.c | 2
-rw-r--r--  drivers/net/sungem.c | 1
-rw-r--r--  drivers/net/sungem.h | 2
-rw-r--r--  drivers/net/tg3.c | 523
-rw-r--r--  drivers/net/tg3.h | 10
-rw-r--r--  drivers/net/tulip/Kconfig | 12
-rw-r--r--  drivers/net/tulip/Makefile | 1
-rw-r--r--  drivers/net/tulip/de2104x.c | 2
-rw-r--r--  drivers/net/tulip/media.c | 36
-rw-r--r--  drivers/net/tulip/timer.c | 1
-rw-r--r--  drivers/net/tulip/tulip.h | 8
-rw-r--r--  drivers/net/tulip/tulip_core.c | 35
-rw-r--r--  drivers/net/tulip/uli526x.c | 1749
-rw-r--r--  drivers/net/tun.c | 15
-rw-r--r--  drivers/net/wan/hdlc_generic.c | 2
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/net/wan/sdla_fr.c | 22
-rw-r--r--  drivers/net/wan/syncppp.c | 3
-rw-r--r--  drivers/net/wireless/Kconfig | 144
-rw-r--r--  drivers/net/wireless/Makefile | 8
-rw-r--r--  drivers/net/wireless/airo.c | 123
-rw-r--r--  drivers/net/wireless/atmel.c | 79
-rw-r--r--  drivers/net/wireless/hostap/Kconfig | 73
-rw-r--r--  drivers/net/wireless/hostap/Makefile | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap.c | 1198
-rw-r--r--  drivers/net/wireless/hostap/hostap.h | 57
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211.h | 96
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_rx.c | 1091
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_tx.c | 524
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c | 3288
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.h | 261
-rw-r--r--  drivers/net/wireless/hostap/hostap_common.h | 435
-rw-r--r--  drivers/net/wireless/hostap/hostap_config.h | 55
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 1030
-rw-r--r--  drivers/net/wireless/hostap/hostap_download.c | 766
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 3445
-rw-r--r--  drivers/net/wireless/hostap/hostap_info.c | 499
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 4102
-rw-r--r--  drivers/net/wireless/hostap/hostap_pci.c | 473
-rw-r--r--  drivers/net/wireless/hostap/hostap_plx.c | 645
-rw-r--r--  drivers/net/wireless/hostap/hostap_proc.c | 448
-rw-r--r--  drivers/net/wireless/hostap/hostap_wlan.h | 1033
-rw-r--r--  drivers/net/wireless/ieee802_11.h | 78
-rw-r--r--  drivers/net/wireless/ipw2100.c | 8680
-rw-r--r--  drivers/net/wireless/ipw2100.h | 1167
-rw-r--r--  drivers/net/wireless/ipw2200.c | 7383
-rw-r--r--  drivers/net/wireless/ipw2200.h | 1680
-rw-r--r--  drivers/net/wireless/netwave_cs.c | 7
-rw-r--r--  drivers/net/wireless/orinoco.c | 104
-rw-r--r--  drivers/net/wireless/orinoco_cs.c | 1
-rw-r--r--  drivers/net/wireless/orinoco_nortel.c | 324
-rw-r--r--  drivers/net/wireless/orinoco_pci.c | 2
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 3
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 3
-rw-r--r--  drivers/net/wireless/prism54/islpci_hotplug.c | 2
-rw-r--r--  drivers/net/wireless/ray_cs.c | 866
-rw-r--r--  drivers/net/wireless/ray_cs.h | 7
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 1120
-rw-r--r--  drivers/net/wireless/strip.c | 2
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 26
-rw-r--r--  drivers/net/wireless/wavelan_cs.h | 6
-rw-r--r--  drivers/net/wireless/wavelan_cs.p.h | 17
-rw-r--r--  drivers/net/wireless/wl3501.h | 5
-rw-r--r--  drivers/net/wireless/wl3501_cs.c | 18
-rw-r--r--  drivers/parport/parport_pc.c | 3
-rw-r--r--  drivers/pci/msi.c | 17
-rw-r--r--  drivers/pci/msi.h | 5
-rw-r--r--  drivers/pci/pci.c | 14
-rw-r--r--  drivers/pci/quirks.c | 19
-rw-r--r--  drivers/pci/rom.c | 24
-rw-r--r--  drivers/pci/setup-bus.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_mainstone.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_sharpsl.c | 116
-rw-r--r--  drivers/pcmcia/sa1100_generic.c | 2
-rw-r--r--  drivers/pcmcia/sa1111_generic.c | 2
-rw-r--r--  drivers/pcmcia/sa11xx_base.c | 2
-rw-r--r--  drivers/pcmcia/topic.h | 17
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 125
-rw-r--r--  drivers/pcmcia/yenta_socket.h | 8
-rw-r--r--  drivers/pnp/card.c | 7
-rw-r--r--  drivers/pnp/driver.c | 7
-rw-r--r--  drivers/pnp/isapnp/core.c | 33
-rw-r--r--  drivers/pnp/manager.c | 7
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 16
-rw-r--r--  drivers/pnp/pnpacpi/pnpacpi.h | 1
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 18
-rw-r--r--  drivers/pnp/pnpbios/core.c | 26
-rw-r--r--  drivers/pnp/pnpbios/pnpbios.h | 1
-rw-r--r--  drivers/pnp/pnpbios/proc.c | 8
-rw-r--r--  drivers/pnp/pnpbios/rsparser.c | 16
-rw-r--r--  drivers/pnp/quirks.c | 7
-rw-r--r--  drivers/pnp/support.c | 7
-rw-r--r--  drivers/s390/block/Kconfig | 2
-rw-r--r--  drivers/s390/block/dasd.c | 19
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 8
-rw-r--r--  drivers/s390/block/dasd_diag.c | 334
-rw-r--r--  drivers/s390/block/dasd_diag.h | 105
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 10
-rw-r--r--  drivers/s390/block/dasd_int.h | 3
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 17
-rw-r--r--  drivers/s390/block/dasd_proc.c | 8
-rw-r--r--  drivers/s390/char/raw3270.c | 16
-rw-r--r--  drivers/s390/cio/cio.c | 7
-rw-r--r--  drivers/s390/cio/device_fsm.c | 3
-rw-r--r--  drivers/s390/cio/device_ops.c | 4
-rw-r--r--  drivers/s390/cio/ioasm.h | 26
-rw-r--r--  drivers/s390/crypto/z90common.h | 3
-rw-r--r--  drivers/s390/crypto/z90hardware.c | 127
-rw-r--r--  drivers/s390/crypto/z90main.c | 246
-rw-r--r--  drivers/s390/net/claw.c | 20
-rw-r--r--  drivers/s390/s390mach.c | 2
-rw-r--r--  drivers/sbus/char/Kconfig | 39
-rw-r--r--  drivers/scsi/3w-xxxx.c | 57
-rw-r--r--  drivers/scsi/Kconfig | 17
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/NCR5380.c | 9
-rw-r--r--  drivers/scsi/NCR53c406a.c | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 327
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 55
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 20
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 4
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 20
-rw-r--r--  drivers/scsi/aacraid/linit.c | 106
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 20
-rw-r--r--  drivers/scsi/aacraid/rx.c | 20
-rw-r--r--  drivers/scsi/aacraid/sa.c | 22
-rw-r--r--  drivers/scsi/advansys.c | 4
-rw-r--r--  drivers/scsi/ahci.c | 54
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic79xx | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7770.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 104
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 4570
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 288
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 82
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 14
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 88
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.reg | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.seq | 5
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 36
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 60
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 97
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 29
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_proc.c | 45
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped | 933
-rw-r--r--  drivers/scsi/aic7xxx/aiclib.c | 1377
-rw-r--r--  drivers/scsi/aic7xxx/aiclib.h | 890
-rw-r--r--  drivers/scsi/ata_piix.c | 11
-rw-r--r--  drivers/scsi/ch.c | 42
-rw-r--r--  drivers/scsi/constants.c | 49
-rw-r--r--  drivers/scsi/hosts.c | 113
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 181
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 1
-rw-r--r--  drivers/scsi/libata-core.c | 19
-rw-r--r--  drivers/scsi/libata-scsi.c | 8
-rw-r--r--  drivers/scsi/libata.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mesh.c | 6
-rw-r--r--  drivers/scsi/qla1280.c | 359
-rw-r--r--  drivers/scsi/qla1280.h | 336
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 136
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 157
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 564
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 24
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 97
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 117
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 34
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/raid_class.c | 250
-rw-r--r--  drivers/scsi/sata_mv.c | 843
-rw-r--r--  drivers/scsi/sata_nv.c | 9
-rw-r--r--  drivers/scsi/sata_promise.c | 68
-rw-r--r--  drivers/scsi/sata_qstor.c | 10
-rw-r--r--  drivers/scsi/sata_sil.c | 12
-rw-r--r--  drivers/scsi/sata_svw.c | 7
-rw-r--r--  drivers/scsi/sata_sx4.c | 54
-rw-r--r--  drivers/scsi/sata_uli.c | 4
-rw-r--r--  drivers/scsi/sata_vsc.c | 5
-rw-r--r--  drivers/scsi/scsi.c | 17
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 48
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 64
-rw-r--r--  drivers/scsi/scsi_lib.c | 296
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 128
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 62
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 168
-rw-r--r--  drivers/scsi/sd.c | 185
-rw-r--r--  drivers/scsi/sg.c | 11
-rw-r--r--  drivers/scsi/sr.c | 75
-rw-r--r--  drivers/scsi/sr.h | 1
-rw-r--r--  drivers/scsi/sr_ioctl.c | 62
-rw-r--r--  drivers/scsi/st.c | 156
-rw-r--r--  drivers/scsi/st.h | 3
-rw-r--r--  drivers/serial/21285.c | 10
-rw-r--r--  drivers/serial/68328serial.c | 1
-rw-r--r--  drivers/serial/68360serial.c | 8
-rw-r--r--  drivers/serial/8250.c | 89
-rw-r--r--  drivers/serial/8250.h | 6
-rw-r--r--  drivers/serial/Kconfig | 4
-rw-r--r--  drivers/serial/amba-pl010.c | 8
-rw-r--r--  drivers/serial/amba-pl011.c | 8
-rw-r--r--  drivers/serial/au1x00_uart.c | 8
-rw-r--r--  drivers/serial/clps711x.c | 10
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_core.c | 21
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm2.c | 11
-rw-r--r--  drivers/serial/crisv10.c | 15
-rw-r--r--  drivers/serial/dz.c | 10
-rw-r--r--  drivers/serial/icom.c | 13
-rw-r--r--  drivers/serial/imx.c | 28
-rw-r--r--  drivers/serial/ioc4_serial.c | 6
-rw-r--r--  drivers/serial/ip22zilog.c | 4
-rw-r--r--  drivers/serial/jsm/jsm_tty.c | 4
-rw-r--r--  drivers/serial/m32r_sio.c | 8
-rw-r--r--  drivers/serial/mcfserial.c | 1
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 8
-rw-r--r--  drivers/serial/mpsc.c | 8
-rw-r--r--  drivers/serial/mux.c | 10
-rw-r--r--  drivers/serial/pmac_zilog.c | 13
-rw-r--r--  drivers/serial/pxa.c | 8
-rw-r--r--  drivers/serial/s3c2410.c | 10
-rw-r--r--  drivers/serial/sa1100.c | 8
-rw-r--r--  drivers/serial/serial_core.c | 164
-rw-r--r--  drivers/serial/serial_lh7a40x.c | 9
-rw-r--r--  drivers/serial/serial_txx9.c | 8
-rw-r--r--  drivers/serial/sh-sci.c | 12
-rw-r--r--  drivers/serial/sn_console.c | 8
-rw-r--r--  drivers/serial/sunsab.c | 8
-rw-r--r--  drivers/serial/sunsu.c | 38
-rw-r--r--  drivers/serial/sunzilog.c | 4
-rw-r--r--  drivers/serial/uart00.c | 8
-rw-r--r--  drivers/serial/v850e_uart.c | 6
-rw-r--r--  drivers/serial/vr41xx_siu.c | 8
-rw-r--r--  drivers/usb/atm/usbatm.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 18
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/host/ehci-dbg.c | 2
-rw-r--r--  drivers/usb/host/ehci-sched.c | 2
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 2
-rw-r--r--  drivers/usb/host/ohci-dbg.c | 2
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 8
-rw-r--r--  drivers/usb/input/acecad.c | 2
-rw-r--r--  drivers/usb/input/itmtouch.c | 2
-rw-r--r--  drivers/usb/input/pid.c | 2
-rw-r--r--  drivers/usb/media/w9968cf.c | 12
-rw-r--r--  drivers/usb/misc/usbtest.c | 2
-rw-r--r--  drivers/usb/net/Makefile | 2
-rw-r--r--  drivers/usb/net/usbnet.c | 21
-rw-r--r--  drivers/usb/net/zd1201.c | 16
-rw-r--r--  drivers/video/aty/aty128fb.c | 14
-rw-r--r--  drivers/video/aty/atyfb_base.c | 11
-rw-r--r--  drivers/video/aty/radeon_i2c.c | 2
-rw-r--r--  drivers/video/aty/radeon_pm.c | 12
-rw-r--r--  drivers/video/backlight/Makefile | 1
-rw-r--r--  drivers/video/backlight/locomolcd.c | 157
-rw-r--r--  drivers/video/chipsfb.c | 4
-rw-r--r--  drivers/video/i810/i810_main.c | 6
-rw-r--r--  drivers/video/matrox/matroxfb_maven.c | 2
-rw-r--r--  drivers/video/nvidia/nv_i2c.c | 3
-rw-r--r--  drivers/video/pmag-aa-fb.c | 2
-rw-r--r--  drivers/video/pmag-ba-fb.c | 285
-rw-r--r--  drivers/video/pmagb-b-fb.c | 417
-rw-r--r--  drivers/video/q40fb.c | 1
-rw-r--r--  drivers/video/riva/rivafb-i2c.c | 3
-rw-r--r--  drivers/video/s1d13xxxfb.c | 2
-rw-r--r--  drivers/video/savage/savagefb-i2c.c | 3
-rw-r--r--  drivers/video/savage/savagefb_driver.c | 1
-rw-r--r--  drivers/video/w100fb.c | 1912
-rw-r--r--  drivers/video/w100fb.h | 777
-rw-r--r--  drivers/w1/w1_int.c | 6
-rw-r--r--  drivers/w1/w1_netlink.c | 2
704 files changed, 87645 insertions(+), 22019 deletions(-)
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 73c6b85299c1..d74a7c5e75dd 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -513,7 +513,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
 
 // VC layer stats
 atomic_inc(&atm_vcc->stats->rx);
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 // end of our responsability
 atm_vcc->push (atm_vcc, skb);
 return;
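
The one-line change above is not specific to ambassador.c: every ATM driver touched by this merge swaps the open-coded do_gettimeofday(&skb->stamp) for the sk_buff timestamp helpers. A minimal sketch of the receive-path pattern these hunks converge on (not part of the patch; the wrapper function and its name are illustrative, only the three calls inside it come from the hunks):

    #include <linux/atmdev.h>
    #include <linux/skbuff.h>

    /* Illustrative helper: stamp a received buffer and hand it to the ATM core. */
    static void example_rx_push(struct atm_vcc *vcc, struct sk_buff *skb)
    {
            atomic_inc(&vcc->stats->rx);    /* VC layer stats */
            __net_timestamp(skb);           /* replaces do_gettimeofday(&skb->stamp) */
            vcc->push(vcc, skb);            /* end of the driver's responsibility */
    }

Where a driver keeps its own struct timeval (eni.c below), the stored value is copied with skb_set_timestamp(skb, &eni_vcc->timestamp) rather than assigned to skb->stamp directly.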
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index f2f01cb82cb4..57f1810fdccd 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -325,7 +325,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 result = -ENOBUFS;
 goto done;
 }
-do_gettimeofday(&new_skb->stamp);
+__net_timestamp(new_skb);
 memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
 out_vcc->push(out_vcc,new_skb);
 atomic_inc(&vcc->stats->tx);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 10da36934769..c13c4d736ef5 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -537,7 +537,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 return 0;
 }
 skb_put(skb,length);
-skb->stamp = eni_vcc->timestamp;
+skb_set_timestamp(skb, &eni_vcc->timestamp);
 DPRINTK("got len %ld\n",length);
 if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
 eni_vcc->rxing++;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b078fa548ebf..58219744f5db 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
 skb_put (skb, qe->p1 & 0xffff);
 ATM_SKB(skb)->vcc = atm_vcc;
 atomic_inc(&atm_vcc->stats->rx);
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
 atm_vcc->push (atm_vcc, skb);
 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 5f702199543a..2bf723a7b6e6 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1176,7 +1176,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
 return -ENOMEM;
 }
 
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 #ifdef FORE200E_52BYTE_AAL0_SDU
 if (cell_header) {
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 28250c9b32d6..fde9334059af 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1886,7 +1886,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 if (rx_skb_reserve > 0)
 skb_reserve(skb, rx_skb_reserve);
 
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 for (iov = he_vcc->iov_head;
 iov < he_vcc->iov_tail; ++iov) {
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 924a2c8988bd..0cded0468003 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
 // VC layer stats
 atomic_inc(&vcc->stats->rx);
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 // end of our responsability
 vcc->push (vcc, skb);
 }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 30b7e990ed0b..b4a76cade646 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1101,7 +1101,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 cell, ATM_CELL_PAYLOAD);
 
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 
@@ -1179,7 +1179,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 
 skb_trim(skb, len);
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
@@ -1201,7 +1201,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 
 skb_trim(skb, len);
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
@@ -1340,7 +1340,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 ATM_CELL_PAYLOAD);
 
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ffe3afa723b8..51ec14787293 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1427,7 +1427,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
 skb_put(skb, size);
 vcc_rx_memcpy(skb->data, lvcc, size);
 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
 atomic_inc(&lvcc->rx.atmvcc->stats->rx);
 out:
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd14..c57e20dcb0f8 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
 static void __devinit ns_init_card_error(ns_dev *card, int error);
 static scq_info *get_scq(int size, u32 scd);
 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-u32 handle2, u32 addr2);
+static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
 static int ns_open(struct atm_vcc *vcc);
 static void ns_close(struct atm_vcc *vcc);
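
The new push_rxbufs() prototype above drops the explicit type/handle/address arguments; the hunks that follow tag each receive sk_buff with the pool it belongs to via NS_SKB_CB(skb)->buf_type and let push_rxbufs() derive the handle and bus address from the skb itself. A minimal sketch of that control-block tagging scheme (not part of the patch; the layout is assumed, the real definitions live in drivers/atm/nicstar.h):

    #include <linux/skbuff.h>

    /* Per-skb driver state kept in the skb->cb scratch area, so the buffer
     * pool no longer has to be inferred from the old skb->list pointer. */
    struct ns_skb_cb {
            u32 buf_type;           /* BUF_NONE, BUF_SM or BUF_LG */
    };

    #define NS_SKB_CB(skb)  ((struct ns_skb_cb *)((skb)->cb))

Typical use, visible in the allocation paths further down: tag the buffer once (NS_SKB_CB(sb)->buf_type = BUF_SM;), queue it on the matching pool, then call push_rxbufs(card, sb); the same tag is what gets or'ed into the NS_CMD_WRITE_FREEBUFQ command word.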
@@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, hb);
 card->hbpool.count++;
 }
@@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
 if (j == 1)
 {
@@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 }
 /* Test for strange behaviour which leads to crashes */
 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
@@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
 ns_init_card_error(card, error);
 return error;
 }
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->iovpool.queue, iovb);
 card->iovpool.count++;
 }
@@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc)
 
 /* The handles passed must be pointers to the sk_buff containing the small
 or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-u32 handle2, u32 addr2)
+static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
 {
+struct ns_skb_cb *cb = NS_SKB_CB(skb);
+u32 handle1, addr1;
+u32 handle2, addr2;
 u32 stat;
 unsigned long flags;
 
+/* *BARF* */
+handle2 = addr2 = 0;
+handle1 = (u32)skb;
+addr1 = (u32)virt_to_bus(skb->data);
 
 #ifdef GENERAL_DEBUG
 if (!addr1)
@@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 stat = readl(card->membase + STAT);
 card->sbfqc = ns_stat_sfbqc_get(stat);
 card->lbfqc = ns_stat_lfbqc_get(stat);
-if (type == BUF_SM)
+if (cb->buf_type == BUF_SM)
 {
 if (!addr2)
 {
@@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 }
 }
 }
-else /* type == BUF_LG */
+else /* buf_type == BUF_LG */
 {
 if (!addr2)
 {
@@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 
 if (addr2)
 {
-if (type == BUF_SM)
+if (cb->buf_type == BUF_SM)
 {
 if (card->sbfqc >= card->sbnr.max)
 {
-skb_unlink((struct sk_buff *) handle1);
+skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle1);
-skb_unlink((struct sk_buff *) handle2);
+skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle2);
 return;
 }
 else
 card->sbfqc += 2;
 }
-else /* (type == BUF_LG) */
+else /* (buf_type == BUF_LG) */
 {
 if (card->lbfqc >= card->lbnr.max)
 {
-skb_unlink((struct sk_buff *) handle1);
+skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle1);
-skb_unlink((struct sk_buff *) handle2);
+skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
 dev_kfree_skb_any((struct sk_buff *) handle2);
 return;
 }
@@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 writel(handle2, card->membase + DR2);
 writel(addr1, card->membase + DR1);
 writel(handle1, card->membase + DR0);
-writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
+writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
 
 spin_unlock_irqrestore(&card->res_lock, flags);
 
 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
-(type == BUF_SM ? "small" : "large"), addr1, addr2);
+(cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
 }
 
 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
@@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
 card->efbie = 0;
 break;
 }
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 }
 card->sbfqc = i;
 process_rsq(card);
@@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
 card->efbie = 0;
 break;
 }
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 }
 card->lbfqc = i;
 process_rsq(card);
@@ -2202,7 +2213,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
 skb_put(sb, ATM_CELL_PAYLOAD);
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 cell += ATM_CELL_PAYLOAD;
@@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 recycle_rx_buf(card, skb);
 return;
 }
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 }
 else
 if (--card->iovpool.count < card->iovnr.min)
@@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 struct sk_buff *new_iovb;
 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
 {
+NS_SKB_CB(iovb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->iovpool.queue, new_iovb);
 card->iovpool.count++;
 }
@@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 
 if (NS_SKB(iovb)->iovcnt == 1)
 {
-if (skb->list != &card->sbpool.queue)
+if (NS_SKB_CB(skb)->buf_type != BUF_SM)
 {
 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
 card->index);
@@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 }
 else /* NS_SKB(iovb)->iovcnt >= 2 */
 {
-if (skb->list != &card->lbpool.queue)
+if (NS_SKB_CB(skb)->buf_type != BUF_LG)
 {
 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
 card->index);
@@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 /* skb points to a small buffer */
 if (!atm_charge(vcc, skb->truesize))
 {
-push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-0, 0);
+push_rxbufs(card, skb);
 atomic_inc(&vcc->stats->rx_drop);
 }
 else
@@ -2334,7 +2346,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 skb->destructor = ns_sb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
 }
@@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 if (!atm_charge(vcc, sb->truesize))
 {
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-0, 0);
+push_rxbufs(card, sb);
 atomic_inc(&vcc->stats->rx_drop);
 }
 else
@@ -2362,21 +2373,19 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 sb->destructor = ns_sb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
 ATM_SKB(sb)->vcc = vcc;
-do_gettimeofday(&sb->stamp);
+__net_timestamp(sb);
 vcc->push(vcc, sb);
 atomic_inc(&vcc->stats->rx);
 }
 
-push_rxbufs(card, BUF_LG, (u32) skb,
-(u32) virt_to_bus(skb->data), 0, 0);
+push_rxbufs(card, skb);
 
 }
 else /* len > NS_SMBUFSIZE, the usual case */
 {
 if (!atm_charge(vcc, skb->truesize))
 {
-push_rxbufs(card, BUF_LG, (u32) skb,
-(u32) virt_to_bus(skb->data), 0, 0);
+push_rxbufs(card, skb);
 atomic_inc(&vcc->stats->rx_drop);
 }
 else
@@ -2389,13 +2398,12 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 memcpy(skb->data, sb->data, NS_SMBUFSIZE);
 skb_put(skb, len - NS_SMBUFSIZE);
 ATM_SKB(skb)->vcc = vcc;
-do_gettimeofday(&skb->stamp);
+__net_timestamp(skb);
 vcc->push(vcc, skb);
 atomic_inc(&vcc->stats->rx);
 }
 
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-0, 0);
+push_rxbufs(card, sb);
 
 }
 
@@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 card->hbpool.count++;
 }
 }
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 }
 else
 if (--card->hbpool.count < card->hbnr.min)
@@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 struct sk_buff *new_hb;
 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
 {
+NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, new_hb);
 card->hbpool.count++;
 }
@@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 {
 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
 {
+NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, new_hb);
 card->hbpool.count++;
 }
@@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 remaining = len - iov->iov_len;
 iov++;
 /* Free the small buffer */
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-0, 0);
+push_rxbufs(card, sb);
 
 /* Copy all large buffers to the huge buffer and free them */
 for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
@@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 skb_put(hb, tocopy);
 iov++;
 remaining -= tocopy;
-push_rxbufs(card, BUF_LG, (u32) lb,
-(u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 }
 #ifdef EXTRA_DEBUG
 if (remaining != 0 || hb->len != len)
@@ -2496,7 +2505,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 #ifdef NS_USE_DESTRUCTORS
 hb->destructor = ns_hb_destructor;
 #endif /* NS_USE_DESTRUCTORS */
-do_gettimeofday(&hb->stamp);
+__net_timestamp(hb);
 vcc->push(vcc, hb);
 atomic_inc(&vcc->stats->rx);
 }
@@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb)
 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 if (sb == NULL)
 break;
+NS_SKB_CB(sb)->buf_type = BUF_SM;
 skb_queue_tail(&card->sbpool.queue, sb);
 skb_reserve(sb, NS_AAL0_HEADER);
-push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+push_rxbufs(card, sb);
 } while (card->sbfqc < card->sbnr.min);
 }
 
@@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb)
 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 if (lb == NULL)
 break;
+NS_SKB_CB(lb)->buf_type = BUF_LG;
 skb_queue_tail(&card->lbpool.queue, lb);
 skb_reserve(lb, NS_SMBUFSIZE);
-push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+push_rxbufs(card, lb);
 } while (card->lbfqc < card->lbnr.min);
 }
 
@@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 if (hb == NULL)
 break;
+NS_SKB_CB(hb)->buf_type = BUF_NONE;
 skb_queue_tail(&card->hbpool.queue, hb);
 card->hbpool.count++;
 }
@@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb)
 #endif /* NS_USE_DESTRUCTORS */
 
 
-
 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
 {
-if (skb->list == &card->sbpool.queue)
-push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-else if (skb->list == &card->lbpool.queue)
-push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-else
-{
-printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-dev_kfree_skb_any(skb);
-}
-}
+struct ns_skb_cb *cb = NS_SKB_CB(skb);
 
+if (unlikely(cb->buf_type == BUF_NONE)) {
+printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+dev_kfree_skb_any(skb);
+} else
+push_rxbufs(card, skb);
+}
 
 
 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
 {
-struct sk_buff *skb;
-
-for (; count > 0; count--)
-{
-skb = (struct sk_buff *) (iov++)->iov_base;
-if (skb->list == &card->sbpool.queue)
-push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-0, 0);
-else if (skb->list == &card->lbpool.queue)
-push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
-0, 0);
-else
-{
-printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-dev_kfree_skb_any(skb);
-}
-}
+while (count-- > 0)
+recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
 }
 
 
-
 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 {
 if (card->iovpool.count < card->iovnr.max)
@@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 
 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 {
-skb_unlink(sb);
+skb_unlink(sb, &card->sbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
 if (card->sbfqc < card->sbnr.min)
 #else
@@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2640 struct sk_buff *new_sb; 2632 struct sk_buff *new_sb;
2641 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) 2633 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2642 { 2634 {
2635 NS_SKB_CB(new_sb)->buf_type = BUF_SM;
2643 skb_queue_tail(&card->sbpool.queue, new_sb); 2636 skb_queue_tail(&card->sbpool.queue, new_sb);
2644 skb_reserve(new_sb, NS_AAL0_HEADER); 2637 skb_reserve(new_sb, NS_AAL0_HEADER);
2645 push_rxbufs(card, BUF_SM, (u32) new_sb, 2638 push_rxbufs(card, new_sb);
2646 (u32) virt_to_bus(new_sb->data), 0, 0);
2647 } 2639 }
2648 } 2640 }
2649 if (card->sbfqc < card->sbnr.init) 2641 if (card->sbfqc < card->sbnr.init)
@@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2652 struct sk_buff *new_sb; 2644 struct sk_buff *new_sb;
2653 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) 2645 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2654 { 2646 {
2647 NS_SKB_CB(new_sb)->buf_type = BUF_SM;
2655 skb_queue_tail(&card->sbpool.queue, new_sb); 2648 skb_queue_tail(&card->sbpool.queue, new_sb);
2656 skb_reserve(new_sb, NS_AAL0_HEADER); 2649 skb_reserve(new_sb, NS_AAL0_HEADER);
2657 push_rxbufs(card, BUF_SM, (u32) new_sb, 2650 push_rxbufs(card, new_sb);
2658 (u32) virt_to_bus(new_sb->data), 0, 0);
2659 } 2651 }
2660 } 2652 }
2661} 2653}
@@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2664 2656
2665static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) 2657static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2666{ 2658{
2667 skb_unlink(lb); 2659 skb_unlink(lb, &card->lbpool.queue);
2668#ifdef NS_USE_DESTRUCTORS 2660#ifdef NS_USE_DESTRUCTORS
2669 if (card->lbfqc < card->lbnr.min) 2661 if (card->lbfqc < card->lbnr.min)
2670#else 2662#else
@@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2673 struct sk_buff *new_lb; 2665 struct sk_buff *new_lb;
2674 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) 2666 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2675 { 2667 {
2668 NS_SKB_CB(new_lb)->buf_type = BUF_LG;
2676 skb_queue_tail(&card->lbpool.queue, new_lb); 2669 skb_queue_tail(&card->lbpool.queue, new_lb);
2677 skb_reserve(new_lb, NS_SMBUFSIZE); 2670 skb_reserve(new_lb, NS_SMBUFSIZE);
2678 push_rxbufs(card, BUF_LG, (u32) new_lb, 2671 push_rxbufs(card, new_lb);
2679 (u32) virt_to_bus(new_lb->data), 0, 0);
2680 } 2672 }
2681 } 2673 }
2682 if (card->lbfqc < card->lbnr.init) 2674 if (card->lbfqc < card->lbnr.init)
@@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2685 struct sk_buff *new_lb; 2677 struct sk_buff *new_lb;
2686 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) 2678 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2687 { 2679 {
2680 NS_SKB_CB(new_lb)->buf_type = BUF_LG;
2688 skb_queue_tail(&card->lbpool.queue, new_lb); 2681 skb_queue_tail(&card->lbpool.queue, new_lb);
2689 skb_reserve(new_lb, NS_SMBUFSIZE); 2682 skb_reserve(new_lb, NS_SMBUFSIZE);
2690 push_rxbufs(card, BUF_LG, (u32) new_lb, 2683 push_rxbufs(card, new_lb);
2691 (u32) virt_to_bus(new_lb->data), 0, 0);
2692 } 2684 }
2693 } 2685 }
2694} 2686}
@@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2880 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2872 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2881 if (sb == NULL) 2873 if (sb == NULL)
2882 return -ENOMEM; 2874 return -ENOMEM;
2875 NS_SKB_CB(sb)->buf_type = BUF_SM;
2883 skb_queue_tail(&card->sbpool.queue, sb); 2876 skb_queue_tail(&card->sbpool.queue, sb);
2884 skb_reserve(sb, NS_AAL0_HEADER); 2877 skb_reserve(sb, NS_AAL0_HEADER);
2885 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); 2878 push_rxbufs(card, sb);
2886 } 2879 }
2887 break; 2880 break;
2888 2881
@@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2894 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2887 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2895 if (lb == NULL) 2888 if (lb == NULL)
2896 return -ENOMEM; 2889 return -ENOMEM;
2890 NS_SKB_CB(lb)->buf_type = BUF_LG;
2897 skb_queue_tail(&card->lbpool.queue, lb); 2891 skb_queue_tail(&card->lbpool.queue, lb);
2898 skb_reserve(lb, NS_SMBUFSIZE); 2892 skb_reserve(lb, NS_SMBUFSIZE);
2899 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); 2893 push_rxbufs(card, lb);
2900 } 2894 }
2901 break; 2895 break;
2902 2896
@@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2923 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2917 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2924 if (hb == NULL) 2918 if (hb == NULL)
2925 return -ENOMEM; 2919 return -ENOMEM;
2920 NS_SKB_CB(hb)->buf_type = BUF_NONE;
2926 ns_grab_int_lock(card, flags); 2921 ns_grab_int_lock(card, flags);
2927 skb_queue_tail(&card->hbpool.queue, hb); 2922 skb_queue_tail(&card->hbpool.queue, hb);
2928 card->hbpool.count++; 2923 card->hbpool.count++;
@@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2953 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 2948 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2954 if (iovb == NULL) 2949 if (iovb == NULL)
2955 return -ENOMEM; 2950 return -ENOMEM;
2951 NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2956 ns_grab_int_lock(card, flags); 2952 ns_grab_int_lock(card, flags);
2957 skb_queue_tail(&card->iovpool.queue, iovb); 2953 skb_queue_tail(&card->iovpool.queue, iovb);
2958 card->iovpool.count++; 2954 card->iovpool.count++;
@@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2979} 2975}
2980 2976
2981 2977
2982
2983static void which_list(ns_dev *card, struct sk_buff *skb) 2978static void which_list(ns_dev *card, struct sk_buff *skb)
2984{ 2979{
2985 printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ? 2980 printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
2986 "small" : skb->list == &card->lbpool.queue ? "large" :
2987 skb->list == &card->hbpool.queue ? "huge" :
2988 skb->list == &card->iovpool.queue ? "iovec" : "unknown");
2989} 2981}
2990 2982
2991 2983
2992
2993static void ns_poll(unsigned long arg) 2984static void ns_poll(unsigned long arg)
2994{ 2985{
2995 int i; 2986 int i;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba5..5997bcb45b59 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -103,8 +103,14 @@
103 103
104#define NS_IOREMAP_SIZE 4096 104#define NS_IOREMAP_SIZE 4096
105 105
106#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */ 106/*
107#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */ 107 * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
108 * BUF_SM and BUF_LG are both used by the driver and the device.
109 * BUF_NONE is only used by the driver.
110 */
111#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */
112#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */
113#define BUF_NONE 0xffffffff /* Software only: */
108 114
109#define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */ 115#define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */
110#define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \ 116#define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
684/* Device driver structures ***************************************************/ 690/* Device driver structures ***************************************************/
685 691
686 692
693struct ns_skb_cb {
694 u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */
695};
696
697#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))
698
687typedef struct tsq_info 699typedef struct tsq_info
688{ 700{
689 void *org; 701 void *org;
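
The nicstar changes above replace the old skb->list ownership test with a tag kept in the skb's control block: NS_SKB_CB(skb)->buf_type is written once when a receive buffer is allocated and read back by recycle_rx_buf() and push_rxbufs(). A minimal sketch of the allocation-side pattern, assuming the nicstar.h definitions above; the helper name ns_alloc_rx_skb is invented for illustration and is not part of the patch:

#include <linux/skbuff.h>
#include "nicstar.h"

/* Allocate an rx skb and record which free-buffer pool it belongs to;
 * buf_type is one of BUF_SM, BUF_LG or BUF_NONE as defined above. */
static struct sk_buff *ns_alloc_rx_skb(unsigned int size, u32 buf_type)
{
        struct sk_buff *skb = dev_alloc_skb(size);

        if (skb != NULL)
                NS_SKB_CB(skb)->buf_type = buf_type;
        return skb;
}

The recycle path then needs only this tag, which is exactly what the simplified recycle_rx_buf() above checks before handing the buffer back to push_rxbufs().
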
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0..55959e4d1cb7 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -400,7 +400,7 @@ unsigned long *x;
400EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> 400EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
401 uPD98401_AAL5_ES_SHIFT,error); 401 uPD98401_AAL5_ES_SHIFT,error);
402 skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; 402 skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
403 do_gettimeofday(&skb->stamp); 403 __net_timestamp(skb);
404#if 0 404#if 0
405printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], 405printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
406 ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], 406 ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
417 chan = (here[3] & uPD98401_AAL5_CHAN) >> 417 chan = (here[3] & uPD98401_AAL5_CHAN) >>
418 uPD98401_AAL5_CHAN_SHIFT; 418 uPD98401_AAL5_CHAN_SHIFT;
419 if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { 419 if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
420 int pos;
420 vcc = zatm_dev->rx_map[chan]; 421 vcc = zatm_dev->rx_map[chan];
421 if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool]) 422 pos = ZATM_VCC(vcc)->pool;
422 zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL; 423 if (skb == zatm_dev->last_free[pos])
423 skb_unlink(skb); 424 zatm_dev->last_free[pos] = NULL;
425 skb_unlink(skb, zatm_dev->pool + pos);
424 } 426 }
425 else { 427 else {
426 printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " 428 printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
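
Two sk_buff API changes drive the zatm and nicstar edits above: skb->stamp is gone, so receive timestamping now goes through __net_timestamp(), and skb_unlink() takes the owning queue explicitly because skb->list no longer exists. A small sketch of both calls under those assumptions; rx_pool and the helper are hypothetical, and rx_pool would have to be set up elsewhere with skb_queue_head_init():

#include <linux/skbuff.h>

static struct sk_buff_head rx_pool;     /* initialised with skb_queue_head_init() */

static void claim_rx_skb(struct sk_buff *skb)
{
        skb_unlink(skb, &rx_pool);      /* queue named explicitly; it supplies the lock */
        __net_timestamp(skb);           /* replaces do_gettimeofday(&skb->stamp) */
}
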
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index ec615d854be9..373e7b728fa7 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -22,11 +22,26 @@
22/* This is a private structure used to tie the classdev and the 22/* This is a private structure used to tie the classdev and the
23 * container .. it should never be visible outside this file */ 23 * container .. it should never be visible outside this file */
24struct internal_container { 24struct internal_container {
25 struct list_head node; 25 struct klist_node node;
26 struct attribute_container *cont; 26 struct attribute_container *cont;
27 struct class_device classdev; 27 struct class_device classdev;
28}; 28};
29 29
30static void internal_container_klist_get(struct klist_node *n)
31{
32 struct internal_container *ic =
33 container_of(n, struct internal_container, node);
34 class_device_get(&ic->classdev);
35}
36
37static void internal_container_klist_put(struct klist_node *n)
38{
39 struct internal_container *ic =
40 container_of(n, struct internal_container, node);
41 class_device_put(&ic->classdev);
42}
43
44
30/** 45/**
31 * attribute_container_classdev_to_container - given a classdev, return the container 46 * attribute_container_classdev_to_container - given a classdev, return the container
32 * 47 *
@@ -57,7 +72,8 @@ int
57attribute_container_register(struct attribute_container *cont) 72attribute_container_register(struct attribute_container *cont)
58{ 73{
59 INIT_LIST_HEAD(&cont->node); 74 INIT_LIST_HEAD(&cont->node);
60 INIT_LIST_HEAD(&cont->containers); 75 klist_init(&cont->containers,internal_container_klist_get,
76 internal_container_klist_put);
61 77
62 down(&attribute_container_mutex); 78 down(&attribute_container_mutex);
63 list_add_tail(&cont->node, &attribute_container_list); 79 list_add_tail(&cont->node, &attribute_container_list);
@@ -77,11 +93,13 @@ attribute_container_unregister(struct attribute_container *cont)
77{ 93{
78 int retval = -EBUSY; 94 int retval = -EBUSY;
79 down(&attribute_container_mutex); 95 down(&attribute_container_mutex);
80 if (!list_empty(&cont->containers)) 96 spin_lock(&cont->containers.k_lock);
97 if (!list_empty(&cont->containers.k_list))
81 goto out; 98 goto out;
82 retval = 0; 99 retval = 0;
83 list_del(&cont->node); 100 list_del(&cont->node);
84 out: 101 out:
102 spin_unlock(&cont->containers.k_lock);
85 up(&attribute_container_mutex); 103 up(&attribute_container_mutex);
86 return retval; 104 return retval;
87 105
@@ -140,7 +158,6 @@ attribute_container_add_device(struct device *dev,
140 continue; 158 continue;
141 } 159 }
142 memset(ic, 0, sizeof(struct internal_container)); 160 memset(ic, 0, sizeof(struct internal_container));
143 INIT_LIST_HEAD(&ic->node);
144 ic->cont = cont; 161 ic->cont = cont;
145 class_device_initialize(&ic->classdev); 162 class_device_initialize(&ic->classdev);
146 ic->classdev.dev = get_device(dev); 163 ic->classdev.dev = get_device(dev);
@@ -151,11 +168,22 @@ attribute_container_add_device(struct device *dev,
151 fn(cont, dev, &ic->classdev); 168 fn(cont, dev, &ic->classdev);
152 else 169 else
153 attribute_container_add_class_device(&ic->classdev); 170 attribute_container_add_class_device(&ic->classdev);
154 list_add_tail(&ic->node, &cont->containers); 171 klist_add_tail(&ic->node, &cont->containers);
155 } 172 }
156 up(&attribute_container_mutex); 173 up(&attribute_container_mutex);
157} 174}
158 175
176/* FIXME: can't break out of this unless klist_iter_exit is also
177 * called before doing the break
178 */
179#define klist_for_each_entry(pos, head, member, iter) \
180 for (klist_iter_init(head, iter); (pos = ({ \
181 struct klist_node *n = klist_next(iter); \
182 n ? container_of(n, typeof(*pos), member) : \
183 ({ klist_iter_exit(iter) ; NULL; }); \
184 }) ) != NULL; )
185
186
159/** 187/**
160 * attribute_container_remove_device - make device eligible for removal. 188 * attribute_container_remove_device - make device eligible for removal.
161 * 189 *
@@ -182,17 +210,19 @@ attribute_container_remove_device(struct device *dev,
182 210
183 down(&attribute_container_mutex); 211 down(&attribute_container_mutex);
184 list_for_each_entry(cont, &attribute_container_list, node) { 212 list_for_each_entry(cont, &attribute_container_list, node) {
185 struct internal_container *ic, *tmp; 213 struct internal_container *ic;
214 struct klist_iter iter;
186 215
187 if (attribute_container_no_classdevs(cont)) 216 if (attribute_container_no_classdevs(cont))
188 continue; 217 continue;
189 218
190 if (!cont->match(cont, dev)) 219 if (!cont->match(cont, dev))
191 continue; 220 continue;
192 list_for_each_entry_safe(ic, tmp, &cont->containers, node) { 221
222 klist_for_each_entry(ic, &cont->containers, node, &iter) {
193 if (dev != ic->classdev.dev) 223 if (dev != ic->classdev.dev)
194 continue; 224 continue;
195 list_del(&ic->node); 225 klist_del(&ic->node);
196 if (fn) 226 if (fn)
197 fn(cont, dev, &ic->classdev); 227 fn(cont, dev, &ic->classdev);
198 else { 228 else {
@@ -225,12 +255,18 @@ attribute_container_device_trigger(struct device *dev,
225 255
226 down(&attribute_container_mutex); 256 down(&attribute_container_mutex);
227 list_for_each_entry(cont, &attribute_container_list, node) { 257 list_for_each_entry(cont, &attribute_container_list, node) {
228 struct internal_container *ic, *tmp; 258 struct internal_container *ic;
259 struct klist_iter iter;
229 260
230 if (!cont->match(cont, dev)) 261 if (!cont->match(cont, dev))
231 continue; 262 continue;
232 263
233 list_for_each_entry_safe(ic, tmp, &cont->containers, node) { 264 if (attribute_container_no_classdevs(cont)) {
265 fn(cont, dev, NULL);
266 continue;
267 }
268
269 klist_for_each_entry(ic, &cont->containers, node, &iter) {
234 if (dev == ic->classdev.dev) 270 if (dev == ic->classdev.dev)
235 fn(cont, dev, &ic->classdev); 271 fn(cont, dev, &ic->classdev);
236 } 272 }
@@ -368,6 +404,36 @@ attribute_container_class_device_del(struct class_device *classdev)
368} 404}
369EXPORT_SYMBOL_GPL(attribute_container_class_device_del); 405EXPORT_SYMBOL_GPL(attribute_container_class_device_del);
370 406
407/**
408 * attribute_container_find_class_device - find the corresponding class_device
409 *
410 * @cont: the container
411 * @dev: the generic device
412 *
413 * Looks up the device in the container's list of class devices and returns
414 * the corresponding class_device.
415 */
416struct class_device *
417attribute_container_find_class_device(struct attribute_container *cont,
418 struct device *dev)
419{
420 struct class_device *cdev = NULL;
421 struct internal_container *ic;
422 struct klist_iter iter;
423
424 klist_for_each_entry(ic, &cont->containers, node, &iter) {
425 if (ic->classdev.dev == dev) {
426 cdev = &ic->classdev;
427 /* FIXME: must exit iterator then break */
428 klist_iter_exit(&iter);
429 break;
430 }
431 }
432
433 return cdev;
434}
435EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
436
371int __init 437int __init
372attribute_container_init(void) 438attribute_container_init(void)
373{ 439{
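
attribute_container.c now keeps its classdevs on a klist whose nodes are reference counted through the get/put callbacks handed to klist_init(), and which must be walked through an iterator that is always closed with klist_iter_exit() (hence the FIXME next to the local klist_for_each_entry macro). A self-contained sketch of the same primitives on a hypothetical item list, using only calls that appear in the hunks above:

#include <linux/kernel.h>
#include <linux/klist.h>

struct item {
        struct klist_node node;
        int id;
};

/* The get/put hooks pin the containing object while its node sits on the
 * list or is held by an iterator; a real user would take/drop a refcount. */
static void item_get(struct klist_node *n) { }
static void item_put(struct klist_node *n) { }

static struct klist items;

static void items_init(void)
{
        klist_init(&items, item_get, item_put);
}

static void items_add(struct item *it)
{
        klist_add_tail(&it->node, &items);      /* node first, list second */
}

static struct item *items_find(int id)
{
        struct klist_iter iter;
        struct klist_node *n;
        struct item *found = NULL;

        klist_iter_init(&items, &iter);
        while ((n = klist_next(&iter)) != NULL) {
                struct item *it = container_of(n, struct item, node);

                if (it->id == id) {
                        found = it;
                        break;
                }
        }
        klist_iter_exit(&iter);                 /* must run even after a break */
        return found;
}
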
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index ab53832d57e5..03204bfd17af 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -156,7 +156,9 @@ static ssize_t driver_unbind(struct device_driver *drv,
156 device_release_driver(dev); 156 device_release_driver(dev);
157 err = count; 157 err = count;
158 } 158 }
159 return err; 159 if (err)
160 return err;
161 return count;
160} 162}
161static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind); 163static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
162 164
@@ -358,7 +360,7 @@ int bus_add_device(struct device * dev)
358 if (bus) { 360 if (bus) {
359 pr_debug("bus %s: add device %s\n", bus->name, dev->bus_id); 361 pr_debug("bus %s: add device %s\n", bus->name, dev->bus_id);
360 device_attach(dev); 362 device_attach(dev);
361 klist_add_tail(&bus->klist_devices, &dev->knode_bus); 363 klist_add_tail(&dev->knode_bus, &bus->klist_devices);
362 error = device_add_attrs(bus, dev); 364 error = device_add_attrs(bus, dev);
363 if (!error) { 365 if (!error) {
364 sysfs_create_link(&bus->devices.kobj, &dev->kobj, dev->bus_id); 366 sysfs_create_link(&bus->devices.kobj, &dev->kobj, dev->bus_id);
@@ -446,7 +448,7 @@ int bus_add_driver(struct device_driver * drv)
446 } 448 }
447 449
448 driver_attach(drv); 450 driver_attach(drv);
449 klist_add_tail(&bus->klist_drivers, &drv->knode_bus); 451 klist_add_tail(&drv->knode_bus, &bus->klist_drivers);
450 module_add_driver(drv->owner, drv); 452 module_add_driver(drv->owner, drv);
451 453
452 driver_add_attrs(bus, drv); 454 driver_add_attrs(bus, drv);
@@ -566,6 +568,36 @@ static void bus_remove_attrs(struct bus_type * bus)
566 } 568 }
567} 569}
568 570
571static void klist_devices_get(struct klist_node *n)
572{
573 struct device *dev = container_of(n, struct device, knode_bus);
574
575 get_device(dev);
576}
577
578static void klist_devices_put(struct klist_node *n)
579{
580 struct device *dev = container_of(n, struct device, knode_bus);
581
582 put_device(dev);
583}
584
585static void klist_drivers_get(struct klist_node *n)
586{
587 struct device_driver *drv = container_of(n, struct device_driver,
588 knode_bus);
589
590 get_driver(drv);
591}
592
593static void klist_drivers_put(struct klist_node *n)
594{
595 struct device_driver *drv = container_of(n, struct device_driver,
596 knode_bus);
597
598 put_driver(drv);
599}
600
569/** 601/**
570 * bus_register - register a bus with the system. 602 * bus_register - register a bus with the system.
571 * @bus: bus. 603 * @bus: bus.
@@ -600,8 +632,8 @@ int bus_register(struct bus_type * bus)
600 if (retval) 632 if (retval)
601 goto bus_drivers_fail; 633 goto bus_drivers_fail;
602 634
603 klist_init(&bus->klist_devices); 635 klist_init(&bus->klist_devices, klist_devices_get, klist_devices_put);
604 klist_init(&bus->klist_drivers); 636 klist_init(&bus->klist_drivers, klist_drivers_get, klist_drivers_put);
605 bus_add_attrs(bus); 637 bus_add_attrs(bus);
606 638
607 pr_debug("bus type '%s' registered\n", bus->name); 639 pr_debug("bus type '%s' registered\n", bus->name);
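
Beyond the klist conversion, the bus.c hunks also touch the manual unbind control file, which follows the usual sysfs store contract of returning the number of bytes consumed on success or a negative errno. A minimal sketch of a write-only driver attribute in the same style; the driver and attribute names are invented:

#include <linux/device.h>
#include <linux/errno.h>

static ssize_t mydrv_trigger_store(struct device_driver *drv,
                                   const char *buf, size_t count)
{
        if (count == 0)
                return -EINVAL;
        /* act on the written value here, then report how much was consumed */
        return count;
}
static DRIVER_ATTR(trigger, S_IWUSR, NULL, mydrv_trigger_store);
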
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0154a1623b21..d164c32a97ad 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -299,10 +299,8 @@ static void class_dev_release(struct kobject * kobj)
299 299
300 pr_debug("device class '%s': release.\n", cd->class_id); 300 pr_debug("device class '%s': release.\n", cd->class_id);
301 301
302 if (cd->devt_attr) { 302 kfree(cd->devt_attr);
303 kfree(cd->devt_attr); 303 cd->devt_attr = NULL;
304 cd->devt_attr = NULL;
305 }
306 304
307 if (cls->release) 305 if (cls->release)
308 cls->release(cd); 306 cls->release(cd);
@@ -452,10 +450,29 @@ void class_device_initialize(struct class_device *class_dev)
452 INIT_LIST_HEAD(&class_dev->node); 450 INIT_LIST_HEAD(&class_dev->node);
453} 451}
454 452
453static char *make_class_name(struct class_device *class_dev)
454{
455 char *name;
456 int size;
457
458 size = strlen(class_dev->class->name) +
459 strlen(kobject_name(&class_dev->kobj)) + 2;
460
461 name = kmalloc(size, GFP_KERNEL);
462 if (!name)
463 return ERR_PTR(-ENOMEM);
464
465 strcpy(name, class_dev->class->name);
466 strcat(name, ":");
467 strcat(name, kobject_name(&class_dev->kobj));
468 return name;
469}
470
455int class_device_add(struct class_device *class_dev) 471int class_device_add(struct class_device *class_dev)
456{ 472{
457 struct class * parent = NULL; 473 struct class * parent = NULL;
458 struct class_interface * class_intf; 474 struct class_interface * class_intf;
475 char *class_name = NULL;
459 int error; 476 int error;
460 477
461 class_dev = class_device_get(class_dev); 478 class_dev = class_device_get(class_dev);
@@ -500,9 +517,13 @@ int class_device_add(struct class_device *class_dev)
500 } 517 }
501 518
502 class_device_add_attrs(class_dev); 519 class_device_add_attrs(class_dev);
503 if (class_dev->dev) 520 if (class_dev->dev) {
521 class_name = make_class_name(class_dev);
504 sysfs_create_link(&class_dev->kobj, 522 sysfs_create_link(&class_dev->kobj,
505 &class_dev->dev->kobj, "device"); 523 &class_dev->dev->kobj, "device");
524 sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
525 class_name);
526 }
506 527
507 /* notify any interfaces this device is now here */ 528 /* notify any interfaces this device is now here */
508 if (parent) { 529 if (parent) {
@@ -519,6 +540,7 @@ int class_device_add(struct class_device *class_dev)
519 if (error && parent) 540 if (error && parent)
520 class_put(parent); 541 class_put(parent);
521 class_device_put(class_dev); 542 class_device_put(class_dev);
543 kfree(class_name);
522 return error; 544 return error;
523} 545}
524 546
@@ -584,6 +606,7 @@ void class_device_del(struct class_device *class_dev)
584{ 606{
585 struct class * parent = class_dev->class; 607 struct class * parent = class_dev->class;
586 struct class_interface * class_intf; 608 struct class_interface * class_intf;
609 char *class_name = NULL;
587 610
588 if (parent) { 611 if (parent) {
589 down(&parent->sem); 612 down(&parent->sem);
@@ -594,8 +617,11 @@ void class_device_del(struct class_device *class_dev)
594 up(&parent->sem); 617 up(&parent->sem);
595 } 618 }
596 619
597 if (class_dev->dev) 620 if (class_dev->dev) {
621 class_name = make_class_name(class_dev);
598 sysfs_remove_link(&class_dev->kobj, "device"); 622 sysfs_remove_link(&class_dev->kobj, "device");
623 sysfs_remove_link(&class_dev->dev->kobj, class_name);
624 }
599 if (class_dev->devt_attr) 625 if (class_dev->devt_attr)
600 class_device_remove_file(class_dev, class_dev->devt_attr); 626 class_device_remove_file(class_dev, class_dev->devt_attr);
601 class_device_remove_attrs(class_dev); 627 class_device_remove_attrs(class_dev);
@@ -605,6 +631,7 @@ void class_device_del(struct class_device *class_dev)
605 631
606 if (parent) 632 if (parent)
607 class_put(parent); 633 class_put(parent);
634 kfree(class_name);
608} 635}
609 636
610void class_device_unregister(struct class_device *class_dev) 637void class_device_unregister(struct class_device *class_dev)
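
class_device_add() and class_device_del() now also maintain a back-link in the underlying device's sysfs directory, named "<class>:<class_device>" by make_class_name() above (for example a hypothetical "sound:dsp" link). Purely to illustrate the naming scheme, an equivalent construction using snprintf; this helper is not part of the patch:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static char *class_link_name(const char *class_name, const char *dev_name)
{
        size_t len = strlen(class_name) + strlen(dev_name) + 2; /* ':' plus '\0' */
        char *name = kmalloc(len, GFP_KERNEL);

        if (name)
                snprintf(name, len, "%s:%s", class_name, dev_name);
        return name;
}
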
diff --git a/drivers/base/core.c b/drivers/base/core.c
index efe03a024a5b..6ab73f5c799a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -191,6 +191,20 @@ void device_remove_file(struct device * dev, struct device_attribute * attr)
191 } 191 }
192} 192}
193 193
194static void klist_children_get(struct klist_node *n)
195{
196 struct device *dev = container_of(n, struct device, knode_parent);
197
198 get_device(dev);
199}
200
201static void klist_children_put(struct klist_node *n)
202{
203 struct device *dev = container_of(n, struct device, knode_parent);
204
205 put_device(dev);
206}
207
194 208
195/** 209/**
196 * device_initialize - init device structure. 210 * device_initialize - init device structure.
@@ -207,7 +221,8 @@ void device_initialize(struct device *dev)
207{ 221{
208 kobj_set_kset_s(dev, devices_subsys); 222 kobj_set_kset_s(dev, devices_subsys);
209 kobject_init(&dev->kobj); 223 kobject_init(&dev->kobj);
210 klist_init(&dev->klist_children); 224 klist_init(&dev->klist_children, klist_children_get,
225 klist_children_put);
211 INIT_LIST_HEAD(&dev->dma_pools); 226 INIT_LIST_HEAD(&dev->dma_pools);
212 init_MUTEX(&dev->sem); 227 init_MUTEX(&dev->sem);
213} 228}
@@ -249,7 +264,7 @@ int device_add(struct device *dev)
249 if ((error = bus_add_device(dev))) 264 if ((error = bus_add_device(dev)))
250 goto BusError; 265 goto BusError;
251 if (parent) 266 if (parent)
252 klist_add_tail(&parent->klist_children, &dev->knode_parent); 267 klist_add_tail(&dev->knode_parent, &parent->klist_children);
253 268
254 /* notify platform of device entry */ 269 /* notify platform of device entry */
255 if (platform_notify) 270 if (platform_notify)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 16323f9cbff0..d5bbce38282f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -42,7 +42,7 @@ void device_bind_driver(struct device * dev)
42{ 42{
43 pr_debug("bound device '%s' to driver '%s'\n", 43 pr_debug("bound device '%s' to driver '%s'\n",
44 dev->bus_id, dev->driver->name); 44 dev->bus_id, dev->driver->name);
45 klist_add_tail(&dev->driver->klist_devices, &dev->knode_driver); 45 klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
46 sysfs_create_link(&dev->driver->kobj, &dev->kobj, 46 sysfs_create_link(&dev->driver->kobj, &dev->kobj,
47 kobject_name(&dev->kobj)); 47 kobject_name(&dev->kobj));
48 sysfs_create_link(&dev->kobj, &dev->driver->kobj, "driver"); 48 sysfs_create_link(&dev->kobj, &dev->driver->kobj, "driver");
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 291c5954a3af..ef3fe513e398 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -142,6 +142,19 @@ void put_driver(struct device_driver * drv)
142 kobject_put(&drv->kobj); 142 kobject_put(&drv->kobj);
143} 143}
144 144
145static void klist_devices_get(struct klist_node *n)
146{
147 struct device *dev = container_of(n, struct device, knode_driver);
148
149 get_device(dev);
150}
151
152static void klist_devices_put(struct klist_node *n)
153{
154 struct device *dev = container_of(n, struct device, knode_driver);
155
156 put_device(dev);
157}
145 158
146/** 159/**
147 * driver_register - register driver with bus 160 * driver_register - register driver with bus
@@ -157,7 +170,7 @@ void put_driver(struct device_driver * drv)
157 */ 170 */
158int driver_register(struct device_driver * drv) 171int driver_register(struct device_driver * drv)
159{ 172{
160 klist_init(&drv->klist_devices); 173 klist_init(&drv->klist_devices, klist_devices_get, klist_devices_put);
161 init_completion(&drv->unloaded); 174 init_completion(&drv->unloaded);
162 return bus_add_driver(drv); 175 return bus_add_driver(drv);
163} 176}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 652281402c92..5bfa2e9a7c26 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -28,6 +28,7 @@ enum {
28 FW_STATUS_DONE, 28 FW_STATUS_DONE,
29 FW_STATUS_ABORT, 29 FW_STATUS_ABORT,
30 FW_STATUS_READY, 30 FW_STATUS_READY,
31 FW_STATUS_READY_NOHOTPLUG,
31}; 32};
32 33
33static int loading_timeout = 10; /* In seconds */ 34static int loading_timeout = 10; /* In seconds */
@@ -344,7 +345,7 @@ error_kfree:
344 345
345static int 346static int
346fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p, 347fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p,
347 const char *fw_name, struct device *device) 348 const char *fw_name, struct device *device, int hotplug)
348{ 349{
349 struct class_device *class_dev; 350 struct class_device *class_dev;
350 struct firmware_priv *fw_priv; 351 struct firmware_priv *fw_priv;
@@ -376,7 +377,10 @@ fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p,
376 goto error_unreg; 377 goto error_unreg;
377 } 378 }
378 379
379 set_bit(FW_STATUS_READY, &fw_priv->status); 380 if (hotplug)
381 set_bit(FW_STATUS_READY, &fw_priv->status);
382 else
383 set_bit(FW_STATUS_READY_NOHOTPLUG, &fw_priv->status);
380 *class_dev_p = class_dev; 384 *class_dev_p = class_dev;
381 goto out; 385 goto out;
382 386
@@ -386,21 +390,9 @@ out:
386 return retval; 390 return retval;
387} 391}
388 392
389/** 393static int
390 * request_firmware: - request firmware to hotplug and wait for it 394_request_firmware(const struct firmware **firmware_p, const char *name,
391 * Description: 395 struct device *device, int hotplug)
392 * @firmware will be used to return a firmware image by the name
393 * of @name for device @device.
394 *
395 * Should be called from user context where sleeping is allowed.
396 *
397 * @name will be use as $FIRMWARE in the hotplug environment and
398 * should be distinctive enough not to be confused with any other
399 * firmware image for this or any other device.
400 **/
401int
402request_firmware(const struct firmware **firmware_p, const char *name,
403 struct device *device)
404{ 396{
405 struct class_device *class_dev; 397 struct class_device *class_dev;
406 struct firmware_priv *fw_priv; 398 struct firmware_priv *fw_priv;
@@ -419,22 +411,25 @@ request_firmware(const struct firmware **firmware_p, const char *name,
419 } 411 }
420 memset(firmware, 0, sizeof (*firmware)); 412 memset(firmware, 0, sizeof (*firmware));
421 413
422 retval = fw_setup_class_device(firmware, &class_dev, name, device); 414 retval = fw_setup_class_device(firmware, &class_dev, name, device,
415 hotplug);
423 if (retval) 416 if (retval)
424 goto error_kfree_fw; 417 goto error_kfree_fw;
425 418
426 fw_priv = class_get_devdata(class_dev); 419 fw_priv = class_get_devdata(class_dev);
427 420
428 if (loading_timeout > 0) { 421 if (hotplug) {
429 fw_priv->timeout.expires = jiffies + loading_timeout * HZ; 422 if (loading_timeout > 0) {
430 add_timer(&fw_priv->timeout); 423 fw_priv->timeout.expires = jiffies + loading_timeout * HZ;
431 } 424 add_timer(&fw_priv->timeout);
432 425 }
433 kobject_hotplug(&class_dev->kobj, KOBJ_ADD);
434 wait_for_completion(&fw_priv->completion);
435 set_bit(FW_STATUS_DONE, &fw_priv->status);
436 426
437 del_timer_sync(&fw_priv->timeout); 427 kobject_hotplug(&class_dev->kobj, KOBJ_ADD);
428 wait_for_completion(&fw_priv->completion);
429 set_bit(FW_STATUS_DONE, &fw_priv->status);
430 del_timer_sync(&fw_priv->timeout);
431 } else
432 wait_for_completion(&fw_priv->completion);
438 433
439 down(&fw_lock); 434 down(&fw_lock);
440 if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) { 435 if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) {
@@ -455,6 +450,26 @@ out:
455} 450}
456 451
457/** 452/**
453 * request_firmware: - request firmware to hotplug and wait for it
454 * Description:
455 * @firmware will be used to return a firmware image by the name
456 * of @name for device @device.
457 *
458 * Should be called from user context where sleeping is allowed.
459 *
460 * @name will be use as $FIRMWARE in the hotplug environment and
461 * should be distinctive enough not to be confused with any other
462 * firmware image for this or any other device.
463 **/
464int
465request_firmware(const struct firmware **firmware_p, const char *name,
466 struct device *device)
467{
468 int hotplug = 1;
469 return _request_firmware(firmware_p, name, device, hotplug);
470}
471
472/**
458 * release_firmware: - release the resource associated with a firmware image 473 * release_firmware: - release the resource associated with a firmware image
459 **/ 474 **/
460void 475void
@@ -491,6 +506,7 @@ struct firmware_work {
491 struct device *device; 506 struct device *device;
492 void *context; 507 void *context;
493 void (*cont)(const struct firmware *fw, void *context); 508 void (*cont)(const struct firmware *fw, void *context);
509 int hotplug;
494}; 510};
495 511
496static int 512static int
@@ -503,7 +519,8 @@ request_firmware_work_func(void *arg)
503 return 0; 519 return 0;
504 } 520 }
505 daemonize("%s/%s", "firmware", fw_work->name); 521 daemonize("%s/%s", "firmware", fw_work->name);
506 request_firmware(&fw, fw_work->name, fw_work->device); 522 _request_firmware(&fw, fw_work->name, fw_work->device,
523 fw_work->hotplug);
507 fw_work->cont(fw, fw_work->context); 524 fw_work->cont(fw, fw_work->context);
508 release_firmware(fw); 525 release_firmware(fw);
509 module_put(fw_work->module); 526 module_put(fw_work->module);
@@ -518,6 +535,9 @@ request_firmware_work_func(void *arg)
518 * Asynchronous variant of request_firmware() for contexts where 535 * Asynchronous variant of request_firmware() for contexts where
519 * it is not possible to sleep. 536 * it is not possible to sleep.
520 * 537 *
538 * @hotplug invokes hotplug event to copy the firmware image if this flag
539 * is non-zero else the firmware copy must be done manually.
540 *
521 * @cont will be called asynchronously when the firmware request is over. 541 * @cont will be called asynchronously when the firmware request is over.
522 * 542 *
523 * @context will be passed over to @cont. 543 * @context will be passed over to @cont.
@@ -527,7 +547,7 @@ request_firmware_work_func(void *arg)
527 **/ 547 **/
528int 548int
529request_firmware_nowait( 549request_firmware_nowait(
530 struct module *module, 550 struct module *module, int hotplug,
531 const char *name, struct device *device, void *context, 551 const char *name, struct device *device, void *context,
532 void (*cont)(const struct firmware *fw, void *context)) 552 void (*cont)(const struct firmware *fw, void *context))
533{ 553{
@@ -548,6 +568,7 @@ request_firmware_nowait(
548 .device = device, 568 .device = device,
549 .context = context, 569 .context = context,
550 .cont = cont, 570 .cont = cont,
571 .hotplug = hotplug,
551 }; 572 };
552 573
553 ret = kernel_thread(request_firmware_work_func, fw_work, 574 ret = kernel_thread(request_firmware_work_func, fw_work,
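
With request_firmware() refactored into _request_firmware(), request_firmware_nowait() grows a hotplug argument: non-zero keeps the old behaviour (raise a hotplug event and honour loading_timeout), zero skips the event and simply waits for userspace to drive the sysfs loading/data files by hand. A hypothetical caller, only to show the new signature; the device helpers and firmware file name are invented:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static void mydev_fw_ready(const struct firmware *fw, void *context)
{
        struct device *dev = context;

        if (!fw) {
                dev_err(dev, "firmware load failed or was aborted\n");
                return;
        }
        /* copy fw->data / fw->size into the hardware here; the core calls
         * release_firmware() on fw once this callback returns */
}

static int mydev_start_fw_load(struct device *dev)
{
        return request_firmware_nowait(THIS_MODULE, 1 /* hotplug */,
                                       "mydev-firmware.bin", dev, dev,
                                       mydev_fw_ready);
}
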
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 904b27caf697..16c513aa4d48 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -39,13 +39,25 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
39 int n; 39 int n;
40 int nid = dev->id; 40 int nid = dev->id;
41 struct sysinfo i; 41 struct sysinfo i;
42 struct page_state ps;
42 unsigned long inactive; 43 unsigned long inactive;
43 unsigned long active; 44 unsigned long active;
44 unsigned long free; 45 unsigned long free;
45 46
46 si_meminfo_node(&i, nid); 47 si_meminfo_node(&i, nid);
48 get_page_state_node(&ps, nid);
47 __get_zone_counts(&active, &inactive, &free, NODE_DATA(nid)); 49 __get_zone_counts(&active, &inactive, &free, NODE_DATA(nid));
48 50
51 /* Check for negative values in these approximate counters */
52 if ((long)ps.nr_dirty < 0)
53 ps.nr_dirty = 0;
54 if ((long)ps.nr_writeback < 0)
55 ps.nr_writeback = 0;
56 if ((long)ps.nr_mapped < 0)
57 ps.nr_mapped = 0;
58 if ((long)ps.nr_slab < 0)
59 ps.nr_slab = 0;
60
49 n = sprintf(buf, "\n" 61 n = sprintf(buf, "\n"
50 "Node %d MemTotal: %8lu kB\n" 62 "Node %d MemTotal: %8lu kB\n"
51 "Node %d MemFree: %8lu kB\n" 63 "Node %d MemFree: %8lu kB\n"
@@ -55,7 +67,11 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
55 "Node %d HighTotal: %8lu kB\n" 67 "Node %d HighTotal: %8lu kB\n"
56 "Node %d HighFree: %8lu kB\n" 68 "Node %d HighFree: %8lu kB\n"
57 "Node %d LowTotal: %8lu kB\n" 69 "Node %d LowTotal: %8lu kB\n"
58 "Node %d LowFree: %8lu kB\n", 70 "Node %d LowFree: %8lu kB\n"
71 "Node %d Dirty: %8lu kB\n"
72 "Node %d Writeback: %8lu kB\n"
73 "Node %d Mapped: %8lu kB\n"
74 "Node %d Slab: %8lu kB\n",
59 nid, K(i.totalram), 75 nid, K(i.totalram),
60 nid, K(i.freeram), 76 nid, K(i.freeram),
61 nid, K(i.totalram - i.freeram), 77 nid, K(i.totalram - i.freeram),
@@ -64,7 +80,11 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
64 nid, K(i.totalhigh), 80 nid, K(i.totalhigh),
65 nid, K(i.freehigh), 81 nid, K(i.freehigh),
66 nid, K(i.totalram - i.totalhigh), 82 nid, K(i.totalram - i.totalhigh),
67 nid, K(i.freeram - i.freehigh)); 83 nid, K(i.freeram - i.freehigh),
84 nid, K(ps.nr_dirty),
85 nid, K(ps.nr_writeback),
86 nid, K(ps.nr_mapped),
87 nid, K(ps.nr_slab));
68 n += hugetlb_report_node_meminfo(nid, buf + n); 88 n += hugetlb_report_node_meminfo(nid, buf + n);
69 return n; 89 return n;
70} 90}
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
index bdd96b03b885..0a7aa07b9a2a 100644
--- a/drivers/base/power/resume.c
+++ b/drivers/base/power/resume.c
@@ -26,11 +26,11 @@ int resume_device(struct device * dev)
26 26
27 down(&dev->sem); 27 down(&dev->sem);
28 if (dev->power.pm_parent 28 if (dev->power.pm_parent
29 && dev->power.pm_parent->power.power_state) { 29 && dev->power.pm_parent->power.power_state.event) {
30 dev_err(dev, "PM: resume from %d, parent %s still %d\n", 30 dev_err(dev, "PM: resume from %d, parent %s still %d\n",
31 dev->power.power_state, 31 dev->power.power_state.event,
32 dev->power.pm_parent->bus_id, 32 dev->power.pm_parent->bus_id,
33 dev->power.pm_parent->power.power_state); 33 dev->power.pm_parent->power.power_state.event);
34 } 34 }
35 if (dev->bus && dev->bus->resume) { 35 if (dev->bus && dev->bus->resume) {
36 dev_dbg(dev,"resuming\n"); 36 dev_dbg(dev,"resuming\n");
@@ -54,7 +54,7 @@ void dpm_resume(void)
54 list_add_tail(entry, &dpm_active); 54 list_add_tail(entry, &dpm_active);
55 55
56 up(&dpm_list_sem); 56 up(&dpm_list_sem);
57 if (!dev->power.prev_state) 57 if (!dev->power.prev_state.event)
58 resume_device(dev); 58 resume_device(dev);
59 down(&dpm_list_sem); 59 down(&dpm_list_sem);
60 put_device(dev); 60 put_device(dev);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 325962d80191..e8f0519f5dfa 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,10 +13,10 @@
13static void runtime_resume(struct device * dev) 13static void runtime_resume(struct device * dev)
14{ 14{
15 dev_dbg(dev, "resuming\n"); 15 dev_dbg(dev, "resuming\n");
16 if (!dev->power.power_state) 16 if (!dev->power.power_state.event)
17 return; 17 return;
18 if (!resume_device(dev)) 18 if (!resume_device(dev))
19 dev->power.power_state = 0; 19 dev->power.power_state = PMSG_ON;
20} 20}
21 21
22 22
@@ -49,10 +49,10 @@ int dpm_runtime_suspend(struct device * dev, pm_message_t state)
49 int error = 0; 49 int error = 0;
50 50
51 down(&dpm_sem); 51 down(&dpm_sem);
52 if (dev->power.power_state == state) 52 if (dev->power.power_state.event == state.event)
53 goto Done; 53 goto Done;
54 54
55 if (dev->power.power_state) 55 if (dev->power.power_state.event)
56 runtime_resume(dev); 56 runtime_resume(dev);
57 57
58 if (!(error = suspend_device(dev, state))) 58 if (!(error = suspend_device(dev, state)))
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index 2ccee3763acf..50501764d050 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -40,22 +40,22 @@ int suspend_device(struct device * dev, pm_message_t state)
40 int error = 0; 40 int error = 0;
41 41
42 down(&dev->sem); 42 down(&dev->sem);
43 if (dev->power.power_state) { 43 if (dev->power.power_state.event) {
44 dev_dbg(dev, "PM: suspend %d-->%d\n", 44 dev_dbg(dev, "PM: suspend %d-->%d\n",
45 dev->power.power_state, state); 45 dev->power.power_state.event, state.event);
46 } 46 }
47 if (dev->power.pm_parent 47 if (dev->power.pm_parent
48 && dev->power.pm_parent->power.power_state) { 48 && dev->power.pm_parent->power.power_state.event) {
49 dev_err(dev, 49 dev_err(dev,
50 "PM: suspend %d->%d, parent %s already %d\n", 50 "PM: suspend %d->%d, parent %s already %d\n",
51 dev->power.power_state, state, 51 dev->power.power_state.event, state.event,
52 dev->power.pm_parent->bus_id, 52 dev->power.pm_parent->bus_id,
53 dev->power.pm_parent->power.power_state); 53 dev->power.pm_parent->power.power_state.event);
54 } 54 }
55 55
56 dev->power.prev_state = dev->power.power_state; 56 dev->power.prev_state = dev->power.power_state;
57 57
58 if (dev->bus && dev->bus->suspend && !dev->power.power_state) { 58 if (dev->bus && dev->bus->suspend && !dev->power.power_state.event) {
59 dev_dbg(dev, "suspending\n"); 59 dev_dbg(dev, "suspending\n");
60 error = dev->bus->suspend(dev, state); 60 error = dev->bus->suspend(dev, state);
61 } 61 }
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f82b3df9545f..8d04fb435c17 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -26,19 +26,19 @@
26 26
27static ssize_t state_show(struct device * dev, struct device_attribute *attr, char * buf) 27static ssize_t state_show(struct device * dev, struct device_attribute *attr, char * buf)
28{ 28{
29 return sprintf(buf, "%u\n", dev->power.power_state); 29 return sprintf(buf, "%u\n", dev->power.power_state.event);
30} 30}
31 31
32static ssize_t state_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) 32static ssize_t state_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n)
33{ 33{
34 u32 state; 34 pm_message_t state;
35 char * rest; 35 char * rest;
36 int error = 0; 36 int error = 0;
37 37
38 state = simple_strtoul(buf, &rest, 10); 38 state.event = simple_strtoul(buf, &rest, 10);
39 if (*rest) 39 if (*rest)
40 return -EINVAL; 40 return -EINVAL;
41 if (state) 41 if (state.event)
42 error = dpm_runtime_suspend(dev, state); 42 error = dpm_runtime_suspend(dev, state);
43 else 43 else
44 dpm_runtime_resume(dev); 44 dpm_runtime_resume(dev);
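
All of the power/ changes above follow from dev->power.power_state turning from a bare u32 into a pm_message_t, so tests and assignments now go through the .event member and the PMSG_* initialisers. A two-helper sketch of the idiom; the helper names are hypothetical:

#include <linux/device.h>
#include <linux/pm.h>

static int dev_is_fully_on(struct device *dev)
{
        /* event == 0 still means "on"; any non-zero event is a low-power state */
        return dev->power.power_state.event == 0;
}

static void dev_mark_on(struct device *dev)
{
        dev->power.power_state = PMSG_ON;       /* struct assignment, not "= 0" */
}
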
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 214b96435409..3431eb6004c3 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -288,6 +288,27 @@ void sysdev_shutdown(void)
288 up(&sysdev_drivers_lock); 288 up(&sysdev_drivers_lock);
289} 289}
290 290
291static void __sysdev_resume(struct sys_device *dev)
292{
293 struct sysdev_class *cls = dev->cls;
294 struct sysdev_driver *drv;
295
296 /* First, call the class-specific one */
297 if (cls->resume)
298 cls->resume(dev);
299
300 /* Call auxiliary drivers next. */
301 list_for_each_entry(drv, &cls->drivers, entry) {
302 if (drv->resume)
303 drv->resume(dev);
304 }
305
306 /* Call global drivers. */
307 list_for_each_entry(drv, &sysdev_drivers, entry) {
308 if (drv->resume)
309 drv->resume(dev);
310 }
311}
291 312
292/** 313/**
293 * sysdev_suspend - Suspend all system devices. 314 * sysdev_suspend - Suspend all system devices.
@@ -305,38 +326,93 @@ void sysdev_shutdown(void)
305int sysdev_suspend(pm_message_t state) 326int sysdev_suspend(pm_message_t state)
306{ 327{
307 struct sysdev_class * cls; 328 struct sysdev_class * cls;
329 struct sys_device *sysdev, *err_dev;
330 struct sysdev_driver *drv, *err_drv;
331 int ret;
308 332
309 pr_debug("Suspending System Devices\n"); 333 pr_debug("Suspending System Devices\n");
310 334
311 list_for_each_entry_reverse(cls, &system_subsys.kset.list, 335 list_for_each_entry_reverse(cls, &system_subsys.kset.list,
312 kset.kobj.entry) { 336 kset.kobj.entry) {
313 struct sys_device * sysdev;
314 337
315 pr_debug("Suspending type '%s':\n", 338 pr_debug("Suspending type '%s':\n",
316 kobject_name(&cls->kset.kobj)); 339 kobject_name(&cls->kset.kobj));
317 340
318 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { 341 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
319 struct sysdev_driver * drv;
320 pr_debug(" %s\n", kobject_name(&sysdev->kobj)); 342 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
321 343
322 /* Call global drivers first. */ 344 /* Call global drivers first. */
323 list_for_each_entry(drv, &sysdev_drivers, entry) { 345 list_for_each_entry(drv, &sysdev_drivers, entry) {
324 if (drv->suspend) 346 if (drv->suspend) {
325 drv->suspend(sysdev, state); 347 ret = drv->suspend(sysdev, state);
348 if (ret)
349 goto gbl_driver;
350 }
326 } 351 }
327 352
328 /* Call auxiliary drivers next. */ 353 /* Call auxiliary drivers next. */
329 list_for_each_entry(drv, &cls->drivers, entry) { 354 list_for_each_entry(drv, &cls->drivers, entry) {
330 if (drv->suspend) 355 if (drv->suspend) {
331 drv->suspend(sysdev, state); 356 ret = drv->suspend(sysdev, state);
357 if (ret)
358 goto aux_driver;
359 }
332 } 360 }
333 361
334 /* Now call the generic one */ 362 /* Now call the generic one */
335 if (cls->suspend) 363 if (cls->suspend) {
336 cls->suspend(sysdev, state); 364 ret = cls->suspend(sysdev, state);
365 if (ret)
366 goto cls_driver;
367 }
337 } 368 }
338 } 369 }
339 return 0; 370 return 0;
371 /* resume current sysdev */
372cls_driver:
373 drv = NULL;
374 printk(KERN_ERR "Class suspend failed for %s\n",
375 kobject_name(&sysdev->kobj));
376
377aux_driver:
378 if (drv)
379 printk(KERN_ERR "Class driver suspend failed for %s\n",
380 kobject_name(&sysdev->kobj));
381 list_for_each_entry(err_drv, &cls->drivers, entry) {
382 if (err_drv == drv)
383 break;
384 if (err_drv->resume)
385 err_drv->resume(sysdev);
386 }
387 drv = NULL;
388
389gbl_driver:
390 if (drv)
391 printk(KERN_ERR "sysdev driver suspend failed for %s\n",
392 kobject_name(&sysdev->kobj));
393 list_for_each_entry(err_drv, &sysdev_drivers, entry) {
394 if (err_drv == drv)
395 break;
396 if (err_drv->resume)
397 err_drv->resume(sysdev);
398 }
399 /* resume other sysdevs in current class */
400 list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
401 if (err_dev == sysdev)
402 break;
403 pr_debug(" %s\n", kobject_name(&err_dev->kobj));
404 __sysdev_resume(err_dev);
405 }
406
407 /* resume other classes */
408 list_for_each_entry_continue(cls, &system_subsys.kset.list,
409 kset.kobj.entry) {
410 list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) {
411 pr_debug(" %s\n", kobject_name(&err_dev->kobj));
412 __sysdev_resume(err_dev);
413 }
414 }
415 return ret;
340} 416}
341 417
342 418
@@ -362,25 +438,9 @@ int sysdev_resume(void)
362 kobject_name(&cls->kset.kobj)); 438 kobject_name(&cls->kset.kobj));
363 439
364 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { 440 list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) {
365 struct sysdev_driver * drv;
366 pr_debug(" %s\n", kobject_name(&sysdev->kobj)); 441 pr_debug(" %s\n", kobject_name(&sysdev->kobj));
367 442
368 /* First, call the class-specific one */ 443 __sysdev_resume(sysdev);
369 if (cls->resume)
370 cls->resume(sysdev);
371
372 /* Call auxiliary drivers next. */
373 list_for_each_entry(drv, &cls->drivers, entry) {
374 if (drv->resume)
375 drv->resume(sysdev);
376 }
377
378 /* Call global drivers. */
379 list_for_each_entry(drv, &sysdev_drivers, entry) {
380 if (drv->resume)
381 drv->resume(sysdev);
382 }
383
384 } 444 }
385 } 445 }
386 return 0; 446 return 0;
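
The new sysdev_suspend() error path is an instance of a common unwind idiom: when a suspend callback fails part-way through a list, walk the same list again from the head and resume every entry that was handled before the failing one. A generic, self-contained sketch of that idiom; struct unit and its hooks are hypothetical:

#include <linux/list.h>

struct unit {
        struct list_head entry;
        int (*suspend)(struct unit *);
        void (*resume)(struct unit *);
};

static int suspend_all(struct list_head *units)
{
        struct unit *u, *err_u;
        int ret = 0;

        list_for_each_entry(u, units, entry) {
                ret = u->suspend(u);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        /* resume everything that was suspended before the failing entry */
        list_for_each_entry(err_u, units, entry) {
                if (err_u == u)
                        break;
                err_u->resume(err_u);
        }
        return ret;
}
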
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 6c2b447a3336..f25e7c6b2d27 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -7,7 +7,7 @@
7 * This file is licensed under GPLv2 7 * This file is licensed under GPLv2
8 * 8 *
9 * The basic idea here is to allow any "device controller" (which 9 * The basic idea here is to allow any "device controller" (which
10 * would most often be a Host Bus Adapter" to use the services of one 10 * would most often be a Host Bus Adapter to use the services of one
11 * or more transport classes for performing transport specific 11 * or more transport classes for performing transport specific
12 * services. Transport specific services are things that the generic 12 * services. Transport specific services are things that the generic
13 * command layer doesn't want to know about (speed settings, line 13 * command layer doesn't want to know about (speed settings, line
@@ -64,7 +64,9 @@ void transport_class_unregister(struct transport_class *tclass)
64} 64}
65EXPORT_SYMBOL_GPL(transport_class_unregister); 65EXPORT_SYMBOL_GPL(transport_class_unregister);
66 66
67static int anon_transport_dummy_function(struct device *dev) 67static int anon_transport_dummy_function(struct transport_container *tc,
68 struct device *dev,
69 struct class_device *cdev)
68{ 70{
69 /* do nothing */ 71 /* do nothing */
70 return 0; 72 return 0;
@@ -115,9 +117,10 @@ static int transport_setup_classdev(struct attribute_container *cont,
115 struct class_device *classdev) 117 struct class_device *classdev)
116{ 118{
117 struct transport_class *tclass = class_to_transport_class(cont->class); 119 struct transport_class *tclass = class_to_transport_class(cont->class);
120 struct transport_container *tcont = attribute_container_to_transport_container(cont);
118 121
119 if (tclass->setup) 122 if (tclass->setup)
120 tclass->setup(dev); 123 tclass->setup(tcont, dev, classdev);
121 124
122 return 0; 125 return 0;
123} 126}
@@ -178,12 +181,14 @@ void transport_add_device(struct device *dev)
178EXPORT_SYMBOL_GPL(transport_add_device); 181EXPORT_SYMBOL_GPL(transport_add_device);
179 182
180static int transport_configure(struct attribute_container *cont, 183static int transport_configure(struct attribute_container *cont,
181 struct device *dev) 184 struct device *dev,
185 struct class_device *cdev)
182{ 186{
183 struct transport_class *tclass = class_to_transport_class(cont->class); 187 struct transport_class *tclass = class_to_transport_class(cont->class);
188 struct transport_container *tcont = attribute_container_to_transport_container(cont);
184 189
185 if (tclass->configure) 190 if (tclass->configure)
186 tclass->configure(dev); 191 tclass->configure(tcont, dev, cdev);
187 192
188 return 0; 193 return 0;
189} 194}
@@ -202,7 +207,7 @@ static int transport_configure(struct attribute_container *cont,
202 */ 207 */
203void transport_configure_device(struct device *dev) 208void transport_configure_device(struct device *dev)
204{ 209{
205 attribute_container_trigger(dev, transport_configure); 210 attribute_container_device_trigger(dev, transport_configure);
206} 211}
207EXPORT_SYMBOL_GPL(transport_configure_device); 212EXPORT_SYMBOL_GPL(transport_configure_device);
208 213
@@ -215,7 +220,7 @@ static int transport_remove_classdev(struct attribute_container *cont,
215 struct transport_class *tclass = class_to_transport_class(cont->class); 220 struct transport_class *tclass = class_to_transport_class(cont->class);
216 221
217 if (tclass->remove) 222 if (tclass->remove)
218 tclass->remove(dev); 223 tclass->remove(tcont, dev, classdev);
219 224
220 if (tclass->remove != anon_transport_dummy_function) { 225 if (tclass->remove != anon_transport_dummy_function) {
221 if (tcont->statistics) 226 if (tcont->statistics)
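
The transport class hooks (setup, configure, remove) now receive the transport_container and the class_device alongside the struct device, mirroring the attribute_container trigger signature used above. A sketch of what one hook looks like under that assumption; the transport class itself is hypothetical:

#include <linux/transport_class.h>

static int my_transport_setup(struct transport_container *tc,
                              struct device *dev,
                              struct class_device *cdev)
{
        /* per-device transport initialisation would go here */
        return 0;
}
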
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b594768b0241..51b0af1cebee 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -6,7 +6,7 @@ menu "Block devices"
6 6
7config BLK_DEV_FD 7config BLK_DEV_FD
8 tristate "Normal floppy disk support" 8 tristate "Normal floppy disk support"
9 depends on (!ARCH_S390 && !M68K && !IA64 && !UML && !ARM) || Q40 || (SUN3X && BROKEN) || ARCH_RPC || ARCH_EBSA285 9 depends on ARCH_MAY_HAVE_PC_FDC
10 ---help--- 10 ---help---
11 If you want to use the floppy disk drive(s) of your PC under Linux, 11 If you want to use the floppy disk drive(s) of your PC under Linux,
12 say Y. Information about this driver, especially important for IBM 12 say Y. Information about this driver, especially important for IBM
@@ -408,54 +408,12 @@ config BLK_DEV_INITRD
408 "real" root file system, etc. See <file:Documentation/initrd.txt> 408 "real" root file system, etc. See <file:Documentation/initrd.txt>
409 for details. 409 for details.
410 410
411config INITRAMFS_SOURCE
412 string "Initramfs source file(s)"
413 default ""
414 help
415 This can be either a single cpio archive with a .cpio suffix or a
416 space-separated list of directories and files for building the
417 initramfs image. A cpio archive should contain a filesystem archive
418 to be used as an initramfs image. Directories should contain a
419 filesystem layout to be included in the initramfs image. Files
420 should contain entries according to the format described by the
421 "usr/gen_init_cpio" program in the kernel tree.
422
423 When multiple directories and files are specified then the
424 initramfs image will be the aggregate of all of them.
425
426 See <file:Documentation/early-userspace/README for more details.
427
428 If you are not sure, leave it blank.
429
430config INITRAMFS_ROOT_UID
431 int "User ID to map to 0 (user root)"
432 depends on INITRAMFS_SOURCE!=""
433 default "0"
434 help
435 This setting is only meaningful if the INITRAMFS_SOURCE is
436 contains a directory. Setting this user ID (UID) to something
437 other than "0" will cause all files owned by that UID to be
438 owned by user root in the initial ramdisk image.
439
440 If you are not sure, leave it set to "0".
441
442config INITRAMFS_ROOT_GID
443 int "Group ID to map to 0 (group root)"
444 depends on INITRAMFS_SOURCE!=""
445 default "0"
446 help
447 This setting is only meaningful if the INITRAMFS_SOURCE is
448 contains a directory. Setting this group ID (GID) to something
449 other than "0" will cause all files owned by that GID to be
450 owned by group root in the initial ramdisk image.
451
452 If you are not sure, leave it set to "0".
453 411
454#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64 412#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
455#for instance. 413#for instance.
456config LBD 414config LBD
457 bool "Support for Large Block Devices" 415 bool "Support for Large Block Devices"
458 depends on X86 || MIPS32 || PPC32 || ARCH_S390_31 || SUPERH || UML 416 depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
459 help 417 help
460 Say Y here if you want to attach large (bigger than 2TB) discs to 418 Say Y here if you want to attach large (bigger than 2TB) discs to
461 your machine, or if you want to have a raid or loopback device 419 your machine, or if you want to have a raid or loopback device
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6e231c5a1199..ded33ba31acc 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -35,7 +35,7 @@ aoedev_newdev(ulong nframes)
35 struct aoedev *d; 35 struct aoedev *d;
36 struct frame *f, *e; 36 struct frame *f, *e;
37 37
38 d = kcalloc(1, sizeof *d, GFP_ATOMIC); 38 d = kzalloc(sizeof *d, GFP_ATOMIC);
39 if (d == NULL) 39 if (d == NULL)
40 return NULL; 40 return NULL;
41 f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); 41 f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
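
The aoedev change swaps kcalloc(1, size, gfp) for the newer kzalloc(size, gfp), the natural spelling for a single zeroed object, while kcalloc() stays for the frame array since it also guards the n * size multiplication against overflow. A short sketch with a made-up structure:

#include <linux/slab.h>

struct thing {
        int id;
        void *priv;
};

static struct thing *new_thing(void)
{
        /* kzalloc(size, gfp) is equivalent to kcalloc(1, size, gfp) */
        return kzalloc(sizeof(struct thing), GFP_ATOMIC);
}
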
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 9e6f51c528b0..4be976940f69 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -120,7 +120,7 @@ aoenet_xmit(struct sk_buff *sl)
120 * (1) len doesn't include the header by default. I want this. 120 * (1) len doesn't include the header by default. I want this.
121 */ 121 */
122static int 122static int
123aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt) 123aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
124{ 124{
125 struct aoe_hdr *h; 125 struct aoe_hdr *h;
126 u32 n; 126 u32 n;
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..30c0903c7cdd 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -2260,8 +2260,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
2260 if (!atomic_dec_and_test(&cfqd->ref)) 2260 if (!atomic_dec_and_test(&cfqd->ref))
2261 return; 2261 return;
2262 2262
2263 blk_put_queue(q);
2264
2265 cfq_shutdown_timer_wq(cfqd); 2263 cfq_shutdown_timer_wq(cfqd);
2266 q->elevator->elevator_data = NULL; 2264 q->elevator->elevator_data = NULL;
2267 2265
@@ -2318,7 +2316,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2318 e->elevator_data = cfqd; 2316 e->elevator_data = cfqd;
2319 2317
2320 cfqd->queue = q; 2318 cfqd->queue = q;
2321 atomic_inc(&q->refcnt);
2322 2319
2323 cfqd->max_queued = q->nr_requests / 4; 2320 cfqd->max_queued = q->nr_requests / 4;
2324 q->nr_batching = cfq_queued; 2321 q->nr_batching = cfq_queued;
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 5be6f998d8c5..3d4261c39f16 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -57,9 +57,11 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
57 mode = strsep(&cmsp, "-"); 57 mode = strsep(&cmsp, "-");
58 58
59 if (mode == NULL || strcmp(mode, "cbc") == 0) 59 if (mode == NULL || strcmp(mode, "cbc") == 0)
60 tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC); 60 tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
61 CRYPTO_TFM_REQ_MAY_SLEEP);
61 else if (strcmp(mode, "ecb") == 0) 62 else if (strcmp(mode, "ecb") == 0)
62 tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB); 63 tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
64 CRYPTO_TFM_REQ_MAY_SLEEP);
63 if (tfm == NULL) 65 if (tfm == NULL)
64 return -EINVAL; 66 return -EINVAL;
65 67
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index ff5201e02153..24594c57c323 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -507,18 +507,12 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
507 const int reads = !list_empty(&dd->fifo_list[READ]); 507 const int reads = !list_empty(&dd->fifo_list[READ]);
508 const int writes = !list_empty(&dd->fifo_list[WRITE]); 508 const int writes = !list_empty(&dd->fifo_list[WRITE]);
509 struct deadline_rq *drq; 509 struct deadline_rq *drq;
510 int data_dir, other_dir; 510 int data_dir;
511 511
512 /* 512 /*
513 * batches are currently reads XOR writes 513 * batches are currently reads XOR writes
514 */ 514 */
515 drq = NULL; 515 drq = dd->next_drq[WRITE] ? : dd->next_drq[READ];
516
517 if (dd->next_drq[READ])
518 drq = dd->next_drq[READ];
519
520 if (dd->next_drq[WRITE])
521 drq = dd->next_drq[WRITE];
522 516
523 if (drq) { 517 if (drq) {
524 /* we have a "next request" */ 518 /* we have a "next request" */
@@ -544,7 +538,6 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
544 goto dispatch_writes; 538 goto dispatch_writes;
545 539
546 data_dir = READ; 540 data_dir = READ;
547 other_dir = WRITE;
548 541
549 goto dispatch_find_request; 542 goto dispatch_find_request;
550 } 543 }
@@ -560,7 +553,6 @@ dispatch_writes:
560 dd->starved = 0; 553 dd->starved = 0;
561 554
562 data_dir = WRITE; 555 data_dir = WRITE;
563 other_dir = READ;
564 556
565 goto dispatch_find_request; 557 goto dispatch_find_request;
566 } 558 }
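The collapsed assignment above relies on the GNU "a ?: b" conditional, which yields a when a is non-zero and b otherwise without evaluating a twice; write preference is preserved because the WRITE slot is tested first. A tiny stand-alone demonstration (user-space C; ?: is a GNU extension, so build with GCC or Clang):

#include <stdio.h>

int main(void)
{
	int a = 0, b = 7;

	printf("%d\n", a ?: b);	/* a is zero, so this prints 7 */
	a = 3;
	printf("%d\n", a ?: b);	/* a is non-zero, so this prints 3 */
	return 0;
}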
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f0c1084b840f..888dad5eef34 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -493,6 +493,8 @@ static struct floppy_struct user_params[N_DRIVE];
493 493
494static sector_t floppy_sizes[256]; 494static sector_t floppy_sizes[256];
495 495
496static char floppy_device_name[] = "floppy";
497
496/* 498/*
497 * The driver is trying to determine the correct media format 499 * The driver is trying to determine the correct media format
498 * while probing is set. rw_interrupt() clears it after a 500 * while probing is set. rw_interrupt() clears it after a
@@ -4191,18 +4193,24 @@ static int __init floppy_setup(char *str)
4191 4193
4192static int have_no_fdc = -ENODEV; 4194static int have_no_fdc = -ENODEV;
4193 4195
4196static ssize_t floppy_cmos_show(struct device *dev,
4197 struct device_attribute *attr, char *buf)
4198{
4199 struct platform_device *p;
4200 int drive;
4201
4202 p = container_of(dev, struct platform_device,dev);
4203 drive = p->id;
4204 return sprintf(buf, "%X\n", UDP->cmos);
4205}
4206DEVICE_ATTR(cmos,S_IRUGO,floppy_cmos_show,NULL);
4207
4194static void floppy_device_release(struct device *dev) 4208static void floppy_device_release(struct device *dev)
4195{ 4209{
4196 complete(&device_release); 4210 complete(&device_release);
4197} 4211}
4198 4212
4199static struct platform_device floppy_device = { 4213static struct platform_device floppy_device[N_DRIVE];
4200 .name = "floppy",
4201 .id = 0,
4202 .dev = {
4203 .release = floppy_device_release,
4204 }
4205};
4206 4214
4207static struct kobject *floppy_find(dev_t dev, int *part, void *data) 4215static struct kobject *floppy_find(dev_t dev, int *part, void *data)
4208{ 4216{
@@ -4370,20 +4378,26 @@ static int __init floppy_init(void)
4370 goto out_flush_work; 4378 goto out_flush_work;
4371 } 4379 }
4372 4380
4373 err = platform_device_register(&floppy_device);
4374 if (err)
4375 goto out_flush_work;
4376
4377 for (drive = 0; drive < N_DRIVE; drive++) { 4381 for (drive = 0; drive < N_DRIVE; drive++) {
4378 if (!(allowed_drive_mask & (1 << drive))) 4382 if (!(allowed_drive_mask & (1 << drive)))
4379 continue; 4383 continue;
4380 if (fdc_state[FDC(drive)].version == FDC_NONE) 4384 if (fdc_state[FDC(drive)].version == FDC_NONE)
4381 continue; 4385 continue;
4386
4387 floppy_device[drive].name = floppy_device_name;
4388 floppy_device[drive].id = drive;
4389 floppy_device[drive].dev.release = floppy_device_release;
4390
4391 err = platform_device_register(&floppy_device[drive]);
4392 if (err)
4393 goto out_flush_work;
4394
4395 device_create_file(&floppy_device[drive].dev,&dev_attr_cmos);
4382 /* to be cleaned up... */ 4396 /* to be cleaned up... */
4383 disks[drive]->private_data = (void *)(long)drive; 4397 disks[drive]->private_data = (void *)(long)drive;
4384 disks[drive]->queue = floppy_queue; 4398 disks[drive]->queue = floppy_queue;
4385 disks[drive]->flags |= GENHD_FL_REMOVABLE; 4399 disks[drive]->flags |= GENHD_FL_REMOVABLE;
4386 disks[drive]->driverfs_dev = &floppy_device.dev; 4400 disks[drive]->driverfs_dev = &floppy_device[drive].dev;
4387 add_disk(disks[drive]); 4401 add_disk(disks[drive]);
4388 } 4402 }
4389 4403
@@ -4603,10 +4617,11 @@ void cleanup_module(void)
4603 fdc_state[FDC(drive)].version != FDC_NONE) { 4617 fdc_state[FDC(drive)].version != FDC_NONE) {
4604 del_gendisk(disks[drive]); 4618 del_gendisk(disks[drive]);
4605 unregister_devfs_entries(drive); 4619 unregister_devfs_entries(drive);
4620 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
4621 platform_device_unregister(&floppy_device[drive]);
4606 } 4622 }
4607 put_disk(disks[drive]); 4623 put_disk(disks[drive]);
4608 } 4624 }
4609 platform_device_unregister(&floppy_device);
4610 devfs_remove("floppy"); 4625 devfs_remove("floppy");
4611 4626
4612 del_timer_sync(&fd_timeout); 4627 del_timer_sync(&fd_timeout);
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 47fd3659a061..d42840cc0d1d 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -45,7 +45,7 @@ int get_blkdev_list(char *p, int used)
45 struct blk_major_name *n; 45 struct blk_major_name *n;
46 int i, len; 46 int i, len;
47 47
48 len = sprintf(p, "\nBlock devices:\n"); 48 len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n");
49 49
50 down(&block_subsys_sem); 50 down(&block_subsys_sem);
51 for (i = 0; i < ARRAY_SIZE(major_names); i++) { 51 for (i = 0; i < ARRAY_SIZE(major_names); i++) {
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 3c818544475e..483d71b10cf9 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -235,8 +235,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
235 * set defaults 235 * set defaults
236 */ 236 */
237 q->nr_requests = BLKDEV_MAX_RQ; 237 q->nr_requests = BLKDEV_MAX_RQ;
238 q->max_phys_segments = MAX_PHYS_SEGMENTS; 238 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
239 q->max_hw_segments = MAX_HW_SEGMENTS; 239 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
240 q->make_request_fn = mfn; 240 q->make_request_fn = mfn;
241 q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 241 q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
242 q->backing_dev_info.state = 0; 242 q->backing_dev_info.state = 0;
@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
284 rq->special = NULL; 284 rq->special = NULL;
285 rq->data_len = 0; 285 rq->data_len = 0;
286 rq->data = NULL; 286 rq->data = NULL;
287 rq->nr_phys_segments = 0;
287 rq->sense = NULL; 288 rq->sense = NULL;
288 rq->end_io = NULL; 289 rq->end_io = NULL;
289 rq->end_io_data = NULL; 290 rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
2115/** 2116/**
2116 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage 2117 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
2117 * @q: request queue where request should be inserted 2118 * @q: request queue where request should be inserted
2118 * @rw: READ or WRITE data 2119 * @rq: request structure to fill
2119 * @ubuf: the user buffer 2120 * @ubuf: the user buffer
2120 * @len: length of user data 2121 * @len: length of user data
2121 * 2122 *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
2132 * original bio must be passed back in to blk_rq_unmap_user() for proper 2133 * original bio must be passed back in to blk_rq_unmap_user() for proper
2133 * unmapping. 2134 * unmapping.
2134 */ 2135 */
2135struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, 2136int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
2136 unsigned int len) 2137 unsigned int len)
2137{ 2138{
2138 unsigned long uaddr; 2139 unsigned long uaddr;
2139 struct request *rq;
2140 struct bio *bio; 2140 struct bio *bio;
2141 int reading;
2141 2142
2142 if (len > (q->max_sectors << 9)) 2143 if (len > (q->max_sectors << 9))
2143 return ERR_PTR(-EINVAL); 2144 return -EINVAL;
2144 if ((!len && ubuf) || (len && !ubuf)) 2145 if (!len || !ubuf)
2145 return ERR_PTR(-EINVAL); 2146 return -EINVAL;
2146 2147
2147 rq = blk_get_request(q, rw, __GFP_WAIT); 2148 reading = rq_data_dir(rq) == READ;
2148 if (!rq)
2149 return ERR_PTR(-ENOMEM);
2150 2149
2151 /* 2150 /*
2152 * if alignment requirement is satisfied, map in user pages for 2151 * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
2154 */ 2153 */
2155 uaddr = (unsigned long) ubuf; 2154 uaddr = (unsigned long) ubuf;
2156 if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) 2155 if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
2157 bio = bio_map_user(q, NULL, uaddr, len, rw == READ); 2156 bio = bio_map_user(q, NULL, uaddr, len, reading);
2158 else 2157 else
2159 bio = bio_copy_user(q, uaddr, len, rw == READ); 2158 bio = bio_copy_user(q, uaddr, len, reading);
2160 2159
2161 if (!IS_ERR(bio)) { 2160 if (!IS_ERR(bio)) {
2162 rq->bio = rq->biotail = bio; 2161 rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
2164 2163
2165 rq->buffer = rq->data = NULL; 2164 rq->buffer = rq->data = NULL;
2166 rq->data_len = len; 2165 rq->data_len = len;
2167 return rq; 2166 return 0;
2168 } 2167 }
2169 2168
2170 /* 2169 /*
2171 * bio is the err-ptr 2170 * bio is the err-ptr
2172 */ 2171 */
2173 blk_put_request(rq); 2172 return PTR_ERR(bio);
2174 return (struct request *) bio;
2175} 2173}
2176 2174
2177EXPORT_SYMBOL(blk_rq_map_user); 2175EXPORT_SYMBOL(blk_rq_map_user);
2178 2176
2179/** 2177/**
2178 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
2179 * @q: request queue where request should be inserted
2180 * @rq: request to map data to
2181 * @iov: pointer to the iovec
2182 * @iov_count: number of elements in the iovec
2183 *
2184 * Description:
2185 * Data will be mapped directly for zero copy io, if possible. Otherwise
2186 * a kernel bounce buffer is used.
2187 *
2188 * A matching blk_rq_unmap_user() must be issued at the end of io, while
2189 * still in process context.
2190 *
2191 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
2192 * before being submitted to the device, as pages mapped may be out of
2193 * reach. It's the caller's responsibility to make sure this happens. The
2194 * original bio must be passed back in to blk_rq_unmap_user() for proper
2195 * unmapping.
2196 */
2197int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
2198 struct sg_iovec *iov, int iov_count)
2199{
2200 struct bio *bio;
2201
2202 if (!iov || iov_count <= 0)
2203 return -EINVAL;
2204
2205 /* we don't allow misaligned data like bio_map_user() does. If the
2206 * user is using sg, they're expected to know the alignment constraints
2207 * and respect them accordingly */
2208 bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
2209 if (IS_ERR(bio))
2210 return PTR_ERR(bio);
2211
2212 rq->bio = rq->biotail = bio;
2213 blk_rq_bio_prep(q, rq, bio);
2214 rq->buffer = rq->data = NULL;
2215 rq->data_len = bio->bi_size;
2216 return 0;
2217}
2218
2219EXPORT_SYMBOL(blk_rq_map_user_iov);
2220
2221/**
2180 * blk_rq_unmap_user - unmap a request with user data 2222 * blk_rq_unmap_user - unmap a request with user data
2181 * @rq: request to be unmapped 2223 * @bio: bio to be unmapped
2182 * @bio: bio for the request
2183 * @ulen: length of user buffer 2224 * @ulen: length of user buffer
2184 * 2225 *
2185 * Description: 2226 * Description:
2186 * Unmap a request previously mapped by blk_rq_map_user(). 2227 * Unmap a bio previously mapped by blk_rq_map_user().
2187 */ 2228 */
2188int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) 2229int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
2189{ 2230{
2190 int ret = 0; 2231 int ret = 0;
2191 2232
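To summarize the interface change in the hunks above: blk_rq_map_user() no longer allocates the request itself; the caller passes in a request obtained from blk_get_request() and gets back 0 or a negative errno, and blk_rq_unmap_user() now takes the bio rather than the request. A hedged sketch of a caller under the new convention (example_pc_io and the command setup are placeholders, error handling trimmed; the sg_io() rework further down follows the same shape):

static int example_pc_io(request_queue_t *q, struct gendisk *disk,
			 void __user *ubuf, unsigned int len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, WRITE, __GFP_WAIT);	/* caller owns the request */
	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_user(q, rq, ubuf, len);	/* returns 0 or -errno now */
	if (err)
		goto out;
	bio = rq->bio;					/* keep for unmapping */

	/* ... fill rq->cmd[], set rq->flags |= REQ_BLOCK_PC, etc. ... */
	err = blk_execute_rq(q, disk, rq, 0);

	blk_rq_unmap_user(bio, len);			/* takes the bio, not the rq */
out:
	blk_put_request(rq);				/* and the caller drops it */
	return err;
}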
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
2196 ret = bio_uncopy_user(bio); 2237 ret = bio_uncopy_user(bio);
2197 } 2238 }
2198 2239
2199 blk_put_request(rq); 2240 return 0;
2200 return ret;
2201} 2241}
2202 2242
2203EXPORT_SYMBOL(blk_rq_unmap_user); 2243EXPORT_SYMBOL(blk_rq_unmap_user);
2204 2244
2205/** 2245/**
2246 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
2247 * @q: request queue where request should be inserted
2248 * @rq: request to fill
2249 * @kbuf: the kernel buffer
2250 * @len: length of kernel data
2251 * @gfp_mask: memory allocation flags
2252 */
2253int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
2254 unsigned int len, unsigned int gfp_mask)
2255{
2256 struct bio *bio;
2257
2258 if (len > (q->max_sectors << 9))
2259 return -EINVAL;
2260 if (!len || !kbuf)
2261 return -EINVAL;
2262
2263 bio = bio_map_kern(q, kbuf, len, gfp_mask);
2264 if (IS_ERR(bio))
2265 return PTR_ERR(bio);
2266
2267 if (rq_data_dir(rq) == WRITE)
2268 bio->bi_rw |= (1 << BIO_RW);
2269
2270 rq->bio = rq->biotail = bio;
2271 blk_rq_bio_prep(q, rq, bio);
2272
2273 rq->buffer = rq->data = NULL;
2274 rq->data_len = len;
2275 return 0;
2276}
2277
2278EXPORT_SYMBOL(blk_rq_map_kern);
2279
2280/**
2281 * blk_execute_rq_nowait - insert a request into queue for execution
2282 * @q: queue to insert the request in
2283 * @bd_disk: matching gendisk
2284 * @rq: request to insert
2285 * @at_head: insert request at head or tail of queue
2286 * @done: I/O completion handler
2287 *
2288 * Description:
2289 * Insert a fully prepared request at the back of the io scheduler queue
2290 * for execution. Don't wait for completion.
2291 */
2292void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
2293 struct request *rq, int at_head,
2294 void (*done)(struct request *))
2295{
2296 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
2297
2298 rq->rq_disk = bd_disk;
2299 rq->flags |= REQ_NOMERGE;
2300 rq->end_io = done;
2301 elv_add_request(q, rq, where, 1);
2302 generic_unplug_device(q);
2303}
2304
2305/**
2206 * blk_execute_rq - insert a request into queue for execution 2306 * blk_execute_rq - insert a request into queue for execution
2207 * @q: queue to insert the request in 2307 * @q: queue to insert the request in
2208 * @bd_disk: matching gendisk 2308 * @bd_disk: matching gendisk
2209 * @rq: request to insert 2309 * @rq: request to insert
2310 * @at_head: insert request at head or tail of queue
2210 * 2311 *
2211 * Description: 2312 * Description:
2212 * Insert a fully prepared request at the back of the io scheduler queue 2313 * Insert a fully prepared request at the back of the io scheduler queue
2213 * for execution. 2314 * for execution and wait for completion.
2214 */ 2315 */
2215int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, 2316int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
2216 struct request *rq) 2317 struct request *rq, int at_head)
2217{ 2318{
2218 DECLARE_COMPLETION(wait); 2319 DECLARE_COMPLETION(wait);
2219 char sense[SCSI_SENSE_BUFFERSIZE]; 2320 char sense[SCSI_SENSE_BUFFERSIZE];
2220 int err = 0; 2321 int err = 0;
2221 2322
2222 rq->rq_disk = bd_disk;
2223
2224 /* 2323 /*
2225 * we need an extra reference to the request, so we can look at 2324 * we need an extra reference to the request, so we can look at
2226 * it after io completion 2325 * it after io completion
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
2233 rq->sense_len = 0; 2332 rq->sense_len = 0;
2234 } 2333 }
2235 2334
2236 rq->flags |= REQ_NOMERGE;
2237 rq->waiting = &wait; 2335 rq->waiting = &wait;
2238 rq->end_io = blk_end_sync_rq; 2336 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
2239 elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
2240 generic_unplug_device(q);
2241 wait_for_completion(&wait); 2337 wait_for_completion(&wait);
2242 rq->waiting = NULL; 2338 rq->waiting = NULL;
2243 2339
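Taken together, the new blk_rq_map_kern() and the reworked blk_execute_rq() (now a thin wrapper that hands the request to blk_execute_rq_nowait() with blk_end_sync_rq as the completion handler and waits) let a driver fire a kernel-buffer packet command in a few lines. A hedged sketch, not taken from the patch; the command bytes and names are placeholders:

static int example_send_pc(request_queue_t *q, struct gendisk *disk,
			   void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (err) {
		blk_put_request(rq);
		return err;
	}

	rq->flags |= REQ_BLOCK_PC;
	rq->cmd[0] = 0x12;		/* e.g. INQUIRY; purely illustrative */
	rq->cmd[4] = len;
	rq->cmd_len = 6;
	rq->timeout = 10 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);	/* synchronous, queued at tail */

	blk_put_request(rq);
	return err;
}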
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2277 2373
2278EXPORT_SYMBOL(blkdev_issue_flush); 2374EXPORT_SYMBOL(blkdev_issue_flush);
2279 2375
2376/**
2377 * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
2378 * @q: device queue
2379 * @disk: gendisk
2380 * @error_sector: error offset
2381 *
2382 * Description:
2383 * Devices understanding the SCSI command set can use this function as
2384 * a helper for issuing a cache flush. Note: driver is required to store
2385 * the error offset (in case of error flushing) in ->sector of struct
2386 * request.
2387 */
2388int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
2389 sector_t *error_sector)
2390{
2391 struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
2392 int ret;
2393
2394 rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
2395 rq->sector = 0;
2396 memset(rq->cmd, 0, sizeof(rq->cmd));
2397 rq->cmd[0] = 0x35;
2398 rq->cmd_len = 12;
2399 rq->data = NULL;
2400 rq->data_len = 0;
2401 rq->timeout = 60 * HZ;
2402
2403 ret = blk_execute_rq(q, disk, rq, 0);
2404
2405 if (ret && error_sector)
2406 *error_sector = rq->sector;
2407
2408 blk_put_request(rq);
2409 return ret;
2410}
2411
2412EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
2413
2280static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) 2414static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2281{ 2415{
2282 int rw = rq_data_dir(rq); 2416 int rw = rq_data_dir(rq);
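How the newly exported blkdev_scsi_issue_flush_fn() is meant to be consumed: a block driver whose device speaks SCSI can install it as the queue's issue_flush hook, so that blkdev_issue_flush() (seen earlier in this file) ends up sending the SYNCHRONIZE CACHE (0x35) command built above. A hypothetical wiring sketch; the blk_queue_issue_flush_fn() setter name is assumed from the block layer of this era and is not part of this patch:

static void example_setup_flush(request_queue_t *q)
{
	/* assumed setter name; the hook itself is the helper added above */
	blk_queue_issue_flush_fn(q, blkdev_scsi_issue_flush_fn);
}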
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 681871ca5d60..abb2df249fd3 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q,
216 struct gendisk *bd_disk, struct sg_io_hdr *hdr) 216 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
217{ 217{
218 unsigned long start_time; 218 unsigned long start_time;
219 int reading, writing; 219 int writing = 0, ret = 0;
220 struct request *rq; 220 struct request *rq;
221 struct bio *bio; 221 struct bio *bio;
222 char sense[SCSI_SENSE_BUFFERSIZE]; 222 char sense[SCSI_SENSE_BUFFERSIZE];
@@ -231,38 +231,48 @@ static int sg_io(struct file *file, request_queue_t *q,
231 if (verify_command(file, cmd)) 231 if (verify_command(file, cmd))
232 return -EPERM; 232 return -EPERM;
233 233
234 /*
235 * we'll do that later
236 */
237 if (hdr->iovec_count)
238 return -EOPNOTSUPP;
239
240 if (hdr->dxfer_len > (q->max_sectors << 9)) 234 if (hdr->dxfer_len > (q->max_sectors << 9))
241 return -EIO; 235 return -EIO;
242 236
243 reading = writing = 0; 237 if (hdr->dxfer_len)
244 if (hdr->dxfer_len) {
245 switch (hdr->dxfer_direction) { 238 switch (hdr->dxfer_direction) {
246 default: 239 default:
247 return -EINVAL; 240 return -EINVAL;
248 case SG_DXFER_TO_FROM_DEV: 241 case SG_DXFER_TO_FROM_DEV:
249 reading = 1;
250 /* fall through */
251 case SG_DXFER_TO_DEV: 242 case SG_DXFER_TO_DEV:
252 writing = 1; 243 writing = 1;
253 break; 244 break;
254 case SG_DXFER_FROM_DEV: 245 case SG_DXFER_FROM_DEV:
255 reading = 1;
256 break; 246 break;
257 } 247 }
258 248
259 rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp, 249 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
260 hdr->dxfer_len); 250 if (!rq)
251 return -ENOMEM;
252
253 if (hdr->iovec_count) {
254 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
255 struct sg_iovec *iov;
256
257 iov = kmalloc(size, GFP_KERNEL);
258 if (!iov) {
259 ret = -ENOMEM;
260 goto out;
261 }
262
263 if (copy_from_user(iov, hdr->dxferp, size)) {
264 kfree(iov);
265 ret = -EFAULT;
266 goto out;
267 }
268
269 ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
270 kfree(iov);
271 } else if (hdr->dxfer_len)
272 ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
261 273
262 if (IS_ERR(rq)) 274 if (ret)
263 return PTR_ERR(rq); 275 goto out;
264 } else
265 rq = blk_get_request(q, READ, __GFP_WAIT);
266 276
267 /* 277 /*
268 * fill in request structure 278 * fill in request structure
@@ -298,7 +308,7 @@ static int sg_io(struct file *file, request_queue_t *q,
298 * (if he doesn't check that is his problem). 308 * (if he doesn't check that is his problem).
299 * N.B. a non-zero SCSI status is _not_ necessarily an error. 309 * N.B. a non-zero SCSI status is _not_ necessarily an error.
300 */ 310 */
301 blk_execute_rq(q, bd_disk, rq); 311 blk_execute_rq(q, bd_disk, rq, 0);
302 312
303 /* write to all output members */ 313 /* write to all output members */
304 hdr->status = 0xff & rq->errors; 314 hdr->status = 0xff & rq->errors;
@@ -320,12 +330,14 @@ static int sg_io(struct file *file, request_queue_t *q,
320 hdr->sb_len_wr = len; 330 hdr->sb_len_wr = len;
321 } 331 }
322 332
323 if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len)) 333 if (blk_rq_unmap_user(bio, hdr->dxfer_len))
324 return -EFAULT; 334 ret = -EFAULT;
325 335
326 /* may not have succeeded, but output values written to control 336 /* may not have succeeded, but output values written to control
327 * structure (struct sg_io_hdr). */ 337 * structure (struct sg_io_hdr). */
328 return 0; 338out:
339 blk_put_request(rq);
340 return ret;
329} 341}
330 342
331#define OMAX_SB_LEN 16 /* For backward compatibility */ 343#define OMAX_SB_LEN 16 /* For backward compatibility */
@@ -408,7 +420,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
408 rq->data_len = bytes; 420 rq->data_len = bytes;
409 rq->flags |= REQ_BLOCK_PC; 421 rq->flags |= REQ_BLOCK_PC;
410 422
411 blk_execute_rq(q, bd_disk, rq); 423 blk_execute_rq(q, bd_disk, rq, 0);
412 err = rq->errors & 0xff; /* only 8 bit SCSI status */ 424 err = rq->errors & 0xff; /* only 8 bit SCSI status */
413 if (err) { 425 if (err) {
414 if (rq->sense_len && rq->sense) { 426 if (rq->sense_len && rq->sense) {
@@ -561,7 +573,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd,
561 rq->cmd[0] = GPCMD_START_STOP_UNIT; 573 rq->cmd[0] = GPCMD_START_STOP_UNIT;
562 rq->cmd[4] = 0x02 + (close != 0); 574 rq->cmd[4] = 0x02 + (close != 0);
563 rq->cmd_len = 6; 575 rq->cmd_len = 6;
564 err = blk_execute_rq(q, bd_disk, rq); 576 err = blk_execute_rq(q, bd_disk, rq, 0);
565 blk_put_request(rq); 577 blk_put_request(rq);
566 break; 578 break;
567 default: 579 default:
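The practical effect of the sg_io() rework above is that SG_IO on a block device now honours iovec_count, so a user-space caller may scatter the data transfer across several segments instead of one flat buffer. An illustrative user-space sketch (device path, sizes and the INQUIRY command are placeholders; needs appropriate permissions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96 bytes */
	unsigned char part1[64], part2[32], sense[32];
	struct sg_iovec iov[2] = {
		{ part1, sizeof(part1) },
		{ part2, sizeof(part2) },
	};
	struct sg_io_hdr hdr;
	int fd = open("/dev/sda", O_RDONLY);	/* placeholder device */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = iov;			/* points at the iovec array ... */
	hdr.iovec_count = 2;			/* ... because iovec_count != 0  */
	hdr.dxfer_len = sizeof(part1) + sizeof(part2);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("scsi status 0x%x\n", hdr.status);

	close(fd);
	return 0;
}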
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 46e56a25d2c8..e46ecd23b3ac 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -776,7 +776,7 @@ static int viodasd_remove(struct vio_dev *vdev)
776 */ 776 */
777static struct vio_device_id viodasd_device_table[] __devinitdata = { 777static struct vio_device_id viodasd_device_table[] __devinitdata = {
778 { "viodasd", "" }, 778 { "viodasd", "" },
779 { 0, } 779 { "", "" }
780}; 780};
781 781
782MODULE_DEVICE_TABLE(vio, viodasd_device_table); 782MODULE_DEVICE_TABLE(vio, viodasd_device_table);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index c42d7e6ac1c5..1e9db0156ea7 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb)
158 if (err) { 158 if (err) {
159 BT_ERR("%s bulk tx submit failed urb %p err %d", 159 BT_ERR("%s bulk tx submit failed urb %p err %d",
160 bfusb->hdev->name, urb, err); 160 bfusb->hdev->name, urb, err);
161 skb_unlink(skb); 161 skb_unlink(skb, &bfusb->pending_q);
162 usb_free_urb(urb); 162 usb_free_urb(urb);
163 } else 163 } else
164 atomic_inc(&bfusb->pending_tx); 164 atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs)
212 212
213 read_lock(&bfusb->lock); 213 read_lock(&bfusb->lock);
214 214
215 skb_unlink(skb); 215 skb_unlink(skb, &bfusb->pending_q);
216 skb_queue_tail(&bfusb->completed_q, skb); 216 skb_queue_tail(&bfusb->completed_q, skb);
217 217
218 bfusb_tx_wakeup(bfusb); 218 bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb)
253 if (err) { 253 if (err) {
254 BT_ERR("%s bulk rx submit failed urb %p err %d", 254 BT_ERR("%s bulk rx submit failed urb %p err %d",
255 bfusb->hdev->name, urb, err); 255 bfusb->hdev->name, urb, err);
256 skb_unlink(skb); 256 skb_unlink(skb, &bfusb->pending_q);
257 kfree_skb(skb); 257 kfree_skb(skb);
258 usb_free_urb(urb); 258 usb_free_urb(urb);
259 } 259 }
@@ -330,7 +330,7 @@ static inline int bfusb_recv_block(struct bfusb *bfusb, int hdr, unsigned char *
330 } 330 }
331 331
332 skb->dev = (void *) bfusb->hdev; 332 skb->dev = (void *) bfusb->hdev;
333 skb->pkt_type = pkt_type; 333 bt_cb(skb)->pkt_type = pkt_type;
334 334
335 bfusb->reassembly = skb; 335 bfusb->reassembly = skb;
336 } else { 336 } else {
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs)
398 buf += len; 398 buf += len;
399 } 399 }
400 400
401 skb_unlink(skb); 401 skb_unlink(skb, &bfusb->pending_q);
402 kfree_skb(skb); 402 kfree_skb(skb);
403 403
404 bfusb_rx_submit(bfusb, urb); 404 bfusb_rx_submit(bfusb, urb);
@@ -485,7 +485,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
485 unsigned char buf[3]; 485 unsigned char buf[3];
486 int sent = 0, size, count; 486 int sent = 0, size, count;
487 487
488 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, skb->pkt_type, skb->len); 488 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
489 489
490 if (!hdev) { 490 if (!hdev) {
491 BT_ERR("Frame for unknown HCI device (hdev=NULL)"); 491 BT_ERR("Frame for unknown HCI device (hdev=NULL)");
@@ -497,7 +497,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
497 497
498 bfusb = (struct bfusb *) hdev->driver_data; 498 bfusb = (struct bfusb *) hdev->driver_data;
499 499
500 switch (skb->pkt_type) { 500 switch (bt_cb(skb)->pkt_type) {
501 case HCI_COMMAND_PKT: 501 case HCI_COMMAND_PKT:
502 hdev->stat.cmd_tx++; 502 hdev->stat.cmd_tx++;
503 break; 503 break;
@@ -510,7 +510,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
510 }; 510 };
511 511
512 /* Prepend skb with frame type */ 512 /* Prepend skb with frame type */
513 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 513 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
514 514
515 count = skb->len; 515 count = skb->len;
516 516
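The pattern repeated throughout these Bluetooth driver hunks is purely mechanical: the HCI packet type moves out of the generic skb->pkt_type field, which belongs to the network core, and into the Bluetooth-private skb control buffer. For orientation, roughly what the accessor looks like in net/bluetooth/bluetooth.h of this series (field list abridged; shown as context, not part of this diff):

struct bt_skb_cb {
	__u8 pkt_type;
	__u8 incoming;
	/* ... further protocol bookkeeping fields ... */
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))

/* drivers then read and write it as:  bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;  */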
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index bd2ec7e284cc..26fe9c0e1d20 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -270,7 +270,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
270 if (!(skb = skb_dequeue(&(info->txq)))) 270 if (!(skb = skb_dequeue(&(info->txq))))
271 break; 271 break;
272 272
273 if (skb->pkt_type & 0x80) { 273 if (bt_cb(skb)->pkt_type & 0x80) {
274 /* Disable RTS */ 274 /* Disable RTS */
275 info->ctrl_reg |= REG_CONTROL_RTS; 275 info->ctrl_reg |= REG_CONTROL_RTS;
276 outb(info->ctrl_reg, iobase + REG_CONTROL); 276 outb(info->ctrl_reg, iobase + REG_CONTROL);
@@ -288,13 +288,13 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
288 /* Mark the buffer as dirty */ 288 /* Mark the buffer as dirty */
289 clear_bit(ready_bit, &(info->tx_state)); 289 clear_bit(ready_bit, &(info->tx_state));
290 290
291 if (skb->pkt_type & 0x80) { 291 if (bt_cb(skb)->pkt_type & 0x80) {
292 DECLARE_WAIT_QUEUE_HEAD(wq); 292 DECLARE_WAIT_QUEUE_HEAD(wq);
293 DEFINE_WAIT(wait); 293 DEFINE_WAIT(wait);
294 294
295 unsigned char baud_reg; 295 unsigned char baud_reg;
296 296
297 switch (skb->pkt_type) { 297 switch (bt_cb(skb)->pkt_type) {
298 case PKT_BAUD_RATE_460800: 298 case PKT_BAUD_RATE_460800:
299 baud_reg = REG_CONTROL_BAUD_RATE_460800; 299 baud_reg = REG_CONTROL_BAUD_RATE_460800;
300 break; 300 break;
@@ -410,9 +410,9 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
410 if (info->rx_state == RECV_WAIT_PACKET_TYPE) { 410 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
411 411
412 info->rx_skb->dev = (void *) info->hdev; 412 info->rx_skb->dev = (void *) info->hdev;
413 info->rx_skb->pkt_type = buf[i]; 413 bt_cb(info->rx_skb)->pkt_type = buf[i];
414 414
415 switch (info->rx_skb->pkt_type) { 415 switch (bt_cb(info->rx_skb)->pkt_type) {
416 416
417 case 0x00: 417 case 0x00:
418 /* init packet */ 418 /* init packet */
@@ -444,7 +444,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
444 444
445 default: 445 default:
446 /* unknown packet */ 446 /* unknown packet */
447 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 447 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
448 info->hdev->stat.err_rx++; 448 info->hdev->stat.err_rx++;
449 449
450 kfree_skb(info->rx_skb); 450 kfree_skb(info->rx_skb);
@@ -586,21 +586,21 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
586 switch (baud) { 586 switch (baud) {
587 case 460800: 587 case 460800:
588 cmd[4] = 0x00; 588 cmd[4] = 0x00;
589 skb->pkt_type = PKT_BAUD_RATE_460800; 589 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_460800;
590 break; 590 break;
591 case 230400: 591 case 230400:
592 cmd[4] = 0x01; 592 cmd[4] = 0x01;
593 skb->pkt_type = PKT_BAUD_RATE_230400; 593 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_230400;
594 break; 594 break;
595 case 115200: 595 case 115200:
596 cmd[4] = 0x02; 596 cmd[4] = 0x02;
597 skb->pkt_type = PKT_BAUD_RATE_115200; 597 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_115200;
598 break; 598 break;
599 case 57600: 599 case 57600:
600 /* Fall through... */ 600 /* Fall through... */
601 default: 601 default:
602 cmd[4] = 0x03; 602 cmd[4] = 0x03;
603 skb->pkt_type = PKT_BAUD_RATE_57600; 603 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_57600;
604 break; 604 break;
605 } 605 }
606 606
@@ -680,7 +680,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
680 680
681 info = (bluecard_info_t *)(hdev->driver_data); 681 info = (bluecard_info_t *)(hdev->driver_data);
682 682
683 switch (skb->pkt_type) { 683 switch (bt_cb(skb)->pkt_type) {
684 case HCI_COMMAND_PKT: 684 case HCI_COMMAND_PKT:
685 hdev->stat.cmd_tx++; 685 hdev->stat.cmd_tx++;
686 break; 686 break;
@@ -693,7 +693,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
693 }; 693 };
694 694
695 /* Prepend skb with frame type */ 695 /* Prepend skb with frame type */
696 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 696 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
697 skb_queue_tail(&(info->txq), skb); 697 skb_queue_tail(&(info->txq), skb);
698 698
699 bluecard_write_wakeup(info); 699 bluecard_write_wakeup(info);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index f696da6f417b..a1bf8f066c88 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -105,7 +105,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
105 if (skb) { 105 if (skb) {
106 memcpy(skb_put(skb, len), buf, len); 106 memcpy(skb_put(skb, len), buf, len);
107 skb->dev = (void *) data->hdev; 107 skb->dev = (void *) data->hdev;
108 skb->pkt_type = HCI_ACLDATA_PKT; 108 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
109 hci_recv_frame(skb); 109 hci_recv_frame(skb);
110 } 110 }
111 break; 111 break;
@@ -117,7 +117,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
117 if (skb) { 117 if (skb) {
118 memcpy(skb_put(skb, len), buf, len); 118 memcpy(skb_put(skb, len), buf, len);
119 skb->dev = (void *) data->hdev; 119 skb->dev = (void *) data->hdev;
120 skb->pkt_type = HCI_SCODATA_PKT; 120 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
121 hci_recv_frame(skb); 121 hci_recv_frame(skb);
122 } 122 }
123 break; 123 break;
@@ -129,7 +129,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
129 if (skb) { 129 if (skb) {
130 memcpy(skb_put(skb, len), buf, len); 130 memcpy(skb_put(skb, len), buf, len);
131 skb->dev = (void *) data->hdev; 131 skb->dev = (void *) data->hdev;
132 skb->pkt_type = HCI_VENDOR_PKT; 132 bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
133 hci_recv_frame(skb); 133 hci_recv_frame(skb);
134 } 134 }
135 break; 135 break;
@@ -190,7 +190,7 @@ static int bpa10x_recv_event(struct bpa10x_data *data, unsigned char *buf, int s
190 } 190 }
191 191
192 skb->dev = (void *) data->hdev; 192 skb->dev = (void *) data->hdev;
193 skb->pkt_type = pkt_type; 193 bt_cb(skb)->pkt_type = pkt_type;
194 194
195 memcpy(skb_put(skb, size), buf, size); 195 memcpy(skb_put(skb, size), buf, size);
196 196
@@ -307,7 +307,8 @@ unlock:
307 read_unlock(&data->lock); 307 read_unlock(&data->lock);
308} 308}
309 309
310static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, size_t size, int flags, void *data) 310static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe,
311 size_t size, unsigned int __nocast flags, void *data)
311{ 312{
312 struct urb *urb; 313 struct urb *urb;
313 struct usb_ctrlrequest *cr; 314 struct usb_ctrlrequest *cr;
@@ -487,7 +488,7 @@ static int bpa10x_send_frame(struct sk_buff *skb)
487 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 488 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
488 struct bpa10x_data *data; 489 struct bpa10x_data *data;
489 490
490 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, skb->pkt_type, skb->len); 491 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
491 492
492 if (!hdev) { 493 if (!hdev) {
493 BT_ERR("Frame for unknown HCI device"); 494 BT_ERR("Frame for unknown HCI device");
@@ -500,9 +501,9 @@ static int bpa10x_send_frame(struct sk_buff *skb)
500 data = hdev->driver_data; 501 data = hdev->driver_data;
501 502
502 /* Prepend skb with frame type */ 503 /* Prepend skb with frame type */
503 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 504 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
504 505
505 switch (skb->pkt_type) { 506 switch (bt_cb(skb)->pkt_type) {
506 case HCI_COMMAND_PKT: 507 case HCI_COMMAND_PKT:
507 hdev->stat.cmd_tx++; 508 hdev->stat.cmd_tx++;
508 skb_queue_tail(&data->cmd_queue, skb); 509 skb_queue_tail(&data->cmd_queue, skb);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index adf1750ea58d..2e0338d80f32 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -259,11 +259,11 @@ static void bt3c_receive(bt3c_info_t *info)
259 if (info->rx_state == RECV_WAIT_PACKET_TYPE) { 259 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
260 260
261 info->rx_skb->dev = (void *) info->hdev; 261 info->rx_skb->dev = (void *) info->hdev;
262 info->rx_skb->pkt_type = inb(iobase + DATA_L); 262 bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
263 inb(iobase + DATA_H); 263 inb(iobase + DATA_H);
264 //printk("bt3c: PACKET_TYPE=%02x\n", info->rx_skb->pkt_type); 264 //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
265 265
266 switch (info->rx_skb->pkt_type) { 266 switch (bt_cb(info->rx_skb)->pkt_type) {
267 267
268 case HCI_EVENT_PKT: 268 case HCI_EVENT_PKT:
269 info->rx_state = RECV_WAIT_EVENT_HEADER; 269 info->rx_state = RECV_WAIT_EVENT_HEADER;
@@ -282,7 +282,7 @@ static void bt3c_receive(bt3c_info_t *info)
282 282
283 default: 283 default:
284 /* Unknown packet */ 284 /* Unknown packet */
285 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 285 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
286 info->hdev->stat.err_rx++; 286 info->hdev->stat.err_rx++;
287 clear_bit(HCI_RUNNING, &(info->hdev->flags)); 287 clear_bit(HCI_RUNNING, &(info->hdev->flags));
288 288
@@ -439,7 +439,7 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
439 439
440 info = (bt3c_info_t *) (hdev->driver_data); 440 info = (bt3c_info_t *) (hdev->driver_data);
441 441
442 switch (skb->pkt_type) { 442 switch (bt_cb(skb)->pkt_type) {
443 case HCI_COMMAND_PKT: 443 case HCI_COMMAND_PKT:
444 hdev->stat.cmd_tx++; 444 hdev->stat.cmd_tx++;
445 break; 445 break;
@@ -452,7 +452,7 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
452 }; 452 };
453 453
454 /* Prepend skb with frame type */ 454 /* Prepend skb with frame type */
455 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 455 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
456 skb_queue_tail(&(info->txq), skb); 456 skb_queue_tail(&(info->txq), skb);
457 457
458 spin_lock_irqsave(&(info->lock), flags); 458 spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index e4c59fdc0e12..89486ea7a021 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -211,9 +211,9 @@ static void btuart_receive(btuart_info_t *info)
211 if (info->rx_state == RECV_WAIT_PACKET_TYPE) { 211 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
212 212
213 info->rx_skb->dev = (void *) info->hdev; 213 info->rx_skb->dev = (void *) info->hdev;
214 info->rx_skb->pkt_type = inb(iobase + UART_RX); 214 bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
215 215
216 switch (info->rx_skb->pkt_type) { 216 switch (bt_cb(info->rx_skb)->pkt_type) {
217 217
218 case HCI_EVENT_PKT: 218 case HCI_EVENT_PKT:
219 info->rx_state = RECV_WAIT_EVENT_HEADER; 219 info->rx_state = RECV_WAIT_EVENT_HEADER;
@@ -232,7 +232,7 @@ static void btuart_receive(btuart_info_t *info)
232 232
233 default: 233 default:
234 /* Unknown packet */ 234 /* Unknown packet */
235 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 235 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
236 info->hdev->stat.err_rx++; 236 info->hdev->stat.err_rx++;
237 clear_bit(HCI_RUNNING, &(info->hdev->flags)); 237 clear_bit(HCI_RUNNING, &(info->hdev->flags));
238 238
@@ -447,7 +447,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
447 447
448 info = (btuart_info_t *)(hdev->driver_data); 448 info = (btuart_info_t *)(hdev->driver_data);
449 449
450 switch (skb->pkt_type) { 450 switch (bt_cb(skb)->pkt_type) {
451 case HCI_COMMAND_PKT: 451 case HCI_COMMAND_PKT:
452 hdev->stat.cmd_tx++; 452 hdev->stat.cmd_tx++;
453 break; 453 break;
@@ -460,7 +460,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
460 }; 460 };
461 461
462 /* Prepend skb with frame type */ 462 /* Prepend skb with frame type */
463 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 463 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
464 skb_queue_tail(&(info->txq), skb); 464 skb_queue_tail(&(info->txq), skb);
465 465
466 btuart_write_wakeup(info); 466 btuart_write_wakeup(info);
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index e39868c3da48..84c1f8839422 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -251,7 +251,7 @@ static void dtl1_receive(dtl1_info_t *info)
251 info->rx_count = nsh->len + (nsh->len & 0x0001); 251 info->rx_count = nsh->len + (nsh->len & 0x0001);
252 break; 252 break;
253 case RECV_WAIT_DATA: 253 case RECV_WAIT_DATA:
254 info->rx_skb->pkt_type = nsh->type; 254 bt_cb(info->rx_skb)->pkt_type = nsh->type;
255 255
256 /* remove PAD byte if it exists */ 256 /* remove PAD byte if it exists */
257 if (nsh->len & 0x0001) { 257 if (nsh->len & 0x0001) {
@@ -262,7 +262,7 @@ static void dtl1_receive(dtl1_info_t *info)
262 /* remove NSH */ 262 /* remove NSH */
263 skb_pull(info->rx_skb, NSHL); 263 skb_pull(info->rx_skb, NSHL);
264 264
265 switch (info->rx_skb->pkt_type) { 265 switch (bt_cb(info->rx_skb)->pkt_type) {
266 case 0x80: 266 case 0x80:
267 /* control data for the Nokia Card */ 267 /* control data for the Nokia Card */
268 dtl1_control(info, info->rx_skb); 268 dtl1_control(info, info->rx_skb);
@@ -272,12 +272,12 @@ static void dtl1_receive(dtl1_info_t *info)
272 case 0x84: 272 case 0x84:
273 /* send frame to the HCI layer */ 273 /* send frame to the HCI layer */
274 info->rx_skb->dev = (void *) info->hdev; 274 info->rx_skb->dev = (void *) info->hdev;
275 info->rx_skb->pkt_type &= 0x0f; 275 bt_cb(info->rx_skb)->pkt_type &= 0x0f;
276 hci_recv_frame(info->rx_skb); 276 hci_recv_frame(info->rx_skb);
277 break; 277 break;
278 default: 278 default:
279 /* unknown packet */ 279 /* unknown packet */
280 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 280 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
281 kfree_skb(info->rx_skb); 281 kfree_skb(info->rx_skb);
282 break; 282 break;
283 } 283 }
@@ -410,7 +410,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
410 410
411 info = (dtl1_info_t *)(hdev->driver_data); 411 info = (dtl1_info_t *)(hdev->driver_data);
412 412
413 switch (skb->pkt_type) { 413 switch (bt_cb(skb)->pkt_type) {
414 case HCI_COMMAND_PKT: 414 case HCI_COMMAND_PKT:
415 hdev->stat.cmd_tx++; 415 hdev->stat.cmd_tx++;
416 nsh.type = 0x81; 416 nsh.type = 0x81;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 858fddb046de..0ee324e1265d 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -149,7 +149,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
149 return 0; 149 return 0;
150 } 150 }
151 151
152 switch (skb->pkt_type) { 152 switch (bt_cb(skb)->pkt_type) {
153 case HCI_ACLDATA_PKT: 153 case HCI_ACLDATA_PKT:
154 case HCI_COMMAND_PKT: 154 case HCI_COMMAND_PKT:
155 skb_queue_tail(&bcsp->rel, skb); 155 skb_queue_tail(&bcsp->rel, skb);
@@ -227,7 +227,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
227 if (!nskb) 227 if (!nskb)
228 return NULL; 228 return NULL;
229 229
230 nskb->pkt_type = pkt_type; 230 bt_cb(nskb)->pkt_type = pkt_type;
231 231
232 bcsp_slip_msgdelim(nskb); 232 bcsp_slip_msgdelim(nskb);
233 233
@@ -286,7 +286,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
286 since they have priority */ 286 since they have priority */
287 287
288 if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) { 288 if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
289 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, skb->pkt_type); 289 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
290 if (nskb) { 290 if (nskb) {
291 kfree_skb(skb); 291 kfree_skb(skb);
292 return nskb; 292 return nskb;
@@ -303,7 +303,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
303 spin_lock_irqsave(&bcsp->unack.lock, flags); 303 spin_lock_irqsave(&bcsp->unack.lock, flags);
304 304
305 if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { 305 if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
306 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, skb->pkt_type); 306 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
307 if (nskb) { 307 if (nskb) {
308 __skb_queue_tail(&bcsp->unack, skb); 308 __skb_queue_tail(&bcsp->unack, skb);
309 mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); 309 mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
@@ -401,7 +401,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
401 if (!nskb) 401 if (!nskb)
402 return; 402 return;
403 memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); 403 memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4);
404 nskb->pkt_type = BCSP_LE_PKT; 404 bt_cb(nskb)->pkt_type = BCSP_LE_PKT;
405 405
406 skb_queue_head(&bcsp->unrel, nskb); 406 skb_queue_head(&bcsp->unrel, nskb);
407 hci_uart_tx_wakeup(hu); 407 hci_uart_tx_wakeup(hu);
@@ -483,14 +483,14 @@ static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
483 bcsp_pkt_cull(bcsp); 483 bcsp_pkt_cull(bcsp);
484 if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && 484 if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
485 bcsp->rx_skb->data[0] & 0x80) { 485 bcsp->rx_skb->data[0] & 0x80) {
486 bcsp->rx_skb->pkt_type = HCI_ACLDATA_PKT; 486 bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
487 pass_up = 1; 487 pass_up = 1;
488 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && 488 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
489 bcsp->rx_skb->data[0] & 0x80) { 489 bcsp->rx_skb->data[0] & 0x80) {
490 bcsp->rx_skb->pkt_type = HCI_EVENT_PKT; 490 bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
491 pass_up = 1; 491 pass_up = 1;
492 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { 492 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
493 bcsp->rx_skb->pkt_type = HCI_SCODATA_PKT; 493 bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT;
494 pass_up = 1; 494 pass_up = 1;
495 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && 495 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
496 !(bcsp->rx_skb->data[0] & 0x80)) { 496 !(bcsp->rx_skb->data[0] & 0x80)) {
@@ -512,7 +512,7 @@ static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
512 hdr.evt = 0xff; 512 hdr.evt = 0xff;
513 hdr.plen = bcsp->rx_skb->len; 513 hdr.plen = bcsp->rx_skb->len;
514 memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); 514 memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
515 bcsp->rx_skb->pkt_type = HCI_EVENT_PKT; 515 bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
516 516
517 hci_recv_frame(bcsp->rx_skb); 517 hci_recv_frame(bcsp->rx_skb);
518 } else { 518 } else {
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 533323b60e63..cf8a22d58d96 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
112 BT_DBG("hu %p skb %p", hu, skb); 112 BT_DBG("hu %p skb %p", hu, skb);
113 113
114 /* Prepend skb with frame type */ 114 /* Prepend skb with frame type */
115 memcpy(skb_push(skb, 1), &skb->pkt_type, 1); 115 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
116 skb_queue_tail(&h4->txq, skb); 116 skb_queue_tail(&h4->txq, skb);
117 return 0; 117 return 0;
118} 118}
@@ -239,7 +239,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
239 return 0; 239 return 0;
240 } 240 }
241 h4->rx_skb->dev = (void *) hu->hdev; 241 h4->rx_skb->dev = (void *) hu->hdev;
242 h4->rx_skb->pkt_type = type; 242 bt_cb(h4->rx_skb)->pkt_type = type;
243 } 243 }
244 return count; 244 return count;
245} 245}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 90be2eae52e0..aed80cc22890 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -153,7 +153,7 @@ restart:
153 break; 153 break;
154 } 154 }
155 155
156 hci_uart_tx_complete(hu, skb->pkt_type); 156 hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
157 kfree_skb(skb); 157 kfree_skb(skb);
158 } 158 }
159 159
@@ -229,7 +229,7 @@ static int hci_uart_send_frame(struct sk_buff *skb)
229 hu = (struct hci_uart *) hdev->driver_data; 229 hu = (struct hci_uart *) hdev->driver_data;
230 tty = hu->tty; 230 tty = hu->tty;
231 231
232 BT_DBG("%s: type %d len %d", hdev->name, skb->pkt_type, skb->len); 232 BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
233 233
234 hu->proto->enqueue(hu, skb); 234 hu->proto->enqueue(hu, skb);
235 235
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 657719b8254f..67d96b5cbb96 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -127,7 +127,7 @@ static struct usb_device_id blacklist_ids[] = {
127 { } /* Terminating entry */ 127 { } /* Terminating entry */
128}; 128};
129 129
130static struct _urb *_urb_alloc(int isoc, int gfp) 130static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp)
131{ 131{
132 struct _urb *_urb = kmalloc(sizeof(struct _urb) + 132 struct _urb *_urb = kmalloc(sizeof(struct _urb) +
133 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp); 133 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
@@ -443,7 +443,7 @@ static int __tx_submit(struct hci_usb *husb, struct _urb *_urb)
443 443
444static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb) 444static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
445{ 445{
446 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 446 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
447 struct usb_ctrlrequest *dr; 447 struct usb_ctrlrequest *dr;
448 struct urb *urb; 448 struct urb *urb;
449 449
@@ -451,7 +451,7 @@ static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
451 _urb = _urb_alloc(0, GFP_ATOMIC); 451 _urb = _urb_alloc(0, GFP_ATOMIC);
452 if (!_urb) 452 if (!_urb)
453 return -ENOMEM; 453 return -ENOMEM;
454 _urb->type = skb->pkt_type; 454 _urb->type = bt_cb(skb)->pkt_type;
455 455
456 dr = kmalloc(sizeof(*dr), GFP_ATOMIC); 456 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
457 if (!dr) { 457 if (!dr) {
@@ -479,7 +479,7 @@ static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
479 479
480static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb) 480static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
481{ 481{
482 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 482 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
483 struct urb *urb; 483 struct urb *urb;
484 int pipe; 484 int pipe;
485 485
@@ -487,7 +487,7 @@ static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
487 _urb = _urb_alloc(0, GFP_ATOMIC); 487 _urb = _urb_alloc(0, GFP_ATOMIC);
488 if (!_urb) 488 if (!_urb)
489 return -ENOMEM; 489 return -ENOMEM;
490 _urb->type = skb->pkt_type; 490 _urb->type = bt_cb(skb)->pkt_type;
491 } 491 }
492 492
493 urb = &_urb->urb; 493 urb = &_urb->urb;
@@ -505,14 +505,14 @@ static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
505#ifdef CONFIG_BT_HCIUSB_SCO 505#ifdef CONFIG_BT_HCIUSB_SCO
506static inline int hci_usb_send_isoc(struct hci_usb *husb, struct sk_buff *skb) 506static inline int hci_usb_send_isoc(struct hci_usb *husb, struct sk_buff *skb)
507{ 507{
508 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 508 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
509 struct urb *urb; 509 struct urb *urb;
510 510
511 if (!_urb) { 511 if (!_urb) {
512 _urb = _urb_alloc(HCI_MAX_ISOC_FRAMES, GFP_ATOMIC); 512 _urb = _urb_alloc(HCI_MAX_ISOC_FRAMES, GFP_ATOMIC);
513 if (!_urb) 513 if (!_urb)
514 return -ENOMEM; 514 return -ENOMEM;
515 _urb->type = skb->pkt_type; 515 _urb->type = bt_cb(skb)->pkt_type;
516 } 516 }
517 517
518 BT_DBG("%s skb %p len %d", husb->hdev->name, skb, skb->len); 518 BT_DBG("%s skb %p len %d", husb->hdev->name, skb, skb->len);
@@ -601,11 +601,11 @@ static int hci_usb_send_frame(struct sk_buff *skb)
601 if (!test_bit(HCI_RUNNING, &hdev->flags)) 601 if (!test_bit(HCI_RUNNING, &hdev->flags))
602 return -EBUSY; 602 return -EBUSY;
603 603
604 BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len); 604 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
605 605
606 husb = (struct hci_usb *) hdev->driver_data; 606 husb = (struct hci_usb *) hdev->driver_data;
607 607
608 switch (skb->pkt_type) { 608 switch (bt_cb(skb)->pkt_type) {
609 case HCI_COMMAND_PKT: 609 case HCI_COMMAND_PKT:
610 hdev->stat.cmd_tx++; 610 hdev->stat.cmd_tx++;
611 break; 611 break;
@@ -627,7 +627,7 @@ static int hci_usb_send_frame(struct sk_buff *skb)
627 627
628 read_lock(&husb->completion_lock); 628 read_lock(&husb->completion_lock);
629 629
630 skb_queue_tail(__transmit_q(husb, skb->pkt_type), skb); 630 skb_queue_tail(__transmit_q(husb, bt_cb(skb)->pkt_type), skb);
631 hci_usb_tx_wakeup(husb); 631 hci_usb_tx_wakeup(husb);
632 632
633 read_unlock(&husb->completion_lock); 633 read_unlock(&husb->completion_lock);
@@ -682,7 +682,7 @@ static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int c
682 return -ENOMEM; 682 return -ENOMEM;
683 } 683 }
684 skb->dev = (void *) husb->hdev; 684 skb->dev = (void *) husb->hdev;
685 skb->pkt_type = type; 685 bt_cb(skb)->pkt_type = type;
686 686
687 __reassembly(husb, type) = skb; 687 __reassembly(husb, type) = skb;
688 688
@@ -702,6 +702,7 @@ static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int c
702 if (!scb->expect) { 702 if (!scb->expect) {
703 /* Complete frame */ 703 /* Complete frame */
704 __reassembly(husb, type) = NULL; 704 __reassembly(husb, type) = NULL;
705 bt_cb(skb)->pkt_type = type;
705 hci_recv_frame(skb); 706 hci_recv_frame(skb);
706 } 707 }
707 708
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index f9b956fb2b8b..52cbd45c308f 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -1,229 +1,220 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* 1/*
26 * Bluetooth HCI virtual device driver.
27 * 2 *
28 * $Id: hci_vhci.c,v 1.3 2002/04/17 17:37:20 maxk Exp $ 3 * Bluetooth virtual HCI driver
4 *
5 * Copyright (C) 2000-2001 Qualcomm Incorporated
6 * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
7 * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
29 */ 24 */
30#define VERSION "1.1"
31 25
32#include <linux/config.h> 26#include <linux/config.h>
33#include <linux/module.h> 27#include <linux/module.h>
34 28
35#include <linux/errno.h>
36#include <linux/kernel.h> 29#include <linux/kernel.h>
37#include <linux/major.h> 30#include <linux/init.h>
38#include <linux/sched.h>
39#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/sched.h>
40#include <linux/poll.h> 35#include <linux/poll.h>
41#include <linux/fcntl.h>
42#include <linux/init.h>
43#include <linux/random.h>
44 36
45#include <linux/skbuff.h> 37#include <linux/skbuff.h>
46#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
47 39
48#include <asm/system.h>
49#include <asm/uaccess.h>
50
51#include <net/bluetooth/bluetooth.h> 40#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 41#include <net/bluetooth/hci_core.h>
53#include "hci_vhci.h"
54 42
55/* HCI device part */ 43#ifndef CONFIG_BT_HCIVHCI_DEBUG
44#undef BT_DBG
45#define BT_DBG(D...)
46#endif
47
48#define VERSION "1.2"
49
50static int minor = MISC_DYNAMIC_MINOR;
51
52struct vhci_data {
53 struct hci_dev *hdev;
54
55 unsigned long flags;
56
57 wait_queue_head_t read_wait;
58 struct sk_buff_head readq;
59
60 struct fasync_struct *fasync;
61};
56 62
57static int hci_vhci_open(struct hci_dev *hdev) 63#define VHCI_FASYNC 0x0010
64
65static struct miscdevice vhci_miscdev;
66
67static int vhci_open_dev(struct hci_dev *hdev)
58{ 68{
59 set_bit(HCI_RUNNING, &hdev->flags); 69 set_bit(HCI_RUNNING, &hdev->flags);
60 return 0;
61}
62 70
63static int hci_vhci_flush(struct hci_dev *hdev)
64{
65 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) hdev->driver_data;
66 skb_queue_purge(&hci_vhci->readq);
67 return 0; 71 return 0;
68} 72}
69 73
70static int hci_vhci_close(struct hci_dev *hdev) 74static int vhci_close_dev(struct hci_dev *hdev)
71{ 75{
76 struct vhci_data *vhci = hdev->driver_data;
77
72 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 78 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
73 return 0; 79 return 0;
74 80
75 hci_vhci_flush(hdev); 81 skb_queue_purge(&vhci->readq);
82
76 return 0; 83 return 0;
77} 84}
78 85
79static void hci_vhci_destruct(struct hci_dev *hdev) 86static int vhci_flush(struct hci_dev *hdev)
80{ 87{
81 struct hci_vhci_struct *vhci; 88 struct vhci_data *vhci = hdev->driver_data;
82 89
83 if (!hdev) return; 90 skb_queue_purge(&vhci->readq);
84 91
85 vhci = (struct hci_vhci_struct *) hdev->driver_data; 92 return 0;
86 kfree(vhci);
87} 93}
88 94
89static int hci_vhci_send_frame(struct sk_buff *skb) 95static int vhci_send_frame(struct sk_buff *skb)
90{ 96{
91 struct hci_dev* hdev = (struct hci_dev *) skb->dev; 97 struct hci_dev* hdev = (struct hci_dev *) skb->dev;
92 struct hci_vhci_struct *hci_vhci; 98 struct vhci_data *vhci;
93 99
94 if (!hdev) { 100 if (!hdev) {
95 BT_ERR("Frame for uknown device (hdev=NULL)"); 101 BT_ERR("Frame for unknown HCI device (hdev=NULL)");
96 return -ENODEV; 102 return -ENODEV;
97 } 103 }
98 104
99 if (!test_bit(HCI_RUNNING, &hdev->flags)) 105 if (!test_bit(HCI_RUNNING, &hdev->flags))
100 return -EBUSY; 106 return -EBUSY;
101 107
102 hci_vhci = (struct hci_vhci_struct *) hdev->driver_data; 108 vhci = hdev->driver_data;
109
110 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
111 skb_queue_tail(&vhci->readq, skb);
103 112
104 memcpy(skb_push(skb, 1), &skb->pkt_type, 1); 113 if (vhci->flags & VHCI_FASYNC)
105 skb_queue_tail(&hci_vhci->readq, skb); 114 kill_fasync(&vhci->fasync, SIGIO, POLL_IN);
106 115
107 if (hci_vhci->flags & VHCI_FASYNC) 116 wake_up_interruptible(&vhci->read_wait);
108 kill_fasync(&hci_vhci->fasync, SIGIO, POLL_IN);
109 wake_up_interruptible(&hci_vhci->read_wait);
110 117
111 return 0; 118 return 0;
112} 119}
113 120
114/* Character device part */ 121static void vhci_destruct(struct hci_dev *hdev)
115 122{
116/* Poll */ 123 kfree(hdev->driver_data);
117static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait)
118{
119 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
120
121 poll_wait(file, &hci_vhci->read_wait, wait);
122
123 if (!skb_queue_empty(&hci_vhci->readq))
124 return POLLIN | POLLRDNORM;
125
126 return POLLOUT | POLLWRNORM;
127} 124}
128 125
129/* Get packet from user space buffer(already verified) */ 126static inline ssize_t vhci_get_user(struct vhci_data *vhci,
130static inline ssize_t hci_vhci_get_user(struct hci_vhci_struct *hci_vhci, const char __user *buf, size_t count) 127 const char __user *buf, size_t count)
131{ 128{
132 struct sk_buff *skb; 129 struct sk_buff *skb;
133 130
134 if (count > HCI_MAX_FRAME_SIZE) 131 if (count > HCI_MAX_FRAME_SIZE)
135 return -EINVAL; 132 return -EINVAL;
136 133
137 if (!(skb = bt_skb_alloc(count, GFP_KERNEL))) 134 skb = bt_skb_alloc(count, GFP_KERNEL);
135 if (!skb)
138 return -ENOMEM; 136 return -ENOMEM;
139 137
140 if (copy_from_user(skb_put(skb, count), buf, count)) { 138 if (copy_from_user(skb_put(skb, count), buf, count)) {
141 kfree_skb(skb); 139 kfree_skb(skb);
142 return -EFAULT; 140 return -EFAULT;
143 } 141 }
144 142
145 skb->dev = (void *) hci_vhci->hdev; 143 skb->dev = (void *) vhci->hdev;
146 skb->pkt_type = *((__u8 *) skb->data); 144 bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
147 skb_pull(skb, 1); 145 skb_pull(skb, 1);
148 146
149 hci_recv_frame(skb); 147 hci_recv_frame(skb);
150 148
151 return count; 149 return count;
152}
153
154/* Write */
155static ssize_t hci_vhci_chr_write(struct file * file, const char __user * buf,
156 size_t count, loff_t *pos)
157{
158 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
159
160 if (!access_ok(VERIFY_READ, buf, count))
161 return -EFAULT;
162
163 return hci_vhci_get_user(hci_vhci, buf, count);
164} 150}
165 151
166/* Put packet to user space buffer(already verified) */ 152static inline ssize_t vhci_put_user(struct vhci_data *vhci,
167static inline ssize_t hci_vhci_put_user(struct hci_vhci_struct *hci_vhci, 153 struct sk_buff *skb, char __user *buf, int count)
168 struct sk_buff *skb, char __user *buf,
169 int count)
170{ 154{
171 int len = count, total = 0;
172 char __user *ptr = buf; 155 char __user *ptr = buf;
156 int len, total = 0;
157
158 len = min_t(unsigned int, skb->len, count);
173 159
174 len = min_t(unsigned int, skb->len, len);
175 if (copy_to_user(ptr, skb->data, len)) 160 if (copy_to_user(ptr, skb->data, len))
176 return -EFAULT; 161 return -EFAULT;
162
177 total += len; 163 total += len;
178 164
179 hci_vhci->hdev->stat.byte_tx += len; 165 vhci->hdev->stat.byte_tx += len;
180 switch (skb->pkt_type) {
181 case HCI_COMMAND_PKT:
182 hci_vhci->hdev->stat.cmd_tx++;
183 break;
184 166
185 case HCI_ACLDATA_PKT: 167 switch (bt_cb(skb)->pkt_type) {
186 hci_vhci->hdev->stat.acl_tx++; 168 case HCI_COMMAND_PKT:
187 break; 169 vhci->hdev->stat.cmd_tx++;
170 break;
171
172 case HCI_ACLDATA_PKT:
173 vhci->hdev->stat.acl_tx++;
174 break;
188 175
189 case HCI_SCODATA_PKT: 176 case HCI_SCODATA_PKT:
190 hci_vhci->hdev->stat.cmd_tx++; 177 vhci->hdev->stat.cmd_tx++;
191 break; 178 break;
192 }; 179 };
193 180
194 return total; 181 return total;
195} 182}
196 183
197/* Read */ 184static loff_t vhci_llseek(struct file * file, loff_t offset, int origin)
198static ssize_t hci_vhci_chr_read(struct file * file, char __user * buf, size_t count, loff_t *pos) 185{
186 return -ESPIPE;
187}
188
189static ssize_t vhci_read(struct file * file, char __user * buf, size_t count, loff_t *pos)
199{ 190{
200 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
201 DECLARE_WAITQUEUE(wait, current); 191 DECLARE_WAITQUEUE(wait, current);
192 struct vhci_data *vhci = file->private_data;
202 struct sk_buff *skb; 193 struct sk_buff *skb;
203 ssize_t ret = 0; 194 ssize_t ret = 0;
204 195
205 add_wait_queue(&hci_vhci->read_wait, &wait); 196 add_wait_queue(&vhci->read_wait, &wait);
206 while (count) { 197 while (count) {
207 set_current_state(TASK_INTERRUPTIBLE); 198 set_current_state(TASK_INTERRUPTIBLE);
208 199
209 /* Read frames from device queue */ 200 skb = skb_dequeue(&vhci->readq);
210 if (!(skb = skb_dequeue(&hci_vhci->readq))) { 201 if (!skb) {
211 if (file->f_flags & O_NONBLOCK) { 202 if (file->f_flags & O_NONBLOCK) {
212 ret = -EAGAIN; 203 ret = -EAGAIN;
213 break; 204 break;
214 } 205 }
206
215 if (signal_pending(current)) { 207 if (signal_pending(current)) {
216 ret = -ERESTARTSYS; 208 ret = -ERESTARTSYS;
217 break; 209 break;
218 } 210 }
219 211
220 /* Nothing to read, let's sleep */
221 schedule(); 212 schedule();
222 continue; 213 continue;
223 } 214 }
224 215
225 if (access_ok(VERIFY_WRITE, buf, count)) 216 if (access_ok(VERIFY_WRITE, buf, count))
226 ret = hci_vhci_put_user(hci_vhci, skb, buf, count); 217 ret = vhci_put_user(vhci, skb, buf, count);
227 else 218 else
228 ret = -EFAULT; 219 ret = -EFAULT;
229 220
@@ -231,84 +222,90 @@ static ssize_t hci_vhci_chr_read(struct file * file, char __user * buf, size_t c
231 break; 222 break;
232 } 223 }
233 set_current_state(TASK_RUNNING); 224 set_current_state(TASK_RUNNING);
234 remove_wait_queue(&hci_vhci->read_wait, &wait); 225 remove_wait_queue(&vhci->read_wait, &wait);
235 226
236 return ret; 227 return ret;
237} 228}
238 229
239static loff_t hci_vhci_chr_lseek(struct file * file, loff_t offset, int origin) 230static ssize_t vhci_write(struct file *file,
231 const char __user *buf, size_t count, loff_t *pos)
240{ 232{
241 return -ESPIPE; 233 struct vhci_data *vhci = file->private_data;
242}
243 234
244static int hci_vhci_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 235 if (!access_ok(VERIFY_READ, buf, count))
245{ 236 return -EFAULT;
246 return -EINVAL; 237
238 return vhci_get_user(vhci, buf, count);
247} 239}
248 240
249static int hci_vhci_chr_fasync(int fd, struct file *file, int on) 241static unsigned int vhci_poll(struct file *file, poll_table *wait)
250{ 242{
251 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data; 243 struct vhci_data *vhci = file->private_data;
252 int ret;
253 244
254 if ((ret = fasync_helper(fd, file, on, &hci_vhci->fasync)) < 0) 245 poll_wait(file, &vhci->read_wait, wait);
255 return ret;
256
257 if (on)
258 hci_vhci->flags |= VHCI_FASYNC;
259 else
260 hci_vhci->flags &= ~VHCI_FASYNC;
261 246
262 return 0; 247 if (!skb_queue_empty(&vhci->readq))
248 return POLLIN | POLLRDNORM;
249
250 return POLLOUT | POLLWRNORM;
263} 251}
264 252
265static int hci_vhci_chr_open(struct inode *inode, struct file * file) 253static int vhci_ioctl(struct inode *inode, struct file *file,
254 unsigned int cmd, unsigned long arg)
266{ 255{
267 struct hci_vhci_struct *hci_vhci = NULL; 256 return -EINVAL;
257}
258
259static int vhci_open(struct inode *inode, struct file *file)
260{
261 struct vhci_data *vhci;
268 struct hci_dev *hdev; 262 struct hci_dev *hdev;
269 263
270 if (!(hci_vhci = kmalloc(sizeof(struct hci_vhci_struct), GFP_KERNEL))) 264 vhci = kmalloc(sizeof(struct vhci_data), GFP_KERNEL);
265 if (!vhci)
271 return -ENOMEM; 266 return -ENOMEM;
272 267
273 memset(hci_vhci, 0, sizeof(struct hci_vhci_struct)); 268 memset(vhci, 0, sizeof(struct vhci_data));
274 269
275 skb_queue_head_init(&hci_vhci->readq); 270 skb_queue_head_init(&vhci->readq);
276 init_waitqueue_head(&hci_vhci->read_wait); 271 init_waitqueue_head(&vhci->read_wait);
277 272
278 /* Initialize and register HCI device */
279 hdev = hci_alloc_dev(); 273 hdev = hci_alloc_dev();
280 if (!hdev) { 274 if (!hdev) {
281 kfree(hci_vhci); 275 kfree(vhci);
282 return -ENOMEM; 276 return -ENOMEM;
283 } 277 }
284 278
285 hci_vhci->hdev = hdev; 279 vhci->hdev = hdev;
286 280
287 hdev->type = HCI_VHCI; 281 hdev->type = HCI_VHCI;
288 hdev->driver_data = hci_vhci; 282 hdev->driver_data = vhci;
283 SET_HCIDEV_DEV(hdev, vhci_miscdev.dev);
289 284
290 hdev->open = hci_vhci_open; 285 hdev->open = vhci_open_dev;
291 hdev->close = hci_vhci_close; 286 hdev->close = vhci_close_dev;
292 hdev->flush = hci_vhci_flush; 287 hdev->flush = vhci_flush;
293 hdev->send = hci_vhci_send_frame; 288 hdev->send = vhci_send_frame;
294 hdev->destruct = hci_vhci_destruct; 289 hdev->destruct = vhci_destruct;
295 290
296 hdev->owner = THIS_MODULE; 291 hdev->owner = THIS_MODULE;
297 292
298 if (hci_register_dev(hdev) < 0) { 293 if (hci_register_dev(hdev) < 0) {
299 kfree(hci_vhci); 294 BT_ERR("Can't register HCI device");
295 kfree(vhci);
300 hci_free_dev(hdev); 296 hci_free_dev(hdev);
301 return -EBUSY; 297 return -EBUSY;
302 } 298 }
303 299
304 file->private_data = hci_vhci; 300 file->private_data = vhci;
305 return nonseekable_open(inode, file); 301
302 return nonseekable_open(inode, file);
306} 303}
307 304
308static int hci_vhci_chr_close(struct inode *inode, struct file *file) 305static int vhci_release(struct inode *inode, struct file *file)
309{ 306{
310 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data; 307 struct vhci_data *vhci = file->private_data;
311 struct hci_dev *hdev = hci_vhci->hdev; 308 struct hci_dev *hdev = vhci->hdev;
312 309
313 if (hci_unregister_dev(hdev) < 0) { 310 if (hci_unregister_dev(hdev) < 0) {
314 BT_ERR("Can't unregister HCI device %s", hdev->name); 311 BT_ERR("Can't unregister HCI device %s", hdev->name);
@@ -317,48 +314,71 @@ static int hci_vhci_chr_close(struct inode *inode, struct file *file)
317 hci_free_dev(hdev); 314 hci_free_dev(hdev);
318 315
319 file->private_data = NULL; 316 file->private_data = NULL;
317
320 return 0; 318 return 0;
321} 319}
322 320
323static struct file_operations hci_vhci_fops = { 321static int vhci_fasync(int fd, struct file *file, int on)
324 .owner = THIS_MODULE, 322{
325 .llseek = hci_vhci_chr_lseek, 323 struct vhci_data *vhci = file->private_data;
326 .read = hci_vhci_chr_read, 324 int err;
327 .write = hci_vhci_chr_write, 325
328 .poll = hci_vhci_chr_poll, 326 err = fasync_helper(fd, file, on, &vhci->fasync);
329 .ioctl = hci_vhci_chr_ioctl, 327 if (err < 0)
330 .open = hci_vhci_chr_open, 328 return err;
331 .release = hci_vhci_chr_close, 329
332 .fasync = hci_vhci_chr_fasync 330 if (on)
331 vhci->flags |= VHCI_FASYNC;
332 else
333 vhci->flags &= ~VHCI_FASYNC;
334
335 return 0;
336}
337
338static struct file_operations vhci_fops = {
339 .owner = THIS_MODULE,
340 .llseek = vhci_llseek,
341 .read = vhci_read,
342 .write = vhci_write,
343 .poll = vhci_poll,
344 .ioctl = vhci_ioctl,
345 .open = vhci_open,
346 .release = vhci_release,
347 .fasync = vhci_fasync,
333}; 348};
334 349
335static struct miscdevice hci_vhci_miscdev= 350static struct miscdevice vhci_miscdev= {
336{ 351 .name = "vhci",
337 VHCI_MINOR, 352 .fops = &vhci_fops,
338 "hci_vhci",
339 &hci_vhci_fops
340}; 353};
341 354
342static int __init hci_vhci_init(void) 355static int __init vhci_init(void)
343{ 356{
344 BT_INFO("VHCI driver ver %s", VERSION); 357 BT_INFO("Virtual HCI driver ver %s", VERSION);
345 358
346 if (misc_register(&hci_vhci_miscdev)) { 359 vhci_miscdev.minor = minor;
347 BT_ERR("Can't register misc device %d\n", VHCI_MINOR); 360
361 if (misc_register(&vhci_miscdev) < 0) {
362 BT_ERR("Can't register misc device with minor %d", minor);
348 return -EIO; 363 return -EIO;
349 } 364 }
350 365
351 return 0; 366 return 0;
352} 367}
353 368
354static void hci_vhci_cleanup(void) 369static void __exit vhci_exit(void)
355{ 370{
356 misc_deregister(&hci_vhci_miscdev); 371 if (misc_deregister(&vhci_miscdev) < 0)
372 BT_ERR("Can't unregister misc device with minor %d", minor);
357} 373}
358 374
359module_init(hci_vhci_init); 375module_init(vhci_init);
360module_exit(hci_vhci_cleanup); 376module_exit(vhci_exit);
377
378module_param(minor, int, 0444);
379MODULE_PARM_DESC(minor, "Miscellaneous minor device number");
361 380
362MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>"); 381MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
363MODULE_DESCRIPTION("Bluetooth VHCI driver ver " VERSION); 382MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
364MODULE_LICENSE("GPL"); 383MODULE_VERSION(VERSION);
384MODULE_LICENSE("GPL");
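The rewritten hci_vhci.c above keeps the original framing: every frame that crosses the character device is the raw HCI packet with a single type byte prepended (see vhci_send_frame() and vhci_get_user()). The user-space sketch below is illustrative only and not part of the patch; it assumes the misc device appears as /dev/vhci (the name registered in vhci_miscdev, with the node itself created by udev/devfs) and simply prints the type byte of each frame the Bluetooth stack hands to the virtual controller.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[512];
		ssize_t n;
		int fd;

		fd = open("/dev/vhci", O_RDWR);		/* assumed device node path */
		if (fd < 0) {
			perror("open /dev/vhci");
			return 1;
		}

		/* Each read() returns one frame queued by vhci_send_frame():
		 * buf[0] is the packet type, buf[1..] is the HCI payload. */
		while ((n = read(fd, buf, sizeof(buf))) > 0) {
			printf("stack sent type 0x%02x, %zd payload bytes\n",
			       buf[0], n - 1);
			/* A controller emulator would parse the packet here and
			 * write back an event frame in the same type-prefixed
			 * format, which vhci_get_user() feeds to hci_recv_frame(). */
		}

		close(fd);
		return 0;
	}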
diff --git a/drivers/bluetooth/hci_vhci.h b/drivers/bluetooth/hci_vhci.h
deleted file mode 100644
index 53b11f9ef76d..000000000000
--- a/drivers/bluetooth/hci_vhci.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/*
26 * $Id: hci_vhci.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
27 */
28
29#ifndef __HCI_VHCI_H
30#define __HCI_VHCI_H
31
32#ifdef __KERNEL__
33
34struct hci_vhci_struct {
35 struct hci_dev *hdev;
36 __u32 flags;
37 wait_queue_head_t read_wait;
38 struct sk_buff_head readq;
39 struct fasync_struct *fasync;
40};
41
42/* VHCI device flags */
43#define VHCI_FASYNC 0x0010
44
45#endif /* __KERNEL__ */
46
47#define VHCI_DEV "/dev/vhci"
48#define VHCI_MINOR 250
49
50#endif /* __HCI_VHCI_H */
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index beaa561f2ed8..153960348414 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,6 +2097,10 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2097 if (!q) 2097 if (!q)
2098 return -ENXIO; 2098 return -ENXIO;
2099 2099
2100 rq = blk_get_request(q, READ, GFP_KERNEL);
2101 if (!rq)
2102 return -ENOMEM;
2103
2100 cdi->last_sense = 0; 2104 cdi->last_sense = 0;
2101 2105
2102 while (nframes) { 2106 while (nframes) {
@@ -2108,9 +2112,9 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2108 2112
2109 len = nr * CD_FRAMESIZE_RAW; 2113 len = nr * CD_FRAMESIZE_RAW;
2110 2114
2111 rq = blk_rq_map_user(q, READ, ubuf, len); 2115 ret = blk_rq_map_user(q, rq, ubuf, len);
2112 if (IS_ERR(rq)) 2116 if (ret)
2113 return PTR_ERR(rq); 2117 break;
2114 2118
2115 memset(rq->cmd, 0, sizeof(rq->cmd)); 2119 memset(rq->cmd, 0, sizeof(rq->cmd));
2116 rq->cmd[0] = GPCMD_READ_CD; 2120 rq->cmd[0] = GPCMD_READ_CD;
@@ -2132,13 +2136,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2132 if (rq->bio) 2136 if (rq->bio)
2133 blk_queue_bounce(q, &rq->bio); 2137 blk_queue_bounce(q, &rq->bio);
2134 2138
2135 if (blk_execute_rq(q, cdi->disk, rq)) { 2139 if (blk_execute_rq(q, cdi->disk, rq, 0)) {
2136 struct request_sense *s = rq->sense; 2140 struct request_sense *s = rq->sense;
2137 ret = -EIO; 2141 ret = -EIO;
2138 cdi->last_sense = s->sense_key; 2142 cdi->last_sense = s->sense_key;
2139 } 2143 }
2140 2144
2141 if (blk_rq_unmap_user(rq, bio, len)) 2145 if (blk_rq_unmap_user(bio, len))
2142 ret = -EFAULT; 2146 ret = -EFAULT;
2143 2147
2144 if (ret) 2148 if (ret)
@@ -2149,6 +2153,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2149 ubuf += len; 2153 ubuf += len;
2150 } 2154 }
2151 2155
2156 blk_put_request(rq);
2152 return ret; 2157 return ret;
2153} 2158}
2154 2159
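The cdrom_read_cdda_bpc() hunks above track a block-layer API change: blk_rq_map_user() no longer returns a freshly built request, so the request is now taken once with blk_get_request(), re-mapped for every chunk, and dropped once at the end, while blk_execute_rq() gains a fourth argument. The fragment below is only a condensed sketch of the resulting control flow with error paths trimmed; the bio variable is assumed to be saved from rq->bio right after mapping so it can be handed to blk_rq_unmap_user().

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* one request for the whole loop */
	if (!rq)
		return -ENOMEM;

	while (nframes) {
		len = nr * CD_FRAMESIZE_RAW;

		ret = blk_rq_map_user(q, rq, ubuf, len);/* map this user chunk */
		if (ret)
			break;
		bio = rq->bio;				/* assumed: kept for the unmap below */

		/* build the GPCMD_READ_CD packet command in rq->cmd[] as before ... */

		if (blk_execute_rq(q, cdi->disk, rq, 0))/* note the new fourth argument */
			ret = -EIO;

		if (blk_rq_unmap_user(bio, len))
			ret = -EFAULT;

		if (ret)
			break;

		nframes -= nr;
		ubuf += len;
	}

	blk_put_request(rq);				/* released once, at the end */
	return ret;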
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 38dd9ffbe8bc..0829db58462f 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -734,7 +734,7 @@ static int viocd_remove(struct vio_dev *vdev)
734 */ 734 */
735static struct vio_device_id viocd_device_table[] __devinitdata = { 735static struct vio_device_id viocd_device_table[] __devinitdata = {
736 { "viocd", "" }, 736 { "viocd", "" },
737 { 0, } 737 { "", "" }
738}; 738};
739 739
740MODULE_DEVICE_TABLE(vio, viocd_device_table); 740MODULE_DEVICE_TABLE(vio, viocd_device_table);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7333b41d4224..2bc9d64db106 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -138,7 +138,7 @@ config CYZ_INTR
138 138
139config DIGIEPCA 139config DIGIEPCA
140 tristate "Digiboard Intelligent Async Support" 140 tristate "Digiboard Intelligent Async Support"
141 depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP && (!64BIT || BROKEN) 141 depends on SERIAL_NONSTANDARD
142 ---help--- 142 ---help---
143 This is a driver for Digi International's Xx, Xeve, and Xem series 143 This is a driver for Digi International's Xx, Xeve, and Xem series
144 of cards which provide multiple serial ports. You would need 144 of cards which provide multiple serial ports. You would need
@@ -735,7 +735,7 @@ config SGI_IP27_RTC
735 735
736config GEN_RTC 736config GEN_RTC
737 tristate "Generic /dev/rtc emulation" 737 tristate "Generic /dev/rtc emulation"
738 depends on RTC!=y && !IA64 && !ARM && !PPC64 && !M32R && !SPARC32 738 depends on RTC!=y && !IA64 && !ARM && !PPC64 && !M32R && !SPARC32 && !SPARC64
739 ---help--- 739 ---help---
740 If you say Y here and create a character special file /dev/rtc with 740 If you say Y here and create a character special file /dev/rtc with
741 major number 10 and minor number 135 using mknod ("man mknod"), you 741 major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -842,8 +842,7 @@ config SONYPI
842 842
843config TANBAC_TB0219 843config TANBAC_TB0219
844 tristate "TANBAC TB0219 base board support" 844 tristate "TANBAC TB0219 base board support"
845 depends TANBAC_TB0229 845 depends TANBAC_TB022X
846
847 846
848menu "Ftape, the floppy tape device driver" 847menu "Ftape, the floppy tape device driver"
849 848
diff --git a/drivers/char/digi1.h b/drivers/char/digi1.h
index 184378d23f8c..94d4eab5d3ca 100644
--- a/drivers/char/digi1.h
+++ b/drivers/char/digi1.h
@@ -1,46 +1,46 @@
1/* Definitions for DigiBoard ditty(1) command. */ 1/* Definitions for DigiBoard ditty(1) command. */
2 2
3#if !defined(TIOCMODG) 3#if !defined(TIOCMODG)
4#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */ 4#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
5#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */ 5#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
6#endif 6#endif
7 7
8#if !defined(TIOCMSET) 8#if !defined(TIOCMSET)
9#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */ 9#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
10#define TIOCMGET ('d'<<8) | 253 /* set modem ctrl state */ 10#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
11#endif 11#endif
12 12
13#if !defined(TIOCMBIC) 13#if !defined(TIOCMBIC)
14#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */ 14#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
15#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */ 15#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
16#endif 16#endif
17 17
18#if !defined(TIOCSDTR) 18#if !defined(TIOCSDTR)
19#define TIOCSDTR ('e'<<8) | 0 /* set DTR */ 19#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
20#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */ 20#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
21#endif 21#endif
22 22
23/************************************************************************ 23/************************************************************************
24 * Ioctl command arguments for DIGI parameters. 24 * Ioctl command arguments for DIGI parameters.
25 ************************************************************************/ 25 ************************************************************************/
26#define DIGI_GETA ('e'<<8) | 94 /* Read params */ 26#define DIGI_GETA (('e'<<8) | 94) /* Read params */
27 27
28#define DIGI_SETA ('e'<<8) | 95 /* Set params */ 28#define DIGI_SETA (('e'<<8) | 95) /* Set params */
29#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */ 29#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
30#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */ 30#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
31 31
32#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */ 32#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
33 /* control characters */ 33 /* control characters */
34#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */ 34#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
35 /* control characters */ 35 /* control characters */
36#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */ 36#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
37 /* flow control chars */ 37 /* flow control chars */
38#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */ 38#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
39 /* flow control chars */ 39 /* flow control chars */
40 40
41#define DIGI_GETINFO ('e'<<8) | 103 /* Fill in digi_info */ 41#define DIGI_GETINFO (('e'<<8) | 103) /* Fill in digi_info */
42#define DIGI_POLLER ('e'<<8) | 104 /* Turn on/off poller */ 42#define DIGI_POLLER (('e'<<8) | 104) /* Turn on/off poller */
43#define DIGI_INIT ('e'<<8) | 105 /* Allow things to run. */ 43#define DIGI_INIT (('e'<<8) | 105) /* Allow things to run. */
44 44
45struct digiflow_struct 45struct digiflow_struct
46{ 46{
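The only change to digi1.h is wrapping each ioctl value in parentheses, which matters because an unparenthesized macro body does not survive being dropped into a larger expression. The stand-alone demo below is illustrative; the OLD_/NEW_ names are invented for the comparison and do not exist in the header.

	#include <stdio.h>

	#define OLD_DIGI_GETA ('e'<<8) | 94	/* unparenthesized, as before    */
	#define NEW_DIGI_GETA (('e'<<8) | 94)	/* parenthesized, as in the hunk */

	int main(void)
	{
		int cmd = 0;	/* clearly not the DIGI_GETA value */

		/* == binds tighter than |, so the old form expands to
		 * (cmd == ('e'<<8)) | 94 and is non-zero for every cmd. */
		printf("old macro: %d\n", cmd == OLD_DIGI_GETA);	/* prints 94 */
		printf("new macro: %d\n", cmd == NEW_DIGI_GETA);	/* prints 0  */

		return 0;
	}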
diff --git a/drivers/char/digiFep1.h b/drivers/char/digiFep1.h
index c47d7fcb8400..3c1f1922c798 100644
--- a/drivers/char/digiFep1.h
+++ b/drivers/char/digiFep1.h
@@ -13,88 +13,88 @@
13 13
14struct global_data 14struct global_data
15{ 15{
16 volatile ushort cin; 16 u16 cin;
17 volatile ushort cout; 17 u16 cout;
18 volatile ushort cstart; 18 u16 cstart;
19 volatile ushort cmax; 19 u16 cmax;
20 volatile ushort ein; 20 u16 ein;
21 volatile ushort eout; 21 u16 eout;
22 volatile ushort istart; 22 u16 istart;
23 volatile ushort imax; 23 u16 imax;
24}; 24};
25 25
26 26
27struct board_chan 27struct board_chan
28{ 28{
29 int filler1; 29 u32 filler1;
30 int filler2; 30 u32 filler2;
31 volatile ushort tseg; 31 u16 tseg;
32 volatile ushort tin; 32 u16 tin;
33 volatile ushort tout; 33 u16 tout;
34 volatile ushort tmax; 34 u16 tmax;
35 35
36 volatile ushort rseg; 36 u16 rseg;
37 volatile ushort rin; 37 u16 rin;
38 volatile ushort rout; 38 u16 rout;
39 volatile ushort rmax; 39 u16 rmax;
40 40
41 volatile ushort tlow; 41 u16 tlow;
42 volatile ushort rlow; 42 u16 rlow;
43 volatile ushort rhigh; 43 u16 rhigh;
44 volatile ushort incr; 44 u16 incr;
45 45
46 volatile ushort etime; 46 u16 etime;
47 volatile ushort edelay; 47 u16 edelay;
48 volatile unchar *dev; 48 unchar *dev;
49 49
50 volatile ushort iflag; 50 u16 iflag;
51 volatile ushort oflag; 51 u16 oflag;
52 volatile ushort cflag; 52 u16 cflag;
53 volatile ushort gmask; 53 u16 gmask;
54 54
55 volatile ushort col; 55 u16 col;
56 volatile ushort delay; 56 u16 delay;
57 volatile ushort imask; 57 u16 imask;
58 volatile ushort tflush; 58 u16 tflush;
59 59
60 int filler3; 60 u32 filler3;
61 int filler4; 61 u32 filler4;
62 int filler5; 62 u32 filler5;
63 int filler6; 63 u32 filler6;
64 64
65 volatile unchar num; 65 u8 num;
66 volatile unchar ract; 66 u8 ract;
67 volatile unchar bstat; 67 u8 bstat;
68 volatile unchar tbusy; 68 u8 tbusy;
69 volatile unchar iempty; 69 u8 iempty;
70 volatile unchar ilow; 70 u8 ilow;
71 volatile unchar idata; 71 u8 idata;
72 volatile unchar eflag; 72 u8 eflag;
73 73
74 volatile unchar tflag; 74 u8 tflag;
75 volatile unchar rflag; 75 u8 rflag;
76 volatile unchar xmask; 76 u8 xmask;
77 volatile unchar xval; 77 u8 xval;
78 volatile unchar mstat; 78 u8 mstat;
79 volatile unchar mchange; 79 u8 mchange;
80 volatile unchar mint; 80 u8 mint;
81 volatile unchar lstat; 81 u8 lstat;
82 82
83 volatile unchar mtran; 83 u8 mtran;
84 volatile unchar orun; 84 u8 orun;
85 volatile unchar startca; 85 u8 startca;
86 volatile unchar stopca; 86 u8 stopca;
87 volatile unchar startc; 87 u8 startc;
88 volatile unchar stopc; 88 u8 stopc;
89 volatile unchar vnext; 89 u8 vnext;
90 volatile unchar hflow; 90 u8 hflow;
91 91
92 volatile unchar fillc; 92 u8 fillc;
93 volatile unchar ochar; 93 u8 ochar;
94 volatile unchar omask; 94 u8 omask;
95 95
96 unchar filler7; 96 u8 filler7;
97 unchar filler8[28]; 97 u8 filler8[28];
98}; 98};
99 99
100 100
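digiFep1.h lays out structures the epca driver reads from the board's dual-ported memory, so the point of switching from volatile ushort/unchar to the fixed-width u16/u8/u32 kernel types is presumably layout stability across architectures rather than any change in behaviour. As a purely illustrative guard (not in the patch), a compile-time check in the pre-BUILD_BUG_ON style can pin the expected size; global_data is eight 16-bit fields, i.e. 16 bytes.

	/* Fails to compile if struct global_data ever stops being 16 bytes. */
	struct global_data_size_check {
		char layout_ok[sizeof(struct global_data) == 16 ? 1 : -1];
	};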
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 123417e43040..56ace9d5e2ae 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -23,13 +23,6 @@ config DRM_TDFX
23 Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), 23 Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
24 graphics card. If M is selected, the module will be called tdfx. 24 graphics card. If M is selected, the module will be called tdfx.
25 25
26config DRM_GAMMA
27 tristate "3dlabs GMX 2000"
28 depends on DRM && BROKEN
29 help
30 This is the old gamma driver, please tell me if it might actually
31 work.
32
33config DRM_R128 26config DRM_R128
34 tristate "ATI Rage 128" 27 tristate "ATI Rage 128"
35 depends on DRM && PCI 28 depends on DRM && PCI
@@ -82,7 +75,7 @@ endchoice
82 75
83config DRM_MGA 76config DRM_MGA
84 tristate "Matrox g200/g400" 77 tristate "Matrox g200/g400"
85 depends on DRM && AGP 78 depends on DRM
86 help 79 help
87 Choose this option if you have a Matrox G200, G400 or G450 graphics 80 Choose this option if you have a Matrox G200, G400 or G450 graphics
88 card. If M is selected, the module will be called mga. AGP 81 card. If M is selected, the module will be called mga. AGP
@@ -103,3 +96,10 @@ config DRM_VIA
103 Choose this option if you have a Via unichrome or compatible video 96 Choose this option if you have a Via unichrome or compatible video
104 chipset. If M is selected the module will be called via. 97 chipset. If M is selected the module will be called via.
105 98
99config DRM_SAVAGE
100 tristate "Savage video cards"
101 depends on DRM
102 help
103 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
104 chipset. If M is selected the module will be called savage.
105
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index ddd941045b1f..e41060c76226 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -8,16 +8,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
9 drm_sysfs.o 9 drm_sysfs.o
10 10
11gamma-objs := gamma_drv.o gamma_dma.o
12tdfx-objs := tdfx_drv.o 11tdfx-objs := tdfx_drv.o
13r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o 12r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
14mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o 13mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
15i810-objs := i810_drv.o i810_dma.o 14i810-objs := i810_drv.o i810_dma.o
16i830-objs := i830_drv.o i830_dma.o i830_irq.o 15i830-objs := i830_drv.o i830_dma.o i830_irq.o
17i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o 16i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
18radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o 17radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
19ffb-objs := ffb_drv.o ffb_context.o 18ffb-objs := ffb_drv.o ffb_context.o
20sis-objs := sis_drv.o sis_ds.o sis_mm.o 19sis-objs := sis_drv.o sis_ds.o sis_mm.o
20savage-objs := savage_drv.o savage_bci.o savage_state.o
21via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o 21via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
22 22
23ifeq ($(CONFIG_COMPAT),y) 23ifeq ($(CONFIG_COMPAT),y)
@@ -29,7 +29,6 @@ i915-objs += i915_ioc32.o
29endif 29endif
30 30
31obj-$(CONFIG_DRM) += drm.o 31obj-$(CONFIG_DRM) += drm.o
32obj-$(CONFIG_DRM_GAMMA) += gamma.o
33obj-$(CONFIG_DRM_TDFX) += tdfx.o 32obj-$(CONFIG_DRM_TDFX) += tdfx.o
34obj-$(CONFIG_DRM_R128) += r128.o 33obj-$(CONFIG_DRM_R128) += r128.o
35obj-$(CONFIG_DRM_RADEON)+= radeon.o 34obj-$(CONFIG_DRM_RADEON)+= radeon.o
@@ -39,5 +38,7 @@ obj-$(CONFIG_DRM_I830) += i830.o
39obj-$(CONFIG_DRM_I915) += i915.o 38obj-$(CONFIG_DRM_I915) += i915.o
40obj-$(CONFIG_DRM_FFB) += ffb.o 39obj-$(CONFIG_DRM_FFB) += ffb.o
41obj-$(CONFIG_DRM_SIS) += sis.o 40obj-$(CONFIG_DRM_SIS) += sis.o
41obj-$(CONFIG_DRM_SAVAGE)+= savage.o
42obj-$(CONFIG_DRM_VIA) +=via.o 42obj-$(CONFIG_DRM_VIA) +=via.o
43 43
44
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index e8371dd87fbc..fc6598a81acd 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -98,7 +98,7 @@
98#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) 98#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
99 99
100 100
101typedef unsigned long drm_handle_t; 101typedef unsigned int drm_handle_t;
102typedef unsigned int drm_context_t; 102typedef unsigned int drm_context_t;
103typedef unsigned int drm_drawable_t; 103typedef unsigned int drm_drawable_t;
104typedef unsigned int drm_magic_t; 104typedef unsigned int drm_magic_t;
@@ -209,7 +209,8 @@ typedef enum drm_map_type {
209 _DRM_REGISTERS = 1, /**< no caching, no core dump */ 209 _DRM_REGISTERS = 1, /**< no caching, no core dump */
210 _DRM_SHM = 2, /**< shared, cached */ 210 _DRM_SHM = 2, /**< shared, cached */
211 _DRM_AGP = 3, /**< AGP/GART */ 211 _DRM_AGP = 3, /**< AGP/GART */
212 _DRM_SCATTER_GATHER = 4 /**< Scatter/gather memory for PCI DMA */ 212 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
213 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
213} drm_map_type_t; 214} drm_map_type_t;
214 215
215 216
@@ -368,7 +369,8 @@ typedef struct drm_buf_desc {
368 enum { 369 enum {
369 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ 370 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
370 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ 371 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
371 _DRM_SG_BUFFER = 0x04 /**< Scatter/gather memory buffer */ 372 _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
373 _DRM_FB_BUFFER = 0x08 /**< Buffer is in frame buffer */
372 } flags; 374 } flags;
373 unsigned long agp_start; /**< 375 unsigned long agp_start; /**<
374 * Start address of where the AGP buffers are 376 * Start address of where the AGP buffers are
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 5df09cc8c6db..6f98701dfe15 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -53,7 +53,6 @@
53#include <linux/init.h> 53#include <linux/init.h>
54#include <linux/file.h> 54#include <linux/file.h>
55#include <linux/pci.h> 55#include <linux/pci.h>
56#include <linux/version.h>
57#include <linux/jiffies.h> 56#include <linux/jiffies.h>
58#include <linux/smp_lock.h> /* For (un)lock_kernel */ 57#include <linux/smp_lock.h> /* For (un)lock_kernel */
59#include <linux/mm.h> 58#include <linux/mm.h>
@@ -96,6 +95,7 @@
96#define DRIVER_IRQ_SHARED 0x80 95#define DRIVER_IRQ_SHARED 0x80
97#define DRIVER_IRQ_VBL 0x100 96#define DRIVER_IRQ_VBL 0x100
98#define DRIVER_DMA_QUEUE 0x200 97#define DRIVER_DMA_QUEUE 0x200
98#define DRIVER_FB_DMA 0x400
99 99
100/***********************************************************************/ 100/***********************************************************************/
101/** \name Begin the DRM... */ 101/** \name Begin the DRM... */
@@ -160,36 +160,7 @@
160#define pte_unmap(pte) 160#define pte_unmap(pte)
161#endif 161#endif
162 162
163#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
164static inline struct page * vmalloc_to_page(void * vmalloc_addr)
165{
166 unsigned long addr = (unsigned long) vmalloc_addr;
167 struct page *page = NULL;
168 pgd_t *pgd = pgd_offset_k(addr);
169 pmd_t *pmd;
170 pte_t *ptep, pte;
171
172 if (!pgd_none(*pgd)) {
173 pmd = pmd_offset(pgd, addr);
174 if (!pmd_none(*pmd)) {
175 preempt_disable();
176 ptep = pte_offset_map(pmd, addr);
177 pte = *ptep;
178 if (pte_present(pte))
179 page = pte_page(pte);
180 pte_unmap(ptep);
181 preempt_enable();
182 }
183 }
184 return page;
185}
186#endif
187
188#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
189#define DRM_RPR_ARG(vma)
190#else
191#define DRM_RPR_ARG(vma) vma, 163#define DRM_RPR_ARG(vma) vma,
192#endif
193 164
194#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) 165#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
195 166
@@ -474,7 +445,8 @@ typedef struct drm_device_dma {
474 unsigned long byte_count; 445 unsigned long byte_count;
475 enum { 446 enum {
476 _DRM_DMA_USE_AGP = 0x01, 447 _DRM_DMA_USE_AGP = 0x01,
477 _DRM_DMA_USE_SG = 0x02 448 _DRM_DMA_USE_SG = 0x02,
449 _DRM_DMA_USE_FB = 0x04
478 } flags; 450 } flags;
479 451
480} drm_device_dma_t; 452} drm_device_dma_t;
@@ -525,12 +497,19 @@ typedef struct drm_sigdata {
525 drm_hw_lock_t *lock; 497 drm_hw_lock_t *lock;
526} drm_sigdata_t; 498} drm_sigdata_t;
527 499
500typedef struct drm_dma_handle {
501 dma_addr_t busaddr;
502 void *vaddr;
503 size_t size;
504} drm_dma_handle_t;
505
528/** 506/**
529 * Mappings list 507 * Mappings list
530 */ 508 */
531typedef struct drm_map_list { 509typedef struct drm_map_list {
532 struct list_head head; /**< list head */ 510 struct list_head head; /**< list head */
533 drm_map_t *map; /**< mapping */ 511 drm_map_t *map; /**< mapping */
512 unsigned int user_token;
534} drm_map_list_t; 513} drm_map_list_t;
535 514
536typedef drm_map_t drm_local_map_t; 515typedef drm_map_t drm_local_map_t;
@@ -578,7 +557,22 @@ struct drm_driver {
578 int (*kernel_context_switch)(struct drm_device *dev, int old, int new); 557 int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
579 void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock); 558 void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
580 int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence); 559 int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
560
561 /**
562 * Called by \c drm_device_is_agp. Typically used to determine if a
563 * card is really attached to AGP or not.
564 *
565 * \param dev DRM device handle
566 *
567 * \returns
568 * One of three values is returned depending on whether or not the
569 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
570 * (return of 1), or may or may not be AGP (return of 2).
571 */
572 int (*device_is_agp) (struct drm_device * dev);
573
581 /* these have to be filled in */ 574 /* these have to be filled in */
575
582 int (*postinit)(struct drm_device *, unsigned long flags); 576 int (*postinit)(struct drm_device *, unsigned long flags);
583 irqreturn_t (*irq_handler)( DRM_IRQ_ARGS ); 577 irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
584 void (*irq_preinstall)(struct drm_device *dev); 578 void (*irq_preinstall)(struct drm_device *dev);
@@ -722,12 +716,8 @@ typedef struct drm_device {
722 int pci_slot; /**< PCI slot number */ 716 int pci_slot; /**< PCI slot number */
723 int pci_func; /**< PCI function number */ 717 int pci_func; /**< PCI function number */
724#ifdef __alpha__ 718#ifdef __alpha__
725#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
726 struct pci_controler *hose;
727#else
728 struct pci_controller *hose; 719 struct pci_controller *hose;
729#endif 720#endif
730#endif
731 drm_sg_mem_t *sg; /**< Scatter gather memory */ 721 drm_sg_mem_t *sg; /**< Scatter gather memory */
732 unsigned long *ctx_bitmap; /**< context bitmap */ 722 unsigned long *ctx_bitmap; /**< context bitmap */
733 void *dev_private; /**< device private data */ 723 void *dev_private; /**< device private data */
@@ -736,6 +726,7 @@ typedef struct drm_device {
736 726
737 struct drm_driver *driver; 727 struct drm_driver *driver;
738 drm_local_map_t *agp_buffer_map; 728 drm_local_map_t *agp_buffer_map;
729 unsigned int agp_buffer_token;
739 drm_head_t primary; /**< primary screen head */ 730 drm_head_t primary; /**< primary screen head */
740} drm_device_t; 731} drm_device_t;
741 732
@@ -806,7 +797,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
806 drm_device_t *dev); 797 drm_device_t *dev);
807extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev); 798extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
808 799
809extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type); 800extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
810extern int drm_free_agp(DRM_AGP_MEM *handle, int pages); 801extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
811extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start); 802extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
812extern int drm_unbind_agp(DRM_AGP_MEM *handle); 803extern int drm_unbind_agp(DRM_AGP_MEM *handle);
@@ -881,11 +872,19 @@ extern int drm_lock_free(drm_device_t *dev,
881 unsigned int context); 872 unsigned int context);
882 873
883 /* Buffer management support (drm_bufs.h) */ 874 /* Buffer management support (drm_bufs.h) */
875extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
876extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
877extern int drm_addmap(drm_device_t *dev, unsigned int offset,
878 unsigned int size, drm_map_type_t type,
879 drm_map_flags_t flags, drm_local_map_t **map_ptr);
880extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
881 unsigned int cmd, unsigned long arg);
882extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
883extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
884extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
885 unsigned int cmd, unsigned long arg);
886
884extern int drm_order( unsigned long size ); 887extern int drm_order( unsigned long size );
885extern int drm_addmap( struct inode *inode, struct file *filp,
886 unsigned int cmd, unsigned long arg );
887extern int drm_rmmap( struct inode *inode, struct file *filp,
888 unsigned int cmd, unsigned long arg );
889extern int drm_addbufs( struct inode *inode, struct file *filp, 888extern int drm_addbufs( struct inode *inode, struct file *filp,
890 unsigned int cmd, unsigned long arg ); 889 unsigned int cmd, unsigned long arg );
891extern int drm_infobufs( struct inode *inode, struct file *filp, 890extern int drm_infobufs( struct inode *inode, struct file *filp,
@@ -896,6 +895,10 @@ extern int drm_freebufs( struct inode *inode, struct file *filp,
896 unsigned int cmd, unsigned long arg ); 895 unsigned int cmd, unsigned long arg );
897extern int drm_mapbufs( struct inode *inode, struct file *filp, 896extern int drm_mapbufs( struct inode *inode, struct file *filp,
898 unsigned int cmd, unsigned long arg ); 897 unsigned int cmd, unsigned long arg );
898extern unsigned long drm_get_resource_start(drm_device_t *dev,
899 unsigned int resource);
900extern unsigned long drm_get_resource_len(drm_device_t *dev,
901 unsigned int resource);
899 902
900 /* DMA support (drm_dma.h) */ 903 /* DMA support (drm_dma.h) */
901extern int drm_dma_setup(drm_device_t *dev); 904extern int drm_dma_setup(drm_device_t *dev);
@@ -919,15 +922,18 @@ extern void drm_vbl_send_signals( drm_device_t *dev );
919 922
920 /* AGP/GART support (drm_agpsupport.h) */ 923 /* AGP/GART support (drm_agpsupport.h) */
921extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); 924extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
922extern int drm_agp_acquire(struct inode *inode, struct file *filp, 925extern int drm_agp_acquire(drm_device_t * dev);
923 unsigned int cmd, unsigned long arg); 926extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
924extern void drm_agp_do_release(drm_device_t *dev); 927 unsigned int cmd, unsigned long arg);
925extern int drm_agp_release(struct inode *inode, struct file *filp, 928extern int drm_agp_release(drm_device_t *dev);
926 unsigned int cmd, unsigned long arg); 929extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
927extern int drm_agp_enable(struct inode *inode, struct file *filp, 930 unsigned int cmd, unsigned long arg);
928 unsigned int cmd, unsigned long arg); 931extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
929extern int drm_agp_info(struct inode *inode, struct file *filp, 932extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
930 unsigned int cmd, unsigned long arg); 933 unsigned int cmd, unsigned long arg);
934extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
935extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
936 unsigned int cmd, unsigned long arg);
931extern int drm_agp_alloc(struct inode *inode, struct file *filp, 937extern int drm_agp_alloc(struct inode *inode, struct file *filp,
932 unsigned int cmd, unsigned long arg); 938 unsigned int cmd, unsigned long arg);
933extern int drm_agp_free(struct inode *inode, struct file *filp, 939extern int drm_agp_free(struct inode *inode, struct file *filp,
@@ -976,12 +982,10 @@ extern int drm_ati_pcigart_cleanup(drm_device_t *dev,
976 unsigned long addr, 982 unsigned long addr,
977 dma_addr_t bus_addr); 983 dma_addr_t bus_addr);
978 984
979extern void *drm_pci_alloc(drm_device_t * dev, size_t size, 985extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
980 size_t align, dma_addr_t maxaddr, 986 size_t align, dma_addr_t maxaddr);
981 dma_addr_t * busaddr); 987extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
982 988extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
983extern void drm_pci_free(drm_device_t * dev, size_t size,
984 void *vaddr, dma_addr_t busaddr);
985 989
986 /* sysfs support (drm_sysfs.c) */ 990 /* sysfs support (drm_sysfs.c) */
987struct drm_sysfs_class; 991struct drm_sysfs_class;
@@ -1012,17 +1016,26 @@ static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_devi
1012 drm_ioremapfree( map->handle, map->size, dev ); 1016 drm_ioremapfree( map->handle, map->size, dev );
1013} 1017}
1014 1018
1015static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset) 1019static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
1016{ 1020{
1017 struct list_head *_list; 1021 drm_map_list_t *_entry;
1018 list_for_each( _list, &dev->maplist->head ) { 1022 list_for_each_entry(_entry, &dev->maplist->head, head)
1019 drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head ); 1023 if (_entry->user_token == token)
1020 if ( _entry->map &&
1021 _entry->map->offset == offset ) {
1022 return _entry->map; 1024 return _entry->map;
1025 return NULL;
1026}
1027
1028static __inline__ int drm_device_is_agp(drm_device_t *dev)
1029{
1030 if ( dev->driver->device_is_agp != NULL ) {
1031 int err = (*dev->driver->device_is_agp)( dev );
1032
1033 if (err != 2) {
1034 return err;
1023 } 1035 }
1024 } 1036 }
1025 return NULL; 1037
1038 return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1026} 1039}
1027 1040
1028static __inline__ void drm_core_dropmap(struct drm_map *map) 1041static __inline__ void drm_core_dropmap(struct drm_map *map)
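Among the drmP.h changes above, drm_pci_alloc() and drm_pci_free() now traffic in the new drm_dma_handle_t instead of a bare vaddr/busaddr pair. The driver-side sketch below shows the new calling convention; it is illustrative only, ring_base is a made-up destination for the bus address, and 0xffffffff stands in for whatever DMA mask the driver actually needs.

	drm_dma_handle_t *dmah;
	dma_addr_t ring_base;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
	if (!dmah)
		return -ENOMEM;

	memset(dmah->vaddr, 0, dmah->size);	/* CPU view of the buffer    */
	ring_base = dmah->busaddr;		/* bus address for the device */

	/* ... later, on teardown ... */
	drm_pci_free(dev, dmah);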
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 8d94c0b5fa44..8c215adcb4b2 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -37,7 +37,7 @@
37#if __OS_HAS_AGP 37#if __OS_HAS_AGP
38 38
39/** 39/**
40 * AGP information ioctl. 40 * Get AGP information.
41 * 41 *
42 * \param inode device inode. 42 * \param inode device inode.
43 * \param filp file pointer. 43 * \param filp file pointer.
@@ -48,51 +48,56 @@
48 * Verifies the AGP device has been initialized and acquired and fills in the 48 * Verifies the AGP device has been initialized and acquired and fills in the
49 * drm_agp_info structure with the information in drm_agp_head::agp_info. 49 * drm_agp_info structure with the information in drm_agp_head::agp_info.
50 */ 50 */
51int drm_agp_info(struct inode *inode, struct file *filp, 51int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
52 unsigned int cmd, unsigned long arg)
53{ 52{
54 drm_file_t *priv = filp->private_data;
55 drm_device_t *dev = priv->head->dev;
56 DRM_AGP_KERN *kern; 53 DRM_AGP_KERN *kern;
57 drm_agp_info_t info;
58 54
59 if (!dev->agp || !dev->agp->acquired) 55 if (!dev->agp || !dev->agp->acquired)
60 return -EINVAL; 56 return -EINVAL;
61 57
62 kern = &dev->agp->agp_info; 58 kern = &dev->agp->agp_info;
63 info.agp_version_major = kern->version.major; 59 info->agp_version_major = kern->version.major;
64 info.agp_version_minor = kern->version.minor; 60 info->agp_version_minor = kern->version.minor;
65 info.mode = kern->mode; 61 info->mode = kern->mode;
66 info.aperture_base = kern->aper_base; 62 info->aperture_base = kern->aper_base;
67 info.aperture_size = kern->aper_size * 1024 * 1024; 63 info->aperture_size = kern->aper_size * 1024 * 1024;
68 info.memory_allowed = kern->max_memory << PAGE_SHIFT; 64 info->memory_allowed = kern->max_memory << PAGE_SHIFT;
69 info.memory_used = kern->current_memory << PAGE_SHIFT; 65 info->memory_used = kern->current_memory << PAGE_SHIFT;
70 info.id_vendor = kern->device->vendor; 66 info->id_vendor = kern->device->vendor;
71 info.id_device = kern->device->device; 67 info->id_device = kern->device->device;
72 68
73 if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info))) 69 return 0;
70}
71EXPORT_SYMBOL(drm_agp_info);
72
73int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
74 unsigned int cmd, unsigned long arg)
75{
76 drm_file_t *priv = filp->private_data;
77 drm_device_t *dev = priv->head->dev;
78 drm_agp_info_t info;
79 int err;
80
81 err = drm_agp_info(dev, &info);
82 if (err)
83 return err;
84
85 if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
74 return -EFAULT; 86 return -EFAULT;
75 return 0; 87 return 0;
76} 88}
77 89
78/** 90/**
79 * Acquire the AGP device (ioctl). 91 * Acquire the AGP device.
80 * 92 *
81 * \param inode device inode. 93 * \param dev DRM device that is to acquire AGP
82 * \param filp file pointer.
83 * \param cmd command.
84 * \param arg user argument.
85 * \return zero on success or a negative number on failure. 94 * \return zero on success or a negative number on failure.
86 * 95 *
87 * Verifies the AGP device hasn't been acquired before and calls 96 * Verifies the AGP device hasn't been acquired before and calls
88 * agp_acquire(). 97 * \c agp_backend_acquire.
89 */ 98 */
90int drm_agp_acquire(struct inode *inode, struct file *filp, 99int drm_agp_acquire(drm_device_t *dev)
91 unsigned int cmd, unsigned long arg)
92{ 100{
93 drm_file_t *priv = filp->private_data;
94 drm_device_t *dev = priv->head->dev;
95
96 if (!dev->agp) 101 if (!dev->agp)
97 return -ENODEV; 102 return -ENODEV;
98 if (dev->agp->acquired) 103 if (dev->agp->acquired)
@@ -102,9 +107,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
102 dev->agp->acquired = 1; 107 dev->agp->acquired = 1;
103 return 0; 108 return 0;
104} 109}
110EXPORT_SYMBOL(drm_agp_acquire);
105 111
106/** 112/**
107 * Release the AGP device (ioctl). 113 * Acquire the AGP device (ioctl).
108 * 114 *
109 * \param inode device inode. 115 * \param inode device inode.
110 * \param filp file pointer. 116 * \param filp file pointer.
@@ -112,63 +118,80 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
112 * \param arg user argument. 118 * \param arg user argument.
113 * \return zero on success or a negative number on failure. 119 * \return zero on success or a negative number on failure.
114 * 120 *
115 * Verifies the AGP device has been acquired and calls agp_backend_release(). 121 * Verifies the AGP device hasn't been acquired before and calls
122 * \c agp_backend_acquire.
116 */ 123 */
117int drm_agp_release(struct inode *inode, struct file *filp, 124int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
118 unsigned int cmd, unsigned long arg) 125 unsigned int cmd, unsigned long arg)
119{ 126{
120 drm_file_t *priv = filp->private_data; 127 drm_file_t *priv = filp->private_data;
121 drm_device_t *dev = priv->head->dev; 128
129 return drm_agp_acquire( (drm_device_t *) priv->head->dev );
130}
122 131
132/**
133 * Release the AGP device.
134 *
135 * \param dev DRM device that is to release AGP
136 * \return zero on success or a negative number on failure.
137 *
138 * Verifies the AGP device has been acquired and calls \c agp_backend_release.
139 */
140int drm_agp_release(drm_device_t *dev)
141{
123 if (!dev->agp || !dev->agp->acquired) 142 if (!dev->agp || !dev->agp->acquired)
124 return -EINVAL; 143 return -EINVAL;
125 agp_backend_release(dev->agp->bridge); 144 agp_backend_release(dev->agp->bridge);
126 dev->agp->acquired = 0; 145 dev->agp->acquired = 0;
127 return 0; 146 return 0;
128
129} 147}
148EXPORT_SYMBOL(drm_agp_release);
130 149
131/** 150int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
132 * Release the AGP device. 151 unsigned int cmd, unsigned long arg)
133 *
134 * Calls agp_backend_release().
135 */
136void drm_agp_do_release(drm_device_t *dev)
137{ 152{
138 agp_backend_release(dev->agp->bridge); 153 drm_file_t *priv = filp->private_data;
154 drm_device_t *dev = priv->head->dev;
155
156 return drm_agp_release(dev);
139} 157}
140 158
141/** 159/**
142 * Enable the AGP bus. 160 * Enable the AGP bus.
143 * 161 *
144 * \param inode device inode. 162 * \param dev DRM device that has previously acquired AGP.
145 * \param filp file pointer. 163 * \param mode Requested AGP mode.
146 * \param cmd command.
147 * \param arg pointer to a drm_agp_mode structure.
148 * \return zero on success or a negative number on failure. 164 * \return zero on success or a negative number on failure.
149 * 165 *
150 * Verifies the AGP device has been acquired but not enabled, and calls 166 * Verifies the AGP device has been acquired but not enabled, and calls
151 * agp_enable(). 167 * \c agp_enable.
152 */ 168 */
153int drm_agp_enable(struct inode *inode, struct file *filp, 169int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
154 unsigned int cmd, unsigned long arg)
155{ 170{
156 drm_file_t *priv = filp->private_data;
157 drm_device_t *dev = priv->head->dev;
158 drm_agp_mode_t mode;
159
160 if (!dev->agp || !dev->agp->acquired) 171 if (!dev->agp || !dev->agp->acquired)
161 return -EINVAL; 172 return -EINVAL;
162 173
163 if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
164 return -EFAULT;
165
166 dev->agp->mode = mode.mode; 174 dev->agp->mode = mode.mode;
167 agp_enable(dev->agp->bridge, mode.mode); 175 agp_enable(dev->agp->bridge, mode.mode);
168 dev->agp->base = dev->agp->agp_info.aper_base; 176 dev->agp->base = dev->agp->agp_info.aper_base;
169 dev->agp->enabled = 1; 177 dev->agp->enabled = 1;
170 return 0; 178 return 0;
171} 179}
180EXPORT_SYMBOL(drm_agp_enable);
181
182int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
183 unsigned int cmd, unsigned long arg)
184{
185 drm_file_t *priv = filp->private_data;
186 drm_device_t *dev = priv->head->dev;
187 drm_agp_mode_t mode;
188
189
190 if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
191 return -EFAULT;
192
193 return drm_agp_enable(dev, mode);
194}
172 195
173/** 196/**
174 * Allocate AGP memory. 197 * Allocate AGP memory.
@@ -206,7 +229,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
206 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; 229 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
207 type = (u32) request.type; 230 type = (u32) request.type;
208 231
209 if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) { 232 if (!(memory = drm_alloc_agp(dev, pages, type))) {
210 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 233 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
211 return -ENOMEM; 234 return -ENOMEM;
212 } 235 }
@@ -403,13 +426,8 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
403 return NULL; 426 return NULL;
404 } 427 }
405 head->memory = NULL; 428 head->memory = NULL;
406#if LINUX_VERSION_CODE <= 0x020408
407 head->cant_use_aperture = 0;
408 head->page_mask = ~(0xfff);
409#else
410 head->cant_use_aperture = head->agp_info.cant_use_aperture; 429 head->cant_use_aperture = head->agp_info.cant_use_aperture;
411 head->page_mask = head->agp_info.page_mask; 430 head->page_mask = head->agp_info.page_mask;
412#endif
413 431
414 return head; 432 return head;
415} 433}
@@ -436,6 +454,7 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
436 return -EINVAL; 454 return -EINVAL;
437 return agp_bind_memory(handle, start); 455 return agp_bind_memory(handle, start);
438} 456}
457EXPORT_SYMBOL(drm_agp_bind_memory);
439 458
440/** Calls agp_unbind_memory() */ 459/** Calls agp_unbind_memory() */
441int drm_agp_unbind_memory(DRM_AGP_MEM *handle) 460int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
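The pattern running through the drm_agpsupport.c hunks is a split of each ioctl handler into an exported in-kernel function plus a thin *_ioctl wrapper that only does the user copies. The sketch below is illustrative, not part of the patch; it shows what the new EXPORT_SYMBOL()ed entry points let a DRM driver do directly from kernel code, with mode_bits as a placeholder for whatever AGP mode the driver wants to request.

	drm_agp_mode_t mode;
	int err;

	err = drm_agp_acquire(dev);		/* was only reachable via ioctl before */
	if (err)
		return err;

	mode.mode = mode_bits;			/* placeholder value */
	err = drm_agp_enable(dev, mode);
	if (err) {
		drm_agp_release(dev);
		return err;
	}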
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4c6191d231b8..f28e70ae6606 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,37 +36,69 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include "drmP.h" 37#include "drmP.h"
38 38
39/** 39unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
40 * Compute size order. Returns the exponent of the smaller power of two which
41 * is greater or equal to given number.
42 *
43 * \param size size.
44 * \return order.
45 *
46 * \todo Can be made faster.
47 */
48int drm_order( unsigned long size )
49{ 40{
50 int order; 41 return pci_resource_start(dev->pdev, resource);
51 unsigned long tmp; 42}
43EXPORT_SYMBOL(drm_get_resource_start);
52 44
53 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) 45unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
54 ; 46{
47 return pci_resource_len(dev->pdev, resource);
48}
49EXPORT_SYMBOL(drm_get_resource_len);
55 50
56 if (size & (size - 1)) 51static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
57 ++order; 52 drm_local_map_t *map)
53{
54 struct list_head *list;
58 55
59 return order; 56 list_for_each(list, &dev->maplist->head) {
57 drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
58 if (entry->map && map->type == entry->map->type &&
59 entry->map->offset == map->offset) {
60 return entry;
61 }
62 }
63
64 return NULL;
60} 65}
61EXPORT_SYMBOL(drm_order);
62 66
63#ifdef CONFIG_COMPAT
64/* 67/*
65 * Used to allocate 32-bit handles for _DRM_SHM regions 68 * Used to allocate 32-bit handles for mappings.
66 * The 0x10000000 value is chosen to be out of the way of
67 * FB/register and GART physical addresses.
68 */ 69 */
69static unsigned int map32_handle = 0x10000000; 70#define START_RANGE 0x10000000
71#define END_RANGE 0x40000000
72
73#ifdef _LP64
74static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
75{
76 static unsigned int map32_handle = START_RANGE;
77 unsigned int hash;
78
79 if (lhandle & 0xffffffff00000000) {
80 hash = map32_handle;
81 map32_handle += PAGE_SIZE;
82 if (map32_handle > END_RANGE)
83 map32_handle = START_RANGE;
84 } else
85 hash = lhandle;
86
87 while (1) {
88 drm_map_list_t *_entry;
89 list_for_each_entry(_entry, &dev->maplist->head,head) {
90 if (_entry->user_token == hash)
91 break;
92 }
93 if (&_entry->head == &dev->maplist->head)
94 return hash;
95
96 hash += PAGE_SIZE;
97 map32_handle += PAGE_SIZE;
98 }
99}
100#else
101# define HandleID(x,dev) (unsigned int)(x)
70#endif 102#endif
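
HandleID() above hands user space a 32-bit token for each map: addresses that already fit in 32 bits are used as-is, while 64-bit kernel addresses get a PAGE_SIZE-spaced value between START_RANGE and END_RANGE, skipping tokens already present on dev->maplist. A simplified standalone sketch of that allocation; the array stands in for the map list walk, and only the range constants are taken from the hunk:

/* Simplified sketch of the 32-bit token allocation done by HandleID(). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ     4096UL
#define START_RANGE 0x10000000U
#define END_RANGE   0x40000000U

static uint32_t taken[8];
static int ntaken;

static int token_in_use(uint32_t t)
{
    for (int i = 0; i < ntaken; i++)
        if (taken[i] == t)
            return 1;
    return 0;
}

static uint32_t handle_id(uint64_t lhandle)
{
    static uint32_t next = START_RANGE;
    uint32_t hash;

    if (lhandle >> 32) {                /* address does not fit in 32 bits */
        hash = next;
        next += PAGE_SZ;
        if (next > END_RANGE)
            next = START_RANGE;
    } else {
        hash = (uint32_t)lhandle;
    }
    while (token_in_use(hash))          /* skip tokens already handed out */
        hash += PAGE_SZ;
    taken[ntaken++] = hash;
    return hash;
}

int main(void)
{
    printf("0x%x\n", handle_id(0xffff8800deadb000ULL)); /* 0x10000000 */
    printf("0x%x\n", handle_id(0xffff8800cafeb000ULL)); /* 0x10001000 */
    printf("0x%x\n", handle_id(0x00000000000a3000ULL)); /* 0xa3000    */
    return 0;
}
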
71 103
72/** 104/**
@@ -82,25 +114,22 @@ static unsigned int map32_handle = 0x10000000;
82 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where 114 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
83 * applicable and if supported by the kernel. 115 * applicable and if supported by the kernel.
84 */ 116 */
85int drm_addmap( struct inode *inode, struct file *filp, 117int drm_addmap_core(drm_device_t * dev, unsigned int offset,
86 unsigned int cmd, unsigned long arg ) 118 unsigned int size, drm_map_type_t type,
119 drm_map_flags_t flags, drm_map_list_t **maplist)
87{ 120{
88 drm_file_t *priv = filp->private_data;
89 drm_device_t *dev = priv->head->dev;
90 drm_map_t *map; 121 drm_map_t *map;
91 drm_map_t __user *argp = (void __user *)arg;
92 drm_map_list_t *list; 122 drm_map_list_t *list;
93 123 drm_dma_handle_t *dmah;
94 if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
95 124
96 map = drm_alloc( sizeof(*map), DRM_MEM_MAPS ); 125 map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
97 if ( !map ) 126 if ( !map )
98 return -ENOMEM; 127 return -ENOMEM;
99 128
100 if ( copy_from_user( map, argp, sizeof(*map) ) ) { 129 map->offset = offset;
101 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 130 map->size = size;
102 return -EFAULT; 131 map->flags = flags;
103 } 132 map->type = type;
104 133
105 /* Only allow shared memory to be removable since we only keep enough 134 /* Only allow shared memory to be removable since we only keep enough
106 * book keeping information about shared memory to allow for removal 135 * book keeping information about shared memory to allow for removal
@@ -122,7 +151,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
122 switch ( map->type ) { 151 switch ( map->type ) {
123 case _DRM_REGISTERS: 152 case _DRM_REGISTERS:
124 case _DRM_FRAME_BUFFER: 153 case _DRM_FRAME_BUFFER:
125#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) 154#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
126 if ( map->offset + map->size < map->offset || 155 if ( map->offset + map->size < map->offset ||
127 map->offset < virt_to_phys(high_memory) ) { 156 map->offset < virt_to_phys(high_memory) ) {
128 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 157 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@@ -132,6 +161,24 @@ int drm_addmap( struct inode *inode, struct file *filp,
132#ifdef __alpha__ 161#ifdef __alpha__
133 map->offset += dev->hose->mem_space->start; 162 map->offset += dev->hose->mem_space->start;
134#endif 163#endif
164 /* Some drivers preinitialize some maps, without the X Server
165 * needing to be aware of it. Therefore, we just return success
166 * when the server tries to create a duplicate map.
167 */
168 list = drm_find_matching_map(dev, map);
169 if (list != NULL) {
170 if (list->map->size != map->size) {
171 DRM_DEBUG("Matching maps of type %d with "
172 "mismatched sizes, (%ld vs %ld)\n",
173 map->type, map->size, list->map->size);
174 list->map->size = map->size;
175 }
176
177 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
178 *maplist = list;
179 return 0;
180 }
181
135 if (drm_core_has_MTRR(dev)) { 182 if (drm_core_has_MTRR(dev)) {
136 if ( map->type == _DRM_FRAME_BUFFER || 183 if ( map->type == _DRM_FRAME_BUFFER ||
137 (map->flags & _DRM_WRITE_COMBINING) ) { 184 (map->flags & _DRM_WRITE_COMBINING) ) {
@@ -178,9 +225,22 @@ int drm_addmap( struct inode *inode, struct file *filp,
178 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 225 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
179 return -EINVAL; 226 return -EINVAL;
180 } 227 }
181 map->offset += dev->sg->handle; 228 map->offset += (unsigned long)dev->sg->virtual;
229 break;
230 case _DRM_CONSISTENT:
231 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
232 * As we're limiting the address to 2^32-1 (or less),
233 * casting it down to 32 bits is no problem, but we
234 * need to point to a 64bit variable first. */
235 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
236 if (!dmah) {
237 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
238 return -ENOMEM;
239 }
240 map->handle = dmah->vaddr;
241 map->offset = (unsigned long)dmah->busaddr;
242 kfree(dmah);
182 break; 243 break;
183
184 default: 244 default:
185 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 245 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
186 return -EINVAL; 246 return -EINVAL;
@@ -196,17 +256,57 @@ int drm_addmap( struct inode *inode, struct file *filp,
196 256
197 down(&dev->struct_sem); 257 down(&dev->struct_sem);
198 list_add(&list->head, &dev->maplist->head); 258 list_add(&list->head, &dev->maplist->head);
199#ifdef CONFIG_COMPAT 259 /* Assign a 32-bit handle */
200 /* Assign a 32-bit handle for _DRM_SHM mappings */
201 /* We do it here so that dev->struct_sem protects the increment */ 260 /* We do it here so that dev->struct_sem protects the increment */
202 if (map->type == _DRM_SHM) 261 list->user_token = HandleID(map->type==_DRM_SHM
203 map->offset = map32_handle += PAGE_SIZE; 262 ? (unsigned long)map->handle
204#endif 263 : map->offset, dev);
205 up(&dev->struct_sem); 264 up(&dev->struct_sem);
206 265
207 if ( copy_to_user( argp, map, sizeof(*map) ) ) 266 *maplist = list;
267 return 0;
268}
269
270int drm_addmap(drm_device_t *dev, unsigned int offset,
271 unsigned int size, drm_map_type_t type,
272 drm_map_flags_t flags, drm_local_map_t **map_ptr)
273{
274 drm_map_list_t *list;
275 int rc;
276
277 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
278 if (!rc)
279 *map_ptr = list->map;
280 return rc;
281}
282EXPORT_SYMBOL(drm_addmap);
283
284int drm_addmap_ioctl(struct inode *inode, struct file *filp,
285 unsigned int cmd, unsigned long arg)
286{
287 drm_file_t *priv = filp->private_data;
288 drm_device_t *dev = priv->head->dev;
289 drm_map_t map;
290 drm_map_list_t *maplist;
291 drm_map_t __user *argp = (void __user *)arg;
292 int err;
293
294 if (!(filp->f_mode & 3))
295 return -EACCES; /* Require read/write */
296
297 if (copy_from_user(& map, argp, sizeof(map))) {
208 return -EFAULT; 298 return -EFAULT;
209 if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset))) 299 }
300
301 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
302 &maplist);
303
304 if (err)
305 return err;
306
307 if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
308 return -EFAULT;
309 if (put_user(maplist->user_token, &argp->handle))
210 return -EFAULT; 310 return -EFAULT;
211 return 0; 311 return 0;
212} 312}
@@ -226,81 +326,138 @@ int drm_addmap( struct inode *inode, struct file *filp,
 226 * it's being used, and free any associated resources (such as MTRRs) if it's not 326 * it's being used, and free any associated resources (such as MTRRs) if it's not
 227 * in use. 327 * in use.
228 * 328 *
229 * \sa addmap(). 329 * \sa drm_addmap
230 */ 330 */
231int drm_rmmap(struct inode *inode, struct file *filp, 331int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
232 unsigned int cmd, unsigned long arg)
233{ 332{
234 drm_file_t *priv = filp->private_data;
235 drm_device_t *dev = priv->head->dev;
236 struct list_head *list; 333 struct list_head *list;
237 drm_map_list_t *r_list = NULL; 334 drm_map_list_t *r_list = NULL;
238 drm_vma_entry_t *pt, *prev; 335 drm_dma_handle_t dmah;
239 drm_map_t *map; 336
337 /* Find the list entry for the map and remove it */
338 list_for_each(list, &dev->maplist->head) {
339 r_list = list_entry(list, drm_map_list_t, head);
340
341 if (r_list->map == map) {
342 list_del(list);
343 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
344 break;
345 }
346 }
347
348 /* List has wrapped around to the head pointer, or it's empty and we
349 * didn't find anything.
350 */
351 if (list == (&dev->maplist->head)) {
352 return -EINVAL;
353 }
354
355 switch (map->type) {
356 case _DRM_REGISTERS:
357 drm_ioremapfree(map->handle, map->size, dev);
358 /* FALLTHROUGH */
359 case _DRM_FRAME_BUFFER:
360 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
361 int retcode;
362 retcode = mtrr_del(map->mtrr, map->offset,
363 map->size);
364 DRM_DEBUG ("mtrr_del=%d\n", retcode);
365 }
366 break;
367 case _DRM_SHM:
368 vfree(map->handle);
369 break;
370 case _DRM_AGP:
371 case _DRM_SCATTER_GATHER:
372 break;
373 case _DRM_CONSISTENT:
374 dmah.vaddr = map->handle;
375 dmah.busaddr = map->offset;
376 dmah.size = map->size;
377 __drm_pci_free(dev, &dmah);
378 break;
379 }
380 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
381
382 return 0;
383}
384EXPORT_SYMBOL(drm_rmmap_locked);
385
386int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
387{
388 int ret;
389
390 down(&dev->struct_sem);
391 ret = drm_rmmap_locked(dev, map);
392 up(&dev->struct_sem);
393
394 return ret;
395}
396EXPORT_SYMBOL(drm_rmmap);
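
drm_rmmap() is now only a locking wrapper around drm_rmmap_locked(), so paths that already hold dev->struct_sem (the reworked drm_takedown() later in this patch, for instance) can tear maps down without taking the semaphore twice. A small user-space sketch of the _locked/unlocked convention, using a pthread mutex in place of the semaphore and a counter in place of the map list:

/* Sketch of the _locked / unlocked pairing used for drm_rmmap(). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_sem = PTHREAD_MUTEX_INITIALIZER;
static int nmaps = 3;

/* Caller must already hold struct_sem. */
static int rmmap_locked(void)
{
    if (nmaps == 0)
        return -1;
    nmaps--;
    return 0;
}

/* Convenience wrapper that takes the lock itself. */
static int rmmap(void)
{
    int ret;

    pthread_mutex_lock(&struct_sem);
    ret = rmmap_locked();
    pthread_mutex_unlock(&struct_sem);
    return ret;
}

int main(void)
{
    rmmap();                             /* ordinary caller */

    pthread_mutex_lock(&struct_sem);     /* teardown path already holds it */
    while (rmmap_locked() == 0)
        ;
    pthread_mutex_unlock(&struct_sem);

    printf("maps left: %d\n", nmaps);
    return 0;
}
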
397
398/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
399 * the last close of the device, and this is necessary for cleanup when things
400 * exit uncleanly. Therefore, having userland manually remove mappings seems
401 * like a pointless exercise since they're going away anyway.
402 *
 403 * One use case might arise if addmap were allowed for normal users for SHM and
 404 * the maps got used by drivers that the server doesn't need to care about. This
 405 * seems unlikely.
406 */
407int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
408 unsigned int cmd, unsigned long arg)
409{
410 drm_file_t *priv = filp->private_data;
411 drm_device_t *dev = priv->head->dev;
240 drm_map_t request; 412 drm_map_t request;
241 int found_maps = 0; 413 drm_local_map_t *map = NULL;
414 struct list_head *list;
415 int ret;
242 416
243 if (copy_from_user(&request, (drm_map_t __user *)arg, 417 if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
244 sizeof(request))) {
245 return -EFAULT; 418 return -EFAULT;
246 } 419 }
247 420
248 down(&dev->struct_sem); 421 down(&dev->struct_sem);
249 list = &dev->maplist->head;
250 list_for_each(list, &dev->maplist->head) { 422 list_for_each(list, &dev->maplist->head) {
251 r_list = list_entry(list, drm_map_list_t, head); 423 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
252 424
253 if(r_list->map && 425 if (r_list->map &&
254 r_list->map->offset == (unsigned long) request.handle && 426 r_list->user_token == (unsigned long) request.handle &&
255 r_list->map->flags & _DRM_REMOVABLE) break; 427 r_list->map->flags & _DRM_REMOVABLE) {
428 map = r_list->map;
429 break;
430 }
256 } 431 }
257 432
 258 /* List has wrapped around to the head pointer, or it's empty and we didn't 433
259 * find anything. 434 * find anything.
260 */ 435 */
261 if(list == (&dev->maplist->head)) { 436 if (list == (&dev->maplist->head)) {
262 up(&dev->struct_sem); 437 up(&dev->struct_sem);
263 return -EINVAL; 438 return -EINVAL;
264 } 439 }
265 map = r_list->map;
266 list_del(list);
267 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
268 440
269 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) { 441 if (!map)
270 if (pt->vma->vm_private_data == map) found_maps++; 442 return -EINVAL;
271 }
272 443
273 if(!found_maps) { 444 /* Register and framebuffer maps are permanent */
274 switch (map->type) { 445 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
275 case _DRM_REGISTERS: 446 up(&dev->struct_sem);
276 case _DRM_FRAME_BUFFER: 447 return 0;
277 if (drm_core_has_MTRR(dev)) {
278 if (map->mtrr >= 0) {
279 int retcode;
280 retcode = mtrr_del(map->mtrr,
281 map->offset,
282 map->size);
283 DRM_DEBUG("mtrr_del = %d\n", retcode);
284 }
285 }
286 drm_ioremapfree(map->handle, map->size, dev);
287 break;
288 case _DRM_SHM:
289 vfree(map->handle);
290 break;
291 case _DRM_AGP:
292 case _DRM_SCATTER_GATHER:
293 break;
294 }
295 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
296 } 448 }
449
450 ret = drm_rmmap_locked(dev, map);
451
297 up(&dev->struct_sem); 452 up(&dev->struct_sem);
298 return 0; 453
454 return ret;
299} 455}
300 456
301/** 457/**
302 * Cleanup after an error on one of the addbufs() functions. 458 * Cleanup after an error on one of the addbufs() functions.
303 * 459 *
460 * \param dev DRM device.
304 * \param entry buffer entry where the error occurred. 461 * \param entry buffer entry where the error occurred.
305 * 462 *
306 * Frees any pages and buffers associated with the given entry. 463 * Frees any pages and buffers associated with the given entry.
@@ -344,25 +501,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
344 501
345#if __OS_HAS_AGP 502#if __OS_HAS_AGP
346/** 503/**
347 * Add AGP buffers for DMA transfers (ioctl). 504 * Add AGP buffers for DMA transfers.
348 * 505 *
349 * \param inode device inode. 506 * \param dev drm_device_t to which the buffers are to be added.
350 * \param filp file pointer. 507 * \param request pointer to a drm_buf_desc_t describing the request.
351 * \param cmd command.
352 * \param arg pointer to a drm_buf_desc_t request.
353 * \return zero on success or a negative number on failure. 508 * \return zero on success or a negative number on failure.
354 * 509 *
355 * After some sanity checks creates a drm_buf structure for each buffer and 510 * After some sanity checks creates a drm_buf structure for each buffer and
356 * reallocates the buffer list of the same size order to accommodate the new 511 * reallocates the buffer list of the same size order to accommodate the new
357 * buffers. 512 * buffers.
358 */ 513 */
359static int drm_addbufs_agp( struct inode *inode, struct file *filp, 514int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
360 unsigned int cmd, unsigned long arg )
361{ 515{
362 drm_file_t *priv = filp->private_data;
363 drm_device_t *dev = priv->head->dev;
364 drm_device_dma_t *dma = dev->dma; 516 drm_device_dma_t *dma = dev->dma;
365 drm_buf_desc_t request;
366 drm_buf_entry_t *entry; 517 drm_buf_entry_t *entry;
367 drm_buf_t *buf; 518 drm_buf_t *buf;
368 unsigned long offset; 519 unsigned long offset;
@@ -376,25 +527,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
376 int byte_count; 527 int byte_count;
377 int i; 528 int i;
378 drm_buf_t **temp_buflist; 529 drm_buf_t **temp_buflist;
379 drm_buf_desc_t __user *argp = (void __user *)arg;
380 530
381 if ( !dma ) return -EINVAL; 531 if ( !dma ) return -EINVAL;
382 532
383 if ( copy_from_user( &request, argp, 533 count = request->count;
384 sizeof(request) ) ) 534 order = drm_order(request->size);
385 return -EFAULT;
386
387 count = request.count;
388 order = drm_order( request.size );
389 size = 1 << order; 535 size = 1 << order;
390 536
391 alignment = (request.flags & _DRM_PAGE_ALIGN) 537 alignment = (request->flags & _DRM_PAGE_ALIGN)
392 ? PAGE_ALIGN(size) : size; 538 ? PAGE_ALIGN(size) : size;
393 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 539 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
394 total = PAGE_SIZE << page_order; 540 total = PAGE_SIZE << page_order;
395 541
396 byte_count = 0; 542 byte_count = 0;
397 agp_offset = dev->agp->base + request.agp_start; 543 agp_offset = dev->agp->base + request->agp_start;
398 544
399 DRM_DEBUG( "count: %d\n", count ); 545 DRM_DEBUG( "count: %d\n", count );
400 DRM_DEBUG( "order: %d\n", order ); 546 DRM_DEBUG( "order: %d\n", order );
@@ -508,26 +654,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
508 654
509 up( &dev->struct_sem ); 655 up( &dev->struct_sem );
510 656
511 request.count = entry->buf_count; 657 request->count = entry->buf_count;
512 request.size = size; 658 request->size = size;
513
514 if ( copy_to_user( argp, &request, sizeof(request) ) )
515 return -EFAULT;
516 659
517 dma->flags = _DRM_DMA_USE_AGP; 660 dma->flags = _DRM_DMA_USE_AGP;
518 661
519 atomic_dec( &dev->buf_alloc ); 662 atomic_dec( &dev->buf_alloc );
520 return 0; 663 return 0;
521} 664}
665EXPORT_SYMBOL(drm_addbufs_agp);
522#endif /* __OS_HAS_AGP */ 666#endif /* __OS_HAS_AGP */
523 667
524static int drm_addbufs_pci( struct inode *inode, struct file *filp, 668int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
525 unsigned int cmd, unsigned long arg )
526{ 669{
527 drm_file_t *priv = filp->private_data;
528 drm_device_t *dev = priv->head->dev;
529 drm_device_dma_t *dma = dev->dma; 670 drm_device_dma_t *dma = dev->dma;
530 drm_buf_desc_t request;
531 int count; 671 int count;
532 int order; 672 int order;
533 int size; 673 int size;
@@ -543,26 +683,22 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
543 int page_count; 683 int page_count;
544 unsigned long *temp_pagelist; 684 unsigned long *temp_pagelist;
545 drm_buf_t **temp_buflist; 685 drm_buf_t **temp_buflist;
546 drm_buf_desc_t __user *argp = (void __user *)arg;
547 686
548 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; 687 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
549 if ( !dma ) return -EINVAL; 688 if ( !dma ) return -EINVAL;
550 689
551 if ( copy_from_user( &request, argp, sizeof(request) ) ) 690 count = request->count;
552 return -EFAULT; 691 order = drm_order(request->size);
553
554 count = request.count;
555 order = drm_order( request.size );
556 size = 1 << order; 692 size = 1 << order;
557 693
558 DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n", 694 DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
559 request.count, request.size, size, 695 request->count, request->size, size,
560 order, dev->queue_count ); 696 order, dev->queue_count );
561 697
562 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL; 698 if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
563 if ( dev->queue_count ) return -EBUSY; /* Not while in use */ 699 if ( dev->queue_count ) return -EBUSY; /* Not while in use */
564 700
565 alignment = (request.flags & _DRM_PAGE_ALIGN) 701 alignment = (request->flags & _DRM_PAGE_ALIGN)
566 ? PAGE_ALIGN(size) : size; 702 ? PAGE_ALIGN(size) : size;
567 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 703 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
568 total = PAGE_SIZE << page_order; 704 total = PAGE_SIZE << page_order;
@@ -740,25 +876,18 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
740 876
741 up( &dev->struct_sem ); 877 up( &dev->struct_sem );
742 878
743 request.count = entry->buf_count; 879 request->count = entry->buf_count;
744 request.size = size; 880 request->size = size;
745
746 if ( copy_to_user( argp, &request, sizeof(request) ) )
747 return -EFAULT;
748 881
749 atomic_dec( &dev->buf_alloc ); 882 atomic_dec( &dev->buf_alloc );
750 return 0; 883 return 0;
751 884
752} 885}
886EXPORT_SYMBOL(drm_addbufs_pci);
753 887
754static int drm_addbufs_sg( struct inode *inode, struct file *filp, 888static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
755 unsigned int cmd, unsigned long arg )
756{ 889{
757 drm_file_t *priv = filp->private_data;
758 drm_device_t *dev = priv->head->dev;
759 drm_device_dma_t *dma = dev->dma; 890 drm_device_dma_t *dma = dev->dma;
760 drm_buf_desc_t __user *argp = (void __user *)arg;
761 drm_buf_desc_t request;
762 drm_buf_entry_t *entry; 891 drm_buf_entry_t *entry;
763 drm_buf_t *buf; 892 drm_buf_t *buf;
764 unsigned long offset; 893 unsigned long offset;
@@ -777,20 +906,17 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
777 906
778 if ( !dma ) return -EINVAL; 907 if ( !dma ) return -EINVAL;
779 908
780 if ( copy_from_user( &request, argp, sizeof(request) ) ) 909 count = request->count;
781 return -EFAULT; 910 order = drm_order(request->size);
782
783 count = request.count;
784 order = drm_order( request.size );
785 size = 1 << order; 911 size = 1 << order;
786 912
787 alignment = (request.flags & _DRM_PAGE_ALIGN) 913 alignment = (request->flags & _DRM_PAGE_ALIGN)
788 ? PAGE_ALIGN(size) : size; 914 ? PAGE_ALIGN(size) : size;
789 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 915 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
790 total = PAGE_SIZE << page_order; 916 total = PAGE_SIZE << page_order;
791 917
792 byte_count = 0; 918 byte_count = 0;
793 agp_offset = request.agp_start; 919 agp_offset = request->agp_start;
794 920
795 DRM_DEBUG( "count: %d\n", count ); 921 DRM_DEBUG( "count: %d\n", count );
796 DRM_DEBUG( "order: %d\n", order ); 922 DRM_DEBUG( "order: %d\n", order );
@@ -848,7 +974,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
848 974
849 buf->offset = (dma->byte_count + offset); 975 buf->offset = (dma->byte_count + offset);
850 buf->bus_address = agp_offset + offset; 976 buf->bus_address = agp_offset + offset;
851 buf->address = (void *)(agp_offset + offset + dev->sg->handle); 977 buf->address = (void *)(agp_offset + offset
978 + (unsigned long)dev->sg->virtual);
852 buf->next = NULL; 979 buf->next = NULL;
853 buf->waiting = 0; 980 buf->waiting = 0;
854 buf->pending = 0; 981 buf->pending = 0;
@@ -905,11 +1032,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
905 1032
906 up( &dev->struct_sem ); 1033 up( &dev->struct_sem );
907 1034
908 request.count = entry->buf_count; 1035 request->count = entry->buf_count;
909 request.size = size; 1036 request->size = size;
910
911 if ( copy_to_user( argp, &request, sizeof(request) ) )
912 return -EFAULT;
913 1037
914 dma->flags = _DRM_DMA_USE_SG; 1038 dma->flags = _DRM_DMA_USE_SG;
915 1039
@@ -917,6 +1041,161 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
917 return 0; 1041 return 0;
918} 1042}
919 1043
1044static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
1045{
1046 drm_device_dma_t *dma = dev->dma;
1047 drm_buf_entry_t *entry;
1048 drm_buf_t *buf;
1049 unsigned long offset;
1050 unsigned long agp_offset;
1051 int count;
1052 int order;
1053 int size;
1054 int alignment;
1055 int page_order;
1056 int total;
1057 int byte_count;
1058 int i;
1059 drm_buf_t **temp_buflist;
1060
1061 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1062 return -EINVAL;
1063
1064 if (!dma)
1065 return -EINVAL;
1066
1067 count = request->count;
1068 order = drm_order(request->size);
1069 size = 1 << order;
1070
1071 alignment = (request->flags & _DRM_PAGE_ALIGN)
1072 ? PAGE_ALIGN(size) : size;
1073 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1074 total = PAGE_SIZE << page_order;
1075
1076 byte_count = 0;
1077 agp_offset = request->agp_start;
1078
1079 DRM_DEBUG("count: %d\n", count);
1080 DRM_DEBUG("order: %d\n", order);
1081 DRM_DEBUG("size: %d\n", size);
1082 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1083 DRM_DEBUG("alignment: %d\n", alignment);
1084 DRM_DEBUG("page_order: %d\n", page_order);
1085 DRM_DEBUG("total: %d\n", total);
1086
1087 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1088 return -EINVAL;
1089 if (dev->queue_count)
1090 return -EBUSY; /* Not while in use */
1091
1092 spin_lock(&dev->count_lock);
1093 if (dev->buf_use) {
1094 spin_unlock(&dev->count_lock);
1095 return -EBUSY;
1096 }
1097 atomic_inc(&dev->buf_alloc);
1098 spin_unlock(&dev->count_lock);
1099
1100 down(&dev->struct_sem);
1101 entry = &dma->bufs[order];
1102 if (entry->buf_count) {
1103 up(&dev->struct_sem);
1104 atomic_dec(&dev->buf_alloc);
1105 return -ENOMEM; /* May only call once for each order */
1106 }
1107
1108 if (count < 0 || count > 4096) {
1109 up(&dev->struct_sem);
1110 atomic_dec(&dev->buf_alloc);
1111 return -EINVAL;
1112 }
1113
1114 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1115 DRM_MEM_BUFS);
1116 if (!entry->buflist) {
1117 up(&dev->struct_sem);
1118 atomic_dec(&dev->buf_alloc);
1119 return -ENOMEM;
1120 }
1121 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1122
1123 entry->buf_size = size;
1124 entry->page_order = page_order;
1125
1126 offset = 0;
1127
1128 while (entry->buf_count < count) {
1129 buf = &entry->buflist[entry->buf_count];
1130 buf->idx = dma->buf_count + entry->buf_count;
1131 buf->total = alignment;
1132 buf->order = order;
1133 buf->used = 0;
1134
1135 buf->offset = (dma->byte_count + offset);
1136 buf->bus_address = agp_offset + offset;
1137 buf->address = (void *)(agp_offset + offset);
1138 buf->next = NULL;
1139 buf->waiting = 0;
1140 buf->pending = 0;
1141 init_waitqueue_head(&buf->dma_wait);
1142 buf->filp = NULL;
1143
1144 buf->dev_priv_size = dev->driver->dev_priv_size;
1145 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1146 if (!buf->dev_private) {
1147 /* Set count correctly so we free the proper amount. */
1148 entry->buf_count = count;
1149 drm_cleanup_buf_error(dev, entry);
1150 up(&dev->struct_sem);
1151 atomic_dec(&dev->buf_alloc);
1152 return -ENOMEM;
1153 }
1154 memset(buf->dev_private, 0, buf->dev_priv_size);
1155
1156 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1157
1158 offset += alignment;
1159 entry->buf_count++;
1160 byte_count += PAGE_SIZE << page_order;
1161 }
1162
1163 DRM_DEBUG("byte_count: %d\n", byte_count);
1164
1165 temp_buflist = drm_realloc(dma->buflist,
1166 dma->buf_count * sizeof(*dma->buflist),
1167 (dma->buf_count + entry->buf_count)
1168 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1169 if (!temp_buflist) {
1170 /* Free the entry because it isn't valid */
1171 drm_cleanup_buf_error(dev, entry);
1172 up(&dev->struct_sem);
1173 atomic_dec(&dev->buf_alloc);
1174 return -ENOMEM;
1175 }
1176 dma->buflist = temp_buflist;
1177
1178 for (i = 0; i < entry->buf_count; i++) {
1179 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1180 }
1181
1182 dma->buf_count += entry->buf_count;
1183 dma->byte_count += byte_count;
1184
1185 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1186 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1187
1188 up(&dev->struct_sem);
1189
1190 request->count = entry->buf_count;
1191 request->size = size;
1192
1193 dma->flags = _DRM_DMA_USE_FB;
1194
1195 atomic_dec(&dev->buf_alloc);
1196 return 0;
1197}
1198
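
drm_addbufs_fb() above sizes its buffers the same way the AGP, PCI and SG variants do: the order comes from drm_order(), size is 1 << order, alignment is optionally rounded up to a page, and each buffer advances offset by alignment while byte_count grows by PAGE_SIZE << page_order. A standalone check of that arithmetic, with PAGE_SHIFT fixed at 12 purely for illustration:

/* Sketch of the buffer-sizing arithmetic shared by the drm_addbufs_* helpers. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    int order = 17;                 /* from drm_order(request->size) */
    int page_align = 1;             /* _DRM_PAGE_ALIGN set in request->flags */
    int count = 4;                  /* request->count */

    unsigned long size = 1UL << order;
    unsigned long alignment = page_align ? PAGE_ALIGN(size) : size;
    int page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
    unsigned long total = PAGE_SIZE << page_order;

    /* Each buffer advances the offset by 'alignment' and accounts for
     * 'total' bytes, exactly as the allocation loop above does. */
    unsigned long offset = 0, byte_count = 0;
    for (int i = 0; i < count; i++) {
        offset += alignment;
        byte_count += total;        /* PAGE_SIZE << page_order */
    }
    printf("size=%lu alignment=%lu page_order=%d total=%lu offset=%lu byte_count=%lu\n",
           size, alignment, page_order, total, offset, byte_count);
    return 0;
}
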
920/** 1199/**
921 * Add buffers for DMA transfers (ioctl). 1200 * Add buffers for DMA transfers (ioctl).
922 * 1201 *
@@ -937,6 +1216,7 @@ int drm_addbufs( struct inode *inode, struct file *filp,
937 drm_buf_desc_t request; 1216 drm_buf_desc_t request;
938 drm_file_t *priv = filp->private_data; 1217 drm_file_t *priv = filp->private_data;
939 drm_device_t *dev = priv->head->dev; 1218 drm_device_t *dev = priv->head->dev;
1219 int ret;
940 1220
941 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1221 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
942 return -EINVAL; 1222 return -EINVAL;
@@ -947,13 +1227,23 @@ int drm_addbufs( struct inode *inode, struct file *filp,
947 1227
948#if __OS_HAS_AGP 1228#if __OS_HAS_AGP
949 if ( request.flags & _DRM_AGP_BUFFER ) 1229 if ( request.flags & _DRM_AGP_BUFFER )
950 return drm_addbufs_agp( inode, filp, cmd, arg ); 1230 ret=drm_addbufs_agp(dev, &request);
951 else 1231 else
952#endif 1232#endif
953 if ( request.flags & _DRM_SG_BUFFER ) 1233 if ( request.flags & _DRM_SG_BUFFER )
954 return drm_addbufs_sg( inode, filp, cmd, arg ); 1234 ret=drm_addbufs_sg(dev, &request);
1235 else if ( request.flags & _DRM_FB_BUFFER)
1236 ret=drm_addbufs_fb(dev, &request);
955 else 1237 else
956 return drm_addbufs_pci( inode, filp, cmd, arg ); 1238 ret=drm_addbufs_pci(dev, &request);
1239
1240 if (ret==0) {
1241 if (copy_to_user((void __user *)arg, &request,
1242 sizeof(request))) {
1243 ret = -EFAULT;
1244 }
1245 }
1246 return ret;
957} 1247}
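
drm_addbufs() now copies the request in once, dispatches on request.flags to the AGP, SG, FB or PCI helper, and copies the possibly updated request back to user space only when the chosen helper succeeded. A compact sketch of that control flow; the flag values and helper bodies below are invented for illustration:

/* Sketch of the "copy in once, dispatch on flags, copy out on success"
 * shape of drm_addbufs(). Flag values and names are illustrative. */
#include <stdio.h>
#include <string.h>

#define FLAG_AGP 0x1
#define FLAG_SG  0x2
#define FLAG_FB  0x4

struct buf_desc { int count; int size; unsigned flags; };

static int addbufs_agp(struct buf_desc *r) { r->count *= 2; return 0; }
static int addbufs_sg (struct buf_desc *r) { return 0; }
static int addbufs_fb (struct buf_desc *r) { return 0; }
static int addbufs_pci(struct buf_desc *r) { return -1; /* e.g. -EINVAL */ }

static int addbufs_ioctl(void *user_arg)
{
    struct buf_desc request;
    int ret;

    memcpy(&request, user_arg, sizeof(request));     /* copy_from_user() */

    if (request.flags & FLAG_AGP)
        ret = addbufs_agp(&request);
    else if (request.flags & FLAG_SG)
        ret = addbufs_sg(&request);
    else if (request.flags & FLAG_FB)
        ret = addbufs_fb(&request);
    else
        ret = addbufs_pci(&request);

    if (ret == 0)
        memcpy(user_arg, &request, sizeof(request)); /* copy_to_user() */
    return ret;
}

int main(void)
{
    struct buf_desc req = { .count = 8, .size = 4096, .flags = FLAG_AGP };

    printf("ret=%d count=%d\n", addbufs_ioctl(&req), req.count);
    return 0;
}
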
958 1248
959 1249
@@ -1196,43 +1486,31 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
1196 return -EFAULT; 1486 return -EFAULT;
1197 1487
1198 if ( request.count >= dma->buf_count ) { 1488 if ( request.count >= dma->buf_count ) {
1199 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || 1489 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1200 (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) { 1490 || (drm_core_check_feature(dev, DRIVER_SG)
1491 && (dma->flags & _DRM_DMA_USE_SG))
1492 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1493 && (dma->flags & _DRM_DMA_USE_FB))) {
1201 drm_map_t *map = dev->agp_buffer_map; 1494 drm_map_t *map = dev->agp_buffer_map;
1495 unsigned long token = dev->agp_buffer_token;
1202 1496
1203 if ( !map ) { 1497 if ( !map ) {
1204 retcode = -EINVAL; 1498 retcode = -EINVAL;
1205 goto done; 1499 goto done;
1206 } 1500 }
1207 1501
1208#if LINUX_VERSION_CODE <= 0x020402
1209 down( &current->mm->mmap_sem );
1210#else
1211 down_write( &current->mm->mmap_sem ); 1502 down_write( &current->mm->mmap_sem );
1212#endif
1213 virtual = do_mmap( filp, 0, map->size, 1503 virtual = do_mmap( filp, 0, map->size,
1214 PROT_READ | PROT_WRITE, 1504 PROT_READ | PROT_WRITE,
1215 MAP_SHARED, 1505 MAP_SHARED,
1216 (unsigned long)map->offset ); 1506 token );
1217#if LINUX_VERSION_CODE <= 0x020402
1218 up( &current->mm->mmap_sem );
1219#else
1220 up_write( &current->mm->mmap_sem ); 1507 up_write( &current->mm->mmap_sem );
1221#endif
1222 } else { 1508 } else {
1223#if LINUX_VERSION_CODE <= 0x020402
1224 down( &current->mm->mmap_sem );
1225#else
1226 down_write( &current->mm->mmap_sem ); 1509 down_write( &current->mm->mmap_sem );
1227#endif
1228 virtual = do_mmap( filp, 0, dma->byte_count, 1510 virtual = do_mmap( filp, 0, dma->byte_count,
1229 PROT_READ | PROT_WRITE, 1511 PROT_READ | PROT_WRITE,
1230 MAP_SHARED, 0 ); 1512 MAP_SHARED, 0 );
1231#if LINUX_VERSION_CODE <= 0x020402
1232 up( &current->mm->mmap_sem );
1233#else
1234 up_write( &current->mm->mmap_sem ); 1513 up_write( &current->mm->mmap_sem );
1235#endif
1236 } 1514 }
1237 if ( virtual > -1024UL ) { 1515 if ( virtual > -1024UL ) {
1238 /* Real error */ 1516 /* Real error */
@@ -1279,3 +1557,26 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
1279 return retcode; 1557 return retcode;
1280} 1558}
1281 1559
1560/**
1561 * Compute size order. Returns the exponent of the smallest power of two which
1562 * is greater than or equal to the given number.
1563 *
1564 * \param size size.
1565 * \return order.
1566 *
1567 * \todo Can be made faster.
1568 */
1569int drm_order( unsigned long size )
1570{
1571 int order;
1572 unsigned long tmp;
1573
1574 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
1575 ;
1576
1577 if (size & (size - 1))
1578 ++order;
1579
1580 return order;
1581}
1582EXPORT_SYMBOL(drm_order);
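
drm_order(), moved here from the top of the file, returns the exponent of the smallest power of two that is greater than or equal to size, i.e. ceil(log2(size)), with drm_order(1) == 0. A standalone check of a few sample values, using the same loop as the function above:

/* Quick standalone check of the drm_order() computation. */
#include <stdio.h>

static int drm_order(unsigned long size)
{
    int order;
    unsigned long tmp;

    for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
        ;
    if (size & (size - 1))
        ++order;
    return order;
}

int main(void)
{
    unsigned long samples[] = { 1, 2, 3, 4096, 4097, 65536 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("drm_order(%lu) = %d\n", samples[i], drm_order(samples[i]));
    return 0;   /* expect 0, 1, 2, 12, 13, 16 */
}
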
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index a7cfabd1ca2e..502892794c16 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -212,6 +212,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
212 drm_ctx_priv_map_t __user *argp = (void __user *)arg; 212 drm_ctx_priv_map_t __user *argp = (void __user *)arg;
213 drm_ctx_priv_map_t request; 213 drm_ctx_priv_map_t request;
214 drm_map_t *map; 214 drm_map_t *map;
215 drm_map_list_t *_entry;
215 216
216 if (copy_from_user(&request, argp, sizeof(request))) 217 if (copy_from_user(&request, argp, sizeof(request)))
217 return -EFAULT; 218 return -EFAULT;
@@ -225,7 +226,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
225 map = dev->context_sareas[request.ctx_id]; 226 map = dev->context_sareas[request.ctx_id];
226 up(&dev->struct_sem); 227 up(&dev->struct_sem);
227 228
228 request.handle = (void *) map->offset; 229 request.handle = 0;
230 list_for_each_entry(_entry, &dev->maplist->head,head) {
231 if (_entry->map == map) {
232 request.handle = (void *)(unsigned long)_entry->user_token;
233 break;
234 }
235 }
236 if (request.handle == 0)
237 return -EINVAL;
238
239
229 if (copy_to_user(argp, &request, sizeof(request))) 240 if (copy_to_user(argp, &request, sizeof(request)))
230 return -EFAULT; 241 return -EFAULT;
231 return 0; 242 return 0;
@@ -262,7 +273,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
262 list_for_each(list, &dev->maplist->head) { 273 list_for_each(list, &dev->maplist->head) {
263 r_list = list_entry(list, drm_map_list_t, head); 274 r_list = list_entry(list, drm_map_list_t, head);
264 if (r_list->map 275 if (r_list->map
265 && r_list->map->offset == (unsigned long) request.handle) 276 && r_list->user_token == (unsigned long) request.handle)
266 goto found; 277 goto found;
267 } 278 }
268bad: 279bad:
@@ -297,7 +308,7 @@ found:
297 * 308 *
298 * Attempt to set drm_device::context_flag. 309 * Attempt to set drm_device::context_flag.
299 */ 310 */
300int drm_context_switch( drm_device_t *dev, int old, int new ) 311static int drm_context_switch( drm_device_t *dev, int old, int new )
301{ 312{
302 if ( test_and_set_bit( 0, &dev->context_flag ) ) { 313 if ( test_and_set_bit( 0, &dev->context_flag ) ) {
303 DRM_ERROR( "Reentering -- FIXME\n" ); 314 DRM_ERROR( "Reentering -- FIXME\n" );
@@ -369,7 +380,7 @@ int drm_resctx( struct inode *inode, struct file *filp,
369 for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { 380 for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
370 ctx.handle = i; 381 ctx.handle = i;
371 if ( copy_to_user( &res.contexts[i], 382 if ( copy_to_user( &res.contexts[i],
372 &i, sizeof(i) ) ) 383 &ctx, sizeof(ctx) ) )
373 return -EFAULT; 384 return -EFAULT;
374 } 385 }
375 } 386 }
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 3333c250c4d9..6ba48f346fcf 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -70,8 +70,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
70 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 }, 70 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 },
71 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, 71 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
72 72
73 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, 73 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl,1, 1 },
74 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap, 1, 0 }, 74 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, 1, 0 },
75 75
76 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 }, 76 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 }, 77 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },
@@ -102,10 +102,10 @@ static drm_ioctl_desc_t drm_ioctls[] = {
102 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 }, 102 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 },
103 103
104#if __OS_HAS_AGP 104#if __OS_HAS_AGP
105 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, 105 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, 1, 1 },
106 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, 106 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, 1, 1 },
107 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, 107 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, 1, 1 },
108 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, 108 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, 1, 0 },
109 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, 109 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
110 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, 110 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
111 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, 111 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
@@ -127,14 +127,12 @@ static drm_ioctl_desc_t drm_ioctls[] = {
127 * 127 *
128 * Frees every resource in \p dev. 128 * Frees every resource in \p dev.
129 * 129 *
130 * \sa drm_device and setup(). 130 * \sa drm_device
131 */ 131 */
132int drm_takedown( drm_device_t *dev ) 132int drm_takedown( drm_device_t *dev )
133{ 133{
134 drm_magic_entry_t *pt, *next; 134 drm_magic_entry_t *pt, *next;
135 drm_map_t *map;
136 drm_map_list_t *r_list; 135 drm_map_list_t *r_list;
137 struct list_head *list, *list_next;
138 drm_vma_entry_t *vma, *vma_next; 136 drm_vma_entry_t *vma, *vma_next;
139 int i; 137 int i;
140 138
@@ -142,6 +140,7 @@ int drm_takedown( drm_device_t *dev )
142 140
143 if (dev->driver->pretakedown) 141 if (dev->driver->pretakedown)
144 dev->driver->pretakedown(dev); 142 dev->driver->pretakedown(dev);
143 DRM_DEBUG("driver pretakedown completed\n");
145 144
146 if (dev->unique) { 145 if (dev->unique) {
147 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); 146 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -178,11 +177,16 @@ int drm_takedown( drm_device_t *dev )
178 } 177 }
179 dev->agp->memory = NULL; 178 dev->agp->memory = NULL;
180 179
181 if ( dev->agp->acquired ) drm_agp_do_release(dev); 180 if (dev->agp->acquired)
181 drm_agp_release(dev);
182 182
183 dev->agp->acquired = 0; 183 dev->agp->acquired = 0;
184 dev->agp->enabled = 0; 184 dev->agp->enabled = 0;
185 } 185 }
186 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
187 drm_sg_cleanup(dev->sg);
188 dev->sg = NULL;
189 }
186 190
187 /* Clear vma list (only built for debugging) */ 191 /* Clear vma list (only built for debugging) */
188 if ( dev->vmalist ) { 192 if ( dev->vmalist ) {
@@ -194,48 +198,11 @@ int drm_takedown( drm_device_t *dev )
194 } 198 }
195 199
196 if( dev->maplist ) { 200 if( dev->maplist ) {
197 list_for_each_safe( list, list_next, &dev->maplist->head ) { 201 while (!list_empty(&dev->maplist->head)) {
198 r_list = (drm_map_list_t *)list; 202 struct list_head *list = dev->maplist->head.next;
199 203 r_list = list_entry(list, drm_map_list_t, head);
200 if ( ( map = r_list->map ) ) { 204 drm_rmmap_locked(dev, r_list->map);
201 switch ( map->type ) { 205 }
202 case _DRM_REGISTERS:
203 case _DRM_FRAME_BUFFER:
204 if (drm_core_has_MTRR(dev)) {
205 if ( map->mtrr >= 0 ) {
206 int retcode;
207 retcode = mtrr_del( map->mtrr,
208 map->offset,
209 map->size );
210 DRM_DEBUG( "mtrr_del=%d\n", retcode );
211 }
212 }
213 drm_ioremapfree( map->handle, map->size, dev );
214 break;
215 case _DRM_SHM:
216 vfree(map->handle);
217 break;
218
219 case _DRM_AGP:
220 /* Do nothing here, because this is all
221 * handled in the AGP/GART driver.
222 */
223 break;
224 case _DRM_SCATTER_GATHER:
225 /* Handle it */
226 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
227 drm_sg_cleanup(dev->sg);
228 dev->sg = NULL;
229 }
230 break;
231 }
232 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
233 }
234 list_del( list );
235 drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
236 }
237 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
238 dev->maplist = NULL;
239 } 206 }
240 207
241 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) { 208 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
@@ -264,6 +231,7 @@ int drm_takedown( drm_device_t *dev )
264 } 231 }
265 up( &dev->struct_sem ); 232 up( &dev->struct_sem );
266 233
234 DRM_DEBUG("takedown completed\n");
267 return 0; 235 return 0;
268} 236}
269 237
@@ -312,7 +280,7 @@ EXPORT_SYMBOL(drm_init);
312 * 280 *
313 * Cleans up all DRM device, calling takedown(). 281 * Cleans up all DRM device, calling takedown().
314 * 282 *
315 * \sa drm_init(). 283 * \sa drm_init
316 */ 284 */
317static void drm_cleanup( drm_device_t *dev ) 285static void drm_cleanup( drm_device_t *dev )
318{ 286{
@@ -325,6 +293,11 @@ static void drm_cleanup( drm_device_t *dev )
325 293
326 drm_takedown( dev ); 294 drm_takedown( dev );
327 295
296 if (dev->maplist) {
297 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
298 dev->maplist = NULL;
299 }
300
328 drm_ctxbitmap_cleanup( dev ); 301 drm_ctxbitmap_cleanup( dev );
329 302
330 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 303 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 10e64fde8d78..a1f4e9cd64ed 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -71,12 +71,6 @@ static int drm_setup( drm_device_t *dev )
71 dev->magiclist[i].tail = NULL; 71 dev->magiclist[i].tail = NULL;
72 } 72 }
73 73
74 dev->maplist = drm_alloc(sizeof(*dev->maplist),
75 DRM_MEM_MAPS);
76 if(dev->maplist == NULL) return -ENOMEM;
77 memset(dev->maplist, 0, sizeof(*dev->maplist));
78 INIT_LIST_HEAD(&dev->maplist->head);
79
80 dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), 74 dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
81 DRM_MEM_CTXLIST); 75 DRM_MEM_CTXLIST);
82 if(dev->ctxlist == NULL) return -ENOMEM; 76 if(dev->ctxlist == NULL) return -ENOMEM;
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 39afda0ccabe..d2ed3ba5aca9 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -208,7 +208,7 @@ int drm_getmap( struct inode *inode, struct file *filp,
208 map.size = r_list->map->size; 208 map.size = r_list->map->size;
209 map.type = r_list->map->type; 209 map.type = r_list->map->type;
210 map.flags = r_list->map->flags; 210 map.flags = r_list->map->flags;
211 map.handle = r_list->map->handle; 211 map.handle = (void *)(unsigned long) r_list->user_token;
212 map.mtrr = r_list->map->mtrr; 212 map.mtrr = r_list->map->mtrr;
213 up(&dev->struct_sem); 213 up(&dev->struct_sem);
214 214
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index ace3d42f4407..ff483fb418aa 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -142,27 +142,31 @@ void drm_free_pages(unsigned long address, int order, int area)
142 142
143#if __OS_HAS_AGP 143#if __OS_HAS_AGP
144/** Wrapper around agp_allocate_memory() */ 144/** Wrapper around agp_allocate_memory() */
145DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type) 145DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
146{ 146{
147 return drm_agp_allocate_memory(bridge, pages, type); 147 return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
148} 148}
149EXPORT_SYMBOL(drm_alloc_agp);
149 150
150/** Wrapper around agp_free_memory() */ 151/** Wrapper around agp_free_memory() */
151int drm_free_agp(DRM_AGP_MEM *handle, int pages) 152int drm_free_agp(DRM_AGP_MEM *handle, int pages)
152{ 153{
153 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 154 return drm_agp_free_memory(handle) ? 0 : -EINVAL;
154} 155}
156EXPORT_SYMBOL(drm_free_agp);
155 157
156/** Wrapper around agp_bind_memory() */ 158/** Wrapper around agp_bind_memory() */
157int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start) 159int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
158{ 160{
159 return drm_agp_bind_memory(handle, start); 161 return drm_agp_bind_memory(handle, start);
160} 162}
163EXPORT_SYMBOL(drm_bind_agp);
161 164
162/** Wrapper around agp_unbind_memory() */ 165/** Wrapper around agp_unbind_memory() */
163int drm_unbind_agp(DRM_AGP_MEM *handle) 166int drm_unbind_agp(DRM_AGP_MEM *handle)
164{ 167{
165 return drm_agp_unbind_memory(handle); 168 return drm_agp_unbind_memory(handle);
166} 169}
170EXPORT_SYMBOL(drm_unbind_agp);
167#endif /* agp */ 171#endif /* agp */
168#endif /* debug_memory */ 172#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index 192e8762571c..09ed712c1a7f 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -46,11 +46,11 @@
46/** 46/**
47 * \brief Allocate a PCI consistent memory block, for DMA. 47 * \brief Allocate a PCI consistent memory block, for DMA.
48 */ 48 */
49void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, 49drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
50 dma_addr_t maxaddr, dma_addr_t * busaddr) 50 dma_addr_t maxaddr)
51{ 51{
52 void *address; 52 drm_dma_handle_t *dmah;
53#if DRM_DEBUG_MEMORY 53#ifdef DRM_DEBUG_MEMORY
54 int area = DRM_MEM_DMA; 54 int area = DRM_MEM_DMA;
55 55
56 spin_lock(&drm_mem_lock); 56 spin_lock(&drm_mem_lock);
@@ -74,13 +74,19 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
74 return NULL; 74 return NULL;
75 } 75 }
76 76
77 address = pci_alloc_consistent(dev->pdev, size, busaddr); 77 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
78 if (!dmah)
79 return NULL;
80
81 dmah->size = size;
82 dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
78 83
79#if DRM_DEBUG_MEMORY 84#ifdef DRM_DEBUG_MEMORY
80 if (address == NULL) { 85 if (dmah->vaddr == NULL) {
81 spin_lock(&drm_mem_lock); 86 spin_lock(&drm_mem_lock);
82 ++drm_mem_stats[area].fail_count; 87 ++drm_mem_stats[area].fail_count;
83 spin_unlock(&drm_mem_lock); 88 spin_unlock(&drm_mem_lock);
89 kfree(dmah);
84 return NULL; 90 return NULL;
85 } 91 }
86 92
@@ -90,37 +96,42 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
90 drm_ram_used += size; 96 drm_ram_used += size;
91 spin_unlock(&drm_mem_lock); 97 spin_unlock(&drm_mem_lock);
92#else 98#else
93 if (address == NULL) 99 if (dmah->vaddr == NULL) {
100 kfree(dmah);
94 return NULL; 101 return NULL;
102 }
95#endif 103#endif
96 104
97 memset(address, 0, size); 105 memset(dmah->vaddr, 0, size);
98 106
99 return address; 107 return dmah;
100} 108}
101EXPORT_SYMBOL(drm_pci_alloc); 109EXPORT_SYMBOL(drm_pci_alloc);
102 110
103/** 111/**
 104 * \brief Free a PCI consistent memory block. 112 * \brief Free a PCI consistent memory block without freeing its descriptor.
113 *
114 * This function is for internal use in the Linux-specific DRM core code.
105 */ 115 */
106void 116void
107drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr) 117__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
108{ 118{
109#if DRM_DEBUG_MEMORY 119#ifdef DRM_DEBUG_MEMORY
110 int area = DRM_MEM_DMA; 120 int area = DRM_MEM_DMA;
111 int alloc_count; 121 int alloc_count;
112 int free_count; 122 int free_count;
113#endif 123#endif
114 124
115 if (!vaddr) { 125 if (!dmah->vaddr) {
116#if DRM_DEBUG_MEMORY 126#ifdef DRM_DEBUG_MEMORY
117 DRM_MEM_ERROR(area, "Attempt to free address 0\n"); 127 DRM_MEM_ERROR(area, "Attempt to free address 0\n");
118#endif 128#endif
119 } else { 129 } else {
120 pci_free_consistent(dev->pdev, size, vaddr, busaddr); 130 pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
131 dmah->busaddr);
121 } 132 }
122 133
123#if DRM_DEBUG_MEMORY 134#ifdef DRM_DEBUG_MEMORY
124 spin_lock(&drm_mem_lock); 135 spin_lock(&drm_mem_lock);
125 free_count = ++drm_mem_stats[area].free_count; 136 free_count = ++drm_mem_stats[area].free_count;
126 alloc_count = drm_mem_stats[area].succeed_count; 137 alloc_count = drm_mem_stats[area].succeed_count;
@@ -135,6 +146,16 @@ drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
135#endif 146#endif
136 147
137} 148}
149
150/**
151 * \brief Free a PCI consistent memory block
152 */
153void
154drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
155{
156 __drm_pci_free(dev, dmah);
157 kfree(dmah);
158}
138EXPORT_SYMBOL(drm_pci_free); 159EXPORT_SYMBOL(drm_pci_free);
139 160
140/*@}*/ 161/*@}*/
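
The reworked PCI allocator returns a drm_dma_handle_t describing the block, __drm_pci_free() releases only the memory, and drm_pci_free() releases both memory and descriptor; that split is what lets the _DRM_CONSISTENT map code in drm_bufs.c kfree() the handle while keeping the mapping itself alive. A user-space analog of the pattern, with malloc()/free() standing in for pci_alloc_consistent()/pci_free_consistent() and no bus address:

/* User-space analog of the drm_dma_handle_t pattern introduced here. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dma_handle {
    void   *vaddr;
    size_t  size;
};

static struct dma_handle *dma_alloc(size_t size)
{
    struct dma_handle *dmah = malloc(sizeof(*dmah));

    if (!dmah)
        return NULL;
    dmah->size = size;
    dmah->vaddr = malloc(size);
    if (!dmah->vaddr) {
        free(dmah);
        return NULL;
    }
    memset(dmah->vaddr, 0, size);
    return dmah;
}

/* Free the block but not the descriptor (cf. __drm_pci_free()). */
static void dma_free_block(struct dma_handle *dmah)
{
    free(dmah->vaddr);
    dmah->vaddr = NULL;
}

/* Free block and descriptor (cf. drm_pci_free()). */
static void dma_free(struct dma_handle *dmah)
{
    dma_free_block(dmah);
    free(dmah);
}

int main(void)
{
    struct dma_handle *dmah = dma_alloc(4096);

    if (!dmah)
        return 1;
    printf("allocated %zu bytes at %p\n", dmah->size, dmah->vaddr);
    dma_free(dmah);
    return 0;
}
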
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 70ca4fa55c9d..58b1747cd440 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -25,6 +25,8 @@
25 {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ 25 {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
26 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ 26 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
27 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ 27 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
28 {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
29 {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
28 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ 30 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
29 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ 31 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
30 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ 32 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
@@ -33,7 +35,17 @@
33 {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ 35 {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
34 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ 36 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
35 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ 37 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
38 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
39 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
40 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
41 {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
42 {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
43 {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
44 {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
45 {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
36 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 46 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
47 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
48 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
37 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 49 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
38 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 50 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
39 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 51 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -56,6 +68,7 @@
56 {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 68 {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
57 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 69 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
58 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 70 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
71 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
59 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 72 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
60 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 73 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
61 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 74 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -116,9 +129,10 @@
116 {0, 0, 0} 129 {0, 0, 0}
117 130
118#define mga_PCI_IDS \ 131#define mga_PCI_IDS \
119 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 132 {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
120 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 133 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
121 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 134 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
135 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
122 {0, 0, 0} 136 {0, 0, 0}
123 137
124#define mach64_PCI_IDS \ 138#define mach64_PCI_IDS \
@@ -162,9 +176,10 @@
162 176
163#define viadrv_PCI_IDS \ 177#define viadrv_PCI_IDS \
164 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 178 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
179 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
165 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 180 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
166 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 181 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
167 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 182 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
168 {0, 0, 0} 183 {0, 0, 0}
169 184
170#define i810_PCI_IDS \ 185#define i810_PCI_IDS \
@@ -181,33 +196,30 @@
181 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 196 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
182 {0, 0, 0} 197 {0, 0, 0}
183 198
184#define gamma_PCI_IDS \
185 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
186 {0, 0, 0}
187
188#define savage_PCI_IDS \ 199#define savage_PCI_IDS \
189 {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 200 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
190 {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 201 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
191 {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 202 {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
192 {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 203 {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
193 {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 204 {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
194 {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 205 {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
195 {0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 206 {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
196 {0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 207 {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
197 {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 208 {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
198 {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 209 {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
199 {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 210 {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
200 {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 211 {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
201 {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 212 {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
202 {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 213 {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
203 {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 214 {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
204 {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 215 {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
205 {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 216 {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
206 {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 217 {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
207 {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 218 {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
208 {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 219 {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
209 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 220 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
210 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 221 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
222 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
211 {0, 0, 0} 223 {0, 0, 0}
212 224
213#define ffb_PCI_IDS \ 225#define ffb_PCI_IDS \
@@ -223,10 +235,3 @@
223 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 235 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
224 {0, 0, 0} 236 {0, 0, 0}
225 237
226#define viadrv_PCI_IDS \
227 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
230 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
231 {0, 0, 0}
232
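
The drm_pciids.h hunks above give the savage entries real per-family driver_data values (S3_SAVAGE3D, S3_SAVAGE4, S3_SAVAGE_MX, ...), add the VIA 0x3118 and 0x3108 devices, and drop both the gamma table and a duplicated viadrv_PCI_IDS block. Each initializer is {vendor, device, subvendor, subdevice, class, class_mask, driver_data}. Below is a minimal userspace sketch, with a stand-in struct rather than the kernel's pci_device_id and only a trimmed subset of the savage entries, of how such a macro expands into a table a probe loop can match against:

#include <stdio.h>

#define PCI_ANY_ID (~0u)

/* Stand-ins for the kernel's pci_device_id and the S3_* chip enum;
 * only the {vendor, device, ..., driver_data} layout matters here. */
enum { S3_SAVAGE3D = 1, S3_SAVAGE4 };

struct id_entry {
        unsigned int vendor, device, subvendor, subdevice;
        unsigned int class, class_mask;
        unsigned long driver_data;      /* chip family for savage */
};

/* Trimmed subset of the savage_PCI_IDS entries shown in the hunk. */
#define savage_PCI_IDS \
        {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
        {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
        {0, 0, 0}

static const struct id_entry savage_ids[] = { savage_PCI_IDS };

int main(void)
{
        unsigned int vendor = 0x5333, device = 0x8a22;

        for (const struct id_entry *id = savage_ids; id->vendor; id++)
                if (id->vendor == vendor && id->device == device)
                        printf("matched, driver_data = %lu\n", id->driver_data);
        return 0;
}
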
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 4774087d2e9e..32d2bb99462c 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -210,8 +210,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
210 210
211 /* Hardcoded from _DRM_FRAME_BUFFER, 211 /* Hardcoded from _DRM_FRAME_BUFFER,
212 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and 212 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
213 _DRM_SCATTER_GATHER. */ 213 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
214 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" }; 214 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
215 const char *type; 215 const char *type;
216 int i; 216 int i;
217 217
@@ -229,16 +229,19 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
229 if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) { 229 if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
230 r_list = list_entry(list, drm_map_list_t, head); 230 r_list = list_entry(list, drm_map_list_t, head);
231 map = r_list->map; 231 map = r_list->map;
232 if(!map) continue; 232 if(!map)
233 if (map->type < 0 || map->type > 4) type = "??"; 233 continue;
234 else type = types[map->type]; 234 if (map->type < 0 || map->type > 5)
235 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", 235 type = "??";
236 else
237 type = types[map->type];
238 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
236 i, 239 i,
237 map->offset, 240 map->offset,
238 map->size, 241 map->size,
239 type, 242 type,
240 map->flags, 243 map->flags,
241 (unsigned long)map->handle); 244 r_list->user_token);
242 if (map->mtrr < 0) { 245 if (map->mtrr < 0) {
243 DRM_PROC_PRINT("none\n"); 246 DRM_PROC_PRINT("none\n");
244 } else { 247 } else {
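
The drm_proc.c hunk adds a sixth name, "PCI", for _DRM_CONSISTENT maps, widens the bounds check from 4 to 5 to match, and prints r_list->user_token rather than the kernel pointer in map->handle (the format specifier also changes from 0x%08lx to 0x%08x). A small standalone sketch of the bounds-checked name lookup, mirroring the table in the hunk:

#include <stdio.h>

static const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };

static const char *map_type_name(int type)
{
        /* Out-of-range values print as "??" instead of indexing past
         * the end of the table. */
        if (type < 0 || type > 5)
                return "??";
        return types[type];
}

int main(void)
{
        printf("%s %s %s\n", map_type_name(0), map_type_name(5), map_type_name(9));
        return 0;
}
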
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 54fddb6ea2d1..ed267d49bc6a 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -61,6 +61,12 @@ void drm_sg_cleanup( drm_sg_mem_t *entry )
61 DRM_MEM_SGLISTS ); 61 DRM_MEM_SGLISTS );
62} 62}
63 63
64#ifdef _LP64
65# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
66#else
67# define ScatterHandle(x) (unsigned int)(x)
68#endif
69
64int drm_sg_alloc( struct inode *inode, struct file *filp, 70int drm_sg_alloc( struct inode *inode, struct file *filp,
65 unsigned int cmd, unsigned long arg ) 71 unsigned int cmd, unsigned long arg )
66{ 72{
@@ -133,12 +139,13 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
133 */ 139 */
134 memset( entry->virtual, 0, pages << PAGE_SHIFT ); 140 memset( entry->virtual, 0, pages << PAGE_SHIFT );
135 141
136 entry->handle = (unsigned long)entry->virtual; 142 entry->handle = ScatterHandle((unsigned long)entry->virtual);
137 143
138 DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle ); 144 DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
139 DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual ); 145 DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
140 146
141 for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) { 147 for (i = (unsigned long)entry->virtual, j = 0; j < pages;
148 i += PAGE_SIZE, j++) {
142 entry->pagelist[j] = vmalloc_to_page((void *)i); 149 entry->pagelist[j] = vmalloc_to_page((void *)i);
143 if (!entry->pagelist[j]) 150 if (!entry->pagelist[j])
144 goto failed; 151 goto failed;
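
On 64-bit (_LP64) kernels the vmalloc address that used to double as the scatter-gather handle no longer fits in the 32 bits exposed to user space, so the hunk introduces ScatterHandle() to fold the upper and lower halves together, while the pagelist walk now starts from the full entry->virtual address. A userspace sketch of the same folding (the address value is only an example):

#include <stdio.h>
#include <stdint.h>

/* Fold a 64-bit address into 32 bits the same way ScatterHandle() does:
 * add the high and low halves. */
static unsigned int scatter_handle(uint64_t x)
{
        return (unsigned int)((x >> 32) + (x & 0xffffffffULL));
}

int main(void)
{
        uint64_t vaddr = 0xffffc90001234000ULL;  /* example vmalloc-style address */

        printf("handle = 0x%08x\n", scatter_handle(vaddr));
        return 0;
}
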
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 48829a1a086a..95a976c96eb8 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -75,6 +75,11 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
75 dev->pci_func = PCI_FUNC(pdev->devfn); 75 dev->pci_func = PCI_FUNC(pdev->devfn);
76 dev->irq = pdev->irq; 76 dev->irq = pdev->irq;
77 77
78 dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
79 if (dev->maplist == NULL)
80 return -ENOMEM;
81 INIT_LIST_HEAD(&dev->maplist->head);
82
78 /* the DRM has 6 basic counters */ 83 /* the DRM has 6 basic counters */
79 dev->counters = 6; 84 dev->counters = 6;
80 dev->types[0] = _DRM_STAT_LOCK; 85 dev->types[0] = _DRM_STAT_LOCK;
@@ -91,7 +96,8 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
91 goto error_out_unreg; 96 goto error_out_unreg;
92 97
93 if (drm_core_has_AGP(dev)) { 98 if (drm_core_has_AGP(dev)) {
94 dev->agp = drm_agp_init(dev); 99 if (drm_device_is_agp(dev))
100 dev->agp = drm_agp_init(dev);
95 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) { 101 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
96 DRM_ERROR( "Cannot initialize the agpgart module.\n" ); 102 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
97 retcode = -EINVAL; 103 retcode = -EINVAL;
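
drm_fill_in_dev() now allocates dev->maplist up front and initializes its list head before any maps can be added, and drm_agp_init() is only called when drm_device_is_agp() reports an AGP device; DRIVER_REQUIRE_AGP still fails the load if no AGP ends up available. A reduced userspace sketch of that allocate-early, gate-the-optional-subsystem pattern (stand-in types, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Minimal stand-ins: a list head, a map list, and an "AGP" handle. */
struct list_head { struct list_head *next, *prev; };
struct map_list { struct list_head head; };
struct device { struct map_list *maplist; void *agp; int require_agp; int is_agp; };

static void *agp_init(void) { return malloc(1); }   /* pretend probe */

static int fill_in_dev(struct device *dev)
{
        dev->maplist = calloc(1, sizeof(*dev->maplist));
        if (!dev->maplist)
                return -ENOMEM;
        /* Same effect as INIT_LIST_HEAD: an empty circular list. */
        dev->maplist->head.next = dev->maplist->head.prev = &dev->maplist->head;

        if (dev->is_agp)                 /* only touch the AGP layer on AGP devices */
                dev->agp = agp_init();
        if (dev->require_agp && !dev->agp)
                return -EINVAL;          /* driver cannot run without AGP */
        return 0;
}

int main(void)
{
        struct device dev = { .require_agp = 1, .is_agp = 1 };

        printf("fill_in_dev -> %d\n", fill_in_dev(&dev));
        return 0;
}
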
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 2fc10c4bbcdf..475cc5e555e1 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -17,6 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18 18
19#include "drm_core.h" 19#include "drm_core.h"
20#include "drmP.h"
20 21
21struct drm_sysfs_class { 22struct drm_sysfs_class {
22 struct class_device_attribute attr; 23 struct class_device_attribute attr;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 621220f3f372..ced4215e2275 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -73,12 +73,13 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
73 r_list = list_entry(list, drm_map_list_t, head); 73 r_list = list_entry(list, drm_map_list_t, head);
74 map = r_list->map; 74 map = r_list->map;
75 if (!map) continue; 75 if (!map) continue;
76 if (map->offset == VM_OFFSET(vma)) break; 76 if (r_list->user_token == VM_OFFSET(vma))
77 break;
77 } 78 }
78 79
79 if (map && map->type == _DRM_AGP) { 80 if (map && map->type == _DRM_AGP) {
80 unsigned long offset = address - vma->vm_start; 81 unsigned long offset = address - vma->vm_start;
81 unsigned long baddr = VM_OFFSET(vma) + offset; 82 unsigned long baddr = map->offset + offset;
82 struct drm_agp_mem *agpmem; 83 struct drm_agp_mem *agpmem;
83 struct page *page; 84 struct page *page;
84 85
@@ -210,6 +211,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
210 } 211 }
211 212
212 if(!found_maps) { 213 if(!found_maps) {
214 drm_dma_handle_t dmah;
215
213 switch (map->type) { 216 switch (map->type) {
214 case _DRM_REGISTERS: 217 case _DRM_REGISTERS:
215 case _DRM_FRAME_BUFFER: 218 case _DRM_FRAME_BUFFER:
@@ -228,6 +231,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
228 case _DRM_AGP: 231 case _DRM_AGP:
229 case _DRM_SCATTER_GATHER: 232 case _DRM_SCATTER_GATHER:
230 break; 233 break;
234 case _DRM_CONSISTENT:
235 dmah.vaddr = map->handle;
236 dmah.busaddr = map->offset;
237 dmah.size = map->size;
238 __drm_pci_free(dev, &dmah);
239 break;
231 } 240 }
232 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 241 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
233 } 242 }
@@ -296,7 +305,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
296 305
297 306
298 offset = address - vma->vm_start; 307 offset = address - vma->vm_start;
299 map_offset = map->offset - dev->sg->handle; 308 map_offset = map->offset - (unsigned long)dev->sg->virtual;
300 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); 309 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
301 page = entry->pagelist[page_offset]; 310 page = entry->pagelist[page_offset];
302 get_page(page); 311 get_page(page);
@@ -305,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
305} 314}
306 315
307 316
308#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
309
310static struct page *drm_vm_nopage(struct vm_area_struct *vma, 317static struct page *drm_vm_nopage(struct vm_area_struct *vma,
311 unsigned long address, 318 unsigned long address,
312 int *type) { 319 int *type) {
@@ -335,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
335 return drm_do_vm_sg_nopage(vma, address); 342 return drm_do_vm_sg_nopage(vma, address);
336} 343}
337 344
338#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
339
340static struct page *drm_vm_nopage(struct vm_area_struct *vma,
341 unsigned long address,
342 int unused) {
343 return drm_do_vm_nopage(vma, address);
344}
345
346static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
347 unsigned long address,
348 int unused) {
349 return drm_do_vm_shm_nopage(vma, address);
350}
351
352static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
353 unsigned long address,
354 int unused) {
355 return drm_do_vm_dma_nopage(vma, address);
356}
357
358static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
359 unsigned long address,
360 int unused) {
361 return drm_do_vm_sg_nopage(vma, address);
362}
363
364#endif
365
366
367/** AGP virtual memory operations */ 345/** AGP virtual memory operations */
368static struct vm_operations_struct drm_vm_ops = { 346static struct vm_operations_struct drm_vm_ops = {
369 .nopage = drm_vm_nopage, 347 .nopage = drm_vm_nopage,
@@ -487,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
487 465
488 vma->vm_ops = &drm_vm_dma_ops; 466 vma->vm_ops = &drm_vm_dma_ops;
489 467
490#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
491 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
492#else
493 vma->vm_flags |= VM_RESERVED; /* Don't swap */ 468 vma->vm_flags |= VM_RESERVED; /* Don't swap */
494#endif
495 469
496 vma->vm_file = filp; /* Needed for drm_vm_open() */ 470 vma->vm_file = filp; /* Needed for drm_vm_open() */
497 drm_vm_open(vma); 471 drm_vm_open(vma);
@@ -560,13 +534,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
560 for performance, even if the list was a 534 for performance, even if the list was a
561 bit longer. */ 535 bit longer. */
562 list_for_each(list, &dev->maplist->head) { 536 list_for_each(list, &dev->maplist->head) {
563 unsigned long off;
564 537
565 r_list = list_entry(list, drm_map_list_t, head); 538 r_list = list_entry(list, drm_map_list_t, head);
566 map = r_list->map; 539 map = r_list->map;
567 if (!map) continue; 540 if (!map) continue;
568 off = dev->driver->get_map_ofs(map); 541 if (r_list->user_token == VM_OFFSET(vma))
569 if (off == VM_OFFSET(vma)) break; 542 break;
570 } 543 }
571 544
572 if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) 545 if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@@ -605,17 +578,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
605 /* fall through to _DRM_FRAME_BUFFER... */ 578 /* fall through to _DRM_FRAME_BUFFER... */
606 case _DRM_FRAME_BUFFER: 579 case _DRM_FRAME_BUFFER:
607 case _DRM_REGISTERS: 580 case _DRM_REGISTERS:
608 if (VM_OFFSET(vma) >= __pa(high_memory)) {
609#if defined(__i386__) || defined(__x86_64__) 581#if defined(__i386__) || defined(__x86_64__)
610 if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { 582 if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
611 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; 583 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
612 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; 584 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
613 } 585 }
614#elif defined(__powerpc__) 586#elif defined(__powerpc__)
615 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED; 587 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
588 if (map->type == _DRM_REGISTERS)
589 pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
616#endif 590#endif
617 vma->vm_flags |= VM_IO; /* not in core dump */ 591 vma->vm_flags |= VM_IO; /* not in core dump */
618 }
619#if defined(__ia64__) 592#if defined(__ia64__)
620 if (efi_range_is_wc(vma->vm_start, vma->vm_end - 593 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
621 vma->vm_start)) 594 vma->vm_start))
@@ -628,12 +601,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
628 offset = dev->driver->get_reg_ofs(dev); 601 offset = dev->driver->get_reg_ofs(dev);
629#ifdef __sparc__ 602#ifdef __sparc__
630 if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start, 603 if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
631 (VM_OFFSET(vma) + offset) >> PAGE_SHIFT, 604 (map->offset + offset) >> PAGE_SHIFT,
632 vma->vm_end - vma->vm_start, 605 vma->vm_end - vma->vm_start,
633 vma->vm_page_prot)) 606 vma->vm_page_prot))
634#else 607#else
635 if (io_remap_pfn_range(vma, vma->vm_start, 608 if (io_remap_pfn_range(vma, vma->vm_start,
636 (VM_OFFSET(vma) + offset) >> PAGE_SHIFT, 609 (map->offset + offset) >> PAGE_SHIFT,
637 vma->vm_end - vma->vm_start, 610 vma->vm_end - vma->vm_start,
638 vma->vm_page_prot)) 611 vma->vm_page_prot))
639#endif 612#endif
@@ -641,37 +614,28 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
641 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," 614 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
642 " offset = 0x%lx\n", 615 " offset = 0x%lx\n",
643 map->type, 616 map->type,
644 vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset); 617 vma->vm_start, vma->vm_end, map->offset + offset);
645 vma->vm_ops = &drm_vm_ops; 618 vma->vm_ops = &drm_vm_ops;
646 break; 619 break;
647 case _DRM_SHM: 620 case _DRM_SHM:
621 case _DRM_CONSISTENT:
 622	/* Consistent memory is really like shared memory. It's only
 623	 * allocated in a different way */
648 vma->vm_ops = &drm_vm_shm_ops; 624 vma->vm_ops = &drm_vm_shm_ops;
649 vma->vm_private_data = (void *)map; 625 vma->vm_private_data = (void *)map;
650 /* Don't let this area swap. Change when 626 /* Don't let this area swap. Change when
651 DRM_KERNEL advisory is supported. */ 627 DRM_KERNEL advisory is supported. */
652#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
653 vma->vm_flags |= VM_LOCKED;
654#else
655 vma->vm_flags |= VM_RESERVED; 628 vma->vm_flags |= VM_RESERVED;
656#endif
657 break; 629 break;
658 case _DRM_SCATTER_GATHER: 630 case _DRM_SCATTER_GATHER:
659 vma->vm_ops = &drm_vm_sg_ops; 631 vma->vm_ops = &drm_vm_sg_ops;
660 vma->vm_private_data = (void *)map; 632 vma->vm_private_data = (void *)map;
661#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
662 vma->vm_flags |= VM_LOCKED;
663#else
664 vma->vm_flags |= VM_RESERVED; 633 vma->vm_flags |= VM_RESERVED;
665#endif
666 break; 634 break;
667 default: 635 default:
668 return -EINVAL; /* This should never happen. */ 636 return -EINVAL; /* This should never happen. */
669 } 637 }
670#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
671 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
672#else
673 vma->vm_flags |= VM_RESERVED; /* Don't swap */ 638 vma->vm_flags |= VM_RESERVED; /* Don't swap */
674#endif
675 639
676 vma->vm_file = filp; /* Needed for drm_vm_open() */ 640 vma->vm_file = filp; /* Needed for drm_vm_open() */
677 drm_vm_open(vma); 641 drm_vm_open(vma);
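
Taken together, the drm_vm.c changes look maps up by r_list->user_token instead of the raw map offset in both the nopage and mmap paths, treat _DRM_CONSISTENT mappings like _DRM_SHM ones (releasing the backing memory through __drm_pci_free() when the last mapping goes away), and drop the pre-2.6 VM_LOCKED/VM_SHM compatibility branches in favour of VM_RESERVED. A small sketch of the token lookup, using an array in place of the kernel's maplist for brevity (names are illustrative):

#include <stdio.h>

struct map { unsigned long offset, size; };
struct map_entry { struct map *map; unsigned long user_token; };

/* Find the map whose user-visible token matches the mmap offset; the
 * kernel walks dev->maplist the same way, just as a linked list. */
static struct map *find_map(struct map_entry *list, int n, unsigned long token)
{
        for (int i = 0; i < n; i++)
                if (list[i].map && list[i].user_token == token)
                        return list[i].map;
        return NULL;
}

int main(void)
{
        struct map regs = { 0xfe000000, 0x10000 };
        struct map_entry maplist[] = { { &regs, 0x1000 } };
        struct map *m = find_map(maplist, 1, 0x1000);

        printf("found map at offset %#lx\n", m ? m->offset : 0UL);
        return 0;
}
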
diff --git a/drivers/char/drm/ffb_drv.c b/drivers/char/drm/ffb_drv.c
index ec614fff8f04..1bd0d55ee0f0 100644
--- a/drivers/char/drm/ffb_drv.c
+++ b/drivers/char/drm/ffb_drv.c
@@ -152,14 +152,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
152 return NULL; 152 return NULL;
153 153
154 list_for_each(list, &dev->maplist->head) { 154 list_for_each(list, &dev->maplist->head) {
155 unsigned long uoff;
156
157 r_list = (drm_map_list_t *)list; 155 r_list = (drm_map_list_t *)list;
158 map = r_list->map; 156 map = r_list->map;
159 if (!map) 157 if (!map)
160 continue; 158 continue;
161 uoff = (map->offset & 0xffffffff); 159 if (r_list->user_token == off)
162 if (uoff == off)
163 return map; 160 return map;
164 } 161 }
165 162
diff --git a/drivers/char/drm/gamma_context.h b/drivers/char/drm/gamma_context.h
deleted file mode 100644
index d11b507f87ee..000000000000
--- a/drivers/char/drm/gamma_context.h
+++ /dev/null
@@ -1,492 +0,0 @@
1/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
2 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
3 *
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 * ChangeLog:
31 * 2001-11-16 Torsten Duwe <duwe@caldera.de>
32 * added context constructor/destructor hooks,
33 * needed by SiS driver's memory management.
34 */
35
36/* ================================================================
37 * Old-style context support -- only used by gamma.
38 */
39
40
41/* The drm_read and drm_write_string code (especially that which manages
42 the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
43 DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
44
45ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
46{
47 drm_file_t *priv = filp->private_data;
48 drm_device_t *dev = priv->dev;
49 int left;
50 int avail;
51 int send;
52 int cur;
53
54 DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
55
56 while (dev->buf_rp == dev->buf_wp) {
57 DRM_DEBUG(" sleeping\n");
58 if (filp->f_flags & O_NONBLOCK) {
59 return -EAGAIN;
60 }
61 interruptible_sleep_on(&dev->buf_readers);
62 if (signal_pending(current)) {
63 DRM_DEBUG(" interrupted\n");
64 return -ERESTARTSYS;
65 }
66 DRM_DEBUG(" awake\n");
67 }
68
69 left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
70 avail = DRM_BSZ - left;
71 send = DRM_MIN(avail, count);
72
73 while (send) {
74 if (dev->buf_wp > dev->buf_rp) {
75 cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
76 } else {
77 cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
78 }
79 if (copy_to_user(buf, dev->buf_rp, cur))
80 return -EFAULT;
81 dev->buf_rp += cur;
82 if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
83 send -= cur;
84 }
85
86 wake_up_interruptible(&dev->buf_writers);
87 return DRM_MIN(avail, count);
88}
89
90
91/* In an incredibly convoluted setup, the kernel module actually calls
92 * back into the X server to perform context switches on behalf of the
93 * 3d clients.
94 */
95int DRM(write_string)(drm_device_t *dev, const char *s)
96{
97 int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
98 int send = strlen(s);
99 int count;
100
101 DRM_DEBUG("%d left, %d to send (%p, %p)\n",
102 left, send, dev->buf_rp, dev->buf_wp);
103
104 if (left == 1 || dev->buf_wp != dev->buf_rp) {
105 DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
106 left,
107 dev->buf_wp,
108 dev->buf_rp);
109 }
110
111 while (send) {
112 if (dev->buf_wp >= dev->buf_rp) {
113 count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
114 if (count == left) --count; /* Leave a hole */
115 } else {
116 count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
117 }
118 strncpy(dev->buf_wp, s, count);
119 dev->buf_wp += count;
120 if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
121 send -= count;
122 }
123
124 if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
125
126 DRM_DEBUG("waking\n");
127 wake_up_interruptible(&dev->buf_readers);
128 return 0;
129}
130
131unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
132{
133 drm_file_t *priv = filp->private_data;
134 drm_device_t *dev = priv->dev;
135
136 poll_wait(filp, &dev->buf_readers, wait);
137 if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
138 return 0;
139}
140
141int DRM(context_switch)(drm_device_t *dev, int old, int new)
142{
143 char buf[64];
144 drm_queue_t *q;
145
146 if (test_and_set_bit(0, &dev->context_flag)) {
147 DRM_ERROR("Reentering -- FIXME\n");
148 return -EBUSY;
149 }
150
151 DRM_DEBUG("Context switch from %d to %d\n", old, new);
152
153 if (new >= dev->queue_count) {
154 clear_bit(0, &dev->context_flag);
155 return -EINVAL;
156 }
157
158 if (new == dev->last_context) {
159 clear_bit(0, &dev->context_flag);
160 return 0;
161 }
162
163 q = dev->queuelist[new];
164 atomic_inc(&q->use_count);
165 if (atomic_read(&q->use_count) == 1) {
166 atomic_dec(&q->use_count);
167 clear_bit(0, &dev->context_flag);
168 return -EINVAL;
169 }
170
171 /* This causes the X server to wake up & do a bunch of hardware
172 * interaction to actually effect the context switch.
173 */
174 sprintf(buf, "C %d %d\n", old, new);
175 DRM(write_string)(dev, buf);
176
177 atomic_dec(&q->use_count);
178
179 return 0;
180}
181
182int DRM(context_switch_complete)(drm_device_t *dev, int new)
183{
184 drm_device_dma_t *dma = dev->dma;
185
186 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
187 dev->last_switch = jiffies;
188
189 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
190 DRM_ERROR("Lock isn't held after context switch\n");
191 }
192
193 if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
194 if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
195 DRM_KERNEL_CONTEXT)) {
196 DRM_ERROR("Cannot free lock\n");
197 }
198 }
199
200 clear_bit(0, &dev->context_flag);
201 wake_up_interruptible(&dev->context_wait);
202
203 return 0;
204}
205
206static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
207{
208 DRM_DEBUG("\n");
209
210 if (atomic_read(&q->use_count) != 1
211 || atomic_read(&q->finalization)
212 || atomic_read(&q->block_count)) {
213 DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
214 atomic_read(&q->use_count),
215 atomic_read(&q->finalization),
216 atomic_read(&q->block_count));
217 }
218
219 atomic_set(&q->finalization, 0);
220 atomic_set(&q->block_count, 0);
221 atomic_set(&q->block_read, 0);
222 atomic_set(&q->block_write, 0);
223 atomic_set(&q->total_queued, 0);
224 atomic_set(&q->total_flushed, 0);
225 atomic_set(&q->total_locks, 0);
226
227 init_waitqueue_head(&q->write_queue);
228 init_waitqueue_head(&q->read_queue);
229 init_waitqueue_head(&q->flush_queue);
230
231 q->flags = ctx->flags;
232
233 DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
234
235 return 0;
236}
237
238
239/* drm_alloc_queue:
240PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
241 disappear (so all deallocation must be done after IOCTLs are off)
242 2) dev->queue_count < dev->queue_slots
243 3) dev->queuelist[i].use_count == 0 and
244 dev->queuelist[i].finalization == 0 if i not in use
245POST: 1) dev->queuelist[i].use_count == 1
246 2) dev->queue_count < dev->queue_slots */
247
248static int DRM(alloc_queue)(drm_device_t *dev)
249{
250 int i;
251 drm_queue_t *queue;
252 int oldslots;
253 int newslots;
254 /* Check for a free queue */
255 for (i = 0; i < dev->queue_count; i++) {
256 atomic_inc(&dev->queuelist[i]->use_count);
257 if (atomic_read(&dev->queuelist[i]->use_count) == 1
258 && !atomic_read(&dev->queuelist[i]->finalization)) {
259 DRM_DEBUG("%d (free)\n", i);
260 return i;
261 }
262 atomic_dec(&dev->queuelist[i]->use_count);
263 }
264 /* Allocate a new queue */
265 down(&dev->struct_sem);
266
267 queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
268 memset(queue, 0, sizeof(*queue));
269 atomic_set(&queue->use_count, 1);
270
271 ++dev->queue_count;
272 if (dev->queue_count >= dev->queue_slots) {
273 oldslots = dev->queue_slots * sizeof(*dev->queuelist);
274 if (!dev->queue_slots) dev->queue_slots = 1;
275 dev->queue_slots *= 2;
276 newslots = dev->queue_slots * sizeof(*dev->queuelist);
277
278 dev->queuelist = DRM(realloc)(dev->queuelist,
279 oldslots,
280 newslots,
281 DRM_MEM_QUEUES);
282 if (!dev->queuelist) {
283 up(&dev->struct_sem);
284 DRM_DEBUG("out of memory\n");
285 return -ENOMEM;
286 }
287 }
288 dev->queuelist[dev->queue_count-1] = queue;
289
290 up(&dev->struct_sem);
291 DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
292 return dev->queue_count - 1;
293}
294
295int DRM(resctx)(struct inode *inode, struct file *filp,
296 unsigned int cmd, unsigned long arg)
297{
298 drm_ctx_res_t __user *argp = (void __user *)arg;
299 drm_ctx_res_t res;
300 drm_ctx_t ctx;
301 int i;
302
303 DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
304 if (copy_from_user(&res, argp, sizeof(res)))
305 return -EFAULT;
306 if (res.count >= DRM_RESERVED_CONTEXTS) {
307 memset(&ctx, 0, sizeof(ctx));
308 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
309 ctx.handle = i;
310 if (copy_to_user(&res.contexts[i],
311 &i,
312 sizeof(i)))
313 return -EFAULT;
314 }
315 }
316 res.count = DRM_RESERVED_CONTEXTS;
317 if (copy_to_user(argp, &res, sizeof(res)))
318 return -EFAULT;
319 return 0;
320}
321
322int DRM(addctx)(struct inode *inode, struct file *filp,
323 unsigned int cmd, unsigned long arg)
324{
325 drm_file_t *priv = filp->private_data;
326 drm_device_t *dev = priv->dev;
327 drm_ctx_t ctx;
328 drm_ctx_t __user *argp = (void __user *)arg;
329
330 if (copy_from_user(&ctx, argp, sizeof(ctx)))
331 return -EFAULT;
332 if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
333 /* Init kernel's context and get a new one. */
334 DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
335 ctx.handle = DRM(alloc_queue)(dev);
336 }
337 DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
338 DRM_DEBUG("%d\n", ctx.handle);
339 if (copy_to_user(argp, &ctx, sizeof(ctx)))
340 return -EFAULT;
341 return 0;
342}
343
344int DRM(modctx)(struct inode *inode, struct file *filp,
345 unsigned int cmd, unsigned long arg)
346{
347 drm_file_t *priv = filp->private_data;
348 drm_device_t *dev = priv->dev;
349 drm_ctx_t ctx;
350 drm_queue_t *q;
351
352 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
353 return -EFAULT;
354
355 DRM_DEBUG("%d\n", ctx.handle);
356
357 if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
358 q = dev->queuelist[ctx.handle];
359
360 atomic_inc(&q->use_count);
361 if (atomic_read(&q->use_count) == 1) {
362 /* No longer in use */
363 atomic_dec(&q->use_count);
364 return -EINVAL;
365 }
366
367 if (DRM_BUFCOUNT(&q->waitlist)) {
368 atomic_dec(&q->use_count);
369 return -EBUSY;
370 }
371
372 q->flags = ctx.flags;
373
374 atomic_dec(&q->use_count);
375 return 0;
376}
377
378int DRM(getctx)(struct inode *inode, struct file *filp,
379 unsigned int cmd, unsigned long arg)
380{
381 drm_file_t *priv = filp->private_data;
382 drm_device_t *dev = priv->dev;
383 drm_ctx_t __user *argp = (void __user *)arg;
384 drm_ctx_t ctx;
385 drm_queue_t *q;
386
387 if (copy_from_user(&ctx, argp, sizeof(ctx)))
388 return -EFAULT;
389
390 DRM_DEBUG("%d\n", ctx.handle);
391
392 if (ctx.handle >= dev->queue_count) return -EINVAL;
393 q = dev->queuelist[ctx.handle];
394
395 atomic_inc(&q->use_count);
396 if (atomic_read(&q->use_count) == 1) {
397 /* No longer in use */
398 atomic_dec(&q->use_count);
399 return -EINVAL;
400 }
401
402 ctx.flags = q->flags;
403 atomic_dec(&q->use_count);
404
405 if (copy_to_user(argp, &ctx, sizeof(ctx)))
406 return -EFAULT;
407
408 return 0;
409}
410
411int DRM(switchctx)(struct inode *inode, struct file *filp,
412 unsigned int cmd, unsigned long arg)
413{
414 drm_file_t *priv = filp->private_data;
415 drm_device_t *dev = priv->dev;
416 drm_ctx_t ctx;
417
418 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
419 return -EFAULT;
420 DRM_DEBUG("%d\n", ctx.handle);
421 return DRM(context_switch)(dev, dev->last_context, ctx.handle);
422}
423
424int DRM(newctx)(struct inode *inode, struct file *filp,
425 unsigned int cmd, unsigned long arg)
426{
427 drm_file_t *priv = filp->private_data;
428 drm_device_t *dev = priv->dev;
429 drm_ctx_t ctx;
430
431 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
432 return -EFAULT;
433 DRM_DEBUG("%d\n", ctx.handle);
434 DRM(context_switch_complete)(dev, ctx.handle);
435
436 return 0;
437}
438
439int DRM(rmctx)(struct inode *inode, struct file *filp,
440 unsigned int cmd, unsigned long arg)
441{
442 drm_file_t *priv = filp->private_data;
443 drm_device_t *dev = priv->dev;
444 drm_ctx_t ctx;
445 drm_queue_t *q;
446 drm_buf_t *buf;
447
448 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
449 return -EFAULT;
450 DRM_DEBUG("%d\n", ctx.handle);
451
452 if (ctx.handle >= dev->queue_count) return -EINVAL;
453 q = dev->queuelist[ctx.handle];
454
455 atomic_inc(&q->use_count);
456 if (atomic_read(&q->use_count) == 1) {
457 /* No longer in use */
458 atomic_dec(&q->use_count);
459 return -EINVAL;
460 }
461
462 atomic_inc(&q->finalization); /* Mark queue in finalization state */
463 atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
464 finalization) */
465
466 while (test_and_set_bit(0, &dev->interrupt_flag)) {
467 schedule();
468 if (signal_pending(current)) {
469 clear_bit(0, &dev->interrupt_flag);
470 return -EINTR;
471 }
472 }
473 /* Remove queued buffers */
474 while ((buf = DRM(waitlist_get)(&q->waitlist))) {
475 DRM(free_buffer)(dev, buf);
476 }
477 clear_bit(0, &dev->interrupt_flag);
478
479 /* Wakeup blocked processes */
480 wake_up_interruptible(&q->read_queue);
481 wake_up_interruptible(&q->write_queue);
482 wake_up_interruptible(&q->flush_queue);
483
484 /* Finalization over. Queue is made
485 available when both use_count and
486 finalization become 0, which won't
487 happen until all the waiting processes
488 stop waiting. */
489 atomic_dec(&q->finalization);
490 return 0;
491}
492
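
gamma_context.h, deleted here with the rest of the gamma driver, held the old-style context code: a small circular buffer that gamma_fops_read() and DRM(write_string)() used to pass context-switch requests back to the X server, plus the queue-based context ioctls. The ring kept one slot free so that the write pointer could never catch the read pointer. A simplified standalone sketch of that index arithmetic (BSZ and the helpers are illustrative, not the driver's DRM_BSZ code verbatim):

#include <stdio.h>

/* Classic "leave one slot empty" ring with modular read/write indices. */
#define BSZ 16

static char buf[BSZ];
static int rp, wp;               /* read / write indices */

static int bytes_free(void)  { return (rp + BSZ - wp - 1) % BSZ; }
static int bytes_ready(void) { return (wp + BSZ - rp) % BSZ; }

static void ring_write(const char *s)
{
        while (*s && bytes_free() > 0) {
                buf[wp] = *s++;
                wp = (wp + 1) % BSZ;
        }
}

static void ring_read(char *out, int max)
{
        int n = 0;

        while (bytes_ready() > 0 && n < max - 1) {
                out[n++] = buf[rp];
                rp = (rp + 1) % BSZ;
        }
        out[n] = '\0';
}

int main(void)
{
        char out[BSZ];

        ring_write("C 1 2\n");   /* the driver queued context-switch strings like this */
        ring_read(out, sizeof(out));
        printf("read back: %s", out);
        return 0;
}
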
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c
deleted file mode 100644
index e486fb8d31e9..000000000000
--- a/drivers/char/drm/gamma_dma.c
+++ /dev/null
@@ -1,946 +0,0 @@
1/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32#include "gamma.h"
33#include "drmP.h"
34#include "drm.h"
35#include "gamma_drm.h"
36#include "gamma_drv.h"
37
38#include <linux/interrupt.h> /* For task queue support */
39#include <linux/delay.h>
40
41static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
42 unsigned long length)
43{
44 drm_gamma_private_t *dev_priv =
45 (drm_gamma_private_t *)dev->dev_private;
46 mb();
47 while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
48 cpu_relax();
49
50 GAMMA_WRITE(GAMMA_DMAADDRESS, address);
51
52 while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
53 cpu_relax();
54
55 GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
56}
57
58void gamma_dma_quiescent_single(drm_device_t *dev)
59{
60 drm_gamma_private_t *dev_priv =
61 (drm_gamma_private_t *)dev->dev_private;
62 while (GAMMA_READ(GAMMA_DMACOUNT))
63 cpu_relax();
64
65 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
66 cpu_relax();
67
68 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
69 GAMMA_WRITE(GAMMA_SYNC, 0);
70
71 do {
72 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
73 cpu_relax();
74 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
75}
76
77void gamma_dma_quiescent_dual(drm_device_t *dev)
78{
79 drm_gamma_private_t *dev_priv =
80 (drm_gamma_private_t *)dev->dev_private;
81 while (GAMMA_READ(GAMMA_DMACOUNT))
82 cpu_relax();
83
84 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
85 cpu_relax();
86
87 GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
88 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
89 GAMMA_WRITE(GAMMA_SYNC, 0);
90
91 /* Read from first MX */
92 do {
93 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
94 cpu_relax();
95 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
96
97 /* Read from second MX */
98 do {
99 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
100 cpu_relax();
101 } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
102}
103
104void gamma_dma_ready(drm_device_t *dev)
105{
106 drm_gamma_private_t *dev_priv =
107 (drm_gamma_private_t *)dev->dev_private;
108 while (GAMMA_READ(GAMMA_DMACOUNT))
109 cpu_relax();
110}
111
112static inline int gamma_dma_is_ready(drm_device_t *dev)
113{
114 drm_gamma_private_t *dev_priv =
115 (drm_gamma_private_t *)dev->dev_private;
116 return (!GAMMA_READ(GAMMA_DMACOUNT));
117}
118
119irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
120{
121 drm_device_t *dev = (drm_device_t *)arg;
122 drm_device_dma_t *dma = dev->dma;
123 drm_gamma_private_t *dev_priv =
124 (drm_gamma_private_t *)dev->dev_private;
125
126 /* FIXME: should check whether we're actually interested in the interrupt? */
127 atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
128
129 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
130 cpu_relax();
131
132 GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
133 GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
134 GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
135 if (gamma_dma_is_ready(dev)) {
136 /* Free previous buffer */
137 if (test_and_set_bit(0, &dev->dma_flag))
138 return IRQ_HANDLED;
139 if (dma->this_buffer) {
140 gamma_free_buffer(dev, dma->this_buffer);
141 dma->this_buffer = NULL;
142 }
143 clear_bit(0, &dev->dma_flag);
144
145 /* Dispatch new buffer */
146 schedule_work(&dev->work);
147 }
148 return IRQ_HANDLED;
149}
150
151/* Only called by gamma_dma_schedule. */
152static int gamma_do_dma(drm_device_t *dev, int locked)
153{
154 unsigned long address;
155 unsigned long length;
156 drm_buf_t *buf;
157 int retcode = 0;
158 drm_device_dma_t *dma = dev->dma;
159
160 if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
161
162
163 if (!dma->next_buffer) {
164 DRM_ERROR("No next_buffer\n");
165 clear_bit(0, &dev->dma_flag);
166 return -EINVAL;
167 }
168
169 buf = dma->next_buffer;
170 /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
171 /* So we pass the buffer index value into the physical page offset */
172 address = buf->idx << 12;
173 length = buf->used;
174
175 DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
176 buf->context, buf->idx, length);
177
178 if (buf->list == DRM_LIST_RECLAIM) {
179 gamma_clear_next_buffer(dev);
180 gamma_free_buffer(dev, buf);
181 clear_bit(0, &dev->dma_flag);
182 return -EINVAL;
183 }
184
185 if (!length) {
186 DRM_ERROR("0 length buffer\n");
187 gamma_clear_next_buffer(dev);
188 gamma_free_buffer(dev, buf);
189 clear_bit(0, &dev->dma_flag);
190 return 0;
191 }
192
193 if (!gamma_dma_is_ready(dev)) {
194 clear_bit(0, &dev->dma_flag);
195 return -EBUSY;
196 }
197
198 if (buf->while_locked) {
199 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
200 DRM_ERROR("Dispatching buffer %d from pid %d"
201 " \"while locked\", but no lock held\n",
202 buf->idx, current->pid);
203 }
204 } else {
205 if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
206 DRM_KERNEL_CONTEXT)) {
207 clear_bit(0, &dev->dma_flag);
208 return -EBUSY;
209 }
210 }
211
212 if (dev->last_context != buf->context
213 && !(dev->queuelist[buf->context]->flags
214 & _DRM_CONTEXT_PRESERVED)) {
215 /* PRE: dev->last_context != buf->context */
216 if (DRM(context_switch)(dev, dev->last_context,
217 buf->context)) {
218 DRM(clear_next_buffer)(dev);
219 DRM(free_buffer)(dev, buf);
220 }
221 retcode = -EBUSY;
222 goto cleanup;
223
224 /* POST: we will wait for the context
225 switch and will dispatch on a later call
226 when dev->last_context == buf->context.
227 NOTE WE HOLD THE LOCK THROUGHOUT THIS
228 TIME! */
229 }
230
231 gamma_clear_next_buffer(dev);
232 buf->pending = 1;
233 buf->waiting = 0;
234 buf->list = DRM_LIST_PEND;
235
236 /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
237 address = buf->idx << 12;
238
239 gamma_dma_dispatch(dev, address, length);
240 gamma_free_buffer(dev, dma->this_buffer);
241 dma->this_buffer = buf;
242
243 atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
244 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
245
246 if (!buf->while_locked && !dev->context_flag && !locked) {
247 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
248 DRM_KERNEL_CONTEXT)) {
249 DRM_ERROR("\n");
250 }
251 }
252cleanup:
253
254 clear_bit(0, &dev->dma_flag);
255
256
257 return retcode;
258}
259
260static void gamma_dma_timer_bh(unsigned long dev)
261{
262 gamma_dma_schedule((drm_device_t *)dev, 0);
263}
264
265void gamma_irq_immediate_bh(void *dev)
266{
267 gamma_dma_schedule(dev, 0);
268}
269
270int gamma_dma_schedule(drm_device_t *dev, int locked)
271{
272 int next;
273 drm_queue_t *q;
274 drm_buf_t *buf;
275 int retcode = 0;
276 int processed = 0;
277 int missed;
278 int expire = 20;
279 drm_device_dma_t *dma = dev->dma;
280
281 if (test_and_set_bit(0, &dev->interrupt_flag)) {
282 /* Not reentrant */
283 atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
284 return -EBUSY;
285 }
286 missed = atomic_read(&dev->counts[10]);
287
288
289again:
290 if (dev->context_flag) {
291 clear_bit(0, &dev->interrupt_flag);
292 return -EBUSY;
293 }
294 if (dma->next_buffer) {
295 /* Unsent buffer that was previously
296 selected, but that couldn't be sent
297 because the lock could not be obtained
298 or the DMA engine wasn't ready. Try
299 again. */
300 if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
301 } else {
302 do {
303 next = gamma_select_queue(dev, gamma_dma_timer_bh);
304 if (next >= 0) {
305 q = dev->queuelist[next];
306 buf = gamma_waitlist_get(&q->waitlist);
307 dma->next_buffer = buf;
308 dma->next_queue = q;
309 if (buf && buf->list == DRM_LIST_RECLAIM) {
310 gamma_clear_next_buffer(dev);
311 gamma_free_buffer(dev, buf);
312 }
313 }
314 } while (next >= 0 && !dma->next_buffer);
315 if (dma->next_buffer) {
316 if (!(retcode = gamma_do_dma(dev, locked))) {
317 ++processed;
318 }
319 }
320 }
321
322 if (--expire) {
323 if (missed != atomic_read(&dev->counts[10])) {
324 if (gamma_dma_is_ready(dev)) goto again;
325 }
326 if (processed && gamma_dma_is_ready(dev)) {
327 processed = 0;
328 goto again;
329 }
330 }
331
332 clear_bit(0, &dev->interrupt_flag);
333
334 return retcode;
335}
336
337static int gamma_dma_priority(struct file *filp,
338 drm_device_t *dev, drm_dma_t *d)
339{
340 unsigned long address;
341 unsigned long length;
342 int must_free = 0;
343 int retcode = 0;
344 int i;
345 int idx;
346 drm_buf_t *buf;
347 drm_buf_t *last_buf = NULL;
348 drm_device_dma_t *dma = dev->dma;
349 int *send_indices = NULL;
350 int *send_sizes = NULL;
351
352 DECLARE_WAITQUEUE(entry, current);
353
354 /* Turn off interrupt handling */
355 while (test_and_set_bit(0, &dev->interrupt_flag)) {
356 schedule();
357 if (signal_pending(current)) return -EINTR;
358 }
359 if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
360 while (!gamma_lock_take(&dev->lock.hw_lock->lock,
361 DRM_KERNEL_CONTEXT)) {
362 schedule();
363 if (signal_pending(current)) {
364 clear_bit(0, &dev->interrupt_flag);
365 return -EINTR;
366 }
367 }
368 ++must_free;
369 }
370
371 send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
372 DRM_MEM_DRIVER);
373 if (send_indices == NULL)
374 return -ENOMEM;
375 if (copy_from_user(send_indices, d->send_indices,
376 d->send_count * sizeof(*send_indices))) {
377 retcode = -EFAULT;
378 goto cleanup;
379 }
380
381 send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
382 DRM_MEM_DRIVER);
383 if (send_sizes == NULL)
384 return -ENOMEM;
385 if (copy_from_user(send_sizes, d->send_sizes,
386 d->send_count * sizeof(*send_sizes))) {
387 retcode = -EFAULT;
388 goto cleanup;
389 }
390
391 for (i = 0; i < d->send_count; i++) {
392 idx = send_indices[i];
393 if (idx < 0 || idx >= dma->buf_count) {
394 DRM_ERROR("Index %d (of %d max)\n",
395 send_indices[i], dma->buf_count - 1);
396 continue;
397 }
398 buf = dma->buflist[ idx ];
399 if (buf->filp != filp) {
400 DRM_ERROR("Process %d using buffer not owned\n",
401 current->pid);
402 retcode = -EINVAL;
403 goto cleanup;
404 }
405 if (buf->list != DRM_LIST_NONE) {
406 DRM_ERROR("Process %d using buffer on list %d\n",
407 current->pid, buf->list);
408 retcode = -EINVAL;
409 goto cleanup;
410 }
411 /* This isn't a race condition on
412 buf->list, since our concern is the
413 buffer reclaim during the time the
414 process closes the /dev/drm? handle, so
415 it can't also be doing DMA. */
416 buf->list = DRM_LIST_PRIO;
417 buf->used = send_sizes[i];
418 buf->context = d->context;
419 buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
420 address = (unsigned long)buf->address;
421 length = buf->used;
422 if (!length) {
423 DRM_ERROR("0 length buffer\n");
424 }
425 if (buf->pending) {
426 DRM_ERROR("Sending pending buffer:"
427 " buffer %d, offset %d\n",
428 send_indices[i], i);
429 retcode = -EINVAL;
430 goto cleanup;
431 }
432 if (buf->waiting) {
433 DRM_ERROR("Sending waiting buffer:"
434 " buffer %d, offset %d\n",
435 send_indices[i], i);
436 retcode = -EINVAL;
437 goto cleanup;
438 }
439 buf->pending = 1;
440
441 if (dev->last_context != buf->context
442 && !(dev->queuelist[buf->context]->flags
443 & _DRM_CONTEXT_PRESERVED)) {
444 add_wait_queue(&dev->context_wait, &entry);
445 current->state = TASK_INTERRUPTIBLE;
446 /* PRE: dev->last_context != buf->context */
447 DRM(context_switch)(dev, dev->last_context,
448 buf->context);
449 /* POST: we will wait for the context
450 switch and will dispatch on a later call
451 when dev->last_context == buf->context.
452 NOTE WE HOLD THE LOCK THROUGHOUT THIS
453 TIME! */
454 schedule();
455 current->state = TASK_RUNNING;
456 remove_wait_queue(&dev->context_wait, &entry);
457 if (signal_pending(current)) {
458 retcode = -EINTR;
459 goto cleanup;
460 }
461 if (dev->last_context != buf->context) {
462 DRM_ERROR("Context mismatch: %d %d\n",
463 dev->last_context,
464 buf->context);
465 }
466 }
467
468 gamma_dma_dispatch(dev, address, length);
469 atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
470 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
471
472 if (last_buf) {
473 gamma_free_buffer(dev, last_buf);
474 }
475 last_buf = buf;
476 }
477
478
479cleanup:
480 if (last_buf) {
481 gamma_dma_ready(dev);
482 gamma_free_buffer(dev, last_buf);
483 }
484 if (send_indices)
485 DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
486 DRM_MEM_DRIVER);
487 if (send_sizes)
488 DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
489 DRM_MEM_DRIVER);
490
491 if (must_free && !dev->context_flag) {
492 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
493 DRM_KERNEL_CONTEXT)) {
494 DRM_ERROR("\n");
495 }
496 }
497 clear_bit(0, &dev->interrupt_flag);
498 return retcode;
499}
500
501static int gamma_dma_send_buffers(struct file *filp,
502 drm_device_t *dev, drm_dma_t *d)
503{
504 DECLARE_WAITQUEUE(entry, current);
505 drm_buf_t *last_buf = NULL;
506 int retcode = 0;
507 drm_device_dma_t *dma = dev->dma;
508 int send_index;
509
510 if (get_user(send_index, &d->send_indices[d->send_count-1]))
511 return -EFAULT;
512
513 if (d->flags & _DRM_DMA_BLOCK) {
514 last_buf = dma->buflist[send_index];
515 add_wait_queue(&last_buf->dma_wait, &entry);
516 }
517
518 if ((retcode = gamma_dma_enqueue(filp, d))) {
519 if (d->flags & _DRM_DMA_BLOCK)
520 remove_wait_queue(&last_buf->dma_wait, &entry);
521 return retcode;
522 }
523
524 gamma_dma_schedule(dev, 0);
525
526 if (d->flags & _DRM_DMA_BLOCK) {
527 DRM_DEBUG("%d waiting\n", current->pid);
528 for (;;) {
529 current->state = TASK_INTERRUPTIBLE;
530 if (!last_buf->waiting && !last_buf->pending)
531 break; /* finished */
532 schedule();
533 if (signal_pending(current)) {
534 retcode = -EINTR; /* Can't restart */
535 break;
536 }
537 }
538 current->state = TASK_RUNNING;
539 DRM_DEBUG("%d running\n", current->pid);
540 remove_wait_queue(&last_buf->dma_wait, &entry);
541 if (!retcode
542 || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
543 if (!waitqueue_active(&last_buf->dma_wait)) {
544 gamma_free_buffer(dev, last_buf);
545 }
546 }
547 if (retcode) {
548 DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
549 d->context,
550 last_buf->waiting,
551 last_buf->pending,
552 (long)DRM_WAITCOUNT(dev, d->context),
553 last_buf->idx,
554 last_buf->list,
555 current->pid);
556 }
557 }
558 return retcode;
559}
560
561int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
562 unsigned long arg)
563{
564 drm_file_t *priv = filp->private_data;
565 drm_device_t *dev = priv->dev;
566 drm_device_dma_t *dma = dev->dma;
567 int retcode = 0;
568 drm_dma_t __user *argp = (void __user *)arg;
569 drm_dma_t d;
570
571 if (copy_from_user(&d, argp, sizeof(d)))
572 return -EFAULT;
573
574 if (d.send_count < 0 || d.send_count > dma->buf_count) {
575 DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
576 current->pid, d.send_count, dma->buf_count);
577 return -EINVAL;
578 }
579
580 if (d.request_count < 0 || d.request_count > dma->buf_count) {
581 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
582 current->pid, d.request_count, dma->buf_count);
583 return -EINVAL;
584 }
585
586 if (d.send_count) {
587 if (d.flags & _DRM_DMA_PRIORITY)
588 retcode = gamma_dma_priority(filp, dev, &d);
589 else
590 retcode = gamma_dma_send_buffers(filp, dev, &d);
591 }
592
593 d.granted_count = 0;
594
595 if (!retcode && d.request_count) {
596 retcode = gamma_dma_get_buffers(filp, &d);
597 }
598
599 DRM_DEBUG("%d returning, granted = %d\n",
600 current->pid, d.granted_count);
601 if (copy_to_user(argp, &d, sizeof(d)))
602 return -EFAULT;
603
604 return retcode;
605}
606
607/* =============================================================
608 * DMA initialization, cleanup
609 */
610
611static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
612{
613 drm_gamma_private_t *dev_priv;
614 drm_device_dma_t *dma = dev->dma;
615 drm_buf_t *buf;
616 int i;
617 struct list_head *list;
618 unsigned long *pgt;
619
620 DRM_DEBUG( "%s\n", __FUNCTION__ );
621
622 dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
623 DRM_MEM_DRIVER );
624 if ( !dev_priv )
625 return -ENOMEM;
626
627 dev->dev_private = (void *)dev_priv;
628
629 memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
630
631 dev_priv->num_rast = init->num_rast;
632
633 list_for_each(list, &dev->maplist->head) {
634 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
635 if( r_list->map &&
636 r_list->map->type == _DRM_SHM &&
637 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
638 dev_priv->sarea = r_list->map;
639 break;
640 }
641 }
642
643 dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
644 dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
645 dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
646 dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
647
648 dev_priv->sarea_priv = (drm_gamma_sarea_t *)
649 ((u8 *)dev_priv->sarea->handle +
650 init->sarea_priv_offset);
651
652 if (init->pcimode) {
653 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
654 pgt = buf->address;
655
656 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
657 buf = dma->buflist[i];
658 *pgt = virt_to_phys((void*)buf->address) | 0x07;
659 pgt++;
660 }
661
662 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
663 } else {
664 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
665 drm_core_ioremap( dev->agp_buffer_map, dev);
666
667 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
668 pgt = buf->address;
669
670 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
671 buf = dma->buflist[i];
672 *pgt = (unsigned long)buf->address + 0x07;
673 pgt++;
674 }
675
676 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
677
678 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1);
679 GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
680 }
681 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2);
682 GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
683 GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
684
685 return 0;
686}
687
688int gamma_do_cleanup_dma( drm_device_t *dev )
689{
690 DRM_DEBUG( "%s\n", __FUNCTION__ );
691
692 /* Make sure interrupts are disabled here because the uninstall ioctl
693 * may not have been called from userspace and after dev_private
694 * is freed, it's too late.
695 */
696 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
697 if ( dev->irq_enabled )
698 DRM(irq_uninstall)(dev);
699
700 if ( dev->dev_private ) {
701
702 if ( dev->agp_buffer_map != NULL )
703 drm_core_ioremapfree( dev->agp_buffer_map, dev );
704
705 DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
706 DRM_MEM_DRIVER );
707 dev->dev_private = NULL;
708 }
709
710 return 0;
711}
712
713int gamma_dma_init( struct inode *inode, struct file *filp,
714 unsigned int cmd, unsigned long arg )
715{
716 drm_file_t *priv = filp->private_data;
717 drm_device_t *dev = priv->dev;
718 drm_gamma_init_t init;
719
720 LOCK_TEST_WITH_RETURN( dev, filp );
721
722 if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
723 return -EFAULT;
724
725 switch ( init.func ) {
726 case GAMMA_INIT_DMA:
727 return gamma_do_init_dma( dev, &init );
728 case GAMMA_CLEANUP_DMA:
729 return gamma_do_cleanup_dma( dev );
730 }
731
732 return -EINVAL;
733}
734
735static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
736{
737 drm_device_dma_t *dma = dev->dma;
738 unsigned int *screenbuf;
739
740 DRM_DEBUG( "%s\n", __FUNCTION__ );
741
742 /* We've DRM_RESTRICTED this DMA buffer */
743
744 screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
745
746#if 0
747 *buffer++ = 0x180; /* Tag (FilterMode) */
748 *buffer++ = 0x200; /* Allow FBColor through */
749 *buffer++ = 0x53B; /* Tag */
750 *buffer++ = copy->Pitch;
751 *buffer++ = 0x53A; /* Tag */
752 *buffer++ = copy->SrcAddress;
753 *buffer++ = 0x539; /* Tag */
754 *buffer++ = copy->WidthHeight; /* Initiates transfer */
755 *buffer++ = 0x53C; /* Tag - DMAOutputAddress */
756 *buffer++ = virt_to_phys((void*)screenbuf);
757 *buffer++ = 0x53D; /* Tag - DMAOutputCount */
758 *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
759
760 /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
761 /* Now put it back to the screen */
762
763 *buffer++ = 0x180; /* Tag (FilterMode) */
764 *buffer++ = 0x400; /* Allow Sync through */
765 *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */
766 *buffer++ = 0x155; /* FBSourceData | count */
767 *buffer++ = 0x537; /* Tag */
768 *buffer++ = copy->Pitch;
769 *buffer++ = 0x536; /* Tag */
770 *buffer++ = copy->DstAddress;
771 *buffer++ = 0x535; /* Tag */
772 *buffer++ = copy->WidthHeight; /* Initiates transfer */
773 *buffer++ = 0x530; /* Tag - DMAAddr */
774 *buffer++ = virt_to_phys((void*)screenbuf);
775 *buffer++ = 0x531;
776 *buffer++ = copy->Count; /* initiates DMA transfer of color data */
777#endif
778
779 /* need to dispatch it now */
780
781 return 0;
782}
783
784int gamma_dma_copy( struct inode *inode, struct file *filp,
785 unsigned int cmd, unsigned long arg )
786{
787 drm_file_t *priv = filp->private_data;
788 drm_device_t *dev = priv->dev;
789 drm_gamma_copy_t copy;
790
791 if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
792 return -EFAULT;
793
794 return gamma_do_copy_dma( dev, &copy );
795}
796
797/* =============================================================
798 * Per Context SAREA Support
799 */
800
801int gamma_getsareactx(struct inode *inode, struct file *filp,
802 unsigned int cmd, unsigned long arg)
803{
804 drm_file_t *priv = filp->private_data;
805 drm_device_t *dev = priv->dev;
806 drm_ctx_priv_map_t __user *argp = (void __user *)arg;
807 drm_ctx_priv_map_t request;
808 drm_map_t *map;
809
810 if (copy_from_user(&request, argp, sizeof(request)))
811 return -EFAULT;
812
813 down(&dev->struct_sem);
814 if ((int)request.ctx_id >= dev->max_context) {
815 up(&dev->struct_sem);
816 return -EINVAL;
817 }
818
819 map = dev->context_sareas[request.ctx_id];
820 up(&dev->struct_sem);
821
822 request.handle = map->handle;
823 if (copy_to_user(argp, &request, sizeof(request)))
824 return -EFAULT;
825 return 0;
826}
827
828int gamma_setsareactx(struct inode *inode, struct file *filp,
829 unsigned int cmd, unsigned long arg)
830{
831 drm_file_t *priv = filp->private_data;
832 drm_device_t *dev = priv->dev;
833 drm_ctx_priv_map_t request;
834 drm_map_t *map = NULL;
835 drm_map_list_t *r_list;
836 struct list_head *list;
837
838 if (copy_from_user(&request,
839 (drm_ctx_priv_map_t __user *)arg,
840 sizeof(request)))
841 return -EFAULT;
842
843 down(&dev->struct_sem);
844 r_list = NULL;
845 list_for_each(list, &dev->maplist->head) {
846 r_list = list_entry(list, drm_map_list_t, head);
847 if(r_list->map &&
848 r_list->map->handle == request.handle) break;
849 }
850 if (list == &(dev->maplist->head)) {
851 up(&dev->struct_sem);
852 return -EINVAL;
853 }
854 map = r_list->map;
855 up(&dev->struct_sem);
856
857 if (!map) return -EINVAL;
858
859 down(&dev->struct_sem);
860 if ((int)request.ctx_id >= dev->max_context) {
861 up(&dev->struct_sem);
862 return -EINVAL;
863 }
864 dev->context_sareas[request.ctx_id] = map;
865 up(&dev->struct_sem);
866 return 0;
867}
868
869void gamma_driver_irq_preinstall( drm_device_t *dev ) {
870 drm_gamma_private_t *dev_priv =
871 (drm_gamma_private_t *)dev->dev_private;
872
873 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
874 cpu_relax();
875
876 GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
877 GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
878}
879
880void gamma_driver_irq_postinstall( drm_device_t *dev ) {
881 drm_gamma_private_t *dev_priv =
882 (drm_gamma_private_t *)dev->dev_private;
883
884 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
885 cpu_relax();
886
887 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
888 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
889 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
890}
891
892void gamma_driver_irq_uninstall( drm_device_t *dev ) {
893 drm_gamma_private_t *dev_priv =
894 (drm_gamma_private_t *)dev->dev_private;
895 if (!dev_priv)
896 return;
897
898 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
899 cpu_relax();
900
901 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
902 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
903 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
904}
905
906extern drm_ioctl_desc_t DRM(ioctls)[];
907
908static int gamma_driver_preinit(drm_device_t *dev)
909{
910 /* reset the finish ioctl */
911 DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
912 return 0;
913}
914
915static void gamma_driver_pretakedown(drm_device_t *dev)
916{
917 gamma_do_cleanup_dma(dev);
918}
919
920static void gamma_driver_dma_ready(drm_device_t *dev)
921{
922 gamma_dma_ready(dev);
923}
924
925static int gamma_driver_dma_quiescent(drm_device_t *dev)
926{
927 drm_gamma_private_t *dev_priv = (
928 drm_gamma_private_t *)dev->dev_private;
929 if (dev_priv->num_rast == 2)
930 gamma_dma_quiescent_dual(dev);
931 else gamma_dma_quiescent_single(dev);
932 return 0;
933}
934
935void gamma_driver_register_fns(drm_device_t *dev)
936{
937 dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
938 DRM(fops).read = gamma_fops_read;
939 DRM(fops).poll = gamma_fops_poll;
940 dev->driver.preinit = gamma_driver_preinit;
941 dev->driver.pretakedown = gamma_driver_pretakedown;
942 dev->driver.dma_ready = gamma_driver_dma_ready;
943 dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
944 dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
945 dev->driver.dma_flush_unblock = gamma_flush_unblock;
946}
diff --git a/drivers/char/drm/gamma_drm.h b/drivers/char/drm/gamma_drm.h
deleted file mode 100644
index 20819ded0e15..000000000000
--- a/drivers/char/drm/gamma_drm.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _GAMMA_DRM_H_
2#define _GAMMA_DRM_H_
3
4typedef struct _drm_gamma_tex_region {
5 unsigned char next, prev; /* indices to form a circular LRU */
6 unsigned char in_use; /* owned by a client, or free? */
7 int age; /* tracked by clients to update local LRU's */
8} drm_gamma_tex_region_t;
9
10typedef struct {
11 unsigned int GDeltaMode;
12 unsigned int GDepthMode;
13 unsigned int GGeometryMode;
14 unsigned int GTransformMode;
15} drm_gamma_context_regs_t;
16
17typedef struct _drm_gamma_sarea {
18 drm_gamma_context_regs_t context_state;
19
20 unsigned int dirty;
21
22
23 /* Maintain an LRU of contiguous regions of texture space. If
24 * you think you own a region of texture memory, and it has an
25 * age different to the one you set, then you are mistaken and
26 * it has been stolen by another client. If global texAge
27 * hasn't changed, there is no need to walk the list.
28 *
29 * These regions can be used as a proxy for the fine-grained
30 * texture information of other clients - by maintaining them
31 * in the same lru which is used to age their own textures,
32 * clients have an approximate lru for the whole of global
33 * texture space, and can make informed decisions as to which
34 * areas to kick out. There is no need to choose whether to
35 * kick out your own texture or someone else's - simply eject
36 * them all in LRU order.
37 */
38
39#define GAMMA_NR_TEX_REGIONS 64
40 drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1];
41	/* Last elt is sentinel */
42 int texAge; /* last time texture was uploaded */
43 int last_enqueue; /* last time a buffer was enqueued */
44 int last_dispatch; /* age of the most recently dispatched buffer */
45 int last_quiescent; /* */
46 int ctxOwner; /* last context to upload state */
47
48 int vertex_prim;
49} drm_gamma_sarea_t;
50
51/* WARNING: If you change any of these defines, make sure to change the
52 * defines in the Xserver file (xf86drmGamma.h)
53 */
54
55/* Gamma specific ioctls
56 * The device specific ioctl range is 0x40 to 0x79.
57 */
58#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
59#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
60
61typedef struct drm_gamma_copy {
62 unsigned int DMAOutputAddress;
63 unsigned int DMAOutputCount;
64 unsigned int DMAReadGLINTSource;
65 unsigned int DMARectangleWriteAddress;
66 unsigned int DMARectangleWriteLinePitch;
67 unsigned int DMARectangleWrite;
68 unsigned int DMARectangleReadAddress;
69 unsigned int DMARectangleReadLinePitch;
70 unsigned int DMARectangleRead;
71 unsigned int DMARectangleReadTarget;
72} drm_gamma_copy_t;
73
74typedef struct drm_gamma_init {
75 enum {
76 GAMMA_INIT_DMA = 0x01,
77 GAMMA_CLEANUP_DMA = 0x02
78 } func;
79
80 int sarea_priv_offset;
81 int pcimode;
82 unsigned int mmio0;
83 unsigned int mmio1;
84 unsigned int mmio2;
85 unsigned int mmio3;
86 unsigned int buffers_offset;
87 int num_rast;
88} drm_gamma_init_t;
89
90#endif /* _GAMMA_DRM_H_ */
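
The texList comment above describes a shared LRU of texture regions: a circular doubly linked list of small indices whose last element is a sentinel, with per-region ages that let a client notice when another client has stolen its slot. A minimal user-space sketch of that structure follows; the function names and the 4-entry size are illustrative only and are not part of the removed driver.

/* Minimal sketch of the texList LRU described in gamma_drm.h above.
 * Regions 0..NR_REGIONS-1 are texture slots; region NR_REGIONS is the
 * sentinel anchoring the circular list.  Illustrative code only.
 */
#include <stdio.h>

#define NR_REGIONS 4                 /* GAMMA_NR_TEX_REGIONS is 64 in the driver */
#define SENTINEL   NR_REGIONS        /* last element is the sentinel             */

struct tex_region {
	unsigned char next, prev;    /* indices forming the circular LRU */
	unsigned char in_use;
	int age;
};

static struct tex_region list[NR_REGIONS + 1];

static void lru_init(void)
{
	int i;

	/* chain every region into one circle through the sentinel */
	for (i = 0; i <= NR_REGIONS; i++) {
		list[i].next = (i + 1) % (NR_REGIONS + 1);
		list[i].prev = (i + NR_REGIONS) % (NR_REGIONS + 1);
		list[i].in_use = 0;
		list[i].age = 0;
	}
}

static void lru_unlink(int i)
{
	list[list[i].prev].next = list[i].next;
	list[list[i].next].prev = list[i].prev;
}

/* Mark a region most-recently-used: move it right after the sentinel
 * and stamp it with the client's current age.
 */
static void lru_touch(int i, int new_age)
{
	lru_unlink(i);
	list[i].next = list[SENTINEL].next;
	list[i].prev = SENTINEL;
	list[list[SENTINEL].next].prev = i;
	list[SENTINEL].next = i;
	list[i].age = new_age;
	list[i].in_use = 1;
}

/* Evict in LRU order: the region just before the sentinel is oldest.
 * The freed slot is recycled at the MRU end for reuse.
 */
static int lru_evict(void)
{
	int victim = list[SENTINEL].prev;

	lru_unlink(victim);
	list[victim].next = list[SENTINEL].next;
	list[victim].prev = SENTINEL;
	list[list[SENTINEL].next].prev = victim;
	list[SENTINEL].next = victim;
	list[victim].in_use = 0;
	return victim;
}

int main(void)
{
	lru_init();
	lru_touch(2, 10);
	lru_touch(0, 11);
	lru_touch(3, 12);
	printf("evicting region %d (least recently used)\n", lru_evict());
	return 0;
}
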
diff --git a/drivers/char/drm/gamma_drv.c b/drivers/char/drm/gamma_drv.c
deleted file mode 100644
index e7e64b62792a..000000000000
--- a/drivers/char/drm/gamma_drv.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include <linux/config.h>
33#include "gamma.h"
34#include "drmP.h"
35#include "drm.h"
36#include "gamma_drm.h"
37#include "gamma_drv.h"
38
39#include "drm_auth.h"
40#include "drm_agpsupport.h"
41#include "drm_bufs.h"
42#include "gamma_context.h" /* NOTE! */
43#include "drm_dma.h"
44#include "gamma_old_dma.h" /* NOTE */
45#include "drm_drawable.h"
46#include "drm_drv.h"
47
48#include "drm_fops.h"
49#include "drm_init.h"
50#include "drm_ioctl.h"
51#include "drm_irq.h"
52#include "gamma_lists.h" /* NOTE */
53#include "drm_lock.h"
54#include "gamma_lock.h" /* NOTE */
55#include "drm_memory.h"
56#include "drm_proc.h"
57#include "drm_vm.h"
58#include "drm_stub.h"
59#include "drm_scatter.h"
diff --git a/drivers/char/drm/gamma_drv.h b/drivers/char/drm/gamma_drv.h
deleted file mode 100644
index 146fcc6253cd..000000000000
--- a/drivers/char/drm/gamma_drv.h
+++ /dev/null
@@ -1,147 +0,0 @@
1/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
2 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32#ifndef _GAMMA_DRV_H_
33#define _GAMMA_DRV_H_
34
35typedef struct drm_gamma_private {
36 drm_gamma_sarea_t *sarea_priv;
37 drm_map_t *sarea;
38 drm_map_t *mmio0;
39 drm_map_t *mmio1;
40 drm_map_t *mmio2;
41 drm_map_t *mmio3;
42 int num_rast;
43} drm_gamma_private_t;
44
45 /* gamma_dma.c */
46extern int gamma_dma_init( struct inode *inode, struct file *filp,
47 unsigned int cmd, unsigned long arg );
48extern int gamma_dma_copy( struct inode *inode, struct file *filp,
49 unsigned int cmd, unsigned long arg );
50
51extern int gamma_do_cleanup_dma( drm_device_t *dev );
52extern void gamma_dma_ready(drm_device_t *dev);
53extern void gamma_dma_quiescent_single(drm_device_t *dev);
54extern void gamma_dma_quiescent_dual(drm_device_t *dev);
55
56 /* gamma_dma.c */
57extern int gamma_dma_schedule(drm_device_t *dev, int locked);
58extern int gamma_dma(struct inode *inode, struct file *filp,
59 unsigned int cmd, unsigned long arg);
60extern int gamma_find_devices(void);
61extern int gamma_found(void);
62
63/* Gamma-specific code pulled from drm_fops.h:
64 */
65extern int DRM(finish)(struct inode *inode, struct file *filp,
66 unsigned int cmd, unsigned long arg);
67extern int DRM(flush_unblock)(drm_device_t *dev, int context,
68 drm_lock_flags_t flags);
69extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
70 drm_lock_flags_t flags);
71
72/* Gamma-specific code pulled from drm_dma.h:
73 */
74extern void DRM(clear_next_buffer)(drm_device_t *dev);
75extern int DRM(select_queue)(drm_device_t *dev,
76 void (*wrapper)(unsigned long));
77extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
78extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
79
80
81/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
82 */
83extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
84extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
85extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
86extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
87extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
88extern int DRM(freelist_destroy)(drm_freelist_t *bl);
89extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
90 drm_buf_t *buf);
91extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
92
93/* externs for gamma changes to the ops */
94extern struct file_operations DRM(fops);
95extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
96extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
97
98
99#define GLINT_DRI_BUF_COUNT 256
100
101#define GAMMA_OFF(reg) \
102 ((reg < 0x1000) \
103 ? reg \
104 : ((reg < 0x10000) \
105 ? (reg - 0x1000) \
106 : ((reg < 0x11000) \
107 ? (reg - 0x10000) \
108 : (reg - 0x11000))))
109
110#define GAMMA_BASE(reg) ((unsigned long) \
111 ((reg < 0x1000) ? dev_priv->mmio0->handle : \
112 ((reg < 0x10000) ? dev_priv->mmio1->handle : \
113 ((reg < 0x11000) ? dev_priv->mmio2->handle : \
114 dev_priv->mmio3->handle))))
115#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
116#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
117#define GAMMA_READ(reg) GAMMA_DEREF(reg)
118#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
119
120#define GAMMA_BROADCASTMASK 0x9378
121#define GAMMA_COMMANDINTENABLE 0x0c48
122#define GAMMA_DMAADDRESS 0x0028
123#define GAMMA_DMACOUNT 0x0030
124#define GAMMA_FILTERMODE 0x8c00
125#define GAMMA_GCOMMANDINTFLAGS 0x0c50
126#define GAMMA_GCOMMANDMODE 0x0c40
127#define GAMMA_QUEUED_DMA_MODE 1<<1
128#define GAMMA_GCOMMANDSTATUS 0x0c60
129#define GAMMA_GDELAYTIMER 0x0c38
130#define GAMMA_GDMACONTROL 0x0060
131#define GAMMA_USE_AGP 1<<1
132#define GAMMA_GINTENABLE 0x0808
133#define GAMMA_GINTFLAGS 0x0810
134#define GAMMA_INFIFOSPACE 0x0018
135#define GAMMA_OUTFIFOWORDS 0x0020
136#define GAMMA_OUTPUTFIFO 0x2000
137#define GAMMA_SYNC 0x8c40
138#define GAMMA_SYNC_TAG 0x0188
139#define GAMMA_PAGETABLEADDR 0x0C00
140#define GAMMA_PAGETABLELENGTH 0x0C08
141
142#define GAMMA_PASSTHROUGH 0x1FE
143#define GAMMA_DMAADDRTAG 0x530
144#define GAMMA_DMACOUNTTAG 0x531
145#define GAMMA_COMMANDINTTAG 0x532
146
147#endif
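
The GAMMA_OFF()/GAMMA_BASE() macros above fan the flat register space out across the four mmio maps: registers below 0x1000 live in mmio0, below 0x10000 in mmio1, below 0x11000 in mmio2, and everything above that in mmio3, each rebased to an offset inside its map. A stand-alone sketch of that dispatch; reg_loc and gamma_locate are illustrative names, and only the thresholds come from the macros themselves.

/* Sketch of the GAMMA_OFF()/GAMMA_BASE() register dispatch above. */
#include <stdio.h>

struct reg_loc {
	int bar;               /* which mmio map: 0..3   */
	unsigned long off;     /* offset inside that map */
};

static struct reg_loc gamma_locate(unsigned long reg)
{
	struct reg_loc loc;

	if (reg < 0x1000) {
		loc.bar = 0; loc.off = reg;
	} else if (reg < 0x10000) {
		loc.bar = 1; loc.off = reg - 0x1000;
	} else if (reg < 0x11000) {
		loc.bar = 2; loc.off = reg - 0x10000;
	} else {
		loc.bar = 3; loc.off = reg - 0x11000;
	}
	return loc;
}

int main(void)
{
	unsigned long regs[] = { 0x0018 /* INFIFOSPACE */,
				 0x2000 /* OUTPUTFIFO  */,
				 0x8c40 /* SYNC        */ };
	unsigned i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		struct reg_loc loc = gamma_locate(regs[i]);

		printf("reg 0x%05lx -> mmio%d + 0x%05lx\n",
		       regs[i], loc.bar, loc.off);
	}
	return 0;
}
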
diff --git a/drivers/char/drm/gamma_lists.h b/drivers/char/drm/gamma_lists.h
deleted file mode 100644
index 2d93f412b96b..000000000000
--- a/drivers/char/drm/gamma_lists.h
+++ /dev/null
@@ -1,215 +0,0 @@
1/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
2 * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33
34
35int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
36{
37 if (bl->count) return -EINVAL;
38
39 bl->bufs = DRM(alloc)((bl->count + 2) * sizeof(*bl->bufs),
40 DRM_MEM_BUFLISTS);
41
42 if(!bl->bufs) return -ENOMEM;
43 memset(bl->bufs, 0, sizeof(*bl->bufs));
44 bl->count = count;
45 bl->rp = bl->bufs;
46 bl->wp = bl->bufs;
47 bl->end = &bl->bufs[bl->count+1];
48 spin_lock_init(&bl->write_lock);
49 spin_lock_init(&bl->read_lock);
50 return 0;
51}
52
53int DRM(waitlist_destroy)(drm_waitlist_t *bl)
54{
55 if (bl->rp != bl->wp) return -EINVAL;
56 if (bl->bufs) DRM(free)(bl->bufs,
57 (bl->count + 2) * sizeof(*bl->bufs),
58 DRM_MEM_BUFLISTS);
59 bl->count = 0;
60 bl->bufs = NULL;
61 bl->rp = NULL;
62 bl->wp = NULL;
63 bl->end = NULL;
64 return 0;
65}
66
67int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
68{
69 int left;
70 unsigned long flags;
71
72 left = DRM_LEFTCOUNT(bl);
73 if (!left) {
74 DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
75 buf->idx, buf->filp);
76 return -EINVAL;
77 }
78 buf->list = DRM_LIST_WAIT;
79
80 spin_lock_irqsave(&bl->write_lock, flags);
81 *bl->wp = buf;
82 if (++bl->wp >= bl->end) bl->wp = bl->bufs;
83 spin_unlock_irqrestore(&bl->write_lock, flags);
84
85 return 0;
86}
87
88drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
89{
90 drm_buf_t *buf;
91 unsigned long flags;
92
93 spin_lock_irqsave(&bl->read_lock, flags);
94 buf = *bl->rp;
95 if (bl->rp == bl->wp) {
96 spin_unlock_irqrestore(&bl->read_lock, flags);
97 return NULL;
98 }
99 if (++bl->rp >= bl->end) bl->rp = bl->bufs;
100 spin_unlock_irqrestore(&bl->read_lock, flags);
101
102 return buf;
103}
104
105int DRM(freelist_create)(drm_freelist_t *bl, int count)
106{
107 atomic_set(&bl->count, 0);
108 bl->next = NULL;
109 init_waitqueue_head(&bl->waiting);
110 bl->low_mark = 0;
111 bl->high_mark = 0;
112 atomic_set(&bl->wfh, 0);
113 spin_lock_init(&bl->lock);
114 ++bl->initialized;
115 return 0;
116}
117
118int DRM(freelist_destroy)(drm_freelist_t *bl)
119{
120 atomic_set(&bl->count, 0);
121 bl->next = NULL;
122 return 0;
123}
124
125int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
126{
127 drm_device_dma_t *dma = dev->dma;
128
129 if (!dma) {
130 DRM_ERROR("No DMA support\n");
131 return 1;
132 }
133
134 if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
135 DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
136 buf->idx, buf->waiting, buf->pending, buf->list);
137 }
138 if (!bl) return 1;
139 buf->list = DRM_LIST_FREE;
140
141 spin_lock(&bl->lock);
142 buf->next = bl->next;
143 bl->next = buf;
144 spin_unlock(&bl->lock);
145
146 atomic_inc(&bl->count);
147 if (atomic_read(&bl->count) > dma->buf_count) {
148 DRM_ERROR("%d of %d buffers free after addition of %d\n",
149 atomic_read(&bl->count), dma->buf_count, buf->idx);
150 return 1;
151 }
152 /* Check for high water mark */
153 if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
154 atomic_set(&bl->wfh, 0);
155 wake_up_interruptible(&bl->waiting);
156 }
157 return 0;
158}
159
160static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
161{
162 drm_buf_t *buf;
163
164 if (!bl) return NULL;
165
166 /* Get buffer */
167 spin_lock(&bl->lock);
168 if (!bl->next) {
169 spin_unlock(&bl->lock);
170 return NULL;
171 }
172 buf = bl->next;
173 bl->next = bl->next->next;
174 spin_unlock(&bl->lock);
175
176 atomic_dec(&bl->count);
177 buf->next = NULL;
178 buf->list = DRM_LIST_NONE;
179 if (buf->waiting || buf->pending) {
180 DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
181 buf->idx, buf->waiting, buf->pending, buf->list);
182 }
183
184 return buf;
185}
186
187drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
188{
189 drm_buf_t *buf = NULL;
190 DECLARE_WAITQUEUE(entry, current);
191
192 if (!bl || !bl->initialized) return NULL;
193
194 /* Check for low water mark */
195 if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
196 atomic_set(&bl->wfh, 1);
197 if (atomic_read(&bl->wfh)) {
198 if (block) {
199 add_wait_queue(&bl->waiting, &entry);
200 for (;;) {
201 current->state = TASK_INTERRUPTIBLE;
202 if (!atomic_read(&bl->wfh)
203 && (buf = DRM(freelist_try)(bl))) break;
204 schedule();
205 if (signal_pending(current)) break;
206 }
207 current->state = TASK_RUNNING;
208 remove_wait_queue(&bl->waiting, &entry);
209 }
210 return buf;
211 }
212
213 return DRM(freelist_try)(bl);
214}
215
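
The waitlist above is a fixed ring buffer of buffer pointers: wp and rp walk a (count + 2)-entry array, wrap when they reach end, and rp == wp means empty, with separate spinlocks serialising the writer and reader sides. Below is a condensed single-threaded sketch of just the ring arithmetic; the locking is omitted and the explicit full check is an addition for the demo, not a copy of the original.

/* Single-threaded sketch of the DRM waitlist ring buffer above. */
#include <stdio.h>

#define COUNT 4

struct waitlist {
	int bufs[COUNT + 2];        /* original also over-allocates by two */
	int *rp, *wp, *end;
};

static void wl_init(struct waitlist *wl)
{
	wl->rp = wl->wp = wl->bufs;
	wl->end = &wl->bufs[COUNT + 1];
}

static int wl_put(struct waitlist *wl, int buf)
{
	int *next = (wl->wp + 1 >= wl->end) ? wl->bufs : wl->wp + 1;

	if (next == wl->rp)
		return -1;              /* full: would overrun the reader */
	*wl->wp = buf;
	wl->wp = next;
	return 0;
}

static int wl_get(struct waitlist *wl, int *buf)
{
	if (wl->rp == wl->wp)
		return -1;              /* empty */
	*buf = *wl->rp;
	wl->rp = (wl->rp + 1 >= wl->end) ? wl->bufs : wl->rp + 1;
	return 0;
}

int main(void)
{
	struct waitlist wl;
	int v;

	wl_init(&wl);
	for (v = 1; v <= 3; v++)
		wl_put(&wl, v);
	while (wl_get(&wl, &v) == 0)
		printf("got buffer %d\n", v);
	return 0;
}
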
diff --git a/drivers/char/drm/gamma_lock.h b/drivers/char/drm/gamma_lock.h
deleted file mode 100644
index ddec67e4ed16..000000000000
--- a/drivers/char/drm/gamma_lock.h
+++ /dev/null
@@ -1,140 +0,0 @@
1/* lock.c -- IOCTLs for locking -*- linux-c -*-
2 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32
33/* Gamma-specific code extracted from drm_lock.h:
34 */
35static int DRM(flush_queue)(drm_device_t *dev, int context)
36{
37 DECLARE_WAITQUEUE(entry, current);
38 int ret = 0;
39 drm_queue_t *q = dev->queuelist[context];
40
41 DRM_DEBUG("\n");
42
43 atomic_inc(&q->use_count);
44 if (atomic_read(&q->use_count) > 1) {
45 atomic_inc(&q->block_write);
46 add_wait_queue(&q->flush_queue, &entry);
47 atomic_inc(&q->block_count);
48 for (;;) {
49 current->state = TASK_INTERRUPTIBLE;
50 if (!DRM_BUFCOUNT(&q->waitlist)) break;
51 schedule();
52 if (signal_pending(current)) {
53 ret = -EINTR; /* Can't restart */
54 break;
55 }
56 }
57 atomic_dec(&q->block_count);
58 current->state = TASK_RUNNING;
59 remove_wait_queue(&q->flush_queue, &entry);
60 }
61 atomic_dec(&q->use_count);
62
63 /* NOTE: block_write is still incremented!
64 Use drm_flush_unlock_queue to decrement. */
65 return ret;
66}
67
68static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
69{
70 drm_queue_t *q = dev->queuelist[context];
71
72 DRM_DEBUG("\n");
73
74 atomic_inc(&q->use_count);
75 if (atomic_read(&q->use_count) > 1) {
76 if (atomic_read(&q->block_write)) {
77 atomic_dec(&q->block_write);
78 wake_up_interruptible(&q->write_queue);
79 }
80 }
81 atomic_dec(&q->use_count);
82 return 0;
83}
84
85int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
86 drm_lock_flags_t flags)
87{
88 int ret = 0;
89 int i;
90
91 DRM_DEBUG("\n");
92
93 if (flags & _DRM_LOCK_FLUSH) {
94 ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
95 if (!ret) ret = DRM(flush_queue)(dev, context);
96 }
97 if (flags & _DRM_LOCK_FLUSH_ALL) {
98 for (i = 0; !ret && i < dev->queue_count; i++) {
99 ret = DRM(flush_queue)(dev, i);
100 }
101 }
102 return ret;
103}
104
105int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
106{
107 int ret = 0;
108 int i;
109
110 DRM_DEBUG("\n");
111
112 if (flags & _DRM_LOCK_FLUSH) {
113 ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
114 if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
115 }
116 if (flags & _DRM_LOCK_FLUSH_ALL) {
117 for (i = 0; !ret && i < dev->queue_count; i++) {
118 ret = DRM(flush_unblock_queue)(dev, i);
119 }
120 }
121
122 return ret;
123}
124
125int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
126 unsigned long arg)
127{
128 drm_file_t *priv = filp->private_data;
129 drm_device_t *dev = priv->dev;
130 int ret = 0;
131 drm_lock_t lock;
132
133 DRM_DEBUG("\n");
134
135 if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
136 return -EFAULT;
137 ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
138 DRM(flush_unblock)(dev, lock.context, lock.flags);
139 return ret;
140}
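
DRM(flush_block_and_flush)() and DRM(flush_unblock)() above form a two-phase flush: block_write is raised, the caller sleeps until the waitlist drains, and a later unblock call drops block_write and wakes any writers that piled up in the meantime, which is why the NOTE stresses that block_write is left incremented. A rough user-space model of that handshake using pthreads; dispatch_all() stands in for the hardware consuming the queue, and none of these names are driver code.

/* Pthread model of the block / drain / unblock flush handshake above. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int queued;          /* buffers waiting on the queue        */
static int block_write;     /* writers must wait while this is set */

static void enqueue(void)
{
	pthread_mutex_lock(&lock);
	while (block_write)                 /* like sleeping on write_queue */
		pthread_cond_wait(&cond, &lock);
	queued++;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void dispatch_all(void)          /* "hardware" consumes the queue */
{
	pthread_mutex_lock(&lock);
	queued = 0;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void flush_block(void)           /* phase one: block and drain */
{
	pthread_mutex_lock(&lock);
	block_write = 1;
	while (queued)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	/* block_write stays set, as the NOTE in flush_queue() points out */
}

static void flush_unblock(void)         /* phase two: release writers */
{
	pthread_mutex_lock(&lock);
	block_write = 0;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void *writer(void *arg)
{
	(void)arg;
	enqueue();
	printf("writer queued a buffer after the unblock\n");
	return NULL;
}

static void *dispatcher(void *arg)
{
	(void)arg;
	usleep(100 * 1000);                 /* pretend DMA completes later */
	dispatch_all();
	return NULL;
}

int main(void)
{
	pthread_t d, w;

	enqueue();                          /* one buffer pending            */
	pthread_create(&d, NULL, dispatcher, NULL);
	flush_block();                      /* returns once the queue drains */
	printf("queue drained, writers still blocked\n");

	pthread_create(&w, NULL, writer, NULL);
	usleep(100 * 1000);
	flush_unblock();                    /* now the writer may proceed    */
	pthread_join(d, NULL);
	pthread_join(w, NULL);
	return 0;
}
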
diff --git a/drivers/char/drm/gamma_old_dma.h b/drivers/char/drm/gamma_old_dma.h
deleted file mode 100644
index abdd454aab9f..000000000000
--- a/drivers/char/drm/gamma_old_dma.h
+++ /dev/null
@@ -1,313 +0,0 @@
1/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
3 *
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32
33/* Gamma-specific code pulled from drm_dma.h:
34 */
35
36void DRM(clear_next_buffer)(drm_device_t *dev)
37{
38 drm_device_dma_t *dma = dev->dma;
39
40 dma->next_buffer = NULL;
41 if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
42 wake_up_interruptible(&dma->next_queue->flush_queue);
43 }
44 dma->next_queue = NULL;
45}
46
47int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
48{
49 int i;
50 int candidate = -1;
51 int j = jiffies;
52
53 if (!dev) {
54 DRM_ERROR("No device\n");
55 return -1;
56 }
57 if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
58 /* This only happens between the time the
59 interrupt is initialized and the time
60 the queues are initialized. */
61 return -1;
62 }
63
64 /* Doing "while locked" DMA? */
65 if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
66 return DRM_KERNEL_CONTEXT;
67 }
68
69 /* If there are buffers on the last_context
70 queue, and we have not been executing
71 this context very long, continue to
72 execute this context. */
73 if (dev->last_switch <= j
74 && dev->last_switch + DRM_TIME_SLICE > j
75 && DRM_WAITCOUNT(dev, dev->last_context)) {
76 return dev->last_context;
77 }
78
79 /* Otherwise, find a candidate */
80 for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
81 if (DRM_WAITCOUNT(dev, i)) {
82 candidate = dev->last_checked = i;
83 break;
84 }
85 }
86
87 if (candidate < 0) {
88 for (i = 0; i < dev->queue_count; i++) {
89 if (DRM_WAITCOUNT(dev, i)) {
90 candidate = dev->last_checked = i;
91 break;
92 }
93 }
94 }
95
96 if (wrapper
97 && candidate >= 0
98 && candidate != dev->last_context
99 && dev->last_switch <= j
100 && dev->last_switch + DRM_TIME_SLICE > j) {
101 if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
102 del_timer(&dev->timer);
103 dev->timer.function = wrapper;
104 dev->timer.data = (unsigned long)dev;
105 dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
106 add_timer(&dev->timer);
107 }
108 return -1;
109 }
110
111 return candidate;
112}
113
114
115int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
116{
117 drm_file_t *priv = filp->private_data;
118 drm_device_t *dev = priv->dev;
119 int i;
120 drm_queue_t *q;
121 drm_buf_t *buf;
122 int idx;
123 int while_locked = 0;
124 drm_device_dma_t *dma = dev->dma;
125 int *ind;
126 int err;
127 DECLARE_WAITQUEUE(entry, current);
128
129 DRM_DEBUG("%d\n", d->send_count);
130
131 if (d->flags & _DRM_DMA_WHILE_LOCKED) {
132 int context = dev->lock.hw_lock->lock;
133
134 if (!_DRM_LOCK_IS_HELD(context)) {
135 DRM_ERROR("No lock held during \"while locked\""
136 " request\n");
137 return -EINVAL;
138 }
139 if (d->context != _DRM_LOCKING_CONTEXT(context)
140 && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
141 DRM_ERROR("Lock held by %d while %d makes"
142 " \"while locked\" request\n",
143 _DRM_LOCKING_CONTEXT(context),
144 d->context);
145 return -EINVAL;
146 }
147 q = dev->queuelist[DRM_KERNEL_CONTEXT];
148 while_locked = 1;
149 } else {
150 q = dev->queuelist[d->context];
151 }
152
153
154 atomic_inc(&q->use_count);
155 if (atomic_read(&q->block_write)) {
156 add_wait_queue(&q->write_queue, &entry);
157 atomic_inc(&q->block_count);
158 for (;;) {
159 current->state = TASK_INTERRUPTIBLE;
160 if (!atomic_read(&q->block_write)) break;
161 schedule();
162 if (signal_pending(current)) {
163 atomic_dec(&q->use_count);
164 remove_wait_queue(&q->write_queue, &entry);
165 return -EINTR;
166 }
167 }
168 atomic_dec(&q->block_count);
169 current->state = TASK_RUNNING;
170 remove_wait_queue(&q->write_queue, &entry);
171 }
172
173 ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
174 if (!ind)
175 return -ENOMEM;
176
177 if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
178 err = -EFAULT;
179 goto out;
180 }
181
182 err = -EINVAL;
183 for (i = 0; i < d->send_count; i++) {
184 idx = ind[i];
185 if (idx < 0 || idx >= dma->buf_count) {
186 DRM_ERROR("Index %d (of %d max)\n",
187 ind[i], dma->buf_count - 1);
188 goto out;
189 }
190 buf = dma->buflist[ idx ];
191 if (buf->filp != filp) {
192 DRM_ERROR("Process %d using buffer not owned\n",
193 current->pid);
194 goto out;
195 }
196 if (buf->list != DRM_LIST_NONE) {
197 DRM_ERROR("Process %d using buffer %d on list %d\n",
198 current->pid, buf->idx, buf->list);
199 goto out;
200 }
201 buf->used = ind[i];
202 buf->while_locked = while_locked;
203 buf->context = d->context;
204 if (!buf->used) {
205 DRM_ERROR("Queueing 0 length buffer\n");
206 }
207 if (buf->pending) {
208 DRM_ERROR("Queueing pending buffer:"
209 " buffer %d, offset %d\n",
210 ind[i], i);
211 goto out;
212 }
213 if (buf->waiting) {
214 DRM_ERROR("Queueing waiting buffer:"
215 " buffer %d, offset %d\n",
216 ind[i], i);
217 goto out;
218 }
219 buf->waiting = 1;
220 if (atomic_read(&q->use_count) == 1
221 || atomic_read(&q->finalization)) {
222 DRM(free_buffer)(dev, buf);
223 } else {
224 DRM(waitlist_put)(&q->waitlist, buf);
225 atomic_inc(&q->total_queued);
226 }
227 }
228 atomic_dec(&q->use_count);
229
230 return 0;
231
232out:
233 DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
234 atomic_dec(&q->use_count);
235 return err;
236}
237
238static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
239 int order)
240{
241 drm_file_t *priv = filp->private_data;
242 drm_device_t *dev = priv->dev;
243 int i;
244 drm_buf_t *buf;
245 drm_device_dma_t *dma = dev->dma;
246
247 for (i = d->granted_count; i < d->request_count; i++) {
248 buf = DRM(freelist_get)(&dma->bufs[order].freelist,
249 d->flags & _DRM_DMA_WAIT);
250 if (!buf) break;
251 if (buf->pending || buf->waiting) {
252 DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
253 buf->idx,
254 buf->filp,
255 buf->waiting,
256 buf->pending);
257 }
258 buf->filp = filp;
259 if (copy_to_user(&d->request_indices[i],
260 &buf->idx,
261 sizeof(buf->idx)))
262 return -EFAULT;
263
264 if (copy_to_user(&d->request_sizes[i],
265 &buf->total,
266 sizeof(buf->total)))
267 return -EFAULT;
268
269 ++d->granted_count;
270 }
271 return 0;
272}
273
274
275int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
276{
277 int order;
278 int retcode = 0;
279 int tmp_order;
280
281 order = DRM(order)(dma->request_size);
282
283 dma->granted_count = 0;
284 retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
285
286 if (dma->granted_count < dma->request_count
287 && (dma->flags & _DRM_DMA_SMALLER_OK)) {
288 for (tmp_order = order - 1;
289 !retcode
290 && dma->granted_count < dma->request_count
291 && tmp_order >= DRM_MIN_ORDER;
292 --tmp_order) {
293
294 retcode = DRM(dma_get_buffers_of_order)(filp, dma,
295 tmp_order);
296 }
297 }
298
299 if (dma->granted_count < dma->request_count
300 && (dma->flags & _DRM_DMA_LARGER_OK)) {
301 for (tmp_order = order + 1;
302 !retcode
303 && dma->granted_count < dma->request_count
304 && tmp_order <= DRM_MAX_ORDER;
305 ++tmp_order) {
306
307 retcode = DRM(dma_get_buffers_of_order)(filp, dma,
308 tmp_order);
309 }
310 }
311 return 0;
312}
313
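
DRM(dma_get_buffers)() above hands out buffers by power-of-two size class: DRM(order)() maps the requested size to the smallest order whose buffers are at least that big, and the _DRM_DMA_SMALLER_OK / _DRM_DMA_LARGER_OK flags let the loops fall back to neighbouring orders when that pool is exhausted. A small sketch of the fallback walk; the MIN/MAX orders and the free-buffer counts here are made up for the demo rather than taken from the DRM headers.

/* Sketch of the order-based buffer fallback in DRM(dma_get_buffers). */
#include <stdio.h>

#define MIN_ORDER 5     /* assumed lower bound for the demo */
#define MAX_ORDER 22    /* assumed upper bound for the demo */

/* Smallest order such that (1UL << order) >= size. */
static int buf_order(unsigned long size)
{
	int order = 0;
	unsigned long s = 1;

	while (s < size) {
		s <<= 1;
		order++;
	}
	return order;
}

/* Pretend pool: how many free buffers exist at each order. */
static int free_bufs[MAX_ORDER + 1] = { [7] = 2, [9] = 4 };

static int grab(int order, int wanted)
{
	int got = free_bufs[order] < wanted ? free_bufs[order] : wanted;

	free_bufs[order] -= got;
	return got;
}

int main(void)
{
	unsigned long request_size = 300;   /* bytes per buffer wanted */
	int wanted = 5, granted = 0, o;
	int order = buf_order(request_size);

	granted += grab(order, wanted - granted);

	/* _DRM_DMA_SMALLER_OK: fall back to smaller buffers first */
	for (o = order - 1; granted < wanted && o >= MIN_ORDER; o--)
		granted += grab(o, wanted - granted);

	/* _DRM_DMA_LARGER_OK: then try larger buffers */
	for (o = order + 1; granted < wanted && o <= MAX_ORDER; o++)
		granted += grab(o, wanted - granted);

	printf("order %d request: granted %d of %d buffers\n",
	       order, granted, wanted);
	return 0;
}
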
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 18e0b7622893..2f1659b96fd1 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -45,11 +45,6 @@
45#define I810_BUF_UNMAPPED 0 45#define I810_BUF_UNMAPPED 0
46#define I810_BUF_MAPPED 1 46#define I810_BUF_MAPPED 1
47 47
48#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
49#define down_write down
50#define up_write up
51#endif
52
53static drm_buf_t *i810_freelist_get(drm_device_t *dev) 48static drm_buf_t *i810_freelist_get(drm_device_t *dev)
54{ 49{
55 drm_device_dma_t *dma = dev->dma; 50 drm_device_dma_t *dma = dev->dma;
@@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev,
351 DRM_ERROR("can not find mmio map!\n"); 346 DRM_ERROR("can not find mmio map!\n");
352 return -EINVAL; 347 return -EINVAL;
353 } 348 }
349 dev->agp_buffer_token = init->buffers_offset;
354 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 350 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
355 if (!dev->agp_buffer_map) { 351 if (!dev->agp_buffer_map) {
356 dev->dev_private = (void *)dev_priv; 352 dev->dev_private = (void *)dev_priv;
@@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = {
1383}; 1379};
1384 1380
1385int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1381int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1382
1383/**
1384 * Determine if the device really is AGP or not.
1385 *
1386 * All Intel graphics chipsets are treated as AGP, even if they are really
1387 * PCI-e.
1388 *
1389 * \param dev The device to be tested.
1390 *
1391 * \returns
1392 * A value of 1 is always returned to indicate every i810 is AGP.
1393 */
1394int i810_driver_device_is_agp(drm_device_t * dev)
1395{
1396 return 1;
1397}
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c
index ff51b3259af9..00609329d578 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/char/drm/i810_drv.c
@@ -84,6 +84,7 @@ static struct drm_driver driver = {
84 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 84 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
85 .pretakedown = i810_driver_pretakedown, 85 .pretakedown = i810_driver_pretakedown,
86 .prerelease = i810_driver_prerelease, 86 .prerelease = i810_driver_prerelease,
87 .device_is_agp = i810_driver_device_is_agp,
87 .release = i810_driver_release, 88 .release = i810_driver_release,
88 .dma_quiescent = i810_driver_dma_quiescent, 89 .dma_quiescent = i810_driver_dma_quiescent,
89 .reclaim_buffers = i810_reclaim_buffers, 90 .reclaim_buffers = i810_reclaim_buffers,
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 1b40538d1725..62ee4f58c59a 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev);
120extern void i810_driver_release(drm_device_t *dev, struct file *filp); 120extern void i810_driver_release(drm_device_t *dev, struct file *filp);
121extern void i810_driver_pretakedown(drm_device_t *dev); 121extern void i810_driver_pretakedown(drm_device_t *dev);
122extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp); 122extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp);
123extern int i810_driver_device_is_agp(drm_device_t * dev);
123 124
124#define I810_BASE(reg) ((unsigned long) \ 125#define I810_BASE(reg) ((unsigned long) \
125 dev_priv->mmio_map->handle) 126 dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc7733035864..6f89d5796ef3 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -47,11 +47,6 @@
47#define I830_BUF_UNMAPPED 0 47#define I830_BUF_UNMAPPED 0
48#define I830_BUF_MAPPED 1 48#define I830_BUF_MAPPED 1
49 49
50#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
51#define down_write down
52#define up_write up
53#endif
54
55static drm_buf_t *i830_freelist_get(drm_device_t *dev) 50static drm_buf_t *i830_freelist_get(drm_device_t *dev)
56{ 51{
57 drm_device_dma_t *dma = dev->dma; 52 drm_device_dma_t *dma = dev->dma;
@@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev,
358 DRM_ERROR("can not find mmio map!\n"); 353 DRM_ERROR("can not find mmio map!\n");
359 return -EINVAL; 354 return -EINVAL;
360 } 355 }
356 dev->agp_buffer_token = init->buffers_offset;
361 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 357 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
362 if(!dev->agp_buffer_map) { 358 if(!dev->agp_buffer_map) {
363 dev->dev_private = (void *)dev_priv; 359 dev->dev_private = (void *)dev_priv;
@@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = {
1586}; 1582};
1587 1583
1588int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1584int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
1585
1586/**
1587 * Determine if the device really is AGP or not.
1588 *
1589 * All Intel graphics chipsets are treated as AGP, even if they are really
1590 * PCI-e.
1591 *
1592 * \param dev The device to be tested.
1593 *
1594 * \returns
1595 * A value of 1 is always returned to indicate every i8xx is AGP.
1596 */
1597int i830_driver_device_is_agp(drm_device_t * dev)
1598{
1599 return 1;
1600}
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c
index bc36be76b8b2..0da9cd19919e 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/char/drm/i830_drv.c
@@ -88,6 +88,7 @@ static struct drm_driver driver = {
88 .dev_priv_size = sizeof(drm_i830_buf_priv_t), 88 .dev_priv_size = sizeof(drm_i830_buf_priv_t),
89 .pretakedown = i830_driver_pretakedown, 89 .pretakedown = i830_driver_pretakedown,
90 .prerelease = i830_driver_prerelease, 90 .prerelease = i830_driver_prerelease,
91 .device_is_agp = i830_driver_device_is_agp,
91 .release = i830_driver_release, 92 .release = i830_driver_release,
92 .dma_quiescent = i830_driver_dma_quiescent, 93 .dma_quiescent = i830_driver_dma_quiescent,
93 .reclaim_buffers = i830_reclaim_buffers, 94 .reclaim_buffers = i830_reclaim_buffers,
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index df7746131dea..63f96a8b6a4a 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev);
137extern void i830_driver_release(drm_device_t *dev, struct file *filp); 137extern void i830_driver_release(drm_device_t *dev, struct file *filp);
138extern int i830_driver_dma_quiescent(drm_device_t *dev); 138extern int i830_driver_dma_quiescent(drm_device_t *dev);
139extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp); 139extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp);
140extern int i830_driver_device_is_agp(drm_device_t * dev);
140 141
141#define I830_BASE(reg) ((unsigned long) \ 142#define I830_BASE(reg) ((unsigned long) \
142 dev_priv->mmio_map->handle) 143 dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index acf9e52a9507..34f552f90c4a 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev)
95 drm_core_ioremapfree( &dev_priv->ring.map, dev); 95 drm_core_ioremapfree( &dev_priv->ring.map, dev);
96 } 96 }
97 97
98 if (dev_priv->hw_status_page) { 98 if (dev_priv->status_page_dmah) {
99 drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page, 99 drm_pci_free(dev, dev_priv->status_page_dmah);
100 dev_priv->dma_status_page);
101 /* Need to rewrite hardware status page */ 100 /* Need to rewrite hardware status page */
102 I915_WRITE(0x02080, 0x1ffff000); 101 I915_WRITE(0x02080, 0x1ffff000);
103 } 102 }
@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev,
174 dev_priv->allow_batchbuffer = 1; 173 dev_priv->allow_batchbuffer = 1;
175 174
176 /* Program Hardware Status Page */ 175 /* Program Hardware Status Page */
177 dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 176 dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
178 0xffffffff, 177 0xffffffff);
179 &dev_priv->dma_status_page);
180 178
181 if (!dev_priv->hw_status_page) { 179 if (!dev_priv->status_page_dmah) {
182 dev->dev_private = (void *)dev_priv; 180 dev->dev_private = (void *)dev_priv;
183 i915_dma_cleanup(dev); 181 i915_dma_cleanup(dev);
184 DRM_ERROR("Can not allocate hardware status page\n"); 182 DRM_ERROR("Can not allocate hardware status page\n");
185 return DRM_ERR(ENOMEM); 183 return DRM_ERR(ENOMEM);
186 } 184 }
185 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
186 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
187
187 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 188 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
188 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 189 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
189 190
@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = {
731}; 732};
732 733
733int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 734int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
735
736/**
737 * Determine if the device really is AGP or not.
738 *
739 * All Intel graphics chipsets are treated as AGP, even if they are really
740 * PCI-e.
741 *
742 * \param dev The device to be tested.
743 *
744 * \returns
745 * A value of 1 is always returned to indicate every i9x5 is AGP.
746 */
747int i915_driver_device_is_agp(drm_device_t * dev)
748{
749 return 1;
750}
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 1f59d3fc79bc..106b9ec02213 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -79,6 +79,7 @@ static struct drm_driver driver = {
79 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 79 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
80 .pretakedown = i915_driver_pretakedown, 80 .pretakedown = i915_driver_pretakedown,
81 .prerelease = i915_driver_prerelease, 81 .prerelease = i915_driver_prerelease,
82 .device_is_agp = i915_driver_device_is_agp,
82 .irq_preinstall = i915_driver_irq_preinstall, 83 .irq_preinstall = i915_driver_irq_preinstall,
83 .irq_postinstall = i915_driver_irq_postinstall, 84 .irq_postinstall = i915_driver_irq_postinstall,
84 .irq_uninstall = i915_driver_irq_uninstall, 85 .irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 9c37d2367dd5..70ed4e68eac8 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -79,9 +79,10 @@ typedef struct drm_i915_private {
79 drm_i915_sarea_t *sarea_priv; 79 drm_i915_sarea_t *sarea_priv;
80 drm_i915_ring_buffer_t ring; 80 drm_i915_ring_buffer_t ring;
81 81
82 drm_dma_handle_t *status_page_dmah;
82 void *hw_status_page; 83 void *hw_status_page;
83 unsigned long counter;
84 dma_addr_t dma_status_page; 84 dma_addr_t dma_status_page;
85 unsigned long counter;
85 86
86 int back_offset; 87 int back_offset;
87 int front_offset; 88 int front_offset;
@@ -102,6 +103,7 @@ typedef struct drm_i915_private {
102extern void i915_kernel_lost_context(drm_device_t * dev); 103extern void i915_kernel_lost_context(drm_device_t * dev);
103extern void i915_driver_pretakedown(drm_device_t *dev); 104extern void i915_driver_pretakedown(drm_device_t *dev);
104extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp); 105extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
106extern int i915_driver_device_is_agp(drm_device_t *dev);
105 107
106/* i915_irq.c */ 108/* i915_irq.c */
107extern int i915_irq_emit(DRM_IOCTL_ARGS); 109extern int i915_irq_emit(DRM_IOCTL_ARGS);
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 832eaf8a5068..fc7d4a594bca 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -23,18 +23,21 @@
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE. 25 * DEALINGS IN THE SOFTWARE.
26 * 26 */
27 * Authors: 27
28 * Rickard E. (Rik) Faith <faith@valinux.com> 28/**
29 * Jeff Hartmann <jhartmann@valinux.com> 29 * \file mga_dma.c
30 * Keith Whitwell <keith@tungstengraphics.com> 30 * DMA support for MGA G200 / G400.
31 * 31 *
32 * Rewritten by: 32 * \author Rickard E. (Rik) Faith <faith@valinux.com>
33 * Gareth Hughes <gareth@valinux.com> 33 * \author Jeff Hartmann <jhartmann@valinux.com>
34 * \author Keith Whitwell <keith@tungstengraphics.com>
35 * \author Gareth Hughes <gareth@valinux.com>
34 */ 36 */
35 37
36#include "drmP.h" 38#include "drmP.h"
37#include "drm.h" 39#include "drm.h"
40#include "drm_sarea.h"
38#include "mga_drm.h" 41#include "mga_drm.h"
39#include "mga_drv.h" 42#include "mga_drv.h"
40 43
@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
148 DRM_DEBUG( " space = 0x%06x\n", primary->space ); 151 DRM_DEBUG( " space = 0x%06x\n", primary->space );
149 152
150 mga_flush_write_combine(); 153 mga_flush_write_combine();
151 MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); 154 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
152 155
153 DRM_DEBUG( "done.\n" ); 156 DRM_DEBUG( "done.\n" );
154} 157}
@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
190 DRM_DEBUG( " space = 0x%06x\n", primary->space ); 193 DRM_DEBUG( " space = 0x%06x\n", primary->space );
191 194
192 mga_flush_write_combine(); 195 mga_flush_write_combine();
193 MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); 196 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
194 197
195 set_bit( 0, &primary->wrapped ); 198 set_bit( 0, &primary->wrapped );
196 DRM_DEBUG( "done.\n" ); 199 DRM_DEBUG( "done.\n" );
@@ -396,23 +399,390 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
396 * DMA initialization, cleanup 399 * DMA initialization, cleanup
397 */ 400 */
398 401
402
403int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
404{
405 drm_mga_private_t * dev_priv;
406
407 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
408 if (!dev_priv)
409 return DRM_ERR(ENOMEM);
410
411 dev->dev_private = (void *)dev_priv;
412 memset(dev_priv, 0, sizeof(drm_mga_private_t));
413
414 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
415 dev_priv->chipset = flags;
416
417 return 0;
418}
419
420#if __OS_HAS_AGP
421/**
422 * Bootstrap the driver for AGP DMA.
423 *
424 * \todo
425 * Investigate whether there is any benefit to storing the WARP microcode in
426 * AGP memory. If not, the microcode may as well always be put in PCI
427 * memory.
428 *
429 * \todo
430 * This routine needs to set dma_bs->agp_mode to the mode actually configured
431 * in the hardware. Looking just at the Linux AGP driver code, I don't see
432 * an easy way to determine this.
433 *
434 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
435 */
436static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
437 drm_mga_dma_bootstrap_t * dma_bs)
438{
439 drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
440 const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
441 int err;
442 unsigned offset;
443 const unsigned secondary_size = dma_bs->secondary_bin_count
444 * dma_bs->secondary_bin_size;
445 const unsigned agp_size = (dma_bs->agp_size << 20);
446 drm_buf_desc_t req;
447 drm_agp_mode_t mode;
448 drm_agp_info_t info;
449
450
451 /* Acquire AGP. */
452 err = drm_agp_acquire(dev);
453 if (err) {
454 DRM_ERROR("Unable to acquire AGP\n");
455 return err;
456 }
457
458 err = drm_agp_info(dev, &info);
459 if (err) {
460 DRM_ERROR("Unable to get AGP info\n");
461 return err;
462 }
463
464 mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
465 err = drm_agp_enable(dev, mode);
466 if (err) {
467 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
468 return err;
469 }
470
471
472 /* In addition to the usual AGP mode configuration, the G200 AGP cards
473 * need to have the AGP mode "manually" set.
474 */
475
476 if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
477 if (mode.mode & 0x02) {
478 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
479 }
480 else {
481 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
482 }
483 }
484
485
486 /* Allocate and bind AGP memory. */
487 dev_priv->agp_pages = agp_size / PAGE_SIZE;
488 dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
489 if (dev_priv->agp_mem == NULL) {
490 dev_priv->agp_pages = 0;
491 DRM_ERROR("Unable to allocate %uMB AGP memory\n",
492 dma_bs->agp_size);
493 return DRM_ERR(ENOMEM);
494 }
495
496 err = drm_bind_agp( dev_priv->agp_mem, 0 );
497 if (err) {
498 DRM_ERROR("Unable to bind AGP memory\n");
499 return err;
500 }
501
502 offset = 0;
503 err = drm_addmap( dev, offset, warp_size,
504 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
505 if (err) {
506 DRM_ERROR("Unable to map WARP microcode\n");
507 return err;
508 }
509
510 offset += warp_size;
511 err = drm_addmap( dev, offset, dma_bs->primary_size,
512 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
513 if (err) {
514 DRM_ERROR("Unable to map primary DMA region\n");
515 return err;
516 }
517
518 offset += dma_bs->primary_size;
519 err = drm_addmap( dev, offset, secondary_size,
520 _DRM_AGP, 0, & dev->agp_buffer_map );
521 if (err) {
522 DRM_ERROR("Unable to map secondary DMA region\n");
523 return err;
524 }
525
526 (void) memset( &req, 0, sizeof(req) );
527 req.count = dma_bs->secondary_bin_count;
528 req.size = dma_bs->secondary_bin_size;
529 req.flags = _DRM_AGP_BUFFER;
530 req.agp_start = offset;
531
532 err = drm_addbufs_agp( dev, & req );
533 if (err) {
534 DRM_ERROR("Unable to add secondary DMA buffers\n");
535 return err;
536 }
537
538 offset += secondary_size;
539 err = drm_addmap( dev, offset, agp_size - offset,
540 _DRM_AGP, 0, & dev_priv->agp_textures );
541 if (err) {
542 DRM_ERROR("Unable to map AGP texture region\n");
543 return err;
544 }
545
546 drm_core_ioremap(dev_priv->warp, dev);
547 drm_core_ioremap(dev_priv->primary, dev);
548 drm_core_ioremap(dev->agp_buffer_map, dev);
549
550 if (!dev_priv->warp->handle ||
551 !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
552 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
553 dev_priv->warp->handle, dev_priv->primary->handle,
554 dev->agp_buffer_map->handle);
555 return DRM_ERR(ENOMEM);
556 }
557
558 dev_priv->dma_access = MGA_PAGPXFER;
559 dev_priv->wagp_enable = MGA_WAGP_ENABLE;
560
561 DRM_INFO("Initialized card for AGP DMA.\n");
562 return 0;
563}
564#else
565static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
566 drm_mga_dma_bootstrap_t * dma_bs)
567{
568 return -EINVAL;
569}
570#endif
571
572/**
573 * Bootstrap the driver for PCI DMA.
574 *
575 * \todo
576 * The algorithm for decreasing the size of the primary DMA buffer could be
577 * better. The size should be rounded up to the nearest page size, then
578 * decrease the request size by a single page each pass through the loop.
579 *
580 * \todo
581 * Determine whether the maximum address passed to drm_pci_alloc is correct.
582 * The same goes for drm_addbufs_pci.
583 *
584 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
585 */
586static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
587 drm_mga_dma_bootstrap_t * dma_bs)
588{
589 drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
590 const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
591 unsigned int primary_size;
592 unsigned int bin_count;
593 int err;
594 drm_buf_desc_t req;
595
596
597 if (dev->dma == NULL) {
598 DRM_ERROR("dev->dma is NULL\n");
599 return DRM_ERR(EFAULT);
600 }
601
602 /* The proper alignment is 0x100 for this mapping */
603 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
604 _DRM_READ_ONLY, &dev_priv->warp);
605 if (err != 0) {
606 DRM_ERROR("Unable to create mapping for WARP microcode\n");
607 return err;
608 }
609
610 /* Other than the bottom two bits being used to encode other
611 * information, there don't appear to be any restrictions on the
612 * alignment of the primary or secondary DMA buffers.
613 */
614
615 for ( primary_size = dma_bs->primary_size
616 ; primary_size != 0
617 ; primary_size >>= 1 ) {
618 /* The proper alignment for this mapping is 0x04 */
619 err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
620 _DRM_READ_ONLY, &dev_priv->primary);
621 if (!err)
622 break;
623 }
624
625 if (err != 0) {
626 DRM_ERROR("Unable to allocate primary DMA region\n");
627 return DRM_ERR(ENOMEM);
628 }
629
630 if (dev_priv->primary->size != dma_bs->primary_size) {
631 DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
632 dma_bs->primary_size,
633 (unsigned) dev_priv->primary->size);
634 dma_bs->primary_size = dev_priv->primary->size;
635 }
636
637 for ( bin_count = dma_bs->secondary_bin_count
638 ; bin_count > 0
639 ; bin_count-- ) {
640 (void) memset( &req, 0, sizeof(req) );
641 req.count = bin_count;
642 req.size = dma_bs->secondary_bin_size;
643
644 err = drm_addbufs_pci( dev, & req );
645 if (!err) {
646 break;
647 }
648 }
649
650 if (bin_count == 0) {
651 DRM_ERROR("Unable to add secondary DMA buffers\n");
652 return err;
653 }
654
655 if (bin_count != dma_bs->secondary_bin_count) {
656 DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
657 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
658
659 dma_bs->secondary_bin_count = bin_count;
660 }
661
662 dev_priv->dma_access = 0;
663 dev_priv->wagp_enable = 0;
664
665 dma_bs->agp_mode = 0;
666
667 DRM_INFO("Initialized card for PCI DMA.\n");
668 return 0;
669}
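
The \todo above mga_do_pci_dma_bootstrap() suggests a gentler reduction strategy than halving the primary buffer: round the request up to a page and retry one page smaller on each failure. Below is a sketch of that suggested loop, not of the code as merged; try_alloc() and its 9-page limit stand in for drm_addmap() and whatever the system will actually provide.

/* Sketch of the page-granular primary-size reduction suggested in the
 * \todo comment above; illustrative only.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long page_align(unsigned long size)
{
	return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Pretend the system can only hand out this much contiguous memory. */
static int try_alloc(unsigned long size)
{
	return size <= 9 * PAGE_SIZE;       /* assumed limit for the demo */
}

int main(void)
{
	unsigned long want = 1024 * 1024;   /* 1 MiB requested */
	unsigned long size;

	for (size = page_align(want); size != 0; size -= PAGE_SIZE)
		if (try_alloc(size))
			break;

	if (size)
		printf("primary DMA buffer reduced from %lu to %lu bytes\n",
		       want, size);
	else
		printf("unable to allocate a primary DMA buffer\n");
	return 0;
}
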
670
671
672static int mga_do_dma_bootstrap(drm_device_t * dev,
673 drm_mga_dma_bootstrap_t * dma_bs)
674{
675 const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
676 int err;
677 drm_mga_private_t * const dev_priv =
678 (drm_mga_private_t *) dev->dev_private;
679
680
681 dev_priv->used_new_dma_init = 1;
682
683	/* The first steps are the same for both PCI- and AGP-based DMA. Map
684	 * the card's MMIO registers and map a status page.
685 */
686 err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
687 _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
688 if (err) {
689 DRM_ERROR("Unable to map MMIO region\n");
690 return err;
691 }
692
693
694 err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
695 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
696 & dev_priv->status );
697 if (err) {
698 DRM_ERROR("Unable to map status region\n");
699 return err;
700 }
701
702
703 /* The DMA initialization procedure is slightly different for PCI and
704 * AGP cards. AGP cards just allocate a large block of AGP memory and
705 * carve off portions of it for internal uses. The remaining memory
706 * is returned to user-mode to be used for AGP textures.
707 */
708 if (is_agp) {
709 err = mga_do_agp_dma_bootstrap(dev, dma_bs);
710 }
711
712 /* If we attempted to initialize the card for AGP DMA but failed,
713 * clean-up any mess that may have been created.
714 */
715
716 if (err) {
717 mga_do_cleanup_dma(dev);
718 }
719
720
721	/* Not only do we want to try to initialize PCI cards for PCI DMA,
722	 * but we also try to initialize AGP cards that could not be
723 * initialized for AGP DMA. This covers the case where we have an AGP
724 * card in a system with an unsupported AGP chipset. In that case the
725 * card will be detected as AGP, but we won't be able to allocate any
726 * AGP memory, etc.
727 */
728
729 if (!is_agp || err) {
730 err = mga_do_pci_dma_bootstrap(dev, dma_bs);
731 }
732
733
734 return err;
735}
736
737int mga_dma_bootstrap(DRM_IOCTL_ARGS)
738{
739 DRM_DEVICE;
740 drm_mga_dma_bootstrap_t bootstrap;
741 int err;
742
743
744 DRM_COPY_FROM_USER_IOCTL(bootstrap,
745 (drm_mga_dma_bootstrap_t __user *) data,
746 sizeof(bootstrap));
747
748 err = mga_do_dma_bootstrap(dev, & bootstrap);
749 if (! err) {
750 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
751 const drm_mga_private_t * const dev_priv =
752 (drm_mga_private_t *) dev->dev_private;
753
754 if (dev_priv->agp_textures != NULL) {
755 bootstrap.texture_handle = dev_priv->agp_textures->offset;
756 bootstrap.texture_size = dev_priv->agp_textures->size;
757 }
758 else {
759 bootstrap.texture_handle = 0;
760 bootstrap.texture_size = 0;
761 }
762
763 bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
764 if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
765 sizeof(bootstrap))) {
766 err = DRM_ERR(EFAULT);
767 }
768 }
769 else {
770 mga_do_cleanup_dma(dev);
771 }
772
773 return err;
774}
775
399static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) 776static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
400{ 777{
401 drm_mga_private_t *dev_priv; 778 drm_mga_private_t *dev_priv;
402 int ret; 779 int ret;
403 DRM_DEBUG( "\n" ); 780 DRM_DEBUG( "\n" );
404 781
405 dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
406 if ( !dev_priv )
407 return DRM_ERR(ENOMEM);
408
409 memset( dev_priv, 0, sizeof(drm_mga_private_t) );
410 782
411 dev_priv->chipset = init->chipset; 783 dev_priv = dev->dev_private;
412 784
413 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 785 if (init->sgram) {
414
415 if ( init->sgram ) {
416 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; 786 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
417 } else { 787 } else {
418 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; 788 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
@@ -436,88 +806,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
436 806
437 DRM_GETSAREA(); 807 DRM_GETSAREA();
438 808
439 if(!dev_priv->sarea) { 809 if (!dev_priv->sarea) {
440 DRM_ERROR( "failed to find sarea!\n" ); 810 DRM_ERROR("failed to find sarea!\n");
441 /* Assign dev_private so we can do cleanup. */
442 dev->dev_private = (void *)dev_priv;
443 mga_do_cleanup_dma( dev );
444 return DRM_ERR(EINVAL); 811 return DRM_ERR(EINVAL);
445 } 812 }
446 813
447 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 814 if (! dev_priv->used_new_dma_init) {
448 if(!dev_priv->mmio) { 815 dev_priv->status = drm_core_findmap(dev, init->status_offset);
449 DRM_ERROR( "failed to find mmio region!\n" ); 816 if (!dev_priv->status) {
450 /* Assign dev_private so we can do cleanup. */ 817 DRM_ERROR("failed to find status page!\n");
451 dev->dev_private = (void *)dev_priv; 818 return DRM_ERR(EINVAL);
452 mga_do_cleanup_dma( dev ); 819 }
453 return DRM_ERR(EINVAL); 820 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
454 } 821 if (!dev_priv->mmio) {
455 dev_priv->status = drm_core_findmap(dev, init->status_offset); 822 DRM_ERROR("failed to find mmio region!\n");
456 if(!dev_priv->status) { 823 return DRM_ERR(EINVAL);
457 DRM_ERROR( "failed to find status page!\n" ); 824 }
458 /* Assign dev_private so we can do cleanup. */ 825 dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
459 dev->dev_private = (void *)dev_priv; 826 if (!dev_priv->warp) {
460 mga_do_cleanup_dma( dev ); 827 DRM_ERROR("failed to find warp microcode region!\n");
461 return DRM_ERR(EINVAL); 828 return DRM_ERR(EINVAL);
462 } 829 }
463 dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 830 dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
464 if(!dev_priv->warp) { 831 if (!dev_priv->primary) {
465 DRM_ERROR( "failed to find warp microcode region!\n" ); 832 DRM_ERROR("failed to find primary dma region!\n");
466 /* Assign dev_private so we can do cleanup. */ 833 return DRM_ERR(EINVAL);
467 dev->dev_private = (void *)dev_priv; 834 }
468 mga_do_cleanup_dma( dev ); 835 dev->agp_buffer_token = init->buffers_offset;
469 return DRM_ERR(EINVAL); 836 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
470 } 837 if (!dev->agp_buffer_map) {
471 dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 838 DRM_ERROR("failed to find dma buffer region!\n");
472 if(!dev_priv->primary) { 839 return DRM_ERR(EINVAL);
473 DRM_ERROR( "failed to find primary dma region!\n" ); 840 }
474 /* Assign dev_private so we can do cleanup. */ 841
475 dev->dev_private = (void *)dev_priv; 842 drm_core_ioremap(dev_priv->warp, dev);
476 mga_do_cleanup_dma( dev ); 843 drm_core_ioremap(dev_priv->primary, dev);
477 return DRM_ERR(EINVAL); 844 drm_core_ioremap(dev->agp_buffer_map, dev);
478 }
479 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
480 if(!dev->agp_buffer_map) {
481 DRM_ERROR( "failed to find dma buffer region!\n" );
482 /* Assign dev_private so we can do cleanup. */
483 dev->dev_private = (void *)dev_priv;
484 mga_do_cleanup_dma( dev );
485 return DRM_ERR(EINVAL);
486 } 845 }
487 846
488 dev_priv->sarea_priv = 847 dev_priv->sarea_priv =
489 (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + 848 (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
490 init->sarea_priv_offset); 849 init->sarea_priv_offset);
491 850
492 drm_core_ioremap( dev_priv->warp, dev ); 851 if (!dev_priv->warp->handle ||
493 drm_core_ioremap( dev_priv->primary, dev ); 852 !dev_priv->primary->handle ||
494 drm_core_ioremap( dev->agp_buffer_map, dev ); 853 ((dev_priv->dma_access != 0) &&
495 854 ((dev->agp_buffer_map == NULL) ||
496 if(!dev_priv->warp->handle || 855 (dev->agp_buffer_map->handle == NULL)))) {
497 !dev_priv->primary->handle || 856 DRM_ERROR("failed to ioremap agp regions!\n");
498 !dev->agp_buffer_map->handle ) {
499 DRM_ERROR( "failed to ioremap agp regions!\n" );
500 /* Assign dev_private so we can do cleanup. */
501 dev->dev_private = (void *)dev_priv;
502 mga_do_cleanup_dma( dev );
503 return DRM_ERR(ENOMEM); 857 return DRM_ERR(ENOMEM);
504 } 858 }
505 859
506 ret = mga_warp_install_microcode( dev_priv ); 860 ret = mga_warp_install_microcode(dev_priv);
507 if ( ret < 0 ) { 861 if (ret < 0) {
508 DRM_ERROR( "failed to install WARP ucode!\n" ); 862 DRM_ERROR("failed to install WARP ucode!\n");
509 /* Assign dev_private so we can do cleanup. */
510 dev->dev_private = (void *)dev_priv;
511 mga_do_cleanup_dma( dev );
512 return ret; 863 return ret;
513 } 864 }
514 865
515 ret = mga_warp_init( dev_priv ); 866 ret = mga_warp_init(dev_priv);
516 if ( ret < 0 ) { 867 if (ret < 0) {
517 DRM_ERROR( "failed to init WARP engine!\n" ); 868 DRM_ERROR("failed to init WARP engine!\n");
518 /* Assign dev_private so we can do cleanup. */
519 dev->dev_private = (void *)dev_priv;
520 mga_do_cleanup_dma( dev );
521 return ret; 869 return ret;
522 } 870 }
523 871
@@ -557,22 +905,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
557 dev_priv->sarea_priv->last_frame.head = 0; 905 dev_priv->sarea_priv->last_frame.head = 0;
558 dev_priv->sarea_priv->last_frame.wrap = 0; 906 dev_priv->sarea_priv->last_frame.wrap = 0;
559 907
560 if ( mga_freelist_init( dev, dev_priv ) < 0 ) { 908 if (mga_freelist_init(dev, dev_priv) < 0) {
561 DRM_ERROR( "could not initialize freelist\n" ); 909 DRM_ERROR("could not initialize freelist\n");
562 /* Assign dev_private so we can do cleanup. */
563 dev->dev_private = (void *)dev_priv;
564 mga_do_cleanup_dma( dev );
565 return DRM_ERR(ENOMEM); 910 return DRM_ERR(ENOMEM);
566 } 911 }
567 912
568 /* Make dev_private visable to others. */
569 dev->dev_private = (void *)dev_priv;
570 return 0; 913 return 0;
571} 914}
572 915
573static int mga_do_cleanup_dma( drm_device_t *dev ) 916static int mga_do_cleanup_dma( drm_device_t *dev )
574{ 917{
575 DRM_DEBUG( "\n" ); 918 int err = 0;
919 DRM_DEBUG("\n");
576 920
577 /* Make sure interrupts are disabled here because the uninstall ioctl 921 /* Make sure interrupts are disabled here because the uninstall ioctl
578 * may not have been called from userspace and after dev_private 922 * may not have been called from userspace and after dev_private
@@ -583,37 +927,73 @@ static int mga_do_cleanup_dma( drm_device_t *dev )
583 if ( dev->dev_private ) { 927 if ( dev->dev_private ) {
584 drm_mga_private_t *dev_priv = dev->dev_private; 928 drm_mga_private_t *dev_priv = dev->dev_private;
585 929
586 if ( dev_priv->warp != NULL ) 930 if ((dev_priv->warp != NULL)
 587 drm_core_ioremapfree( dev_priv->warp, dev ); 931 && (dev_priv->warp->type != _DRM_CONSISTENT))
588 if ( dev_priv->primary != NULL ) 932 drm_core_ioremapfree(dev_priv->warp, dev);
589 drm_core_ioremapfree( dev_priv->primary, dev ); 933
590 if ( dev->agp_buffer_map != NULL ) 934 if ((dev_priv->primary != NULL)
591 drm_core_ioremapfree( dev->agp_buffer_map, dev ); 935 && (dev_priv->primary->type != _DRM_CONSISTENT))
936 drm_core_ioremapfree(dev_priv->primary, dev);
937
938 if (dev->agp_buffer_map != NULL)
939 drm_core_ioremapfree(dev->agp_buffer_map, dev);
592 940
593 if ( dev_priv->head != NULL ) { 941 if (dev_priv->used_new_dma_init) {
594 mga_freelist_cleanup( dev ); 942#if __OS_HAS_AGP
943 if (dev_priv->agp_mem != NULL) {
944 dev_priv->agp_textures = NULL;
945 drm_unbind_agp(dev_priv->agp_mem);
946
947 drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
948 dev_priv->agp_pages = 0;
949 dev_priv->agp_mem = NULL;
950 }
951
952 if ((dev->agp != NULL) && dev->agp->acquired) {
953 err = drm_agp_release(dev);
954 }
955#endif
956 dev_priv->used_new_dma_init = 0;
595 } 957 }
596 958
597 drm_free( dev->dev_private, sizeof(drm_mga_private_t), 959 dev_priv->warp = NULL;
598 DRM_MEM_DRIVER ); 960 dev_priv->primary = NULL;
599 dev->dev_private = NULL; 961 dev_priv->mmio = NULL;
962 dev_priv->status = NULL;
963 dev_priv->sarea = NULL;
964 dev_priv->sarea_priv = NULL;
965 dev->agp_buffer_map = NULL;
966
967 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
968 dev_priv->warp_pipe = 0;
969 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
970
971 if (dev_priv->head != NULL) {
972 mga_freelist_cleanup(dev);
973 }
600 } 974 }
601 975
602 return 0; 976 return err;
603} 977}
604 978
605int mga_dma_init( DRM_IOCTL_ARGS ) 979int mga_dma_init( DRM_IOCTL_ARGS )
606{ 980{
607 DRM_DEVICE; 981 DRM_DEVICE;
608 drm_mga_init_t init; 982 drm_mga_init_t init;
983 int err;
609 984
610 LOCK_TEST_WITH_RETURN( dev, filp ); 985 LOCK_TEST_WITH_RETURN( dev, filp );
611 986
612 DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) ); 987 DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
988 sizeof(init));
613 989
614 switch ( init.func ) { 990 switch ( init.func ) {
615 case MGA_INIT_DMA: 991 case MGA_INIT_DMA:
616 return mga_do_init_dma( dev, &init ); 992 err = mga_do_init_dma(dev, &init);
993 if (err) {
994 (void) mga_do_cleanup_dma(dev);
995 }
996 return err;
617 case MGA_CLEANUP_DMA: 997 case MGA_CLEANUP_DMA:
618 return mga_do_cleanup_dma( dev ); 998 return mga_do_cleanup_dma( dev );
619 } 999 }
@@ -742,7 +1122,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
742 return ret; 1122 return ret;
743} 1123}
744 1124
745void mga_driver_pretakedown(drm_device_t *dev) 1125/**
1126 * Called just before the module is unloaded.
1127 */
1128int mga_driver_postcleanup(drm_device_t * dev)
1129{
1130 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
1131 dev->dev_private = NULL;
1132
1133 return 0;
1134}
1135
1136/**
1137 * Called when the last opener of the device is closed.
1138 */
1139void mga_driver_pretakedown(drm_device_t * dev)
746{ 1140{
747 mga_do_cleanup_dma( dev ); 1141 mga_do_cleanup_dma( dev );
748} 1142}
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
index 521d4451d012..d20aab3bd57b 100644
--- a/drivers/char/drm/mga_drm.h
+++ b/drivers/char/drm/mga_drm.h
@@ -73,7 +73,8 @@
73 73
74#define MGA_CARD_TYPE_G200 1 74#define MGA_CARD_TYPE_G200 1
75#define MGA_CARD_TYPE_G400 2 75#define MGA_CARD_TYPE_G400 2
76 76#define MGA_CARD_TYPE_G450 3 /* not currently used */
77#define MGA_CARD_TYPE_G550 4
77 78
78#define MGA_FRONT 0x1 79#define MGA_FRONT 0x1
79#define MGA_BACK 0x2 80#define MGA_BACK 0x2
@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea {
225} drm_mga_sarea_t; 226} drm_mga_sarea_t;
226 227
227 228
228/* WARNING: If you change any of these defines, make sure to change the
229 * defines in the Xserver file (xf86drmMga.h)
230 */
231
232/* MGA specific ioctls 229/* MGA specific ioctls
233 * The device specific ioctl range is 0x40 to 0x79. 230 * The device specific ioctl range is 0x40 to 0x79.
234 */ 231 */
@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea {
243#define DRM_MGA_BLIT 0x08 240#define DRM_MGA_BLIT 0x08
244#define DRM_MGA_GETPARAM 0x09 241#define DRM_MGA_GETPARAM 0x09
245 242
243/* 3.2:
244 * ioctls for operating on fences.
245 */
246#define DRM_MGA_SET_FENCE 0x0a
247#define DRM_MGA_WAIT_FENCE 0x0b
248#define DRM_MGA_DMA_BOOTSTRAP 0x0c
249
250
246#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) 251#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
247#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) 252#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
248#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) 253#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea {
253#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) 258#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
254#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) 259#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
255#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) 260#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
261#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
262#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
263#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
256 264
257typedef struct _drm_mga_warp_index { 265typedef struct _drm_mga_warp_index {
258 int installed; 266 int installed;
@@ -291,12 +299,72 @@ typedef struct drm_mga_init {
291 unsigned long buffers_offset; 299 unsigned long buffers_offset;
292} drm_mga_init_t; 300} drm_mga_init_t;
293 301
294typedef struct drm_mga_fullscreen { 302typedef struct drm_mga_dma_bootstrap {
295 enum { 303 /**
296 MGA_INIT_FULLSCREEN = 0x01, 304 * \name AGP texture region
297 MGA_CLEANUP_FULLSCREEN = 0x02 305 *
298 } func; 306 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
299} drm_mga_fullscreen_t; 307 * be filled in with the actual AGP texture settings.
308 *
309 * \warning
 310 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
311 * is zero, it means that PCI memory (most likely through the use of
312 * an IOMMU) is being used for "AGP" textures.
313 */
314 /*@{*/
315 unsigned long texture_handle; /**< Handle used to map AGP textures. */
316 uint32_t texture_size; /**< Size of the AGP texture region. */
317 /*@}*/
318
319
320 /**
321 * Requested size of the primary DMA region.
322 *
323 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
 324 * filled in with the actual size of the allocated primary DMA region.
325 */
326 uint32_t primary_size;
327
328
329 /**
330 * Requested number of secondary DMA buffers.
331 *
332 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
333 * filled in with the actual number of secondary DMA buffers
334 * allocated. Particularly when PCI DMA is used, this may be
 335 * (substantially) less than the number requested.
336 */
337 uint32_t secondary_bin_count;
338
339
340 /**
341 * Requested size of each secondary DMA buffer.
342 *
343 * While the kernel \b is free to reduce
 344 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
 345 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
346 */
347 uint32_t secondary_bin_size;
348
349
350 /**
351 * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
352 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
353 * zero, it means that PCI DMA should be used, even if AGP is
354 * possible.
355 *
356 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
357 * filled in with the actual AGP mode. If AGP was not available
358 * (i.e., PCI DMA was used), this value will be zero.
359 */
360 uint32_t agp_mode;
361
362
363 /**
364 * Desired AGP GART size, measured in megabytes.
365 */
366 uint8_t agp_size;
367} drm_mga_dma_bootstrap_t;
300 368
301typedef struct drm_mga_clear { 369typedef struct drm_mga_clear {
302 unsigned int flags; 370 unsigned int flags;
@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit {
341 */ 409 */
342#define MGA_PARAM_IRQ_NR 1 410#define MGA_PARAM_IRQ_NR 1
343 411
412/* 3.2: Query the actual card type. The DDX only distinguishes between
413 * G200 chips and non-G200 chips, which it calls G400. It turns out that
 414 * there are some very subtle differences between the G4x0 chips and the G550
415 * chips. Using this parameter query, a client-side driver can detect the
416 * difference between a G4x0 and a G550.
417 */
418#define MGA_PARAM_CARD_TYPE 2
419
344typedef struct drm_mga_getparam { 420typedef struct drm_mga_getparam {
345 int param; 421 int param;
346 void __user *value; 422 void __user *value;
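A hedged sketch (again, not part of the patch) of how a client-side driver could use the new MGA_PARAM_CARD_TYPE query to tell a G4x0 from a G550. The int-sized result and the raw ioctl() call mirror the existing getparam path, but are assumptions here.

#include <sys/ioctl.h>
#include "drm.h"
#include "mga_drm.h"

static int mga_query_card_type(int fd)
{
	drm_mga_getparam_t gp;
	int value = 0;

	gp.param = MGA_PARAM_CARD_TYPE;
	gp.value = &value;

	/* Pre-3.2 kernels do not know this parameter and fail with EINVAL. */
	if (ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) < 0)
		return -1;

	return value;	/* MGA_CARD_TYPE_G200, _G400 or _G550 */
}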
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 844cca9cb29d..daabbba3b297 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -38,8 +38,15 @@
38 38
39#include "drm_pciids.h" 39#include "drm_pciids.h"
40 40
41static int mga_driver_device_is_agp(drm_device_t * dev);
41static int postinit( struct drm_device *dev, unsigned long flags ) 42static int postinit( struct drm_device *dev, unsigned long flags )
42{ 43{
44 drm_mga_private_t * const dev_priv =
45 (drm_mga_private_t *) dev->dev_private;
46
47 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
48 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
49
43 dev->counters += 3; 50 dev->counters += 3;
44 dev->types[6] = _DRM_STAT_IRQ; 51 dev->types[6] = _DRM_STAT_IRQ;
45 dev->types[7] = _DRM_STAT_PRIMARY; 52 dev->types[7] = _DRM_STAT_PRIMARY;
@@ -79,8 +86,11 @@ extern int mga_max_ioctl;
79 86
80static struct drm_driver driver = { 87static struct drm_driver driver = {
81 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 88 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
89 .preinit = mga_driver_preinit,
90 .postcleanup = mga_driver_postcleanup,
82 .pretakedown = mga_driver_pretakedown, 91 .pretakedown = mga_driver_pretakedown,
83 .dma_quiescent = mga_driver_dma_quiescent, 92 .dma_quiescent = mga_driver_dma_quiescent,
93 .device_is_agp = mga_driver_device_is_agp,
84 .vblank_wait = mga_driver_vblank_wait, 94 .vblank_wait = mga_driver_vblank_wait,
85 .irq_preinstall = mga_driver_irq_preinstall, 95 .irq_preinstall = mga_driver_irq_preinstall,
86 .irq_postinstall = mga_driver_irq_postinstall, 96 .irq_postinstall = mga_driver_irq_postinstall,
@@ -128,3 +138,38 @@ module_exit(mga_exit);
128MODULE_AUTHOR( DRIVER_AUTHOR ); 138MODULE_AUTHOR( DRIVER_AUTHOR );
129MODULE_DESCRIPTION( DRIVER_DESC ); 139MODULE_DESCRIPTION( DRIVER_DESC );
130MODULE_LICENSE("GPL and additional rights"); 140MODULE_LICENSE("GPL and additional rights");
141
142/**
143 * Determine if the device really is AGP or not.
144 *
145 * In addition to the usual tests performed by \c drm_device_is_agp, this
146 * function detects PCI G450 cards that appear to the system exactly like
147 * AGP G450 cards.
148 *
149 * \param dev The device to be tested.
150 *
151 * \returns
152 * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
153 */
154int mga_driver_device_is_agp(drm_device_t * dev)
155{
156 const struct pci_dev * const pdev = dev->pdev;
157
158
159 /* There are PCI versions of the G450. These cards have the
160 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
161 * bridge chip. We detect these cards, which are not currently
162 * supported by this driver, by looking at the device ID of the
163 * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
164 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
165 * device.
166 */
167
168 if ( (pdev->device == 0x0525)
169 && (pdev->bus->self->vendor == 0x3388)
170 && (pdev->bus->self->device == 0x0021) ) {
171 return 0;
172 }
173
174 return 2;
175}
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 9412e2816eb7..b22fdbd4f830 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -38,10 +38,10 @@
38 38
39#define DRIVER_NAME "mga" 39#define DRIVER_NAME "mga"
40#define DRIVER_DESC "Matrox G200/G400" 40#define DRIVER_DESC "Matrox G200/G400"
41#define DRIVER_DATE "20021029" 41#define DRIVER_DATE "20050607"
42 42
43#define DRIVER_MAJOR 3 43#define DRIVER_MAJOR 3
44#define DRIVER_MINOR 1 44#define DRIVER_MINOR 2
45#define DRIVER_PATCHLEVEL 0 45#define DRIVER_PATCHLEVEL 0
46 46
47typedef struct drm_mga_primary_buffer { 47typedef struct drm_mga_primary_buffer {
@@ -87,9 +87,43 @@ typedef struct drm_mga_private {
87 int chipset; 87 int chipset;
88 int usec_timeout; 88 int usec_timeout;
89 89
90 /**
91 * If set, the new DMA initialization sequence was used. This is
 92 * primarily used to select how the driver should uninitialize its
93 * internal DMA structures.
94 */
95 int used_new_dma_init;
96
97 /**
98 * If AGP memory is used for DMA buffers, this will be the value
99 * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
100 */
101 u32 dma_access;
102
103 /**
104 * If AGP memory is used for DMA buffers, this will be the value
105 * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
106 * transfer).
107 */
108 u32 wagp_enable;
109
110 /**
111 * \name MMIO region parameters.
112 *
113 * \sa drm_mga_private_t::mmio
114 */
115 /*@{*/
116 u32 mmio_base; /**< Bus address of base of MMIO. */
117 u32 mmio_size; /**< Size of the MMIO region. */
118 /*@}*/
119
90 u32 clear_cmd; 120 u32 clear_cmd;
91 u32 maccess; 121 u32 maccess;
92 122
123 wait_queue_head_t fence_queue;
124 atomic_t last_fence_retired;
125 u32 next_fence_to_post;
126
93 unsigned int fb_cpp; 127 unsigned int fb_cpp;
94 unsigned int front_offset; 128 unsigned int front_offset;
95 unsigned int front_pitch; 129 unsigned int front_pitch;
@@ -108,35 +142,43 @@ typedef struct drm_mga_private {
108 drm_local_map_t *status; 142 drm_local_map_t *status;
109 drm_local_map_t *warp; 143 drm_local_map_t *warp;
110 drm_local_map_t *primary; 144 drm_local_map_t *primary;
111 drm_local_map_t *buffers;
112 drm_local_map_t *agp_textures; 145 drm_local_map_t *agp_textures;
146
147 DRM_AGP_MEM *agp_mem;
148 unsigned int agp_pages;
113} drm_mga_private_t; 149} drm_mga_private_t;
114 150
115 /* mga_dma.c */ 151 /* mga_dma.c */
116extern int mga_dma_init( DRM_IOCTL_ARGS ); 152extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
117extern int mga_dma_flush( DRM_IOCTL_ARGS ); 153extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
118extern int mga_dma_reset( DRM_IOCTL_ARGS ); 154extern int mga_dma_init(DRM_IOCTL_ARGS);
119extern int mga_dma_buffers( DRM_IOCTL_ARGS ); 155extern int mga_dma_flush(DRM_IOCTL_ARGS);
120extern void mga_driver_pretakedown(drm_device_t *dev); 156extern int mga_dma_reset(DRM_IOCTL_ARGS);
121extern int mga_driver_dma_quiescent(drm_device_t *dev); 157extern int mga_dma_buffers(DRM_IOCTL_ARGS);
122 158extern int mga_driver_postcleanup(drm_device_t * dev);
123extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv ); 159extern void mga_driver_pretakedown(drm_device_t * dev);
124 160extern int mga_driver_dma_quiescent(drm_device_t * dev);
125extern void mga_do_dma_flush( drm_mga_private_t *dev_priv ); 161
126extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ); 162extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
127extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ); 163
164extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
165extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
166extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
128 167
129extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ); 168extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
130 169
131 /* mga_warp.c */ 170 /* mga_warp.c */
132extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv ); 171extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
133extern int mga_warp_init( drm_mga_private_t *dev_priv ); 172extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
134 173extern int mga_warp_init(drm_mga_private_t * dev_priv);
135extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); 174
136extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ); 175 /* mga_irq.c */
137extern void mga_driver_irq_preinstall( drm_device_t *dev ); 176extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
138extern void mga_driver_irq_postinstall( drm_device_t *dev ); 177extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
139extern void mga_driver_irq_uninstall( drm_device_t *dev ); 178extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
179extern void mga_driver_irq_preinstall(drm_device_t * dev);
180extern void mga_driver_irq_postinstall(drm_device_t * dev);
181extern void mga_driver_irq_uninstall(drm_device_t * dev);
140extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 182extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
141 unsigned long arg); 183 unsigned long arg);
142 184
@@ -527,6 +569,12 @@ do { \
527 */ 569 */
528#define MGA_EXEC 0x0100 570#define MGA_EXEC 0x0100
529 571
572/* AGP PLL encoding (for G200 only).
573 */
574#define MGA_AGP_PLL 0x1e4c
575# define MGA_AGP2XPLL_DISABLE (0 << 0)
576# define MGA_AGP2XPLL_ENABLE (1 << 0)
577
530/* Warp registers 578/* Warp registers
531 */ 579 */
532#define MGA_WR0 0x2d00 580#define MGA_WR0 0x2d00
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/char/drm/mga_ioc32.c
index bc745cfa2095..77d738e75a4d 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/char/drm/mga_ioc32.c
@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); 129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
130} 130}
131 131
132typedef struct drm_mga_drm_bootstrap32 {
133 u32 texture_handle;
134 u32 texture_size;
135 u32 primary_size;
136 u32 secondary_bin_count;
137 u32 secondary_bin_size;
138 u32 agp_mode;
139 u8 agp_size;
140} drm_mga_dma_bootstrap32_t;
141
142static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
143 unsigned long arg)
144{
145 drm_mga_dma_bootstrap32_t dma_bootstrap32;
146 drm_mga_dma_bootstrap_t __user *dma_bootstrap;
147 int err;
148
149 if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
150 sizeof(dma_bootstrap32)))
151 return -EFAULT;
152
153 dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
154 if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
155 || __put_user(dma_bootstrap32.texture_handle,
156 &dma_bootstrap->texture_handle)
157 || __put_user(dma_bootstrap32.texture_size,
158 &dma_bootstrap->texture_size)
159 || __put_user(dma_bootstrap32.primary_size,
160 &dma_bootstrap->primary_size)
161 || __put_user(dma_bootstrap32.secondary_bin_count,
162 &dma_bootstrap->secondary_bin_count)
163 || __put_user(dma_bootstrap32.secondary_bin_size,
164 &dma_bootstrap->secondary_bin_size)
165 || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
166 || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
167 return -EFAULT;
168
169 err = drm_ioctl(file->f_dentry->d_inode, file,
170 DRM_IOCTL_MGA_DMA_BOOTSTRAP,
171 (unsigned long)dma_bootstrap);
172 if (err)
173 return err;
174
175 if (__get_user(dma_bootstrap32.texture_handle,
176 &dma_bootstrap->texture_handle)
177 || __get_user(dma_bootstrap32.texture_size,
178 &dma_bootstrap->texture_size)
179 || __get_user(dma_bootstrap32.primary_size,
180 &dma_bootstrap->primary_size)
181 || __get_user(dma_bootstrap32.secondary_bin_count,
182 &dma_bootstrap->secondary_bin_count)
183 || __get_user(dma_bootstrap32.secondary_bin_size,
184 &dma_bootstrap->secondary_bin_size)
185 || __get_user(dma_bootstrap32.agp_mode,
186 &dma_bootstrap->agp_mode)
187 || __get_user(dma_bootstrap32.agp_size,
188 &dma_bootstrap->agp_size))
189 return -EFAULT;
190
191 if (copy_to_user((void __user *)arg, &dma_bootstrap32,
192 sizeof(dma_bootstrap32)))
193 return -EFAULT;
194
195 return 0;
196}
197
132drm_ioctl_compat_t *mga_compat_ioctls[] = { 198drm_ioctl_compat_t *mga_compat_ioctls[] = {
133 [DRM_MGA_INIT] = compat_mga_init, 199 [DRM_MGA_INIT] = compat_mga_init,
134 [DRM_MGA_GETPARAM] = compat_mga_getparam, 200 [DRM_MGA_GETPARAM] = compat_mga_getparam,
201 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
135}; 202};
136 203
137/** 204/**
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index bc0b6b5d43a6..52eaa4e788f9 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS )
41 drm_mga_private_t *dev_priv = 41 drm_mga_private_t *dev_priv =
42 (drm_mga_private_t *)dev->dev_private; 42 (drm_mga_private_t *)dev->dev_private;
43 int status; 43 int status;
44 int handled = 0;
45
46 status = MGA_READ(MGA_STATUS);
44 47
45 status = MGA_READ( MGA_STATUS );
46
47 /* VBLANK interrupt */ 48 /* VBLANK interrupt */
48 if ( status & MGA_VLINEPEN ) { 49 if ( status & MGA_VLINEPEN ) {
49 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR ); 50 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
50 atomic_inc(&dev->vbl_received); 51 atomic_inc(&dev->vbl_received);
51 DRM_WAKEUP(&dev->vbl_queue); 52 DRM_WAKEUP(&dev->vbl_queue);
52 drm_vbl_send_signals( dev ); 53 drm_vbl_send_signals(dev);
54 handled = 1;
55 }
56
57 /* SOFTRAP interrupt */
58 if (status & MGA_SOFTRAPEN) {
59 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
60 const u32 prim_end = MGA_READ(MGA_PRIMEND);
61
62
63 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
64
65 /* In addition to clearing the interrupt-pending bit, we
66 * have to write to MGA_PRIMEND to re-start the DMA operation.
67 */
68 if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
69 MGA_WRITE(MGA_PRIMEND, prim_end);
70 }
71
72 atomic_inc(&dev_priv->last_fence_retired);
73 DRM_WAKEUP(&dev_priv->fence_queue);
74 handled = 1;
75 }
76
77 if ( handled ) {
53 return IRQ_HANDLED; 78 return IRQ_HANDLED;
54 } 79 }
55 return IRQ_NONE; 80 return IRQ_NONE;
@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
73 return ret; 98 return ret;
74} 99}
75 100
76void mga_driver_irq_preinstall( drm_device_t *dev ) { 101int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
77 drm_mga_private_t *dev_priv = 102{
78 (drm_mga_private_t *)dev->dev_private; 103 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
104 unsigned int cur_fence;
105 int ret = 0;
106
107 /* Assume that the user has missed the current sequence number
 108 * by about a day rather than that they want to wait years
 109 * for it using fences.
110 */
111 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
112 (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
113 - *sequence) <= (1 << 23)));
114
115 *sequence = cur_fence;
116
117 return ret;
118}
119
120void mga_driver_irq_preinstall(drm_device_t * dev)
121{
122 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
79 123
80 /* Disable *all* interrupts */ 124 /* Disable *all* interrupts */
81 MGA_WRITE( MGA_IEN, 0 ); 125 MGA_WRITE( MGA_IEN, 0 );
@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) {
83 MGA_WRITE( MGA_ICLEAR, ~0 ); 127 MGA_WRITE( MGA_ICLEAR, ~0 );
84} 128}
85 129
86void mga_driver_irq_postinstall( drm_device_t *dev ) { 130void mga_driver_irq_postinstall(drm_device_t * dev)
87 drm_mga_private_t *dev_priv = 131{
88 (drm_mga_private_t *)dev->dev_private; 132 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
133
134 DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
89 135
90 /* Turn on VBL interrupt */ 136 /* Turn on vertical blank interrupt and soft trap interrupt. */
91 MGA_WRITE( MGA_IEN, MGA_VLINEIEN ); 137 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
92} 138}
93 139
94void mga_driver_irq_uninstall( drm_device_t *dev ) { 140void mga_driver_irq_uninstall( drm_device_t *dev ) {
@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) {
98 return; 144 return;
99 145
100 /* Disable *all* interrupts */ 146 /* Disable *all* interrupts */
101 MGA_WRITE( MGA_IEN, 0 ); 147 MGA_WRITE(MGA_IEN, 0);
148
149 dev->irq_enabled = 0;
102} 150}
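The wait condition in mga_driver_fence_wait above depends on unsigned wrap-around arithmetic. A standalone illustration (names are mine, not the driver's): the test stays correct across a 32-bit counter wrap as long as the caller never lags the hardware by more than 2^23 fences.

#include <stdint.h>

/* Non-zero once fence number 'wanted' has been retired.  If 'wanted' is
 * still ahead of 'last_retired', the unsigned subtraction wraps to a huge
 * value and the test fails, so the waiter keeps sleeping.
 */
static int mga_fence_retired(uint32_t last_retired, uint32_t wanted)
{
	return (last_retired - wanted) <= (1u << 23);
}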
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 3c7a8f5ba501..05bbb4719376 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
53 53
54 /* Force reset of DWGCTL on G400 (eliminates clip disable bit). 54 /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
55 */ 55 */
56 if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { 56 if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
57 DMA_BLOCK( MGA_DWGCTL, ctx->dwgctl, 57 DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
58 MGA_LEN + MGA_EXEC, 0x80000000, 58 MGA_LEN + MGA_EXEC, 0x80000000,
59 MGA_DWGCTL, ctx->dwgctl, 59 MGA_DWGCTL, ctx->dwgctl,
60 MGA_LEN + MGA_EXEC, 0x80000000 ); 60 MGA_LEN + MGA_EXEC, 0x80000000);
61 } 61 }
62 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 62 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
63 MGA_CXBNDRY, (box->x2 << 16) | box->x1, 63 MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
64 MGA_YTOP, box->y1 * pitch, 64 MGA_YTOP, box->y1 * pitch,
65 MGA_YBOT, box->y2 * pitch ); 65 MGA_YBOT, (box->y2 - 1) * pitch);
66 66
67 ADVANCE_DMA(); 67 ADVANCE_DMA();
68} 68}
@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
260 260
 261 /* Padding required due to hardware bug. 261 /* Padding required due to hardware bug.
262 */ 262 */
263 DMA_BLOCK( MGA_DMAPAD, 0xffffffff, 263 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
264 MGA_DMAPAD, 0xffffffff, 264 MGA_DMAPAD, 0xffffffff,
265 MGA_DMAPAD, 0xffffffff, 265 MGA_DMAPAD, 0xffffffff,
266 MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | 266 MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
267 MGA_WMODE_START | 267 MGA_WMODE_START | dev_priv->wagp_enable));
268 MGA_WAGP_ENABLE) );
269 268
270 ADVANCE_DMA(); 269 ADVANCE_DMA();
271} 270}
@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
342 MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */ 341 MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */
343 342
 344 /* Padding required due to hardware bug */ 343 /* Padding required due to hardware bug */
345 DMA_BLOCK( MGA_DMAPAD, 0xffffffff, 344 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
346 MGA_DMAPAD, 0xffffffff, 345 MGA_DMAPAD, 0xffffffff,
347 MGA_DMAPAD, 0xffffffff, 346 MGA_DMAPAD, 0xffffffff,
348 MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | 347 MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
349 MGA_WMODE_START | 348 MGA_WMODE_START | dev_priv->wagp_enable));
350 MGA_WAGP_ENABLE) );
351 349
352 ADVANCE_DMA(); 350 ADVANCE_DMA();
353} 351}
@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv )
459 if ( dirty & MGA_UPLOAD_TEX0 ) 457 if ( dirty & MGA_UPLOAD_TEX0 )
460 ret |= mga_verify_tex( dev_priv, 0 ); 458 ret |= mga_verify_tex( dev_priv, 0 );
461 459
462 if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { 460 if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
463 if ( dirty & MGA_UPLOAD_TEX1 ) 461 if (dirty & MGA_UPLOAD_TEX1)
464 ret |= mga_verify_tex( dev_priv, 1 ); 462 ret |= mga_verify_tex(dev_priv, 1);
465 463
466 if ( dirty & MGA_UPLOAD_PIPE ) 464 if ( dirty & MGA_UPLOAD_PIPE )
467 ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES ); 465 ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf )
686 684
687 BEGIN_DMA( 1 ); 685 BEGIN_DMA( 1 );
688 686
689 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 687 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
690 MGA_DMAPAD, 0x00000000, 688 MGA_DMAPAD, 0x00000000,
691 MGA_SECADDRESS, (address | 689 MGA_SECADDRESS, (address |
692 MGA_DMA_VERTEX), 690 MGA_DMA_VERTEX),
693 MGA_SECEND, ((address + length) | 691 MGA_SECEND, ((address + length) |
694 MGA_PAGPXFER) ); 692 dev_priv->dma_access));
695 693
696 ADVANCE_DMA(); 694 ADVANCE_DMA();
697 } while ( ++i < sarea_priv->nbox ); 695 } while ( ++i < sarea_priv->nbox );
@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf,
733 731
734 BEGIN_DMA( 1 ); 732 BEGIN_DMA( 1 );
735 733
736 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 734 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
737 MGA_DMAPAD, 0x00000000, 735 MGA_DMAPAD, 0x00000000,
738 MGA_SETUPADDRESS, address + start, 736 MGA_SETUPADDRESS, address + start,
739 MGA_SETUPEND, ((address + end) | 737 MGA_SETUPEND, ((address + end) |
740 MGA_PAGPXFER) ); 738 dev_priv->dma_access));
741 739
742 ADVANCE_DMA(); 740 ADVANCE_DMA();
743 } while ( ++i < sarea_priv->nbox ); 741 } while ( ++i < sarea_priv->nbox );
@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
764 drm_mga_private_t *dev_priv = dev->dev_private; 762 drm_mga_private_t *dev_priv = dev->dev_private;
765 drm_mga_buf_priv_t *buf_priv = buf->dev_private; 763 drm_mga_buf_priv_t *buf_priv = buf->dev_private;
766 drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; 764 drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
767 u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; 765 u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
768 u32 y2; 766 u32 y2;
769 DMA_LOCALS; 767 DMA_LOCALS;
770 DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); 768 DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used );
@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS )
1095 case MGA_PARAM_IRQ_NR: 1093 case MGA_PARAM_IRQ_NR:
1096 value = dev->irq; 1094 value = dev->irq;
1097 break; 1095 break;
1096 case MGA_PARAM_CARD_TYPE:
1097 value = dev_priv->chipset;
1098 break;
1098 default: 1099 default:
1099 return DRM_ERR(EINVAL); 1100 return DRM_ERR(EINVAL);
1100 } 1101 }
@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS )
1107 return 0; 1108 return 0;
1108} 1109}
1109 1110
1111static int mga_set_fence(DRM_IOCTL_ARGS)
1112{
1113 DRM_DEVICE;
1114 drm_mga_private_t *dev_priv = dev->dev_private;
1115 u32 temp;
1116 DMA_LOCALS;
1117
1118 if (!dev_priv) {
1119 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1120 return DRM_ERR(EINVAL);
1121 }
1122
1123 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1124
 1125 /* I would normally do this assignment in the declaration of temp,
1126 * but dev_priv may be NULL.
1127 */
1128
1129 temp = dev_priv->next_fence_to_post;
1130 dev_priv->next_fence_to_post++;
1131
1132 BEGIN_DMA(1);
1133 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
1134 MGA_DMAPAD, 0x00000000,
1135 MGA_DMAPAD, 0x00000000,
1136 MGA_SOFTRAP, 0x00000000);
1137 ADVANCE_DMA();
1138
1139 if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) {
1140 DRM_ERROR("copy_to_user\n");
1141 return DRM_ERR(EFAULT);
1142 }
1143
1144 return 0;
1145}
1146
1147static int mga_wait_fence(DRM_IOCTL_ARGS)
1148{
1149 DRM_DEVICE;
1150 drm_mga_private_t *dev_priv = dev->dev_private;
1151 u32 fence;
1152
1153 if (!dev_priv) {
1154 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1155 return DRM_ERR(EINVAL);
1156 }
1157
1158 DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
1159
1160 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1161
1162 mga_driver_fence_wait(dev, & fence);
1163
1164 if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) {
1165 DRM_ERROR("copy_to_user\n");
1166 return DRM_ERR(EFAULT);
1167 }
1168
1169 return 0;
1170}
1171
1110drm_ioctl_desc_t mga_ioctls[] = { 1172drm_ioctl_desc_t mga_ioctls[] = {
1111 [DRM_IOCTL_NR(DRM_MGA_INIT)] = { mga_dma_init, 1, 1 }, 1173 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
1112 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, 1174 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
1113 [DRM_IOCTL_NR(DRM_MGA_RESET)] = { mga_dma_reset, 1, 0 }, 1175 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
1114 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, 1176 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
1115 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, 1177 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
1116 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, 1178 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
1117 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, 1179 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
1118 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, 1180 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
1119 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, 1181 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
1120 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam, 1, 0 }, 1182 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
1183 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
1184 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
1185 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
1186
1121}; 1187};
1122 1188
1123int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1189int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
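A hedged user-space sketch (not part of the patch) pairing the two new fence ioctls registered above: SET_FENCE queues a SOFTRAP and hands back its fence number, WAIT_FENCE sleeps until that number retires. The raw ioctl() calls and error handling are assumptions.

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "mga_drm.h"

static int mga_emit_and_wait_fence(int fd)
{
	uint32_t fence = 0;

	/* Emits a SOFTRAP into the primary DMA stream and returns the
	 * fence number that the SOFTRAP interrupt will retire.
	 */
	if (ioctl(fd, DRM_IOCTL_MGA_SET_FENCE, &fence) < 0)
		return -1;

	/* Blocks (up to the kernel's 3 * DRM_HZ timeout) until that fence
	 * has been retired; the current fence count is written back.
	 */
	if (ioctl(fd, DRM_IOCTL_MGA_WAIT_FENCE, &fence) < 0)
		return -1;

	return 0;
}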
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c
index 0a3a0cc700dc..55ccc8a0ac29 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/char/drm/mga_warp.c
@@ -48,65 +48,52 @@ do { \
48 vcbase += WARP_UCODE_SIZE( which ); \ 48 vcbase += WARP_UCODE_SIZE( which ); \
49} while (0) 49} while (0)
50 50
51 51static const unsigned int mga_warp_g400_microcode_size =
52static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv ) 52 (WARP_UCODE_SIZE(warp_g400_tgz) +
53{ 53 WARP_UCODE_SIZE(warp_g400_tgza) +
54 unsigned int size; 54 WARP_UCODE_SIZE(warp_g400_tgzaf) +
55 55 WARP_UCODE_SIZE(warp_g400_tgzf) +
56 size = ( WARP_UCODE_SIZE( warp_g400_tgz ) + 56 WARP_UCODE_SIZE(warp_g400_tgzs) +
57 WARP_UCODE_SIZE( warp_g400_tgza ) + 57 WARP_UCODE_SIZE(warp_g400_tgzsa) +
58 WARP_UCODE_SIZE( warp_g400_tgzaf ) + 58 WARP_UCODE_SIZE(warp_g400_tgzsaf) +
59 WARP_UCODE_SIZE( warp_g400_tgzf ) + 59 WARP_UCODE_SIZE(warp_g400_tgzsf) +
60 WARP_UCODE_SIZE( warp_g400_tgzs ) + 60 WARP_UCODE_SIZE(warp_g400_t2gz) +
61 WARP_UCODE_SIZE( warp_g400_tgzsa ) + 61 WARP_UCODE_SIZE(warp_g400_t2gza) +
62 WARP_UCODE_SIZE( warp_g400_tgzsaf ) + 62 WARP_UCODE_SIZE(warp_g400_t2gzaf) +
63 WARP_UCODE_SIZE( warp_g400_tgzsf ) + 63 WARP_UCODE_SIZE(warp_g400_t2gzf) +
64 WARP_UCODE_SIZE( warp_g400_t2gz ) + 64 WARP_UCODE_SIZE(warp_g400_t2gzs) +
65 WARP_UCODE_SIZE( warp_g400_t2gza ) + 65 WARP_UCODE_SIZE(warp_g400_t2gzsa) +
66 WARP_UCODE_SIZE( warp_g400_t2gzaf ) + 66 WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
67 WARP_UCODE_SIZE( warp_g400_t2gzf ) + 67 WARP_UCODE_SIZE(warp_g400_t2gzsf));
68 WARP_UCODE_SIZE( warp_g400_t2gzs ) + 68
69 WARP_UCODE_SIZE( warp_g400_t2gzsa ) + 69static const unsigned int mga_warp_g200_microcode_size =
70 WARP_UCODE_SIZE( warp_g400_t2gzsaf ) + 70 (WARP_UCODE_SIZE(warp_g200_tgz) +
71 WARP_UCODE_SIZE( warp_g400_t2gzsf ) ); 71 WARP_UCODE_SIZE(warp_g200_tgza) +
72 72 WARP_UCODE_SIZE(warp_g200_tgzaf) +
73 size = PAGE_ALIGN( size ); 73 WARP_UCODE_SIZE(warp_g200_tgzf) +
74 74 WARP_UCODE_SIZE(warp_g200_tgzs) +
75 DRM_DEBUG( "G400 ucode size = %d bytes\n", size ); 75 WARP_UCODE_SIZE(warp_g200_tgzsa) +
76 return size; 76 WARP_UCODE_SIZE(warp_g200_tgzsaf) +
77} 77 WARP_UCODE_SIZE(warp_g200_tgzsf));
78 78
79static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv ) 79
80unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
80{ 81{
81 unsigned int size; 82 switch (dev_priv->chipset) {
82 83 case MGA_CARD_TYPE_G400:
83 size = ( WARP_UCODE_SIZE( warp_g200_tgz ) + 84 case MGA_CARD_TYPE_G550:
84 WARP_UCODE_SIZE( warp_g200_tgza ) + 85 return PAGE_ALIGN(mga_warp_g400_microcode_size);
85 WARP_UCODE_SIZE( warp_g200_tgzaf ) + 86 case MGA_CARD_TYPE_G200:
86 WARP_UCODE_SIZE( warp_g200_tgzf ) + 87 return PAGE_ALIGN(mga_warp_g200_microcode_size);
87 WARP_UCODE_SIZE( warp_g200_tgzs ) + 88 default:
88 WARP_UCODE_SIZE( warp_g200_tgzsa ) + 89 return 0;
89 WARP_UCODE_SIZE( warp_g200_tgzsaf ) + 90 }
90 WARP_UCODE_SIZE( warp_g200_tgzsf ) );
91
92 size = PAGE_ALIGN( size );
93
94 DRM_DEBUG( "G200 ucode size = %d bytes\n", size );
95 return size;
96} 91}
97 92
98static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv ) 93static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
99{ 94{
100 unsigned char *vcbase = dev_priv->warp->handle; 95 unsigned char *vcbase = dev_priv->warp->handle;
101 unsigned long pcbase = dev_priv->warp->offset; 96 unsigned long pcbase = dev_priv->warp->offset;
102 unsigned int size;
103
104 size = mga_warp_g400_microcode_size( dev_priv );
105 if ( size > dev_priv->warp->size ) {
106 DRM_ERROR( "microcode too large! (%u > %lu)\n",
107 size, dev_priv->warp->size );
108 return DRM_ERR(ENOMEM);
109 }
110 97
111 memset( dev_priv->warp_pipe_phys, 0, 98 memset( dev_priv->warp_pipe_phys, 0,
112 sizeof(dev_priv->warp_pipe_phys) ); 99 sizeof(dev_priv->warp_pipe_phys) );
@@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
136{ 123{
137 unsigned char *vcbase = dev_priv->warp->handle; 124 unsigned char *vcbase = dev_priv->warp->handle;
138 unsigned long pcbase = dev_priv->warp->offset; 125 unsigned long pcbase = dev_priv->warp->offset;
139 unsigned int size;
140
141 size = mga_warp_g200_microcode_size( dev_priv );
142 if ( size > dev_priv->warp->size ) {
143 DRM_ERROR( "microcode too large! (%u > %lu)\n",
144 size, dev_priv->warp->size );
145 return DRM_ERR(ENOMEM);
146 }
147 126
148 memset( dev_priv->warp_pipe_phys, 0, 127 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
149 sizeof(dev_priv->warp_pipe_phys) );
150 128
151 WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ ); 129 WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
152 WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF ); 130 WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
153 WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA ); 131 WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
154 WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF ); 132 WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
155 WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS ); 133 WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
156 WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF ); 134 WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
157 WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA ); 135 WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
158 WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF ); 136 WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
159 137
160 return 0; 138 return 0;
161} 139}
162 140
163int mga_warp_install_microcode( drm_mga_private_t *dev_priv ) 141int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
164{ 142{
165 switch ( dev_priv->chipset ) { 143 const unsigned int size = mga_warp_microcode_size(dev_priv);
144
145 DRM_DEBUG("MGA ucode size = %d bytes\n", size);
146 if (size > dev_priv->warp->size) {
147 DRM_ERROR("microcode too large! (%u > %lu)\n",
148 size, dev_priv->warp->size);
149 return DRM_ERR(ENOMEM);
150 }
151
152 switch (dev_priv->chipset) {
166 case MGA_CARD_TYPE_G400: 153 case MGA_CARD_TYPE_G400:
167 return mga_warp_install_g400_microcode( dev_priv ); 154 case MGA_CARD_TYPE_G550:
155 return mga_warp_install_g400_microcode(dev_priv);
168 case MGA_CARD_TYPE_G200: 156 case MGA_CARD_TYPE_G200:
169 return mga_warp_install_g200_microcode( dev_priv ); 157 return mga_warp_install_g200_microcode( dev_priv );
170 default: 158 default:
@@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv )
182 */ 170 */
183 switch ( dev_priv->chipset ) { 171 switch ( dev_priv->chipset ) {
184 case MGA_CARD_TYPE_G400: 172 case MGA_CARD_TYPE_G400:
185 MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND ); 173 case MGA_CARD_TYPE_G550:
186 MGA_WRITE( MGA_WGETMSB, 0x00000E00 ); 174 MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
187 MGA_WRITE( MGA_WVRTXSZ, 0x00001807 ); 175 MGA_WRITE(MGA_WGETMSB, 0x00000E00);
188 MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 ); 176 MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
177 MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
189 break; 178 break;
190 case MGA_CARD_TYPE_G200: 179 case MGA_CARD_TYPE_G200:
191 MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND ); 180 MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 08ed8d01d9d9..895152206b31 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
326 ring_start = dev_priv->cce_ring->offset - dev->agp->base; 326 ring_start = dev_priv->cce_ring->offset - dev->agp->base;
327 else 327 else
328#endif 328#endif
329 ring_start = dev_priv->cce_ring->offset - dev->sg->handle; 329 ring_start = dev_priv->cce_ring->offset -
330 (unsigned long)dev->sg->virtual;
330 331
331 R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); 332 R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
332 333
@@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
487 r128_do_cleanup_cce( dev ); 488 r128_do_cleanup_cce( dev );
488 return DRM_ERR(EINVAL); 489 return DRM_ERR(EINVAL);
489 } 490 }
491 dev->agp_buffer_token = init->buffers_offset;
490 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 492 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
491 if(!dev->agp_buffer_map) { 493 if(!dev->agp_buffer_map) {
492 DRM_ERROR("could not find dma buffer region!\n"); 494 DRM_ERROR("could not find dma buffer region!\n");
@@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
537 dev_priv->cce_buffers_offset = dev->agp->base; 539 dev_priv->cce_buffers_offset = dev->agp->base;
538 else 540 else
539#endif 541#endif
540 dev_priv->cce_buffers_offset = dev->sg->handle; 542 dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
541 543
542 dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; 544 dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
543 dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle 545 dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 0cba17d1e0ff..b616cd3ed2cd 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -215,7 +215,7 @@ typedef struct drm_r128_sarea {
215#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) 215#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
216#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) 216#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
217#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) 217#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
218#define DRM_IOCTL_R128_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) 218#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
219#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) 219#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
220 220
221typedef struct drm_r128_init { 221typedef struct drm_r128_init {
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
new file mode 100644
index 000000000000..623f1f460cb5
--- /dev/null
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -0,0 +1,801 @@
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "radeon_drm.h"
37#include "radeon_drv.h"
38#include "r300_reg.h"
39
40
41#define R300_SIMULTANEOUS_CLIPRECTS 4
42
43/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
44 */
45static const int r300_cliprect_cntl[4] = {
46 0xAAAA,
47 0xEEEE,
48 0xFEFE,
49 0xFFFE
50};
51
52
53/**
54 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
55 * buffer, starting with index n.
56 */
57static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
58 drm_radeon_cmd_buffer_t* cmdbuf,
59 int n)
60{
61 drm_clip_rect_t box;
62 int nr;
63 int i;
64 RING_LOCALS;
65
66 nr = cmdbuf->nbox - n;
67 if (nr > R300_SIMULTANEOUS_CLIPRECTS)
68 nr = R300_SIMULTANEOUS_CLIPRECTS;
69
70 DRM_DEBUG("%i cliprects\n", nr);
71
72 if (nr) {
73 BEGIN_RING(6 + nr*2);
74 OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
75
76 for(i = 0; i < nr; ++i) {
77 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
78 DRM_ERROR("copy cliprect faulted\n");
79 return DRM_ERR(EFAULT);
80 }
81
82 box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
83 box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
84 box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
85 box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
86
87 OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
88 (box.y1 << R300_CLIPRECT_Y_SHIFT));
89 OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
90 (box.y2 << R300_CLIPRECT_Y_SHIFT));
91 }
92
93 OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
94
95 /* TODO/SECURITY: Force scissors to a safe value, otherwise the
96 * client might be able to trample over memory.
97 * The impact should be very limited, but I'd rather be safe than
98 * sorry.
99 */
100 OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
101 OUT_RING( 0 );
102 OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
103 ADVANCE_RING();
104 } else {
105 /* Why we allow zero cliprect rendering:
106 * There are some commands in a command buffer that must be submitted
107 * even when there are no cliprects, e.g. DMA buffer discard
108 * or state setting (though state setting could be avoided by
109 * simulating a loss of context).
110 *
111 * Now since the cmdbuf interface is so chaotic right now (and is
112 * bound to remain that way for a bit until things settle down),
113 * it is basically impossible to filter out the commands that are
114 * necessary and those that aren't.
115 *
116 * So I choose the safe way and don't do any filtering at all;
117 * instead, I simply set up the engine so that all rendering
118 * can't produce any fragments.
119 */
120 BEGIN_RING(2);
121 OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
122 ADVANCE_RING();
123 }
124
125 return 0;
126}
127
128u8 r300_reg_flags[0x10000>>2];
129
130
131void r300_init_reg_flags(void)
132{
133 int i;
134 memset(r300_reg_flags, 0, 0x10000>>2);
135 #define ADD_RANGE_MARK(reg, count,mark) \
136 for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
137 r300_reg_flags[i]|=(mark);
138
139 #define MARK_SAFE 1
140 #define MARK_CHECK_OFFSET 2
141
142 #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
143
144 /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
145 ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
146 ADD_RANGE(0x2080, 1);
147 ADD_RANGE(R300_SE_VTE_CNTL, 2);
148 ADD_RANGE(0x2134, 2);
149 ADD_RANGE(0x2140, 1);
150 ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
151 ADD_RANGE(0x21DC, 1);
152 ADD_RANGE(0x221C, 1);
153 ADD_RANGE(0x2220, 4);
154 ADD_RANGE(0x2288, 1);
155 ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
156 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
157 ADD_RANGE(R300_GB_ENABLE, 1);
158 ADD_RANGE(R300_GB_MSPOS0, 5);
159 ADD_RANGE(R300_TX_ENABLE, 1);
160 ADD_RANGE(0x4200, 4);
161 ADD_RANGE(0x4214, 1);
162 ADD_RANGE(R300_RE_POINTSIZE, 1);
163 ADD_RANGE(0x4230, 3);
164 ADD_RANGE(R300_RE_LINE_CNT, 1);
165 ADD_RANGE(0x4238, 1);
166 ADD_RANGE(0x4260, 3);
167 ADD_RANGE(0x4274, 4);
168 ADD_RANGE(0x4288, 5);
169 ADD_RANGE(0x42A0, 1);
170 ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
171 ADD_RANGE(0x42B4, 1);
172 ADD_RANGE(R300_RE_CULL_CNTL, 1);
173 ADD_RANGE(0x42C0, 2);
174 ADD_RANGE(R300_RS_CNTL_0, 2);
175 ADD_RANGE(R300_RS_INTERP_0, 8);
176 ADD_RANGE(R300_RS_ROUTE_0, 8);
177 ADD_RANGE(0x43A4, 2);
178 ADD_RANGE(0x43E8, 1);
179 ADD_RANGE(R300_PFS_CNTL_0, 3);
180 ADD_RANGE(R300_PFS_NODE_0, 4);
181 ADD_RANGE(R300_PFS_TEXI_0, 64);
182 ADD_RANGE(0x46A4, 5);
183 ADD_RANGE(R300_PFS_INSTR0_0, 64);
184 ADD_RANGE(R300_PFS_INSTR1_0, 64);
185 ADD_RANGE(R300_PFS_INSTR2_0, 64);
186 ADD_RANGE(R300_PFS_INSTR3_0, 64);
187 ADD_RANGE(0x4BC0, 1);
188 ADD_RANGE(0x4BC8, 3);
189 ADD_RANGE(R300_PP_ALPHA_TEST, 2);
190 ADD_RANGE(0x4BD8, 1);
191 ADD_RANGE(R300_PFS_PARAM_0_X, 64);
192 ADD_RANGE(0x4E00, 1);
193 ADD_RANGE(R300_RB3D_CBLEND, 2);
194 ADD_RANGE(R300_RB3D_COLORMASK, 1);
195 ADD_RANGE(0x4E10, 3);
196 ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
197 ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
198 ADD_RANGE(0x4E50, 9);
199 ADD_RANGE(0x4E88, 1);
200 ADD_RANGE(0x4EA0, 2);
201 ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
202 ADD_RANGE(0x4F10, 4);
203 ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
204 ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
205 ADD_RANGE(0x4F28, 1);
206 ADD_RANGE(0x4F30, 2);
207 ADD_RANGE(0x4F44, 1);
208 ADD_RANGE(0x4F54, 1);
209
210 ADD_RANGE(R300_TX_FILTER_0, 16);
211 ADD_RANGE(R300_TX_UNK1_0, 16);
212 ADD_RANGE(R300_TX_SIZE_0, 16);
213 ADD_RANGE(R300_TX_FORMAT_0, 16);
214 /* Texture offset is dangerous and needs more checking */
215 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
216 ADD_RANGE(R300_TX_UNK4_0, 16);
217 ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
218
219	/* Registers that are written sporadically as primitives are emitted */
220 ADD_RANGE(0x4f18, 1);
221 ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
222 ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
223 ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
224
225}
226
227static __inline__ int r300_check_range(unsigned reg, int count)
228{
229 int i;
230 if(reg & ~0xffff)return -1;
231 for(i=(reg>>2);i<(reg>>2)+count;i++)
232 if(r300_reg_flags[i]!=MARK_SAFE)return 1;
233 return 0;
234}
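/* A minimal sketch (not part of the driver) of how the flag table above is
 * consumed, assuming r300_init_reg_flags() has already run:
 */
#if 0
static void r300_reg_flags_example(void)
{
	/* Whole range added with plain ADD_RANGE() -> MARK_SAFE -> returns 0,
	 * so r300_emit_packet0() streams the user values through unchecked. */
	(void)r300_check_range(R300_TX_FILTER_0, 16);

	/* Range added with MARK_CHECK_OFFSET -> returns nonzero, so
	 * r300_emit_carefully_checked_packet0() runs every value through
	 * r300_check_offset() before it reaches the ring. */
	(void)r300_check_range(R300_TX_OFFSET_0, 16);

	/* Registers that were never added keep flag 0 (not MARK_SAFE): the
	 * careful path rejects such a packet with EINVAL. */
}
#endif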
235
236 /* we expect offsets passed to the framebuffer to be either within video memory or
237 within AGP space */
238static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
239{
240	/* we really want to check against the end of the video aperture,
241	   but that value is not being kept around.
242	   This code is correct for now (it does the same thing as the
243	   code that sets MC_FB_LOCATION in radeon_cp.c) */
244 if((offset>=dev_priv->fb_location) &&
245 (offset<dev_priv->gart_vm_start))return 0;
246 if((offset>=dev_priv->gart_vm_start) &&
247 (offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
248 return 1;
249}
250
251static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
252 drm_radeon_cmd_buffer_t* cmdbuf,
253 drm_r300_cmd_header_t header)
254{
255 int reg;
256 int sz;
257 int i;
258 int values[64];
259 RING_LOCALS;
260
261 sz = header.packet0.count;
262 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
263
264 if((sz>64)||(sz<0)){
265 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
266 return DRM_ERR(EINVAL);
267 }
268 for(i=0;i<sz;i++){
269 values[i]=((int __user*)cmdbuf->buf)[i];
270 switch(r300_reg_flags[(reg>>2)+i]){
271 case MARK_SAFE:
272 break;
273 case MARK_CHECK_OFFSET:
274 if(r300_check_offset(dev_priv, (u32)values[i])){
275 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
276 return DRM_ERR(EINVAL);
277 }
278 break;
279 default:
280 DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
281 return DRM_ERR(EINVAL);
282 }
283 }
284
285 BEGIN_RING(1+sz);
286 OUT_RING( CP_PACKET0( reg, sz-1 ) );
287 OUT_RING_TABLE( values, sz );
288 ADVANCE_RING();
289
290 cmdbuf->buf += sz*4;
291 cmdbuf->bufsz -= sz*4;
292
293 return 0;
294}
295
296/**
297 * Emits a packet0 setting arbitrary registers.
298 * Called by r300_do_cp_cmdbuf.
299 *
300 * Note that checks are performed on contents and addresses of the registers
301 */
302static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
303 drm_radeon_cmd_buffer_t* cmdbuf,
304 drm_r300_cmd_header_t header)
305{
306 int reg;
307 int sz;
308 RING_LOCALS;
309
310 sz = header.packet0.count;
311 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
312
313 if (!sz)
314 return 0;
315
316 if (sz*4 > cmdbuf->bufsz)
317 return DRM_ERR(EINVAL);
318
319 if (reg+sz*4 >= 0x10000){
320 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
321 return DRM_ERR(EINVAL);
322 }
323
324 if(r300_check_range(reg, sz)){
325 /* go and check everything */
326 return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
327 }
328 /* the rest of the data is safe to emit, whatever the values the user passed */
329
330 BEGIN_RING(1+sz);
331 OUT_RING( CP_PACKET0( reg, sz-1 ) );
332 OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
333 ADVANCE_RING();
334
335 cmdbuf->buf += sz*4;
336 cmdbuf->bufsz -= sz*4;
337
338 return 0;
339}
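/* Worked example (sketch): a command-stream fragment whose header has
 * cmd_type R300_CMD_PACKET0, packet0.count = 6, packet0.reglo = 0x98 and
 * packet0.reghi = 0x1D decodes to reg = 0x1D98 (R300_SE_VPORT_XSCALE) and
 * sz = 6.  That whole range was registered with
 * ADD_RANGE(R300_SE_VPORT_XSCALE, 6) above, so r300_check_range() returns 0
 * and the fast path emits CP_PACKET0(0x1D98, 5) followed by the six
 * user-supplied dwords taken from cmdbuf->buf.
 */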
340
341
342/**
343 * Uploads user-supplied vertex program instructions or parameters onto
344 * the graphics card.
345 * Called by r300_do_cp_cmdbuf.
346 */
347static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
348 drm_radeon_cmd_buffer_t* cmdbuf,
349 drm_r300_cmd_header_t header)
350{
351 int sz;
352 int addr;
353 RING_LOCALS;
354
355 sz = header.vpu.count;
356 addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
357
358 if (!sz)
359 return 0;
360 if (sz*16 > cmdbuf->bufsz)
361 return DRM_ERR(EINVAL);
362
363 BEGIN_RING(5+sz*4);
364	/* Wait for the VAP to come to its senses. */
365	/* There is no need to emit this more than once (only once before the VAP
366	   is programmed), but that optimization is left for later. */
367 OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
368 OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
369 OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
370 OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
371
372 ADVANCE_RING();
373
374 cmdbuf->buf += sz*16;
375 cmdbuf->bufsz -= sz*16;
376
377 return 0;
378}
379
380
381/**
382 * Emit a clear packet from userspace.
383 * Called by r300_emit_packet3.
384 */
385static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
386 drm_radeon_cmd_buffer_t* cmdbuf)
387{
388 RING_LOCALS;
389
390 if (8*4 > cmdbuf->bufsz)
391 return DRM_ERR(EINVAL);
392
393 BEGIN_RING(10);
394 OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
395 OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
396 (1<<R300_PRIM_NUM_VERTICES_SHIFT) );
397 OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
398 ADVANCE_RING();
399
400 cmdbuf->buf += 8*4;
401 cmdbuf->bufsz -= 8*4;
402
403 return 0;
404}
405
406static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
407 drm_radeon_cmd_buffer_t* cmdbuf,
408 u32 header)
409{
410 int count, i,k;
411 #define MAX_ARRAY_PACKET 64
412 u32 payload[MAX_ARRAY_PACKET];
413 u32 narrays;
414 RING_LOCALS;
415
416 count=(header>>16) & 0x3fff;
417
418 if((count+1)>MAX_ARRAY_PACKET){
419 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
420 return DRM_ERR(EINVAL);
421 }
422 memset(payload, 0, MAX_ARRAY_PACKET*4);
423 memcpy(payload, cmdbuf->buf+4, (count+1)*4);
424
425 /* carefully check packet contents */
426
427 narrays=payload[0];
428 k=0;
429 i=1;
430 while((k<narrays) && (i<(count+1))){
431 i++; /* skip attribute field */
432 if(r300_check_offset(dev_priv, payload[i])){
433 DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
434 return DRM_ERR(EINVAL);
435 }
436 k++;
437 i++;
438 if(k==narrays)break;
439 /* have one more to process, they come in pairs */
440 if(r300_check_offset(dev_priv, payload[i])){
441 DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
442 return DRM_ERR(EINVAL);
443 }
444 k++;
445 i++;
446 }
447 /* do the counts match what we expect ? */
448 if((k!=narrays) || (i!=(count+1))){
449 DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
450 return DRM_ERR(EINVAL);
451 }
452
453 /* all clear, output packet */
454
455 BEGIN_RING(count+2);
456 OUT_RING(header);
457 OUT_RING_TABLE(payload, count+1);
458 ADVANCE_RING();
459
460 cmdbuf->buf += (count+2)*4;
461 cmdbuf->bufsz -= (count+2)*4;
462
463 return 0;
464}
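/* Sketch of the payload layout this checker implies (values are only
 * illustrative): for narrays = 3 the packet body is count + 1 = 6 dwords,
 *
 *	payload[0]  narrays (3)
 *	payload[1]  packed attribute dword for arrays 0 and 1
 *	payload[2]  offset of array 0, validated by r300_check_offset()
 *	payload[3]  offset of array 1, validated by r300_check_offset()
 *	payload[4]  attribute dword for the last, unpaired array
 *	payload[5]  offset of array 2, validated by r300_check_offset()
 *
 * and the loop above finishes with k == narrays and i == count + 1, so the
 * packet is emitted unchanged.
 */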
465
466static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
467 drm_radeon_cmd_buffer_t* cmdbuf)
468{
469 u32 header;
470 int count;
471 RING_LOCALS;
472
473 if (4 > cmdbuf->bufsz)
474 return DRM_ERR(EINVAL);
475
476 /* Fixme !! This simply emits a packet without much checking.
477 We need to be smarter. */
478
479 /* obtain first word - actual packet3 header */
480 header = *(u32 __user*)cmdbuf->buf;
481
482 /* Is it packet 3 ? */
483 if( (header>>30)!=0x3 ) {
484 DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
485 return DRM_ERR(EINVAL);
486 }
487
488 count=(header>>16) & 0x3fff;
489
490 /* Check again now that we know how much data to expect */
491 if ((count+2)*4 > cmdbuf->bufsz){
492 DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
493 (count+2)*4, cmdbuf->bufsz);
494 return DRM_ERR(EINVAL);
495 }
496
497 /* Is it a packet type we know about ? */
498 switch(header & 0xff00){
499 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
500 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
501
502 case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
503 case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
504 case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
505 case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
506 case RADEON_WAIT_FOR_IDLE:
507 case RADEON_CP_NOP:
508 /* these packets are safe */
509 break;
510 default:
511 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
512 return DRM_ERR(EINVAL);
513 }
514
515
516 BEGIN_RING(count+2);
517 OUT_RING(header);
518 OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
519 ADVANCE_RING();
520
521 cmdbuf->buf += (count+2)*4;
522 cmdbuf->bufsz -= (count+2)*4;
523
524 return 0;
525}
526
527
528/**
529 * Emit a rendering packet3 from userspace.
530 * Called by r300_do_cp_cmdbuf.
531 */
532static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
533 drm_radeon_cmd_buffer_t* cmdbuf,
534 drm_r300_cmd_header_t header)
535{
536 int n;
537 int ret;
538 char __user* orig_buf = cmdbuf->buf;
539 int orig_bufsz = cmdbuf->bufsz;
540
541 /* This is a do-while-loop so that we run the interior at least once,
542 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
543 */
544 n = 0;
545 do {
546 if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
547 ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
548 if (ret)
549 return ret;
550
551 cmdbuf->buf = orig_buf;
552 cmdbuf->bufsz = orig_bufsz;
553 }
554
555 switch(header.packet3.packet) {
556 case R300_CMD_PACKET3_CLEAR:
557 DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
558 ret = r300_emit_clear(dev_priv, cmdbuf);
559 if (ret) {
560 DRM_ERROR("r300_emit_clear failed\n");
561 return ret;
562 }
563 break;
564
565 case R300_CMD_PACKET3_RAW:
566 DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
567 ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
568 if (ret) {
569 DRM_ERROR("r300_emit_raw_packet3 failed\n");
570 return ret;
571 }
572 break;
573
574 default:
575 DRM_ERROR("bad packet3 type %i at %p\n",
576 header.packet3.packet,
577 cmdbuf->buf - sizeof(header));
578 return DRM_ERR(EINVAL);
579 }
580
581 n += R300_SIMULTANEOUS_CLIPRECTS;
582 } while(n < cmdbuf->nbox);
583
584 return 0;
585}
586
587/* Some of the R300 chips seem to be extremely touchy about the two registers
588 * that are configured in r300_pacify.
589 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
590 * sends a command buffer that contains only state setting commands and a
591 * vertex program/parameter upload sequence, this will eventually lead to a
592 * lockup, unless the sequence is bracketed by calls to r300_pacify.
593 * So we should take great care to *always* call r300_pacify before
594 * *anything* 3D related, and again afterwards. This is what the
595 * call bracket in r300_do_cp_cmdbuf is for.
596 */
597
598/**
599 * Emit the sequence to pacify R300.
600 */
601static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
602{
603 RING_LOCALS;
604
605 BEGIN_RING(6);
606 OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
607 OUT_RING( 0xa );
608 OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
609 OUT_RING( 0x3 );
610 OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
611 OUT_RING( 0x0 );
612 ADVANCE_RING();
613}
614
615
616/**
617 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
618 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
619 * be careful about how this function is called.
620 */
621static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
622{
623 drm_radeon_private_t *dev_priv = dev->dev_private;
624 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
625
626 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
627 buf->pending = 1;
628 buf->used = 0;
629}
630
631
632/**
633 * Parses and validates a user-supplied command buffer and emits appropriate
634 * commands on the DMA ring buffer.
635 * Called by the ioctl handler function radeon_cp_cmdbuf.
636 */
637int r300_do_cp_cmdbuf(drm_device_t* dev,
638 DRMFILE filp,
639 drm_file_t* filp_priv,
640 drm_radeon_cmd_buffer_t* cmdbuf)
641{
642 drm_radeon_private_t *dev_priv = dev->dev_private;
643 drm_device_dma_t *dma = dev->dma;
644 drm_buf_t *buf = NULL;
645 int emit_dispatch_age = 0;
646 int ret = 0;
647
648 DRM_DEBUG("\n");
649
650	/* See the comment above r300_pacify for why this call must be here,
651	 * and what the cleanup gotos are for. */
652 r300_pacify(dev_priv);
653
654 if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
655 ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
656 if (ret)
657 goto cleanup;
658 }
659
660 while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
661 int idx;
662 drm_r300_cmd_header_t header;
663
664 header.u = *(unsigned int *)cmdbuf->buf;
665
666 cmdbuf->buf += sizeof(header);
667 cmdbuf->bufsz -= sizeof(header);
668
669 switch(header.header.cmd_type) {
670 case R300_CMD_PACKET0:
671 DRM_DEBUG("R300_CMD_PACKET0\n");
672 ret = r300_emit_packet0(dev_priv, cmdbuf, header);
673 if (ret) {
674 DRM_ERROR("r300_emit_packet0 failed\n");
675 goto cleanup;
676 }
677 break;
678
679 case R300_CMD_VPU:
680 DRM_DEBUG("R300_CMD_VPU\n");
681 ret = r300_emit_vpu(dev_priv, cmdbuf, header);
682 if (ret) {
683 DRM_ERROR("r300_emit_vpu failed\n");
684 goto cleanup;
685 }
686 break;
687
688 case R300_CMD_PACKET3:
689 DRM_DEBUG("R300_CMD_PACKET3\n");
690 ret = r300_emit_packet3(dev_priv, cmdbuf, header);
691 if (ret) {
692 DRM_ERROR("r300_emit_packet3 failed\n");
693 goto cleanup;
694 }
695 break;
696
697 case R300_CMD_END3D:
698 DRM_DEBUG("R300_CMD_END3D\n");
699			/* TODO:
700				Ideally the userspace driver should not need to issue this call,
701				i.e. the drm driver should issue it automatically and prevent
702				lockups.
703
704				In practice, we do not understand why this call is needed and what
705				it does (except for some vague guesses that it has to do with cache
706				coherence), so the userspace driver issues it for now.
707
708				Once we are sure which uses of it prevent lockups, the code could be
709				moved into the kernel and the userspace driver would no longer
710				need to use this command.
711
712				Note that issuing this command does not hurt anything,
713				except possibly performance. */
714 r300_pacify(dev_priv);
715 break;
716
717 case R300_CMD_CP_DELAY:
718 /* simple enough, we can do it here */
719 DRM_DEBUG("R300_CMD_CP_DELAY\n");
720 {
721 int i;
722 RING_LOCALS;
723
724 BEGIN_RING(header.delay.count);
725 for(i=0;i<header.delay.count;i++)
726 OUT_RING(RADEON_CP_PACKET2);
727 ADVANCE_RING();
728 }
729 break;
730
731 case R300_CMD_DMA_DISCARD:
732			DRM_DEBUG("R300_CMD_DMA_DISCARD\n");
733 idx = header.dma.buf_idx;
734 if (idx < 0 || idx >= dma->buf_count) {
735 DRM_ERROR("buffer index %d (of %d max)\n",
736 idx, dma->buf_count - 1);
737 ret = DRM_ERR(EINVAL);
738 goto cleanup;
739 }
740
741 buf = dma->buflist[idx];
742 if (buf->filp != filp || buf->pending) {
743 DRM_ERROR("bad buffer %p %p %d\n",
744 buf->filp, filp, buf->pending);
745 ret = DRM_ERR(EINVAL);
746 goto cleanup;
747 }
748
749 emit_dispatch_age = 1;
750 r300_discard_buffer(dev, buf);
751 break;
752
753 case R300_CMD_WAIT:
754 /* simple enough, we can do it here */
755 DRM_DEBUG("R300_CMD_WAIT\n");
756 if(header.wait.flags==0)break; /* nothing to do */
757
758 {
759 RING_LOCALS;
760
761 BEGIN_RING(2);
762 OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
763 OUT_RING( (header.wait.flags & 0xf)<<14 );
764 ADVANCE_RING();
765 }
766 break;
767
768 default:
769 DRM_ERROR("bad cmd_type %i at %p\n",
770 header.header.cmd_type,
771 cmdbuf->buf - sizeof(header));
772 ret = DRM_ERR(EINVAL);
773 goto cleanup;
774 }
775 }
776
777 DRM_DEBUG("END\n");
778
779cleanup:
780 r300_pacify(dev_priv);
781
782 /* We emit the vertex buffer age here, outside the pacifier "brackets"
783 * for two reasons:
784 * (1) This may coalesce multiple age emissions into a single one and
785 * (2) more importantly, some chips lock up hard when scratch registers
786 * are written inside the pacifier bracket.
787 */
788 if (emit_dispatch_age) {
789 RING_LOCALS;
790
791 /* Emit the vertex buffer age */
792 BEGIN_RING(2);
793 RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
794 ADVANCE_RING();
795 }
796
797 COMMIT_RING();
798
799 return ret;
800}
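/* Summary of the command-stream grammar accepted above (derived from the
 * switch statement; sizes are in 32-bit dwords following the
 * drm_r300_cmd_header_t dword itself):
 *
 *	R300_CMD_PACKET0        packet0.count register values
 *	R300_CMD_VPU            vpu.count * 4 dwords of vector data
 *	R300_CMD_PACKET3/CLEAR  8 dwords of vertex data for the clear primitive
 *	R300_CMD_PACKET3/RAW    a raw CP packet3: header plus count + 1 dwords
 *	R300_CMD_END3D          no payload
 *	R300_CMD_CP_DELAY       no payload (count of NOPs is in the header)
 *	R300_CMD_DMA_DISCARD    no payload (buffer index is in the header)
 *	R300_CMD_WAIT           no payload (wait flags are in the header)
 */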
801
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
new file mode 100644
index 000000000000..c3e7ca3dbe3d
--- /dev/null
+++ b/drivers/char/drm/r300_reg.h
@@ -0,0 +1,1412 @@
1/**************************************************************************
2
3Copyright (C) 2004-2005 Nicolai Haehnle et al.
4
5Permission is hereby granted, free of charge, to any person obtaining a
6copy of this software and associated documentation files (the "Software"),
7to deal in the Software without restriction, including without limitation
8on the rights to use, copy, modify, merge, publish, distribute, sub
9license, and/or sell copies of the Software, and to permit persons to whom
10the Software is furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice (including the next
13paragraph) shall be included in all copies or substantial portions of the
14Software.
15
16THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22USE OR OTHER DEALINGS IN THE SOFTWARE.
23
24**************************************************************************/
25
26#ifndef _R300_REG_H
27#define _R300_REG_H
28
29#define R300_MC_INIT_MISC_LAT_TIMER 0x180
30# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
31# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
32# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
33# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
34# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
35# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
36# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
37# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
38
39
40#define R300_MC_INIT_GFX_LAT_TIMER 0x154
41# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
42# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
43# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
44# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
45# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
46# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
47# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
48# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
49
50/*
51This file contains registers and constants for the R300. They have been
52found mostly by examining command buffers captured using glxtest, as well
53as by extrapolating some known registers and constants from the R200.
54
55I am fairly certain that they are correct unless stated otherwise in comments.
56*/
57
58#define R300_SE_VPORT_XSCALE 0x1D98
59#define R300_SE_VPORT_XOFFSET 0x1D9C
60#define R300_SE_VPORT_YSCALE 0x1DA0
61#define R300_SE_VPORT_YOFFSET 0x1DA4
62#define R300_SE_VPORT_ZSCALE 0x1DA8
63#define R300_SE_VPORT_ZOFFSET 0x1DAC
64
65
66/* This register is written directly and also starts data section in many 3d CP_PACKET3's */
67#define R300_VAP_VF_CNTL 0x2084
68
69# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
70# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
71# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
72# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
73# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
74# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
75# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
76# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
77# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
78# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
79# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
80# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
81
82# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
83 /* State based - direct writes to registers trigger vertex generation */
84# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
85# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
86# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
87# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
88
89 /* I don't think I saw these three used.. */
90# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
91# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
92# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
93
94 /* index size - when not set the indices are assumed to be 16 bit */
95# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
96 /* number of vertices */
97# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
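/* Example (sketch): the VF_CNTL value for drawing a three-vertex triangle
 * list fetched from vertex arrays would presumably be composed as
 *
 *	R300_VAP_VF_CNTL__PRIM_TRIANGLES
 *	| R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST
 *	| (3 << R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT)
 */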
98
99/* BEGIN: Wild guesses */
100#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
101# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
102# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
103# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
104# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
105# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
106# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
107
108#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
109# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
110# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
111# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
112# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
113# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
114# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
115# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
116# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
117/* END */
118
119#define R300_SE_VTE_CNTL 0x20b0
120# define R300_VPORT_X_SCALE_ENA 0x00000001
121# define R300_VPORT_X_OFFSET_ENA 0x00000002
122# define R300_VPORT_Y_SCALE_ENA 0x00000004
123# define R300_VPORT_Y_OFFSET_ENA 0x00000008
124# define R300_VPORT_Z_SCALE_ENA 0x00000010
125# define R300_VPORT_Z_OFFSET_ENA 0x00000020
126# define R300_VTX_XY_FMT 0x00000100
127# define R300_VTX_Z_FMT 0x00000200
128# define R300_VTX_W0_FMT 0x00000400
129# define R300_VTX_W0_NORMALIZE 0x00000800
130# define R300_VTX_ST_DENORMALIZED 0x00001000
131
132/* BEGIN: Vertex data assembly - lots of uncertainties */
133/* gap */
134/* Where do we get our vertex data?
135//
136// Vertex data comes either from immediate mode registers or from
137// vertex arrays.
138// There appears to be no mixed mode (though we can force the pitch of
139// vertex arrays to 0, effectively reusing the same element over and over
140// again).
141//
142// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
143// if these registers influence vertex array processing.
144//
145// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
146//
147// In both cases, vertex attributes are then passed through INPUT_ROUTE.
148
149// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
150// into the vertex processor's input registers.
151// The first word routes the first input, the second word the second, etc.
152// The corresponding input is routed into the register with the given index.
153// The list is ended by a word with INPUT_ROUTE_END set.
154//
155// Always set COMPONENTS_4 in immediate mode. */
156
157#define R300_VAP_INPUT_ROUTE_0_0 0x2150
158# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
159# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
160# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
161# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
162# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
163# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
164# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
165# define R300_VAP_INPUT_ROUTE_END (1 << 13)
166# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
167# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
168# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
169# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
170#define R300_VAP_INPUT_ROUTE_0_1 0x2154
171#define R300_VAP_INPUT_ROUTE_0_2 0x2158
172#define R300_VAP_INPUT_ROUTE_0_3 0x215C
173#define R300_VAP_INPUT_ROUTE_0_4 0x2160
174#define R300_VAP_INPUT_ROUTE_0_5 0x2164
175#define R300_VAP_INPUT_ROUTE_0_6 0x2168
176#define R300_VAP_INPUT_ROUTE_0_7 0x216C
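/* Example (sketch): a single route word that feeds the first input, taken
 * as four floats, into vertex processor input register 0 and terminates the
 * list might look like
 *
 *	R300_INPUT_ROUTE_COMPONENTS_4
 *	| (0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT)
 *	| R300_INPUT_ROUTE_FLOAT
 *	| R300_VAP_INPUT_ROUTE_END
 *
 * (R300_INPUT_ROUTE_FLOAT is itself marked as a guess above.)
 */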
177
178/* gap */
179/* Notes:
180// - always set up to produce at least two attributes:
181// if vertex program uses only position, fglrx will set normal, too
182// - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
183#define R300_VAP_INPUT_CNTL_0 0x2180
184# define R300_INPUT_CNTL_0_COLOR 0x00000001
185#define R300_VAP_INPUT_CNTL_1 0x2184
186# define R300_INPUT_CNTL_POS 0x00000001
187# define R300_INPUT_CNTL_NORMAL 0x00000002
188# define R300_INPUT_CNTL_COLOR 0x00000004
189# define R300_INPUT_CNTL_TC0 0x00000400
190# define R300_INPUT_CNTL_TC1 0x00000800
191# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
192# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
193# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
194# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
195# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
196# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
197
198/* gap */
199/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
200// are set to a swizzling bit pattern, other words are 0.
201//
202// In immediate mode, the pattern is always set to xyzw. In vertex array
203// mode, the swizzling pattern is e.g. used to set zw components in texture
204// coordinates with only two components. */
205#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
206# define R300_INPUT_ROUTE_SELECT_X 0
207# define R300_INPUT_ROUTE_SELECT_Y 1
208# define R300_INPUT_ROUTE_SELECT_Z 2
209# define R300_INPUT_ROUTE_SELECT_W 3
210# define R300_INPUT_ROUTE_SELECT_ZERO 4
211# define R300_INPUT_ROUTE_SELECT_ONE 5
212# define R300_INPUT_ROUTE_SELECT_MASK 7
213# define R300_INPUT_ROUTE_X_SHIFT 0
214# define R300_INPUT_ROUTE_Y_SHIFT 3
215# define R300_INPUT_ROUTE_Z_SHIFT 6
216# define R300_INPUT_ROUTE_W_SHIFT 9
217# define R300_INPUT_ROUTE_ENABLE (15 << 12)
218#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
219#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
220#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
221#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
222#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
223#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
224#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
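/* Example (sketch): the plain xyzw pattern mentioned above for immediate
 * mode would be written as
 *
 *	(R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT)
 *	| (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT)
 *	| (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT)
 *	| (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT)
 *	| R300_INPUT_ROUTE_ENABLE
 */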
225
226/* END */
227
228/* gap */
229/* BEGIN: Upload vertex program and data
230// The programmable vertex shader unit has a memory bank of unknown size
231// that can be written to in 16 byte units by writing the address into
232// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
233//
234// Pointers into the memory bank are always in multiples of 16 bytes.
235//
236// The memory bank is divided into areas with fixed meaning.
237//
238// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
239// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
240// whereas the difference between known addresses suggests size 512.
241//
242// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
243// Native reported limits and the VPI layout suggest size 256, whereas
244// difference between known addresses suggests size 512.
245//
246// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
247// floating point pointsize. The exact purpose of this state is uncertain,
248// as there is also the R300_RE_POINTSIZE register.
249//
250// Multiple vertex programs and parameter sets can be loaded at once,
251// which could explain the size discrepancy. */
252#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
253# define R300_PVS_UPLOAD_PROGRAM 0x00000000
254# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
255# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
256/* gap */
257#define R300_VAP_PVS_UPLOAD_DATA 0x2208
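/* Example (sketch, assuming the upload address counts 16-byte vectors, as
 * the known addresses above suggest): to overwrite vertex program parameter
 * vector n, write R300_PVS_UPLOAD_PARAMETERS + n to
 * R300_VAP_PVS_UPLOAD_ADDRESS and then the four dwords of the vector to
 * R300_VAP_PVS_UPLOAD_DATA.  This is what r300_emit_vpu() in r300_cmdbuf.c
 * does with a user-supplied address, after first writing
 * R300_VAP_PVS_WAITIDLE.
 */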
258/* END */
259
260/* gap */
261/* I do not know the purpose of this register. However, I do know that
262// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
263// for normal rendering. */
264#define R300_VAP_UNKNOWN_221C 0x221C
265# define R300_221C_NORMAL 0x00000000
266# define R300_221C_CLEAR 0x0001C000
267
268/* gap */
269/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
270// rendering commands and overwriting vertex program parameters.
271// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
272// avoids bugs caused by still running shaders reading bad data from memory. */
273#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
274
275/* Absolutely no clue what this register is about. */
276#define R300_VAP_UNKNOWN_2288 0x2288
277# define R300_2288_R300 0x00750000 /* -- nh */
278# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
279
280/* gap */
281/* Addresses are relative to the vertex program instruction area of the
282// memory bank. PROGRAM_END points to the last instruction of the active
283// program.
284//
285// The meaning of the two UNKNOWN fields is obviously not known. However,
286// experiments so far have shown that both *must* point to an instruction
287// inside the vertex program, otherwise the GPU locks up.
288// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
289// CNTL_1_UNKNOWN to the instruction where the last write to position takes place.
290// Most likely this is used to skip the rest of the program when a group of vertices is not visible.
291// For some reason this "section" sometimes also accepts instructions that have
292// no relationship with position calculations.
293*/
294#define R300_VAP_PVS_CNTL_1 0x22D0
295# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
296# define R300_PVS_CNTL_1_POS_END_SHIFT 10
297# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
298/* Addresses are relative to the vertex program parameters area. */
299#define R300_VAP_PVS_CNTL_2 0x22D4
300# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
301# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
302#define R300_VAP_PVS_CNTL_3 0x22D8
303# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
304# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
305
306/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
307// immediate vertices */
308#define R300_VAP_VTX_COLOR_R 0x2464
309#define R300_VAP_VTX_COLOR_G 0x2468
310#define R300_VAP_VTX_COLOR_B 0x246C
311#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
312#define R300_VAP_VTX_POS_0_Y_1 0x2494
313#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
314#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
315#define R300_VAP_VTX_POS_0_Y_2 0x24A4
316#define R300_VAP_VTX_POS_0_Z_2 0x24A8
317#define R300_VAP_VTX_END_OF_PKT 0x24AC /* write 0 to indicate end of packet? */
318
319/* gap */
320
321/* These are values from r300_reg/r300_reg.h - they are known to be correct
322 and are here so we can use one register file instead of several
323 - Vladimir */
324#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
325# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
326# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
327# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
328# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
329# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
330# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
331# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
332
333#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
334 /* each of the following is 3 bits wide, specifies number
335 of components */
336# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
337# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
338# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
339# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
340# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
341# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
342# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
343# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
344
345/* UNK30 seems to enable point-to-quad transformation on textures
346 (or something closely related to that).
347 This bit is rather fatal at the moment due to shortcomings on the pixel shader side */
348#define R300_GB_ENABLE 0x4008
349# define R300_GB_POINT_STUFF_ENABLE (1<<0)
350# define R300_GB_LINE_STUFF_ENABLE (1<<1)
351# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
352# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
353# define R300_GB_UNK30 (1<<30)
354 /* each of the following is 2 bits wide */
355#define R300_GB_TEX_REPLICATE 0
356#define R300_GB_TEX_ST 1
357#define R300_GB_TEX_STR 2
358# define R300_GB_TEX0_SOURCE_SHIFT 16
359# define R300_GB_TEX1_SOURCE_SHIFT 18
360# define R300_GB_TEX2_SOURCE_SHIFT 20
361# define R300_GB_TEX3_SOURCE_SHIFT 22
362# define R300_GB_TEX4_SOURCE_SHIFT 24
363# define R300_GB_TEX5_SOURCE_SHIFT 26
364# define R300_GB_TEX6_SOURCE_SHIFT 28
365# define R300_GB_TEX7_SOURCE_SHIFT 30
366
367/* MSPOS - positions for multisample antialiasing (?) */
368#define R300_GB_MSPOS0 0x4010
369 /* shifts - each of the fields is 4 bits */
370# define R300_GB_MSPOS0__MS_X0_SHIFT 0
371# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
372# define R300_GB_MSPOS0__MS_X1_SHIFT 8
373# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
374# define R300_GB_MSPOS0__MS_X2_SHIFT 16
375# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
376# define R300_GB_MSPOS0__MSBD0_Y 24
377# define R300_GB_MSPOS0__MSBD0_X 28
378
379#define R300_GB_MSPOS1 0x4014
380# define R300_GB_MSPOS1__MS_X3_SHIFT 0
381# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
382# define R300_GB_MSPOS1__MS_X4_SHIFT 8
383# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
384# define R300_GB_MSPOS1__MS_X5_SHIFT 16
385# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
386# define R300_GB_MSPOS1__MSBD1 24
387
388
389#define R300_GB_TILE_CONFIG 0x4018
390# define R300_GB_TILE_ENABLE (1<<0)
391# define R300_GB_TILE_PIPE_COUNT_RV300 0
392# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
393# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
394# define R300_GB_TILE_SIZE_8 0
395# define R300_GB_TILE_SIZE_16 (1<<4)
396# define R300_GB_TILE_SIZE_32 (2<<4)
397# define R300_GB_SUPER_SIZE_1 (0<<6)
398# define R300_GB_SUPER_SIZE_2 (1<<6)
399# define R300_GB_SUPER_SIZE_4 (2<<6)
400# define R300_GB_SUPER_SIZE_8 (3<<6)
401# define R300_GB_SUPER_SIZE_16 (4<<6)
402# define R300_GB_SUPER_SIZE_32 (5<<6)
403# define R300_GB_SUPER_SIZE_64 (6<<6)
404# define R300_GB_SUPER_SIZE_128 (7<<6)
405# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
406# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
407# define R300_GB_SUPER_TILE_A 0
408# define R300_GB_SUPER_TILE_B (1<<15)
409# define R300_GB_SUBPIXEL_1_12 0
410# define R300_GB_SUBPIXEL_1_16 (1<<16)
411
412#define R300_GB_FIFO_SIZE 0x4024
413 /* each of the following is 2 bits wide */
414#define R300_GB_FIFO_SIZE_32 0
415#define R300_GB_FIFO_SIZE_64 1
416#define R300_GB_FIFO_SIZE_128 2
417#define R300_GB_FIFO_SIZE_256 3
418# define R300_SC_IFIFO_SIZE_SHIFT 0
419# define R300_SC_TZFIFO_SIZE_SHIFT 2
420# define R300_SC_BFIFO_SIZE_SHIFT 4
421
422# define R300_US_OFIFO_SIZE_SHIFT 12
423# define R300_US_WFIFO_SIZE_SHIFT 14
424	/* the following use the same constants as above, but the meaning
425	   is times 2 (i.e. instead of 32 words it means 64 words) */
426# define R300_RS_TFIFO_SIZE_SHIFT 6
427# define R300_RS_CFIFO_SIZE_SHIFT 8
428# define R300_US_RAM_SIZE_SHIFT 10
429 /* watermarks, 3 bits wide */
430# define R300_RS_HIGHWATER_COL_SHIFT 16
431# define R300_RS_HIGHWATER_TEX_SHIFT 19
432# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
433# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
434
435#define R300_GB_SELECT 0x401C
436# define R300_GB_FOG_SELECT_C0A 0
437# define R300_GB_FOG_SELECT_C1A 1
438# define R300_GB_FOG_SELECT_C2A 2
439# define R300_GB_FOG_SELECT_C3A 3
440# define R300_GB_FOG_SELECT_1_1_W 4
441# define R300_GB_FOG_SELECT_Z 5
442# define R300_GB_DEPTH_SELECT_Z 0
443# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
444# define R300_GB_W_SELECT_1_W 0
445# define R300_GB_W_SELECT_1 (1<<4)
446
447#define R300_GB_AA_CONFIG 0x4020
448# define R300_AA_ENABLE 0x01
449# define R300_AA_SUBSAMPLES_2 0
450# define R300_AA_SUBSAMPLES_3 (1<<1)
451# define R300_AA_SUBSAMPLES_4 (2<<1)
452# define R300_AA_SUBSAMPLES_6 (3<<1)
453
454/* END */
455
456/* gap */
457/* The upper enable bits are guessed, based on fglrx reported limits. */
458#define R300_TX_ENABLE 0x4104
459# define R300_TX_ENABLE_0 (1 << 0)
460# define R300_TX_ENABLE_1 (1 << 1)
461# define R300_TX_ENABLE_2 (1 << 2)
462# define R300_TX_ENABLE_3 (1 << 3)
463# define R300_TX_ENABLE_4 (1 << 4)
464# define R300_TX_ENABLE_5 (1 << 5)
465# define R300_TX_ENABLE_6 (1 << 6)
466# define R300_TX_ENABLE_7 (1 << 7)
467# define R300_TX_ENABLE_8 (1 << 8)
468# define R300_TX_ENABLE_9 (1 << 9)
469# define R300_TX_ENABLE_10 (1 << 10)
470# define R300_TX_ENABLE_11 (1 << 11)
471# define R300_TX_ENABLE_12 (1 << 12)
472# define R300_TX_ENABLE_13 (1 << 13)
473# define R300_TX_ENABLE_14 (1 << 14)
474# define R300_TX_ENABLE_15 (1 << 15)
475
476/* The pointsize is given in multiples of 6. The pointsize can be
477// enormous: Clear() renders a single point that fills the entire
478// framebuffer. */
479#define R300_RE_POINTSIZE 0x421C
480# define R300_POINTSIZE_Y_SHIFT 0
481# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
482# define R300_POINTSIZE_X_SHIFT 16
483# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
484# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
485
486/* The line width is given in multiples of 6.
487 In default mode lines are classified as vertical lines.
488 HO: horizontal
489 VE: vertical or horizontal
490 HO & VE: no classification
491*/
492#define R300_RE_LINE_CNT 0x4234
493# define R300_LINESIZE_SHIFT 0
494# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
495# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
496# define R300_LINE_CNT_HO (1 << 16)
497# define R300_LINE_CNT_VE (1 << 17)
498
499/* Some sort of scale or clamp value for texcoordless textures. */
500#define R300_RE_UNK4238 0x4238
501
502#define R300_RE_SHADE_MODEL 0x4278
503# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
504# define R300_RE_SHADE_MODEL_FLAT 0x39595
505
506/* Dangerous */
507#define R300_RE_POLYGON_MODE 0x4288
508# define R300_PM_ENABLED (1 << 0)
509# define R300_PM_FRONT_POINT (0 << 0)
510# define R300_PM_BACK_POINT (0 << 0)
511# define R300_PM_FRONT_LINE (1 << 4)
512# define R300_PM_FRONT_FILL (1 << 5)
513# define R300_PM_BACK_LINE (1 << 7)
514# define R300_PM_BACK_FILL (1 << 8)
515
516/* Not sure why there are duplicates of the factor and constant values.
517 My best guess so far is that there are separate zbiases for test and write.
518 Ordering might be wrong.
519 Some of the tests indicate that fgl has a fallback implementation of zbias
520 via pixel shaders. */
521#define R300_RE_ZBIAS_T_FACTOR 0x42A4
522#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
523#define R300_RE_ZBIAS_W_FACTOR 0x42AC
524#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
525
526/* This register needs to be set to (1<<1) for RV350 to correctly
527 perform depth test (see --vb-triangles in r300_demo)
528 Don't know about other chips. - Vladimir
529 This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
530 My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT).
531 One to enable depth test and one for depth write.
532 Yet this doesn't explain why depth writes work ...
533 */
534#define R300_RE_OCCLUSION_CNTL 0x42B4
535# define R300_OCCLUSION_ON (1<<1)
536
537#define R300_RE_CULL_CNTL 0x42B8
538# define R300_CULL_FRONT (1 << 0)
539# define R300_CULL_BACK (1 << 1)
540# define R300_FRONT_FACE_CCW (0 << 2)
541# define R300_FRONT_FACE_CW (1 << 2)
542
543
544/* BEGIN: Rasterization / Interpolators - many guesses
545// 0_UNKNOWN_18 has always been set except for clear operations.
546// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
547// on the vertex program, *not* the fragment program) */
548#define R300_RS_CNTL_0 0x4300
549# define R300_RS_CNTL_TC_CNT_SHIFT 2
550# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
551# define R300_RS_CNTL_CI_CNT_SHIFT 7 /* number of color interpolators used */
552# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
553/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
554#define R300_RS_CNTL_1 0x4304
555
556/* gap */
557/* Only used for texture coordinates.
558// Use the source field to route texture coordinate input from the vertex program
559// to the desired interpolator. Note that the source field is relative to the
560// outputs the vertex program *actually* writes. If a vertex program only writes
561// texcoord[1], this will be source index 0.
562// Set INTERP_USED on all interpolators that produce data used by the
563// fragment program. INTERP_USED looks like a swizzling mask, but
564// I haven't seen it used that way.
565//
566// Note: The _UNKNOWN constants are always set in their respective register.
567// I don't know if this is necessary. */
568#define R300_RS_INTERP_0 0x4310
569#define R300_RS_INTERP_1 0x4314
570# define R300_RS_INTERP_1_UNKNOWN 0x40
571#define R300_RS_INTERP_2 0x4318
572# define R300_RS_INTERP_2_UNKNOWN 0x80
573#define R300_RS_INTERP_3 0x431C
574# define R300_RS_INTERP_3_UNKNOWN 0xC0
575#define R300_RS_INTERP_4 0x4320
576#define R300_RS_INTERP_5 0x4324
577#define R300_RS_INTERP_6 0x4328
578#define R300_RS_INTERP_7 0x432C
579# define R300_RS_INTERP_SRC_SHIFT 2
580# define R300_RS_INTERP_SRC_MASK (7 << 2)
581# define R300_RS_INTERP_USED 0x00D10000
582
583/* These DWORDs control how vertex data is routed into fragment program
584// registers, after interpolators. */
585#define R300_RS_ROUTE_0 0x4330
586#define R300_RS_ROUTE_1 0x4334
587#define R300_RS_ROUTE_2 0x4338
588#define R300_RS_ROUTE_3 0x433C /* GUESS */
589#define R300_RS_ROUTE_4 0x4340 /* GUESS */
590#define R300_RS_ROUTE_5 0x4344 /* GUESS */
591#define R300_RS_ROUTE_6 0x4348 /* GUESS */
592#define R300_RS_ROUTE_7 0x434C /* GUESS */
593# define R300_RS_ROUTE_SOURCE_INTERP_0 0
594# define R300_RS_ROUTE_SOURCE_INTERP_1 1
595# define R300_RS_ROUTE_SOURCE_INTERP_2 2
596# define R300_RS_ROUTE_SOURCE_INTERP_3 3
597# define R300_RS_ROUTE_SOURCE_INTERP_4 4
598# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
599# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
600# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
601# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
602# define R300_RS_ROUTE_DEST_SHIFT 6
603# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
604
605/* Special handling for color: When the fragment program uses color,
606// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
607// color register index. */
608# define R300_RS_ROUTE_0_COLOR (1 << 14)
609# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
610# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
611/* As above, but for secondary color */
612# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
613# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
614# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
615# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
616/* END */
617
618/* BEGIN: Scissors and cliprects
619// There are four clipping rectangles. Their corner coordinates are inclusive.
620// Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
621// on whether the pixel is inside cliprects 0-3, respectively. For example,
622// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
623// the number 3 (binary 0011).
624// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
625// the pixel is rasterized.
626//
627// In addition to this, there is a scissors rectangle. Only pixels inside the
628// scissors rectangle are drawn. (coordinates are inclusive)
629//
630// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
631// for the purpose of clipping and scissors. */
632#define R300_RE_CLIPRECT_TL_0 0x43B0
633#define R300_RE_CLIPRECT_BR_0 0x43B4
634#define R300_RE_CLIPRECT_TL_1 0x43B8
635#define R300_RE_CLIPRECT_BR_1 0x43BC
636#define R300_RE_CLIPRECT_TL_2 0x43C0
637#define R300_RE_CLIPRECT_BR_2 0x43C4
638#define R300_RE_CLIPRECT_TL_3 0x43C8
639#define R300_RE_CLIPRECT_BR_3 0x43CC
640# define R300_CLIPRECT_OFFSET 1440
641# define R300_CLIPRECT_MASK 0x1FFF
642# define R300_CLIPRECT_X_SHIFT 0
643# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
644# define R300_CLIPRECT_Y_SHIFT 13
645# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
646#define R300_RE_CLIPRECT_CNTL 0x43D0
647# define R300_CLIP_OUT (1 << 0)
648# define R300_CLIP_0 (1 << 1)
649# define R300_CLIP_1 (1 << 2)
650# define R300_CLIP_10 (1 << 3)
651# define R300_CLIP_2 (1 << 4)
652# define R300_CLIP_20 (1 << 5)
653# define R300_CLIP_21 (1 << 6)
654# define R300_CLIP_210 (1 << 7)
655# define R300_CLIP_3 (1 << 8)
656# define R300_CLIP_30 (1 << 9)
657# define R300_CLIP_31 (1 << 10)
658# define R300_CLIP_310 (1 << 11)
659# define R300_CLIP_32 (1 << 12)
660# define R300_CLIP_320 (1 << 13)
661# define R300_CLIP_321 (1 << 14)
662# define R300_CLIP_3210 (1 << 15)
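/* Illustrative sketch (not used by the driver): the r300_cliprect_cntl[]
 * table in r300_cmdbuf.c follows directly from the numbering scheme above.
 * "Accept every pixel that lies inside at least one of the first n
 * cliprects" means setting the bit for every 4-bit code that has one of
 * bits 0..n-1 set:
 */
#if 0
static unsigned int r300_cliprect_accept_mask(int n)	/* n = 1..4 */
{
	unsigned int mask = 0;
	int code;

	for (code = 0; code < 16; code++)
		if (code & ((1 << n) - 1))
			mask |= 1 << code;

	return mask;	/* n = 1..4  ->  0xAAAA, 0xEEEE, 0xFEFE, 0xFFFE */
}
#endif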
663
664/* gap */
665#define R300_RE_SCISSORS_TL 0x43E0
666#define R300_RE_SCISSORS_BR 0x43E4
667# define R300_SCISSORS_OFFSET 1440
668# define R300_SCISSORS_X_SHIFT 0
669# define R300_SCISSORS_X_MASK (0x1FFF << 0)
670# define R300_SCISSORS_Y_SHIFT 13
671# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
672/* END */
673
674/* BEGIN: Texture specification
675// The texture specification dwords are grouped by meaning and not by texture unit.
676// This means that e.g. the offset for texture image unit N is found in register
677// TX_OFFSET_0 + (4*N) */
678#define R300_TX_FILTER_0 0x4400
679# define R300_TX_REPEAT 0
680# define R300_TX_MIRRORED 1
681# define R300_TX_CLAMP 4
682# define R300_TX_CLAMP_TO_EDGE 2
683# define R300_TX_CLAMP_TO_BORDER 6
684# define R300_TX_WRAP_S_SHIFT 0
685# define R300_TX_WRAP_S_MASK (7 << 0)
686# define R300_TX_WRAP_T_SHIFT 3
687# define R300_TX_WRAP_T_MASK (7 << 3)
688# define R300_TX_WRAP_Q_SHIFT 6
689# define R300_TX_WRAP_Q_MASK (7 << 6)
690# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
691# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
692# define R300_TX_MAG_FILTER_MASK (3 << 9)
693# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
694# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
695# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
696# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
697# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
698# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
699
700/* NOTE: NEAREST does not seem to exist.
701 I am not setting MAG_FILTER_MASK and (3 << 11) for all
702 anisotropy modes because that would void the selected mag filter */
703# define R300_TX_MIN_FILTER_ANISO_NEAREST ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
704# define R300_TX_MIN_FILTER_ANISO_LINEAR ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
705# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
706# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
707# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
708# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
709# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
710# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
711# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
712# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
713# define R300_TX_MAX_ANISO_MASK (14 << 21)
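/* Example (sketch): a trilinear sampler with repeat wrapping on all three
 * coordinates would presumably be described as
 *
 *	(R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT)
 *	| (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT)
 *	| (R300_TX_REPEAT << R300_TX_WRAP_Q_SHIFT)
 *	| R300_TX_MAG_FILTER_LINEAR
 *	| R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR
 *
 * written to R300_TX_FILTER_0 + 4*N for texture unit N, following the
 * per-unit layout described at the top of this section.
 */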
714
715#define R300_TX_UNK1_0 0x4440
716# define R300_LOD_BIAS_MASK 0x1fff
717
718#define R300_TX_SIZE_0 0x4480
719# define R300_TX_WIDTHMASK_SHIFT 0
720# define R300_TX_WIDTHMASK_MASK (2047 << 0)
721# define R300_TX_HEIGHTMASK_SHIFT 11
722# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
723# define R300_TX_UNK23 (1 << 23)
724# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
725# define R300_TX_SIZE_MASK (15 << 26)
726#define R300_TX_FORMAT_0 0x44C0
727 /* The interpretation of the format word by Wladimir van der Laan */
728 /* The X, Y, Z and W refer to the layout of the components.
729 They are given meanings as R, G, B and Alpha by the swizzle
730 specification */
731# define R300_TX_FORMAT_X8 0x0
732# define R300_TX_FORMAT_X16 0x1
733# define R300_TX_FORMAT_Y4X4 0x2
734# define R300_TX_FORMAT_Y8X8 0x3
735# define R300_TX_FORMAT_Y16X16 0x4
736# define R300_TX_FORMAT_Z3Y3X2 0x5
737# define R300_TX_FORMAT_Z5Y6X5 0x6
738# define R300_TX_FORMAT_Z6Y5X5 0x7
739# define R300_TX_FORMAT_Z11Y11X10 0x8
740# define R300_TX_FORMAT_Z10Y11X11 0x9
741# define R300_TX_FORMAT_W4Z4Y4X4 0xA
742# define R300_TX_FORMAT_W1Z5Y5X5 0xB
743# define R300_TX_FORMAT_W8Z8Y8X8 0xC
744# define R300_TX_FORMAT_W2Z10Y10X10 0xD
745# define R300_TX_FORMAT_W16Z16Y16X16 0xE
746# define R300_TX_FORMAT_DXT1 0xF
747# define R300_TX_FORMAT_DXT3 0x10
748# define R300_TX_FORMAT_DXT5 0x11
749# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
750# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
751# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
752# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
753 /* 0x16 - some 16 bit green format.. ?? */
754# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
755
756 /* gap */
757 /* Floating point formats */
758 /* Note - hardware supports both 16 and 32 bit floating point */
759# define R300_TX_FORMAT_FL_I16 0x18
760# define R300_TX_FORMAT_FL_I16A16 0x19
761# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
762# define R300_TX_FORMAT_FL_I32 0x1B
763# define R300_TX_FORMAT_FL_I32A32 0x1C
764# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
765 /* alpha modes, convenience mostly */
766	/* if you have alpha, pick the constant appropriate to the
767	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
768# define R300_TX_FORMAT_ALPHA_1CH 0x000
769# define R300_TX_FORMAT_ALPHA_2CH 0x200
770# define R300_TX_FORMAT_ALPHA_4CH 0x600
771# define R300_TX_FORMAT_ALPHA_NONE 0xA00
772 /* Swizzling */
773 /* constants */
774# define R300_TX_FORMAT_X 0
775# define R300_TX_FORMAT_Y 1
776# define R300_TX_FORMAT_Z 2
777# define R300_TX_FORMAT_W 3
778# define R300_TX_FORMAT_ZERO 4
779# define R300_TX_FORMAT_ONE 5
780# define R300_TX_FORMAT_CUT_Z 6 /* 2.0*Z, everything above 1.0 is set to 0.0 */
781# define R300_TX_FORMAT_CUT_W 7 /* 2.0*W, everything above 1.0 is set to 0.0 */
782
783# define R300_TX_FORMAT_B_SHIFT 18
784# define R300_TX_FORMAT_G_SHIFT 15
785# define R300_TX_FORMAT_R_SHIFT 12
786# define R300_TX_FORMAT_A_SHIFT 9
787 /* Convenience macro to take care of layout and swizzling */
788# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\
789 ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
790 | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
791 | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
792 | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
793 | (R300_TX_FORMAT_##FMT) \
794 )
795 /* These can be ORed with result of R300_EASY_TX_FORMAT() */
796 /* We don't really know what they do. Take values from a constant color ? */
797# define R300_TX_FORMAT_CONST_X (1<<5)
798# define R300_TX_FORMAT_CONST_Y (2<<5)
799# define R300_TX_FORMAT_CONST_Z (4<<5)
800# define R300_TX_FORMAT_CONST_W (8<<5)
801
802# define R300_TX_FORMAT_YUV_MODE 0x00800000
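/* Example (sketch): R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8) expands to
 *
 *	(R300_TX_FORMAT_X << R300_TX_FORMAT_B_SHIFT)
 *	| (R300_TX_FORMAT_Y << R300_TX_FORMAT_G_SHIFT)
 *	| (R300_TX_FORMAT_Z << R300_TX_FORMAT_R_SHIFT)
 *	| (R300_TX_FORMAT_W << R300_TX_FORMAT_A_SHIFT)
 *	| R300_TX_FORMAT_W8Z8Y8X8
 *
 * i.e. a four-component 8-bit texture whose layout components X, Y, Z, W
 * feed B, G, R, A respectively; substituting ZERO or ONE for a component
 * presumably yields a constant 0.0 or 1.0 in that channel.
 */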
803
804#define R300_TX_OFFSET_0 0x4540
805/* BEGIN: Guess from R200 */
806# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
807# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
808# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
809# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
810# define R300_TXO_OFFSET_MASK 0xffffffe0
811# define R300_TXO_OFFSET_SHIFT 5
812/* END */
813#define R300_TX_UNK4_0 0x4580
814#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
815
816/* END */
817
818/* BEGIN: Fragment program instruction set
819// Fragment programs are written directly into register space.
820// There are separate instruction streams for texture instructions and ALU
821// instructions.
822// In order to synchronize these streams, the program is divided into up
823// to 4 nodes. Each node begins with a number of TEX operations, followed
824// by a number of ALU operations.
825// The first node can have zero TEX ops; all subsequent nodes must have at least
826// one TEX op.
827// All nodes must have at least one ALU op.
828//
829// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
830// 1 node, a value of 3 means 4 nodes.
831// The total number of instructions is defined in PFS_CNTL_2. The offsets are
832// offsets into the respective instruction streams, while *_END points to the
833// last instruction relative to this offset. */
834#define R300_PFS_CNTL_0 0x4600
835# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
836# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
837# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
838#define R300_PFS_CNTL_1 0x4604
839/* There is an unshifted value here which has so far always been equal to the
840// index of the highest used temporary register. */
841#define R300_PFS_CNTL_2 0x4608
842# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
843# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
844# define R300_PFS_CNTL_ALU_END_SHIFT 6
845# define R300_PFS_CNTL_ALU_END_MASK (63 << 0)
846# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
847# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
848# define R300_PFS_CNTL_TEX_END_SHIFT 18
849# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
850
851/* gap */
852/* Nodes are stored backwards. The last active node is always stored in
853// PFS_NODE_3.
854// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
855// first node is stored in NODE_2, the second node is stored in NODE_3.
856//
857// Offsets are relative to the master offset from PFS_CNTL_2.
858// LAST_NODE is set for the last node, and only for the last node. */
859#define R300_PFS_NODE_0 0x4610
860#define R300_PFS_NODE_1 0x4614
861#define R300_PFS_NODE_2 0x4618
862#define R300_PFS_NODE_3 0x461C
863# define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
864# define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
865# define R300_PFS_NODE_ALU_END_SHIFT 6
866# define R300_PFS_NODE_ALU_END_MASK (63 << 6)
867# define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
868# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
869# define R300_PFS_NODE_TEX_END_SHIFT 17
870# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
871# define R300_PFS_NODE_LAST_NODE (1 << 22)
872
873/* TEX
874// As far as I can tell, texture instructions cannot write into output
875// registers directly. A subsequent ALU instruction is always necessary,
876// even if it's just MAD o0, r0, 1, 0 */
877#define R300_PFS_TEXI_0 0x4620
878# define R300_FPITX_SRC_SHIFT 0
879# define R300_FPITX_SRC_MASK (31 << 0)
880# define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */
881# define R300_FPITX_DST_SHIFT 6
882# define R300_FPITX_DST_MASK (31 << 6)
883# define R300_FPITX_IMAGE_SHIFT 11
884# define R300_FPITX_IMAGE_MASK (15 << 11) /* GUESS based on layout and native limits */
885/* Unsure if these are opcodes, or some kind of bitfield, but this is how
886 * they were set when I checked
887 */
888# define R300_FPITX_OPCODE_SHIFT 15
889# define R300_FPITX_OP_TEX 1
890# define R300_FPITX_OP_TXP 3
891# define R300_FPITX_OP_TXB 4
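/* Example (illustrative sketch, using the guessed field layout above): a
 * single texture instruction that samples image 0 with the coordinates held
 * in register 0 and writes the result to temporary register 1. The helper
 * name is made up for illustration only.
 */
static inline unsigned int r300_example_tex_instr(void)
{
	return (0 << R300_FPITX_SRC_SHIFT) |
	       (1 << R300_FPITX_DST_SHIFT) |
	       (0 << R300_FPITX_IMAGE_SHIFT) |
	       (R300_FPITX_OP_TEX << R300_FPITX_OPCODE_SHIFT);
}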
892
893/* ALU
894// The ALU instruction register blocks are enumerated in the order in which
895// fglrx uses them. I assume there is space for 64 instructions, since
896// each block has space for a maximum of 64 DWORDs, and this matches reported
897// native limits.
898//
899// The basic functional block seems to be one MAD for each color and alpha,
900// and an adder that adds all components after the MUL.
901// - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
902// - DP4: Use OUTC_DP4, OUTA_DP4
903// - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
904// - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
905// - CMP: If ARG2 < 0, return ARG1, else return ARG0
906// - FLR: use FRC+MAD
907// - XPD: use MAD+MAD
908// - SGE, SLT: use MAD+CMP
909// - RSQ: use ABS modifier for argument
910// - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
911// into color register
912// - apparently, there's no quick DST operation
913// - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
914// - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
915// - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
916//
917// Operand selection
918// First stage selects three sources from the available registers and
919// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
920// fglrx sorts the three source fields: Registers before constants,
921// lower indices before higher indices; I do not know whether this is necessary.
922// fglrx fills unused sources with "read constant 0"
923// According to specs, you cannot select more than two different constants.
924//
925// Second stage selects the operands from the sources. This is defined in
926// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
927// zero and one.
928// Swizzling and negation happens in this stage, as well.
929//
930// Important: Color and alpha seem to be mostly separate, i.e. their sources
931// selection appears to be fully independent (the register storage is probably
932// physically split into a color and an alpha section).
933// However (because of the apparent physical split), there is some interaction
934// WRT swizzling. If, for example, you want to load an R component into an
935// Alpha operand, this R component is taken from a *color* source, not from
936// an alpha source. The corresponding register doesn't even have to appear in
937// the alpha sources list. (I hope this all makes sense to you)
938//
939// Destination selection
940// The destination register index is in FPI1 (color) and FPI3 (alpha) together
941// with enable bits.
942// There are separate enable bits for writing into temporary registers
943// (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT).
944// You can write to both at once, or not write at all (the same index
945// must be used for both).
946//
947// Note: There is a special form for LRP
948// - Argument order is the same as in ARB_fragment_program.
949// - Operation is MAD
950// - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
951// - Set FPI0/FPI2_SPECIAL_LRP
952// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */
953#define R300_PFS_INSTR1_0 0x46C0
954# define R300_FPI1_SRC0C_SHIFT 0
955# define R300_FPI1_SRC0C_MASK (31 << 0)
956# define R300_FPI1_SRC0C_CONST (1 << 5)
957# define R300_FPI1_SRC1C_SHIFT 6
958# define R300_FPI1_SRC1C_MASK (31 << 6)
959# define R300_FPI1_SRC1C_CONST (1 << 11)
960# define R300_FPI1_SRC2C_SHIFT 12
961# define R300_FPI1_SRC2C_MASK (31 << 12)
962# define R300_FPI1_SRC2C_CONST (1 << 17)
963# define R300_FPI1_DSTC_SHIFT 18
964# define R300_FPI1_DSTC_MASK (31 << 18)
965# define R300_FPI1_DSTC_REG_X (1 << 23)
966# define R300_FPI1_DSTC_REG_Y (1 << 24)
967# define R300_FPI1_DSTC_REG_Z (1 << 25)
968# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
969# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
970# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
971
972#define R300_PFS_INSTR3_0 0x47C0
973# define R300_FPI3_SRC0A_SHIFT 0
974# define R300_FPI3_SRC0A_MASK (31 << 0)
975# define R300_FPI3_SRC0A_CONST (1 << 5)
976# define R300_FPI3_SRC1A_SHIFT 6
977# define R300_FPI3_SRC1A_MASK (31 << 6)
978# define R300_FPI3_SRC1A_CONST (1 << 11)
979# define R300_FPI3_SRC2A_SHIFT 12
980# define R300_FPI3_SRC2A_MASK (31 << 12)
981# define R300_FPI3_SRC2A_CONST (1 << 17)
982# define R300_FPI3_DSTA_SHIFT 18
983# define R300_FPI3_DSTA_MASK (31 << 18)
984# define R300_FPI3_DSTA_REG (1 << 23)
985# define R300_FPI3_DSTA_OUTPUT (1 << 24)
986
987#define R300_PFS_INSTR0_0 0x48C0
988# define R300_FPI0_ARGC_SRC0C_XYZ 0
989# define R300_FPI0_ARGC_SRC0C_XXX 1
990# define R300_FPI0_ARGC_SRC0C_YYY 2
991# define R300_FPI0_ARGC_SRC0C_ZZZ 3
992# define R300_FPI0_ARGC_SRC1C_XYZ 4
993# define R300_FPI0_ARGC_SRC1C_XXX 5
994# define R300_FPI0_ARGC_SRC1C_YYY 6
995# define R300_FPI0_ARGC_SRC1C_ZZZ 7
996# define R300_FPI0_ARGC_SRC2C_XYZ 8
997# define R300_FPI0_ARGC_SRC2C_XXX 9
998# define R300_FPI0_ARGC_SRC2C_YYY 10
999# define R300_FPI0_ARGC_SRC2C_ZZZ 11
1000# define R300_FPI0_ARGC_SRC0A 12
1001# define R300_FPI0_ARGC_SRC1A 13
1002# define R300_FPI0_ARGC_SRC2A 14
1003# define R300_FPI0_ARGC_SRC1C_LRP 15
1004# define R300_FPI0_ARGC_ZERO 20
1005# define R300_FPI0_ARGC_ONE 21
1006# define R300_FPI0_ARGC_HALF 22 /* GUESS */
1007# define R300_FPI0_ARGC_SRC0C_YZX 23
1008# define R300_FPI0_ARGC_SRC1C_YZX 24
1009# define R300_FPI0_ARGC_SRC2C_YZX 25
1010# define R300_FPI0_ARGC_SRC0C_ZXY 26
1011# define R300_FPI0_ARGC_SRC1C_ZXY 27
1012# define R300_FPI0_ARGC_SRC2C_ZXY 28
1013# define R300_FPI0_ARGC_SRC0CA_WZY 29
1014# define R300_FPI0_ARGC_SRC1CA_WZY 30
1015# define R300_FPI0_ARGC_SRC2CA_WZY 31
1016
1017# define R300_FPI0_ARG0C_SHIFT 0
1018# define R300_FPI0_ARG0C_MASK (31 << 0)
1019# define R300_FPI0_ARG0C_NEG (1 << 5)
1020# define R300_FPI0_ARG0C_ABS (1 << 6)
1021# define R300_FPI0_ARG1C_SHIFT 7
1022# define R300_FPI0_ARG1C_MASK (31 << 7)
1023# define R300_FPI0_ARG1C_NEG (1 << 12)
1024# define R300_FPI0_ARG1C_ABS (1 << 13)
1025# define R300_FPI0_ARG2C_SHIFT 14
1026# define R300_FPI0_ARG2C_MASK (31 << 14)
1027# define R300_FPI0_ARG2C_NEG (1 << 19)
1028# define R300_FPI0_ARG2C_ABS (1 << 20)
1029# define R300_FPI0_SPECIAL_LRP (1 << 21)
1030# define R300_FPI0_OUTC_MAD (0 << 23)
1031# define R300_FPI0_OUTC_DP3 (1 << 23)
1032# define R300_FPI0_OUTC_DP4 (2 << 23)
1033# define R300_FPI0_OUTC_MIN (4 << 23)
1034# define R300_FPI0_OUTC_MAX (5 << 23)
1035# define R300_FPI0_OUTC_CMP (8 << 23)
1036# define R300_FPI0_OUTC_FRC (9 << 23)
1037# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
1038# define R300_FPI0_OUTC_SAT (1 << 30)
1039# define R300_FPI0_UNKNOWN_31 (1 << 31)
1040
1041#define R300_PFS_INSTR2_0 0x49C0
1042# define R300_FPI2_ARGA_SRC0C_X 0
1043# define R300_FPI2_ARGA_SRC0C_Y 1
1044# define R300_FPI2_ARGA_SRC0C_Z 2
1045# define R300_FPI2_ARGA_SRC1C_X 3
1046# define R300_FPI2_ARGA_SRC1C_Y 4
1047# define R300_FPI2_ARGA_SRC1C_Z 5
1048# define R300_FPI2_ARGA_SRC2C_X 6
1049# define R300_FPI2_ARGA_SRC2C_Y 7
1050# define R300_FPI2_ARGA_SRC2C_Z 8
1051# define R300_FPI2_ARGA_SRC0A 9
1052# define R300_FPI2_ARGA_SRC1A 10
1053# define R300_FPI2_ARGA_SRC2A 11
1054# define R300_FPI2_ARGA_SRC1A_LRP 15
1055# define R300_FPI2_ARGA_ZERO 16
1056# define R300_FPI2_ARGA_ONE 17
1057# define R300_FPI2_ARGA_HALF 18 /* GUESS */
1058
1059# define R300_FPI2_ARG0A_SHIFT 0
1060# define R300_FPI2_ARG0A_MASK (31 << 0)
1061# define R300_FPI2_ARG0A_NEG (1 << 5)
1062# define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */
1063# define R300_FPI2_ARG1A_SHIFT 7
1064# define R300_FPI2_ARG1A_MASK (31 << 7)
1065# define R300_FPI2_ARG1A_NEG (1 << 12)
1066# define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */
1067# define R300_FPI2_ARG2A_SHIFT 14
1068# define R300_FPI2_ARG2A_MASK (31 << 14)
1069# define R300_FPI2_ARG2A_NEG (1 << 19)
1070# define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */
1071# define R300_FPI2_SPECIAL_LRP (1 << 21)
1072# define R300_FPI2_OUTA_MAD (0 << 23)
1073# define R300_FPI2_OUTA_DP4 (1 << 23)
1074# define R300_FPI2_OUTA_MIN (2 << 23)
1075# define R300_FPI2_OUTA_MAX (3 << 23)
1076# define R300_FPI2_OUTA_CMP (6 << 23)
1077# define R300_FPI2_OUTA_FRC (7 << 23)
1078# define R300_FPI2_OUTA_EX2 (8 << 23)
1079# define R300_FPI2_OUTA_LG2 (9 << 23)
1080# define R300_FPI2_OUTA_RCP (10 << 23)
1081# define R300_FPI2_OUTA_RSQ (11 << 23)
1082# define R300_FPI2_OUTA_SAT (1 << 30)
1083# define R300_FPI2_UNKNOWN_31 (1 << 31)
1084/* END */
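/* Example (illustrative sketch, not from a captured command stream): the
 * "MAD o0, r0, 1, 0" move mentioned above, i.e. copy temporary register 0 to
 * output register 0 in both the color and alpha pipes. The helper name is
 * made up for illustration; the field values simply follow the definitions
 * in this header.
 */
static inline void r300_example_mov_r0_to_o0(unsigned int *fpi0,
					     unsigned int *fpi1,
					     unsigned int *fpi2,
					     unsigned int *fpi3)
{
	/* color: source 0 = temporary register 0, write rgb of output 0 */
	*fpi1 = (0 << R300_FPI1_SRC0C_SHIFT) |
		(0 << R300_FPI1_DSTC_SHIFT) |
		R300_FPI1_DSTC_OUTPUT_X | R300_FPI1_DSTC_OUTPUT_Y |
		R300_FPI1_DSTC_OUTPUT_Z;
	/* color: MAD src0.xyz * 1 + 0 */
	*fpi0 = R300_FPI0_OUTC_MAD |
		(R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) |
		(R300_FPI0_ARGC_ONE << R300_FPI0_ARG1C_SHIFT) |
		(R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT);
	/* alpha: source 0 = temporary register 0, write alpha of output 0 */
	*fpi3 = (0 << R300_FPI3_SRC0A_SHIFT) |
		(0 << R300_FPI3_DSTA_SHIFT) |
		R300_FPI3_DSTA_OUTPUT;
	/* alpha: MAD src0.a * 1 + 0 */
	*fpi2 = R300_FPI2_OUTA_MAD |
		(R300_FPI2_ARGA_SRC0A << R300_FPI2_ARG0A_SHIFT) |
		(R300_FPI2_ARGA_ONE << R300_FPI2_ARG1A_SHIFT) |
		(R300_FPI2_ARGA_ZERO << R300_FPI2_ARG2A_SHIFT);
}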
1085
1086/* gap */
1087#define R300_PP_ALPHA_TEST 0x4BD4
1088# define R300_REF_ALPHA_MASK 0x000000ff
1089# define R300_ALPHA_TEST_FAIL (0 << 8)
1090# define R300_ALPHA_TEST_LESS (1 << 8)
1091# define R300_ALPHA_TEST_LEQUAL (3 << 8)
1092# define R300_ALPHA_TEST_EQUAL (2 << 8)
1093# define R300_ALPHA_TEST_GEQUAL (6 << 8)
1094# define R300_ALPHA_TEST_GREATER (4 << 8)
1095# define R300_ALPHA_TEST_NEQUAL (5 << 8)
1096# define R300_ALPHA_TEST_PASS (7 << 8)
1097# define R300_ALPHA_TEST_OP_MASK (7 << 8)
1098# define R300_ALPHA_TEST_ENABLE (1 << 11)
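/* Example (illustrative sketch): pass only fragments whose alpha is at least
 * 0x80, i.e. roughly glAlphaFunc(GL_GEQUAL, 0.5). The helper name is made up
 * for illustration only.
 */
static inline unsigned int r300_example_alpha_test(void)
{
	return R300_ALPHA_TEST_ENABLE |
	       R300_ALPHA_TEST_GEQUAL |
	       (0x80 & R300_REF_ALPHA_MASK);
}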
1099
1100/* gap */
1101/* Fragment program parameters in 7.16 floating point */
1102#define R300_PFS_PARAM_0_X 0x4C00
1103#define R300_PFS_PARAM_0_Y 0x4C04
1104#define R300_PFS_PARAM_0_Z 0x4C08
1105#define R300_PFS_PARAM_0_W 0x4C0C
1106/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
1107#define R300_PFS_PARAM_31_X 0x4DF0
1108#define R300_PFS_PARAM_31_Y 0x4DF4
1109#define R300_PFS_PARAM_31_Z 0x4DF8
1110#define R300_PFS_PARAM_31_W 0x4DFC
1111
1112/* Notes:
1113// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
1114// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
1115// function (both registers are always set up completely in any case)
1116// - Most blend flags are simply copied from R200 and not tested yet */
1117#define R300_RB3D_CBLEND 0x4E04
1118#define R300_RB3D_ABLEND 0x4E08
1119 /* the following only appear in CBLEND */
1120# define R300_BLEND_ENABLE (1 << 0)
1121# define R300_BLEND_UNKNOWN (3 << 1)
1122# define R300_BLEND_NO_SEPARATE (1 << 3)
1123 /* the following are shared between CBLEND and ABLEND */
1124# define R300_FCN_MASK (3 << 12)
1125# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
1126# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
1127# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
1128# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
1129# define R300_SRC_BLEND_GL_ZERO (32 << 16)
1130# define R300_SRC_BLEND_GL_ONE (33 << 16)
1131# define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16)
1132# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
1133# define R300_SRC_BLEND_GL_DST_COLOR (36 << 16)
1134# define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
1135# define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
1136# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
1137# define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16)
1138# define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
1139# define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
1140# define R300_SRC_BLEND_MASK (63 << 16)
1141# define R300_DST_BLEND_GL_ZERO (32 << 24)
1142# define R300_DST_BLEND_GL_ONE (33 << 24)
1143# define R300_DST_BLEND_GL_SRC_COLOR (34 << 24)
1144# define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
1145# define R300_DST_BLEND_GL_DST_COLOR (36 << 24)
1146# define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
1147# define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24)
1148# define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
1149# define R300_DST_BLEND_GL_DST_ALPHA (40 << 24)
1150# define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
1151# define R300_DST_BLEND_MASK (63 << 24)
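/* Example (illustrative sketch): classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA
 * blending. Per the note above, R300_BLEND_NO_SEPARATE is what fglrx sets when
 * CBLEND and ABLEND use the same function (ABLEND would be programmed with the
 * same value); whether R300_BLEND_UNKNOWN is also required is not known. The
 * helper name is made up for illustration only.
 */
static inline unsigned int r300_example_cblend(void)
{
	return R300_BLEND_ENABLE |
	       R300_BLEND_NO_SEPARATE |
	       R300_COMB_FCN_ADD_CLAMP |
	       R300_SRC_BLEND_GL_SRC_ALPHA |
	       R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
}
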
1152#define R300_RB3D_COLORMASK 0x4E0C
1153# define R300_COLORMASK0_B (1<<0)
1154# define R300_COLORMASK0_G (1<<1)
1155# define R300_COLORMASK0_R (1<<2)
1156# define R300_COLORMASK0_A (1<<3)
1157
1158/* gap */
1159#define R300_RB3D_COLOROFFSET0 0x4E28
1160# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
1161#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
1162#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
1163#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
1164/* gap */
1165/* Bit 16: Larger tiles
1166// Bit 17: 4x2 tiles
1167// Bit 18: Extremely weird tile-like pattern, but with some pixels duplicated? */
1168#define R300_RB3D_COLORPITCH0 0x4E38
1169# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
1170# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
1171# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
1172# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1173# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1174# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
1175# define R300_COLOR_FORMAT_RGB565 (2 << 22)
1176# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
1177#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
1178#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
1179#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
1180
1181/* gap */
1182/* Guess by Vladimir.
1183// Set to 0A before 3D operations, set to 02 afterwards. */
1184#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
1185# define R300_RB3D_DSTCACHE_02 0x00000002
1186# define R300_RB3D_DSTCACHE_0A 0x0000000A
1187
1188/* gap */
1189/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
1190/* Bit (1<<8) is the "test" bit, so a plain write is 6. - vd */
1191#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
1192# define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */
1193# define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */
1194# define R300_RB3D_Z_TEST 0x00000012
1195# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
1196# define R300_RB3D_Z_WRITE_ONLY 0x00000006
1201# define R300_RB3D_STENCIL_ENABLE 0x00000001
1202
1203#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
1204 /* functions */
1205# define R300_ZS_NEVER 0
1206# define R300_ZS_LESS 1
1207# define R300_ZS_LEQUAL 2
1208# define R300_ZS_EQUAL 3
1209# define R300_ZS_GEQUAL 4
1210# define R300_ZS_GREATER 5
1211# define R300_ZS_NOTEQUAL 6
1212# define R300_ZS_ALWAYS 7
1213# define R300_ZS_MASK 7
1214 /* operations */
1215# define R300_ZS_KEEP 0
1216# define R300_ZS_ZERO 1
1217# define R300_ZS_REPLACE 2
1218# define R300_ZS_INCR 3
1219# define R300_ZS_DECR 4
1220# define R300_ZS_INVERT 5
1221# define R300_ZS_INCR_WRAP 6
1222# define R300_ZS_DECR_WRAP 7
1223
1224 /* front and back refer to operations done for front
1225 and back faces, i.e. separate stencil function support */
1226# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
1227# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
1228# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
1229# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
1230# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
1231# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
1232# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
1233# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
1234# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
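/* Example (illustrative sketch): straightforward LEQUAL depth testing with
 * depth writes and no stenciling, using the function codes above. The helper
 * name is made up for illustration only.
 */
static inline void r300_example_depth_lequal(unsigned int *cntl0,
					     unsigned int *cntl1)
{
	*cntl0 = R300_RB3D_Z_TEST_AND_WRITE;
	*cntl1 = (R300_ZS_LEQUAL << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT);
}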
1235
1236
1237
1238#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
1239# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
1240# define R300_RB3D_ZS2_STENCIL_MASK 0xFF
1241# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
1242# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
1243
1244/* gap */
1245
1246#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
1247# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
1248# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
1249
1250/* gap */
1251#define R300_RB3D_DEPTHOFFSET 0x4F20
1252#define R300_RB3D_DEPTHPITCH 0x4F24
1253# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
1254# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
1255# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
1256# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1257# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1258# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
1259
1260/* BEGIN: Vertex program instruction set
1261// Every instruction is four dwords long:
1262// DWORD 0: output and opcode
1263// DWORD 1: first argument
1264// DWORD 2: second argument
1265// DWORD 3: third argument
1266//
1267// Notes:
1268// - ABS r, a is implemented as MAX r, a, -a
1269// - MOV is implemented as ADD to zero
1270// - XPD is implemented as MUL + MAD
1271// - FLR is implemented as FRC + ADD
1272// - apparently, fglrx tries to schedule instructions so that there is at least
1273// one instruction between the write to a temporary and the first read
1274// from said temporary; however, violations of this scheduling are allowed
1275// - register indices seem to be unrelated with OpenGL aliasing to conventional state
1276// - only one attribute and one parameter can be loaded at a time; however, the
1277// same attribute/parameter can be used for more than one argument
1278// - the second software argument for POW is the third hardware argument (no idea why)
1279// - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
1280//
1281// There is some magic surrounding LIT:
1282// The single argument is replicated across all three inputs, but swizzled:
1283// First argument: xyzy
1284// Second argument: xyzx
1285// Third argument: xyzw
1286// Whenever the result is used later in the fragment program, fglrx forces x and w
1287// to be 1.0 in the input selection; I don't know whether this is strictly necessary */
1288#define R300_VPI_OUT_OP_DOT (1 << 0)
1289#define R300_VPI_OUT_OP_MUL (2 << 0)
1290#define R300_VPI_OUT_OP_ADD (3 << 0)
1291#define R300_VPI_OUT_OP_MAD (4 << 0)
1292#define R300_VPI_OUT_OP_DST (5 << 0)
1293#define R300_VPI_OUT_OP_FRC (6 << 0)
1294#define R300_VPI_OUT_OP_MAX (7 << 0)
1295#define R300_VPI_OUT_OP_MIN (8 << 0)
1296#define R300_VPI_OUT_OP_SGE (9 << 0)
1297#define R300_VPI_OUT_OP_SLT (10 << 0)
1298#define R300_VPI_OUT_OP_UNK12 (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
1299#define R300_VPI_OUT_OP_EXP (65 << 0)
1300#define R300_VPI_OUT_OP_LOG (66 << 0)
1301#define R300_VPI_OUT_OP_UNK67 (67 << 0) /* Used in fog computations, scalar(scalar) */
1302#define R300_VPI_OUT_OP_LIT (68 << 0)
1303#define R300_VPI_OUT_OP_POW (69 << 0)
1304#define R300_VPI_OUT_OP_RCP (70 << 0)
1305#define R300_VPI_OUT_OP_RSQ (72 << 0)
1306#define R300_VPI_OUT_OP_UNK73 (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
1307#define R300_VPI_OUT_OP_EX2 (75 << 0)
1308#define R300_VPI_OUT_OP_LG2 (76 << 0)
1309#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
1310#define R300_VPI_OUT_OP_UNK129 (129 << 0) /* all temps, vector(scalar, vector, vector) */
1311
1312#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
1313#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
1314#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
1315
1316#define R300_VPI_OUT_REG_INDEX_SHIFT 13
1317#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) /* GUESS based on fglrx native limits */
1318
1319#define R300_VPI_OUT_WRITE_X (1 << 20)
1320#define R300_VPI_OUT_WRITE_Y (1 << 21)
1321#define R300_VPI_OUT_WRITE_Z (1 << 22)
1322#define R300_VPI_OUT_WRITE_W (1 << 23)
1323
1324#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
1325#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
1326#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
1327#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
1328#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */
1329
1330#define R300_VPI_IN_REG_INDEX_SHIFT 5
1331#define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* GUESS based on fglrx native limits */
1332
1333/* The R300 can select components from the input register arbitrarily.
1334// Use the following constants, shifted by the component shift you
1335// want to select */
1336#define R300_VPI_IN_SELECT_X 0
1337#define R300_VPI_IN_SELECT_Y 1
1338#define R300_VPI_IN_SELECT_Z 2
1339#define R300_VPI_IN_SELECT_W 3
1340#define R300_VPI_IN_SELECT_ZERO 4
1341#define R300_VPI_IN_SELECT_ONE 5
1342#define R300_VPI_IN_SELECT_MASK 7
1343
1344#define R300_VPI_IN_X_SHIFT 13
1345#define R300_VPI_IN_Y_SHIFT 16
1346#define R300_VPI_IN_Z_SHIFT 19
1347#define R300_VPI_IN_W_SHIFT 22
1348
1349#define R300_VPI_IN_NEG_X (1 << 25)
1350#define R300_VPI_IN_NEG_Y (1 << 26)
1351#define R300_VPI_IN_NEG_Z (1 << 27)
1352#define R300_VPI_IN_NEG_W (1 << 28)
1353/* END */
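/* Example (illustrative sketch, not from a captured program): "MOV temp1,
 * attr0" written as "ADD temp1, attr0, 0" per the notes above. Encoding the
 * zero/unused arguments by selecting SELECT_ZERO on an arbitrary register
 * class is an assumption; the helper name is made up for illustration only.
 */
static inline void r300_example_vpi_mov(unsigned int dw[4])
{
	dw[0] = R300_VPI_OUT_OP_ADD |
		R300_VPI_OUT_REG_CLASS_TEMPORARY |
		(1 << R300_VPI_OUT_REG_INDEX_SHIFT) |
		R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y |
		R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
	dw[1] = R300_VPI_IN_REG_CLASS_ATTRIBUTE |
		(0 << R300_VPI_IN_REG_INDEX_SHIFT) |
		(R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) |
		(R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) |
		(R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) |
		(R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
	dw[2] = R300_VPI_IN_REG_CLASS_ATTRIBUTE |	/* constant zero (GUESS) */
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT) |
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT) |
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT) |
		(R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT);
	dw[3] = dw[2];					/* third argument unused */
}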
1354
1355/* BEGIN: Packet 3 commands */
1356
1357/* A primitive emission dword. */
1358#define R300_PRIM_TYPE_NONE (0 << 0)
1359#define R300_PRIM_TYPE_POINT (1 << 0)
1360#define R300_PRIM_TYPE_LINE (2 << 0)
1361#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
1362#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
1363#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
1364#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
1365#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
1366#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
1367#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
1368#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
1369#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) /* GUESS (based on r200) */
1370#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
1371#define R300_PRIM_TYPE_QUADS (13 << 0)
1372#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
1373#define R300_PRIM_TYPE_POLYGON (15 << 0)
1374#define R300_PRIM_TYPE_MASK 0xF
1375#define R300_PRIM_WALK_IND (1 << 4)
1376#define R300_PRIM_WALK_LIST (2 << 4)
1377#define R300_PRIM_WALK_RING (3 << 4)
1378#define R300_PRIM_WALK_MASK (3 << 4)
1379#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) /* GUESS (based on r200) */
1380#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) /* GUESS */
1381#define R300_PRIM_NUM_VERTICES_SHIFT 16
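/* Example (illustrative sketch): a primitive emission dword for a non-indexed
 * triangle list of nr_vertices vertices. The meaning of the WALK modes is
 * assumed to follow the r200 convention (WALK_LIST = vertices fetched in order
 * from the bound arrays); the helper name is made up for illustration only.
 */
static inline unsigned int r300_example_prim_dword(unsigned int nr_vertices)
{
	return R300_PRIM_TYPE_TRI_LIST |
	       R300_PRIM_WALK_LIST |
	       (nr_vertices << R300_PRIM_NUM_VERTICES_SHIFT);
}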
1382
1383/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
1384// Two parameter dwords:
1385// 0. The first parameter always appears to be 0.
1386// 1. The second parameter is a standard primitive emission dword. */
1387#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
1388
1389/* Specify the full set of vertex arrays as (address, stride).
1390// The first parameter is the number of vertex arrays specified.
1391// The rest of the command is a variable-length list of blocks, where
1392// each block is three dwords long and specifies two arrays.
1393// The first dword of a block is split into two words: the less significant
1394// word refers to the first array, the more significant word to the second
1395// array in the block.
1396// The low byte of each word contains the size of an array entry in dwords,
1397// the high byte contains the stride of the array.
1398// The second dword of a block contains the pointer to the first array,
1399// the third dword of a block contains the pointer to the second array.
1400// Note that if the total number of arrays is odd, the third dword of
1401// the last block is omitted. */
1402#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
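/* Example (illustrative sketch): the body of a 3D_LOAD_VBPNTR command (the CP
 * packet framing itself is not shown) for two vertex arrays, each with a
 * 4-dword entry size and a stride of 4; the stride unit is assumed to be
 * dwords as well. The helper name is made up for illustration only.
 */
static inline void r300_example_load_vbpntr(unsigned int body[4],
					    unsigned int addr0,
					    unsigned int addr1)
{
	body[0] = 2;				/* number of arrays */
	body[1] = (4 | (4 << 8)) |		/* array 0: entry size, stride */
		  ((4 | (4 << 8)) << 16);	/* array 1: entry size, stride */
	body[2] = addr0;			/* pointer to array 0 */
	body[3] = addr1;			/* pointer to array 1 */
}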
1403
1404#define R300_PACKET3_INDX_BUFFER 0x00003300
1405# define R300_EB_UNK1_SHIFT 24
1406# define R300_EB_UNK1 (0x80<<24)
1407# define R300_EB_UNK2 0x0810
1408#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
1409
1410/* END */
1411
1412#endif /* _R300_REG_H */
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 20bcf872b348..6d9080a3ca7e 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "radeon_drv.h" 34#include "radeon_drv.h"
35#include "r300_reg.h"
35 36
36#define RADEON_FIFO_DEBUG 0 37#define RADEON_FIFO_DEBUG 0
37 38
@@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
1151 1152
1152#if __OS_HAS_AGP 1153#if __OS_HAS_AGP
1153 if ( !dev_priv->is_pci ) { 1154 if ( !dev_priv->is_pci ) {
1155 /* set RADEON_AGP_BASE here instead of relying on X from user space */
1156 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
1154 RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, 1157 RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
1155 dev_priv->ring_rptr->offset 1158 dev_priv->ring_rptr->offset
1156 - dev->agp->base 1159 - dev->agp->base
@@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
1407 radeon_do_cleanup_cp(dev); 1410 radeon_do_cleanup_cp(dev);
1408 return DRM_ERR(EINVAL); 1411 return DRM_ERR(EINVAL);
1409 } 1412 }
1413 dev->agp_buffer_token = init->buffers_offset;
1410 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1414 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1411 if(!dev->agp_buffer_map) { 1415 if(!dev->agp_buffer_map) {
1412 DRM_ERROR("could not find dma buffer region!\n"); 1416 DRM_ERROR("could not find dma buffer region!\n");
@@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS )
1625 1629
1626 DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) ); 1630 DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
1627 1631
1632 if(init.func == RADEON_INIT_R300_CP)
1633 r300_init_reg_flags();
1634
1628 switch ( init.func ) { 1635 switch ( init.func ) {
1629 case RADEON_INIT_CP: 1636 case RADEON_INIT_CP:
1630 case RADEON_INIT_R200_CP: 1637 case RADEON_INIT_R200_CP:
@@ -2039,15 +2046,43 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
2039 case CHIP_RV200: 2046 case CHIP_RV200:
2040 case CHIP_R200: 2047 case CHIP_R200:
2041 case CHIP_R300: 2048 case CHIP_R300:
2049 case CHIP_R420:
2042 dev_priv->flags |= CHIP_HAS_HIERZ; 2050 dev_priv->flags |= CHIP_HAS_HIERZ;
2043 break; 2051 break;
2044 default: 2052 default:
2045 /* all other chips have no hierarchical z buffer */ 2053 /* all other chips have no hierarchical z buffer */
2046 break; 2054 break;
2047 } 2055 }
2056
2057 if (drm_device_is_agp(dev))
2058 dev_priv->flags |= CHIP_IS_AGP;
2059
2060 DRM_DEBUG("%s card detected\n",
2061 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
2048 return ret; 2062 return ret;
2049} 2063}
2050 2064
2065int radeon_presetup(struct drm_device *dev)
2066{
2067 int ret;
2068 drm_local_map_t *map;
2069 drm_radeon_private_t *dev_priv = dev->dev_private;
2070
2071 ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
2072 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
2073 _DRM_READ_ONLY, &dev_priv->mmio);
2074 if (ret != 0)
2075 return ret;
2076
2077 ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
2078 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
2079 _DRM_WRITE_COMBINING, &map);
2080 if (ret != 0)
2081 return ret;
2082
2083 return 0;
2084}
2085
2051int radeon_driver_postcleanup(struct drm_device *dev) 2086int radeon_driver_postcleanup(struct drm_device *dev)
2052{ 2087{
2053 drm_radeon_private_t *dev_priv = dev->dev_private; 2088 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index c1e62d047989..3792798270a4 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -195,6 +195,52 @@ typedef union {
195#define RADEON_WAIT_2D 0x1 195#define RADEON_WAIT_2D 0x1
196#define RADEON_WAIT_3D 0x2 196#define RADEON_WAIT_3D 0x2
197 197
198/* Allowed parameters for R300_CMD_PACKET3
199 */
200#define R300_CMD_PACKET3_CLEAR 0
201#define R300_CMD_PACKET3_RAW 1
202
203/* Commands understood by cmd_buffer ioctl for R300.
204 * The interface has not been stabilized, so some of these may be removed
205 * and eventually reordered before stabilization.
206 */
207#define R300_CMD_PACKET0 1
208#define R300_CMD_VPU 2 /* emit vertex program upload */
209#define R300_CMD_PACKET3 3 /* emit a packet3 */
210#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
211#define R300_CMD_CP_DELAY 5
212#define R300_CMD_DMA_DISCARD 6
213#define R300_CMD_WAIT 7
214# define R300_WAIT_2D 0x1
215# define R300_WAIT_3D 0x2
216# define R300_WAIT_2D_CLEAN 0x3
217# define R300_WAIT_3D_CLEAN 0x4
218
219typedef union {
220 unsigned int u;
221 struct {
222 unsigned char cmd_type, pad0, pad1, pad2;
223 } header;
224 struct {
225 unsigned char cmd_type, count, reglo, reghi;
226 } packet0;
227 struct {
228 unsigned char cmd_type, count, adrlo, adrhi;
229 } vpu;
230 struct {
231 unsigned char cmd_type, packet, pad0, pad1;
232 } packet3;
233 struct {
234 unsigned char cmd_type, packet;
235 unsigned short count; /* amount of packet2 to emit */
236 } delay;
237 struct {
238 unsigned char cmd_type, buf_idx, pad0, pad1;
239 } dma;
240 struct {
241 unsigned char cmd_type, flags, pad0, pad1;
242 } wait;
243} drm_r300_cmd_header_t;
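/* Illustrative sketch (not part of this patch): how user space might fill the
 * header for an R300_CMD_PACKET0 that uploads `count` register dwords starting
 * at `reg`. Whether reglo/reghi carry the raw register offset or a scaled
 * value is an assumption here, and the helper name is made up for
 * illustration; count must fit in one byte.
 */
static __inline__ drm_r300_cmd_header_t
r300_example_packet0_header(unsigned int reg, unsigned int count)
{
	drm_r300_cmd_header_t header;

	header.packet0.cmd_type = R300_CMD_PACKET0;
	header.packet0.count = count;
	header.packet0.reglo = reg & 0xff;
	header.packet0.reghi = (reg >> 8) & 0xff;
	return header;
}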
198 244
199#define RADEON_FRONT 0x1 245#define RADEON_FRONT 0x1
200#define RADEON_BACK 0x2 246#define RADEON_BACK 0x2
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 18e4e5b0952f..e0682f64b400 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -76,6 +76,7 @@ static struct drm_driver driver = {
76 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 76 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
77 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 77 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
78 .preinit = radeon_driver_preinit, 78 .preinit = radeon_driver_preinit,
79 .presetup = radeon_presetup,
79 .postcleanup = radeon_driver_postcleanup, 80 .postcleanup = radeon_driver_postcleanup,
80 .prerelease = radeon_driver_prerelease, 81 .prerelease = radeon_driver_prerelease,
81 .pretakedown = radeon_driver_pretakedown, 82 .pretakedown = radeon_driver_pretakedown,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 771aa80a5e8c..f12a963ede18 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -82,9 +82,10 @@
82 * - Add support for r100 cube maps 82 * - Add support for r100 cube maps
83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear 83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
84 * texture filtering on r200 84 * texture filtering on r200
85 * 1.17- Add initial support for R300 (3D).
85 */ 86 */
86#define DRIVER_MAJOR 1 87#define DRIVER_MAJOR 1
87#define DRIVER_MINOR 16 88#define DRIVER_MINOR 17
88#define DRIVER_PATCHLEVEL 0 89#define DRIVER_PATCHLEVEL 0
89 90
90#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) 91#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
@@ -106,7 +107,9 @@ enum radeon_family {
106 CHIP_RV280, 107 CHIP_RV280,
107 CHIP_R300, 108 CHIP_R300,
108 CHIP_RS300, 109 CHIP_RS300,
110 CHIP_R350,
109 CHIP_RV350, 111 CHIP_RV350,
112 CHIP_R420,
110 CHIP_LAST, 113 CHIP_LAST,
111}; 114};
112 115
@@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
290extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); 293extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
291 294
292extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); 295extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
296extern int radeon_presetup(struct drm_device *dev);
293extern int radeon_driver_postcleanup(struct drm_device *dev); 297extern int radeon_driver_postcleanup(struct drm_device *dev);
294 298
295extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); 299extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
@@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev );
320extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 324extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
321 unsigned long arg); 325 unsigned long arg);
322 326
327
328/* r300_cmdbuf.c */
329extern void r300_init_reg_flags(void);
330
331extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp,
332 drm_file_t* filp_priv,
333 drm_radeon_cmd_buffer_t* cmdbuf);
334
323/* Flags for stats.boxes 335/* Flags for stats.boxes
324 */ 336 */
325#define RADEON_BOX_DMA_IDLE 0x1 337#define RADEON_BOX_DMA_IDLE 0x1
@@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
357#define RADEON_CRTC2_OFFSET 0x0324 369#define RADEON_CRTC2_OFFSET 0x0324
358#define RADEON_CRTC2_OFFSET_CNTL 0x0328 370#define RADEON_CRTC2_OFFSET_CNTL 0x0328
359 371
372#define RADEON_MPP_TB_CONFIG 0x01c0
373#define RADEON_MEM_CNTL 0x0140
374#define RADEON_MEM_SDRAM_MODE_REG 0x0158
375#define RADEON_AGP_BASE 0x0170
376
360#define RADEON_RB3D_COLOROFFSET 0x1c40 377#define RADEON_RB3D_COLOROFFSET 0x1c40
361#define RADEON_RB3D_COLORPITCH 0x1c48 378#define RADEON_RB3D_COLORPITCH 0x1c48
362 379
@@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
651#define RADEON_CP_PACKET1 0x40000000 668#define RADEON_CP_PACKET1 0x40000000
652#define RADEON_CP_PACKET2 0x80000000 669#define RADEON_CP_PACKET2 0x80000000
653#define RADEON_CP_PACKET3 0xC0000000 670#define RADEON_CP_PACKET3 0xC0000000
671# define RADEON_CP_NOP 0x00001000
672# define RADEON_CP_NEXT_CHAR 0x00001900
673# define RADEON_CP_PLY_NEXTSCAN 0x00001D00
674# define RADEON_CP_SET_SCISSORS 0x00001E00
675 /* GEN_INDX_PRIM is unsupported starting with R300 */
654# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 676# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
655# define RADEON_WAIT_FOR_IDLE 0x00002600 677# define RADEON_WAIT_FOR_IDLE 0x00002600
656# define RADEON_3D_DRAW_VBUF 0x00002800 678# define RADEON_3D_DRAW_VBUF 0x00002800
657# define RADEON_3D_DRAW_IMMD 0x00002900 679# define RADEON_3D_DRAW_IMMD 0x00002900
658# define RADEON_3D_DRAW_INDX 0x00002A00 680# define RADEON_3D_DRAW_INDX 0x00002A00
681# define RADEON_CP_LOAD_PALETTE 0x00002C00
659# define RADEON_3D_LOAD_VBPNTR 0x00002F00 682# define RADEON_3D_LOAD_VBPNTR 0x00002F00
660# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 683# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
661# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 684# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
662# define RADEON_3D_CLEAR_ZMASK 0x00003200 685# define RADEON_3D_CLEAR_ZMASK 0x00003200
686# define RADEON_CP_INDX_BUFFER 0x00003300
687# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
688# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
689# define RADEON_CP_3D_DRAW_INDX_2 0x00003600
663# define RADEON_3D_CLEAR_HIZ 0x00003700 690# define RADEON_3D_CLEAR_HIZ 0x00003700
691# define RADEON_CP_3D_CLEAR_CMASK 0x00003802
664# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 692# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
665# define RADEON_CNTL_PAINT_MULTI 0x00009A00 693# define RADEON_CNTL_PAINT_MULTI 0x00009A00
666# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 694# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 1f79e249146c..64a3e3a406ef 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
1493 1493
1494} 1494}
1495 1495
1496#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) 1496#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
1497 1497
1498static int radeon_cp_dispatch_texture( DRMFILE filp, 1498static int radeon_cp_dispatch_texture( DRMFILE filp,
1499 drm_device_t *dev, 1499 drm_device_t *dev,
@@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1506 u32 format; 1506 u32 format;
1507 u32 *buffer; 1507 u32 *buffer;
1508 const u8 __user *data; 1508 const u8 __user *data;
1509 int size, dwords, tex_width, blit_width; 1509 int size, dwords, tex_width, blit_width, spitch;
1510 u32 height; 1510 u32 height;
1511 int i; 1511 int i;
1512 u32 texpitch, microtile; 1512 u32 texpitch, microtile;
1513 u32 offset;
1513 RING_LOCALS; 1514 RING_LOCALS;
1514 1515
1515 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp ); 1516 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
@@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1530 RADEON_WAIT_UNTIL_IDLE(); 1531 RADEON_WAIT_UNTIL_IDLE();
1531 ADVANCE_RING(); 1532 ADVANCE_RING();
1532 1533
1533#ifdef __BIG_ENDIAN
1534 /* The Mesa texture functions provide the data in little endian as the
1535 * chip wants it, but we need to compensate for the fact that the CP
1536 * ring gets byte-swapped
1537 */
1538 BEGIN_RING( 2 );
1539 OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
1540 ADVANCE_RING();
1541#endif
1542
1543
1544 /* The compiler won't optimize away a division by a variable, 1534 /* The compiler won't optimize away a division by a variable,
1545 * even if the only legal values are powers of two. Thus, we'll 1535 * even if the only legal values are powers of two. Thus, we'll
1546 * use a shift instead. 1536 * use a shift instead.
@@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1572 DRM_ERROR( "invalid texture format %d\n", tex->format ); 1562 DRM_ERROR( "invalid texture format %d\n", tex->format );
1573 return DRM_ERR(EINVAL); 1563 return DRM_ERR(EINVAL);
1574 } 1564 }
1565 spitch = blit_width >> 6;
1566 if (spitch == 0 && image->height > 1)
1567 return DRM_ERR(EINVAL);
1568
1575 texpitch = tex->pitch; 1569 texpitch = tex->pitch;
1576 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { 1570 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
1577 microtile = 1; 1571 microtile = 1;
@@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1624 */ 1618 */
1625 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset); 1619 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
1626 dwords = size / 4; 1620 dwords = size / 4;
1627 buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
1628 buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1629 RADEON_GMC_BRUSH_NONE |
1630 (format << 8) |
1631 RADEON_GMC_SRC_DATATYPE_COLOR |
1632 RADEON_ROP3_S |
1633 RADEON_DP_SRC_SOURCE_HOST_DATA |
1634 RADEON_GMC_CLR_CMP_CNTL_DIS |
1635 RADEON_GMC_WR_MSK_DIS);
1636
1637 buffer[2] = (texpitch << 22) | (tex->offset >> 10);
1638 buffer[3] = 0xffffffff;
1639 buffer[4] = 0xffffffff;
1640 buffer[5] = (image->y << 16) | image->x;
1641 buffer[6] = (height << 16) | image->width;
1642 buffer[7] = dwords;
1643 buffer += 8;
1644
1645
1646 1621
1647 if (microtile) { 1622 if (microtile) {
1648 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1623 /* texture micro tiling in use, minimum texture width is thus 16 bytes.
@@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1750 } 1725 }
1751 1726
1752 buf->filp = filp; 1727 buf->filp = filp;
1753 buf->used = (dwords + 8) * sizeof(u32); 1728 buf->used = size;
1754 radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); 1729 offset = dev_priv->gart_buffers_offset + buf->offset;
1755 radeon_cp_discard_buffer( dev, buf ); 1730 BEGIN_RING(9);
1731 OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
1732 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1733 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1734 RADEON_GMC_BRUSH_NONE |
1735 (format << 8) |
1736 RADEON_GMC_SRC_DATATYPE_COLOR |
1737 RADEON_ROP3_S |
1738 RADEON_DP_SRC_SOURCE_MEMORY |
1739 RADEON_GMC_CLR_CMP_CNTL_DIS |
1740 RADEON_GMC_WR_MSK_DIS );
1741 OUT_RING((spitch << 22) | (offset >> 10));
1742 OUT_RING((texpitch << 22) | (tex->offset >> 10));
1743 OUT_RING(0);
1744 OUT_RING((image->x << 16) | image->y);
1745 OUT_RING((image->width << 16) | height);
1746 RADEON_WAIT_UNTIL_2D_IDLE();
1747 ADVANCE_RING();
1748
1749 radeon_cp_discard_buffer(dev, buf);
1756 1750
1757 /* Update the input parameters for next time */ 1751 /* Update the input parameters for next time */
1758 image->y += height; 1752 image->y += height;
@@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
2797 2791
2798 orig_nbox = cmdbuf.nbox; 2792 orig_nbox = cmdbuf.nbox;
2799 2793
2794 if(dev_priv->microcode_version == UCODE_R300) {
2795 int temp;
2796 temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
2797
2798 if (orig_bufsz != 0)
2799 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2800
2801 return temp;
2802 }
2803
2804 /* microcode_version != r300 */
2800 while ( cmdbuf.bufsz >= sizeof(header) ) { 2805 while ( cmdbuf.bufsz >= sizeof(header) ) {
2801 2806
2802 header.i = *(int *)cmdbuf.buf; 2807 header.i = *(int *)cmdbuf.buf;
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
new file mode 100644
index 000000000000..2fd40bac7c97
--- /dev/null
+++ b/drivers/char/drm/savage_bci.c
@@ -0,0 +1,1096 @@
1/* savage_bci.c -- BCI support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29/* Need a long timeout because shadow status updates can take a while
30 * and so can waiting for events when the queue is full. */
31#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
32#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
33#define SAVAGE_FREELIST_DEBUG 0
34
35static int
36savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
37{
38 uint32_t mask = dev_priv->status_used_mask;
39 uint32_t threshold = dev_priv->bci_threshold_hi;
40 uint32_t status;
41 int i;
42
43#if SAVAGE_BCI_DEBUG
44 if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
45 DRM_ERROR("Trying to emit %d words "
46 "(more than guaranteed space in COB)\n", n);
47#endif
48
49 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
50 DRM_MEMORYBARRIER();
51 status = dev_priv->status_ptr[0];
52 if ((status & mask) < threshold)
53 return 0;
54 DRM_UDELAY(1);
55 }
56
57#if SAVAGE_BCI_DEBUG
58 DRM_ERROR("failed!\n");
59 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
60#endif
61 return DRM_ERR(EBUSY);
62}
63
64static int
65savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
66{
67 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
68 uint32_t status;
69 int i;
70
71 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
72 status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
73 if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
74 return 0;
75 DRM_UDELAY(1);
76 }
77
78#if SAVAGE_BCI_DEBUG
79 DRM_ERROR("failed!\n");
80 DRM_INFO(" status=0x%08x\n", status);
81#endif
82 return DRM_ERR(EBUSY);
83}
84
85static int
86savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
87{
88 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
89 uint32_t status;
90 int i;
91
92 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
93 status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
94 if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
95 return 0;
96 DRM_UDELAY(1);
97 }
98
99#if SAVAGE_BCI_DEBUG
100 DRM_ERROR("failed!\n");
101 DRM_INFO(" status=0x%08x\n", status);
102#endif
103 return DRM_ERR(EBUSY);
104}
105
106/*
107 * Waiting for events.
108 *
109 * The BIOS resets the event tag to 0 on mode changes. Therefore we
110 * never emit 0 to the event tag. If we find a 0 event tag we know the
111 * BIOS stomped on it and return success assuming that the BIOS waited
112 * for engine idle.
113 *
114 * Note: if the Xserver uses the event tag it has to follow the same
115 * rule. Otherwise there may be glitches every 2^16 events.
116 */
117static int
118savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
119{
120 uint32_t status;
121 int i;
122
123 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
124 DRM_MEMORYBARRIER();
125 status = dev_priv->status_ptr[1];
126 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
127 (status & 0xffff) == 0)
128 return 0;
129 DRM_UDELAY(1);
130 }
131
132#if SAVAGE_BCI_DEBUG
133 DRM_ERROR("failed!\n");
134 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
135#endif
136
137 return DRM_ERR(EBUSY);
138}
139
140static int
141savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
142{
143 uint32_t status;
144 int i;
145
146 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
147 status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
148 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
149 (status & 0xffff) == 0)
150 return 0;
151 DRM_UDELAY(1);
152 }
153
154#if SAVAGE_BCI_DEBUG
155 DRM_ERROR("failed!\n");
156 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
157#endif
158
159 return DRM_ERR(EBUSY);
160}
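/* The two wait functions above treat the 16-bit event tag as a circular
 * counter: the tag has reached `e` iff the difference (mod 2^16) is at most
 * 0x7fff, with a tag of 0 meaning "the BIOS reset the tag, assume idle".
 * Restated on its own (illustrative helper, not used by the driver):
 */
static inline int savage_example_event_passed(uint16_t tag, uint16_t e)
{
	return tag == 0 || (uint16_t)(tag - e) <= 0x7fff;
}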
161
162uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
163 unsigned int flags)
164{
165 uint16_t count;
166 BCI_LOCALS;
167
168 if (dev_priv->status_ptr) {
169 /* coordinate with Xserver */
170 count = dev_priv->status_ptr[1023];
171 if (count < dev_priv->event_counter)
172 dev_priv->event_wrap++;
173 } else {
174 count = dev_priv->event_counter;
175 }
176 count = (count + 1) & 0xffff;
177 if (count == 0) {
178 count++; /* See the comment above savage_wait_event_*. */
179 dev_priv->event_wrap++;
180 }
181 dev_priv->event_counter = count;
182 if (dev_priv->status_ptr)
183 dev_priv->status_ptr[1023] = (uint32_t)count;
184
185 if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
186 unsigned int wait_cmd = BCI_CMD_WAIT;
187 if ((flags & SAVAGE_WAIT_2D))
188 wait_cmd |= BCI_CMD_WAIT_2D;
189 if ((flags & SAVAGE_WAIT_3D))
190 wait_cmd |= BCI_CMD_WAIT_3D;
191 BEGIN_BCI(2);
192 BCI_WRITE(wait_cmd);
193 } else {
194 BEGIN_BCI(1);
195 }
196 BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
197
198 return count;
199}
200
201/*
202 * Freelist management
203 */
204static int savage_freelist_init(drm_device_t *dev)
205{
206 drm_savage_private_t *dev_priv = dev->dev_private;
207 drm_device_dma_t *dma = dev->dma;
208 drm_buf_t *buf;
209 drm_savage_buf_priv_t *entry;
210 int i;
211 DRM_DEBUG("count=%d\n", dma->buf_count);
212
213 dev_priv->head.next = &dev_priv->tail;
214 dev_priv->head.prev = NULL;
215 dev_priv->head.buf = NULL;
216
217 dev_priv->tail.next = NULL;
218 dev_priv->tail.prev = &dev_priv->head;
219 dev_priv->tail.buf = NULL;
220
221 for (i = 0; i < dma->buf_count; i++) {
222 buf = dma->buflist[i];
223 entry = buf->dev_private;
224
225 SET_AGE(&entry->age, 0, 0);
226 entry->buf = buf;
227
228 entry->next = dev_priv->head.next;
229 entry->prev = &dev_priv->head;
230 dev_priv->head.next->prev = entry;
231 dev_priv->head.next = entry;
232 }
233
234 return 0;
235}
236
237static drm_buf_t *savage_freelist_get(drm_device_t *dev)
238{
239 drm_savage_private_t *dev_priv = dev->dev_private;
240 drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
241 uint16_t event;
242 unsigned int wrap;
243 DRM_DEBUG("\n");
244
245 UPDATE_EVENT_COUNTER();
246 if (dev_priv->status_ptr)
247 event = dev_priv->status_ptr[1] & 0xffff;
248 else
249 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
250 wrap = dev_priv->event_wrap;
251 if (event > dev_priv->event_counter)
252 wrap--; /* hardware hasn't passed the last wrap yet */
253
254 DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
255 DRM_DEBUG(" head=0x%04x %d\n", event, wrap);
256
257 if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
258 drm_savage_buf_priv_t *next = tail->next;
259 drm_savage_buf_priv_t *prev = tail->prev;
260 prev->next = next;
261 next->prev = prev;
262 tail->next = tail->prev = NULL;
263 return tail->buf;
264 }
265
266 DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
267 return NULL;
268}
269
270void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
271{
272 drm_savage_private_t *dev_priv = dev->dev_private;
273 drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
274
275 DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
276
277 if (entry->next != NULL || entry->prev != NULL) {
278 DRM_ERROR("entry already on freelist.\n");
279 return;
280 }
281
282 prev = &dev_priv->head;
283 next = prev->next;
284 prev->next = entry;
285 next->prev = entry;
286 entry->prev = prev;
287 entry->next = next;
288}
289
290/*
291 * Command DMA
292 */
293static int savage_dma_init(drm_savage_private_t *dev_priv)
294{
295 unsigned int i;
296
297 dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
298 (SAVAGE_DMA_PAGE_SIZE*4);
299 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
300 dev_priv->nr_dma_pages,
301 DRM_MEM_DRIVER);
302 if (dev_priv->dma_pages == NULL)
303 return DRM_ERR(ENOMEM);
304
305 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
306 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
307 dev_priv->dma_pages[i].used = 0;
308 dev_priv->dma_pages[i].flushed = 0;
309 }
310 SET_AGE(&dev_priv->last_dma_age, 0, 0);
311
312 dev_priv->first_dma_page = 0;
313 dev_priv->current_dma_page = 0;
314
315 return 0;
316}
317
318void savage_dma_reset(drm_savage_private_t *dev_priv)
319{
320 uint16_t event;
321 unsigned int wrap, i;
322 event = savage_bci_emit_event(dev_priv, 0);
323 wrap = dev_priv->event_wrap;
324 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
325 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
326 dev_priv->dma_pages[i].used = 0;
327 dev_priv->dma_pages[i].flushed = 0;
328 }
329 SET_AGE(&dev_priv->last_dma_age, event, wrap);
330 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
331}
332
333void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
334{
335 uint16_t event;
336 unsigned int wrap;
337
338 /* Faked DMA buffer pages don't age. */
339 if (dev_priv->cmd_dma == &dev_priv->fake_dma)
340 return;
341
342 UPDATE_EVENT_COUNTER();
343 if (dev_priv->status_ptr)
344 event = dev_priv->status_ptr[1] & 0xffff;
345 else
346 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
347 wrap = dev_priv->event_wrap;
348 if (event > dev_priv->event_counter)
349 wrap--; /* hardware hasn't passed the last wrap yet */
350
351 if (dev_priv->dma_pages[page].age.wrap > wrap ||
352 (dev_priv->dma_pages[page].age.wrap == wrap &&
353 dev_priv->dma_pages[page].age.event > event)) {
354 if (dev_priv->wait_evnt(dev_priv,
355 dev_priv->dma_pages[page].age.event)
356 < 0)
357 DRM_ERROR("wait_evnt failed!\n");
358 }
359}
360
361uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
362{
363 unsigned int cur = dev_priv->current_dma_page;
364 unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
365 dev_priv->dma_pages[cur].used;
366 unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
367 SAVAGE_DMA_PAGE_SIZE;
368 uint32_t *dma_ptr;
369 unsigned int i;
370
371 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
372 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
373
374 if (cur + nr_pages < dev_priv->nr_dma_pages) {
375 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
376 cur*SAVAGE_DMA_PAGE_SIZE +
377 dev_priv->dma_pages[cur].used;
378 if (n < rest)
379 rest = n;
380 dev_priv->dma_pages[cur].used += rest;
381 n -= rest;
382 cur++;
383 } else {
384 dev_priv->dma_flush(dev_priv);
385 nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
386 for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
387 dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
388 dev_priv->dma_pages[i].used = 0;
389 dev_priv->dma_pages[i].flushed = 0;
390 }
391 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
392 dev_priv->first_dma_page = cur = 0;
393 }
394 for (i = cur; nr_pages > 0; ++i, --nr_pages) {
395#if SAVAGE_DMA_DEBUG
396 if (dev_priv->dma_pages[i].used) {
397 DRM_ERROR("unflushed page %u: used=%u\n",
398 i, dev_priv->dma_pages[i].used);
399 }
400#endif
401 if (n > SAVAGE_DMA_PAGE_SIZE)
402 dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
403 else
404 dev_priv->dma_pages[i].used = n;
405 n -= SAVAGE_DMA_PAGE_SIZE;
406 }
407 dev_priv->current_dma_page = --i;
408
409 DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
410 i, dev_priv->dma_pages[i].used, n);
411
412 savage_dma_wait(dev_priv, dev_priv->current_dma_page);
413
414 return dma_ptr;
415}
416
417static void savage_dma_flush(drm_savage_private_t *dev_priv)
418{
419 unsigned int first = dev_priv->first_dma_page;
420 unsigned int cur = dev_priv->current_dma_page;
421 uint16_t event;
422 unsigned int wrap, pad, align, len, i;
423 unsigned long phys_addr;
424 BCI_LOCALS;
425
426 if (first == cur &&
427 dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
428 return;
429
430 /* pad length to multiples of 2 entries;
431 * align start of next DMA block to multiples of 8 entries */
432 pad = -dev_priv->dma_pages[cur].used & 1;
433 align = -(dev_priv->dma_pages[cur].used + pad) & 7;
434
435 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
436 "pad=%u, align=%u\n",
437 first, cur, dev_priv->dma_pages[first].flushed,
438 dev_priv->dma_pages[cur].used, pad, align);
439
440 /* pad with noops */
441 if (pad) {
442 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
443 cur * SAVAGE_DMA_PAGE_SIZE +
444 dev_priv->dma_pages[cur].used;
445 dev_priv->dma_pages[cur].used += pad;
446 while(pad != 0) {
447 *dma_ptr++ = BCI_CMD_WAIT;
448 pad--;
449 }
450 }
451
452 DRM_MEMORYBARRIER();
453
454 /* do flush ... */
455 phys_addr = dev_priv->cmd_dma->offset +
456 (first * SAVAGE_DMA_PAGE_SIZE +
457 dev_priv->dma_pages[first].flushed) * 4;
458 len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
459 dev_priv->dma_pages[cur].used -
460 dev_priv->dma_pages[first].flushed;
461
462 DRM_DEBUG("phys_addr=%lx, len=%u\n",
463 phys_addr | dev_priv->dma_type, len);
464
465 BEGIN_BCI(3);
466 BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
467 BCI_WRITE(phys_addr | dev_priv->dma_type);
468 BCI_DMA(len);
469
470 /* fix alignment of the start of the next block */
471 dev_priv->dma_pages[cur].used += align;
472
473 /* age DMA pages */
474 event = savage_bci_emit_event(dev_priv, 0);
475 wrap = dev_priv->event_wrap;
476 for (i = first; i < cur; ++i) {
477 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
478 dev_priv->dma_pages[i].used = 0;
479 dev_priv->dma_pages[i].flushed = 0;
480 }
481 /* age the current page only when it's full */
482 if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
483 SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
484 dev_priv->dma_pages[cur].used = 0;
485 dev_priv->dma_pages[cur].flushed = 0;
486 /* advance to next page */
487 cur++;
488 if (cur == dev_priv->nr_dma_pages)
489 cur = 0;
490 dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
491 } else {
492 dev_priv->first_dma_page = cur;
493 dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
494 }
495 SET_AGE(&dev_priv->last_dma_age, event, wrap);
496
497 DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
498 dev_priv->dma_pages[cur].used,
499 dev_priv->dma_pages[cur].flushed);
500}
501
502static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
503{
504 unsigned int i, j;
505 BCI_LOCALS;
506
507 if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
508 dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
509 return;
510
511 DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
512 dev_priv->first_dma_page, dev_priv->current_dma_page,
513 dev_priv->dma_pages[dev_priv->current_dma_page].used);
514
515 for (i = dev_priv->first_dma_page;
516 i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
517 ++i) {
518 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
519 i * SAVAGE_DMA_PAGE_SIZE;
520#if SAVAGE_DMA_DEBUG
521 /* Sanity check: all pages except the last one must be full. */
522 if (i < dev_priv->current_dma_page &&
523 dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
524 DRM_ERROR("partial DMA page %u: used=%u",
525 i, dev_priv->dma_pages[i].used);
526 }
527#endif
528 BEGIN_BCI(dev_priv->dma_pages[i].used);
529 for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
530 BCI_WRITE(dma_ptr[j]);
531 }
532 dev_priv->dma_pages[i].used = 0;
533 }
534
535 /* reset to first page */
536 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
537}
538
539/*
540 * Initialize mappings. On Savage4 and SavageIX the alignment
541 * and size of the aperture is not suitable for automatic MTRR setup
542 * in drm_addmap. Therefore we do it manually before the maps are
543 * initialized. We also need to take care of deleting the MTRRs in
544 * postcleanup.
545 */
546int savage_preinit(drm_device_t *dev, unsigned long chipset)
547{
548 drm_savage_private_t *dev_priv;
549 unsigned long mmio_base, fb_base, fb_size, aperture_base;
550 /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
551 * in case we decide we need information on the BAR for BSD in the
552 * future.
553 */
554 unsigned int fb_rsrc, aper_rsrc;
555 int ret = 0;
556
557 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
558 if (dev_priv == NULL)
559 return DRM_ERR(ENOMEM);
560
561 memset(dev_priv, 0, sizeof(drm_savage_private_t));
562 dev->dev_private = (void *)dev_priv;
563 dev_priv->chipset = (enum savage_family)chipset;
564
565 dev_priv->mtrr[0].handle = -1;
566 dev_priv->mtrr[1].handle = -1;
567 dev_priv->mtrr[2].handle = -1;
568 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
569 fb_rsrc = 0;
570 fb_base = drm_get_resource_start(dev, 0);
571 fb_size = SAVAGE_FB_SIZE_S3;
572 mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
573 aper_rsrc = 0;
574 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
575 /* this should always be true */
576 if (drm_get_resource_len(dev, 0) == 0x08000000) {
577 /* Don't make MMIO write-combining! We need 3
578 * MTRRs. */
579 dev_priv->mtrr[0].base = fb_base;
580 dev_priv->mtrr[0].size = 0x01000000;
581 dev_priv->mtrr[0].handle = mtrr_add(
582 dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
583 MTRR_TYPE_WRCOMB, 1);
584 dev_priv->mtrr[1].base = fb_base+0x02000000;
585 dev_priv->mtrr[1].size = 0x02000000;
586 dev_priv->mtrr[1].handle = mtrr_add(
587 dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
588 MTRR_TYPE_WRCOMB, 1);
589 dev_priv->mtrr[2].base = fb_base+0x04000000;
590 dev_priv->mtrr[2].size = 0x04000000;
591 dev_priv->mtrr[2].handle = mtrr_add(
592 dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
593 MTRR_TYPE_WRCOMB, 1);
594 } else {
595 DRM_ERROR("strange pci_resource_len %08lx\n",
596 drm_get_resource_len(dev, 0));
597 }
598 } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
599 mmio_base = drm_get_resource_start(dev, 0);
600 fb_rsrc = 1;
601 fb_base = drm_get_resource_start(dev, 1);
602 fb_size = SAVAGE_FB_SIZE_S4;
603 aper_rsrc = 1;
604 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
605 /* this should always be true */
606 if (drm_get_resource_len(dev, 1) == 0x08000000) {
607 /* Can use one MTRR to cover both fb and
608 * aperture. */
609 dev_priv->mtrr[0].base = fb_base;
610 dev_priv->mtrr[0].size = 0x08000000;
611 dev_priv->mtrr[0].handle = mtrr_add(
612 dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
613 MTRR_TYPE_WRCOMB, 1);
614 } else {
615 DRM_ERROR("strange pci_resource_len %08lx\n",
616 drm_get_resource_len(dev, 1));
617 }
618 } else {
619 mmio_base = drm_get_resource_start(dev, 0);
620 fb_rsrc = 1;
621 fb_base = drm_get_resource_start(dev, 1);
622 fb_size = drm_get_resource_len(dev, 1);
623 aper_rsrc = 2;
624 aperture_base = drm_get_resource_start(dev, 2);
625 /* Automatic MTRR setup will do the right thing. */
626 }
627
628 ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
629 _DRM_READ_ONLY, &dev_priv->mmio);
630 if (ret)
631 return ret;
632
633 ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
634 _DRM_WRITE_COMBINING, &dev_priv->fb);
635 if (ret)
636 return ret;
637
638 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
639 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
640 &dev_priv->aperture);
641 if (ret)
642 return ret;
643
644 return ret;
645}
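As the comment before savage_preinit explains, the Savage3D branch above sets up three separate write-combining ranges because MTRRs must be power-of-two sized and aligned to their size, and a single range covering the whole 128MB BAR would also cover the MMIO window at offset 16MB. A minimal standalone sketch (not driver code, assuming a BAR aligned to its 128MB size) that checks those constraints for the offsets used above:

#include <assert.h>

int main(void)
{
	/* offsets and sizes relative to fb_base, as set up in savage_preinit:
	 * 0-16MB frame buffer, 32-64MB and 64-128MB covering the tiled
	 * aperture, leaving the 16-32MB MMIO window without write combining */
	const unsigned long base[3] = { 0x00000000, 0x02000000, 0x04000000 };
	const unsigned long size[3] = { 0x01000000, 0x02000000, 0x04000000 };
	int i;

	for (i = 0; i < 3; i++) {
		assert((size[i] & (size[i] - 1)) == 0);	/* power-of-two size */
		assert((base[i] & (size[i] - 1)) == 0);	/* aligned to its size */
	}
	return 0;
}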
646
647/*
648 * Delete MTRRs and free device-private data.
649 */
650int savage_postcleanup(drm_device_t *dev)
651{
652 drm_savage_private_t *dev_priv = dev->dev_private;
653 int i;
654
655 for (i = 0; i < 3; ++i)
656 if (dev_priv->mtrr[i].handle >= 0)
657 mtrr_del(dev_priv->mtrr[i].handle,
658 dev_priv->mtrr[i].base,
659 dev_priv->mtrr[i].size);
660
661 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
662
663 return 0;
664}
665
666static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
667{
668 drm_savage_private_t *dev_priv = dev->dev_private;
669
670 if (init->fb_bpp != 16 && init->fb_bpp != 32) {
671 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
672 return DRM_ERR(EINVAL);
673 }
674 if (init->depth_bpp != 16 && init->depth_bpp != 32) {
675 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
676 return DRM_ERR(EINVAL);
677 }
678 if (init->dma_type != SAVAGE_DMA_AGP &&
679 init->dma_type != SAVAGE_DMA_PCI) {
680 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
681 return DRM_ERR(EINVAL);
682 }
683
684 dev_priv->cob_size = init->cob_size;
685 dev_priv->bci_threshold_lo = init->bci_threshold_lo;
686 dev_priv->bci_threshold_hi = init->bci_threshold_hi;
687 dev_priv->dma_type = init->dma_type;
688
689 dev_priv->fb_bpp = init->fb_bpp;
690 dev_priv->front_offset = init->front_offset;
691 dev_priv->front_pitch = init->front_pitch;
692 dev_priv->back_offset = init->back_offset;
693 dev_priv->back_pitch = init->back_pitch;
694 dev_priv->depth_bpp = init->depth_bpp;
695 dev_priv->depth_offset = init->depth_offset;
696 dev_priv->depth_pitch = init->depth_pitch;
697
698 dev_priv->texture_offset = init->texture_offset;
699 dev_priv->texture_size = init->texture_size;
700
701 DRM_GETSAREA();
702 if (!dev_priv->sarea) {
703 DRM_ERROR("could not find sarea!\n");
704 savage_do_cleanup_bci(dev);
705 return DRM_ERR(EINVAL);
706 }
707 if (init->status_offset != 0) {
708 dev_priv->status = drm_core_findmap(dev, init->status_offset);
709 if (!dev_priv->status) {
710 DRM_ERROR("could not find shadow status region!\n");
711 savage_do_cleanup_bci(dev);
712 return DRM_ERR(EINVAL);
713 }
714 } else {
715 dev_priv->status = NULL;
716 }
717 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
718 dev->agp_buffer_map = drm_core_findmap(dev,
719 init->buffers_offset);
720 if (!dev->agp_buffer_map) {
721 DRM_ERROR("could not find DMA buffer region!\n");
722 savage_do_cleanup_bci(dev);
723 return DRM_ERR(EINVAL);
724 }
725 drm_core_ioremap(dev->agp_buffer_map, dev);
726 if (!dev->agp_buffer_map->handle) {
727 DRM_ERROR("failed to ioremap DMA buffer region!\n");
728 savage_do_cleanup_bci(dev);
729 return DRM_ERR(ENOMEM);
730 }
731 }
732 if (init->agp_textures_offset) {
733 dev_priv->agp_textures =
734 drm_core_findmap(dev, init->agp_textures_offset);
735 if (!dev_priv->agp_textures) {
736 DRM_ERROR("could not find agp texture region!\n");
737 savage_do_cleanup_bci(dev);
738 return DRM_ERR(EINVAL);
739 }
740 } else {
741 dev_priv->agp_textures = NULL;
742 }
743
744 if (init->cmd_dma_offset) {
745 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
746 DRM_ERROR("command DMA not supported on "
747 "Savage3D/MX/IX.\n");
748 savage_do_cleanup_bci(dev);
749 return DRM_ERR(EINVAL);
750 }
751 if (dev->dma && dev->dma->buflist) {
752 DRM_ERROR("command and vertex DMA not supported "
753 "at the same time.\n");
754 savage_do_cleanup_bci(dev);
755 return DRM_ERR(EINVAL);
756 }
757 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
758 if (!dev_priv->cmd_dma) {
759 DRM_ERROR("could not find command DMA region!\n");
760 savage_do_cleanup_bci(dev);
761 return DRM_ERR(EINVAL);
762 }
763 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
764 if (dev_priv->cmd_dma->type != _DRM_AGP) {
765 DRM_ERROR("AGP command DMA region is not a "
766 "_DRM_AGP map!\n");
767 savage_do_cleanup_bci(dev);
768 return DRM_ERR(EINVAL);
769 }
770 drm_core_ioremap(dev_priv->cmd_dma, dev);
771 if (!dev_priv->cmd_dma->handle) {
772 DRM_ERROR("failed to ioremap command "
773 "DMA region!\n");
774 savage_do_cleanup_bci(dev);
775 return DRM_ERR(ENOMEM);
776 }
777 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
778 DRM_ERROR("PCI command DMA region is not a "
779 "_DRM_CONSISTENT map!\n");
780 savage_do_cleanup_bci(dev);
781 return DRM_ERR(EINVAL);
782 }
783 } else {
784 dev_priv->cmd_dma = NULL;
785 }
786
787 dev_priv->dma_flush = savage_dma_flush;
788 if (!dev_priv->cmd_dma) {
789 DRM_DEBUG("falling back to faked command DMA.\n");
790 dev_priv->fake_dma.offset = 0;
791 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
792 dev_priv->fake_dma.type = _DRM_SHM;
793 dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
794 DRM_MEM_DRIVER);
795 if (!dev_priv->fake_dma.handle) {
796 DRM_ERROR("could not allocate faked DMA buffer!\n");
797 savage_do_cleanup_bci(dev);
798 return DRM_ERR(ENOMEM);
799 }
800 dev_priv->cmd_dma = &dev_priv->fake_dma;
801 dev_priv->dma_flush = savage_fake_dma_flush;
802 }
803
804 dev_priv->sarea_priv =
805 (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
806 init->sarea_priv_offset);
807
808 /* setup bitmap descriptors */
809 {
810 unsigned int color_tile_format;
811 unsigned int depth_tile_format;
812 unsigned int front_stride, back_stride, depth_stride;
813 if (dev_priv->chipset <= S3_SAVAGE4) {
814 color_tile_format = dev_priv->fb_bpp == 16 ?
815 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
816 depth_tile_format = dev_priv->depth_bpp == 16 ?
817 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
818 } else {
819 color_tile_format = SAVAGE_BD_TILE_DEST;
820 depth_tile_format = SAVAGE_BD_TILE_DEST;
821 }
822 front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
823 back_stride = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
824 depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
825
826 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
827 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
828 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
829
830 dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
831 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
832 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
833
834 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
835 (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
836 (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
837 }
838
839 /* setup status and bci ptr */
840 dev_priv->event_counter = 0;
841 dev_priv->event_wrap = 0;
842 dev_priv->bci_ptr = (volatile uint32_t *)
843 ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
844 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
845 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
846 } else {
847 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
848 }
849 if (dev_priv->status != NULL) {
850 dev_priv->status_ptr =
851 (volatile uint32_t *)dev_priv->status->handle;
852 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
853 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
854 dev_priv->status_ptr[1023] = dev_priv->event_counter;
855 } else {
856 dev_priv->status_ptr = NULL;
857 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
858 dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
859 } else {
860 dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
861 }
862 dev_priv->wait_evnt = savage_bci_wait_event_reg;
863 }
864
865 /* cliprect functions */
866 if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
867 dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
868 else
869 dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
870
871 if (savage_freelist_init(dev) < 0) {
872 DRM_ERROR("could not initialize freelist\n");
873 savage_do_cleanup_bci(dev);
874 return DRM_ERR(ENOMEM);
875 }
876
877 if (savage_dma_init(dev_priv) < 0) {
878 DRM_ERROR("could not initialize command DMA\n");
879 savage_do_cleanup_bci(dev);
880 return DRM_ERR(ENOMEM);
881 }
882
883 return 0;
884}
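For reference, the bitmap-descriptor packing in the block above works out as follows for a hypothetical 16 bpp front buffer with a 2048-byte pitch on a Savage4-class chip; this standalone sketch inlines the SAVAGE_BD_* constants from savage_drv.h and is not part of the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stride = 2048 / (16 / 8);	/* pitch / bytes per pixel = 1024 */
	uint32_t bd = stride			/* SAVAGE_BD_STRIDE_SHIFT == 0 */
		| (1u << 28)			/* SAVAGE_BD_BW_DISABLE */
		| (16u << 16)			/* bpp << SAVAGE_BD_BPP_SHIFT */
		| (2u << 24);			/* SAVAGE_BD_TILE_16BPP << SAVAGE_BD_TILE_SHIFT */

	printf("front_bd = 0x%08x\n", (unsigned int)bd);	/* prints 0x12100400 */
	return 0;
}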
885
886int savage_do_cleanup_bci(drm_device_t *dev)
887{
888 drm_savage_private_t *dev_priv = dev->dev_private;
889
890 if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
891 if (dev_priv->fake_dma.handle)
892 drm_free(dev_priv->fake_dma.handle,
893 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
894 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
895 dev_priv->cmd_dma->type == _DRM_AGP &&
896 dev_priv->dma_type == SAVAGE_DMA_AGP)
897 drm_core_ioremapfree(dev_priv->cmd_dma, dev);
898
899 if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
900 dev->agp_buffer_map && dev->agp_buffer_map->handle) {
901 drm_core_ioremapfree(dev->agp_buffer_map, dev);
902 /* make sure the next instance (which may be running
903 * in PCI mode) doesn't try to use an old
904 * agp_buffer_map. */
905 dev->agp_buffer_map = NULL;
906 }
907
908 if (dev_priv->dma_pages)
909 drm_free(dev_priv->dma_pages,
910 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
911 DRM_MEM_DRIVER);
912
913 return 0;
914}
915
916static int savage_bci_init(DRM_IOCTL_ARGS)
917{
918 DRM_DEVICE;
919 drm_savage_init_t init;
920
921 LOCK_TEST_WITH_RETURN(dev, filp);
922
923 DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
924 sizeof(init));
925
926 switch (init.func) {
927 case SAVAGE_INIT_BCI:
928 return savage_do_init_bci(dev, &init);
929 case SAVAGE_CLEANUP_BCI:
930 return savage_do_cleanup_bci(dev);
931 }
932
933 return DRM_ERR(EINVAL);
934}
935
936static int savage_bci_event_emit(DRM_IOCTL_ARGS)
937{
938 DRM_DEVICE;
939 drm_savage_private_t *dev_priv = dev->dev_private;
940 drm_savage_event_emit_t event;
941
942 DRM_DEBUG("\n");
943
944 LOCK_TEST_WITH_RETURN(dev, filp);
945
946 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
947 sizeof(event));
948
949 event.count = savage_bci_emit_event(dev_priv, event.flags);
950 event.count |= dev_priv->event_wrap << 16;
951 DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
952 event.count, sizeof(event.count));
953 return 0;
954}
955
956static int savage_bci_event_wait(DRM_IOCTL_ARGS)
957{
958 DRM_DEVICE;
959 drm_savage_private_t *dev_priv = dev->dev_private;
960 drm_savage_event_wait_t event;
961 unsigned int event_e, hw_e;
962 unsigned int event_w, hw_w;
963
964 DRM_DEBUG("\n");
965
966 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
967 sizeof(event));
968
969 UPDATE_EVENT_COUNTER();
970 if (dev_priv->status_ptr)
971 hw_e = dev_priv->status_ptr[1] & 0xffff;
972 else
973 hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
974 hw_w = dev_priv->event_wrap;
975 if (hw_e > dev_priv->event_counter)
976 hw_w--; /* hardware hasn't passed the last wrap yet */
977
978 event_e = event.count & 0xffff;
979 event_w = event.count >> 16;
980
981 /* Don't need to wait if
982 * - event counter wrapped since the event was emitted or
983 * - the hardware has advanced up to or over the event to wait for.
984 */
985 if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
986 return 0;
987 else
988 return dev_priv->wait_evnt(dev_priv, event_e);
989}
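The early-out above compares (wrap, event) pairs lexicographically so that the 16-bit hardware event tag behaves like a much wider monotonic counter. A minimal standalone sketch of that comparison (helper name and sample values are illustrative only):

#include <stdio.h>

/* Mirrors the "don't need to wait" test above: the event has been passed
 * if its wrap count is older than the hardware's, or the wrap counts match
 * and the hardware's 16-bit event tag has reached it. */
static int savage_event_passed(unsigned int event_e, unsigned int event_w,
			       unsigned int hw_e, unsigned int hw_w)
{
	return event_w < hw_w || (event_w == hw_w && event_e <= hw_e);
}

int main(void)
{
	/* emitted as event 0xfff0 in wrap 2, hardware now at 0x0005 in wrap 3 */
	printf("%d\n", savage_event_passed(0xfff0, 2, 0x0005, 3));	/* 1 */
	/* emitted as event 0x0100 in wrap 3, hardware still at 0x0050 in wrap 3 */
	printf("%d\n", savage_event_passed(0x0100, 3, 0x0050, 3));	/* 0 */
	return 0;
}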
990
991/*
992 * DMA buffer management
993 */
994
995static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
996{
997 drm_buf_t *buf;
998 int i;
999
1000 for (i = d->granted_count; i < d->request_count; i++) {
1001 buf = savage_freelist_get(dev);
1002 if (!buf)
1003 return DRM_ERR(EAGAIN);
1004
1005 buf->filp = filp;
1006
1007 if (DRM_COPY_TO_USER(&d->request_indices[i],
1008 &buf->idx, sizeof(buf->idx)))
1009 return DRM_ERR(EFAULT);
1010 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1011 &buf->total, sizeof(buf->total)))
1012 return DRM_ERR(EFAULT);
1013
1014 d->granted_count++;
1015 }
1016 return 0;
1017}
1018
1019int savage_bci_buffers(DRM_IOCTL_ARGS)
1020{
1021 DRM_DEVICE;
1022 drm_device_dma_t *dma = dev->dma;
1023 drm_dma_t d;
1024 int ret = 0;
1025
1026 LOCK_TEST_WITH_RETURN(dev, filp);
1027
1028 DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
1029
1030 /* Please don't send us buffers.
1031 */
1032 if (d.send_count != 0) {
1033 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1034 DRM_CURRENTPID, d.send_count);
1035 return DRM_ERR(EINVAL);
1036 }
1037
1038 /* We'll send you buffers.
1039 */
1040 if (d.request_count < 0 || d.request_count > dma->buf_count) {
1041 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1042 DRM_CURRENTPID, d.request_count, dma->buf_count);
1043 return DRM_ERR(EINVAL);
1044 }
1045
1046 d.granted_count = 0;
1047
1048 if (d.request_count) {
1049 ret = savage_bci_get_buffers(filp, dev, &d);
1050 }
1051
1052 DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
1053
1054 return ret;
1055}
1056
1057void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
1058 drm_device_dma_t *dma = dev->dma;
1059 drm_savage_private_t *dev_priv = dev->dev_private;
1060 int i;
1061
1062 if (!dma)
1063 return;
1064 if (!dev_priv)
1065 return;
1066 if (!dma->buflist)
1067 return;
1068
1069 /*i830_flush_queue(dev);*/
1070
1071 for (i = 0; i < dma->buf_count; i++) {
1072 drm_buf_t *buf = dma->buflist[i];
1073 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1074
1075 if (buf->filp == filp && buf_priv &&
1076 buf_priv->next == NULL && buf_priv->prev == NULL) {
1077 uint16_t event;
1078 DRM_DEBUG("reclaimed from client\n");
1079 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1080 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1081 savage_freelist_put(dev, buf);
1082 }
1083 }
1084
1085 drm_core_reclaim_buffers(dev, filp);
1086}
1087
1088
1089drm_ioctl_desc_t savage_ioctls[] = {
1090 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
1091 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
1092 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
1093 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
1094};
1095
1096int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
new file mode 100644
index 000000000000..6526c9aa7589
--- /dev/null
+++ b/drivers/char/drm/savage_drm.h
@@ -0,0 +1,209 @@
1/* savage_drm.h -- Public header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__
28
29#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__
31
32/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k.
34 *
35 * Clients may subdivide regions internally, but when sharing between
36 * clients, the region size is the minimum granularity.
37 */
38
39#define SAVAGE_CARD_HEAP 0
40#define SAVAGE_AGP_HEAP 1
41#define SAVAGE_NR_TEX_HEAPS 2
42#define SAVAGE_NR_TEX_REGIONS 16
43#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
44
45#endif /* __SAVAGE_SAREA_DEFINES__ */
46
47typedef struct _drm_savage_sarea {
48 /* LRU lists for texture memory in agp space and on the card.
49 */
50 drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
51 unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
52
53 /* Mechanism to validate card state.
54 */
55 int ctxOwner;
56} drm_savage_sarea_t, *drm_savage_sarea_ptr;
57
58/* Savage-specific ioctls
59 */
60#define DRM_SAVAGE_BCI_INIT 0x00
61#define DRM_SAVAGE_BCI_CMDBUF 0x01
62#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
63#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
64
65#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
66#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
67#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
68#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
69
70#define SAVAGE_DMA_PCI 1
71#define SAVAGE_DMA_AGP 3
72typedef struct drm_savage_init {
73 enum {
74 SAVAGE_INIT_BCI = 1,
75 SAVAGE_CLEANUP_BCI = 2
76 } func;
77 unsigned int sarea_priv_offset;
78
79 /* some parameters */
80 unsigned int cob_size;
81 unsigned int bci_threshold_lo, bci_threshold_hi;
82 unsigned int dma_type;
83
84 /* frame buffer layout */
85 unsigned int fb_bpp;
86 unsigned int front_offset, front_pitch;
87 unsigned int back_offset, back_pitch;
88 unsigned int depth_bpp;
89 unsigned int depth_offset, depth_pitch;
90
91 /* local textures */
92 unsigned int texture_offset;
93 unsigned int texture_size;
94
95 /* physical locations of non-permanent maps */
96 unsigned long status_offset;
97 unsigned long buffers_offset;
98 unsigned long agp_textures_offset;
99 unsigned long cmd_dma_offset;
100} drm_savage_init_t;
101
102typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
103typedef struct drm_savage_cmdbuf {
104 /* command buffer in client's address space */
105 drm_savage_cmd_header_t __user *cmd_addr;
106 unsigned int size; /* size of the command buffer in 64bit units */
107
108 unsigned int dma_idx; /* DMA buffer index to use */
109 int discard; /* discard DMA buffer when done */
110 /* vertex buffer in client's address space */
111 unsigned int __user *vb_addr;
112 unsigned int vb_size; /* size of client vertex buffer in bytes */
113 unsigned int vb_stride; /* stride of vertices in 32bit words */
114 /* boxes in client's address space */
115 drm_clip_rect_t __user *box_addr;
116 unsigned int nbox; /* number of clipping boxes */
117} drm_savage_cmdbuf_t;
118
119#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
120#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
121#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
122typedef struct drm_savage_event {
123 unsigned int count;
124 unsigned int flags;
125} drm_savage_event_emit_t, drm_savage_event_wait_t;
126
127/* Commands for the cmdbuf ioctl
128 */
129#define SAVAGE_CMD_STATE 0 /* a range of state registers */
130#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
131#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
132#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
133#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
134#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
135#define SAVAGE_CMD_SWAP 6 /* swap buffers */
136
137/* Primitive types
138*/
139#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
140#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
141#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
142#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
143 * shading on s3d */
144
145/* Skip flags (vertex format)
146 */
147#define SAVAGE_SKIP_Z 0x01
148#define SAVAGE_SKIP_W 0x02
149#define SAVAGE_SKIP_C0 0x04
150#define SAVAGE_SKIP_C1 0x08
151#define SAVAGE_SKIP_S0 0x10
152#define SAVAGE_SKIP_T0 0x20
153#define SAVAGE_SKIP_ST0 0x30
154#define SAVAGE_SKIP_S1 0x40
155#define SAVAGE_SKIP_T1 0x80
156#define SAVAGE_SKIP_ST1 0xc0
157#define SAVAGE_SKIP_ALL_S3D 0x3f
158#define SAVAGE_SKIP_ALL_S4 0xff
159
160/* Buffer names for clear command
161 */
162#define SAVAGE_FRONT 0x1
163#define SAVAGE_BACK 0x2
164#define SAVAGE_DEPTH 0x4
165
166/* 64-bit command header
167 */
168union drm_savage_cmd_header {
169 struct {
170 unsigned char cmd; /* command */
171 unsigned char pad0;
172 unsigned short pad1;
173 unsigned short pad2;
174 unsigned short pad3;
175 } cmd; /* generic */
176 struct {
177 unsigned char cmd;
178 unsigned char global; /* need idle engine? */
179 unsigned short count; /* number of consecutive registers */
180 unsigned short start; /* first register */
181 unsigned short pad3;
182 } state; /* SAVAGE_CMD_STATE */
183 struct {
184 unsigned char cmd;
185 unsigned char prim; /* primitive type */
186 unsigned short skip; /* vertex format (skip flags) */
187 unsigned short count; /* number of vertices */
188 unsigned short start; /* first vertex in DMA/vertex buffer */
189 } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
190 struct {
191 unsigned char cmd;
192 unsigned char prim;
193 unsigned short skip;
194 unsigned short count; /* number of indices that follow */
195 unsigned short pad3;
196 } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
197 struct {
198 unsigned char cmd;
199 unsigned char pad0;
200 unsigned short pad1;
201 unsigned int flags;
202 } clear0; /* SAVAGE_CMD_CLEAR */
203 struct {
204 unsigned int mask;
205 unsigned int value;
206 } clear1; /* SAVAGE_CMD_CLEAR data */
207};
208
209#endif
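A hypothetical client-side sketch of filling the command header union above for a small triangle-list draw from a client vertex buffer; the helper name and values are chosen purely for illustration and assume the definitions from this header:

#include <string.h>

static union drm_savage_cmd_header make_vb_trilist_header(void)
{
	union drm_savage_cmd_header hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.prim.cmd = SAVAGE_CMD_VB_PRIM;	/* vertices from client buffer */
	hdr.prim.prim = SAVAGE_PRIM_TRILIST;
	hdr.prim.skip = 0;			/* full vertex format, nothing skipped */
	hdr.prim.count = 3;			/* one triangle */
	hdr.prim.start = 0;			/* first vertex in the buffer */
	return hdr;
}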
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c
new file mode 100644
index 000000000000..ac8d270427ca
--- /dev/null
+++ b/drivers/char/drm/savage_drv.c
@@ -0,0 +1,112 @@
1/* savage_drv.c -- Savage driver for Linux
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/config.h>
27#include "drmP.h"
28#include "savage_drm.h"
29#include "savage_drv.h"
30
31#include "drm_pciids.h"
32
33static int postinit( struct drm_device *dev, unsigned long flags )
34{
35 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
36 DRIVER_NAME,
37 DRIVER_MAJOR,
38 DRIVER_MINOR,
39 DRIVER_PATCHLEVEL,
40 DRIVER_DATE,
41 dev->primary.minor,
42 pci_pretty_name(dev->pdev)
43 );
44 return 0;
45}
46
47static int version( drm_version_t *version )
48{
49 int len;
50
51 version->version_major = DRIVER_MAJOR;
52 version->version_minor = DRIVER_MINOR;
53 version->version_patchlevel = DRIVER_PATCHLEVEL;
54 DRM_COPY( version->name, DRIVER_NAME );
55 DRM_COPY( version->date, DRIVER_DATE );
56 DRM_COPY( version->desc, DRIVER_DESC );
57 return 0;
58}
59
60static struct pci_device_id pciidlist[] = {
61 savage_PCI_IDS
62};
63
64extern drm_ioctl_desc_t savage_ioctls[];
65extern int savage_max_ioctl;
66
67static struct drm_driver driver = {
68 .driver_features =
69 DRIVER_USE_AGP | DRIVER_USE_MTRR |
70 DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
71 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
72 .preinit = savage_preinit,
73 .postinit = postinit,
74 .postcleanup = savage_postcleanup,
75 .reclaim_buffers = savage_reclaim_buffers,
76 .get_map_ofs = drm_core_get_map_ofs,
77 .get_reg_ofs = drm_core_get_reg_ofs,
78 .version = version,
79 .ioctls = savage_ioctls,
80 .dma_ioctl = savage_bci_buffers,
81 .fops = {
82 .owner = THIS_MODULE,
83 .open = drm_open,
84 .release = drm_release,
85 .ioctl = drm_ioctl,
86 .mmap = drm_mmap,
87 .poll = drm_poll,
88 .fasync = drm_fasync,
89 },
90 .pci_driver = {
91 .name = DRIVER_NAME,
92 .id_table = pciidlist,
93 }
94};
95
96static int __init savage_init(void)
97{
98 driver.num_ioctls = savage_max_ioctl;
99 return drm_init(&driver);
100}
101
102static void __exit savage_exit(void)
103{
104 drm_exit(&driver);
105}
106
107module_init(savage_init);
108module_exit(savage_exit);
109
110MODULE_AUTHOR( DRIVER_AUTHOR );
111MODULE_DESCRIPTION( DRIVER_DESC );
112MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
new file mode 100644
index 000000000000..a45434944658
--- /dev/null
+++ b/drivers/char/drm/savage_drv.h
@@ -0,0 +1,579 @@
1/* savage_drv.h -- Private header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRV_H__
27#define __SAVAGE_DRV_H__
28
29#define DRIVER_AUTHOR "Felix Kuehling"
30
31#define DRIVER_NAME "savage"
32#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
33#define DRIVER_DATE "20050313"
34
35#define DRIVER_MAJOR 2
36#define DRIVER_MINOR 4
37#define DRIVER_PATCHLEVEL 1
38/* Interface history:
39 *
40 * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
41 * 2.0 The first real DRM
42 * 2.1 Scissors registers managed by the DRM, 3D operations clipped by
43 * cliprects of the cmdbuf ioctl
44 * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
45 * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
46 * wide and thus very long lived (unlikely to ever wrap). The size
47 * in the struct was 32 bits before, but only 16 bits were used
48 * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
49 * actually used
50 */
51
52typedef struct drm_savage_age {
53 uint16_t event;
54 unsigned int wrap;
55} drm_savage_age_t;
56
57typedef struct drm_savage_buf_priv {
58 struct drm_savage_buf_priv *next;
59 struct drm_savage_buf_priv *prev;
60 drm_savage_age_t age;
61 drm_buf_t *buf;
62} drm_savage_buf_priv_t;
63
64typedef struct drm_savage_dma_page {
65 drm_savage_age_t age;
66 unsigned int used, flushed;
67} drm_savage_dma_page_t;
68#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
69/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
70 * size of 16kbytes or 4k entries. Minimum requirement would be
71 * 10kbytes for 255 40-byte vertices in one drawing command. */
72#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
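A quick standalone check of the sizing arithmetic claimed in the comment above (constants duplicated locally for illustration; not driver code):

#include <assert.h>

#define DMA_PAGE_SIZE	1024			/* dwords, as SAVAGE_DMA_PAGE_SIZE */
#define FAKE_DMA_SIZE	(DMA_PAGE_SIZE*4*4)	/* 4 pages, 4 bytes per dword */

int main(void)
{
	assert(FAKE_DMA_SIZE == 16384);		/* 16 kbytes */
	assert(FAKE_DMA_SIZE / 4 == 4096);	/* 4k dword entries */
	assert(255 * 40 <= FAKE_DMA_SIZE);	/* covers the 10200-byte worst case */
	return 0;
}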
73
74/* interesting bits of hardware state that are saved in dev_priv */
75typedef union {
76 struct drm_savage_common_state {
77 uint32_t vbaddr;
78 } common;
79 struct {
80 unsigned char pad[sizeof(struct drm_savage_common_state)];
81 uint32_t texctrl, texaddr;
82 uint32_t scstart, new_scstart;
83 uint32_t scend, new_scend;
84 } s3d;
85 struct {
86 unsigned char pad[sizeof(struct drm_savage_common_state)];
87 uint32_t texdescr, texaddr0, texaddr1;
88 uint32_t drawctrl0, new_drawctrl0;
89 uint32_t drawctrl1, new_drawctrl1;
90 } s4;
91} drm_savage_state_t;
92
93/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
94enum savage_family {
95 S3_UNKNOWN = 0,
96 S3_SAVAGE3D,
97 S3_SAVAGE_MX,
98 S3_SAVAGE4,
99 S3_PROSAVAGE,
100 S3_TWISTER,
101 S3_PROSAVAGEDDR,
102 S3_SUPERSAVAGE,
103 S3_SAVAGE2000,
104 S3_LAST
105};
106
107#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
108
109#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
110 || (chip==S3_PROSAVAGE) \
111 || (chip==S3_TWISTER) \
112 || (chip==S3_PROSAVAGEDDR))
113
114#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
115
116#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
117
118#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
119 ||(chip==S3_PROSAVAGEDDR))
120
121/* flags */
122#define SAVAGE_IS_AGP 1
123
124typedef struct drm_savage_private {
125 drm_savage_sarea_t *sarea_priv;
126
127 drm_savage_buf_priv_t head, tail;
128
129 /* who am I? */
130 enum savage_family chipset;
131
132 unsigned int cob_size;
133 unsigned int bci_threshold_lo, bci_threshold_hi;
134 unsigned int dma_type;
135
136 /* frame buffer layout */
137 unsigned int fb_bpp;
138 unsigned int front_offset, front_pitch;
139 unsigned int back_offset, back_pitch;
140 unsigned int depth_bpp;
141 unsigned int depth_offset, depth_pitch;
142
143 /* bitmap descriptors for swap and clear */
144 unsigned int front_bd, back_bd, depth_bd;
145
146 /* local textures */
147 unsigned int texture_offset;
148 unsigned int texture_size;
149
150 /* memory regions in physical memory */
151 drm_local_map_t *sarea;
152 drm_local_map_t *mmio;
153 drm_local_map_t *fb;
154 drm_local_map_t *aperture;
155 drm_local_map_t *status;
156 drm_local_map_t *agp_textures;
157 drm_local_map_t *cmd_dma;
158 drm_local_map_t fake_dma;
159
160 struct {
161 int handle;
162 unsigned long base, size;
163 } mtrr[3];
164
165 /* BCI and status-related stuff */
166 volatile uint32_t *status_ptr, *bci_ptr;
167 uint32_t status_used_mask;
168 uint16_t event_counter;
169 unsigned int event_wrap;
170
171 /* Savage4 command DMA */
172 drm_savage_dma_page_t *dma_pages;
173 unsigned int nr_dma_pages, first_dma_page, current_dma_page;
174 drm_savage_age_t last_dma_age;
175
176 /* saved hw state for global/local check on S3D */
177 uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
178 /* and for scissors (global, so don't emit if not changed) */
179 uint32_t hw_scissors_start, hw_scissors_end;
180
181 drm_savage_state_t state;
182
183 /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
184 unsigned int waiting;
185
186 /* config/hardware-dependent function pointers */
187 int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
188 int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
189 /* Err, there is a macro wait_event in include/linux/wait.h.
190 * Avoid unwanted macro expansion. */
191 void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
192 drm_clip_rect_t *pbox);
193 void (*dma_flush)(struct drm_savage_private *dev_priv);
194} drm_savage_private_t;
195
196/* ioctls */
197extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
198extern int savage_bci_buffers(DRM_IOCTL_ARGS);
199
200/* BCI functions */
201extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
202 unsigned int flags);
203extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
204extern void savage_dma_reset(drm_savage_private_t *dev_priv);
205extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
206extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
207 unsigned int n);
208extern int savage_preinit(drm_device_t *dev, unsigned long chipset);
209extern int savage_postcleanup(drm_device_t *dev);
210extern int savage_do_cleanup_bci(drm_device_t *dev);
211extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
212
213/* state functions */
214extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
215 drm_clip_rect_t *pbox);
216extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
217 drm_clip_rect_t *pbox);
218
219#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
220#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
221#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
222#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
223#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
224
225#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
226 * inside the MMIO region */
227#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
228 * BCI FIFO */
229
230/*
231 * MMIO registers
232 */
233#define SAVAGE_STATUS_WORD0 0x48C00
234#define SAVAGE_STATUS_WORD1 0x48C04
235#define SAVAGE_ALT_STATUS_WORD0 0x48C60
236
237#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
238#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
239
240/* Copied from savage_bci.h in the 2D driver with some renaming. */
241
242/* Bitmap descriptors */
243#define SAVAGE_BD_STRIDE_SHIFT 0
244#define SAVAGE_BD_BPP_SHIFT 16
245#define SAVAGE_BD_TILE_SHIFT 24
246#define SAVAGE_BD_BW_DISABLE (1<<28)
247/* common: */
248#define SAVAGE_BD_TILE_LINEAR 0
249/* savage4, MX, IX, 3D */
250#define SAVAGE_BD_TILE_16BPP 2
251#define SAVAGE_BD_TILE_32BPP 3
252/* twister, prosavage, DDR, supersavage, 2000 */
253#define SAVAGE_BD_TILE_DEST 1
254#define SAVAGE_BD_TILE_TEXTURE 2
255/* GBD - BCI enable */
256/* savage4, MX, IX, 3D */
257#define SAVAGE_GBD_BCI_ENABLE 8
258/* twister, prosavage, DDR, supersavage, 2000 */
259#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
260
261#define SAVAGE_GBD_BIG_ENDIAN 4
262#define SAVAGE_GBD_LITTLE_ENDIAN 0
263#define SAVAGE_GBD_64 1
264
265/* Global Bitmap Descriptor */
266#define SAVAGE_BCI_GLB_BD_LOW 0x8168
267#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
268
269/*
270 * BCI registers
271 */
272/* Savage4/Twister/ProSavage 3D registers */
273#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
274#define SAVAGE_TEXPALADDR_S4 0x1f
275#define SAVAGE_TEXCTRL0_S4 0x20
276#define SAVAGE_TEXCTRL1_S4 0x21
277#define SAVAGE_TEXADDR0_S4 0x22
278#define SAVAGE_TEXADDR1_S4 0x23
279#define SAVAGE_TEXBLEND0_S4 0x24
280#define SAVAGE_TEXBLEND1_S4 0x25
281#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
282#define SAVAGE_TEXDESCR_S4 0x27
283#define SAVAGE_FOGTABLE_S4 0x28
284#define SAVAGE_FOGCTRL_S4 0x30
285#define SAVAGE_STENCILCTRL_S4 0x31
286#define SAVAGE_ZBUFCTRL_S4 0x32
287#define SAVAGE_ZBUFOFF_S4 0x33
288#define SAVAGE_DESTCTRL_S4 0x34
289#define SAVAGE_DRAWCTRL0_S4 0x35
290#define SAVAGE_DRAWCTRL1_S4 0x36
291#define SAVAGE_ZWATERMARK_S4 0x37
292#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
293#define SAVAGE_TEXBLENDCOLOR_S4 0x39
294/* Savage3D/MX/IX 3D registers */
295#define SAVAGE_TEXPALADDR_S3D 0x18
296#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
297#define SAVAGE_TEXADDR_S3D 0x1A
298#define SAVAGE_TEXDESCR_S3D 0x1B
299#define SAVAGE_TEXCTRL_S3D 0x1C
300#define SAVAGE_FOGTABLE_S3D 0x20
301#define SAVAGE_FOGCTRL_S3D 0x30
302#define SAVAGE_DRAWCTRL_S3D 0x31
303#define SAVAGE_ZBUFCTRL_S3D 0x32
304#define SAVAGE_ZBUFOFF_S3D 0x33
305#define SAVAGE_DESTCTRL_S3D 0x34
306#define SAVAGE_SCSTART_S3D 0x35
307#define SAVAGE_SCEND_S3D 0x36
308#define SAVAGE_ZWATERMARK_S3D 0x37
309#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
310/* common stuff */
311#define SAVAGE_VERTBUFADDR 0x3e
312#define SAVAGE_BITPLANEWTMASK 0xd7
313#define SAVAGE_DMABUFADDR 0x51
314
315/* texture enable bits (needed for tex addr checking) */
316#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
317#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
318#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
319
320/* Global fields in Savage4/Twister/ProSavage 3D registers:
321 *
322 * All texture registers and DrawLocalCtrl are local. All other
323 * registers are global. */
324
325/* Global fields in Savage3D/MX/IX 3D registers:
326 *
327 * All texture registers are local. DrawCtrl and ZBufCtrl are
328 * partially local. All other registers are global.
329 *
330 * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
331 * ZBufCtrl global fields: zCmpFunc, zBufEn
332 */
333#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
334#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
335
336/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
337 */
338#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
339#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
340
341/*
342 * BCI commands
343 */
344#define BCI_CMD_NOP 0x40000000
345#define BCI_CMD_RECT 0x48000000
346#define BCI_CMD_RECT_XP 0x01000000
347#define BCI_CMD_RECT_YP 0x02000000
348#define BCI_CMD_SCANLINE 0x50000000
349#define BCI_CMD_LINE 0x5C000000
350#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
351#define BCI_CMD_BYTE_TEXT 0x63000000
352#define BCI_CMD_NT_BYTE_TEXT 0x67000000
353#define BCI_CMD_BIT_TEXT 0x6C000000
354#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
355#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16))
356#define BCI_CMD_SEND_COLOR 0x00008000
357
358#define BCI_CMD_CLIP_NONE 0x00000000
359#define BCI_CMD_CLIP_CURRENT 0x00002000
360#define BCI_CMD_CLIP_LR 0x00004000
361#define BCI_CMD_CLIP_NEW 0x00006000
362
363#define BCI_CMD_DEST_GBD 0x00000000
364#define BCI_CMD_DEST_PBD 0x00000800
365#define BCI_CMD_DEST_PBD_NEW 0x00000C00
366#define BCI_CMD_DEST_SBD 0x00001000
367#define BCI_CMD_DEST_SBD_NEW 0x00001400
368
369#define BCI_CMD_SRC_TRANSPARENT 0x00000200
370#define BCI_CMD_SRC_SOLID 0x00000000
371#define BCI_CMD_SRC_GBD 0x00000020
372#define BCI_CMD_SRC_COLOR 0x00000040
373#define BCI_CMD_SRC_MONO 0x00000060
374#define BCI_CMD_SRC_PBD_COLOR 0x00000080
375#define BCI_CMD_SRC_PBD_MONO 0x000000A0
376#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
377#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
378#define BCI_CMD_SRC_SBD_COLOR 0x00000100
379#define BCI_CMD_SRC_SBD_MONO 0x00000120
380#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
381#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
382
383#define BCI_CMD_PAT_TRANSPARENT 0x00000010
384#define BCI_CMD_PAT_NONE 0x00000000
385#define BCI_CMD_PAT_COLOR 0x00000002
386#define BCI_CMD_PAT_MONO 0x00000003
387#define BCI_CMD_PAT_PBD_COLOR 0x00000004
388#define BCI_CMD_PAT_PBD_MONO 0x00000005
389#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
390#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
391#define BCI_CMD_PAT_SBD_COLOR 0x00000008
392#define BCI_CMD_PAT_SBD_MONO 0x00000009
393#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
394#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
395
396#define BCI_BD_BW_DISABLE 0x10000000
397#define BCI_BD_TILE_MASK 0x03000000
398#define BCI_BD_TILE_NONE 0x00000000
399#define BCI_BD_TILE_16 0x02000000
400#define BCI_BD_TILE_32 0x03000000
401#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
402#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
403#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
404#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
405
406#define BCI_CMD_SET_REGISTER 0x96000000
407
408#define BCI_CMD_WAIT 0xC0000000
409#define BCI_CMD_WAIT_3D 0x00010000
410#define BCI_CMD_WAIT_2D 0x00020000
411
412#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
413
414#define BCI_CMD_DRAW_PRIM 0x80000000
415#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
416#define BCI_CMD_DRAW_CONT 0x01000000
417#define BCI_CMD_DRAW_TRILIST 0x00000000
418#define BCI_CMD_DRAW_TRISTRIP 0x02000000
419#define BCI_CMD_DRAW_TRIFAN 0x04000000
420#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
421#define BCI_CMD_DRAW_NO_Z 0x00000001
422#define BCI_CMD_DRAW_NO_W 0x00000002
423#define BCI_CMD_DRAW_NO_CD 0x00000004
424#define BCI_CMD_DRAW_NO_CS 0x00000008
425#define BCI_CMD_DRAW_NO_U0 0x00000010
426#define BCI_CMD_DRAW_NO_V0 0x00000020
427#define BCI_CMD_DRAW_NO_UV0 0x00000030
428#define BCI_CMD_DRAW_NO_U1 0x00000040
429#define BCI_CMD_DRAW_NO_V1 0x00000080
430#define BCI_CMD_DRAW_NO_UV1 0x000000c0
431
432#define BCI_CMD_DMA 0xa8000000
433
434#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
435#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
436#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
437#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
438#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
439#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
440
441#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
442#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
443#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
444 (((maj) & 0x1FFF) | \
445 ((ym) ? 1<<13 : 0) | \
446 ((xp) ? 1<<14 : 0) | \
447 ((yp) ? 1<<15 : 0) | \
448 ((err) << 16))
449
450/*
451 * common commands
452 */
453#define BCI_SET_REGISTERS( first, n ) \
454 BCI_WRITE(BCI_CMD_SET_REGISTER | \
455 ((uint32_t)(n) & 0xff) << 16 | \
456 ((uint32_t)(first) & 0xffff))
457#define DMA_SET_REGISTERS( first, n ) \
458 DMA_WRITE(BCI_CMD_SET_REGISTER | \
459 ((uint32_t)(n) & 0xff) << 16 | \
460 ((uint32_t)(first) & 0xffff))
461
462#define BCI_DRAW_PRIMITIVE(n, type, skip) \
463 BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
464 ((n) << 16))
465#define DMA_DRAW_PRIMITIVE(n, type, skip) \
466 DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
467 ((n) << 16))
468
469#define BCI_DRAW_INDICES_S3D(n, type, i0) \
470 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
471 ((n) << 16) | (i0))
472
473#define BCI_DRAW_INDICES_S4(n, type, skip) \
474 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
475 (skip) | ((n) << 16))
476
477#define BCI_DMA(n) \
478 BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
479
480/*
481 * access to MMIO
482 */
483#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
484#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
485
486/*
487 * access to the burst command interface (BCI)
488 */
489#define SAVAGE_BCI_DEBUG 1
490
491#define BCI_LOCALS volatile uint32_t *bci_ptr;
492
493#define BEGIN_BCI( n ) do { \
494 dev_priv->wait_fifo(dev_priv, (n)); \
495 bci_ptr = dev_priv->bci_ptr; \
496} while(0)
497
498#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
499
500#define BCI_COPY_FROM_USER(src,n) do { \
501 unsigned int i; \
502 for (i = 0; i < n; ++i) { \
503 uint32_t val; \
504 DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
505 BCI_WRITE(val); \
506 } \
507} while(0)
508
509/*
510 * command DMA support
511 */
512#define SAVAGE_DMA_DEBUG 1
513
514#define DMA_LOCALS uint32_t *dma_ptr;
515
516#define BEGIN_DMA( n ) do { \
517 unsigned int cur = dev_priv->current_dma_page; \
518 unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
519 dev_priv->dma_pages[cur].used; \
520 if ((n) > rest) { \
521 dma_ptr = savage_dma_alloc(dev_priv, (n)); \
522 } else { /* fast path for small allocations */ \
523 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
524 cur * SAVAGE_DMA_PAGE_SIZE + \
525 dev_priv->dma_pages[cur].used; \
526 if (dev_priv->dma_pages[cur].used == 0) \
527 savage_dma_wait(dev_priv, cur); \
528 dev_priv->dma_pages[cur].used += (n); \
529 } \
530} while(0)
531
532#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
533
534#define DMA_COPY_FROM_USER(src,n) do { \
535 DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
536 dma_ptr += n; \
537} while(0)
538
539#if SAVAGE_DMA_DEBUG
540#define DMA_COMMIT() do { \
541 unsigned int cur = dev_priv->current_dma_page; \
542 uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
543 cur * SAVAGE_DMA_PAGE_SIZE + \
544 dev_priv->dma_pages[cur].used; \
545 if (dma_ptr != expected) { \
546 DRM_ERROR("DMA allocation and use don't match: " \
547 "%p != %p\n", expected, dma_ptr); \
548 savage_dma_reset(dev_priv); \
549 } \
550} while(0)
551#else
552#define DMA_COMMIT() do {/* nothing */} while(0)
553#endif
554
555#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
556
557/* Buffer aging via event tag
558 */
559
560#define UPDATE_EVENT_COUNTER( ) do { \
561 if (dev_priv->status_ptr) { \
562 uint16_t count; \
563 /* coordinate with Xserver */ \
564 count = dev_priv->status_ptr[1023]; \
565 if (count < dev_priv->event_counter) \
566 dev_priv->event_wrap++; \
567 dev_priv->event_counter = count; \
568 } \
569} while(0)
570
571#define SET_AGE( age, e, w ) do { \
572 (age)->event = e; \
573 (age)->wrap = w; \
574} while(0)
575
576#define TEST_AGE( age, e, w ) \
577 ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
578
579#endif /* __SAVAGE_DRV_H__ */
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
new file mode 100644
index 000000000000..475695a00083
--- /dev/null
+++ b/drivers/char/drm/savage_state.c
@@ -0,0 +1,1146 @@
1/* savage_state.c -- State and drawing support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
30 drm_clip_rect_t *pbox)
31{
32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend;
34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
35 ((uint32_t)pbox->x1 & 0x000007ff) |
36 (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
38 (((uint32_t)pbox->x2-1) & 0x000007ff) |
39 ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
40 if (scstart != dev_priv->state.s3d.scstart ||
41 scend != dev_priv->state.s3d.scend) {
42 DMA_LOCALS;
43 BEGIN_DMA(4);
44 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
46 DMA_WRITE(scstart);
47 DMA_WRITE(scend);
48 dev_priv->state.s3d.scstart = scstart;
49 dev_priv->state.s3d.scend = scend;
50 dev_priv->waiting = 1;
51 DMA_COMMIT();
52 }
53}
54
55void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
56 drm_clip_rect_t *pbox)
57{
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
61 ((uint32_t)pbox->x1 & 0x000007ff) |
62 (((uint32_t)pbox->y1 << 12) & 0x00fff000);
63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
64 (((uint32_t)pbox->x2-1) & 0x000007ff) |
65 ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
67 drawctrl1 != dev_priv->state.s4.drawctrl1) {
68 DMA_LOCALS;
69 BEGIN_DMA(4);
70 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
72 DMA_WRITE(drawctrl0);
73 DMA_WRITE(drawctrl1);
74 dev_priv->state.s4.drawctrl0 = drawctrl0;
75 dev_priv->state.s4.drawctrl1 = drawctrl1;
76 dev_priv->waiting = 1;
77 DMA_COMMIT();
78 }
79}
80
81static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
82 uint32_t addr)
83{
84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL);
87 }
88 if (!(addr & 1)) { /* local */
89 addr &= ~7;
90 if (addr < dev_priv->texture_offset ||
91 addr >= dev_priv->texture_offset+dev_priv->texture_size) {
92 DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
93 unit, addr);
94 return DRM_ERR(EINVAL);
95 }
96 } else { /* AGP */
97 if (!dev_priv->agp_textures) {
98 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
99 unit, addr);
100 return DRM_ERR(EINVAL);
101 }
102 addr &= ~7;
103 if (addr < dev_priv->agp_textures->offset ||
104 addr >= (dev_priv->agp_textures->offset +
105 dev_priv->agp_textures->size)) {
106 DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
107 unit, addr);
108 return DRM_ERR(EINVAL);
109 }
110 }
111 return 0;
112}
113
114#define SAVE_STATE(reg,where) \
115 if(start <= reg && start+count > reg) \
116 DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
117#define SAVE_STATE_MASK(reg,where,mask) do { \
118 if(start <= reg && start+count > reg) { \
119 uint32_t tmp; \
120 DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \
121 dev_priv->state.where = (tmp & (mask)) | \
122 (dev_priv->state.where & ~(mask)); \
123 } \
124} while (0)
125static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
126 unsigned int start, unsigned int count,
127 const uint32_t __user *regs)
128{
129 if (start < SAVAGE_TEXPALADDR_S3D ||
130 start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
131 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
132 start, start+count-1);
133 return DRM_ERR(EINVAL);
134 }
135
136 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
137 ~SAVAGE_SCISSOR_MASK_S3D);
138 SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
139 ~SAVAGE_SCISSOR_MASK_S3D);
140
141 /* if any texture regs were changed ... */
142 if (start <= SAVAGE_TEXCTRL_S3D &&
143 start+count > SAVAGE_TEXPALADDR_S3D) {
144 /* ... check texture state */
145 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
146 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
147 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
148 return savage_verify_texaddr(
149 dev_priv, 0, dev_priv->state.s3d.texaddr);
150 }
151
152 return 0;
153}
154
155static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
156 unsigned int start, unsigned int count,
157 const uint32_t __user *regs)
158{
159 int ret = 0;
160
161 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
162 start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
163 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
164 start, start+count-1);
165 return DRM_ERR(EINVAL);
166 }
167
168 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
169 ~SAVAGE_SCISSOR_MASK_S4);
170 SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
171 ~SAVAGE_SCISSOR_MASK_S4);
172
173 /* if any texture regs were changed ... */
174 if (start <= SAVAGE_TEXDESCR_S4 &&
175 start+count > SAVAGE_TEXPALADDR_S4) {
176 /* ... check texture state */
177 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
178 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
179 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
180 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
181 ret |= savage_verify_texaddr(
182 dev_priv, 0, dev_priv->state.s4.texaddr0);
183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
184 ret |= savage_verify_texaddr(
185 dev_priv, 1, dev_priv->state.s4.texaddr1);
186 }
187
188 return ret;
189}
190#undef SAVE_STATE
191#undef SAVE_STATE_MASK
192
193static int savage_dispatch_state(drm_savage_private_t *dev_priv,
194 const drm_savage_cmd_header_t *cmd_header,
195 const uint32_t __user *regs)
196{
197 unsigned int count = cmd_header->state.count;
198 unsigned int start = cmd_header->state.start;
199 unsigned int count2 = 0;
200 unsigned int bci_size;
201 int ret;
202 DMA_LOCALS;
203
204 if (!count)
205 return 0;
206
207 if (DRM_VERIFYAREA_READ(regs, count*4))
208 return DRM_ERR(EFAULT);
209
210 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
211 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
212 if (ret != 0)
213 return ret;
214 /* scissor regs are emitted in savage_dispatch_draw */
215 if (start < SAVAGE_SCSTART_S3D) {
216 if (start+count > SAVAGE_SCEND_S3D+1)
217 count2 = count - (SAVAGE_SCEND_S3D+1 - start);
218 if (start+count > SAVAGE_SCSTART_S3D)
219 count = SAVAGE_SCSTART_S3D - start;
220 } else if (start <= SAVAGE_SCEND_S3D) {
221 if (start+count > SAVAGE_SCEND_S3D+1) {
222 count -= SAVAGE_SCEND_S3D+1 - start;
223 start = SAVAGE_SCEND_S3D+1;
224 } else
225 return 0;
226 }
227 } else {
228 ret = savage_verify_state_s4(dev_priv, start, count, regs);
229 if (ret != 0)
230 return ret;
231 /* scissor regs are emitted in savage_dispatch_draw */
232 if (start < SAVAGE_DRAWCTRL0_S4) {
233 if (start+count > SAVAGE_DRAWCTRL1_S4+1)
234 count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
235 if (start+count > SAVAGE_DRAWCTRL0_S4)
236 count = SAVAGE_DRAWCTRL0_S4 - start;
237 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
238 if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
239 count -= SAVAGE_DRAWCTRL1_S4+1 - start;
240 start = SAVAGE_DRAWCTRL1_S4+1;
241 } else
242 return 0;
243 }
244 }
245
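	/* Reserve BCI space: each run of up to 255 registers emitted below
	 * needs one DMA_SET_REGISTERS header word, hence the (count+254)/255
	 * terms for both the first sub-range and the count2 remainder. */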
246 bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
247
248 if (cmd_header->state.global) {
249 BEGIN_DMA(bci_size+1);
250 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
251 dev_priv->waiting = 1;
252 } else {
253 BEGIN_DMA(bci_size);
254 }
255
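	/* Emit the registers before the scissor regs first, then the count2
	 * remainder after them; the start += 2 below skips the two scissor
	 * registers, which are emitted later in savage_dispatch_draw. */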
256 do {
257 while (count > 0) {
258 unsigned int n = count < 255 ? count : 255;
259 DMA_SET_REGISTERS(start, n);
260 DMA_COPY_FROM_USER(regs, n);
261 count -= n;
262 start += n;
263 regs += n;
264 }
265 start += 2;
266 regs += 2;
267 count = count2;
268 count2 = 0;
269 } while (count);
270
271 DMA_COMMIT();
272
273 return 0;
274}
275
276static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
277 const drm_savage_cmd_header_t *cmd_header,
278 const drm_buf_t *dmabuf)
279{
280 unsigned char reorder = 0;
281 unsigned int prim = cmd_header->prim.prim;
282 unsigned int skip = cmd_header->prim.skip;
283 unsigned int n = cmd_header->prim.count;
284 unsigned int start = cmd_header->prim.start;
285 unsigned int i;
286 BCI_LOCALS;
287
288 if (!dmabuf) {
289 DRM_ERROR("called without dma buffers!\n");
290 return DRM_ERR(EINVAL);
291 }
292
293 if (!n)
294 return 0;
295
296 switch (prim) {
297 case SAVAGE_PRIM_TRILIST_201:
298 reorder = 1;
299 prim = SAVAGE_PRIM_TRILIST;
300 case SAVAGE_PRIM_TRILIST:
301 if (n % 3 != 0) {
302 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
303 n);
304 return DRM_ERR(EINVAL);
305 }
306 break;
307 case SAVAGE_PRIM_TRISTRIP:
308 case SAVAGE_PRIM_TRIFAN:
309 if (n < 3) {
310 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
311 n);
312 return DRM_ERR(EINVAL);
313 }
314 break;
315 default:
316 DRM_ERROR("invalid primitive type %u\n", prim);
317 return DRM_ERR(EINVAL);
318 }
319
320 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
321 if (skip != 0) {
322 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
323 skip);
324 return DRM_ERR(EINVAL);
325 }
326 } else {
327 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
328 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
329 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
330 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
331 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
332 skip);
333 return DRM_ERR(EINVAL);
334 }
335 if (reorder) {
336 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
337 return DRM_ERR(EINVAL);
338 }
339 }
340
341 if (start + n > dmabuf->total/32) {
342 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
343 start, start + n - 1, dmabuf->total/32);
344 return DRM_ERR(EINVAL);
345 }
346
347 /* Vertex DMA doesn't work with command DMA at the same time,
348 * so we use BCI_... to submit commands here. Flush buffered
349 * faked DMA first. */
350 DMA_FLUSH();
351
352 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
353 BEGIN_BCI(2);
354 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
355 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
356 dev_priv->state.common.vbaddr = dmabuf->bus_address;
357 }
358 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
359 /* Workaround for what looks like a hardware bug. If a
360 * WAIT_3D_IDLE was emitted some time before the
361 * indexed drawing command then the engine will lock
362 * up. There are two known workarounds:
363 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
364 BEGIN_BCI(63);
365 for (i = 0; i < 63; ++i)
366 BCI_WRITE(BCI_CMD_WAIT);
367 dev_priv->waiting = 0;
368 }
369
370 prim <<= 25;
371 while (n != 0) {
372 /* Can emit up to 255 indices (85 triangles) at once. */
373 unsigned int count = n > 255 ? 255 : n;
374 if (reorder) {
375 /* Need to reorder indices for correct flat
376 * shading while preserving the clock sense
377 * for correct culling. Only on Savage3D. */
378 int reorder[3] = {-1, -1, -1};
379 reorder[start%3] = 2;
380
381 BEGIN_BCI((count+1+1)/2);
382 BCI_DRAW_INDICES_S3D(count, prim, start+2);
383
384 for (i = start+1; i+1 < start+count; i += 2)
385 BCI_WRITE((i + reorder[i % 3]) |
386 ((i+1 + reorder[(i+1) % 3]) << 16));
387 if (i < start+count)
388 BCI_WRITE(i + reorder[i%3]);
389 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
390 BEGIN_BCI((count+1+1)/2);
391 BCI_DRAW_INDICES_S3D(count, prim, start);
392
393 for (i = start+1; i+1 < start+count; i += 2)
394 BCI_WRITE(i | ((i+1) << 16));
395 if (i < start+count)
396 BCI_WRITE(i);
397 } else {
398 BEGIN_BCI((count+2+1)/2);
399 BCI_DRAW_INDICES_S4(count, prim, skip);
400
401 for (i = start; i+1 < start+count; i += 2)
402 BCI_WRITE(i | ((i+1) << 16));
403 if (i < start+count)
404 BCI_WRITE(i);
405 }
406
407 start += count;
408 n -= count;
409
410 prim |= BCI_CMD_DRAW_CONT;
411 }
412
413 return 0;
414}
415
416static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
417 const drm_savage_cmd_header_t *cmd_header,
418 const uint32_t __user *vtxbuf,
419 unsigned int vb_size,
420 unsigned int vb_stride)
421{
422 unsigned char reorder = 0;
423 unsigned int prim = cmd_header->prim.prim;
424 unsigned int skip = cmd_header->prim.skip;
425 unsigned int n = cmd_header->prim.count;
426 unsigned int start = cmd_header->prim.start;
427 unsigned int vtx_size;
428 unsigned int i;
429 DMA_LOCALS;
430
431 if (!n)
432 return 0;
433
434 switch (prim) {
435 case SAVAGE_PRIM_TRILIST_201:
436 reorder = 1;
437 prim = SAVAGE_PRIM_TRILIST;
438 case SAVAGE_PRIM_TRILIST:
439 if (n % 3 != 0) {
440 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
441 n);
442 return DRM_ERR(EINVAL);
443 }
444 break;
445 case SAVAGE_PRIM_TRISTRIP:
446 case SAVAGE_PRIM_TRIFAN:
447 if (n < 3) {
448 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
449 n);
450 return DRM_ERR(EINVAL);
451 }
452 break;
453 default:
454 DRM_ERROR("invalid primitive type %u\n", prim);
455 return DRM_ERR(EINVAL);
456 }
457
458 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
459 if (skip > SAVAGE_SKIP_ALL_S3D) {
460 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
461 return DRM_ERR(EINVAL);
462 }
463 vtx_size = 8; /* full vertex */
464 } else {
465 if (skip > SAVAGE_SKIP_ALL_S4) {
466 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
467 return DRM_ERR(EINVAL);
468 }
469 vtx_size = 10; /* full vertex */
470 }
471
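	/* Each set bit in skip drops one 32-bit word from the vertex format,
	 * so vtx_size ends up as the per-vertex size (in dwords) actually
	 * present in the client's vertex buffer. */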
472 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
473 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
474 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
475
476 if (vtx_size > vb_stride) {
477 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
478 vtx_size, vb_stride);
479 return DRM_ERR(EINVAL);
480 }
481
482 if (start + n > vb_size / (vb_stride*4)) {
483 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
484 start, start + n - 1, vb_size / (vb_stride*4));
485 return DRM_ERR(EINVAL);
486 }
487
488 prim <<= 25;
489 while (n != 0) {
490 /* Can emit up to 255 vertices (85 triangles) at once. */
491 unsigned int count = n > 255 ? 255 : n;
492 if (reorder) {
493 /* Need to reorder vertices for correct flat
494 * shading while preserving the clock sense
495 * for correct culling. Only on Savage3D. */
496 int reorder[3] = {-1, -1, -1};
497 reorder[start%3] = 2;
498
499 BEGIN_DMA(count*vtx_size+1);
500 DMA_DRAW_PRIMITIVE(count, prim, skip);
501
502 for (i = start; i < start+count; ++i) {
503 unsigned int j = i + reorder[i % 3];
504 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
505 vtx_size);
506 }
507
508 DMA_COMMIT();
509 } else {
510 BEGIN_DMA(count*vtx_size+1);
511 DMA_DRAW_PRIMITIVE(count, prim, skip);
512
513 if (vb_stride == vtx_size) {
514 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
515 vtx_size*count);
516 } else {
517 for (i = start; i < start+count; ++i) {
518 DMA_COPY_FROM_USER(
519 &vtxbuf[vb_stride*i],
520 vtx_size);
521 }
522 }
523
524 DMA_COMMIT();
525 }
526
527 start += count;
528 n -= count;
529
530 prim |= BCI_CMD_DRAW_CONT;
531 }
532
533 return 0;
534}
535
536static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
537 const drm_savage_cmd_header_t *cmd_header,
538 const uint16_t __user *usr_idx,
539 const drm_buf_t *dmabuf)
540{
541 unsigned char reorder = 0;
542 unsigned int prim = cmd_header->idx.prim;
543 unsigned int skip = cmd_header->idx.skip;
544 unsigned int n = cmd_header->idx.count;
545 unsigned int i;
546 BCI_LOCALS;
547
548 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n");
550 return DRM_ERR(EINVAL);
551 }
552
553 if (!n)
554 return 0;
555
556 switch (prim) {
557 case SAVAGE_PRIM_TRILIST_201:
558 reorder = 1;
559 prim = SAVAGE_PRIM_TRILIST;
560 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n",
563 n);
564 return DRM_ERR(EINVAL);
565 }
566 break;
567 case SAVAGE_PRIM_TRISTRIP:
568 case SAVAGE_PRIM_TRIFAN:
569 if (n < 3) {
570 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
571 n);
572 return DRM_ERR(EINVAL);
573 }
574 break;
575 default:
576 DRM_ERROR("invalid primitive type %u\n", prim);
577 return DRM_ERR(EINVAL);
578 }
579
580 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
581 if (skip != 0) {
582 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
583 skip);
584 return DRM_ERR(EINVAL);
585 }
586 } else {
587 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
588 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
589 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
590 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
591 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
592 skip);
593 return DRM_ERR(EINVAL);
594 }
595 if (reorder) {
596 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
597 return DRM_ERR(EINVAL);
598 }
599 }
600
601 /* Vertex DMA doesn't work with command DMA at the same time,
602 * so we use BCI_... to submit commands here. Flush buffered
603 * faked DMA first. */
604 DMA_FLUSH();
605
606 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
607 BEGIN_BCI(2);
608 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
609 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
610 dev_priv->state.common.vbaddr = dmabuf->bus_address;
611 }
612 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
613 /* Workaround for what looks like a hardware bug. If a
614 * WAIT_3D_IDLE was emitted some time before the
615 * indexed drawing command then the engine will lock
616 * up. There are two known workarounds:
617 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
618 BEGIN_BCI(63);
619 for (i = 0; i < 63; ++i)
620 BCI_WRITE(BCI_CMD_WAIT);
621 dev_priv->waiting = 0;
622 }
623
624 prim <<= 25;
625 while (n != 0) {
626 /* Can emit up to 255 indices (85 triangles) at once. */
627 unsigned int count = n > 255 ? 255 : n;
628 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
629 uint16_t idx[255];
630
631 /* Copy and check indices */
632 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
633 for (i = 0; i < count; ++i) {
634 if (idx[i] > dmabuf->total/32) {
635 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
636 i, idx[i], dmabuf->total/32);
637 return DRM_ERR(EINVAL);
638 }
639 }
640
641 if (reorder) {
642 /* Need to reorder indices for correct flat
643 * shading while preserving the clock sense
644 * for correct culling. Only on Savage3D. */
645 int reorder[3] = {2, -1, -1};
646
647 BEGIN_BCI((count+1+1)/2);
648 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
649
650 for (i = 1; i+1 < count; i += 2)
651 BCI_WRITE(idx[i + reorder[i % 3]] |
652 (idx[i+1 + reorder[(i+1) % 3]] << 16));
653 if (i < count)
654 BCI_WRITE(idx[i + reorder[i%3]]);
655 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
656 BEGIN_BCI((count+1+1)/2);
657 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
658
659 for (i = 1; i+1 < count; i += 2)
660 BCI_WRITE(idx[i] | (idx[i+1] << 16));
661 if (i < count)
662 BCI_WRITE(idx[i]);
663 } else {
664 BEGIN_BCI((count+2+1)/2);
665 BCI_DRAW_INDICES_S4(count, prim, skip);
666
667 for (i = 0; i+1 < count; i += 2)
668 BCI_WRITE(idx[i] | (idx[i+1] << 16));
669 if (i < count)
670 BCI_WRITE(idx[i]);
671 }
672
673 usr_idx += count;
674 n -= count;
675
676 prim |= BCI_CMD_DRAW_CONT;
677 }
678
679 return 0;
680}
681
682static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
683 const drm_savage_cmd_header_t *cmd_header,
684 const uint16_t __user *usr_idx,
685 const uint32_t __user *vtxbuf,
686 unsigned int vb_size,
687 unsigned int vb_stride)
688{
689 unsigned char reorder = 0;
690 unsigned int prim = cmd_header->idx.prim;
691 unsigned int skip = cmd_header->idx.skip;
692 unsigned int n = cmd_header->idx.count;
693 unsigned int vtx_size;
694 unsigned int i;
695 DMA_LOCALS;
696
697 if (!n)
698 return 0;
699
700 switch (prim) {
701 case SAVAGE_PRIM_TRILIST_201:
702 reorder = 1;
703 prim = SAVAGE_PRIM_TRILIST;
704 case SAVAGE_PRIM_TRILIST:
705 if (n % 3 != 0) {
706 DRM_ERROR("wrong number of indices %u in TRILIST\n",
707 n);
708 return DRM_ERR(EINVAL);
709 }
710 break;
711 case SAVAGE_PRIM_TRISTRIP:
712 case SAVAGE_PRIM_TRIFAN:
713 if (n < 3) {
714 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
715 n);
716 return DRM_ERR(EINVAL);
717 }
718 break;
719 default:
720 DRM_ERROR("invalid primitive type %u\n", prim);
721 return DRM_ERR(EINVAL);
722 }
723
724 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
725 if (skip > SAVAGE_SKIP_ALL_S3D) {
726 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
727 return DRM_ERR(EINVAL);
728 }
729 vtx_size = 8; /* full vertex */
730 } else {
731 if (skip > SAVAGE_SKIP_ALL_S4) {
732 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
733 return DRM_ERR(EINVAL);
734 }
735 vtx_size = 10; /* full vertex */
736 }
737
738 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
739 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
740 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
741
742 if (vtx_size > vb_stride) {
743 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
744 vtx_size, vb_stride);
745 return DRM_ERR(EINVAL);
746 }
747
748 prim <<= 25;
749 while (n != 0) {
750 /* Can emit up to 255 vertices (85 triangles) at once. */
751 unsigned int count = n > 255 ? 255 : n;
752 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
753 uint16_t idx[255];
754
755 /* Copy and check indices */
756 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
757 for (i = 0; i < count; ++i) {
758 if (idx[i] > vb_size / (vb_stride*4)) {
759 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
760 i, idx[i], vb_size / (vb_stride*4));
761 return DRM_ERR(EINVAL);
762 }
763 }
764
765 if (reorder) {
766 /* Need to reorder vertices for correct flat
767 * shading while preserving the clock sense
768 * for correct culling. Only on Savage3D. */
769 int reorder[3] = {2, -1, -1};
770
771 BEGIN_DMA(count*vtx_size+1);
772 DMA_DRAW_PRIMITIVE(count, prim, skip);
773
774 for (i = 0; i < count; ++i) {
775 unsigned int j = idx[i + reorder[i % 3]];
776 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
777 vtx_size);
778 }
779
780 DMA_COMMIT();
781 } else {
782 BEGIN_DMA(count*vtx_size+1);
783 DMA_DRAW_PRIMITIVE(count, prim, skip);
784
785 for (i = 0; i < count; ++i) {
786 unsigned int j = idx[i];
787 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
788 vtx_size);
789 }
790
791 DMA_COMMIT();
792 }
793
794 usr_idx += count;
795 n -= count;
796
797 prim |= BCI_CMD_DRAW_CONT;
798 }
799
800 return 0;
801}
802
803static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
804 const drm_savage_cmd_header_t *cmd_header,
805 const drm_savage_cmd_header_t __user *data,
806 unsigned int nbox,
807 const drm_clip_rect_t __user *usr_boxes)
808{
809 unsigned int flags = cmd_header->clear0.flags, mask, value;
810 unsigned int clear_cmd;
811 unsigned int i, nbufs;
812 DMA_LOCALS;
813
814 if (nbox == 0)
815 return 0;
816
817 DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data)
818 ->clear1.mask);
819 DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data)
820 ->clear1.value);
821
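	/* The clear is a solid rectangle fill with raster op 0xCC (copy),
	 * writing the user-supplied clear value to each selected buffer. */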
822 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
823 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
824 BCI_CMD_SET_ROP(clear_cmd,0xCC);
825
826 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
827 ((flags & SAVAGE_BACK) ? 1 : 0) +
828 ((flags & SAVAGE_DEPTH) ? 1 : 0);
829 if (nbufs == 0)
830 return 0;
831
832 if (mask != 0xffffffff) {
833 /* set mask */
834 BEGIN_DMA(2);
835 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
836 DMA_WRITE(mask);
837 DMA_COMMIT();
838 }
839 for (i = 0; i < nbox; ++i) {
840 drm_clip_rect_t box;
841 unsigned int x, y, w, h;
842 unsigned int buf;
843 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
844 x = box.x1, y = box.y1;
845 w = box.x2 - box.x1;
846 h = box.y2 - box.y1;
847 BEGIN_DMA(nbufs*6);
848 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
849 if (!(flags & buf))
850 continue;
851 DMA_WRITE(clear_cmd);
852 switch(buf) {
853 case SAVAGE_FRONT:
854 DMA_WRITE(dev_priv->front_offset);
855 DMA_WRITE(dev_priv->front_bd);
856 break;
857 case SAVAGE_BACK:
858 DMA_WRITE(dev_priv->back_offset);
859 DMA_WRITE(dev_priv->back_bd);
860 break;
861 case SAVAGE_DEPTH:
862 DMA_WRITE(dev_priv->depth_offset);
863 DMA_WRITE(dev_priv->depth_bd);
864 break;
865 }
866 DMA_WRITE(value);
867 DMA_WRITE(BCI_X_Y(x, y));
868 DMA_WRITE(BCI_W_H(w, h));
869 }
870 DMA_COMMIT();
871 }
872 if (mask != 0xffffffff) {
873 /* reset mask */
874 BEGIN_DMA(2);
875 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
876 DMA_WRITE(0xffffffff);
877 DMA_COMMIT();
878 }
879
880 return 0;
881}
882
883static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
884 unsigned int nbox,
885 const drm_clip_rect_t __user *usr_boxes)
886{
887 unsigned int swap_cmd;
888 unsigned int i;
889 DMA_LOCALS;
890
891 if (nbox == 0)
892 return 0;
893
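	/* Swap copies the back buffer (source PBD) to the destination set by
	 * the global bitmap descriptor, typically the front buffer, once per
	 * clip rect, again with ROP 0xCC (copy). */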
894 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
895 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
896 BCI_CMD_SET_ROP(swap_cmd,0xCC);
897
898 for (i = 0; i < nbox; ++i) {
899 drm_clip_rect_t box;
900 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
901
902 BEGIN_DMA(6);
903 DMA_WRITE(swap_cmd);
904 DMA_WRITE(dev_priv->back_offset);
905 DMA_WRITE(dev_priv->back_bd);
906 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
907 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
908 DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
909 DMA_COMMIT();
910 }
911
912 return 0;
913}
914
915static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
916 const drm_savage_cmd_header_t __user *start,
917 const drm_savage_cmd_header_t __user *end,
918 const drm_buf_t *dmabuf,
919 const unsigned int __user *usr_vtxbuf,
920 unsigned int vb_size, unsigned int vb_stride,
921 unsigned int nbox,
922 const drm_clip_rect_t __user *usr_boxes)
923{
924 unsigned int i, j;
925 int ret;
926
927 for (i = 0; i < nbox; ++i) {
928 drm_clip_rect_t box;
929 const drm_savage_cmd_header_t __user *usr_cmdbuf;
930 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
931 dev_priv->emit_clip_rect(dev_priv, &box);
932
933 usr_cmdbuf = start;
934 while (usr_cmdbuf < end) {
935 drm_savage_cmd_header_t cmd_header;
936 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
937 sizeof(cmd_header));
938 usr_cmdbuf++;
939 switch (cmd_header.cmd.cmd) {
940 case SAVAGE_CMD_DMA_PRIM:
941 ret = savage_dispatch_dma_prim(
942 dev_priv, &cmd_header, dmabuf);
943 break;
944 case SAVAGE_CMD_VB_PRIM:
945 ret = savage_dispatch_vb_prim(
946 dev_priv, &cmd_header,
947 (const uint32_t __user *)usr_vtxbuf,
948 vb_size, vb_stride);
949 break;
950 case SAVAGE_CMD_DMA_IDX:
951 j = (cmd_header.idx.count + 3) / 4;
952 /* j was checked in savage_bci_cmdbuf */
953 ret = savage_dispatch_dma_idx(
954 dev_priv, &cmd_header,
955 (const uint16_t __user *)usr_cmdbuf,
956 dmabuf);
957 usr_cmdbuf += j;
958 break;
959 case SAVAGE_CMD_VB_IDX:
960 j = (cmd_header.idx.count + 3) / 4;
961 /* j was checked in savage_bci_cmdbuf */
962 ret = savage_dispatch_vb_idx(
963 dev_priv, &cmd_header,
964 (const uint16_t __user *)usr_cmdbuf,
965 (const uint32_t __user *)usr_vtxbuf,
966 vb_size, vb_stride);
967 usr_cmdbuf += j;
968 break;
969 default:
970 /* What's the best return code? EFAULT? */
971 DRM_ERROR("IMPLEMENTATION ERROR: "
972 "non-drawing-command %d\n",
973 cmd_header.cmd.cmd);
974 return DRM_ERR(EINVAL);
975 }
976
977 if (ret != 0)
978 return ret;
979 }
980 }
981
982 return 0;
983}
984
985int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
986{
987 DRM_DEVICE;
988 drm_savage_private_t *dev_priv = dev->dev_private;
989 drm_device_dma_t *dma = dev->dma;
990 drm_buf_t *dmabuf;
991 drm_savage_cmdbuf_t cmdbuf;
992 drm_savage_cmd_header_t __user *usr_cmdbuf;
993 drm_savage_cmd_header_t __user *first_draw_cmd;
994 unsigned int __user *usr_vtxbuf;
995 drm_clip_rect_t __user *usr_boxes;
996 unsigned int i, j;
997 int ret = 0;
998
999 DRM_DEBUG("\n");
1000
1001 LOCK_TEST_WITH_RETURN(dev, filp);
1002
1003 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
1004 sizeof(cmdbuf));
1005
1006 if (dma && dma->buflist) {
1007 if (cmdbuf.dma_idx > dma->buf_count) {
1008 DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
1009 cmdbuf.dma_idx, dma->buf_count-1);
1010 return DRM_ERR(EINVAL);
1011 }
1012 dmabuf = dma->buflist[cmdbuf.dma_idx];
1013 } else {
1014 dmabuf = NULL;
1015 }
1016
1017 usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
1018 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
1019 usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
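	/* cmdbuf.size counts 64-bit command headers (hence the *8 in the
	 * access check below); vb_size is in bytes and nbox counts clip
	 * rects. */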
1020 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
1021 (cmdbuf.vb_size && DRM_VERIFYAREA_READ(
1022 usr_vtxbuf, cmdbuf.vb_size)) ||
1023 (cmdbuf.nbox && DRM_VERIFYAREA_READ(
1024 usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
1025 return DRM_ERR(EFAULT);
1026
1027 /* Make sure writes to DMA buffers are finished before sending
1028 * DMA commands to the graphics hardware. */
1029 DRM_MEMORYBARRIER();
1030
1031 /* Coming from user space. Don't know if the Xserver has
1032 * emitted wait commands. Assuming the worst. */
1033 dev_priv->waiting = 1;
1034
1035 i = 0;
1036 first_draw_cmd = NULL;
1037 while (i < cmdbuf.size) {
1038 drm_savage_cmd_header_t cmd_header;
1039 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
1040 sizeof(cmd_header));
1041 usr_cmdbuf++;
1042 i++;
1043
1044 /* Group drawing commands with same state to minimize
1045 * iterations over clip rects. */
1046 j = 0;
1047 switch (cmd_header.cmd.cmd) {
1048 case SAVAGE_CMD_DMA_IDX:
1049 case SAVAGE_CMD_VB_IDX:
1050 j = (cmd_header.idx.count + 3) / 4;
1051 if (i + j > cmdbuf.size) {
1052 DRM_ERROR("indexed drawing command extends "
1053 "beyond end of command buffer\n");
1054 DMA_FLUSH();
1055 return DRM_ERR(EINVAL);
1056 }
1057 /* fall through */
1058 case SAVAGE_CMD_DMA_PRIM:
1059 case SAVAGE_CMD_VB_PRIM:
1060 if (!first_draw_cmd)
1061 first_draw_cmd = usr_cmdbuf-1;
1062 usr_cmdbuf += j;
1063 i += j;
1064 break;
1065 default:
1066 if (first_draw_cmd) {
1067 ret = savage_dispatch_draw (
1068 dev_priv, first_draw_cmd, usr_cmdbuf-1,
1069 dmabuf, usr_vtxbuf, cmdbuf.vb_size,
1070 cmdbuf.vb_stride,
1071 cmdbuf.nbox, usr_boxes);
1072 if (ret != 0)
1073 return ret;
1074 first_draw_cmd = NULL;
1075 }
1076 }
1077 if (first_draw_cmd)
1078 continue;
1079
1080 switch (cmd_header.cmd.cmd) {
1081 case SAVAGE_CMD_STATE:
1082 j = (cmd_header.state.count + 1) / 2;
1083 if (i + j > cmdbuf.size) {
1084 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1085 "beyond end of command buffer\n");
1086 DMA_FLUSH();
1087 return DRM_ERR(EINVAL);
1088 }
1089 ret = savage_dispatch_state(
1090 dev_priv, &cmd_header,
1091 (uint32_t __user *)usr_cmdbuf);
1092 usr_cmdbuf += j;
1093 i += j;
1094 break;
1095 case SAVAGE_CMD_CLEAR:
1096 if (i + 1 > cmdbuf.size) {
1097 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1098 "beyond end of command buffer\n");
1099 DMA_FLUSH();
1100 return DRM_ERR(EINVAL);
1101 }
1102 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1103 usr_cmdbuf,
1104 cmdbuf.nbox, usr_boxes);
1105 usr_cmdbuf++;
1106 i++;
1107 break;
1108 case SAVAGE_CMD_SWAP:
1109 ret = savage_dispatch_swap(dev_priv,
1110 cmdbuf.nbox, usr_boxes);
1111 break;
1112 default:
1113 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
1114 DMA_FLUSH();
1115 return DRM_ERR(EINVAL);
1116 }
1117
1118 if (ret != 0) {
1119 DMA_FLUSH();
1120 return ret;
1121 }
1122 }
1123
1124 if (first_draw_cmd) {
1125 ret = savage_dispatch_draw (
1126 dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
1127 usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
1128 cmdbuf.nbox, usr_boxes);
1129 if (ret != 0) {
1130 DMA_FLUSH();
1131 return ret;
1132 }
1133 }
1134
1135 DMA_FLUSH();
1136
1137 if (dmabuf && cmdbuf.discard) {
1138 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1139 uint16_t event;
1140 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1141 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1142 savage_freelist_put(dev, dmabuf);
1143 }
1144
1145 return 0;
1146}
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 6025e1866c7e..58d3738a2b7f 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -6,6 +6,8 @@
6 For technical support please email digiLinux@dgii.com or 6 For technical support please email digiLinux@dgii.com or
7 call Digi tech support at (612) 912-3456 7 call Digi tech support at (612) 912-3456
8 8
9 ** This driver is no longer supported by Digi **
10
9 Much of this design and code came from epca.c which was 11 Much of this design and code came from epca.c which was
10 copyright (C) 1994, 1995 Troy De Jongh, and subsquently 12 copyright (C) 1994, 1995 Troy De Jongh, and subsquently
11 modified by David Nugent, Christoph Lameter, Mike McLagan. 13 modified by David Nugent, Christoph Lameter, Mike McLagan.
@@ -43,31 +45,19 @@
43#include <linux/interrupt.h> 45#include <linux/interrupt.h>
44#include <asm/uaccess.h> 46#include <asm/uaccess.h>
45#include <asm/io.h> 47#include <asm/io.h>
46 48#include <linux/spinlock.h>
47#ifdef CONFIG_PCI
48#define ENABLE_PCI
49#endif /* CONFIG_PCI */
50
51#define putUser(arg1, arg2) put_user(arg1, (unsigned long __user *)arg2)
52#define getUser(arg1, arg2) get_user(arg1, (unsigned __user *)arg2)
53
54#ifdef ENABLE_PCI
55#include <linux/pci.h> 49#include <linux/pci.h>
56#include "digiPCI.h" 50#include "digiPCI.h"
57#endif /* ENABLE_PCI */ 51
58 52
59#include "digi1.h" 53#include "digi1.h"
60#include "digiFep1.h" 54#include "digiFep1.h"
61#include "epca.h" 55#include "epca.h"
62#include "epcaconfig.h" 56#include "epcaconfig.h"
63 57
64#if BITS_PER_LONG != 32
65# error FIXME: this driver only works on 32-bit platforms
66#endif
67
68/* ---------------------- Begin defines ------------------------ */ 58/* ---------------------- Begin defines ------------------------ */
69 59
70#define VERSION "1.3.0.1-LK" 60#define VERSION "1.3.0.1-LK2.6"
71 61
72/* This major needs to be submitted to Linux to join the majors list */ 62/* This major needs to be submitted to Linux to join the majors list */
73 63
@@ -81,13 +71,17 @@
81 71
82/* ----------------- Begin global definitions ------------------- */ 72/* ----------------- Begin global definitions ------------------- */
83 73
84static char mesg[100];
85static int nbdevs, num_cards, liloconfig; 74static int nbdevs, num_cards, liloconfig;
86static int digi_poller_inhibited = 1 ; 75static int digi_poller_inhibited = 1 ;
87 76
88static int setup_error_code; 77static int setup_error_code;
89static int invalid_lilo_config; 78static int invalid_lilo_config;
90 79
80/* The ISA boards do window flipping into the same spaces so its only sane
81 with a single lock. It's still pretty efficient */
82
83static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED;
84
91/* ----------------------------------------------------------------------- 85/* -----------------------------------------------------------------------
92 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 86 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
93 7 below. 87 7 below.
@@ -129,58 +123,58 @@ static struct timer_list epca_timer;
129 configured. 123 configured.
130----------------------------------------------------------------------- */ 124----------------------------------------------------------------------- */
131 125
132static inline void memwinon(struct board_info *b, unsigned int win); 126static void memwinon(struct board_info *b, unsigned int win);
133static inline void memwinoff(struct board_info *b, unsigned int win); 127static void memwinoff(struct board_info *b, unsigned int win);
134static inline void globalwinon(struct channel *ch); 128static void globalwinon(struct channel *ch);
135static inline void rxwinon(struct channel *ch); 129static void rxwinon(struct channel *ch);
136static inline void txwinon(struct channel *ch); 130static void txwinon(struct channel *ch);
137static inline void memoff(struct channel *ch); 131static void memoff(struct channel *ch);
138static inline void assertgwinon(struct channel *ch); 132static void assertgwinon(struct channel *ch);
139static inline void assertmemoff(struct channel *ch); 133static void assertmemoff(struct channel *ch);
140 134
141/* ---- Begin more 'specific' memory functions for cx_like products --- */ 135/* ---- Begin more 'specific' memory functions for cx_like products --- */
142 136
143static inline void pcxem_memwinon(struct board_info *b, unsigned int win); 137static void pcxem_memwinon(struct board_info *b, unsigned int win);
144static inline void pcxem_memwinoff(struct board_info *b, unsigned int win); 138static void pcxem_memwinoff(struct board_info *b, unsigned int win);
145static inline void pcxem_globalwinon(struct channel *ch); 139static void pcxem_globalwinon(struct channel *ch);
146static inline void pcxem_rxwinon(struct channel *ch); 140static void pcxem_rxwinon(struct channel *ch);
147static inline void pcxem_txwinon(struct channel *ch); 141static void pcxem_txwinon(struct channel *ch);
148static inline void pcxem_memoff(struct channel *ch); 142static void pcxem_memoff(struct channel *ch);
149 143
150/* ------ Begin more 'specific' memory functions for the pcxe ------- */ 144/* ------ Begin more 'specific' memory functions for the pcxe ------- */
151 145
152static inline void pcxe_memwinon(struct board_info *b, unsigned int win); 146static void pcxe_memwinon(struct board_info *b, unsigned int win);
153static inline void pcxe_memwinoff(struct board_info *b, unsigned int win); 147static void pcxe_memwinoff(struct board_info *b, unsigned int win);
154static inline void pcxe_globalwinon(struct channel *ch); 148static void pcxe_globalwinon(struct channel *ch);
155static inline void pcxe_rxwinon(struct channel *ch); 149static void pcxe_rxwinon(struct channel *ch);
156static inline void pcxe_txwinon(struct channel *ch); 150static void pcxe_txwinon(struct channel *ch);
157static inline void pcxe_memoff(struct channel *ch); 151static void pcxe_memoff(struct channel *ch);
158 152
159/* ---- Begin more 'specific' memory functions for the pc64xe and pcxi ---- */ 153/* ---- Begin more 'specific' memory functions for the pc64xe and pcxi ---- */
160/* Note : pc64xe and pcxi share the same windowing routines */ 154/* Note : pc64xe and pcxi share the same windowing routines */
161 155
162static inline void pcxi_memwinon(struct board_info *b, unsigned int win); 156static void pcxi_memwinon(struct board_info *b, unsigned int win);
163static inline void pcxi_memwinoff(struct board_info *b, unsigned int win); 157static void pcxi_memwinoff(struct board_info *b, unsigned int win);
164static inline void pcxi_globalwinon(struct channel *ch); 158static void pcxi_globalwinon(struct channel *ch);
165static inline void pcxi_rxwinon(struct channel *ch); 159static void pcxi_rxwinon(struct channel *ch);
166static inline void pcxi_txwinon(struct channel *ch); 160static void pcxi_txwinon(struct channel *ch);
167static inline void pcxi_memoff(struct channel *ch); 161static void pcxi_memoff(struct channel *ch);
168 162
169/* - Begin 'specific' do nothing memory functions needed for some cards - */ 163/* - Begin 'specific' do nothing memory functions needed for some cards - */
170 164
171static inline void dummy_memwinon(struct board_info *b, unsigned int win); 165static void dummy_memwinon(struct board_info *b, unsigned int win);
172static inline void dummy_memwinoff(struct board_info *b, unsigned int win); 166static void dummy_memwinoff(struct board_info *b, unsigned int win);
173static inline void dummy_globalwinon(struct channel *ch); 167static void dummy_globalwinon(struct channel *ch);
174static inline void dummy_rxwinon(struct channel *ch); 168static void dummy_rxwinon(struct channel *ch);
175static inline void dummy_txwinon(struct channel *ch); 169static void dummy_txwinon(struct channel *ch);
176static inline void dummy_memoff(struct channel *ch); 170static void dummy_memoff(struct channel *ch);
177static inline void dummy_assertgwinon(struct channel *ch); 171static void dummy_assertgwinon(struct channel *ch);
178static inline void dummy_assertmemoff(struct channel *ch); 172static void dummy_assertmemoff(struct channel *ch);
179 173
180/* ------------------- Begin declare functions ----------------------- */ 174/* ------------------- Begin declare functions ----------------------- */
181 175
182static inline struct channel *verifyChannel(register struct tty_struct *); 176static struct channel *verifyChannel(struct tty_struct *);
183static inline void pc_sched_event(struct channel *, int); 177static void pc_sched_event(struct channel *, int);
184static void epca_error(int, char *); 178static void epca_error(int, char *);
185static void pc_close(struct tty_struct *, struct file *); 179static void pc_close(struct tty_struct *, struct file *);
186static void shutdown(struct channel *); 180static void shutdown(struct channel *);
@@ -215,15 +209,11 @@ static void pc_unthrottle(struct tty_struct *tty);
215static void digi_send_break(struct channel *ch, int msec); 209static void digi_send_break(struct channel *ch, int msec);
216static void setup_empty_event(struct tty_struct *tty, struct channel *ch); 210static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
217void epca_setup(char *, int *); 211void epca_setup(char *, int *);
218void console_print(const char *);
219 212
220static int get_termio(struct tty_struct *, struct termio __user *); 213static int get_termio(struct tty_struct *, struct termio __user *);
221static int pc_write(struct tty_struct *, const unsigned char *, int); 214static int pc_write(struct tty_struct *, const unsigned char *, int);
222int pc_init(void); 215static int pc_init(void);
223
224#ifdef ENABLE_PCI
225static int init_PCI(void); 216static int init_PCI(void);
226#endif /* ENABLE_PCI */
227 217
228 218
229/* ------------------------------------------------------------------ 219/* ------------------------------------------------------------------
@@ -237,41 +227,41 @@ static int init_PCI(void);
237 making direct calls deserves what they get. 227 making direct calls deserves what they get.
238-------------------------------------------------------------------- */ 228-------------------------------------------------------------------- */
239 229
240static inline void memwinon(struct board_info *b, unsigned int win) 230static void memwinon(struct board_info *b, unsigned int win)
241{ 231{
242 (b->memwinon)(b, win); 232 (b->memwinon)(b, win);
243} 233}
244 234
245static inline void memwinoff(struct board_info *b, unsigned int win) 235static void memwinoff(struct board_info *b, unsigned int win)
246{ 236{
247 (b->memwinoff)(b, win); 237 (b->memwinoff)(b, win);
248} 238}
249 239
250static inline void globalwinon(struct channel *ch) 240static void globalwinon(struct channel *ch)
251{ 241{
252 (ch->board->globalwinon)(ch); 242 (ch->board->globalwinon)(ch);
253} 243}
254 244
255static inline void rxwinon(struct channel *ch) 245static void rxwinon(struct channel *ch)
256{ 246{
257 (ch->board->rxwinon)(ch); 247 (ch->board->rxwinon)(ch);
258} 248}
259 249
260static inline void txwinon(struct channel *ch) 250static void txwinon(struct channel *ch)
261{ 251{
262 (ch->board->txwinon)(ch); 252 (ch->board->txwinon)(ch);
263} 253}
264 254
265static inline void memoff(struct channel *ch) 255static void memoff(struct channel *ch)
266{ 256{
267 (ch->board->memoff)(ch); 257 (ch->board->memoff)(ch);
268} 258}
269static inline void assertgwinon(struct channel *ch) 259static void assertgwinon(struct channel *ch)
270{ 260{
271 (ch->board->assertgwinon)(ch); 261 (ch->board->assertgwinon)(ch);
272} 262}
273 263
274static inline void assertmemoff(struct channel *ch) 264static void assertmemoff(struct channel *ch)
275{ 265{
276 (ch->board->assertmemoff)(ch); 266 (ch->board->assertmemoff)(ch);
277} 267}
@@ -281,66 +271,66 @@ static inline void assertmemoff(struct channel *ch)
281 and CX series cards. 271 and CX series cards.
282------------------------------------------------------------ */ 272------------------------------------------------------------ */
283 273
284static inline void pcxem_memwinon(struct board_info *b, unsigned int win) 274static void pcxem_memwinon(struct board_info *b, unsigned int win)
285{ 275{
286 outb_p(FEPWIN|win, (int)b->port + 1); 276 outb_p(FEPWIN|win, b->port + 1);
287} 277}
288 278
289static inline void pcxem_memwinoff(struct board_info *b, unsigned int win) 279static void pcxem_memwinoff(struct board_info *b, unsigned int win)
290{ 280{
291 outb_p(0, (int)b->port + 1); 281 outb_p(0, b->port + 1);
292} 282}
293 283
294static inline void pcxem_globalwinon(struct channel *ch) 284static void pcxem_globalwinon(struct channel *ch)
295{ 285{
296 outb_p( FEPWIN, (int)ch->board->port + 1); 286 outb_p( FEPWIN, (int)ch->board->port + 1);
297} 287}
298 288
299static inline void pcxem_rxwinon(struct channel *ch) 289static void pcxem_rxwinon(struct channel *ch)
300{ 290{
301 outb_p(ch->rxwin, (int)ch->board->port + 1); 291 outb_p(ch->rxwin, (int)ch->board->port + 1);
302} 292}
303 293
304static inline void pcxem_txwinon(struct channel *ch) 294static void pcxem_txwinon(struct channel *ch)
305{ 295{
306 outb_p(ch->txwin, (int)ch->board->port + 1); 296 outb_p(ch->txwin, (int)ch->board->port + 1);
307} 297}
308 298
309static inline void pcxem_memoff(struct channel *ch) 299static void pcxem_memoff(struct channel *ch)
310{ 300{
311 outb_p(0, (int)ch->board->port + 1); 301 outb_p(0, (int)ch->board->port + 1);
312} 302}
313 303
314/* ----------------- Begin pcxe memory window stuff ------------------ */ 304/* ----------------- Begin pcxe memory window stuff ------------------ */
315 305
316static inline void pcxe_memwinon(struct board_info *b, unsigned int win) 306static void pcxe_memwinon(struct board_info *b, unsigned int win)
317{ 307{
318 outb_p(FEPWIN | win, (int)b->port + 1); 308 outb_p(FEPWIN | win, b->port + 1);
319} 309}
320 310
321static inline void pcxe_memwinoff(struct board_info *b, unsigned int win) 311static void pcxe_memwinoff(struct board_info *b, unsigned int win)
322{ 312{
323 outb_p(inb((int)b->port) & ~FEPMEM, 313 outb_p(inb(b->port) & ~FEPMEM,
324 (int)b->port + 1); 314 b->port + 1);
325 outb_p(0, (int)b->port + 1); 315 outb_p(0, b->port + 1);
326} 316}
327 317
328static inline void pcxe_globalwinon(struct channel *ch) 318static void pcxe_globalwinon(struct channel *ch)
329{ 319{
330 outb_p( FEPWIN, (int)ch->board->port + 1); 320 outb_p( FEPWIN, (int)ch->board->port + 1);
331} 321}
332 322
333static inline void pcxe_rxwinon(struct channel *ch) 323static void pcxe_rxwinon(struct channel *ch)
334{ 324{
335 outb_p(ch->rxwin, (int)ch->board->port + 1); 325 outb_p(ch->rxwin, (int)ch->board->port + 1);
336} 326}
337 327
338static inline void pcxe_txwinon(struct channel *ch) 328static void pcxe_txwinon(struct channel *ch)
339{ 329{
340 outb_p(ch->txwin, (int)ch->board->port + 1); 330 outb_p(ch->txwin, (int)ch->board->port + 1);
341} 331}
342 332
343static inline void pcxe_memoff(struct channel *ch) 333static void pcxe_memoff(struct channel *ch)
344{ 334{
345 outb_p(0, (int)ch->board->port); 335 outb_p(0, (int)ch->board->port);
346 outb_p(0, (int)ch->board->port + 1); 336 outb_p(0, (int)ch->board->port + 1);
@@ -348,44 +338,44 @@ static inline void pcxe_memoff(struct channel *ch)
348 338
349/* ------------- Begin pc64xe and pcxi memory window stuff -------------- */ 339/* ------------- Begin pc64xe and pcxi memory window stuff -------------- */
350 340
351static inline void pcxi_memwinon(struct board_info *b, unsigned int win) 341static void pcxi_memwinon(struct board_info *b, unsigned int win)
352{ 342{
353 outb_p(inb((int)b->port) | FEPMEM, (int)b->port); 343 outb_p(inb(b->port) | FEPMEM, b->port);
354} 344}
355 345
356static inline void pcxi_memwinoff(struct board_info *b, unsigned int win) 346static void pcxi_memwinoff(struct board_info *b, unsigned int win)
357{ 347{
358 outb_p(inb((int)b->port) & ~FEPMEM, (int)b->port); 348 outb_p(inb(b->port) & ~FEPMEM, b->port);
359} 349}
360 350
361static inline void pcxi_globalwinon(struct channel *ch) 351static void pcxi_globalwinon(struct channel *ch)
362{ 352{
363 outb_p(FEPMEM, (int)ch->board->port); 353 outb_p(FEPMEM, ch->board->port);
364} 354}
365 355
366static inline void pcxi_rxwinon(struct channel *ch) 356static void pcxi_rxwinon(struct channel *ch)
367{ 357{
368 outb_p(FEPMEM, (int)ch->board->port); 358 outb_p(FEPMEM, ch->board->port);
369} 359}
370 360
371static inline void pcxi_txwinon(struct channel *ch) 361static void pcxi_txwinon(struct channel *ch)
372{ 362{
373 outb_p(FEPMEM, (int)ch->board->port); 363 outb_p(FEPMEM, ch->board->port);
374} 364}
375 365
376static inline void pcxi_memoff(struct channel *ch) 366static void pcxi_memoff(struct channel *ch)
377{ 367{
378 outb_p(0, (int)ch->board->port); 368 outb_p(0, ch->board->port);
379} 369}
380 370
381static inline void pcxi_assertgwinon(struct channel *ch) 371static void pcxi_assertgwinon(struct channel *ch)
382{ 372{
383 epcaassert(inb((int)ch->board->port) & FEPMEM, "Global memory off"); 373 epcaassert(inb(ch->board->port) & FEPMEM, "Global memory off");
384} 374}
385 375
386static inline void pcxi_assertmemoff(struct channel *ch) 376static void pcxi_assertmemoff(struct channel *ch)
387{ 377{
388 epcaassert(!(inb((int)ch->board->port) & FEPMEM), "Memory on"); 378 epcaassert(!(inb(ch->board->port) & FEPMEM), "Memory on");
389} 379}
390 380
391 381
@@ -398,185 +388,143 @@ static inline void pcxi_assertmemoff(struct channel *ch)
398 may or may not do anything. 388 may or may not do anything.
399---------------------------------------------------------------------------*/ 389---------------------------------------------------------------------------*/
400 390
401static inline void dummy_memwinon(struct board_info *b, unsigned int win) 391static void dummy_memwinon(struct board_info *b, unsigned int win)
402{ 392{
403} 393}
404 394
405static inline void dummy_memwinoff(struct board_info *b, unsigned int win) 395static void dummy_memwinoff(struct board_info *b, unsigned int win)
406{ 396{
407} 397}
408 398
409static inline void dummy_globalwinon(struct channel *ch) 399static void dummy_globalwinon(struct channel *ch)
410{ 400{
411} 401}
412 402
413static inline void dummy_rxwinon(struct channel *ch) 403static void dummy_rxwinon(struct channel *ch)
414{ 404{
415} 405}
416 406
417static inline void dummy_txwinon(struct channel *ch) 407static void dummy_txwinon(struct channel *ch)
418{ 408{
419} 409}
420 410
421static inline void dummy_memoff(struct channel *ch) 411static void dummy_memoff(struct channel *ch)
422{ 412{
423} 413}
424 414
425static inline void dummy_assertgwinon(struct channel *ch) 415static void dummy_assertgwinon(struct channel *ch)
426{ 416{
427} 417}
428 418
429static inline void dummy_assertmemoff(struct channel *ch) 419static void dummy_assertmemoff(struct channel *ch)
430{ 420{
431} 421}
432 422
433/* ----------------- Begin verifyChannel function ----------------------- */ 423/* ----------------- Begin verifyChannel function ----------------------- */
434static inline struct channel *verifyChannel(register struct tty_struct *tty) 424static struct channel *verifyChannel(struct tty_struct *tty)
435{ /* Begin verifyChannel */ 425{ /* Begin verifyChannel */
436
437 /* -------------------------------------------------------------------- 426 /* --------------------------------------------------------------------
438 This routine basically provides a sanity check. It insures that 427 This routine basically provides a sanity check. It insures that
439 the channel returned is within the proper range of addresses as 428 the channel returned is within the proper range of addresses as
440 well as properly initialized. If some bogus info gets passed in 429 well as properly initialized. If some bogus info gets passed in
441 through tty->driver_data this should catch it. 430 through tty->driver_data this should catch it.
442 --------------------------------------------------------------------- */ 431 --------------------------------------------------------------------- */
443 432 if (tty) {
444 if (tty) 433 struct channel *ch = (struct channel *)tty->driver_data;
445 { /* Begin if tty */ 434 if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) {
446
447 register struct channel *ch = (struct channel *)tty->driver_data;
448
449 if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs]))
450 {
451 if (ch->magic == EPCA_MAGIC) 435 if (ch->magic == EPCA_MAGIC)
452 return ch; 436 return ch;
453 } 437 }
454 438 }
455 } /* End if tty */
456
457 /* Else return a NULL for invalid */
458 return NULL; 439 return NULL;
459 440
460} /* End verifyChannel */ 441} /* End verifyChannel */
461 442
462/* ------------------ Begin pc_sched_event ------------------------- */ 443/* ------------------ Begin pc_sched_event ------------------------- */
463 444
464static inline void pc_sched_event(struct channel *ch, int event) 445static void pc_sched_event(struct channel *ch, int event)
465{ /* Begin pc_sched_event */ 446{
466
467
468 /* ---------------------------------------------------------------------- 447 /* ----------------------------------------------------------------------
469 We call this to schedule interrupt processing on some event. The 448 We call this to schedule interrupt processing on some event. The
470 kernel sees our request and calls the related routine in OUR driver. 449 kernel sees our request and calls the related routine in OUR driver.
471 -------------------------------------------------------------------------*/ 450 -------------------------------------------------------------------------*/
472
473 ch->event |= 1 << event; 451 ch->event |= 1 << event;
474 schedule_work(&ch->tqueue); 452 schedule_work(&ch->tqueue);
475
476
477} /* End pc_sched_event */ 453} /* End pc_sched_event */
478 454
479/* ------------------ Begin epca_error ------------------------- */ 455/* ------------------ Begin epca_error ------------------------- */
480 456
481static void epca_error(int line, char *msg) 457static void epca_error(int line, char *msg)
482{ /* Begin epca_error */ 458{
483
484 printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg); 459 printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg);
485 return; 460}
486
487} /* End epca_error */
488 461
489/* ------------------ Begin pc_close ------------------------- */ 462/* ------------------ Begin pc_close ------------------------- */
490static void pc_close(struct tty_struct * tty, struct file * filp) 463static void pc_close(struct tty_struct * tty, struct file * filp)
491{ /* Begin pc_close */ 464{
492
493 struct channel *ch; 465 struct channel *ch;
494 unsigned long flags; 466 unsigned long flags;
495
496 /* --------------------------------------------------------- 467 /* ---------------------------------------------------------
497 verifyChannel returns the channel from the tty struct 468 verifyChannel returns the channel from the tty struct
498 if it is valid. This serves as a sanity check. 469 if it is valid. This serves as a sanity check.
499 ------------------------------------------------------------- */ 470 ------------------------------------------------------------- */
500 471 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if ch != NULL */
501 if ((ch = verifyChannel(tty)) != NULL) 472 spin_lock_irqsave(&epca_lock, flags);
502 { /* Begin if ch != NULL */ 473 if (tty_hung_up_p(filp)) {
503 474 spin_unlock_irqrestore(&epca_lock, flags);
504 save_flags(flags);
505 cli();
506
507 if (tty_hung_up_p(filp))
508 {
509 restore_flags(flags);
510 return; 475 return;
511 } 476 }
512
513 /* Check to see if the channel is open more than once */ 477 /* Check to see if the channel is open more than once */
514 if (ch->count-- > 1) 478 if (ch->count-- > 1) {
515 { /* Begin channel is open more than once */ 479 /* Begin channel is open more than once */
516
517 /* ------------------------------------------------------------- 480 /* -------------------------------------------------------------
518 Return without doing anything. Someone might still be using 481 Return without doing anything. Someone might still be using
519 the channel. 482 the channel.
520 ---------------------------------------------------------------- */ 483 ---------------------------------------------------------------- */
521 484 spin_unlock_irqrestore(&epca_lock, flags);
522 restore_flags(flags);
523 return; 485 return;
524 } /* End channel is open more than once */ 486 } /* End channel is open more than once */
525 487
526 /* Port open only once go ahead with shutdown & reset */ 488 /* Port open only once go ahead with shutdown & reset */
527 489 if (ch->count < 0)
528 if (ch->count < 0) 490 BUG();
529 {
530 ch->count = 0;
531 }
532 491
533 /* --------------------------------------------------------------- 492 /* ---------------------------------------------------------------
534 Let the rest of the driver know the channel is being closed. 493 Let the rest of the driver know the channel is being closed.
535 This becomes important if an open is attempted before close 494 This becomes important if an open is attempted before close
536 is finished. 495 is finished.
537 ------------------------------------------------------------------ */ 496 ------------------------------------------------------------------ */
538
539 ch->asyncflags |= ASYNC_CLOSING; 497 ch->asyncflags |= ASYNC_CLOSING;
540
541 tty->closing = 1; 498 tty->closing = 1;
542 499
543 if (ch->asyncflags & ASYNC_INITIALIZED) 500 spin_unlock_irqrestore(&epca_lock, flags);
544 { 501
502 if (ch->asyncflags & ASYNC_INITIALIZED) {
545 /* Setup an event to indicate when the transmit buffer empties */ 503 /* Setup an event to indicate when the transmit buffer empties */
546 setup_empty_event(tty, ch); 504 setup_empty_event(tty, ch);
547 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ 505 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
548 } 506 }
549
550 if (tty->driver->flush_buffer) 507 if (tty->driver->flush_buffer)
551 tty->driver->flush_buffer(tty); 508 tty->driver->flush_buffer(tty);
552 509
553 tty_ldisc_flush(tty); 510 tty_ldisc_flush(tty);
554 shutdown(ch); 511 shutdown(ch);
512
513 spin_lock_irqsave(&epca_lock, flags);
555 tty->closing = 0; 514 tty->closing = 0;
556 ch->event = 0; 515 ch->event = 0;
557 ch->tty = NULL; 516 ch->tty = NULL;
517 spin_unlock_irqrestore(&epca_lock, flags);
558 518
559 if (ch->blocked_open) 519 if (ch->blocked_open) { /* Begin if blocked_open */
560 { /* Begin if blocked_open */
561
562 if (ch->close_delay) 520 if (ch->close_delay)
563 {
564 msleep_interruptible(jiffies_to_msecs(ch->close_delay)); 521 msleep_interruptible(jiffies_to_msecs(ch->close_delay));
565 }
566
567 wake_up_interruptible(&ch->open_wait); 522 wake_up_interruptible(&ch->open_wait);
568
569 } /* End if blocked_open */ 523 } /* End if blocked_open */
570
571 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED | 524 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED |
572 ASYNC_CLOSING); 525 ASYNC_CLOSING);
573 wake_up_interruptible(&ch->close_wait); 526 wake_up_interruptible(&ch->close_wait);
574
575
576 restore_flags(flags);
577
578 } /* End if ch != NULL */ 527 } /* End if ch != NULL */
579
580} /* End pc_close */ 528} /* End pc_close */
581 529
582/* ------------------ Begin shutdown ------------------------- */ 530/* ------------------ Begin shutdown ------------------------- */
@@ -586,15 +534,14 @@ static void shutdown(struct channel *ch)
586 534
587 unsigned long flags; 535 unsigned long flags;
588 struct tty_struct *tty; 536 struct tty_struct *tty;
589 volatile struct board_chan *bc; 537 struct board_chan *bc;
590 538
591 if (!(ch->asyncflags & ASYNC_INITIALIZED)) 539 if (!(ch->asyncflags & ASYNC_INITIALIZED))
592 return; 540 return;
593 541
594 save_flags(flags); 542 spin_lock_irqsave(&epca_lock, flags);
595 cli();
596 globalwinon(ch);
597 543
544 globalwinon(ch);
598 bc = ch->brdchan; 545 bc = ch->brdchan;
599 546
600 /* ------------------------------------------------------------------ 547 /* ------------------------------------------------------------------
@@ -604,20 +551,17 @@ static void shutdown(struct channel *ch)
604 --------------------------------------------------------------------- */ 551 --------------------------------------------------------------------- */
605 552
606 if (bc) 553 if (bc)
607 bc->idata = 0; 554 writeb(0, &bc->idata);
608
609 tty = ch->tty; 555 tty = ch->tty;
610 556
611 /* ---------------------------------------------------------------- 557 /* ----------------------------------------------------------------
612 If we're a modem control device and HUPCL is on, drop RTS & DTR. 558 If we're a modem control device and HUPCL is on, drop RTS & DTR.
613 ------------------------------------------------------------------ */ 559 ------------------------------------------------------------------ */
614 560
615 if (tty->termios->c_cflag & HUPCL) 561 if (tty->termios->c_cflag & HUPCL) {
616 {
617 ch->omodem &= ~(ch->m_rts | ch->m_dtr); 562 ch->omodem &= ~(ch->m_rts | ch->m_dtr);
618 fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1); 563 fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1);
619 } 564 }
620
621 memoff(ch); 565 memoff(ch);
622 566
623 /* ------------------------------------------------------------------ 567 /* ------------------------------------------------------------------
@@ -628,7 +572,7 @@ static void shutdown(struct channel *ch)
628 /* Prevent future Digi programmed interrupts from coming active */ 572 /* Prevent future Digi programmed interrupts from coming active */
629 573
630 ch->asyncflags &= ~ASYNC_INITIALIZED; 574 ch->asyncflags &= ~ASYNC_INITIALIZED;
631 restore_flags(flags); 575 spin_unlock_irqrestore(&epca_lock, flags);
632 576
633} /* End shutdown */ 577} /* End shutdown */
634 578
@@ -636,7 +580,6 @@ static void shutdown(struct channel *ch)
636 580
637static void pc_hangup(struct tty_struct *tty) 581static void pc_hangup(struct tty_struct *tty)
638{ /* Begin pc_hangup */ 582{ /* Begin pc_hangup */
639
640 struct channel *ch; 583 struct channel *ch;
641 584
642 /* --------------------------------------------------------- 585 /* ---------------------------------------------------------
@@ -644,25 +587,21 @@ static void pc_hangup(struct tty_struct *tty)
644 if it is valid. This serves as a sanity check. 587 if it is valid. This serves as a sanity check.
645 ------------------------------------------------------------- */ 588 ------------------------------------------------------------- */
646 589
647 if ((ch = verifyChannel(tty)) != NULL) 590 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if ch != NULL */
648 { /* Begin if ch != NULL */
649
650 unsigned long flags; 591 unsigned long flags;
651 592
652 save_flags(flags);
653 cli();
654 if (tty->driver->flush_buffer) 593 if (tty->driver->flush_buffer)
655 tty->driver->flush_buffer(tty); 594 tty->driver->flush_buffer(tty);
656 tty_ldisc_flush(tty); 595 tty_ldisc_flush(tty);
657 shutdown(ch); 596 shutdown(ch);
658 597
598 spin_lock_irqsave(&epca_lock, flags);
659 ch->tty = NULL; 599 ch->tty = NULL;
660 ch->event = 0; 600 ch->event = 0;
661 ch->count = 0; 601 ch->count = 0;
662 restore_flags(flags);
663 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED); 602 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED);
603 spin_unlock_irqrestore(&epca_lock, flags);
664 wake_up_interruptible(&ch->open_wait); 604 wake_up_interruptible(&ch->open_wait);
665
666 } /* End if ch != NULL */ 605 } /* End if ch != NULL */
667 606
668} /* End pc_hangup */ 607} /* End pc_hangup */
@@ -672,18 +611,14 @@ static void pc_hangup(struct tty_struct *tty)
672static int pc_write(struct tty_struct * tty, 611static int pc_write(struct tty_struct * tty,
673 const unsigned char *buf, int bytesAvailable) 612 const unsigned char *buf, int bytesAvailable)
674{ /* Begin pc_write */ 613{ /* Begin pc_write */
675 614 unsigned int head, tail;
676 register unsigned int head, tail; 615 int dataLen;
677 register int dataLen; 616 int size;
678 register int size; 617 int amountCopied;
679 register int amountCopied;
680
681
682 struct channel *ch; 618 struct channel *ch;
683 unsigned long flags; 619 unsigned long flags;
684 int remain; 620 int remain;
685 volatile struct board_chan *bc; 621 struct board_chan *bc;
686
687 622
688 /* ---------------------------------------------------------------- 623 /* ----------------------------------------------------------------
689 pc_write is primarily called directly by the kernel routine 624 pc_write is primarily called directly by the kernel routine
@@ -706,24 +641,20 @@ static int pc_write(struct tty_struct * tty,
706 641
707 bc = ch->brdchan; 642 bc = ch->brdchan;
708 size = ch->txbufsize; 643 size = ch->txbufsize;
709
710 amountCopied = 0; 644 amountCopied = 0;
711 save_flags(flags);
712 cli();
713 645
646 spin_lock_irqsave(&epca_lock, flags);
714 globalwinon(ch); 647 globalwinon(ch);
715 648
716 head = bc->tin & (size - 1); 649 head = readw(&bc->tin) & (size - 1);
717 tail = bc->tout; 650 tail = readw(&bc->tout);
718 651
719 if (tail != bc->tout) 652 if (tail != readw(&bc->tout))
720 tail = bc->tout; 653 tail = readw(&bc->tout);
721 tail &= (size - 1); 654 tail &= (size - 1);
722 655
723 /* If head >= tail, head has not wrapped around. */ 656 /* If head >= tail, head has not wrapped around. */
724 if (head >= tail) 657 if (head >= tail) { /* Begin head has not wrapped */
725 { /* Begin head has not wrapped */
726
727 /* --------------------------------------------------------------- 658 /* ---------------------------------------------------------------
728 remain (much like dataLen above) represents the total amount of 659 remain (much like dataLen above) represents the total amount of
729 space available on the card for data. Here dataLen represents 660 space available on the card for data. Here dataLen represents
@@ -731,26 +662,19 @@ static int pc_write(struct tty_struct * tty,
731 buffer. This is important because a memcpy cannot be told to 662 buffer. This is important because a memcpy cannot be told to
732 automatically wrap around when it hits the buffer end. 663 automatically wrap around when it hits the buffer end.
733 ------------------------------------------------------------------ */ 664 ------------------------------------------------------------------ */
734
735 dataLen = size - head; 665 dataLen = size - head;
736 remain = size - (head - tail) - 1; 666 remain = size - (head - tail) - 1;
737 667 } else { /* Begin head has wrapped around */
738 } /* End head has not wrapped */
739 else
740 { /* Begin head has wrapped around */
741 668
742 remain = tail - head - 1; 669 remain = tail - head - 1;
743 dataLen = remain; 670 dataLen = remain;
744 671
745 } /* End head has wrapped around */ 672 } /* End head has wrapped around */
746
747 /* ------------------------------------------------------------------- 673 /* -------------------------------------------------------------------
748 Check the space on the card. If we have more data than 674 Check the space on the card. If we have more data than
749 space, reduce the amount of data to fit the space. 675 space, reduce the amount of data to fit the space.
750 ---------------------------------------------------------------------- */ 676 ---------------------------------------------------------------------- */
751
752 bytesAvailable = min(remain, bytesAvailable); 677 bytesAvailable = min(remain, bytesAvailable);
753
754 txwinon(ch); 678 txwinon(ch);
755 while (bytesAvailable > 0) 679 while (bytesAvailable > 0)
756 { /* Begin while there is data to copy onto card */ 680 { /* Begin while there is data to copy onto card */
@@ -767,26 +691,21 @@ static int pc_write(struct tty_struct * tty,
767 amountCopied += dataLen; 691 amountCopied += dataLen;
768 bytesAvailable -= dataLen; 692 bytesAvailable -= dataLen;
769 693
770 if (head >= size) 694 if (head >= size) {
771 {
772 head = 0; 695 head = 0;
773 dataLen = tail; 696 dataLen = tail;
774 } 697 }
775
776 } /* End while there is data to copy onto card */ 698 } /* End while there is data to copy onto card */
777
778 ch->statusflags |= TXBUSY; 699 ch->statusflags |= TXBUSY;
779 globalwinon(ch); 700 globalwinon(ch);
780 bc->tin = head; 701 writew(head, &bc->tin);
781 702
782 if ((ch->statusflags & LOWWAIT) == 0) 703 if ((ch->statusflags & LOWWAIT) == 0) {
783 {
784 ch->statusflags |= LOWWAIT; 704 ch->statusflags |= LOWWAIT;
785 bc->ilow = 1; 705 writeb(1, &bc->ilow);
786 } 706 }
787 memoff(ch); 707 memoff(ch);
788 restore_flags(flags); 708 spin_unlock_irqrestore(&epca_lock, flags);
789
790 return(amountCopied); 709 return(amountCopied);
791 710
792} /* End pc_write */ 711} /* End pc_write */
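The head/tail arithmetic in pc_write() above, and its mirror images in pc_write_room() and pc_chars_in_buffer() below, is ordinary power-of-two ring-buffer math: one slot is always left unused, so the free space is (tail - head - 1) masked to the buffer size, and the contiguous chunk that can be copied without wrapping is size - head. A stand-alone user-space sketch with illustrative names; size must be a power of two, as the card's transmit buffers are:

#include <stdio.h>

/*
 * Sketch of the transmit-ring arithmetic used in pc_write():
 *   free  - total writable bytes; one slot is always left unused, so
 *           head == tail means the ring is empty
 *   chunk - how many bytes can be copied before the copy would have to
 *           wrap past the end of the buffer
 */
static void ring_space(unsigned int head, unsigned int tail, unsigned int size,
		       unsigned int *free, unsigned int *chunk)
{
	head &= size - 1;
	tail &= size - 1;
	if (head >= tail) {		/* head has not wrapped around */
		*chunk = size - head;
		*free = size - (head - tail) - 1;
	} else {			/* head has wrapped around */
		*free = tail - head - 1;
		*chunk = *free;
	}
}

int main(void)
{
	unsigned int free, chunk;

	ring_space(30, 10, 32, &free, &chunk);
	printf("free=%u chunk=%u\n", free, chunk);	/* prints free=11 chunk=2 */
	return 0;
}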
@@ -795,11 +714,7 @@ static int pc_write(struct tty_struct * tty,
795 714
796static void pc_put_char(struct tty_struct *tty, unsigned char c) 715static void pc_put_char(struct tty_struct *tty, unsigned char c)
797{ /* Begin pc_put_char */ 716{ /* Begin pc_put_char */
798
799
800 pc_write(tty, &c, 1); 717 pc_write(tty, &c, 1);
801 return;
802
803} /* End pc_put_char */ 718} /* End pc_put_char */
804 719
805/* ------------------ Begin pc_write_room ------------------------- */ 720/* ------------------ Begin pc_write_room ------------------------- */
@@ -811,7 +726,7 @@ static int pc_write_room(struct tty_struct *tty)
811 struct channel *ch; 726 struct channel *ch;
812 unsigned long flags; 727 unsigned long flags;
813 unsigned int head, tail; 728 unsigned int head, tail;
814 volatile struct board_chan *bc; 729 struct board_chan *bc;
815 730
816 remain = 0; 731 remain = 0;
817 732
@@ -820,33 +735,29 @@ static int pc_write_room(struct tty_struct *tty)
820 if it is valid. This serves as a sanity check. 735 if it is valid. This serves as a sanity check.
821 ------------------------------------------------------------- */ 736 ------------------------------------------------------------- */
822 737
823 if ((ch = verifyChannel(tty)) != NULL) 738 if ((ch = verifyChannel(tty)) != NULL) {
824 { 739 spin_lock_irqsave(&epca_lock, flags);
825 save_flags(flags);
826 cli();
827 globalwinon(ch); 740 globalwinon(ch);
828 741
829 bc = ch->brdchan; 742 bc = ch->brdchan;
830 head = bc->tin & (ch->txbufsize - 1); 743 head = readw(&bc->tin) & (ch->txbufsize - 1);
831 tail = bc->tout; 744 tail = readw(&bc->tout);
832 745
833 if (tail != bc->tout) 746 if (tail != readw(&bc->tout))
834 tail = bc->tout; 747 tail = readw(&bc->tout);
835 /* Wrap tail if necessary */ 748 /* Wrap tail if necessary */
836 tail &= (ch->txbufsize - 1); 749 tail &= (ch->txbufsize - 1);
837 750
838 if ((remain = tail - head - 1) < 0 ) 751 if ((remain = tail - head - 1) < 0 )
839 remain += ch->txbufsize; 752 remain += ch->txbufsize;
840 753
841 if (remain && (ch->statusflags & LOWWAIT) == 0) 754 if (remain && (ch->statusflags & LOWWAIT) == 0) {
842 {
843 ch->statusflags |= LOWWAIT; 755 ch->statusflags |= LOWWAIT;
844 bc->ilow = 1; 756 writeb(1, &bc->ilow);
845 } 757 }
846 memoff(ch); 758 memoff(ch);
847 restore_flags(flags); 759 spin_unlock_irqrestore(&epca_lock, flags);
848 } 760 }
849
850 /* Return how much room is left on card */ 761 /* Return how much room is left on card */
851 return remain; 762 return remain;
852 763
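pc_write_room() above also shows the second conversion made throughout this patch: board_chan now sits behind an ioremap()ed pointer, so the volatile qualifiers and direct dereferences give way to readw()/writew()/readb()/writeb(). A minimal sketch of that idiom under a 2.6-era kernel; the register block below is an illustrative stand-in, not the FEP's real layout:

#include <asm/io.h>

/* Illustrative register block; not the real struct board_chan. */
struct demo_chan_regs {
	unsigned short tin;	/* transmit head, maintained by the host */
	unsigned short tout;	/* transmit tail, maintained by the card */
	unsigned char ilow;	/* ask the card for a low-water event    */
};

static unsigned int demo_tx_room(struct demo_chan_regs __iomem *regs,
				 unsigned int bufsize)
{
	unsigned int head, tail, remain;

	head = readw(&regs->tin) & (bufsize - 1);	/* was: head = bc->tin & (size - 1); */
	tail = readw(&regs->tout) & (bufsize - 1);	/* was: tail = bc->tout; tail &= ...  */

	remain = (tail - head - 1) & (bufsize - 1);
	if (remain)
		writeb(1, &regs->ilow);			/* was: bc->ilow = 1; */
	return remain;
}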
@@ -862,8 +773,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
862 int remain; 773 int remain;
863 unsigned long flags; 774 unsigned long flags;
864 struct channel *ch; 775 struct channel *ch;
865 volatile struct board_chan *bc; 776 struct board_chan *bc;
866
867 777
868 /* --------------------------------------------------------- 778 /* ---------------------------------------------------------
869 verifyChannel returns the channel from the tty struct 779 verifyChannel returns the channel from the tty struct
@@ -873,34 +783,27 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
873 if ((ch = verifyChannel(tty)) == NULL) 783 if ((ch = verifyChannel(tty)) == NULL)
874 return(0); 784 return(0);
875 785
876 save_flags(flags); 786 spin_lock_irqsave(&epca_lock, flags);
877 cli();
878 globalwinon(ch); 787 globalwinon(ch);
879 788
880 bc = ch->brdchan; 789 bc = ch->brdchan;
881 tail = bc->tout; 790 tail = readw(&bc->tout);
882 head = bc->tin; 791 head = readw(&bc->tin);
883 ctail = ch->mailbox->cout; 792 ctail = readw(&ch->mailbox->cout);
884 793
885 if (tail == head && ch->mailbox->cin == ctail && bc->tbusy == 0) 794 if (tail == head && readw(&ch->mailbox->cin) == ctail && readb(&bc->tbusy) == 0)
886 chars = 0; 795 chars = 0;
887 else 796 else { /* Begin if some space on the card has been used */
888 { /* Begin if some space on the card has been used */ 797 head = readw(&bc->tin) & (ch->txbufsize - 1);
889
890 head = bc->tin & (ch->txbufsize - 1);
891 tail &= (ch->txbufsize - 1); 798 tail &= (ch->txbufsize - 1);
892
893 /* -------------------------------------------------------------- 799 /* --------------------------------------------------------------
894 The logic here is basically the opposite of pc_write_room above: 800 The logic here is basically the opposite of pc_write_room above:
895 here we are finding the number of bytes in the buffer that are filled, 801 here we are finding the number of bytes in the buffer that are filled,
896 not the number of bytes that are empty. 802 not the number of bytes that are empty.
897 ------------------------------------------------------------------- */ 803 ------------------------------------------------------------------- */
898
899 if ((remain = tail - head - 1) < 0 ) 804 if ((remain = tail - head - 1) < 0 )
900 remain += ch->txbufsize; 805 remain += ch->txbufsize;
901
902 chars = (int)(ch->txbufsize - remain); 806 chars = (int)(ch->txbufsize - remain);
903
904 /* ------------------------------------------------------------- 807 /* -------------------------------------------------------------
905 Make it possible to wakeup anything waiting for output 808 Make it possible to wakeup anything waiting for output
906 in tty_ioctl.c, etc. 809 in tty_ioctl.c, etc.
@@ -908,15 +811,12 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
908 If not already set, set up an event to indicate when the 811 If not already set, set up an event to indicate when the
909 transmit buffer empties 812 transmit buffer empties
910 ----------------------------------------------------------------- */ 813 ----------------------------------------------------------------- */
911
912 if (!(ch->statusflags & EMPTYWAIT)) 814 if (!(ch->statusflags & EMPTYWAIT))
913 setup_empty_event(tty,ch); 815 setup_empty_event(tty,ch);
914 816
915 } /* End if some space on the card has been used */ 817 } /* End if some space on the card has been used */
916
917 memoff(ch); 818 memoff(ch);
918 restore_flags(flags); 819 spin_unlock_irqrestore(&epca_lock, flags);
919
920 /* Return number of characters residing on card. */ 820 /* Return number of characters residing on card. */
921 return(chars); 821 return(chars);
922 822
@@ -930,67 +830,46 @@ static void pc_flush_buffer(struct tty_struct *tty)
930 unsigned int tail; 830 unsigned int tail;
931 unsigned long flags; 831 unsigned long flags;
932 struct channel *ch; 832 struct channel *ch;
933 volatile struct board_chan *bc; 833 struct board_chan *bc;
934
935
936 /* --------------------------------------------------------- 834 /* ---------------------------------------------------------
937 verifyChannel returns the channel from the tty struct 835 verifyChannel returns the channel from the tty struct
938 if it is valid. This serves as a sanity check. 836 if it is valid. This serves as a sanity check.
939 ------------------------------------------------------------- */ 837 ------------------------------------------------------------- */
940
941 if ((ch = verifyChannel(tty)) == NULL) 838 if ((ch = verifyChannel(tty)) == NULL)
942 return; 839 return;
943 840
944 save_flags(flags); 841 spin_lock_irqsave(&epca_lock, flags);
945 cli();
946
947 globalwinon(ch); 842 globalwinon(ch);
948
949 bc = ch->brdchan; 843 bc = ch->brdchan;
950 tail = bc->tout; 844 tail = readw(&bc->tout);
951
952 /* Have FEP move tout pointer; effectively flushing transmit buffer */ 845 /* Have FEP move tout pointer; effectively flushing transmit buffer */
953
954 fepcmd(ch, STOUT, (unsigned) tail, 0, 0, 0); 846 fepcmd(ch, STOUT, (unsigned) tail, 0, 0, 0);
955
956 memoff(ch); 847 memoff(ch);
957 restore_flags(flags); 848 spin_unlock_irqrestore(&epca_lock, flags);
958
959 wake_up_interruptible(&tty->write_wait); 849 wake_up_interruptible(&tty->write_wait);
960 tty_wakeup(tty); 850 tty_wakeup(tty);
961
962} /* End pc_flush_buffer */ 851} /* End pc_flush_buffer */
963 852
964/* ------------------ Begin pc_flush_chars ---------------------- */ 853/* ------------------ Begin pc_flush_chars ---------------------- */
965 854
966static void pc_flush_chars(struct tty_struct *tty) 855static void pc_flush_chars(struct tty_struct *tty)
967{ /* Begin pc_flush_chars */ 856{ /* Begin pc_flush_chars */
968
969 struct channel * ch; 857 struct channel * ch;
970
971 /* --------------------------------------------------------- 858 /* ---------------------------------------------------------
972 verifyChannel returns the channel from the tty struct 859 verifyChannel returns the channel from the tty struct
973 if it is valid. This serves as a sanity check. 860 if it is valid. This serves as a sanity check.
974 ------------------------------------------------------------- */ 861 ------------------------------------------------------------- */
975 862 if ((ch = verifyChannel(tty)) != NULL) {
976 if ((ch = verifyChannel(tty)) != NULL)
977 {
978 unsigned long flags; 863 unsigned long flags;
979 864 spin_lock_irqsave(&epca_lock, flags);
980 save_flags(flags);
981 cli();
982
983 /* ---------------------------------------------------------------- 865 /* ----------------------------------------------------------------
984 If not already set and the transmitter is busy setup an event 866 If not already set and the transmitter is busy setup an event
985 to indicate when the transmit empties. 867 to indicate when the transmit empties.
986 ------------------------------------------------------------------- */ 868 ------------------------------------------------------------------- */
987
988 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT)) 869 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT))
989 setup_empty_event(tty,ch); 870 setup_empty_event(tty,ch);
990 871 spin_unlock_irqrestore(&epca_lock, flags);
991 restore_flags(flags);
992 } 872 }
993
994} /* End pc_flush_chars */ 873} /* End pc_flush_chars */
995 874
996/* ------------------ Begin block_til_ready ---------------------- */ 875/* ------------------ Begin block_til_ready ---------------------- */
@@ -998,14 +877,11 @@ static void pc_flush_chars(struct tty_struct *tty)
998static int block_til_ready(struct tty_struct *tty, 877static int block_til_ready(struct tty_struct *tty,
999 struct file *filp, struct channel *ch) 878 struct file *filp, struct channel *ch)
1000{ /* Begin block_til_ready */ 879{ /* Begin block_til_ready */
1001
1002 DECLARE_WAITQUEUE(wait,current); 880 DECLARE_WAITQUEUE(wait,current);
1003 int retval, do_clocal = 0; 881 int retval, do_clocal = 0;
1004 unsigned long flags; 882 unsigned long flags;
1005 883
1006 884 if (tty_hung_up_p(filp)) {
1007 if (tty_hung_up_p(filp))
1008 {
1009 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 885 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
1010 retval = -EAGAIN; 886 retval = -EAGAIN;
1011 else 887 else
@@ -1017,8 +893,7 @@ static int block_til_ready(struct tty_struct *tty,
1017 If the device is in the middle of being closed, then block 893 If the device is in the middle of being closed, then block
1018 until it's done, and then try again. 894 until it's done, and then try again.
1019 -------------------------------------------------------------------- */ 895 -------------------------------------------------------------------- */
1020 if (ch->asyncflags & ASYNC_CLOSING) 896 if (ch->asyncflags & ASYNC_CLOSING) {
1021 {
1022 interruptible_sleep_on(&ch->close_wait); 897 interruptible_sleep_on(&ch->close_wait);
1023 898
1024 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 899 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
@@ -1027,43 +902,29 @@ static int block_til_ready(struct tty_struct *tty,
1027 return -ERESTARTSYS; 902 return -ERESTARTSYS;
1028 } 903 }
1029 904
1030 if (filp->f_flags & O_NONBLOCK) 905 if (filp->f_flags & O_NONBLOCK) {
1031 {
1032 /* ----------------------------------------------------------------- 906 /* -----------------------------------------------------------------
1033 If non-blocking mode is set, then make the check up front 907 If non-blocking mode is set, then make the check up front
1034 and then exit. 908 and then exit.
1035 -------------------------------------------------------------------- */ 909 -------------------------------------------------------------------- */
1036
1037 ch->asyncflags |= ASYNC_NORMAL_ACTIVE; 910 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
1038
1039 return 0; 911 return 0;
1040 } 912 }
1041
1042
1043 if (tty->termios->c_cflag & CLOCAL) 913 if (tty->termios->c_cflag & CLOCAL)
1044 do_clocal = 1; 914 do_clocal = 1;
1045 915 /* Block waiting for the carrier detect and the line to become free */
1046 /* Block waiting for the carrier detect and the line to become free */
1047 916
1048 retval = 0; 917 retval = 0;
1049 add_wait_queue(&ch->open_wait, &wait); 918 add_wait_queue(&ch->open_wait, &wait);
1050 save_flags(flags);
1051 cli();
1052
1053 919
920 spin_lock_irqsave(&epca_lock, flags);
1054 /* We dec count so that pc_close will know when to free things */ 921 /* We dec count so that pc_close will know when to free things */
1055 if (!tty_hung_up_p(filp)) 922 if (!tty_hung_up_p(filp))
1056 ch->count--; 923 ch->count--;
1057
1058 restore_flags(flags);
1059
1060 ch->blocked_open++; 924 ch->blocked_open++;
1061
1062 while(1) 925 while(1)
1063 { /* Begin forever while */ 926 { /* Begin forever while */
1064
1065 set_current_state(TASK_INTERRUPTIBLE); 927 set_current_state(TASK_INTERRUPTIBLE);
1066
1067 if (tty_hung_up_p(filp) || 928 if (tty_hung_up_p(filp) ||
1068 !(ch->asyncflags & ASYNC_INITIALIZED)) 929 !(ch->asyncflags & ASYNC_INITIALIZED))
1069 { 930 {
@@ -1073,17 +934,14 @@ static int block_til_ready(struct tty_struct *tty,
1073 retval = -ERESTARTSYS; 934 retval = -ERESTARTSYS;
1074 break; 935 break;
1075 } 936 }
1076
1077 if (!(ch->asyncflags & ASYNC_CLOSING) && 937 if (!(ch->asyncflags & ASYNC_CLOSING) &&
1078 (do_clocal || (ch->imodem & ch->dcd))) 938 (do_clocal || (ch->imodem & ch->dcd)))
1079 break; 939 break;
1080 940 if (signal_pending(current)) {
1081 if (signal_pending(current))
1082 {
1083 retval = -ERESTARTSYS; 941 retval = -ERESTARTSYS;
1084 break; 942 break;
1085 } 943 }
1086 944 spin_unlock_irqrestore(&epca_lock, flags);
1087 /* --------------------------------------------------------------- 945 /* ---------------------------------------------------------------
1088 Allow someone else to be scheduled. We will occasionally go 946 Allow someone else to be scheduled. We will occasionally go
1089 through this loop until one of the above conditions change. 947 through this loop until one of the above conditions change.
@@ -1091,25 +949,23 @@ static int block_til_ready(struct tty_struct *tty,
1091 prevent this loop from hogging the cpu. 949 prevent this loop from hogging the cpu.
1092 ------------------------------------------------------------------ */ 950 ------------------------------------------------------------------ */
1093 schedule(); 951 schedule();
952 spin_lock_irqsave(&epca_lock, flags);
1094 953
1095 } /* End forever while */ 954 } /* End forever while */
1096 955
1097 current->state = TASK_RUNNING; 956 current->state = TASK_RUNNING;
1098 remove_wait_queue(&ch->open_wait, &wait); 957 remove_wait_queue(&ch->open_wait, &wait);
1099 cli();
1100 if (!tty_hung_up_p(filp)) 958 if (!tty_hung_up_p(filp))
1101 ch->count++; 959 ch->count++;
1102 restore_flags(flags);
1103
1104 ch->blocked_open--; 960 ch->blocked_open--;
1105 961
962 spin_unlock_irqrestore(&epca_lock, flags);
963
1106 if (retval) 964 if (retval)
1107 return retval; 965 return retval;
1108 966
1109 ch->asyncflags |= ASYNC_NORMAL_ACTIVE; 967 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
1110
1111 return 0; 968 return 0;
1112
1113} /* End block_til_ready */ 969} /* End block_til_ready */
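block_til_ready() above follows the standard serial-driver open-blocking pattern, with the new twist from this patch that epca_lock must be released around schedule() and retaken afterwards, since a spinlock, unlike the old cli(), must never be held across a sleep. A condensed sketch of that loop shape for a 2.6-era kernel; apart from epca_lock, the names are placeholders:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(epca_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_open_wait);

/* Returns 0 once ready() is true, -ERESTARTSYS if a signal arrives first. */
static int demo_block_until(int (*ready)(void))
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int retval = 0;

	add_wait_queue(&demo_open_wait, &wait);
	spin_lock_irqsave(&epca_lock, flags);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (ready())
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		/* A spinlock may not be held across a sleep: drop it around schedule(). */
		spin_unlock_irqrestore(&epca_lock, flags);
		schedule();
		spin_lock_irqsave(&epca_lock, flags);
	}
	set_current_state(TASK_RUNNING);
	spin_unlock_irqrestore(&epca_lock, flags);
	remove_wait_queue(&demo_open_wait, &wait);
	return retval;
}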
1114 970
1115/* ------------------ Begin pc_open ---------------------- */ 971/* ------------------ Begin pc_open ---------------------- */
@@ -1120,17 +976,12 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1120 struct channel *ch; 976 struct channel *ch;
1121 unsigned long flags; 977 unsigned long flags;
1122 int line, retval, boardnum; 978 int line, retval, boardnum;
1123 volatile struct board_chan *bc; 979 struct board_chan *bc;
1124 volatile unsigned int head; 980 unsigned int head;
1125 981
1126 line = tty->index; 982 line = tty->index;
1127 if (line < 0 || line >= nbdevs) 983 if (line < 0 || line >= nbdevs)
1128 { 984 return -ENODEV;
1129 printk(KERN_ERR "<Error> - pc_open : line out of range in pc_open\n");
1130 tty->driver_data = NULL;
1131 return(-ENODEV);
1132 }
1133
1134 985
1135 ch = &digi_channels[line]; 986 ch = &digi_channels[line];
1136 boardnum = ch->boardnum; 987 boardnum = ch->boardnum;
@@ -1143,66 +994,49 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1143 goes here. 994 goes here.
1144 ---------------------------------------------------------------------- */ 995 ---------------------------------------------------------------------- */
1145 996
1146 if (invalid_lilo_config) 997 if (invalid_lilo_config) {
1147 {
1148 if (setup_error_code & INVALID_BOARD_TYPE) 998 if (setup_error_code & INVALID_BOARD_TYPE)
1149 printk(KERN_ERR "<Error> - pc_open: Invalid board type specified in LILO command\n"); 999 printk(KERN_ERR "epca: pc_open: Invalid board type specified in kernel options.\n");
1150
1151 if (setup_error_code & INVALID_NUM_PORTS) 1000 if (setup_error_code & INVALID_NUM_PORTS)
1152 printk(KERN_ERR "<Error> - pc_open: Invalid number of ports specified in LILO command\n"); 1001 printk(KERN_ERR "epca: pc_open: Invalid number of ports specified in kernel options.\n");
1153
1154 if (setup_error_code & INVALID_MEM_BASE) 1002 if (setup_error_code & INVALID_MEM_BASE)
1155 printk(KERN_ERR "<Error> - pc_open: Invalid board memory address specified in LILO command\n"); 1003 printk(KERN_ERR "epca: pc_open: Invalid board memory address specified in kernel options.\n");
1156
1157 if (setup_error_code & INVALID_PORT_BASE) 1004 if (setup_error_code & INVALID_PORT_BASE)
1158 printk(KERN_ERR "<Error> - pc_open: Invalid board port address specified in LILO command\n"); 1005 printk(KERN_ERR "epca: pc_open: Invalid board port address specified in kernel options.\n");
1159
1160 if (setup_error_code & INVALID_BOARD_STATUS) 1006 if (setup_error_code & INVALID_BOARD_STATUS)
1161 printk(KERN_ERR "<Error> - pc_open: Invalid board status specified in LILO command\n"); 1007 printk(KERN_ERR "epca: pc_open: Invalid board status specified in kernel options.\n");
1162
1163 if (setup_error_code & INVALID_ALTPIN) 1008 if (setup_error_code & INVALID_ALTPIN)
1164 printk(KERN_ERR "<Error> - pc_open: Invalid board altpin specified in LILO command\n"); 1009 printk(KERN_ERR "epca: pc_open: Invalid board altpin specified in kernel options.\n");
1165
1166 tty->driver_data = NULL; /* Mark this device as 'down' */ 1010 tty->driver_data = NULL; /* Mark this device as 'down' */
1167 return(-ENODEV); 1011 return -ENODEV;
1168 } 1012 }
1169 1013 if (boardnum >= num_cards || boards[boardnum].status == DISABLED) {
1170 if ((boardnum >= num_cards) || (boards[boardnum].status == DISABLED))
1171 {
1172 tty->driver_data = NULL; /* Mark this device as 'down' */ 1014 tty->driver_data = NULL; /* Mark this device as 'down' */
1173 return(-ENODEV); 1015 return(-ENODEV);
1174 } 1016 }
1175 1017
1176 if (( bc = ch->brdchan) == 0) 1018 if ((bc = ch->brdchan) == 0) {
1177 {
1178 tty->driver_data = NULL; 1019 tty->driver_data = NULL;
1179 return(-ENODEV); 1020 return -ENODEV;
1180 } 1021 }
1181 1022
1023 spin_lock_irqsave(&epca_lock, flags);
1182 /* ------------------------------------------------------------------ 1024 /* ------------------------------------------------------------------
1183 Every time a channel is opened, increment a counter. This is 1025 Every time a channel is opened, increment a counter. This is
1184 necessary because we do not wish to flush and shutdown the channel 1026 necessary because we do not wish to flush and shutdown the channel
1185 until the last app holding the channel open, closes it. 1027 until the last app holding the channel open, closes it.
1186 --------------------------------------------------------------------- */ 1028 --------------------------------------------------------------------- */
1187
1188 ch->count++; 1029 ch->count++;
1189
1190 /* ---------------------------------------------------------------- 1030 /* ----------------------------------------------------------------
1191 Set a kernel structures pointer to our local channel 1031 Set a kernel structures pointer to our local channel
1192 structure. This way we can get to it when passed only 1032 structure. This way we can get to it when passed only
1193 a tty struct. 1033 a tty struct.
1194 ------------------------------------------------------------------ */ 1034 ------------------------------------------------------------------ */
1195
1196 tty->driver_data = ch; 1035 tty->driver_data = ch;
1197
1198 /* ---------------------------------------------------------------- 1036 /* ----------------------------------------------------------------
1199 If this is the first time the channel has been opened, initialize 1037 If this is the first time the channel has been opened, initialize
1200 the tty->termios struct otherwise let pc_close handle it. 1038 the tty->termios struct otherwise let pc_close handle it.
1201 -------------------------------------------------------------------- */ 1039 -------------------------------------------------------------------- */
1202
1203 save_flags(flags);
1204 cli();
1205
1206 globalwinon(ch); 1040 globalwinon(ch);
1207 ch->statusflags = 0; 1041 ch->statusflags = 0;
1208 1042
@@ -1213,8 +1047,8 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1213 Set receive head and tail ptrs to each other. This indicates 1047 Set receive head and tail ptrs to each other. This indicates
1214 no data available to read. 1048 no data available to read.
1215 ----------------------------------------------------------------- */ 1049 ----------------------------------------------------------------- */
1216 head = bc->rin; 1050 head = readw(&bc->rin);
1217 bc->rout = head; 1051 writew(head, &bc->rout);
1218 1052
1219 /* Set the channels associated tty structure */ 1053 /* Set the channels associated tty structure */
1220 ch->tty = tty; 1054 ch->tty = tty;
@@ -1224,122 +1058,74 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1224 issues, etc. It affects both control flags and input flags. 1058 issues, etc. It affects both control flags and input flags.
1225 -------------------------------------------------------------------- */ 1059 -------------------------------------------------------------------- */
1226 epcaparam(tty,ch); 1060 epcaparam(tty,ch);
1227
1228 ch->asyncflags |= ASYNC_INITIALIZED; 1061 ch->asyncflags |= ASYNC_INITIALIZED;
1229 memoff(ch); 1062 memoff(ch);
1230 1063 spin_unlock_irqrestore(&epca_lock, flags);
1231 restore_flags(flags);
1232 1064
1233 retval = block_til_ready(tty, filp, ch); 1065 retval = block_til_ready(tty, filp, ch);
1234 if (retval) 1066 if (retval)
1235 {
1236 return retval; 1067 return retval;
1237 }
1238
1239 /* ------------------------------------------------------------- 1068 /* -------------------------------------------------------------
1240 Set this again in case a hangup set it to zero while this 1069 Set this again in case a hangup set it to zero while this
1241 open() was waiting for the line... 1070 open() was waiting for the line...
1242 --------------------------------------------------------------- */ 1071 --------------------------------------------------------------- */
1072 spin_lock_irqsave(&epca_lock, flags);
1243 ch->tty = tty; 1073 ch->tty = tty;
1244
1245 save_flags(flags);
1246 cli();
1247 globalwinon(ch); 1074 globalwinon(ch);
1248
1249 /* Enable Digi Data events */ 1075 /* Enable Digi Data events */
1250 bc->idata = 1; 1076 writeb(1, &bc->idata);
1251
1252 memoff(ch); 1077 memoff(ch);
1253 restore_flags(flags); 1078 spin_unlock_irqrestore(&epca_lock, flags);
1254
1255 return 0; 1079 return 0;
1256
1257} /* End pc_open */ 1080} /* End pc_open */
1258 1081
1259#ifdef MODULE
1260static int __init epca_module_init(void) 1082static int __init epca_module_init(void)
1261{ /* Begin init_module */ 1083{ /* Begin init_module */
1262 1084 return pc_init();
1263 unsigned long flags;
1264
1265 save_flags(flags);
1266 cli();
1267
1268 pc_init();
1269
1270 restore_flags(flags);
1271
1272 return(0);
1273} 1085}
1274 1086
1275module_init(epca_module_init); 1087module_init(epca_module_init);
1276#endif
1277 1088
1278#ifdef ENABLE_PCI
1279static struct pci_driver epca_driver; 1089static struct pci_driver epca_driver;
1280#endif
1281
1282#ifdef MODULE
1283/* -------------------- Begin cleanup_module ---------------------- */
1284 1090
1285static void __exit epca_module_exit(void) 1091static void __exit epca_module_exit(void)
1286{ 1092{
1287
1288 int count, crd; 1093 int count, crd;
1289 struct board_info *bd; 1094 struct board_info *bd;
1290 struct channel *ch; 1095 struct channel *ch;
1291 unsigned long flags;
1292 1096
1293 del_timer_sync(&epca_timer); 1097 del_timer_sync(&epca_timer);
1294 1098
1295 save_flags(flags);
1296 cli();
1297
1298 if ((tty_unregister_driver(pc_driver)) || 1099 if ((tty_unregister_driver(pc_driver)) ||
1299 (tty_unregister_driver(pc_info))) 1100 (tty_unregister_driver(pc_info)))
1300 { 1101 {
1301 printk(KERN_WARNING "<Error> - DIGI : cleanup_module failed to un-register tty driver\n"); 1102 printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n");
1302 restore_flags(flags);
1303 return; 1103 return;
1304 } 1104 }
1305 put_tty_driver(pc_driver); 1105 put_tty_driver(pc_driver);
1306 put_tty_driver(pc_info); 1106 put_tty_driver(pc_info);
1307 1107
1308 for (crd = 0; crd < num_cards; crd++) 1108 for (crd = 0; crd < num_cards; crd++) { /* Begin for each card */
1309 { /* Begin for each card */
1310
1311 bd = &boards[crd]; 1109 bd = &boards[crd];
1312
1313 if (!bd) 1110 if (!bd)
1314 { /* Begin sanity check */ 1111 { /* Begin sanity check */
1315 printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n"); 1112 printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n");
1316 return; 1113 return;
1317 } /* End sanity check */ 1114 } /* End sanity check */
1318 1115 ch = card_ptr[crd];
1319 ch = card_ptr[crd];
1320
1321 for (count = 0; count < bd->numports; count++, ch++) 1116 for (count = 0; count < bd->numports; count++, ch++)
1322 { /* Begin for each port */ 1117 { /* Begin for each port */
1323 1118 if (ch) {
1324 if (ch)
1325 {
1326 if (ch->tty) 1119 if (ch->tty)
1327 tty_hangup(ch->tty); 1120 tty_hangup(ch->tty);
1328 kfree(ch->tmp_buf); 1121 kfree(ch->tmp_buf);
1329 } 1122 }
1330
1331 } /* End for each port */ 1123 } /* End for each port */
1332 } /* End for each card */ 1124 } /* End for each card */
1333
1334#ifdef ENABLE_PCI
1335 pci_unregister_driver (&epca_driver); 1125 pci_unregister_driver (&epca_driver);
1336#endif
1337
1338 restore_flags(flags);
1339
1340} 1126}
1127
1341module_exit(epca_module_exit); 1128module_exit(epca_module_exit);
1342#endif /* MODULE */
1343 1129
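With the global cli() wrappers gone, epca_module_init() simply returns pc_init(), and epca_module_exit() undoes the registration. Underneath is the usual 2.6 tty-driver module lifecycle; a hedged sketch using only calls that already appear in this file (alloc_tty_driver, tty_set_operations, tty_register_driver and their counterparts), with demo_* names and field values as illustrative placeholders:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static struct tty_driver *demo_driver;

static struct tty_operations demo_ops = {
	/* .open, .close, .write, ... would be filled in here */
};

static int __init demo_init(void)
{
	int err;

	demo_driver = alloc_tty_driver(4);		/* 4 illustrative ports */
	if (!demo_driver)
		return -ENOMEM;

	demo_driver->owner = THIS_MODULE;
	demo_driver->name = "ttyDEMO";
	demo_driver->major = 0;				/* dynamic major */
	demo_driver->type = TTY_DRIVER_TYPE_SERIAL;
	demo_driver->init_termios = tty_std_termios;
	demo_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(demo_driver, &demo_ops);

	err = tty_register_driver(demo_driver);
	if (err)
		put_tty_driver(demo_driver);
	return err;
}

static void __exit demo_exit(void)
{
	/* Mirror image of demo_init(): unregister, then drop the reference. */
	tty_unregister_driver(demo_driver);
	put_tty_driver(demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");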
1344static struct tty_operations pc_ops = { 1130static struct tty_operations pc_ops = {
1345 .open = pc_open, 1131 .open = pc_open,
@@ -1371,34 +1157,15 @@ static struct tty_operations info_ops = {
1371 1157
1372/* ------------------ Begin pc_init ---------------------- */ 1158/* ------------------ Begin pc_init ---------------------- */
1373 1159
1374int __init pc_init(void) 1160static int __init pc_init(void)
1375{ /* Begin pc_init */ 1161{ /* Begin pc_init */
1376
1377 /* ----------------------------------------------------------------
1378 pc_init is called by the operating system during boot up prior to
1379 any open calls being made. In the older versions of Linux (Prior
1380 to 2.0.0) an entry is made into tty_io.c. A pointer to the last
1381 memory location (from kernel space) used (kmem_start) is passed
1382 to pc_init. It is pc_inits responsibility to modify this value
1383 for any memory that the Digi driver might need and then return
1384 this value to the operating system. For example if the driver
1385 wishes to allocate 1K of kernel memory, pc_init would return
1386 (kmem_start + 1024). This memory (Between kmem_start and kmem_start
1387 + 1024) would then be available for use exclusively by the driver.
1388 In this case our driver does not allocate any of this kernel
1389 memory.
1390 ------------------------------------------------------------------*/
1391
1392 ulong flags;
1393 int crd; 1162 int crd;
1394 struct board_info *bd; 1163 struct board_info *bd;
1395 unsigned char board_id = 0; 1164 unsigned char board_id = 0;
1396 1165
1397#ifdef ENABLE_PCI
1398 int pci_boards_found, pci_count; 1166 int pci_boards_found, pci_count;
1399 1167
1400 pci_count = 0; 1168 pci_count = 0;
1401#endif /* ENABLE_PCI */
1402 1169
1403 pc_driver = alloc_tty_driver(MAX_ALLOC); 1170 pc_driver = alloc_tty_driver(MAX_ALLOC);
1404 if (!pc_driver) 1171 if (!pc_driver)
@@ -1416,8 +1183,7 @@ int __init pc_init(void)
1416 Note : If LILO has run epca_setup then epca_setup will handle defining 1183 Note : If LILO has run epca_setup then epca_setup will handle defining
1417 num_cards as well as copying the data into the board structure. 1184 num_cards as well as copying the data into the board structure.
1418 -------------------------------------------------------------------------- */ 1185 -------------------------------------------------------------------------- */
1419 if (!liloconfig) 1186 if (!liloconfig) { /* Begin driver has been configured via. epcaconfig */
1420 { /* Begin driver has been configured via. epcaconfig */
1421 1187
1422 nbdevs = NBDEVS; 1188 nbdevs = NBDEVS;
1423 num_cards = NUMCARDS; 1189 num_cards = NUMCARDS;
@@ -1440,8 +1206,6 @@ int __init pc_init(void)
1440 1206
1441 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION); 1207 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION);
1442 1208
1443#ifdef ENABLE_PCI
1444
1445 /* ------------------------------------------------------------------ 1209 /* ------------------------------------------------------------------
1446 NOTE : This code assumes that the number of ports found in 1210 NOTE : This code assumes that the number of ports found in
1447 the boards array is correct. This could be wrong if 1211 the boards array is correct. This could be wrong if
@@ -1467,8 +1231,6 @@ int __init pc_init(void)
1467 pci_boards_found += init_PCI(); 1231 pci_boards_found += init_PCI();
1468 num_cards += pci_boards_found; 1232 num_cards += pci_boards_found;
1469 1233
1470#endif /* ENABLE_PCI */
1471
1472 pc_driver->owner = THIS_MODULE; 1234 pc_driver->owner = THIS_MODULE;
1473 pc_driver->name = "ttyD"; 1235 pc_driver->name = "ttyD";
1474 pc_driver->devfs_name = "tts/D"; 1236 pc_driver->devfs_name = "tts/D";
@@ -1499,9 +1261,6 @@ int __init pc_init(void)
1499 tty_set_operations(pc_info, &info_ops); 1261 tty_set_operations(pc_info, &info_ops);
1500 1262
1501 1263
1502 save_flags(flags);
1503 cli();
1504
1505 for (crd = 0; crd < num_cards; crd++) 1264 for (crd = 0; crd < num_cards; crd++)
1506 { /* Begin for each card */ 1265 { /* Begin for each card */
1507 1266
@@ -1610,11 +1369,7 @@ int __init pc_init(void)
1610 if ((board_id & 0x30) == 0x30) 1369 if ((board_id & 0x30) == 0x30)
1611 bd->memory_seg = 0x8000; 1370 bd->memory_seg = 0x8000;
1612 1371
1613 } /* End it is an XI card */ 1372 } else printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port);
1614 else
1615 {
1616 printk(KERN_ERR "<Error> - Board at 0x%x doesn't appear to be an XI\n",(int)bd->port);
1617 }
1618 break; 1373 break;
1619 1374
1620 } /* End switch on bd->type */ 1375 } /* End switch on bd->type */
@@ -1634,9 +1389,6 @@ int __init pc_init(void)
1634 init_timer(&epca_timer); 1389 init_timer(&epca_timer);
1635 epca_timer.function = epcapoll; 1390 epca_timer.function = epcapoll;
1636 mod_timer(&epca_timer, jiffies + HZ/25); 1391 mod_timer(&epca_timer, jiffies + HZ/25);
1637
1638 restore_flags(flags);
1639
1640 return 0; 1392 return 0;
1641 1393
1642} /* End pc_init */ 1394} /* End pc_init */
@@ -1647,10 +1399,10 @@ static void post_fep_init(unsigned int crd)
1647{ /* Begin post_fep_init */ 1399{ /* Begin post_fep_init */
1648 1400
1649 int i; 1401 int i;
1650 unchar *memaddr; 1402 unsigned char *memaddr;
1651 volatile struct global_data *gd; 1403 struct global_data *gd;
1652 struct board_info *bd; 1404 struct board_info *bd;
1653 volatile struct board_chan *bc; 1405 struct board_chan *bc;
1654 struct channel *ch; 1406 struct channel *ch;
1655 int shrinkmem = 0, lowwater ; 1407 int shrinkmem = 0, lowwater ;
1656 1408
@@ -1669,9 +1421,7 @@ static void post_fep_init(unsigned int crd)
1669 after DIGI_INIT has been called will return the proper values. 1421 after DIGI_INIT has been called will return the proper values.
1670 ------------------------------------------------------------------- */ 1422 ------------------------------------------------------------------- */
1671 1423
1672 if (bd->type >= PCIXEM) /* If the board in question is PCI */ 1424 if (bd->type >= PCIXEM) { /* Begin get PCI number of ports */
1673 { /* Begin get PCI number of ports */
1674
1675 /* -------------------------------------------------------------------- 1425 /* --------------------------------------------------------------------
1676 Below we use XEMPORTS as a memory offset regardless of which PCI 1426 Below we use XEMPORTS as a memory offset regardless of which PCI
1677 card it is. This is because all of the supported PCI cards have 1427 card it is. This is because all of the supported PCI cards have
@@ -1685,15 +1435,15 @@ static void post_fep_init(unsigned int crd)
1685 (FYI - The id should be located at 0x1ac (And may use up to 4 bytes 1435 (FYI - The id should be located at 0x1ac (And may use up to 4 bytes
1686 if the box in question is a XEM or CX)). 1436 if the box in question is a XEM or CX)).
1687 ------------------------------------------------------------------------ */ 1437 ------------------------------------------------------------------------ */
1688 1438 /* PCI cards are already remapped at this point ISA are not */
1689 bd->numports = (unsigned short)*(unsigned char *)bus_to_virt((unsigned long) 1439 bd->numports = readw(bd->re_map_membase + XEMPORTS);
1690 (bd->re_map_membase + XEMPORTS));
1691
1692
1693 epcaassert(bd->numports <= 64,"PCI returned an invalid number of ports"); 1440 epcaassert(bd->numports <= 64,"PCI returned an invalid number of ports");
1694 nbdevs += (bd->numports); 1441 nbdevs += (bd->numports);
1695 1442 } else {
1696 } /* End get PCI number of ports */ 1443 /* Fix up the mappings for ISA/EISA etc */
1444 /* FIXME: 64K - can we be smarter ? */
1445 bd->re_map_membase = ioremap(bd->membase, 0x10000);
1446 }
1697 1447
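The new else-branch above is where ISA/EISA boards finally get a proper kernel mapping: instead of trusting bus_to_virt(), the 64K window at bd->membase is ioremap()ed, as the PCI boards already are. A minimal sketch of that mapping step for a 2.6-era kernel; the demo_* names are hypothetical and the 0x10000 window size mirrors the FIXME'd choice in the patch:

#include <asm/io.h>
#include <linux/errno.h>

/* Map a board's 64K shared-memory window and read one 16-bit field out of it. */
static int demo_map_board(unsigned long membase, unsigned long ports_offset,
			  unsigned short *ports_out)
{
	unsigned char __iomem *win;

	/* Same call and window size as the else-branch above (FIXME: 64K). */
	win = ioremap(membase, 0x10000);
	if (!win)
		return -ENOMEM;

	*ports_out = readw(win + ports_offset);	/* e.g. the XEMPORTS count */

	iounmap(win);
	return 0;
}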
1698 if (crd != 0) 1448 if (crd != 0)
1699 card_ptr[crd] = card_ptr[crd-1] + boards[crd-1].numports; 1449 card_ptr[crd] = card_ptr[crd-1] + boards[crd-1].numports;
@@ -1701,19 +1451,9 @@ static void post_fep_init(unsigned int crd)
1701 card_ptr[crd] = &digi_channels[crd]; /* <- For card 0 only */ 1451 card_ptr[crd] = &digi_channels[crd]; /* <- For card 0 only */
1702 1452
1703 ch = card_ptr[crd]; 1453 ch = card_ptr[crd];
1704
1705
1706 epcaassert(ch <= &digi_channels[nbdevs - 1], "ch out of range"); 1454 epcaassert(ch <= &digi_channels[nbdevs - 1], "ch out of range");
1707 1455
1708 memaddr = (unchar *)bd->re_map_membase; 1456 memaddr = bd->re_map_membase;
1709
1710 /*
1711 The below command is necessary because newer kernels (2.1.x and
1712 up) do not have a 1:1 virtual to physical mapping. The below
1713 call adjust for that.
1714 */
1715
1716 memaddr = (unsigned char *)bus_to_virt((unsigned long)memaddr);
1717 1457
1718 /* ----------------------------------------------------------------- 1458 /* -----------------------------------------------------------------
1719 The below assignment will set bc to point at the BEGINNING of 1459 The below assignment will set bc to point at the BEGINNING of
@@ -1721,7 +1461,7 @@ static void post_fep_init(unsigned int crd)
1721 8 and 64 of these structures. 1461 8 and 64 of these structures.
1722 -------------------------------------------------------------------- */ 1462 -------------------------------------------------------------------- */
1723 1463
1724 bc = (volatile struct board_chan *)((ulong)memaddr + CHANSTRUCT); 1464 bc = (struct board_chan *)(memaddr + CHANSTRUCT);
1725 1465
1726 /* ------------------------------------------------------------------- 1466 /* -------------------------------------------------------------------
1727 The below assignment will set gd to point at the BEGINNING of 1467 The below assignment will set gd to point at the BEGINNING of
@@ -1730,20 +1470,18 @@ static void post_fep_init(unsigned int crd)
1730 pointer begins at 0xd10. 1470 pointer begins at 0xd10.
1731 ---------------------------------------------------------------------- */ 1471 ---------------------------------------------------------------------- */
1732 1472
1733 gd = (volatile struct global_data *)((ulong)memaddr + GLOBAL); 1473 gd = (struct global_data *)(memaddr + GLOBAL);
1734 1474
1735 /* -------------------------------------------------------------------- 1475 /* --------------------------------------------------------------------
1736 XEPORTS (address 0xc22) points at the number of channels the 1476 XEPORTS (address 0xc22) points at the number of channels the
1737 card supports. (For 64XE, XI, XEM, and XR use 0xc02) 1477 card supports. (For 64XE, XI, XEM, and XR use 0xc02)
1738 ----------------------------------------------------------------------- */ 1478 ----------------------------------------------------------------------- */
1739 1479
1740 if (((bd->type == PCXEVE) | (bd->type == PCXE)) && 1480 if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3))
1741 (*(ushort *)((ulong)memaddr + XEPORTS) < 3))
1742 shrinkmem = 1; 1481 shrinkmem = 1;
1743 if (bd->type < PCIXEM) 1482 if (bd->type < PCIXEM)
1744 if (!request_region((int)bd->port, 4, board_desc[bd->type])) 1483 if (!request_region((int)bd->port, 4, board_desc[bd->type]))
1745 return; 1484 return;
1746
1747 memwinon(bd, 0); 1485 memwinon(bd, 0);
1748 1486
1749 /* -------------------------------------------------------------------- 1487 /* --------------------------------------------------------------------
@@ -1753,17 +1491,16 @@ static void post_fep_init(unsigned int crd)
1753 1491
1754 /* For every port on the card do ..... */ 1492 /* For every port on the card do ..... */
1755 1493
1756 for (i = 0; i < bd->numports; i++, ch++, bc++) 1494 for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */
1757 { /* Begin for each port */ 1495 unsigned long flags;
1758 1496
1759 ch->brdchan = bc; 1497 ch->brdchan = bc;
1760 ch->mailbox = gd; 1498 ch->mailbox = gd;
1761 INIT_WORK(&ch->tqueue, do_softint, ch); 1499 INIT_WORK(&ch->tqueue, do_softint, ch);
1762 ch->board = &boards[crd]; 1500 ch->board = &boards[crd];
1763 1501
1764 switch (bd->type) 1502 spin_lock_irqsave(&epca_lock, flags);
1765 { /* Begin switch bd->type */ 1503 switch (bd->type) {
1766
1767 /* ---------------------------------------------------------------- 1504 /* ----------------------------------------------------------------
1768 Since some of the boards use different bitmaps for their 1505 Since some of the boards use different bitmaps for their
1769 control signals we cannot hard code these values and retain 1506 control signals we cannot hard code these values and retain
@@ -1796,14 +1533,12 @@ static void post_fep_init(unsigned int crd)
1796 1533
1797 } /* End switch bd->type */ 1534 } /* End switch bd->type */
1798 1535
1799 if (boards[crd].altpin) 1536 if (boards[crd].altpin) {
1800 {
1801 ch->dsr = ch->m_dcd; 1537 ch->dsr = ch->m_dcd;
1802 ch->dcd = ch->m_dsr; 1538 ch->dcd = ch->m_dsr;
1803 ch->digiext.digi_flags |= DIGI_ALTPIN; 1539 ch->digiext.digi_flags |= DIGI_ALTPIN;
1804 } 1540 }
1805 else 1541 else {
1806 {
1807 ch->dcd = ch->m_dcd; 1542 ch->dcd = ch->m_dcd;
1808 ch->dsr = ch->m_dsr; 1543 ch->dsr = ch->m_dsr;
1809 } 1544 }
@@ -1813,14 +1548,12 @@ static void post_fep_init(unsigned int crd)
1813 ch->magic = EPCA_MAGIC; 1548 ch->magic = EPCA_MAGIC;
1814 ch->tty = NULL; 1549 ch->tty = NULL;
1815 1550
1816 if (shrinkmem) 1551 if (shrinkmem) {
1817 {
1818 fepcmd(ch, SETBUFFER, 32, 0, 0, 0); 1552 fepcmd(ch, SETBUFFER, 32, 0, 0, 0);
1819 shrinkmem = 0; 1553 shrinkmem = 0;
1820 } 1554 }
1821 1555
1822 switch (bd->type) 1556 switch (bd->type) {
1823 { /* Begin switch bd->type */
1824 1557
1825 case PCIXEM: 1558 case PCIXEM:
1826 case PCIXRJ: 1559 case PCIXRJ:
@@ -1878,13 +1611,13 @@ static void post_fep_init(unsigned int crd)
1878 1611
1879 fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0); 1612 fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0);
1880 1613
1881 bc->edelay = 100; 1614 writew(100, &bc->edelay);
1882 bc->idata = 1; 1615 writeb(1, &bc->idata);
1883 1616
1884 ch->startc = bc->startc; 1617 ch->startc = readb(&bc->startc);
1885 ch->stopc = bc->stopc; 1618 ch->stopc = readb(&bc->stopc);
1886 ch->startca = bc->startca; 1619 ch->startca = readb(&bc->startca);
1887 ch->stopca = bc->stopca; 1620 ch->stopca = readb(&bc->stopca);
1888 1621
1889 ch->fepcflag = 0; 1622 ch->fepcflag = 0;
1890 ch->fepiflag = 0; 1623 ch->fepiflag = 0;
@@ -1899,27 +1632,23 @@ static void post_fep_init(unsigned int crd)
1899 ch->blocked_open = 0; 1632 ch->blocked_open = 0;
1900 init_waitqueue_head(&ch->open_wait); 1633 init_waitqueue_head(&ch->open_wait);
1901 init_waitqueue_head(&ch->close_wait); 1634 init_waitqueue_head(&ch->close_wait);
1635
1636 spin_unlock_irqrestore(&epca_lock, flags);
1637
1902 ch->tmp_buf = kmalloc(ch->txbufsize,GFP_KERNEL); 1638 ch->tmp_buf = kmalloc(ch->txbufsize,GFP_KERNEL);
1903 if (!(ch->tmp_buf)) 1639 if (!ch->tmp_buf) {
1904 {
1905 printk(KERN_ERR "POST FEP INIT : kmalloc failed for port 0x%x\n",i); 1640 printk(KERN_ERR "POST FEP INIT : kmalloc failed for port 0x%x\n",i);
1906 release_region((int)bd->port, 4); 1641 release_region((int)bd->port, 4);
1907 while(i-- > 0) 1642 while(i-- > 0)
1908 kfree((ch--)->tmp_buf); 1643 kfree((ch--)->tmp_buf);
1909 return; 1644 return;
1910 } 1645 } else
1911 else
1912 memset((void *)ch->tmp_buf,0,ch->txbufsize); 1646 memset((void *)ch->tmp_buf,0,ch->txbufsize);
1913 } /* End for each port */ 1647 } /* End for each port */
1914 1648
1915 printk(KERN_INFO 1649 printk(KERN_INFO
1916 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n", 1650 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
1917 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports); 1651 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports);
1918 sprintf(mesg,
1919 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
1920 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports);
1921 console_print(mesg);
1922
1923 memwinoff(bd, 0); 1652 memwinoff(bd, 0);
1924 1653
1925} /* End post_fep_init */ 1654} /* End post_fep_init */
@@ -1943,9 +1672,6 @@ static void epcapoll(unsigned long ignored)
1943 buffer empty) and acts on those events. 1672 buffer empty) and acts on those events.
1944 ----------------------------------------------------------------------- */ 1673 ----------------------------------------------------------------------- */
1945 1674
1946 save_flags(flags);
1947 cli();
1948
1949 for (crd = 0; crd < num_cards; crd++) 1675 for (crd = 0; crd < num_cards; crd++)
1950 { /* Begin for each card */ 1676 { /* Begin for each card */
1951 1677
@@ -1961,6 +1687,8 @@ static void epcapoll(unsigned long ignored)
1961 some legacy boards. 1687 some legacy boards.
1962 ---------------------------------------------------------------- */ 1688 ---------------------------------------------------------------- */
1963 1689
1690 spin_lock_irqsave(&epca_lock, flags);
1691
1964 assertmemoff(ch); 1692 assertmemoff(ch);
1965 1693
1966 globalwinon(ch); 1694 globalwinon(ch);
@@ -1970,21 +1698,19 @@ static void epcapoll(unsigned long ignored)
1970 the transmit or receive queue. 1698 the transmit or receive queue.
1971 ------------------------------------------------------------------- */ 1699 ------------------------------------------------------------------- */
1972 1700
1973 head = ch->mailbox->ein; 1701 head = readw(&ch->mailbox->ein);
1974 tail = ch->mailbox->eout; 1702 tail = readw(&ch->mailbox->eout);
1975 1703
1976 /* If head isn't equal to tail we have an event */ 1704 /* If head isn't equal to tail we have an event */
1977 1705
1978 if (head != tail) 1706 if (head != tail)
1979 doevent(crd); 1707 doevent(crd);
1980
1981 memoff(ch); 1708 memoff(ch);
1982 1709
1983 } /* End for each card */ 1710 spin_unlock_irqrestore(&epca_lock, flags);
1984 1711
1712 } /* End for each card */
1985 mod_timer(&epca_timer, jiffies + (HZ / 25)); 1713 mod_timer(&epca_timer, jiffies + (HZ / 25));
1986
1987 restore_flags(flags);
1988} /* End epcapoll */ 1714} /* End epcapoll */
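epcapoll() above, armed from pc_init(), is the classic self-rearming kernel timer: the handler does its work under epca_lock and then calls mod_timer() on itself, here every HZ/25 jiffies. A stripped-down sketch of that pattern with 2.6-era timer calls; the poll body and demo_* names are placeholders:

#include <linux/jiffies.h>
#include <linux/param.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(epca_lock);
static struct timer_list demo_timer;

static void demo_poll(unsigned long ignored)
{
	unsigned long flags;

	spin_lock_irqsave(&epca_lock, flags);
	/* ... compare each card's event-queue head and tail here ... */
	spin_unlock_irqrestore(&epca_lock, flags);

	/* Re-arm ourselves, exactly as epcapoll() does. */
	mod_timer(&demo_timer, jiffies + HZ / 25);
}

static void demo_start_polling(void)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_poll;
	demo_timer.data = 0;
	mod_timer(&demo_timer, jiffies + HZ / 25);
}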
1989 1715
1990/* --------------------- Begin doevent ------------------------ */ 1716/* --------------------- Begin doevent ------------------------ */
@@ -1992,53 +1718,42 @@ static void epcapoll(unsigned long ignored)
1992static void doevent(int crd) 1718static void doevent(int crd)
1993{ /* Begin doevent */ 1719{ /* Begin doevent */
1994 1720
1995 volatile unchar *eventbuf; 1721 void *eventbuf;
1996 struct channel *ch, *chan0; 1722 struct channel *ch, *chan0;
1997 static struct tty_struct *tty; 1723 static struct tty_struct *tty;
1998 volatile struct board_info *bd; 1724 struct board_info *bd;
1999 volatile struct board_chan *bc; 1725 struct board_chan *bc;
2000 register volatile unsigned int tail, head; 1726 unsigned int tail, head;
2001 register int event, channel; 1727 int event, channel;
2002 register int mstat, lstat; 1728 int mstat, lstat;
2003 1729
2004 /* ------------------------------------------------------------------- 1730 /* -------------------------------------------------------------------
2005 This subroutine is called by epcapoll when an event is detected 1731 This subroutine is called by epcapoll when an event is detected
2006 in the event queue. This routine responds to those events. 1732 in the event queue. This routine responds to those events.
2007 --------------------------------------------------------------------- */ 1733 --------------------------------------------------------------------- */
2008
2009 bd = &boards[crd]; 1734 bd = &boards[crd];
2010 1735
2011 chan0 = card_ptr[crd]; 1736 chan0 = card_ptr[crd];
2012 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range"); 1737 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
2013
2014 assertgwinon(chan0); 1738 assertgwinon(chan0);
2015 1739 while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein)))
2016 while ((tail = chan0->mailbox->eout) != (head = chan0->mailbox->ein))
2017 { /* Begin while something in event queue */ 1740 { /* Begin while something in event queue */
2018
2019 assertgwinon(chan0); 1741 assertgwinon(chan0);
2020 1742 eventbuf = bd->re_map_membase + tail + ISTART;
2021 eventbuf = (volatile unchar *)bus_to_virt((ulong)(bd->re_map_membase + tail + ISTART));
2022
2023 /* Get the channel the event occurred on */ 1743 /* Get the channel the event occurred on */
2024 channel = eventbuf[0]; 1744 channel = readb(eventbuf);
2025
2026 /* Get the actual event code that occurred */ 1745 /* Get the actual event code that occurred */
2027 event = eventbuf[1]; 1746 event = readb(eventbuf + 1);
2028
2029 /* ---------------------------------------------------------------- 1747 /* ----------------------------------------------------------------
2030 The two assignments below get the current modem status (mstat) 1748 The two assignments below get the current modem status (mstat)
2031 and the previous modem status (lstat). These are useful because 1749 and the previous modem status (lstat). These are useful because
2032 an event could signal a change in modem signals itself. 1750 an event could signal a change in modem signals itself.
2033 ------------------------------------------------------------------- */ 1751 ------------------------------------------------------------------- */
2034 1752 mstat = readb(eventbuf + 2);
2035 mstat = eventbuf[2]; 1753 lstat = readb(eventbuf + 3);
2036 lstat = eventbuf[3];
2037 1754
2038 ch = chan0 + channel; 1755 ch = chan0 + channel;
2039 1756 if ((unsigned)channel >= bd->numports || !ch) {
2040 if ((unsigned)channel >= bd->numports || !ch)
2041 {
2042 if (channel >= bd->numports) 1757 if (channel >= bd->numports)
2043 ch = chan0; 1758 ch = chan0;
2044 bc = ch->brdchan; 1759 bc = ch->brdchan;
@@ -2048,97 +1763,53 @@ static void doevent(int crd)
2048 if ((bc = ch->brdchan) == NULL) 1763 if ((bc = ch->brdchan) == NULL)
2049 goto next; 1764 goto next;
2050 1765
2051 if (event & DATA_IND) 1766 if (event & DATA_IND) { /* Begin DATA_IND */
2052 { /* Begin DATA_IND */
2053
2054 receive_data(ch); 1767 receive_data(ch);
2055 assertgwinon(ch); 1768 assertgwinon(ch);
2056
2057 } /* End DATA_IND */ 1769 } /* End DATA_IND */
2058 /* else *//* Fix for DCD transition missed bug */ 1770 /* else *//* Fix for DCD transition missed bug */
2059 if (event & MODEMCHG_IND) 1771 if (event & MODEMCHG_IND) { /* Begin MODEMCHG_IND */
2060 { /* Begin MODEMCHG_IND */
2061
2062 /* A modem signal change has been indicated */ 1772 /* A modem signal change has been indicated */
2063
2064 ch->imodem = mstat; 1773 ch->imodem = mstat;
2065 1774 if (ch->asyncflags & ASYNC_CHECK_CD) {
2066 if (ch->asyncflags & ASYNC_CHECK_CD)
2067 {
2068 if (mstat & ch->dcd) /* We are now receiving dcd */ 1775 if (mstat & ch->dcd) /* We are now receiving dcd */
2069 wake_up_interruptible(&ch->open_wait); 1776 wake_up_interruptible(&ch->open_wait);
2070 else 1777 else
2071 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */ 1778 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */
2072 } 1779 }
2073
2074 } /* End MODEMCHG_IND */ 1780 } /* End MODEMCHG_IND */
2075
2076 tty = ch->tty; 1781 tty = ch->tty;
2077 if (tty) 1782 if (tty) { /* Begin if valid tty */
2078 { /* Begin if valid tty */ 1783 if (event & BREAK_IND) { /* Begin if BREAK_IND */
2079
2080 if (event & BREAK_IND)
2081 { /* Begin if BREAK_IND */
2082
2083 /* A break has been indicated */ 1784 /* A break has been indicated */
2084
2085 tty->flip.count++; 1785 tty->flip.count++;
2086 *tty->flip.flag_buf_ptr++ = TTY_BREAK; 1786 *tty->flip.flag_buf_ptr++ = TTY_BREAK;
2087
2088 *tty->flip.char_buf_ptr++ = 0; 1787 *tty->flip.char_buf_ptr++ = 0;
2089
2090 tty_schedule_flip(tty); 1788 tty_schedule_flip(tty);
2091 1789 } else if (event & LOWTX_IND) { /* Begin LOWTX_IND */
2092 } /* End if BREAK_IND */
2093 else
2094 if (event & LOWTX_IND)
2095 { /* Begin LOWTX_IND */
2096
2097 if (ch->statusflags & LOWWAIT) 1790 if (ch->statusflags & LOWWAIT)
2098 { /* Begin if LOWWAIT */ 1791 { /* Begin if LOWWAIT */
2099
2100 ch->statusflags &= ~LOWWAIT; 1792 ch->statusflags &= ~LOWWAIT;
2101 tty_wakeup(tty); 1793 tty_wakeup(tty);
2102 wake_up_interruptible(&tty->write_wait); 1794 wake_up_interruptible(&tty->write_wait);
2103
2104 } /* End if LOWWAIT */ 1795 } /* End if LOWWAIT */
2105 1796 } else if (event & EMPTYTX_IND) { /* Begin EMPTYTX_IND */
2106 } /* End LOWTX_IND */
2107 else
2108 if (event & EMPTYTX_IND)
2109 { /* Begin EMPTYTX_IND */
2110
2111 /* This event is generated by setup_empty_event */ 1797 /* This event is generated by setup_empty_event */
2112
2113 ch->statusflags &= ~TXBUSY; 1798 ch->statusflags &= ~TXBUSY;
2114 if (ch->statusflags & EMPTYWAIT) 1799 if (ch->statusflags & EMPTYWAIT) { /* Begin if EMPTYWAIT */
2115 { /* Begin if EMPTYWAIT */
2116
2117 ch->statusflags &= ~EMPTYWAIT; 1800 ch->statusflags &= ~EMPTYWAIT;
2118 tty_wakeup(tty); 1801 tty_wakeup(tty);
2119
2120 wake_up_interruptible(&tty->write_wait); 1802 wake_up_interruptible(&tty->write_wait);
2121
2122 } /* End if EMPTYWAIT */ 1803 } /* End if EMPTYWAIT */
2123
2124 } /* End EMPTYTX_IND */ 1804 } /* End EMPTYTX_IND */
2125
2126 } /* End if valid tty */ 1805 } /* End if valid tty */
2127
2128
2129 next: 1806 next:
2130 globalwinon(ch); 1807 globalwinon(ch);
2131 1808 BUG_ON(!bc);
2132 if (!bc) 1809 writew(1, &bc->idata);
2133 printk(KERN_ERR "<Error> - bc == NULL in doevent!\n"); 1810 writew((tail + 4) & (IMAX - ISTART - 4), &chan0->mailbox->eout);
2134 else
2135 bc->idata = 1;
2136
2137 chan0->mailbox->eout = (tail + 4) & (IMAX - ISTART - 4);
2138 globalwinon(chan0); 1811 globalwinon(chan0);
2139
2140 } /* End while something in event queue */ 1812 } /* End while something in event queue */
2141
2142} /* End doevent */ 1813} /* End doevent */
2143 1814
2144/* --------------------- Begin fepcmd ------------------------ */ 1815/* --------------------- Begin fepcmd ------------------------ */
@@ -2146,7 +1817,6 @@ static void doevent(int crd)
2146static void fepcmd(struct channel *ch, int cmd, int word_or_byte, 1817static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
2147 int byte2, int ncmds, int bytecmd) 1818 int byte2, int ncmds, int bytecmd)
2148{ /* Begin fepcmd */ 1819{ /* Begin fepcmd */
2149
2150 unchar *memaddr; 1820 unchar *memaddr;
2151 unsigned int head, cmdTail, cmdStart, cmdMax; 1821 unsigned int head, cmdTail, cmdStart, cmdMax;
2152 long count; 1822 long count;
@@ -2155,93 +1825,57 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
2155 /* This is the routine in which commands may be passed to the card. */ 1825 /* This is the routine in which commands may be passed to the card. */
2156 1826
2157 if (ch->board->status == DISABLED) 1827 if (ch->board->status == DISABLED)
2158 {
2159 return; 1828 return;
2160 }
2161
2162 assertgwinon(ch); 1829 assertgwinon(ch);
2163
2164 /* Remember head (As well as max) is just an offset not a base addr */ 1830 /* Remember head (As well as max) is just an offset not a base addr */
2165 head = ch->mailbox->cin; 1831 head = readw(&ch->mailbox->cin);
2166
2167 /* cmdStart is a base address */ 1832 /* cmdStart is a base address */
2168 cmdStart = ch->mailbox->cstart; 1833 cmdStart = readw(&ch->mailbox->cstart);
2169
2170 /* ------------------------------------------------------------------ 1834 /* ------------------------------------------------------------------
2171 We do the addition below because we do not want a max pointer 1835 We do the addition below because we do not want a max pointer
2172 relative to cmdStart. We want a max pointer that points at the 1836 relative to cmdStart. We want a max pointer that points at the
2173 physical end of the command queue. 1837 physical end of the command queue.
2174 -------------------------------------------------------------------- */ 1838 -------------------------------------------------------------------- */
2175 1839 cmdMax = (cmdStart + 4 + readw(&ch->mailbox->cmax));
2176 cmdMax = (cmdStart + 4 + (ch->mailbox->cmax));
2177
2178 memaddr = ch->board->re_map_membase; 1840 memaddr = ch->board->re_map_membase;
2179 1841
2180 /* 1842 if (head >= (cmdMax - cmdStart) || (head & 03)) {
2181 The below command is necessary because newer kernels (2.1.x and 1843 printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__, cmd, head);
2182 up) do not have a 1:1 virtual to physical mapping. The below 1844 printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__, cmdMax, cmdStart);
2183 call adjust for that.
2184 */
2185
2186 memaddr = (unsigned char *)bus_to_virt((unsigned long)memaddr);
2187
2188 if (head >= (cmdMax - cmdStart) || (head & 03))
2189 {
2190 printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__,
2191 cmd, head);
2192 printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__,
2193 cmdMax, cmdStart);
2194 return; 1845 return;
2195 } 1846 }
2196 1847 if (bytecmd) {
2197 if (bytecmd) 1848 writeb(cmd, memaddr + head + cmdStart + 0);
2198 { 1849 writeb(ch->channelnum, memaddr + head + cmdStart + 1);
2199 *(volatile unchar *)(memaddr + head + cmdStart + 0) = (unchar)cmd;
2200
2201 *(volatile unchar *)(memaddr + head + cmdStart + 1) = (unchar)ch->channelnum;
2202 /* Below word_or_byte is bits to set */ 1850 /* Below word_or_byte is bits to set */
2203 *(volatile unchar *)(memaddr + head + cmdStart + 2) = (unchar)word_or_byte; 1851 writeb(word_or_byte, memaddr + head + cmdStart + 2);
2204 /* Below byte2 is bits to reset */ 1852 /* Below byte2 is bits to reset */
2205 *(volatile unchar *)(memaddr + head + cmdStart + 3) = (unchar)byte2; 1853 writeb(byte2, memaddr + head + cmdStart + 3);
2206 1854 } else {
2207 } 1855 writeb(cmd, memaddr + head + cmdStart + 0);
2208 else 1856 writeb(ch->channelnum, memaddr + head + cmdStart + 1);
2209 { 1857 writeb(word_or_byte, memaddr + head + cmdStart + 2);
2210 *(volatile unchar *)(memaddr + head + cmdStart + 0) = (unchar)cmd;
2211 *(volatile unchar *)(memaddr + head + cmdStart + 1) = (unchar)ch->channelnum;
2212 *(volatile ushort*)(memaddr + head + cmdStart + 2) = (ushort)word_or_byte;
2213 } 1858 }
2214
2215 head = (head + 4) & (cmdMax - cmdStart - 4); 1859 head = (head + 4) & (cmdMax - cmdStart - 4);
2216 ch->mailbox->cin = head; 1860 writew(head, &ch->mailbox->cin);
2217
2218 count = FEPTIMEOUT; 1861 count = FEPTIMEOUT;
2219 1862
2220 for (;;) 1863 for (;;) { /* Begin forever loop */
2221 { /* Begin forever loop */
2222
2223 count--; 1864 count--;
2224 if (count == 0) 1865 if (count == 0) {
2225 {
2226 printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n"); 1866 printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n");
2227 return; 1867 return;
2228 } 1868 }
2229 1869 head = readw(&ch->mailbox->cin);
2230 head = ch->mailbox->cin; 1870 cmdTail = readw(&ch->mailbox->cout);
2231 cmdTail = ch->mailbox->cout;
2232
2233 n = (head - cmdTail) & (cmdMax - cmdStart - 4); 1871 n = (head - cmdTail) & (cmdMax - cmdStart - 4);
2234
2235 /* ---------------------------------------------------------- 1872 /* ----------------------------------------------------------
2236 Basically this will break when the FEP acknowledges the 1873 Basically this will break when the FEP acknowledges the
2237 command by incrementing cmdTail (Making it equal to head). 1874 command by incrementing cmdTail (Making it equal to head).
2238 ------------------------------------------------------------- */ 1875 ------------------------------------------------------------- */
2239
2240 if (n <= ncmds * (sizeof(short) * 4)) 1876 if (n <= ncmds * (sizeof(short) * 4))
2241 break; /* Well nearly forever :-) */ 1877 break; /* Well nearly forever :-) */
2242
2243 } /* End forever loop */ 1878 } /* End forever loop */
2244
2245} /* End fepcmd */ 1879} /* End fepcmd */
2246 1880
2247/* --------------------------------------------------------------------- 1881/* ---------------------------------------------------------------------
@@ -2255,11 +1889,9 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
2255 1889
2256static unsigned termios2digi_h(struct channel *ch, unsigned cflag) 1890static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
2257{ /* Begin termios2digi_h */ 1891{ /* Begin termios2digi_h */
2258
2259 unsigned res = 0; 1892 unsigned res = 0;
2260 1893
2261 if (cflag & CRTSCTS) 1894 if (cflag & CRTSCTS) {
2262 {
2263 ch->digiext.digi_flags |= (RTSPACE | CTSPACE); 1895 ch->digiext.digi_flags |= (RTSPACE | CTSPACE);
2264 res |= ((ch->m_cts) | (ch->m_rts)); 1896 res |= ((ch->m_cts) | (ch->m_rts));
2265 } 1897 }
@@ -2295,7 +1927,6 @@ static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
2295 1927
2296 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | 1928 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
2297 INPCK | ISTRIP|IXON|IXANY|IXOFF); 1929 INPCK | ISTRIP|IXON|IXANY|IXOFF);
2298
2299 if (ch->digiext.digi_flags & DIGI_AIXON) 1930 if (ch->digiext.digi_flags & DIGI_AIXON)
2300 res |= IAIXON; 1931 res |= IAIXON;
2301 return res; 1932 return res;
@@ -2308,28 +1939,15 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2308{ /* Begin termios2digi_c */ 1939{ /* Begin termios2digi_c */
2309 1940
2310 unsigned res = 0; 1941 unsigned res = 0;
2311 1942 if (cflag & CBAUDEX) { /* Begin detected CBAUDEX */
2312#ifdef SPEED_HACK
2313 /* CL: HACK to force 115200 at 38400 and 57600 at 19200 Baud */
2314 if ((cflag & CBAUD)== B38400) cflag=cflag - B38400 + B115200;
2315 if ((cflag & CBAUD)== B19200) cflag=cflag - B19200 + B57600;
2316#endif /* SPEED_HACK */
2317
2318 if (cflag & CBAUDEX)
2319 { /* Begin detected CBAUDEX */
2320
2321 ch->digiext.digi_flags |= DIGI_FAST; 1943 ch->digiext.digi_flags |= DIGI_FAST;
2322
2323 /* ------------------------------------------------------------- 1944 /* -------------------------------------------------------------
2324 HUPCL bit is used by FEP to indicate fast baud 1945 HUPCL bit is used by FEP to indicate fast baud
2325 table is to be used. 1946 table is to be used.
2326 ----------------------------------------------------------------- */ 1947 ----------------------------------------------------------------- */
2327
2328 res |= FEP_HUPCL; 1948 res |= FEP_HUPCL;
2329
2330 } /* End detected CBAUDEX */ 1949 } /* End detected CBAUDEX */
2331 else ch->digiext.digi_flags &= ~DIGI_FAST; 1950 else ch->digiext.digi_flags &= ~DIGI_FAST;
2332
2333 /* ------------------------------------------------------------------- 1951 /* -------------------------------------------------------------------
2334 CBAUD has bit position 0x1000 set these days to indicate Linux 1952 CBAUD has bit position 0x1000 set these days to indicate Linux
2335 baud rate remap. Digi hardware can't handle the bit assignment. 1953 baud rate remap. Digi hardware can't handle the bit assignment.
@@ -2337,7 +1955,6 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2337 bit out. 1955 bit out.
2338 ---------------------------------------------------------------------- */ 1956 ---------------------------------------------------------------------- */
2339 res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE); 1957 res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
2340
2341 /* ------------------------------------------------------------- 1958 /* -------------------------------------------------------------
2342 This gets a little confusing. The Digi cards have their own 1959 This gets a little confusing. The Digi cards have their own
2343 representation of c_cflags controling baud rate. For the most 1960 representation of c_cflags controling baud rate. For the most
@@ -2357,10 +1974,8 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2357 should be checked for a screened out prior to termios2digi_c 1974 should be checked for a screened out prior to termios2digi_c
2358 returning. Since CLOCAL isn't used by the board this can be 1975 returning. Since CLOCAL isn't used by the board this can be
2359 ignored as long as the returned value is used only by Digi hardware. 1976 ignored as long as the returned value is used only by Digi hardware.
2360 ----------------------------------------------------------------- */ 1977 ----------------------------------------------------------------- */
2361 1978 if (cflag & CBAUDEX) {
2362 if (cflag & CBAUDEX)
2363 {
2364 /* ------------------------------------------------------------- 1979 /* -------------------------------------------------------------
2365 The below code is trying to guarantee that only baud rates 1980 The below code is trying to guarantee that only baud rates
2366 115200 and 230400 are remapped. We use exclusive or because 1981 115200 and 230400 are remapped. We use exclusive or because
@@ -2371,138 +1986,96 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2371 1986
2372 if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) || 1987 if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) ||
2373 (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX)))) 1988 (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX))))
2374 {
2375 res += 1; 1989 res += 1;
2376 }
2377 } 1990 }
2378
2379 return res; 1991 return res;
2380 1992
2381} /* End termios2digi_c */ 1993} /* End termios2digi_c */
2382 1994
2383/* --------------------- Begin epcaparam ----------------------- */ 1995/* --------------------- Begin epcaparam ----------------------- */
2384 1996
1997/* Caller must hold the locks */
2385static void epcaparam(struct tty_struct *tty, struct channel *ch) 1998static void epcaparam(struct tty_struct *tty, struct channel *ch)
2386{ /* Begin epcaparam */ 1999{ /* Begin epcaparam */
2387 2000
2388 unsigned int cmdHead; 2001 unsigned int cmdHead;
2389 struct termios *ts; 2002 struct termios *ts;
2390 volatile struct board_chan *bc; 2003 struct board_chan *bc;
2391 unsigned mval, hflow, cflag, iflag; 2004 unsigned mval, hflow, cflag, iflag;
2392 2005
2393 bc = ch->brdchan; 2006 bc = ch->brdchan;
2394 epcaassert(bc !=0, "bc out of range"); 2007 epcaassert(bc !=0, "bc out of range");
2395 2008
2396 assertgwinon(ch); 2009 assertgwinon(ch);
2397
2398 ts = tty->termios; 2010 ts = tty->termios;
2399 2011 if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
2400 if ((ts->c_cflag & CBAUD) == 0) 2012 cmdHead = readw(&bc->rin);
2401 { /* Begin CBAUD detected */
2402
2403 cmdHead = bc->rin;
2404 bc->rout = cmdHead; 2013 bc->rout = cmdHead;
2405 cmdHead = bc->tin; 2014 cmdHead = readw(&bc->tin);
2406
2407 /* Changing baud in mid-stream transmission can be wonderful */ 2015 /* Changing baud in mid-stream transmission can be wonderful */
2408 /* --------------------------------------------------------------- 2016 /* ---------------------------------------------------------------
2409 Flush current transmit buffer by setting cmdTail pointer (tout) 2017 Flush current transmit buffer by setting cmdTail pointer (tout)
2410 to cmdHead pointer (tin). Hopefully the transmit buffer is empty. 2018 to cmdHead pointer (tin). Hopefully the transmit buffer is empty.
2411 ----------------------------------------------------------------- */ 2019 ----------------------------------------------------------------- */
2412
2413 fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0); 2020 fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0);
2414 mval = 0; 2021 mval = 0;
2415 2022 } else { /* Begin CBAUD not detected */
2416 } /* End CBAUD detected */
2417 else
2418 { /* Begin CBAUD not detected */
2419
2420 /* ------------------------------------------------------------------- 2023 /* -------------------------------------------------------------------
2421 c_cflags have changed but that change had nothing to do with BAUD. 2024 c_cflags have changed but that change had nothing to do with BAUD.
2422 Propagate the change to the card. 2025 Propagate the change to the card.
2423 ---------------------------------------------------------------------- */ 2026 ---------------------------------------------------------------------- */
2424
2425 cflag = termios2digi_c(ch, ts->c_cflag); 2027 cflag = termios2digi_c(ch, ts->c_cflag);
2426 2028 if (cflag != ch->fepcflag) {
2427 if (cflag != ch->fepcflag)
2428 {
2429 ch->fepcflag = cflag; 2029 ch->fepcflag = cflag;
2430 /* Set baud rate, char size, stop bits, parity */ 2030 /* Set baud rate, char size, stop bits, parity */
2431 fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0); 2031 fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0);
2432 } 2032 }
2433
2434
2435 /* ---------------------------------------------------------------- 2033 /* ----------------------------------------------------------------
2436 If the user has not forced CLOCAL and if the device is not a 2034 If the user has not forced CLOCAL and if the device is not a
2437 CALLOUT device (Which is always CLOCAL) we set flags such that 2035 CALLOUT device (Which is always CLOCAL) we set flags such that
2438 the driver will wait on carrier detect. 2036 the driver will wait on carrier detect.
2439 ------------------------------------------------------------------- */ 2037 ------------------------------------------------------------------- */
2440
2441 if (ts->c_cflag & CLOCAL) 2038 if (ts->c_cflag & CLOCAL)
2442 { /* Begin it is a cud device or a ttyD device with CLOCAL on */
2443 ch->asyncflags &= ~ASYNC_CHECK_CD; 2039 ch->asyncflags &= ~ASYNC_CHECK_CD;
2444 } /* End it is a cud device or a ttyD device with CLOCAL on */
2445 else 2040 else
2446 { /* Begin it is a ttyD device */
2447 ch->asyncflags |= ASYNC_CHECK_CD; 2041 ch->asyncflags |= ASYNC_CHECK_CD;
2448 } /* End it is a ttyD device */
2449
2450 mval = ch->m_dtr | ch->m_rts; 2042 mval = ch->m_dtr | ch->m_rts;
2451
2452 } /* End CBAUD not detected */ 2043 } /* End CBAUD not detected */
2453
2454 iflag = termios2digi_i(ch, ts->c_iflag); 2044 iflag = termios2digi_i(ch, ts->c_iflag);
2455
2456 /* Check input mode flags */ 2045 /* Check input mode flags */
2457 2046 if (iflag != ch->fepiflag) {
2458 if (iflag != ch->fepiflag)
2459 {
2460 ch->fepiflag = iflag; 2047 ch->fepiflag = iflag;
2461
2462 /* --------------------------------------------------------------- 2048 /* ---------------------------------------------------------------
2463 Command sets channels iflag structure on the board. Such things 2049 Command sets channels iflag structure on the board. Such things
2464 as input soft flow control, handling of parity errors, and 2050 as input soft flow control, handling of parity errors, and
2465 break handling are all set here. 2051 break handling are all set here.
2466 ------------------------------------------------------------------- */ 2052 ------------------------------------------------------------------- */
2467
2468 /* break handling, parity handling, input stripping, flow control chars */ 2053 /* break handling, parity handling, input stripping, flow control chars */
2469 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0); 2054 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
2470 } 2055 }
2471
2472 /* --------------------------------------------------------------- 2056 /* ---------------------------------------------------------------
2473 Set the board mint value for this channel. This will cause hardware 2057 Set the board mint value for this channel. This will cause hardware
2474 events to be generated each time the DCD signal (Described in mint) 2058 events to be generated each time the DCD signal (Described in mint)
2475 changes. 2059 changes.
2476 ------------------------------------------------------------------- */ 2060 ------------------------------------------------------------------- */
2477 bc->mint = ch->dcd; 2061 writeb(ch->dcd, &bc->mint);
2478
2479 if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD)) 2062 if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD))
2480 if (ch->digiext.digi_flags & DIGI_FORCEDCD) 2063 if (ch->digiext.digi_flags & DIGI_FORCEDCD)
2481 bc->mint = 0; 2064 writeb(0, &bc->mint);
2482 2065 ch->imodem = readb(&bc->mstat);
2483 ch->imodem = bc->mstat;
2484
2485 hflow = termios2digi_h(ch, ts->c_cflag); 2066 hflow = termios2digi_h(ch, ts->c_cflag);
2486 2067 if (hflow != ch->hflow) {
2487 if (hflow != ch->hflow)
2488 {
2489 ch->hflow = hflow; 2068 ch->hflow = hflow;
2490
2491 /* -------------------------------------------------------------- 2069 /* --------------------------------------------------------------
2492 Hard flow control has been selected but the board is not 2070 Hard flow control has been selected but the board is not
2493 using it. Activate hard flow control now. 2071 using it. Activate hard flow control now.
2494 ----------------------------------------------------------------- */ 2072 ----------------------------------------------------------------- */
2495
2496 fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1); 2073 fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1);
2497 } 2074 }
2498
2499
2500 mval ^= ch->modemfake & (mval ^ ch->modem); 2075 mval ^= ch->modemfake & (mval ^ ch->modem);
2501 2076
2502 if (ch->omodem ^ mval) 2077 if (ch->omodem ^ mval) {
2503 {
2504 ch->omodem = mval; 2078 ch->omodem = mval;
2505
2506 /* -------------------------------------------------------------- 2079 /* --------------------------------------------------------------
2507 The below command sets the DTR and RTS mstat structure. If 2080 The below command sets the DTR and RTS mstat structure. If
2508 hard flow control is NOT active these changes will drive the 2081 hard flow control is NOT active these changes will drive the
@@ -2514,87 +2087,65 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2514 /* First reset DTR & RTS; then set them */ 2087 /* First reset DTR & RTS; then set them */
2515 fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1); 2088 fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1);
2516 fepcmd(ch, SETMODEM, mval, 0, 0, 1); 2089 fepcmd(ch, SETMODEM, mval, 0, 0, 1);
2517
2518 } 2090 }
2519 2091 if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc) {
2520 if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc)
2521 {
2522 ch->fepstartc = ch->startc; 2092 ch->fepstartc = ch->startc;
2523 ch->fepstopc = ch->stopc; 2093 ch->fepstopc = ch->stopc;
2524
2525 /* ------------------------------------------------------------ 2094 /* ------------------------------------------------------------
2526 The XON / XOFF characters have changed; propagate these 2095 The XON / XOFF characters have changed; propagate these
2527 changes to the card. 2096 changes to the card.
2528 --------------------------------------------------------------- */ 2097 --------------------------------------------------------------- */
2529
2530 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); 2098 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
2531 } 2099 }
2532 2100 if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca) {
2533 if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca)
2534 {
2535 ch->fepstartca = ch->startca; 2101 ch->fepstartca = ch->startca;
2536 ch->fepstopca = ch->stopca; 2102 ch->fepstopca = ch->stopca;
2537
2538 /* --------------------------------------------------------------- 2103 /* ---------------------------------------------------------------
2539 Similar to the above, this time the auxilarly XON / XOFF 2104 Similar to the above, this time the auxilarly XON / XOFF
2540 characters have changed; propagate these changes to the card. 2105 characters have changed; propagate these changes to the card.
2541 ------------------------------------------------------------------ */ 2106 ------------------------------------------------------------------ */
2542
2543 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); 2107 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
2544 } 2108 }
2545
2546} /* End epcaparam */ 2109} /* End epcaparam */
2547 2110
2548/* --------------------- Begin receive_data ----------------------- */ 2111/* --------------------- Begin receive_data ----------------------- */
2549 2112/* Caller holds lock */
2550static void receive_data(struct channel *ch) 2113static void receive_data(struct channel *ch)
2551{ /* Begin receive_data */ 2114{ /* Begin receive_data */
2552 2115
2553 unchar *rptr; 2116 unchar *rptr;
2554 struct termios *ts = NULL; 2117 struct termios *ts = NULL;
2555 struct tty_struct *tty; 2118 struct tty_struct *tty;
2556 volatile struct board_chan *bc; 2119 struct board_chan *bc;
2557 register int dataToRead, wrapgap, bytesAvailable; 2120 int dataToRead, wrapgap, bytesAvailable;
2558 register unsigned int tail, head; 2121 unsigned int tail, head;
2559 unsigned int wrapmask; 2122 unsigned int wrapmask;
2560 int rc; 2123 int rc;
2561 2124
2562
2563 /* --------------------------------------------------------------- 2125 /* ---------------------------------------------------------------
2564 This routine is called by doint when a receive data event 2126 This routine is called by doint when a receive data event
2565 has taken place. 2127 has taken place.
2566 ------------------------------------------------------------------- */ 2128 ------------------------------------------------------------------- */
2567 2129
2568 globalwinon(ch); 2130 globalwinon(ch);
2569
2570 if (ch->statusflags & RXSTOPPED) 2131 if (ch->statusflags & RXSTOPPED)
2571 return; 2132 return;
2572
2573 tty = ch->tty; 2133 tty = ch->tty;
2574 if (tty) 2134 if (tty)
2575 ts = tty->termios; 2135 ts = tty->termios;
2576
2577 bc = ch->brdchan; 2136 bc = ch->brdchan;
2578 2137 BUG_ON(!bc);
2579 if (!bc)
2580 {
2581 printk(KERN_ERR "<Error> - bc is NULL in receive_data!\n");
2582 return;
2583 }
2584
2585 wrapmask = ch->rxbufsize - 1; 2138 wrapmask = ch->rxbufsize - 1;
2586 2139
2587 /* --------------------------------------------------------------------- 2140 /* ---------------------------------------------------------------------
2588 Get the head and tail pointers to the receiver queue. Wrap the 2141 Get the head and tail pointers to the receiver queue. Wrap the
2589 head pointer if it has reached the end of the buffer. 2142 head pointer if it has reached the end of the buffer.
2590 ------------------------------------------------------------------------ */ 2143 ------------------------------------------------------------------------ */
2591 2144 head = readw(&bc->rin);
2592 head = bc->rin;
2593 head &= wrapmask; 2145 head &= wrapmask;
2594 tail = bc->rout & wrapmask; 2146 tail = readw(&bc->rout) & wrapmask;
2595 2147
2596 bytesAvailable = (head - tail) & wrapmask; 2148 bytesAvailable = (head - tail) & wrapmask;
2597
2598 if (bytesAvailable == 0) 2149 if (bytesAvailable == 0)
2599 return; 2150 return;
2600 2151
@@ -2602,8 +2153,7 @@ static void receive_data(struct channel *ch)
2602 If CREAD bit is off or device not open, set TX tail to head 2153 If CREAD bit is off or device not open, set TX tail to head
2603 --------------------------------------------------------------------- */ 2154 --------------------------------------------------------------------- */
2604 2155
2605 if (!tty || !ts || !(ts->c_cflag & CREAD)) 2156 if (!tty || !ts || !(ts->c_cflag & CREAD)) {
2606 {
2607 bc->rout = head; 2157 bc->rout = head;
2608 return; 2158 return;
2609 } 2159 }
@@ -2611,64 +2161,45 @@ static void receive_data(struct channel *ch)
2611 if (tty->flip.count == TTY_FLIPBUF_SIZE) 2161 if (tty->flip.count == TTY_FLIPBUF_SIZE)
2612 return; 2162 return;
2613 2163
2614 if (bc->orun) 2164 if (readb(&bc->orun)) {
2615 { 2165 writeb(0, &bc->orun);
2616 bc->orun = 0; 2166 printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",tty->name);
2617 printk(KERN_WARNING "overrun! DigiBoard device %s\n",tty->name);
2618 } 2167 }
2619
2620 rxwinon(ch); 2168 rxwinon(ch);
2621 rptr = tty->flip.char_buf_ptr; 2169 rptr = tty->flip.char_buf_ptr;
2622 rc = tty->flip.count; 2170 rc = tty->flip.count;
2623 2171 while (bytesAvailable > 0) { /* Begin while there is data on the card */
2624 while (bytesAvailable > 0)
2625 { /* Begin while there is data on the card */
2626
2627 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail; 2172 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
2628
2629 /* --------------------------------------------------------------- 2173 /* ---------------------------------------------------------------
2630 Even if head has wrapped around only report the amount of 2174 Even if head has wrapped around only report the amount of
2631 data to be equal to the size - tail. Remember memcpy can't 2175 data to be equal to the size - tail. Remember memcpy can't
2632 automaticly wrap around the receive buffer. 2176 automaticly wrap around the receive buffer.
2633 ----------------------------------------------------------------- */ 2177 ----------------------------------------------------------------- */
2634
2635 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable; 2178 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable;
2636
2637 /* -------------------------------------------------------------- 2179 /* --------------------------------------------------------------
2638 Make sure we don't overflow the buffer 2180 Make sure we don't overflow the buffer
2639 ----------------------------------------------------------------- */ 2181 ----------------------------------------------------------------- */
2640
2641 if ((rc + dataToRead) > TTY_FLIPBUF_SIZE) 2182 if ((rc + dataToRead) > TTY_FLIPBUF_SIZE)
2642 dataToRead = TTY_FLIPBUF_SIZE - rc; 2183 dataToRead = TTY_FLIPBUF_SIZE - rc;
2643
2644 if (dataToRead == 0) 2184 if (dataToRead == 0)
2645 break; 2185 break;
2646
2647 /* --------------------------------------------------------------- 2186 /* ---------------------------------------------------------------
2648 Move data read from our card into the line disciplines buffer 2187 Move data read from our card into the line disciplines buffer
2649 for translation if necessary. 2188 for translation if necessary.
2650 ------------------------------------------------------------------ */ 2189 ------------------------------------------------------------------ */
2651 2190 memcpy_fromio(rptr, ch->rxptr + tail, dataToRead);
2652 if ((memcpy(rptr, ch->rxptr + tail, dataToRead)) != rptr)
2653 printk(KERN_ERR "<Error> - receive_data : memcpy failed\n");
2654
2655 rc += dataToRead; 2191 rc += dataToRead;
2656 rptr += dataToRead; 2192 rptr += dataToRead;
2657 tail = (tail + dataToRead) & wrapmask; 2193 tail = (tail + dataToRead) & wrapmask;
2658 bytesAvailable -= dataToRead; 2194 bytesAvailable -= dataToRead;
2659
2660 } /* End while there is data on the card */ 2195 } /* End while there is data on the card */
2661
2662
2663 tty->flip.count = rc; 2196 tty->flip.count = rc;
2664 tty->flip.char_buf_ptr = rptr; 2197 tty->flip.char_buf_ptr = rptr;
2665 globalwinon(ch); 2198 globalwinon(ch);
2666 bc->rout = tail; 2199 writew(tail, &bc->rout);
2667
2668 /* Must be called with global data */ 2200 /* Must be called with global data */
2669 tty_schedule_flip(ch->tty); 2201 tty_schedule_flip(ch->tty);
2670 return; 2202 return;
2671
2672} /* End receive_data */ 2203} /* End receive_data */
2673 2204
2674static int info_ioctl(struct tty_struct *tty, struct file * file, 2205static int info_ioctl(struct tty_struct *tty, struct file * file,
@@ -2676,17 +2207,15 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2676{ 2207{
2677 switch (cmd) 2208 switch (cmd)
2678 { /* Begin switch cmd */ 2209 { /* Begin switch cmd */
2679
2680 case DIGI_GETINFO: 2210 case DIGI_GETINFO:
2681 { /* Begin case DIGI_GETINFO */ 2211 { /* Begin case DIGI_GETINFO */
2682
2683 struct digi_info di ; 2212 struct digi_info di ;
2684 int brd; 2213 int brd;
2685 2214
2686 getUser(brd, (unsigned int __user *)arg); 2215 if(get_user(brd, (unsigned int __user *)arg))
2687 2216 return -EFAULT;
2688 if ((brd < 0) || (brd >= num_cards) || (num_cards == 0)) 2217 if (brd < 0 || brd >= num_cards || num_cards == 0)
2689 return (-ENODEV); 2218 return -ENODEV;
2690 2219
2691 memset(&di, 0, sizeof(di)); 2220 memset(&di, 0, sizeof(di));
2692 2221
@@ -2694,8 +2223,9 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2694 di.status = boards[brd].status; 2223 di.status = boards[brd].status;
2695 di.type = boards[brd].type ; 2224 di.type = boards[brd].type ;
2696 di.numports = boards[brd].numports ; 2225 di.numports = boards[brd].numports ;
2697 di.port = boards[brd].port ; 2226 /* Legacy fixups - just move along nothing to see */
2698 di.membase = boards[brd].membase ; 2227 di.port = (unsigned char *)boards[brd].port ;
2228 di.membase = (unsigned char *)boards[brd].membase ;
2699 2229
2700 if (copy_to_user((void __user *)arg, &di, sizeof (di))) 2230 if (copy_to_user((void __user *)arg, &di, sizeof (di)))
2701 return -EFAULT; 2231 return -EFAULT;
@@ -2709,39 +2239,29 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2709 int brd = arg & 0xff000000 >> 16 ; 2239 int brd = arg & 0xff000000 >> 16 ;
2710 unsigned char state = arg & 0xff ; 2240 unsigned char state = arg & 0xff ;
2711 2241
2712 if ((brd < 0) || (brd >= num_cards)) 2242 if (brd < 0 || brd >= num_cards) {
2713 { 2243 printk(KERN_ERR "epca: DIGI POLLER : brd not valid!\n");
2714 printk(KERN_ERR "<Error> - DIGI POLLER : brd not valid!\n");
2715 return (-ENODEV); 2244 return (-ENODEV);
2716 } 2245 }
2717
2718 digi_poller_inhibited = state ; 2246 digi_poller_inhibited = state ;
2719 break ; 2247 break ;
2720
2721 } /* End case DIGI_POLLER */ 2248 } /* End case DIGI_POLLER */
2722 2249
2723 case DIGI_INIT: 2250 case DIGI_INIT:
2724 { /* Begin case DIGI_INIT */ 2251 { /* Begin case DIGI_INIT */
2725
2726 /* ------------------------------------------------------------ 2252 /* ------------------------------------------------------------
2727 This call is made by the apps to complete the initilization 2253 This call is made by the apps to complete the initilization
2728 of the board(s). This routine is responsible for setting 2254 of the board(s). This routine is responsible for setting
2729 the card to its initial state and setting the drivers control 2255 the card to its initial state and setting the drivers control
2730 fields to the sutianle settings for the card in question. 2256 fields to the sutianle settings for the card in question.
2731 ---------------------------------------------------------------- */ 2257 ---------------------------------------------------------------- */
2732
2733 int crd ; 2258 int crd ;
2734 for (crd = 0; crd < num_cards; crd++) 2259 for (crd = 0; crd < num_cards; crd++)
2735 post_fep_init (crd); 2260 post_fep_init (crd);
2736
2737 break ; 2261 break ;
2738
2739 } /* End case DIGI_INIT */ 2262 } /* End case DIGI_INIT */
2740
2741
2742 default: 2263 default:
2743 return -ENOIOCTLCMD; 2264 return -ENOTTY;
2744
2745 } /* End switch cmd */ 2265 } /* End switch cmd */
2746 return (0) ; 2266 return (0) ;
2747} 2267}
@@ -2750,43 +2270,33 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2750static int pc_tiocmget(struct tty_struct *tty, struct file *file) 2270static int pc_tiocmget(struct tty_struct *tty, struct file *file)
2751{ 2271{
2752 struct channel *ch = (struct channel *) tty->driver_data; 2272 struct channel *ch = (struct channel *) tty->driver_data;
2753 volatile struct board_chan *bc; 2273 struct board_chan *bc;
2754 unsigned int mstat, mflag = 0; 2274 unsigned int mstat, mflag = 0;
2755 unsigned long flags; 2275 unsigned long flags;
2756 2276
2757 if (ch) 2277 if (ch)
2758 bc = ch->brdchan; 2278 bc = ch->brdchan;
2759 else 2279 else
2760 { 2280 return -EINVAL;
2761 printk(KERN_ERR "<Error> - ch is NULL in pc_tiocmget!\n");
2762 return(-EINVAL);
2763 }
2764 2281
2765 save_flags(flags); 2282 spin_lock_irqsave(&epca_lock, flags);
2766 cli();
2767 globalwinon(ch); 2283 globalwinon(ch);
2768 mstat = bc->mstat; 2284 mstat = readb(&bc->mstat);
2769 memoff(ch); 2285 memoff(ch);
2770 restore_flags(flags); 2286 spin_unlock_irqrestore(&epca_lock, flags);
2771 2287
2772 if (mstat & ch->m_dtr) 2288 if (mstat & ch->m_dtr)
2773 mflag |= TIOCM_DTR; 2289 mflag |= TIOCM_DTR;
2774
2775 if (mstat & ch->m_rts) 2290 if (mstat & ch->m_rts)
2776 mflag |= TIOCM_RTS; 2291 mflag |= TIOCM_RTS;
2777
2778 if (mstat & ch->m_cts) 2292 if (mstat & ch->m_cts)
2779 mflag |= TIOCM_CTS; 2293 mflag |= TIOCM_CTS;
2780
2781 if (mstat & ch->dsr) 2294 if (mstat & ch->dsr)
2782 mflag |= TIOCM_DSR; 2295 mflag |= TIOCM_DSR;
2783
2784 if (mstat & ch->m_ri) 2296 if (mstat & ch->m_ri)
2785 mflag |= TIOCM_RI; 2297 mflag |= TIOCM_RI;
2786
2787 if (mstat & ch->dcd) 2298 if (mstat & ch->dcd)
2788 mflag |= TIOCM_CD; 2299 mflag |= TIOCM_CD;
2789
2790 return mflag; 2300 return mflag;
2791} 2301}
2792 2302
@@ -2796,13 +2306,10 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2796 struct channel *ch = (struct channel *) tty->driver_data; 2306 struct channel *ch = (struct channel *) tty->driver_data;
2797 unsigned long flags; 2307 unsigned long flags;
2798 2308
2799 if (!ch) { 2309 if (!ch)
2800 printk(KERN_ERR "<Error> - ch is NULL in pc_tiocmset!\n"); 2310 return -EINVAL;
2801 return(-EINVAL);
2802 }
2803 2311
2804 save_flags(flags); 2312 spin_lock_irqsave(&epca_lock, flags);
2805 cli();
2806 /* 2313 /*
2807 * I think this modemfake stuff is broken. It doesn't 2314 * I think this modemfake stuff is broken. It doesn't
2808 * correctly reflect the behaviour desired by the TIOCM* 2315 * correctly reflect the behaviour desired by the TIOCM*
@@ -2824,17 +2331,14 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2824 ch->modemfake |= ch->m_dtr; 2331 ch->modemfake |= ch->m_dtr;
2825 ch->modem &= ~ch->m_dtr; 2332 ch->modem &= ~ch->m_dtr;
2826 } 2333 }
2827
2828 globalwinon(ch); 2334 globalwinon(ch);
2829
2830 /* -------------------------------------------------------------- 2335 /* --------------------------------------------------------------
2831 The below routine generally sets up parity, baud, flow control 2336 The below routine generally sets up parity, baud, flow control
2832 issues, etc.... It effect both control flags and input flags. 2337 issues, etc.... It effect both control flags and input flags.
2833 ------------------------------------------------------------------ */ 2338 ------------------------------------------------------------------ */
2834
2835 epcaparam(tty,ch); 2339 epcaparam(tty,ch);
2836 memoff(ch); 2340 memoff(ch);
2837 restore_flags(flags); 2341 spin_unlock_irqrestore(&epca_lock, flags);
2838 return 0; 2342 return 0;
2839} 2343}
2840 2344
@@ -2847,19 +2351,14 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2847 unsigned long flags; 2351 unsigned long flags;
2848 unsigned int mflag, mstat; 2352 unsigned int mflag, mstat;
2849 unsigned char startc, stopc; 2353 unsigned char startc, stopc;
2850 volatile struct board_chan *bc; 2354 struct board_chan *bc;
2851 struct channel *ch = (struct channel *) tty->driver_data; 2355 struct channel *ch = (struct channel *) tty->driver_data;
2852 void __user *argp = (void __user *)arg; 2356 void __user *argp = (void __user *)arg;
2853 2357
2854 if (ch) 2358 if (ch)
2855 bc = ch->brdchan; 2359 bc = ch->brdchan;
2856 else 2360 else
2857 { 2361 return -EINVAL;
2858 printk(KERN_ERR "<Error> - ch is NULL in pc_ioctl!\n");
2859 return(-EINVAL);
2860 }
2861
2862 save_flags(flags);
2863 2362
2864 /* ------------------------------------------------------------------- 2363 /* -------------------------------------------------------------------
2865 For POSIX compliance we need to add more ioctls. See tty_ioctl.c 2364 For POSIX compliance we need to add more ioctls. See tty_ioctl.c
@@ -2871,46 +2370,39 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2871 { /* Begin switch cmd */ 2370 { /* Begin switch cmd */
2872 2371
2873 case TCGETS: 2372 case TCGETS:
2874 if (copy_to_user(argp, 2373 if (copy_to_user(argp, tty->termios, sizeof(struct termios)))
2875 tty->termios, sizeof(struct termios)))
2876 return -EFAULT; 2374 return -EFAULT;
2877 return(0); 2375 return 0;
2878
2879 case TCGETA: 2376 case TCGETA:
2880 return get_termio(tty, argp); 2377 return get_termio(tty, argp);
2881
2882 case TCSBRK: /* SVID version: non-zero arg --> no break */ 2378 case TCSBRK: /* SVID version: non-zero arg --> no break */
2883
2884 retval = tty_check_change(tty); 2379 retval = tty_check_change(tty);
2885 if (retval) 2380 if (retval)
2886 return retval; 2381 return retval;
2887
2888 /* Setup an event to indicate when the transmit buffer empties */ 2382 /* Setup an event to indicate when the transmit buffer empties */
2889 2383 spin_lock_irqsave(&epca_lock, flags);
2890 setup_empty_event(tty,ch); 2384 setup_empty_event(tty,ch);
2385 spin_unlock_irqrestore(&epca_lock, flags);
2891 tty_wait_until_sent(tty, 0); 2386 tty_wait_until_sent(tty, 0);
2892 if (!arg) 2387 if (!arg)
2893 digi_send_break(ch, HZ/4); /* 1/4 second */ 2388 digi_send_break(ch, HZ/4); /* 1/4 second */
2894 return 0; 2389 return 0;
2895
2896 case TCSBRKP: /* support for POSIX tcsendbreak() */ 2390 case TCSBRKP: /* support for POSIX tcsendbreak() */
2897
2898 retval = tty_check_change(tty); 2391 retval = tty_check_change(tty);
2899 if (retval) 2392 if (retval)
2900 return retval; 2393 return retval;
2901 2394
2902 /* Setup an event to indicate when the transmit buffer empties */ 2395 /* Setup an event to indicate when the transmit buffer empties */
2903 2396 spin_lock_irqsave(&epca_lock, flags);
2904 setup_empty_event(tty,ch); 2397 setup_empty_event(tty,ch);
2398 spin_unlock_irqrestore(&epca_lock, flags);
2905 tty_wait_until_sent(tty, 0); 2399 tty_wait_until_sent(tty, 0);
2906 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4); 2400 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4);
2907 return 0; 2401 return 0;
2908
2909 case TIOCGSOFTCAR: 2402 case TIOCGSOFTCAR:
2910 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg)) 2403 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
2911 return -EFAULT; 2404 return -EFAULT;
2912 return 0; 2405 return 0;
2913
2914 case TIOCSSOFTCAR: 2406 case TIOCSSOFTCAR:
2915 { 2407 {
2916 unsigned int value; 2408 unsigned int value;
@@ -2922,75 +2414,63 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2922 (value ? CLOCAL : 0)); 2414 (value ? CLOCAL : 0));
2923 return 0; 2415 return 0;
2924 } 2416 }
2925
2926 case TIOCMODG: 2417 case TIOCMODG:
2927 mflag = pc_tiocmget(tty, file); 2418 mflag = pc_tiocmget(tty, file);
2928 if (put_user(mflag, (unsigned long __user *)argp)) 2419 if (put_user(mflag, (unsigned long __user *)argp))
2929 return -EFAULT; 2420 return -EFAULT;
2930 break; 2421 break;
2931
2932 case TIOCMODS: 2422 case TIOCMODS:
2933 if (get_user(mstat, (unsigned __user *)argp)) 2423 if (get_user(mstat, (unsigned __user *)argp))
2934 return -EFAULT; 2424 return -EFAULT;
2935 return pc_tiocmset(tty, file, mstat, ~mstat); 2425 return pc_tiocmset(tty, file, mstat, ~mstat);
2936
2937 case TIOCSDTR: 2426 case TIOCSDTR:
2427 spin_lock_irqsave(&epca_lock, flags);
2938 ch->omodem |= ch->m_dtr; 2428 ch->omodem |= ch->m_dtr;
2939 cli();
2940 globalwinon(ch); 2429 globalwinon(ch);
2941 fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1); 2430 fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1);
2942 memoff(ch); 2431 memoff(ch);
2943 restore_flags(flags); 2432 spin_unlock_irqrestore(&epca_lock, flags);
2944 break; 2433 break;
2945 2434
2946 case TIOCCDTR: 2435 case TIOCCDTR:
2436 spin_lock_irqsave(&epca_lock, flags);
2947 ch->omodem &= ~ch->m_dtr; 2437 ch->omodem &= ~ch->m_dtr;
2948 cli();
2949 globalwinon(ch); 2438 globalwinon(ch);
2950 fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1); 2439 fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1);
2951 memoff(ch); 2440 memoff(ch);
2952 restore_flags(flags); 2441 spin_unlock_irqrestore(&epca_lock, flags);
2953 break; 2442 break;
2954
2955 case DIGI_GETA: 2443 case DIGI_GETA:
2956 if (copy_to_user(argp, &ch->digiext, sizeof(digi_t))) 2444 if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
2957 return -EFAULT; 2445 return -EFAULT;
2958 break; 2446 break;
2959
2960 case DIGI_SETAW: 2447 case DIGI_SETAW:
2961 case DIGI_SETAF: 2448 case DIGI_SETAF:
2962 if ((cmd) == (DIGI_SETAW)) 2449 if (cmd == DIGI_SETAW) {
2963 {
2964 /* Setup an event to indicate when the transmit buffer empties */ 2450 /* Setup an event to indicate when the transmit buffer empties */
2965 2451 spin_lock_irqsave(&epca_lock, flags);
2966 setup_empty_event(tty,ch); 2452 setup_empty_event(tty,ch);
2453 spin_unlock_irqrestore(&epca_lock, flags);
2967 tty_wait_until_sent(tty, 0); 2454 tty_wait_until_sent(tty, 0);
2968 } 2455 } else {
2969 else
2970 {
2971 /* ldisc lock already held in ioctl */ 2456 /* ldisc lock already held in ioctl */
2972 if (tty->ldisc.flush_buffer) 2457 if (tty->ldisc.flush_buffer)
2973 tty->ldisc.flush_buffer(tty); 2458 tty->ldisc.flush_buffer(tty);
2974 } 2459 }
2975
2976 /* Fall Thru */ 2460 /* Fall Thru */
2977
2978 case DIGI_SETA: 2461 case DIGI_SETA:
2979 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t))) 2462 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
2980 return -EFAULT; 2463 return -EFAULT;
2981 2464
2982 if (ch->digiext.digi_flags & DIGI_ALTPIN) 2465 if (ch->digiext.digi_flags & DIGI_ALTPIN) {
2983 {
2984 ch->dcd = ch->m_dsr; 2466 ch->dcd = ch->m_dsr;
2985 ch->dsr = ch->m_dcd; 2467 ch->dsr = ch->m_dcd;
2986 } 2468 } else {
2987 else
2988 {
2989 ch->dcd = ch->m_dcd; 2469 ch->dcd = ch->m_dcd;
2990 ch->dsr = ch->m_dsr; 2470 ch->dsr = ch->m_dsr;
2991 } 2471 }
2992 2472
2993 cli(); 2473 spin_lock_irqsave(&epca_lock, flags);
2994 globalwinon(ch); 2474 globalwinon(ch);
2995 2475
2996 /* ----------------------------------------------------------------- 2476 /* -----------------------------------------------------------------
@@ -3000,25 +2480,22 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
3000 2480
3001 epcaparam(tty,ch); 2481 epcaparam(tty,ch);
3002 memoff(ch); 2482 memoff(ch);
3003 restore_flags(flags); 2483 spin_unlock_irqrestore(&epca_lock, flags);
3004 break; 2484 break;
3005 2485
3006 case DIGI_GETFLOW: 2486 case DIGI_GETFLOW:
3007 case DIGI_GETAFLOW: 2487 case DIGI_GETAFLOW:
3008 cli(); 2488 spin_lock_irqsave(&epca_lock, flags);
3009 globalwinon(ch); 2489 globalwinon(ch);
3010 if ((cmd) == (DIGI_GETFLOW)) 2490 if (cmd == DIGI_GETFLOW) {
3011 { 2491 dflow.startc = readb(&bc->startc);
3012 dflow.startc = bc->startc; 2492 dflow.stopc = readb(&bc->stopc);
3013 dflow.stopc = bc->stopc; 2493 } else {
3014 } 2494 dflow.startc = readb(&bc->startca);
3015 else 2495 dflow.stopc = readb(&bc->stopca);
3016 {
3017 dflow.startc = bc->startca;
3018 dflow.stopc = bc->stopca;
3019 } 2496 }
3020 memoff(ch); 2497 memoff(ch);
3021 restore_flags(flags); 2498 spin_unlock_irqrestore(&epca_lock, flags);
3022 2499
3023 if (copy_to_user(argp, &dflow, sizeof(dflow))) 2500 if (copy_to_user(argp, &dflow, sizeof(dflow)))
3024 return -EFAULT; 2501 return -EFAULT;
@@ -3026,13 +2503,10 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
3026 2503
3027 case DIGI_SETAFLOW: 2504 case DIGI_SETAFLOW:
3028 case DIGI_SETFLOW: 2505 case DIGI_SETFLOW:
3029 if ((cmd) == (DIGI_SETFLOW)) 2506 if (cmd == DIGI_SETFLOW) {
3030 {
3031 startc = ch->startc; 2507 startc = ch->startc;
3032 stopc = ch->stopc; 2508 stopc = ch->stopc;
3033 } 2509 } else {
3034 else
3035 {
3036 startc = ch->startca; 2510 startc = ch->startca;
3037 stopc = ch->stopca; 2511 stopc = ch->stopca;
3038 } 2512 }
@@ -3040,40 +2514,31 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
3040 if (copy_from_user(&dflow, argp, sizeof(dflow))) 2514 if (copy_from_user(&dflow, argp, sizeof(dflow)))
3041 return -EFAULT; 2515 return -EFAULT;
3042 2516
3043 if (dflow.startc != startc || dflow.stopc != stopc) 2517 if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin if setflow toggled */
3044 { /* Begin if setflow toggled */ 2518 spin_lock_irqsave(&epca_lock, flags);
3045 cli();
3046 globalwinon(ch); 2519 globalwinon(ch);
3047 2520
3048 if ((cmd) == (DIGI_SETFLOW)) 2521 if (cmd == DIGI_SETFLOW) {
3049 {
3050 ch->fepstartc = ch->startc = dflow.startc; 2522 ch->fepstartc = ch->startc = dflow.startc;
3051 ch->fepstopc = ch->stopc = dflow.stopc; 2523 ch->fepstopc = ch->stopc = dflow.stopc;
3052 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); 2524 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
3053 } 2525 } else {
3054 else
3055 {
3056 ch->fepstartca = ch->startca = dflow.startc; 2526 ch->fepstartca = ch->startca = dflow.startc;
3057 ch->fepstopca = ch->stopca = dflow.stopc; 2527 ch->fepstopca = ch->stopca = dflow.stopc;
3058 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); 2528 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
3059 } 2529 }
3060 2530
3061 if (ch->statusflags & TXSTOPPED) 2531 if (ch->statusflags & TXSTOPPED)
3062 pc_start(tty); 2532 pc_start(tty);
3063 2533
3064 memoff(ch); 2534 memoff(ch);
3065 restore_flags(flags); 2535 spin_unlock_irqrestore(&epca_lock, flags);
3066
3067 } /* End if setflow toggled */ 2536 } /* End if setflow toggled */
3068 break; 2537 break;
3069
3070 default: 2538 default:
3071 return -ENOIOCTLCMD; 2539 return -ENOIOCTLCMD;
3072
3073 } /* End switch cmd */ 2540 } /* End switch cmd */
3074
3075 return 0; 2541 return 0;
3076
3077} /* End pc_ioctl */ 2542} /* End pc_ioctl */
3078 2543
3079/* --------------------- Begin pc_set_termios ----------------------- */ 2544/* --------------------- Begin pc_set_termios ----------------------- */
@@ -3083,20 +2548,16 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
3083 2548
3084 struct channel *ch; 2549 struct channel *ch;
3085 unsigned long flags; 2550 unsigned long flags;
3086
3087 /* --------------------------------------------------------- 2551 /* ---------------------------------------------------------
3088 verifyChannel returns the channel from the tty struct 2552 verifyChannel returns the channel from the tty struct
3089 if it is valid. This serves as a sanity check. 2553 if it is valid. This serves as a sanity check.
3090 ------------------------------------------------------------- */ 2554 ------------------------------------------------------------- */
3091 2555 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3092 if ((ch = verifyChannel(tty)) != NULL) 2556 spin_lock_irqsave(&epca_lock, flags);
3093 { /* Begin if channel valid */
3094
3095 save_flags(flags);
3096 cli();
3097 globalwinon(ch); 2557 globalwinon(ch);
3098 epcaparam(tty, ch); 2558 epcaparam(tty, ch);
3099 memoff(ch); 2559 memoff(ch);
2560 spin_unlock_irqrestore(&epca_lock, flags);
3100 2561
3101 if ((old_termios->c_cflag & CRTSCTS) && 2562 if ((old_termios->c_cflag & CRTSCTS) &&
3102 ((tty->termios->c_cflag & CRTSCTS) == 0)) 2563 ((tty->termios->c_cflag & CRTSCTS) == 0))
@@ -3106,8 +2567,6 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
3106 (tty->termios->c_cflag & CLOCAL)) 2567 (tty->termios->c_cflag & CLOCAL))
3107 wake_up_interruptible(&ch->open_wait); 2568 wake_up_interruptible(&ch->open_wait);
3108 2569
3109 restore_flags(flags);
3110
3111 } /* End if channel valid */ 2570 } /* End if channel valid */
3112 2571
3113} /* End pc_set_termios */ 2572} /* End pc_set_termios */
@@ -3116,29 +2575,18 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
3116 2575
3117static void do_softint(void *private_) 2576static void do_softint(void *private_)
3118{ /* Begin do_softint */ 2577{ /* Begin do_softint */
3119
3120 struct channel *ch = (struct channel *) private_; 2578 struct channel *ch = (struct channel *) private_;
3121
3122
3123 /* Called in response to a modem change event */ 2579 /* Called in response to a modem change event */
3124 2580 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
3125 if (ch && ch->magic == EPCA_MAGIC)
3126 { /* Begin EPCA_MAGIC */
3127
3128 struct tty_struct *tty = ch->tty; 2581 struct tty_struct *tty = ch->tty;
3129 2582
3130 if (tty && tty->driver_data) 2583 if (tty && tty->driver_data) {
3131 { 2584 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { /* Begin if clear_bit */
3132 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event))
3133 { /* Begin if clear_bit */
3134
3135 tty_hangup(tty); /* FIXME: module removal race here - AKPM */ 2585 tty_hangup(tty); /* FIXME: module removal race here - AKPM */
3136 wake_up_interruptible(&ch->open_wait); 2586 wake_up_interruptible(&ch->open_wait);
3137 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; 2587 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
3138
3139 } /* End if clear_bit */ 2588 } /* End if clear_bit */
3140 } 2589 }
3141
3142 } /* End EPCA_MAGIC */ 2590 } /* End EPCA_MAGIC */
3143} /* End do_softint */ 2591} /* End do_softint */
3144 2592
@@ -3154,82 +2602,49 @@ static void pc_stop(struct tty_struct *tty)
3154 2602
3155 struct channel *ch; 2603 struct channel *ch;
3156 unsigned long flags; 2604 unsigned long flags;
3157
3158 /* --------------------------------------------------------- 2605 /* ---------------------------------------------------------
3159 verifyChannel returns the channel from the tty struct 2606 verifyChannel returns the channel from the tty struct
3160 if it is valid. This serves as a sanity check. 2607 if it is valid. This serves as a sanity check.
3161 ------------------------------------------------------------- */ 2608 ------------------------------------------------------------- */
3162 2609 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if valid channel */
3163 if ((ch = verifyChannel(tty)) != NULL) 2610 spin_lock_irqsave(&epca_lock, flags);
3164 { /* Begin if valid channel */ 2611 if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */
3165
3166 save_flags(flags);
3167 cli();
3168
3169 if ((ch->statusflags & TXSTOPPED) == 0)
3170 { /* Begin if transmit stop requested */
3171
3172 globalwinon(ch); 2612 globalwinon(ch);
3173
3174 /* STOP transmitting now !! */ 2613 /* STOP transmitting now !! */
3175
3176 fepcmd(ch, PAUSETX, 0, 0, 0, 0); 2614 fepcmd(ch, PAUSETX, 0, 0, 0, 0);
3177
3178 ch->statusflags |= TXSTOPPED; 2615 ch->statusflags |= TXSTOPPED;
3179 memoff(ch); 2616 memoff(ch);
3180
3181 } /* End if transmit stop requested */ 2617 } /* End if transmit stop requested */
3182 2618 spin_unlock_irqrestore(&epca_lock, flags);
3183 restore_flags(flags);
3184
3185 } /* End if valid channel */ 2619 } /* End if valid channel */
3186
3187} /* End pc_stop */ 2620} /* End pc_stop */
3188 2621
3189/* --------------------- Begin pc_start ----------------------- */ 2622/* --------------------- Begin pc_start ----------------------- */
3190 2623
3191static void pc_start(struct tty_struct *tty) 2624static void pc_start(struct tty_struct *tty)
3192{ /* Begin pc_start */ 2625{ /* Begin pc_start */
3193
3194 struct channel *ch; 2626 struct channel *ch;
3195
3196 /* --------------------------------------------------------- 2627 /* ---------------------------------------------------------
3197 verifyChannel returns the channel from the tty struct 2628 verifyChannel returns the channel from the tty struct
3198 if it is valid. This serves as a sanity check. 2629 if it is valid. This serves as a sanity check.
3199 ------------------------------------------------------------- */ 2630 ------------------------------------------------------------- */
3200 2631 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3201 if ((ch = verifyChannel(tty)) != NULL)
3202 { /* Begin if channel valid */
3203
3204 unsigned long flags; 2632 unsigned long flags;
3205 2633 spin_lock_irqsave(&epca_lock, flags);
3206 save_flags(flags);
3207 cli();
3208
3209 /* Just in case output was resumed because of a change in Digi-flow */ 2634 /* Just in case output was resumed because of a change in Digi-flow */
3210 if (ch->statusflags & TXSTOPPED) 2635 if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */
3211 { /* Begin transmit resume requested */ 2636 struct board_chan *bc;
3212
3213 volatile struct board_chan *bc;
3214
3215 globalwinon(ch); 2637 globalwinon(ch);
3216 bc = ch->brdchan; 2638 bc = ch->brdchan;
3217 if (ch->statusflags & LOWWAIT) 2639 if (ch->statusflags & LOWWAIT)
3218 bc->ilow = 1; 2640 writeb(1, &bc->ilow);
3219
3220 /* Okay, you can start transmitting again... */ 2641 /* Okay, you can start transmitting again... */
3221
3222 fepcmd(ch, RESUMETX, 0, 0, 0, 0); 2642 fepcmd(ch, RESUMETX, 0, 0, 0, 0);
3223
3224 ch->statusflags &= ~TXSTOPPED; 2643 ch->statusflags &= ~TXSTOPPED;
3225 memoff(ch); 2644 memoff(ch);
3226
3227 } /* End transmit resume requested */ 2645 } /* End transmit resume requested */
3228 2646 spin_unlock_irqrestore(&epca_lock, flags);
3229 restore_flags(flags);
3230
3231 } /* End if channel valid */ 2647 } /* End if channel valid */
3232
3233} /* End pc_start */ 2648} /* End pc_start */
3234 2649
3235/* ------------------------------------------------------------------ 2650/* ------------------------------------------------------------------
@@ -3244,86 +2659,55 @@ ______________________________________________________________________ */
3244 2659
3245static void pc_throttle(struct tty_struct * tty) 2660static void pc_throttle(struct tty_struct * tty)
3246{ /* Begin pc_throttle */ 2661{ /* Begin pc_throttle */
3247
3248 struct channel *ch; 2662 struct channel *ch;
3249 unsigned long flags; 2663 unsigned long flags;
3250
3251 /* --------------------------------------------------------- 2664 /* ---------------------------------------------------------
3252 verifyChannel returns the channel from the tty struct 2665 verifyChannel returns the channel from the tty struct
3253 if it is valid. This serves as a sanity check. 2666 if it is valid. This serves as a sanity check.
3254 ------------------------------------------------------------- */ 2667 ------------------------------------------------------------- */
3255 2668 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3256 if ((ch = verifyChannel(tty)) != NULL) 2669 spin_lock_irqsave(&epca_lock, flags);
3257 { /* Begin if channel valid */ 2670 if ((ch->statusflags & RXSTOPPED) == 0) {
3258
3259
3260 save_flags(flags);
3261 cli();
3262
3263 if ((ch->statusflags & RXSTOPPED) == 0)
3264 {
3265 globalwinon(ch); 2671 globalwinon(ch);
3266 fepcmd(ch, PAUSERX, 0, 0, 0, 0); 2672 fepcmd(ch, PAUSERX, 0, 0, 0, 0);
3267
3268 ch->statusflags |= RXSTOPPED; 2673 ch->statusflags |= RXSTOPPED;
3269 memoff(ch); 2674 memoff(ch);
3270 } 2675 }
3271 restore_flags(flags); 2676 spin_unlock_irqrestore(&epca_lock, flags);
3272
3273 } /* End if channel valid */ 2677 } /* End if channel valid */
3274
3275} /* End pc_throttle */ 2678} /* End pc_throttle */
3276 2679
3277/* --------------------- Begin unthrottle ----------------------- */ 2680/* --------------------- Begin unthrottle ----------------------- */
3278 2681
3279static void pc_unthrottle(struct tty_struct *tty) 2682static void pc_unthrottle(struct tty_struct *tty)
3280{ /* Begin pc_unthrottle */ 2683{ /* Begin pc_unthrottle */
3281
3282 struct channel *ch; 2684 struct channel *ch;
3283 unsigned long flags; 2685 unsigned long flags;
3284 volatile struct board_chan *bc;
3285
3286
3287 /* --------------------------------------------------------- 2686 /* ---------------------------------------------------------
3288 verifyChannel returns the channel from the tty struct 2687 verifyChannel returns the channel from the tty struct
3289 if it is valid. This serves as a sanity check. 2688 if it is valid. This serves as a sanity check.
3290 ------------------------------------------------------------- */ 2689 ------------------------------------------------------------- */
3291 2690 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3292 if ((ch = verifyChannel(tty)) != NULL)
3293 { /* Begin if channel valid */
3294
3295
3296 /* Just in case output was resumed because of a change in Digi-flow */ 2691 /* Just in case output was resumed because of a change in Digi-flow */
3297 save_flags(flags); 2692 spin_lock_irqsave(&epca_lock, flags);
3298 cli(); 2693 if (ch->statusflags & RXSTOPPED) {
3299
3300 if (ch->statusflags & RXSTOPPED)
3301 {
3302
3303 globalwinon(ch); 2694 globalwinon(ch);
3304 bc = ch->brdchan;
3305 fepcmd(ch, RESUMERX, 0, 0, 0, 0); 2695 fepcmd(ch, RESUMERX, 0, 0, 0, 0);
3306
3307 ch->statusflags &= ~RXSTOPPED; 2696 ch->statusflags &= ~RXSTOPPED;
3308 memoff(ch); 2697 memoff(ch);
3309 } 2698 }
3310 restore_flags(flags); 2699 spin_unlock_irqrestore(&epca_lock, flags);
3311
3312 } /* End if channel valid */ 2700 } /* End if channel valid */
3313
3314} /* End pc_unthrottle */ 2701} /* End pc_unthrottle */
3315 2702
3316/* --------------------- Begin digi_send_break ----------------------- */ 2703/* --------------------- Begin digi_send_break ----------------------- */
3317 2704
3318void digi_send_break(struct channel *ch, int msec) 2705void digi_send_break(struct channel *ch, int msec)
3319{ /* Begin digi_send_break */ 2706{ /* Begin digi_send_break */
3320
3321 unsigned long flags; 2707 unsigned long flags;
3322 2708
3323 save_flags(flags); 2709 spin_lock_irqsave(&epca_lock, flags);
3324 cli();
3325 globalwinon(ch); 2710 globalwinon(ch);
3326
3327 /* -------------------------------------------------------------------- 2711 /* --------------------------------------------------------------------
3328 Maybe I should send an infinite break here, schedule() for 2712 Maybe I should send an infinite break here, schedule() for
3329 msec amount of time, and then stop the break. This way, 2713 msec amount of time, and then stop the break. This way,
@@ -3331,36 +2715,28 @@ void digi_send_break(struct channel *ch, int msec)
3331 to be called (i.e. via an ioctl()) more than once in msec amount 2715 to be called (i.e. via an ioctl()) more than once in msec amount
3332 of time. Try this for now... 2716 of time. Try this for now...
3333 ------------------------------------------------------------------------ */ 2717 ------------------------------------------------------------------------ */
3334
3335 fepcmd(ch, SENDBREAK, msec, 0, 10, 0); 2718 fepcmd(ch, SENDBREAK, msec, 0, 10, 0);
3336 memoff(ch); 2719 memoff(ch);
3337 2720 spin_unlock_irqrestore(&epca_lock, flags);
3338 restore_flags(flags);
3339
3340} /* End digi_send_break */ 2721} /* End digi_send_break */
3341 2722
3342/* --------------------- Begin setup_empty_event ----------------------- */ 2723/* --------------------- Begin setup_empty_event ----------------------- */
3343 2724
2725/* Caller MUST hold the lock */
2726
3344static void setup_empty_event(struct tty_struct *tty, struct channel *ch) 2727static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
3345{ /* Begin setup_empty_event */ 2728{ /* Begin setup_empty_event */
3346 2729
3347 volatile struct board_chan *bc = ch->brdchan; 2730 struct board_chan *bc = ch->brdchan;
3348 unsigned long int flags;
3349 2731
3350 save_flags(flags);
3351 cli();
3352 globalwinon(ch); 2732 globalwinon(ch);
3353 ch->statusflags |= EMPTYWAIT; 2733 ch->statusflags |= EMPTYWAIT;
3354
3355 /* ------------------------------------------------------------------ 2734 /* ------------------------------------------------------------------
3356 When set the iempty flag request a event to be generated when the 2735 When set the iempty flag request a event to be generated when the
3357 transmit buffer is empty (If there is no BREAK in progress). 2736 transmit buffer is empty (If there is no BREAK in progress).
3358 --------------------------------------------------------------------- */ 2737 --------------------------------------------------------------------- */
3359 2738 writeb(1, &bc->iempty);
3360 bc->iempty = 1;
3361 memoff(ch); 2739 memoff(ch);
3362 restore_flags(flags);
3363
3364} /* End setup_empty_event */ 2740} /* End setup_empty_event */
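
setup_empty_event() also shows the other recurring change in this file: stores into the memory-mapped board channel structure go through writeb() instead of assignments through a volatile pointer, and the local save_flags()/cli() pair disappears because, per the comment added above, the caller must already hold epca_lock. Sketch of the access pattern, assuming bc points into the mapped board window:

        struct board_chan *bc = ch->brdchan;    /* no longer declared volatile */

        globalwinon(ch);                        /* map the board window */
        writeb(1, &bc->iempty);                 /* was: bc->iempty = 1; */
        memoff(ch);                             /* unmap */
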
3365 2741
3366/* --------------------- Begin get_termio ----------------------- */ 2742/* --------------------- Begin get_termio ----------------------- */
@@ -3369,10 +2745,10 @@ static int get_termio(struct tty_struct * tty, struct termio __user * termio)
3369{ /* Begin get_termio */ 2745{ /* Begin get_termio */
3370 return kernel_termios_to_user_termio(termio, tty->termios); 2746 return kernel_termios_to_user_termio(termio, tty->termios);
3371} /* End get_termio */ 2747} /* End get_termio */
2748
3372/* ---------------------- Begin epca_setup -------------------------- */ 2749/* ---------------------- Begin epca_setup -------------------------- */
3373void epca_setup(char *str, int *ints) 2750void epca_setup(char *str, int *ints)
3374{ /* Begin epca_setup */ 2751{ /* Begin epca_setup */
3375
3376 struct board_info board; 2752 struct board_info board;
3377 int index, loop, last; 2753 int index, loop, last;
3378 char *temp, *t2; 2754 char *temp, *t2;
@@ -3394,49 +2770,41 @@ void epca_setup(char *str, int *ints)
3394 for (last = 0, index = 1; index <= ints[0]; index++) 2770 for (last = 0, index = 1; index <= ints[0]; index++)
3395 switch(index) 2771 switch(index)
3396 { /* Begin parse switch */ 2772 { /* Begin parse switch */
3397
3398 case 1: 2773 case 1:
3399 board.status = ints[index]; 2774 board.status = ints[index];
3400
3401 /* --------------------------------------------------------- 2775 /* ---------------------------------------------------------
3402 We check for 2 (As opposed to 1; because 2 is a flag 2776 We check for 2 (As opposed to 1; because 2 is a flag
3403 instructing the driver to ignore epcaconfig.) For this 2777 instructing the driver to ignore epcaconfig.) For this
3404 reason we check for 2. 2778 reason we check for 2.
3405 ------------------------------------------------------------ */ 2779 ------------------------------------------------------------ */
3406 if (board.status == 2) 2780 if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */
3407 { /* Begin ignore epcaconfig as well as lilo cmd line */
3408 nbdevs = 0; 2781 nbdevs = 0;
3409 num_cards = 0; 2782 num_cards = 0;
3410 return; 2783 return;
3411 } /* End ignore epcaconfig as well as lilo cmd line */ 2784 } /* End ignore epcaconfig as well as lilo cmd line */
3412 2785
3413 if (board.status > 2) 2786 if (board.status > 2) {
3414 { 2787 printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status);
3415 printk(KERN_ERR "<Error> - epca_setup: Invalid board status 0x%x\n", board.status);
3416 invalid_lilo_config = 1; 2788 invalid_lilo_config = 1;
3417 setup_error_code |= INVALID_BOARD_STATUS; 2789 setup_error_code |= INVALID_BOARD_STATUS;
3418 return; 2790 return;
3419 } 2791 }
3420 last = index; 2792 last = index;
3421 break; 2793 break;
3422
3423 case 2: 2794 case 2:
3424 board.type = ints[index]; 2795 board.type = ints[index];
3425 if (board.type >= PCIXEM) 2796 if (board.type >= PCIXEM) {
3426 { 2797 printk(KERN_ERR "epca_setup: Invalid board type 0x%x\n", board.type);
3427 printk(KERN_ERR "<Error> - epca_setup: Invalid board type 0x%x\n", board.type);
3428 invalid_lilo_config = 1; 2798 invalid_lilo_config = 1;
3429 setup_error_code |= INVALID_BOARD_TYPE; 2799 setup_error_code |= INVALID_BOARD_TYPE;
3430 return; 2800 return;
3431 } 2801 }
3432 last = index; 2802 last = index;
3433 break; 2803 break;
3434
3435 case 3: 2804 case 3:
3436 board.altpin = ints[index]; 2805 board.altpin = ints[index];
3437 if (board.altpin > 1) 2806 if (board.altpin > 1) {
3438 { 2807 printk(KERN_ERR "epca_setup: Invalid board altpin 0x%x\n", board.altpin);
3439 printk(KERN_ERR "<Error> - epca_setup: Invalid board altpin 0x%x\n", board.altpin);
3440 invalid_lilo_config = 1; 2808 invalid_lilo_config = 1;
3441 setup_error_code |= INVALID_ALTPIN; 2809 setup_error_code |= INVALID_ALTPIN;
3442 return; 2810 return;
@@ -3446,9 +2814,8 @@ void epca_setup(char *str, int *ints)
3446 2814
3447 case 4: 2815 case 4:
3448 board.numports = ints[index]; 2816 board.numports = ints[index];
3449 if ((board.numports < 2) || (board.numports > 256)) 2817 if (board.numports < 2 || board.numports > 256) {
3450 { 2818 printk(KERN_ERR "epca_setup: Invalid board numports 0x%x\n", board.numports);
3451 printk(KERN_ERR "<Error> - epca_setup: Invalid board numports 0x%x\n", board.numports);
3452 invalid_lilo_config = 1; 2819 invalid_lilo_config = 1;
3453 setup_error_code |= INVALID_NUM_PORTS; 2820 setup_error_code |= INVALID_NUM_PORTS;
3454 return; 2821 return;
@@ -3458,10 +2825,9 @@ void epca_setup(char *str, int *ints)
3458 break; 2825 break;
3459 2826
3460 case 5: 2827 case 5:
3461 board.port = (unsigned char *)ints[index]; 2828 board.port = ints[index];
3462 if (ints[index] <= 0) 2829 if (ints[index] <= 0) {
3463 { 2830 printk(KERN_ERR "epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
3464 printk(KERN_ERR "<Error> - epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
3465 invalid_lilo_config = 1; 2831 invalid_lilo_config = 1;
3466 setup_error_code |= INVALID_PORT_BASE; 2832 setup_error_code |= INVALID_PORT_BASE;
3467 return; 2833 return;
@@ -3470,10 +2836,9 @@ void epca_setup(char *str, int *ints)
3470 break; 2836 break;
3471 2837
3472 case 6: 2838 case 6:
3473 board.membase = (unsigned char *)ints[index]; 2839 board.membase = ints[index];
3474 if (ints[index] <= 0) 2840 if (ints[index] <= 0) {
3475 { 2841 printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
3476 printk(KERN_ERR "<Error> - epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
3477 invalid_lilo_config = 1; 2842 invalid_lilo_config = 1;
3478 setup_error_code |= INVALID_MEM_BASE; 2843 setup_error_code |= INVALID_MEM_BASE;
3479 return; 2844 return;
@@ -3487,21 +2852,16 @@ void epca_setup(char *str, int *ints)
3487 2852
3488 } /* End parse switch */ 2853 } /* End parse switch */
3489 2854
3490 while (str && *str) 2855 while (str && *str) { /* Begin while there is a string arg */
3491 { /* Begin while there is a string arg */
3492
3493 /* find the next comma or terminator */ 2856 /* find the next comma or terminator */
3494 temp = str; 2857 temp = str;
3495
3496 /* While string is not null, and a comma hasn't been found */ 2858 /* While string is not null, and a comma hasn't been found */
3497 while (*temp && (*temp != ',')) 2859 while (*temp && (*temp != ','))
3498 temp++; 2860 temp++;
3499
3500 if (!*temp) 2861 if (!*temp)
3501 temp = NULL; 2862 temp = NULL;
3502 else 2863 else
3503 *temp++ = 0; 2864 *temp++ = 0;
3504
3505 /* Set index to the number of args + 1 */ 2865 /* Set index to the number of args + 1 */
3506 index = last + 1; 2866 index = last + 1;
3507 2867
@@ -3511,12 +2871,10 @@ void epca_setup(char *str, int *ints)
3511 len = strlen(str); 2871 len = strlen(str);
3512 if (strncmp("Disable", str, len) == 0) 2872 if (strncmp("Disable", str, len) == 0)
3513 board.status = 0; 2873 board.status = 0;
3514 else 2874 else if (strncmp("Enable", str, len) == 0)
3515 if (strncmp("Enable", str, len) == 0)
3516 board.status = 1; 2875 board.status = 1;
3517 else 2876 else {
3518 { 2877 printk(KERN_ERR "epca_setup: Invalid status %s\n", str);
3519 printk(KERN_ERR "<Error> - epca_setup: Invalid status %s\n", str);
3520 invalid_lilo_config = 1; 2878 invalid_lilo_config = 1;
3521 setup_error_code |= INVALID_BOARD_STATUS; 2879 setup_error_code |= INVALID_BOARD_STATUS;
3522 return; 2880 return;
@@ -3525,22 +2883,17 @@ void epca_setup(char *str, int *ints)
3525 break; 2883 break;
3526 2884
3527 case 2: 2885 case 2:
3528
3529 for(loop = 0; loop < EPCA_NUM_TYPES; loop++) 2886 for(loop = 0; loop < EPCA_NUM_TYPES; loop++)
3530 if (strcmp(board_desc[loop], str) == 0) 2887 if (strcmp(board_desc[loop], str) == 0)
3531 break; 2888 break;
3532
3533
3534 /* --------------------------------------------------------------- 2889 /* ---------------------------------------------------------------
3535 If the index incremented above refers to a legitamate board 2890 If the index incremented above refers to a legitamate board
3536 type set it here. 2891 type set it here.
3537 ------------------------------------------------------------------*/ 2892 ------------------------------------------------------------------*/
3538
3539 if (index < EPCA_NUM_TYPES) 2893 if (index < EPCA_NUM_TYPES)
3540 board.type = loop; 2894 board.type = loop;
3541 else 2895 else {
3542 { 2896 printk(KERN_ERR "epca_setup: Invalid board type: %s\n", str);
3543 printk(KERN_ERR "<Error> - epca_setup: Invalid board type: %s\n", str);
3544 invalid_lilo_config = 1; 2897 invalid_lilo_config = 1;
3545 setup_error_code |= INVALID_BOARD_TYPE; 2898 setup_error_code |= INVALID_BOARD_TYPE;
3546 return; 2899 return;
@@ -3552,12 +2905,10 @@ void epca_setup(char *str, int *ints)
3552 len = strlen(str); 2905 len = strlen(str);
3553 if (strncmp("Disable", str, len) == 0) 2906 if (strncmp("Disable", str, len) == 0)
3554 board.altpin = 0; 2907 board.altpin = 0;
3555 else 2908 else if (strncmp("Enable", str, len) == 0)
3556 if (strncmp("Enable", str, len) == 0)
3557 board.altpin = 1; 2909 board.altpin = 1;
3558 else 2910 else {
3559 { 2911 printk(KERN_ERR "epca_setup: Invalid altpin %s\n", str);
3560 printk(KERN_ERR "<Error> - epca_setup: Invalid altpin %s\n", str);
3561 invalid_lilo_config = 1; 2912 invalid_lilo_config = 1;
3562 setup_error_code |= INVALID_ALTPIN; 2913 setup_error_code |= INVALID_ALTPIN;
3563 return; 2914 return;
@@ -3570,9 +2921,8 @@ void epca_setup(char *str, int *ints)
3570 while (isdigit(*t2)) 2921 while (isdigit(*t2))
3571 t2++; 2922 t2++;
3572 2923
3573 if (*t2) 2924 if (*t2) {
3574 { 2925 printk(KERN_ERR "epca_setup: Invalid port count %s\n", str);
3575 printk(KERN_ERR "<Error> - epca_setup: Invalid port count %s\n", str);
3576 invalid_lilo_config = 1; 2926 invalid_lilo_config = 1;
3577 setup_error_code |= INVALID_NUM_PORTS; 2927 setup_error_code |= INVALID_NUM_PORTS;
3578 return; 2928 return;
@@ -3601,15 +2951,14 @@ void epca_setup(char *str, int *ints)
3601 while (isxdigit(*t2)) 2951 while (isxdigit(*t2))
3602 t2++; 2952 t2++;
3603 2953
3604 if (*t2) 2954 if (*t2) {
3605 { 2955 printk(KERN_ERR "epca_setup: Invalid i/o address %s\n", str);
3606 printk(KERN_ERR "<Error> - epca_setup: Invalid i/o address %s\n", str);
3607 invalid_lilo_config = 1; 2956 invalid_lilo_config = 1;
3608 setup_error_code |= INVALID_PORT_BASE; 2957 setup_error_code |= INVALID_PORT_BASE;
3609 return; 2958 return;
3610 } 2959 }
3611 2960
3612 board.port = (unsigned char *)simple_strtoul(str, NULL, 16); 2961 board.port = simple_strtoul(str, NULL, 16);
3613 last = index; 2962 last = index;
3614 break; 2963 break;
3615 2964
@@ -3618,52 +2967,38 @@ void epca_setup(char *str, int *ints)
3618 while (isxdigit(*t2)) 2967 while (isxdigit(*t2))
3619 t2++; 2968 t2++;
3620 2969
3621 if (*t2) 2970 if (*t2) {
3622 { 2971 printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str);
3623 printk(KERN_ERR "<Error> - epca_setup: Invalid memory base %s\n",str);
3624 invalid_lilo_config = 1; 2972 invalid_lilo_config = 1;
3625 setup_error_code |= INVALID_MEM_BASE; 2973 setup_error_code |= INVALID_MEM_BASE;
3626 return; 2974 return;
3627 } 2975 }
3628 2976 board.membase = simple_strtoul(str, NULL, 16);
3629 board.membase = (unsigned char *)simple_strtoul(str, NULL, 16);
3630 last = index; 2977 last = index;
3631 break; 2978 break;
3632
3633 default: 2979 default:
3634 printk(KERN_ERR "PC/Xx: Too many string parms\n"); 2980 printk(KERN_ERR "epca: Too many string parms\n");
3635 return; 2981 return;
3636 } 2982 }
3637 str = temp; 2983 str = temp;
3638
3639 } /* End while there is a string arg */ 2984 } /* End while there is a string arg */
3640 2985
3641 2986 if (last < 6) {
3642 if (last < 6) 2987 printk(KERN_ERR "epca: Insufficient parms specified\n");
3643 {
3644 printk(KERN_ERR "PC/Xx: Insufficient parms specified\n");
3645 return; 2988 return;
3646 } 2989 }
3647 2990
3648 /* I should REALLY validate the stuff here */ 2991 /* I should REALLY validate the stuff here */
3649
3650 /* Copies our local copy of board into boards */ 2992 /* Copies our local copy of board into boards */
3651 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board)); 2993 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board));
3652
3653
3654 /* Does this get called once per lilo arg are what ? */ 2994 /* Does this get called once per lilo arg are what ? */
3655
3656 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", 2995 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n",
3657 num_cards, board_desc[board.type], 2996 num_cards, board_desc[board.type],
3658 board.numports, (int)board.port, (unsigned int) board.membase); 2997 board.numports, (int)board.port, (unsigned int) board.membase);
3659
3660 num_cards++; 2998 num_cards++;
3661
3662} /* End epca_setup */ 2999} /* End epca_setup */
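
For orientation, epca_setup() consumes the usual old-style boot-parameter split: numeric arguments arrive in ints[] with ints[0] holding their count, and any remaining comma-separated words arrive in str. A purely hypothetical invocation (the option name and the values are illustrative, not taken from this patch):

        /* a boot option of the form <option>=1,0,0,16,0x290,0xd0000
         * would reach the parser roughly as: */
        int ints[] = { 6, 1, 0, 0, 16, 0x290, 0xd0000 };  /* ints[0] = numeric arg count */
        char *str  = NULL;                                /* no trailing string args */

        epca_setup(str, ints);  /* status enabled (1), type 0, altpin off,
                                   16 ports, io 0x290, membase 0xd0000 */
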
3663 3000
3664 3001
3665
3666#ifdef ENABLE_PCI
3667/* ------------------------ Begin init_PCI --------------------------- */ 3002/* ------------------------ Begin init_PCI --------------------------- */
3668 3003
3669enum epic_board_types { 3004enum epic_board_types {
@@ -3685,7 +3020,6 @@ static struct {
3685 { PCIXRJ, 2, }, 3020 { PCIXRJ, 2, },
3686}; 3021};
3687 3022
3688
3689static int __devinit epca_init_one (struct pci_dev *pdev, 3023static int __devinit epca_init_one (struct pci_dev *pdev,
3690 const struct pci_device_id *ent) 3024 const struct pci_device_id *ent)
3691{ 3025{
@@ -3711,10 +3045,8 @@ static int __devinit epca_init_one (struct pci_dev *pdev,
3711 boards[board_idx].status = ENABLED; 3045 boards[board_idx].status = ENABLED;
3712 boards[board_idx].type = epca_info_tbl[info_idx].board_type; 3046 boards[board_idx].type = epca_info_tbl[info_idx].board_type;
3713 boards[board_idx].numports = 0x0; 3047 boards[board_idx].numports = 0x0;
3714 boards[board_idx].port = 3048 boards[board_idx].port = addr + PCI_IO_OFFSET;
3715 (unsigned char *)((char *) addr + PCI_IO_OFFSET); 3049 boards[board_idx].membase = addr;
3716 boards[board_idx].membase =
3717 (unsigned char *)((char *) addr);
3718 3050
3719 if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) { 3051 if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) {
3720 printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", 3052 printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
@@ -3775,15 +3107,13 @@ static struct pci_device_id epca_pci_tbl[] = {
3775MODULE_DEVICE_TABLE(pci, epca_pci_tbl); 3107MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
3776 3108
3777int __init init_PCI (void) 3109int __init init_PCI (void)
3778{ /* Begin init_PCI */ 3110{ /* Begin init_PCI */
3779 memset (&epca_driver, 0, sizeof (epca_driver)); 3111 memset (&epca_driver, 0, sizeof (epca_driver));
3780 epca_driver.name = "epca"; 3112 epca_driver.name = "epca";
3781 epca_driver.id_table = epca_pci_tbl; 3113 epca_driver.id_table = epca_pci_tbl;
3782 epca_driver.probe = epca_init_one; 3114 epca_driver.probe = epca_init_one;
3783 3115
3784 return pci_register_driver(&epca_driver); 3116 return pci_register_driver(&epca_driver);
3785} /* End init_PCI */ 3117}
3786
3787#endif /* ENABLE_PCI */
3788 3118
3789MODULE_LICENSE("GPL"); 3119MODULE_LICENSE("GPL");
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index 52205ef71314..20eeb5a70e1a 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -85,73 +85,73 @@ static char *board_desc[] =
85struct channel 85struct channel
86{ 86{
87 long magic; 87 long magic;
88 unchar boardnum; 88 unsigned char boardnum;
89 unchar channelnum; 89 unsigned char channelnum;
90 unchar omodem; /* FEP output modem status */ 90 unsigned char omodem; /* FEP output modem status */
91 unchar imodem; /* FEP input modem status */ 91 unsigned char imodem; /* FEP input modem status */
92 unchar modemfake; /* Modem values to be forced */ 92 unsigned char modemfake; /* Modem values to be forced */
93 unchar modem; /* Force values */ 93 unsigned char modem; /* Force values */
94 unchar hflow; 94 unsigned char hflow;
95 unchar dsr; 95 unsigned char dsr;
96 unchar dcd; 96 unsigned char dcd;
97 unchar m_rts ; /* The bits used in whatever FEP */ 97 unsigned char m_rts ; /* The bits used in whatever FEP */
98 unchar m_dcd ; /* is indiginous to this board to */ 98 unsigned char m_dcd ; /* is indiginous to this board to */
99 unchar m_dsr ; /* represent each of the physical */ 99 unsigned char m_dsr ; /* represent each of the physical */
100 unchar m_cts ; /* handshake lines */ 100 unsigned char m_cts ; /* handshake lines */
101 unchar m_ri ; 101 unsigned char m_ri ;
102 unchar m_dtr ; 102 unsigned char m_dtr ;
103 unchar stopc; 103 unsigned char stopc;
104 unchar startc; 104 unsigned char startc;
105 unchar stopca; 105 unsigned char stopca;
106 unchar startca; 106 unsigned char startca;
107 unchar fepstopc; 107 unsigned char fepstopc;
108 unchar fepstartc; 108 unsigned char fepstartc;
109 unchar fepstopca; 109 unsigned char fepstopca;
110 unchar fepstartca; 110 unsigned char fepstartca;
111 unchar txwin; 111 unsigned char txwin;
112 unchar rxwin; 112 unsigned char rxwin;
113 ushort fepiflag; 113 unsigned short fepiflag;
114 ushort fepcflag; 114 unsigned short fepcflag;
115 ushort fepoflag; 115 unsigned short fepoflag;
116 ushort txbufhead; 116 unsigned short txbufhead;
117 ushort txbufsize; 117 unsigned short txbufsize;
118 ushort rxbufhead; 118 unsigned short rxbufhead;
119 ushort rxbufsize; 119 unsigned short rxbufsize;
120 int close_delay; 120 int close_delay;
121 int count; 121 int count;
122 int blocked_open; 122 int blocked_open;
123 ulong event; 123 unsigned long event;
124 int asyncflags; 124 int asyncflags;
125 uint dev; 125 uint dev;
126 ulong statusflags; 126 unsigned long statusflags;
127 ulong c_iflag; 127 unsigned long c_iflag;
128 ulong c_cflag; 128 unsigned long c_cflag;
129 ulong c_lflag; 129 unsigned long c_lflag;
130 ulong c_oflag; 130 unsigned long c_oflag;
131 unchar *txptr; 131 unsigned char *txptr;
132 unchar *rxptr; 132 unsigned char *rxptr;
133 unchar *tmp_buf; 133 unsigned char *tmp_buf;
134 struct board_info *board; 134 struct board_info *board;
135 volatile struct board_chan *brdchan; 135 struct board_chan *brdchan;
136 struct digi_struct digiext; 136 struct digi_struct digiext;
137 struct tty_struct *tty; 137 struct tty_struct *tty;
138 wait_queue_head_t open_wait; 138 wait_queue_head_t open_wait;
139 wait_queue_head_t close_wait; 139 wait_queue_head_t close_wait;
140 struct work_struct tqueue; 140 struct work_struct tqueue;
141 volatile struct global_data *mailbox; 141 struct global_data *mailbox;
142}; 142};
143 143
144struct board_info 144struct board_info
145{ 145{
146 unchar status; 146 unsigned char status;
147 unchar type; 147 unsigned char type;
148 unchar altpin; 148 unsigned char altpin;
149 ushort numports; 149 unsigned short numports;
150 unchar *port; 150 unsigned long port;
151 unchar *membase; 151 unsigned long membase;
152 unchar __iomem *re_map_port; 152 unsigned char __iomem *re_map_port;
153 unchar *re_map_membase; 153 unsigned char *re_map_membase;
154 ulong memory_seg; 154 unsigned long memory_seg;
155 void ( * memwinon ) (struct board_info *, unsigned int) ; 155 void ( * memwinon ) (struct board_info *, unsigned int) ;
156 void ( * memwinoff ) (struct board_info *, unsigned int) ; 156 void ( * memwinoff ) (struct board_info *, unsigned int) ;
157 void ( * globalwinon ) (struct channel *) ; 157 void ( * globalwinon ) (struct channel *) ;
@@ -160,6 +160,6 @@ struct board_info
160 void ( * memoff ) (struct channel *) ; 160 void ( * memoff ) (struct channel *) ;
161 void ( * assertgwinon ) (struct channel *) ; 161 void ( * assertgwinon ) (struct channel *) ;
162 void ( * assertmemoff ) (struct channel *) ; 162 void ( * assertmemoff ) (struct channel *) ;
163 unchar poller_inhibited ; 163 unsigned char poller_inhibited ;
164}; 164};
165 165
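
The header changes track the .c file: the private unchar/ushort/ulong spellings become standard C types, the volatile qualifiers on brdchan and mailbox are dropped in favour of the readb()/writeb() accessors used above, and port/membase become plain unsigned long so bus addresses no longer travel as unsigned char pointers. Sketch of the effect, matching the epca_init_one() hunk earlier (addr is the PCI resource address there):

        struct board_info bi;

        bi.port    = addr + PCI_IO_OFFSET;  /* was: (unsigned char *)((char *)addr + PCI_IO_OFFSET) */
        bi.membase = addr;                  /* was: (unsigned char *)addr */
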
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 762fa430fb5b..a695f25e4497 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -44,7 +44,7 @@
44/* 44/*
45 * The High Precision Event Timer driver. 45 * The High Precision Event Timer driver.
46 * This driver is closely modelled after the rtc.c driver. 46 * This driver is closely modelled after the rtc.c driver.
47 * http://www.intel.com/labs/platcomp/hpet/hpetspec.htm 47 * http://www.intel.com/hardwaredesign/hpetspec.htm
48 */ 48 */
49#define HPET_USER_FREQ (64) 49#define HPET_USER_FREQ (64)
50#define HPET_DRIFT (500) 50#define HPET_DRIFT (500)
@@ -712,7 +712,7 @@ static void hpet_register_interpolator(struct hpets *hpetp)
712 ti->shift = 10; 712 ti->shift = 10;
713 ti->addr = &hpetp->hp_hpet->hpet_mc; 713 ti->addr = &hpetp->hp_hpet->hpet_mc;
714 ti->frequency = hpet_time_div(hpets->hp_period); 714 ti->frequency = hpet_time_div(hpets->hp_period);
715 ti->drift = ti->frequency * HPET_DRIFT / 1000000; 715 ti->drift = HPET_DRIFT;
716 ti->mask = -1; 716 ti->mask = -1;
717 717
718 hpetp->hp_interpolator = ti; 718 hpetp->hp_interpolator = ti;
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 60bb9152b832..78d681dc35a8 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@ char hvc_driver_name[] = "hvc_console";
39 39
40static struct vio_device_id hvc_driver_table[] __devinitdata = { 40static struct vio_device_id hvc_driver_table[] __devinitdata = {
41 {"serial", "hvterm1"}, 41 {"serial", "hvterm1"},
42 { NULL, } 42 { "", "" }
43}; 43};
44MODULE_DEVICE_TABLE(vio, hvc_driver_table); 44MODULE_DEVICE_TABLE(vio, hvc_driver_table);
45 45
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 3236d2404905..f47f009f9259 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -527,7 +527,7 @@ static int khvcsd(void *unused)
527 527
528static struct vio_device_id hvcs_driver_table[] __devinitdata= { 528static struct vio_device_id hvcs_driver_table[] __devinitdata= {
529 {"serial-server", "hvterm2"}, 529 {"serial-server", "hvterm2"},
530 { NULL, } 530 { "", "" }
531}; 531};
532MODULE_DEVICE_TABLE(vio, hvcs_driver_table); 532MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
533 533
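
Both vio device tables above change their terminator from { NULL, } to { "", "" }. A sketch of the resulting table shape, on the assumption (suggested by this change) that struct vio_device_id uses fixed-size character arrays for its match fields, so an empty string rather than a NULL pointer marks the end of the table:

        static struct vio_device_id example_table[] __devinitdata = {
                { "serial", "hvterm1" },
                { "", "" }              /* terminator */
        };
        MODULE_DEVICE_TABLE(vio, example_table);
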
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 5ce9c6269033..33862670e285 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -31,8 +31,6 @@
31#include <linux/ipmi_msgdefs.h> /* for completion codes */ 31#include <linux/ipmi_msgdefs.h> /* for completion codes */
32#include "ipmi_si_sm.h" 32#include "ipmi_si_sm.h"
33 33
34#define IPMI_BT_VERSION "v33"
35
36static int bt_debug = 0x00; /* Production value 0, see following flags */ 34static int bt_debug = 0x00; /* Production value 0, see following flags */
37 35
38#define BT_DEBUG_ENABLE 1 36#define BT_DEBUG_ENABLE 1
@@ -163,7 +161,8 @@ static int bt_start_transaction(struct si_sm_data *bt,
163{ 161{
164 unsigned int i; 162 unsigned int i;
165 163
166 if ((size < 2) || (size > IPMI_MAX_MSG_LENGTH)) return -1; 164 if ((size < 2) || (size > IPMI_MAX_MSG_LENGTH))
165 return -1;
167 166
168 if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED)) 167 if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
169 return -2; 168 return -2;
@@ -171,7 +170,8 @@ static int bt_start_transaction(struct si_sm_data *bt,
171 if (bt_debug & BT_DEBUG_MSG) { 170 if (bt_debug & BT_DEBUG_MSG) {
172 printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n"); 171 printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
173 printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq); 172 printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
174 for (i = 0; i < size; i ++) printk (" %02x", data[i]); 173 for (i = 0; i < size; i ++)
174 printk (" %02x", data[i]);
175 printk("\n"); 175 printk("\n");
176 } 176 }
177 bt->write_data[0] = size + 1; /* all data plus seq byte */ 177 bt->write_data[0] = size + 1; /* all data plus seq byte */
@@ -210,15 +210,18 @@ static int bt_get_result(struct si_sm_data *bt,
210 } else { 210 } else {
211 data[0] = bt->read_data[1]; 211 data[0] = bt->read_data[1];
212 data[1] = bt->read_data[3]; 212 data[1] = bt->read_data[3];
213 if (length < msg_len) bt->truncated = 1; 213 if (length < msg_len)
214 bt->truncated = 1;
214 if (bt->truncated) { /* can be set in read_all_bytes() */ 215 if (bt->truncated) { /* can be set in read_all_bytes() */
215 data[2] = IPMI_ERR_MSG_TRUNCATED; 216 data[2] = IPMI_ERR_MSG_TRUNCATED;
216 msg_len = 3; 217 msg_len = 3;
217 } else memcpy(data + 2, bt->read_data + 4, msg_len - 2); 218 } else
219 memcpy(data + 2, bt->read_data + 4, msg_len - 2);
218 220
219 if (bt_debug & BT_DEBUG_MSG) { 221 if (bt_debug & BT_DEBUG_MSG) {
220 printk (KERN_WARNING "BT: res (raw)"); 222 printk (KERN_WARNING "BT: res (raw)");
221 for (i = 0; i < msg_len; i++) printk(" %02x", data[i]); 223 for (i = 0; i < msg_len; i++)
224 printk(" %02x", data[i]);
222 printk ("\n"); 225 printk ("\n");
223 } 226 }
224 } 227 }
@@ -231,8 +234,10 @@ static int bt_get_result(struct si_sm_data *bt,
231 234
232static void reset_flags(struct si_sm_data *bt) 235static void reset_flags(struct si_sm_data *bt)
233{ 236{
234 if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); 237 if (BT_STATUS & BT_H_BUSY)
235 if (BT_STATUS & BT_B_BUSY) BT_CONTROL(BT_B_BUSY); 238 BT_CONTROL(BT_H_BUSY);
239 if (BT_STATUS & BT_B_BUSY)
240 BT_CONTROL(BT_B_BUSY);
236 BT_CONTROL(BT_CLR_WR_PTR); 241 BT_CONTROL(BT_CLR_WR_PTR);
237 BT_CONTROL(BT_SMS_ATN); 242 BT_CONTROL(BT_SMS_ATN);
238#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION 243#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION
@@ -241,7 +246,8 @@ static void reset_flags(struct si_sm_data *bt)
241 BT_CONTROL(BT_H_BUSY); 246 BT_CONTROL(BT_H_BUSY);
242 BT_CONTROL(BT_B2H_ATN); 247 BT_CONTROL(BT_B2H_ATN);
243 BT_CONTROL(BT_CLR_RD_PTR); 248 BT_CONTROL(BT_CLR_RD_PTR);
244 for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++) BMC2HOST; 249 for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++)
250 BMC2HOST;
245 BT_CONTROL(BT_H_BUSY); 251 BT_CONTROL(BT_H_BUSY);
246 } 252 }
247#endif 253#endif
@@ -258,7 +264,8 @@ static inline void write_all_bytes(struct si_sm_data *bt)
258 printk (" %02x", bt->write_data[i]); 264 printk (" %02x", bt->write_data[i]);
259 printk ("\n"); 265 printk ("\n");
260 } 266 }
261 for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]); 267 for (i = 0; i < bt->write_count; i++)
268 HOST2BMC(bt->write_data[i]);
262} 269}
263 270
264static inline int read_all_bytes(struct si_sm_data *bt) 271static inline int read_all_bytes(struct si_sm_data *bt)
@@ -278,7 +285,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
278 bt->truncated = 1; 285 bt->truncated = 1;
279 return 1; /* let next XACTION START clean it up */ 286 return 1; /* let next XACTION START clean it up */
280 } 287 }
281 for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST; 288 for (i = 1; i <= bt->read_count; i++)
289 bt->read_data[i] = BMC2HOST;
282 bt->read_count++; /* account for the length byte */ 290 bt->read_count++; /* account for the length byte */
283 291
284 if (bt_debug & BT_DEBUG_MSG) { 292 if (bt_debug & BT_DEBUG_MSG) {
@@ -295,7 +303,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
295 ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) 303 ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
296 return 1; 304 return 1;
297 305
298 if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "BT: bad packet: " 306 if (bt_debug & BT_DEBUG_MSG)
307 printk(KERN_WARNING "BT: bad packet: "
299 "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", 308 "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
300 bt->write_data[1], bt->write_data[2], bt->write_data[3], 309 bt->write_data[1], bt->write_data[2], bt->write_data[3],
301 bt->read_data[1], bt->read_data[2], bt->read_data[3]); 310 bt->read_data[1], bt->read_data[2], bt->read_data[3]);
@@ -359,7 +368,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
359 time); 368 time);
360 bt->last_state = bt->state; 369 bt->last_state = bt->state;
361 370
362 if (bt->state == BT_STATE_HOSED) return SI_SM_HOSED; 371 if (bt->state == BT_STATE_HOSED)
372 return SI_SM_HOSED;
363 373
364 if (bt->state != BT_STATE_IDLE) { /* do timeout test */ 374 if (bt->state != BT_STATE_IDLE) { /* do timeout test */
365 375
@@ -371,7 +381,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
371 /* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT 381 /* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT
372 (noticed in ipmi_smic_sm.c January 2004) */ 382 (noticed in ipmi_smic_sm.c January 2004) */
373 383
374 if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT)) time = 100; 384 if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT))
385 time = 100;
375 bt->timeout -= time; 386 bt->timeout -= time;
376 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) { 387 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
377 error_recovery(bt, "timed out"); 388 error_recovery(bt, "timed out");
@@ -393,12 +404,14 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
393 BT_CONTROL(BT_H_BUSY); 404 BT_CONTROL(BT_H_BUSY);
394 break; 405 break;
395 } 406 }
396 if (status & BT_B2H_ATN) break; 407 if (status & BT_B2H_ATN)
408 break;
397 bt->state = BT_STATE_WRITE_BYTES; 409 bt->state = BT_STATE_WRITE_BYTES;
398 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ 410 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
399 411
400 case BT_STATE_WRITE_BYTES: 412 case BT_STATE_WRITE_BYTES:
401 if (status & (BT_B_BUSY | BT_H2B_ATN)) break; 413 if (status & (BT_B_BUSY | BT_H2B_ATN))
414 break;
402 BT_CONTROL(BT_CLR_WR_PTR); 415 BT_CONTROL(BT_CLR_WR_PTR);
403 write_all_bytes(bt); 416 write_all_bytes(bt);
404 BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */ 417 BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */
@@ -406,7 +419,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
406 return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */ 419 return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
407 420
408 case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */ 421 case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
409 if (status & (BT_H2B_ATN | BT_B_BUSY)) break; 422 if (status & (BT_H2B_ATN | BT_B_BUSY))
423 break;
410 bt->state = BT_STATE_B2H_WAIT; 424 bt->state = BT_STATE_B2H_WAIT;
411 /* fall through with status */ 425 /* fall through with status */
412 426
@@ -415,15 +429,18 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
415 generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */ 429 generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
416 430
417 case BT_STATE_B2H_WAIT: 431 case BT_STATE_B2H_WAIT:
418 if (!(status & BT_B2H_ATN)) break; 432 if (!(status & BT_B2H_ATN))
433 break;
419 434
420 /* Assume ordered, uncached writes: no need to wait */ 435 /* Assume ordered, uncached writes: no need to wait */
421 if (!(status & BT_H_BUSY)) BT_CONTROL(BT_H_BUSY); /* set */ 436 if (!(status & BT_H_BUSY))
437 BT_CONTROL(BT_H_BUSY); /* set */
422 BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */ 438 BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */
423 BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */ 439 BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */
424 i = read_all_bytes(bt); 440 i = read_all_bytes(bt);
425 BT_CONTROL(BT_H_BUSY); /* clear */ 441 BT_CONTROL(BT_H_BUSY); /* clear */
426 if (!i) break; /* Try this state again */ 442 if (!i) /* Try this state again */
443 break;
427 bt->state = BT_STATE_READ_END; 444 bt->state = BT_STATE_READ_END;
428 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ 445 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
429 446
@@ -436,7 +453,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
436 453
437#ifdef MAKE_THIS_TRUE_IF_NECESSARY 454#ifdef MAKE_THIS_TRUE_IF_NECESSARY
438 455
439 if (status & BT_H_BUSY) break; 456 if (status & BT_H_BUSY)
457 break;
440#endif 458#endif
441 bt->seq++; 459 bt->seq++;
442 bt->state = BT_STATE_IDLE; 460 bt->state = BT_STATE_IDLE;
@@ -459,7 +477,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
459 break; 477 break;
460 478
461 case BT_STATE_RESET3: 479 case BT_STATE_RESET3:
462 if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY; 480 if (bt->timeout > 0)
481 return SI_SM_CALL_WITH_DELAY;
463 bt->state = BT_STATE_RESTART; /* printk in debug modes */ 482 bt->state = BT_STATE_RESTART; /* printk in debug modes */
464 break; 483 break;
465 484
@@ -485,7 +504,8 @@ static int bt_detect(struct si_sm_data *bt)
485 but that's what you get from reading a bogus address, so we 504 but that's what you get from reading a bogus address, so we
486 test that first. The calling routine uses negative logic. */ 505 test that first. The calling routine uses negative logic. */
487 506
488 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1; 507 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
508 return 1;
489 reset_flags(bt); 509 reset_flags(bt);
490 return 0; 510 return 0;
491} 511}
@@ -501,7 +521,6 @@ static int bt_size(void)
501 521
502struct si_sm_handlers bt_smi_handlers = 522struct si_sm_handlers bt_smi_handlers =
503{ 523{
504 .version = IPMI_BT_VERSION,
505 .init_data = bt_init_data, 524 .init_data = bt_init_data,
506 .start_transaction = bt_start_transaction, 525 .start_transaction = bt_start_transaction,
507 .get_result = bt_get_result, 526 .get_result = bt_get_result,
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index e0a53570fea1..883ac4352be4 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -47,8 +47,6 @@
47#include <linux/device.h> 47#include <linux/device.h>
48#include <linux/compat.h> 48#include <linux/compat.h>
49 49
50#define IPMI_DEVINTF_VERSION "v33"
51
52struct ipmi_file_private 50struct ipmi_file_private
53{ 51{
54 ipmi_user_t user; 52 ipmi_user_t user;
@@ -411,6 +409,7 @@ static int ipmi_ioctl(struct inode *inode,
411 break; 409 break;
412 } 410 }
413 411
412 /* The next four are legacy, not per-channel. */
414 case IPMICTL_SET_MY_ADDRESS_CMD: 413 case IPMICTL_SET_MY_ADDRESS_CMD:
415 { 414 {
416 unsigned int val; 415 unsigned int val;
@@ -420,22 +419,25 @@ static int ipmi_ioctl(struct inode *inode,
420 break; 419 break;
421 } 420 }
422 421
423 ipmi_set_my_address(priv->user, val); 422 rv = ipmi_set_my_address(priv->user, 0, val);
424 rv = 0;
425 break; 423 break;
426 } 424 }
427 425
428 case IPMICTL_GET_MY_ADDRESS_CMD: 426 case IPMICTL_GET_MY_ADDRESS_CMD:
429 { 427 {
430 unsigned int val; 428 unsigned int val;
429 unsigned char rval;
430
431 rv = ipmi_get_my_address(priv->user, 0, &rval);
432 if (rv)
433 break;
431 434
432 val = ipmi_get_my_address(priv->user); 435 val = rval;
433 436
434 if (copy_to_user(arg, &val, sizeof(val))) { 437 if (copy_to_user(arg, &val, sizeof(val))) {
435 rv = -EFAULT; 438 rv = -EFAULT;
436 break; 439 break;
437 } 440 }
438 rv = 0;
439 break; 441 break;
440 } 442 }
441 443
@@ -448,24 +450,94 @@ static int ipmi_ioctl(struct inode *inode,
448 break; 450 break;
449 } 451 }
450 452
451 ipmi_set_my_LUN(priv->user, val); 453 rv = ipmi_set_my_LUN(priv->user, 0, val);
452 rv = 0;
453 break; 454 break;
454 } 455 }
455 456
456 case IPMICTL_GET_MY_LUN_CMD: 457 case IPMICTL_GET_MY_LUN_CMD:
457 { 458 {
458 unsigned int val; 459 unsigned int val;
460 unsigned char rval;
459 461
460 val = ipmi_get_my_LUN(priv->user); 462 rv = ipmi_get_my_LUN(priv->user, 0, &rval);
463 if (rv)
464 break;
465
466 val = rval;
467
468 if (copy_to_user(arg, &val, sizeof(val))) {
469 rv = -EFAULT;
470 break;
471 }
472 break;
473 }
474
475 case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
476 {
477 struct ipmi_channel_lun_address_set val;
478
479 if (copy_from_user(&val, arg, sizeof(val))) {
480 rv = -EFAULT;
481 break;
482 }
483
484 return ipmi_set_my_address(priv->user, val.channel, val.value);
485 break;
486 }
487
488 case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
489 {
490 struct ipmi_channel_lun_address_set val;
491
492 if (copy_from_user(&val, arg, sizeof(val))) {
493 rv = -EFAULT;
494 break;
495 }
496
497 rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
498 if (rv)
499 break;
500
501 if (copy_to_user(arg, &val, sizeof(val))) {
502 rv = -EFAULT;
503 break;
504 }
505 break;
506 }
507
508 case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
509 {
510 struct ipmi_channel_lun_address_set val;
511
512 if (copy_from_user(&val, arg, sizeof(val))) {
513 rv = -EFAULT;
514 break;
515 }
516
517 rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
518 break;
519 }
520
521 case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
522 {
523 struct ipmi_channel_lun_address_set val;
524
525 if (copy_from_user(&val, arg, sizeof(val))) {
526 rv = -EFAULT;
527 break;
528 }
529
530 rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
531 if (rv)
532 break;
461 533
462 if (copy_to_user(arg, &val, sizeof(val))) { 534 if (copy_to_user(arg, &val, sizeof(val))) {
463 rv = -EFAULT; 535 rv = -EFAULT;
464 break; 536 break;
465 } 537 }
466 rv = 0;
467 break; 538 break;
468 } 539 }
540
469 case IPMICTL_SET_TIMING_PARMS_CMD: 541 case IPMICTL_SET_TIMING_PARMS_CMD:
470 { 542 {
471 struct ipmi_timing_parms parms; 543 struct ipmi_timing_parms parms;
@@ -748,8 +820,7 @@ static __init int init_ipmi_devintf(void)
748 if (ipmi_major < 0) 820 if (ipmi_major < 0)
749 return -EINVAL; 821 return -EINVAL;
750 822
751 printk(KERN_INFO "ipmi device interface version " 823 printk(KERN_INFO "ipmi device interface\n");
752 IPMI_DEVINTF_VERSION "\n");
753 824
754 ipmi_class = class_create(THIS_MODULE, "ipmi"); 825 ipmi_class = class_create(THIS_MODULE, "ipmi");
755 if (IS_ERR(ipmi_class)) { 826 if (IS_ERR(ipmi_class)) {
@@ -792,3 +863,5 @@ static __exit void cleanup_ipmi(void)
792module_exit(cleanup_ipmi); 863module_exit(cleanup_ipmi);
793 864
794MODULE_LICENSE("GPL"); 865MODULE_LICENSE("GPL");
866MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
867MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
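
The four new ioctls give userspace per-channel control over the slave address and LUN, alongside the legacy channel-0-only commands kept above. A hedged userspace sketch (not part of the patch; assumes the usual /dev/ipmi0 node and that <linux/ipmi.h> exports the structure and ioctl numbers added here):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/ipmi.h>

        int main(void)
        {
                struct ipmi_channel_lun_address_set set = { .channel = 1, .value = 0x20 };
                int fd = open("/dev/ipmi0", O_RDWR);

                if (fd < 0) {
                        perror("open /dev/ipmi0");
                        return 1;
                }
                if (ioctl(fd, IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD, &set) < 0)
                        perror("IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD");
                return 0;
        }
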
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 48cce24329be..d21853a594a3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -42,8 +42,6 @@
42#include <linux/ipmi_msgdefs.h> /* for completion codes */ 42#include <linux/ipmi_msgdefs.h> /* for completion codes */
43#include "ipmi_si_sm.h" 43#include "ipmi_si_sm.h"
44 44
45#define IPMI_KCS_VERSION "v33"
46
47/* Set this if you want a printout of why the state machine was hosed 45/* Set this if you want a printout of why the state machine was hosed
48 when it gets hosed. */ 46 when it gets hosed. */
49#define DEBUG_HOSED_REASON 47#define DEBUG_HOSED_REASON
@@ -489,7 +487,6 @@ static void kcs_cleanup(struct si_sm_data *kcs)
489 487
490struct si_sm_handlers kcs_smi_handlers = 488struct si_sm_handlers kcs_smi_handlers =
491{ 489{
492 .version = IPMI_KCS_VERSION,
493 .init_data = init_kcs_data, 490 .init_data = init_kcs_data,
494 .start_transaction = start_kcs_transaction, 491 .start_transaction = start_kcs_transaction,
495 .get_result = get_kcs_result, 492 .get_result = get_kcs_result,
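
Both low-level state machines drop their private "v33" version strings and no longer fill a .version member in their si_sm_handlers initialisers, and ipmi_devintf.c above drops its version banner as well; versioning is consolidated into the single define introduced in ipmi_msghandler.c below. Sketch of the intent (how the consolidated string is actually reported lies outside these hunks):

        #define IPMI_DRIVER_VERSION "36.0"
        /* e.g. printed once by the message handler at init time (assumed use):
         *      printk(KERN_INFO "ipmi message handler version "
         *             IPMI_DRIVER_VERSION "\n");
         */
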
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e16c13fe698d..463351d4f942 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -47,7 +47,8 @@
47#include <linux/proc_fs.h> 47#include <linux/proc_fs.h>
48 48
49#define PFX "IPMI message handler: " 49#define PFX "IPMI message handler: "
50#define IPMI_MSGHANDLER_VERSION "v33" 50
51#define IPMI_DRIVER_VERSION "36.0"
51 52
52static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53static int ipmi_init_msghandler(void); 54static int ipmi_init_msghandler(void);
@@ -116,7 +117,7 @@ struct seq_table
116 do { \ 117 do { \
117 seq = ((msgid >> 26) & 0x3f); \ 118 seq = ((msgid >> 26) & 0x3f); \
118 seqid = (msgid & 0x3fffff); \ 119 seqid = (msgid & 0x3fffff); \
119 } while(0) 120 } while (0)
120 121
121#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) 122#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
122 123
@@ -124,6 +125,14 @@ struct ipmi_channel
124{ 125{
125 unsigned char medium; 126 unsigned char medium;
126 unsigned char protocol; 127 unsigned char protocol;
128
129 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
130 but may be changed by the user. */
131 unsigned char address;
132
133 /* My LUN. This should generally stay the SMS LUN, but just in
134 case... */
135 unsigned char lun;
127}; 136};
128 137
129#ifdef CONFIG_PROC_FS 138#ifdef CONFIG_PROC_FS
@@ -135,7 +144,7 @@ struct ipmi_proc_entry
135#endif 144#endif
136 145
137#define IPMI_IPMB_NUM_SEQ 64 146#define IPMI_IPMB_NUM_SEQ 64
138#define IPMI_MAX_CHANNELS 8 147#define IPMI_MAX_CHANNELS 16
139struct ipmi_smi 148struct ipmi_smi
140{ 149{
141 /* What interface number are we? */ 150 /* What interface number are we? */
@@ -193,20 +202,6 @@ struct ipmi_smi
193 struct list_head waiting_events; 202 struct list_head waiting_events;
194 unsigned int waiting_events_count; /* How many events in queue? */ 203 unsigned int waiting_events_count; /* How many events in queue? */
195 204
196 /* This will be non-null if someone registers to receive all
197 IPMI commands (this is for interface emulation). There
198 may not be any things in the cmd_rcvrs list above when
199 this is registered. */
200 ipmi_user_t all_cmd_rcvr;
201
202 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
203 but may be changed by the user. */
204 unsigned char my_address;
205
206 /* My LUN. This should generally stay the SMS LUN, but just in
207 case... */
208 unsigned char my_lun;
209
210 /* The event receiver for my BMC, only really used at panic 205 /* The event receiver for my BMC, only really used at panic
211 shutdown as a place to store this. */ 206 shutdown as a place to store this. */
212 unsigned char event_receiver; 207 unsigned char event_receiver;
@@ -218,7 +213,7 @@ struct ipmi_smi
218 interface comes in with a NULL user, call this routine with 213 interface comes in with a NULL user, call this routine with
219 it. Note that the message will still be freed by the 214 it. Note that the message will still be freed by the
220 caller. This only works on the system interface. */ 215 caller. This only works on the system interface. */
221 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_smi_msg *msg); 216 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
222 217
223 /* When we are scanning the channels for an SMI, this will 218 /* When we are scanning the channels for an SMI, this will
224 tell which channel we are scanning. */ 219 tell which channel we are scanning. */
@@ -325,7 +320,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
325 down_read(&interfaces_sem); 320 down_read(&interfaces_sem);
326 down_write(&smi_watchers_sem); 321 down_write(&smi_watchers_sem);
327 list_add(&(watcher->link), &smi_watchers); 322 list_add(&(watcher->link), &smi_watchers);
328 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 323 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
329 if (ipmi_interfaces[i] != NULL) { 324 if (ipmi_interfaces[i] != NULL) {
330 watcher->new_smi(i); 325 watcher->new_smi(i);
331 } 326 }
@@ -458,7 +453,27 @@ unsigned int ipmi_addr_length(int addr_type)
458 453
459static void deliver_response(struct ipmi_recv_msg *msg) 454static void deliver_response(struct ipmi_recv_msg *msg)
460{ 455{
461 msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data); 456 if (! msg->user) {
457 ipmi_smi_t intf = msg->user_msg_data;
458 unsigned long flags;
459
460 /* Special handling for NULL users. */
461 if (intf->null_user_handler) {
462 intf->null_user_handler(intf, msg);
463 spin_lock_irqsave(&intf->counter_lock, flags);
464 intf->handled_local_responses++;
465 spin_unlock_irqrestore(&intf->counter_lock, flags);
466 } else {
467 /* No handler, so give up. */
468 spin_lock_irqsave(&intf->counter_lock, flags);
469 intf->unhandled_local_responses++;
470 spin_unlock_irqrestore(&intf->counter_lock, flags);
471 }
472 ipmi_free_recv_msg(msg);
473 } else {
474 msg->user->handler->ipmi_recv_hndl(msg,
475 msg->user->handler_data);
476 }
462} 477}
463 478
464/* Find the next sequence number not being used and add the given 479/* Find the next sequence number not being used and add the given
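
deliver_response() now deals with responses that have no associated user itself: if the interface registered a null_user_handler it is called and the response is counted as handled, otherwise it is only counted as unhandled, and in either case the recv_msg is freed here. The hook's type changes accordingly (see the struct ipmi_smi hunk above) to take a struct ipmi_recv_msg. Sketch of a handler wired up through that hook after this change (hypothetical name; freeing is left to deliver_response()):

        static void example_null_user_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
        {
                /* inspect msg->msg.netfn, msg->msg.cmd and msg->msg.data[] here;
                 * do not free msg -- deliver_response() frees NULL-user messages */
        }
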
@@ -475,9 +490,9 @@ static int intf_next_seq(ipmi_smi_t intf,
475 int rv = 0; 490 int rv = 0;
476 unsigned int i; 491 unsigned int i;
477 492
478 for (i=intf->curr_seq; 493 for (i = intf->curr_seq;
479 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 494 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
480 i=(i+1)%IPMI_IPMB_NUM_SEQ) 495 i = (i+1)%IPMI_IPMB_NUM_SEQ)
481 { 496 {
482 if (! intf->seq_table[i].inuse) 497 if (! intf->seq_table[i].inuse)
483 break; 498 break;
@@ -712,7 +727,7 @@ static int ipmi_destroy_user_nolock(ipmi_user_t user)
712 727
713 /* Remove the user from the interfaces sequence table. */ 728 /* Remove the user from the interfaces sequence table. */
714 spin_lock_irqsave(&(user->intf->seq_lock), flags); 729 spin_lock_irqsave(&(user->intf->seq_lock), flags);
715 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) { 730 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
716 if (user->intf->seq_table[i].inuse 731 if (user->intf->seq_table[i].inuse
717 && (user->intf->seq_table[i].recv_msg->user == user)) 732 && (user->intf->seq_table[i].recv_msg->user == user))
718 { 733 {
@@ -766,26 +781,44 @@ void ipmi_get_version(ipmi_user_t user,
766 *minor = user->intf->version_minor; 781 *minor = user->intf->version_minor;
767} 782}
768 783
769void ipmi_set_my_address(ipmi_user_t user, 784int ipmi_set_my_address(ipmi_user_t user,
770 unsigned char address) 785 unsigned int channel,
786 unsigned char address)
771{ 787{
772 user->intf->my_address = address; 788 if (channel >= IPMI_MAX_CHANNELS)
789 return -EINVAL;
790 user->intf->channels[channel].address = address;
791 return 0;
773} 792}
774 793
775unsigned char ipmi_get_my_address(ipmi_user_t user) 794int ipmi_get_my_address(ipmi_user_t user,
795 unsigned int channel,
796 unsigned char *address)
776{ 797{
777 return user->intf->my_address; 798 if (channel >= IPMI_MAX_CHANNELS)
799 return -EINVAL;
800 *address = user->intf->channels[channel].address;
801 return 0;
778} 802}
779 803
780void ipmi_set_my_LUN(ipmi_user_t user, 804int ipmi_set_my_LUN(ipmi_user_t user,
781 unsigned char LUN) 805 unsigned int channel,
806 unsigned char LUN)
782{ 807{
783 user->intf->my_lun = LUN & 0x3; 808 if (channel >= IPMI_MAX_CHANNELS)
809 return -EINVAL;
810 user->intf->channels[channel].lun = LUN & 0x3;
811 return 0;
784} 812}
785 813
786unsigned char ipmi_get_my_LUN(ipmi_user_t user) 814int ipmi_get_my_LUN(ipmi_user_t user,
815 unsigned int channel,
816 unsigned char *address)
787{ 817{
788 return user->intf->my_lun; 818 if (channel >= IPMI_MAX_CHANNELS)
819 return -EINVAL;
820 *address = user->intf->channels[channel].lun;
821 return 0;
789} 822}
790 823
791int ipmi_set_gets_events(ipmi_user_t user, int val) 824int ipmi_set_gets_events(ipmi_user_t user, int val)
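
The address and LUN accessors become per-channel and now return an error so callers can detect an out-of-range channel. Minimal sketch of a caller of the reworked kernel API (hypothetical helper; user is an ipmi_user_t obtained elsewhere):

        static int set_and_readback_sketch(ipmi_user_t user)
        {
                unsigned char addr;
                int rv;

                rv = ipmi_set_my_address(user, 0, 0x20);  /* channel 0 */
                if (rv)
                        return rv;      /* -EINVAL if channel >= IPMI_MAX_CHANNELS */
                return ipmi_get_my_address(user, 0, &addr);
        }
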
@@ -828,11 +861,6 @@ int ipmi_register_for_cmd(ipmi_user_t user,
828 861
829 read_lock(&(user->intf->users_lock)); 862 read_lock(&(user->intf->users_lock));
830 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags); 863 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
831 if (user->intf->all_cmd_rcvr != NULL) {
832 rv = -EBUSY;
833 goto out_unlock;
834 }
835
836 /* Make sure the command/netfn is not already registered. */ 864 /* Make sure the command/netfn is not already registered. */
837 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) { 865 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
838 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) { 866 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
@@ -847,7 +875,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
847 rcvr->user = user; 875 rcvr->user = user;
848 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs)); 876 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
849 } 877 }
850 out_unlock: 878
851 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); 879 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
852 read_unlock(&(user->intf->users_lock)); 880 read_unlock(&(user->intf->users_lock));
853 881
@@ -1213,7 +1241,7 @@ static inline int i_ipmi_request(ipmi_user_t user,
1213 unsigned char ipmb_seq; 1241 unsigned char ipmb_seq;
1214 long seqid; 1242 long seqid;
1215 1243
1216 if (addr->channel > IPMI_NUM_CHANNELS) { 1244 if (addr->channel >= IPMI_NUM_CHANNELS) {
1217 spin_lock_irqsave(&intf->counter_lock, flags); 1245 spin_lock_irqsave(&intf->counter_lock, flags);
1218 intf->sent_invalid_commands++; 1246 intf->sent_invalid_commands++;
1219 spin_unlock_irqrestore(&intf->counter_lock, flags); 1247 spin_unlock_irqrestore(&intf->counter_lock, flags);
@@ -1331,7 +1359,7 @@ static inline int i_ipmi_request(ipmi_user_t user,
1331#ifdef DEBUG_MSGING 1359#ifdef DEBUG_MSGING
1332 { 1360 {
1333 int m; 1361 int m;
1334 for (m=0; m<smi_msg->data_size; m++) 1362 for (m = 0; m < smi_msg->data_size; m++)
1335 printk(" %2.2x", smi_msg->data[m]); 1363 printk(" %2.2x", smi_msg->data[m]);
1336 printk("\n"); 1364 printk("\n");
1337 } 1365 }
@@ -1346,6 +1374,18 @@ static inline int i_ipmi_request(ipmi_user_t user,
1346 return rv; 1374 return rv;
1347} 1375}
1348 1376
1377static int check_addr(ipmi_smi_t intf,
1378 struct ipmi_addr *addr,
1379 unsigned char *saddr,
1380 unsigned char *lun)
1381{
1382 if (addr->channel >= IPMI_MAX_CHANNELS)
1383 return -EINVAL;
1384 *lun = intf->channels[addr->channel].lun;
1385 *saddr = intf->channels[addr->channel].address;
1386 return 0;
1387}
1388
1349int ipmi_request_settime(ipmi_user_t user, 1389int ipmi_request_settime(ipmi_user_t user,
1350 struct ipmi_addr *addr, 1390 struct ipmi_addr *addr,
1351 long msgid, 1391 long msgid,
@@ -1355,6 +1395,14 @@ int ipmi_request_settime(ipmi_user_t user,
1355 int retries, 1395 int retries,
1356 unsigned int retry_time_ms) 1396 unsigned int retry_time_ms)
1357{ 1397{
1398 unsigned char saddr, lun;
1399 int rv;
1400
1401 if (! user)
1402 return -EINVAL;
1403 rv = check_addr(user->intf, addr, &saddr, &lun);
1404 if (rv)
1405 return rv;
1358 return i_ipmi_request(user, 1406 return i_ipmi_request(user,
1359 user->intf, 1407 user->intf,
1360 addr, 1408 addr,
@@ -1363,8 +1411,8 @@ int ipmi_request_settime(ipmi_user_t user,
1363 user_msg_data, 1411 user_msg_data,
1364 NULL, NULL, 1412 NULL, NULL,
1365 priority, 1413 priority,
1366 user->intf->my_address, 1414 saddr,
1367 user->intf->my_lun, 1415 lun,
1368 retries, 1416 retries,
1369 retry_time_ms); 1417 retry_time_ms);
1370} 1418}
@@ -1378,6 +1426,14 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1378 struct ipmi_recv_msg *supplied_recv, 1426 struct ipmi_recv_msg *supplied_recv,
1379 int priority) 1427 int priority)
1380{ 1428{
1429 unsigned char saddr, lun;
1430 int rv;
1431
1432 if (! user)
1433 return -EINVAL;
1434 rv = check_addr(user->intf, addr, &saddr, &lun);
1435 if (rv)
1436 return rv;
1381 return i_ipmi_request(user, 1437 return i_ipmi_request(user,
1382 user->intf, 1438 user->intf,
1383 addr, 1439 addr,
@@ -1387,8 +1443,8 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1387 supplied_smi, 1443 supplied_smi,
1388 supplied_recv, 1444 supplied_recv,
1389 priority, 1445 priority,
1390 user->intf->my_address, 1446 saddr,
1391 user->intf->my_lun, 1447 lun,
1392 -1, 0); 1448 -1, 0);
1393} 1449}
1394 1450
@@ -1397,8 +1453,15 @@ static int ipmb_file_read_proc(char *page, char **start, off_t off,
1397{ 1453{
1398 char *out = (char *) page; 1454 char *out = (char *) page;
1399 ipmi_smi_t intf = data; 1455 ipmi_smi_t intf = data;
1456 int i;
1457	int rv = 0;
1400 1458
1401 return sprintf(out, "%x\n", intf->my_address); 1459 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1460 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1461 out[rv-1] = '\n'; /* Replace the final space with a newline */
1462 out[rv] = '\0';
1463 rv++;
1464 return rv;
1402} 1465}
1403 1466
1404static int version_file_read_proc(char *page, char **start, off_t off, 1467static int version_file_read_proc(char *page, char **start, off_t off,
@@ -1588,29 +1651,30 @@ send_channel_info_cmd(ipmi_smi_t intf, int chan)
1588 (struct ipmi_addr *) &si, 1651 (struct ipmi_addr *) &si,
1589 0, 1652 0,
1590 &msg, 1653 &msg,
1591 NULL, 1654 intf,
1592 NULL, 1655 NULL,
1593 NULL, 1656 NULL,
1594 0, 1657 0,
1595 intf->my_address, 1658 intf->channels[0].address,
1596 intf->my_lun, 1659 intf->channels[0].lun,
1597 -1, 0); 1660 -1, 0);
1598} 1661}
1599 1662
1600static void 1663static void
1601channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg) 1664channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1602{ 1665{
1603 int rv = 0; 1666 int rv = 0;
1604 int chan; 1667 int chan;
1605 1668
1606 if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2)) 1669 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
1607 && (msg->rsp[1] == IPMI_GET_CHANNEL_INFO_CMD)) 1670 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
1671 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
1608 { 1672 {
1609 /* It's the one we want */ 1673 /* It's the one we want */
1610 if (msg->rsp[2] != 0) { 1674 if (msg->msg.data[0] != 0) {
1611 /* Got an error from the channel, just go on. */ 1675 /* Got an error from the channel, just go on. */
1612 1676
1613 if (msg->rsp[2] == IPMI_INVALID_COMMAND_ERR) { 1677 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
1614 /* If the MC does not support this 1678 /* If the MC does not support this
1615 command, that is legal. We just 1679 command, that is legal. We just
1616 assume it has one IPMB at channel 1680 assume it has one IPMB at channel
@@ -1627,13 +1691,13 @@ channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
1627 } 1691 }
1628 goto next_channel; 1692 goto next_channel;
1629 } 1693 }
1630 if (msg->rsp_size < 6) { 1694 if (msg->msg.data_len < 4) {
1631 /* Message not big enough, just go on. */ 1695 /* Message not big enough, just go on. */
1632 goto next_channel; 1696 goto next_channel;
1633 } 1697 }
1634 chan = intf->curr_channel; 1698 chan = intf->curr_channel;
1635 intf->channels[chan].medium = msg->rsp[4] & 0x7f; 1699 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
1636 intf->channels[chan].protocol = msg->rsp[5] & 0x1f; 1700 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
1637 1701
1638 next_channel: 1702 next_channel:
1639 intf->curr_channel++; 1703 intf->curr_channel++;
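A reading aid for the conversion above: channel_handler() now receives the parsed struct ipmi_recv_msg rather than the raw SMI response, so every rsp[] index from 2 up shifts down by two into msg.data[]. The sketch below is a hypothetical helper (not part of the patch) that spells out the mapping the converted checks rely on; it assumes the kernel_ipmi_msg/msg_data fields of struct ipmi_recv_msg:

	static void fill_recv_from_rsp(struct ipmi_recv_msg *recv,
				       const unsigned char *rsp, int rsp_size)
	{
		recv->msg.netfn    = rsp[0] >> 2;	/* rsp[0] is (netfn << 2) | LUN */
		recv->msg.cmd      = rsp[1];
		recv->msg.data     = recv->msg_data;
		recv->msg.data_len = rsp_size - 2;
		memcpy(recv->msg_data, rsp + 2, rsp_size - 2);
	}

With that shift, rsp[2] (the completion code) becomes msg.data[0], rsp[4] and rsp[5] become msg.data[2] and msg.data[3], and the old "rsp_size < 6" length test turns into "data_len < 4", which is exactly what the hunk checks.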
@@ -1691,22 +1755,24 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1691 rv = -ENOMEM; 1755 rv = -ENOMEM;
1692 1756
1693 down_write(&interfaces_sem); 1757 down_write(&interfaces_sem);
1694 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 1758 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1695 if (ipmi_interfaces[i] == NULL) { 1759 if (ipmi_interfaces[i] == NULL) {
1696 new_intf->intf_num = i; 1760 new_intf->intf_num = i;
1697 new_intf->version_major = version_major; 1761 new_intf->version_major = version_major;
1698 new_intf->version_minor = version_minor; 1762 new_intf->version_minor = version_minor;
1699 if (slave_addr == 0) 1763 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1700 new_intf->my_address = IPMI_BMC_SLAVE_ADDR; 1764 new_intf->channels[j].address
1701 else 1765 = IPMI_BMC_SLAVE_ADDR;
1702 new_intf->my_address = slave_addr; 1766 new_intf->channels[j].lun = 2;
1703 new_intf->my_lun = 2; /* the SMS LUN. */ 1767 }
1768 if (slave_addr != 0)
1769 new_intf->channels[0].address = slave_addr;
1704 rwlock_init(&(new_intf->users_lock)); 1770 rwlock_init(&(new_intf->users_lock));
1705 INIT_LIST_HEAD(&(new_intf->users)); 1771 INIT_LIST_HEAD(&(new_intf->users));
1706 new_intf->handlers = handlers; 1772 new_intf->handlers = handlers;
1707 new_intf->send_info = send_info; 1773 new_intf->send_info = send_info;
1708 spin_lock_init(&(new_intf->seq_lock)); 1774 spin_lock_init(&(new_intf->seq_lock));
1709 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) { 1775 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
1710 new_intf->seq_table[j].inuse = 0; 1776 new_intf->seq_table[j].inuse = 0;
1711 new_intf->seq_table[j].seqid = 0; 1777 new_intf->seq_table[j].seqid = 0;
1712 } 1778 }
@@ -1722,7 +1788,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1722 rwlock_init(&(new_intf->cmd_rcvr_lock)); 1788 rwlock_init(&(new_intf->cmd_rcvr_lock));
1723 init_waitqueue_head(&new_intf->waitq); 1789 init_waitqueue_head(&new_intf->waitq);
1724 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs)); 1790 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
1725 new_intf->all_cmd_rcvr = NULL;
1726 1791
1727 spin_lock_init(&(new_intf->counter_lock)); 1792 spin_lock_init(&(new_intf->counter_lock));
1728 1793
@@ -1814,7 +1879,7 @@ static void clean_up_interface_data(ipmi_smi_t intf)
1814 free_recv_msg_list(&(intf->waiting_events)); 1879 free_recv_msg_list(&(intf->waiting_events));
1815 free_cmd_rcvr_list(&(intf->cmd_rcvrs)); 1880 free_cmd_rcvr_list(&(intf->cmd_rcvrs));
1816 1881
1817 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) { 1882 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1818 if ((intf->seq_table[i].inuse) 1883 if ((intf->seq_table[i].inuse)
1819 && (intf->seq_table[i].recv_msg)) 1884 && (intf->seq_table[i].recv_msg))
1820 { 1885 {
@@ -1833,7 +1898,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
1833 down_write(&interfaces_sem); 1898 down_write(&interfaces_sem);
1834 if (list_empty(&(intf->users))) 1899 if (list_empty(&(intf->users)))
1835 { 1900 {
1836 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 1901 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1837 if (ipmi_interfaces[i] == intf) { 1902 if (ipmi_interfaces[i] == intf) {
1838 remove_proc_entries(intf); 1903 remove_proc_entries(intf);
1839 spin_lock_irqsave(&interfaces_lock, flags); 1904 spin_lock_irqsave(&interfaces_lock, flags);
@@ -1960,15 +2025,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1960 2025
1961 read_lock(&(intf->cmd_rcvr_lock)); 2026 read_lock(&(intf->cmd_rcvr_lock));
1962 2027
1963 if (intf->all_cmd_rcvr) { 2028 /* Find the command/netfn. */
1964 user = intf->all_cmd_rcvr; 2029 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
1965 } else { 2030 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
1966 /* Find the command/netfn. */ 2031 user = rcvr->user;
1967 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { 2032 break;
1968 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
1969 user = rcvr->user;
1970 break;
1971 }
1972 } 2033 }
1973 } 2034 }
1974 read_unlock(&(intf->cmd_rcvr_lock)); 2035 read_unlock(&(intf->cmd_rcvr_lock));
@@ -1985,7 +2046,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1985 msg->data[3] = msg->rsp[6]; 2046 msg->data[3] = msg->rsp[6];
1986 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 2047 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
1987 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); 2048 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
1988 msg->data[6] = intf->my_address; 2049 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
1989 /* rqseq/lun */ 2050 /* rqseq/lun */
1990 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 2051 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
1991 msg->data[8] = msg->rsp[8]; /* cmd */ 2052 msg->data[8] = msg->rsp[8]; /* cmd */
@@ -1997,7 +2058,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1997 { 2058 {
1998 int m; 2059 int m;
1999 printk("Invalid command:"); 2060 printk("Invalid command:");
2000 for (m=0; m<msg->data_size; m++) 2061 for (m = 0; m < msg->data_size; m++)
2001 printk(" %2.2x", msg->data[m]); 2062 printk(" %2.2x", msg->data[m]);
2002 printk("\n"); 2063 printk("\n");
2003 } 2064 }
@@ -2145,15 +2206,11 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2145 2206
2146 read_lock(&(intf->cmd_rcvr_lock)); 2207 read_lock(&(intf->cmd_rcvr_lock));
2147 2208
2148 if (intf->all_cmd_rcvr) { 2209 /* Find the command/netfn. */
2149 user = intf->all_cmd_rcvr; 2210 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
2150 } else { 2211 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
2151 /* Find the command/netfn. */ 2212 user = rcvr->user;
2152 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { 2213 break;
2153 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
2154 user = rcvr->user;
2155 break;
2156 }
2157 } 2214 }
2158 } 2215 }
2159 read_unlock(&(intf->cmd_rcvr_lock)); 2216 read_unlock(&(intf->cmd_rcvr_lock));
@@ -2330,6 +2387,14 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
2330 unsigned long flags; 2387 unsigned long flags;
2331 2388
2332 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 2389 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2390 if (recv_msg == NULL)
2391 {
2392		printk(KERN_WARNING "IPMI message received with no owner. This\n"
2393		       "could be because of a malformed message, or\n"
2394		       "because of a hardware error. Contact your\n"
2395		       "hardware vendor for assistance.\n");
2396 return 0;
2397 }
2333 2398
2334 /* Make sure the user still exists. */ 2399 /* Make sure the user still exists. */
2335 list_for_each_entry(user, &(intf->users), link) { 2400 list_for_each_entry(user, &(intf->users), link) {
@@ -2340,19 +2405,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
2340 } 2405 }
2341 } 2406 }
2342 2407
2343 if (!found) { 2408 if ((! found) && recv_msg->user) {
2344 /* Special handling for NULL users. */ 2409 /* The user for the message went away, so give up. */
2345 if (!recv_msg->user && intf->null_user_handler){ 2410 spin_lock_irqsave(&intf->counter_lock, flags);
2346 intf->null_user_handler(intf, msg); 2411 intf->unhandled_local_responses++;
2347 spin_lock_irqsave(&intf->counter_lock, flags); 2412 spin_unlock_irqrestore(&intf->counter_lock, flags);
2348 intf->handled_local_responses++;
2349 spin_unlock_irqrestore(&intf->counter_lock, flags);
2350 }else{
2351 /* The user for the message went away, so give up. */
2352 spin_lock_irqsave(&intf->counter_lock, flags);
2353 intf->unhandled_local_responses++;
2354 spin_unlock_irqrestore(&intf->counter_lock, flags);
2355 }
2356 ipmi_free_recv_msg(recv_msg); 2413 ipmi_free_recv_msg(recv_msg);
2357 } else { 2414 } else {
2358 struct ipmi_system_interface_addr *smi_addr; 2415 struct ipmi_system_interface_addr *smi_addr;
@@ -2392,7 +2449,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
2392#ifdef DEBUG_MSGING 2449#ifdef DEBUG_MSGING
2393 int m; 2450 int m;
2394 printk("Recv:"); 2451 printk("Recv:");
2395 for (m=0; m<msg->rsp_size; m++) 2452 for (m = 0; m < msg->rsp_size; m++)
2396 printk(" %2.2x", msg->rsp[m]); 2453 printk(" %2.2x", msg->rsp[m]);
2397 printk("\n"); 2454 printk("\n");
2398#endif 2455#endif
@@ -2626,7 +2683,7 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2626 { 2683 {
2627 int m; 2684 int m;
2628 printk("Resend: "); 2685 printk("Resend: ");
2629 for (m=0; m<smi_msg->data_size; m++) 2686 for (m = 0; m < smi_msg->data_size; m++)
2630 printk(" %2.2x", smi_msg->data[m]); 2687 printk(" %2.2x", smi_msg->data[m]);
2631 printk("\n"); 2688 printk("\n");
2632 } 2689 }
@@ -2647,7 +2704,7 @@ ipmi_timeout_handler(long timeout_period)
2647 INIT_LIST_HEAD(&timeouts); 2704 INIT_LIST_HEAD(&timeouts);
2648 2705
2649 spin_lock(&interfaces_lock); 2706 spin_lock(&interfaces_lock);
2650 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2707 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2651 intf = ipmi_interfaces[i]; 2708 intf = ipmi_interfaces[i];
2652 if (intf == NULL) 2709 if (intf == NULL)
2653 continue; 2710 continue;
@@ -2672,7 +2729,7 @@ ipmi_timeout_handler(long timeout_period)
2672 have timed out, putting them in the timeouts 2729 have timed out, putting them in the timeouts
2673 list. */ 2730 list. */
2674 spin_lock_irqsave(&(intf->seq_lock), flags); 2731 spin_lock_irqsave(&(intf->seq_lock), flags);
2675 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) { 2732 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2676 struct seq_table *ent = &(intf->seq_table[j]); 2733 struct seq_table *ent = &(intf->seq_table[j]);
2677 if (!ent->inuse) 2734 if (!ent->inuse)
2678 continue; 2735 continue;
@@ -2712,7 +2769,7 @@ ipmi_timeout_handler(long timeout_period)
2712 spin_unlock(&intf->counter_lock); 2769 spin_unlock(&intf->counter_lock);
2713 smi_msg = smi_from_recv_msg(intf, 2770 smi_msg = smi_from_recv_msg(intf,
2714 ent->recv_msg, j, ent->seqid); 2771 ent->recv_msg, j, ent->seqid);
2715 if(!smi_msg) 2772 if (! smi_msg)
2716 continue; 2773 continue;
2717 2774
2718 spin_unlock_irqrestore(&(intf->seq_lock),flags); 2775 spin_unlock_irqrestore(&(intf->seq_lock),flags);
@@ -2743,7 +2800,7 @@ static void ipmi_request_event(void)
2743 int i; 2800 int i;
2744 2801
2745 spin_lock(&interfaces_lock); 2802 spin_lock(&interfaces_lock);
2746 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2803 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2747 intf = ipmi_interfaces[i]; 2804 intf = ipmi_interfaces[i];
2748 if (intf == NULL) 2805 if (intf == NULL)
2749 continue; 2806 continue;
@@ -2838,28 +2895,30 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
2838} 2895}
2839 2896
2840#ifdef CONFIG_IPMI_PANIC_STRING 2897#ifdef CONFIG_IPMI_PANIC_STRING
2841static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg) 2898static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2842{ 2899{
2843 if ((msg->rsp[0] == (IPMI_NETFN_SENSOR_EVENT_RESPONSE << 2)) 2900 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2844 && (msg->rsp[1] == IPMI_GET_EVENT_RECEIVER_CMD) 2901 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
2845 && (msg->rsp[2] == IPMI_CC_NO_ERROR)) 2902 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
2903 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2846 { 2904 {
2847 /* A get event receiver command, save it. */ 2905 /* A get event receiver command, save it. */
2848 intf->event_receiver = msg->rsp[3]; 2906 intf->event_receiver = msg->msg.data[1];
2849 intf->event_receiver_lun = msg->rsp[4] & 0x3; 2907 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
2850 } 2908 }
2851} 2909}
2852 2910
2853static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg) 2911static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2854{ 2912{
2855 if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2)) 2913 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2856 && (msg->rsp[1] == IPMI_GET_DEVICE_ID_CMD) 2914 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2857 && (msg->rsp[2] == IPMI_CC_NO_ERROR)) 2915 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
2916 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2858 { 2917 {
2859 /* A get device id command, save if we are an event 2918 /* A get device id command, save if we are an event
2860 receiver or generator. */ 2919 receiver or generator. */
2861 intf->local_sel_device = (msg->rsp[8] >> 2) & 1; 2920 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
2862 intf->local_event_generator = (msg->rsp[8] >> 5) & 1; 2921 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
2863 } 2922 }
2864} 2923}
2865#endif 2924#endif
@@ -2903,7 +2962,7 @@ static void send_panic_events(char *str)
2903 recv_msg.done = dummy_recv_done_handler; 2962 recv_msg.done = dummy_recv_done_handler;
2904 2963
2905 /* For every registered interface, send the event. */ 2964 /* For every registered interface, send the event. */
2906 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2965 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2907 intf = ipmi_interfaces[i]; 2966 intf = ipmi_interfaces[i];
2908 if (intf == NULL) 2967 if (intf == NULL)
2909 continue; 2968 continue;
@@ -2915,12 +2974,12 @@ static void send_panic_events(char *str)
2915 &addr, 2974 &addr,
2916 0, 2975 0,
2917 &msg, 2976 &msg,
2918 NULL, 2977 intf,
2919 &smi_msg, 2978 &smi_msg,
2920 &recv_msg, 2979 &recv_msg,
2921 0, 2980 0,
2922 intf->my_address, 2981 intf->channels[0].address,
2923 intf->my_lun, 2982 intf->channels[0].lun,
2924 0, 1); /* Don't retry, and don't wait. */ 2983 0, 1); /* Don't retry, and don't wait. */
2925 } 2984 }
2926 2985
@@ -2930,7 +2989,7 @@ static void send_panic_events(char *str)
2930 if (!str) 2989 if (!str)
2931 return; 2990 return;
2932 2991
2933 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2992 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2934 char *p = str; 2993 char *p = str;
2935 struct ipmi_ipmb_addr *ipmb; 2994 struct ipmi_ipmb_addr *ipmb;
2936 int j; 2995 int j;
@@ -2961,12 +3020,12 @@ static void send_panic_events(char *str)
2961 &addr, 3020 &addr,
2962 0, 3021 0,
2963 &msg, 3022 &msg,
2964 NULL, 3023 intf,
2965 &smi_msg, 3024 &smi_msg,
2966 &recv_msg, 3025 &recv_msg,
2967 0, 3026 0,
2968 intf->my_address, 3027 intf->channels[0].address,
2969 intf->my_lun, 3028 intf->channels[0].lun,
2970 0, 1); /* Don't retry, and don't wait. */ 3029 0, 1); /* Don't retry, and don't wait. */
2971 3030
2972 if (intf->local_event_generator) { 3031 if (intf->local_event_generator) {
@@ -2981,12 +3040,12 @@ static void send_panic_events(char *str)
2981 &addr, 3040 &addr,
2982 0, 3041 0,
2983 &msg, 3042 &msg,
2984 NULL, 3043 intf,
2985 &smi_msg, 3044 &smi_msg,
2986 &recv_msg, 3045 &recv_msg,
2987 0, 3046 0,
2988 intf->my_address, 3047 intf->channels[0].address,
2989 intf->my_lun, 3048 intf->channels[0].lun,
2990 0, 1); /* no retry, and no wait. */ 3049 0, 1); /* no retry, and no wait. */
2991 } 3050 }
2992 intf->null_user_handler = NULL; 3051 intf->null_user_handler = NULL;
@@ -2996,7 +3055,7 @@ static void send_panic_events(char *str)
2996 be zero, and it must not be my address. */ 3055 be zero, and it must not be my address. */
2997 if (((intf->event_receiver & 1) == 0) 3056 if (((intf->event_receiver & 1) == 0)
2998 && (intf->event_receiver != 0) 3057 && (intf->event_receiver != 0)
2999 && (intf->event_receiver != intf->my_address)) 3058 && (intf->event_receiver != intf->channels[0].address))
3000 { 3059 {
3001 /* The event receiver is valid, send an IPMB 3060 /* The event receiver is valid, send an IPMB
3002 message. */ 3061 message. */
@@ -3031,7 +3090,7 @@ static void send_panic_events(char *str)
3031 data[0] = 0; 3090 data[0] = 0;
3032 data[1] = 0; 3091 data[1] = 0;
3033 data[2] = 0xf0; /* OEM event without timestamp. */ 3092 data[2] = 0xf0; /* OEM event without timestamp. */
3034 data[3] = intf->my_address; 3093 data[3] = intf->channels[0].address;
3035 data[4] = j++; /* sequence # */ 3094 data[4] = j++; /* sequence # */
3036 /* Always give 11 bytes, so strncpy will fill 3095 /* Always give 11 bytes, so strncpy will fill
3037 it with zeroes for me. */ 3096 it with zeroes for me. */
@@ -3043,12 +3102,12 @@ static void send_panic_events(char *str)
3043 &addr, 3102 &addr,
3044 0, 3103 0,
3045 &msg, 3104 &msg,
3046 NULL, 3105 intf,
3047 &smi_msg, 3106 &smi_msg,
3048 &recv_msg, 3107 &recv_msg,
3049 0, 3108 0,
3050 intf->my_address, 3109 intf->channels[0].address,
3051 intf->my_lun, 3110 intf->channels[0].lun,
3052 0, 1); /* no retry, and no wait. */ 3111 0, 1); /* no retry, and no wait. */
3053 } 3112 }
3054 } 3113 }
@@ -3070,7 +3129,7 @@ static int panic_event(struct notifier_block *this,
3070 has_paniced = 1; 3129 has_paniced = 1;
3071 3130
3072 /* For every registered interface, set it to run to completion. */ 3131 /* For every registered interface, set it to run to completion. */
3073 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 3132 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3074 intf = ipmi_interfaces[i]; 3133 intf = ipmi_interfaces[i];
3075 if (intf == NULL) 3134 if (intf == NULL)
3076 continue; 3135 continue;
@@ -3099,9 +3158,9 @@ static int ipmi_init_msghandler(void)
3099 return 0; 3158 return 0;
3100 3159
3101 printk(KERN_INFO "ipmi message handler version " 3160 printk(KERN_INFO "ipmi message handler version "
3102 IPMI_MSGHANDLER_VERSION "\n"); 3161 IPMI_DRIVER_VERSION "\n");
3103 3162
3104 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 3163 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3105 ipmi_interfaces[i] = NULL; 3164 ipmi_interfaces[i] = NULL;
3106 } 3165 }
3107 3166
@@ -3171,6 +3230,9 @@ module_exit(cleanup_ipmi);
3171 3230
3172module_init(ipmi_init_msghandler_mod); 3231module_init(ipmi_init_msghandler_mod);
3173MODULE_LICENSE("GPL"); 3232MODULE_LICENSE("GPL");
3233MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3234MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3235MODULE_VERSION(IPMI_DRIVER_VERSION);
3174 3236
3175EXPORT_SYMBOL(ipmi_create_user); 3237EXPORT_SYMBOL(ipmi_create_user);
3176EXPORT_SYMBOL(ipmi_destroy_user); 3238EXPORT_SYMBOL(ipmi_destroy_user);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index f951c30236c9..e82a96ba396b 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -42,7 +42,6 @@
42#include <linux/ipmi_smi.h> 42#include <linux/ipmi_smi.h>
43 43
44#define PFX "IPMI poweroff: " 44#define PFX "IPMI poweroff: "
45#define IPMI_POWEROFF_VERSION "v33"
46 45
47/* Where to we insert our poweroff function? */ 46/* Where to we insert our poweroff function? */
48extern void (*pm_power_off)(void); 47extern void (*pm_power_off)(void);
@@ -53,16 +52,17 @@ extern void (*pm_power_off)(void);
53#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */ 52#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
54 53
55/* the IPMI data command */ 54/* the IPMI data command */
56static int poweroff_control = IPMI_CHASSIS_POWER_DOWN; 55static int poweroff_powercycle;
57 56
58/* parameter definition to allow user to flag power cycle */ 57/* parameter definition to allow user to flag power cycle */
59module_param(poweroff_control, int, IPMI_CHASSIS_POWER_DOWN); 58module_param(poweroff_powercycle, int, 0);
 60MODULE_PARM_DESC(poweroff_control, " Set to 2 to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); 59MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
61 60
62/* Stuff from the get device id command. */ 61/* Stuff from the get device id command. */
63static unsigned int mfg_id; 62static unsigned int mfg_id;
64static unsigned int prod_id; 63static unsigned int prod_id;
65static unsigned char capabilities; 64static unsigned char capabilities;
65static unsigned char ipmi_version;
66 66
67/* We use our own messages for this operation, we don't let the system 67/* We use our own messages for this operation, we don't let the system
68 allocate them, since we may be in a panic situation. The whole 68 allocate them, since we may be in a panic situation. The whole
@@ -338,6 +338,25 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
338} 338}
339 339
340/* 340/*
341 * ipmi_dell_chassis_detect()
342 * Dell systems with IPMI < 1.5 don't set the chassis capability bit
343 * but they can handle a chassis poweroff or powercycle command.
344 */
345
346#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
347static int ipmi_dell_chassis_detect (ipmi_user_t user)
348{
349 const char ipmi_version_major = ipmi_version & 0xF;
350 const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
351 const char mfr[3]=DELL_IANA_MFR_ID;
352 if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
353 ipmi_version_major <= 1 &&
354 ipmi_version_minor < 5)
355 return 1;
356 return 0;
357}
358
359/*
341 * Standard chassis support 360 * Standard chassis support
342 */ 361 */
343 362
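The new Dell entry reuses the detect/poweroff contract already followed by the CPI1 and chassis handlers: detect() returns non-zero when the platform quirk applies, and poweroff_functions[] (shown in a later hunk) is scanned in order, so specific matches must sit ahead of the generic "chassis" fallback. A hedged skeleton of what adding another platform would involve; the names are illustrative only:

	static int example_detect(ipmi_user_t user)
	{
		return 0;	/* return 1 once this platform is recognized */
	}

	static void example_poweroff(ipmi_user_t user)
	{
		/* issue the platform-specific chassis-control request here */
	}

	/* entry placed in poweroff_functions[] before the generic one:
	 *	{ .platform_type = "example",
	 *	  .detect        = example_detect,
	 *	  .poweroff_func = example_poweroff },
	 */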
@@ -366,37 +385,34 @@ static void ipmi_poweroff_chassis (ipmi_user_t user)
366 385
367 powercyclefailed: 386 powercyclefailed:
368 printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", 387 printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
369 ((poweroff_control != IPMI_CHASSIS_POWER_CYCLE) ? "down" : "cycle")); 388 (poweroff_powercycle ? "cycle" : "down"));
370 389
371 /* 390 /*
372 * Power down 391 * Power down
373 */ 392 */
374 send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST; 393 send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
375 send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD; 394 send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
376 data[0] = poweroff_control; 395 if (poweroff_powercycle)
396 data[0] = IPMI_CHASSIS_POWER_CYCLE;
397 else
398 data[0] = IPMI_CHASSIS_POWER_DOWN;
377 send_msg.data = data; 399 send_msg.data = data;
378 send_msg.data_len = sizeof(data); 400 send_msg.data_len = sizeof(data);
379 rv = ipmi_request_in_rc_mode(user, 401 rv = ipmi_request_in_rc_mode(user,
380 (struct ipmi_addr *) &smi_addr, 402 (struct ipmi_addr *) &smi_addr,
381 &send_msg); 403 &send_msg);
382 if (rv) { 404 if (rv) {
383 switch (poweroff_control) { 405 if (poweroff_powercycle) {
384 case IPMI_CHASSIS_POWER_CYCLE: 406 /* power cycle failed, default to power down */
385 /* power cycle failed, default to power down */ 407 printk(KERN_ERR PFX "Unable to send chassis power " \
386 printk(KERN_ERR PFX "Unable to send chassis power " \ 408 "cycle message, IPMI error 0x%x\n", rv);
387 "cycle message, IPMI error 0x%x\n", rv); 409 poweroff_powercycle = 0;
388 poweroff_control = IPMI_CHASSIS_POWER_DOWN; 410 goto powercyclefailed;
389 goto powercyclefailed;
390
391 case IPMI_CHASSIS_POWER_DOWN:
392 default:
393 printk(KERN_ERR PFX "Unable to send chassis power " \
394 "down message, IPMI error 0x%x\n", rv);
395 break;
396 } 411 }
397 }
398 412
399 return; 413 printk(KERN_ERR PFX "Unable to send chassis power " \
414 "down message, IPMI error 0x%x\n", rv);
415 }
400} 416}
401 417
402 418
@@ -414,6 +430,9 @@ static struct poweroff_function poweroff_functions[] = {
414 { .platform_type = "CPI1", 430 { .platform_type = "CPI1",
415 .detect = ipmi_cpi1_detect, 431 .detect = ipmi_cpi1_detect,
416 .poweroff_func = ipmi_poweroff_cpi1 }, 432 .poweroff_func = ipmi_poweroff_cpi1 },
433 { .platform_type = "chassis",
434 .detect = ipmi_dell_chassis_detect,
435 .poweroff_func = ipmi_poweroff_chassis },
417 /* Chassis should generally be last, other things should override 436 /* Chassis should generally be last, other things should override
418 it. */ 437 it. */
419 { .platform_type = "chassis", 438 { .platform_type = "chassis",
@@ -499,10 +518,11 @@ static void ipmi_po_new_smi(int if_num)
499 prod_id = (halt_recv_msg.msg.data[10] 518 prod_id = (halt_recv_msg.msg.data[10]
500 | (halt_recv_msg.msg.data[11] << 8)); 519 | (halt_recv_msg.msg.data[11] << 8));
501 capabilities = halt_recv_msg.msg.data[6]; 520 capabilities = halt_recv_msg.msg.data[6];
521 ipmi_version = halt_recv_msg.msg.data[5];
502 522
503 523
504 /* Scan for a poweroff method */ 524 /* Scan for a poweroff method */
505 for (i=0; i<NUM_PO_FUNCS; i++) { 525 for (i = 0; i < NUM_PO_FUNCS; i++) {
506 if (poweroff_functions[i].detect(ipmi_user)) 526 if (poweroff_functions[i].detect(ipmi_user))
507 goto found; 527 goto found;
508 } 528 }
@@ -538,39 +558,35 @@ static struct ipmi_smi_watcher smi_watcher =
538 558
539 559
540#ifdef CONFIG_PROC_FS 560#ifdef CONFIG_PROC_FS
541/* displays properties to proc */ 561#include <linux/sysctl.h>
542static int proc_read_chassctrl(char *page, char **start, off_t off, int count, 562
543 int *eof, void *data) 563static ctl_table ipmi_table[] = {
544{ 564 { .ctl_name = DEV_IPMI_POWEROFF_POWERCYCLE,
545 return sprintf(page, "%d\t[ 0=powerdown 2=powercycle ]\n", 565 .procname = "poweroff_powercycle",
546 poweroff_control); 566 .data = &poweroff_powercycle,
547} 567 .maxlen = sizeof(poweroff_powercycle),
568 .mode = 0644,
569 .proc_handler = &proc_dointvec },
570 { }
571};
548 572
549/* process property writes from proc */ 573static ctl_table ipmi_dir_table[] = {
550static int proc_write_chassctrl(struct file *file, const char *buffer, 574 { .ctl_name = DEV_IPMI,
551 unsigned long count, void *data) 575 .procname = "ipmi",
552{ 576 .mode = 0555,
553 int rv = count; 577 .child = ipmi_table },
554 unsigned int newval = 0; 578 { }
555 579};
556 sscanf(buffer, "%d", &newval);
557 switch (newval) {
558 case IPMI_CHASSIS_POWER_CYCLE:
559 printk(KERN_INFO PFX "power cycle is now enabled\n");
560 poweroff_control = newval;
561 break;
562
563 case IPMI_CHASSIS_POWER_DOWN:
564 poweroff_control = IPMI_CHASSIS_POWER_DOWN;
565 break;
566
567 default:
568 rv = -EINVAL;
569 break;
570 }
571 580
572 return rv; 581static ctl_table ipmi_root_table[] = {
573} 582 { .ctl_name = CTL_DEV,
583 .procname = "dev",
584 .mode = 0555,
585 .child = ipmi_dir_table },
586 { }
587};
588
589static struct ctl_table_header *ipmi_table_header;
574#endif /* CONFIG_PROC_FS */ 590#endif /* CONFIG_PROC_FS */
575 591
576/* 592/*
@@ -578,42 +594,32 @@ static int proc_write_chassctrl(struct file *file, const char *buffer,
578 */ 594 */
579static int ipmi_poweroff_init (void) 595static int ipmi_poweroff_init (void)
580{ 596{
581 int rv; 597 int rv;
582 struct proc_dir_entry *file;
583 598
584 printk ("Copyright (C) 2004 MontaVista Software -" 599 printk ("Copyright (C) 2004 MontaVista Software -"
585 " IPMI Powerdown via sys_reboot version " 600 " IPMI Powerdown via sys_reboot.\n");
586 IPMI_POWEROFF_VERSION ".\n"); 601
587 602 if (poweroff_powercycle)
588 switch (poweroff_control) { 603 printk(KERN_INFO PFX "Power cycle is enabled.\n");
589 case IPMI_CHASSIS_POWER_CYCLE: 604
590 printk(KERN_INFO PFX "Power cycle is enabled.\n"); 605#ifdef CONFIG_PROC_FS
591 break; 606 ipmi_table_header = register_sysctl_table(ipmi_root_table, 1);
592 607 if (!ipmi_table_header) {
593 case IPMI_CHASSIS_POWER_DOWN: 608 printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
594 default: 609 rv = -ENOMEM;
595 poweroff_control = IPMI_CHASSIS_POWER_DOWN; 610 goto out_err;
596 break;
597 } 611 }
612#endif
598 613
614#ifdef CONFIG_PROC_FS
599 rv = ipmi_smi_watcher_register(&smi_watcher); 615 rv = ipmi_smi_watcher_register(&smi_watcher);
616#endif
600 if (rv) { 617 if (rv) {
618 unregister_sysctl_table(ipmi_table_header);
601 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); 619 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
602 goto out_err; 620 goto out_err;
603 } 621 }
604 622
605#ifdef CONFIG_PROC_FS
606 file = create_proc_entry("poweroff_control", 0, proc_ipmi_root);
607 if (!file) {
608 printk(KERN_ERR PFX "Unable to create proc power control\n");
609 } else {
610 file->nlink = 1;
611 file->read_proc = proc_read_chassctrl;
612 file->write_proc = proc_write_chassctrl;
613 file->owner = THIS_MODULE;
614 }
615#endif
616
617 out_err: 623 out_err:
618 return rv; 624 return rv;
619} 625}
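With the proc read/write handlers replaced by the sysctl tables above, the power-cycle knob moves into the standard sysctl tree; from the table nesting registered here (dev -> ipmi -> poweroff_powercycle, mode 0644) it should appear as /proc/sys/dev/ipmi/poweroff_powercycle when procfs sysctl support is built in. A minimal userspace sketch of flipping it at runtime (illustrative, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Path follows from the ctl_table nesting registered above. */
		FILE *f = fopen("/proc/sys/dev/ipmi/poweroff_powercycle", "w");

		if (!f)
			return 1;	/* knob absent or insufficient permission */
		fputs("1\n", f);	/* any non-zero value selects power cycle */
		return fclose(f) != 0;
	}

The same behavior can be selected at load time through the poweroff_powercycle module parameter.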
@@ -624,7 +630,7 @@ static __exit void ipmi_poweroff_cleanup(void)
624 int rv; 630 int rv;
625 631
626#ifdef CONFIG_PROC_FS 632#ifdef CONFIG_PROC_FS
627 remove_proc_entry("poweroff_control", proc_ipmi_root); 633 unregister_sysctl_table(ipmi_table_header);
628#endif 634#endif
629 635
630 ipmi_smi_watcher_unregister(&smi_watcher); 636 ipmi_smi_watcher_unregister(&smi_watcher);
@@ -642,3 +648,5 @@ module_exit(ipmi_poweroff_cleanup);
642 648
643module_init(ipmi_poweroff_init); 649module_init(ipmi_poweroff_init);
644MODULE_LICENSE("GPL"); 650MODULE_LICENSE("GPL");
651MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
652MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a44b97304e95..1abec687865c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,11 +61,11 @@
61# endif 61# endif
62static inline void add_usec_to_timer(struct timer_list *t, long v) 62static inline void add_usec_to_timer(struct timer_list *t, long v)
63{ 63{
64 t->sub_expires += nsec_to_arch_cycle(v * 1000); 64 t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
65 while (t->sub_expires >= arch_cycles_per_jiffy) 65 while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
66 { 66 {
67 t->expires++; 67 t->expires++;
68 t->sub_expires -= arch_cycles_per_jiffy; 68 t->arch_cycle_expires -= arch_cycles_per_jiffy;
69 } 69 }
70} 70}
71#endif 71#endif
@@ -75,8 +75,7 @@ static inline void add_usec_to_timer(struct timer_list *t, long v)
75#include <asm/io.h> 75#include <asm/io.h>
76#include "ipmi_si_sm.h" 76#include "ipmi_si_sm.h"
77#include <linux/init.h> 77#include <linux/init.h>
78 78#include <linux/dmi.h>
79#define IPMI_SI_VERSION "v33"
80 79
81/* Measure times between events in the driver. */ 80/* Measure times between events in the driver. */
82#undef DEBUG_TIMING 81#undef DEBUG_TIMING
@@ -109,6 +108,21 @@ enum si_type {
109 SI_KCS, SI_SMIC, SI_BT 108 SI_KCS, SI_SMIC, SI_BT
110}; 109};
111 110
111struct ipmi_device_id {
112 unsigned char device_id;
113 unsigned char device_revision;
114 unsigned char firmware_revision_1;
115 unsigned char firmware_revision_2;
116 unsigned char ipmi_version;
117 unsigned char additional_device_support;
118 unsigned char manufacturer_id[3];
119 unsigned char product_id[2];
120 unsigned char aux_firmware_revision[4];
121} __attribute__((packed));
122
123#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
124#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
125
112struct smi_info 126struct smi_info
113{ 127{
114 ipmi_smi_t intf; 128 ipmi_smi_t intf;
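The packed structure mirrors the Get Device ID response body (everything after the completion code), which is what try_get_dev_id() copies into it in a later hunk, and the two macros decode the BCD-style IPMI version byte, whose most significant digit sits in the low nibble. A short worked example, assuming a controller that reports IPMI 1.5:

	struct ipmi_device_id id = { .ipmi_version = 0x51 };	/* byte as returned by Get Device ID */
	int major = ipmi_version_major(&id);	/* 0x51 & 0xf  -> 1 */
	int minor = ipmi_version_minor(&id);	/* 0x51 >> 4   -> 5, i.e. IPMI 1.5 */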
@@ -131,12 +145,24 @@ struct smi_info
131 void (*irq_cleanup)(struct smi_info *info); 145 void (*irq_cleanup)(struct smi_info *info);
132 unsigned int io_size; 146 unsigned int io_size;
133 147
148 /* Per-OEM handler, called from handle_flags().
149 Returns 1 when handle_flags() needs to be re-run
150 or 0 indicating it set si_state itself.
151 */
152 int (*oem_data_avail_handler)(struct smi_info *smi_info);
153
134 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN 154 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
135 is set to hold the flags until we are done handling everything 155 is set to hold the flags until we are done handling everything
136 from the flags. */ 156 from the flags. */
137#define RECEIVE_MSG_AVAIL 0x01 157#define RECEIVE_MSG_AVAIL 0x01
138#define EVENT_MSG_BUFFER_FULL 0x02 158#define EVENT_MSG_BUFFER_FULL 0x02
139#define WDT_PRE_TIMEOUT_INT 0x08 159#define WDT_PRE_TIMEOUT_INT 0x08
160#define OEM0_DATA_AVAIL 0x20
161#define OEM1_DATA_AVAIL 0x40
162#define OEM2_DATA_AVAIL 0x80
163#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
164 OEM1_DATA_AVAIL | \
165 OEM2_DATA_AVAIL)
140 unsigned char msg_flags; 166 unsigned char msg_flags;
141 167
142 /* If set to true, this will request events the next time the 168 /* If set to true, this will request events the next time the
@@ -175,11 +201,7 @@ struct smi_info
175 interrupts. */ 201 interrupts. */
176 int interrupt_disabled; 202 int interrupt_disabled;
177 203
178 unsigned char ipmi_si_dev_rev; 204 struct ipmi_device_id device_id;
179 unsigned char ipmi_si_fw_rev_major;
180 unsigned char ipmi_si_fw_rev_minor;
181 unsigned char ipmi_version_major;
182 unsigned char ipmi_version_minor;
183 205
184 /* Slave address, could be reported from DMI. */ 206 /* Slave address, could be reported from DMI. */
185 unsigned char slave_addr; 207 unsigned char slave_addr;
@@ -245,7 +267,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
245 entry = smi_info->xmit_msgs.next; 267 entry = smi_info->xmit_msgs.next;
246 } 268 }
247 269
248 if (!entry) { 270 if (! entry) {
249 smi_info->curr_msg = NULL; 271 smi_info->curr_msg = NULL;
250 rv = SI_SM_IDLE; 272 rv = SI_SM_IDLE;
251 } else { 273 } else {
@@ -306,7 +328,7 @@ static void start_clear_flags(struct smi_info *smi_info)
306 memory, we will re-enable the interrupt. */ 328 memory, we will re-enable the interrupt. */
307static inline void disable_si_irq(struct smi_info *smi_info) 329static inline void disable_si_irq(struct smi_info *smi_info)
308{ 330{
309 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 331 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
310 disable_irq_nosync(smi_info->irq); 332 disable_irq_nosync(smi_info->irq);
311 smi_info->interrupt_disabled = 1; 333 smi_info->interrupt_disabled = 1;
312 } 334 }
@@ -322,6 +344,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
322 344
323static void handle_flags(struct smi_info *smi_info) 345static void handle_flags(struct smi_info *smi_info)
324{ 346{
347 retry:
325 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { 348 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
326 /* Watchdog pre-timeout */ 349 /* Watchdog pre-timeout */
327 spin_lock(&smi_info->count_lock); 350 spin_lock(&smi_info->count_lock);
@@ -336,7 +359,7 @@ static void handle_flags(struct smi_info *smi_info)
336 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 359 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
337 /* Messages available. */ 360 /* Messages available. */
338 smi_info->curr_msg = ipmi_alloc_smi_msg(); 361 smi_info->curr_msg = ipmi_alloc_smi_msg();
339 if (!smi_info->curr_msg) { 362 if (! smi_info->curr_msg) {
340 disable_si_irq(smi_info); 363 disable_si_irq(smi_info);
341 smi_info->si_state = SI_NORMAL; 364 smi_info->si_state = SI_NORMAL;
342 return; 365 return;
@@ -355,7 +378,7 @@ static void handle_flags(struct smi_info *smi_info)
355 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 378 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
356 /* Events available. */ 379 /* Events available. */
357 smi_info->curr_msg = ipmi_alloc_smi_msg(); 380 smi_info->curr_msg = ipmi_alloc_smi_msg();
358 if (!smi_info->curr_msg) { 381 if (! smi_info->curr_msg) {
359 disable_si_irq(smi_info); 382 disable_si_irq(smi_info);
360 smi_info->si_state = SI_NORMAL; 383 smi_info->si_state = SI_NORMAL;
361 return; 384 return;
@@ -371,6 +394,10 @@ static void handle_flags(struct smi_info *smi_info)
371 smi_info->curr_msg->data, 394 smi_info->curr_msg->data,
372 smi_info->curr_msg->data_size); 395 smi_info->curr_msg->data_size);
373 smi_info->si_state = SI_GETTING_EVENTS; 396 smi_info->si_state = SI_GETTING_EVENTS;
397 } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
398 if (smi_info->oem_data_avail_handler)
399 if (smi_info->oem_data_avail_handler(smi_info))
400 goto retry;
374 } else { 401 } else {
375 smi_info->si_state = SI_NORMAL; 402 smi_info->si_state = SI_NORMAL;
376 } 403 }
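The OEM branch added above gives per-platform code a hook without wiring vendor logic into handle_flags() itself: the callback either rewrites msg_flags and returns 1 so handle_flags() runs again (the new retry label), or sets si_state on its own and returns 0. A hedged skeleton of such a handler; the function name and the choice of OEM0_DATA_AVAIL are illustrative:

	static int example_oem_data_avail_handler(struct smi_info *smi_info)
	{
		if (smi_info->msg_flags & OEM0_DATA_AVAIL) {
			/* e.g. fold the OEM bit into "receive message available" */
			smi_info->msg_flags &= ~OEM0_DATA_AVAIL;
			smi_info->msg_flags |= RECEIVE_MSG_AVAIL;
			return 1;			/* ask handle_flags() to re-run */
		}
		smi_info->si_state = SI_NORMAL;		/* nothing claimed here */
		return 0;
	}

An interface that needs this would set smi_info->oem_data_avail_handler during its setup path.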
@@ -387,7 +414,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
387#endif 414#endif
388 switch (smi_info->si_state) { 415 switch (smi_info->si_state) {
389 case SI_NORMAL: 416 case SI_NORMAL:
390 if (!smi_info->curr_msg) 417 if (! smi_info->curr_msg)
391 break; 418 break;
392 419
393 smi_info->curr_msg->rsp_size 420 smi_info->curr_msg->rsp_size
@@ -761,18 +788,20 @@ static void si_restart_short_timer(struct smi_info *smi_info)
761#if defined(CONFIG_HIGH_RES_TIMERS) 788#if defined(CONFIG_HIGH_RES_TIMERS)
762 unsigned long flags; 789 unsigned long flags;
763 unsigned long jiffies_now; 790 unsigned long jiffies_now;
791 unsigned long seq;
764 792
765 if (del_timer(&(smi_info->si_timer))) { 793 if (del_timer(&(smi_info->si_timer))) {
766 /* If we don't delete the timer, then it will go off 794 /* If we don't delete the timer, then it will go off
767 immediately, anyway. So we only process if we 795 immediately, anyway. So we only process if we
768 actually delete the timer. */ 796 actually delete the timer. */
769 797
770 /* We already have irqsave on, so no need for it 798 do {
771 here. */ 799 seq = read_seqbegin_irqsave(&xtime_lock, flags);
772 read_lock(&xtime_lock); 800 jiffies_now = jiffies;
773 jiffies_now = jiffies; 801 smi_info->si_timer.expires = jiffies_now;
774 smi_info->si_timer.expires = jiffies_now; 802 smi_info->si_timer.arch_cycle_expires
775 smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now); 803 = get_arch_cycles(jiffies_now);
804 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
776 805
777 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); 806 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
778 807
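Both timer paths converted in this file (here and in smi_timeout() below) now sample jiffies and the arch-cycle offset inside a seqlock retry loop instead of taking the xtime read lock. The generic shape of that pattern, as a standalone sketch:

	unsigned long flags, seq;
	unsigned long snapshot;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		snapshot = jiffies;	/* read the seqlock-protected state */
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
	/* the loop only repeats if a writer raced the read section */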
@@ -826,15 +855,19 @@ static void smi_timeout(unsigned long data)
826 /* If the state machine asks for a short delay, then shorten 855 /* If the state machine asks for a short delay, then shorten
827 the timer timeout. */ 856 the timer timeout. */
828 if (smi_result == SI_SM_CALL_WITH_DELAY) { 857 if (smi_result == SI_SM_CALL_WITH_DELAY) {
858#if defined(CONFIG_HIGH_RES_TIMERS)
859 unsigned long seq;
860#endif
829 spin_lock_irqsave(&smi_info->count_lock, flags); 861 spin_lock_irqsave(&smi_info->count_lock, flags);
830 smi_info->short_timeouts++; 862 smi_info->short_timeouts++;
831 spin_unlock_irqrestore(&smi_info->count_lock, flags); 863 spin_unlock_irqrestore(&smi_info->count_lock, flags);
832#if defined(CONFIG_HIGH_RES_TIMERS) 864#if defined(CONFIG_HIGH_RES_TIMERS)
833 read_lock(&xtime_lock); 865 do {
834 smi_info->si_timer.expires = jiffies; 866 seq = read_seqbegin_irqsave(&xtime_lock, flags);
835 smi_info->si_timer.sub_expires 867 smi_info->si_timer.expires = jiffies;
836 = get_arch_cycles(smi_info->si_timer.expires); 868 smi_info->si_timer.arch_cycle_expires
837 read_unlock(&xtime_lock); 869 = get_arch_cycles(smi_info->si_timer.expires);
870 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
838 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); 871 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
839#else 872#else
840 smi_info->si_timer.expires = jiffies + 1; 873 smi_info->si_timer.expires = jiffies + 1;
@@ -845,7 +878,7 @@ static void smi_timeout(unsigned long data)
845 spin_unlock_irqrestore(&smi_info->count_lock, flags); 878 spin_unlock_irqrestore(&smi_info->count_lock, flags);
846 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 879 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
847#if defined(CONFIG_HIGH_RES_TIMERS) 880#if defined(CONFIG_HIGH_RES_TIMERS)
848 smi_info->si_timer.sub_expires = 0; 881 smi_info->si_timer.arch_cycle_expires = 0;
849#endif 882#endif
850 } 883 }
851 884
@@ -1014,7 +1047,7 @@ static int std_irq_setup(struct smi_info *info)
1014{ 1047{
1015 int rv; 1048 int rv;
1016 1049
1017 if (!info->irq) 1050 if (! info->irq)
1018 return 0; 1051 return 0;
1019 1052
1020 if (info->si_type == SI_BT) { 1053 if (info->si_type == SI_BT) {
@@ -1023,7 +1056,7 @@ static int std_irq_setup(struct smi_info *info)
1023 SA_INTERRUPT, 1056 SA_INTERRUPT,
1024 DEVICE_NAME, 1057 DEVICE_NAME,
1025 info); 1058 info);
1026 if (!rv) 1059 if (! rv)
1027 /* Enable the interrupt in the BT interface. */ 1060 /* Enable the interrupt in the BT interface. */
1028 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 1061 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1029 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1062 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1048,7 +1081,7 @@ static int std_irq_setup(struct smi_info *info)
1048 1081
1049static void std_irq_cleanup(struct smi_info *info) 1082static void std_irq_cleanup(struct smi_info *info)
1050{ 1083{
1051 if (!info->irq) 1084 if (! info->irq)
1052 return; 1085 return;
1053 1086
1054 if (info->si_type == SI_BT) 1087 if (info->si_type == SI_BT)
@@ -1121,7 +1154,7 @@ static int port_setup(struct smi_info *info)
1121 unsigned int *addr = info->io.info; 1154 unsigned int *addr = info->io.info;
1122 int mapsize; 1155 int mapsize;
1123 1156
1124 if (!addr || (!*addr)) 1157 if (! addr || (! *addr))
1125 return -ENODEV; 1158 return -ENODEV;
1126 1159
1127 info->io_cleanup = port_cleanup; 1160 info->io_cleanup = port_cleanup;
@@ -1164,15 +1197,15 @@ static int try_init_port(int intf_num, struct smi_info **new_info)
1164{ 1197{
1165 struct smi_info *info; 1198 struct smi_info *info;
1166 1199
1167 if (!ports[intf_num]) 1200 if (! ports[intf_num])
1168 return -ENODEV; 1201 return -ENODEV;
1169 1202
1170 if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE, 1203 if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1171 ports[intf_num])) 1204 ports[intf_num]))
1172 return -ENODEV; 1205 return -ENODEV;
1173 1206
1174 info = kmalloc(sizeof(*info), GFP_KERNEL); 1207 info = kmalloc(sizeof(*info), GFP_KERNEL);
1175 if (!info) { 1208 if (! info) {
1176 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n"); 1209 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1177 return -ENOMEM; 1210 return -ENOMEM;
1178 } 1211 }
@@ -1182,10 +1215,10 @@ static int try_init_port(int intf_num, struct smi_info **new_info)
1182 info->io.info = &(ports[intf_num]); 1215 info->io.info = &(ports[intf_num]);
1183 info->io.addr = NULL; 1216 info->io.addr = NULL;
1184 info->io.regspacing = regspacings[intf_num]; 1217 info->io.regspacing = regspacings[intf_num];
1185 if (!info->io.regspacing) 1218 if (! info->io.regspacing)
1186 info->io.regspacing = DEFAULT_REGSPACING; 1219 info->io.regspacing = DEFAULT_REGSPACING;
1187 info->io.regsize = regsizes[intf_num]; 1220 info->io.regsize = regsizes[intf_num];
1188 if (!info->io.regsize) 1221 if (! info->io.regsize)
1189 info->io.regsize = DEFAULT_REGSPACING; 1222 info->io.regsize = DEFAULT_REGSPACING;
1190 info->io.regshift = regshifts[intf_num]; 1223 info->io.regshift = regshifts[intf_num];
1191 info->irq = 0; 1224 info->irq = 0;
@@ -1270,7 +1303,7 @@ static int mem_setup(struct smi_info *info)
1270 unsigned long *addr = info->io.info; 1303 unsigned long *addr = info->io.info;
1271 int mapsize; 1304 int mapsize;
1272 1305
1273 if (!addr || (!*addr)) 1306 if (! addr || (! *addr))
1274 return -ENODEV; 1307 return -ENODEV;
1275 1308
1276 info->io_cleanup = mem_cleanup; 1309 info->io_cleanup = mem_cleanup;
@@ -1325,15 +1358,15 @@ static int try_init_mem(int intf_num, struct smi_info **new_info)
1325{ 1358{
1326 struct smi_info *info; 1359 struct smi_info *info;
1327 1360
1328 if (!addrs[intf_num]) 1361 if (! addrs[intf_num])
1329 return -ENODEV; 1362 return -ENODEV;
1330 1363
1331 if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE, 1364 if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1332 addrs[intf_num])) 1365 addrs[intf_num]))
1333 return -ENODEV; 1366 return -ENODEV;
1334 1367
1335 info = kmalloc(sizeof(*info), GFP_KERNEL); 1368 info = kmalloc(sizeof(*info), GFP_KERNEL);
1336 if (!info) { 1369 if (! info) {
1337 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n"); 1370 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1338 return -ENOMEM; 1371 return -ENOMEM;
1339 } 1372 }
@@ -1343,10 +1376,10 @@ static int try_init_mem(int intf_num, struct smi_info **new_info)
1343 info->io.info = &addrs[intf_num]; 1376 info->io.info = &addrs[intf_num];
1344 info->io.addr = NULL; 1377 info->io.addr = NULL;
1345 info->io.regspacing = regspacings[intf_num]; 1378 info->io.regspacing = regspacings[intf_num];
1346 if (!info->io.regspacing) 1379 if (! info->io.regspacing)
1347 info->io.regspacing = DEFAULT_REGSPACING; 1380 info->io.regspacing = DEFAULT_REGSPACING;
1348 info->io.regsize = regsizes[intf_num]; 1381 info->io.regsize = regsizes[intf_num];
1349 if (!info->io.regsize) 1382 if (! info->io.regsize)
1350 info->io.regsize = DEFAULT_REGSPACING; 1383 info->io.regsize = DEFAULT_REGSPACING;
1351 info->io.regshift = regshifts[intf_num]; 1384 info->io.regshift = regshifts[intf_num];
1352 info->irq = 0; 1385 info->irq = 0;
@@ -1404,7 +1437,7 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1404{ 1437{
1405 acpi_status status; 1438 acpi_status status;
1406 1439
1407 if (!info->irq) 1440 if (! info->irq)
1408 return 0; 1441 return 0;
1409 1442
1410 /* FIXME - is level triggered right? */ 1443 /* FIXME - is level triggered right? */
@@ -1428,7 +1461,7 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1428 1461
1429static void acpi_gpe_irq_cleanup(struct smi_info *info) 1462static void acpi_gpe_irq_cleanup(struct smi_info *info)
1430{ 1463{
1431 if (!info->irq) 1464 if (! info->irq)
1432 return; 1465 return;
1433 1466
1434 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); 1467 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
@@ -1504,10 +1537,10 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1504 addr_space = IPMI_MEM_ADDR_SPACE; 1537 addr_space = IPMI_MEM_ADDR_SPACE;
1505 else 1538 else
1506 addr_space = IPMI_IO_ADDR_SPACE; 1539 addr_space = IPMI_IO_ADDR_SPACE;
1507 if (!is_new_interface(-1, addr_space, spmi->addr.address)) 1540 if (! is_new_interface(-1, addr_space, spmi->addr.address))
1508 return -ENODEV; 1541 return -ENODEV;
1509 1542
1510 if (!spmi->addr.register_bit_width) { 1543 if (! spmi->addr.register_bit_width) {
1511 acpi_failure = 1; 1544 acpi_failure = 1;
1512 return -ENODEV; 1545 return -ENODEV;
1513 } 1546 }
@@ -1534,7 +1567,7 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1534 } 1567 }
1535 1568
1536 info = kmalloc(sizeof(*info), GFP_KERNEL); 1569 info = kmalloc(sizeof(*info), GFP_KERNEL);
1537 if (!info) { 1570 if (! info) {
1538 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); 1571 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1539 return -ENOMEM; 1572 return -ENOMEM;
1540 } 1573 }
@@ -1610,22 +1643,15 @@ typedef struct dmi_ipmi_data
1610static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS]; 1643static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
1611static int dmi_data_entries; 1644static int dmi_data_entries;
1612 1645
1613typedef struct dmi_header 1646static int __init decode_dmi(struct dmi_header *dm, int intf_num)
1614{
1615 u8 type;
1616 u8 length;
1617 u16 handle;
1618} dmi_header_t;
1619
1620static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1621{ 1647{
1622 u8 __iomem *data = (u8 __iomem *)dm; 1648 u8 *data = (u8 *)dm;
1623 unsigned long base_addr; 1649 unsigned long base_addr;
1624 u8 reg_spacing; 1650 u8 reg_spacing;
1625 u8 len = readb(&dm->length); 1651 u8 len = dm->length;
1626 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; 1652 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1627 1653
1628 ipmi_data->type = readb(&data[4]); 1654 ipmi_data->type = data[4];
1629 1655
1630 memcpy(&base_addr, data+8, sizeof(unsigned long)); 1656 memcpy(&base_addr, data+8, sizeof(unsigned long));
1631 if (len >= 0x11) { 1657 if (len >= 0x11) {
@@ -1640,12 +1666,12 @@ static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1640 } 1666 }
1641 /* If bit 4 of byte 0x10 is set, then the lsb for the address 1667 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1642 is odd. */ 1668 is odd. */
1643 ipmi_data->base_addr = base_addr | ((readb(&data[0x10]) & 0x10) >> 4); 1669 ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1644 1670
1645 ipmi_data->irq = readb(&data[0x11]); 1671 ipmi_data->irq = data[0x11];
1646 1672
1647 /* The top two bits of byte 0x10 hold the register spacing. */ 1673 /* The top two bits of byte 0x10 hold the register spacing. */
1648 reg_spacing = (readb(&data[0x10]) & 0xC0) >> 6; 1674 reg_spacing = (data[0x10] & 0xC0) >> 6;
1649 switch(reg_spacing){ 1675 switch(reg_spacing){
1650 case 0x00: /* Byte boundaries */ 1676 case 0x00: /* Byte boundaries */
1651 ipmi_data->offset = 1; 1677 ipmi_data->offset = 1;
@@ -1673,7 +1699,7 @@ static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1673 ipmi_data->offset = 1; 1699 ipmi_data->offset = 1;
1674 } 1700 }
1675 1701
1676 ipmi_data->slave_addr = readb(&data[6]); 1702 ipmi_data->slave_addr = data[6];
1677 1703
1678 if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) { 1704 if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
1679 dmi_data_entries++; 1705 dmi_data_entries++;
@@ -1685,94 +1711,29 @@ static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1685 return -1; 1711 return -1;
1686} 1712}
1687 1713
1688static int dmi_table(u32 base, int len, int num) 1714static void __init dmi_find_bmc(void)
1689{ 1715{
1690 u8 __iomem *buf; 1716 struct dmi_device *dev = NULL;
1691 struct dmi_header __iomem *dm;
1692 u8 __iomem *data;
1693 int i=1;
1694 int status=-1;
1695 int intf_num = 0; 1717 int intf_num = 0;
1696 1718
1697 buf = ioremap(base, len); 1719 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1698 if(buf==NULL) 1720 if (intf_num >= SI_MAX_DRIVERS)
1699 return -1; 1721 break;
1700
1701 data = buf;
1702
1703 while(i<num && (data - buf) < len)
1704 {
1705 dm=(dmi_header_t __iomem *)data;
1706
1707 if((data-buf+readb(&dm->length)) >= len)
1708 break;
1709
1710 if (readb(&dm->type) == 38) {
1711 if (decode_dmi(dm, intf_num) == 0) {
1712 intf_num++;
1713 if (intf_num >= SI_MAX_DRIVERS)
1714 break;
1715 }
1716 }
1717
1718 data+=readb(&dm->length);
1719 while((data-buf) < len && (readb(data)||readb(data+1)))
1720 data++;
1721 data+=2;
1722 i++;
1723 }
1724 iounmap(buf);
1725
1726 return status;
1727}
1728
1729static inline int dmi_checksum(u8 *buf)
1730{
1731 u8 sum=0;
1732 int a;
1733
1734 for(a=0; a<15; a++)
1735 sum+=buf[a];
1736 return (sum==0);
1737}
1738
1739static int dmi_decode(void)
1740{
1741 u8 buf[15];
1742 u32 fp=0xF0000;
1743
1744#ifdef CONFIG_SIMNOW
1745 return -1;
1746#endif
1747
1748 while(fp < 0xFFFFF)
1749 {
1750 isa_memcpy_fromio(buf, fp, 15);
1751 if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
1752 {
1753 u16 num=buf[13]<<8|buf[12];
1754 u16 len=buf[7]<<8|buf[6];
1755 u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
1756 1722
1757 if(dmi_table(base, len, num) == 0) 1723 decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
1758 return 0;
1759 }
1760 fp+=16;
1761 } 1724 }
1762
1763 return -1;
1764} 1725}
1765 1726
1766static int try_init_smbios(int intf_num, struct smi_info **new_info) 1727static int try_init_smbios(int intf_num, struct smi_info **new_info)
1767{ 1728{
1768 struct smi_info *info; 1729 struct smi_info *info;
1769 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; 1730 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1770 char *io_type; 1731 char *io_type;
1771 1732
1772 if (intf_num >= dmi_data_entries) 1733 if (intf_num >= dmi_data_entries)
1773 return -ENODEV; 1734 return -ENODEV;
1774 1735
1775 switch(ipmi_data->type) { 1736 switch (ipmi_data->type) {
1776 case 0x01: /* KCS */ 1737 case 0x01: /* KCS */
1777 si_type[intf_num] = "kcs"; 1738 si_type[intf_num] = "kcs";
1778 break; 1739 break;
@@ -1787,7 +1748,7 @@ static int try_init_smbios(int intf_num, struct smi_info **new_info)
1787 } 1748 }
1788 1749
1789 info = kmalloc(sizeof(*info), GFP_KERNEL); 1750 info = kmalloc(sizeof(*info), GFP_KERNEL);
1790 if (!info) { 1751 if (! info) {
1791 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n"); 1752 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1792 return -ENOMEM; 1753 return -ENOMEM;
1793 } 1754 }
@@ -1811,7 +1772,7 @@ static int try_init_smbios(int intf_num, struct smi_info **new_info)
1811 1772
1812 regspacings[intf_num] = ipmi_data->offset; 1773 regspacings[intf_num] = ipmi_data->offset;
1813 info->io.regspacing = regspacings[intf_num]; 1774 info->io.regspacing = regspacings[intf_num];
1814 if (!info->io.regspacing) 1775 if (! info->io.regspacing)
1815 info->io.regspacing = DEFAULT_REGSPACING; 1776 info->io.regspacing = DEFAULT_REGSPACING;
1816 info->io.regsize = DEFAULT_REGSPACING; 1777 info->io.regsize = DEFAULT_REGSPACING;
1817 info->io.regshift = regshifts[intf_num]; 1778 info->io.regshift = regshifts[intf_num];
@@ -1853,14 +1814,14 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1853 1814
1854 pci_smic_checked = 1; 1815 pci_smic_checked = 1;
1855 1816
1856 if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, 1817 pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL);
1857 NULL))) 1818 if (! pci_dev) {
1858 ; 1819 pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL);
1859 else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) && 1820 if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID))
1860 pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID) 1821 fe_rmc = 1;
1861 fe_rmc = 1; 1822 else
1862 else 1823 return -ENODEV;
1863 return -ENODEV; 1824 }
1864 1825
1865 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr); 1826 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1866 if (error) 1827 if (error)
@@ -1873,7 +1834,7 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1873 } 1834 }
1874 1835
1875 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */ 1836 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1876 if (!(base_addr & 0x0001)) 1837 if (! (base_addr & 0x0001))
1877 { 1838 {
1878 pci_dev_put(pci_dev); 1839 pci_dev_put(pci_dev);
1879 printk(KERN_ERR 1840 printk(KERN_ERR
@@ -1883,17 +1844,17 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1883 } 1844 }
1884 1845
1885 base_addr &= 0xFFFE; 1846 base_addr &= 0xFFFE;
1886 if (!fe_rmc) 1847 if (! fe_rmc)
1887 /* Data register starts at base address + 1 in eRMC */ 1848 /* Data register starts at base address + 1 in eRMC */
1888 ++base_addr; 1849 ++base_addr;
1889 1850
1890 if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) { 1851 if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
1891 pci_dev_put(pci_dev); 1852 pci_dev_put(pci_dev);
1892 return -ENODEV; 1853 return -ENODEV;
1893 } 1854 }
1894 1855
1895 info = kmalloc(sizeof(*info), GFP_KERNEL); 1856 info = kmalloc(sizeof(*info), GFP_KERNEL);
1896 if (!info) { 1857 if (! info) {
1897 pci_dev_put(pci_dev); 1858 pci_dev_put(pci_dev);
1898 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n"); 1859 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1899 return -ENOMEM; 1860 return -ENOMEM;
@@ -1904,7 +1865,7 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1904 ports[intf_num] = base_addr; 1865 ports[intf_num] = base_addr;
1905 info->io.info = &(ports[intf_num]); 1866 info->io.info = &(ports[intf_num]);
1906 info->io.regspacing = regspacings[intf_num]; 1867 info->io.regspacing = regspacings[intf_num];
1907 if (!info->io.regspacing) 1868 if (! info->io.regspacing)
1908 info->io.regspacing = DEFAULT_REGSPACING; 1869 info->io.regspacing = DEFAULT_REGSPACING;
1909 info->io.regsize = DEFAULT_REGSPACING; 1870 info->io.regsize = DEFAULT_REGSPACING;
1910 info->io.regshift = regshifts[intf_num]; 1871 info->io.regshift = regshifts[intf_num];
@@ -1925,7 +1886,7 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1925static int try_init_plug_and_play(int intf_num, struct smi_info **new_info) 1886static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1926{ 1887{
1927#ifdef CONFIG_PCI 1888#ifdef CONFIG_PCI
1928 if (find_pci_smic(intf_num, new_info)==0) 1889 if (find_pci_smic(intf_num, new_info) == 0)
1929 return 0; 1890 return 0;
1930#endif 1891#endif
1931 /* Include other methods here. */ 1892 /* Include other methods here. */
@@ -1943,7 +1904,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
1943 int rv = 0; 1904 int rv = 0;
1944 1905
1945 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1906 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1946 if (!resp) 1907 if (! resp)
1947 return -ENOMEM; 1908 return -ENOMEM;
1948 1909
1949 /* Do a Get Device ID command, since it comes back with some 1910 /* Do a Get Device ID command, since it comes back with some
@@ -1992,11 +1953,8 @@ static int try_get_dev_id(struct smi_info *smi_info)
1992 } 1953 }
1993 1954
1994 /* Record info from the get device id, in case we need it. */ 1955 /* Record info from the get device id, in case we need it. */
1995 smi_info->ipmi_si_dev_rev = resp[4] & 0xf; 1956 memcpy(&smi_info->device_id, &resp[3],
1996 smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f; 1957 min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
1997 smi_info->ipmi_si_fw_rev_minor = resp[6];
1998 smi_info->ipmi_version_major = resp[7] & 0xf;
1999 smi_info->ipmi_version_minor = resp[7] >> 4;
2000 1958
2001 out: 1959 out:
2002 kfree(resp); 1960 kfree(resp);
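The change above keeps the raw Get Device ID response instead of hand-parsing individual fields: the data bytes are copied, bounded by both the response length and the destination size, and the version nibbles are extracted later through the ipmi_version_major()/ipmi_version_minor() helpers. A stand-alone sketch of that bounded copy and nibble split, using an illustrative struct and payload rather than the kernel's ipmi_device_id:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative subset of the Get Device ID data; the field order mirrors
 * the response bytes, while the kernel's struct ipmi_device_id is larger. */
struct demo_device_id {
    uint8_t device_id;
    uint8_t device_revision;
    uint8_t firmware_revision_1;
    uint8_t firmware_revision_2;      /* BCD */
    uint8_t ipmi_version;             /* major in low nibble, minor in high */
};

#define demo_version_major(id) ((id)->ipmi_version & 0x0f)
#define demo_version_minor(id) ((id)->ipmi_version >> 4)

int main(void)
{
    /* Three header bytes precede the data in the response buffer; the
     * payload bytes here are illustrative values only. */
    uint8_t resp[] = { 0x1c, 0x01, 0x00,
                       0x20, 0x80, 0x01, 0x40, 0x51 };
    size_t resp_len = sizeof(resp);

    struct demo_device_id id;
    size_t n = resp_len - 3;
    if (n > sizeof(id))               /* same clamp as min_t() above */
        n = sizeof(id);
    memset(&id, 0, sizeof(id));
    memcpy(&id, &resp[3], n);

    printf("IPMI %d.%d, device id 0x%02x, firmware %x.%02x\n",
           demo_version_major(&id), demo_version_minor(&id),
           id.device_id, id.firmware_revision_1 & 0x7f,
           id.firmware_revision_2);
    return 0;
}
```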
@@ -2028,7 +1986,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2028 struct smi_info *smi = data; 1986 struct smi_info *smi = data;
2029 1987
2030 out += sprintf(out, "interrupts_enabled: %d\n", 1988 out += sprintf(out, "interrupts_enabled: %d\n",
2031 smi->irq && !smi->interrupt_disabled); 1989 smi->irq && ! smi->interrupt_disabled);
2032 out += sprintf(out, "short_timeouts: %ld\n", 1990 out += sprintf(out, "short_timeouts: %ld\n",
2033 smi->short_timeouts); 1991 smi->short_timeouts);
2034 out += sprintf(out, "long_timeouts: %ld\n", 1992 out += sprintf(out, "long_timeouts: %ld\n",
@@ -2057,6 +2015,73 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2057 return (out - ((char *) page)); 2015 return (out - ((char *) page));
2058} 2016}
2059 2017
2018/*
2019 * oem_data_avail_to_receive_msg_avail
2020 * @info - smi_info structure with msg_flags set
2021 *
2022 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2023 * Returns 1 indicating need to re-run handle_flags().
2024 */
2025static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2026{
2027 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2028 RECEIVE_MSG_AVAIL);
2029 return 1;
2030}
2031
2032/*
2033 * setup_dell_poweredge_oem_data_handler
2034 * @info - smi_info.device_id must be populated
2035 *
2036 * Systems that match, but have firmware version < 1.40 may assert
2037 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2038 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2039 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2040 * as RECEIVE_MSG_AVAIL instead.
2041 *
2042 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2043 * asserts the OEM[012] bits, and if it did, the driver would have to
2044 * change to handle that properly, we don't actually check for the
2045 * firmware version.
2046 * Device ID = 0x20 BMC on PowerEdge 8G servers
2047 * Device Revision = 0x80
2048 * Firmware Revision1 = 0x01 BMC version 1.40
2049 * Firmware Revision2 = 0x40 BCD encoded
2050 * IPMI Version = 0x51 IPMI 1.5
2051 * Manufacturer ID = A2 02 00 Dell IANA
2052 *
2053 */
2054#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2055#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2056#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2057#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
2058static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2059{
2060 struct ipmi_device_id *id = &smi_info->device_id;
2061 const char mfr[3]=DELL_IANA_MFR_ID;
2062 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))
2063 && (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID)
2064 && (id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV)
2065 && (id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION))
2066 {
2067 smi_info->oem_data_avail_handler =
2068 oem_data_avail_to_receive_msg_avail;
2069 }
2070}
2071
2072/*
2073 * setup_oem_data_handler
2074 * @info - smi_info.device_id must be filled in already
2075 *
2076 * Fills in smi_info.device_id.oem_data_available_handler
2077 * when we know what function to use there.
2078 */
2079
2080static void setup_oem_data_handler(struct smi_info *smi_info)
2081{
2082 setup_dell_poweredge_oem_data_handler(smi_info);
2083}
2084
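A stand-alone sketch of the quirk described above: the OEM "data available" bits are rewritten as RECEIVE_MSG_AVAIL so the normal receive path runs, and the rewrite is only installed when the BMC identity matches the Dell PowerEdge 8G values listed in the comment. The flag bit values and struct layout here are illustrative, not the driver's definitions:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative flag bits; the driver defines its own values. */
#define RECEIVE_MSG_AVAIL  0x01
#define OEM0_DATA_AVAIL    0x20
#define OEM1_DATA_AVAIL    0x40
#define OEM2_DATA_AVAIL    0x80
#define OEM_DATA_AVAIL     (OEM0_DATA_AVAIL | OEM1_DATA_AVAIL | OEM2_DATA_AVAIL)

struct demo_bmc {
    uint8_t device_id;
    uint8_t device_revision;
    uint8_t ipmi_version;
    uint8_t manufacturer_id[3];
    uint8_t msg_flags;
};

/* Collapse any OEM "data available" bit into RECEIVE_MSG_AVAIL and report
 * that the flag handler needs to run again. */
static int oem_to_receive_msg_avail(struct demo_bmc *bmc)
{
    bmc->msg_flags = (uint8_t)((bmc->msg_flags & ~OEM_DATA_AVAIL) |
                               RECEIVE_MSG_AVAIL);
    return 1;
}

/* Match the identity quoted in the comment block above. */
static int is_dell_poweredge_8g_bmc(const struct demo_bmc *bmc)
{
    static const uint8_t dell_iana[3] = { 0xA2, 0x02, 0x00 };

    return memcmp(bmc->manufacturer_id, dell_iana, 3) == 0 &&
           bmc->device_id == 0x20 &&
           bmc->device_revision == 0x80 &&
           bmc->ipmi_version == 0x51;
}

int main(void)
{
    struct demo_bmc bmc = {
        .device_id = 0x20, .device_revision = 0x80, .ipmi_version = 0x51,
        .manufacturer_id = { 0xA2, 0x02, 0x00 },
        .msg_flags = OEM0_DATA_AVAIL,
    };

    if (is_dell_poweredge_8g_bmc(&bmc) && (bmc.msg_flags & OEM_DATA_AVAIL))
        oem_to_receive_msg_avail(&bmc);

    printf("msg_flags now 0x%02x\n", bmc.msg_flags);  /* prints 0x01 */
    return 0;
}
```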
2060/* Returns 0 if initialized, or negative on an error. */ 2085/* Returns 0 if initialized, or negative on an error. */
2061static int init_one_smi(int intf_num, struct smi_info **smi) 2086static int init_one_smi(int intf_num, struct smi_info **smi)
2062{ 2087{
@@ -2068,19 +2093,15 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2068 if (rv) 2093 if (rv)
2069 rv = try_init_port(intf_num, &new_smi); 2094 rv = try_init_port(intf_num, &new_smi);
2070#ifdef CONFIG_ACPI_INTERPRETER 2095#ifdef CONFIG_ACPI_INTERPRETER
2071 if ((rv) && (si_trydefaults)) { 2096 if (rv && si_trydefaults)
2072 rv = try_init_acpi(intf_num, &new_smi); 2097 rv = try_init_acpi(intf_num, &new_smi);
2073 }
2074#endif 2098#endif
2075#ifdef CONFIG_X86 2099#ifdef CONFIG_X86
2076 if ((rv) && (si_trydefaults)) { 2100 if (rv && si_trydefaults)
2077 rv = try_init_smbios(intf_num, &new_smi); 2101 rv = try_init_smbios(intf_num, &new_smi);
2078 }
2079#endif 2102#endif
2080 if ((rv) && (si_trydefaults)) { 2103 if (rv && si_trydefaults)
2081 rv = try_init_plug_and_play(intf_num, &new_smi); 2104 rv = try_init_plug_and_play(intf_num, &new_smi);
2082 }
2083
2084 2105
2085 if (rv) 2106 if (rv)
2086 return rv; 2107 return rv;
@@ -2090,7 +2111,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2090 new_smi->si_sm = NULL; 2111 new_smi->si_sm = NULL;
2091 new_smi->handlers = NULL; 2112 new_smi->handlers = NULL;
2092 2113
2093 if (!new_smi->irq_setup) { 2114 if (! new_smi->irq_setup) {
2094 new_smi->irq = irqs[intf_num]; 2115 new_smi->irq = irqs[intf_num];
2095 new_smi->irq_setup = std_irq_setup; 2116 new_smi->irq_setup = std_irq_setup;
2096 new_smi->irq_cleanup = std_irq_cleanup; 2117 new_smi->irq_cleanup = std_irq_cleanup;
@@ -2124,7 +2145,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2124 2145
2125 /* Allocate the state machine's data and initialize it. */ 2146 /* Allocate the state machine's data and initialize it. */
2126 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2147 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2127 if (!new_smi->si_sm) { 2148 if (! new_smi->si_sm) {
2128 printk(" Could not allocate state machine memory\n"); 2149 printk(" Could not allocate state machine memory\n");
2129 rv = -ENOMEM; 2150 rv = -ENOMEM;
2130 goto out_err; 2151 goto out_err;
@@ -2155,6 +2176,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2155 if (rv) 2176 if (rv)
2156 goto out_err; 2177 goto out_err;
2157 2178
2179 setup_oem_data_handler(new_smi);
2180
2158 /* Try to claim any interrupts. */ 2181 /* Try to claim any interrupts. */
2159 new_smi->irq_setup(new_smi); 2182 new_smi->irq_setup(new_smi);
2160 2183
@@ -2188,8 +2211,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2188 2211
2189 rv = ipmi_register_smi(&handlers, 2212 rv = ipmi_register_smi(&handlers,
2190 new_smi, 2213 new_smi,
2191 new_smi->ipmi_version_major, 2214 ipmi_version_major(&new_smi->device_id),
2192 new_smi->ipmi_version_minor, 2215 ipmi_version_minor(&new_smi->device_id),
2193 new_smi->slave_addr, 2216 new_smi->slave_addr,
2194 &(new_smi->intf)); 2217 &(new_smi->intf));
2195 if (rv) { 2218 if (rv) {
@@ -2230,7 +2253,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2230 2253
2231 /* Wait for the timer to stop. This avoids problems with race 2254 /* Wait for the timer to stop. This avoids problems with race
2232 conditions removing the timer here. */ 2255 conditions removing the timer here. */
2233 while (!new_smi->timer_stopped) { 2256 while (! new_smi->timer_stopped) {
2234 set_current_state(TASK_UNINTERRUPTIBLE); 2257 set_current_state(TASK_UNINTERRUPTIBLE);
2235 schedule_timeout(1); 2258 schedule_timeout(1);
2236 } 2259 }
@@ -2270,7 +2293,7 @@ static __init int init_ipmi_si(void)
2270 /* Parse out the si_type string into its components. */ 2293 /* Parse out the si_type string into its components. */
2271 str = si_type_str; 2294 str = si_type_str;
2272 if (*str != '\0') { 2295 if (*str != '\0') {
2273 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) { 2296 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2274 si_type[i] = str; 2297 si_type[i] = str;
2275 str = strchr(str, ','); 2298 str = strchr(str, ',');
2276 if (str) { 2299 if (str) {
@@ -2282,22 +2305,14 @@ static __init int init_ipmi_si(void)
2282 } 2305 }
2283 } 2306 }
2284 2307
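The si_type parameter handling above splits the comma-separated module option in place: each si_type[] entry points into the original buffer and every ',' is overwritten with a NUL. A minimal sketch of the same in-place tokenization (SI_MAX_PARMS and the sample string are placeholders):

```c
#include <stdio.h>
#include <string.h>

#define SI_MAX_PARMS 4

int main(void)
{
    /* Writable copy standing in for the module's si_type_str buffer. */
    char si_type_str[] = "kcs,smic,bt";
    char *si_type[SI_MAX_PARMS] = { 0 };
    char *str = si_type_str;
    int i;

    for (i = 0; i < SI_MAX_PARMS && *str != '\0'; i++) {
        si_type[i] = str;
        str = strchr(str, ',');
        if (str) {
            *str = '\0';   /* terminate this token in place */
            str++;         /* continue after the comma */
        } else {
            break;         /* last token consumed the rest of the string */
        }
    }

    for (i = 0; i < SI_MAX_PARMS && si_type[i]; i++)
        printf("interface %d: %s\n", i, si_type[i]);
    return 0;
}
```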
2285 printk(KERN_INFO "IPMI System Interface driver version " 2308 printk(KERN_INFO "IPMI System Interface driver.\n");
2286 IPMI_SI_VERSION);
2287 if (kcs_smi_handlers.version)
2288 printk(", KCS version %s", kcs_smi_handlers.version);
2289 if (smic_smi_handlers.version)
2290 printk(", SMIC version %s", smic_smi_handlers.version);
2291 if (bt_smi_handlers.version)
2292 printk(", BT version %s", bt_smi_handlers.version);
2293 printk("\n");
2294 2309
2295#ifdef CONFIG_X86 2310#ifdef CONFIG_X86
2296 dmi_decode(); 2311 dmi_find_bmc();
2297#endif 2312#endif
2298 2313
2299 rv = init_one_smi(0, &(smi_infos[pos])); 2314 rv = init_one_smi(0, &(smi_infos[pos]));
2300 if (rv && !ports[0] && si_trydefaults) { 2315 if (rv && ! ports[0] && si_trydefaults) {
2301 /* If we are trying defaults and the initial port is 2316 /* If we are trying defaults and the initial port is
2302 not set, then set it. */ 2317 not set, then set it. */
2303 si_type[0] = "kcs"; 2318 si_type[0] = "kcs";
@@ -2319,7 +2334,7 @@ static __init int init_ipmi_si(void)
2319 if (rv == 0) 2334 if (rv == 0)
2320 pos++; 2335 pos++;
2321 2336
2322 for (i=1; i < SI_MAX_PARMS; i++) { 2337 for (i = 1; i < SI_MAX_PARMS; i++) {
2323 rv = init_one_smi(i, &(smi_infos[pos])); 2338 rv = init_one_smi(i, &(smi_infos[pos]));
2324 if (rv == 0) 2339 if (rv == 0)
2325 pos++; 2340 pos++;
@@ -2361,14 +2376,14 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
2361 2376
2362 /* Wait for the timer to stop. This avoids problems with race 2377 /* Wait for the timer to stop. This avoids problems with race
2363 conditions removing the timer here. */ 2378 conditions removing the timer here. */
2364 while (!to_clean->timer_stopped) { 2379 while (! to_clean->timer_stopped) {
2365 set_current_state(TASK_UNINTERRUPTIBLE); 2380 set_current_state(TASK_UNINTERRUPTIBLE);
2366 schedule_timeout(1); 2381 schedule_timeout(1);
2367 } 2382 }
2368 2383
2369 /* Interrupts and timeouts are stopped, now make sure the 2384 /* Interrupts and timeouts are stopped, now make sure the
2370 interface is in a clean state. */ 2385 interface is in a clean state. */
2371 while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) { 2386 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2372 poll(to_clean); 2387 poll(to_clean);
2373 set_current_state(TASK_UNINTERRUPTIBLE); 2388 set_current_state(TASK_UNINTERRUPTIBLE);
2374 schedule_timeout(1); 2389 schedule_timeout(1);
@@ -2392,13 +2407,15 @@ static __exit void cleanup_ipmi_si(void)
2392{ 2407{
2393 int i; 2408 int i;
2394 2409
2395 if (!initialized) 2410 if (! initialized)
2396 return; 2411 return;
2397 2412
2398 for (i=0; i<SI_MAX_DRIVERS; i++) { 2413 for (i = 0; i < SI_MAX_DRIVERS; i++) {
2399 cleanup_one_si(smi_infos[i]); 2414 cleanup_one_si(smi_infos[i]);
2400 } 2415 }
2401} 2416}
2402module_exit(cleanup_ipmi_si); 2417module_exit(cleanup_ipmi_si);
2403 2418
2404MODULE_LICENSE("GPL"); 2419MODULE_LICENSE("GPL");
2420MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2421MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index ae18747e670b..add2aa2732f0 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -46,8 +46,6 @@
46#include <linux/ipmi_msgdefs.h> /* for completion codes */ 46#include <linux/ipmi_msgdefs.h> /* for completion codes */
47#include "ipmi_si_sm.h" 47#include "ipmi_si_sm.h"
48 48
49#define IPMI_SMIC_VERSION "v33"
50
51/* smic_debug is a bit-field 49/* smic_debug is a bit-field
52 * SMIC_DEBUG_ENABLE - turned on for now 50 * SMIC_DEBUG_ENABLE - turned on for now
53 * SMIC_DEBUG_MSG - commands and their responses 51 * SMIC_DEBUG_MSG - commands and their responses
@@ -588,7 +586,6 @@ static int smic_size(void)
588 586
589struct si_sm_handlers smic_smi_handlers = 587struct si_sm_handlers smic_smi_handlers =
590{ 588{
591 .version = IPMI_SMIC_VERSION,
592 .init_data = init_smic_data, 589 .init_data = init_smic_data,
593 .start_transaction = start_smic_transaction, 590 .start_transaction = start_smic_transaction,
594 .get_result = smic_get_result, 591 .get_result = smic_get_result,
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index d35a953961cb..e71aaae855ad 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -53,8 +53,6 @@
53 53
54#define PFX "IPMI Watchdog: " 54#define PFX "IPMI Watchdog: "
55 55
56#define IPMI_WATCHDOG_VERSION "v33"
57
58/* 56/*
59 * The IPMI command/response information for the watchdog timer. 57 * The IPMI command/response information for the watchdog timer.
60 */ 58 */
@@ -259,7 +257,7 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
259 257
260 data[1] = 0; 258 data[1] = 0;
261 WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state); 259 WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
262 if (pretimeout > 0) { 260 if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
263 WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val); 261 WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
264 data[2] = pretimeout; 262 data[2] = pretimeout;
265 } else { 263 } else {
@@ -659,19 +657,18 @@ static ssize_t ipmi_read(struct file *file,
659 657
660static int ipmi_open(struct inode *ino, struct file *filep) 658static int ipmi_open(struct inode *ino, struct file *filep)
661{ 659{
662 switch (iminor(ino)) 660 switch (iminor(ino)) {
663 { 661 case WATCHDOG_MINOR:
664 case WATCHDOG_MINOR: 662 if (test_and_set_bit(0, &ipmi_wdog_open))
665 if(test_and_set_bit(0, &ipmi_wdog_open))
666 return -EBUSY; 663 return -EBUSY;
667 664
668 /* Don't start the timer now, let it start on the 665 /* Don't start the timer now, let it start on the
669 first heartbeat. */ 666 first heartbeat. */
670 ipmi_start_timer_on_heartbeat = 1; 667 ipmi_start_timer_on_heartbeat = 1;
671 return nonseekable_open(ino, filep); 668 return nonseekable_open(ino, filep);
672 669
673 default: 670 default:
674 return (-ENODEV); 671 return (-ENODEV);
675 } 672 }
676} 673}
677 674
@@ -817,15 +814,19 @@ static void ipmi_register_watchdog(int ipmi_intf)
817static int 814static int
818ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled) 815ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled)
819{ 816{
817 /* If we are not expecting a timeout, ignore it. */
818 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
819 return NOTIFY_DONE;
820
820 /* If no one else handled the NMI, we assume it was the IPMI 821 /* If no one else handled the NMI, we assume it was the IPMI
821 watchdog. */ 822 watchdog. */
822 if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) 823 if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) {
824 /* On some machines, the heartbeat will give
825 an error and not work unless we re-enable
826 the timer. So do so. */
827 pretimeout_since_last_heartbeat = 1;
823 panic(PFX "pre-timeout"); 828 panic(PFX "pre-timeout");
824 829 }
825 /* On some machines, the heartbeat will give
826 an error and not work unless we re-enable
827 the timer. So do so. */
828 pretimeout_since_last_heartbeat = 1;
829 830
830 return NOTIFY_DONE; 831 return NOTIFY_DONE;
831} 832}
@@ -924,9 +925,6 @@ static int __init ipmi_wdog_init(void)
924{ 925{
925 int rv; 926 int rv;
926 927
927 printk(KERN_INFO PFX "driver version "
928 IPMI_WATCHDOG_VERSION "\n");
929
930 if (strcmp(action, "reset") == 0) { 928 if (strcmp(action, "reset") == 0) {
931 action_val = WDOG_TIMEOUT_RESET; 929 action_val = WDOG_TIMEOUT_RESET;
932 } else if (strcmp(action, "none") == 0) { 930 } else if (strcmp(action, "none") == 0) {
@@ -1011,6 +1009,8 @@ static int __init ipmi_wdog_init(void)
1011 register_reboot_notifier(&wdog_reboot_notifier); 1009 register_reboot_notifier(&wdog_reboot_notifier);
1012 notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier); 1010 notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
1013 1011
1012 printk(KERN_INFO PFX "driver initialized\n");
1013
1014 return 0; 1014 return 0;
1015} 1015}
1016 1016
@@ -1062,3 +1062,5 @@ static void __exit ipmi_wdog_exit(void)
1062module_exit(ipmi_wdog_exit); 1062module_exit(ipmi_wdog_exit);
1063module_init(ipmi_wdog_init); 1063module_init(ipmi_wdog_init);
1064MODULE_LICENSE("GPL"); 1064MODULE_LICENSE("GPL");
1065MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
1066MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 115dbb35334b..3fa64c631108 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -750,7 +750,7 @@ static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
750 750
751 dev->soft = NULL; 751 dev->soft = NULL;
752 752
753 soft = kcalloc(1, sizeof(struct mbcs_soft), GFP_KERNEL); 753 soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
754 if (soft == NULL) 754 if (soft == NULL)
755 return -ENOMEM; 755 return -ENOMEM;
756 756
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 850a78c9c4bc..f182752fe918 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -35,10 +35,6 @@
35# include <linux/efi.h> 35# include <linux/efi.h>
36#endif 36#endif
37 37
38#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
39extern void tapechar_init(void);
40#endif
41
42/* 38/*
43 * Architectures vary in how they handle caching for addresses 39 * Architectures vary in how they handle caching for addresses
44 * outside of main memory. 40 * outside of main memory.
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 931efd58f87a..0c8375165e29 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -63,8 +63,6 @@ static DECLARE_MUTEX(misc_sem);
63#define DYNAMIC_MINORS 64 /* like dynamic majors */ 63#define DYNAMIC_MINORS 64 /* like dynamic majors */
64static unsigned char misc_minors[DYNAMIC_MINORS / 8]; 64static unsigned char misc_minors[DYNAMIC_MINORS / 8];
65 65
66extern int rtc_DP8570A_init(void);
67extern int rtc_MK48T08_init(void);
68extern int pmu_device_init(void); 66extern int pmu_device_init(void);
69 67
70#ifdef CONFIG_PROC_FS 68#ifdef CONFIG_PROC_FS
@@ -303,12 +301,7 @@ static int __init misc_init(void)
303 misc_class = class_create(THIS_MODULE, "misc"); 301 misc_class = class_create(THIS_MODULE, "misc");
304 if (IS_ERR(misc_class)) 302 if (IS_ERR(misc_class))
305 return PTR_ERR(misc_class); 303 return PTR_ERR(misc_class);
306#ifdef CONFIG_MVME16x 304
307 rtc_MK48T08_init();
308#endif
309#ifdef CONFIG_BVME6000
310 rtc_DP8570A_init();
311#endif
312 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) { 305 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
313 printk("unable to get major %d for misc devices\n", 306 printk("unable to get major %d for misc devices\n",
314 MISC_MAJOR); 307 MISC_MAJOR);
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 95f7046ff059..79e490ef2cf2 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -339,7 +339,7 @@ static int __init moxa_init(void)
339 339
340 init_MUTEX(&moxaBuffSem); 340 init_MUTEX(&moxaBuffSem);
341 moxaDriver->owner = THIS_MODULE; 341 moxaDriver->owner = THIS_MODULE;
342 moxaDriver->name = "ttya"; 342 moxaDriver->name = "ttyMX";
343 moxaDriver->devfs_name = "tts/a"; 343 moxaDriver->devfs_name = "tts/a";
344 moxaDriver->major = ttymajor; 344 moxaDriver->major = ttymajor;
345 moxaDriver->minor_start = 0; 345 moxaDriver->minor_start = 0;
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index d568991ac6b3..8666171e187b 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -57,6 +57,7 @@
57#include <linux/sched.h> 57#include <linux/sched.h>
58#include <linux/spinlock.h> 58#include <linux/spinlock.h>
59#include <linux/delay.h> 59#include <linux/delay.h>
60#include <linux/serial_8250.h>
60#include "smapi.h" 61#include "smapi.h"
61#include "mwavedd.h" 62#include "mwavedd.h"
62#include "3780i.h" 63#include "3780i.h"
@@ -410,8 +411,8 @@ static ssize_t mwave_write(struct file *file, const char __user *buf,
410 411
411static int register_serial_portandirq(unsigned int port, int irq) 412static int register_serial_portandirq(unsigned int port, int irq)
412{ 413{
413 struct serial_struct serial; 414 struct uart_port uart;
414 415
415 switch ( port ) { 416 switch ( port ) {
416 case 0x3f8: 417 case 0x3f8:
417 case 0x2f8: 418 case 0x2f8:
@@ -442,12 +443,14 @@ static int register_serial_portandirq(unsigned int port, int irq)
442 } /* switch */ 443 } /* switch */
443 /* irq is okay */ 444 /* irq is okay */
444 445
445 memset(&serial, 0, sizeof(serial)); 446 memset(&uart, 0, sizeof(struct uart_port));
446 serial.port = port; 447
447 serial.irq = irq; 448 uart.uartclk = 1843200;
448 serial.flags = ASYNC_SHARE_IRQ; 449 uart.iobase = port;
449 450 uart.irq = irq;
450 return register_serial(&serial); 451 uart.iotype = UPIO_PORT;
452 uart.flags = UPF_SHARE_IRQ;
453 return serial8250_register_port(&uart);
451} 454}
452 455
453 456
@@ -523,7 +526,7 @@ static void mwave_exit(void)
523#endif 526#endif
524 527
525 if ( pDrvData->sLine >= 0 ) { 528 if ( pDrvData->sLine >= 0 ) {
526 unregister_serial(pDrvData->sLine); 529 serial8250_unregister_port(pDrvData->sLine);
527 } 530 }
528 if (pDrvData->bMwaveDevRegistered) { 531 if (pDrvData->bMwaveDevRegistered) {
529 misc_deregister(&mwave_misc_dev); 532 misc_deregister(&mwave_misc_dev);
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index f022f0944434..d0ef1ae41298 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -63,7 +63,6 @@
63#include <asm/system.h> 63#include <asm/system.h>
64#include <asm/io.h> 64#include <asm/io.h>
65#include <asm/irq.h> 65#include <asm/irq.h>
66#include <asm/segment.h>
67#include <asm/bitops.h> 66#include <asm/bitops.h>
68#include <asm/uaccess.h> 67#include <asm/uaccess.h>
69 68
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b11d6b2129f..7999da25fe40 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1589,6 +1589,40 @@ u32 secure_tcpv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dp
1589EXPORT_SYMBOL(secure_tcpv6_port_ephemeral); 1589EXPORT_SYMBOL(secure_tcpv6_port_ephemeral);
1590#endif 1590#endif
1591 1591
1592#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
1593/* Similar to secure_tcp_sequence_number but generate a 48 bit value
1594 * bits 32-47 increase every key exchange
1595 * 0-31 hash(source, dest)
1596 */
1597u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
1598 __u16 sport, __u16 dport)
1599{
1600 struct timeval tv;
1601 u64 seq;
1602 __u32 hash[4];
1603 struct keydata *keyptr = get_keyptr();
1604
1605 hash[0] = saddr;
1606 hash[1] = daddr;
1607 hash[2] = (sport << 16) + dport;
1608 hash[3] = keyptr->secret[11];
1609
1610 seq = half_md4_transform(hash, keyptr->secret);
1611 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
1612
1613 do_gettimeofday(&tv);
1614 seq += tv.tv_usec + tv.tv_sec * 1000000;
1615 seq &= (1ull << 48) - 1;
1616#if 0
1617 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
1618 saddr, daddr, sport, dport, seq);
1619#endif
1620 return seq;
1621}
1622
1623EXPORT_SYMBOL(secure_dccp_sequence_number);
1624#endif
1625
1592#endif /* CONFIG_INET */ 1626#endif /* CONFIG_INET */
1593 1627
1594 1628
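secure_dccp_sequence_number() above composes a 48-bit initial sequence number: a keyed hash of the endpoints supplies the low bits, the rekey counter is OR-ed in at a fixed shift of (32 - HASH_BITS), a microsecond timestamp is added, and the sum is masked to 48 bits. Below is a user-space sketch of just that packing arithmetic; the hash value, counter, and HASH_BITS are stand-ins, not the kernel's half_md4_transform() or keydata:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_HASH_BITS 12     /* stand-in for the generator's HASH_BITS */

int main(void)
{
    /* Stand-ins for the keyed hash output and the rekey counter. */
    uint32_t hash_low32 = 0x89abcdefu;
    uint32_t key_count  = 7;

    uint64_t seq = hash_low32;

    /* Fold the counter in at the same fixed shift the kernel code uses,
     * (32 - HASH_BITS); per its comment the upper bits of the 48-bit
     * result advance with each key exchange. */
    seq |= (uint64_t)key_count << (32 - DEMO_HASH_BITS);

    /* Add a microsecond timestamp, then keep only the low 48 bits. */
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    seq += (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
    seq &= (1ull << 48) - 1;

    printf("48-bit initial sequence: 0x%012llx\n",
           (unsigned long long)seq);
    return 0;
}
```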
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index cd4fe8b1709f..63fff7c1244a 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -938,10 +938,9 @@ found:
938 938
939 /* 939 /*
940 * XXX Interrupt pin #7 in Espresso is shared between RTC and 940 * XXX Interrupt pin #7 in Espresso is shared between RTC and
941 * PCI Slot 2 INTA# (and some INTx# in Slot 1). SA_INTERRUPT here 941 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
942 * is asking for trouble with add-on boards. Change to SA_SHIRQ.
943 */ 942 */
944 if (request_irq(rtc_irq, rtc_interrupt, SA_INTERRUPT, "rtc", (void *)&rtc_port)) { 943 if (request_irq(rtc_irq, rtc_interrupt, SA_SHIRQ, "rtc", (void *)&rtc_port)) {
945 /* 944 /*
946 * Standard way for sparc to print irq's is to use 945 * Standard way for sparc to print irq's is to use
947 * __irq_itoa(). I think for EBus it's ok to use %d. 946 * __irq_itoa(). I think for EBus it's ok to use %d.
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index d692af57213a..baaa365285fa 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -19,6 +19,7 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/byteorder/generic.h> 20#include <linux/byteorder/generic.h>
21#include <asm/sn/sn_sal.h> 21#include <asm/sn/sn_sal.h>
22#include <asm/unaligned.h>
22#include "snsc.h" 23#include "snsc.h"
23 24
24static struct subch_data_s *event_sd; 25static struct subch_data_s *event_sd;
@@ -62,13 +63,16 @@ static int
62scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) 63scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
63{ 64{
64 char *desc_end; 65 char *desc_end;
66 __be32 from_buf;
65 67
66 /* record event source address */ 68 /* record event source address */
67 *src = be32_to_cpup((__be32 *)event); 69 from_buf = get_unaligned((__be32 *)event);
70 *src = be32_to_cpup(&from_buf);
68 event += 4; /* move on to event code */ 71 event += 4; /* move on to event code */
69 72
70 /* record the system controller's event code */ 73 /* record the system controller's event code */
71 *code = be32_to_cpup((__be32 *)event); 74 from_buf = get_unaligned((__be32 *)event);
75 *code = be32_to_cpup(&from_buf);
72 event += 4; /* move on to event arguments */ 76 event += 4; /* move on to event arguments */
73 77
74 /* how many arguments are in the packet? */ 78 /* how many arguments are in the packet? */
@@ -82,7 +86,8 @@ scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
82 /* not an integer argument, so give up */ 86 /* not an integer argument, so give up */
83 return -1; 87 return -1;
84 } 88 }
85 *esp_code = be32_to_cpup((__be32 *)event); 89 from_buf = get_unaligned((__be32 *)event);
90 *esp_code = be32_to_cpup(&from_buf);
86 event += 4; 91 event += 4;
87 92
88 /* parse out the event description */ 93 /* parse out the event description */
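The snsc_event change above avoids dereferencing a possibly unaligned __be32 pointer by fetching through get_unaligned() before byte-swapping. A portable user-space equivalent assembles the big-endian value byte by byte, which is safe at any alignment; the event packet below is fabricated for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Read a big-endian 32-bit value from an arbitrarily aligned buffer. */
static uint32_t read_be32_unaligned(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
    /* Fake event packet: source address, event code, one argument,
     * deliberately offset by one byte to force misalignment. */
    unsigned char raw[1 + 12] = { 0x00,
        0x00, 0x00, 0x10, 0x01,   /* source      */
        0x00, 0x00, 0x4b, 0x00,   /* event code  */
        0x00, 0x00, 0x00, 0x2a }; /* esp code    */
    const unsigned char *event = raw + 1;   /* misaligned on most ABIs */

    uint32_t src      = read_be32_unaligned(event);      event += 4;
    uint32_t code     = read_be32_unaligned(event);      event += 4;
    uint32_t esp_code = read_be32_unaligned(event);

    printf("src=0x%08x code=0x%08x esp=0x%08x\n", src, code, esp_code);
    return 0;
}
```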
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index cefbe985e55c..36ae9ad2598c 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -98,12 +98,13 @@ MODULE_PARM_DESC(useinput,
98 98
99#define SONYPI_DEVICE_MODEL_TYPE1 1 99#define SONYPI_DEVICE_MODEL_TYPE1 1
100#define SONYPI_DEVICE_MODEL_TYPE2 2 100#define SONYPI_DEVICE_MODEL_TYPE2 2
101#define SONYPI_DEVICE_MODEL_TYPE3 3
101 102
102/* type1 models use those */ 103/* type1 models use those */
103#define SONYPI_IRQ_PORT 0x8034 104#define SONYPI_IRQ_PORT 0x8034
104#define SONYPI_IRQ_SHIFT 22 105#define SONYPI_IRQ_SHIFT 22
105#define SONYPI_BASE 0x50 106#define SONYPI_TYPE1_BASE 0x50
106#define SONYPI_G10A (SONYPI_BASE+0x14) 107#define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14)
107#define SONYPI_TYPE1_REGION_SIZE 0x08 108#define SONYPI_TYPE1_REGION_SIZE 0x08
108#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 109#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04
109 110
@@ -114,6 +115,13 @@ MODULE_PARM_DESC(useinput,
114#define SONYPI_TYPE2_REGION_SIZE 0x20 115#define SONYPI_TYPE2_REGION_SIZE 0x20
115#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 116#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12
116 117
118/* type3 series specifics */
119#define SONYPI_TYPE3_BASE 0x40
120#define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */
121#define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */
122#define SONYPI_TYPE3_REGION_SIZE 0x20
123#define SONYPI_TYPE3_EVTYPE_OFFSET 0x12
124
117/* battery / brightness addresses */ 125/* battery / brightness addresses */
118#define SONYPI_BAT_FLAGS 0x81 126#define SONYPI_BAT_FLAGS 0x81
119#define SONYPI_LCD_LIGHT 0x96 127#define SONYPI_LCD_LIGHT 0x96
@@ -159,6 +167,10 @@ static struct sonypi_ioport_list sonypi_type2_ioport_list[] = {
159 { 0x0, 0x0 } 167 { 0x0, 0x0 }
160}; 168};
161 169
170/* same as in type 2 models */
171static struct sonypi_ioport_list *sonypi_type3_ioport_list =
172 sonypi_type2_ioport_list;
173
162/* The set of possible interrupts */ 174/* The set of possible interrupts */
163struct sonypi_irq_list { 175struct sonypi_irq_list {
164 u16 irq; 176 u16 irq;
@@ -180,6 +192,9 @@ static struct sonypi_irq_list sonypi_type2_irq_list[] = {
180 { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */ 192 { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */
181}; 193};
182 194
195/* same as in type2 models */
196static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;
197
183#define SONYPI_CAMERA_BRIGHTNESS 0 198#define SONYPI_CAMERA_BRIGHTNESS 0
184#define SONYPI_CAMERA_CONTRAST 1 199#define SONYPI_CAMERA_CONTRAST 1
185#define SONYPI_CAMERA_HUE 2 200#define SONYPI_CAMERA_HUE 2
@@ -223,6 +238,7 @@ static struct sonypi_irq_list sonypi_type2_irq_list[] = {
223#define SONYPI_MEYE_MASK 0x00000400 238#define SONYPI_MEYE_MASK 0x00000400
224#define SONYPI_MEMORYSTICK_MASK 0x00000800 239#define SONYPI_MEMORYSTICK_MASK 0x00000800
225#define SONYPI_BATTERY_MASK 0x00001000 240#define SONYPI_BATTERY_MASK 0x00001000
241#define SONYPI_WIRELESS_MASK 0x00002000
226 242
227struct sonypi_event { 243struct sonypi_event {
228 u8 data; 244 u8 data;
@@ -305,6 +321,13 @@ static struct sonypi_event sonypi_blueev[] = {
305 { 0, 0 } 321 { 0, 0 }
306}; 322};
307 323
324/* The set of possible wireless events */
325static struct sonypi_event sonypi_wlessev[] = {
326 { 0x59, SONYPI_EVENT_WIRELESS_ON },
327 { 0x5a, SONYPI_EVENT_WIRELESS_OFF },
328 { 0, 0 }
329};
330
308/* The set of possible back button events */ 331/* The set of possible back button events */
309static struct sonypi_event sonypi_backev[] = { 332static struct sonypi_event sonypi_backev[] = {
310 { 0x20, SONYPI_EVENT_BACK_PRESSED }, 333 { 0x20, SONYPI_EVENT_BACK_PRESSED },
@@ -383,7 +406,6 @@ static struct sonypi_eventtypes {
383 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, 406 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
384 { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, 407 { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
385 { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, 408 { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
386 { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_HELP_MASK, sonypi_helpev },
387 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev }, 409 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
388 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, 410 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
389 { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, 411 { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
@@ -391,6 +413,12 @@ static struct sonypi_eventtypes {
391 { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, 413 { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
392 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, 414 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
393 415
416 { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
417 { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
418 { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
419 { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
420 { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
421 { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
394 { 0 } 422 { 0 }
395}; 423};
396 424
@@ -563,6 +591,23 @@ static void sonypi_type2_srs(void)
563 udelay(10); 591 udelay(10);
564} 592}
565 593
594static void sonypi_type3_srs(void)
595{
596 u16 v16;
597 u8 v8;
598
599 /* This model type uses the same initialization of
600 * the embedded controller as the type2 models. */
601 sonypi_type2_srs();
602
603 /* Initialization of PCI config space of the LPC interface bridge. */
604 v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
605 pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
606 pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
607 v8 = (v8 & 0xCF) | 0x10;
608 pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
609}
610
566/* Disables the device - this comes from the AML code in the ACPI bios */ 611/* Disables the device - this comes from the AML code in the ACPI bios */
567static void sonypi_type1_dis(void) 612static void sonypi_type1_dis(void)
568{ 613{
@@ -587,6 +632,13 @@ static void sonypi_type2_dis(void)
587 printk(KERN_WARNING "ec_write failed\n"); 632 printk(KERN_WARNING "ec_write failed\n");
588} 633}
589 634
635static void sonypi_type3_dis(void)
636{
637 sonypi_type2_dis();
638 udelay(10);
639 pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
640}
641
590static u8 sonypi_call1(u8 dev) 642static u8 sonypi_call1(u8 dev)
591{ 643{
592 u8 v1, v2; 644 u8 v1, v2;
@@ -1067,10 +1119,17 @@ static struct miscdevice sonypi_misc_device = {
1067 1119
1068static void sonypi_enable(unsigned int camera_on) 1120static void sonypi_enable(unsigned int camera_on)
1069{ 1121{
1070 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) 1122 switch (sonypi_device.model) {
1071 sonypi_type2_srs(); 1123 case SONYPI_DEVICE_MODEL_TYPE1:
1072 else
1073 sonypi_type1_srs(); 1124 sonypi_type1_srs();
1125 break;
1126 case SONYPI_DEVICE_MODEL_TYPE2:
1127 sonypi_type2_srs();
1128 break;
1129 case SONYPI_DEVICE_MODEL_TYPE3:
1130 sonypi_type3_srs();
1131 break;
1132 }
1074 1133
1075 sonypi_call1(0x82); 1134 sonypi_call1(0x82);
1076 sonypi_call2(0x81, 0xff); 1135 sonypi_call2(0x81, 0xff);
@@ -1094,10 +1153,18 @@ static int sonypi_disable(void)
1094 if (!SONYPI_ACPI_ACTIVE && fnkeyinit) 1153 if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
1095 outb(0xf1, 0xb2); 1154 outb(0xf1, 0xb2);
1096 1155
1097 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) 1156 switch (sonypi_device.model) {
1098 sonypi_type2_dis(); 1157 case SONYPI_DEVICE_MODEL_TYPE1:
1099 else
1100 sonypi_type1_dis(); 1158 sonypi_type1_dis();
1159 break;
1160 case SONYPI_DEVICE_MODEL_TYPE2:
1161 sonypi_type2_dis();
1162 break;
1163 case SONYPI_DEVICE_MODEL_TYPE3:
1164 sonypi_type3_dis();
1165 break;
1166 }
1167
1101 return 0; 1168 return 0;
1102} 1169}
1103 1170
@@ -1143,12 +1210,16 @@ static int __devinit sonypi_probe(void)
1143 struct sonypi_irq_list *irq_list; 1210 struct sonypi_irq_list *irq_list;
1144 struct pci_dev *pcidev; 1211 struct pci_dev *pcidev;
1145 1212
1146 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1213 if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1147 PCI_DEVICE_ID_INTEL_82371AB_3, NULL); 1214 PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
1215 sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
1216 else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1217 PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
1218 sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
1219 else
1220 sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
1148 1221
1149 sonypi_device.dev = pcidev; 1222 sonypi_device.dev = pcidev;
1150 sonypi_device.model = pcidev ?
1151 SONYPI_DEVICE_MODEL_TYPE1 : SONYPI_DEVICE_MODEL_TYPE2;
1152 1223
1153 spin_lock_init(&sonypi_device.fifo_lock); 1224 spin_lock_init(&sonypi_device.fifo_lock);
1154 sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL, 1225 sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
@@ -1176,16 +1247,22 @@ static int __devinit sonypi_probe(void)
1176 goto out_miscreg; 1247 goto out_miscreg;
1177 } 1248 }
1178 1249
1179 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { 1250
1251 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
1252 ioport_list = sonypi_type1_ioport_list;
1253 sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
1254 sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
1255 irq_list = sonypi_type1_irq_list;
1256 } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
1180 ioport_list = sonypi_type2_ioport_list; 1257 ioport_list = sonypi_type2_ioport_list;
1181 sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; 1258 sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
1182 sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET; 1259 sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
1183 irq_list = sonypi_type2_irq_list; 1260 irq_list = sonypi_type2_irq_list;
1184 } else { 1261 } else {
1185 ioport_list = sonypi_type1_ioport_list; 1262 ioport_list = sonypi_type3_ioport_list;
1186 sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; 1263 sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
1187 sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; 1264 sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
1188 irq_list = sonypi_type1_irq_list; 1265 irq_list = sonypi_type3_irq_list;
1189 } 1266 }
1190 1267
1191 for (i = 0; ioport_list[i].port1; i++) { 1268 for (i = 0; ioport_list[i].port1; i++) {
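The probe hunk above selects per-model I/O lists, region size, and event-type offset with an if/else chain over the detected model. For illustration only, the same selection can be pictured as a small lookup table keyed by model; the region sizes and offsets below are the ones defined earlier in this patch, while the names and types are invented for the sketch:

```c
#include <stdio.h>

enum model { TYPE1 = 1, TYPE2 = 2, TYPE3 = 3 };

/* Per-model parameters corresponding to what the probe routine selects. */
struct model_cfg {
    enum model  model;
    unsigned    region_size;
    unsigned    evtype_offset;
    const char *ioport_list;   /* names only, for illustration */
    const char *irq_list;
};

static const struct model_cfg cfgs[] = {
    { TYPE1, 0x08, 0x04, "type1_ioport_list", "type1_irq_list" },
    { TYPE2, 0x20, 0x12, "type2_ioport_list", "type2_irq_list" },
    { TYPE3, 0x20, 0x12, "type2_ioport_list", "type2_irq_list" }, /* type3 reuses type2 lists */
};

static const struct model_cfg *lookup(enum model m)
{
    for (unsigned i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
        if (cfgs[i].model == m)
            return &cfgs[i];
    return NULL;
}

int main(void)
{
    const struct model_cfg *c = lookup(TYPE3);
    if (c)
        printf("type%d: region 0x%02x, event type at offset 0x%02x, %s/%s\n",
               c->model, c->region_size, c->evtype_offset,
               c->ioport_list, c->irq_list);
    return 0;
}
```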
@@ -1274,11 +1351,10 @@ static int __devinit sonypi_probe(void)
1274 1351
1275 printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver" 1352 printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver"
1276 "v%s.\n", SONYPI_DRIVER_VERSION); 1353 "v%s.\n", SONYPI_DRIVER_VERSION);
1277 printk(KERN_INFO "sonypi: detected %s model, " 1354 printk(KERN_INFO "sonypi: detected type%d model, "
1278 "verbose = %d, fnkeyinit = %s, camera = %s, " 1355 "verbose = %d, fnkeyinit = %s, camera = %s, "
1279 "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", 1356 "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
1280 (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) ? 1357 sonypi_device.model,
1281 "type1" : "type2",
1282 verbose, 1358 verbose,
1283 fnkeyinit ? "on" : "off", 1359 fnkeyinit ? "on" : "off",
1284 camera ? "on" : "off", 1360 camera ? "on" : "off",
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index cc2cc77fd174..c0d64914595f 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -206,6 +206,9 @@ static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
207 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)}, 207 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
208 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)}, 208 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
209 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
210 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
211 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
209 {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)}, 212 {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
210 {PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)}, 213 {PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)},
211 {0,} 214 {0,}
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index dc8c540391fd..939e51e119e6 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -14,7 +14,6 @@
14 * License. 14 * License.
15 */ 15 */
16 16
17#include <acpi/acpi_bus.h>
18#include <linux/pnp.h> 17#include <linux/pnp.h>
19#include "tpm.h" 18#include "tpm.h"
20 19
@@ -29,9 +28,10 @@
29#define TPM_MAX_TRIES 5000 28#define TPM_MAX_TRIES 5000
30#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1 29#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1
31 30
32/* These values will be filled after ACPI-call */ 31/* These values will be filled after PnP-call */
33static int TPM_INF_DATA = 0; 32static int TPM_INF_DATA = 0;
34static int TPM_INF_ADDR = 0; 33static int TPM_INF_ADDR = 0;
34static int pnp_registered = 0;
35 35
36/* TPM header definitions */ 36/* TPM header definitions */
37enum infineon_tpm_header { 37enum infineon_tpm_header {
@@ -356,24 +356,26 @@ static const struct pnp_device_id tpm_pnp_tbl[] = {
356 {"IFX0102", 0}, 356 {"IFX0102", 0},
357 {"", 0} 357 {"", 0}
358}; 358};
359MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
359 360
360static int __devinit tpm_inf_acpi_probe(struct pnp_dev *dev, 361static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
361 const struct pnp_device_id *dev_id) 362 const struct pnp_device_id *dev_id)
362{ 363{
363 TPM_INF_ADDR = (pnp_port_start(dev, 0) & 0xff); 364 if (pnp_port_valid(dev, 0)) {
364 TPM_INF_DATA = ((TPM_INF_ADDR + 1) & 0xff); 365 TPM_INF_ADDR = (pnp_port_start(dev, 0) & 0xff);
365 tpm_inf.base = pnp_port_start(dev, 1); 366 TPM_INF_DATA = ((TPM_INF_ADDR + 1) & 0xff);
366 dev_info(&dev->dev, "Found %s with ID %s\n", 367 tpm_inf.base = pnp_port_start(dev, 1);
367 dev->name, dev_id->id); 368 dev_info(&dev->dev, "Found %s with ID %s\n",
368 if (!((tpm_inf.base >> 8) & 0xff)) 369 dev->name, dev_id->id);
369 tpm_inf.base = 0; 370 return 0;
370 return 0; 371 }
372 return -ENODEV;
371} 373}
372 374
373static struct pnp_driver tpm_inf_pnp = { 375static struct pnp_driver tpm_inf_pnp = {
374 .name = "tpm_inf_pnp", 376 .name = "tpm_inf_pnp",
375 .id_table = tpm_pnp_tbl, 377 .id_table = tpm_pnp_tbl,
376 .probe = tpm_inf_acpi_probe, 378 .probe = tpm_inf_pnp_probe,
377}; 379};
378 380
379static int __devinit tpm_inf_probe(struct pci_dev *pci_dev, 381static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
@@ -386,19 +388,30 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
386 int productid[2]; 388 int productid[2];
387 char chipname[20]; 389 char chipname[20];
388 390
389 if (pci_enable_device(pci_dev)) 391 rc = pci_enable_device(pci_dev);
390 return -EIO; 392 if (rc)
393 return rc;
391 394
392 dev_info(&pci_dev->dev, "LPC-bus found at 0x%x\n", pci_id->device); 395 dev_info(&pci_dev->dev, "LPC-bus found at 0x%x\n", pci_id->device);
393 396
394 /* read IO-ports from ACPI */ 397 /* read IO-ports from PnP */
395 pnp_register_driver(&tpm_inf_pnp); 398 rc = pnp_register_driver(&tpm_inf_pnp);
396 pnp_unregister_driver(&tpm_inf_pnp); 399 if (rc < 0) {
400 dev_err(&pci_dev->dev,
401 "Error %x from pnp_register_driver!\n",rc);
402 goto error2;
403 }
404 if (!rc) {
405 dev_info(&pci_dev->dev, "No Infineon TPM found!\n");
406 goto error;
407 } else {
408 pnp_registered = 1;
409 }
397 410
398 /* Make sure, we have received valid config ports */ 411 /* Make sure, we have received valid config ports */
399 if (!TPM_INF_ADDR) { 412 if (!TPM_INF_ADDR) {
400 pci_disable_device(pci_dev); 413 dev_err(&pci_dev->dev, "No valid IO-ports received!\n");
401 return -EIO; 414 goto error;
402 } 415 }
403 416
404 /* query chip for its vendor, its version number a.s.o. */ 417 /* query chip for its vendor, its version number a.s.o. */
@@ -418,23 +431,21 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
418 431
419 switch ((productid[0] << 8) | productid[1]) { 432 switch ((productid[0] << 8) | productid[1]) {
420 case 6: 433 case 6:
421 sprintf(chipname, " (SLD 9630 TT 1.1)"); 434 snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)");
422 break; 435 break;
423 case 11: 436 case 11:
424 sprintf(chipname, " (SLB 9635 TT 1.2)"); 437 snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)");
425 break; 438 break;
426 default: 439 default:
427 sprintf(chipname, " (unknown chip)"); 440 snprintf(chipname, sizeof(chipname), " (unknown chip)");
428 break; 441 break;
429 } 442 }
430 chipname[19] = 0;
431 443
432 if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) { 444 if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {
433 445
434 if (tpm_inf.base == 0) { 446 if (tpm_inf.base == 0) {
435 dev_err(&pci_dev->dev, "No IO-ports found!\n"); 447 dev_err(&pci_dev->dev, "No IO-ports found!\n");
436 pci_disable_device(pci_dev); 448 goto error;
437 return -EIO;
438 } 449 }
439 /* configure TPM with IO-ports */ 450 /* configure TPM with IO-ports */
440 outb(IOLIMH, TPM_INF_ADDR); 451 outb(IOLIMH, TPM_INF_ADDR);
@@ -452,8 +463,7 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
452 dev_err(&pci_dev->dev, 463 dev_err(&pci_dev->dev,
453 "Could not set IO-ports to %04x\n", 464 "Could not set IO-ports to %04x\n",
454 tpm_inf.base); 465 tpm_inf.base);
455 pci_disable_device(pci_dev); 466 goto error;
456 return -EIO;
457 } 467 }
458 468
459 /* activate register */ 469 /* activate register */
@@ -479,14 +489,16 @@ static int __devinit tpm_inf_probe(struct pci_dev *pci_dev,
479 productid[0], productid[1], chipname); 489 productid[0], productid[1], chipname);
480 490
481 rc = tpm_register_hardware(pci_dev, &tpm_inf); 491 rc = tpm_register_hardware(pci_dev, &tpm_inf);
482 if (rc < 0) { 492 if (rc < 0)
483 pci_disable_device(pci_dev); 493 goto error;
484 return -ENODEV;
485 }
486 return 0; 494 return 0;
487 } else { 495 } else {
488 dev_info(&pci_dev->dev, "No Infineon TPM found!\n"); 496 dev_info(&pci_dev->dev, "No Infineon TPM found!\n");
497error:
498 pnp_unregister_driver(&tpm_inf_pnp);
499error2:
489 pci_disable_device(pci_dev); 500 pci_disable_device(pci_dev);
501 pnp_registered = 0;
490 return -ENODEV; 502 return -ENODEV;
491 } 503 }
492} 504}
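The tpm_infineon probe rework above adopts the usual goto-based unwind: every failure path jumps to a label that releases only what has already been acquired (PnP registration, then the enabled PCI device). A compact stand-alone illustration of that shape, with placeholder setup and teardown steps rather than the driver's actual calls:

```c
#include <stdio.h>

/* Placeholder steps standing in for pci_enable_device(),
 * pnp_register_driver(), tpm_register_hardware(), ... */
static int step_a(void)  { puts("A acquired"); return 0; }
static int step_b(void)  { puts("B acquired"); return 0; }
static int step_c(void)  { puts("C failed");   return -1; }
static void undo_b(void) { puts("B released"); }
static void undo_a(void) { puts("A released"); }

static int probe(void)
{
    int rc;

    rc = step_a();
    if (rc)
        goto out;          /* nothing to undo yet */
    rc = step_b();
    if (rc)
        goto err_a;        /* only A needs unwinding */
    rc = step_c();
    if (rc)
        goto err_b;        /* unwind B, then fall through to A */

    return 0;              /* fully initialized */

err_b:
    undo_b();
err_a:
    undo_a();
out:
    return rc;
}

int main(void)
{
    return probe() ? 1 : 0;
}
```

The labels unwind in reverse order of acquisition, so adding a new setup step only requires one new label rather than touching every earlier error return.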
@@ -521,6 +533,8 @@ static int __init init_inf(void)
521 533
522static void __exit cleanup_inf(void) 534static void __exit cleanup_inf(void)
523{ 535{
536 if (pnp_registered)
537 pnp_unregister_driver(&tpm_inf_pnp);
524 pci_unregister_driver(&inf_pci_driver); 538 pci_unregister_driver(&inf_pci_driver);
525} 539}
526 540
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6e4be3bb2d89..9d657127f313 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -153,7 +153,6 @@ static int tty_release(struct inode *, struct file *);
153int tty_ioctl(struct inode * inode, struct file * file, 153int tty_ioctl(struct inode * inode, struct file * file,
154 unsigned int cmd, unsigned long arg); 154 unsigned int cmd, unsigned long arg);
155static int tty_fasync(int fd, struct file * filp, int on); 155static int tty_fasync(int fd, struct file * filp, int on);
156extern void rs_360_init(void);
157static void release_mem(struct tty_struct *tty, int idx); 156static void release_mem(struct tty_struct *tty, int idx);
158 157
159 158
@@ -2911,11 +2910,6 @@ void __init console_init(void)
2911#ifdef CONFIG_EARLY_PRINTK 2910#ifdef CONFIG_EARLY_PRINTK
2912 disable_early_printk(); 2911 disable_early_printk();
2913#endif 2912#endif
2914#ifdef CONFIG_SERIAL_68360
2915 /* This is not a console initcall. I know not what it's doing here.
2916 So I haven't moved it. dwmw2 */
2917 rs_360_init();
2918#endif
2919 call = __con_initcall_start; 2913 call = __con_initcall_start;
2920 while (call < __con_initcall_end) { 2914 while (call < __con_initcall_end) {
2921 (*call)(); 2915 (*call)();
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 4764b4f9555d..0aff45fac2e6 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -991,7 +991,7 @@ static int viotape_remove(struct vio_dev *vdev)
991 */ 991 */
992static struct vio_device_id viotape_device_table[] __devinitdata = { 992static struct vio_device_id viotape_device_table[] __devinitdata = {
993 { "viotape", "" }, 993 { "viotape", "" },
994 { 0, } 994 { "", "" }
995}; 995};
996 996
997MODULE_DEVICE_TABLE(vio, viotape_device_table); 997MODULE_DEVICE_TABLE(vio, viotape_device_table);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 665103ccaee8..b8d0c290b0db 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -434,21 +434,25 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
434/* used by selection: complement pointer position */ 434/* used by selection: complement pointer position */
435void complement_pos(struct vc_data *vc, int offset) 435void complement_pos(struct vc_data *vc, int offset)
436{ 436{
437 static unsigned short *p; 437 static int old_offset = -1;
438 static unsigned short old; 438 static unsigned short old;
439 static unsigned short oldx, oldy; 439 static unsigned short oldx, oldy;
440 440
441 WARN_CONSOLE_UNLOCKED(); 441 WARN_CONSOLE_UNLOCKED();
442 442
443 if (p) { 443 if (old_offset != -1 && old_offset >= 0 &&
444 scr_writew(old, p); 444 old_offset < vc->vc_screenbuf_size) {
445 scr_writew(old, screenpos(vc, old_offset, 1));
445 if (DO_UPDATE(vc)) 446 if (DO_UPDATE(vc))
446 vc->vc_sw->con_putc(vc, old, oldy, oldx); 447 vc->vc_sw->con_putc(vc, old, oldy, oldx);
447 } 448 }
448 if (offset == -1) 449
449 p = NULL; 450 old_offset = offset;
450 else { 451
452 if (offset != -1 && offset >= 0 &&
453 offset < vc->vc_screenbuf_size) {
451 unsigned short new; 454 unsigned short new;
455 unsigned short *p;
452 p = screenpos(vc, offset, 1); 456 p = screenpos(vc, offset, 1);
453 old = scr_readw(p); 457 old = scr_readw(p);
454 new = old ^ vc->vc_complement_mask; 458 new = old ^ vc->vc_complement_mask;
@@ -459,6 +463,7 @@ void complement_pos(struct vc_data *vc, int offset)
459 vc->vc_sw->con_putc(vc, new, oldy, oldx); 463 vc->vc_sw->con_putc(vc, new, oldy, oldx);
460 } 464 }
461 } 465 }
466
462} 467}
463 468
464static void insert_char(struct vc_data *vc, unsigned int nr) 469static void insert_char(struct vc_data *vc, unsigned int nr)
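complement_pos() above stops caching a pointer into the screen buffer across calls and instead remembers the byte offset, re-deriving and range-checking the position against vc_screenbuf_size each time, so a console resize cannot leave a stale pointer to write through. A tiny sketch of that safer pattern with a synthetic buffer and resize:

```c
#include <stdio.h>
#include <stdlib.h>

static unsigned short *screenbuf;
static size_t screenbuf_size;        /* in bytes, like vc_screenbuf_size */

static int saved_offset = -1;        /* remembered position, not a pointer */

static void restore_saved_cell(unsigned short value)
{
    /* Re-derive the position and re-check it against the *current*
     * buffer size on every call, which is the idea behind the fix. */
    if (saved_offset >= 0 && (size_t)saved_offset < screenbuf_size)
        screenbuf[saved_offset / 2] = value;
}

int main(void)
{
    screenbuf_size = 2 * 80 * 25;
    screenbuf = calloc(1, screenbuf_size);
    if (!screenbuf)
        return 1;

    saved_offset = 2 * (80 * 24 + 79);   /* last cell of an 80x25 screen */

    /* Simulate a resize to 80x20: the remembered offset is now out of
     * range, and the range check turns the write into a no-op instead
     * of a write past the smaller buffer. */
    size_t new_size = 2 * 80 * 20;
    unsigned short *resized = realloc(screenbuf, new_size);
    if (resized) {
        screenbuf = resized;
        screenbuf_size = new_size;
    }

    restore_saved_cell(0x0720);
    printf("offset %d is %s the %zu-byte buffer\n", saved_offset,
           (size_t)saved_offset < screenbuf_size ? "inside" : "outside",
           screenbuf_size);
    free(screenbuf);
    return 0;
}
```

Remembering the offset rather than the pointer means the only state that can go stale is a plain integer, and a bounds check is enough to make it harmless.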
@@ -2272,7 +2277,9 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2272 ret = paste_selection(tty); 2277 ret = paste_selection(tty);
2273 break; 2278 break;
2274 case TIOCL_UNBLANKSCREEN: 2279 case TIOCL_UNBLANKSCREEN:
2280 acquire_console_sem();
2275 unblank_screen(); 2281 unblank_screen();
2282 release_console_sem();
2276 break; 2283 break;
2277 case TIOCL_SELLOADLUT: 2284 case TIOCL_SELLOADLUT:
2278 ret = sel_loadlut(p); 2285 ret = sel_loadlut(p);
@@ -2317,8 +2324,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2317 } 2324 }
2318 break; 2325 break;
2319 case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */ 2326 case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
2327 acquire_console_sem();
2320 ignore_poke = 1; 2328 ignore_poke = 1;
2321 do_blank_screen(0); 2329 do_blank_screen(0);
2330 release_console_sem();
2322 break; 2331 break;
2323 case TIOCL_BLANKEDSCREEN: 2332 case TIOCL_BLANKEDSCREEN:
2324 ret = console_blanked; 2333 ret = console_blanked;
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index b53e2e2b5aee..c3898afce3ae 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -346,6 +346,13 @@ config 8xx_WDT
346 tristate "MPC8xx Watchdog Timer" 346 tristate "MPC8xx Watchdog Timer"
347 depends on WATCHDOG && 8xx 347 depends on WATCHDOG && 8xx
348 348
349config BOOKE_WDT
350 tristate "PowerPC Book-E Watchdog Timer"
351 depends on WATCHDOG && (BOOKE || 4xx)
352 ---help---
353 Please see Documentation/watchdog/watchdog-api.txt for
354 more information.
355
349# MIPS Architecture 356# MIPS Architecture
350 357
351config INDYDOG 358config INDYDOG
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index c1838834ea7f..cfeac6f10137 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -2,42 +2,68 @@
2# Makefile for the WatchDog device drivers. 2# Makefile for the WatchDog device drivers.
3# 3#
4 4
5# Only one watchdog can succeed. We probe the ISA/PCI/USB based
6# watchdog-cards first, then the architecture specific watchdog
7# drivers and then the architecture independent "softdog" driver.
8# This means that if your ISA/PCI/USB card isn't detected then
9# you can fall back to an architecture specific driver and if
10# that also fails then you can fall back to the software watchdog
11# to give you some cover.
12
13# ISA-based Watchdog Cards
5obj-$(CONFIG_PCWATCHDOG) += pcwd.o 14obj-$(CONFIG_PCWATCHDOG) += pcwd.o
6obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
7obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
8obj-$(CONFIG_IB700_WDT) += ib700wdt.o
9obj-$(CONFIG_MIXCOMWD) += mixcomwd.o 15obj-$(CONFIG_MIXCOMWD) += mixcomwd.o
10obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
11obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
12obj-$(CONFIG_WDT) += wdt.o 16obj-$(CONFIG_WDT) += wdt.o
17
18# PCI-based Watchdog Cards
19obj-$(CONFIG_PCIPCWATCHDOG) += pcwd_pci.o
13obj-$(CONFIG_WDTPCI) += wdt_pci.o 20obj-$(CONFIG_WDTPCI) += wdt_pci.o
21
22# USB-based Watchdog Cards
23obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
24
25# ARM Architecture
14obj-$(CONFIG_21285_WATCHDOG) += wdt285.o 26obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
15obj-$(CONFIG_977_WATCHDOG) += wdt977.o 27obj-$(CONFIG_977_WATCHDOG) += wdt977.o
16obj-$(CONFIG_I8XX_TCO) += i8xx_tco.o 28obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
17obj-$(CONFIG_MACHZ_WDT) += machzwd.o 29obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
18obj-$(CONFIG_SH_WDT) += shwdt.o
19obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o 30obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
20obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o 31obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
21obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o 32
22obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o 33# X86 (i386 + ia64 + x86_64) Architecture
23obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o 34obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
24obj-$(CONFIG_SC520_WDT) += sc520_wdt.o 35obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
25obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
26obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o 36obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
27obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o 37obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
38obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
39obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o
40obj-$(CONFIG_IB700_WDT) += ib700wdt.o
28obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o 41obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
42obj-$(CONFIG_I8XX_TCO) += i8xx_tco.o
43obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
44obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
45obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
29obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o 46obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
30obj-$(CONFIG_INDYDOG) += indydog.o 47obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
31obj-$(CONFIG_PCIPCWATCHDOG) += pcwd_pci.o 48obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
32obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o 49obj-$(CONFIG_MACHZ_WDT) += machzwd.o
33obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o 50
34obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o 51# PowerPC Architecture
35obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o 52obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
53
54# PPC64 Architecture
36obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o 55obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
56obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
57
58# MIPS Architecture
59obj-$(CONFIG_INDYDOG) += indydog.o
60
61# S390 Architecture
62
63# SUPERH Architecture
64obj-$(CONFIG_SH_WDT) += shwdt.o
37 65
38# Only one watchdog can succeed. We probe the hardware watchdog 66# SPARC64 Architecture
39# drivers first, then the softdog driver. This means if your hardware
40# watchdog dies or is 'borrowed' for some reason the software watchdog
41# still gives you some cover.
42 67
68# Architecture Independent
43obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o 69obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
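Editor's note: whichever of the drivers above ends up owning the hardware, userspace drives it through the same misc device. The following minimal keepalive sketch is not part of this patch; it uses the standard /dev/watchdog interface described in Documentation/watchdog/watchdog-api.txt, and the magic-close 'V' write is honoured only by drivers that advertise WDIOF_MAGICCLOSE.

/* Minimal /dev/watchdog keepalive loop -- illustration only. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int i, fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	for (i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping the timer */
		sleep(5);
	}
	write(fd, "V", 1);	/* magic close, if the driver supports it */
	close(fd);
	return 0;
}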
diff --git a/drivers/char/watchdog/booke_wdt.c b/drivers/char/watchdog/booke_wdt.c
new file mode 100644
index 000000000000..abc30cca6645
--- /dev/null
+++ b/drivers/char/watchdog/booke_wdt.c
@@ -0,0 +1,192 @@
1/*
2 * drivers/char/watchdog/booke_wdt.c
3 *
4 * Watchdog timer for PowerPC Book-E systems
5 *
6 * Author: Matthew McClintock
7 * Maintainer: Kumar Gala <kumar.gala@freescale.com>
8 *
9 * Copyright 2005 Freescale Semiconductor Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/fs.h>
20#include <linux/miscdevice.h>
21#include <linux/notifier.h>
22#include <linux/watchdog.h>
23
24#include <asm/reg_booke.h>
25#include <asm/uaccess.h>
26#include <asm/system.h>
27
28/* If the kernel parameter wdt_enable=1, the watchdog will be enabled at boot.
29 * Also, the wdt_period sets the watchdog timer period timeout.
30 * For e500 CPUs, the wdt_period sets which bit changing from 0->1 will
31 * trigger a watchdog timeout. This watchdog timeout will occur 3 times: the
32 * first time nothing will happen, the second time a watchdog exception will
33 * occur, and the final time the board will reset.
34 */
35
36#ifdef CONFIG_FSL_BOOKE
37#define WDT_PERIOD_DEFAULT 63	/* Ex. wdt_period=28 bus=333MHz, reset=~40sec */
38#else
39#define WDT_PERIOD_DEFAULT 4 /* Refer to the PPC40x and PPC4xx manuals */
40#endif /* for timing information */
41
42u32 booke_wdt_enabled = 0;
43u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
44
45#ifdef CONFIG_FSL_BOOKE
46#define WDTP(x) ((((63-x)&0x3)<<30)|(((63-x)&0x3c)<<15))
47#else
48#define WDTP(x) (TCR_WP(x))
49#endif
50
51/*
52 * booke_wdt_enable:
53 */
54static __inline__ void booke_wdt_enable(void)
55{
56 u32 val;
57
58 val = mfspr(SPRN_TCR);
59 val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
60
61 mtspr(SPRN_TCR, val);
62}
63
64/*
65 * booke_wdt_ping:
66 */
67static __inline__ void booke_wdt_ping(void)
68{
69 mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
70}
71
72/*
73 * booke_wdt_write:
74 */
75static ssize_t booke_wdt_write (struct file *file, const char *buf,
76 size_t count, loff_t *ppos)
77{
78 booke_wdt_ping();
79 return count;
80}
81
82static struct watchdog_info ident = {
83 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
84 .firmware_version = 0,
85 .identity = "PowerPC Book-E Watchdog",
86};
87
88/*
89 * booke_wdt_ioctl:
90 */
91static int booke_wdt_ioctl (struct inode *inode, struct file *file,
92 unsigned int cmd, unsigned long arg)
93{
94 u32 tmp = 0;
95
96 switch (cmd) {
97 case WDIOC_GETSUPPORT:
98 if (copy_to_user ((struct watchdog_info *) arg, &ident,
99 sizeof(struct watchdog_info)))
100 return -EFAULT;
101 case WDIOC_GETSTATUS:
102 return put_user(ident.options, (u32 *) arg);
103 case WDIOC_GETBOOTSTATUS:
104 /* XXX: something is clearing TSR */
105 tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
106 /* returns 1 if last reset was caused by the WDT */
107 return (tmp ? 1 : 0);
108 case WDIOC_KEEPALIVE:
109 booke_wdt_ping();
110 return 0;
111 case WDIOC_SETTIMEOUT:
112 if (get_user(booke_wdt_period, (u32 *) arg))
113 return -EFAULT;
114 mtspr(SPRN_TCR, (mfspr(SPRN_TCR)&~WDTP(0))|WDTP(booke_wdt_period));
115 return 0;
116 case WDIOC_GETTIMEOUT:
117 return put_user(booke_wdt_period, (u32 *) arg);
118 case WDIOC_SETOPTIONS:
119 if (get_user(tmp, (u32 *) arg))
120 return -EINVAL;
121 if (tmp == WDIOS_ENABLECARD) {
122 booke_wdt_ping();
123 break;
124 } else
125 return -EINVAL;
126 return 0;
127 default:
128 return -ENOIOCTLCMD;
129 }
130
131 return 0;
132}
133/*
134 * booke_wdt_open:
135 */
136static int booke_wdt_open (struct inode *inode, struct file *file)
137{
138 if (booke_wdt_enabled == 0) {
139 booke_wdt_enabled = 1;
140 booke_wdt_enable();
141 printk (KERN_INFO "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
142 booke_wdt_period);
143 }
144
145 return 0;
146}
147
148static struct file_operations booke_wdt_fops = {
149 .owner = THIS_MODULE,
150 .llseek = no_llseek,
151 .write = booke_wdt_write,
152 .ioctl = booke_wdt_ioctl,
153 .open = booke_wdt_open,
154};
155
156static struct miscdevice booke_wdt_miscdev = {
157 .minor = WATCHDOG_MINOR,
158 .name = "watchdog",
159 .fops = &booke_wdt_fops,
160};
161
162static void __exit booke_wdt_exit(void)
163{
164 misc_deregister(&booke_wdt_miscdev);
165}
166
167/*
168 * booke_wdt_init:
169 */
170static int __init booke_wdt_init(void)
171{
172 int ret = 0;
173
174 printk (KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n");
175 ident.firmware_version = cpu_specs[0].pvr_value;
176
177 ret = misc_register(&booke_wdt_miscdev);
178 if (ret) {
179 printk (KERN_CRIT "Cannot register miscdev on minor=%d (err=%d)\n",
180 WATCHDOG_MINOR, ret);
181 return ret;
182 }
183
184 if (booke_wdt_enabled == 1) {
185 printk (KERN_INFO "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
186 booke_wdt_period);
187 booke_wdt_enable();
188 }
189
190 return ret;
191}
192device_initcall(booke_wdt_init);
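Editor's note: as the driver comment above explains, wdt_period on e500 selects a time-base bit rather than a number of seconds, so WDIOC_SETTIMEOUT on this device takes the raw WDTP value. A hedged userspace sketch, not part of the patch; the value 30 is purely illustrative, and the resulting timeout depends on the time-base frequency.

/* Illustration only: set the raw Book-E period value and keep pinging. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	unsigned int period = 30;	/* raw WDTP value, not seconds */

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &period);
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping before the period elapses */
		sleep(10);
	}
}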
diff --git a/drivers/char/watchdog/ixp2000_wdt.c b/drivers/char/watchdog/ixp2000_wdt.c
index e7640bc4904b..0cfb9b9c4a4b 100644
--- a/drivers/char/watchdog/ixp2000_wdt.c
+++ b/drivers/char/watchdog/ixp2000_wdt.c
@@ -182,7 +182,7 @@ static struct file_operations ixp2000_wdt_fops =
182static struct miscdevice ixp2000_wdt_miscdev = 182static struct miscdevice ixp2000_wdt_miscdev =
183{ 183{
184 .minor = WATCHDOG_MINOR, 184 .minor = WATCHDOG_MINOR,
185 .name = "IXP2000 Watchdog", 185 .name = "watchdog",
186 .fops = &ixp2000_wdt_fops, 186 .fops = &ixp2000_wdt_fops,
187}; 187};
188 188
diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c
index 8d916afbf4fa..b5be8b11104a 100644
--- a/drivers/char/watchdog/ixp4xx_wdt.c
+++ b/drivers/char/watchdog/ixp4xx_wdt.c
@@ -176,7 +176,7 @@ static struct file_operations ixp4xx_wdt_fops =
176static struct miscdevice ixp4xx_wdt_miscdev = 176static struct miscdevice ixp4xx_wdt_miscdev =
177{ 177{
178 .minor = WATCHDOG_MINOR, 178 .minor = WATCHDOG_MINOR,
179 .name = "IXP4xx Watchdog", 179 .name = "watchdog",
180 .fops = &ixp4xx_wdt_fops, 180 .fops = &ixp4xx_wdt_fops,
181}; 181};
182 182
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index f85ac898a49a..8b292bf343c4 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -27,7 +27,10 @@
27 * Fixed tmr_count / wdt_count confusion 27 * Fixed tmr_count / wdt_count confusion
28 * Added configurable debug 28 * Added configurable debug
29 * 29 *
30 * 11-Jan-2004 BJD Fixed divide-by-2 in timeout code 30 * 11-Jan-2005 BJD Fixed divide-by-2 in timeout code
31 *
32 * 25-Jan-2005 DA Added suspend/resume support
33 * Replaced reboot notifier with .shutdown method
31 * 34 *
32 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA 35 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
33*/ 36*/
@@ -40,8 +43,6 @@
40#include <linux/miscdevice.h> 43#include <linux/miscdevice.h>
41#include <linux/watchdog.h> 44#include <linux/watchdog.h>
42#include <linux/fs.h> 45#include <linux/fs.h>
43#include <linux/notifier.h>
44#include <linux/reboot.h>
45#include <linux/init.h> 46#include <linux/init.h>
46#include <linux/device.h> 47#include <linux/device.h>
47#include <linux/interrupt.h> 48#include <linux/interrupt.h>
@@ -317,20 +318,6 @@ static int s3c2410wdt_ioctl(struct inode *inode, struct file *file,
317 } 318 }
318} 319}
319 320
320/*
321 * Notifier for system down
322 */
323
324static int s3c2410wdt_notify_sys(struct notifier_block *this, unsigned long code,
325 void *unused)
326{
327 if(code==SYS_DOWN || code==SYS_HALT) {
328 /* Turn the WDT off */
329 s3c2410wdt_stop();
330 }
331 return NOTIFY_DONE;
332}
333
334/* kernel interface */ 321/* kernel interface */
335 322
336static struct file_operations s3c2410wdt_fops = { 323static struct file_operations s3c2410wdt_fops = {
@@ -348,10 +335,6 @@ static struct miscdevice s3c2410wdt_miscdev = {
348 .fops = &s3c2410wdt_fops, 335 .fops = &s3c2410wdt_fops,
349}; 336};
350 337
351static struct notifier_block s3c2410wdt_notifier = {
352 .notifier_call = s3c2410wdt_notify_sys,
353};
354
355/* interrupt handler code */ 338/* interrupt handler code */
356 339
357static irqreturn_t s3c2410wdt_irq(int irqno, void *param, 340static irqreturn_t s3c2410wdt_irq(int irqno, void *param,
@@ -432,18 +415,10 @@ static int s3c2410wdt_probe(struct device *dev)
432 } 415 }
433 } 416 }
434 417
435 ret = register_reboot_notifier(&s3c2410wdt_notifier);
436 if (ret) {
437 printk (KERN_ERR PFX "cannot register reboot notifier (%d)\n",
438 ret);
439 return ret;
440 }
441
442 ret = misc_register(&s3c2410wdt_miscdev); 418 ret = misc_register(&s3c2410wdt_miscdev);
443 if (ret) { 419 if (ret) {
444 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (%d)\n", 420 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (%d)\n",
445 WATCHDOG_MINOR, ret); 421 WATCHDOG_MINOR, ret);
446 unregister_reboot_notifier(&s3c2410wdt_notifier);
447 return ret; 422 return ret;
448 } 423 }
449 424
@@ -479,15 +454,63 @@ static int s3c2410wdt_remove(struct device *dev)
479 return 0; 454 return 0;
480} 455}
481 456
457static void s3c2410wdt_shutdown(struct device *dev)
458{
459 s3c2410wdt_stop();
460}
461
462#ifdef CONFIG_PM
463
464static unsigned long wtcon_save;
465static unsigned long wtdat_save;
466
467static int s3c2410wdt_suspend(struct device *dev, u32 state, u32 level)
468{
469 if (level == SUSPEND_POWER_DOWN) {
470 /* Save watchdog state, and turn it off. */
471 wtcon_save = readl(wdt_base + S3C2410_WTCON);
472 wtdat_save = readl(wdt_base + S3C2410_WTDAT);
473
474 /* Note that WTCNT doesn't need to be saved. */
475 s3c2410wdt_stop();
476 }
477
478 return 0;
479}
480
481static int s3c2410wdt_resume(struct device *dev, u32 level)
482{
483 if (level == RESUME_POWER_ON) {
484 /* Restore watchdog state. */
485
486 writel(wtdat_save, wdt_base + S3C2410_WTDAT);
487 writel(wtdat_save, wdt_base + S3C2410_WTCNT); /* Reset count */
488 writel(wtcon_save, wdt_base + S3C2410_WTCON);
489
490 printk(KERN_INFO PFX "watchdog %sabled\n",
491 (wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
492 }
493
494 return 0;
495}
496
497#else
498#define s3c2410wdt_suspend NULL
499#define s3c2410wdt_resume NULL
500#endif /* CONFIG_PM */
501
502
482static struct device_driver s3c2410wdt_driver = { 503static struct device_driver s3c2410wdt_driver = {
483 .name = "s3c2410-wdt", 504 .name = "s3c2410-wdt",
484 .bus = &platform_bus_type, 505 .bus = &platform_bus_type,
485 .probe = s3c2410wdt_probe, 506 .probe = s3c2410wdt_probe,
486 .remove = s3c2410wdt_remove, 507 .remove = s3c2410wdt_remove,
508 .shutdown = s3c2410wdt_shutdown,
509 .suspend = s3c2410wdt_suspend,
510 .resume = s3c2410wdt_resume,
487}; 511};
488 512
489 513
490
491static char banner[] __initdata = KERN_INFO "S3C2410 Watchdog Timer, (c) 2004 Simtec Electronics\n"; 514static char banner[] __initdata = KERN_INFO "S3C2410 Watchdog Timer, (c) 2004 Simtec Electronics\n";
492 515
493static int __init watchdog_init(void) 516static int __init watchdog_init(void)
@@ -499,13 +522,13 @@ static int __init watchdog_init(void)
499static void __exit watchdog_exit(void) 522static void __exit watchdog_exit(void)
500{ 523{
501 driver_unregister(&s3c2410wdt_driver); 524 driver_unregister(&s3c2410wdt_driver);
502 unregister_reboot_notifier(&s3c2410wdt_notifier);
503} 525}
504 526
505module_init(watchdog_init); 527module_init(watchdog_init);
506module_exit(watchdog_exit); 528module_exit(watchdog_exit);
507 529
508MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 530MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
531 "Dimitry Andric <dimitry.andric@tomtom.com>");
509MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver"); 532MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
510MODULE_LICENSE("GPL"); 533MODULE_LICENSE("GPL");
511MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 534MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
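Editor's note: the pattern introduced here generalizes — the driver-model .shutdown hook replaces a reboot notifier for quiescing the hardware, while suspend/resume save and restore the control registers. Below is a skeleton sketch under the same 2.6.13-era driver-model API; the foo_* names, foo_base, and FOO_CTRL are hypothetical, and the hook signatures mirror the s3c2410wdt ones in this patch.

/* Skeleton of the shutdown/suspend/resume pattern used above (sketch only). */
#include <linux/device.h>
#include <asm/io.h>

#define FOO_CTRL	0x00		/* hypothetical control register offset */
static void __iomem *foo_base;		/* assumed to be mapped in probe() */
static unsigned long foo_ctrl_save;

static void foo_stop(void)
{
	writel(0, foo_base + FOO_CTRL);	/* stop/mask the hardware */
}

static void foo_shutdown(struct device *dev)
{
	foo_stop();			/* quiesce on reboot/poweroff */
}

static int foo_suspend(struct device *dev, u32 state, u32 level)
{
	if (level == SUSPEND_POWER_DOWN) {
		foo_ctrl_save = readl(foo_base + FOO_CTRL);	/* save state */
		foo_stop();
	}
	return 0;
}

static int foo_resume(struct device *dev, u32 level)
{
	if (level == RESUME_POWER_ON)
		writel(foo_ctrl_save, foo_base + FOO_CTRL);	/* restore state */
	return 0;
}

static struct device_driver foo_driver = {
	.name		= "foo",
	.bus		= &platform_bus_type,
	.shutdown	= foo_shutdown,
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};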
diff --git a/drivers/char/watchdog/scx200_wdt.c b/drivers/char/watchdog/scx200_wdt.c
index c4568569f3a8..b4a102a2d7e3 100644
--- a/drivers/char/watchdog/scx200_wdt.c
+++ b/drivers/char/watchdog/scx200_wdt.c
@@ -206,7 +206,7 @@ static struct file_operations scx200_wdt_fops = {
206 206
207static struct miscdevice scx200_wdt_miscdev = { 207static struct miscdevice scx200_wdt_miscdev = {
208 .minor = WATCHDOG_MINOR, 208 .minor = WATCHDOG_MINOR,
209 .name = NAME, 209 .name = "watchdog",
210 .fops = &scx200_wdt_fops, 210 .fops = &scx200_wdt_fops,
211}; 211};
212 212
diff --git a/drivers/char/watchdog/softdog.c b/drivers/char/watchdog/softdog.c
index 4d7ed931f5c6..20e5eb8667f2 100644
--- a/drivers/char/watchdog/softdog.c
+++ b/drivers/char/watchdog/softdog.c
@@ -77,7 +77,7 @@ static void watchdog_fire(unsigned long);
77 77
78static struct timer_list watchdog_ticktock = 78static struct timer_list watchdog_ticktock =
79 TIMER_INITIALIZER(watchdog_fire, 0, 0); 79 TIMER_INITIALIZER(watchdog_fire, 0, 0);
80static unsigned long timer_alive; 80static unsigned long driver_open, orphan_timer;
81static char expect_close; 81static char expect_close;
82 82
83 83
@@ -87,6 +87,9 @@ static char expect_close;
87 87
88static void watchdog_fire(unsigned long data) 88static void watchdog_fire(unsigned long data)
89{ 89{
90 if (test_and_clear_bit(0, &orphan_timer))
91 module_put(THIS_MODULE);
92
90 if (soft_noboot) 93 if (soft_noboot)
91 printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); 94 printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n");
92 else 95 else
@@ -128,9 +131,9 @@ static int softdog_set_heartbeat(int t)
128 131
129static int softdog_open(struct inode *inode, struct file *file) 132static int softdog_open(struct inode *inode, struct file *file)
130{ 133{
131 if(test_and_set_bit(0, &timer_alive)) 134 if (test_and_set_bit(0, &driver_open))
132 return -EBUSY; 135 return -EBUSY;
133 if (nowayout) 136 if (!test_and_clear_bit(0, &orphan_timer))
134 __module_get(THIS_MODULE); 137 __module_get(THIS_MODULE);
135 /* 138 /*
136 * Activate timer 139 * Activate timer
@@ -147,11 +150,13 @@ static int softdog_release(struct inode *inode, struct file *file)
147 */ 150 */
148 if (expect_close == 42) { 151 if (expect_close == 42) {
149 softdog_stop(); 152 softdog_stop();
153 module_put(THIS_MODULE);
150 } else { 154 } else {
151 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 155 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
156 set_bit(0, &orphan_timer);
152 softdog_keepalive(); 157 softdog_keepalive();
153 } 158 }
154 clear_bit(0, &timer_alive); 159 clear_bit(0, &driver_open);
155 expect_close = 0; 160 expect_close = 0;
156 return 0; 161 return 0;
157} 162}
diff --git a/drivers/char/watchdog/w83627hf_wdt.c b/drivers/char/watchdog/w83627hf_wdt.c
index 465e0fd0423d..b5d821015421 100644
--- a/drivers/char/watchdog/w83627hf_wdt.c
+++ b/drivers/char/watchdog/w83627hf_wdt.c
@@ -93,6 +93,12 @@ w83627hf_init(void)
93 93
94 w83627hf_select_wd_register(); 94 w83627hf_select_wd_register();
95 95
96 outb_p(0xF6, WDT_EFER); /* Select CRF6 */
97 t=inb_p(WDT_EFDR); /* read CRF6 */
98 if (t != 0) {
99 printk (KERN_INFO PFX "Watchdog already running. Resetting timeout to %d sec\n", timeout);
100 outb_p(timeout, WDT_EFDR); /* Write back to CRF6 */
101 }
96 outb_p(0xF5, WDT_EFER); /* Select CRF5 */ 102 outb_p(0xF5, WDT_EFER); /* Select CRF5 */
97 t=inb_p(WDT_EFDR); /* read CRF5 */ 103 t=inb_p(WDT_EFDR); /* read CRF5 */
98 t&=~0x0C; /* set second mode & disable keyboard turning off watchdog */ 104 t&=~0x0C; /* set second mode & disable keyboard turning off watchdog */
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 5b29c3b2a331..327b58e64875 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -58,4 +58,31 @@ config EFI_PCDP
58 58
59 See <http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf> 59 See <http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf>
60 60
61config DELL_RBU
62 tristate "BIOS update support for DELL systems via sysfs"
63 select FW_LOADER
64 help
65 Say m if you want to have the option of updating the BIOS for your
66 DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
67	  supporting application to communicate with the BIOS regarding the new
68 image for the image update to take effect.
69 See <file:Documentation/dell_rbu.txt> for more details on the driver.
70
71config DCDBAS
72 tristate "Dell Systems Management Base Driver"
73 depends on X86 || X86_64
74 default m
75 help
76 The Dell Systems Management Base Driver provides a sysfs interface
77 for systems management software to perform System Management
78 Interrupts (SMIs) and Host Control Actions (system power cycle or
79 power off after OS shutdown) on certain Dell systems.
80
81 See <file:Documentation/dcdbas.txt> for more details on the driver
82 and the Dell systems on which Dell systems management software makes
83 use of this driver.
84
85 Say Y or M here to enable the driver for use by Dell systems
86 management software such as Dell OpenManage.
87
61endmenu 88endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 90fd0b26db8b..85429979d0db 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -4,3 +4,5 @@
4obj-$(CONFIG_EDD) += edd.o 4obj-$(CONFIG_EDD) += edd.o
5obj-$(CONFIG_EFI_VARS) += efivars.o 5obj-$(CONFIG_EFI_VARS) += efivars.o
6obj-$(CONFIG_EFI_PCDP) += pcdp.o 6obj-$(CONFIG_EFI_PCDP) += pcdp.o
7obj-$(CONFIG_DELL_RBU) += dell_rbu.o
8obj-$(CONFIG_DCDBAS) += dcdbas.o
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
new file mode 100644
index 000000000000..955537fe9958
--- /dev/null
+++ b/drivers/firmware/dcdbas.c
@@ -0,0 +1,596 @@
1/*
2 * dcdbas.c: Dell Systems Management Base Driver
3 *
4 * The Dell Systems Management Base Driver provides a sysfs interface for
5 * systems management software to perform System Management Interrupts (SMIs)
6 * and Host Control Actions (power cycle or power off after OS shutdown) on
7 * Dell systems.
8 *
9 * See Documentation/dcdbas.txt for more information.
10 *
11 * Copyright (C) 1995-2005 Dell Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License v2.0 as published by
15 * the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/device.h>
24#include <linux/dma-mapping.h>
25#include <linux/errno.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/mc146818rtc.h>
29#include <linux/module.h>
30#include <linux/reboot.h>
31#include <linux/sched.h>
32#include <linux/smp.h>
33#include <linux/spinlock.h>
34#include <linux/string.h>
35#include <linux/types.h>
36#include <asm/io.h>
37#include <asm/semaphore.h>
38
39#include "dcdbas.h"
40
41#define DRIVER_NAME "dcdbas"
42#define DRIVER_VERSION "5.6.0-1"
43#define DRIVER_DESCRIPTION "Dell Systems Management Base Driver"
44
45static struct platform_device *dcdbas_pdev;
46
47static u8 *smi_data_buf;
48static dma_addr_t smi_data_buf_handle;
49static unsigned long smi_data_buf_size;
50static u32 smi_data_buf_phys_addr;
51static DECLARE_MUTEX(smi_data_lock);
52
53static unsigned int host_control_action;
54static unsigned int host_control_smi_type;
55static unsigned int host_control_on_shutdown;
56
57/**
58 * smi_data_buf_free: free SMI data buffer
59 */
60static void smi_data_buf_free(void)
61{
62 if (!smi_data_buf)
63 return;
64
65 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
66 __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
67
68 dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
69 smi_data_buf_handle);
70 smi_data_buf = NULL;
71 smi_data_buf_handle = 0;
72 smi_data_buf_phys_addr = 0;
73 smi_data_buf_size = 0;
74}
75
76/**
77 * smi_data_buf_realloc: grow SMI data buffer if needed
78 */
79static int smi_data_buf_realloc(unsigned long size)
80{
81 void *buf;
82 dma_addr_t handle;
83
84 if (smi_data_buf_size >= size)
85 return 0;
86
87 if (size > MAX_SMI_DATA_BUF_SIZE)
88 return -EINVAL;
89
90 /* new buffer is needed */
91 buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL);
92 if (!buf) {
93 dev_dbg(&dcdbas_pdev->dev,
94 "%s: failed to allocate memory size %lu\n",
95 __FUNCTION__, size);
96 return -ENOMEM;
97 }
98 /* memory zeroed by dma_alloc_coherent */
99
100 if (smi_data_buf)
101 memcpy(buf, smi_data_buf, smi_data_buf_size);
102
103 /* free any existing buffer */
104 smi_data_buf_free();
105
106 /* set up new buffer for use */
107 smi_data_buf = buf;
108 smi_data_buf_handle = handle;
109 smi_data_buf_phys_addr = (u32) virt_to_phys(buf);
110 smi_data_buf_size = size;
111
112 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
113 __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
114
115 return 0;
116}
117
118static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
119 struct device_attribute *attr,
120 char *buf)
121{
122 return sprintf(buf, "%x\n", smi_data_buf_phys_addr);
123}
124
125static ssize_t smi_data_buf_size_show(struct device *dev,
126 struct device_attribute *attr,
127 char *buf)
128{
129 return sprintf(buf, "%lu\n", smi_data_buf_size);
130}
131
132static ssize_t smi_data_buf_size_store(struct device *dev,
133 struct device_attribute *attr,
134 const char *buf, size_t count)
135{
136 unsigned long buf_size;
137 ssize_t ret;
138
139 buf_size = simple_strtoul(buf, NULL, 10);
140
141 /* make sure SMI data buffer is at least buf_size */
142 down(&smi_data_lock);
143 ret = smi_data_buf_realloc(buf_size);
144 up(&smi_data_lock);
145 if (ret)
146 return ret;
147
148 return count;
149}
150
151static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
152 size_t count)
153{
154 size_t max_read;
155 ssize_t ret;
156
157 down(&smi_data_lock);
158
159 if (pos >= smi_data_buf_size) {
160 ret = 0;
161 goto out;
162 }
163
164 max_read = smi_data_buf_size - pos;
165 ret = min(max_read, count);
166 memcpy(buf, smi_data_buf + pos, ret);
167out:
168 up(&smi_data_lock);
169 return ret;
170}
171
172static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
173 size_t count)
174{
175 ssize_t ret;
176
177 down(&smi_data_lock);
178
179 ret = smi_data_buf_realloc(pos + count);
180 if (ret)
181 goto out;
182
183 memcpy(smi_data_buf + pos, buf, count);
184 ret = count;
185out:
186 up(&smi_data_lock);
187 return ret;
188}
189
190static ssize_t host_control_action_show(struct device *dev,
191 struct device_attribute *attr,
192 char *buf)
193{
194 return sprintf(buf, "%u\n", host_control_action);
195}
196
197static ssize_t host_control_action_store(struct device *dev,
198 struct device_attribute *attr,
199 const char *buf, size_t count)
200{
201 ssize_t ret;
202
203 /* make sure buffer is available for host control command */
204 down(&smi_data_lock);
205 ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
206 up(&smi_data_lock);
207 if (ret)
208 return ret;
209
210 host_control_action = simple_strtoul(buf, NULL, 10);
211 return count;
212}
213
214static ssize_t host_control_smi_type_show(struct device *dev,
215 struct device_attribute *attr,
216 char *buf)
217{
218 return sprintf(buf, "%u\n", host_control_smi_type);
219}
220
221static ssize_t host_control_smi_type_store(struct device *dev,
222 struct device_attribute *attr,
223 const char *buf, size_t count)
224{
225 host_control_smi_type = simple_strtoul(buf, NULL, 10);
226 return count;
227}
228
229static ssize_t host_control_on_shutdown_show(struct device *dev,
230 struct device_attribute *attr,
231 char *buf)
232{
233 return sprintf(buf, "%u\n", host_control_on_shutdown);
234}
235
236static ssize_t host_control_on_shutdown_store(struct device *dev,
237 struct device_attribute *attr,
238 const char *buf, size_t count)
239{
240 host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
241 return count;
242}
243
244/**
245 * smi_request: generate SMI request
246 *
247 * Called with smi_data_lock.
248 */
249static int smi_request(struct smi_cmd *smi_cmd)
250{
251 cpumask_t old_mask;
252 int ret = 0;
253
254 if (smi_cmd->magic != SMI_CMD_MAGIC) {
255 dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
256 __FUNCTION__);
257 return -EBADR;
258 }
259
260 /* SMI requires CPU 0 */
261 old_mask = current->cpus_allowed;
262 set_cpus_allowed(current, cpumask_of_cpu(0));
263 if (smp_processor_id() != 0) {
264 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
265 __FUNCTION__);
266 ret = -EBUSY;
267 goto out;
268 }
269
270 /* generate SMI */
271 asm volatile (
272 "outb %b0,%w1"
273 : /* no output args */
274 : "a" (smi_cmd->command_code),
275 "d" (smi_cmd->command_address),
276 "b" (smi_cmd->ebx),
277 "c" (smi_cmd->ecx)
278 : "memory"
279 );
280
281out:
282 set_cpus_allowed(current, old_mask);
283 return ret;
284}
285
286/**
287 * smi_request_store:
288 *
289 * The valid values are:
290 * 0: zero SMI data buffer
291 * 1: generate calling interface SMI
292 * 2: generate raw SMI
293 *
294 * User application writes smi_cmd to smi_data before telling driver
295 * to generate SMI.
296 */
297static ssize_t smi_request_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
300{
301 struct smi_cmd *smi_cmd;
302 unsigned long val = simple_strtoul(buf, NULL, 10);
303 ssize_t ret;
304
305 down(&smi_data_lock);
306
307 if (smi_data_buf_size < sizeof(struct smi_cmd)) {
308 ret = -ENODEV;
309 goto out;
310 }
311 smi_cmd = (struct smi_cmd *)smi_data_buf;
312
313 switch (val) {
314 case 2:
315 /* Raw SMI */
316 ret = smi_request(smi_cmd);
317 if (!ret)
318 ret = count;
319 break;
320 case 1:
321 /* Calling Interface SMI */
322 smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
323 ret = smi_request(smi_cmd);
324 if (!ret)
325 ret = count;
326 break;
327 case 0:
328 memset(smi_data_buf, 0, smi_data_buf_size);
329 ret = count;
330 break;
331 default:
332 ret = -EINVAL;
333 break;
334 }
335
336out:
337 up(&smi_data_lock);
338 return ret;
339}
340
341/**
342 * host_control_smi: generate host control SMI
343 *
344 * Caller must set up the host control command in smi_data_buf.
345 */
346static int host_control_smi(void)
347{
348 struct apm_cmd *apm_cmd;
349 u8 *data;
350 unsigned long flags;
351 u32 num_ticks;
352 s8 cmd_status;
353 u8 index;
354
355 apm_cmd = (struct apm_cmd *)smi_data_buf;
356 apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;
357
358 switch (host_control_smi_type) {
359 case HC_SMITYPE_TYPE1:
360 spin_lock_irqsave(&rtc_lock, flags);
361 /* write SMI data buffer physical address */
362 data = (u8 *)&smi_data_buf_phys_addr;
363 for (index = PE1300_CMOS_CMD_STRUCT_PTR;
364 index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
365 index++, data++) {
366 outb(index,
367 (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
368 outb(*data,
369 (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
370 }
371
372		/* first set status to -1 as called for by the spec */
373 cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
374 outb((u8) cmd_status, PCAT_APM_STATUS_PORT);
375
376 /* generate SMM call */
377 outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
378 spin_unlock_irqrestore(&rtc_lock, flags);
379
380 /* wait a few to see if it executed */
381 num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
382 while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
383 == ESM_STATUS_CMD_UNSUCCESSFUL) {
384 num_ticks--;
385 if (num_ticks == EXPIRED_TIMER)
386 return -ETIME;
387 }
388 break;
389
390 case HC_SMITYPE_TYPE2:
391 case HC_SMITYPE_TYPE3:
392 spin_lock_irqsave(&rtc_lock, flags);
393 /* write SMI data buffer physical address */
394 data = (u8 *)&smi_data_buf_phys_addr;
395 for (index = PE1400_CMOS_CMD_STRUCT_PTR;
396 index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
397 index++, data++) {
398 outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
399 outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
400 }
401
402 /* generate SMM call */
403 if (host_control_smi_type == HC_SMITYPE_TYPE3)
404 outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
405 else
406 outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);
407
408 /* restore RTC index pointer since it was written to above */
409 CMOS_READ(RTC_REG_C);
410 spin_unlock_irqrestore(&rtc_lock, flags);
411
412 /* read control port back to serialize write */
413 cmd_status = inb(PE1400_APM_CONTROL_PORT);
414
415 /* wait a few to see if it executed */
416 num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
417 while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
418 num_ticks--;
419 if (num_ticks == EXPIRED_TIMER)
420 return -ETIME;
421 }
422 break;
423
424 default:
425 dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
426 __FUNCTION__, host_control_smi_type);
427 return -ENOSYS;
428 }
429
430 return 0;
431}
432
433/**
434 * dcdbas_host_control: initiate host control
435 *
436 * This function is called by the driver after the system has
437 * finished shutting down if the user application specified a
438 * host control action to perform on shutdown. It is safe to
439 * use smi_data_buf at this point because the system has finished
440 * shutting down and no userspace apps are running.
441 */
442static void dcdbas_host_control(void)
443{
444 struct apm_cmd *apm_cmd;
445 u8 action;
446
447 if (host_control_action == HC_ACTION_NONE)
448 return;
449
450 action = host_control_action;
451 host_control_action = HC_ACTION_NONE;
452
453 if (!smi_data_buf) {
454 dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__);
455 return;
456 }
457
458 if (smi_data_buf_size < sizeof(struct apm_cmd)) {
459 dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
460 __FUNCTION__);
461 return;
462 }
463
464 apm_cmd = (struct apm_cmd *)smi_data_buf;
465
466 /* power off takes precedence */
467 if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
468 apm_cmd->command = ESM_APM_POWER_CYCLE;
469 apm_cmd->reserved = 0;
470 *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
471 host_control_smi();
472 } else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
473 apm_cmd->command = ESM_APM_POWER_CYCLE;
474 apm_cmd->reserved = 0;
475 *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
476 host_control_smi();
477 }
478}
479
480/**
481 * dcdbas_reboot_notify: handle reboot notification for host control
482 */
483static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
484 void *unused)
485{
486 static unsigned int notify_cnt = 0;
487
488 switch (code) {
489 case SYS_DOWN:
490 case SYS_HALT:
491 case SYS_POWER_OFF:
492 if (host_control_on_shutdown) {
493 /* firmware is going to perform host control action */
494 if (++notify_cnt == 2) {
495 printk(KERN_WARNING
496 "Please wait for shutdown "
497 "action to complete...\n");
498 dcdbas_host_control();
499 }
500 /*
501 * register again and initiate the host control
502 * action on the second notification to allow
503 * everyone that registered to be notified
504 */
505 register_reboot_notifier(nb);
506 }
507 break;
508 }
509
510 return NOTIFY_DONE;
511}
512
513static struct notifier_block dcdbas_reboot_nb = {
514 .notifier_call = dcdbas_reboot_notify,
515 .next = NULL,
516 .priority = 0
517};
518
519static DCDBAS_BIN_ATTR_RW(smi_data);
520
521static struct bin_attribute *dcdbas_bin_attrs[] = {
522 &bin_attr_smi_data,
523 NULL
524};
525
526static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
527static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
528static DCDBAS_DEV_ATTR_WO(smi_request);
529static DCDBAS_DEV_ATTR_RW(host_control_action);
530static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
531static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);
532
533static struct device_attribute *dcdbas_dev_attrs[] = {
534 &dev_attr_smi_data_buf_size,
535 &dev_attr_smi_data_buf_phys_addr,
536 &dev_attr_smi_request,
537 &dev_attr_host_control_action,
538 &dev_attr_host_control_smi_type,
539 &dev_attr_host_control_on_shutdown,
540 NULL
541};
542
543/**
544 * dcdbas_init: initialize driver
545 */
546static int __init dcdbas_init(void)
547{
548 int i;
549
550 host_control_action = HC_ACTION_NONE;
551 host_control_smi_type = HC_SMITYPE_NONE;
552
553 dcdbas_pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
554 if (IS_ERR(dcdbas_pdev))
555 return PTR_ERR(dcdbas_pdev);
556
557 /*
558 * BIOS SMI calls require buffer addresses be in 32-bit address space.
559 * This is done by setting the DMA mask below.
560 */
561 dcdbas_pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
562 dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
563
564 register_reboot_notifier(&dcdbas_reboot_nb);
565
566 for (i = 0; dcdbas_bin_attrs[i]; i++)
567 sysfs_create_bin_file(&dcdbas_pdev->dev.kobj,
568 dcdbas_bin_attrs[i]);
569
570 for (i = 0; dcdbas_dev_attrs[i]; i++)
571 device_create_file(&dcdbas_pdev->dev, dcdbas_dev_attrs[i]);
572
573 dev_info(&dcdbas_pdev->dev, "%s (version %s)\n",
574 DRIVER_DESCRIPTION, DRIVER_VERSION);
575
576 return 0;
577}
578
579/**
580 * dcdbas_exit: perform driver cleanup
581 */
582static void __exit dcdbas_exit(void)
583{
584 platform_device_unregister(dcdbas_pdev);
585 unregister_reboot_notifier(&dcdbas_reboot_nb);
586 smi_data_buf_free();
587}
588
589module_init(dcdbas_init);
590module_exit(dcdbas_exit);
591
592MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
593MODULE_VERSION(DRIVER_VERSION);
594MODULE_AUTHOR("Dell Inc.");
595MODULE_LICENSE("GPL");
596
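Editor's note: for reference, the smi_request flow described in the comments above, seen from userspace — grow the buffer, write a struct smi_cmd into smi_data, then write 1 (calling-interface SMI), 2 (raw SMI) or 0 (zero the buffer) to smi_request. This is a hedged sketch, not part of the patch; the /sys/devices/platform/dcdbas path is assumed from platform_device_register_simple(), and the command_address/command_code values must come from the platform's BIOS tables (omitted here).

/* Illustration only: drive the dcdbas smi_request interface from userspace. */
#include <stdio.h>
#include <stdint.h>

struct smi_cmd {			/* mirrors struct smi_cmd in dcdbas.h */
	uint32_t magic;
	uint32_t ebx;
	uint32_t ecx;
	uint16_t command_address;
	uint8_t  command_code;
	uint8_t  reserved;
	uint8_t  command_buffer[1];
} __attribute__((packed));

#define SMI_CMD_MAGIC 0x534D4931	/* same value as in dcdbas.h */

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	struct smi_cmd cmd = { .magic = SMI_CMD_MAGIC };
	FILE *f;

	/* make sure the kernel-side buffer can hold the command */
	write_str("/sys/devices/platform/dcdbas/smi_data_buf_size", "4096");

	/* cmd.command_address / cmd.command_code come from BIOS tables */
	f = fopen("/sys/devices/platform/dcdbas/smi_data", "w");
	if (f) {
		fwrite(&cmd, sizeof(cmd), 1, f);
		fclose(f);
	}

	/* 1 = calling-interface SMI, 2 = raw SMI, 0 = zero the buffer */
	write_str("/sys/devices/platform/dcdbas/smi_request", "1");
	return 0;
}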
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
new file mode 100644
index 000000000000..58a85182b3e8
--- /dev/null
+++ b/drivers/firmware/dcdbas.h
@@ -0,0 +1,107 @@
1/*
2 * dcdbas.h: Definitions for Dell Systems Management Base driver
3 *
4 * Copyright (C) 1995-2005 Dell Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License v2.0 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef _DCDBAS_H_
17#define _DCDBAS_H_
18
19#include <linux/device.h>
20#include <linux/input.h>
21#include <linux/sysfs.h>
22#include <linux/types.h>
23
24#define MAX_SMI_DATA_BUF_SIZE (256 * 1024)
25
26#define HC_ACTION_NONE (0)
27#define HC_ACTION_HOST_CONTROL_POWEROFF BIT(1)
28#define HC_ACTION_HOST_CONTROL_POWERCYCLE BIT(2)
29
30#define HC_SMITYPE_NONE (0)
31#define HC_SMITYPE_TYPE1 (1)
32#define HC_SMITYPE_TYPE2 (2)
33#define HC_SMITYPE_TYPE3 (3)
34
35#define ESM_APM_CMD (0x0A0)
36#define ESM_APM_POWER_CYCLE (0x10)
37#define ESM_STATUS_CMD_UNSUCCESSFUL (-1)
38
39#define CMOS_BASE_PORT (0x070)
40#define CMOS_PAGE1_INDEX_PORT (0)
41#define CMOS_PAGE1_DATA_PORT (1)
42#define CMOS_PAGE2_INDEX_PORT_PIIX4 (2)
43#define CMOS_PAGE2_DATA_PORT_PIIX4 (3)
44#define PE1400_APM_CONTROL_PORT (0x0B0)
45#define PCAT_APM_CONTROL_PORT (0x0B2)
46#define PCAT_APM_STATUS_PORT (0x0B3)
47#define PE1300_CMOS_CMD_STRUCT_PTR (0x38)
48#define PE1400_CMOS_CMD_STRUCT_PTR (0x70)
49
50#define MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN (14)
51#define MAX_SYSMGMT_LONGCMD_SGENTRY_NUM (16)
52
53#define TIMEOUT_USEC_SHORT_SEMA_BLOCKING (10000)
54#define EXPIRED_TIMER (0)
55
56#define SMI_CMD_MAGIC (0x534D4931)
57
58#define DCDBAS_DEV_ATTR_RW(_name) \
59 DEVICE_ATTR(_name,0600,_name##_show,_name##_store);
60
61#define DCDBAS_DEV_ATTR_RO(_name) \
62 DEVICE_ATTR(_name,0400,_name##_show,NULL);
63
64#define DCDBAS_DEV_ATTR_WO(_name) \
65 DEVICE_ATTR(_name,0200,NULL,_name##_store);
66
67#define DCDBAS_BIN_ATTR_RW(_name) \
68struct bin_attribute bin_attr_##_name = { \
69 .attr = { .name = __stringify(_name), \
70 .mode = 0600, \
71 .owner = THIS_MODULE }, \
72 .read = _name##_read, \
73 .write = _name##_write, \
74}
75
76struct smi_cmd {
77 __u32 magic;
78 __u32 ebx;
79 __u32 ecx;
80 __u16 command_address;
81 __u8 command_code;
82 __u8 reserved;
83 __u8 command_buffer[1];
84} __attribute__ ((packed));
85
86struct apm_cmd {
87 __u8 command;
88 __s8 status;
89 __u16 reserved;
90 union {
91 struct {
92 __u8 parm[MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN];
93 } __attribute__ ((packed)) shortreq;
94
95 struct {
96 __u16 num_sg_entries;
97 struct {
98 __u32 size;
99 __u64 addr;
100 } __attribute__ ((packed))
101 sglist[MAX_SYSMGMT_LONGCMD_SGENTRY_NUM];
102 } __attribute__ ((packed)) longreq;
103 } __attribute__ ((packed)) parameters;
104} __attribute__ ((packed));
105
106#endif /* _DCDBAS_H_ */
107
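Editor's note: the host-control path uses the HC_* constants above in the same sysfs-driven way — userspace arms an action before shutdown, and the reboot notifier issues the SMI once the system has quiesced. A hedged sketch, not part of the patch; attribute paths are assumed as in the previous example, 4 corresponds to HC_ACTION_HOST_CONTROL_POWERCYCLE (BIT(2)), and 2 to HC_SMITYPE_TYPE2.

/* Illustration only: arm a dcdbas host-control power cycle on shutdown. */
#include <stdio.h>

static void write_attr(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/platform/dcdbas/%s", name);
	f = fopen(path, "w");
	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	write_attr("host_control_smi_type", "2");	/* HC_SMITYPE_TYPE2 */
	write_attr("host_control_action", "4");		/* power cycle, BIT(2) */
	write_attr("host_control_on_shutdown", "1");	/* fire during shutdown */
	return 0;	/* the SMI itself is issued from the reboot notifier */
}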
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
new file mode 100644
index 000000000000..3b865f34a095
--- /dev/null
+++ b/drivers/firmware/dell_rbu.c
@@ -0,0 +1,634 @@
1/*
2 * dell_rbu.c
3 * Bios Update driver for Dell systems
4 * Author: Dell Inc
5 * Abhay Salunke <abhay_salunke@dell.com>
6 *
7 * Copyright (C) 2005 Dell Inc.
8 *
 9 * The Remote BIOS Update (rbu) driver is used for updating the DELL BIOS by
10 * creating entries in the /sys file system on Linux 2.6 and higher
11 * kernels. The driver supports two mechanisms to update the BIOS, namely
12 * contiguous and packetized. Both methods still require some
13 * application to set the CMOS bit telling the BIOS to update itself
14 * after a reboot.
15 *
16 * Contiguous method:
17 * This driver writes the incoming data in a monolithic image by allocating
18 * contiguous physical pages large enough to accommodate the incoming BIOS
19 * image size.
20 *
21 * Packetized method:
22 * The driver writes the incoming packet image by allocating a new packet
23 * each time the packet data is written. This driver requires an
24 * application to break the BIOS image into fixed-size packet chunks.
25 *
26 * See Documentation/dell_rbu.txt for more info.
27 *
28 * This program is free software; you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License v2.0 as published by
30 * the Free Software Foundation
31 *
32 * This program is distributed in the hope that it will be useful,
33 * but WITHOUT ANY WARRANTY; without even the implied warranty of
34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
35 * GNU General Public License for more details.
36 */
37#include <linux/version.h>
38#include <linux/config.h>
39#include <linux/init.h>
40#include <linux/module.h>
41#include <linux/string.h>
42#include <linux/errno.h>
43#include <linux/blkdev.h>
44#include <linux/device.h>
45#include <linux/spinlock.h>
46#include <linux/moduleparam.h>
47#include <linux/firmware.h>
48#include <linux/dma-mapping.h>
49
50MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
51MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
52MODULE_LICENSE("GPL");
53MODULE_VERSION("1.0");
54
55#define BIOS_SCAN_LIMIT 0xffffffff
56#define MAX_IMAGE_LENGTH 16
57static struct _rbu_data {
58 void *image_update_buffer;
59 unsigned long image_update_buffer_size;
60 unsigned long bios_image_size;
61 int image_update_ordernum;
62 int dma_alloc;
63 spinlock_t lock;
64 unsigned long packet_read_count;
65 unsigned long packet_write_count;
66 unsigned long num_packets;
67 unsigned long packetsize;
68} rbu_data;
69
70static char image_type[MAX_IMAGE_LENGTH] = "mono";
71module_param_string(image_type, image_type, sizeof(image_type), 0);
72MODULE_PARM_DESC(image_type, "BIOS image type: mono or packet");
73
74struct packet_data {
75 struct list_head list;
76 size_t length;
77 void *data;
78 int ordernum;
79};
80
81static struct packet_data packet_data_head;
82
83static struct platform_device *rbu_device;
84static int context;
85static dma_addr_t dell_rbu_dmaaddr;
86
87static void init_packet_head(void)
88{
89 INIT_LIST_HEAD(&packet_data_head.list);
90 rbu_data.packet_write_count = 0;
91 rbu_data.packet_read_count = 0;
92 rbu_data.num_packets = 0;
93 rbu_data.packetsize = 0;
94}
95
96static int fill_last_packet(void *data, size_t length)
97{
98 struct list_head *ptemp_list;
99 struct packet_data *packet = NULL;
100 int packet_count = 0;
101
102 pr_debug("fill_last_packet: entry \n");
103
104 if (!rbu_data.num_packets) {
105 pr_debug("fill_last_packet: num_packets=0\n");
106 return -ENOMEM;
107 }
108
109 packet_count = rbu_data.num_packets;
110
111 ptemp_list = (&packet_data_head.list)->prev;
112
113 packet = list_entry(ptemp_list, struct packet_data, list);
114
115 if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) {
116 pr_debug("dell_rbu:%s: packet size data "
117 "overrun\n", __FUNCTION__);
118 return -EINVAL;
119 }
120
121 pr_debug("fill_last_packet : buffer = %p\n", packet->data);
122
123 memcpy((packet->data + rbu_data.packet_write_count), data, length);
124
125 if ((rbu_data.packet_write_count + length) == rbu_data.packetsize) {
126 /*
127 * this was the last data chunk in the packet
128 * so reinitialize the packet data counter to zero
129 */
130 rbu_data.packet_write_count = 0;
131 } else
132 rbu_data.packet_write_count += length;
133
134 pr_debug("fill_last_packet: exit \n");
135 return 0;
136}
137
138static int create_packet(size_t length)
139{
140 struct packet_data *newpacket;
141 int ordernum = 0;
142
143 pr_debug("create_packet: entry \n");
144
145 if (!rbu_data.packetsize) {
146 pr_debug("create_packet: packetsize not specified\n");
147 return -EINVAL;
148 }
149
150 newpacket = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
151 if (!newpacket) {
152 printk(KERN_WARNING
153 "dell_rbu:%s: failed to allocate new "
154 "packet\n", __FUNCTION__);
155 return -ENOMEM;
156 }
157
158 ordernum = get_order(length);
159 /*
160 * there is no upper limit on memory
161 * address for packetized mechanism
162 */
163 newpacket->data = (unsigned char *)__get_free_pages(GFP_KERNEL,
164 ordernum);
165
166 pr_debug("create_packet: newpacket %p\n", newpacket->data);
167
168 if (!newpacket->data) {
169 printk(KERN_WARNING
170 "dell_rbu:%s: failed to allocate new "
171 "packet\n", __FUNCTION__);
172 kfree(newpacket);
173 return -ENOMEM;
174 }
175
176 newpacket->ordernum = ordernum;
177 ++rbu_data.num_packets;
178 /*
179 * initialize the newly created packet headers
180 */
181 INIT_LIST_HEAD(&newpacket->list);
182 list_add_tail(&newpacket->list, &packet_data_head.list);
183 /*
184 * packets have fixed size
185 */
186 newpacket->length = rbu_data.packetsize;
187
188 pr_debug("create_packet: exit \n");
189
190 return 0;
191}
192
193static int packetize_data(void *data, size_t length)
194{
195 int rc = 0;
196
197 if (!rbu_data.packet_write_count) {
198 if ((rc = create_packet(length)))
199 return rc;
200 }
201 if ((rc = fill_last_packet(data, length)))
202 return rc;
203
204 return rc;
205}
206
207static int
208do_packet_read(char *data, struct list_head *ptemp_list,
209 int length, int bytes_read, int *list_read_count)
210{
211 void *ptemp_buf;
212 struct packet_data *newpacket = NULL;
213 int bytes_copied = 0;
214 int j = 0;
215
216 newpacket = list_entry(ptemp_list, struct packet_data, list);
217 *list_read_count += newpacket->length;
218
219 if (*list_read_count > bytes_read) {
220 /* point to the start of unread data */
221 j = newpacket->length - (*list_read_count - bytes_read);
222 /* point to the offset in the packet buffer */
223 ptemp_buf = (u8 *) newpacket->data + j;
224 /*
225 * check if there is enough room in
226		 * the incoming buffer
227 */
228 if (length > (*list_read_count - bytes_read))
229 /*
230			 * copy whatever is there in this
231 * packet and move on
232 */
233 bytes_copied = (*list_read_count - bytes_read);
234 else
235 /* copy the remaining */
236 bytes_copied = length;
237 memcpy(data, ptemp_buf, bytes_copied);
238 }
239 return bytes_copied;
240}
241
242static int packet_read_list(char *data, size_t * pread_length)
243{
244 struct list_head *ptemp_list;
245 int temp_count = 0;
246 int bytes_copied = 0;
247 int bytes_read = 0;
248 int remaining_bytes = 0;
249 char *pdest = data;
250
251 /* check if we have any packets */
252 if (0 == rbu_data.num_packets)
253 return -ENOMEM;
254
255 remaining_bytes = *pread_length;
256 bytes_read = rbu_data.packet_read_count;
257
258 ptemp_list = (&packet_data_head.list)->next;
259 while (!list_empty(ptemp_list)) {
260 bytes_copied = do_packet_read(pdest, ptemp_list,
261 remaining_bytes, bytes_read,
262 &temp_count);
263 remaining_bytes -= bytes_copied;
264 bytes_read += bytes_copied;
265 pdest += bytes_copied;
266 /*
267 * check if we reached end of buffer before reaching the
268 * last packet
269 */
270 if (remaining_bytes == 0)
271 break;
272
273 ptemp_list = ptemp_list->next;
274 }
275	/* finally set the bytes read */
276 *pread_length = bytes_read - rbu_data.packet_read_count;
277 rbu_data.packet_read_count = bytes_read;
278 return 0;
279}
280
281static void packet_empty_list(void)
282{
283 struct list_head *ptemp_list;
284 struct list_head *pnext_list;
285 struct packet_data *newpacket;
286
287 ptemp_list = (&packet_data_head.list)->next;
288 while (!list_empty(ptemp_list)) {
289 newpacket =
290 list_entry(ptemp_list, struct packet_data, list);
291 pnext_list = ptemp_list->next;
292 list_del(ptemp_list);
293 ptemp_list = pnext_list;
294 /*
295 * zero out the RBU packet memory before freeing
296 * to make sure there are no stale RBU packets left in memory
297 */
298 memset(newpacket->data, 0, rbu_data.packetsize);
299 free_pages((unsigned long)newpacket->data,
300 newpacket->ordernum);
301 kfree(newpacket);
302 }
303 rbu_data.packet_write_count = 0;
304 rbu_data.packet_read_count = 0;
305 rbu_data.num_packets = 0;
306 rbu_data.packetsize = 0;
307}
308
309/*
310 * img_update_free: Frees the buffer allocated for storing BIOS image
311 * Always called with lock held and returned with lock held
312 */
313static void img_update_free(void)
314{
315 if (!rbu_data.image_update_buffer)
316 return;
317 /*
318 * zero out this buffer before freeing it to get rid of any stale
319 * BIOS image copied in memory.
320 */
321 memset(rbu_data.image_update_buffer, 0,
322 rbu_data.image_update_buffer_size);
323 if (rbu_data.dma_alloc == 1)
324 dma_free_coherent(NULL, rbu_data.bios_image_size,
325 rbu_data.image_update_buffer,
326 dell_rbu_dmaaddr);
327 else
328 free_pages((unsigned long)rbu_data.image_update_buffer,
329 rbu_data.image_update_ordernum);
330
331 /*
332 * Re-initialize the rbu_data variables after a free
333 */
334 rbu_data.image_update_ordernum = -1;
335 rbu_data.image_update_buffer = NULL;
336 rbu_data.image_update_buffer_size = 0;
337 rbu_data.bios_image_size = 0;
338 rbu_data.dma_alloc = 0;
339}
340
341/*
342 * img_update_realloc: This function allocates the contiguous pages to
343 * accommodate the requested size of data. The memory address and size
344 * values are stored globally and on every call to this function the new
345 * size is checked to see if more data is required than the existing size.
346 * If true, the previous memory is freed and a new allocation is done to
347 * accommodate the new size. If the incoming size is less than the
348 * already allocated size, then that memory is reused. This function is
349 * called with lock held and returns with lock held.
350 */
351static int img_update_realloc(unsigned long size)
352{
353 unsigned char *image_update_buffer = NULL;
354 unsigned long rc;
355 unsigned long img_buf_phys_addr;
356 int ordernum;
357 int dma_alloc = 0;
358
359 /*
360 * check if the buffer of sufficient size has been
361 * already allocated
362 */
363 if (rbu_data.image_update_buffer_size >= size) {
364 /*
365 * check for corruption
366 */
367 if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
368 printk(KERN_ERR "dell_rbu:%s: corruption "
369 "check failed\n", __FUNCTION__);
370 return -EINVAL;
371 }
372 /*
373 * we have a valid pre-allocated buffer with
374 * sufficient size
375 */
376 return 0;
377 }
378
379 /*
380 * free any previously allocated buffer
381 */
382 img_update_free();
383
384 spin_unlock(&rbu_data.lock);
385
386 ordernum = get_order(size);
387 image_update_buffer =
388 (unsigned char *)__get_free_pages(GFP_KERNEL, ordernum);
389
390 img_buf_phys_addr =
391 (unsigned long)virt_to_phys(image_update_buffer);
392
393 if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
394 free_pages((unsigned long)image_update_buffer, ordernum);
395 ordernum = -1;
396 image_update_buffer = dma_alloc_coherent(NULL, size,
397 &dell_rbu_dmaaddr,
398 GFP_KERNEL);
399 dma_alloc = 1;
400 }
401
402 spin_lock(&rbu_data.lock);
403
404 if (image_update_buffer != NULL) {
405 rbu_data.image_update_buffer = image_update_buffer;
406 rbu_data.image_update_buffer_size = size;
407 rbu_data.bios_image_size =
408 rbu_data.image_update_buffer_size;
409 rbu_data.image_update_ordernum = ordernum;
410 rbu_data.dma_alloc = dma_alloc;
411 rc = 0;
412 } else {
413		pr_debug("Not enough memory for image update: "
414 "size = %ld\n", size);
415 rc = -ENOMEM;
416 }
417
418 return rc;
419}
420
421static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
422{
423 int retval;
424 size_t bytes_left;
425 size_t data_length;
426 char *ptempBuf = buffer;
427 unsigned long imagesize;
428
429 /* check to see if we have something to return */
430 if (rbu_data.num_packets == 0) {
431 pr_debug("read_packet_data: no packets written\n");
432 retval = -ENOMEM;
433 goto read_rbu_data_exit;
434 }
435
436 imagesize = rbu_data.num_packets * rbu_data.packetsize;
437
438 if (pos > imagesize) {
439 retval = 0;
440 printk(KERN_WARNING "dell_rbu:read_packet_data: "
441 "data underrun\n");
442 goto read_rbu_data_exit;
443 }
444
445 bytes_left = imagesize - pos;
446 data_length = min(bytes_left, count);
447
448 if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
449 goto read_rbu_data_exit;
450
451 if ((pos + count) > imagesize) {
452 rbu_data.packet_read_count = 0;
453 /* this was the last copy */
454 retval = bytes_left;
455 } else
456 retval = count;
457
458 read_rbu_data_exit:
459 return retval;
460}
461
462static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
463{
464 unsigned char *ptemp = NULL;
465 size_t bytes_left = 0;
466 size_t data_length = 0;
467 ssize_t ret_count = 0;
468
469 /* check to see if we have something to return */
470 if ((rbu_data.image_update_buffer == NULL) ||
471 (rbu_data.bios_image_size == 0)) {
472 pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
473 "bios_image_size %lu\n",
474 rbu_data.image_update_buffer,
475 rbu_data.bios_image_size);
476 ret_count = -ENOMEM;
477 goto read_rbu_data_exit;
478 }
479
480 if (pos > rbu_data.bios_image_size) {
481 ret_count = 0;
482 goto read_rbu_data_exit;
483 }
484
485 bytes_left = rbu_data.bios_image_size - pos;
486 data_length = min(bytes_left, count);
487
488 ptemp = rbu_data.image_update_buffer;
489 memcpy(buffer, (ptemp + pos), data_length);
490
491 if ((pos + count) > rbu_data.bios_image_size)
492 /* this was the last copy */
493 ret_count = bytes_left;
494 else
495 ret_count = count;
496 read_rbu_data_exit:
497 return ret_count;
498}
499
500static ssize_t
501read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
502{
503 ssize_t ret_count = 0;
504
505 spin_lock(&rbu_data.lock);
506
507 if (!strcmp(image_type, "mono"))
508 ret_count = read_rbu_mono_data(buffer, pos, count);
509 else if (!strcmp(image_type, "packet"))
510 ret_count = read_packet_data(buffer, pos, count);
511 else
512 pr_debug("read_rbu_data: invalid image type specified\n");
513
514 spin_unlock(&rbu_data.lock);
515 return ret_count;
516}
517
518static ssize_t
519read_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
520 size_t count)
521{
522 int size = 0;
523 if (!pos)
524 size = sprintf(buffer, "%s\n", image_type);
525 return size;
526}
527
528static ssize_t
529write_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
530 size_t count)
531{
532 int rc = count;
533 spin_lock(&rbu_data.lock);
534
535 if (strlen(buffer) < MAX_IMAGE_LENGTH)
536 sscanf(buffer, "%s", image_type);
537 else
538		printk(KERN_WARNING "dell_rbu: image_type is invalid, "
539			"max chars = %d, incoming str: --%s--\n",
540 MAX_IMAGE_LENGTH, buffer);
541
542 /* we must free all previous allocations */
543 packet_empty_list();
544 img_update_free();
545
546 spin_unlock(&rbu_data.lock);
547 return rc;
548
549}
550
551static struct bin_attribute rbu_data_attr = {
552 .attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444},
553 .read = read_rbu_data,
554};
555
556static struct bin_attribute rbu_image_type_attr = {
557 .attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644},
558 .read = read_rbu_image_type,
559 .write = write_rbu_image_type,
560};
561
562static void callbackfn_rbu(const struct firmware *fw, void *context)
563{
564 int rc = 0;
565
566 if (!fw || !fw->size)
567 return;
568
569 spin_lock(&rbu_data.lock);
570 if (!strcmp(image_type, "mono")) {
571 if (!img_update_realloc(fw->size))
572 memcpy(rbu_data.image_update_buffer,
573 fw->data, fw->size);
574 } else if (!strcmp(image_type, "packet")) {
575 if (!rbu_data.packetsize)
576 rbu_data.packetsize = fw->size;
577 else if (rbu_data.packetsize != fw->size) {
578 packet_empty_list();
579 rbu_data.packetsize = fw->size;
580 }
581 packetize_data(fw->data, fw->size);
582 } else
583 pr_debug("invalid image type specified.\n");
584 spin_unlock(&rbu_data.lock);
585
586 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
587 "dell_rbu", &rbu_device->dev,
588 &context, callbackfn_rbu);
589 if (rc)
590 printk(KERN_ERR
591 "dell_rbu:%s request_firmware_nowait failed"
592 " %d\n", __FUNCTION__, rc);
593}
594
595static int __init dcdrbu_init(void)
596{
597 int rc = 0;
598 spin_lock_init(&rbu_data.lock);
599
600 init_packet_head();
601 rbu_device =
602 platform_device_register_simple("dell_rbu", -1, NULL, 0);
603 if (!rbu_device) {
604 printk(KERN_ERR
605 "dell_rbu:%s:platform_device_register_simple "
606 "failed\n", __FUNCTION__);
607 return -EIO;
608 }
609
610 sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
611 sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
612
613 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
614 "dell_rbu", &rbu_device->dev,
615 &context, callbackfn_rbu);
616 if (rc)
617 printk(KERN_ERR "dell_rbu:%s:request_firmware_nowait"
618 " failed %d\n", __FUNCTION__, rc);
619
620 return rc;
621
622}
623
624static __exit void dcdrbu_exit(void)
625{
626 spin_lock(&rbu_data.lock);
627 packet_empty_list();
628 img_update_free();
629 spin_unlock(&rbu_data.lock);
630 platform_device_unregister(rbu_device);
631}
632
633module_exit(dcdrbu_exit);
634module_init(dcdrbu_init);
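Editor's note: tying the pieces together, the driver pulls the image in via request_firmware_nowait(), so userspace feeds it through the firmware-class loading/data attributes after selecting the image type. A hedged sketch for the monolithic path, not part of the patch; the sysfs paths are assumptions derived from the platform device and firmware names used above, and the packetized path is the same except image_type is "packet" and the image is fed in fixed-size chunks, one load cycle per packet.

/* Illustration only: upload a monolithic BIOS image through dell_rbu. */
#include <stdio.h>

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(int argc, char **argv)
{
	FILE *in, *out;
	char buf[4096];
	size_t n;

	if (argc < 2)
		return 1;

	write_str("/sys/devices/platform/dell_rbu/image_type", "mono");
	write_str("/sys/class/firmware/dell_rbu/loading", "1");

	in = fopen(argv[1], "rb");
	out = fopen("/sys/class/firmware/dell_rbu/data", "wb");
	if (!in || !out)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);		/* stream the image */
	fclose(in);
	fclose(out);

	write_str("/sys/class/firmware/dell_rbu/loading", "0");
	/* the stored image can be read back from
	 * /sys/devices/platform/dell_rbu/data for verification */
	return 0;
}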
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 140d5f851a5b..138dc50270e3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -12,12 +12,20 @@ config HWMON
12 of a system. Most modern motherboards include such a device. It 12 of a system. Most modern motherboards include such a device. It
13 can include temperature sensors, voltage sensors, fan speed 13 can include temperature sensors, voltage sensors, fan speed
14 sensors and various additional features such as the ability to 14 sensors and various additional features such as the ability to
15 control the speed of the fans. 15 control the speed of the fans. If you want this support you
16 should say Y here and also to the specific driver(s) for your
17	  sensor chip(s) below.
18
19 This support can also be built as a module. If so, the module
20 will be called hwmon.
21
22config HWMON_VID
23 tristate
24 default n
16 25
17config SENSORS_ADM1021 26config SENSORS_ADM1021
18 tristate "Analog Devices ADM1021 and compatibles" 27 tristate "Analog Devices ADM1021 and compatibles"
19 depends on HWMON && I2C 28 depends on HWMON && I2C
20 select I2C_SENSOR
21 help 29 help
22 If you say yes here you get support for Analog Devices ADM1021 30 If you say yes here you get support for Analog Devices ADM1021
23 and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A, 31 and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A,
@@ -30,7 +38,7 @@ config SENSORS_ADM1021
30config SENSORS_ADM1025 38config SENSORS_ADM1025
31 tristate "Analog Devices ADM1025 and compatibles" 39 tristate "Analog Devices ADM1025 and compatibles"
32 depends on HWMON && I2C && EXPERIMENTAL 40 depends on HWMON && I2C && EXPERIMENTAL
33 select I2C_SENSOR 41 select HWMON_VID
34 help 42 help
35 If you say yes here you get support for Analog Devices ADM1025 43 If you say yes here you get support for Analog Devices ADM1025
36 and Philips NE1619 sensor chips. 44 and Philips NE1619 sensor chips.
@@ -41,7 +49,7 @@ config SENSORS_ADM1025
41config SENSORS_ADM1026 49config SENSORS_ADM1026
42 tristate "Analog Devices ADM1026 and compatibles" 50 tristate "Analog Devices ADM1026 and compatibles"
43 depends on HWMON && I2C && EXPERIMENTAL 51 depends on HWMON && I2C && EXPERIMENTAL
44 select I2C_SENSOR 52 select HWMON_VID
45 help 53 help
46 If you say yes here you get support for Analog Devices ADM1026 54 If you say yes here you get support for Analog Devices ADM1026
47 sensor chip. 55 sensor chip.
@@ -52,7 +60,6 @@ config SENSORS_ADM1026
52config SENSORS_ADM1031 60config SENSORS_ADM1031
53 tristate "Analog Devices ADM1031 and compatibles" 61 tristate "Analog Devices ADM1031 and compatibles"
54 depends on HWMON && I2C && EXPERIMENTAL 62 depends on HWMON && I2C && EXPERIMENTAL
55 select I2C_SENSOR
56 help 63 help
57 If you say yes here you get support for Analog Devices ADM1031 64 If you say yes here you get support for Analog Devices ADM1031
58 and ADM1030 sensor chips. 65 and ADM1030 sensor chips.
@@ -63,7 +70,7 @@ config SENSORS_ADM1031
63config SENSORS_ADM9240 70config SENSORS_ADM9240
64 tristate "Analog Devices ADM9240 and compatibles" 71 tristate "Analog Devices ADM9240 and compatibles"
65 depends on HWMON && I2C && EXPERIMENTAL 72 depends on HWMON && I2C && EXPERIMENTAL
66 select I2C_SENSOR 73 select HWMON_VID
67 help 74 help
68 If you say yes here you get support for Analog Devices ADM9240, 75 If you say yes here you get support for Analog Devices ADM9240,
69 Dallas DS1780, National Semiconductor LM81 sensor chips. 76 Dallas DS1780, National Semiconductor LM81 sensor chips.
@@ -74,7 +81,7 @@ config SENSORS_ADM9240
74config SENSORS_ASB100 81config SENSORS_ASB100
75 tristate "Asus ASB100 Bach" 82 tristate "Asus ASB100 Bach"
76 depends on HWMON && I2C && EXPERIMENTAL 83 depends on HWMON && I2C && EXPERIMENTAL
77 select I2C_SENSOR 84 select HWMON_VID
78 help 85 help
79 If you say yes here you get support for the ASB100 Bach sensor 86 If you say yes here you get support for the ASB100 Bach sensor
80 chip found on some Asus mainboards. 87 chip found on some Asus mainboards.
@@ -85,7 +92,7 @@ config SENSORS_ASB100
85config SENSORS_ATXP1 92config SENSORS_ATXP1
86 tristate "Attansic ATXP1 VID controller" 93 tristate "Attansic ATXP1 VID controller"
87 depends on HWMON && I2C && EXPERIMENTAL 94 depends on HWMON && I2C && EXPERIMENTAL
88 select I2C_SENSOR 95 select HWMON_VID
89 help 96 help
90 If you say yes here you get support for the Attansic ATXP1 VID 97 If you say yes here you get support for the Attansic ATXP1 VID
91 controller. 98 controller.
@@ -99,7 +106,6 @@ config SENSORS_ATXP1
99config SENSORS_DS1621 106config SENSORS_DS1621
100 tristate "Dallas Semiconductor DS1621 and DS1625" 107 tristate "Dallas Semiconductor DS1621 and DS1625"
101 depends on HWMON && I2C && EXPERIMENTAL 108 depends on HWMON && I2C && EXPERIMENTAL
102 select I2C_SENSOR
103 help 109 help
104 If you say yes here you get support for Dallas Semiconductor 110 If you say yes here you get support for Dallas Semiconductor
105 DS1621 and DS1625 sensor chips. 111 DS1621 and DS1625 sensor chips.
@@ -110,7 +116,6 @@ config SENSORS_DS1621
110config SENSORS_FSCHER 116config SENSORS_FSCHER
111 tristate "FSC Hermes" 117 tristate "FSC Hermes"
112 depends on HWMON && I2C && EXPERIMENTAL 118 depends on HWMON && I2C && EXPERIMENTAL
113 select I2C_SENSOR
114 help 119 help
115 If you say yes here you get support for Fujitsu Siemens 120 If you say yes here you get support for Fujitsu Siemens
116 Computers Hermes sensor chips. 121 Computers Hermes sensor chips.
@@ -121,7 +126,6 @@ config SENSORS_FSCHER
121config SENSORS_FSCPOS 126config SENSORS_FSCPOS
122 tristate "FSC Poseidon" 127 tristate "FSC Poseidon"
123 depends on HWMON && I2C && EXPERIMENTAL 128 depends on HWMON && I2C && EXPERIMENTAL
124 select I2C_SENSOR
125 help 129 help
126 If you say yes here you get support for Fujitsu Siemens 130 If you say yes here you get support for Fujitsu Siemens
127 Computers Poseidon sensor chips. 131 Computers Poseidon sensor chips.
@@ -132,7 +136,6 @@ config SENSORS_FSCPOS
132config SENSORS_GL518SM 136config SENSORS_GL518SM
133 tristate "Genesys Logic GL518SM" 137 tristate "Genesys Logic GL518SM"
134 depends on HWMON && I2C 138 depends on HWMON && I2C
135 select I2C_SENSOR
136 help 139 help
137 If you say yes here you get support for Genesys Logic GL518SM 140 If you say yes here you get support for Genesys Logic GL518SM
138 sensor chips. 141 sensor chips.
@@ -143,7 +146,7 @@ config SENSORS_GL518SM
143config SENSORS_GL520SM 146config SENSORS_GL520SM
144 tristate "Genesys Logic GL520SM" 147 tristate "Genesys Logic GL520SM"
145 depends on HWMON && I2C && EXPERIMENTAL 148 depends on HWMON && I2C && EXPERIMENTAL
146 select I2C_SENSOR 149 select HWMON_VID
147 help 150 help
148 If you say yes here you get support for Genesys Logic GL520SM 151 If you say yes here you get support for Genesys Logic GL520SM
149 sensor chips. 152 sensor chips.
@@ -154,7 +157,8 @@ config SENSORS_GL520SM
154config SENSORS_IT87 157config SENSORS_IT87
155 tristate "ITE IT87xx and compatibles" 158 tristate "ITE IT87xx and compatibles"
156 depends on HWMON && I2C 159 depends on HWMON && I2C
157 select I2C_SENSOR 160 select I2C_ISA
161 select HWMON_VID
158 help 162 help
159 If you say yes here you get support for ITE IT87xx sensor chips 163 If you say yes here you get support for ITE IT87xx sensor chips
160 and clones: SiS960. 164 and clones: SiS960.
@@ -165,7 +169,6 @@ config SENSORS_IT87
165config SENSORS_LM63 169config SENSORS_LM63
166 tristate "National Semiconductor LM63" 170 tristate "National Semiconductor LM63"
167 depends on HWMON && I2C && EXPERIMENTAL 171 depends on HWMON && I2C && EXPERIMENTAL
168 select I2C_SENSOR
169 help 172 help
170 If you say yes here you get support for the National Semiconductor 173 If you say yes here you get support for the National Semiconductor
171 LM63 remote diode digital temperature sensor with integrated fan 174 LM63 remote diode digital temperature sensor with integrated fan
@@ -178,7 +181,6 @@ config SENSORS_LM63
178config SENSORS_LM75 181config SENSORS_LM75
179 tristate "National Semiconductor LM75 and compatibles" 182 tristate "National Semiconductor LM75 and compatibles"
180 depends on HWMON && I2C 183 depends on HWMON && I2C
181 select I2C_SENSOR
182 help 184 help
183 If you say yes here you get support for National Semiconductor LM75 185 If you say yes here you get support for National Semiconductor LM75
184 sensor chips and clones: Dallas Semiconductor DS75 and DS1775 (in 186 sensor chips and clones: Dallas Semiconductor DS75 and DS1775 (in
@@ -194,7 +196,6 @@ config SENSORS_LM75
194config SENSORS_LM77 196config SENSORS_LM77
195 tristate "National Semiconductor LM77" 197 tristate "National Semiconductor LM77"
196 depends on HWMON && I2C && EXPERIMENTAL 198 depends on HWMON && I2C && EXPERIMENTAL
197 select I2C_SENSOR
198 help 199 help
199 If you say yes here you get support for National Semiconductor LM77 200 If you say yes here you get support for National Semiconductor LM77
200 sensor chips. 201 sensor chips.
@@ -205,7 +206,8 @@ config SENSORS_LM77
205config SENSORS_LM78 206config SENSORS_LM78
206 tristate "National Semiconductor LM78 and compatibles" 207 tristate "National Semiconductor LM78 and compatibles"
207 depends on HWMON && I2C && EXPERIMENTAL 208 depends on HWMON && I2C && EXPERIMENTAL
208 select I2C_SENSOR 209 select I2C_ISA
210 select HWMON_VID
209 help 211 help
210 If you say yes here you get support for National Semiconductor LM78, 212 If you say yes here you get support for National Semiconductor LM78,
211 LM78-J and LM79. 213 LM78-J and LM79.
@@ -216,7 +218,6 @@ config SENSORS_LM78
216config SENSORS_LM80 218config SENSORS_LM80
217 tristate "National Semiconductor LM80" 219 tristate "National Semiconductor LM80"
218 depends on HWMON && I2C && EXPERIMENTAL 220 depends on HWMON && I2C && EXPERIMENTAL
219 select I2C_SENSOR
220 help 221 help
221 If you say yes here you get support for National Semiconductor 222 If you say yes here you get support for National Semiconductor
222 LM80 sensor chips. 223 LM80 sensor chips.
@@ -227,7 +228,6 @@ config SENSORS_LM80
227config SENSORS_LM83 228config SENSORS_LM83
228 tristate "National Semiconductor LM83" 229 tristate "National Semiconductor LM83"
229 depends on HWMON && I2C 230 depends on HWMON && I2C
230 select I2C_SENSOR
231 help 231 help
232 If you say yes here you get support for National Semiconductor 232 If you say yes here you get support for National Semiconductor
233 LM83 sensor chips. 233 LM83 sensor chips.
@@ -238,7 +238,7 @@ config SENSORS_LM83
238config SENSORS_LM85 238config SENSORS_LM85
239 tristate "National Semiconductor LM85 and compatibles" 239 tristate "National Semiconductor LM85 and compatibles"
240 depends on HWMON && I2C && EXPERIMENTAL 240 depends on HWMON && I2C && EXPERIMENTAL
241 select I2C_SENSOR 241 select HWMON_VID
242 help 242 help
243 If you say yes here you get support for National Semiconductor LM85 243 If you say yes here you get support for National Semiconductor LM85
244 sensor chips and clones: ADT7463, EMC6D100, EMC6D102 and ADM1027. 244 sensor chips and clones: ADT7463, EMC6D100, EMC6D102 and ADM1027.
@@ -249,7 +249,7 @@ config SENSORS_LM85
249config SENSORS_LM87 249config SENSORS_LM87
250 tristate "National Semiconductor LM87" 250 tristate "National Semiconductor LM87"
251 depends on HWMON && I2C && EXPERIMENTAL 251 depends on HWMON && I2C && EXPERIMENTAL
252 select I2C_SENSOR 252 select HWMON_VID
253 help 253 help
254 If you say yes here you get support for National Semiconductor LM87 254 If you say yes here you get support for National Semiconductor LM87
255 sensor chips. 255 sensor chips.
@@ -260,7 +260,6 @@ config SENSORS_LM87
260config SENSORS_LM90 260config SENSORS_LM90
261 tristate "National Semiconductor LM90 and compatibles" 261 tristate "National Semiconductor LM90 and compatibles"
262 depends on HWMON && I2C 262 depends on HWMON && I2C
263 select I2C_SENSOR
264 help 263 help
265 If you say yes here you get support for National Semiconductor LM90, 264 If you say yes here you get support for National Semiconductor LM90,
266 LM86, LM89 and LM99, Analog Devices ADM1032 and Maxim MAX6657 and 265 LM86, LM89 and LM99, Analog Devices ADM1032 and Maxim MAX6657 and
@@ -275,7 +274,6 @@ config SENSORS_LM90
275config SENSORS_LM92 274config SENSORS_LM92
276 tristate "National Semiconductor LM92 and compatibles" 275 tristate "National Semiconductor LM92 and compatibles"
277 depends on HWMON && I2C && EXPERIMENTAL 276 depends on HWMON && I2C && EXPERIMENTAL
278 select I2C_SENSOR
279 help 277 help
280 If you say yes here you get support for National Semiconductor LM92 278 If you say yes here you get support for National Semiconductor LM92
281 and Maxim MAX6635 sensor chips. 279 and Maxim MAX6635 sensor chips.
@@ -286,7 +284,6 @@ config SENSORS_LM92
286config SENSORS_MAX1619 284config SENSORS_MAX1619
287 tristate "Maxim MAX1619 sensor chip" 285 tristate "Maxim MAX1619 sensor chip"
288 depends on HWMON && I2C && EXPERIMENTAL 286 depends on HWMON && I2C && EXPERIMENTAL
289 select I2C_SENSOR
290 help 287 help
291 If you say yes here you get support for MAX1619 sensor chip. 288 If you say yes here you get support for MAX1619 sensor chip.
292 289
@@ -296,8 +293,8 @@ config SENSORS_MAX1619
296config SENSORS_PC87360 293config SENSORS_PC87360
297 tristate "National Semiconductor PC87360 family" 294 tristate "National Semiconductor PC87360 family"
298 depends on HWMON && I2C && EXPERIMENTAL 295 depends on HWMON && I2C && EXPERIMENTAL
299 select I2C_SENSOR
300 select I2C_ISA 296 select I2C_ISA
297 select HWMON_VID
301 help 298 help
302 If you say yes here you get access to the hardware monitoring 299 If you say yes here you get access to the hardware monitoring
303 functions of the National Semiconductor PC8736x Super-I/O chips. 300 functions of the National Semiconductor PC8736x Super-I/O chips.
@@ -311,7 +308,6 @@ config SENSORS_PC87360
311config SENSORS_SIS5595 308config SENSORS_SIS5595
312 tristate "Silicon Integrated Systems Corp. SiS5595" 309 tristate "Silicon Integrated Systems Corp. SiS5595"
313 depends on HWMON && I2C && PCI && EXPERIMENTAL 310 depends on HWMON && I2C && PCI && EXPERIMENTAL
314 select I2C_SENSOR
315 select I2C_ISA 311 select I2C_ISA
316 help 312 help
317 If you say yes here you get support for the integrated sensors in 313 If you say yes here you get support for the integrated sensors in
@@ -323,7 +319,6 @@ config SENSORS_SIS5595
323config SENSORS_SMSC47M1 319config SENSORS_SMSC47M1
324 tristate "SMSC LPC47M10x and compatibles" 320 tristate "SMSC LPC47M10x and compatibles"
325 depends on HWMON && I2C && EXPERIMENTAL 321 depends on HWMON && I2C && EXPERIMENTAL
326 select I2C_SENSOR
327 select I2C_ISA 322 select I2C_ISA
328 help 323 help
329 If you say yes here you get support for the integrated fan 324 If you say yes here you get support for the integrated fan
@@ -336,7 +331,6 @@ config SENSORS_SMSC47M1
336config SENSORS_SMSC47B397 331config SENSORS_SMSC47B397
337 tristate "SMSC LPC47B397-NC" 332 tristate "SMSC LPC47B397-NC"
338 depends on HWMON && I2C && EXPERIMENTAL 333 depends on HWMON && I2C && EXPERIMENTAL
339 select I2C_SENSOR
340 select I2C_ISA 334 select I2C_ISA
341 help 335 help
342 If you say yes here you get support for the SMSC LPC47B397-NC 336 If you say yes here you get support for the SMSC LPC47B397-NC
@@ -348,7 +342,6 @@ config SENSORS_SMSC47B397
348config SENSORS_VIA686A 342config SENSORS_VIA686A
349 tristate "VIA686A" 343 tristate "VIA686A"
350 depends on HWMON && I2C && PCI 344 depends on HWMON && I2C && PCI
351 select I2C_SENSOR
352 select I2C_ISA 345 select I2C_ISA
353 help 346 help
354 If you say yes here you get support for the integrated sensors in 347 If you say yes here you get support for the integrated sensors in
@@ -360,7 +353,8 @@ config SENSORS_VIA686A
360config SENSORS_W83781D 353config SENSORS_W83781D
361 tristate "Winbond W83781D, W83782D, W83783S, W83627HF, Asus AS99127F" 354 tristate "Winbond W83781D, W83782D, W83783S, W83627HF, Asus AS99127F"
362 depends on HWMON && I2C 355 depends on HWMON && I2C
363 select I2C_SENSOR 356 select I2C_ISA
357 select HWMON_VID
364 help 358 help
365 If you say yes here you get support for the Winbond W8378x series 359 If you say yes here you get support for the Winbond W8378x series
366 of sensor chips: the W83781D, W83782D, W83783S and W83627HF, 360 of sensor chips: the W83781D, W83782D, W83783S and W83627HF,
@@ -369,10 +363,18 @@ config SENSORS_W83781D
369 This driver can also be built as a module. If so, the module 363 This driver can also be built as a module. If so, the module
370 will be called w83781d. 364 will be called w83781d.
371 365
366config SENSORS_W83792D
367 tristate "Winbond W83792D"
368 depends on HWMON && I2C && EXPERIMENTAL
369 help
370 If you say yes here you get support for the Winbond W83792D chip.
371
372 This driver can also be built as a module. If so, the module
373 will be called w83792d.
374
372config SENSORS_W83L785TS 375config SENSORS_W83L785TS
373 tristate "Winbond W83L785TS-S" 376 tristate "Winbond W83L785TS-S"
374 depends on HWMON && I2C && EXPERIMENTAL 377 depends on HWMON && I2C && EXPERIMENTAL
375 select I2C_SENSOR
376 help 378 help
377 If you say yes here you get support for the Winbond W83L785TS-S 379 If you say yes here you get support for the Winbond W83L785TS-S
378 sensor chip, which is used on the Asus A7N8X, among other 380 sensor chip, which is used on the Asus A7N8X, among other
@@ -384,8 +386,8 @@ config SENSORS_W83L785TS
384config SENSORS_W83627HF 386config SENSORS_W83627HF
385 tristate "Winbond W83627HF, W83627THF, W83637HF, W83697HF" 387 tristate "Winbond W83627HF, W83627THF, W83637HF, W83697HF"
386 depends on HWMON && I2C && EXPERIMENTAL 388 depends on HWMON && I2C && EXPERIMENTAL
387 select I2C_SENSOR
388 select I2C_ISA 389 select I2C_ISA
390 select HWMON_VID
389 help 391 help
390 If you say yes here you get support for the Winbond W836X7 series 392 If you say yes here you get support for the Winbond W836X7 series
391 of sensor chips: the W83627HF, W83627THF, W83637HF, and the W83697HF 393 of sensor chips: the W83627HF, W83627THF, W83637HF, and the W83697HF
@@ -396,7 +398,6 @@ config SENSORS_W83627HF
396config SENSORS_W83627EHF 398config SENSORS_W83627EHF
397 tristate "Winbond W83627EHF" 399 tristate "Winbond W83627EHF"
398 depends on HWMON && I2C && EXPERIMENTAL 400 depends on HWMON && I2C && EXPERIMENTAL
399 select I2C_SENSOR
400 select I2C_ISA 401 select I2C_ISA
401 help 402 help
402 If you say yes here you get preliminary support for the hardware 403 If you say yes here you get preliminary support for the hardware
@@ -404,6 +405,9 @@ config SENSORS_W83627EHF
404 Only fan and temperature inputs are supported at the moment, while 405 Only fan and temperature inputs are supported at the moment, while
405 the chip does much more than that. 406 the chip does much more than that.
406 407
408 This driver also supports the W83627EHG, which is the lead-free
409 version of the W83627EHF.
410
407 This driver can also be built as a module. If so, the module 411 This driver can also be built as a module. If so, the module
408 will be called w83627ehf. 412 will be called w83627ehf.
409 413
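The new HWMON_VID symbol selects the shared hwmon-vid helper library that replaces i2c-vid.h; the drivers converted below call it roughly as in this minimal sketch (the surrounding function and variable names are illustrative, the two helpers are the ones visible in the diffs):

/* Sketch of the hwmon-vid helpers that CONFIG_HWMON_VID provides. */
#include <linux/i2c.h>
#include <linux/hwmon-vid.h>

static void example_report_vid(struct i2c_client *client, u8 vid_bits)
{
	int vrm = vid_which_vrm();		/* VRM/VRD version, e.g. 90 for 9.0 */
	int mv = vid_from_reg(vid_bits, vrm);	/* decode VID pins to millivolts */

	dev_info(&client->dev, "VRM %d.%d, core VID %d mV\n",
		 vrm / 10, vrm % 10, mv);
}

Making the helper a separate tristate keeps drivers that do not report VID (lm75, lm90 and friends) from pulling the decode table into their builds.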
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 2781403a0236..381f1bf04cc5 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -2,9 +2,13 @@
2# Makefile for sensor chip drivers. 2# Makefile for sensor chip drivers.
3# 3#
4 4
5obj-$(CONFIG_HWMON) += hwmon.o
6obj-$(CONFIG_HWMON_VID) += hwmon-vid.o
7
5# asb100, then w83781d go first, as they can override other drivers' addresses. 8# asb100, then w83781d go first, as they can override other drivers' addresses.
6obj-$(CONFIG_SENSORS_ASB100) += asb100.o 9obj-$(CONFIG_SENSORS_ASB100) += asb100.o
7obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o 10obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
11obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
8obj-$(CONFIG_SENSORS_W83781D) += w83781d.o 12obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
9 13
10obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o 14obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
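For orientation, each obj-$(CONFIG_SENSORS_...) entry here builds an ordinary i2c chip driver module; the glue behind one such entry follows roughly this sketch (all names hypothetical, API as used by the drivers in this series):

/* Sketch of the module glue behind an obj-$(CONFIG_SENSORS_FOO) entry. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>

static int example_attach_adapter(struct i2c_adapter *adapter)
{
	return 0;	/* real drivers scan the bus here, see the i2c_probe() sketch below */
}

static int example_detach_client(struct i2c_client *client)
{
	return 0;	/* real drivers unregister sysfs/hwmon state here */
}

static struct i2c_driver example_driver = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.flags		= I2C_DF_NOTIFY,
	.attach_adapter	= example_attach_adapter,
	.detach_client	= example_detach_client,
};

static int __init example_init(void)
{
	return i2c_add_driver(&example_driver);
}

static void __exit example_exit(void)
{
	i2c_del_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");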
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index d2c774c32f45..e928cdb041cb 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -24,7 +24,8 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/jiffies.h> 25#include <linux/jiffies.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/i2c-sensor.h> 27#include <linux/hwmon.h>
28#include <linux/err.h>
28 29
29 30
30/* Addresses to scan */ 31/* Addresses to scan */
@@ -32,10 +33,9 @@ static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a,
32 0x29, 0x2a, 0x2b, 33 0x29, 0x2a, 0x2b,
33 0x4c, 0x4d, 0x4e, 34 0x4c, 0x4d, 0x4e,
34 I2C_CLIENT_END }; 35 I2C_CLIENT_END };
35static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
36 36
37/* Insmod parameters */ 37/* Insmod parameters */
38SENSORS_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066); 38I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066);
39 39
40/* adm1021 constants specified below */ 40/* adm1021 constants specified below */
41 41
@@ -89,6 +89,7 @@ clearing it. Weird, ey? --Phil */
89/* Each client has this additional data */ 89/* Each client has this additional data */
90struct adm1021_data { 90struct adm1021_data {
91 struct i2c_client client; 91 struct i2c_client client;
92 struct class_device *class_dev;
92 enum chips type; 93 enum chips type;
93 94
94 struct semaphore update_lock; 95 struct semaphore update_lock;
@@ -185,7 +186,7 @@ static int adm1021_attach_adapter(struct i2c_adapter *adapter)
185{ 186{
186 if (!(adapter->class & I2C_CLASS_HWMON)) 187 if (!(adapter->class & I2C_CLASS_HWMON))
187 return 0; 188 return 0;
188 return i2c_detect(adapter, &addr_data, adm1021_detect); 189 return i2c_probe(adapter, &addr_data, adm1021_detect);
189} 190}
190 191
191static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) 192static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -196,15 +197,6 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
196 int err = 0; 197 int err = 0;
197 const char *type_name = ""; 198 const char *type_name = "";
198 199
199 /* Make sure we aren't probing the ISA bus!! This is just a safety check
200 at this moment; i2c_detect really won't call us. */
201#ifdef DEBUG
202 if (i2c_is_isa_adapter(adapter)) {
203 dev_dbg(&adapter->dev, "adm1021_detect called for an ISA bus adapter?!?\n");
204 return 0;
205 }
206#endif
207
208 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 200 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
209 goto error0; 201 goto error0;
210 202
@@ -295,6 +287,12 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
295 adm1021_init_client(new_client); 287 adm1021_init_client(new_client);
296 288
297 /* Register sysfs hooks */ 289 /* Register sysfs hooks */
290 data->class_dev = hwmon_device_register(&new_client->dev);
291 if (IS_ERR(data->class_dev)) {
292 err = PTR_ERR(data->class_dev);
293 goto error2;
294 }
295
298 device_create_file(&new_client->dev, &dev_attr_temp1_max); 296 device_create_file(&new_client->dev, &dev_attr_temp1_max);
299 device_create_file(&new_client->dev, &dev_attr_temp1_min); 297 device_create_file(&new_client->dev, &dev_attr_temp1_min);
300 device_create_file(&new_client->dev, &dev_attr_temp1_input); 298 device_create_file(&new_client->dev, &dev_attr_temp1_input);
@@ -305,6 +303,8 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
305 303
306 return 0; 304 return 0;
307 305
306error2:
307 i2c_detach_client(new_client);
308error1: 308error1:
309 kfree(data); 309 kfree(data);
310error0: 310error0:
@@ -322,14 +322,15 @@ static void adm1021_init_client(struct i2c_client *client)
322 322
323static int adm1021_detach_client(struct i2c_client *client) 323static int adm1021_detach_client(struct i2c_client *client)
324{ 324{
325 struct adm1021_data *data = i2c_get_clientdata(client);
325 int err; 326 int err;
326 327
327 if ((err = i2c_detach_client(client))) { 328 hwmon_device_unregister(data->class_dev);
328 dev_err(&client->dev, "Client deregistration failed, client not detached.\n"); 329
330 if ((err = i2c_detach_client(client)))
329 return err; 331 return err;
330 }
331 332
332 kfree(i2c_get_clientdata(client)); 333 kfree(data);
333 return 0; 334 return 0;
334} 335}
335 336
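The adm1021 change above is the template applied to every driver in this series: after i2c_attach_client() succeeds, register with the new hwmon class and keep the returned class_device; on failure, and in detach, tear things down in reverse order. A condensed sketch of the pattern with the driver-specific pieces elided:

/* Condensed sketch of the hwmon class registration pattern used above. */
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_data {
	struct i2c_client client;
	struct class_device *class_dev;	/* handle returned by the hwmon core */
	/* ... driver state ... */
};

static int example_detect_tail(struct i2c_client *new_client,
			       struct example_data *data)
{
	int err;

	data->class_dev = hwmon_device_register(&new_client->dev);
	if (IS_ERR(data->class_dev)) {
		err = PTR_ERR(data->class_dev);
		i2c_detach_client(new_client);	/* undo the earlier attach */
		kfree(data);
		return err;
	}
	/* device_create_file() calls for the sysfs attributes go here */
	return 0;
}

static int example_detach_client(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);
	int err;

	hwmon_device_unregister(data->class_dev);	/* class device first */
	if ((err = i2c_detach_client(client)))
		return err;
	kfree(data);
	return 0;
}

The class device points back at the i2c client's embedded struct device, so it is removed first, before the client is detached and its data freed.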
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index e452d0daf906..526b7ff179eb 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -50,8 +50,9 @@
50#include <linux/slab.h> 50#include <linux/slab.h>
51#include <linux/jiffies.h> 51#include <linux/jiffies.h>
52#include <linux/i2c.h> 52#include <linux/i2c.h>
53#include <linux/i2c-sensor.h> 53#include <linux/hwmon.h>
54#include <linux/i2c-vid.h> 54#include <linux/hwmon-vid.h>
55#include <linux/err.h>
55 56
56/* 57/*
57 * Addresses to scan 58 * Addresses to scan
@@ -60,13 +61,12 @@
60 */ 61 */
61 62
62static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 63static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
63static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
64 64
65/* 65/*
66 * Insmod parameters 66 * Insmod parameters
67 */ 67 */
68 68
69SENSORS_INSMOD_2(adm1025, ne1619); 69I2C_CLIENT_INSMOD_2(adm1025, ne1619);
70 70
71/* 71/*
72 * The ADM1025 registers 72 * The ADM1025 registers
@@ -132,6 +132,7 @@ static struct i2c_driver adm1025_driver = {
132 132
133struct adm1025_data { 133struct adm1025_data {
134 struct i2c_client client; 134 struct i2c_client client;
135 struct class_device *class_dev;
135 struct semaphore update_lock; 136 struct semaphore update_lock;
136 char valid; /* zero until following fields are valid */ 137 char valid; /* zero until following fields are valid */
137 unsigned long last_updated; /* in jiffies */ 138 unsigned long last_updated; /* in jiffies */
@@ -312,7 +313,7 @@ static int adm1025_attach_adapter(struct i2c_adapter *adapter)
312{ 313{
313 if (!(adapter->class & I2C_CLASS_HWMON)) 314 if (!(adapter->class & I2C_CLASS_HWMON))
314 return 0; 315 return 0;
315 return i2c_detect(adapter, &addr_data, adm1025_detect); 316 return i2c_probe(adapter, &addr_data, adm1025_detect);
316} 317}
317 318
318/* 319/*
@@ -416,6 +417,12 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
416 adm1025_init_client(new_client); 417 adm1025_init_client(new_client);
417 418
418 /* Register sysfs hooks */ 419 /* Register sysfs hooks */
420 data->class_dev = hwmon_device_register(&new_client->dev);
421 if (IS_ERR(data->class_dev)) {
422 err = PTR_ERR(data->class_dev);
423 goto exit_detach;
424 }
425
419 device_create_file(&new_client->dev, &dev_attr_in0_input); 426 device_create_file(&new_client->dev, &dev_attr_in0_input);
420 device_create_file(&new_client->dev, &dev_attr_in1_input); 427 device_create_file(&new_client->dev, &dev_attr_in1_input);
421 device_create_file(&new_client->dev, &dev_attr_in2_input); 428 device_create_file(&new_client->dev, &dev_attr_in2_input);
@@ -452,6 +459,8 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
452 459
453 return 0; 460 return 0;
454 461
462exit_detach:
463 i2c_detach_client(new_client);
455exit_free: 464exit_free:
456 kfree(data); 465 kfree(data);
457exit: 466exit:
@@ -464,7 +473,7 @@ static void adm1025_init_client(struct i2c_client *client)
464 struct adm1025_data *data = i2c_get_clientdata(client); 473 struct adm1025_data *data = i2c_get_clientdata(client);
465 int i; 474 int i;
466 475
467 data->vrm = i2c_which_vrm(); 476 data->vrm = vid_which_vrm();
468 477
469 /* 478 /*
470 * Set high limits 479 * Set high limits
@@ -502,15 +511,15 @@ static void adm1025_init_client(struct i2c_client *client)
502 511
503static int adm1025_detach_client(struct i2c_client *client) 512static int adm1025_detach_client(struct i2c_client *client)
504{ 513{
514 struct adm1025_data *data = i2c_get_clientdata(client);
505 int err; 515 int err;
506 516
507 if ((err = i2c_detach_client(client))) { 517 hwmon_device_unregister(data->class_dev);
508 dev_err(&client->dev, "Client deregistration failed, " 518
509 "client not detached.\n"); 519 if ((err = i2c_detach_client(client)))
510 return err; 520 return err;
511 }
512 521
513 kfree(i2c_get_clientdata(client)); 522 kfree(data);
514 return 0; 523 return 0;
515} 524}
516 525
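As in the two drivers above, address scanning moves from the old i2c-sensor layer to the i2c core: the normal_isa table is dropped and SENSORS_INSMOD_n becomes I2C_CLIENT_INSMOD_n, whose generated addr_data is handed to i2c_probe(). A minimal sketch of the resulting attach path (chip name hypothetical):

/* Sketch of i2c_probe()-based scanning after the i2c-sensor removal. */
#include <linux/i2c.h>

/* SMBus addresses the (hypothetical) chip may answer at. */
static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };

/* Generates addr_data plus the usual force/ignore module parameters. */
I2C_CLIENT_INSMOD_1(example_chip);

static int example_detect(struct i2c_adapter *adapter, int address, int kind)
{
	/* probe the address, allocate client data, attach, register hwmon */
	return 0;
}

static int example_attach_adapter(struct i2c_adapter *adapter)
{
	if (!(adapter->class & I2C_CLASS_HWMON))
		return 0;			/* only scan hwmon-class buses */
	return i2c_probe(adapter, &addr_data, example_detect);
}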
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index c8a7f47911f9..625158110fd4 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -28,16 +28,16 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/jiffies.h> 29#include <linux/jiffies.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include <linux/i2c-sensor.h> 31#include <linux/hwmon.h>
32#include <linux/i2c-vid.h>
33#include <linux/hwmon-sysfs.h> 32#include <linux/hwmon-sysfs.h>
33#include <linux/hwmon-vid.h>
34#include <linux/err.h>
34 35
35/* Addresses to scan */ 36/* Addresses to scan */
36static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 37static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
37static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
38 38
39/* Insmod parameters */ 39/* Insmod parameters */
40SENSORS_INSMOD_1(adm1026); 40I2C_CLIENT_INSMOD_1(adm1026);
41 41
42static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, 42static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
43 -1, -1, -1, -1, -1, -1, -1, -1 }; 43 -1, -1, -1, -1, -1, -1, -1, -1 };
@@ -259,6 +259,7 @@ struct pwm_data {
259 259
260struct adm1026_data { 260struct adm1026_data {
261 struct i2c_client client; 261 struct i2c_client client;
262 struct class_device *class_dev;
262 struct semaphore lock; 263 struct semaphore lock;
263 enum chips type; 264 enum chips type;
264 265
@@ -319,13 +320,15 @@ int adm1026_attach_adapter(struct i2c_adapter *adapter)
319 if (!(adapter->class & I2C_CLASS_HWMON)) { 320 if (!(adapter->class & I2C_CLASS_HWMON)) {
320 return 0; 321 return 0;
321 } 322 }
322 return i2c_detect(adapter, &addr_data, adm1026_detect); 323 return i2c_probe(adapter, &addr_data, adm1026_detect);
323} 324}
324 325
325int adm1026_detach_client(struct i2c_client *client) 326int adm1026_detach_client(struct i2c_client *client)
326{ 327{
328 struct adm1026_data *data = i2c_get_clientdata(client);
329 hwmon_device_unregister(data->class_dev);
327 i2c_detach_client(client); 330 i2c_detach_client(client);
328 kfree(i2c_get_clientdata(client)); 331 kfree(data);
329 return 0; 332 return 0;
330} 333}
331 334
@@ -1549,12 +1552,18 @@ int adm1026_detect(struct i2c_adapter *adapter, int address,
1549 goto exitfree; 1552 goto exitfree;
1550 1553
1551 /* Set the VRM version */ 1554 /* Set the VRM version */
1552 data->vrm = i2c_which_vrm(); 1555 data->vrm = vid_which_vrm();
1553 1556
1554 /* Initialize the ADM1026 chip */ 1557 /* Initialize the ADM1026 chip */
1555 adm1026_init_client(new_client); 1558 adm1026_init_client(new_client);
1556 1559
1557 /* Register sysfs hooks */ 1560 /* Register sysfs hooks */
1561 data->class_dev = hwmon_device_register(&new_client->dev);
1562 if (IS_ERR(data->class_dev)) {
1563 err = PTR_ERR(data->class_dev);
1564 goto exitdetach;
1565 }
1566
1558 device_create_file(&new_client->dev, &sensor_dev_attr_in0_input.dev_attr); 1567 device_create_file(&new_client->dev, &sensor_dev_attr_in0_input.dev_attr);
1559 device_create_file(&new_client->dev, &sensor_dev_attr_in0_max.dev_attr); 1568 device_create_file(&new_client->dev, &sensor_dev_attr_in0_max.dev_attr);
1560 device_create_file(&new_client->dev, &sensor_dev_attr_in0_min.dev_attr); 1569 device_create_file(&new_client->dev, &sensor_dev_attr_in0_min.dev_attr);
@@ -1690,6 +1699,8 @@ int adm1026_detect(struct i2c_adapter *adapter, int address,
1690 return 0; 1699 return 0;
1691 1700
1692 /* Error out and cleanup code */ 1701 /* Error out and cleanup code */
1702exitdetach:
1703 i2c_detach_client(new_client);
1693exitfree: 1704exitfree:
1694 kfree(data); 1705 kfree(data);
1695exit: 1706exit:
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 936250957270..58338ed7c8a1 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -26,7 +26,8 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-sensor.h> 29#include <linux/hwmon.h>
30#include <linux/err.h>
30 31
31/* Following macros takes channel parameter starting from 0 to 2 */ 32/* Following macros takes channel parameter starting from 0 to 2 */
32#define ADM1031_REG_FAN_SPEED(nr) (0x08 + (nr)) 33#define ADM1031_REG_FAN_SPEED(nr) (0x08 + (nr))
@@ -59,16 +60,16 @@
59 60
60/* Addresses to scan */ 61/* Addresses to scan */
61static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 62static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
62static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
63 63
64/* Insmod parameters */ 64/* Insmod parameters */
65SENSORS_INSMOD_2(adm1030, adm1031); 65I2C_CLIENT_INSMOD_2(adm1030, adm1031);
66 66
67typedef u8 auto_chan_table_t[8][2]; 67typedef u8 auto_chan_table_t[8][2];
68 68
69/* Each client has this additional data */ 69/* Each client has this additional data */
70struct adm1031_data { 70struct adm1031_data {
71 struct i2c_client client; 71 struct i2c_client client;
72 struct class_device *class_dev;
72 struct semaphore update_lock; 73 struct semaphore update_lock;
73 int chip_type; 74 int chip_type;
74 char valid; /* !=0 if following fields are valid */ 75 char valid; /* !=0 if following fields are valid */
@@ -725,10 +726,10 @@ static int adm1031_attach_adapter(struct i2c_adapter *adapter)
725{ 726{
726 if (!(adapter->class & I2C_CLASS_HWMON)) 727 if (!(adapter->class & I2C_CLASS_HWMON))
727 return 0; 728 return 0;
728 return i2c_detect(adapter, &addr_data, adm1031_detect); 729 return i2c_probe(adapter, &addr_data, adm1031_detect);
729} 730}
730 731
731/* This function is called by i2c_detect */ 732/* This function is called by i2c_probe */
732static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) 733static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
733{ 734{
734 struct i2c_client *new_client; 735 struct i2c_client *new_client;
@@ -788,6 +789,12 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
788 adm1031_init_client(new_client); 789 adm1031_init_client(new_client);
789 790
790 /* Register sysfs hooks */ 791 /* Register sysfs hooks */
792 data->class_dev = hwmon_device_register(&new_client->dev);
793 if (IS_ERR(data->class_dev)) {
794 err = PTR_ERR(data->class_dev);
795 goto exit_detach;
796 }
797
791 device_create_file(&new_client->dev, &dev_attr_fan1_input); 798 device_create_file(&new_client->dev, &dev_attr_fan1_input);
792 device_create_file(&new_client->dev, &dev_attr_fan1_div); 799 device_create_file(&new_client->dev, &dev_attr_fan1_div);
793 device_create_file(&new_client->dev, &dev_attr_fan1_min); 800 device_create_file(&new_client->dev, &dev_attr_fan1_min);
@@ -833,6 +840,8 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
833 840
834 return 0; 841 return 0;
835 842
843exit_detach:
844 i2c_detach_client(new_client);
836exit_free: 845exit_free:
837 kfree(data); 846 kfree(data);
838exit: 847exit:
@@ -841,11 +850,14 @@ exit:
841 850
842static int adm1031_detach_client(struct i2c_client *client) 851static int adm1031_detach_client(struct i2c_client *client)
843{ 852{
853 struct adm1031_data *data = i2c_get_clientdata(client);
844 int ret; 854 int ret;
855
856 hwmon_device_unregister(data->class_dev);
845 if ((ret = i2c_detach_client(client)) != 0) { 857 if ((ret = i2c_detach_client(client)) != 0) {
846 return ret; 858 return ret;
847 } 859 }
848 kfree(i2c_get_clientdata(client)); 860 kfree(data);
849 return 0; 861 return 0;
850} 862}
851 863
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index ce2a6eb93f6e..bc7faef162f7 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -45,17 +45,16 @@
45#include <linux/module.h> 45#include <linux/module.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/i2c.h> 47#include <linux/i2c.h>
48#include <linux/i2c-sensor.h> 48#include <linux/hwmon.h>
49#include <linux/i2c-vid.h> 49#include <linux/hwmon-vid.h>
50#include <linux/err.h>
50 51
51/* Addresses to scan */ 52/* Addresses to scan */
52static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, 53static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
53 I2C_CLIENT_END }; 54 I2C_CLIENT_END };
54 55
55static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
56
57/* Insmod parameters */ 56/* Insmod parameters */
58SENSORS_INSMOD_3(adm9240, ds1780, lm81); 57I2C_CLIENT_INSMOD_3(adm9240, ds1780, lm81);
59 58
60/* ADM9240 registers */ 59/* ADM9240 registers */
61#define ADM9240_REG_MAN_ID 0x3e 60#define ADM9240_REG_MAN_ID 0x3e
@@ -150,6 +149,7 @@ static struct i2c_driver adm9240_driver = {
150struct adm9240_data { 149struct adm9240_data {
151 enum chips type; 150 enum chips type;
152 struct i2c_client client; 151 struct i2c_client client;
152 struct class_device *class_dev;
153 struct semaphore update_lock; 153 struct semaphore update_lock;
154 char valid; 154 char valid;
155 unsigned long last_updated_measure; 155 unsigned long last_updated_measure;
@@ -582,6 +582,12 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
582 adm9240_init_client(new_client); 582 adm9240_init_client(new_client);
583 583
584 /* populate sysfs filesystem */ 584 /* populate sysfs filesystem */
585 data->class_dev = hwmon_device_register(&new_client->dev);
586 if (IS_ERR(data->class_dev)) {
587 err = PTR_ERR(data->class_dev);
588 goto exit_detach;
589 }
590
585 device_create_file(&new_client->dev, &dev_attr_in0_input); 591 device_create_file(&new_client->dev, &dev_attr_in0_input);
586 device_create_file(&new_client->dev, &dev_attr_in0_min); 592 device_create_file(&new_client->dev, &dev_attr_in0_min);
587 device_create_file(&new_client->dev, &dev_attr_in0_max); 593 device_create_file(&new_client->dev, &dev_attr_in0_max);
@@ -615,6 +621,9 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
615 device_create_file(&new_client->dev, &dev_attr_cpu0_vid); 621 device_create_file(&new_client->dev, &dev_attr_cpu0_vid);
616 622
617 return 0; 623 return 0;
624
625exit_detach:
626 i2c_detach_client(new_client);
618exit_free: 627exit_free:
619 kfree(data); 628 kfree(data);
620exit: 629exit:
@@ -625,20 +634,20 @@ static int adm9240_attach_adapter(struct i2c_adapter *adapter)
625{ 634{
626 if (!(adapter->class & I2C_CLASS_HWMON)) 635 if (!(adapter->class & I2C_CLASS_HWMON))
627 return 0; 636 return 0;
628 return i2c_detect(adapter, &addr_data, adm9240_detect); 637 return i2c_probe(adapter, &addr_data, adm9240_detect);
629} 638}
630 639
631static int adm9240_detach_client(struct i2c_client *client) 640static int adm9240_detach_client(struct i2c_client *client)
632{ 641{
642 struct adm9240_data *data = i2c_get_clientdata(client);
633 int err; 643 int err;
634 644
635 if ((err = i2c_detach_client(client))) { 645 hwmon_device_unregister(data->class_dev);
636 dev_err(&client->dev, "Client deregistration failed, " 646
637 "client not detached.\n"); 647 if ((err = i2c_detach_client(client)))
638 return err; 648 return err;
639 }
640 649
641 kfree(i2c_get_clientdata(client)); 650 kfree(data);
642 return 0; 651 return 0;
643} 652}
644 653
@@ -648,7 +657,7 @@ static void adm9240_init_client(struct i2c_client *client)
648 u8 conf = adm9240_read_value(client, ADM9240_REG_CONFIG); 657 u8 conf = adm9240_read_value(client, ADM9240_REG_CONFIG);
649 u8 mode = adm9240_read_value(client, ADM9240_REG_TEMP_CONF) & 3; 658 u8 mode = adm9240_read_value(client, ADM9240_REG_TEMP_CONF) & 3;
650 659
651 data->vrm = i2c_which_vrm(); /* need this to report vid as mV */ 660 data->vrm = vid_which_vrm(); /* need this to report vid as mV */
652 661
653 dev_info(&client->dev, "Using VRM: %d.%d\n", data->vrm / 10, 662 dev_info(&client->dev, "Using VRM: %d.%d\n", data->vrm / 10,
654 data->vrm % 10); 663 data->vrm % 10);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 70d996d6fe0a..8e34855a6274 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -39,8 +39,9 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/i2c.h> 41#include <linux/i2c.h>
42#include <linux/i2c-sensor.h> 42#include <linux/hwmon.h>
43#include <linux/i2c-vid.h> 43#include <linux/hwmon-vid.h>
44#include <linux/err.h>
44#include <linux/init.h> 45#include <linux/init.h>
45#include <linux/jiffies.h> 46#include <linux/jiffies.h>
46#include "lm75.h" 47#include "lm75.h"
@@ -54,11 +55,8 @@
54/* I2C addresses to scan */ 55/* I2C addresses to scan */
55static unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END }; 56static unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
56 57
57/* ISA addresses to scan (none) */
58static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
59
60/* Insmod parameters */ 58/* Insmod parameters */
61SENSORS_INSMOD_1(asb100); 59I2C_CLIENT_INSMOD_1(asb100);
62I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: " 60I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
63 "{bus, clientaddr, subclientaddr1, subclientaddr2}"); 61 "{bus, clientaddr, subclientaddr1, subclientaddr2}");
64 62
@@ -183,6 +181,7 @@ static u8 DIV_TO_REG(long val)
183 dynamically allocated, at the same time the client itself is allocated. */ 181 dynamically allocated, at the same time the client itself is allocated. */
184struct asb100_data { 182struct asb100_data {
185 struct i2c_client client; 183 struct i2c_client client;
184 struct class_device *class_dev;
186 struct semaphore lock; 185 struct semaphore lock;
187 enum chips type; 186 enum chips type;
188 187
@@ -621,7 +620,7 @@ static int asb100_attach_adapter(struct i2c_adapter *adapter)
621{ 620{
622 if (!(adapter->class & I2C_CLASS_HWMON)) 621 if (!(adapter->class & I2C_CLASS_HWMON))
623 return 0; 622 return 0;
624 return i2c_detect(adapter, &addr_data, asb100_detect); 623 return i2c_probe(adapter, &addr_data, asb100_detect);
625} 624}
626 625
627static int asb100_detect_subclients(struct i2c_adapter *adapter, int address, 626static int asb100_detect_subclients(struct i2c_adapter *adapter, int address,
@@ -714,14 +713,6 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind)
714 struct i2c_client *new_client; 713 struct i2c_client *new_client;
715 struct asb100_data *data; 714 struct asb100_data *data;
716 715
717 /* asb100 is SMBus only */
718 if (i2c_is_isa_adapter(adapter)) {
719 pr_debug("asb100.o: detect failed, "
720 "cannot attach to legacy adapter!\n");
721 err = -ENODEV;
722 goto ERROR0;
723 }
724
725 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 716 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
726 pr_debug("asb100.o: detect failed, " 717 pr_debug("asb100.o: detect failed, "
727 "smbus byte data not supported!\n"); 718 "smbus byte data not supported!\n");
@@ -821,6 +812,12 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind)
821 data->fan_min[2] = asb100_read_value(new_client, ASB100_REG_FAN_MIN(2)); 812 data->fan_min[2] = asb100_read_value(new_client, ASB100_REG_FAN_MIN(2));
822 813
823 /* Register sysfs hooks */ 814 /* Register sysfs hooks */
815 data->class_dev = hwmon_device_register(&new_client->dev);
816 if (IS_ERR(data->class_dev)) {
817 err = PTR_ERR(data->class_dev);
818 goto ERROR3;
819 }
820
824 device_create_file_in(new_client, 0); 821 device_create_file_in(new_client, 0);
825 device_create_file_in(new_client, 1); 822 device_create_file_in(new_client, 1);
826 device_create_file_in(new_client, 2); 823 device_create_file_in(new_client, 2);
@@ -847,6 +844,11 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind)
847 844
848 return 0; 845 return 0;
849 846
847ERROR3:
848 i2c_detach_client(data->lm75[1]);
849 i2c_detach_client(data->lm75[0]);
850 kfree(data->lm75[1]);
851 kfree(data->lm75[0]);
850ERROR2: 852ERROR2:
851 i2c_detach_client(new_client); 853 i2c_detach_client(new_client);
852ERROR1: 854ERROR1:
@@ -857,21 +859,23 @@ ERROR0:
857 859
858static int asb100_detach_client(struct i2c_client *client) 860static int asb100_detach_client(struct i2c_client *client)
859{ 861{
862 struct asb100_data *data = i2c_get_clientdata(client);
860 int err; 863 int err;
861 864
862 if ((err = i2c_detach_client(client))) { 865 /* main client */
863 dev_err(&client->dev, "client deregistration failed; " 866 if (data)
864 "client not detached.\n"); 867 hwmon_device_unregister(data->class_dev);
868
869 if ((err = i2c_detach_client(client)))
865 return err; 870 return err;
866 }
867 871
868 if (i2c_get_clientdata(client)==NULL) { 872 /* main client */
869 /* subclients */ 873 if (data)
874 kfree(data);
875
876 /* subclient */
877 else
870 kfree(client); 878 kfree(client);
871 } else {
872 /* main client */
873 kfree(i2c_get_clientdata(client));
874 }
875 879
876 return 0; 880 return 0;
877} 881}
@@ -969,7 +973,7 @@ static void asb100_init_client(struct i2c_client *client)
969 973
970 vid = asb100_read_value(client, ASB100_REG_VID_FANDIV) & 0x0f; 974 vid = asb100_read_value(client, ASB100_REG_VID_FANDIV) & 0x0f;
971 vid |= (asb100_read_value(client, ASB100_REG_CHIPID) & 0x01) << 4; 975 vid |= (asb100_read_value(client, ASB100_REG_CHIPID) & 0x01) << 4;
972 data->vrm = i2c_which_vrm(); 976 data->vrm = vid_which_vrm();
973 vid = vid_from_reg(vid, data->vrm); 977 vid = vid_from_reg(vid, data->vrm);
974 978
975 /* Start monitoring */ 979 /* Start monitoring */
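asb100 is the one driver in this batch with LM75 subclients, so its new detach path has to distinguish the main client, which owns the asb100_data and the hwmon class device, from the subclients, which carry no client data. The hunk above, read as straight code, amounts to this sketch:

/* Sketch of the asb100 detach logic: main client vs. LM75 subclients. */
static int example_detach_client(struct i2c_client *client)
{
	/* only the main client stored driver data; subclients left it NULL */
	struct asb100_data *data = i2c_get_clientdata(client);
	int err;

	if (data)				/* main client */
		hwmon_device_unregister(data->class_dev);

	if ((err = i2c_detach_client(client)))
		return err;

	if (data)				/* main client: free its data */
		kfree(data);
	else					/* subclient: free the client itself */
		kfree(client);

	return 0;
}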
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index fca3fc1cef72..deb4d34c9539 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -23,8 +23,9 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/jiffies.h> 24#include <linux/jiffies.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/i2c-sensor.h> 26#include <linux/hwmon.h>
27#include <linux/i2c-vid.h> 27#include <linux/hwmon-vid.h>
28#include <linux/err.h>
28 29
29MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
30MODULE_DESCRIPTION("System voltages control via Attansic ATXP1"); 31MODULE_DESCRIPTION("System voltages control via Attansic ATXP1");
@@ -40,9 +41,8 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
40#define ATXP1_GPIO1MASK 0x0f 41#define ATXP1_GPIO1MASK 0x0f
41 42
42static unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; 43static unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
43static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
44 44
45SENSORS_INSMOD_1(atxp1); 45I2C_CLIENT_INSMOD_1(atxp1);
46 46
47static int atxp1_attach_adapter(struct i2c_adapter * adapter); 47static int atxp1_attach_adapter(struct i2c_adapter * adapter);
48static int atxp1_detach_client(struct i2c_client * client); 48static int atxp1_detach_client(struct i2c_client * client);
@@ -59,6 +59,7 @@ static struct i2c_driver atxp1_driver = {
59 59
60struct atxp1_data { 60struct atxp1_data {
61 struct i2c_client client; 61 struct i2c_client client;
62 struct class_device *class_dev;
62 struct semaphore update_lock; 63 struct semaphore update_lock;
63 unsigned long last_updated; 64 unsigned long last_updated;
64 u8 valid; 65 u8 valid;
@@ -252,7 +253,7 @@ static DEVICE_ATTR(gpio2, S_IRUGO | S_IWUSR, atxp1_showgpio2, atxp1_storegpio2);
252 253
253static int atxp1_attach_adapter(struct i2c_adapter *adapter) 254static int atxp1_attach_adapter(struct i2c_adapter *adapter)
254{ 255{
255 return i2c_detect(adapter, &addr_data, &atxp1_detect); 256 return i2c_probe(adapter, &addr_data, &atxp1_detect);
256}; 257};
257 258
258static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind) 259static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -295,7 +296,7 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
295 } 296 }
296 297
297 /* Get VRM */ 298 /* Get VRM */
298 data->vrm = i2c_which_vrm(); 299 data->vrm = vid_which_vrm();
299 300
300 if ((data->vrm != 90) && (data->vrm != 91)) { 301 if ((data->vrm != 90) && (data->vrm != 91)) {
301 dev_err(&new_client->dev, "Not supporting VRM %d.%d\n", 302 dev_err(&new_client->dev, "Not supporting VRM %d.%d\n",
@@ -317,6 +318,12 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
317 goto exit_free; 318 goto exit_free;
318 } 319 }
319 320
321 data->class_dev = hwmon_device_register(&new_client->dev);
322 if (IS_ERR(data->class_dev)) {
323 err = PTR_ERR(data->class_dev);
324 goto exit_detach;
325 }
326
320 device_create_file(&new_client->dev, &dev_attr_gpio1); 327 device_create_file(&new_client->dev, &dev_attr_gpio1);
321 device_create_file(&new_client->dev, &dev_attr_gpio2); 328 device_create_file(&new_client->dev, &dev_attr_gpio2);
322 device_create_file(&new_client->dev, &dev_attr_cpu0_vid); 329 device_create_file(&new_client->dev, &dev_attr_cpu0_vid);
@@ -326,6 +333,8 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
326 333
327 return 0; 334 return 0;
328 335
336exit_detach:
337 i2c_detach_client(new_client);
329exit_free: 338exit_free:
330 kfree(data); 339 kfree(data);
331exit: 340exit:
@@ -334,14 +343,17 @@ exit:
334 343
335static int atxp1_detach_client(struct i2c_client * client) 344static int atxp1_detach_client(struct i2c_client * client)
336{ 345{
346 struct atxp1_data * data = i2c_get_clientdata(client);
337 int err; 347 int err;
338 348
349 hwmon_device_unregister(data->class_dev);
350
339 err = i2c_detach_client(client); 351 err = i2c_detach_client(client);
340 352
341 if (err) 353 if (err)
342 dev_err(&client->dev, "Failed to detach client.\n"); 354 dev_err(&client->dev, "Failed to detach client.\n");
343 else 355 else
344 kfree(i2c_get_clientdata(client)); 356 kfree(data);
345 357
346 return err; 358 return err;
347}; 359};
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 5360d58804f6..b0199e063d0e 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -26,16 +26,16 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-sensor.h> 29#include <linux/hwmon.h>
30#include <linux/err.h>
30#include "lm75.h" 31#include "lm75.h"
31 32
32/* Addresses to scan */ 33/* Addresses to scan */
33static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 34static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
34 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 35 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
35static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
36 36
37/* Insmod parameters */ 37/* Insmod parameters */
38SENSORS_INSMOD_1(ds1621); 38I2C_CLIENT_INSMOD_1(ds1621);
39static int polarity = -1; 39static int polarity = -1;
40module_param(polarity, int, 0); 40module_param(polarity, int, 0);
41MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low"); 41MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low");
@@ -71,6 +71,7 @@ MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low")
71/* Each client has this additional data */ 71/* Each client has this additional data */
72struct ds1621_data { 72struct ds1621_data {
73 struct i2c_client client; 73 struct i2c_client client;
74 struct class_device *class_dev;
74 struct semaphore update_lock; 75 struct semaphore update_lock;
75 char valid; /* !=0 if following fields are valid */ 76 char valid; /* !=0 if following fields are valid */
76 unsigned long last_updated; /* In jiffies */ 77 unsigned long last_updated; /* In jiffies */
@@ -179,10 +180,10 @@ static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max);
179 180
180static int ds1621_attach_adapter(struct i2c_adapter *adapter) 181static int ds1621_attach_adapter(struct i2c_adapter *adapter)
181{ 182{
182 return i2c_detect(adapter, &addr_data, ds1621_detect); 183 return i2c_probe(adapter, &addr_data, ds1621_detect);
183} 184}
184 185
185/* This function is called by i2c_detect */ 186/* This function is called by i2c_probe */
186int ds1621_detect(struct i2c_adapter *adapter, int address, 187int ds1621_detect(struct i2c_adapter *adapter, int address,
187 int kind) 188 int kind)
188{ 189{
@@ -250,6 +251,12 @@ int ds1621_detect(struct i2c_adapter *adapter, int address,
250 ds1621_init_client(new_client); 251 ds1621_init_client(new_client);
251 252
252 /* Register sysfs hooks */ 253 /* Register sysfs hooks */
254 data->class_dev = hwmon_device_register(&new_client->dev);
255 if (IS_ERR(data->class_dev)) {
256 err = PTR_ERR(data->class_dev);
257 goto exit_detach;
258 }
259
253 device_create_file(&new_client->dev, &dev_attr_alarms); 260 device_create_file(&new_client->dev, &dev_attr_alarms);
254 device_create_file(&new_client->dev, &dev_attr_temp1_input); 261 device_create_file(&new_client->dev, &dev_attr_temp1_input);
255 device_create_file(&new_client->dev, &dev_attr_temp1_min); 262 device_create_file(&new_client->dev, &dev_attr_temp1_min);
@@ -259,6 +266,8 @@ int ds1621_detect(struct i2c_adapter *adapter, int address,
259 266
260/* OK, this is not exactly good programming practice, usually. But it is 267/* OK, this is not exactly good programming practice, usually. But it is
261 very code-efficient in this case. */ 268 very code-efficient in this case. */
269 exit_detach:
270 i2c_detach_client(new_client);
262 exit_free: 271 exit_free:
263 kfree(data); 272 kfree(data);
264 exit: 273 exit:
@@ -267,15 +276,15 @@ int ds1621_detect(struct i2c_adapter *adapter, int address,
267 276
268static int ds1621_detach_client(struct i2c_client *client) 277static int ds1621_detach_client(struct i2c_client *client)
269{ 278{
279 struct ds1621_data *data = i2c_get_clientdata(client);
270 int err; 280 int err;
271 281
272 if ((err = i2c_detach_client(client))) { 282 hwmon_device_unregister(data->class_dev);
273 dev_err(&client->dev, "Client deregistration failed, " 283
274 "client not detached.\n"); 284 if ((err = i2c_detach_client(client)))
275 return err; 285 return err;
276 }
277 286
278 kfree(i2c_get_clientdata(client)); 287 kfree(data);
279 288
280 return 0; 289 return 0;
281} 290}
diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c
index da411741c2c5..eef6061d786b 100644
--- a/drivers/hwmon/fscher.c
+++ b/drivers/hwmon/fscher.c
@@ -31,20 +31,20 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/i2c-sensor.h> 34#include <linux/hwmon.h>
35#include <linux/err.h>
35 36
36/* 37/*
37 * Addresses to scan 38 * Addresses to scan
38 */ 39 */
39 40
40static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; 41static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
41static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
42 42
43/* 43/*
44 * Insmod parameters 44 * Insmod parameters
45 */ 45 */
46 46
47SENSORS_INSMOD_1(fscher); 47I2C_CLIENT_INSMOD_1(fscher);
48 48
49/* 49/*
50 * The FSCHER registers 50 * The FSCHER registers
@@ -132,6 +132,7 @@ static struct i2c_driver fscher_driver = {
132 132
133struct fscher_data { 133struct fscher_data {
134 struct i2c_client client; 134 struct i2c_client client;
135 struct class_device *class_dev;
135 struct semaphore update_lock; 136 struct semaphore update_lock;
136 char valid; /* zero until following fields are valid */ 137 char valid; /* zero until following fields are valid */
137 unsigned long last_updated; /* in jiffies */ 138 unsigned long last_updated; /* in jiffies */
@@ -287,7 +288,7 @@ static int fscher_attach_adapter(struct i2c_adapter *adapter)
287{ 288{
288 if (!(adapter->class & I2C_CLASS_HWMON)) 289 if (!(adapter->class & I2C_CLASS_HWMON))
289 return 0; 290 return 0;
290 return i2c_detect(adapter, &addr_data, fscher_detect); 291 return i2c_probe(adapter, &addr_data, fscher_detect);
291} 292}
292 293
293static int fscher_detect(struct i2c_adapter *adapter, int address, int kind) 294static int fscher_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -341,6 +342,12 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind)
341 fscher_init_client(new_client); 342 fscher_init_client(new_client);
342 343
343 /* Register sysfs hooks */ 344 /* Register sysfs hooks */
345 data->class_dev = hwmon_device_register(&new_client->dev);
346 if (IS_ERR(data->class_dev)) {
347 err = PTR_ERR(data->class_dev);
348 goto exit_detach;
349 }
350
344 device_create_file_revision(new_client); 351 device_create_file_revision(new_client);
345 device_create_file_alarms(new_client); 352 device_create_file_alarms(new_client);
346 device_create_file_control(new_client); 353 device_create_file_control(new_client);
@@ -360,6 +367,8 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind)
360 367
361 return 0; 368 return 0;
362 369
370exit_detach:
371 i2c_detach_client(new_client);
363exit_free: 372exit_free:
364 kfree(data); 373 kfree(data);
365exit: 374exit:
@@ -368,15 +377,15 @@ exit:
368 377
369static int fscher_detach_client(struct i2c_client *client) 378static int fscher_detach_client(struct i2c_client *client)
370{ 379{
380 struct fscher_data *data = i2c_get_clientdata(client);
371 int err; 381 int err;
372 382
373 if ((err = i2c_detach_client(client))) { 383 hwmon_device_unregister(data->class_dev);
374 dev_err(&client->dev, "Client deregistration failed, " 384
375 "client not detached.\n"); 385 if ((err = i2c_detach_client(client)))
376 return err; 386 return err;
377 }
378 387
379 kfree(i2c_get_clientdata(client)); 388 kfree(data);
380 return 0; 389 return 0;
381} 390}
382 391
diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c
index 301ae98bd0ad..5fc77a5fed07 100644
--- a/drivers/hwmon/fscpos.c
+++ b/drivers/hwmon/fscpos.c
@@ -34,19 +34,19 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/jiffies.h> 35#include <linux/jiffies.h>
36#include <linux/i2c.h> 36#include <linux/i2c.h>
37#include <linux/i2c-sensor.h>
38#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/hwmon.h>
39#include <linux/err.h>
39 40
40/* 41/*
41 * Addresses to scan 42 * Addresses to scan
42 */ 43 */
43static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; 44static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
44static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
45 45
46/* 46/*
47 * Insmod parameters 47 * Insmod parameters
48 */ 48 */
49SENSORS_INSMOD_1(fscpos); 49I2C_CLIENT_INSMOD_1(fscpos);
50 50
51/* 51/*
52 * The FSCPOS registers 52 * The FSCPOS registers
@@ -113,6 +113,7 @@ static struct i2c_driver fscpos_driver = {
113 */ 113 */
114struct fscpos_data { 114struct fscpos_data {
115 struct i2c_client client; 115 struct i2c_client client;
116 struct class_device *class_dev;
116 struct semaphore update_lock; 117 struct semaphore update_lock;
117 char valid; /* 0 until following fields are valid */ 118 char valid; /* 0 until following fields are valid */
118 unsigned long last_updated; /* In jiffies */ 119 unsigned long last_updated; /* In jiffies */
@@ -434,7 +435,7 @@ static int fscpos_attach_adapter(struct i2c_adapter *adapter)
434{ 435{
435 if (!(adapter->class & I2C_CLASS_HWMON)) 436 if (!(adapter->class & I2C_CLASS_HWMON))
436 return 0; 437 return 0;
437 return i2c_detect(adapter, &addr_data, fscpos_detect); 438 return i2c_probe(adapter, &addr_data, fscpos_detect);
438} 439}
439 440
440int fscpos_detect(struct i2c_adapter *adapter, int address, int kind) 441int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -496,6 +497,12 @@ int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
496 dev_info(&new_client->dev, "Found fscpos chip, rev %u\n", data->revision); 497 dev_info(&new_client->dev, "Found fscpos chip, rev %u\n", data->revision);
497 498
498 /* Register sysfs hooks */ 499 /* Register sysfs hooks */
500 data->class_dev = hwmon_device_register(&new_client->dev);
501 if (IS_ERR(data->class_dev)) {
502 err = PTR_ERR(data->class_dev);
503 goto exit_detach;
504 }
505
499 device_create_file(&new_client->dev, &dev_attr_event); 506 device_create_file(&new_client->dev, &dev_attr_event);
500 device_create_file(&new_client->dev, &dev_attr_in0_input); 507 device_create_file(&new_client->dev, &dev_attr_in0_input);
501 device_create_file(&new_client->dev, &dev_attr_in1_input); 508 device_create_file(&new_client->dev, &dev_attr_in1_input);
@@ -526,6 +533,8 @@ int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
526 533
527 return 0; 534 return 0;
528 535
536exit_detach:
537 i2c_detach_client(new_client);
529exit_free: 538exit_free:
530 kfree(data); 539 kfree(data);
531exit: 540exit:
@@ -534,14 +543,14 @@ exit:
534 543
535static int fscpos_detach_client(struct i2c_client *client) 544static int fscpos_detach_client(struct i2c_client *client)
536{ 545{
546 struct fscpos_data *data = i2c_get_clientdata(client);
537 int err; 547 int err;
538 548
539 if ((err = i2c_detach_client(client))) { 549 hwmon_device_unregister(data->class_dev);
540 dev_err(&client->dev, "Client deregistration failed, client" 550
541 " not detached.\n"); 551 if ((err = i2c_detach_client(client)))
542 return err; 552 return err;
543 } 553 kfree(data);
544 kfree(i2c_get_clientdata(client));
545 return 0; 554 return 0;
546} 555}
547 556
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 6bedf729dcf5..256b9323c84b 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -41,14 +41,14 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/jiffies.h> 42#include <linux/jiffies.h>
43#include <linux/i2c.h> 43#include <linux/i2c.h>
44#include <linux/i2c-sensor.h> 44#include <linux/hwmon.h>
45#include <linux/err.h>
45 46
46/* Addresses to scan */ 47/* Addresses to scan */
47static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 48static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
48static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
49 49
50/* Insmod parameters */ 50/* Insmod parameters */
51SENSORS_INSMOD_2(gl518sm_r00, gl518sm_r80); 51I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80);
52 52
53/* Many GL518 constants specified below */ 53/* Many GL518 constants specified below */
54 54
@@ -117,6 +117,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
117/* Each client has this additional data */ 117/* Each client has this additional data */
118struct gl518_data { 118struct gl518_data {
119 struct i2c_client client; 119 struct i2c_client client;
120 struct class_device *class_dev;
120 enum chips type; 121 enum chips type;
121 122
122 struct semaphore update_lock; 123 struct semaphore update_lock;
@@ -346,7 +347,7 @@ static int gl518_attach_adapter(struct i2c_adapter *adapter)
346{ 347{
347 if (!(adapter->class & I2C_CLASS_HWMON)) 348 if (!(adapter->class & I2C_CLASS_HWMON))
348 return 0; 349 return 0;
349 return i2c_detect(adapter, &addr_data, gl518_detect); 350 return i2c_probe(adapter, &addr_data, gl518_detect);
350} 351}
351 352
352static int gl518_detect(struct i2c_adapter *adapter, int address, int kind) 353static int gl518_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -419,6 +420,12 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind)
419 gl518_init_client((struct i2c_client *) new_client); 420 gl518_init_client((struct i2c_client *) new_client);
420 421
421 /* Register sysfs hooks */ 422 /* Register sysfs hooks */
423 data->class_dev = hwmon_device_register(&new_client->dev);
424 if (IS_ERR(data->class_dev)) {
425 err = PTR_ERR(data->class_dev);
426 goto exit_detach;
427 }
428
422 device_create_file(&new_client->dev, &dev_attr_in0_input); 429 device_create_file(&new_client->dev, &dev_attr_in0_input);
423 device_create_file(&new_client->dev, &dev_attr_in1_input); 430 device_create_file(&new_client->dev, &dev_attr_in1_input);
424 device_create_file(&new_client->dev, &dev_attr_in2_input); 431 device_create_file(&new_client->dev, &dev_attr_in2_input);
@@ -450,6 +457,8 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind)
450/* OK, this is not exactly good programming practice, usually. But it is 457/* OK, this is not exactly good programming practice, usually. But it is
451 very code-efficient in this case. */ 458 very code-efficient in this case. */
452 459
460exit_detach:
461 i2c_detach_client(new_client);
453exit_free: 462exit_free:
454 kfree(data); 463 kfree(data);
455exit: 464exit:
@@ -477,16 +486,15 @@ static void gl518_init_client(struct i2c_client *client)
477 486
478static int gl518_detach_client(struct i2c_client *client) 487static int gl518_detach_client(struct i2c_client *client)
479{ 488{
489 struct gl518_data *data = i2c_get_clientdata(client);
480 int err; 490 int err;
481 491
482 if ((err = i2c_detach_client(client))) { 492 hwmon_device_unregister(data->class_dev);
483 dev_err(&client->dev, "Client deregistration failed, "
484 "client not detached.\n");
485 return err;
486 }
487 493
488 kfree(i2c_get_clientdata(client)); 494 if ((err = i2c_detach_client(client)))
495 return err;
489 496
497 kfree(data);
490 return 0; 498 return 0;
491} 499}
492 500
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 80ae8d30c2af..12fd757066fc 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -26,8 +26,9 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-sensor.h> 29#include <linux/hwmon.h>
30#include <linux/i2c-vid.h> 30#include <linux/hwmon-vid.h>
31#include <linux/err.h>
31 32
32/* Type of the extra sensor */ 33/* Type of the extra sensor */
33static unsigned short extra_sensor_type; 34static unsigned short extra_sensor_type;
@@ -36,10 +37,9 @@ MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=tempe
36 37
37/* Addresses to scan */ 38/* Addresses to scan */
38static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 39static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
39static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
40 40
41/* Insmod parameters */ 41/* Insmod parameters */
42SENSORS_INSMOD_1(gl520sm); 42I2C_CLIENT_INSMOD_1(gl520sm);
43 43
44/* Many GL520 constants specified below 44/* Many GL520 constants specified below
45One of the inputs can be configured as either temp or voltage. 45One of the inputs can be configured as either temp or voltage.
@@ -120,6 +120,7 @@ static struct i2c_driver gl520_driver = {
120/* Client data */ 120/* Client data */
121struct gl520_data { 121struct gl520_data {
122 struct i2c_client client; 122 struct i2c_client client;
123 struct class_device *class_dev;
123 struct semaphore update_lock; 124 struct semaphore update_lock;
124 char valid; /* zero until the following fields are valid */ 125 char valid; /* zero until the following fields are valid */
125 unsigned long last_updated; /* in jiffies */ 126 unsigned long last_updated; /* in jiffies */
@@ -518,7 +519,7 @@ static int gl520_attach_adapter(struct i2c_adapter *adapter)
518{ 519{
519 if (!(adapter->class & I2C_CLASS_HWMON)) 520 if (!(adapter->class & I2C_CLASS_HWMON))
520 return 0; 521 return 0;
521 return i2c_detect(adapter, &addr_data, gl520_detect); 522 return i2c_probe(adapter, &addr_data, gl520_detect);
522} 523}
523 524
524static int gl520_detect(struct i2c_adapter *adapter, int address, int kind) 525static int gl520_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -571,6 +572,12 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind)
571 gl520_init_client(new_client); 572 gl520_init_client(new_client);
572 573
573 /* Register sysfs hooks */ 574 /* Register sysfs hooks */
575 data->class_dev = hwmon_device_register(&new_client->dev);
576 if (IS_ERR(data->class_dev)) {
577 err = PTR_ERR(data->class_dev);
578 goto exit_detach;
579 }
580
574 device_create_file_vid(new_client, 0); 581 device_create_file_vid(new_client, 0);
575 582
576 device_create_file_in(new_client, 0); 583 device_create_file_in(new_client, 0);
@@ -592,6 +599,8 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind)
592 599
593 return 0; 600 return 0;
594 601
602exit_detach:
603 i2c_detach_client(new_client);
595exit_free: 604exit_free:
596 kfree(data); 605 kfree(data);
597exit: 606exit:
@@ -608,7 +617,7 @@ static void gl520_init_client(struct i2c_client *client)
608 conf = oldconf = gl520_read_value(client, GL520_REG_CONF); 617 conf = oldconf = gl520_read_value(client, GL520_REG_CONF);
609 618
610 data->alarm_mask = 0xff; 619 data->alarm_mask = 0xff;
611 data->vrm = i2c_which_vrm(); 620 data->vrm = vid_which_vrm();
612 621
613 if (extra_sensor_type == 1) 622 if (extra_sensor_type == 1)
614 conf &= ~0x10; 623 conf &= ~0x10;
@@ -639,15 +648,15 @@ static void gl520_init_client(struct i2c_client *client)
639 648
640static int gl520_detach_client(struct i2c_client *client) 649static int gl520_detach_client(struct i2c_client *client)
641{ 650{
651 struct gl520_data *data = i2c_get_clientdata(client);
642 int err; 652 int err;
643 653
644 if ((err = i2c_detach_client(client))) { 654 hwmon_device_unregister(data->class_dev);
645 dev_err(&client->dev, "Client deregistration failed, " 655
646 "client not detached.\n"); 656 if ((err = i2c_detach_client(client)))
647 return err; 657 return err;
648 }
649 658
650 kfree(i2c_get_clientdata(client)); 659 kfree(data);
651 return 0; 660 return 0;
652} 661}
653 662
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
new file mode 100644
index 000000000000..312769ad4dab
--- /dev/null
+++ b/drivers/hwmon/hwmon-vid.c
@@ -0,0 +1,189 @@
1/*
2 hwmon-vid.c - VID/VRM/VRD voltage conversions
3
4 Copyright (c) 2004 Rudolf Marek <r.marek@sh.cvut.cz>
5
6 Partly imported from i2c-vid.h of the lm_sensors project
7 Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
8 With assistance from Trent Piepho <xyzzy@speakeasy.org>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23*/
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/hwmon-vid.h>
29
30/*
31 Common code for decoding VID pins.
32
33 References:
34
35 For VRM 8.4 to 9.1, "VRM x.y DC-DC Converter Design Guidelines",
36 available at http://developer.intel.com/.
37
38 For VRD 10.0 and up, "VRD x.y Design Guide",
39 available at http://developer.intel.com/.
40
41 AMD Opteron processors don't follow the Intel specifications.
42 I'm going to "make up" 2.4 as the spec number for the Opterons.
43 No good reason just a mnemonic for the 24x Opteron processor
44 series.
45
46 Opteron VID encoding is:
47 00000 = 1.550 V
48 00001 = 1.525 V
49 . . . .
50 11110 = 0.800 V
51 11111 = 0.000 V (off)
52*/
53
54/* vrm is the VRM/VRD document version multiplied by 10.
55 val is the 4-, 5- or 6-bit VID code.
56 Returned value is in mV to avoid floating point in the kernel. */
57int vid_from_reg(int val, int vrm)
58{
59 int vid;
60
61 switch(vrm) {
62
63 case 0:
64 return 0;
65
66 case 100: /* VRD 10.0 */
67 if((val & 0x1f) == 0x1f)
68 return 0;
69 if((val & 0x1f) <= 0x09 || val == 0x0a)
70 vid = 10875 - (val & 0x1f) * 250;
71 else
72 vid = 18625 - (val & 0x1f) * 250;
73 if(val & 0x20)
74 vid -= 125;
75 vid /= 10; /* only return 3 dec. places for now */
76 return vid;
77
78 case 24: /* Opteron processor */
79 return(val == 0x1f ? 0 : 1550 - val * 25);
80
81 case 91: /* VRM 9.1 */
82 case 90: /* VRM 9.0 */
83 return(val == 0x1f ? 0 :
84 1850 - val * 25);
85
86 case 85: /* VRM 8.5 */
87 return((val & 0x10 ? 25 : 0) +
88 ((val & 0x0f) > 0x04 ? 2050 : 1250) -
89 ((val & 0x0f) * 50));
90
91 case 84: /* VRM 8.4 */
92 val &= 0x0f;
93 /* fall through */
94 default: /* VRM 8.2 */
95 return(val == 0x1f ? 0 :
96 val & 0x10 ? 5100 - (val) * 100 :
97 2050 - (val) * 50);
98 }
99}
100
101
102/*
103 After this point is the code to automatically determine which
104 VRM/VRD specification should be used depending on the CPU.
105*/
106
107struct vrm_model {
108 u8 vendor;
109 u8 eff_family;
110 u8 eff_model;
111 int vrm_type;
112};
113
114#define ANY 0xFF
115
116#ifdef CONFIG_X86
117
118static struct vrm_model vrm_models[] = {
119 {X86_VENDOR_AMD, 0x6, ANY, 90}, /* Athlon Duron etc */
120 {X86_VENDOR_AMD, 0xF, ANY, 24}, /* Athlon 64, Opteron */
121 {X86_VENDOR_INTEL, 0x6, 0x9, 85}, /* 0.13um too */
122 {X86_VENDOR_INTEL, 0x6, 0xB, 85}, /* Tualatin */
123 {X86_VENDOR_INTEL, 0x6, ANY, 82}, /* any P6 */
124 {X86_VENDOR_INTEL, 0x7, ANY, 0}, /* Itanium */
125 {X86_VENDOR_INTEL, 0xF, 0x0, 90}, /* P4 */
126 {X86_VENDOR_INTEL, 0xF, 0x1, 90}, /* P4 Willamette */
127 {X86_VENDOR_INTEL, 0xF, 0x2, 90}, /* P4 Northwood */
128 {X86_VENDOR_INTEL, 0xF, 0x3, 100}, /* P4 Prescott */
129 {X86_VENDOR_INTEL, 0xF, 0x4, 100}, /* P4 Prescott */
130 {X86_VENDOR_INTEL, 0x10,ANY, 0}, /* Itanium 2 */
131 {X86_VENDOR_UNKNOWN, ANY, ANY, 0} /* stop here */
132};
133
134static int find_vrm(u8 eff_family, u8 eff_model, u8 vendor)
135{
136 int i = 0;
137
138 while (vrm_models[i].vendor!=X86_VENDOR_UNKNOWN) {
139 if (vrm_models[i].vendor==vendor)
140 if ((vrm_models[i].eff_family==eff_family)
141 && ((vrm_models[i].eff_model==eff_model) ||
142 (vrm_models[i].eff_model==ANY)))
143 return vrm_models[i].vrm_type;
144 i++;
145 }
146
147 return 0;
148}
149
150int vid_which_vrm(void)
151{
152 struct cpuinfo_x86 *c = cpu_data;
153 u32 eax;
154 u8 eff_family, eff_model;
155 int vrm_ret;
156
157 if (c->x86 < 6) /* Any CPU with family lower than 6 */
158 return 0; /* doesn't have VID and/or CPUID */
159
160 eax = cpuid_eax(1);
161 eff_family = ((eax & 0x00000F00)>>8);
162 eff_model = ((eax & 0x000000F0)>>4);
163 if (eff_family == 0xF) { /* use extended model & family */
164 eff_family += ((eax & 0x00F00000)>>20);
165 eff_model += ((eax & 0x000F0000)>>16)<<4;
166 }
167 vrm_ret = find_vrm(eff_family,eff_model,c->x86_vendor);
168 if (vrm_ret == 0)
169 printk(KERN_INFO "hwmon-vid: Unknown VRM version of your "
170 "x86 CPU\n");
171 return vrm_ret;
172}
173
174/* and now something completely different for the non-x86 world */
175#else
176int vid_which_vrm(void)
177{
178 printk(KERN_INFO "hwmon-vid: Unknown VRM version of your CPU\n");
179 return 0;
180}
181#endif
182
183EXPORT_SYMBOL(vid_from_reg);
184EXPORT_SYMBOL(vid_which_vrm);
185
186MODULE_AUTHOR("Rudolf Marek <r.marek@sh.cvut.cz>");
187
188MODULE_DESCRIPTION("hwmon-vid driver");
189MODULE_LICENSE("GPL");
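A minimal sketch of the consumer side of these helpers, assuming a hypothetical example_ chip driver; only vid_which_vrm() and vid_from_reg() (declared in <linux/hwmon-vid.h>) are taken from the code above, everything else is illustrative:

#include <linux/i2c.h>
#include <linux/hwmon-vid.h>

struct example_data {
	u8 vrm;		/* VRM/VRD spec version times 10, e.g. 90 for VRM 9.0 */
	u8 vid;		/* raw VID code read back from the chip */
};

/* At probe time, pick the VRM/VRD table that matches the host CPU. */
static void example_init(struct example_data *data)
{
	data->vrm = vid_which_vrm();	/* 0 when the CPU is unknown */
}

/* Convert the stored VID code to millivolts for a cpu0_vid sysfs file.
 * For instance, with VRM 9.0 a code of 0x0a decodes to 1850 - 10 * 25 = 1600 mV. */
static int example_vid_mv(struct example_data *data)
{
	return vid_from_reg(data->vid, data->vrm);
}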
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
new file mode 100644
index 000000000000..9b41c9bd805f
--- /dev/null
+++ b/drivers/hwmon/hwmon.c
@@ -0,0 +1,98 @@
1/*
2 hwmon.c - part of lm_sensors, Linux kernel modules for hardware monitoring
3
4 This file defines the sysfs class "hwmon", for use by sensors drivers.
5
6 Copyright (C) 2005 Mark M. Hoffman <mhoffman@lightlink.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; version 2 of the License.
11*/
12
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/err.h>
16#include <linux/kdev_t.h>
17#include <linux/idr.h>
18#include <linux/hwmon.h>
19
20#define HWMON_ID_PREFIX "hwmon"
21#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
22
23static struct class *hwmon_class;
24
25static DEFINE_IDR(hwmon_idr);
26
27/**
28 * hwmon_device_register - register w/ hwmon sysfs class
29 * @dev: the device to register
30 *
31 * hwmon_device_unregister() must be called when the class device is no
32 * longer needed.
33 *
34 * Returns the pointer to the new struct class device.
35 */
36struct class_device *hwmon_device_register(struct device *dev)
37{
38 struct class_device *cdev;
39 int id;
40
41 if (idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0)
42 return ERR_PTR(-ENOMEM);
43
44 if (idr_get_new(&hwmon_idr, NULL, &id) < 0)
45 return ERR_PTR(-ENOMEM);
46
47 id = id & MAX_ID_MASK;
48 cdev = class_device_create(hwmon_class, MKDEV(0,0), dev,
49 HWMON_ID_FORMAT, id);
50
51 if (IS_ERR(cdev))
52 idr_remove(&hwmon_idr, id);
53
54 return cdev;
55}
56
57/**
58 * hwmon_device_unregister - removes the previously registered class device
59 *
60 * @cdev: the class device to destroy
61 */
62void hwmon_device_unregister(struct class_device *cdev)
63{
64 int id;
65
66 if (sscanf(cdev->class_id, HWMON_ID_FORMAT, &id) == 1) {
67 class_device_unregister(cdev);
68 idr_remove(&hwmon_idr, id);
69 } else
70 dev_dbg(cdev->dev,
71 "hwmon_device_unregister() failed: bad class ID!\n");
72}
73
74static int __init hwmon_init(void)
75{
76 hwmon_class = class_create(THIS_MODULE, "hwmon");
77 if (IS_ERR(hwmon_class)) {
78 printk(KERN_ERR "hwmon.c: couldn't create sysfs class\n");
79 return PTR_ERR(hwmon_class);
80 }
81 return 0;
82}
83
84static void __exit hwmon_exit(void)
85{
86 class_destroy(hwmon_class);
87}
88
89module_init(hwmon_init);
90module_exit(hwmon_exit);
91
92EXPORT_SYMBOL_GPL(hwmon_device_register);
93EXPORT_SYMBOL_GPL(hwmon_device_unregister);
94
95MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
96MODULE_DESCRIPTION("hardware monitoring sysfs/class support");
97MODULE_LICENSE("GPL");
98
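The class API above is what every driver touched by this patch calls from its detect and detach paths; a condensed sketch of that per-driver pattern (the example_ names are hypothetical, the helpers are the hwmon_device_register()/hwmon_device_unregister() pair defined above):

#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/err.h>

struct example_data {
	struct i2c_client client;
	struct class_device *class_dev;	/* handle returned by hwmon_device_register() */
};

/* Called after i2c_attach_client() succeeds, before creating sysfs files. */
static int example_register_hwmon(struct i2c_client *client,
				  struct example_data *data)
{
	data->class_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->class_dev))
		return PTR_ERR(data->class_dev);	/* caller detaches the client and frees data */
	return 0;
}

/* Called first thing in detach_client(), before i2c_detach_client(). */
static void example_unregister_hwmon(struct example_data *data)
{
	hwmon_device_unregister(data->class_dev);
}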
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index db20c9e47393..53cc2b6d6385 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -36,19 +36,21 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <linux/i2c.h> 38#include <linux/i2c.h>
39#include <linux/i2c-sensor.h> 39#include <linux/i2c-isa.h>
40#include <linux/i2c-vid.h> 40#include <linux/hwmon.h>
41#include <linux/hwmon-sysfs.h> 41#include <linux/hwmon-sysfs.h>
42#include <linux/hwmon-vid.h>
43#include <linux/err.h>
42#include <asm/io.h> 44#include <asm/io.h>
43 45
44 46
45/* Addresses to scan */ 47/* Addresses to scan */
46static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 48static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
47 0x2e, 0x2f, I2C_CLIENT_END }; 49 0x2e, 0x2f, I2C_CLIENT_END };
48static unsigned int normal_isa[] = { 0x0290, I2C_CLIENT_ISA_END }; 50static unsigned short isa_address = 0x290;
49 51
50/* Insmod parameters */ 52/* Insmod parameters */
51SENSORS_INSMOD_2(it87, it8712); 53I2C_CLIENT_INSMOD_2(it87, it8712);
52 54
53#define REG 0x2e /* The register to read/write */ 55#define REG 0x2e /* The register to read/write */
54#define DEV 0x07 /* Register: Logical device select */ 56#define DEV 0x07 /* Register: Logical device select */
@@ -192,6 +194,7 @@ static int DIV_TO_REG(int val)
192 allocated. */ 194 allocated. */
193struct it87_data { 195struct it87_data {
194 struct i2c_client client; 196 struct i2c_client client;
197 struct class_device *class_dev;
195 struct semaphore lock; 198 struct semaphore lock;
196 enum chips type; 199 enum chips type;
197 200
@@ -218,7 +221,7 @@ struct it87_data {
218 221
219 222
220static int it87_attach_adapter(struct i2c_adapter *adapter); 223static int it87_attach_adapter(struct i2c_adapter *adapter);
221static int it87_find(int *address); 224static int it87_isa_attach_adapter(struct i2c_adapter *adapter);
222static int it87_detect(struct i2c_adapter *adapter, int address, int kind); 225static int it87_detect(struct i2c_adapter *adapter, int address, int kind);
223static int it87_detach_client(struct i2c_client *client); 226static int it87_detach_client(struct i2c_client *client);
224 227
@@ -239,6 +242,14 @@ static struct i2c_driver it87_driver = {
239 .detach_client = it87_detach_client, 242 .detach_client = it87_detach_client,
240}; 243};
241 244
245static struct i2c_driver it87_isa_driver = {
246 .owner = THIS_MODULE,
247 .name = "it87-isa",
248 .attach_adapter = it87_isa_attach_adapter,
249 .detach_client = it87_detach_client,
250};
251
252
242static ssize_t show_in(struct device *dev, struct device_attribute *attr, 253static ssize_t show_in(struct device *dev, struct device_attribute *attr,
243 char *buf) 254 char *buf)
244{ 255{
@@ -686,11 +697,16 @@ static int it87_attach_adapter(struct i2c_adapter *adapter)
686{ 697{
687 if (!(adapter->class & I2C_CLASS_HWMON)) 698 if (!(adapter->class & I2C_CLASS_HWMON))
688 return 0; 699 return 0;
689 return i2c_detect(adapter, &addr_data, it87_detect); 700 return i2c_probe(adapter, &addr_data, it87_detect);
690} 701}
691 702
692/* SuperIO detection - will change normal_isa[0] if a chip is found */ 703static int it87_isa_attach_adapter(struct i2c_adapter *adapter)
693static int it87_find(int *address) 704{
705 return it87_detect(adapter, isa_address, -1);
706}
707
708/* SuperIO detection - will change isa_address if a chip is found */
709static int __init it87_find(int *address)
694{ 710{
695 int err = -ENODEV; 711 int err = -ENODEV;
696 712
@@ -721,7 +737,7 @@ exit:
721 return err; 737 return err;
722} 738}
723 739
724/* This function is called by i2c_detect */ 740/* This function is called by i2c_probe */
725int it87_detect(struct i2c_adapter *adapter, int address, int kind) 741int it87_detect(struct i2c_adapter *adapter, int address, int kind)
726{ 742{
727 int i; 743 int i;
@@ -738,7 +754,7 @@ int it87_detect(struct i2c_adapter *adapter, int address, int kind)
738 754
739 /* Reserve the ISA region */ 755 /* Reserve the ISA region */
740 if (is_isa) 756 if (is_isa)
741 if (!request_region(address, IT87_EXTENT, it87_driver.name)) 757 if (!request_region(address, IT87_EXTENT, it87_isa_driver.name))
742 goto ERROR0; 758 goto ERROR0;
743 759
744 /* Probe whether there is anything available on this address. Already 760 /* Probe whether there is anything available on this address. Already
@@ -784,7 +800,7 @@ int it87_detect(struct i2c_adapter *adapter, int address, int kind)
784 i2c_set_clientdata(new_client, data); 800 i2c_set_clientdata(new_client, data);
785 new_client->addr = address; 801 new_client->addr = address;
786 new_client->adapter = adapter; 802 new_client->adapter = adapter;
787 new_client->driver = &it87_driver; 803 new_client->driver = is_isa ? &it87_isa_driver : &it87_driver;
788 new_client->flags = 0; 804 new_client->flags = 0;
789 805
790 /* Now, we do the remaining detection. */ 806 /* Now, we do the remaining detection. */
@@ -840,6 +856,12 @@ int it87_detect(struct i2c_adapter *adapter, int address, int kind)
840 it87_init_client(new_client, data); 856 it87_init_client(new_client, data);
841 857
842 /* Register sysfs hooks */ 858 /* Register sysfs hooks */
859 data->class_dev = hwmon_device_register(&new_client->dev);
860 if (IS_ERR(data->class_dev)) {
861 err = PTR_ERR(data->class_dev);
862 goto ERROR3;
863 }
864
843 device_create_file(&new_client->dev, &sensor_dev_attr_in0_input.dev_attr); 865 device_create_file(&new_client->dev, &sensor_dev_attr_in0_input.dev_attr);
844 device_create_file(&new_client->dev, &sensor_dev_attr_in1_input.dev_attr); 866 device_create_file(&new_client->dev, &sensor_dev_attr_in1_input.dev_attr);
845 device_create_file(&new_client->dev, &sensor_dev_attr_in2_input.dev_attr); 867 device_create_file(&new_client->dev, &sensor_dev_attr_in2_input.dev_attr);
@@ -897,13 +919,15 @@ int it87_detect(struct i2c_adapter *adapter, int address, int kind)
897 } 919 }
898 920
899 if (data->type == it8712) { 921 if (data->type == it8712) {
900 data->vrm = i2c_which_vrm(); 922 data->vrm = vid_which_vrm();
901 device_create_file_vrm(new_client); 923 device_create_file_vrm(new_client);
902 device_create_file_vid(new_client); 924 device_create_file_vid(new_client);
903 } 925 }
904 926
905 return 0; 927 return 0;
906 928
929ERROR3:
930 i2c_detach_client(new_client);
907ERROR2: 931ERROR2:
908 kfree(data); 932 kfree(data);
909ERROR1: 933ERROR1:
@@ -915,17 +939,17 @@ ERROR0:
915 939
916static int it87_detach_client(struct i2c_client *client) 940static int it87_detach_client(struct i2c_client *client)
917{ 941{
942 struct it87_data *data = i2c_get_clientdata(client);
918 int err; 943 int err;
919 944
920 if ((err = i2c_detach_client(client))) { 945 hwmon_device_unregister(data->class_dev);
921 dev_err(&client->dev, 946
922 "Client deregistration failed, client not detached.\n"); 947 if ((err = i2c_detach_client(client)))
923 return err; 948 return err;
924 }
925 949
926 if(i2c_is_isa_client(client)) 950 if(i2c_is_isa_client(client))
927 release_region(client->addr, IT87_EXTENT); 951 release_region(client->addr, IT87_EXTENT);
928 kfree(i2c_get_clientdata(client)); 952 kfree(data);
929 953
930 return 0; 954 return 0;
931} 955}
@@ -1158,16 +1182,28 @@ static struct it87_data *it87_update_device(struct device *dev)
1158 1182
1159static int __init sm_it87_init(void) 1183static int __init sm_it87_init(void)
1160{ 1184{
1161 int addr; 1185 int addr, res;
1162 1186
1163 if (!it87_find(&addr)) { 1187 if (!it87_find(&addr)) {
1164 normal_isa[0] = addr; 1188 isa_address = addr;
1189 }
1190
1191 res = i2c_add_driver(&it87_driver);
1192 if (res)
1193 return res;
1194
1195 res = i2c_isa_add_driver(&it87_isa_driver);
1196 if (res) {
1197 i2c_del_driver(&it87_driver);
1198 return res;
1165 } 1199 }
1166 return i2c_add_driver(&it87_driver); 1200
1201 return 0;
1167} 1202}
1168 1203
1169static void __exit sm_it87_exit(void) 1204static void __exit sm_it87_exit(void)
1170{ 1205{
1206 i2c_isa_del_driver(&it87_isa_driver);
1171 i2c_del_driver(&it87_driver); 1207 i2c_del_driver(&it87_driver);
1172} 1208}
1173 1209
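The same module init/exit ordering is repeated for lm78 below; condensed, the split into separate I2C and ISA i2c_drivers registers as follows (example_ names are hypothetical, i2c_isa_add_driver()/i2c_isa_del_driver() come from <linux/i2c-isa.h> as used above):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-isa.h>

static struct i2c_driver example_driver;	/* SMBus/I2C variant (hypothetical) */
static struct i2c_driver example_isa_driver;	/* ISA variant (hypothetical) */

static int __init example_init(void)
{
	int res;

	/* Register the I2C driver first ... */
	res = i2c_add_driver(&example_driver);
	if (res)
		return res;

	/* ... then the ISA one, rolling back the first on failure. */
	res = i2c_isa_add_driver(&example_isa_driver);
	if (res) {
		i2c_del_driver(&example_driver);
		return res;
	}
	return 0;
}

static void __exit example_exit(void)
{
	/* Tear down in reverse order. */
	i2c_isa_del_driver(&example_isa_driver);
	i2c_del_driver(&example_driver);
}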
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 7c6f9ea5a254..be5c7095ecbb 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -42,8 +42,9 @@
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/jiffies.h> 43#include <linux/jiffies.h>
44#include <linux/i2c.h> 44#include <linux/i2c.h>
45#include <linux/i2c-sensor.h>
46#include <linux/hwmon-sysfs.h> 45#include <linux/hwmon-sysfs.h>
46#include <linux/hwmon.h>
47#include <linux/err.h>
47 48
48/* 49/*
49 * Addresses to scan 50 * Addresses to scan
@@ -51,13 +52,12 @@
51 */ 52 */
52 53
53static unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; 54static unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
54static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
55 55
56/* 56/*
57 * Insmod parameters 57 * Insmod parameters
58 */ 58 */
59 59
60SENSORS_INSMOD_1(lm63); 60I2C_CLIENT_INSMOD_1(lm63);
61 61
62/* 62/*
63 * The LM63 registers 63 * The LM63 registers
@@ -152,6 +152,7 @@ static struct i2c_driver lm63_driver = {
152 152
153struct lm63_data { 153struct lm63_data {
154 struct i2c_client client; 154 struct i2c_client client;
155 struct class_device *class_dev;
155 struct semaphore update_lock; 156 struct semaphore update_lock;
156 char valid; /* zero until following fields are valid */ 157 char valid; /* zero until following fields are valid */
157 unsigned long last_updated; /* in jiffies */ 158 unsigned long last_updated; /* in jiffies */
@@ -358,7 +359,7 @@ static int lm63_attach_adapter(struct i2c_adapter *adapter)
358{ 359{
359 if (!(adapter->class & I2C_CLASS_HWMON)) 360 if (!(adapter->class & I2C_CLASS_HWMON))
360 return 0; 361 return 0;
361 return i2c_detect(adapter, &addr_data, lm63_detect); 362 return i2c_probe(adapter, &addr_data, lm63_detect);
362} 363}
363 364
364/* 365/*
@@ -437,6 +438,12 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind)
437 lm63_init_client(new_client); 438 lm63_init_client(new_client);
438 439
439 /* Register sysfs hooks */ 440 /* Register sysfs hooks */
441 data->class_dev = hwmon_device_register(&new_client->dev);
442 if (IS_ERR(data->class_dev)) {
443 err = PTR_ERR(data->class_dev);
444 goto exit_detach;
445 }
446
440 if (data->config & 0x04) { /* tachometer enabled */ 447 if (data->config & 0x04) { /* tachometer enabled */
441 device_create_file(&new_client->dev, 448 device_create_file(&new_client->dev,
442 &sensor_dev_attr_fan1_input.dev_attr); 449 &sensor_dev_attr_fan1_input.dev_attr);
@@ -462,6 +469,8 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind)
462 469
463 return 0; 470 return 0;
464 471
472exit_detach:
473 i2c_detach_client(new_client);
465exit_free: 474exit_free:
466 kfree(data); 475 kfree(data);
467exit: 476exit:
@@ -505,15 +514,15 @@ static void lm63_init_client(struct i2c_client *client)
505 514
506static int lm63_detach_client(struct i2c_client *client) 515static int lm63_detach_client(struct i2c_client *client)
507{ 516{
517 struct lm63_data *data = i2c_get_clientdata(client);
508 int err; 518 int err;
509 519
510 if ((err = i2c_detach_client(client))) { 520 hwmon_device_unregister(data->class_dev);
511 dev_err(&client->dev, "Client deregistration failed, " 521
512 "client not detached\n"); 522 if ((err = i2c_detach_client(client)))
513 return err; 523 return err;
514 }
515 524
516 kfree(i2c_get_clientdata(client)); 525 kfree(data);
517 return 0; 526 return 0;
518} 527}
519 528
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 5be164ed278e..9a3ebdf583f4 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -23,17 +23,17 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/jiffies.h> 24#include <linux/jiffies.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/i2c-sensor.h> 26#include <linux/hwmon.h>
27#include <linux/err.h>
27#include "lm75.h" 28#include "lm75.h"
28 29
29 30
30/* Addresses to scan */ 31/* Addresses to scan */
31static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 32static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
32 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 33 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
33static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
34 34
35/* Insmod parameters */ 35/* Insmod parameters */
36SENSORS_INSMOD_1(lm75); 36I2C_CLIENT_INSMOD_1(lm75);
37 37
38/* Many LM75 constants specified below */ 38/* Many LM75 constants specified below */
39 39
@@ -46,6 +46,7 @@ SENSORS_INSMOD_1(lm75);
46/* Each client has this additional data */ 46/* Each client has this additional data */
47struct lm75_data { 47struct lm75_data {
48 struct i2c_client client; 48 struct i2c_client client;
49 struct class_device *class_dev;
49 struct semaphore update_lock; 50 struct semaphore update_lock;
50 char valid; /* !=0 if following fields are valid */ 51 char valid; /* !=0 if following fields are valid */
51 unsigned long last_updated; /* In jiffies */ 52 unsigned long last_updated; /* In jiffies */
@@ -107,10 +108,10 @@ static int lm75_attach_adapter(struct i2c_adapter *adapter)
107{ 108{
108 if (!(adapter->class & I2C_CLASS_HWMON)) 109 if (!(adapter->class & I2C_CLASS_HWMON))
109 return 0; 110 return 0;
110 return i2c_detect(adapter, &addr_data, lm75_detect); 111 return i2c_probe(adapter, &addr_data, lm75_detect);
111} 112}
112 113
113/* This function is called by i2c_detect */ 114/* This function is called by i2c_probe */
114static int lm75_detect(struct i2c_adapter *adapter, int address, int kind) 115static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
115{ 116{
116 int i; 117 int i;
@@ -119,16 +120,6 @@ static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
119 int err = 0; 120 int err = 0;
120 const char *name = ""; 121 const char *name = "";
121 122
122 /* Make sure we aren't probing the ISA bus!! This is just a safety check
123 at this moment; i2c_detect really won't call us. */
124#ifdef DEBUG
125 if (i2c_is_isa_adapter(adapter)) {
126 dev_dbg(&adapter->dev,
127 "lm75_detect called for an ISA bus adapter?!?\n");
128 goto exit;
129 }
130#endif
131
132 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 123 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
133 I2C_FUNC_SMBUS_WORD_DATA)) 124 I2C_FUNC_SMBUS_WORD_DATA))
134 goto exit; 125 goto exit;
@@ -208,12 +199,20 @@ static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
208 lm75_init_client(new_client); 199 lm75_init_client(new_client);
209 200
210 /* Register sysfs hooks */ 201 /* Register sysfs hooks */
202 data->class_dev = hwmon_device_register(&new_client->dev);
203 if (IS_ERR(data->class_dev)) {
204 err = PTR_ERR(data->class_dev);
205 goto exit_detach;
206 }
207
211 device_create_file(&new_client->dev, &dev_attr_temp1_max); 208 device_create_file(&new_client->dev, &dev_attr_temp1_max);
212 device_create_file(&new_client->dev, &dev_attr_temp1_max_hyst); 209 device_create_file(&new_client->dev, &dev_attr_temp1_max_hyst);
213 device_create_file(&new_client->dev, &dev_attr_temp1_input); 210 device_create_file(&new_client->dev, &dev_attr_temp1_input);
214 211
215 return 0; 212 return 0;
216 213
214exit_detach:
215 i2c_detach_client(new_client);
217exit_free: 216exit_free:
218 kfree(data); 217 kfree(data);
219exit: 218exit:
@@ -222,8 +221,10 @@ exit:
222 221
223static int lm75_detach_client(struct i2c_client *client) 222static int lm75_detach_client(struct i2c_client *client)
224{ 223{
224 struct lm75_data *data = i2c_get_clientdata(client);
225 hwmon_device_unregister(data->class_dev);
225 i2c_detach_client(client); 226 i2c_detach_client(client);
226 kfree(i2c_get_clientdata(client)); 227 kfree(data);
227 return 0; 228 return 0;
228} 229}
229 230
@@ -251,8 +252,12 @@ static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
251 252
252static void lm75_init_client(struct i2c_client *client) 253static void lm75_init_client(struct i2c_client *client)
253{ 254{
254 /* Initialize the LM75 chip */ 255 int reg;
255 lm75_write_value(client, LM75_REG_CONF, 0); 256
257 /* Enable if in shutdown mode */
258 reg = lm75_read_value(client, LM75_REG_CONF);
259 if (reg >= 0 && (reg & 0x01))
260 lm75_write_value(client, LM75_REG_CONF, reg & 0xfe);
256} 261}
257 262
258static struct lm75_data *lm75_update_device(struct device *dev) 263static struct lm75_data *lm75_update_device(struct device *dev)
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index 63e3f2fb4c21..af7dc650ee15 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -25,7 +25,7 @@
25 which contains this code, we don't worry about the wasted space. 25 which contains this code, we don't worry about the wasted space.
26*/ 26*/
27 27
28#include <linux/i2c-sensor.h> 28#include <linux/hwmon.h>
29 29
30/* straight from the datasheet */ 30/* straight from the datasheet */
31#define LM75_TEMP_MIN (-55000) 31#define LM75_TEMP_MIN (-55000)
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index b98f44952997..866eab96a6f6 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -30,15 +30,14 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/jiffies.h> 31#include <linux/jiffies.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/i2c-sensor.h> 33#include <linux/hwmon.h>
34 34#include <linux/err.h>
35 35
36/* Addresses to scan */ 36/* Addresses to scan */
37static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; 37static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END };
38static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
39 38
40/* Insmod parameters */ 39/* Insmod parameters */
41SENSORS_INSMOD_1(lm77); 40I2C_CLIENT_INSMOD_1(lm77);
42 41
43/* The LM77 registers */ 42/* The LM77 registers */
44#define LM77_REG_TEMP 0x00 43#define LM77_REG_TEMP 0x00
@@ -51,6 +50,7 @@ SENSORS_INSMOD_1(lm77);
51/* Each client has this additional data */ 50/* Each client has this additional data */
52struct lm77_data { 51struct lm77_data {
53 struct i2c_client client; 52 struct i2c_client client;
53 struct class_device *class_dev;
54 struct semaphore update_lock; 54 struct semaphore update_lock;
55 char valid; 55 char valid;
56 unsigned long last_updated; /* In jiffies */ 56 unsigned long last_updated; /* In jiffies */
@@ -208,10 +208,10 @@ static int lm77_attach_adapter(struct i2c_adapter *adapter)
208{ 208{
209 if (!(adapter->class & I2C_CLASS_HWMON)) 209 if (!(adapter->class & I2C_CLASS_HWMON))
210 return 0; 210 return 0;
211 return i2c_detect(adapter, &addr_data, lm77_detect); 211 return i2c_probe(adapter, &addr_data, lm77_detect);
212} 212}
213 213
214/* This function is called by i2c_detect */ 214/* This function is called by i2c_probe */
215static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) 215static int lm77_detect(struct i2c_adapter *adapter, int address, int kind)
216{ 216{
217 struct i2c_client *new_client; 217 struct i2c_client *new_client;
@@ -317,6 +317,12 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind)
317 lm77_init_client(new_client); 317 lm77_init_client(new_client);
318 318
319 /* Register sysfs hooks */ 319 /* Register sysfs hooks */
320 data->class_dev = hwmon_device_register(&new_client->dev);
321 if (IS_ERR(data->class_dev)) {
322 err = PTR_ERR(data->class_dev);
323 goto exit_detach;
324 }
325
320 device_create_file(&new_client->dev, &dev_attr_temp1_input); 326 device_create_file(&new_client->dev, &dev_attr_temp1_input);
321 device_create_file(&new_client->dev, &dev_attr_temp1_crit); 327 device_create_file(&new_client->dev, &dev_attr_temp1_crit);
322 device_create_file(&new_client->dev, &dev_attr_temp1_min); 328 device_create_file(&new_client->dev, &dev_attr_temp1_min);
@@ -327,6 +333,8 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind)
327 device_create_file(&new_client->dev, &dev_attr_alarms); 333 device_create_file(&new_client->dev, &dev_attr_alarms);
328 return 0; 334 return 0;
329 335
336exit_detach:
337 i2c_detach_client(new_client);
330exit_free: 338exit_free:
331 kfree(data); 339 kfree(data);
332exit: 340exit:
@@ -335,8 +343,10 @@ exit:
335 343
336static int lm77_detach_client(struct i2c_client *client) 344static int lm77_detach_client(struct i2c_client *client)
337{ 345{
346 struct lm77_data *data = i2c_get_clientdata(client);
347 hwmon_device_unregister(data->class_dev);
338 i2c_detach_client(client); 348 i2c_detach_client(client);
339 kfree(i2c_get_clientdata(client)); 349 kfree(data);
340 return 0; 350 return 0;
341} 351}
342 352
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 29241469dcba..f6730dc3573b 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -23,7 +23,10 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/jiffies.h> 24#include <linux/jiffies.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/i2c-sensor.h> 26#include <linux/i2c-isa.h>
27#include <linux/hwmon.h>
28#include <linux/hwmon-vid.h>
29#include <linux/err.h>
27#include <asm/io.h> 30#include <asm/io.h>
28 31
29/* Addresses to scan */ 32/* Addresses to scan */
@@ -31,10 +34,10 @@ static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24,
31 0x25, 0x26, 0x27, 0x28, 0x29, 34 0x25, 0x26, 0x27, 0x28, 0x29,
32 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 35 0x2a, 0x2b, 0x2c, 0x2d, 0x2e,
33 0x2f, I2C_CLIENT_END }; 36 0x2f, I2C_CLIENT_END };
34static unsigned int normal_isa[] = { 0x0290, I2C_CLIENT_ISA_END }; 37static unsigned short isa_address = 0x290;
35 38
36/* Insmod parameters */ 39/* Insmod parameters */
37SENSORS_INSMOD_3(lm78, lm78j, lm79); 40I2C_CLIENT_INSMOD_2(lm78, lm79);
38 41
39/* Many LM78 constants specified below */ 42/* Many LM78 constants specified below */
40 43
@@ -104,13 +107,6 @@ static inline int TEMP_FROM_REG(s8 val)
104 return val * 1000; 107 return val * 1000;
105} 108}
106 109
107/* VID: mV
108 REG: (see doc/vid) */
109static inline int VID_FROM_REG(u8 val)
110{
111 return val==0x1f ? 0 : val>=0x10 ? 5100-val*100 : 2050-val*50;
112}
113
114#define DIV_FROM_REG(val) (1 << (val)) 110#define DIV_FROM_REG(val) (1 << (val))
115 111
116/* There are some complications in a module like this. First off, LM78 chips 112/* There are some complications in a module like this. First off, LM78 chips
@@ -134,6 +130,7 @@ static inline int VID_FROM_REG(u8 val)
134 allocated. */ 130 allocated. */
135struct lm78_data { 131struct lm78_data {
136 struct i2c_client client; 132 struct i2c_client client;
133 struct class_device *class_dev;
137 struct semaphore lock; 134 struct semaphore lock;
138 enum chips type; 135 enum chips type;
139 136
@@ -156,6 +153,7 @@ struct lm78_data {
156 153
157 154
158static int lm78_attach_adapter(struct i2c_adapter *adapter); 155static int lm78_attach_adapter(struct i2c_adapter *adapter);
156static int lm78_isa_attach_adapter(struct i2c_adapter *adapter);
159static int lm78_detect(struct i2c_adapter *adapter, int address, int kind); 157static int lm78_detect(struct i2c_adapter *adapter, int address, int kind);
160static int lm78_detach_client(struct i2c_client *client); 158static int lm78_detach_client(struct i2c_client *client);
161 159
@@ -174,6 +172,14 @@ static struct i2c_driver lm78_driver = {
174 .detach_client = lm78_detach_client, 172 .detach_client = lm78_detach_client,
175}; 173};
176 174
175static struct i2c_driver lm78_isa_driver = {
176 .owner = THIS_MODULE,
177 .name = "lm78-isa",
178 .attach_adapter = lm78_isa_attach_adapter,
179 .detach_client = lm78_detach_client,
180};
181
182
177/* 7 Voltages */ 183/* 7 Voltages */
178static ssize_t show_in(struct device *dev, char *buf, int nr) 184static ssize_t show_in(struct device *dev, char *buf, int nr)
179{ 185{
@@ -445,7 +451,7 @@ static DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_3_div, NULL);
445static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) 451static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf)
446{ 452{
447 struct lm78_data *data = lm78_update_device(dev); 453 struct lm78_data *data = lm78_update_device(dev);
448 return sprintf(buf, "%d\n", VID_FROM_REG(data->vid)); 454 return sprintf(buf, "%d\n", vid_from_reg(data->vid, 82));
449} 455}
450static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL); 456static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
451 457
@@ -465,10 +471,15 @@ static int lm78_attach_adapter(struct i2c_adapter *adapter)
465{ 471{
466 if (!(adapter->class & I2C_CLASS_HWMON)) 472 if (!(adapter->class & I2C_CLASS_HWMON))
467 return 0; 473 return 0;
468 return i2c_detect(adapter, &addr_data, lm78_detect); 474 return i2c_probe(adapter, &addr_data, lm78_detect);
475}
476
477static int lm78_isa_attach_adapter(struct i2c_adapter *adapter)
478{
479 return lm78_detect(adapter, isa_address, -1);
469} 480}
470 481
471/* This function is called by i2c_detect */ 482/* This function is called by i2c_probe */
472int lm78_detect(struct i2c_adapter *adapter, int address, int kind) 483int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
473{ 484{
474 int i, err; 485 int i, err;
@@ -485,7 +496,8 @@ int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
485 496
486 /* Reserve the ISA region */ 497 /* Reserve the ISA region */
487 if (is_isa) 498 if (is_isa)
488 if (!request_region(address, LM78_EXTENT, lm78_driver.name)) { 499 if (!request_region(address, LM78_EXTENT,
500 lm78_isa_driver.name)) {
489 err = -EBUSY; 501 err = -EBUSY;
490 goto ERROR0; 502 goto ERROR0;
491 } 503 }
@@ -540,7 +552,7 @@ int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
540 i2c_set_clientdata(new_client, data); 552 i2c_set_clientdata(new_client, data);
541 new_client->addr = address; 553 new_client->addr = address;
542 new_client->adapter = adapter; 554 new_client->adapter = adapter;
543 new_client->driver = &lm78_driver; 555 new_client->driver = is_isa ? &lm78_isa_driver : &lm78_driver;
544 new_client->flags = 0; 556 new_client->flags = 0;
545 557
546 /* Now, we do the remaining detection. */ 558 /* Now, we do the remaining detection. */
@@ -559,10 +571,9 @@ int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
559 /* Determine the chip type. */ 571 /* Determine the chip type. */
560 if (kind <= 0) { 572 if (kind <= 0) {
561 i = lm78_read_value(new_client, LM78_REG_CHIPID); 573 i = lm78_read_value(new_client, LM78_REG_CHIPID);
562 if (i == 0x00 || i == 0x20) 574 if (i == 0x00 || i == 0x20 /* LM78 */
575 || i == 0x40) /* LM78-J */
563 kind = lm78; 576 kind = lm78;
564 else if (i == 0x40)
565 kind = lm78j;
566 else if ((i & 0xfe) == 0xc0) 577 else if ((i & 0xfe) == 0xc0)
567 kind = lm79; 578 kind = lm79;
568 else { 579 else {
@@ -578,8 +589,6 @@ int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
578 589
579 if (kind == lm78) { 590 if (kind == lm78) {
580 client_name = "lm78"; 591 client_name = "lm78";
581 } else if (kind == lm78j) {
582 client_name = "lm78-j";
583 } else if (kind == lm79) { 592 } else if (kind == lm79) {
584 client_name = "lm79"; 593 client_name = "lm79";
585 } 594 }
@@ -605,6 +614,12 @@ int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
605 } 614 }
606 615
607 /* Register sysfs hooks */ 616 /* Register sysfs hooks */
617 data->class_dev = hwmon_device_register(&new_client->dev);
618 if (IS_ERR(data->class_dev)) {
619 err = PTR_ERR(data->class_dev);
620 goto ERROR3;
621 }
622
608 device_create_file(&new_client->dev, &dev_attr_in0_input); 623 device_create_file(&new_client->dev, &dev_attr_in0_input);
609 device_create_file(&new_client->dev, &dev_attr_in0_min); 624 device_create_file(&new_client->dev, &dev_attr_in0_min);
610 device_create_file(&new_client->dev, &dev_attr_in0_max); 625 device_create_file(&new_client->dev, &dev_attr_in0_max);
@@ -643,6 +658,8 @@ int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
643 658
644 return 0; 659 return 0;
645 660
661ERROR3:
662 i2c_detach_client(new_client);
646ERROR2: 663ERROR2:
647 kfree(data); 664 kfree(data);
648ERROR1: 665ERROR1:
@@ -654,18 +671,18 @@ ERROR0:
654 671
655static int lm78_detach_client(struct i2c_client *client) 672static int lm78_detach_client(struct i2c_client *client)
656{ 673{
674 struct lm78_data *data = i2c_get_clientdata(client);
657 int err; 675 int err;
658 676
659 if ((err = i2c_detach_client(client))) { 677 hwmon_device_unregister(data->class_dev);
660 dev_err(&client->dev, 678
661 "Client deregistration failed, client not detached.\n"); 679 if ((err = i2c_detach_client(client)))
662 return err; 680 return err;
663 }
664 681
665 if(i2c_is_isa_client(client)) 682 if(i2c_is_isa_client(client))
666 release_region(client->addr, LM78_EXTENT); 683 release_region(client->addr, LM78_EXTENT);
667 684
668 kfree(i2c_get_clientdata(client)); 685 kfree(data);
669 686
670 return 0; 687 return 0;
671} 688}
@@ -777,18 +794,31 @@ static struct lm78_data *lm78_update_device(struct device *dev)
777 794
778static int __init sm_lm78_init(void) 795static int __init sm_lm78_init(void)
779{ 796{
780 return i2c_add_driver(&lm78_driver); 797 int res;
798
799 res = i2c_add_driver(&lm78_driver);
800 if (res)
801 return res;
802
803 res = i2c_isa_add_driver(&lm78_isa_driver);
804 if (res) {
805 i2c_del_driver(&lm78_driver);
806 return res;
807 }
808
809 return 0;
781} 810}
782 811
783static void __exit sm_lm78_exit(void) 812static void __exit sm_lm78_exit(void)
784{ 813{
814 i2c_isa_del_driver(&lm78_isa_driver);
785 i2c_del_driver(&lm78_driver); 815 i2c_del_driver(&lm78_driver);
786} 816}
787 817
788 818
789 819
790MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); 820MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
791MODULE_DESCRIPTION("LM78, LM78-J and LM79 driver"); 821MODULE_DESCRIPTION("LM78/LM79 driver");
792MODULE_LICENSE("GPL"); 822MODULE_LICENSE("GPL");
793 823
794module_init(sm_lm78_init); 824module_init(sm_lm78_init);
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 8100595feb44..83af8b3a0cac 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -26,15 +26,15 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-sensor.h> 29#include <linux/hwmon.h>
30#include <linux/err.h>
30 31
31/* Addresses to scan */ 32/* Addresses to scan */
32static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 33static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c,
33 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; 34 0x2d, 0x2e, 0x2f, I2C_CLIENT_END };
34static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
35 35
36/* Insmod parameters */ 36/* Insmod parameters */
37SENSORS_INSMOD_1(lm80); 37I2C_CLIENT_INSMOD_1(lm80);
38 38
39/* Many LM80 constants specified below */ 39/* Many LM80 constants specified below */
40 40
@@ -107,6 +107,7 @@ static inline long TEMP_FROM_REG(u16 temp)
107 107
108struct lm80_data { 108struct lm80_data {
109 struct i2c_client client; 109 struct i2c_client client;
110 struct class_device *class_dev;
110 struct semaphore update_lock; 111 struct semaphore update_lock;
111 char valid; /* !=0 if following fields are valid */ 112 char valid; /* !=0 if following fields are valid */
112 unsigned long last_updated; /* In jiffies */ 113 unsigned long last_updated; /* In jiffies */
@@ -389,7 +390,7 @@ static int lm80_attach_adapter(struct i2c_adapter *adapter)
389{ 390{
390 if (!(adapter->class & I2C_CLASS_HWMON)) 391 if (!(adapter->class & I2C_CLASS_HWMON))
391 return 0; 392 return 0;
392 return i2c_detect(adapter, &addr_data, lm80_detect); 393 return i2c_probe(adapter, &addr_data, lm80_detect);
393} 394}
394 395
395int lm80_detect(struct i2c_adapter *adapter, int address, int kind) 396int lm80_detect(struct i2c_adapter *adapter, int address, int kind)
@@ -451,6 +452,12 @@ int lm80_detect(struct i2c_adapter *adapter, int address, int kind)
451 data->fan_min[1] = lm80_read_value(new_client, LM80_REG_FAN_MIN(2)); 452 data->fan_min[1] = lm80_read_value(new_client, LM80_REG_FAN_MIN(2));
452 453
453 /* Register sysfs hooks */ 454 /* Register sysfs hooks */
455 data->class_dev = hwmon_device_register(&new_client->dev);
456 if (IS_ERR(data->class_dev)) {
457 err = PTR_ERR(data->class_dev);
458 goto error_detach;
459 }
460
454 device_create_file(&new_client->dev, &dev_attr_in0_min); 461 device_create_file(&new_client->dev, &dev_attr_in0_min);
455 device_create_file(&new_client->dev, &dev_attr_in1_min); 462 device_create_file(&new_client->dev, &dev_attr_in1_min);
456 device_create_file(&new_client->dev, &dev_attr_in2_min); 463 device_create_file(&new_client->dev, &dev_attr_in2_min);
@@ -487,6 +494,8 @@ int lm80_detect(struct i2c_adapter *adapter, int address, int kind)
487 494
488 return 0; 495 return 0;
489 496
497error_detach:
498 i2c_detach_client(new_client);
490error_free: 499error_free:
491 kfree(data); 500 kfree(data);
492exit: 501exit:
@@ -495,15 +504,15 @@ exit:
495 504
496static int lm80_detach_client(struct i2c_client *client) 505static int lm80_detach_client(struct i2c_client *client)
497{ 506{
507 struct lm80_data *data = i2c_get_clientdata(client);
498 int err; 508 int err;
499 509
500 if ((err = i2c_detach_client(client))) { 510 hwmon_device_unregister(data->class_dev);
501 dev_err(&client->dev, "Client deregistration failed, " 511
502 "client not detached.\n"); 512 if ((err = i2c_detach_client(client)))
503 return err; 513 return err;
504 }
505 514
506 kfree(i2c_get_clientdata(client)); 515 kfree(data);
507 return 0; 516 return 0;
508} 517}
509 518
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index a49008b444c8..d74b2c20c719 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -32,8 +32,9 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/jiffies.h> 33#include <linux/jiffies.h>
34#include <linux/i2c.h> 34#include <linux/i2c.h>
35#include <linux/i2c-sensor.h>
36#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
36#include <linux/hwmon.h>
37#include <linux/err.h>
37 38
38/* 39/*
39 * Addresses to scan 40 * Addresses to scan
@@ -45,13 +46,12 @@ static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a,
45 0x29, 0x2a, 0x2b, 46 0x29, 0x2a, 0x2b,
46 0x4c, 0x4d, 0x4e, 47 0x4c, 0x4d, 0x4e,
47 I2C_CLIENT_END }; 48 I2C_CLIENT_END };
48static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
49 49
50/* 50/*
51 * Insmod parameters 51 * Insmod parameters
52 */ 52 */
53 53
54SENSORS_INSMOD_1(lm83); 54I2C_CLIENT_INSMOD_1(lm83);
55 55
56/* 56/*
57 * The LM83 registers 57 * The LM83 registers
@@ -138,6 +138,7 @@ static struct i2c_driver lm83_driver = {
138 138
139struct lm83_data { 139struct lm83_data {
140 struct i2c_client client; 140 struct i2c_client client;
141 struct class_device *class_dev;
141 struct semaphore update_lock; 142 struct semaphore update_lock;
142 char valid; /* zero until following fields are valid */ 143 char valid; /* zero until following fields are valid */
143 unsigned long last_updated; /* in jiffies */ 144 unsigned long last_updated; /* in jiffies */
@@ -212,7 +213,7 @@ static int lm83_attach_adapter(struct i2c_adapter *adapter)
212{ 213{
213 if (!(adapter->class & I2C_CLASS_HWMON)) 214 if (!(adapter->class & I2C_CLASS_HWMON))
214 return 0; 215 return 0;
215 return i2c_detect(adapter, &addr_data, lm83_detect); 216 return i2c_probe(adapter, &addr_data, lm83_detect);
216} 217}
217 218
218/* 219/*
@@ -312,6 +313,12 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
312 */ 313 */
313 314
314 /* Register sysfs hooks */ 315 /* Register sysfs hooks */
316 data->class_dev = hwmon_device_register(&new_client->dev);
317 if (IS_ERR(data->class_dev)) {
318 err = PTR_ERR(data->class_dev);
319 goto exit_detach;
320 }
321
315 device_create_file(&new_client->dev, 322 device_create_file(&new_client->dev,
316 &sensor_dev_attr_temp1_input.dev_attr); 323 &sensor_dev_attr_temp1_input.dev_attr);
317 device_create_file(&new_client->dev, 324 device_create_file(&new_client->dev,
@@ -340,6 +347,8 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
340 347
341 return 0; 348 return 0;
342 349
350exit_detach:
351 i2c_detach_client(new_client);
343exit_free: 352exit_free:
344 kfree(data); 353 kfree(data);
345exit: 354exit:
@@ -348,15 +357,15 @@ exit:
348 357
349static int lm83_detach_client(struct i2c_client *client) 358static int lm83_detach_client(struct i2c_client *client)
350{ 359{
360 struct lm83_data *data = i2c_get_clientdata(client);
351 int err; 361 int err;
352 362
353 if ((err = i2c_detach_client(client))) { 363 hwmon_device_unregister(data->class_dev);
354 dev_err(&client->dev, 364
355 "Client deregistration failed, client not detached.\n"); 365 if ((err = i2c_detach_client(client)))
356 return err; 366 return err;
357 }
358 367
359 kfree(i2c_get_clientdata(client)); 368 kfree(data);
360 return 0; 369 return 0;
361} 370}
362 371
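
lm83 (above) and lm90 keep their attribute declarations in <linux/hwmon-sysfs.h> form: one show routine serves several sysfs files, with the per-file channel carried in the sensor_device_attribute index. A sketch of that pattern (illustrative only; example_data and example_update_device are placeholders):

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

struct example_data {
	s8 temp[2];
	/* ... */
};

static struct example_data *example_update_device(struct device *dev);

static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct example_data *data = example_update_device(dev);

	/* attr->index selects which channel this sysfs file reports */
	return sprintf(buf, "%d\n", data->temp[attr->index]);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);

/* created against the client's struct device in detect():
 *   device_create_file(&new_client->dev,
 *                      &sensor_dev_attr_temp1_input.dev_attr);
 */
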
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index b4d7fd418264..ab214df9624b 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -28,15 +28,15 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/jiffies.h> 29#include <linux/jiffies.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include <linux/i2c-sensor.h> 31#include <linux/hwmon.h>
32#include <linux/i2c-vid.h> 32#include <linux/hwmon-vid.h>
33#include <linux/err.h>
33 34
34/* Addresses to scan */ 35/* Addresses to scan */
35static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 36static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
36static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
37 37
38/* Insmod parameters */ 38/* Insmod parameters */
39SENSORS_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102); 39I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102);
40 40
41/* The LM85 registers */ 41/* The LM85 registers */
42 42
@@ -281,15 +281,6 @@ static int ZONE_TO_REG( int zone )
281#define PPR_TO_REG(val,fan) (SENSORS_LIMIT((val)-1,0,3)<<(fan *2)) 281#define PPR_TO_REG(val,fan) (SENSORS_LIMIT((val)-1,0,3)<<(fan *2))
282#define PPR_FROM_REG(val,fan) ((((val)>>(fan * 2))&0x03)+1) 282#define PPR_FROM_REG(val,fan) ((((val)>>(fan * 2))&0x03)+1)
283 283
284/* i2c-vid.h defines vid_from_reg() */
285#define VID_FROM_REG(val,vrm) (vid_from_reg((val),(vrm)))
286
287/* Unlike some other drivers we DO NOT set initial limits. Use
288 * the config file to set limits. Some users have reported
289 * motherboards shutting down when we set limits in a previous
290 * version of the driver.
291 */
292
293/* Chip sampling rates 284/* Chip sampling rates
294 * 285 *
295 * Some sensors are not updated more frequently than once per second 286 * Some sensors are not updated more frequently than once per second
@@ -339,6 +330,7 @@ struct lm85_autofan {
339 330
340struct lm85_data { 331struct lm85_data {
341 struct i2c_client client; 332 struct i2c_client client;
333 struct class_device *class_dev;
342 struct semaphore lock; 334 struct semaphore lock;
343 enum chips type; 335 enum chips type;
344 336
@@ -1019,7 +1011,7 @@ int lm85_attach_adapter(struct i2c_adapter *adapter)
1019{ 1011{
1020 if (!(adapter->class & I2C_CLASS_HWMON)) 1012 if (!(adapter->class & I2C_CLASS_HWMON))
1021 return 0; 1013 return 0;
1022 return i2c_detect(adapter, &addr_data, lm85_detect); 1014 return i2c_probe(adapter, &addr_data, lm85_detect);
1023} 1015}
1024 1016
1025int lm85_detect(struct i2c_adapter *adapter, int address, 1017int lm85_detect(struct i2c_adapter *adapter, int address,
@@ -1031,11 +1023,6 @@ int lm85_detect(struct i2c_adapter *adapter, int address,
1031 int err = 0; 1023 int err = 0;
1032 const char *type_name = ""; 1024 const char *type_name = "";
1033 1025
1034 if (i2c_is_isa_adapter(adapter)) {
1035 /* This chip has no ISA interface */
1036 goto ERROR0 ;
1037 };
1038
1039 if (!i2c_check_functionality(adapter, 1026 if (!i2c_check_functionality(adapter,
1040 I2C_FUNC_SMBUS_BYTE_DATA)) { 1027 I2C_FUNC_SMBUS_BYTE_DATA)) {
1041 /* We need to be able to do byte I/O */ 1028 /* We need to be able to do byte I/O */
@@ -1160,12 +1147,18 @@ int lm85_detect(struct i2c_adapter *adapter, int address,
1160 goto ERROR1; 1147 goto ERROR1;
1161 1148
1162 /* Set the VRM version */ 1149 /* Set the VRM version */
1163 data->vrm = i2c_which_vrm(); 1150 data->vrm = vid_which_vrm();
1164 1151
1165 /* Initialize the LM85 chip */ 1152 /* Initialize the LM85 chip */
1166 lm85_init_client(new_client); 1153 lm85_init_client(new_client);
1167 1154
1168 /* Register sysfs hooks */ 1155 /* Register sysfs hooks */
1156 data->class_dev = hwmon_device_register(&new_client->dev);
1157 if (IS_ERR(data->class_dev)) {
1158 err = PTR_ERR(data->class_dev);
1159 goto ERROR2;
1160 }
1161
1169 device_create_file(&new_client->dev, &dev_attr_fan1_input); 1162 device_create_file(&new_client->dev, &dev_attr_fan1_input);
1170 device_create_file(&new_client->dev, &dev_attr_fan2_input); 1163 device_create_file(&new_client->dev, &dev_attr_fan2_input);
1171 device_create_file(&new_client->dev, &dev_attr_fan3_input); 1164 device_create_file(&new_client->dev, &dev_attr_fan3_input);
@@ -1235,6 +1228,8 @@ int lm85_detect(struct i2c_adapter *adapter, int address,
1235 return 0; 1228 return 0;
1236 1229
1237 /* Error out and cleanup code */ 1230 /* Error out and cleanup code */
1231 ERROR2:
1232 i2c_detach_client(new_client);
1238 ERROR1: 1233 ERROR1:
1239 kfree(data); 1234 kfree(data);
1240 ERROR0: 1235 ERROR0:
@@ -1243,8 +1238,10 @@ int lm85_detect(struct i2c_adapter *adapter, int address,
1243 1238
1244int lm85_detach_client(struct i2c_client *client) 1239int lm85_detach_client(struct i2c_client *client)
1245{ 1240{
1241 struct lm85_data *data = i2c_get_clientdata(client);
1242 hwmon_device_unregister(data->class_dev);
1246 i2c_detach_client(client); 1243 i2c_detach_client(client);
1247 kfree(i2c_get_clientdata(client)); 1244 kfree(data);
1248 return 0; 1245 return 0;
1249} 1246}
1250 1247
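
The lm85 hunks above show the other half of the conversion: each detect() routine now registers a hwmon class device before creating its sysfs files, and gains an extra unwind label that detaches the client if that registration fails. A condensed sketch of the resulting control flow (not the actual lm85 code; the example_* identifiers are placeholders):

#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_data {
	struct i2c_client client;
	struct class_device *class_dev;
};

static struct i2c_driver example_driver;	/* .detach_client etc. defined elsewhere */

static int example_detect(struct i2c_adapter *adapter, int address, int kind)
{
	struct i2c_client *new_client;
	struct example_data *data;
	int err = 0;

	if (!(data = kmalloc(sizeof(struct example_data), GFP_KERNEL))) {
		err = -ENOMEM;
		goto exit;
	}
	memset(data, 0x00, sizeof(struct example_data));

	new_client = &data->client;
	i2c_set_clientdata(new_client, data);
	new_client->addr = address;
	new_client->adapter = adapter;
	new_client->driver = &example_driver;

	/* ... chip identification, name setup, chip init ... */

	if ((err = i2c_attach_client(new_client)))
		goto exit_free;

	/* register with the hwmon class *before* creating sysfs files */
	data->class_dev = hwmon_device_register(&new_client->dev);
	if (IS_ERR(data->class_dev)) {
		err = PTR_ERR(data->class_dev);
		goto exit_detach;
	}

	/* ... device_create_file() calls for each attribute ... */

	return 0;

exit_detach:
	i2c_detach_client(new_client);
exit_free:
	kfree(data);
exit:
	return err;
}
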
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 1921ed1af182..dca996de4c33 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -57,8 +57,9 @@
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/jiffies.h> 58#include <linux/jiffies.h>
59#include <linux/i2c.h> 59#include <linux/i2c.h>
60#include <linux/i2c-sensor.h> 60#include <linux/hwmon.h>
61#include <linux/i2c-vid.h> 61#include <linux/hwmon-vid.h>
62#include <linux/err.h>
62 63
63/* 64/*
64 * Addresses to scan 65 * Addresses to scan
@@ -66,13 +67,12 @@
66 */ 67 */
67 68
68static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 69static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
69static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
70 70
71/* 71/*
72 * Insmod parameters 72 * Insmod parameters
73 */ 73 */
74 74
75SENSORS_INSMOD_1(lm87); 75I2C_CLIENT_INSMOD_1(lm87);
76 76
77/* 77/*
78 * The LM87 registers 78 * The LM87 registers
@@ -175,6 +175,7 @@ static struct i2c_driver lm87_driver = {
175 175
176struct lm87_data { 176struct lm87_data {
177 struct i2c_client client; 177 struct i2c_client client;
178 struct class_device *class_dev;
178 struct semaphore update_lock; 179 struct semaphore update_lock;
179 char valid; /* zero until following fields are valid */ 180 char valid; /* zero until following fields are valid */
180 unsigned long last_updated; /* In jiffies */ 181 unsigned long last_updated; /* In jiffies */
@@ -537,7 +538,7 @@ static int lm87_attach_adapter(struct i2c_adapter *adapter)
537{ 538{
538 if (!(adapter->class & I2C_CLASS_HWMON)) 539 if (!(adapter->class & I2C_CLASS_HWMON))
539 return 0; 540 return 0;
540 return i2c_detect(adapter, &addr_data, lm87_detect); 541 return i2c_probe(adapter, &addr_data, lm87_detect);
541} 542}
542 543
543/* 544/*
@@ -608,6 +609,12 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind)
608 data->in_scale[7] = 1875; 609 data->in_scale[7] = 1875;
609 610
610 /* Register sysfs hooks */ 611 /* Register sysfs hooks */
612 data->class_dev = hwmon_device_register(&new_client->dev);
613 if (IS_ERR(data->class_dev)) {
614 err = PTR_ERR(data->class_dev);
615 goto exit_detach;
616 }
617
611 device_create_file(&new_client->dev, &dev_attr_in1_input); 618 device_create_file(&new_client->dev, &dev_attr_in1_input);
612 device_create_file(&new_client->dev, &dev_attr_in1_min); 619 device_create_file(&new_client->dev, &dev_attr_in1_min);
613 device_create_file(&new_client->dev, &dev_attr_in1_max); 620 device_create_file(&new_client->dev, &dev_attr_in1_max);
@@ -673,6 +680,8 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind)
673 680
674 return 0; 681 return 0;
675 682
683exit_detach:
684 i2c_detach_client(new_client);
676exit_free: 685exit_free:
677 kfree(data); 686 kfree(data);
678exit: 687exit:
@@ -685,7 +694,7 @@ static void lm87_init_client(struct i2c_client *client)
685 u8 config; 694 u8 config;
686 695
687 data->channel = lm87_read_value(client, LM87_REG_CHANNEL_MODE); 696 data->channel = lm87_read_value(client, LM87_REG_CHANNEL_MODE);
688 data->vrm = i2c_which_vrm(); 697 data->vrm = vid_which_vrm();
689 698
690 config = lm87_read_value(client, LM87_REG_CONFIG); 699 config = lm87_read_value(client, LM87_REG_CONFIG);
691 if (!(config & 0x01)) { 700 if (!(config & 0x01)) {
@@ -719,15 +728,15 @@ static void lm87_init_client(struct i2c_client *client)
719 728
720static int lm87_detach_client(struct i2c_client *client) 729static int lm87_detach_client(struct i2c_client *client)
721{ 730{
731 struct lm87_data *data = i2c_get_clientdata(client);
722 int err; 732 int err;
723 733
724 if ((err = i2c_detach_client(client))) { 734 hwmon_device_unregister(data->class_dev);
725 dev_err(&client->dev, "Client deregistration failed, " 735
726 "client not detached.\n"); 736 if ((err = i2c_detach_client(client)))
727 return err; 737 return err;
728 }
729 738
730 kfree(i2c_get_clientdata(client)); 739 kfree(data);
731 return 0; 740 return 0;
732} 741}
733 742
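
lm85 and lm87 also move from the old i2c-vid.h wrappers to <linux/hwmon-vid.h>: the VRM version now comes from vid_which_vrm() and the VID pins are decoded with vid_from_reg(). A small sketch (placeholder names; in this API vid_from_reg() yields millivolts):

#include <linux/hwmon-vid.h>

struct example_data {
	u8 vrm;		/* VRM version code */
	int vid_mv;	/* decoded core voltage, in millivolts */
};

static void example_read_vid(struct example_data *data, u8 vid_reg)
{
	data->vrm = vid_which_vrm();			/* pick VRM from the CPU */
	data->vid_mv = vid_from_reg(vid_reg, data->vrm);/* decode the VID pins */
}
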
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index a67dcadf7cb0..14de05fcd431 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -75,8 +75,9 @@
75#include <linux/slab.h> 75#include <linux/slab.h>
76#include <linux/jiffies.h> 76#include <linux/jiffies.h>
77#include <linux/i2c.h> 77#include <linux/i2c.h>
78#include <linux/i2c-sensor.h>
79#include <linux/hwmon-sysfs.h> 78#include <linux/hwmon-sysfs.h>
79#include <linux/hwmon.h>
80#include <linux/err.h>
80 81
81/* 82/*
82 * Addresses to scan 83 * Addresses to scan
@@ -89,13 +90,12 @@
89 */ 90 */
90 91
91static unsigned short normal_i2c[] = { 0x4c, 0x4d, I2C_CLIENT_END }; 92static unsigned short normal_i2c[] = { 0x4c, 0x4d, I2C_CLIENT_END };
92static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
93 93
94/* 94/*
95 * Insmod parameters 95 * Insmod parameters
96 */ 96 */
97 97
98SENSORS_INSMOD_6(lm90, adm1032, lm99, lm86, max6657, adt7461); 98I2C_CLIENT_INSMOD_6(lm90, adm1032, lm99, lm86, max6657, adt7461);
99 99
100/* 100/*
101 * The LM90 registers 101 * The LM90 registers
@@ -200,6 +200,7 @@ static struct i2c_driver lm90_driver = {
200 200
201struct lm90_data { 201struct lm90_data {
202 struct i2c_client client; 202 struct i2c_client client;
203 struct class_device *class_dev;
203 struct semaphore update_lock; 204 struct semaphore update_lock;
204 char valid; /* zero until following fields are valid */ 205 char valid; /* zero until following fields are valid */
205 unsigned long last_updated; /* in jiffies */ 206 unsigned long last_updated; /* in jiffies */
@@ -352,7 +353,7 @@ static int lm90_attach_adapter(struct i2c_adapter *adapter)
352{ 353{
353 if (!(adapter->class & I2C_CLASS_HWMON)) 354 if (!(adapter->class & I2C_CLASS_HWMON))
354 return 0; 355 return 0;
355 return i2c_detect(adapter, &addr_data, lm90_detect); 356 return i2c_probe(adapter, &addr_data, lm90_detect);
356} 357}
357 358
358/* 359/*
@@ -500,6 +501,12 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
500 lm90_init_client(new_client); 501 lm90_init_client(new_client);
501 502
502 /* Register sysfs hooks */ 503 /* Register sysfs hooks */
504 data->class_dev = hwmon_device_register(&new_client->dev);
505 if (IS_ERR(data->class_dev)) {
506 err = PTR_ERR(data->class_dev);
507 goto exit_detach;
508 }
509
503 device_create_file(&new_client->dev, 510 device_create_file(&new_client->dev,
504 &sensor_dev_attr_temp1_input.dev_attr); 511 &sensor_dev_attr_temp1_input.dev_attr);
505 device_create_file(&new_client->dev, 512 device_create_file(&new_client->dev,
@@ -524,6 +531,8 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
524 531
525 return 0; 532 return 0;
526 533
534exit_detach:
535 i2c_detach_client(new_client);
527exit_free: 536exit_free:
528 kfree(data); 537 kfree(data);
529exit: 538exit:
@@ -547,15 +556,15 @@ static void lm90_init_client(struct i2c_client *client)
547 556
548static int lm90_detach_client(struct i2c_client *client) 557static int lm90_detach_client(struct i2c_client *client)
549{ 558{
559 struct lm90_data *data = i2c_get_clientdata(client);
550 int err; 560 int err;
551 561
552 if ((err = i2c_detach_client(client))) { 562 hwmon_device_unregister(data->class_dev);
553 dev_err(&client->dev, "Client deregistration failed, " 563
554 "client not detached.\n"); 564 if ((err = i2c_detach_client(client)))
555 return err; 565 return err;
556 }
557 566
558 kfree(i2c_get_clientdata(client)); 567 kfree(data);
559 return 0; 568 return 0;
560} 569}
561 570
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 215c8e40ffdd..647b7c7cd575 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -44,17 +44,16 @@
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/i2c.h> 46#include <linux/i2c.h>
47#include <linux/i2c-sensor.h> 47#include <linux/hwmon.h>
48 48#include <linux/err.h>
49 49
50/* The LM92 and MAX6635 have 2 two-state pins for address selection, 50/* The LM92 and MAX6635 have 2 two-state pins for address selection,
51 resulting in 4 possible addresses. */ 51 resulting in 4 possible addresses. */
52static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 52static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
53 I2C_CLIENT_END }; 53 I2C_CLIENT_END };
54static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
55 54
56/* Insmod parameters */ 55/* Insmod parameters */
57SENSORS_INSMOD_1(lm92); 56I2C_CLIENT_INSMOD_1(lm92);
58 57
59/* The LM92 registers */ 58/* The LM92 registers */
60#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ 59#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
@@ -96,6 +95,7 @@ static struct i2c_driver lm92_driver;
96/* Client data (each client gets its own) */ 95/* Client data (each client gets its own) */
97struct lm92_data { 96struct lm92_data {
98 struct i2c_client client; 97 struct i2c_client client;
98 struct class_device *class_dev;
99 struct semaphore update_lock; 99 struct semaphore update_lock;
100 char valid; /* zero until following fields are valid */ 100 char valid; /* zero until following fields are valid */
101 unsigned long last_updated; /* in jiffies */ 101 unsigned long last_updated; /* in jiffies */
@@ -359,6 +359,12 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind)
359 lm92_init_client(new_client); 359 lm92_init_client(new_client);
360 360
361 /* Register sysfs hooks */ 361 /* Register sysfs hooks */
362 data->class_dev = hwmon_device_register(&new_client->dev);
363 if (IS_ERR(data->class_dev)) {
364 err = PTR_ERR(data->class_dev);
365 goto exit_detach;
366 }
367
362 device_create_file(&new_client->dev, &dev_attr_temp1_input); 368 device_create_file(&new_client->dev, &dev_attr_temp1_input);
363 device_create_file(&new_client->dev, &dev_attr_temp1_crit); 369 device_create_file(&new_client->dev, &dev_attr_temp1_crit);
364 device_create_file(&new_client->dev, &dev_attr_temp1_crit_hyst); 370 device_create_file(&new_client->dev, &dev_attr_temp1_crit_hyst);
@@ -370,6 +376,8 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind)
370 376
371 return 0; 377 return 0;
372 378
379exit_detach:
380 i2c_detach_client(new_client);
373exit_free: 381exit_free:
374 kfree(data); 382 kfree(data);
375exit: 383exit:
@@ -380,20 +388,20 @@ static int lm92_attach_adapter(struct i2c_adapter *adapter)
380{ 388{
381 if (!(adapter->class & I2C_CLASS_HWMON)) 389 if (!(adapter->class & I2C_CLASS_HWMON))
382 return 0; 390 return 0;
383 return i2c_detect(adapter, &addr_data, lm92_detect); 391 return i2c_probe(adapter, &addr_data, lm92_detect);
384} 392}
385 393
386static int lm92_detach_client(struct i2c_client *client) 394static int lm92_detach_client(struct i2c_client *client)
387{ 395{
396 struct lm92_data *data = i2c_get_clientdata(client);
388 int err; 397 int err;
389 398
390 if ((err = i2c_detach_client(client))) { 399 hwmon_device_unregister(data->class_dev);
391 dev_err(&client->dev, "Client deregistration failed, " 400
392 "client not detached.\n"); 401 if ((err = i2c_detach_client(client)))
393 return err; 402 return err;
394 }
395 403
396 kfree(i2c_get_clientdata(client)); 404 kfree(data);
397 return 0; 405 return 0;
398} 406}
399 407
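
Across all of these drivers the same probing boilerplate replaces the old <linux/i2c-sensor.h> machinery: normal_isa[] disappears, SENSORS_INSMOD_n becomes I2C_CLIENT_INSMOD_n (now provided by the core i2c headers), and address scanning goes through i2c_probe(). Roughly (the 0x48 address and the example_* names are placeholders):

#include <linux/i2c.h>

static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };

/* generates addr_data plus the probe/ignore/force module parameters */
I2C_CLIENT_INSMOD_1(example);

static int example_detect(struct i2c_adapter *adapter, int address, int kind);

static int example_attach_adapter(struct i2c_adapter *adapter)
{
	if (!(adapter->class & I2C_CLASS_HWMON))
		return 0;
	return i2c_probe(adapter, &addr_data, example_detect);	/* was i2c_detect() */
}
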
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 3c159f1d49ee..16bf71f3a04d 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -31,20 +31,19 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/i2c-sensor.h> 34#include <linux/hwmon.h>
35 35#include <linux/err.h>
36 36
37static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 37static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a,
38 0x29, 0x2a, 0x2b, 38 0x29, 0x2a, 0x2b,
39 0x4c, 0x4d, 0x4e, 39 0x4c, 0x4d, 0x4e,
40 I2C_CLIENT_END }; 40 I2C_CLIENT_END };
41static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
42 41
43/* 42/*
44 * Insmod parameters 43 * Insmod parameters
45 */ 44 */
46 45
47SENSORS_INSMOD_1(max1619); 46I2C_CLIENT_INSMOD_1(max1619);
48 47
49/* 48/*
50 * The MAX1619 registers 49 * The MAX1619 registers
@@ -104,6 +103,7 @@ static struct i2c_driver max1619_driver = {
104 103
105struct max1619_data { 104struct max1619_data {
106 struct i2c_client client; 105 struct i2c_client client;
106 struct class_device *class_dev;
107 struct semaphore update_lock; 107 struct semaphore update_lock;
108 char valid; /* zero until following fields are valid */ 108 char valid; /* zero until following fields are valid */
109 unsigned long last_updated; /* in jiffies */ 109 unsigned long last_updated; /* in jiffies */
@@ -179,7 +179,7 @@ static int max1619_attach_adapter(struct i2c_adapter *adapter)
179{ 179{
180 if (!(adapter->class & I2C_CLASS_HWMON)) 180 if (!(adapter->class & I2C_CLASS_HWMON))
181 return 0; 181 return 0;
182 return i2c_detect(adapter, &addr_data, max1619_detect); 182 return i2c_probe(adapter, &addr_data, max1619_detect);
183} 183}
184 184
185/* 185/*
@@ -275,6 +275,12 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
275 max1619_init_client(new_client); 275 max1619_init_client(new_client);
276 276
277 /* Register sysfs hooks */ 277 /* Register sysfs hooks */
278 data->class_dev = hwmon_device_register(&new_client->dev);
279 if (IS_ERR(data->class_dev)) {
280 err = PTR_ERR(data->class_dev);
281 goto exit_detach;
282 }
283
278 device_create_file(&new_client->dev, &dev_attr_temp1_input); 284 device_create_file(&new_client->dev, &dev_attr_temp1_input);
279 device_create_file(&new_client->dev, &dev_attr_temp2_input); 285 device_create_file(&new_client->dev, &dev_attr_temp2_input);
280 device_create_file(&new_client->dev, &dev_attr_temp2_min); 286 device_create_file(&new_client->dev, &dev_attr_temp2_min);
@@ -285,6 +291,8 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
285 291
286 return 0; 292 return 0;
287 293
294exit_detach:
295 i2c_detach_client(new_client);
288exit_free: 296exit_free:
289 kfree(data); 297 kfree(data);
290exit: 298exit:
@@ -308,15 +316,15 @@ static void max1619_init_client(struct i2c_client *client)
308 316
309static int max1619_detach_client(struct i2c_client *client) 317static int max1619_detach_client(struct i2c_client *client)
310{ 318{
319 struct max1619_data *data = i2c_get_clientdata(client);
311 int err; 320 int err;
312 321
313 if ((err = i2c_detach_client(client))) { 322 hwmon_device_unregister(data->class_dev);
314 dev_err(&client->dev, "Client deregistration failed, " 323
315 "client not detached.\n"); 324 if ((err = i2c_detach_client(client)))
316 return err; 325 return err;
317 }
318 326
319 kfree(i2c_get_clientdata(client)); 327 kfree(data);
320 return 0; 328 return 0;
321} 329}
322 330
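
The pc87360 hunks that follow additionally collapse the per-offset macro-generated sysfs callbacks into shared handlers: one store routine per attribute family, with the channel selected by the SENSOR_DEVICE_ATTR index and the update_lock semaphore held around the register write. A sketch of that callback shape (the example_* names, EXAMPLE_TO_REG() and example_write_value() are placeholders, not pc87360 code):

#include <linux/i2c.h>
#include <linux/hwmon-sysfs.h>

struct example_data {
	struct i2c_client client;
	struct semaphore update_lock;
	u8 min[3];
};

static ssize_t show_example_min(struct device *dev,
				struct device_attribute *devattr, char *buf);

static ssize_t set_example_min(struct device *dev,
			       struct device_attribute *devattr,
			       const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct example_data *data = i2c_get_clientdata(client);
	long val = simple_strtol(buf, NULL, 10);

	down(&data->update_lock);
	data->min[attr->index] = EXAMPLE_TO_REG(val);
	example_write_value(data, attr->index, data->min[attr->index]);
	up(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
			  show_example_min, set_example_min, 0);
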
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index fa4032d53b79..cf2a35799c7c 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -38,23 +38,19 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/i2c.h> 40#include <linux/i2c.h>
41#include <linux/i2c-sensor.h> 41#include <linux/i2c-isa.h>
42#include <linux/i2c-vid.h> 42#include <linux/hwmon.h>
43#include <linux/hwmon-sysfs.h>
44#include <linux/hwmon-vid.h>
45#include <linux/err.h>
43#include <asm/io.h> 46#include <asm/io.h>
44 47
45static unsigned short normal_i2c[] = { I2C_CLIENT_END };
46static unsigned int normal_isa[] = { 0, I2C_CLIENT_ISA_END };
47static struct i2c_force_data forces[] = {{ NULL }};
48static u8 devid; 48static u8 devid;
49static unsigned int extra_isa[3]; 49static unsigned short address;
50static unsigned short extra_isa[3];
50static u8 confreg[4]; 51static u8 confreg[4];
51 52
52enum chips { any_chip, pc87360, pc87363, pc87364, pc87365, pc87366 }; 53enum chips { any_chip, pc87360, pc87363, pc87364, pc87365, pc87366 };
53static struct i2c_address_data addr_data = {
54 .normal_i2c = normal_i2c,
55 .normal_isa = normal_isa,
56 .forces = forces,
57};
58 54
59static int init = 1; 55static int init = 1;
60module_param(init, int, 0); 56module_param(init, int, 0);
@@ -186,6 +182,7 @@ static inline u8 PWM_TO_REG(int val, int inv)
186 182
187struct pc87360_data { 183struct pc87360_data {
188 struct i2c_client client; 184 struct i2c_client client;
185 struct class_device *class_dev;
189 struct semaphore lock; 186 struct semaphore lock;
190 struct semaphore update_lock; 187 struct semaphore update_lock;
191 char valid; /* !=0 if following fields are valid */ 188 char valid; /* !=0 if following fields are valid */
@@ -224,8 +221,7 @@ struct pc87360_data {
224 * Functions declaration 221 * Functions declaration
225 */ 222 */
226 223
227static int pc87360_attach_adapter(struct i2c_adapter *adapter); 224static int pc87360_detect(struct i2c_adapter *adapter);
228static int pc87360_detect(struct i2c_adapter *adapter, int address, int kind);
229static int pc87360_detach_client(struct i2c_client *client); 225static int pc87360_detach_client(struct i2c_client *client);
230 226
231static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank, 227static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
@@ -242,8 +238,7 @@ static struct pc87360_data *pc87360_update_device(struct device *dev);
242static struct i2c_driver pc87360_driver = { 238static struct i2c_driver pc87360_driver = {
243 .owner = THIS_MODULE, 239 .owner = THIS_MODULE,
244 .name = "pc87360", 240 .name = "pc87360",
245 .flags = I2C_DF_NOTIFY, 241 .attach_adapter = pc87360_detect,
246 .attach_adapter = pc87360_attach_adapter,
247 .detach_client = pc87360_detach_client, 242 .detach_client = pc87360_detach_client,
248}; 243};
249 244
@@ -251,168 +246,178 @@ static struct i2c_driver pc87360_driver = {
251 * Sysfs stuff 246 * Sysfs stuff
252 */ 247 */
253 248
254static ssize_t set_fan_min(struct device *dev, const char *buf, 249static ssize_t show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
255 size_t count, int nr) 250{
251 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
252 struct pc87360_data *data = pc87360_update_device(dev);
253 return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[attr->index],
254 FAN_DIV_FROM_REG(data->fan_status[attr->index])));
255}
256static ssize_t show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
257{
258 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
259 struct pc87360_data *data = pc87360_update_device(dev);
260 return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[attr->index],
261 FAN_DIV_FROM_REG(data->fan_status[attr->index])));
262}
263static ssize_t show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
264{
265 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
266 struct pc87360_data *data = pc87360_update_device(dev);
267 return sprintf(buf, "%u\n",
268 FAN_DIV_FROM_REG(data->fan_status[attr->index]));
269}
270static ssize_t show_fan_status(struct device *dev, struct device_attribute *devattr, char *buf)
256{ 271{
272 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
273 struct pc87360_data *data = pc87360_update_device(dev);
274 return sprintf(buf, "%u\n",
275 FAN_STATUS_FROM_REG(data->fan_status[attr->index]));
276}
277static ssize_t set_fan_min(struct device *dev, struct device_attribute *devattr, const char *buf,
278 size_t count)
279{
280 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
257 struct i2c_client *client = to_i2c_client(dev); 281 struct i2c_client *client = to_i2c_client(dev);
258 struct pc87360_data *data = i2c_get_clientdata(client); 282 struct pc87360_data *data = i2c_get_clientdata(client);
259 long fan_min = simple_strtol(buf, NULL, 10); 283 long fan_min = simple_strtol(buf, NULL, 10);
260 284
261 down(&data->update_lock); 285 down(&data->update_lock);
262 fan_min = FAN_TO_REG(fan_min, FAN_DIV_FROM_REG(data->fan_status[nr])); 286 fan_min = FAN_TO_REG(fan_min, FAN_DIV_FROM_REG(data->fan_status[attr->index]));
263 287
264 /* If it wouldn't fit, change clock divisor */ 288 /* If it wouldn't fit, change clock divisor */
265 while (fan_min > 255 289 while (fan_min > 255
266 && (data->fan_status[nr] & 0x60) != 0x60) { 290 && (data->fan_status[attr->index] & 0x60) != 0x60) {
267 fan_min >>= 1; 291 fan_min >>= 1;
268 data->fan[nr] >>= 1; 292 data->fan[attr->index] >>= 1;
269 data->fan_status[nr] += 0x20; 293 data->fan_status[attr->index] += 0x20;
270 } 294 }
271 data->fan_min[nr] = fan_min > 255 ? 255 : fan_min; 295 data->fan_min[attr->index] = fan_min > 255 ? 255 : fan_min;
272 pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_FAN_MIN(nr), 296 pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_FAN_MIN(attr->index),
273 data->fan_min[nr]); 297 data->fan_min[attr->index]);
274 298
275 /* Write new divider, preserve alarm bits */ 299 /* Write new divider, preserve alarm bits */
276 pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_FAN_STATUS(nr), 300 pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_FAN_STATUS(attr->index),
277 data->fan_status[nr] & 0xF9); 301 data->fan_status[attr->index] & 0xF9);
278 up(&data->update_lock); 302 up(&data->update_lock);
279 303
280 return count; 304 return count;
281} 305}
282 306
283#define show_and_set_fan(offset) \ 307#define show_and_set_fan(offset) \
284static ssize_t show_fan##offset##_input(struct device *dev, struct device_attribute *attr, char *buf) \ 308static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
285{ \ 309 show_fan_input, NULL, offset-1); \
286 struct pc87360_data *data = pc87360_update_device(dev); \ 310static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IWUSR | S_IRUGO, \
287 return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[offset-1], \ 311 show_fan_min, set_fan_min, offset-1); \
288 FAN_DIV_FROM_REG(data->fan_status[offset-1]))); \ 312static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO, \
289} \ 313 show_fan_div, NULL, offset-1); \
290static ssize_t show_fan##offset##_min(struct device *dev, struct device_attribute *attr, char *buf) \ 314static SENSOR_DEVICE_ATTR(fan##offset##_status, S_IRUGO, \
291{ \ 315 show_fan_status, NULL, offset-1);
292 struct pc87360_data *data = pc87360_update_device(dev); \
293 return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[offset-1], \
294 FAN_DIV_FROM_REG(data->fan_status[offset-1]))); \
295} \
296static ssize_t show_fan##offset##_div(struct device *dev, struct device_attribute *attr, char *buf) \
297{ \
298 struct pc87360_data *data = pc87360_update_device(dev); \
299 return sprintf(buf, "%u\n", \
300 FAN_DIV_FROM_REG(data->fan_status[offset-1])); \
301} \
302static ssize_t show_fan##offset##_status(struct device *dev, struct device_attribute *attr, char *buf) \
303{ \
304 struct pc87360_data *data = pc87360_update_device(dev); \
305 return sprintf(buf, "%u\n", \
306 FAN_STATUS_FROM_REG(data->fan_status[offset-1])); \
307} \
308static ssize_t set_fan##offset##_min(struct device *dev, struct device_attribute *attr, const char *buf, \
309 size_t count) \
310{ \
311 return set_fan_min(dev, buf, count, offset-1); \
312} \
313static DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
314 show_fan##offset##_input, NULL); \
315static DEVICE_ATTR(fan##offset##_min, S_IWUSR | S_IRUGO, \
316 show_fan##offset##_min, set_fan##offset##_min); \
317static DEVICE_ATTR(fan##offset##_div, S_IRUGO, \
318 show_fan##offset##_div, NULL); \
319static DEVICE_ATTR(fan##offset##_status, S_IRUGO, \
320 show_fan##offset##_status, NULL);
321show_and_set_fan(1) 316show_and_set_fan(1)
322show_and_set_fan(2) 317show_and_set_fan(2)
323show_and_set_fan(3) 318show_and_set_fan(3)
324 319
320static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
321{
322 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
323 struct pc87360_data *data = pc87360_update_device(dev);
324 return sprintf(buf, "%u\n",
325 PWM_FROM_REG(data->pwm[attr->index],
326 FAN_CONFIG_INVERT(data->fan_conf,
327 attr->index)));
328}
329static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf,
330 size_t count)
331{
332 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
333 struct i2c_client *client = to_i2c_client(dev);
334 struct pc87360_data *data = i2c_get_clientdata(client);
335 long val = simple_strtol(buf, NULL, 10);
336
337 down(&data->update_lock);
338 data->pwm[attr->index] = PWM_TO_REG(val,
339 FAN_CONFIG_INVERT(data->fan_conf, attr->index));
340 pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(attr->index),
341 data->pwm[attr->index]);
342 up(&data->update_lock);
343 return count;
344}
345
325#define show_and_set_pwm(offset) \ 346#define show_and_set_pwm(offset) \
326static ssize_t show_pwm##offset(struct device *dev, struct device_attribute *attr, char *buf) \ 347static SENSOR_DEVICE_ATTR(pwm##offset, S_IWUSR | S_IRUGO, \
327{ \ 348 show_pwm, set_pwm, offset-1);
328 struct pc87360_data *data = pc87360_update_device(dev); \
329 return sprintf(buf, "%u\n", \
330 PWM_FROM_REG(data->pwm[offset-1], \
331 FAN_CONFIG_INVERT(data->fan_conf, \
332 offset-1))); \
333} \
334static ssize_t set_pwm##offset(struct device *dev, struct device_attribute *attr, const char *buf, \
335 size_t count) \
336{ \
337 struct i2c_client *client = to_i2c_client(dev); \
338 struct pc87360_data *data = i2c_get_clientdata(client); \
339 long val = simple_strtol(buf, NULL, 10); \
340 \
341 down(&data->update_lock); \
342 data->pwm[offset-1] = PWM_TO_REG(val, \
343 FAN_CONFIG_INVERT(data->fan_conf, offset-1)); \
344 pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(offset-1), \
345 data->pwm[offset-1]); \
346 up(&data->update_lock); \
347 return count; \
348} \
349static DEVICE_ATTR(pwm##offset, S_IWUSR | S_IRUGO, \
350 show_pwm##offset, set_pwm##offset);
351show_and_set_pwm(1) 349show_and_set_pwm(1)
352show_and_set_pwm(2) 350show_and_set_pwm(2)
353show_and_set_pwm(3) 351show_and_set_pwm(3)
354 352
353static ssize_t show_in_input(struct device *dev, struct device_attribute *devattr, char *buf)
354{
355 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
356 struct pc87360_data *data = pc87360_update_device(dev);
357 return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
358 data->in_vref));
359}
360static ssize_t show_in_min(struct device *dev, struct device_attribute *devattr, char *buf)
361{
362 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
363 struct pc87360_data *data = pc87360_update_device(dev);
364 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
365 data->in_vref));
366}
367static ssize_t show_in_max(struct device *dev, struct device_attribute *devattr, char *buf)
368{
369 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
370 struct pc87360_data *data = pc87360_update_device(dev);
371 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
372 data->in_vref));
373}
374static ssize_t show_in_status(struct device *dev, struct device_attribute *devattr, char *buf)
375{
376 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
377 struct pc87360_data *data = pc87360_update_device(dev);
378 return sprintf(buf, "%u\n", data->in_status[attr->index]);
379}
380static ssize_t set_in_min(struct device *dev, struct device_attribute *devattr, const char *buf,
381 size_t count)
382{
383 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
384 struct i2c_client *client = to_i2c_client(dev);
385 struct pc87360_data *data = i2c_get_clientdata(client);
386 long val = simple_strtol(buf, NULL, 10);
387
388 down(&data->update_lock);
389 data->in_min[attr->index] = IN_TO_REG(val, data->in_vref);
390 pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_IN_MIN,
391 data->in_min[attr->index]);
392 up(&data->update_lock);
393 return count;
394}
395static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr, const char *buf,
396 size_t count)
397{
398 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
399 struct i2c_client *client = to_i2c_client(dev);
400 struct pc87360_data *data = i2c_get_clientdata(client);
401 long val = simple_strtol(buf, NULL, 10);
402
403 down(&data->update_lock);
404 data->in_max[attr->index] = IN_TO_REG(val,
405 data->in_vref);
406 pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_IN_MAX,
407 data->in_max[attr->index]);
408 up(&data->update_lock);
409 return count;
410}
411
355#define show_and_set_in(offset) \ 412#define show_and_set_in(offset) \
356static ssize_t show_in##offset##_input(struct device *dev, struct device_attribute *attr, char *buf) \ 413static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
357{ \ 414 show_in_input, NULL, offset); \
358 struct pc87360_data *data = pc87360_update_device(dev); \ 415static SENSOR_DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \
359 return sprintf(buf, "%u\n", IN_FROM_REG(data->in[offset], \ 416 show_in_min, set_in_min, offset); \
360 data->in_vref)); \ 417static SENSOR_DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \
361} \ 418 show_in_max, set_in_max, offset); \
362static ssize_t show_in##offset##_min(struct device *dev, struct device_attribute *attr, char *buf) \ 419static SENSOR_DEVICE_ATTR(in##offset##_status, S_IRUGO, \
363{ \ 420 show_in_status, NULL, offset);
364 struct pc87360_data *data = pc87360_update_device(dev); \
365 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[offset], \
366 data->in_vref)); \
367} \
368static ssize_t show_in##offset##_max(struct device *dev, struct device_attribute *attr, char *buf) \
369{ \
370 struct pc87360_data *data = pc87360_update_device(dev); \
371 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[offset], \
372 data->in_vref)); \
373} \
374static ssize_t show_in##offset##_status(struct device *dev, struct device_attribute *attr, char *buf) \
375{ \
376 struct pc87360_data *data = pc87360_update_device(dev); \
377 return sprintf(buf, "%u\n", data->in_status[offset]); \
378} \
379static ssize_t set_in##offset##_min(struct device *dev, struct device_attribute *attr, const char *buf, \
380 size_t count) \
381{ \
382 struct i2c_client *client = to_i2c_client(dev); \
383 struct pc87360_data *data = i2c_get_clientdata(client); \
384 long val = simple_strtol(buf, NULL, 10); \
385 \
386 down(&data->update_lock); \
387 data->in_min[offset] = IN_TO_REG(val, data->in_vref); \
388 pc87360_write_value(data, LD_IN, offset, PC87365_REG_IN_MIN, \
389 data->in_min[offset]); \
390 up(&data->update_lock); \
391 return count; \
392} \
393static ssize_t set_in##offset##_max(struct device *dev, struct device_attribute *attr, const char *buf, \
394 size_t count) \
395{ \
396 struct i2c_client *client = to_i2c_client(dev); \
397 struct pc87360_data *data = i2c_get_clientdata(client); \
398 long val = simple_strtol(buf, NULL, 10); \
399 \
400 down(&data->update_lock); \
401 data->in_max[offset] = IN_TO_REG(val, \
402 data->in_vref); \
403 pc87360_write_value(data, LD_IN, offset, PC87365_REG_IN_MAX, \
404 data->in_max[offset]); \
405 up(&data->update_lock); \
406 return count; \
407} \
408static DEVICE_ATTR(in##offset##_input, S_IRUGO, \
409 show_in##offset##_input, NULL); \
410static DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \
411 show_in##offset##_min, set_in##offset##_min); \
412static DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \
413 show_in##offset##_max, set_in##offset##_max); \
414static DEVICE_ATTR(in##offset##_status, S_IRUGO, \
415 show_in##offset##_status, NULL);
416show_and_set_in(0) 421show_and_set_in(0)
417show_and_set_in(1) 422show_and_set_in(1)
418show_and_set_in(2) 423show_and_set_in(2)
@@ -425,88 +430,97 @@ show_and_set_in(8)
425show_and_set_in(9) 430show_and_set_in(9)
426show_and_set_in(10) 431show_and_set_in(10)
427 432
433static ssize_t show_therm_input(struct device *dev, struct device_attribute *devattr, char *buf)
434{
435 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
436 struct pc87360_data *data = pc87360_update_device(dev);
437 return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
438 data->in_vref));
439}
440static ssize_t show_therm_min(struct device *dev, struct device_attribute *devattr, char *buf)
441{
442 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
443 struct pc87360_data *data = pc87360_update_device(dev);
444 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
445 data->in_vref));
446}
447static ssize_t show_therm_max(struct device *dev, struct device_attribute *devattr, char *buf)
448{
449 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
450 struct pc87360_data *data = pc87360_update_device(dev);
451 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
452 data->in_vref));
453}
454static ssize_t show_therm_crit(struct device *dev, struct device_attribute *devattr, char *buf)
455{
456 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
457 struct pc87360_data *data = pc87360_update_device(dev);
458 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[attr->index-11],
459 data->in_vref));
460}
461static ssize_t show_therm_status(struct device *dev, struct device_attribute *devattr, char *buf)
462{
463 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
464 struct pc87360_data *data = pc87360_update_device(dev);
465 return sprintf(buf, "%u\n", data->in_status[attr->index]);
466}
467static ssize_t set_therm_min(struct device *dev, struct device_attribute *devattr, const char *buf,
468 size_t count)
469{
470 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
471 struct i2c_client *client = to_i2c_client(dev);
472 struct pc87360_data *data = i2c_get_clientdata(client);
473 long val = simple_strtol(buf, NULL, 10);
474
475 down(&data->update_lock);
476 data->in_min[attr->index] = IN_TO_REG(val, data->in_vref);
477 pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_TEMP_MIN,
478 data->in_min[attr->index]);
479 up(&data->update_lock);
480 return count;
481}
482static ssize_t set_therm_max(struct device *dev, struct device_attribute *devattr, const char *buf,
483 size_t count)
484{
485 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
486 struct i2c_client *client = to_i2c_client(dev);
487 struct pc87360_data *data = i2c_get_clientdata(client);
488 long val = simple_strtol(buf, NULL, 10);
489
490 down(&data->update_lock);
491 data->in_max[attr->index] = IN_TO_REG(val, data->in_vref);
492 pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_TEMP_MAX,
493 data->in_max[attr->index]);
494 up(&data->update_lock);
495 return count;
496}
497static ssize_t set_therm_crit(struct device *dev, struct device_attribute *devattr, const char *buf,
498 size_t count)
499{
500 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
501 struct i2c_client *client = to_i2c_client(dev);
502 struct pc87360_data *data = i2c_get_clientdata(client);
503 long val = simple_strtol(buf, NULL, 10);
504
505 down(&data->update_lock);
506 data->in_crit[attr->index-11] = IN_TO_REG(val, data->in_vref);
507 pc87360_write_value(data, LD_IN, attr->index, PC87365_REG_TEMP_CRIT,
508 data->in_crit[attr->index-11]);
509 up(&data->update_lock);
510 return count;
511}
512
428#define show_and_set_therm(offset) \ 513#define show_and_set_therm(offset) \
429static ssize_t show_temp##offset##_input(struct device *dev, struct device_attribute *attr, char *buf) \ 514static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
430{ \ 515 show_therm_input, NULL, 11+offset-4); \
431 struct pc87360_data *data = pc87360_update_device(dev); \ 516static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
432 return sprintf(buf, "%u\n", IN_FROM_REG(data->in[offset+7], \ 517 show_therm_min, set_therm_min, 11+offset-4); \
433 data->in_vref)); \ 518static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
434} \ 519 show_therm_max, set_therm_max, 11+offset-4); \
435static ssize_t show_temp##offset##_min(struct device *dev, struct device_attribute *attr, char *buf) \ 520static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IWUSR | S_IRUGO, \
436{ \ 521 show_therm_crit, set_therm_crit, 11+offset-4); \
437 struct pc87360_data *data = pc87360_update_device(dev); \ 522static SENSOR_DEVICE_ATTR(temp##offset##_status, S_IRUGO, \
438 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[offset+7], \ 523 show_therm_status, NULL, 11+offset-4);
439 data->in_vref)); \
440} \
441static ssize_t show_temp##offset##_max(struct device *dev, struct device_attribute *attr, char *buf) \
442{ \
443 struct pc87360_data *data = pc87360_update_device(dev); \
444 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[offset+7], \
445 data->in_vref)); \
446} \
447static ssize_t show_temp##offset##_crit(struct device *dev, struct device_attribute *attr, char *buf) \
448{ \
449 struct pc87360_data *data = pc87360_update_device(dev); \
450 return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[offset-4], \
451 data->in_vref)); \
452} \
453static ssize_t show_temp##offset##_status(struct device *dev, struct device_attribute *attr, char *buf) \
454{ \
455 struct pc87360_data *data = pc87360_update_device(dev); \
456 return sprintf(buf, "%u\n", data->in_status[offset+7]); \
457} \
458static ssize_t set_temp##offset##_min(struct device *dev, struct device_attribute *attr, const char *buf, \
459 size_t count) \
460{ \
461 struct i2c_client *client = to_i2c_client(dev); \
462 struct pc87360_data *data = i2c_get_clientdata(client); \
463 long val = simple_strtol(buf, NULL, 10); \
464 \
465 down(&data->update_lock); \
466 data->in_min[offset+7] = IN_TO_REG(val, data->in_vref); \
467 pc87360_write_value(data, LD_IN, offset+7, PC87365_REG_TEMP_MIN, \
468 data->in_min[offset+7]); \
469 up(&data->update_lock); \
470 return count; \
471} \
472static ssize_t set_temp##offset##_max(struct device *dev, struct device_attribute *attr, const char *buf, \
473 size_t count) \
474{ \
475 struct i2c_client *client = to_i2c_client(dev); \
476 struct pc87360_data *data = i2c_get_clientdata(client); \
477 long val = simple_strtol(buf, NULL, 10); \
478 \
479 down(&data->update_lock); \
480 data->in_max[offset+7] = IN_TO_REG(val, data->in_vref); \
481 pc87360_write_value(data, LD_IN, offset+7, PC87365_REG_TEMP_MAX, \
482 data->in_max[offset+7]); \
483 up(&data->update_lock); \
484 return count; \
485} \
486static ssize_t set_temp##offset##_crit(struct device *dev, struct device_attribute *attr, const char *buf, \
487 size_t count) \
488{ \
489 struct i2c_client *client = to_i2c_client(dev); \
490 struct pc87360_data *data = i2c_get_clientdata(client); \
491 long val = simple_strtol(buf, NULL, 10); \
492 \
493 down(&data->update_lock); \
494 data->in_crit[offset-4] = IN_TO_REG(val, data->in_vref); \
495 pc87360_write_value(data, LD_IN, offset+7, PC87365_REG_TEMP_CRIT, \
496 data->in_crit[offset-4]); \
497 up(&data->update_lock); \
498 return count; \
499} \
500static DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
501 show_temp##offset##_input, NULL); \
502static DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
503 show_temp##offset##_min, set_temp##offset##_min); \
504static DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
505 show_temp##offset##_max, set_temp##offset##_max); \
506static DEVICE_ATTR(temp##offset##_crit, S_IWUSR | S_IRUGO, \
507 show_temp##offset##_crit, set_temp##offset##_crit); \
508static DEVICE_ATTR(temp##offset##_status, S_IRUGO, \
509 show_temp##offset##_status, NULL);
510show_and_set_therm(4) 524show_and_set_therm(4)
511show_and_set_therm(5) 525show_and_set_therm(5)
512show_and_set_therm(6) 526show_and_set_therm(6)
@@ -539,84 +553,93 @@ static ssize_t show_in_alarms(struct device *dev, struct device_attribute *attr,
539} 553}
540static DEVICE_ATTR(alarms_in, S_IRUGO, show_in_alarms, NULL); 554static DEVICE_ATTR(alarms_in, S_IRUGO, show_in_alarms, NULL);
541 555
556static ssize_t show_temp_input(struct device *dev, struct device_attribute *devattr, char *buf)
557{
558 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
559 struct pc87360_data *data = pc87360_update_device(dev);
560 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
561}
562static ssize_t show_temp_min(struct device *dev, struct device_attribute *devattr, char *buf)
563{
564 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
565 struct pc87360_data *data = pc87360_update_device(dev);
566 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[attr->index]));
567}
568static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf)
569{
570 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
571 struct pc87360_data *data = pc87360_update_device(dev);
572 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[attr->index]));
573}
574static ssize_t show_temp_crit(struct device *dev, struct device_attribute *devattr, char *buf)
575{
576 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
577 struct pc87360_data *data = pc87360_update_device(dev);
578 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[attr->index]));
579}
580static ssize_t show_temp_status(struct device *dev, struct device_attribute *devattr, char *buf)
581{
582 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
583 struct pc87360_data *data = pc87360_update_device(dev);
584 return sprintf(buf, "%d\n", data->temp_status[attr->index]);
585}
586static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr, const char *buf,
587 size_t count)
588{
589 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
590 struct i2c_client *client = to_i2c_client(dev);
591 struct pc87360_data *data = i2c_get_clientdata(client);
592 long val = simple_strtol(buf, NULL, 10);
593
594 down(&data->update_lock);
595 data->temp_min[attr->index] = TEMP_TO_REG(val);
596 pc87360_write_value(data, LD_TEMP, attr->index, PC87365_REG_TEMP_MIN,
597 data->temp_min[attr->index]);
598 up(&data->update_lock);
599 return count;
600}
601static ssize_t set_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf,
602 size_t count)
603{
604 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
605 struct i2c_client *client = to_i2c_client(dev);
606 struct pc87360_data *data = i2c_get_clientdata(client);
607 long val = simple_strtol(buf, NULL, 10);
608
609 down(&data->update_lock);
610 data->temp_max[attr->index] = TEMP_TO_REG(val);
611 pc87360_write_value(data, LD_TEMP, attr->index, PC87365_REG_TEMP_MAX,
612 data->temp_max[attr->index]);
613 up(&data->update_lock);
614 return count;
615}
616static ssize_t set_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf,
617 size_t count)
618{
619 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
620 struct i2c_client *client = to_i2c_client(dev);
621 struct pc87360_data *data = i2c_get_clientdata(client);
622 long val = simple_strtol(buf, NULL, 10);
623
624 down(&data->update_lock);
625 data->temp_crit[attr->index] = TEMP_TO_REG(val);
626 pc87360_write_value(data, LD_TEMP, attr->index, PC87365_REG_TEMP_CRIT,
627 data->temp_crit[attr->index]);
628 up(&data->update_lock);
629 return count;
630}
631
542#define show_and_set_temp(offset) \ 632#define show_and_set_temp(offset) \
543static ssize_t show_temp##offset##_input(struct device *dev, struct device_attribute *attr, char *buf) \ 633static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
544{ \ 634 show_temp_input, NULL, offset-1); \
545 struct pc87360_data *data = pc87360_update_device(dev); \ 635static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
546 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[offset-1])); \ 636 show_temp_min, set_temp_min, offset-1); \
547} \ 637static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
548static ssize_t show_temp##offset##_min(struct device *dev, struct device_attribute *attr, char *buf) \ 638 show_temp_max, set_temp_max, offset-1); \
549{ \ 639static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IWUSR | S_IRUGO, \
550 struct pc87360_data *data = pc87360_update_device(dev); \ 640 show_temp_crit, set_temp_crit, offset-1); \
551 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[offset-1])); \ 641static SENSOR_DEVICE_ATTR(temp##offset##_status, S_IRUGO, \
552} \ 642 show_temp_status, NULL, offset-1);
553static ssize_t show_temp##offset##_max(struct device *dev, struct device_attribute *attr, char *buf) \
554{ \
555 struct pc87360_data *data = pc87360_update_device(dev); \
556 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[offset-1])); \
557}\
558static ssize_t show_temp##offset##_crit(struct device *dev, struct device_attribute *attr, char *buf) \
559{ \
560 struct pc87360_data *data = pc87360_update_device(dev); \
561 return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[offset-1])); \
562}\
563static ssize_t show_temp##offset##_status(struct device *dev, struct device_attribute *attr, char *buf) \
564{ \
565 struct pc87360_data *data = pc87360_update_device(dev); \
566 return sprintf(buf, "%d\n", data->temp_status[offset-1]); \
567}\
568static ssize_t set_temp##offset##_min(struct device *dev, struct device_attribute *attr, const char *buf, \
569 size_t count) \
570{ \
571 struct i2c_client *client = to_i2c_client(dev); \
572 struct pc87360_data *data = i2c_get_clientdata(client); \
573 long val = simple_strtol(buf, NULL, 10); \
574 \
575 down(&data->update_lock); \
576 data->temp_min[offset-1] = TEMP_TO_REG(val); \
577 pc87360_write_value(data, LD_TEMP, offset-1, PC87365_REG_TEMP_MIN, \
578 data->temp_min[offset-1]); \
579 up(&data->update_lock); \
580 return count; \
581} \
582static ssize_t set_temp##offset##_max(struct device *dev, struct device_attribute *attr, const char *buf, \
583 size_t count) \
584{ \
585 struct i2c_client *client = to_i2c_client(dev); \
586 struct pc87360_data *data = i2c_get_clientdata(client); \
587 long val = simple_strtol(buf, NULL, 10); \
588 \
589 down(&data->update_lock); \
590 data->temp_max[offset-1] = TEMP_TO_REG(val); \
591 pc87360_write_value(data, LD_TEMP, offset-1, PC87365_REG_TEMP_MAX, \
592 data->temp_max[offset-1]); \
593 up(&data->update_lock); \
594 return count; \
595} \
596static ssize_t set_temp##offset##_crit(struct device *dev, struct device_attribute *attr, const char *buf, \
597 size_t count) \
598{ \
599 struct i2c_client *client = to_i2c_client(dev); \
600 struct pc87360_data *data = i2c_get_clientdata(client); \
601 long val = simple_strtol(buf, NULL, 10); \
602 \
603 down(&data->update_lock); \
604 data->temp_crit[offset-1] = TEMP_TO_REG(val); \
605 pc87360_write_value(data, LD_TEMP, offset-1, PC87365_REG_TEMP_CRIT, \
606 data->temp_crit[offset-1]); \
607 up(&data->update_lock); \
608 return count; \
609} \
610static DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
611 show_temp##offset##_input, NULL); \
612static DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
613 show_temp##offset##_min, set_temp##offset##_min); \
614static DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
615 show_temp##offset##_max, set_temp##offset##_max); \
616static DEVICE_ATTR(temp##offset##_crit, S_IWUSR | S_IRUGO, \
617 show_temp##offset##_crit, set_temp##offset##_crit); \
618static DEVICE_ATTR(temp##offset##_status, S_IRUGO, \
619 show_temp##offset##_status, NULL);
620show_and_set_temp(1) 643show_and_set_temp(1)
621show_and_set_temp(2) 644show_and_set_temp(2)
622show_and_set_temp(3) 645show_and_set_temp(3)
@@ -632,12 +655,7 @@ static DEVICE_ATTR(alarms_temp, S_IRUGO, show_temp_alarms, NULL);
632 * Device detection, registration and update 655 * Device detection, registration and update
633 */ 656 */
634 657
635static int pc87360_attach_adapter(struct i2c_adapter *adapter) 658static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses)
636{
637 return i2c_detect(adapter, &addr_data, pc87360_detect);
638}
639
640static int pc87360_find(int sioaddr, u8 *devid, int *address)
641{ 659{
642 u16 val; 660 u16 val;
643 int i; 661 int i;
@@ -683,7 +701,7 @@ static int pc87360_find(int sioaddr, u8 *devid, int *address)
683 continue; 701 continue;
684 } 702 }
685 703
686 address[i] = val; 704 addresses[i] = val;
687 705
688 if (i==0) { /* Fans */ 706 if (i==0) { /* Fans */
689 confreg[0] = superio_inb(sioaddr, 0xF0); 707 confreg[0] = superio_inb(sioaddr, 0xF0);
@@ -727,9 +745,7 @@ static int pc87360_find(int sioaddr, u8 *devid, int *address)
727 return 0; 745 return 0;
728} 746}
729 747
730/* We don't really care about the address. 748static int pc87360_detect(struct i2c_adapter *adapter)
731 Read from extra_isa instead. */
732int pc87360_detect(struct i2c_adapter *adapter, int address, int kind)
733{ 749{
734 int i; 750 int i;
735 struct i2c_client *new_client; 751 struct i2c_client *new_client;
@@ -738,9 +754,6 @@ int pc87360_detect(struct i2c_adapter *adapter, int address, int kind)
738 const char *name = "pc87360"; 754 const char *name = "pc87360";
739 int use_thermistors = 0; 755 int use_thermistors = 0;
740 756
741 if (!i2c_is_isa_adapter(adapter))
742 return -ENODEV;
743
744 if (!(data = kmalloc(sizeof(struct pc87360_data), GFP_KERNEL))) 757 if (!(data = kmalloc(sizeof(struct pc87360_data), GFP_KERNEL)))
745 return -ENOMEM; 758 return -ENOMEM;
746 memset(data, 0x00, sizeof(struct pc87360_data)); 759 memset(data, 0x00, sizeof(struct pc87360_data));
@@ -838,51 +851,57 @@ int pc87360_detect(struct i2c_adapter *adapter, int address, int kind)
838 } 851 }
839 852
840 /* Register sysfs hooks */ 853 /* Register sysfs hooks */
854 data->class_dev = hwmon_device_register(&new_client->dev);
855 if (IS_ERR(data->class_dev)) {
856 err = PTR_ERR(data->class_dev);
857 goto ERROR3;
858 }
859
841 if (data->innr) { 860 if (data->innr) {
842 device_create_file(&new_client->dev, &dev_attr_in0_input); 861 device_create_file(&new_client->dev, &sensor_dev_attr_in0_input.dev_attr);
843 device_create_file(&new_client->dev, &dev_attr_in1_input); 862 device_create_file(&new_client->dev, &sensor_dev_attr_in1_input.dev_attr);
844 device_create_file(&new_client->dev, &dev_attr_in2_input); 863 device_create_file(&new_client->dev, &sensor_dev_attr_in2_input.dev_attr);
845 device_create_file(&new_client->dev, &dev_attr_in3_input); 864 device_create_file(&new_client->dev, &sensor_dev_attr_in3_input.dev_attr);
846 device_create_file(&new_client->dev, &dev_attr_in4_input); 865 device_create_file(&new_client->dev, &sensor_dev_attr_in4_input.dev_attr);
847 device_create_file(&new_client->dev, &dev_attr_in5_input); 866 device_create_file(&new_client->dev, &sensor_dev_attr_in5_input.dev_attr);
848 device_create_file(&new_client->dev, &dev_attr_in6_input); 867 device_create_file(&new_client->dev, &sensor_dev_attr_in6_input.dev_attr);
849 device_create_file(&new_client->dev, &dev_attr_in7_input); 868 device_create_file(&new_client->dev, &sensor_dev_attr_in7_input.dev_attr);
850 device_create_file(&new_client->dev, &dev_attr_in8_input); 869 device_create_file(&new_client->dev, &sensor_dev_attr_in8_input.dev_attr);
851 device_create_file(&new_client->dev, &dev_attr_in9_input); 870 device_create_file(&new_client->dev, &sensor_dev_attr_in9_input.dev_attr);
852 device_create_file(&new_client->dev, &dev_attr_in10_input); 871 device_create_file(&new_client->dev, &sensor_dev_attr_in10_input.dev_attr);
853 device_create_file(&new_client->dev, &dev_attr_in0_min); 872 device_create_file(&new_client->dev, &sensor_dev_attr_in0_min.dev_attr);
854 device_create_file(&new_client->dev, &dev_attr_in1_min); 873 device_create_file(&new_client->dev, &sensor_dev_attr_in1_min.dev_attr);
855 device_create_file(&new_client->dev, &dev_attr_in2_min); 874 device_create_file(&new_client->dev, &sensor_dev_attr_in2_min.dev_attr);
856 device_create_file(&new_client->dev, &dev_attr_in3_min); 875 device_create_file(&new_client->dev, &sensor_dev_attr_in3_min.dev_attr);
857 device_create_file(&new_client->dev, &dev_attr_in4_min); 876 device_create_file(&new_client->dev, &sensor_dev_attr_in4_min.dev_attr);
858 device_create_file(&new_client->dev, &dev_attr_in5_min); 877 device_create_file(&new_client->dev, &sensor_dev_attr_in5_min.dev_attr);
859 device_create_file(&new_client->dev, &dev_attr_in6_min); 878 device_create_file(&new_client->dev, &sensor_dev_attr_in6_min.dev_attr);
860 device_create_file(&new_client->dev, &dev_attr_in7_min); 879 device_create_file(&new_client->dev, &sensor_dev_attr_in7_min.dev_attr);
861 device_create_file(&new_client->dev, &dev_attr_in8_min); 880 device_create_file(&new_client->dev, &sensor_dev_attr_in8_min.dev_attr);
862 device_create_file(&new_client->dev, &dev_attr_in9_min); 881 device_create_file(&new_client->dev, &sensor_dev_attr_in9_min.dev_attr);
863 device_create_file(&new_client->dev, &dev_attr_in10_min); 882 device_create_file(&new_client->dev, &sensor_dev_attr_in10_min.dev_attr);
864 device_create_file(&new_client->dev, &dev_attr_in0_max); 883 device_create_file(&new_client->dev, &sensor_dev_attr_in0_max.dev_attr);
865 device_create_file(&new_client->dev, &dev_attr_in1_max); 884 device_create_file(&new_client->dev, &sensor_dev_attr_in1_max.dev_attr);
866 device_create_file(&new_client->dev, &dev_attr_in2_max); 885 device_create_file(&new_client->dev, &sensor_dev_attr_in2_max.dev_attr);
867 device_create_file(&new_client->dev, &dev_attr_in3_max); 886 device_create_file(&new_client->dev, &sensor_dev_attr_in3_max.dev_attr);
868 device_create_file(&new_client->dev, &dev_attr_in4_max); 887 device_create_file(&new_client->dev, &sensor_dev_attr_in4_max.dev_attr);
869 device_create_file(&new_client->dev, &dev_attr_in5_max); 888 device_create_file(&new_client->dev, &sensor_dev_attr_in5_max.dev_attr);
870 device_create_file(&new_client->dev, &dev_attr_in6_max); 889 device_create_file(&new_client->dev, &sensor_dev_attr_in6_max.dev_attr);
871 device_create_file(&new_client->dev, &dev_attr_in7_max); 890 device_create_file(&new_client->dev, &sensor_dev_attr_in7_max.dev_attr);
872 device_create_file(&new_client->dev, &dev_attr_in8_max); 891 device_create_file(&new_client->dev, &sensor_dev_attr_in8_max.dev_attr);
873 device_create_file(&new_client->dev, &dev_attr_in9_max); 892 device_create_file(&new_client->dev, &sensor_dev_attr_in9_max.dev_attr);
874 device_create_file(&new_client->dev, &dev_attr_in10_max); 893 device_create_file(&new_client->dev, &sensor_dev_attr_in10_max.dev_attr);
875 device_create_file(&new_client->dev, &dev_attr_in0_status); 894 device_create_file(&new_client->dev, &sensor_dev_attr_in0_status.dev_attr);
876 device_create_file(&new_client->dev, &dev_attr_in1_status); 895 device_create_file(&new_client->dev, &sensor_dev_attr_in1_status.dev_attr);
877 device_create_file(&new_client->dev, &dev_attr_in2_status); 896 device_create_file(&new_client->dev, &sensor_dev_attr_in2_status.dev_attr);
878 device_create_file(&new_client->dev, &dev_attr_in3_status); 897 device_create_file(&new_client->dev, &sensor_dev_attr_in3_status.dev_attr);
879 device_create_file(&new_client->dev, &dev_attr_in4_status); 898 device_create_file(&new_client->dev, &sensor_dev_attr_in4_status.dev_attr);
880 device_create_file(&new_client->dev, &dev_attr_in5_status); 899 device_create_file(&new_client->dev, &sensor_dev_attr_in5_status.dev_attr);
881 device_create_file(&new_client->dev, &dev_attr_in6_status); 900 device_create_file(&new_client->dev, &sensor_dev_attr_in6_status.dev_attr);
882 device_create_file(&new_client->dev, &dev_attr_in7_status); 901 device_create_file(&new_client->dev, &sensor_dev_attr_in7_status.dev_attr);
883 device_create_file(&new_client->dev, &dev_attr_in8_status); 902 device_create_file(&new_client->dev, &sensor_dev_attr_in8_status.dev_attr);
884 device_create_file(&new_client->dev, &dev_attr_in9_status); 903 device_create_file(&new_client->dev, &sensor_dev_attr_in9_status.dev_attr);
885 device_create_file(&new_client->dev, &dev_attr_in10_status); 904 device_create_file(&new_client->dev, &sensor_dev_attr_in10_status.dev_attr);
886 905
887 device_create_file(&new_client->dev, &dev_attr_cpu0_vid); 906 device_create_file(&new_client->dev, &dev_attr_cpu0_vid);
888 device_create_file(&new_client->dev, &dev_attr_vrm); 907 device_create_file(&new_client->dev, &dev_attr_vrm);
@@ -890,90 +909,92 @@ int pc87360_detect(struct i2c_adapter *adapter, int address, int kind)
890 } 909 }
891 910
892 if (data->tempnr) { 911 if (data->tempnr) {
893 device_create_file(&new_client->dev, &dev_attr_temp1_input); 912 device_create_file(&new_client->dev, &sensor_dev_attr_temp1_input.dev_attr);
894 device_create_file(&new_client->dev, &dev_attr_temp2_input); 913 device_create_file(&new_client->dev, &sensor_dev_attr_temp2_input.dev_attr);
895 device_create_file(&new_client->dev, &dev_attr_temp1_min); 914 device_create_file(&new_client->dev, &sensor_dev_attr_temp1_min.dev_attr);
896 device_create_file(&new_client->dev, &dev_attr_temp2_min); 915 device_create_file(&new_client->dev, &sensor_dev_attr_temp2_min.dev_attr);
897 device_create_file(&new_client->dev, &dev_attr_temp1_max); 916 device_create_file(&new_client->dev, &sensor_dev_attr_temp1_max.dev_attr);
898 device_create_file(&new_client->dev, &dev_attr_temp2_max); 917 device_create_file(&new_client->dev, &sensor_dev_attr_temp2_max.dev_attr);
899 device_create_file(&new_client->dev, &dev_attr_temp1_crit); 918 device_create_file(&new_client->dev, &sensor_dev_attr_temp1_crit.dev_attr);
900 device_create_file(&new_client->dev, &dev_attr_temp2_crit); 919 device_create_file(&new_client->dev, &sensor_dev_attr_temp2_crit.dev_attr);
901 device_create_file(&new_client->dev, &dev_attr_temp1_status); 920 device_create_file(&new_client->dev, &sensor_dev_attr_temp1_status.dev_attr);
902 device_create_file(&new_client->dev, &dev_attr_temp2_status); 921 device_create_file(&new_client->dev, &sensor_dev_attr_temp2_status.dev_attr);
903 922
904 device_create_file(&new_client->dev, &dev_attr_alarms_temp); 923 device_create_file(&new_client->dev, &dev_attr_alarms_temp);
905 } 924 }
906 if (data->tempnr == 3) { 925 if (data->tempnr == 3) {
907 device_create_file(&new_client->dev, &dev_attr_temp3_input); 926 device_create_file(&new_client->dev, &sensor_dev_attr_temp3_input.dev_attr);
908 device_create_file(&new_client->dev, &dev_attr_temp3_min); 927 device_create_file(&new_client->dev, &sensor_dev_attr_temp3_min.dev_attr);
909 device_create_file(&new_client->dev, &dev_attr_temp3_max); 928 device_create_file(&new_client->dev, &sensor_dev_attr_temp3_max.dev_attr);
910 device_create_file(&new_client->dev, &dev_attr_temp3_crit); 929 device_create_file(&new_client->dev, &sensor_dev_attr_temp3_crit.dev_attr);
911 device_create_file(&new_client->dev, &dev_attr_temp3_status); 930 device_create_file(&new_client->dev, &sensor_dev_attr_temp3_status.dev_attr);
912 } 931 }
913 if (data->innr == 14) { 932 if (data->innr == 14) {
914 device_create_file(&new_client->dev, &dev_attr_temp4_input); 933 device_create_file(&new_client->dev, &sensor_dev_attr_temp4_input.dev_attr);
915 device_create_file(&new_client->dev, &dev_attr_temp5_input); 934 device_create_file(&new_client->dev, &sensor_dev_attr_temp5_input.dev_attr);
916 device_create_file(&new_client->dev, &dev_attr_temp6_input); 935 device_create_file(&new_client->dev, &sensor_dev_attr_temp6_input.dev_attr);
917 device_create_file(&new_client->dev, &dev_attr_temp4_min); 936 device_create_file(&new_client->dev, &sensor_dev_attr_temp4_min.dev_attr);
918 device_create_file(&new_client->dev, &dev_attr_temp5_min); 937 device_create_file(&new_client->dev, &sensor_dev_attr_temp5_min.dev_attr);
919 device_create_file(&new_client->dev, &dev_attr_temp6_min); 938 device_create_file(&new_client->dev, &sensor_dev_attr_temp6_min.dev_attr);
920 device_create_file(&new_client->dev, &dev_attr_temp4_max); 939 device_create_file(&new_client->dev, &sensor_dev_attr_temp4_max.dev_attr);
921 device_create_file(&new_client->dev, &dev_attr_temp5_max); 940 device_create_file(&new_client->dev, &sensor_dev_attr_temp5_max.dev_attr);
922 device_create_file(&new_client->dev, &dev_attr_temp6_max); 941 device_create_file(&new_client->dev, &sensor_dev_attr_temp6_max.dev_attr);
923 device_create_file(&new_client->dev, &dev_attr_temp4_crit); 942 device_create_file(&new_client->dev, &sensor_dev_attr_temp4_crit.dev_attr);
924 device_create_file(&new_client->dev, &dev_attr_temp5_crit); 943 device_create_file(&new_client->dev, &sensor_dev_attr_temp5_crit.dev_attr);
925 device_create_file(&new_client->dev, &dev_attr_temp6_crit); 944 device_create_file(&new_client->dev, &sensor_dev_attr_temp6_crit.dev_attr);
926 device_create_file(&new_client->dev, &dev_attr_temp4_status); 945 device_create_file(&new_client->dev, &sensor_dev_attr_temp4_status.dev_attr);
927 device_create_file(&new_client->dev, &dev_attr_temp5_status); 946 device_create_file(&new_client->dev, &sensor_dev_attr_temp5_status.dev_attr);
928 device_create_file(&new_client->dev, &dev_attr_temp6_status); 947 device_create_file(&new_client->dev, &sensor_dev_attr_temp6_status.dev_attr);
929 } 948 }
930 949
931 if (data->fannr) { 950 if (data->fannr) {
932 if (FAN_CONFIG_MONITOR(data->fan_conf, 0)) { 951 if (FAN_CONFIG_MONITOR(data->fan_conf, 0)) {
933 device_create_file(&new_client->dev, 952 device_create_file(&new_client->dev,
934 &dev_attr_fan1_input); 953 &sensor_dev_attr_fan1_input.dev_attr);
935 device_create_file(&new_client->dev, 954 device_create_file(&new_client->dev,
936 &dev_attr_fan1_min); 955 &sensor_dev_attr_fan1_min.dev_attr);
937 device_create_file(&new_client->dev, 956 device_create_file(&new_client->dev,
938 &dev_attr_fan1_div); 957 &sensor_dev_attr_fan1_div.dev_attr);
939 device_create_file(&new_client->dev, 958 device_create_file(&new_client->dev,
940 &dev_attr_fan1_status); 959 &sensor_dev_attr_fan1_status.dev_attr);
941 } 960 }
942 961
943 if (FAN_CONFIG_MONITOR(data->fan_conf, 1)) { 962 if (FAN_CONFIG_MONITOR(data->fan_conf, 1)) {
944 device_create_file(&new_client->dev, 963 device_create_file(&new_client->dev,
945 &dev_attr_fan2_input); 964 &sensor_dev_attr_fan2_input.dev_attr);
946 device_create_file(&new_client->dev, 965 device_create_file(&new_client->dev,
947 &dev_attr_fan2_min); 966 &sensor_dev_attr_fan2_min.dev_attr);
948 device_create_file(&new_client->dev, 967 device_create_file(&new_client->dev,
949 &dev_attr_fan2_div); 968 &sensor_dev_attr_fan2_div.dev_attr);
950 device_create_file(&new_client->dev, 969 device_create_file(&new_client->dev,
951 &dev_attr_fan2_status); 970 &sensor_dev_attr_fan2_status.dev_attr);
952 } 971 }
953 972
954 if (FAN_CONFIG_CONTROL(data->fan_conf, 0)) 973 if (FAN_CONFIG_CONTROL(data->fan_conf, 0))
955 device_create_file(&new_client->dev, &dev_attr_pwm1); 974 device_create_file(&new_client->dev, &sensor_dev_attr_pwm1.dev_attr);
956 if (FAN_CONFIG_CONTROL(data->fan_conf, 1)) 975 if (FAN_CONFIG_CONTROL(data->fan_conf, 1))
957 device_create_file(&new_client->dev, &dev_attr_pwm2); 976 device_create_file(&new_client->dev, &sensor_dev_attr_pwm2.dev_attr);
958 } 977 }
959 if (data->fannr == 3) { 978 if (data->fannr == 3) {
960 if (FAN_CONFIG_MONITOR(data->fan_conf, 2)) { 979 if (FAN_CONFIG_MONITOR(data->fan_conf, 2)) {
961 device_create_file(&new_client->dev, 980 device_create_file(&new_client->dev,
962 &dev_attr_fan3_input); 981 &sensor_dev_attr_fan3_input.dev_attr);
963 device_create_file(&new_client->dev, 982 device_create_file(&new_client->dev,
964 &dev_attr_fan3_min); 983 &sensor_dev_attr_fan3_min.dev_attr);
965 device_create_file(&new_client->dev, 984 device_create_file(&new_client->dev,
966 &dev_attr_fan3_div); 985 &sensor_dev_attr_fan3_div.dev_attr);
967 device_create_file(&new_client->dev, 986 device_create_file(&new_client->dev,
968 &dev_attr_fan3_status); 987 &sensor_dev_attr_fan3_status.dev_attr);
969 } 988 }
970 989
971 if (FAN_CONFIG_CONTROL(data->fan_conf, 2)) 990 if (FAN_CONFIG_CONTROL(data->fan_conf, 2))
972 device_create_file(&new_client->dev, &dev_attr_pwm3); 991 device_create_file(&new_client->dev, &sensor_dev_attr_pwm3.dev_attr);
973 } 992 }
974 993
975 return 0; 994 return 0;
976 995
996ERROR3:
997 i2c_detach_client(new_client);
977ERROR2: 998ERROR2:
978 for (i = 0; i < 3; i++) { 999 for (i = 0; i < 3; i++) {
979 if (data->address[i]) { 1000 if (data->address[i]) {
@@ -990,11 +1011,10 @@ static int pc87360_detach_client(struct i2c_client *client)
990 struct pc87360_data *data = i2c_get_clientdata(client); 1011 struct pc87360_data *data = i2c_get_clientdata(client);
991 int i; 1012 int i;
992 1013
993 if ((i = i2c_detach_client(client))) { 1014 hwmon_device_unregister(data->class_dev);
994 dev_err(&client->dev, "Client deregistration failed, " 1015
995 "client not detached.\n"); 1016 if ((i = i2c_detach_client(client)))
996 return i; 1017 return i;
997 }
998 1018
999 for (i = 0; i < 3; i++) { 1019 for (i = 0; i < 3; i++) {
1000 if (data->address[i]) { 1020 if (data->address[i]) {
@@ -1320,23 +1340,23 @@ static int __init pc87360_init(void)
1320 /* Arbitrarily pick one of the addresses */ 1340 /* Arbitrarily pick one of the addresses */
1321 for (i = 0; i < 3; i++) { 1341 for (i = 0; i < 3; i++) {
1322 if (extra_isa[i] != 0x0000) { 1342 if (extra_isa[i] != 0x0000) {
1323 normal_isa[0] = extra_isa[i]; 1343 address = extra_isa[i];
1324 break; 1344 break;
1325 } 1345 }
1326 } 1346 }
1327 1347
1328 if (normal_isa[0] == 0x0000) { 1348 if (address == 0x0000) {
1329 printk(KERN_WARNING "pc87360: No active logical device, " 1349 printk(KERN_WARNING "pc87360: No active logical device, "
1330 "module not inserted.\n"); 1350 "module not inserted.\n");
1331 return -ENODEV; 1351 return -ENODEV;
1332 } 1352 }
1333 1353
1334 return i2c_add_driver(&pc87360_driver); 1354 return i2c_isa_add_driver(&pc87360_driver);
1335} 1355}
1336 1356
1337static void __exit pc87360_exit(void) 1357static void __exit pc87360_exit(void)
1338{ 1358{
1339 i2c_del_driver(&pc87360_driver); 1359 i2c_isa_del_driver(&pc87360_driver);
1340} 1360}
1341 1361
1342 1362
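The pc87360 hunks above replace every per-channel dev_attr_* file with the matching sensor_dev_attr_*.dev_attr. The pattern behind that change, sketched below with hypothetical names and a made-up scaling factor (only SENSOR_DEVICE_ATTR, to_sensor_dev_attr and the embedded dev_attr field come from <linux/hwmon-sysfs.h> as used in this patch): a sensor_device_attribute bundles a channel index with the usual device_attribute, so a single show routine can serve in0 through in10.

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/hwmon-sysfs.h>

struct example_data {
	struct i2c_client client;
	struct class_device *class_dev;
	u8 in[11];			/* raw readings, one per channel */
};

/* One show routine serves every inX_input file; the channel number comes
   from the index stored in the sensor_device_attribute. */
static ssize_t show_in_input(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct example_data *data = i2c_get_clientdata(to_i2c_client(dev));

	return sprintf(buf, "%u\n", data->in[attr->index] * 16);	/* made-up scaling */
}

static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_input, NULL, 1);

/* Registration then uses the embedded device_attribute, as in the hunks above:
   device_create_file(dev, &sensor_dev_attr_in0_input.dev_attr);	*/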
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6bbfc8fb4f13..8610bce08244 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -55,7 +55,9 @@
55#include <linux/ioport.h> 55#include <linux/ioport.h>
56#include <linux/pci.h> 56#include <linux/pci.h>
57#include <linux/i2c.h> 57#include <linux/i2c.h>
58#include <linux/i2c-sensor.h> 58#include <linux/i2c-isa.h>
59#include <linux/hwmon.h>
60#include <linux/err.h>
59#include <linux/init.h> 61#include <linux/init.h>
60#include <linux/jiffies.h> 62#include <linux/jiffies.h>
61#include <asm/io.h> 63#include <asm/io.h>
@@ -68,14 +70,10 @@ module_param(force_addr, ushort, 0);
68MODULE_PARM_DESC(force_addr, 70MODULE_PARM_DESC(force_addr,
69 "Initialize the base address of the sensors"); 71 "Initialize the base address of the sensors");
70 72
71/* Addresses to scan. 73/* Device address
72 Note that we can't determine the ISA address until we have initialized 74 Note that we can't determine the ISA address until we have initialized
73 our module */ 75 our module */
74static unsigned short normal_i2c[] = { I2C_CLIENT_END }; 76static unsigned short address;
75static unsigned int normal_isa[] = { 0x0000, I2C_CLIENT_ISA_END };
76
77/* Insmod parameters */
78SENSORS_INSMOD_1(sis5595);
79 77
80/* Many SIS5595 constants specified below */ 78/* Many SIS5595 constants specified below */
81 79
@@ -168,6 +166,7 @@ static inline u8 DIV_TO_REG(int val)
168 allocated. */ 166 allocated. */
169struct sis5595_data { 167struct sis5595_data {
170 struct i2c_client client; 168 struct i2c_client client;
169 struct class_device *class_dev;
171 struct semaphore lock; 170 struct semaphore lock;
172 171
173 struct semaphore update_lock; 172 struct semaphore update_lock;
@@ -190,8 +189,7 @@ struct sis5595_data {
190 189
191static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */ 190static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */
192 191
193static int sis5595_attach_adapter(struct i2c_adapter *adapter); 192static int sis5595_detect(struct i2c_adapter *adapter);
194static int sis5595_detect(struct i2c_adapter *adapter, int address, int kind);
195static int sis5595_detach_client(struct i2c_client *client); 193static int sis5595_detach_client(struct i2c_client *client);
196 194
197static int sis5595_read_value(struct i2c_client *client, u8 register); 195static int sis5595_read_value(struct i2c_client *client, u8 register);
@@ -202,9 +200,7 @@ static void sis5595_init_client(struct i2c_client *client);
202static struct i2c_driver sis5595_driver = { 200static struct i2c_driver sis5595_driver = {
203 .owner = THIS_MODULE, 201 .owner = THIS_MODULE,
204 .name = "sis5595", 202 .name = "sis5595",
205 .id = I2C_DRIVERID_SIS5595, 203 .attach_adapter = sis5595_detect,
206 .flags = I2C_DF_NOTIFY,
207 .attach_adapter = sis5595_attach_adapter,
208 .detach_client = sis5595_detach_client, 204 .detach_client = sis5595_detach_client,
209}; 205};
210 206
@@ -476,14 +472,7 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, ch
476static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 472static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
477 473
478/* This is called when the module is loaded */ 474/* This is called when the module is loaded */
479static int sis5595_attach_adapter(struct i2c_adapter *adapter) 475static int sis5595_detect(struct i2c_adapter *adapter)
480{
481 if (!(adapter->class & I2C_CLASS_HWMON))
482 return 0;
483 return i2c_detect(adapter, &addr_data, sis5595_detect);
484}
485
486int sis5595_detect(struct i2c_adapter *adapter, int address, int kind)
487{ 476{
488 int err = 0; 477 int err = 0;
489 int i; 478 int i;
@@ -492,10 +481,6 @@ int sis5595_detect(struct i2c_adapter *adapter, int address, int kind)
492 char val; 481 char val;
493 u16 a; 482 u16 a;
494 483
495 /* Make sure we are probing the ISA bus!! */
496 if (!i2c_is_isa_adapter(adapter))
497 goto exit;
498
499 if (force_addr) 484 if (force_addr)
500 address = force_addr & ~(SIS5595_EXTENT - 1); 485 address = force_addr & ~(SIS5595_EXTENT - 1);
501 /* Reserve the ISA region */ 486 /* Reserve the ISA region */
@@ -578,6 +563,12 @@ int sis5595_detect(struct i2c_adapter *adapter, int address, int kind)
578 } 563 }
579 564
580 /* Register sysfs hooks */ 565 /* Register sysfs hooks */
566 data->class_dev = hwmon_device_register(&new_client->dev);
567 if (IS_ERR(data->class_dev)) {
568 err = PTR_ERR(data->class_dev);
569 goto exit_detach;
570 }
571
581 device_create_file(&new_client->dev, &dev_attr_in0_input); 572 device_create_file(&new_client->dev, &dev_attr_in0_input);
582 device_create_file(&new_client->dev, &dev_attr_in0_min); 573 device_create_file(&new_client->dev, &dev_attr_in0_min);
583 device_create_file(&new_client->dev, &dev_attr_in0_max); 574 device_create_file(&new_client->dev, &dev_attr_in0_max);
@@ -608,7 +599,9 @@ int sis5595_detect(struct i2c_adapter *adapter, int address, int kind)
608 device_create_file(&new_client->dev, &dev_attr_temp1_max_hyst); 599 device_create_file(&new_client->dev, &dev_attr_temp1_max_hyst);
609 } 600 }
610 return 0; 601 return 0;
611 602
603exit_detach:
604 i2c_detach_client(new_client);
612exit_free: 605exit_free:
613 kfree(data); 606 kfree(data);
614exit_release: 607exit_release:
@@ -619,18 +612,17 @@ exit:
619 612
620static int sis5595_detach_client(struct i2c_client *client) 613static int sis5595_detach_client(struct i2c_client *client)
621{ 614{
615 struct sis5595_data *data = i2c_get_clientdata(client);
622 int err; 616 int err;
623 617
624 if ((err = i2c_detach_client(client))) { 618 hwmon_device_unregister(data->class_dev);
625 dev_err(&client->dev, 619
626 "Client deregistration failed, client not detached.\n"); 620 if ((err = i2c_detach_client(client)))
627 return err; 621 return err;
628 }
629 622
630 if (i2c_is_isa_client(client)) 623 release_region(client->addr, SIS5595_EXTENT);
631 release_region(client->addr, SIS5595_EXTENT);
632 624
633 kfree(i2c_get_clientdata(client)); 625 kfree(data);
634 626
635 return 0; 627 return 0;
636} 628}
@@ -745,7 +737,6 @@ static int __devinit sis5595_pci_probe(struct pci_dev *dev,
745{ 737{
746 u16 val; 738 u16 val;
747 int *i; 739 int *i;
748 int addr = 0;
749 740
750 for (i = blacklist; *i != 0; i++) { 741 for (i = blacklist; *i != 0; i++) {
751 struct pci_dev *dev; 742 struct pci_dev *dev;
@@ -761,22 +752,19 @@ static int __devinit sis5595_pci_probe(struct pci_dev *dev,
761 pci_read_config_word(dev, SIS5595_BASE_REG, &val)) 752 pci_read_config_word(dev, SIS5595_BASE_REG, &val))
762 return -ENODEV; 753 return -ENODEV;
763 754
764 addr = val & ~(SIS5595_EXTENT - 1); 755 address = val & ~(SIS5595_EXTENT - 1);
765 if (addr == 0 && force_addr == 0) { 756 if (address == 0 && force_addr == 0) {
766 dev_err(&dev->dev, "Base address not set - upgrade BIOS or use force_addr=0xaddr\n"); 757 dev_err(&dev->dev, "Base address not set - upgrade BIOS or use force_addr=0xaddr\n");
767 return -ENODEV; 758 return -ENODEV;
768 } 759 }
769 if (force_addr)
770 addr = force_addr; /* so detect will get called */
771 760
772 if (!addr) { 761 if (!address) {
773 dev_err(&dev->dev,"No SiS 5595 sensors found.\n"); 762 dev_err(&dev->dev,"No SiS 5595 sensors found.\n");
774 return -ENODEV; 763 return -ENODEV;
775 } 764 }
776 normal_isa[0] = addr;
777 765
778 s_bridge = pci_dev_get(dev); 766 s_bridge = pci_dev_get(dev);
779 if (i2c_add_driver(&sis5595_driver)) { 767 if (i2c_isa_add_driver(&sis5595_driver)) {
780 pci_dev_put(s_bridge); 768 pci_dev_put(s_bridge);
781 s_bridge = NULL; 769 s_bridge = NULL;
782 } 770 }
@@ -803,7 +791,7 @@ static void __exit sm_sis5595_exit(void)
803{ 791{
804 pci_unregister_driver(&sis5595_pci_driver); 792 pci_unregister_driver(&sis5595_pci_driver);
805 if (s_bridge != NULL) { 793 if (s_bridge != NULL) {
806 i2c_del_driver(&sis5595_driver); 794 i2c_isa_del_driver(&sis5595_driver);
807 pci_dev_put(s_bridge); 795 pci_dev_put(s_bridge);
808 s_bridge = NULL; 796 s_bridge = NULL;
809 } 797 }
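The sis5595 conversion also shows the unwind ordering every driver in this patch adopts: hwmon_device_register() is called only after i2c_attach_client() has succeeded, and teardown runs in reverse. A minimal skeleton of that ordering, using placeholder example_* names rather than the driver's real ones:

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_data {
	struct i2c_client client;
	struct class_device *class_dev;
};

static struct i2c_driver example_driver;	/* placeholder */

static int example_detect(struct i2c_adapter *adapter)
{
	struct example_data *data;
	struct i2c_client *new_client;
	int err;

	if (!(data = kmalloc(sizeof(struct example_data), GFP_KERNEL)))
		return -ENOMEM;
	memset(data, 0, sizeof(struct example_data));

	new_client = &data->client;
	i2c_set_clientdata(new_client, data);
	new_client->adapter = adapter;
	new_client->driver = &example_driver;
	/* ... addr, name, driver-specific setup ... */

	if ((err = i2c_attach_client(new_client)))
		goto exit_free;

	/* Register the hwmon class device only once the i2c client exists. */
	data->class_dev = hwmon_device_register(&new_client->dev);
	if (IS_ERR(data->class_dev)) {
		err = PTR_ERR(data->class_dev);
		goto exit_detach;
	}

	/* device_create_file() calls follow here. */
	return 0;

exit_detach:
	i2c_detach_client(new_client);
exit_free:
	kfree(data);
	return err;
}

static int example_detach_client(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);
	int err;

	hwmon_device_unregister(data->class_dev);	/* reverse order: class device first */

	if ((err = i2c_detach_client(client)))
		return err;

	kfree(data);
	return 0;
}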
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index fdeeb3ab6f2f..7fe71576dea4 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -31,23 +31,14 @@
31#include <linux/ioport.h> 31#include <linux/ioport.h>
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/i2c-sensor.h> 34#include <linux/i2c-isa.h>
35#include <linux/hwmon.h>
36#include <linux/err.h>
35#include <linux/init.h> 37#include <linux/init.h>
36#include <asm/io.h> 38#include <asm/io.h>
37 39
38static unsigned short normal_i2c[] = { I2C_CLIENT_END };
39/* Address is autodetected, there is no default value */ 40/* Address is autodetected, there is no default value */
40static unsigned int normal_isa[] = { 0x0000, I2C_CLIENT_ISA_END }; 41static unsigned short address;
41static struct i2c_force_data forces[] = {{NULL}};
42
43enum chips { any_chip, smsc47b397 };
44static struct i2c_address_data addr_data = {
45 .normal_i2c = normal_i2c,
46 .normal_isa = normal_isa,
47 .probe = normal_i2c, /* cheat */
48 .ignore = normal_i2c, /* cheat */
49 .forces = forces,
50};
51 42
52/* Super-I/O registers and commands */ 43/* Super-I/O registers and commands */
53 44
@@ -100,6 +91,7 @@ static u8 smsc47b397_reg_temp[] = {0x25, 0x26, 0x27, 0x80};
100 91
101struct smsc47b397_data { 92struct smsc47b397_data {
102 struct i2c_client client; 93 struct i2c_client client;
94 struct class_device *class_dev;
103 struct semaphore lock; 95 struct semaphore lock;
104 96
105 struct semaphore update_lock; 97 struct semaphore update_lock;
@@ -215,52 +207,40 @@ sysfs_fan(4);
215#define device_create_file_fan(client, num) \ 207#define device_create_file_fan(client, num) \
216 device_create_file(&client->dev, &dev_attr_fan##num##_input) 208 device_create_file(&client->dev, &dev_attr_fan##num##_input)
217 209
218static int smsc47b397_detect(struct i2c_adapter *adapter, int addr, int kind);
219
220static int smsc47b397_attach_adapter(struct i2c_adapter *adapter)
221{
222 if (!(adapter->class & I2C_CLASS_HWMON))
223 return 0;
224 return i2c_detect(adapter, &addr_data, smsc47b397_detect);
225}
226
227static int smsc47b397_detach_client(struct i2c_client *client) 210static int smsc47b397_detach_client(struct i2c_client *client)
228{ 211{
212 struct smsc47b397_data *data = i2c_get_clientdata(client);
229 int err; 213 int err;
230 214
231 if ((err = i2c_detach_client(client))) { 215 hwmon_device_unregister(data->class_dev);
232 dev_err(&client->dev, "Client deregistration failed, " 216
233 "client not detached.\n"); 217 if ((err = i2c_detach_client(client)))
234 return err; 218 return err;
235 }
236 219
237 release_region(client->addr, SMSC_EXTENT); 220 release_region(client->addr, SMSC_EXTENT);
238 kfree(i2c_get_clientdata(client)); 221 kfree(data);
239 222
240 return 0; 223 return 0;
241} 224}
242 225
226static int smsc47b397_detect(struct i2c_adapter *adapter);
227
243static struct i2c_driver smsc47b397_driver = { 228static struct i2c_driver smsc47b397_driver = {
244 .owner = THIS_MODULE, 229 .owner = THIS_MODULE,
245 .name = "smsc47b397", 230 .name = "smsc47b397",
246 .id = I2C_DRIVERID_SMSC47B397, 231 .attach_adapter = smsc47b397_detect,
247 .flags = I2C_DF_NOTIFY,
248 .attach_adapter = smsc47b397_attach_adapter,
249 .detach_client = smsc47b397_detach_client, 232 .detach_client = smsc47b397_detach_client,
250}; 233};
251 234
252static int smsc47b397_detect(struct i2c_adapter *adapter, int addr, int kind) 235static int smsc47b397_detect(struct i2c_adapter *adapter)
253{ 236{
254 struct i2c_client *new_client; 237 struct i2c_client *new_client;
255 struct smsc47b397_data *data; 238 struct smsc47b397_data *data;
256 int err = 0; 239 int err = 0;
257 240
258 if (!i2c_is_isa_adapter(adapter)) { 241 if (!request_region(address, SMSC_EXTENT, smsc47b397_driver.name)) {
259 return 0; 242 dev_err(&adapter->dev, "Region 0x%x already in use!\n",
260 } 243 address);
261
262 if (!request_region(addr, SMSC_EXTENT, smsc47b397_driver.name)) {
263 dev_err(&adapter->dev, "Region 0x%x already in use!\n", addr);
264 return -EBUSY; 244 return -EBUSY;
265 } 245 }
266 246
@@ -272,7 +252,7 @@ static int smsc47b397_detect(struct i2c_adapter *adapter, int addr, int kind)
272 252
273 new_client = &data->client; 253 new_client = &data->client;
274 i2c_set_clientdata(new_client, data); 254 i2c_set_clientdata(new_client, data);
275 new_client->addr = addr; 255 new_client->addr = address;
276 init_MUTEX(&data->lock); 256 init_MUTEX(&data->lock);
277 new_client->adapter = adapter; 257 new_client->adapter = adapter;
278 new_client->driver = &smsc47b397_driver; 258 new_client->driver = &smsc47b397_driver;
@@ -285,6 +265,12 @@ static int smsc47b397_detect(struct i2c_adapter *adapter, int addr, int kind)
285 if ((err = i2c_attach_client(new_client))) 265 if ((err = i2c_attach_client(new_client)))
286 goto error_free; 266 goto error_free;
287 267
268 data->class_dev = hwmon_device_register(&new_client->dev);
269 if (IS_ERR(data->class_dev)) {
270 err = PTR_ERR(data->class_dev);
271 goto error_detach;
272 }
273
288 device_create_file_temp(new_client, 1); 274 device_create_file_temp(new_client, 1);
289 device_create_file_temp(new_client, 2); 275 device_create_file_temp(new_client, 2);
290 device_create_file_temp(new_client, 3); 276 device_create_file_temp(new_client, 3);
@@ -297,14 +283,16 @@ static int smsc47b397_detect(struct i2c_adapter *adapter, int addr, int kind)
297 283
298 return 0; 284 return 0;
299 285
286error_detach:
287 i2c_detach_client(new_client);
300error_free: 288error_free:
301 kfree(data); 289 kfree(data);
302error_release: 290error_release:
303 release_region(addr, SMSC_EXTENT); 291 release_region(address, SMSC_EXTENT);
304 return err; 292 return err;
305} 293}
306 294
307static int __init smsc47b397_find(unsigned int *addr) 295static int __init smsc47b397_find(unsigned short *addr)
308{ 296{
309 u8 id, rev; 297 u8 id, rev;
310 298
@@ -333,15 +321,15 @@ static int __init smsc47b397_init(void)
333{ 321{
334 int ret; 322 int ret;
335 323
336 if ((ret = smsc47b397_find(normal_isa))) 324 if ((ret = smsc47b397_find(&address)))
337 return ret; 325 return ret;
338 326
339 return i2c_add_driver(&smsc47b397_driver); 327 return i2c_isa_add_driver(&smsc47b397_driver);
340} 328}
341 329
342static void __exit smsc47b397_exit(void) 330static void __exit smsc47b397_exit(void)
343{ 331{
344 i2c_del_driver(&smsc47b397_driver); 332 i2c_isa_del_driver(&smsc47b397_driver);
345} 333}
346 334
347MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>"); 335MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 7166ad0b2fda..7e699a8ede26 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -30,21 +30,14 @@
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/jiffies.h> 31#include <linux/jiffies.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/i2c-sensor.h> 33#include <linux/i2c-isa.h>
34#include <linux/hwmon.h>
35#include <linux/err.h>
34#include <linux/init.h> 36#include <linux/init.h>
35#include <asm/io.h> 37#include <asm/io.h>
36 38
37static unsigned short normal_i2c[] = { I2C_CLIENT_END };
38/* Address is autodetected, there is no default value */ 39/* Address is autodetected, there is no default value */
39static unsigned int normal_isa[] = { 0x0000, I2C_CLIENT_ISA_END }; 40static unsigned short address;
40static struct i2c_force_data forces[] = {{NULL}};
41
42enum chips { any_chip, smsc47m1 };
43static struct i2c_address_data addr_data = {
44 .normal_i2c = normal_i2c,
45 .normal_isa = normal_isa,
46 .forces = forces,
47};
48 41
49/* Super-I/O registers and commands */ 42/* Super-I/O registers and commands */
50 43
@@ -108,6 +101,7 @@ superio_exit(void)
108 101
109struct smsc47m1_data { 102struct smsc47m1_data {
110 struct i2c_client client; 103 struct i2c_client client;
104 struct class_device *class_dev;
111 struct semaphore lock; 105 struct semaphore lock;
112 106
113 struct semaphore update_lock; 107 struct semaphore update_lock;
@@ -121,9 +115,7 @@ struct smsc47m1_data {
121}; 115};
122 116
123 117
124static int smsc47m1_attach_adapter(struct i2c_adapter *adapter); 118static int smsc47m1_detect(struct i2c_adapter *adapter);
125static int smsc47m1_find(int *address);
126static int smsc47m1_detect(struct i2c_adapter *adapter, int address, int kind);
127static int smsc47m1_detach_client(struct i2c_client *client); 119static int smsc47m1_detach_client(struct i2c_client *client);
128 120
129static int smsc47m1_read_value(struct i2c_client *client, u8 reg); 121static int smsc47m1_read_value(struct i2c_client *client, u8 reg);
@@ -136,9 +128,7 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
136static struct i2c_driver smsc47m1_driver = { 128static struct i2c_driver smsc47m1_driver = {
137 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
138 .name = "smsc47m1", 130 .name = "smsc47m1",
139 .id = I2C_DRIVERID_SMSC47M1, 131 .attach_adapter = smsc47m1_detect,
140 .flags = I2C_DF_NOTIFY,
141 .attach_adapter = smsc47m1_attach_adapter,
142 .detach_client = smsc47m1_detach_client, 132 .detach_client = smsc47m1_detach_client,
143}; 133};
144 134
@@ -354,14 +344,7 @@ fan_present(2);
354 344
355static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL); 345static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL);
356 346
357static int smsc47m1_attach_adapter(struct i2c_adapter *adapter) 347static int __init smsc47m1_find(unsigned short *addr)
358{
359 if (!(adapter->class & I2C_CLASS_HWMON))
360 return 0;
361 return i2c_detect(adapter, &addr_data, smsc47m1_detect);
362}
363
364static int smsc47m1_find(int *address)
365{ 348{
366 u8 val; 349 u8 val;
367 350
@@ -388,10 +371,10 @@ static int smsc47m1_find(int *address)
388 } 371 }
389 372
390 superio_select(); 373 superio_select();
391 *address = (superio_inb(SUPERIO_REG_BASE) << 8) 374 *addr = (superio_inb(SUPERIO_REG_BASE) << 8)
392 | superio_inb(SUPERIO_REG_BASE + 1); 375 | superio_inb(SUPERIO_REG_BASE + 1);
393 val = superio_inb(SUPERIO_REG_ACT); 376 val = superio_inb(SUPERIO_REG_ACT);
394 if (*address == 0 || (val & 0x01) == 0) { 377 if (*addr == 0 || (val & 0x01) == 0) {
395 printk(KERN_INFO "smsc47m1: Device is disabled, will not use\n"); 378 printk(KERN_INFO "smsc47m1: Device is disabled, will not use\n");
396 superio_exit(); 379 superio_exit();
397 return -ENODEV; 380 return -ENODEV;
@@ -401,17 +384,13 @@ static int smsc47m1_find(int *address)
401 return 0; 384 return 0;
402} 385}
403 386
404static int smsc47m1_detect(struct i2c_adapter *adapter, int address, int kind) 387static int smsc47m1_detect(struct i2c_adapter *adapter)
405{ 388{
406 struct i2c_client *new_client; 389 struct i2c_client *new_client;
407 struct smsc47m1_data *data; 390 struct smsc47m1_data *data;
408 int err = 0; 391 int err = 0;
409 int fan1, fan2, pwm1, pwm2; 392 int fan1, fan2, pwm1, pwm2;
410 393
411 if (!i2c_is_isa_adapter(adapter)) {
412 return 0;
413 }
414
415 if (!request_region(address, SMSC_EXTENT, smsc47m1_driver.name)) { 394 if (!request_region(address, SMSC_EXTENT, smsc47m1_driver.name)) {
416 dev_err(&adapter->dev, "Region 0x%x already in use!\n", address); 395 dev_err(&adapter->dev, "Region 0x%x already in use!\n", address);
417 return -EBUSY; 396 return -EBUSY;
@@ -461,6 +440,13 @@ static int smsc47m1_detect(struct i2c_adapter *adapter, int address, int kind)
461 function. */ 440 function. */
462 smsc47m1_update_device(&new_client->dev, 1); 441 smsc47m1_update_device(&new_client->dev, 1);
463 442
443 /* Register sysfs hooks */
444 data->class_dev = hwmon_device_register(&new_client->dev);
445 if (IS_ERR(data->class_dev)) {
446 err = PTR_ERR(data->class_dev);
447 goto error_detach;
448 }
449
464 if (fan1) { 450 if (fan1) {
465 device_create_file(&new_client->dev, &dev_attr_fan1_input); 451 device_create_file(&new_client->dev, &dev_attr_fan1_input);
466 device_create_file(&new_client->dev, &dev_attr_fan1_min); 452 device_create_file(&new_client->dev, &dev_attr_fan1_min);
@@ -494,6 +480,8 @@ static int smsc47m1_detect(struct i2c_adapter *adapter, int address, int kind)
494 480
495 return 0; 481 return 0;
496 482
483error_detach:
484 i2c_detach_client(new_client);
497error_free: 485error_free:
498 kfree(data); 486 kfree(data);
499error_release: 487error_release:
@@ -503,16 +491,16 @@ error_release:
503 491
504static int smsc47m1_detach_client(struct i2c_client *client) 492static int smsc47m1_detach_client(struct i2c_client *client)
505{ 493{
494 struct smsc47m1_data *data = i2c_get_clientdata(client);
506 int err; 495 int err;
507 496
508 if ((err = i2c_detach_client(client))) { 497 hwmon_device_unregister(data->class_dev);
509 dev_err(&client->dev, "Client deregistration failed, " 498
510 "client not detached.\n"); 499 if ((err = i2c_detach_client(client)))
511 return err; 500 return err;
512 }
513 501
514 release_region(client->addr, SMSC_EXTENT); 502 release_region(client->addr, SMSC_EXTENT);
515 kfree(i2c_get_clientdata(client)); 503 kfree(data);
516 504
517 return 0; 505 return 0;
518} 506}
@@ -573,16 +561,16 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
573 561
574static int __init sm_smsc47m1_init(void) 562static int __init sm_smsc47m1_init(void)
575{ 563{
576 if (smsc47m1_find(normal_isa)) { 564 if (smsc47m1_find(&address)) {
577 return -ENODEV; 565 return -ENODEV;
578 } 566 }
579 567
580 return i2c_add_driver(&smsc47m1_driver); 568 return i2c_isa_add_driver(&smsc47m1_driver);
581} 569}
582 570
583static void __exit sm_smsc47m1_exit(void) 571static void __exit sm_smsc47m1_exit(void)
584{ 572{
585 i2c_del_driver(&smsc47m1_driver); 573 i2c_isa_del_driver(&smsc47m1_driver);
586} 574}
587 575
588MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>"); 576MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
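The smsc47m1 hunks also show how each converted driver slims down its struct i2c_driver: the I2C_DRIVERID_* id and the I2C_DF_NOTIFY flag go away, and attach_adapter points straight at the detect routine instead of an i2c_detect() wrapper. Schematically (callback names are illustrative; the id constant shown is the one smsc47m1 used to claim):

#include <linux/module.h>
#include <linux/i2c.h>

static int example_attach_adapter(struct i2c_adapter *adapter);	/* old wrapper around i2c_detect() */
static int example_detect(struct i2c_adapter *adapter);		/* new: called directly by i2c-isa */
static int example_detach_client(struct i2c_client *client);

/* Before the conversion: scanned via i2c-core, identified by a driver id. */
static struct i2c_driver old_style_driver = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.id		= I2C_DRIVERID_SMSC47M1,
	.flags		= I2C_DF_NOTIFY,
	.attach_adapter	= example_attach_adapter,
	.detach_client	= example_detach_client,
};

/* After the conversion: no id, no notify flag, detect hooked up directly. */
static struct i2c_driver new_style_driver = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.attach_adapter	= example_detect,
	.detach_client	= example_detach_client,
};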
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 164d47948390..eb84997627c8 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -35,7 +35,9 @@
35#include <linux/pci.h> 35#include <linux/pci.h>
36#include <linux/jiffies.h> 36#include <linux/jiffies.h>
37#include <linux/i2c.h> 37#include <linux/i2c.h>
38#include <linux/i2c-sensor.h> 38#include <linux/i2c-isa.h>
39#include <linux/hwmon.h>
40#include <linux/err.h>
39#include <linux/init.h> 41#include <linux/init.h>
40#include <asm/io.h> 42#include <asm/io.h>
41 43
@@ -47,14 +49,10 @@ module_param(force_addr, ushort, 0);
47MODULE_PARM_DESC(force_addr, 49MODULE_PARM_DESC(force_addr,
48 "Initialize the base address of the sensors"); 50 "Initialize the base address of the sensors");
49 51
50/* Addresses to scan. 52/* Device address
51 Note that we can't determine the ISA address until we have initialized 53 Note that we can't determine the ISA address until we have initialized
52 our module */ 54 our module */
53static unsigned short normal_i2c[] = { I2C_CLIENT_END }; 55static unsigned short address;
54static unsigned int normal_isa[] = { 0x0000, I2C_CLIENT_ISA_END };
55
56/* Insmod parameters */
57SENSORS_INSMOD_1(via686a);
58 56
59/* 57/*
60 The Via 686a southbridge has a LM78-like chip integrated on the same IC. 58 The Via 686a southbridge has a LM78-like chip integrated on the same IC.
@@ -297,6 +295,7 @@ static inline long TEMP_FROM_REG10(u16 val)
297 via686a client is allocated. */ 295 via686a client is allocated. */
298struct via686a_data { 296struct via686a_data {
299 struct i2c_client client; 297 struct i2c_client client;
298 struct class_device *class_dev;
300 struct semaphore update_lock; 299 struct semaphore update_lock;
301 char valid; /* !=0 if following fields are valid */ 300 char valid; /* !=0 if following fields are valid */
302 unsigned long last_updated; /* In jiffies */ 301 unsigned long last_updated; /* In jiffies */
@@ -315,8 +314,7 @@ struct via686a_data {
315 314
316static struct pci_dev *s_bridge; /* pointer to the (only) via686a */ 315static struct pci_dev *s_bridge; /* pointer to the (only) via686a */
317 316
318static int via686a_attach_adapter(struct i2c_adapter *adapter); 317static int via686a_detect(struct i2c_adapter *adapter);
319static int via686a_detect(struct i2c_adapter *adapter, int address, int kind);
320static int via686a_detach_client(struct i2c_client *client); 318static int via686a_detach_client(struct i2c_client *client);
321 319
322static inline int via686a_read_value(struct i2c_client *client, u8 reg) 320static inline int via686a_read_value(struct i2c_client *client, u8 reg)
@@ -576,22 +574,13 @@ static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
576static struct i2c_driver via686a_driver = { 574static struct i2c_driver via686a_driver = {
577 .owner = THIS_MODULE, 575 .owner = THIS_MODULE,
578 .name = "via686a", 576 .name = "via686a",
579 .id = I2C_DRIVERID_VIA686A, 577 .attach_adapter = via686a_detect,
580 .flags = I2C_DF_NOTIFY,
581 .attach_adapter = via686a_attach_adapter,
582 .detach_client = via686a_detach_client, 578 .detach_client = via686a_detach_client,
583}; 579};
584 580
585 581
586/* This is called when the module is loaded */ 582/* This is called when the module is loaded */
587static int via686a_attach_adapter(struct i2c_adapter *adapter) 583static int via686a_detect(struct i2c_adapter *adapter)
588{
589 if (!(adapter->class & I2C_CLASS_HWMON))
590 return 0;
591 return i2c_detect(adapter, &addr_data, via686a_detect);
592}
593
594static int via686a_detect(struct i2c_adapter *adapter, int address, int kind)
595{ 584{
596 struct i2c_client *new_client; 585 struct i2c_client *new_client;
597 struct via686a_data *data; 586 struct via686a_data *data;
@@ -599,13 +588,6 @@ static int via686a_detect(struct i2c_adapter *adapter, int address, int kind)
599 const char client_name[] = "via686a"; 588 const char client_name[] = "via686a";
600 u16 val; 589 u16 val;
601 590
602 /* Make sure we are probing the ISA bus!! */
603 if (!i2c_is_isa_adapter(adapter)) {
604 dev_err(&adapter->dev,
605 "via686a_detect called for an I2C bus adapter?!?\n");
606 return 0;
607 }
608
609 /* 8231 requires multiple of 256, we enforce that on 686 as well */ 591 /* 8231 requires multiple of 256, we enforce that on 686 as well */
610 if (force_addr) 592 if (force_addr)
611 address = force_addr & 0xFF00; 593 address = force_addr & 0xFF00;
@@ -637,7 +619,7 @@ static int via686a_detect(struct i2c_adapter *adapter, int address, int kind)
637 619
638 if (!(data = kmalloc(sizeof(struct via686a_data), GFP_KERNEL))) { 620 if (!(data = kmalloc(sizeof(struct via686a_data), GFP_KERNEL))) {
639 err = -ENOMEM; 621 err = -ENOMEM;
640 goto ERROR0; 622 goto exit_release;
641 } 623 }
642 memset(data, 0, sizeof(struct via686a_data)); 624 memset(data, 0, sizeof(struct via686a_data));
643 625
@@ -655,12 +637,18 @@ static int via686a_detect(struct i2c_adapter *adapter, int address, int kind)
655 init_MUTEX(&data->update_lock); 637 init_MUTEX(&data->update_lock);
656 /* Tell the I2C layer a new client has arrived */ 638 /* Tell the I2C layer a new client has arrived */
657 if ((err = i2c_attach_client(new_client))) 639 if ((err = i2c_attach_client(new_client)))
658 goto ERROR3; 640 goto exit_free;
659 641
660 /* Initialize the VIA686A chip */ 642 /* Initialize the VIA686A chip */
661 via686a_init_client(new_client); 643 via686a_init_client(new_client);
662 644
663 /* Register sysfs hooks */ 645 /* Register sysfs hooks */
646 data->class_dev = hwmon_device_register(&new_client->dev);
647 if (IS_ERR(data->class_dev)) {
648 err = PTR_ERR(data->class_dev);
649 goto exit_detach;
650 }
651
664 device_create_file(&new_client->dev, &dev_attr_in0_input); 652 device_create_file(&new_client->dev, &dev_attr_in0_input);
665 device_create_file(&new_client->dev, &dev_attr_in1_input); 653 device_create_file(&new_client->dev, &dev_attr_in1_input);
666 device_create_file(&new_client->dev, &dev_attr_in2_input); 654 device_create_file(&new_client->dev, &dev_attr_in2_input);
@@ -695,25 +683,27 @@ static int via686a_detect(struct i2c_adapter *adapter, int address, int kind)
695 683
696 return 0; 684 return 0;
697 685
698ERROR3: 686exit_detach:
687 i2c_detach_client(new_client);
688exit_free:
699 kfree(data); 689 kfree(data);
700ERROR0: 690exit_release:
701 release_region(address, VIA686A_EXTENT); 691 release_region(address, VIA686A_EXTENT);
702 return err; 692 return err;
703} 693}
704 694
705static int via686a_detach_client(struct i2c_client *client) 695static int via686a_detach_client(struct i2c_client *client)
706{ 696{
697 struct via686a_data *data = i2c_get_clientdata(client);
707 int err; 698 int err;
708 699
709 if ((err = i2c_detach_client(client))) { 700 hwmon_device_unregister(data->class_dev);
710 dev_err(&client->dev, 701
711 "Client deregistration failed, client not detached.\n"); 702 if ((err = i2c_detach_client(client)))
712 return err; 703 return err;
713 }
714 704
715 release_region(client->addr, VIA686A_EXTENT); 705 release_region(client->addr, VIA686A_EXTENT);
716 kfree(i2c_get_clientdata(client)); 706 kfree(data);
717 707
718 return 0; 708 return 0;
719} 709}
@@ -810,29 +800,25 @@ static int __devinit via686a_pci_probe(struct pci_dev *dev,
810 const struct pci_device_id *id) 800 const struct pci_device_id *id)
811{ 801{
812 u16 val; 802 u16 val;
813 int addr = 0;
814 803
815 if (PCIBIOS_SUCCESSFUL != 804 if (PCIBIOS_SUCCESSFUL !=
816 pci_read_config_word(dev, VIA686A_BASE_REG, &val)) 805 pci_read_config_word(dev, VIA686A_BASE_REG, &val))
817 return -ENODEV; 806 return -ENODEV;
818 807
819 addr = val & ~(VIA686A_EXTENT - 1); 808 address = val & ~(VIA686A_EXTENT - 1);
820 if (addr == 0 && force_addr == 0) { 809 if (address == 0 && force_addr == 0) {
821 dev_err(&dev->dev, "base address not set - upgrade BIOS " 810 dev_err(&dev->dev, "base address not set - upgrade BIOS "
822 "or use force_addr=0xaddr\n"); 811 "or use force_addr=0xaddr\n");
823 return -ENODEV; 812 return -ENODEV;
824 } 813 }
825 if (force_addr)
826 addr = force_addr; /* so detect will get called */
827 814
828 if (!addr) { 815 if (!address) {
829 dev_err(&dev->dev, "No Via 686A sensors found.\n"); 816 dev_err(&dev->dev, "No Via 686A sensors found.\n");
830 return -ENODEV; 817 return -ENODEV;
831 } 818 }
832 normal_isa[0] = addr;
833 819
834 s_bridge = pci_dev_get(dev); 820 s_bridge = pci_dev_get(dev);
835 if (i2c_add_driver(&via686a_driver)) { 821 if (i2c_isa_add_driver(&via686a_driver)) {
836 pci_dev_put(s_bridge); 822 pci_dev_put(s_bridge);
837 s_bridge = NULL; 823 s_bridge = NULL;
838 } 824 }
@@ -859,7 +845,7 @@ static void __exit sm_via686a_exit(void)
859{ 845{
860 pci_unregister_driver(&via686a_pci_driver); 846 pci_unregister_driver(&via686a_pci_driver);
861 if (s_bridge != NULL) { 847 if (s_bridge != NULL) {
862 i2c_del_driver(&via686a_driver); 848 i2c_isa_del_driver(&via686a_driver);
863 pci_dev_put(s_bridge); 849 pci_dev_put(s_bridge);
864 s_bridge = NULL; 850 s_bridge = NULL;
865 } 851 }
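For via686a (and sis5595) the ISA address comes from PCI configuration space, so registration moves into the PCI probe: it saves the base address into the file-scope variable and only then calls i2c_isa_add_driver(). A condensed sketch with placeholder register names (EXAMPLE_BASE_REG, EXAMPLE_EXTENT); as in the existing drivers, the probe still ends by returning -ENODEV so the PCI core never binds to the bridge itself:

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/i2c-isa.h>

#define EXAMPLE_BASE_REG	0x70	/* illustrative config-space offset */
#define EXAMPLE_EXTENT		0x80	/* illustrative I/O region size */

static unsigned short address;
static struct pci_dev *s_bridge;
static struct i2c_driver example_driver;	/* placeholder */

static int __devinit example_pci_probe(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	u16 val;

	if (PCIBIOS_SUCCESSFUL !=
	    pci_read_config_word(dev, EXAMPLE_BASE_REG, &val))
		return -ENODEV;

	address = val & ~(EXAMPLE_EXTENT - 1);	/* read later by the detect routine */
	if (!address) {
		dev_err(&dev->dev, "Base address not set\n");
		return -ENODEV;
	}

	s_bridge = pci_dev_get(dev);
	if (i2c_isa_add_driver(&example_driver)) {
		pci_dev_put(s_bridge);
		s_bridge = NULL;
	}

	/* Deliberately fail the PCI probe: the hwmon client lives on the
	   virtual ISA bus, not on this PCI device. */
	return -ENODEV;
}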
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 8a40b6976e1a..b60efe8f8b26 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -9,6 +9,9 @@
9 Thanks to Leon Moonen, Steve Cliffe and Grant Coady for their help 9 Thanks to Leon Moonen, Steve Cliffe and Grant Coady for their help
10 in testing and debugging this driver. 10 in testing and debugging this driver.
11 11
12 This driver also supports the W83627EHG, which is the lead-free
13 version of the W83627EHF.
14
12 This program is free software; you can redistribute it and/or modify 15 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by 16 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or 17 the Free Software Foundation; either version 2 of the License, or
@@ -37,17 +40,14 @@
37#include <linux/init.h> 40#include <linux/init.h>
38#include <linux/slab.h> 41#include <linux/slab.h>
39#include <linux/i2c.h> 42#include <linux/i2c.h>
40#include <linux/i2c-sensor.h> 43#include <linux/i2c-isa.h>
44#include <linux/hwmon.h>
45#include <linux/err.h>
41#include <asm/io.h> 46#include <asm/io.h>
42#include "lm75.h" 47#include "lm75.h"
43 48
44/* Addresses to scan 49/* The actual ISA address is read from Super-I/O configuration space */
45 The actual ISA address is read from Super-I/O configuration space */ 50static unsigned short address;
46static unsigned short normal_i2c[] = { I2C_CLIENT_END };
47static unsigned int normal_isa[] = { 0, I2C_CLIENT_ISA_END };
48
49/* Insmod parameters */
50SENSORS_INSMOD_1(w83627ehf);
51 51
52/* 52/*
53 * Super-I/O constants and functions 53 * Super-I/O constants and functions
@@ -174,6 +174,7 @@ temp1_to_reg(int temp)
174 174
175struct w83627ehf_data { 175struct w83627ehf_data {
176 struct i2c_client client; 176 struct i2c_client client;
177 struct class_device *class_dev;
177 struct semaphore lock; 178 struct semaphore lock;
178 179
179 struct semaphore update_lock; 180 struct semaphore update_lock;
@@ -666,15 +667,12 @@ static void w83627ehf_init_client(struct i2c_client *client)
666 } 667 }
667} 668}
668 669
669static int w83627ehf_detect(struct i2c_adapter *adapter, int address, int kind) 670static int w83627ehf_detect(struct i2c_adapter *adapter)
670{ 671{
671 struct i2c_client *client; 672 struct i2c_client *client;
672 struct w83627ehf_data *data; 673 struct w83627ehf_data *data;
673 int i, err = 0; 674 int i, err = 0;
674 675
675 if (!i2c_is_isa_adapter(adapter))
676 return 0;
677
678 if (!request_region(address, REGION_LENGTH, w83627ehf_driver.name)) { 676 if (!request_region(address, REGION_LENGTH, w83627ehf_driver.name)) {
679 err = -EBUSY; 677 err = -EBUSY;
680 goto exit; 678 goto exit;
@@ -720,6 +718,12 @@ static int w83627ehf_detect(struct i2c_adapter *adapter, int address, int kind)
720 data->has_fan |= (1 << 4); 718 data->has_fan |= (1 << 4);
721 719
722 /* Register sysfs hooks */ 720 /* Register sysfs hooks */
721 data->class_dev = hwmon_device_register(&client->dev);
722 if (IS_ERR(data->class_dev)) {
723 err = PTR_ERR(data->class_dev);
724 goto exit_detach;
725 }
726
723 device_create_file(&client->dev, &dev_attr_fan1_input); 727 device_create_file(&client->dev, &dev_attr_fan1_input);
724 device_create_file(&client->dev, &dev_attr_fan1_min); 728 device_create_file(&client->dev, &dev_attr_fan1_min);
725 device_create_file(&client->dev, &dev_attr_fan1_div); 729 device_create_file(&client->dev, &dev_attr_fan1_div);
@@ -753,6 +757,8 @@ static int w83627ehf_detect(struct i2c_adapter *adapter, int address, int kind)
753 757
754 return 0; 758 return 0;
755 759
760exit_detach:
761 i2c_detach_client(client);
756exit_free: 762exit_free:
757 kfree(data); 763 kfree(data);
758exit_release: 764exit_release:
@@ -761,24 +767,17 @@ exit:
761 return err; 767 return err;
762} 768}
763 769
764static int w83627ehf_attach_adapter(struct i2c_adapter *adapter)
765{
766 if (!(adapter->class & I2C_CLASS_HWMON))
767 return 0;
768 return i2c_detect(adapter, &addr_data, w83627ehf_detect);
769}
770
771static int w83627ehf_detach_client(struct i2c_client *client) 770static int w83627ehf_detach_client(struct i2c_client *client)
772{ 771{
772 struct w83627ehf_data *data = i2c_get_clientdata(client);
773 int err; 773 int err;
774 774
775 if ((err = i2c_detach_client(client))) { 775 hwmon_device_unregister(data->class_dev);
776 dev_err(&client->dev, "Client deregistration failed, " 776
777 "client not detached.\n"); 777 if ((err = i2c_detach_client(client)))
778 return err; 778 return err;
779 }
780 release_region(client->addr, REGION_LENGTH); 779 release_region(client->addr, REGION_LENGTH);
781 kfree(i2c_get_clientdata(client)); 780 kfree(data);
782 781
783 return 0; 782 return 0;
784} 783}
@@ -786,12 +785,11 @@ static int w83627ehf_detach_client(struct i2c_client *client)
786static struct i2c_driver w83627ehf_driver = { 785static struct i2c_driver w83627ehf_driver = {
787 .owner = THIS_MODULE, 786 .owner = THIS_MODULE,
788 .name = "w83627ehf", 787 .name = "w83627ehf",
789 .flags = I2C_DF_NOTIFY, 788 .attach_adapter = w83627ehf_detect,
790 .attach_adapter = w83627ehf_attach_adapter,
791 .detach_client = w83627ehf_detach_client, 789 .detach_client = w83627ehf_detach_client,
792}; 790};
793 791
794static int __init w83627ehf_find(int sioaddr, int *address) 792static int __init w83627ehf_find(int sioaddr, unsigned short *addr)
795{ 793{
796 u16 val; 794 u16 val;
797 795
@@ -809,8 +807,8 @@ static int __init w83627ehf_find(int sioaddr, int *address)
809 superio_select(W83627EHF_LD_HWM); 807 superio_select(W83627EHF_LD_HWM);
810 val = (superio_inb(SIO_REG_ADDR) << 8) 808 val = (superio_inb(SIO_REG_ADDR) << 8)
811 | superio_inb(SIO_REG_ADDR + 1); 809 | superio_inb(SIO_REG_ADDR + 1);
812 *address = val & ~(REGION_LENGTH - 1); 810 *addr = val & ~(REGION_LENGTH - 1);
813 if (*address == 0) { 811 if (*addr == 0) {
814 superio_exit(); 812 superio_exit();
815 return -ENODEV; 813 return -ENODEV;
816 } 814 }
@@ -826,16 +824,16 @@ static int __init w83627ehf_find(int sioaddr, int *address)
826 824
827static int __init sensors_w83627ehf_init(void) 825static int __init sensors_w83627ehf_init(void)
828{ 826{
829 if (w83627ehf_find(0x2e, &normal_isa[0]) 827 if (w83627ehf_find(0x2e, &address)
830 && w83627ehf_find(0x4e, &normal_isa[0])) 828 && w83627ehf_find(0x4e, &address))
831 return -ENODEV; 829 return -ENODEV;
832 830
833 return i2c_add_driver(&w83627ehf_driver); 831 return i2c_isa_add_driver(&w83627ehf_driver);
834} 832}
835 833
836static void __exit sensors_w83627ehf_exit(void) 834static void __exit sensors_w83627ehf_exit(void)
837{ 835{
838 i2c_del_driver(&w83627ehf_driver); 836 i2c_isa_del_driver(&w83627ehf_driver);
839} 837}
840 838
841MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); 839MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
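sensors_w83627ehf_init() now probes the two conventional Super-I/O configuration ports, 0x2e and 0x4e, and keeps whichever yields a hardware-monitor base address. The sketch below outlines that lookup; the superio_* helpers, register offsets and the logical-device number are placeholders patterned on the driver, not its exact code:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/i2c-isa.h>

#define REGION_LENGTH	8	/* illustrative size of the HWM I/O region */

static unsigned short address;
static struct i2c_driver example_driver;	/* placeholder */

/* Placeholder Super-I/O accessors; the real drivers open the config space
   with a magic sequence on sioaddr/sioaddr+1 and read indexed registers. */
static void superio_enter(int sioaddr);
static void superio_select(int sioaddr, int logical_device);
static int  superio_inb(int sioaddr, int reg);
static void superio_exit(int sioaddr);

static int __init example_find(int sioaddr, unsigned short *addr)
{
	u16 val;

	superio_enter(sioaddr);
	superio_select(sioaddr, 0x0b);		/* hardware-monitor logical device */
	val = (superio_inb(sioaddr, 0x60) << 8)	/* base address, high then low byte */
	    | superio_inb(sioaddr, 0x61);
	*addr = val & ~(REGION_LENGTH - 1);
	superio_exit(sioaddr);

	return *addr ? 0 : -ENODEV;
}

static int __init example_init(void)
{
	/* Try 0x2e first, then 0x4e, as the hunk above does. */
	if (example_find(0x2e, &address) && example_find(0x4e, &address))
		return -ENODEV;

	return i2c_isa_add_driver(&example_driver);
}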
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index bd87a42e068a..02bd5c0239a2 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -42,8 +42,10 @@
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/jiffies.h> 43#include <linux/jiffies.h>
44#include <linux/i2c.h> 44#include <linux/i2c.h>
45#include <linux/i2c-sensor.h> 45#include <linux/i2c-isa.h>
46#include <linux/i2c-vid.h> 46#include <linux/hwmon.h>
47#include <linux/hwmon-vid.h>
48#include <linux/err.h>
47#include <asm/io.h> 49#include <asm/io.h>
48#include "lm75.h" 50#include "lm75.h"
49 51
@@ -56,12 +58,11 @@ module_param(force_i2c, byte, 0);
56MODULE_PARM_DESC(force_i2c, 58MODULE_PARM_DESC(force_i2c,
57 "Initialize the i2c address of the sensors"); 59 "Initialize the i2c address of the sensors");
58 60
59/* Addresses to scan */ 61/* The actual ISA address is read from Super-I/O configuration space */
60static unsigned short normal_i2c[] = { I2C_CLIENT_END }; 62static unsigned short address;
61static unsigned int normal_isa[] = { 0, I2C_CLIENT_ISA_END };
62 63
63/* Insmod parameters */ 64/* Insmod parameters */
64SENSORS_INSMOD_4(w83627hf, w83627thf, w83697hf, w83637hf); 65enum chips { any_chip, w83627hf, w83627thf, w83697hf, w83637hf };
65 66
66static int init = 1; 67static int init = 1;
67module_param(init, bool, 0); 68module_param(init, bool, 0);
@@ -277,6 +278,7 @@ static inline u8 DIV_TO_REG(long val)
277 dynamically allocated, at the same time when a new client is allocated. */ 278 dynamically allocated, at the same time when a new client is allocated. */
278struct w83627hf_data { 279struct w83627hf_data {
279 struct i2c_client client; 280 struct i2c_client client;
281 struct class_device *class_dev;
280 struct semaphore lock; 282 struct semaphore lock;
281 enum chips type; 283 enum chips type;
282 284
@@ -314,9 +316,7 @@ struct w83627hf_data {
314}; 316};
315 317
316 318
317static int w83627hf_attach_adapter(struct i2c_adapter *adapter); 319static int w83627hf_detect(struct i2c_adapter *adapter);
318static int w83627hf_detect(struct i2c_adapter *adapter, int address,
319 int kind);
320static int w83627hf_detach_client(struct i2c_client *client); 320static int w83627hf_detach_client(struct i2c_client *client);
321 321
322static int w83627hf_read_value(struct i2c_client *client, u16 register); 322static int w83627hf_read_value(struct i2c_client *client, u16 register);
@@ -328,9 +328,7 @@ static void w83627hf_init_client(struct i2c_client *client);
328static struct i2c_driver w83627hf_driver = { 328static struct i2c_driver w83627hf_driver = {
329 .owner = THIS_MODULE, 329 .owner = THIS_MODULE,
330 .name = "w83627hf", 330 .name = "w83627hf",
331 .id = I2C_DRIVERID_W83627HF, 331 .attach_adapter = w83627hf_detect,
332 .flags = I2C_DF_NOTIFY,
333 .attach_adapter = w83627hf_attach_adapter,
334 .detach_client = w83627hf_detach_client, 332 .detach_client = w83627hf_detach_client,
335}; 333};
336 334
@@ -959,16 +957,7 @@ device_create_file(&client->dev, &dev_attr_temp##offset##_type); \
959} while (0) 957} while (0)
960 958
961 959
962/* This function is called when: 960static int __init w83627hf_find(int sioaddr, unsigned short *addr)
963 * w83627hf_driver is inserted (when this module is loaded), for each
964 available adapter
965 * when a new adapter is inserted (and w83627hf_driver is still present) */
966static int w83627hf_attach_adapter(struct i2c_adapter *adapter)
967{
968 return i2c_detect(adapter, &addr_data, w83627hf_detect);
969}
970
971static int w83627hf_find(int sioaddr, int *address)
972{ 961{
973 u16 val; 962 u16 val;
974 963
@@ -988,32 +977,24 @@ static int w83627hf_find(int sioaddr, int *address)
988 superio_select(W83627HF_LD_HWM); 977 superio_select(W83627HF_LD_HWM);
989 val = (superio_inb(WINB_BASE_REG) << 8) | 978 val = (superio_inb(WINB_BASE_REG) << 8) |
990 superio_inb(WINB_BASE_REG + 1); 979 superio_inb(WINB_BASE_REG + 1);
991 *address = val & ~(WINB_EXTENT - 1); 980 *addr = val & ~(WINB_EXTENT - 1);
992 if (*address == 0 && force_addr == 0) { 981 if (*addr == 0 && force_addr == 0) {
993 superio_exit(); 982 superio_exit();
994 return -ENODEV; 983 return -ENODEV;
995 } 984 }
996 if (force_addr)
997 *address = force_addr; /* so detect will get called */
998 985
999 superio_exit(); 986 superio_exit();
1000 return 0; 987 return 0;
1001} 988}
1002 989
1003int w83627hf_detect(struct i2c_adapter *adapter, int address, 990static int w83627hf_detect(struct i2c_adapter *adapter)
1004 int kind)
1005{ 991{
1006 int val; 992 int val, kind;
1007 struct i2c_client *new_client; 993 struct i2c_client *new_client;
1008 struct w83627hf_data *data; 994 struct w83627hf_data *data;
1009 int err = 0; 995 int err = 0;
1010 const char *client_name = ""; 996 const char *client_name = "";
1011 997
1012 if (!i2c_is_isa_adapter(adapter)) {
1013 err = -ENODEV;
1014 goto ERROR0;
1015 }
1016
1017 if(force_addr) 998 if(force_addr)
1018 address = force_addr & ~(WINB_EXTENT - 1); 999 address = force_addr & ~(WINB_EXTENT - 1);
1019 1000
@@ -1102,6 +1083,12 @@ int w83627hf_detect(struct i2c_adapter *adapter, int address,
1102 data->fan_min[2] = w83627hf_read_value(new_client, W83781D_REG_FAN_MIN(3)); 1083 data->fan_min[2] = w83627hf_read_value(new_client, W83781D_REG_FAN_MIN(3));
1103 1084
1104 /* Register sysfs hooks */ 1085 /* Register sysfs hooks */
1086 data->class_dev = hwmon_device_register(&new_client->dev);
1087 if (IS_ERR(data->class_dev)) {
1088 err = PTR_ERR(data->class_dev);
1089 goto ERROR3;
1090 }
1091
1105 device_create_file_in(new_client, 0); 1092 device_create_file_in(new_client, 0);
1106 if (kind != w83697hf) 1093 if (kind != w83697hf)
1107 device_create_file_in(new_client, 1); 1094 device_create_file_in(new_client, 1);
@@ -1152,6 +1139,8 @@ int w83627hf_detect(struct i2c_adapter *adapter, int address,
1152 1139
1153 return 0; 1140 return 0;
1154 1141
1142 ERROR3:
1143 i2c_detach_client(new_client);
1155 ERROR2: 1144 ERROR2:
1156 kfree(data); 1145 kfree(data);
1157 ERROR1: 1146 ERROR1:
@@ -1162,16 +1151,16 @@ int w83627hf_detect(struct i2c_adapter *adapter, int address,
1162 1151
1163static int w83627hf_detach_client(struct i2c_client *client) 1152static int w83627hf_detach_client(struct i2c_client *client)
1164{ 1153{
1154 struct w83627hf_data *data = i2c_get_clientdata(client);
1165 int err; 1155 int err;
1166 1156
1167 if ((err = i2c_detach_client(client))) { 1157 hwmon_device_unregister(data->class_dev);
1168 dev_err(&client->dev, 1158
1169 "Client deregistration failed, client not detached.\n"); 1159 if ((err = i2c_detach_client(client)))
1170 return err; 1160 return err;
1171 }
1172 1161
1173 release_region(client->addr, WINB_EXTENT); 1162 release_region(client->addr, WINB_EXTENT);
1174 kfree(i2c_get_clientdata(client)); 1163 kfree(data);
1175 1164
1176 return 0; 1165 return 0;
1177} 1166}
@@ -1327,7 +1316,7 @@ static void w83627hf_init_client(struct i2c_client *client)
1327 data->vrm = (data->vrm_ovt & 0x01) ? 90 : 82; 1316 data->vrm = (data->vrm_ovt & 0x01) ? 90 : 82;
1328 } else { 1317 } else {
1329 /* Convert VID to voltage based on default VRM */ 1318 /* Convert VID to voltage based on default VRM */
1330 data->vrm = i2c_which_vrm(); 1319 data->vrm = vid_which_vrm();
1331 } 1320 }
1332 1321
1333 tmp = w83627hf_read_value(client, W83781D_REG_SCFG1); 1322 tmp = w83627hf_read_value(client, W83781D_REG_SCFG1);
@@ -1485,20 +1474,17 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
1485 1474
1486static int __init sensors_w83627hf_init(void) 1475static int __init sensors_w83627hf_init(void)
1487{ 1476{
1488 int addr; 1477 if (w83627hf_find(0x2e, &address)
1489 1478 && w83627hf_find(0x4e, &address)) {
1490 if (w83627hf_find(0x2e, &addr)
1491 && w83627hf_find(0x4e, &addr)) {
1492 return -ENODEV; 1479 return -ENODEV;
1493 } 1480 }
1494 normal_isa[0] = addr;
1495 1481
1496 return i2c_add_driver(&w83627hf_driver); 1482 return i2c_isa_add_driver(&w83627hf_driver);
1497} 1483}
1498 1484
1499static void __exit sensors_w83627hf_exit(void) 1485static void __exit sensors_w83627hf_exit(void)
1500{ 1486{
1501 i2c_del_driver(&w83627hf_driver); 1487 i2c_isa_del_driver(&w83627hf_driver);
1502} 1488}
1503 1489
1504MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, " 1490MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 0bb131ce09eb..4c43337ca780 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -38,8 +38,10 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/i2c.h> 40#include <linux/i2c.h>
41#include <linux/i2c-sensor.h> 41#include <linux/i2c-isa.h>
42#include <linux/i2c-vid.h> 42#include <linux/hwmon.h>
43#include <linux/hwmon-vid.h>
44#include <linux/err.h>
43#include <asm/io.h> 45#include <asm/io.h>
44#include "lm75.h" 46#include "lm75.h"
45 47
@@ -47,10 +49,10 @@
47static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 49static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
48 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 50 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
49 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; 51 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END };
50static unsigned int normal_isa[] = { 0x0290, I2C_CLIENT_ISA_END }; 52static unsigned short isa_address = 0x290;
51 53
52/* Insmod parameters */ 54/* Insmod parameters */
53SENSORS_INSMOD_5(w83781d, w83782d, w83783s, w83627hf, as99127f); 55I2C_CLIENT_INSMOD_5(w83781d, w83782d, w83783s, w83627hf, as99127f);
54I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: " 56I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
55 "{bus, clientaddr, subclientaddr1, subclientaddr2}"); 57 "{bus, clientaddr, subclientaddr1, subclientaddr2}");
56 58
@@ -218,6 +220,7 @@ DIV_TO_REG(long val, enum chips type)
218 allocated. */ 220 allocated. */
219struct w83781d_data { 221struct w83781d_data {
220 struct i2c_client client; 222 struct i2c_client client;
223 struct class_device *class_dev;
221 struct semaphore lock; 224 struct semaphore lock;
222 enum chips type; 225 enum chips type;
223 226
@@ -255,6 +258,7 @@ struct w83781d_data {
255}; 258};
256 259
257static int w83781d_attach_adapter(struct i2c_adapter *adapter); 260static int w83781d_attach_adapter(struct i2c_adapter *adapter);
261static int w83781d_isa_attach_adapter(struct i2c_adapter *adapter);
258static int w83781d_detect(struct i2c_adapter *adapter, int address, int kind); 262static int w83781d_detect(struct i2c_adapter *adapter, int address, int kind);
259static int w83781d_detach_client(struct i2c_client *client); 263static int w83781d_detach_client(struct i2c_client *client);
260 264
@@ -273,6 +277,14 @@ static struct i2c_driver w83781d_driver = {
273 .detach_client = w83781d_detach_client, 277 .detach_client = w83781d_detach_client,
274}; 278};
275 279
280static struct i2c_driver w83781d_isa_driver = {
281 .owner = THIS_MODULE,
282 .name = "w83781d-isa",
283 .attach_adapter = w83781d_isa_attach_adapter,
284 .detach_client = w83781d_detach_client,
285};
286
287
276/* following are the sysfs callback functions */ 288/* following are the sysfs callback functions */
277#define show_in_reg(reg) \ 289#define show_in_reg(reg) \
278static ssize_t show_##reg (struct device *dev, char *buf, int nr) \ 290static ssize_t show_##reg (struct device *dev, char *buf, int nr) \
@@ -856,7 +868,13 @@ w83781d_attach_adapter(struct i2c_adapter *adapter)
856{ 868{
857 if (!(adapter->class & I2C_CLASS_HWMON)) 869 if (!(adapter->class & I2C_CLASS_HWMON))
858 return 0; 870 return 0;
859 return i2c_detect(adapter, &addr_data, w83781d_detect); 871 return i2c_probe(adapter, &addr_data, w83781d_detect);
872}
873
874static int
875w83781d_isa_attach_adapter(struct i2c_adapter *adapter)
876{
877 return w83781d_detect(adapter, isa_address, -1);
860} 878}
861 879
862/* Assumes that adapter is of I2C, not ISA variety. 880/* Assumes that adapter is of I2C, not ISA variety.
@@ -961,10 +979,10 @@ w83781d_detect_subclients(struct i2c_adapter *adapter, int address, int kind,
961ERROR_SC_3: 979ERROR_SC_3:
962 i2c_detach_client(data->lm75[0]); 980 i2c_detach_client(data->lm75[0]);
963ERROR_SC_2: 981ERROR_SC_2:
964 if (NULL != data->lm75[1]) 982 if (data->lm75[1])
965 kfree(data->lm75[1]); 983 kfree(data->lm75[1]);
966ERROR_SC_1: 984ERROR_SC_1:
967 if (NULL != data->lm75[0]) 985 if (data->lm75[0])
968 kfree(data->lm75[0]); 986 kfree(data->lm75[0]);
969ERROR_SC_0: 987ERROR_SC_0:
970 return err; 988 return err;
@@ -999,7 +1017,7 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind)
999 1017
1000 if (is_isa) 1018 if (is_isa)
1001 if (!request_region(address, W83781D_EXTENT, 1019 if (!request_region(address, W83781D_EXTENT,
1002 w83781d_driver.name)) { 1020 w83781d_isa_driver.name)) {
1003 dev_dbg(&adapter->dev, "Request of region " 1021 dev_dbg(&adapter->dev, "Request of region "
1004 "0x%x-0x%x for w83781d failed\n", address, 1022 "0x%x-0x%x for w83781d failed\n", address,
1005 address + W83781D_EXTENT - 1); 1023 address + W83781D_EXTENT - 1);
@@ -1057,7 +1075,7 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind)
1057 new_client->addr = address; 1075 new_client->addr = address;
1058 init_MUTEX(&data->lock); 1076 init_MUTEX(&data->lock);
1059 new_client->adapter = adapter; 1077 new_client->adapter = adapter;
1060 new_client->driver = &w83781d_driver; 1078 new_client->driver = is_isa ? &w83781d_isa_driver : &w83781d_driver;
1061 new_client->flags = 0; 1079 new_client->flags = 0;
1062 1080
1063 /* Now, we do the remaining detection. */ 1081 /* Now, we do the remaining detection. */
@@ -1189,6 +1207,12 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind)
1189 data->pwmenable[i] = 1; 1207 data->pwmenable[i] = 1;
1190 1208
1191 /* Register sysfs hooks */ 1209 /* Register sysfs hooks */
1210 data->class_dev = hwmon_device_register(&new_client->dev);
1211 if (IS_ERR(data->class_dev)) {
1212 err = PTR_ERR(data->class_dev);
1213 goto ERROR4;
1214 }
1215
1192 device_create_file_in(new_client, 0); 1216 device_create_file_in(new_client, 0);
1193 if (kind != w83783s) 1217 if (kind != w83783s)
1194 device_create_file_in(new_client, 1); 1218 device_create_file_in(new_client, 1);
@@ -1241,6 +1265,15 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind)
1241 1265
1242 return 0; 1266 return 0;
1243 1267
1268ERROR4:
1269 if (data->lm75[1]) {
1270 i2c_detach_client(data->lm75[1]);
1271 kfree(data->lm75[1]);
1272 }
1273 if (data->lm75[0]) {
1274 i2c_detach_client(data->lm75[0]);
1275 kfree(data->lm75[0]);
1276 }
1244ERROR3: 1277ERROR3:
1245 i2c_detach_client(new_client); 1278 i2c_detach_client(new_client);
1246ERROR2: 1279ERROR2:
@@ -1255,24 +1288,26 @@ ERROR0:
1255static int 1288static int
1256w83781d_detach_client(struct i2c_client *client) 1289w83781d_detach_client(struct i2c_client *client)
1257{ 1290{
1291 struct w83781d_data *data = i2c_get_clientdata(client);
1258 int err; 1292 int err;
1259 1293
1294 /* main client */
1295 if (data)
1296 hwmon_device_unregister(data->class_dev);
1297
1260 if (i2c_is_isa_client(client)) 1298 if (i2c_is_isa_client(client))
1261 release_region(client->addr, W83781D_EXTENT); 1299 release_region(client->addr, W83781D_EXTENT);
1262 1300
1263 if ((err = i2c_detach_client(client))) { 1301 if ((err = i2c_detach_client(client)))
1264 dev_err(&client->dev,
1265 "Client deregistration failed, client not detached.\n");
1266 return err; 1302 return err;
1267 }
1268 1303
1269 if (i2c_get_clientdata(client)==NULL) { 1304 /* main client */
1270 /* subclients */ 1305 if (data)
1306 kfree(data);
1307
1308 /* subclient */
1309 else
1271 kfree(client); 1310 kfree(client);
1272 } else {
1273 /* main client */
1274 kfree(i2c_get_clientdata(client));
1275 }
1276 1311
1277 return 0; 1312 return 0;
1278} 1313}
@@ -1443,7 +1478,7 @@ w83781d_init_client(struct i2c_client *client)
1443 w83781d_write_value(client, W83781D_REG_BEEP_INTS2, 0); 1478 w83781d_write_value(client, W83781D_REG_BEEP_INTS2, 0);
1444 } 1479 }
1445 1480
1446 data->vrm = i2c_which_vrm(); 1481 data->vrm = vid_which_vrm();
1447 1482
1448 if ((type != w83781d) && (type != as99127f)) { 1483 if ((type != w83781d) && (type != as99127f)) {
1449 tmp = w83781d_read_value(client, W83781D_REG_SCFG1); 1484 tmp = w83781d_read_value(client, W83781D_REG_SCFG1);
@@ -1613,12 +1648,25 @@ static struct w83781d_data *w83781d_update_device(struct device *dev)
1613static int __init 1648static int __init
1614sensors_w83781d_init(void) 1649sensors_w83781d_init(void)
1615{ 1650{
1616 return i2c_add_driver(&w83781d_driver); 1651 int res;
1652
1653 res = i2c_add_driver(&w83781d_driver);
1654 if (res)
1655 return res;
1656
1657 res = i2c_isa_add_driver(&w83781d_isa_driver);
1658 if (res) {
1659 i2c_del_driver(&w83781d_driver);
1660 return res;
1661 }
1662
1663 return 0;
1617} 1664}
1618 1665
1619static void __exit 1666static void __exit
1620sensors_w83781d_exit(void) 1667sensors_w83781d_exit(void)
1621{ 1668{
1669 i2c_isa_del_driver(&w83781d_isa_driver);
1622 i2c_del_driver(&w83781d_driver); 1670 i2c_del_driver(&w83781d_driver);
1623} 1671}
1624 1672
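The reworked sensors_w83781d_init() above registers the plain I2C driver first and the ISA driver second, and drops the first registration again if the second one fails, so a partial failure leaves nothing registered. The same unwind pattern in isolation, as a standalone sketch (register_a/register_b/unregister_a are made-up stand-ins, not kernel APIs):

#include <stdio.h>

/* Two-stage registration with rollback, mirroring sensors_w83781d_init(). */
static int register_a(void) { return 0; }
static void unregister_a(void) { }
static int register_b(void) { return -1; }      /* pretend the second step fails */

static int init_both(void)
{
        int res = register_a();
        if (res)
                return res;

        res = register_b();
        if (res) {
                unregister_a();         /* roll back the first registration */
                return res;
        }
        return 0;
}

int main(void)
{
        printf("init_both() = %d\n", init_both());      /* prints -1 */
        return 0;
}

The matching exit path tears things down in reverse order, as sensors_w83781d_exit() does.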
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
new file mode 100644
index 000000000000..ba0c28015f6a
--- /dev/null
+++ b/drivers/hwmon/w83792d.c
@@ -0,0 +1,1649 @@
1/*
2 w83792d.c - Part of lm_sensors, Linux kernel modules for hardware
3 monitoring
4 Copyright (C) 2004, 2005 Winbond Electronics Corp.
5 Chunhao Huang <DZShen@Winbond.com.tw>,
6 Rudolf Marek <r.marek@sh.cvut.cz>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21
22 Note:
23 1. This driver is only for the 2.6 kernel; the 2.4 kernel needs a
24 different driver.
25 2. This driver is only for the C version of the Winbond W83792D device;
26 some motherboards carry the B version instead. The calculation method
27 for in6-in7 (measured values and limits) differs slightly between the
28 C and B versions; the two can be told apart by CR[0x49h].
29*/
30
31/*
32 Supports following chips:
33
34 Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
35 w83792d 9 7 7 3 0x7a 0x5ca3 yes no
36*/
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/i2c.h>
43#include <linux/hwmon.h>
44#include <linux/hwmon-sysfs.h>
45#include <linux/err.h>
46
47/* Addresses to scan */
48static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END };
49
50/* Insmod parameters */
51I2C_CLIENT_INSMOD_1(w83792d);
52I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
53 "{bus, clientaddr, subclientaddr1, subclientaddr2}");
54
55static int init;
56module_param(init, bool, 0);
57MODULE_PARM_DESC(init, "Set to one to force chip initialization");
58
59/* The W83792D registers */
60static const u8 W83792D_REG_IN[9] = {
61 0x20, /* Vcore A in DataSheet */
62 0x21, /* Vcore B in DataSheet */
63 0x22, /* VIN0 in DataSheet */
64 0x23, /* VIN1 in DataSheet */
65 0x24, /* VIN2 in DataSheet */
66 0x25, /* VIN3 in DataSheet */
67 0x26, /* 5VCC in DataSheet */
68 0xB0, /* 5VSB in DataSheet */
69 0xB1 /* VBAT in DataSheet */
70};
71#define W83792D_REG_LOW_BITS1 0x3E /* Low Bits I in DataSheet */
72#define W83792D_REG_LOW_BITS2 0x3F /* Low Bits II in DataSheet */
73static const u8 W83792D_REG_IN_MAX[9] = {
74 0x2B, /* Vcore A High Limit in DataSheet */
75 0x2D, /* Vcore B High Limit in DataSheet */
76 0x2F, /* VIN0 High Limit in DataSheet */
77 0x31, /* VIN1 High Limit in DataSheet */
78 0x33, /* VIN2 High Limit in DataSheet */
79 0x35, /* VIN3 High Limit in DataSheet */
80 0x37, /* 5VCC High Limit in DataSheet */
81 0xB4, /* 5VSB High Limit in DataSheet */
82 0xB6 /* VBAT High Limit in DataSheet */
83};
84static const u8 W83792D_REG_IN_MIN[9] = {
85 0x2C, /* Vcore A Low Limit in DataSheet */
86 0x2E, /* Vcore B Low Limit in DataSheet */
87 0x30, /* VIN0 Low Limit in DataSheet */
88 0x32, /* VIN1 Low Limit in DataSheet */
89 0x34, /* VIN2 Low Limit in DataSheet */
90 0x36, /* VIN3 Low Limit in DataSheet */
91 0x38, /* 5VCC Low Limit in DataSheet */
92 0xB5, /* 5VSB Low Limit in DataSheet */
93 0xB7 /* VBAT Low Limit in DataSheet */
94};
95static const u8 W83792D_REG_FAN[7] = {
96 0x28, /* FAN 1 Count in DataSheet */
97 0x29, /* FAN 2 Count in DataSheet */
98 0x2A, /* FAN 3 Count in DataSheet */
99 0xB8, /* FAN 4 Count in DataSheet */
100 0xB9, /* FAN 5 Count in DataSheet */
101 0xBA, /* FAN 6 Count in DataSheet */
102 0xBE /* FAN 7 Count in DataSheet */
103};
104static const u8 W83792D_REG_FAN_MIN[7] = {
105 0x3B, /* FAN 1 Count Low Limit in DataSheet */
106 0x3C, /* FAN 2 Count Low Limit in DataSheet */
107 0x3D, /* FAN 3 Count Low Limit in DataSheet */
108 0xBB, /* FAN 4 Count Low Limit in DataSheet */
109 0xBC, /* FAN 5 Count Low Limit in DataSheet */
110 0xBD, /* FAN 6 Count Low Limit in DataSheet */
111 0xBF /* FAN 7 Count Low Limit in DataSheet */
112};
113#define W83792D_REG_FAN_CFG 0x84 /* FAN Configuration in DataSheet */
114static const u8 W83792D_REG_FAN_DIV[4] = {
115 0x47, /* contains FAN2 and FAN1 Divisor */
116 0x5B, /* contains FAN4 and FAN3 Divisor */
117 0x5C, /* contains FAN6 and FAN5 Divisor */
118 0x9E /* contains FAN7 Divisor. */
119};
120static const u8 W83792D_REG_PWM[7] = {
121 0x81, /* FAN 1 Duty Cycle, be used to control */
122 0x83, /* FAN 2 Duty Cycle, be used to control */
123 0x94, /* FAN 3 Duty Cycle, be used to control */
124 0xA3, /* FAN 4 Duty Cycle, be used to control */
125 0xA4, /* FAN 5 Duty Cycle, be used to control */
126 0xA5, /* FAN 6 Duty Cycle, be used to control */
127 0xA6 /* FAN 7 Duty Cycle, be used to control */
128};
129#define W83792D_REG_BANK 0x4E
130#define W83792D_REG_TEMP2_CONFIG 0xC2
131#define W83792D_REG_TEMP3_CONFIG 0xCA
132
133static const u8 W83792D_REG_TEMP1[3] = {
134 0x27, /* TEMP 1 in DataSheet */
135 0x39, /* TEMP 1 Over in DataSheet */
136 0x3A, /* TEMP 1 Hyst in DataSheet */
137};
138
139static const u8 W83792D_REG_TEMP_ADD[2][6] = {
140 { 0xC0, /* TEMP 2 in DataSheet */
141 0xC1, /* TEMP 2(0.5 deg) in DataSheet */
142 0xC5, /* TEMP 2 Over High part in DataSheet */
143 0xC6, /* TEMP 2 Over Low part in DataSheet */
144 0xC3, /* TEMP 2 Thyst High part in DataSheet */
145 0xC4 }, /* TEMP 2 Thyst Low part in DataSheet */
146 { 0xC8, /* TEMP 3 in DataSheet */
147 0xC9, /* TEMP 3(0.5 deg) in DataSheet */
148 0xCD, /* TEMP 3 Over High part in DataSheet */
149 0xCE, /* TEMP 3 Over Low part in DataSheet */
150 0xCB, /* TEMP 3 Thyst High part in DataSheet */
151 0xCC } /* TEMP 3 Thyst Low part in DataSheet */
152};
153
154static const u8 W83792D_REG_THERMAL[3] = {
155 0x85, /* SmartFanI: Fan1 target value */
156 0x86, /* SmartFanI: Fan2 target value */
157 0x96 /* SmartFanI: Fan3 target value */
158};
159
160static const u8 W83792D_REG_TOLERANCE[3] = {
161 0x87, /* (bit3-0)SmartFan Fan1 tolerance */
162 0x87, /* (bit7-4)SmartFan Fan2 tolerance */
163 0x97 /* (bit3-0)SmartFan Fan3 tolerance */
164};
165
166static const u8 W83792D_REG_POINTS[3][4] = {
167 { 0x85, /* SmartFanII: Fan1 temp point 1 */
168 0xE3, /* SmartFanII: Fan1 temp point 2 */
169 0xE4, /* SmartFanII: Fan1 temp point 3 */
170 0xE5 }, /* SmartFanII: Fan1 temp point 4 */
171 { 0x86, /* SmartFanII: Fan2 temp point 1 */
172 0xE6, /* SmartFanII: Fan2 temp point 2 */
173 0xE7, /* SmartFanII: Fan2 temp point 3 */
174 0xE8 }, /* SmartFanII: Fan2 temp point 4 */
175 { 0x96, /* SmartFanII: Fan3 temp point 1 */
176 0xE9, /* SmartFanII: Fan3 temp point 2 */
177 0xEA, /* SmartFanII: Fan3 temp point 3 */
178 0xEB } /* SmartFanII: Fan3 temp point 4 */
179};
180
181static const u8 W83792D_REG_LEVELS[3][4] = {
182 { 0x88, /* (bit3-0) SmartFanII: Fan1 Non-Stop */
183 0x88, /* (bit7-4) SmartFanII: Fan1 Level 1 */
184 0xE0, /* (bit7-4) SmartFanII: Fan1 Level 2 */
185 0xE0 }, /* (bit3-0) SmartFanII: Fan1 Level 3 */
186 { 0x89, /* (bit3-0) SmartFanII: Fan2 Non-Stop */
187 0x89, /* (bit7-4) SmartFanII: Fan2 Level 1 */
188 0xE1, /* (bit7-4) SmartFanII: Fan2 Level 2 */
189 0xE1 }, /* (bit3-0) SmartFanII: Fan2 Level 3 */
190 { 0x98, /* (bit3-0) SmartFanII: Fan3 Non-Stop */
191 0x98, /* (bit7-4) SmartFanII: Fan3 Level 1 */
192 0xE2, /* (bit7-4) SmartFanII: Fan3 Level 2 */
193 0xE2 } /* (bit3-0) SmartFanII: Fan3 Level 3 */
194};
195
196#define W83792D_REG_CONFIG 0x40
197#define W83792D_REG_VID_FANDIV 0x47
198#define W83792D_REG_CHIPID 0x49
199#define W83792D_REG_WCHIPID 0x58
200#define W83792D_REG_CHIPMAN 0x4F
201#define W83792D_REG_PIN 0x4B
202#define W83792D_REG_I2C_SUBADDR 0x4A
203
204#define W83792D_REG_ALARM1 0xA9 /* realtime status register1 */
205#define W83792D_REG_ALARM2 0xAA /* realtime status register2 */
206#define W83792D_REG_ALARM3 0xAB /* realtime status register3 */
207#define W83792D_REG_CHASSIS 0x42 /* Bit 5: Case Open status bit */
208#define W83792D_REG_CHASSIS_CLR 0x44 /* Bit 7: Case Open CLR_CHS/Reset bit */
209
210/* control in0/in1 's limit modifiability */
211#define W83792D_REG_VID_IN_B 0x17
212
213#define W83792D_REG_VBAT 0x5D
214#define W83792D_REG_I2C_ADDR 0x48
215
216/* Conversions. Rounding and limit checking is only done on the TO_REG
217 variants. Note that you should be a bit careful with which arguments
218 these macros are called: arguments may be evaluated more than once.
219 Fixing this is just not worth it. */
220#define IN_FROM_REG(nr,val) (((nr)<=1)?(val*2): \
221 ((((nr)==6)||((nr)==7))?(val*6):(val*4)))
222#define IN_TO_REG(nr,val) (((nr)<=1)?(val/2): \
223 ((((nr)==6)||((nr)==7))?(val/6):(val/4)))
224
225static inline u8
226FAN_TO_REG(long rpm, int div)
227{
228 if (rpm == 0)
229 return 255;
230 rpm = SENSORS_LIMIT(rpm, 1, 1000000);
231 return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
232}
233
234#define FAN_FROM_REG(val,div) ((val) == 0 ? -1 : \
235 ((val) == 255 ? 0 : \
236 1350000 / ((val) * (div))))
237
238/* for temp1 */
239#define TEMP1_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
240 : (val)) / 1000, 0, 0xff))
241#define TEMP1_FROM_REG(val) (((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
242/* for temp2 and temp3, because they need additional resolution */
243#define TEMP_ADD_FROM_REG(val1, val2) \
244 ((((val1) & 0x80 ? (val1)-0x100 \
245 : (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
246#define TEMP_ADD_TO_REG_HIGH(val) \
247 (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
248 : (val)) / 1000, 0, 0xff))
249#define TEMP_ADD_TO_REG_LOW(val) ((val%1000) ? 0x80 : 0x00)
250
251#define PWM_FROM_REG(val) (val)
252#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))
253#define DIV_FROM_REG(val) (1 << (val))
254
255static inline u8
256DIV_TO_REG(long val)
257{
258 int i;
259 val = SENSORS_LIMIT(val, 1, 128) >> 1;
260 for (i = 0; i < 6; i++) {
261 if (val == 0)
262 break;
263 val >>= 1;
264 }
265 return ((u8) i);
266}
267
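As the comment above the conversion macros notes, rounding and limiting happen only in the TO_REG direction, and IN_FROM_REG()/IN_TO_REG() scale each channel differently: by 2 for in0/in1 (Vcore), by 6 for in6/in7 (5VCC/5VSB), and by 4 for the rest, which is the step per count in the values the in*_input files end up reporting. A standalone sketch of that scaling, outside the driver (the helper name is made up):

#include <stdio.h>

/* Same per-channel scaling as IN_FROM_REG(): factor 2 for in0/in1,
 * 6 for in6/in7, 4 for everything else. */
static long in_from_reg(int nr, long count)
{
        if (nr <= 1)
                return count * 2;
        if (nr == 6 || nr == 7)
                return count * 6;
        return count * 4;
}

int main(void)
{
        long count = 0x320;     /* a 10-bit count of 800 */

        printf("in0: %ld  in2: %ld  in6: %ld\n",
               in_from_reg(0, count), in_from_reg(2, count), in_from_reg(6, count));
        /* prints: in0: 1600  in2: 3200  in6: 4800 */
        return 0;
}

Using a function instead of the macro also sidesteps the multiple-evaluation caveat the comment warns about.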
268struct w83792d_data {
269 struct i2c_client client;
270 struct class_device *class_dev;
271 struct semaphore lock;
272 enum chips type;
273
274 struct semaphore update_lock;
275 char valid; /* !=0 if following fields are valid */
276 unsigned long last_updated; /* In jiffies */
277
278 /* array of 2 pointers to subclients */
279 struct i2c_client *lm75[2];
280
281 u8 in[9]; /* Register value */
282 u8 in_max[9]; /* Register value */
283 u8 in_min[9]; /* Register value */
284 u8 low_bits[2]; /* Additional resolution to voltage in0-6 */
285 u8 fan[7]; /* Register value */
286 u8 fan_min[7]; /* Register value */
287 u8 temp1[3]; /* current, over, thyst */
288 u8 temp_add[2][6]; /* Register value */
289 u8 fan_div[7]; /* Register encoding, shifted right */
290 u8 pwm[7]; /* We only consider the first 3 sets of pwm,
291 although the 792 chip has 7 sets of pwm. */
292 u8 pwmenable[3];
293 u8 pwm_mode[7]; /* indicates PWM or DC mode: 1->PWM; 0->DC */
294 u32 alarms; /* realtime status register encoding, combined */
295 u8 chassis; /* Chassis status */
296 u8 chassis_clear; /* CLR_CHS, clear chassis intrusion detection */
297 u8 thermal_cruise[3]; /* Smart FanI: Fan1,2,3 target value */
298 u8 tolerance[3]; /* Fan1,2,3 tolerance(Smart Fan I/II) */
299 u8 sf2_points[3][4]; /* Smart FanII: Fan1,2,3 temperature points */
300 u8 sf2_levels[3][4]; /* Smart FanII: Fan1,2,3 duty cycle levels */
301};
302
303static int w83792d_attach_adapter(struct i2c_adapter *adapter);
304static int w83792d_detect(struct i2c_adapter *adapter, int address, int kind);
305static int w83792d_detach_client(struct i2c_client *client);
306
307static int w83792d_read_value(struct i2c_client *client, u8 register);
308static int w83792d_write_value(struct i2c_client *client, u8 register,
309 u8 value);
310static struct w83792d_data *w83792d_update_device(struct device *dev);
311
312#ifdef DEBUG
313static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
314#endif
315
316static void w83792d_init_client(struct i2c_client *client);
317
318static struct i2c_driver w83792d_driver = {
319 .owner = THIS_MODULE,
320 .name = "w83792d",
321 .flags = I2C_DF_NOTIFY,
322 .attach_adapter = w83792d_attach_adapter,
323 .detach_client = w83792d_detach_client,
324};
325
326static long in_count_from_reg(int nr, struct w83792d_data *data)
327{
328 u16 vol_count = data->in[nr];
329 u16 low_bits = 0;
330 vol_count = (vol_count << 2);
331 switch (nr)
332 {
333 case 0: /* vin0 */
334 low_bits = (data->low_bits[0]) & 0x03;
335 break;
336 case 1: /* vin1 */
337 low_bits = ((data->low_bits[0]) & 0x0c) >> 2;
338 break;
339 case 2: /* vin2 */
340 low_bits = ((data->low_bits[0]) & 0x30) >> 4;
341 break;
342 case 3: /* vin3 */
343 low_bits = ((data->low_bits[0]) & 0xc0) >> 6;
344 break;
345 case 4: /* vin4 */
346 low_bits = (data->low_bits[1]) & 0x03;
347 break;
348 case 5: /* vin5 */
349 low_bits = ((data->low_bits[1]) & 0x0c) >> 2;
350 break;
351 case 6: /* vin6 */
352 low_bits = ((data->low_bits[1]) & 0x30) >> 4;
353 default:
354 break;
355 }
356 vol_count = vol_count | low_bits;
357 return vol_count;
358}
359
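in_count_from_reg() above widens each 8-bit voltage register to a 10-bit count by appending two bits from the Low Bits I/II registers; the switch spells out which bit pair belongs to which input. The same packing can be read off with shifts, shown here as a standalone sketch (not driver code):

#include <stdio.h>

/* in0-in3 take their two extra bits from low_bits[0] (bit pairs 0-1, 2-3,
 * 4-5, 6-7); in4-in6 take them from low_bits[1]; in7/in8 have none. */
static unsigned int ten_bit_count(unsigned char reg,
                                  const unsigned char low_bits[2], int nr)
{
        unsigned int count = (unsigned int)reg << 2;

        if (nr <= 6)
                count |= (low_bits[nr >> 2] >> ((nr & 3) * 2)) & 0x03;
        return count;
}

int main(void)
{
        unsigned char low_bits[2] = { 0x1B, 0x02 };     /* example register contents */

        /* in1 gets bits 2-3 of low_bits[0] (0x1B -> 2): (0x40 << 2) | 2 = 258 */
        printf("in1 count = %u\n", ten_bit_count(0x40, low_bits, 1));
        return 0;
}

in7 and in8 (5VSB/VBAT) have no extra bits, which is why the guard stops at in6, just as the switch does.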
360/* following are the sysfs callback functions */
361static ssize_t show_in(struct device *dev, struct device_attribute *attr,
362 char *buf)
363{
364 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
365 int nr = sensor_attr->index;
366 struct w83792d_data *data = w83792d_update_device(dev);
367 return sprintf(buf,"%ld\n", IN_FROM_REG(nr,(in_count_from_reg(nr, data))));
368}
369
370#define show_in_reg(reg) \
371static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
372 char *buf) \
373{ \
374 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
375 int nr = sensor_attr->index; \
376 struct w83792d_data *data = w83792d_update_device(dev); \
377 return sprintf(buf,"%ld\n", (long)(IN_FROM_REG(nr, (data->reg[nr])*4))); \
378}
379
380show_in_reg(in_min);
381show_in_reg(in_max);
382
383#define store_in_reg(REG, reg) \
384static ssize_t store_in_##reg (struct device *dev, \
385 struct device_attribute *attr, \
386 const char *buf, size_t count) \
387{ \
388 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
389 int nr = sensor_attr->index; \
390 struct i2c_client *client = to_i2c_client(dev); \
391 struct w83792d_data *data = i2c_get_clientdata(client); \
392 u32 val; \
393 \
394 val = simple_strtoul(buf, NULL, 10); \
395 data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val)/4, 0, 255); \
396 w83792d_write_value(client, W83792D_REG_IN_##REG[nr], data->in_##reg[nr]); \
397 \
398 return count; \
399}
400store_in_reg(MIN, min);
401store_in_reg(MAX, max);
402
403#define sysfs_in_reg(offset) \
404static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in, \
405 NULL, offset); \
406static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
407 show_in_min, store_in_min, offset); \
408static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
409 show_in_max, store_in_max, offset);
410
411sysfs_in_reg(0);
412sysfs_in_reg(1);
413sysfs_in_reg(2);
414sysfs_in_reg(3);
415sysfs_in_reg(4);
416sysfs_in_reg(5);
417sysfs_in_reg(6);
418sysfs_in_reg(7);
419sysfs_in_reg(8);
420
421#define device_create_file_in(client, offset) \
422do { \
423device_create_file(&client->dev, &sensor_dev_attr_in##offset##_input.dev_attr); \
424device_create_file(&client->dev, &sensor_dev_attr_in##offset##_max.dev_attr); \
425device_create_file(&client->dev, &sensor_dev_attr_in##offset##_min.dev_attr); \
426} while (0)
427
428#define show_fan_reg(reg) \
429static ssize_t show_##reg (struct device *dev, struct device_attribute *attr, \
430 char *buf) \
431{ \
432 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
433 int nr = sensor_attr->index - 1; \
434 struct w83792d_data *data = w83792d_update_device(dev); \
435 return sprintf(buf,"%d\n", \
436 FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
437}
438
439show_fan_reg(fan);
440show_fan_reg(fan_min);
441
442static ssize_t
443store_fan_min(struct device *dev, struct device_attribute *attr,
444 const char *buf, size_t count)
445{
446 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
447 int nr = sensor_attr->index - 1;
448 struct i2c_client *client = to_i2c_client(dev);
449 struct w83792d_data *data = i2c_get_clientdata(client);
450 u32 val;
451
452 val = simple_strtoul(buf, NULL, 10);
453 data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
454 w83792d_write_value(client, W83792D_REG_FAN_MIN[nr],
455 data->fan_min[nr]);
456
457 return count;
458}
459
460static ssize_t
461show_fan_div(struct device *dev, struct device_attribute *attr,
462 char *buf)
463{
464 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
465 int nr = sensor_attr->index;
466 struct w83792d_data *data = w83792d_update_device(dev);
467 return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr - 1]));
468}
469
470/* Note: we save and restore the fan minimum here, because its value is
471 determined in part by the fan divisor. This follows the principle of
472 least surprise; the user doesn't expect the fan minimum to change just
473 because the divisor changed (see the sketch after this function). */
474static ssize_t
475store_fan_div(struct device *dev, struct device_attribute *attr,
476 const char *buf, size_t count)
477{
478 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
479 int nr = sensor_attr->index - 1;
480 struct i2c_client *client = to_i2c_client(dev);
481 struct w83792d_data *data = i2c_get_clientdata(client);
482 unsigned long min;
483 /*u8 reg;*/
484 u8 fan_div_reg = 0;
485 u8 tmp_fan_div;
486
487 /* Save fan_min */
488 min = FAN_FROM_REG(data->fan_min[nr],
489 DIV_FROM_REG(data->fan_div[nr]));
490
491 data->fan_div[nr] = DIV_TO_REG(simple_strtoul(buf, NULL, 10));
492
493 fan_div_reg = w83792d_read_value(client, W83792D_REG_FAN_DIV[nr >> 1]);
494 fan_div_reg &= (nr & 0x01) ? 0x8f : 0xf8;
495 tmp_fan_div = (nr & 0x01) ? (((data->fan_div[nr]) << 4) & 0x70)
496 : ((data->fan_div[nr]) & 0x07);
497 w83792d_write_value(client, W83792D_REG_FAN_DIV[nr >> 1],
498 fan_div_reg | tmp_fan_div);
499
500 /* Restore fan_min */
501 data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
502 w83792d_write_value(client, W83792D_REG_FAN_MIN[nr], data->fan_min[nr]);
503
504 return count;
505}
506
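The comment above store_fan_div() is easiest to see with numbers: fan speeds are derived as rpm = 1350000 / (count * divisor), so if the divisor changes while the stored minimum count stays put, the minimum the user configured silently moves. A standalone sketch (plain C, not driver code):

#include <stdio.h>

/* Fan readings are clock counts: rpm = 1350000 / (count * divisor), with
 * 0 meaning "no reading yet" and 255 meaning "stopped/too slow". */
static long fan_from_reg(int count, int div)
{
        if (count == 0)
                return -1;
        if (count == 255)
                return 0;
        return 1350000 / (count * div);
}

int main(void)
{
        int min_count = 75;

        printf("min with div=2: %ld rpm\n", fan_from_reg(min_count, 2));   /* 9000 */
        printf("same count, div=4: %ld rpm\n", fan_from_reg(min_count, 4)); /* 4500 */
        return 0;
}

store_fan_div() therefore converts the stored minimum back to RPM before touching the divisor and re-encodes it afterwards.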
507#define sysfs_fan(offset) \
508static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan, NULL, \
509 offset); \
510static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
511 show_fan_div, store_fan_div, offset); \
512static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
513 show_fan_min, store_fan_min, offset);
514
515sysfs_fan(1);
516sysfs_fan(2);
517sysfs_fan(3);
518sysfs_fan(4);
519sysfs_fan(5);
520sysfs_fan(6);
521sysfs_fan(7);
522
523#define device_create_file_fan(client, offset) \
524do { \
525device_create_file(&client->dev, &sensor_dev_attr_fan##offset##_input.dev_attr); \
526device_create_file(&client->dev, &sensor_dev_attr_fan##offset##_div.dev_attr); \
527device_create_file(&client->dev, &sensor_dev_attr_fan##offset##_min.dev_attr); \
528} while (0)
529
530
531/* read/write the temperature1, includes measured value and limits */
532
533static ssize_t show_temp1(struct device *dev, struct device_attribute *attr,
534 char *buf)
535{
536 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
537 int nr = sensor_attr->index;
538 struct w83792d_data *data = w83792d_update_device(dev);
539 return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp1[nr]));
540}
541
542static ssize_t store_temp1(struct device *dev, struct device_attribute *attr,
543 const char *buf, size_t count)
544{
545 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
546 int nr = sensor_attr->index;
547 struct i2c_client *client = to_i2c_client(dev);
548 struct w83792d_data *data = i2c_get_clientdata(client);
549 s32 val;
550
551 val = simple_strtol(buf, NULL, 10);
552
553 data->temp1[nr] = TEMP1_TO_REG(val);
554 w83792d_write_value(client, W83792D_REG_TEMP1[nr],
555 data->temp1[nr]);
556
557 return count;
558}
559
560
561static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0);
562static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1,
563 store_temp1, 1);
564static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1,
565 store_temp1, 2);
566
567#define device_create_file_temp1(client) \
568do { \
569device_create_file(&client->dev, &sensor_dev_attr_temp1_input.dev_attr); \
570device_create_file(&client->dev, &sensor_dev_attr_temp1_max.dev_attr); \
571device_create_file(&client->dev, &sensor_dev_attr_temp1_max_hyst.dev_attr); \
572} while (0)
573
574
575/* read/write the temperature2-3, includes measured value and limits */
576
577static ssize_t show_temp23(struct device *dev, struct device_attribute *attr,
578 char *buf)
579{
580 struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr);
581 int nr = sensor_attr->nr;
582 int index = sensor_attr->index;
583 struct w83792d_data *data = w83792d_update_device(dev);
584 return sprintf(buf,"%ld\n",
585 (long)TEMP_ADD_FROM_REG(data->temp_add[nr][index],
586 data->temp_add[nr][index+1]));
587}
588
589static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
590 const char *buf, size_t count)
591{
592 struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr);
593 int nr = sensor_attr->nr;
594 int index = sensor_attr->index;
595 struct i2c_client *client = to_i2c_client(dev);
596 struct w83792d_data *data = i2c_get_clientdata(client);
597 s32 val;
598
599 val = simple_strtol(buf, NULL, 10);
600
601 data->temp_add[nr][index] = TEMP_ADD_TO_REG_HIGH(val);
602 data->temp_add[nr][index+1] = TEMP_ADD_TO_REG_LOW(val);
603 w83792d_write_value(client, W83792D_REG_TEMP_ADD[nr][index],
604 data->temp_add[nr][index]);
605 w83792d_write_value(client, W83792D_REG_TEMP_ADD[nr][index+1],
606 data->temp_add[nr][index+1]);
607
608 return count;
609}
610
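show_temp23()/store_temp23() split each temp2/temp3 value across two registers, matching the TEMP_ADD_* macros above: the high byte holds signed whole degrees and bit 7 of the low byte adds the extra 0.5 degC. A standalone decoding sketch (millidegrees, as the sysfs files report):

#include <stdio.h>

/* High byte: signed whole degrees (two's complement); low byte bit 7: +0.5 degC. */
static long temp_add_from_reg(unsigned char hi, unsigned char lo)
{
        long t = (hi & 0x80) ? (long)hi - 0x100 : hi;

        return t * 1000 + ((lo & 0x80) ? 500 : 0);
}

int main(void)
{
        printf("%ld\n", temp_add_from_reg(0x28, 0x80)); /* 40500  (40.5 degC) */
        printf("%ld\n", temp_add_from_reg(0xF6, 0x00)); /* -10000 (-10 degC)  */
        return 0;
}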
611#define sysfs_temp23(name,idx) \
612static SENSOR_DEVICE_ATTR_2(name##_input, S_IRUGO, show_temp23, NULL, \
613 idx, 0); \
614static SENSOR_DEVICE_ATTR_2(name##_max, S_IRUGO | S_IWUSR, \
615 show_temp23, store_temp23, idx, 2); \
616static SENSOR_DEVICE_ATTR_2(name##_max_hyst, S_IRUGO | S_IWUSR, \
617 show_temp23, store_temp23, idx, 4);
618
619sysfs_temp23(temp2,0)
620sysfs_temp23(temp3,1)
621
622#define device_create_file_temp_add(client, offset) \
623do { \
624device_create_file(&client->dev, &sensor_dev_attr_temp##offset##_input.dev_attr); \
625device_create_file(&client->dev, &sensor_dev_attr_temp##offset##_max.dev_attr); \
626device_create_file(&client->dev, \
627&sensor_dev_attr_temp##offset##_max_hyst.dev_attr); \
628} while (0)
629
630
631/* get realtime status of all sensor items: voltage, temp, fan */
632static ssize_t
633show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
634{
635 struct w83792d_data *data = w83792d_update_device(dev);
636 return sprintf(buf, "%d\n", data->alarms);
637}
638
639static
640DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
641#define device_create_file_alarms(client) \
642device_create_file(&client->dev, &dev_attr_alarms);
643
644
645
646static ssize_t
647show_pwm(struct device *dev, struct device_attribute *attr,
648 char *buf)
649{
650 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
651 int nr = sensor_attr->index;
652 struct w83792d_data *data = w83792d_update_device(dev);
653 return sprintf(buf, "%ld\n", (long) PWM_FROM_REG(data->pwm[nr-1]));
654}
655
656static ssize_t
657show_pwmenable(struct device *dev, struct device_attribute *attr,
658 char *buf)
659{
660 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
661 int nr = sensor_attr->index - 1;
662 struct w83792d_data *data = w83792d_update_device(dev);
663 long pwm_enable_tmp = 1;
664
665 switch (data->pwmenable[nr]) {
666 case 0:
667 pwm_enable_tmp = 1; /* manual mode */
668 break;
669 case 1:
670 pwm_enable_tmp = 3; /*thermal cruise/Smart Fan I */
671 break;
672 case 2:
673 pwm_enable_tmp = 2; /* Smart Fan II */
674 break;
675 }
676
677 return sprintf(buf, "%ld\n", pwm_enable_tmp);
678}
679
680static ssize_t
681store_pwm(struct device *dev, struct device_attribute *attr,
682 const char *buf, size_t count)
683{
684 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
685 int nr = sensor_attr->index - 1;
686 struct i2c_client *client = to_i2c_client(dev);
687 struct w83792d_data *data = i2c_get_clientdata(client);
688 u32 val;
689
690 val = simple_strtoul(buf, NULL, 10);
691 data->pwm[nr] = PWM_TO_REG(val);
692 w83792d_write_value(client, W83792D_REG_PWM[nr], data->pwm[nr]);
693
694 return count;
695}
696
697static ssize_t
698store_pwmenable(struct device *dev, struct device_attribute *attr,
699 const char *buf, size_t count)
700{
701 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
702 int nr = sensor_attr->index - 1;
703 struct i2c_client *client = to_i2c_client(dev);
704 struct w83792d_data *data = i2c_get_clientdata(client);
705 u32 val;
706 u8 fan_cfg_tmp, cfg1_tmp, cfg2_tmp, cfg3_tmp, cfg4_tmp;
707
708 val = simple_strtoul(buf, NULL, 10);
709 switch (val) {
710 case 1:
711 data->pwmenable[nr] = 0; /* manual mode */
712 break;
713 case 2:
714 data->pwmenable[nr] = 2; /* Smart Fan II */
715 break;
716 case 3:
717 data->pwmenable[nr] = 1; /* thermal cruise/Smart Fan I */
718 break;
719 default:
720 return -EINVAL;
721 }
722 cfg1_tmp = data->pwmenable[0];
723 cfg2_tmp = (data->pwmenable[1]) << 2;
724 cfg3_tmp = (data->pwmenable[2]) << 4;
725 cfg4_tmp = w83792d_read_value(client,W83792D_REG_FAN_CFG) & 0xc0;
726 fan_cfg_tmp = ((cfg4_tmp | cfg3_tmp) | cfg2_tmp) | cfg1_tmp;
727 w83792d_write_value(client, W83792D_REG_FAN_CFG, fan_cfg_tmp);
728
729 return count;
730}
731
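store_pwmenable() above translates the sysfs convention (1 = manual, 2 = Smart Fan II, 3 = thermal cruise / Smart Fan I) into the chip's per-fan 2-bit codes (0, 2 and 1 respectively) and packs all three fans into FAN_CFG: fan1 in bits 0-1, fan2 in bits 2-3, fan3 in bits 4-5, with bits 6-7 preserved. A sketch of just the packing step (standalone, made-up helper name):

#include <stdio.h>

/* Chip encoding per fan: 0 = manual, 1 = thermal cruise (Smart Fan I),
 * 2 = Smart Fan II.  Bits 6-7 of the old FAN_CFG value are kept. */
static unsigned char pack_fan_cfg(unsigned char old_cfg, const unsigned char mode[3])
{
        return (old_cfg & 0xC0) | mode[0] | (mode[1] << 2) | (mode[2] << 4);
}

int main(void)
{
        unsigned char mode[3] = { 0, 1, 2 };    /* manual, cruise, Smart Fan II */

        printf("FAN_CFG = 0x%02X\n", pack_fan_cfg(0x80, mode)); /* 0xA4 */
        return 0;
}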
732#define sysfs_pwm(offset) \
733static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
734 show_pwm, store_pwm, offset); \
735static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
736 show_pwmenable, store_pwmenable, offset); \
737
738sysfs_pwm(1);
739sysfs_pwm(2);
740sysfs_pwm(3);
741
742
743#define device_create_file_pwm(client, offset) \
744do { \
745device_create_file(&client->dev, &sensor_dev_attr_pwm##offset.dev_attr); \
746} while (0)
747
748#define device_create_file_pwmenable(client, offset) \
749do { \
750device_create_file(&client->dev, &sensor_dev_attr_pwm##offset##_enable.dev_attr); \
751} while (0)
752
753
754static ssize_t
755show_pwm_mode(struct device *dev, struct device_attribute *attr,
756 char *buf)
757{
758 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
759 int nr = sensor_attr->index;
760 struct w83792d_data *data = w83792d_update_device(dev);
761 return sprintf(buf, "%d\n", data->pwm_mode[nr-1]);
762}
763
764static ssize_t
765store_pwm_mode(struct device *dev, struct device_attribute *attr,
766 const char *buf, size_t count)
767{
768 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
769 int nr = sensor_attr->index - 1;
770 struct i2c_client *client = to_i2c_client(dev);
771 struct w83792d_data *data = i2c_get_clientdata(client);
772 u32 val;
773 u8 pwm_mode_mask = 0;
774
775 val = simple_strtoul(buf, NULL, 10);
776 data->pwm_mode[nr] = SENSORS_LIMIT(val, 0, 1);
777 pwm_mode_mask = w83792d_read_value(client,
778 W83792D_REG_PWM[nr]) & 0x7f;
779 w83792d_write_value(client, W83792D_REG_PWM[nr],
780 ((data->pwm_mode[nr]) << 7) | pwm_mode_mask);
781
782 return count;
783}
784
785#define sysfs_pwm_mode(offset) \
786static SENSOR_DEVICE_ATTR(pwm##offset##_mode, S_IRUGO | S_IWUSR, \
787 show_pwm_mode, store_pwm_mode, offset);
788
789sysfs_pwm_mode(1);
790sysfs_pwm_mode(2);
791sysfs_pwm_mode(3);
792
793#define device_create_file_pwm_mode(client, offset) \
794do { \
795device_create_file(&client->dev, &sensor_dev_attr_pwm##offset##_mode.dev_attr); \
796} while (0)
797
798
799static ssize_t
800show_regs_chassis(struct device *dev, struct device_attribute *attr,
801 char *buf)
802{
803 struct w83792d_data *data = w83792d_update_device(dev);
804 return sprintf(buf, "%d\n", data->chassis);
805}
806
807static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL);
808
809#define device_create_file_chassis(client) \
810do { \
811device_create_file(&client->dev, &dev_attr_chassis); \
812} while (0)
813
814
815static ssize_t
816show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
817{
818 struct w83792d_data *data = w83792d_update_device(dev);
819 return sprintf(buf, "%d\n", data->chassis_clear);
820}
821
822static ssize_t
823store_chassis_clear(struct device *dev, struct device_attribute *attr,
824 const char *buf, size_t count)
825{
826 struct i2c_client *client = to_i2c_client(dev);
827 struct w83792d_data *data = i2c_get_clientdata(client);
828 u32 val;
829 u8 temp1 = 0, temp2 = 0;
830
831 val = simple_strtoul(buf, NULL, 10);
832
833 data->chassis_clear = SENSORS_LIMIT(val, 0 ,1);
834 temp1 = ((data->chassis_clear) << 7) & 0x80;
835 temp2 = w83792d_read_value(client,
836 W83792D_REG_CHASSIS_CLR) & 0x7f;
837 w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, temp1 | temp2);
838
839 return count;
840}
841
842static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR,
843 show_chassis_clear, store_chassis_clear);
844
845#define device_create_file_chassis_clear(client) \
846do { \
847device_create_file(&client->dev, &dev_attr_chassis_clear); \
848} while (0)
849
850
851
852/* For Smart Fan I / Thermal Cruise */
853static ssize_t
854show_thermal_cruise(struct device *dev, struct device_attribute *attr,
855 char *buf)
856{
857 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
858 int nr = sensor_attr->index;
859 struct w83792d_data *data = w83792d_update_device(dev);
860 return sprintf(buf, "%ld\n", (long)data->thermal_cruise[nr-1]);
861}
862
863static ssize_t
864store_thermal_cruise(struct device *dev, struct device_attribute *attr,
865 const char *buf, size_t count)
866{
867 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
868 int nr = sensor_attr->index - 1;
869 struct i2c_client *client = to_i2c_client(dev);
870 struct w83792d_data *data = i2c_get_clientdata(client);
871 u32 val;
872 u8 target_tmp=0, target_mask=0;
873
874 val = simple_strtoul(buf, NULL, 10);
875 target_tmp = val;
876 target_tmp = target_tmp & 0x7f;
877 target_mask = w83792d_read_value(client, W83792D_REG_THERMAL[nr]) & 0x80;
878 data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255);
879 w83792d_write_value(client, W83792D_REG_THERMAL[nr],
880 (data->thermal_cruise[nr]) | target_mask);
881
882 return count;
883}
884
885#define sysfs_thermal_cruise(offset) \
886static SENSOR_DEVICE_ATTR(thermal_cruise##offset, S_IRUGO | S_IWUSR, \
887 show_thermal_cruise, store_thermal_cruise, offset);
888
889sysfs_thermal_cruise(1);
890sysfs_thermal_cruise(2);
891sysfs_thermal_cruise(3);
892
893#define device_create_file_thermal_cruise(client, offset) \
894do { \
895device_create_file(&client->dev, \
896&sensor_dev_attr_thermal_cruise##offset.dev_attr); \
897} while (0)
898
899
900/* For Smart Fan I/Thermal Cruise and Smart Fan II */
901static ssize_t
902show_tolerance(struct device *dev, struct device_attribute *attr,
903 char *buf)
904{
905 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
906 int nr = sensor_attr->index;
907 struct w83792d_data *data = w83792d_update_device(dev);
908 return sprintf(buf, "%ld\n", (long)data->tolerance[nr-1]);
909}
910
911static ssize_t
912store_tolerance(struct device *dev, struct device_attribute *attr,
913 const char *buf, size_t count)
914{
915 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
916 int nr = sensor_attr->index - 1;
917 struct i2c_client *client = to_i2c_client(dev);
918 struct w83792d_data *data = i2c_get_clientdata(client);
919 u32 val;
920 u8 tol_tmp, tol_mask;
921
922 val = simple_strtoul(buf, NULL, 10);
923 tol_mask = w83792d_read_value(client,
924 W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
925 tol_tmp = SENSORS_LIMIT(val, 0, 15);
926 tol_tmp &= 0x0f;
927 data->tolerance[nr] = tol_tmp;
928 if (nr == 1) {
929 tol_tmp <<= 4;
930 }
931 w83792d_write_value(client, W83792D_REG_TOLERANCE[nr],
932 tol_mask | tol_tmp);
933
934 return count;
935}
936
937#define sysfs_tolerance(offset) \
938static SENSOR_DEVICE_ATTR(tolerance##offset, S_IRUGO | S_IWUSR, \
939 show_tolerance, store_tolerance, offset);
940
941sysfs_tolerance(1);
942sysfs_tolerance(2);
943sysfs_tolerance(3);
944
945#define device_create_file_tolerance(client, offset) \
946do { \
947device_create_file(&client->dev, &sensor_dev_attr_tolerance##offset.dev_attr); \
948} while (0)
949
950
951/* For Smart Fan II */
952static ssize_t
953show_sf2_point(struct device *dev, struct device_attribute *attr,
954 char *buf)
955{
956 struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr);
957 int nr = sensor_attr->nr;
958 int index = sensor_attr->index;
959 struct w83792d_data *data = w83792d_update_device(dev);
960 return sprintf(buf, "%ld\n", (long)data->sf2_points[index-1][nr-1]);
961}
962
963static ssize_t
964store_sf2_point(struct device *dev, struct device_attribute *attr,
965 const char *buf, size_t count)
966{
967 struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr);
968 int nr = sensor_attr->nr - 1;
969 int index = sensor_attr->index - 1;
970 struct i2c_client *client = to_i2c_client(dev);
971 struct w83792d_data *data = i2c_get_clientdata(client);
972 u32 val;
973 u8 mask_tmp = 0;
974
975 val = simple_strtoul(buf, NULL, 10);
976 data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127);
977 mask_tmp = w83792d_read_value(client,
978 W83792D_REG_POINTS[index][nr]) & 0x80;
979 w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
980 mask_tmp|data->sf2_points[index][nr]);
981
982 return count;
983}
984
985#define sysfs_sf2_point(offset, index) \
986static SENSOR_DEVICE_ATTR_2(sf2_point##offset##_fan##index, S_IRUGO | S_IWUSR, \
987 show_sf2_point, store_sf2_point, offset, index);
988
989sysfs_sf2_point(1, 1); /* Fan1 */
990sysfs_sf2_point(2, 1); /* Fan1 */
991sysfs_sf2_point(3, 1); /* Fan1 */
992sysfs_sf2_point(4, 1); /* Fan1 */
993sysfs_sf2_point(1, 2); /* Fan2 */
994sysfs_sf2_point(2, 2); /* Fan2 */
995sysfs_sf2_point(3, 2); /* Fan2 */
996sysfs_sf2_point(4, 2); /* Fan2 */
997sysfs_sf2_point(1, 3); /* Fan3 */
998sysfs_sf2_point(2, 3); /* Fan3 */
999sysfs_sf2_point(3, 3); /* Fan3 */
1000sysfs_sf2_point(4, 3); /* Fan3 */
1001
1002#define device_create_file_sf2_point(client, offset, index) \
1003do { \
1004device_create_file(&client->dev, \
1005&sensor_dev_attr_sf2_point##offset##_fan##index.dev_attr); \
1006} while (0)
1007
1008
1009static ssize_t
1010show_sf2_level(struct device *dev, struct device_attribute *attr,
1011 char *buf)
1012{
1013 struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr);
1014 int nr = sensor_attr->nr;
1015 int index = sensor_attr->index;
1016 struct w83792d_data *data = w83792d_update_device(dev);
1017 return sprintf(buf, "%d\n",
1018 (((data->sf2_levels[index-1][nr]) * 100) / 15));
1019}
1020
1021static ssize_t
1022store_sf2_level(struct device *dev, struct device_attribute *attr,
1023 const char *buf, size_t count)
1024{
1025 struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr);
1026 int nr = sensor_attr->nr;
1027 int index = sensor_attr->index - 1;
1028 struct i2c_client *client = to_i2c_client(dev);
1029 struct w83792d_data *data = i2c_get_clientdata(client);
1030 u32 val;
1031 u8 mask_tmp=0, level_tmp=0;
1032
1033 val = simple_strtoul(buf, NULL, 10);
1034 data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15);
1035 mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
1036 & ((nr==3) ? 0xf0 : 0x0f);
1037 if (nr==3) {
1038 level_tmp = data->sf2_levels[index][nr];
1039 } else {
1040 level_tmp = data->sf2_levels[index][nr] << 4;
1041 }
1042 w83792d_write_value(client, W83792D_REG_LEVELS[index][nr], level_tmp | mask_tmp);
1043
1044 return count;
1045}
1046
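The Smart Fan II duty-cycle levels are 4-bit register fields that show_sf2_level()/store_sf2_level() expose as percentages, scaling by 100/15 in each direction; because of the integer math, a written percentage is quantized down to the nearest 1/15 step. A small standalone sketch of the round trip:

#include <stdio.h>

/* 4-bit level <-> percentage, mirroring show_sf2_level()/store_sf2_level(). */
static int level_to_percent(int level) { return level * 100 / 15; }
static int percent_to_level(int pct)
{
        int level = pct * 15 / 100;

        return level < 0 ? 0 : (level > 15 ? 15 : level);
}

int main(void)
{
        printf("level 9 -> %d%%\n", level_to_percent(9));               /* 60%% */
        printf("57%% -> level %d -> %d%%\n", percent_to_level(57),
               level_to_percent(percent_to_level(57)));                 /* 8 -> 53%% */
        return 0;
}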
1047#define sysfs_sf2_level(offset, index) \
1048static SENSOR_DEVICE_ATTR_2(sf2_level##offset##_fan##index, S_IRUGO | S_IWUSR, \
1049 show_sf2_level, store_sf2_level, offset, index);
1050
1051sysfs_sf2_level(1, 1); /* Fan1 */
1052sysfs_sf2_level(2, 1); /* Fan1 */
1053sysfs_sf2_level(3, 1); /* Fan1 */
1054sysfs_sf2_level(1, 2); /* Fan2 */
1055sysfs_sf2_level(2, 2); /* Fan2 */
1056sysfs_sf2_level(3, 2); /* Fan2 */
1057sysfs_sf2_level(1, 3); /* Fan3 */
1058sysfs_sf2_level(2, 3); /* Fan3 */
1059sysfs_sf2_level(3, 3); /* Fan3 */
1060
1061#define device_create_file_sf2_level(client, offset, index) \
1062do { \
1063device_create_file(&client->dev, \
1064&sensor_dev_attr_sf2_level##offset##_fan##index.dev_attr); \
1065} while (0)
1066
1067
1068/* This function is called when:
1069 * w83792d_driver is inserted (when this module is loaded), for each
1070 available adapter
1071 * when a new adapter is inserted (and w83792d_driver is still present) */
1072static int
1073w83792d_attach_adapter(struct i2c_adapter *adapter)
1074{
1075 if (!(adapter->class & I2C_CLASS_HWMON))
1076 return 0;
1077 return i2c_probe(adapter, &addr_data, w83792d_detect);
1078}
1079
1080
1081static int
1082w83792d_create_subclient(struct i2c_adapter *adapter,
1083 struct i2c_client *new_client, int addr,
1084 struct i2c_client **sub_cli)
1085{
1086 int err;
1087 struct i2c_client *sub_client;
1088
1089 (*sub_cli) = sub_client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
1090 if (!(sub_client)) {
1091 return -ENOMEM;
1092 }
1093 memset(sub_client, 0x00, sizeof(struct i2c_client));
1094 sub_client->addr = 0x48 + addr;
1095 i2c_set_clientdata(sub_client, NULL);
1096 sub_client->adapter = adapter;
1097 sub_client->driver = &w83792d_driver;
1098 sub_client->flags = 0;
1099 strlcpy(sub_client->name, "w83792d subclient", I2C_NAME_SIZE);
1100 if ((err = i2c_attach_client(sub_client))) {
1101 dev_err(&new_client->dev, "subclient registration "
1102 "at address 0x%x failed\n", sub_client->addr);
1103 kfree(sub_client);
1104 return err;
1105 }
1106 return 0;
1107}
1108
1109
1110static int
1111w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind,
1112 struct i2c_client *new_client)
1113{
1114 int i, id, err;
1115 u8 val;
1116 struct w83792d_data *data = i2c_get_clientdata(new_client);
1117
1118 id = i2c_adapter_id(adapter);
1119 if (force_subclients[0] == id && force_subclients[1] == address) {
1120 for (i = 2; i <= 3; i++) {
1121 if (force_subclients[i] < 0x48 ||
1122 force_subclients[i] > 0x4f) {
1123 dev_err(&new_client->dev, "invalid subclient "
1124 "address %d; must be 0x48-0x4f\n",
1125 force_subclients[i]);
1126 err = -ENODEV;
1127 goto ERROR_SC_0;
1128 }
1129 }
1130 w83792d_write_value(new_client, W83792D_REG_I2C_SUBADDR,
1131 (force_subclients[2] & 0x07) |
1132 ((force_subclients[3] & 0x07) << 4));
1133 }
1134
1135 val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
1136 if (!(val & 0x08)) {
1137 err = w83792d_create_subclient(adapter, new_client, val & 0x7,
1138 &data->lm75[0]);
1139 if (err < 0)
1140 goto ERROR_SC_0;
1141 }
1142 if (!(val & 0x80)) {
1143 if ((data->lm75[0] != NULL) &&
1144 ((val & 0x7) == ((val >> 4) & 0x7))) {
1145 dev_err(&new_client->dev, "duplicate addresses 0x%x, "
1146 "use force_subclient\n", data->lm75[0]->addr);
1147 err = -ENODEV;
1148 goto ERROR_SC_1;
1149 }
1150 err = w83792d_create_subclient(adapter, new_client,
1151 (val >> 4) & 0x7, &data->lm75[1]);
1152 if (err < 0)
1153 goto ERROR_SC_1;
1154 }
1155
1156 return 0;
1157
1158/* Undo inits in case of errors */
1159
1160ERROR_SC_1:
1161 if (data->lm75[0] != NULL) {
1162 i2c_detach_client(data->lm75[0]);
1163 kfree(data->lm75[0]);
1164 }
1165ERROR_SC_0:
1166 return err;
1167}
1168
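w83792d_detect_subclients() derives up to two subclient addresses (the two entries of data->lm75[]) from the 0x4A subaddress register: bits 0-2 and 4-6 are offsets from 0x48, and bits 3 and 7 disable the first and second subclient respectively. A standalone decode sketch (helper name made up):

#include <stdio.h>

/* Decode W83792D_REG_I2C_SUBADDR: fills addr[] with the enabled subclient
 * I2C addresses (0x48-0x4f) and returns how many there are. */
static int decode_subclients(unsigned char val, unsigned char addr[2])
{
        int n = 0;

        if (!(val & 0x08))
                addr[n++] = 0x48 + (val & 0x07);
        if (!(val & 0x80))
                addr[n++] = 0x48 + ((val >> 4) & 0x07);
        return n;
}

int main(void)
{
        unsigned char addr[2];
        int i, n = decode_subclients(0x61, addr);       /* both enabled */

        for (i = 0; i < n; i++)
                printf("subclient at 0x%02x\n", addr[i]); /* 0x49 and 0x4e */
        return 0;
}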
1169
1170static int
1171w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
1172{
1173 int i = 0, val1 = 0, val2;
1174 struct i2c_client *new_client;
1175 struct w83792d_data *data;
1176 int err = 0;
1177 const char *client_name = "";
1178
1179 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1180 goto ERROR0;
1181 }
1182
1183 /* OK. For now, we presume we have a valid client. We now create the
1184 client structure, even though we cannot fill it completely yet.
1185 But it allows us to access w83792d_{read,write}_value. */
1186
1187 if (!(data = kmalloc(sizeof(struct w83792d_data), GFP_KERNEL))) {
1188 err = -ENOMEM;
1189 goto ERROR0;
1190 }
1191 memset(data, 0, sizeof(struct w83792d_data));
1192
1193 new_client = &data->client;
1194 i2c_set_clientdata(new_client, data);
1195 new_client->addr = address;
1196 init_MUTEX(&data->lock);
1197 new_client->adapter = adapter;
1198 new_client->driver = &w83792d_driver;
1199 new_client->flags = 0;
1200
1201 /* Now, we do the remaining detection. */
1202
1203 /* The w83792d may be stuck in some other bank than bank 0. This may
1204 make reading other information impossible. Specify a force=... or
1205 force_*=... parameter, and the Winbond will be reset to the right
1206 bank. */
1207 if (kind < 0) {
1208 if (w83792d_read_value(new_client, W83792D_REG_CONFIG) & 0x80) {
1209 dev_warn(&new_client->dev, "Detection failed at step "
1210 "3\n");
1211 goto ERROR1;
1212 }
1213 val1 = w83792d_read_value(new_client, W83792D_REG_BANK);
1214 val2 = w83792d_read_value(new_client, W83792D_REG_CHIPMAN);
1215 /* Check for Winbond ID if in bank 0 */
1216 if (!(val1 & 0x07)) { /* is Bank0 */
1217 if (((!(val1 & 0x80)) && (val2 != 0xa3)) ||
1218 ((val1 & 0x80) && (val2 != 0x5c))) {
1219 goto ERROR1;
1220 }
1221 }
1222 /* If Winbond chip, address of chip and W83792D_REG_I2C_ADDR
1223 should match */
1224 if (w83792d_read_value(new_client,
1225 W83792D_REG_I2C_ADDR) != address) {
1226 dev_warn(&new_client->dev, "Detection failed "
1227 "at step 5\n");
1228 goto ERROR1;
1229 }
1230 }
1231
1232 /* We have either had a force parameter, or we have already detected the
1233 Winbond. Put it now into bank 0 and Vendor ID High Byte */
1234 w83792d_write_value(new_client,
1235 W83792D_REG_BANK,
1236 (w83792d_read_value(new_client,
1237 W83792D_REG_BANK) & 0x78) | 0x80);
1238
1239 /* Determine the chip type. */
1240 if (kind <= 0) {
1241 /* get vendor ID */
1242 val2 = w83792d_read_value(new_client, W83792D_REG_CHIPMAN);
1243 if (val2 != 0x5c) { /* the vendor is NOT Winbond */
1244 goto ERROR1;
1245 }
1246 val1 = w83792d_read_value(new_client, W83792D_REG_WCHIPID);
1247 if (val1 == 0x7a && address >= 0x2c) {
1248 kind = w83792d;
1249 } else {
1250 if (kind == 0)
1251 dev_warn(&new_client->dev,
1252 "w83792d: Ignoring 'force' parameter for"
1253 " unknown chip at adapter %d, address"
1254 " 0x%02x\n", i2c_adapter_id(adapter),
1255 address);
1256 goto ERROR1;
1257 }
1258 }
1259
1260 if (kind == w83792d) {
1261 client_name = "w83792d";
1262 } else {
1263 dev_err(&new_client->dev, "w83792d: Internal error: unknown"
1264 " kind (%d)?!?", kind);
1265 goto ERROR1;
1266 }
1267
1268 /* Fill in the remaining client fields and put into the global list */
1269 strlcpy(new_client->name, client_name, I2C_NAME_SIZE);
1270 data->type = kind;
1271
1272 data->valid = 0;
1273 init_MUTEX(&data->update_lock);
1274
1275 /* Tell the I2C layer a new client has arrived */
1276 if ((err = i2c_attach_client(new_client)))
1277 goto ERROR1;
1278
1279 if ((err = w83792d_detect_subclients(adapter, address,
1280 kind, new_client)))
1281 goto ERROR2;
1282
1283 /* Initialize the chip */
1284 w83792d_init_client(new_client);
1285
1286 /* A few vars need to be filled upon startup */
1287 for (i = 1; i <= 7; i++) {
1288 data->fan_min[i - 1] = w83792d_read_value(new_client,
1289 W83792D_REG_FAN_MIN[i]);
1290 }
1291
1292 /* Register sysfs hooks */
1293 data->class_dev = hwmon_device_register(&new_client->dev);
1294 if (IS_ERR(data->class_dev)) {
1295 err = PTR_ERR(data->class_dev);
1296 goto ERROR3;
1297 }
1298 device_create_file_in(new_client, 0);
1299 device_create_file_in(new_client, 1);
1300 device_create_file_in(new_client, 2);
1301 device_create_file_in(new_client, 3);
1302 device_create_file_in(new_client, 4);
1303 device_create_file_in(new_client, 5);
1304 device_create_file_in(new_client, 6);
1305 device_create_file_in(new_client, 7);
1306 device_create_file_in(new_client, 8);
1307
1308 device_create_file_fan(new_client, 1);
1309 device_create_file_fan(new_client, 2);
1310 device_create_file_fan(new_client, 3);
1311 device_create_file_fan(new_client, 4);
1312 device_create_file_fan(new_client, 5);
1313 device_create_file_fan(new_client, 6);
1314 device_create_file_fan(new_client, 7);
1315
1316 device_create_file_temp1(new_client); /* Temp1 */
1317 device_create_file_temp_add(new_client, 2); /* Temp2 */
1318 device_create_file_temp_add(new_client, 3); /* Temp3 */
1319
1320 device_create_file_alarms(new_client);
1321
1322 device_create_file_pwm(new_client, 1);
1323 device_create_file_pwm(new_client, 2);
1324 device_create_file_pwm(new_client, 3);
1325
1326 device_create_file_pwmenable(new_client, 1);
1327 device_create_file_pwmenable(new_client, 2);
1328 device_create_file_pwmenable(new_client, 3);
1329
1330 device_create_file_pwm_mode(new_client, 1);
1331 device_create_file_pwm_mode(new_client, 2);
1332 device_create_file_pwm_mode(new_client, 3);
1333
1334 device_create_file_chassis(new_client);
1335 device_create_file_chassis_clear(new_client);
1336
1337 device_create_file_thermal_cruise(new_client, 1);
1338 device_create_file_thermal_cruise(new_client, 2);
1339 device_create_file_thermal_cruise(new_client, 3);
1340
1341 device_create_file_tolerance(new_client, 1);
1342 device_create_file_tolerance(new_client, 2);
1343 device_create_file_tolerance(new_client, 3);
1344
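	/* Smart Fan II sysfs files: the second argument is the temperature
	   point (1-4) or duty-cycle level (1-3) index, the third the fan
	   number (1-3). The update code below also caches a fourth level per
	   fan, which is apparently not exposed here. */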
1345 device_create_file_sf2_point(new_client, 1, 1); /* Fan1 */
1346 device_create_file_sf2_point(new_client, 2, 1); /* Fan1 */
1347 device_create_file_sf2_point(new_client, 3, 1); /* Fan1 */
1348 device_create_file_sf2_point(new_client, 4, 1); /* Fan1 */
1349 device_create_file_sf2_point(new_client, 1, 2); /* Fan2 */
1350 device_create_file_sf2_point(new_client, 2, 2); /* Fan2 */
1351 device_create_file_sf2_point(new_client, 3, 2); /* Fan2 */
1352 device_create_file_sf2_point(new_client, 4, 2); /* Fan2 */
1353 device_create_file_sf2_point(new_client, 1, 3); /* Fan3 */
1354 device_create_file_sf2_point(new_client, 2, 3); /* Fan3 */
1355 device_create_file_sf2_point(new_client, 3, 3); /* Fan3 */
1356 device_create_file_sf2_point(new_client, 4, 3); /* Fan3 */
1357
1358 device_create_file_sf2_level(new_client, 1, 1); /* Fan1 */
1359 device_create_file_sf2_level(new_client, 2, 1); /* Fan1 */
1360 device_create_file_sf2_level(new_client, 3, 1); /* Fan1 */
1361 device_create_file_sf2_level(new_client, 1, 2); /* Fan2 */
1362 device_create_file_sf2_level(new_client, 2, 2); /* Fan2 */
1363 device_create_file_sf2_level(new_client, 3, 2); /* Fan2 */
1364 device_create_file_sf2_level(new_client, 1, 3); /* Fan3 */
1365 device_create_file_sf2_level(new_client, 2, 3); /* Fan3 */
1366 device_create_file_sf2_level(new_client, 3, 3); /* Fan3 */
1367
1368 return 0;
1369
1370ERROR3:
1371 if (data->lm75[0] != NULL) {
1372 i2c_detach_client(data->lm75[0]);
1373 kfree(data->lm75[0]);
1374 }
1375 if (data->lm75[1] != NULL) {
1376 i2c_detach_client(data->lm75[1]);
1377 kfree(data->lm75[1]);
1378 }
1379ERROR2:
1380 i2c_detach_client(new_client);
1381ERROR1:
1382 kfree(data);
1383ERROR0:
1384 return err;
1385}
1386
1387static int
1388w83792d_detach_client(struct i2c_client *client)
1389{
1390 struct w83792d_data *data = i2c_get_clientdata(client);
1391 int err;
1392
1393 /* main client */
1394 if (data)
1395 hwmon_device_unregister(data->class_dev);
1396
1397 if ((err = i2c_detach_client(client)))
1398 return err;
1399
1400 /* main client */
1401 if (data)
1402 kfree(data);
1403 /* subclient */
1404 else
1405 kfree(client);
1406
1407 return 0;
1408}
1409
1410/* The SMBus locks itself, usually, but nothing may access the Winbond between
1411   bank switches. ISA access must always be locked explicitly!
1412   We ignore the W83792D BUSY flag for now - honoring it could lead to
1413   deadlocks, would slow down access to the W83792D, and should not be
1414   necessary. There are some ugly typecasts here, but the good news is
1415   that they should not be necessary anywhere else! */
1416static int
1417w83792d_read_value(struct i2c_client *client, u8 reg)
1418{
1419	return i2c_smbus_read_byte_data(client, reg);
1423}
1424
1425static int
1426w83792d_write_value(struct i2c_client *client, u8 reg, u8 value)
1427{
1428 i2c_smbus_write_byte_data(client, reg, value);
1429 return 0;
1430}
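/* Note that the status of the SMBus write is not propagated; callers assume
   writes succeed. */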
1431
1432/* Called when we have found a new W83792D. It should set limits, etc. */
1433static void
1434w83792d_init_client(struct i2c_client *client)
1435{
1436 u8 temp2_cfg, temp3_cfg, vid_in_b;
1437
1438 if (init) {
1439 w83792d_write_value(client, W83792D_REG_CONFIG, 0x80);
1440 }
1441	/* Clear bit 6 of W83792D_REG_VID_IN_B (set it to 0):
1442	   bit 6 = 0: the high/low limits of vin0/vin1 can be
1443	   modified by the user;
1444	   bit 6 = 1: the high/low limits of vin0/vin1 are
1445	   auto-updated and can NOT be modified by the user. */
1446 vid_in_b = w83792d_read_value(client, W83792D_REG_VID_IN_B);
1447 w83792d_write_value(client, W83792D_REG_VID_IN_B,
1448 vid_in_b & 0xbf);
1449
1450 temp2_cfg = w83792d_read_value(client, W83792D_REG_TEMP2_CONFIG);
1451 temp3_cfg = w83792d_read_value(client, W83792D_REG_TEMP3_CONFIG);
1452 w83792d_write_value(client, W83792D_REG_TEMP2_CONFIG,
1453 temp2_cfg & 0xe6);
1454 w83792d_write_value(client, W83792D_REG_TEMP3_CONFIG,
1455 temp3_cfg & 0xe6);
1456
1457 /* Start monitoring */
1458 w83792d_write_value(client, W83792D_REG_CONFIG,
1459 (w83792d_read_value(client,
1460 W83792D_REG_CONFIG) & 0xf7)
1461 | 0x01);
1462}
1463
1464static struct w83792d_data *w83792d_update_device(struct device *dev)
1465{
1466 struct i2c_client *client = to_i2c_client(dev);
1467 struct w83792d_data *data = i2c_get_clientdata(client);
1468 int i, j;
1469 u8 reg_array_tmp[4], pwm_array_tmp[7], reg_tmp;
1470
1471 down(&data->update_lock);
1472
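	/* Refresh the cached registers at most every three seconds (HZ * 3),
	   when jiffies has wrapped, or on the first call after probing. */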
1473	if (time_after(jiffies - data->last_updated,
1474		       (unsigned long) (HZ * 3))
1475	    || time_before(jiffies, data->last_updated) || !data->valid) {
1476 dev_dbg(dev, "Starting device update\n");
1477
1478 /* Update the voltages measured value and limits */
1479 for (i = 0; i < 9; i++) {
1480 data->in[i] = w83792d_read_value(client,
1481 W83792D_REG_IN[i]);
1482 data->in_max[i] = w83792d_read_value(client,
1483 W83792D_REG_IN_MAX[i]);
1484 data->in_min[i] = w83792d_read_value(client,
1485 W83792D_REG_IN_MIN[i]);
1486 }
1487 data->low_bits[0] = w83792d_read_value(client,
1488 W83792D_REG_LOW_BITS1);
1489 data->low_bits[1] = w83792d_read_value(client,
1490 W83792D_REG_LOW_BITS2);
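		/* LOW_BITS1/2 presumably hold the low-order bits of the
		   voltage readings above; they are cached here and combined
		   with data->in[] when the sysfs files are read. */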
1491 for (i = 0; i < 7; i++) {
1492 /* Update the Fan measured value and limits */
1493 data->fan[i] = w83792d_read_value(client,
1494 W83792D_REG_FAN[i]);
1495 data->fan_min[i] = w83792d_read_value(client,
1496 W83792D_REG_FAN_MIN[i]);
1497 /* Update the PWM/DC Value and PWM/DC flag */
1498 pwm_array_tmp[i] = w83792d_read_value(client,
1499 W83792D_REG_PWM[i]);
1500 data->pwm[i] = pwm_array_tmp[i] & 0x0f;
1501 data->pwm_mode[i] = (pwm_array_tmp[i] >> 7) & 0x01;
1502 }
1503
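		/* W83792D_REG_FAN_CFG packs the three pwm enable modes as
		   2-bit fields: bits 1:0, 3:2 and 5:4 for pwm1..pwm3. */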
1504 reg_tmp = w83792d_read_value(client, W83792D_REG_FAN_CFG);
1505 data->pwmenable[0] = reg_tmp & 0x03;
1506 data->pwmenable[1] = (reg_tmp>>2) & 0x03;
1507 data->pwmenable[2] = (reg_tmp>>4) & 0x03;
1508
1509 for (i = 0; i < 3; i++) {
1510 data->temp1[i] = w83792d_read_value(client,
1511 W83792D_REG_TEMP1[i]);
1512 }
1513 for (i = 0; i < 2; i++) {
1514 for (j = 0; j < 6; j++) {
1515 data->temp_add[i][j] = w83792d_read_value(
1516 client,W83792D_REG_TEMP_ADD[i][j]);
1517 }
1518 }
1519
1520 /* Update the Fan Divisor */
1521 for (i = 0; i < 4; i++) {
1522 reg_array_tmp[i] = w83792d_read_value(client,
1523 W83792D_REG_FAN_DIV[i]);
1524 }
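		/* Each FAN_DIV register packs two 3-bit divisor fields (low
		   and high nibble); the last register only uses its low
		   nibble, for fan7. */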
1525 data->fan_div[0] = reg_array_tmp[0] & 0x07;
1526 data->fan_div[1] = (reg_array_tmp[0] >> 4) & 0x07;
1527 data->fan_div[2] = reg_array_tmp[1] & 0x07;
1528 data->fan_div[3] = (reg_array_tmp[1] >> 4) & 0x07;
1529 data->fan_div[4] = reg_array_tmp[2] & 0x07;
1530 data->fan_div[5] = (reg_array_tmp[2] >> 4) & 0x07;
1531 data->fan_div[6] = reg_array_tmp[3] & 0x07;
1532
1533 /* Update the realtime status */
1534 data->alarms = w83792d_read_value(client, W83792D_REG_ALARM1) +
1535 (w83792d_read_value(client, W83792D_REG_ALARM2) << 8) +
1536 (w83792d_read_value(client, W83792D_REG_ALARM3) << 16);
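		/* The three 8-bit ALARM registers above are combined into a
		   single 24-bit bitmap, exposed through the "alarms" file. */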
1537
1538		/* Update CaseOpen status and its CLR_CHS. */
1539 data->chassis = (w83792d_read_value(client,
1540 W83792D_REG_CHASSIS) >> 5) & 0x01;
1541 data->chassis_clear = (w83792d_read_value(client,
1542 W83792D_REG_CHASSIS_CLR) >> 7) & 0x01;
1543
1544 /* Update Thermal Cruise/Smart Fan I target value */
1545 for (i = 0; i < 3; i++) {
1546 data->thermal_cruise[i] =
1547 w83792d_read_value(client,
1548 W83792D_REG_THERMAL[i]) & 0x7f;
1549 }
1550
1551 /* Update Smart Fan I/II tolerance */
1552 reg_tmp = w83792d_read_value(client, W83792D_REG_TOLERANCE[0]);
1553 data->tolerance[0] = reg_tmp & 0x0f;
1554 data->tolerance[1] = (reg_tmp >> 4) & 0x0f;
1555 data->tolerance[2] = w83792d_read_value(client,
1556 W83792D_REG_TOLERANCE[2]) & 0x0f;
1557
1558 /* Update Smart Fan II temperature points */
1559 for (i = 0; i < 3; i++) {
1560 for (j = 0; j < 4; j++) {
1561 data->sf2_points[i][j] = w83792d_read_value(
1562 client,W83792D_REG_POINTS[i][j]) & 0x7f;
1563 }
1564 }
1565
1566 /* Update Smart Fan II duty cycle levels */
1567 for (i = 0; i < 3; i++) {
1568 reg_tmp = w83792d_read_value(client,
1569 W83792D_REG_LEVELS[i][0]);
1570 data->sf2_levels[i][0] = reg_tmp & 0x0f;
1571 data->sf2_levels[i][1] = (reg_tmp >> 4) & 0x0f;
1572 reg_tmp = w83792d_read_value(client,
1573 W83792D_REG_LEVELS[i][2]);
1574 data->sf2_levels[i][2] = (reg_tmp >> 4) & 0x0f;
1575 data->sf2_levels[i][3] = reg_tmp & 0x0f;
1576 }
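		/* Note the nibble order: the first LEVELS register holds
		   levels 1 (low nibble) and 2 (high nibble), while the second
		   holds level 3 in its high nibble and level 4 in its low
		   nibble. */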
1577
1578 data->last_updated = jiffies;
1579 data->valid = 1;
1580 }
1581
1582 up(&data->update_lock);
1583
1584#ifdef DEBUG
1585 w83792d_print_debug(data, dev);
1586#endif
1587
1588 return data;
1589}
1590
1591#ifdef DEBUG
1592static void w83792d_print_debug(struct w83792d_data *data, struct device *dev)
1593{
1594 int i=0, j=0;
1595 dev_dbg(dev, "==========The following is the debug message...========\n");
1596 dev_dbg(dev, "9 set of Voltages: =====>\n");
1597 for (i=0; i<9; i++) {
1598 dev_dbg(dev, "vin[%d] is: 0x%x\n", i, data->in[i]);
1599 dev_dbg(dev, "vin[%d] max is: 0x%x\n", i, data->in_max[i]);
1600 dev_dbg(dev, "vin[%d] min is: 0x%x\n", i, data->in_min[i]);
1601 }
1602 dev_dbg(dev, "Low Bit1 is: 0x%x\n", data->low_bits[0]);
1603 dev_dbg(dev, "Low Bit2 is: 0x%x\n", data->low_bits[1]);
1604 dev_dbg(dev, "7 set of Fan Counts and Duty Cycles: =====>\n");
1605 for (i=0; i<7; i++) {
1606 dev_dbg(dev, "fan[%d] is: 0x%x\n", i, data->fan[i]);
1607 dev_dbg(dev, "fan[%d] min is: 0x%x\n", i, data->fan_min[i]);
1608 dev_dbg(dev, "pwm[%d] is: 0x%x\n", i, data->pwm[i]);
1609 dev_dbg(dev, "pwm_mode[%d] is: 0x%x\n", i, data->pwm_mode[i]);
1610 }
1611 dev_dbg(dev, "3 set of Temperatures: =====>\n");
1612 for (i=0; i<3; i++) {
1613 dev_dbg(dev, "temp1[%d] is: 0x%x\n", i, data->temp1[i]);
1614 }
1615
1616 for (i=0; i<2; i++) {
1617 for (j=0; j<6; j++) {
1618 dev_dbg(dev, "temp_add[%d][%d] is: 0x%x\n", i, j,
1619 data->temp_add[i][j]);
1620 }
1621 }
1622
1623 for (i=0; i<7; i++) {
1624 dev_dbg(dev, "fan_div[%d] is: 0x%x\n", i, data->fan_div[i]);
1625 }
1626 dev_dbg(dev, "==========End of the debug message...==================\n");
1627 dev_dbg(dev, "\n");
1628}
1629#endif
1630
1631static int __init
1632sensors_w83792d_init(void)
1633{
1634 return i2c_add_driver(&w83792d_driver);
1635}
1636
1637static void __exit
1638sensors_w83792d_exit(void)
1639{
1640 i2c_del_driver(&w83792d_driver);
1641}
1642
1643MODULE_AUTHOR("Chunhao Huang @ Winbond <DZShen@Winbond.com.tw>");
1644MODULE_DESCRIPTION("W83792AD/D driver for linux-2.6");
1645MODULE_LICENSE("GPL");
1646
1647module_init(sensors_w83792d_init);
1648module_exit(sensors_w83792d_exit);
1649
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 4469d52aba4c..133e34ab1d0a 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -36,7 +36,8 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <linux/i2c.h> 38#include <linux/i2c.h>
39#include <linux/i2c-sensor.h> 39#include <linux/hwmon.h>
40#include <linux/err.h>
40 41
41/* How many retries on register read error */ 42/* How many retries on register read error */
42#define MAX_RETRIES 5 43#define MAX_RETRIES 5
@@ -47,13 +48,12 @@
47 */ 48 */
48 49
49static unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END }; 50static unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
50static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
51 51
52/* 52/*
53 * Insmod parameters 53 * Insmod parameters
54 */ 54 */
55 55
56SENSORS_INSMOD_1(w83l785ts); 56I2C_CLIENT_INSMOD_1(w83l785ts);
57 57
58/* 58/*
59 * The W83L785TS-S registers 59 * The W83L785TS-S registers
@@ -105,6 +105,7 @@ static struct i2c_driver w83l785ts_driver = {
105 105
106struct w83l785ts_data { 106struct w83l785ts_data {
107 struct i2c_client client; 107 struct i2c_client client;
108 struct class_device *class_dev;
108 struct semaphore update_lock; 109 struct semaphore update_lock;
109 char valid; /* zero until following fields are valid */ 110 char valid; /* zero until following fields are valid */
110 unsigned long last_updated; /* in jiffies */ 111 unsigned long last_updated; /* in jiffies */
@@ -140,7 +141,7 @@ static int w83l785ts_attach_adapter(struct i2c_adapter *adapter)
140{ 141{
141 if (!(adapter->class & I2C_CLASS_HWMON)) 142 if (!(adapter->class & I2C_CLASS_HWMON))
142 return 0; 143 return 0;
143 return i2c_detect(adapter, &addr_data, w83l785ts_detect); 144 return i2c_probe(adapter, &addr_data, w83l785ts_detect);
144} 145}
145 146
146/* 147/*
@@ -239,11 +240,19 @@ static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind)
239 */ 240 */
240 241
241 /* Register sysfs hooks */ 242 /* Register sysfs hooks */
243 data->class_dev = hwmon_device_register(&new_client->dev);
244 if (IS_ERR(data->class_dev)) {
245 err = PTR_ERR(data->class_dev);
246 goto exit_detach;
247 }
248
242 device_create_file(&new_client->dev, &dev_attr_temp1_input); 249 device_create_file(&new_client->dev, &dev_attr_temp1_input);
243 device_create_file(&new_client->dev, &dev_attr_temp1_max); 250 device_create_file(&new_client->dev, &dev_attr_temp1_max);
244 251
245 return 0; 252 return 0;
246 253
254exit_detach:
255 i2c_detach_client(new_client);
247exit_free: 256exit_free:
248 kfree(data); 257 kfree(data);
249exit: 258exit:
@@ -252,15 +261,15 @@ exit:
252 261
253static int w83l785ts_detach_client(struct i2c_client *client) 262static int w83l785ts_detach_client(struct i2c_client *client)
254{ 263{
264 struct w83l785ts_data *data = i2c_get_clientdata(client);
255 int err; 265 int err;
256 266
257 if ((err = i2c_detach_client(client))) { 267 hwmon_device_unregister(data->class_dev);
258 dev_err(&client->dev, "Client deregistration failed, " 268
259 "client not detached.\n"); 269 if ((err = i2c_detach_client(client)))
260 return err; 270 return err;
261 }
262 271
263 kfree(i2c_get_clientdata(client)); 272 kfree(data);
264 return 0; 273 return 0;
265} 274}
266 275
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index cd170395a8c7..71c5a854ac5d 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -4,12 +4,8 @@
4 4
5obj-$(CONFIG_I2C) += i2c-core.o 5obj-$(CONFIG_I2C) += i2c-core.o
6obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o 6obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
7obj-$(CONFIG_I2C_SENSOR) += i2c-sensor.o
8obj-y += busses/ chips/ algos/ 7obj-y += busses/ chips/ algos/
9 8
10i2c-sensor-objs := i2c-sensor-detect.o i2c-sensor-vid.o
11
12
13ifeq ($(CONFIG_I2C_DEBUG_CORE),y) 9ifeq ($(CONFIG_I2C_DEBUG_CORE),y)
14EXTRA_CFLAGS += -DDEBUG 10EXTRA_CFLAGS += -DDEBUG
15endif 11endif
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index fb5b732238ed..df05df1a0ef6 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -519,8 +519,6 @@ static u32 bit_func(struct i2c_adapter *adap)
519/* -----exported algorithm data: ------------------------------------- */ 519/* -----exported algorithm data: ------------------------------------- */
520 520
521static struct i2c_algorithm i2c_bit_algo = { 521static struct i2c_algorithm i2c_bit_algo = {
522 .name = "Bit-shift algorithm",
523 .id = I2C_ALGO_BIT,
524 .master_xfer = bit_xfer, 522 .master_xfer = bit_xfer,
525 .functionality = bit_func, 523 .functionality = bit_func,
526}; 524};
@@ -541,8 +539,6 @@ int i2c_bit_add_bus(struct i2c_adapter *adap)
541 DEB2(dev_dbg(&adap->dev, "hw routines registered.\n")); 539 DEB2(dev_dbg(&adap->dev, "hw routines registered.\n"));
542 540
543 /* register new adapter to i2c module... */ 541 /* register new adapter to i2c module... */
544
545 adap->id |= i2c_bit_algo.id;
546 adap->algo = &i2c_bit_algo; 542 adap->algo = &i2c_bit_algo;
547 543
548 adap->timeout = 100; /* default values, should */ 544 adap->timeout = 100; /* default values, should */
diff --git a/drivers/i2c/algos/i2c-algo-ite.c b/drivers/i2c/algos/i2c-algo-ite.c
index e6cae39f47aa..2db7bfc85225 100644
--- a/drivers/i2c/algos/i2c-algo-ite.c
+++ b/drivers/i2c/algos/i2c-algo-ite.c
@@ -713,8 +713,6 @@ static u32 iic_func(struct i2c_adapter *adap)
713/* -----exported algorithm data: ------------------------------------- */ 713/* -----exported algorithm data: ------------------------------------- */
714 714
715static struct i2c_algorithm iic_algo = { 715static struct i2c_algorithm iic_algo = {
716 .name = "ITE IIC algorithm",
717 .id = I2C_ALGO_IIC,
718 .master_xfer = iic_xfer, 716 .master_xfer = iic_xfer,
719 .algo_control = algo_control, /* ioctl */ 717 .algo_control = algo_control, /* ioctl */
720 .functionality = iic_func, 718 .functionality = iic_func,
@@ -738,8 +736,6 @@ int i2c_iic_add_bus(struct i2c_adapter *adap)
738 adap->name)); 736 adap->name));
739 737
740 /* register new adapter to i2c module... */ 738 /* register new adapter to i2c module... */
741
742 adap->id |= iic_algo.id;
743 adap->algo = &iic_algo; 739 adap->algo = &iic_algo;
744 740
745 adap->timeout = 100; /* default values, should */ 741 adap->timeout = 100; /* default values, should */
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index cc3a952401f2..beb10edfe9c1 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -187,12 +187,14 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
187 int numbytes = 0; 187 int numbytes = 0;
188 int state; 188 int state;
189 int ret; 189 int ret;
190 int timeout = 100;
190 191
191 state = pca_status(adap); 192 while ((state = pca_status(adap)) != 0xf8 && timeout--) {
192 if ( state != 0xF8 ) { 193 msleep(10);
193 dev_dbg(&i2c_adap->dev, "bus is not idle. status is %#04x\n", state ); 194 }
194 /* FIXME: what to do. Force stop ? */ 195 if (state != 0xf8) {
195 return -EREMOTEIO; 196 dev_dbg(&i2c_adap->dev, "bus is not idle. status is %#04x\n", state);
197 return -EIO;
196 } 198 }
197 199
198 DEB1("{{{ XFER %d messages\n", num); 200 DEB1("{{{ XFER %d messages\n", num);
@@ -354,8 +356,6 @@ static int pca_init(struct i2c_algo_pca_data *adap)
354} 356}
355 357
356static struct i2c_algorithm pca_algo = { 358static struct i2c_algorithm pca_algo = {
357 .name = "PCA9564 algorithm",
358 .id = I2C_ALGO_PCA,
359 .master_xfer = pca_xfer, 359 .master_xfer = pca_xfer,
360 .functionality = pca_func, 360 .functionality = pca_func,
361}; 361};
@@ -369,8 +369,6 @@ int i2c_pca_add_bus(struct i2c_adapter *adap)
369 int rval; 369 int rval;
370 370
371 /* register new adapter to i2c module... */ 371 /* register new adapter to i2c module... */
372
373 adap->id |= pca_algo.id;
374 adap->algo = &pca_algo; 372 adap->algo = &pca_algo;
375 373
376 adap->timeout = 100; /* default values, should */ 374 adap->timeout = 100; /* default values, should */
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 8d087dac32af..6e498df1f717 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -459,8 +459,6 @@ static u32 pcf_func(struct i2c_adapter *adap)
459/* -----exported algorithm data: ------------------------------------- */ 459/* -----exported algorithm data: ------------------------------------- */
460 460
461static struct i2c_algorithm pcf_algo = { 461static struct i2c_algorithm pcf_algo = {
462 .name = "PCF8584 algorithm",
463 .id = I2C_ALGO_PCF,
464 .master_xfer = pcf_xfer, 462 .master_xfer = pcf_xfer,
465 .functionality = pcf_func, 463 .functionality = pcf_func,
466}; 464};
@@ -476,8 +474,6 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap)
476 DEB2(dev_dbg(&adap->dev, "hw routines registered.\n")); 474 DEB2(dev_dbg(&adap->dev, "hw routines registered.\n"));
477 475
478 /* register new adapter to i2c module... */ 476 /* register new adapter to i2c module... */
479
480 adap->id |= pcf_algo.id;
481 adap->algo = &pcf_algo; 477 adap->algo = &pcf_algo;
482 478
483 adap->timeout = 100; /* default values, should */ 479 adap->timeout = 100; /* default values, should */
diff --git a/drivers/i2c/algos/i2c-algo-sgi.c b/drivers/i2c/algos/i2c-algo-sgi.c
index 422721b241e5..932c4fa86c73 100644
--- a/drivers/i2c/algos/i2c-algo-sgi.c
+++ b/drivers/i2c/algos/i2c-algo-sgi.c
@@ -149,7 +149,7 @@ static int sgi_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
149 err = i2c_write(adap, p->buf, p->len); 149 err = i2c_write(adap, p->buf, p->len);
150 } 150 }
151 151
152 return err; 152 return (err < 0) ? err : i;
153} 153}
154 154
155static u32 sgi_func(struct i2c_adapter *adap) 155static u32 sgi_func(struct i2c_adapter *adap)
@@ -158,8 +158,6 @@ static u32 sgi_func(struct i2c_adapter *adap)
158} 158}
159 159
160static struct i2c_algorithm sgi_algo = { 160static struct i2c_algorithm sgi_algo = {
161 .name = "SGI algorithm",
162 .id = I2C_ALGO_SGI,
163 .master_xfer = sgi_xfer, 161 .master_xfer = sgi_xfer,
164 .functionality = sgi_func, 162 .functionality = sgi_func,
165}; 163};
@@ -169,7 +167,6 @@ static struct i2c_algorithm sgi_algo = {
169 */ 167 */
170int i2c_sgi_add_bus(struct i2c_adapter *adap) 168int i2c_sgi_add_bus(struct i2c_adapter *adap)
171{ 169{
172 adap->id |= sgi_algo.id;
173 adap->algo = &sgi_algo; 170 adap->algo = &sgi_algo;
174 171
175 return i2c_add_adapter(adap); 172 return i2c_add_adapter(adap);
diff --git a/drivers/i2c/algos/i2c-algo-sibyte.c b/drivers/i2c/algos/i2c-algo-sibyte.c
index f2785499237b..8ed5ad12552f 100644
--- a/drivers/i2c/algos/i2c-algo-sibyte.c
+++ b/drivers/i2c/algos/i2c-algo-sibyte.c
@@ -135,8 +135,6 @@ static u32 bit_func(struct i2c_adapter *adap)
135/* -----exported algorithm data: ------------------------------------- */ 135/* -----exported algorithm data: ------------------------------------- */
136 136
137static struct i2c_algorithm i2c_sibyte_algo = { 137static struct i2c_algorithm i2c_sibyte_algo = {
138 .name = "SiByte algorithm",
139 .id = I2C_ALGO_SIBYTE,
140 .smbus_xfer = smbus_xfer, 138 .smbus_xfer = smbus_xfer,
141 .algo_control = algo_control, /* ioctl */ 139 .algo_control = algo_control, /* ioctl */
142 .functionality = bit_func, 140 .functionality = bit_func,
@@ -151,8 +149,6 @@ int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
151 struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data; 149 struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
152 150
153 /* register new adapter to i2c module... */ 151 /* register new adapter to i2c module... */
154
155 i2c_adap->id |= i2c_sibyte_algo.id;
156 i2c_adap->algo = &i2c_sibyte_algo; 152 i2c_adap->algo = &i2c_sibyte_algo;
157 153
158 /* Set the frequency to 100 kHz */ 154 /* Set the frequency to 100 kHz */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 916ba5e40a96..6e9da1372225 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -182,14 +182,8 @@ config I2C_IOP3XX
182 will be called i2c-iop3xx. 182 will be called i2c-iop3xx.
183 183
184config I2C_ISA 184config I2C_ISA
185 tristate "ISA Bus support" 185 tristate
186 depends on I2C 186 depends on I2C
187 help
188 If you say yes to this option, support will be included for i2c
189 interfaces that are on the ISA bus.
190
191 This driver can also be built as a module. If so, the module
192 will be called i2c-isa.
193 187
194config I2C_ITE 188config I2C_ITE
195 tristate "ITE I2C Adapter" 189 tristate "ITE I2C Adapter"
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index f634a0780cf0..f021acd2674e 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -472,8 +472,6 @@ static u32 ali1535_func(struct i2c_adapter *adapter)
472} 472}
473 473
474static struct i2c_algorithm smbus_algorithm = { 474static struct i2c_algorithm smbus_algorithm = {
475 .name = "Non-i2c SMBus adapter",
476 .id = I2C_ALGO_SMBUS,
477 .smbus_xfer = ali1535_access, 475 .smbus_xfer = ali1535_access,
478 .functionality = ali1535_func, 476 .functionality = ali1535_func,
479}; 477};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index fdd881aee618..86947504aea1 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -366,8 +366,6 @@ static void ali1563_shutdown(struct pci_dev *dev)
366} 366}
367 367
368static struct i2c_algorithm ali1563_algorithm = { 368static struct i2c_algorithm ali1563_algorithm = {
369 .name = "Non-i2c SMBus adapter",
370 .id = I2C_ALGO_SMBUS,
371 .smbus_xfer = ali1563_access, 369 .smbus_xfer = ali1563_access,
372 .functionality = ali1563_func, 370 .functionality = ali1563_func,
373}; 371};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 0f781a1a3323..b3f50bff39a0 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -462,8 +462,6 @@ static u32 ali15x3_func(struct i2c_adapter *adapter)
462} 462}
463 463
464static struct i2c_algorithm smbus_algorithm = { 464static struct i2c_algorithm smbus_algorithm = {
465 .name = "Non-I2C SMBus adapter",
466 .id = I2C_ALGO_SMBUS,
467 .smbus_xfer = ali15x3_access, 465 .smbus_xfer = ali15x3_access,
468 .functionality = ali15x3_func, 466 .functionality = ali15x3_func,
469}; 467};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 6347ebc6fb53..6ad0603384b8 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -295,8 +295,6 @@ static u32 amd756_func(struct i2c_adapter *adapter)
295} 295}
296 296
297static struct i2c_algorithm smbus_algorithm = { 297static struct i2c_algorithm smbus_algorithm = {
298 .name = "Non-I2C SMBus adapter",
299 .id = I2C_ALGO_SMBUS,
300 .smbus_xfer = amd756_access, 298 .smbus_xfer = amd756_access,
301 .functionality = amd756_func, 299 .functionality = amd756_func,
302}; 300};
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index d6644481d2a0..45ea24ba14d5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -323,8 +323,6 @@ static u32 amd8111_func(struct i2c_adapter *adapter)
323} 323}
324 324
325static struct i2c_algorithm smbus_algorithm = { 325static struct i2c_algorithm smbus_algorithm = {
326 .name = "Non-I2C SMBus 2.0 adapter",
327 .id = I2C_ALGO_SMBUS,
328 .smbus_xfer = amd8111_access, 326 .smbus_xfer = amd8111_access,
329 .functionality = amd8111_func, 327 .functionality = amd8111_func,
330}; 328};
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index a7ff112e49bf..d06edce03bf4 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -283,8 +283,6 @@ au1550_func(struct i2c_adapter *adap)
283} 283}
284 284
285static struct i2c_algorithm au1550_algo = { 285static struct i2c_algorithm au1550_algo = {
286 .name = "Au1550 algorithm",
287 .id = I2C_ALGO_AU1550,
288 .master_xfer = au1550_xfer, 286 .master_xfer = au1550_xfer,
289 .functionality = au1550_func, 287 .functionality = au1550_func,
290}; 288};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 1ab41313ce51..709beab76609 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -535,8 +535,6 @@ static u32 i801_func(struct i2c_adapter *adapter)
535} 535}
536 536
537static struct i2c_algorithm smbus_algorithm = { 537static struct i2c_algorithm smbus_algorithm = {
538 .name = "Non-I2C SMBus adapter",
539 .id = I2C_ALGO_SMBUS,
540 .smbus_xfer = i801_access, 538 .smbus_xfer = i801_access,
541 .functionality = i801_func, 539 .functionality = i801_func,
542}; 540};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 93ca36dc777e..a3ed9590f028 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -627,8 +627,6 @@ static u32 iic_func(struct i2c_adapter *adap)
627} 627}
628 628
629static struct i2c_algorithm iic_algo = { 629static struct i2c_algorithm iic_algo = {
630 .name = "IBM IIC algorithm",
631 .id = I2C_ALGO_OCP,
632 .master_xfer = iic_xfer, 630 .master_xfer = iic_xfer,
633 .functionality = iic_func 631 .functionality = iic_func
634}; 632};
@@ -727,7 +725,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
727 adap = &dev->adap; 725 adap = &dev->adap;
728 strcpy(adap->name, "IBM IIC"); 726 strcpy(adap->name, "IBM IIC");
729 i2c_set_adapdata(adap, dev); 727 i2c_set_adapdata(adap, dev);
730 adap->id = I2C_HW_OCP | iic_algo.id; 728 adap->id = I2C_HW_OCP;
731 adap->algo = &iic_algo; 729 adap->algo = &iic_algo;
732 adap->client_register = NULL; 730 adap->client_register = NULL;
733 adap->client_unregister = NULL; 731 adap->client_unregister = NULL;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 6b682e903f09..7bd9102db701 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -399,8 +399,6 @@ iop3xx_i2c_func(struct i2c_adapter *adap)
399} 399}
400 400
401static struct i2c_algorithm iop3xx_i2c_algo = { 401static struct i2c_algorithm iop3xx_i2c_algo = {
402 .name = "IOP3xx I2C algorithm",
403 .id = I2C_ALGO_IOP3XX,
404 .master_xfer = iop3xx_i2c_master_xfer, 402 .master_xfer = iop3xx_i2c_master_xfer,
405 .algo_control = iop3xx_i2c_algo_control, 403 .algo_control = iop3xx_i2c_algo_control,
406 .functionality = iop3xx_i2c_func, 404 .functionality = iop3xx_i2c_func,
diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c
index 00e7f7157b75..bdc6806dafae 100644
--- a/drivers/i2c/busses/i2c-isa.c
+++ b/drivers/i2c/busses/i2c-isa.c
@@ -1,6 +1,8 @@
1/* 1/*
2 i2c-isa.c - Part of lm_sensors, Linux kernel modules for hardware 2 i2c-isa.c - an i2c-core-like thing for ISA hardware monitoring chips
3 monitoring 3 Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
4
5 Based on the i2c-isa pseudo-adapter from the lm_sensors project
4 Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> 6 Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
5 7
6 This program is free software; you can redistribute it and/or modify 8 This program is free software; you can redistribute it and/or modify
@@ -18,30 +20,36 @@
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/ 21*/
20 22
21/* This implements an i2c algorithm/adapter for ISA bus. Not that this is 23/* This implements an i2c-core-like thing for ISA hardware monitoring
22 on first sight very useful; almost no functionality is preserved. 24 chips. Such chips are linked to the i2c subsystem for historical
23 Except that it makes writing drivers for chips which can be on both 25 reasons (because the early ISA hardware monitoring chips such as the
24 the SMBus and the ISA bus very much easier. See lm78.c for an example 26 LM78 had both an I2C and an ISA interface). They used to be
25 of this. */ 27 registered with the main i2c-core, but as a first step in the
28 direction of a clean separation between I2C and ISA chip drivers,
29 we now have this separate core for ISA ones. It is significantly
30 more simple than the real one, of course, because we don't have to
31 handle multiple busses: there is only one (fake) ISA adapter.
32 It is worth noting that we still rely on i2c-core for some things
33 at the moment - but hopefully this won't last. */
26 34
27#include <linux/init.h> 35#include <linux/init.h>
28#include <linux/module.h> 36#include <linux/module.h>
29#include <linux/kernel.h> 37#include <linux/kernel.h>
30#include <linux/errno.h> 38#include <linux/errno.h>
31#include <linux/i2c.h> 39#include <linux/i2c.h>
40#include <linux/i2c-isa.h>
32 41
33static u32 isa_func(struct i2c_adapter *adapter); 42static u32 isa_func(struct i2c_adapter *adapter);
34 43
35/* This is the actual algorithm we define */ 44/* This is the actual algorithm we define */
36static struct i2c_algorithm isa_algorithm = { 45static struct i2c_algorithm isa_algorithm = {
37 .name = "ISA bus algorithm",
38 .id = I2C_ALGO_ISA,
39 .functionality = isa_func, 46 .functionality = isa_func,
40}; 47};
41 48
42/* There can only be one... */ 49/* There can only be one... */
43static struct i2c_adapter isa_adapter = { 50static struct i2c_adapter isa_adapter = {
44 .owner = THIS_MODULE, 51 .owner = THIS_MODULE,
52 .id = I2C_HW_ISA,
45 .class = I2C_CLASS_HWMON, 53 .class = I2C_CLASS_HWMON,
46 .algo = &isa_algorithm, 54 .algo = &isa_algorithm,
47 .name = "ISA main adapter", 55 .name = "ISA main adapter",
@@ -53,17 +61,146 @@ static u32 isa_func(struct i2c_adapter *adapter)
53 return 0; 61 return 0;
54} 62}
55 63
64
65/* Copied from i2c-core */
66static ssize_t show_adapter_name(struct device *dev,
67 struct device_attribute *attr, char *buf)
68{
69 struct i2c_adapter *adap = dev_to_i2c_adapter(dev);
70 return sprintf(buf, "%s\n", adap->name);
71}
72static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
73
74static int i2c_isa_device_probe(struct device *dev)
75{
76 return -ENODEV;
77}
78
79static int i2c_isa_device_remove(struct device *dev)
80{
81 return 0;
82}
83
84
85/* We implement an interface which resembles i2c_{add,del}_driver,
86 but for i2c-isa drivers. We don't have to remember and handle lists
87 of drivers and adapters so this is much more simple, of course. */
88
89int i2c_isa_add_driver(struct i2c_driver *driver)
90{
91 int res;
92
93 /* Add the driver to the list of i2c drivers in the driver core */
94 driver->driver.name = driver->name;
95 driver->driver.bus = &i2c_bus_type;
96 driver->driver.probe = i2c_isa_device_probe;
97 driver->driver.remove = i2c_isa_device_remove;
98 res = driver_register(&driver->driver);
99 if (res)
100 return res;
101 dev_dbg(&isa_adapter.dev, "Driver %s registered\n", driver->name);
102
103 /* Now look for clients */
104 driver->attach_adapter(&isa_adapter);
105
106 return 0;
107}
108
109int i2c_isa_del_driver(struct i2c_driver *driver)
110{
111 struct list_head *item, *_n;
112 struct i2c_client *client;
113 int res;
114
115 /* Detach all clients belonging to this one driver */
116 list_for_each_safe(item, _n, &isa_adapter.clients) {
117 client = list_entry(item, struct i2c_client, list);
118 if (client->driver != driver)
119 continue;
120 dev_dbg(&isa_adapter.dev, "Detaching client %s at 0x%x\n",
121 client->name, client->addr);
122 if ((res = driver->detach_client(client))) {
123 dev_err(&isa_adapter.dev, "Failed, driver "
124 "%s not unregistered!\n",
125 driver->name);
126 return res;
127 }
128 }
129
130 /* Get the driver off the core list */
131 driver_unregister(&driver->driver);
132 dev_dbg(&isa_adapter.dev, "Driver %s unregistered\n", driver->name);
133
134 return 0;
135}
136
137
56static int __init i2c_isa_init(void) 138static int __init i2c_isa_init(void)
57{ 139{
58 return i2c_add_adapter(&isa_adapter); 140 init_MUTEX(&isa_adapter.clist_lock);
141 INIT_LIST_HEAD(&isa_adapter.clients);
142
143 isa_adapter.nr = ANY_I2C_ISA_BUS;
144 isa_adapter.dev.parent = &platform_bus;
145 sprintf(isa_adapter.dev.bus_id, "i2c-%d", isa_adapter.nr);
146 isa_adapter.dev.driver = &i2c_adapter_driver;
147 isa_adapter.dev.release = &i2c_adapter_dev_release;
148 device_register(&isa_adapter.dev);
149 device_create_file(&isa_adapter.dev, &dev_attr_name);
150
151 /* Add this adapter to the i2c_adapter class */
152 memset(&isa_adapter.class_dev, 0x00, sizeof(struct class_device));
153 isa_adapter.class_dev.dev = &isa_adapter.dev;
154 isa_adapter.class_dev.class = &i2c_adapter_class;
155 strlcpy(isa_adapter.class_dev.class_id, isa_adapter.dev.bus_id,
156 BUS_ID_SIZE);
157 class_device_register(&isa_adapter.class_dev);
158
159 dev_dbg(&isa_adapter.dev, "%s registered\n", isa_adapter.name);
160
161 return 0;
59} 162}
60 163
61static void __exit i2c_isa_exit(void) 164static void __exit i2c_isa_exit(void)
62{ 165{
63 i2c_del_adapter(&isa_adapter); 166#ifdef DEBUG
167 struct list_head *item, *_n;
168 struct i2c_client *client = NULL;
169#endif
170
171 /* There should be no more active client */
172#ifdef DEBUG
173 dev_dbg(&isa_adapter.dev, "Looking for clients\n");
174 list_for_each_safe(item, _n, &isa_adapter.clients) {
175 client = list_entry(item, struct i2c_client, list);
176 dev_err(&isa_adapter.dev, "Driver %s still has an active "
177 "ISA client at 0x%x\n", client->driver->name,
178 client->addr);
179 }
180 if (client != NULL)
181 return;
182#endif
183
184 /* Clean up the sysfs representation */
185 dev_dbg(&isa_adapter.dev, "Unregistering from sysfs\n");
186 init_completion(&isa_adapter.dev_released);
187 init_completion(&isa_adapter.class_dev_released);
188 class_device_unregister(&isa_adapter.class_dev);
189 device_remove_file(&isa_adapter.dev, &dev_attr_name);
190 device_unregister(&isa_adapter.dev);
191
192 /* Wait for sysfs to drop all references */
193 dev_dbg(&isa_adapter.dev, "Waiting for sysfs completion\n");
194 wait_for_completion(&isa_adapter.dev_released);
195 wait_for_completion(&isa_adapter.class_dev_released);
196
197 dev_dbg(&isa_adapter.dev, "%s unregistered\n", isa_adapter.name);
64} 198}
65 199
66MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); 200EXPORT_SYMBOL(i2c_isa_add_driver);
201EXPORT_SYMBOL(i2c_isa_del_driver);
202
203MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
67MODULE_DESCRIPTION("ISA bus access through i2c"); 204MODULE_DESCRIPTION("ISA bus access through i2c");
68MODULE_LICENSE("GPL"); 205MODULE_LICENSE("GPL");
69 206
diff --git a/drivers/i2c/busses/i2c-keywest.c b/drivers/i2c/busses/i2c-keywest.c
index 94ae808314f7..37b49c2daf5f 100644
--- a/drivers/i2c/busses/i2c-keywest.c
+++ b/drivers/i2c/busses/i2c-keywest.c
@@ -87,12 +87,9 @@ static const char *__kw_state_names[] = {
87}; 87};
88#endif /* DEBUG */ 88#endif /* DEBUG */
89 89
90static int probe;
91
92MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); 90MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
93MODULE_DESCRIPTION("I2C driver for Apple's Keywest"); 91MODULE_DESCRIPTION("I2C driver for Apple's Keywest");
94MODULE_LICENSE("GPL"); 92MODULE_LICENSE("GPL");
95module_param(probe, bool, 0);
96 93
97#ifdef POLLED_MODE 94#ifdef POLLED_MODE
98/* Don't schedule, the g5 fan controller is too 95/* Don't schedule, the g5 fan controller is too
@@ -498,8 +495,6 @@ keywest_func(struct i2c_adapter * adapter)
498 495
499/* For now, we only handle combined mode (smbus) */ 496/* For now, we only handle combined mode (smbus) */
500static struct i2c_algorithm keywest_algorithm = { 497static struct i2c_algorithm keywest_algorithm = {
501 .name = "Keywest i2c",
502 .id = I2C_ALGO_SMBUS,
503 .smbus_xfer = keywest_smbus_xfer, 498 .smbus_xfer = keywest_smbus_xfer,
504 .master_xfer = keywest_xfer, 499 .master_xfer = keywest_xfer,
505 .functionality = keywest_func, 500 .functionality = keywest_func,
@@ -621,7 +616,6 @@ create_iface(struct device_node *np, struct device *dev)
621 sprintf(chan->adapter.name, "%s %d", np->parent->name, i); 616 sprintf(chan->adapter.name, "%s %d", np->parent->name, i);
622 chan->iface = iface; 617 chan->iface = iface;
623 chan->chan_no = i; 618 chan->chan_no = i;
624 chan->adapter.id = I2C_ALGO_SMBUS;
625 chan->adapter.algo = &keywest_algorithm; 619 chan->adapter.algo = &keywest_algorithm;
626 chan->adapter.algo_data = NULL; 620 chan->adapter.algo_data = NULL;
627 chan->adapter.client_register = NULL; 621 chan->adapter.client_register = NULL;
@@ -635,15 +629,6 @@ create_iface(struct device_node *np, struct device *dev)
635 chan->adapter.name); 629 chan->adapter.name);
636 i2c_set_adapdata(&chan->adapter, NULL); 630 i2c_set_adapdata(&chan->adapter, NULL);
637 } 631 }
638 if (probe) {
639 printk("Probe: ");
640 for (addr = 0x00; addr <= 0x7f; addr++) {
641 if (i2c_smbus_xfer(&chan->adapter,addr,
642 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
643 printk("%02x ", addr);
644 }
645 printk("\n");
646 }
647 } 632 }
648 633
649 printk(KERN_INFO "Found KeyWest i2c on \"%s\", %d channel%s, stepping: %d bits\n", 634 printk(KERN_INFO "Found KeyWest i2c on \"%s\", %d channel%s, stepping: %d bits\n",
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 9ad3e9262e8a..f065583ddcf1 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -272,8 +272,6 @@ static u32 mpc_functionality(struct i2c_adapter *adap)
272} 272}
273 273
274static struct i2c_algorithm mpc_algo = { 274static struct i2c_algorithm mpc_algo = {
275 .name = "MPC algorithm",
276 .id = I2C_ALGO_MPC107,
277 .master_xfer = mpc_xfer, 275 .master_xfer = mpc_xfer,
278 .functionality = mpc_functionality, 276 .functionality = mpc_functionality,
279}; 277};
@@ -281,7 +279,7 @@ static struct i2c_algorithm mpc_algo = {
281static struct i2c_adapter mpc_ops = { 279static struct i2c_adapter mpc_ops = {
282 .owner = THIS_MODULE, 280 .owner = THIS_MODULE,
283 .name = "MPC adapter", 281 .name = "MPC adapter",
284 .id = I2C_ALGO_MPC107 | I2C_HW_MPC107, 282 .id = I2C_HW_MPC107,
285 .algo = &mpc_algo, 283 .algo = &mpc_algo,
286 .class = I2C_CLASS_HWMON, 284 .class = I2C_CLASS_HWMON,
287 .timeout = 1, 285 .timeout = 1,
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5b852782d2f5..99abca45fece 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -423,18 +423,16 @@ static int
423mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) 423mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
424{ 424{
425 struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap); 425 struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
426 int i, rc = 0; 426 int i, rc;
427 427
428 for (i=0; i<num; i++) 428 for (i=0; i<num; i++)
429 if ((rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i])) != 0) 429 if ((rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i])) < 0)
430 break; 430 return rc;
431 431
432 return rc; 432 return num;
433} 433}
434 434
435static struct i2c_algorithm mv64xxx_i2c_algo = { 435static struct i2c_algorithm mv64xxx_i2c_algo = {
436 .name = MV64XXX_I2C_CTLR_NAME " algorithm",
437 .id = I2C_ALGO_MV64XXX,
438 .master_xfer = mv64xxx_i2c_xfer, 436 .master_xfer = mv64xxx_i2c_xfer,
439 .functionality = mv64xxx_i2c_functionality, 437 .functionality = mv64xxx_i2c_functionality,
440}; 438};
@@ -523,7 +521,7 @@ mv64xxx_i2c_probe(struct device *dev)
523 drv_data->freq_m = pdata->freq_m; 521 drv_data->freq_m = pdata->freq_m;
524 drv_data->freq_n = pdata->freq_n; 522 drv_data->freq_n = pdata->freq_n;
525 drv_data->irq = platform_get_irq(pd, 0); 523 drv_data->irq = platform_get_irq(pd, 0);
526 drv_data->adapter.id = I2C_ALGO_MV64XXX | I2C_HW_MV64XXX; 524 drv_data->adapter.id = I2C_HW_MV64XXX;
527 drv_data->adapter.algo = &mv64xxx_i2c_algo; 525 drv_data->adapter.algo = &mv64xxx_i2c_algo;
528 drv_data->adapter.owner = THIS_MODULE; 526 drv_data->adapter.owner = THIS_MODULE;
529 drv_data->adapter.class = I2C_CLASS_HWMON; 527 drv_data->adapter.class = I2C_CLASS_HWMON;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 74eb89aa9350..e0b7a913431e 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -110,8 +110,6 @@ static u32 nforce2_func(struct i2c_adapter *adapter);
110 110
111 111
112static struct i2c_algorithm smbus_algorithm = { 112static struct i2c_algorithm smbus_algorithm = {
113 .name = "Non-I2C SMBus adapter",
114 .id = I2C_ALGO_SMBUS,
115 .smbus_xfer = nforce2_access, 113 .smbus_xfer = nforce2_access,
116 .functionality = nforce2_func, 114 .functionality = nforce2_func,
117}; 115};
@@ -131,7 +129,6 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
131 struct nforce2_smbus *smbus = adap->algo_data; 129 struct nforce2_smbus *smbus = adap->algo_data;
132 unsigned char protocol, pec, temp; 130 unsigned char protocol, pec, temp;
133 unsigned char len = 0; /* to keep the compiler quiet */ 131 unsigned char len = 0; /* to keep the compiler quiet */
134 int timeout = 0;
135 int i; 132 int i;
136 133
137 protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ : 134 protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ :
@@ -191,29 +188,10 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
191 case I2C_SMBUS_PROC_CALL: 188 case I2C_SMBUS_PROC_CALL:
192 dev_err(&adap->dev, "I2C_SMBUS_PROC_CALL not supported!\n"); 189 dev_err(&adap->dev, "I2C_SMBUS_PROC_CALL not supported!\n");
193 return -1; 190 return -1;
194 /*
195 outb_p(command, NVIDIA_SMB_CMD);
196 outb_p(data->word, NVIDIA_SMB_DATA);
197 outb_p(data->word >> 8, NVIDIA_SMB_DATA + 1);
198 protocol = NVIDIA_SMB_PRTCL_PROC_CALL | pec;
199 read_write = I2C_SMBUS_READ;
200 break;
201 */
202 191
203 case I2C_SMBUS_BLOCK_PROC_CALL: 192 case I2C_SMBUS_BLOCK_PROC_CALL:
204 dev_err(&adap->dev, "I2C_SMBUS_BLOCK_PROC_CALL not supported!\n"); 193 dev_err(&adap->dev, "I2C_SMBUS_BLOCK_PROC_CALL not supported!\n");
205 return -1; 194 return -1;
206 /*
207 protocol |= pec;
208 len = min_t(u8, data->block[0], 31);
209 outb_p(command, NVIDIA_SMB_CMD);
210 outb_p(len, NVIDIA_SMB_BCNT);
211 for (i = 0; i < len; i++)
212 outb_p(data->block[i + 1], NVIDIA_SMB_DATA + i);
213 protocol = NVIDIA_SMB_PRTCL_BLOCK_PROC_CALL | pec;
214 read_write = I2C_SMBUS_READ;
215 break;
216 */
217 195
218 case I2C_SMBUS_WORD_DATA_PEC: 196 case I2C_SMBUS_WORD_DATA_PEC:
219 case I2C_SMBUS_BLOCK_DATA_PEC: 197 case I2C_SMBUS_BLOCK_DATA_PEC:
@@ -232,12 +210,6 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
232 210
233 temp = inb_p(NVIDIA_SMB_STS); 211 temp = inb_p(NVIDIA_SMB_STS);
234 212
235#if 0
236 do {
237 i2c_do_pause(1);
238 temp = inb_p(NVIDIA_SMB_STS);
239 } while (((temp & NVIDIA_SMB_STS_DONE) == 0) && (timeout++ < MAX_TIMEOUT));
240#endif
241 if (~temp & NVIDIA_SMB_STS_DONE) { 213 if (~temp & NVIDIA_SMB_STS_DONE) {
242 udelay(500); 214 udelay(500);
243 temp = inb_p(NVIDIA_SMB_STS); 215 temp = inb_p(NVIDIA_SMB_STS);
@@ -247,9 +219,10 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
247 temp = inb_p(NVIDIA_SMB_STS); 219 temp = inb_p(NVIDIA_SMB_STS);
248 } 220 }
249 221
250 if ((timeout >= MAX_TIMEOUT) || (~temp & NVIDIA_SMB_STS_DONE) 222 if ((~temp & NVIDIA_SMB_STS_DONE) || (temp & NVIDIA_SMB_STS_STATUS)) {
251 || (temp & NVIDIA_SMB_STS_STATUS)) 223 dev_dbg(&adap->dev, "SMBus Timeout! (0x%02x)\n", temp);
252 return -1; 224 return -1;
225 }
253 226
254 if (read_write == I2C_SMBUS_WRITE) 227 if (read_write == I2C_SMBUS_WRITE)
255 return 0; 228 return 0;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 6d34ee381ce1..6d48a4da7bed 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -399,8 +399,6 @@ static u32 piix4_func(struct i2c_adapter *adapter)
399} 399}
400 400
401static struct i2c_algorithm smbus_algorithm = { 401static struct i2c_algorithm smbus_algorithm = {
402 .name = "Non-I2C SMBus adapter",
403 .id = I2C_ALGO_SMBUS,
404 .smbus_xfer = piix4_access, 402 .smbus_xfer = piix4_access,
405 .functionality = piix4_func, 403 .functionality = piix4_func,
406}; 404};
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index a3b38257cc3d..73a092fb0e7e 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -568,7 +568,6 @@ static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
568/* i2c bus registration info */ 568/* i2c bus registration info */
569 569
570static struct i2c_algorithm s3c24xx_i2c_algorithm = { 570static struct i2c_algorithm s3c24xx_i2c_algorithm = {
571 .name = "S3C2410-I2C-Algorithm",
572 .master_xfer = s3c24xx_i2c_xfer, 571 .master_xfer = s3c24xx_i2c_xfer,
573 .functionality = s3c24xx_i2c_func, 572 .functionality = s3c24xx_i2c_func,
574}; 573};
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index bbd5e4e52f09..080318d6f54b 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -357,8 +357,6 @@ static u32 sis5595_func(struct i2c_adapter *adapter)
357} 357}
358 358
359static struct i2c_algorithm smbus_algorithm = { 359static struct i2c_algorithm smbus_algorithm = {
360 .name = "Non-I2C SMBus adapter",
361 .id = I2C_ALGO_SMBUS,
362 .smbus_xfer = sis5595_access, 360 .smbus_xfer = sis5595_access,
363 .functionality = sis5595_func, 361 .functionality = sis5595_func,
364}; 362};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index f58455e7689e..86f0f448fa0b 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -448,8 +448,6 @@ exit:
448 448
449 449
450static struct i2c_algorithm smbus_algorithm = { 450static struct i2c_algorithm smbus_algorithm = {
451 .name = "Non-I2C SMBus adapter",
452 .id = I2C_ALGO_SMBUS,
453 .smbus_xfer = sis630_access, 451 .smbus_xfer = sis630_access,
454 .functionality = sis630_func, 452 .functionality = sis630_func,
455}; 453};
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 6484792e23a1..ead2ff3cf60e 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -249,8 +249,6 @@ static u32 sis96x_func(struct i2c_adapter *adapter)
249} 249}
250 250
251static struct i2c_algorithm smbus_algorithm = { 251static struct i2c_algorithm smbus_algorithm = {
252 .name = "Non-I2C SMBus adapter",
253 .id = I2C_ALGO_SMBUS,
254 .smbus_xfer = sis96x_access, 252 .smbus_xfer = sis96x_access,
255 .functionality = sis96x_func, 253 .functionality = sis96x_func,
256}; 254};
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index 00d94e886955..73f481e93a36 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -109,8 +109,6 @@ static u32 stub_func(struct i2c_adapter *adapter)
109} 109}
110 110
111static struct i2c_algorithm smbus_algorithm = { 111static struct i2c_algorithm smbus_algorithm = {
112 .name = "Non-I2C SMBus adapter",
113 .id = I2C_ALGO_SMBUS,
114 .functionality = stub_func, 112 .functionality = stub_func,
115 .smbus_xfer = stub_xfer, 113 .smbus_xfer = stub_xfer,
116}; 114};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 6b5008005c6f..99d209e0485a 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -286,8 +286,6 @@ static u32 vt596_func(struct i2c_adapter *adapter)
286} 286}
287 287
288static struct i2c_algorithm smbus_algorithm = { 288static struct i2c_algorithm smbus_algorithm = {
289 .name = "Non-I2C SMBus adapter",
290 .id = I2C_ALGO_SMBUS,
291 .smbus_xfer = vt596_access, 289 .smbus_xfer = vt596_access,
292 .functionality = vt596_func, 290 .functionality = vt596_func,
293}; 291};
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index a18bdd9aa7ba..a1d580e05361 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -395,8 +395,6 @@ static u32 scx200_acb_func(struct i2c_adapter *adapter)
395 395
396/* For now, we only handle combined mode (smbus) */ 396/* For now, we only handle combined mode (smbus) */
397static struct i2c_algorithm scx200_acb_algorithm = { 397static struct i2c_algorithm scx200_acb_algorithm = {
398 .name = "NatSemi SCx200 ACCESS.bus",
399 .id = I2C_ALGO_SMBUS,
400 .smbus_xfer = scx200_acb_smbus_xfer, 398 .smbus_xfer = scx200_acb_smbus_xfer,
401 .functionality = scx200_acb_func, 399 .functionality = scx200_acb_func,
402}; 400};
@@ -456,7 +454,7 @@ static int __init scx200_acb_create(int base, int index)
456 i2c_set_adapdata(adapter, iface); 454 i2c_set_adapdata(adapter, iface);
457 snprintf(adapter->name, I2C_NAME_SIZE, "SCx200 ACB%d", index); 455 snprintf(adapter->name, I2C_NAME_SIZE, "SCx200 ACB%d", index);
458 adapter->owner = THIS_MODULE; 456 adapter->owner = THIS_MODULE;
459 adapter->id = I2C_ALGO_SMBUS; 457 adapter->id = I2C_HW_SMBUS_SCX200;
460 adapter->algo = &scx200_acb_algorithm; 458 adapter->algo = &scx200_acb_algorithm;
461 adapter->class = I2C_CLASS_HWMON; 459 adapter->class = I2C_CLASS_HWMON;
462 460
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 43f70dbfc03f..6bd44a44cd28 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -2,17 +2,12 @@
2# Miscellaneous I2C chip drivers configuration 2# Miscellaneous I2C chip drivers configuration
3# 3#
4 4
5config I2C_SENSOR
6 tristate
7 default n
8
9menu "Miscellaneous I2C Chip support" 5menu "Miscellaneous I2C Chip support"
10 depends on I2C 6 depends on I2C
11 7
12config SENSORS_DS1337 8config SENSORS_DS1337
13 tristate "Dallas Semiconductor DS1337 and DS1339 Real Time Clock" 9 tristate "Dallas Semiconductor DS1337 and DS1339 Real Time Clock"
14 depends on I2C && EXPERIMENTAL 10 depends on I2C && EXPERIMENTAL
15 select I2C_SENSOR
16 help 11 help
17 If you say yes here you get support for Dallas Semiconductor 12 If you say yes here you get support for Dallas Semiconductor
18 DS1337 and DS1339 real-time clock chips. 13 DS1337 and DS1339 real-time clock chips.
@@ -23,7 +18,6 @@ config SENSORS_DS1337
23config SENSORS_DS1374 18config SENSORS_DS1374
24 tristate "Maxim/Dallas Semiconductor DS1374 Real Time Clock" 19 tristate "Maxim/Dallas Semiconductor DS1374 Real Time Clock"
25 depends on I2C && EXPERIMENTAL 20 depends on I2C && EXPERIMENTAL
26 select I2C_SENSOR
27 help 21 help
28 If you say yes here you get support for Dallas Semiconductor 22 If you say yes here you get support for Dallas Semiconductor
29 DS1374 real-time clock chips. 23 DS1374 real-time clock chips.
@@ -34,7 +28,6 @@ config SENSORS_DS1374
34config SENSORS_EEPROM 28config SENSORS_EEPROM
35 tristate "EEPROM reader" 29 tristate "EEPROM reader"
36 depends on I2C && EXPERIMENTAL 30 depends on I2C && EXPERIMENTAL
37 select I2C_SENSOR
38 help 31 help
39 If you say yes here you get read-only access to the EEPROM data 32 If you say yes here you get read-only access to the EEPROM data
40 available on modern memory DIMMs and Sony Vaio laptops. Such 33 available on modern memory DIMMs and Sony Vaio laptops. Such
@@ -46,7 +39,6 @@ config SENSORS_EEPROM
46config SENSORS_PCF8574 39config SENSORS_PCF8574
47 tristate "Philips PCF8574 and PCF8574A" 40 tristate "Philips PCF8574 and PCF8574A"
48 depends on I2C && EXPERIMENTAL 41 depends on I2C && EXPERIMENTAL
49 select I2C_SENSOR
50 help 42 help
51 If you say yes here you get support for Philips PCF8574 and 43 If you say yes here you get support for Philips PCF8574 and
52 PCF8574A chips. 44 PCF8574A chips.
@@ -67,7 +59,6 @@ config SENSORS_PCA9539
67config SENSORS_PCF8591 59config SENSORS_PCF8591
68 tristate "Philips PCF8591" 60 tristate "Philips PCF8591"
69 depends on I2C && EXPERIMENTAL 61 depends on I2C && EXPERIMENTAL
70 select I2C_SENSOR
71 help 62 help
72 If you say yes here you get support for Philips PCF8591 chips. 63 If you say yes here you get support for Philips PCF8591 chips.
73 64
@@ -77,7 +68,6 @@ config SENSORS_PCF8591
77config SENSORS_RTC8564 68config SENSORS_RTC8564
78 tristate "Epson 8564 RTC chip" 69 tristate "Epson 8564 RTC chip"
79 depends on I2C && EXPERIMENTAL 70 depends on I2C && EXPERIMENTAL
80 select I2C_SENSOR
81 help 71 help
82 If you say yes here you get support for the Epson 8564 RTC chip. 72 If you say yes here you get support for the Epson 8564 RTC chip.
83 73
diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
index 82cf959989fd..9d3175c03395 100644
--- a/drivers/i2c/chips/ds1337.c
+++ b/drivers/i2c/chips/ds1337.c
@@ -17,7 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/i2c-sensor.h>
21#include <linux/string.h> 20#include <linux/string.h>
22#include <linux/rtc.h> /* get the user-level API */ 21#include <linux/rtc.h> /* get the user-level API */
23#include <linux/bcd.h> 22#include <linux/bcd.h>
@@ -39,9 +38,8 @@
39 * Functions declaration 38 * Functions declaration
40 */ 39 */
41static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END }; 40static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
42static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
43 41
44SENSORS_INSMOD_1(ds1337); 42I2C_CLIENT_INSMOD_1(ds1337);
45 43
46static int ds1337_attach_adapter(struct i2c_adapter *adapter); 44static int ds1337_attach_adapter(struct i2c_adapter *adapter);
47static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind); 45static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind);
@@ -227,7 +225,7 @@ int ds1337_do_command(int bus, int cmd, void *arg)
227 225
228static int ds1337_attach_adapter(struct i2c_adapter *adapter) 226static int ds1337_attach_adapter(struct i2c_adapter *adapter)
229{ 227{
230 return i2c_detect(adapter, &addr_data, ds1337_detect); 228 return i2c_probe(adapter, &addr_data, ds1337_detect);
231} 229}
232 230
233/* 231/*
@@ -354,11 +352,8 @@ static int ds1337_detach_client(struct i2c_client *client)
354 int err; 352 int err;
355 struct ds1337_data *data = i2c_get_clientdata(client); 353 struct ds1337_data *data = i2c_get_clientdata(client);
356 354
357 if ((err = i2c_detach_client(client))) { 355 if ((err = i2c_detach_client(client)))
358 dev_err(&client->dev, "Client deregistration failed, "
359 "client not detached.\n");
360 return err; 356 return err;
361 }
362 357
363 list_del(&data->list); 358 list_del(&data->list);
364 kfree(data); 359 kfree(data);
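
Taken together, the ds1337 hunks show the conversion pattern this series applies to every chip driver below: the I2C_SENSOR Kconfig symbol and <linux/i2c-sensor.h> go away, the unused normal_isa[] table is dropped, SENSORS_INSMOD_1 becomes I2C_CLIENT_INSMOD_1, and i2c_detect() becomes i2c_probe(). A minimal sketch of the resulting address setup in a converted driver (an excerpt, not a complete driver; the detect body is stubbed out here):

#include <linux/i2c.h>

/* addresses to scan, terminated by I2C_CLIENT_END */
static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };

/* Roughly: declares the probe/ignore/force module parameters and a
 * struct i2c_client_address_data named addr_data that wraps
 * normal_i2c[] plus those parameters, now provided by <linux/i2c.h>
 * itself rather than by i2c-sensor.h. */
I2C_CLIENT_INSMOD_1(ds1337);

static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind)
{
        return 0;       /* real verification and i2c_attach_client() live in ds1337.c */
}

static int ds1337_attach_adapter(struct i2c_adapter *adapter)
{
        /* i2c_probe() walks the force, probe and normal_i2c entries and
         * calls ds1337_detect() for each candidate address. */
        return i2c_probe(adapter, &addr_data, ds1337_detect);
}
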
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index a445736d8838..0936327a946d 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -53,7 +53,6 @@ static struct i2c_client_address_data addr_data = {
53 .normal_i2c = normal_addr, 53 .normal_i2c = normal_addr,
54 .probe = ignore, 54 .probe = ignore,
55 .ignore = ignore, 55 .ignore = ignore,
56 .force = ignore,
57}; 56};
58 57
59static ulong ds1374_read_rtc(void) 58static ulong ds1374_read_rtc(void)
@@ -166,7 +165,7 @@ static void ds1374_set_tlet(ulong arg)
166 "can't confirm time set from rtc chip\n"); 165 "can't confirm time set from rtc chip\n");
167} 166}
168 167
169ulong new_time; 168static ulong new_time;
170 169
171DECLARE_TASKLET_DISABLED(ds1374_tasklet, ds1374_set_tlet, (ulong) & new_time); 170DECLARE_TASKLET_DISABLED(ds1374_tasklet, ds1374_set_tlet, (ulong) & new_time);
172 171
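
ds1374 (like m41t00 and rtc8564 further down) builds its struct i2c_client_address_data by hand instead of using the INSMOD macros, so its conversion amounts to dropping the .force initializer: the single force list has been replaced by the per-kind forces tables consumed by the new i2c_probe(), and a driver that is never force-attached simply leaves that field NULL. A hedged sketch of the trimmed table (the 0x68 slave address is not visible in the hunk above and is quoted from the DS1374 driver itself):

#include <linux/i2c.h>

static unsigned short ignore[] = { I2C_CLIENT_END };
static unsigned short normal_addr[] = { 0x68, I2C_CLIENT_END };

static struct i2c_client_address_data addr_data = {
        .normal_i2c = normal_addr,
        .probe      = ignore,
        .ignore     = ignore,
        /* .forces is left NULL: no force= module parameter here */
};
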
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
index a2da31b0dd7b..d58403a47908 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/i2c/chips/eeprom.c
@@ -33,15 +33,13 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/jiffies.h> 34#include <linux/jiffies.h>
35#include <linux/i2c.h> 35#include <linux/i2c.h>
36#include <linux/i2c-sensor.h>
37 36
38/* Addresses to scan */ 37/* Addresses to scan */
39static unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, 38static unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
40 0x55, 0x56, 0x57, I2C_CLIENT_END }; 39 0x55, 0x56, 0x57, I2C_CLIENT_END };
41static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
42 40
43/* Insmod parameters */ 41/* Insmod parameters */
44SENSORS_INSMOD_1(eeprom); 42I2C_CLIENT_INSMOD_1(eeprom);
45 43
46 44
47/* Size of EEPROM in bytes */ 45/* Size of EEPROM in bytes */
@@ -153,21 +151,16 @@ static struct bin_attribute eeprom_attr = {
153 151
154static int eeprom_attach_adapter(struct i2c_adapter *adapter) 152static int eeprom_attach_adapter(struct i2c_adapter *adapter)
155{ 153{
156 return i2c_detect(adapter, &addr_data, eeprom_detect); 154 return i2c_probe(adapter, &addr_data, eeprom_detect);
157} 155}
158 156
159/* This function is called by i2c_detect */ 157/* This function is called by i2c_probe */
160int eeprom_detect(struct i2c_adapter *adapter, int address, int kind) 158int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
161{ 159{
162 struct i2c_client *new_client; 160 struct i2c_client *new_client;
163 struct eeprom_data *data; 161 struct eeprom_data *data;
164 int err = 0; 162 int err = 0;
165 163
166 /* prevent 24RF08 corruption */
167 if (kind < 0)
168 i2c_smbus_xfer(adapter, address, 0, 0, 0,
169 I2C_SMBUS_QUICK, NULL);
170
171 /* There are three ways we can read the EEPROM data: 164 /* There are three ways we can read the EEPROM data:
172 (1) I2C block reads (faster, but unsupported by most adapters) 165 (1) I2C block reads (faster, but unsupported by most adapters)
173 (2) Consecutive byte reads (100% overhead) 166 (2) Consecutive byte reads (100% overhead)
@@ -231,10 +224,8 @@ static int eeprom_detach_client(struct i2c_client *client)
231 int err; 224 int err;
232 225
233 err = i2c_detach_client(client); 226 err = i2c_detach_client(client);
234 if (err) { 227 if (err)
235 dev_err(&client->dev, "Client deregistration failed, client not detached.\n");
236 return err; 228 return err;
237 }
238 229
239 kfree(i2c_get_clientdata(client)); 230 kfree(i2c_get_clientdata(client));
240 231
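
With the 24RF08 quick-write workaround moved into i2c-core (see i2c_probe_address() in the i2c-core.c hunks below), eeprom.c is reduced to detection plus the read-only "eeprom" binary sysfs attribute. Reading it from user space needs nothing i2c-specific; a small runnable example follows, where the 0-0050 path (bus 0, chip 0x50) is only an illustration and should be replaced by whichever device the driver actually detected:

/* eeprom_dump.c - hex-dump an EEPROM exposed through sysfs */
#include <stdio.h>

int main(int argc, char *argv[])
{
        const char *path = (argc > 1) ? argv[1]
                        : "/sys/bus/i2c/devices/0-0050/eeprom";
        unsigned char buf[256];         /* SPD EEPROMs are 256 bytes */
        size_t i, n;
        FILE *f = fopen(path, "rb");

        if (!f) {
                perror(path);
                return 1;
        }
        n = fread(buf, 1, sizeof(buf), f);
        fclose(f);

        for (i = 0; i < n; i++)
                printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
        if (n % 16)
                putchar('\n');
        return 0;
}
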
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 354a26295672..8ee56d4b3891 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1489,7 +1489,7 @@ static int isp1301_probe(struct i2c_adapter *bus, int address, int kind)
1489 if (the_transceiver) 1489 if (the_transceiver)
1490 return 0; 1490 return 0;
1491 1491
1492 isp = kcalloc(1, sizeof *isp, GFP_KERNEL); 1492 isp = kzalloc(sizeof *isp, GFP_KERNEL);
1493 if (!isp) 1493 if (!isp)
1494 return 0; 1494 return 0;
1495 1495
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
index 778d7e12859d..3f14528a52a9 100644
--- a/drivers/i2c/chips/m41t00.c
+++ b/drivers/i2c/chips/m41t00.c
@@ -42,7 +42,6 @@ static struct i2c_client_address_data addr_data = {
42 .normal_i2c = normal_addr, 42 .normal_i2c = normal_addr,
43 .probe = ignore, 43 .probe = ignore,
44 .ignore = ignore, 44 .ignore = ignore,
45 .force = ignore,
46}; 45};
47 46
48ulong 47ulong
@@ -145,7 +144,7 @@ m41t00_set_tlet(ulong arg)
145 return; 144 return;
146} 145}
147 146
148ulong new_time; 147static ulong new_time;
149 148
150DECLARE_TASKLET_DISABLED(m41t00_tasklet, m41t00_set_tlet, (ulong)&new_time); 149DECLARE_TASKLET_DISABLED(m41t00_tasklet, m41t00_set_tlet, (ulong)&new_time);
151 150
diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c
index 0230375f72e5..9e1aeb69abf9 100644
--- a/drivers/i2c/chips/max6875.c
+++ b/drivers/i2c/chips/max6875.c
@@ -5,97 +5,60 @@
5 5
6 Based on i2c/chips/eeprom.c 6 Based on i2c/chips/eeprom.c
7 7
8 The MAX6875 has two EEPROM sections: config and user. 8 The MAX6875 has a bank of registers and two banks of EEPROM.
9 At reset, the config EEPROM is read into the registers. 9 Address ranges are defined as follows:
10 * 0x0000 - 0x0046 = configuration registers
11 * 0x8000 - 0x8046 = configuration EEPROM
12 * 0x8100 - 0x82FF = user EEPROM
10 13
11 This driver make 3 binary files available in sysfs: 14 This driver makes the user EEPROM available for read.
12 reg_config - direct access to the registers
13 eeprom_config - acesses configuration eeprom space
14 eeprom_user - free for application use
15 15
16 In our application, we put device serial & model numbers in user eeprom. 16 The registers & config EEPROM should be accessed via i2c-dev.
17 17
18 Notes: 18 The MAX6875 ignores the lowest address bit, so each chip responds to
19 1) The datasheet says that register 0x44 / EEPROM 0x8044 should NOT 19 two addresses - 0x50/0x51 and 0x52/0x53.
20 be overwritten, so the driver explicitly prevents that. 20
21 2) It's a good idea to keep the config (0x45) locked in config EEPROM. 21 Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
22 You can temporarily enable config writes by changing register 0x45. 22 address, so this driver is destructive if loaded for the wrong EEPROM chip.
23 23
24 This program is free software; you can redistribute it and/or modify 24 This program is free software; you can redistribute it and/or modify
25 it under the terms of the GNU General Public License as published by 25 it under the terms of the GNU General Public License as published by
26 the Free Software Foundation; version 2 of the License. 26 the Free Software Foundation; version 2 of the License.
27*/ 27*/
28 28
29#include <linux/config.h>
30#include <linux/kernel.h> 29#include <linux/kernel.h>
31#include <linux/init.h> 30#include <linux/init.h>
32#include <linux/module.h> 31#include <linux/module.h>
33#include <linux/slab.h> 32#include <linux/slab.h>
34#include <linux/sched.h>
35#include <linux/delay.h>
36#include <linux/i2c.h> 33#include <linux/i2c.h>
37#include <linux/i2c-sensor.h> 34#include <asm/semaphore.h>
38 35
39/* Addresses to scan */ 36/* Do not scan - the MAX6875 access method will write to some EEPROM chips */
40/* No address scanned by default, as this could corrupt standard EEPROMS. */
41static unsigned short normal_i2c[] = {I2C_CLIENT_END}; 37static unsigned short normal_i2c[] = {I2C_CLIENT_END};
42static unsigned int normal_isa[] = {I2C_CLIENT_ISA_END};
43 38
44/* Insmod parameters */ 39/* Insmod parameters */
45SENSORS_INSMOD_1(max6875); 40I2C_CLIENT_INSMOD_1(max6875);
46
47/* this param will prevent 'accidental' writes to the eeprom */
48static int allow_write = 0;
49module_param(allow_write, int, 0);
50MODULE_PARM_DESC(allow_write,
51 "Enable write access:\n"
52 "*0: Read only\n"
53 " 1: Read/Write access");
54 41
55/* The MAX6875 can only read/write 16 bytes at a time */ 42/* The MAX6875 can only read/write 16 bytes at a time */
56#define SLICE_SIZE 16 43#define SLICE_SIZE 16
57#define SLICE_BITS 4 44#define SLICE_BITS 4
58 45
59/* CONFIG EEPROM is at addresses 0x8000 - 0x8045, registers are at 0 - 0x45 */
60#define CONFIG_EEPROM_BASE 0x8000
61#define CONFIG_EEPROM_SIZE 0x0046
62#define CONFIG_EEPROM_SLICES 5
63
64/* USER EEPROM is at addresses 0x8100 - 0x82FF */ 46/* USER EEPROM is at addresses 0x8100 - 0x82FF */
65#define USER_EEPROM_BASE 0x8100 47#define USER_EEPROM_BASE 0x8100
66#define USER_EEPROM_SIZE 0x0200 48#define USER_EEPROM_SIZE 0x0200
67#define USER_EEPROM_SLICES 32 49#define USER_EEPROM_SLICES 32
68 50
69/* MAX6875 commands */ 51/* MAX6875 commands */
70#define MAX6875_CMD_BLOCK_WRITE 0x83 52#define MAX6875_CMD_BLK_READ 0x84
71#define MAX6875_CMD_BLOCK_READ 0x84
72#define MAX6875_CMD_REBOOT 0x88
73
74enum max6875_area_type {
75 max6875_register_config=0,
76 max6875_eeprom_config,
77 max6875_eeprom_user,
78 max6857_max
79};
80
81struct eeprom_block {
82 enum max6875_area_type type;
83 u8 slices;
84 u32 size;
85 u32 valid;
86 u32 base;
87 unsigned long *updated;
88 u8 *data;
89};
90 53
91/* Each client has this additional data */ 54/* Each client has this additional data */
92struct max6875_data { 55struct max6875_data {
93 struct i2c_client client; 56 struct i2c_client client;
94 struct semaphore update_lock; 57 struct semaphore update_lock;
95 struct eeprom_block blocks[max6857_max]; 58
96 /* the above structs point into the arrays below */ 59 u32 valid;
97 u8 data[USER_EEPROM_SIZE + (CONFIG_EEPROM_SIZE*2)]; 60 u8 data[USER_EEPROM_SIZE];
98 unsigned long last_updated[USER_EEPROM_SLICES + (CONFIG_EEPROM_SLICES*2)]; 61 unsigned long last_updated[USER_EEPROM_SLICES];
99}; 62};
100 63
101static int max6875_attach_adapter(struct i2c_adapter *adapter); 64static int max6875_attach_adapter(struct i2c_adapter *adapter);
@@ -111,337 +74,160 @@ static struct i2c_driver max6875_driver = {
111 .detach_client = max6875_detach_client, 74 .detach_client = max6875_detach_client,
112}; 75};
113 76
114static int max6875_update_slice(struct i2c_client *client, 77static void max6875_update_slice(struct i2c_client *client, int slice)
115 struct eeprom_block *blk,
116 int slice)
117{ 78{
118 struct max6875_data *data = i2c_get_clientdata(client); 79 struct max6875_data *data = i2c_get_clientdata(client);
119 int i, j, addr, count; 80 int i, j, addr;
120 u8 rdbuf[SLICE_SIZE]; 81 u8 *buf;
121 int retval = 0;
122 82
123 if (slice >= blk->slices) 83 if (slice >= USER_EEPROM_SLICES)
124 return -1; 84 return;
125 85
126 down(&data->update_lock); 86 down(&data->update_lock);
127 87
128 if (!(blk->valid & (1 << slice)) || 88 buf = &data->data[slice << SLICE_BITS];
129 (jiffies - blk->updated[slice] > 300 * HZ) ||
130 (jiffies < blk->updated[slice])) {
131 dev_dbg(&client->dev, "Starting eeprom update, slice %u, base %u\n",
132 slice, blk->base);
133 89
134 addr = blk->base + (slice << SLICE_BITS); 90 if (!(data->valid & (1 << slice)) ||
135 count = blk->size - (slice << SLICE_BITS); 91 time_after(jiffies, data->last_updated[slice])) {
136 if (count > SLICE_SIZE) {
137 count = SLICE_SIZE;
138 }
139 92
140 /* Preset the read address */ 93 dev_dbg(&client->dev, "Starting update of slice %u\n", slice);
141 if (addr < 0x100) { 94
142 /* select the register */ 95 data->valid &= ~(1 << slice);
143 if (i2c_smbus_write_byte(client, addr & 0xFF)) { 96
144 dev_dbg(&client->dev, "max6875 register select has failed!\n"); 97 addr = USER_EEPROM_BASE + (slice << SLICE_BITS);
145 retval = -1; 98
146 goto exit; 99 /* select the eeprom address */
147 } 100 if (i2c_smbus_write_byte_data(client, addr >> 8, addr & 0xFF)) {
148 } else { 101 dev_err(&client->dev, "address set failed\n");
149 /* select the eeprom */ 102 goto exit_up;
150 if (i2c_smbus_write_byte_data(client, addr >> 8, addr & 0xFF)) {
151 dev_dbg(&client->dev, "max6875 address set has failed!\n");
152 retval = -1;
153 goto exit;
154 }
155 } 103 }
156 104
157 if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { 105 if (i2c_check_functionality(client->adapter,
158 if (i2c_smbus_read_i2c_block_data(client, MAX6875_CMD_BLOCK_READ, 106 I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
159 rdbuf) != SLICE_SIZE) 107 if (i2c_smbus_read_i2c_block_data(client,
160 { 108 MAX6875_CMD_BLK_READ,
161 retval = -1; 109 buf) != SLICE_SIZE) {
162 goto exit; 110 goto exit_up;
163 } 111 }
164
165 memcpy(&blk->data[slice << SLICE_BITS], rdbuf, count);
166 } else { 112 } else {
167 for (i = 0; i < count; i++) { 113 for (i = 0; i < SLICE_SIZE; i++) {
168 j = i2c_smbus_read_byte(client); 114 j = i2c_smbus_read_byte(client);
169 if (j < 0) 115 if (j < 0) {
170 { 116 goto exit_up;
171 retval = -1;
172 goto exit;
173 } 117 }
174 blk->data[(slice << SLICE_BITS) + i] = (u8) j; 118 buf[i] = j;
175 } 119 }
176 } 120 }
177 blk->updated[slice] = jiffies; 121 data->last_updated[slice] = jiffies;
178 blk->valid |= (1 << slice); 122 data->valid |= (1 << slice);
179 } 123 }
180 exit: 124exit_up:
181 up(&data->update_lock); 125 up(&data->update_lock);
182 return retval;
183} 126}
184 127
185static ssize_t max6875_read(struct kobject *kobj, char *buf, loff_t off, size_t count, 128static ssize_t max6875_read(struct kobject *kobj, char *buf, loff_t off,
186 enum max6875_area_type area_type) 129 size_t count)
187{ 130{
188 struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); 131 struct i2c_client *client = kobj_to_i2c_client(kobj);
189 struct max6875_data *data = i2c_get_clientdata(client); 132 struct max6875_data *data = i2c_get_clientdata(client);
190 struct eeprom_block *blk; 133 int slice, max_slice;
191 int slice;
192
193 blk = &data->blocks[area_type];
194 134
195 if (off > blk->size) 135 if (off > USER_EEPROM_SIZE)
196 return 0; 136 return 0;
197 if (off + count > blk->size)
198 count = blk->size - off;
199 137
200 /* Only refresh slices which contain requested bytes */ 138 if (off + count > USER_EEPROM_SIZE)
201 for (slice = (off >> SLICE_BITS); slice <= ((off + count - 1) >> SLICE_BITS); slice++) 139 count = USER_EEPROM_SIZE - off;
202 max6875_update_slice(client, blk, slice);
203 140
204 memcpy(buf, &blk->data[off], count); 141 /* refresh slices which contain requested bytes */
142 max_slice = (off + count - 1) >> SLICE_BITS;
143 for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
144 max6875_update_slice(client, slice);
205 145
206 return count; 146 memcpy(buf, &data->data[off], count);
207}
208
209static ssize_t max6875_user_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
210{
211 return max6875_read(kobj, buf, off, count, max6875_eeprom_user);
212}
213
214static ssize_t max6875_config_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
215{
216 return max6875_read(kobj, buf, off, count, max6875_eeprom_config);
217}
218
219static ssize_t max6875_cfgreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
220{
221 return max6875_read(kobj, buf, off, count, max6875_register_config);
222}
223
224
225static ssize_t max6875_write(struct kobject *kobj, char *buf, loff_t off, size_t count,
226 enum max6875_area_type area_type)
227{
228 struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
229 struct max6875_data *data = i2c_get_clientdata(client);
230 struct eeprom_block *blk;
231 int slice, addr, retval;
232 ssize_t sent = 0;
233
234 blk = &data->blocks[area_type];
235
236 if (off > blk->size)
237 return 0;
238 if ((off + count) > blk->size)
239 count = blk->size - off;
240
241 if (down_interruptible(&data->update_lock))
242 return -EAGAIN;
243
244 /* writing to a register is done with i2c_smbus_write_byte_data() */
245 if (blk->type == max6875_register_config) {
246 for (sent = 0; sent < count; sent++) {
247 addr = off + sent;
248 if (addr == 0x44)
249 continue;
250
251 retval = i2c_smbus_write_byte_data(client, addr, buf[sent]);
252 }
253 } else {
254 int cmd, val;
255
256 /* We are writing to EEPROM */
257 for (sent = 0; sent < count; sent++) {
258 addr = blk->base + off + sent;
259 cmd = addr >> 8;
260 val = (addr & 0xff) | (buf[sent] << 8); // reversed
261
262 if (addr == 0x8044)
263 continue;
264
265 retval = i2c_smbus_write_word_data(client, cmd, val);
266
267 if (retval) {
268 goto error_exit;
269 }
270 147
271 /* A write takes up to 11 ms */ 148 return count;
272 msleep(11);
273 }
274 }
275
276 /* Invalidate the scratch buffer */
277 for (slice = (off >> SLICE_BITS); slice <= ((off + count - 1) >> SLICE_BITS); slice++)
278 blk->valid &= ~(1 << slice);
279
280 error_exit:
281 up(&data->update_lock);
282
283 return sent;
284}
285
286static ssize_t max6875_user_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
287{
288 return max6875_write(kobj, buf, off, count, max6875_eeprom_user);
289}
290
291static ssize_t max6875_config_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
292{
293 return max6875_write(kobj, buf, off, count, max6875_eeprom_config);
294}
295
296static ssize_t max6875_cfgreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
297{
298 return max6875_write(kobj, buf, off, count, max6875_register_config);
299} 149}
300 150
301static struct bin_attribute user_eeprom_attr = { 151static struct bin_attribute user_eeprom_attr = {
302 .attr = { 152 .attr = {
303 .name = "eeprom_user", 153 .name = "eeprom",
304 .mode = S_IRUGO | S_IWUSR | S_IWGRP, 154 .mode = S_IRUGO,
305 .owner = THIS_MODULE,
306 },
307 .size = USER_EEPROM_SIZE,
308 .read = max6875_user_read,
309 .write = max6875_user_write,
310};
311
312static struct bin_attribute config_eeprom_attr = {
313 .attr = {
314 .name = "eeprom_config",
315 .mode = S_IRUGO | S_IWUSR,
316 .owner = THIS_MODULE,
317 },
318 .size = CONFIG_EEPROM_SIZE,
319 .read = max6875_config_read,
320 .write = max6875_config_write,
321};
322
323static struct bin_attribute config_register_attr = {
324 .attr = {
325 .name = "reg_config",
326 .mode = S_IRUGO | S_IWUSR,
327 .owner = THIS_MODULE, 155 .owner = THIS_MODULE,
328 }, 156 },
329 .size = CONFIG_EEPROM_SIZE, 157 .size = USER_EEPROM_SIZE,
330 .read = max6875_cfgreg_read, 158 .read = max6875_read,
331 .write = max6875_cfgreg_write,
332}; 159};
333 160
334static int max6875_attach_adapter(struct i2c_adapter *adapter) 161static int max6875_attach_adapter(struct i2c_adapter *adapter)
335{ 162{
336 return i2c_detect(adapter, &addr_data, max6875_detect); 163 return i2c_probe(adapter, &addr_data, max6875_detect);
337} 164}
338 165
339/* This function is called by i2c_detect */ 166/* This function is called by i2c_probe */
340static int max6875_detect(struct i2c_adapter *adapter, int address, int kind) 167static int max6875_detect(struct i2c_adapter *adapter, int address, int kind)
341{ 168{
342 struct i2c_client *new_client; 169 struct i2c_client *real_client;
170 struct i2c_client *fake_client;
343 struct max6875_data *data; 171 struct max6875_data *data;
344 int err = 0; 172 int err = 0;
345 173
346 /* Prevent 24RF08 corruption (in case of user error) */ 174 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
347 if (kind < 0) 175 | I2C_FUNC_SMBUS_READ_BYTE))
348 i2c_smbus_xfer(adapter, address, 0, 0, 0, 176 return 0;
349 I2C_SMBUS_QUICK, NULL); 177
350 178 /* Only check even addresses */
351 /* There are three ways we can read the EEPROM data: 179 if (address & 1)
352 (1) I2C block reads (faster, but unsupported by most adapters) 180 return 0;
353 (2) Consecutive byte reads (100% overhead) 181
354 (3) Regular byte data reads (200% overhead) 182 if (!(data = kmalloc(sizeof(struct max6875_data), GFP_KERNEL)))
355 The third method is not implemented by this driver because all 183 return -ENOMEM;
356 known adapters support at least the second. */
357 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA |
358 I2C_FUNC_SMBUS_BYTE |
359 I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
360 goto exit;
361
362 /* OK. For now, we presume we have a valid client. We now create the
363 client structure, even though we cannot fill it completely yet.
364 But it allows us to access eeprom_{read,write}_value. */
365 if (!(data = kmalloc(sizeof(struct max6875_data), GFP_KERNEL))) {
366 err = -ENOMEM;
367 goto exit;
368 }
369 memset(data, 0, sizeof(struct max6875_data)); 184 memset(data, 0, sizeof(struct max6875_data));
370 185
371 new_client = &data->client; 186 /* A fake client is created on the odd address */
372 i2c_set_clientdata(new_client, data); 187 if (!(fake_client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
373 new_client->addr = address; 188 err = -ENOMEM;
374 new_client->adapter = adapter; 189 goto exit_kfree1;
375 new_client->driver = &max6875_driver; 190 }
376 new_client->flags = 0; 191 memset(fake_client, 0, sizeof(struct i2c_client));
377 192
378 /* Setup the user section */ 193 /* Init real i2c_client */
379 data->blocks[max6875_eeprom_user].type = max6875_eeprom_user; 194 real_client = &data->client;
380 data->blocks[max6875_eeprom_user].slices = USER_EEPROM_SLICES; 195 i2c_set_clientdata(real_client, data);
381 data->blocks[max6875_eeprom_user].size = USER_EEPROM_SIZE; 196 real_client->addr = address;
382 data->blocks[max6875_eeprom_user].base = USER_EEPROM_BASE; 197 real_client->adapter = adapter;
383 data->blocks[max6875_eeprom_user].data = data->data; 198 real_client->driver = &max6875_driver;
384 data->blocks[max6875_eeprom_user].updated = data->last_updated; 199 real_client->flags = 0;
385 200 strlcpy(real_client->name, "max6875", I2C_NAME_SIZE);
386 /* Setup the config section */
387 data->blocks[max6875_eeprom_config].type = max6875_eeprom_config;
388 data->blocks[max6875_eeprom_config].slices = CONFIG_EEPROM_SLICES;
389 data->blocks[max6875_eeprom_config].size = CONFIG_EEPROM_SIZE;
390 data->blocks[max6875_eeprom_config].base = CONFIG_EEPROM_BASE;
391 data->blocks[max6875_eeprom_config].data = &data->data[USER_EEPROM_SIZE];
392 data->blocks[max6875_eeprom_config].updated = &data->last_updated[USER_EEPROM_SLICES];
393
394 /* Setup the register section */
395 data->blocks[max6875_register_config].type = max6875_register_config;
396 data->blocks[max6875_register_config].slices = CONFIG_EEPROM_SLICES;
397 data->blocks[max6875_register_config].size = CONFIG_EEPROM_SIZE;
398 data->blocks[max6875_register_config].base = 0;
399 data->blocks[max6875_register_config].data = &data->data[USER_EEPROM_SIZE+CONFIG_EEPROM_SIZE];
400 data->blocks[max6875_register_config].updated = &data->last_updated[USER_EEPROM_SLICES+CONFIG_EEPROM_SLICES];
401
402 /* Init the data */
403 memset(data->data, 0xff, sizeof(data->data));
404
405 /* Fill in the remaining client fields */
406 strlcpy(new_client->name, "max6875", I2C_NAME_SIZE);
407 init_MUTEX(&data->update_lock); 201 init_MUTEX(&data->update_lock);
408 202
409 /* Verify that the chip is really what we think it is */ 203 /* Init fake client data */
410 if ((max6875_update_slice(new_client, &data->blocks[max6875_eeprom_config], 4) < 0) || 204 /* set the client data to the i2c_client so that it will get freed */
411 (max6875_update_slice(new_client, &data->blocks[max6875_register_config], 4) < 0)) 205 i2c_set_clientdata(fake_client, fake_client);
412 goto exit_kfree; 206 fake_client->addr = address | 1;
413 207 fake_client->adapter = adapter;
414 /* 0x41,0x42 must be zero and 0x40 must match in eeprom and registers */ 208 fake_client->driver = &max6875_driver;
415 if ((data->blocks[max6875_eeprom_config].data[0x41] != 0) || 209 fake_client->flags = 0;
416 (data->blocks[max6875_eeprom_config].data[0x42] != 0) || 210 strlcpy(fake_client->name, "max6875 subclient", I2C_NAME_SIZE);
417 (data->blocks[max6875_register_config].data[0x41] != 0) || 211
418 (data->blocks[max6875_register_config].data[0x42] != 0) || 212 /* Prevent 24RF08 corruption (in case of user error) */
419 (data->blocks[max6875_eeprom_config].data[0x40] != 213 i2c_smbus_write_quick(real_client, 0);
420 data->blocks[max6875_register_config].data[0x40])) 214
421 goto exit_kfree; 215 if ((err = i2c_attach_client(real_client)) != 0)
422 216 goto exit_kfree2;
423 /* Tell the I2C layer a new client has arrived */ 217
424 if ((err = i2c_attach_client(new_client))) 218 if ((err = i2c_attach_client(fake_client)) != 0)
425 goto exit_kfree; 219 goto exit_detach;
426 220
427 /* create the sysfs eeprom files with the correct permissions */ 221 sysfs_create_bin_file(&real_client->dev.kobj, &user_eeprom_attr);
428 if (allow_write == 0) {
429 user_eeprom_attr.attr.mode &= ~S_IWUGO;
430 user_eeprom_attr.write = NULL;
431 config_eeprom_attr.attr.mode &= ~S_IWUGO;
432 config_eeprom_attr.write = NULL;
433 config_register_attr.attr.mode &= ~S_IWUGO;
434 config_register_attr.write = NULL;
435 }
436 sysfs_create_bin_file(&new_client->dev.kobj, &user_eeprom_attr);
437 sysfs_create_bin_file(&new_client->dev.kobj, &config_eeprom_attr);
438 sysfs_create_bin_file(&new_client->dev.kobj, &config_register_attr);
439 222
440 return 0; 223 return 0;
441 224
442exit_kfree: 225exit_detach:
226 i2c_detach_client(real_client);
227exit_kfree2:
228 kfree(fake_client);
229exit_kfree1:
443 kfree(data); 230 kfree(data);
444exit:
445 return err; 231 return err;
446} 232}
447 233
@@ -450,13 +236,9 @@ static int max6875_detach_client(struct i2c_client *client)
450 int err; 236 int err;
451 237
452 err = i2c_detach_client(client); 238 err = i2c_detach_client(client);
453 if (err) { 239 if (err)
454 dev_err(&client->dev, "Client deregistration failed, client not detached.\n");
455 return err; 240 return err;
456 }
457
458 kfree(i2c_get_clientdata(client)); 241 kfree(i2c_get_clientdata(client));
459
460 return 0; 242 return 0;
461} 243}
462 244
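
The rewritten max6875 driver caches only the 512-byte user EEPROM (fetched in 16-byte slices and exposed read-only as "eeprom") and claims the chip's odd shadow address with a dummy client so nothing else binds to it. Registers and config EEPROM are left to i2c-dev, as the new header comment says. Below is a sketch of that access path for one config-EEPROM byte using the raw I2C_SMBUS ioctl; the bus number and the 0x50 address are examples only, and this must be pointed only at a real MAX6875, since the address-setting write corrupts plain EEPROMs (if the max6875 driver is already bound to the address, I2C_SLAVE_FORCE would be needed instead of I2C_SLAVE):

/* max6875_cfg.c - read config EEPROM byte 0x8000 via /dev/i2c-N */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int smbus_xfer(int fd, int rw, __u8 cmd, int size,
                      union i2c_smbus_data *data)
{
        struct i2c_smbus_ioctl_data args = {
                .read_write = rw, .command = cmd, .size = size, .data = data,
        };
        return ioctl(fd, I2C_SMBUS, &args);
}

int main(void)
{
        unsigned int addr = 0x8000;             /* first config EEPROM byte */
        union i2c_smbus_data data;
        int fd = open("/dev/i2c-0", O_RDWR);    /* example bus */

        if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0) {
                perror("i2c-dev");
                return 1;
        }

        /* Set the read pointer: command = high address byte, data = low
         * address byte, the same sequence max6875_update_slice() uses. */
        data.byte = addr & 0xff;
        if (smbus_xfer(fd, I2C_SMBUS_WRITE, addr >> 8,
                       I2C_SMBUS_BYTE_DATA, &data) < 0) {
                perror("set address");
                return 1;
        }

        /* A plain receive-byte then returns the byte at that address. */
        if (smbus_xfer(fd, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data) < 0) {
                perror("read");
                return 1;
        }
        printf("config[0x%04x] = 0x%02x\n", addr, data.byte);
        close(fd);
        return 0;
}
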
diff --git a/drivers/i2c/chips/pca9539.c b/drivers/i2c/chips/pca9539.c
index 9f3ad45daae2..225577fdda4d 100644
--- a/drivers/i2c/chips/pca9539.c
+++ b/drivers/i2c/chips/pca9539.c
@@ -13,14 +13,12 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/hwmon-sysfs.h> 15#include <linux/hwmon-sysfs.h>
16#include <linux/i2c-sensor.h>
17 16
18/* Addresses to scan */ 17/* Addresses to scan */
19static unsigned short normal_i2c[] = {0x74, 0x75, 0x76, 0x77, I2C_CLIENT_END}; 18static unsigned short normal_i2c[] = {0x74, 0x75, 0x76, 0x77, I2C_CLIENT_END};
20static unsigned int normal_isa[] = {I2C_CLIENT_ISA_END};
21 19
22/* Insmod parameters */ 20/* Insmod parameters */
23SENSORS_INSMOD_1(pca9539); 21I2C_CLIENT_INSMOD_1(pca9539);
24 22
25enum pca9539_cmd 23enum pca9539_cmd
26{ 24{
@@ -109,10 +107,10 @@ static struct attribute_group pca9539_defattr_group = {
109 107
110static int pca9539_attach_adapter(struct i2c_adapter *adapter) 108static int pca9539_attach_adapter(struct i2c_adapter *adapter)
111{ 109{
112 return i2c_detect(adapter, &addr_data, pca9539_detect); 110 return i2c_probe(adapter, &addr_data, pca9539_detect);
113} 111}
114 112
115/* This function is called by i2c_detect */ 113/* This function is called by i2c_probe */
116static int pca9539_detect(struct i2c_adapter *adapter, int address, int kind) 114static int pca9539_detect(struct i2c_adapter *adapter, int address, int kind)
117{ 115{
118 struct i2c_client *new_client; 116 struct i2c_client *new_client;
@@ -164,10 +162,8 @@ static int pca9539_detach_client(struct i2c_client *client)
164{ 162{
165 int err; 163 int err;
166 164
167 if ((err = i2c_detach_client(client))) { 165 if ((err = i2c_detach_client(client)))
168 dev_err(&client->dev, "Client deregistration failed.\n");
169 return err; 166 return err;
170 }
171 167
172 kfree(i2c_get_clientdata(client)); 168 kfree(i2c_get_clientdata(client));
173 return 0; 169 return 0;
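
The detach_client trimming seen here and in ds1337, eeprom and max6875 above (and in pcf8574/pcf8591 below) relies on the same i2c-core change further down: i2c_detach_client() now reports the "client still busy" case itself, so the per-driver error message becomes redundant. The resulting canonical form, with foo as a placeholder name:

#include <linux/i2c.h>
#include <linux/slab.h>

static int foo_detach_client(struct i2c_client *client)
{
        int err;

        /* i2c-core logs detach failures now; just propagate them */
        err = i2c_detach_client(client);
        if (err)
                return err;

        kfree(i2c_get_clientdata(client));
        return 0;
}
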
diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
index cfcf64654080..6525743ff9fd 100644
--- a/drivers/i2c/chips/pcf8574.c
+++ b/drivers/i2c/chips/pcf8574.c
@@ -39,16 +39,14 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/i2c.h> 41#include <linux/i2c.h>
42#include <linux/i2c-sensor.h>
43 42
44/* Addresses to scan */ 43/* Addresses to scan */
45static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 44static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
46 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 45 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
47 I2C_CLIENT_END }; 46 I2C_CLIENT_END };
48static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
49 47
50/* Insmod parameters */ 48/* Insmod parameters */
51SENSORS_INSMOD_2(pcf8574, pcf8574a); 49I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a);
52 50
53/* Initial values */ 51/* Initial values */
54#define PCF8574_INIT 255 /* All outputs on (input mode) */ 52#define PCF8574_INIT 255 /* All outputs on (input mode) */
@@ -113,10 +111,10 @@ static DEVICE_ATTR(write, S_IWUSR | S_IRUGO, show_write, set_write);
113 111
114static int pcf8574_attach_adapter(struct i2c_adapter *adapter) 112static int pcf8574_attach_adapter(struct i2c_adapter *adapter)
115{ 113{
116 return i2c_detect(adapter, &addr_data, pcf8574_detect); 114 return i2c_probe(adapter, &addr_data, pcf8574_detect);
117} 115}
118 116
119/* This function is called by i2c_detect */ 117/* This function is called by i2c_probe */
120int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind) 118int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind)
121{ 119{
122 struct i2c_client *new_client; 120 struct i2c_client *new_client;
@@ -186,11 +184,8 @@ static int pcf8574_detach_client(struct i2c_client *client)
186{ 184{
187 int err; 185 int err;
188 186
189 if ((err = i2c_detach_client(client))) { 187 if ((err = i2c_detach_client(client)))
190 dev_err(&client->dev,
191 "Client deregistration failed, client not detached.\n");
192 return err; 188 return err;
193 }
194 189
195 kfree(i2c_get_clientdata(client)); 190 kfree(i2c_get_clientdata(client));
196 return 0; 191 return 0;
diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c
index db812ade8564..80f1df9a4500 100644
--- a/drivers/i2c/chips/pcf8591.c
+++ b/drivers/i2c/chips/pcf8591.c
@@ -24,15 +24,13 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/i2c-sensor.h>
28 27
29/* Addresses to scan */ 28/* Addresses to scan */
30static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 29static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
31 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 30 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
32static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
33 31
34/* Insmod parameters */ 32/* Insmod parameters */
35SENSORS_INSMOD_1(pcf8591); 33I2C_CLIENT_INSMOD_1(pcf8591);
36 34
37static int input_mode; 35static int input_mode;
38module_param(input_mode, int, 0); 36module_param(input_mode, int, 0);
@@ -164,10 +162,10 @@ static DEVICE_ATTR(out0_enable, S_IWUSR | S_IRUGO,
164 */ 162 */
165static int pcf8591_attach_adapter(struct i2c_adapter *adapter) 163static int pcf8591_attach_adapter(struct i2c_adapter *adapter)
166{ 164{
167 return i2c_detect(adapter, &addr_data, pcf8591_detect); 165 return i2c_probe(adapter, &addr_data, pcf8591_detect);
168} 166}
169 167
170/* This function is called by i2c_detect */ 168/* This function is called by i2c_probe */
171int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind) 169int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind)
172{ 170{
173 struct i2c_client *new_client; 171 struct i2c_client *new_client;
@@ -241,11 +239,8 @@ static int pcf8591_detach_client(struct i2c_client *client)
241{ 239{
242 int err; 240 int err;
243 241
244 if ((err = i2c_detach_client(client))) { 242 if ((err = i2c_detach_client(client)))
245 dev_err(&client->dev,
246 "Client deregistration failed, client not detached.\n");
247 return err; 243 return err;
248 }
249 244
250 kfree(i2c_get_clientdata(client)); 245 kfree(i2c_get_clientdata(client));
251 return 0; 246 return 0;
diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c
index 588fc2261a91..0b5385c892b1 100644
--- a/drivers/i2c/chips/rtc8564.c
+++ b/drivers/i2c/chips/rtc8564.c
@@ -67,7 +67,6 @@ static struct i2c_client_address_data addr_data = {
67 .normal_i2c = normal_addr, 67 .normal_i2c = normal_addr,
68 .probe = ignore, 68 .probe = ignore,
69 .ignore = ignore, 69 .ignore = ignore,
70 .force = ignore,
71}; 70};
72 71
73static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem); 72static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 4a9ead277596..dda472e5e8be 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -61,7 +61,7 @@ static int i2c_bus_resume(struct device * dev)
61 return rc; 61 return rc;
62} 62}
63 63
64static struct bus_type i2c_bus_type = { 64struct bus_type i2c_bus_type = {
65 .name = "i2c", 65 .name = "i2c",
66 .match = i2c_device_match, 66 .match = i2c_device_match,
67 .suspend = i2c_bus_suspend, 67 .suspend = i2c_bus_suspend,
@@ -78,13 +78,13 @@ static int i2c_device_remove(struct device *dev)
78 return 0; 78 return 0;
79} 79}
80 80
81static void i2c_adapter_dev_release(struct device *dev) 81void i2c_adapter_dev_release(struct device *dev)
82{ 82{
83 struct i2c_adapter *adap = dev_to_i2c_adapter(dev); 83 struct i2c_adapter *adap = dev_to_i2c_adapter(dev);
84 complete(&adap->dev_released); 84 complete(&adap->dev_released);
85} 85}
86 86
87static struct device_driver i2c_adapter_driver = { 87struct device_driver i2c_adapter_driver = {
88 .name = "i2c_adapter", 88 .name = "i2c_adapter",
89 .bus = &i2c_bus_type, 89 .bus = &i2c_bus_type,
90 .probe = i2c_device_probe, 90 .probe = i2c_device_probe,
@@ -97,7 +97,7 @@ static void i2c_adapter_class_dev_release(struct class_device *dev)
97 complete(&adap->class_dev_released); 97 complete(&adap->class_dev_released);
98} 98}
99 99
100static struct class i2c_adapter_class = { 100struct class i2c_adapter_class = {
101 .name = "i2c-adapter", 101 .name = "i2c-adapter",
102 .release = &i2c_adapter_class_dev_release, 102 .release = &i2c_adapter_class_dev_release,
103}; 103};
@@ -188,6 +188,8 @@ int i2c_add_adapter(struct i2c_adapter *adap)
188 strlcpy(adap->class_dev.class_id, adap->dev.bus_id, BUS_ID_SIZE); 188 strlcpy(adap->class_dev.class_id, adap->dev.bus_id, BUS_ID_SIZE);
189 class_device_register(&adap->class_dev); 189 class_device_register(&adap->class_dev);
190 190
191 dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
192
191 /* inform drivers of new adapters */ 193 /* inform drivers of new adapters */
192 list_for_each(item,&drivers) { 194 list_for_each(item,&drivers) {
193 driver = list_entry(item, struct i2c_driver, list); 195 driver = list_entry(item, struct i2c_driver, list);
@@ -196,8 +198,6 @@ int i2c_add_adapter(struct i2c_adapter *adap)
196 driver->attach_adapter(adap); 198 driver->attach_adapter(adap);
197 } 199 }
198 200
199 dev_dbg(&adap->dev, "registered as adapter #%d\n", adap->nr);
200
201out_unlock: 201out_unlock:
202 up(&core_lists); 202 up(&core_lists);
203 return res; 203 return res;
@@ -220,8 +220,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
220 break; 220 break;
221 } 221 }
222 if (adap_from_list != adap) { 222 if (adap_from_list != adap) {
223 pr_debug("I2C: Attempting to delete an unregistered " 223 pr_debug("i2c-core: attempting to delete unregistered "
224 "adapter\n"); 224 "adapter [%s]\n", adap->name);
225 res = -EINVAL; 225 res = -EINVAL;
226 goto out_unlock; 226 goto out_unlock;
227 } 227 }
@@ -230,9 +230,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
230 driver = list_entry(item, struct i2c_driver, list); 230 driver = list_entry(item, struct i2c_driver, list);
231 if (driver->detach_adapter) 231 if (driver->detach_adapter)
232 if ((res = driver->detach_adapter(adap))) { 232 if ((res = driver->detach_adapter(adap))) {
233 dev_warn(&adap->dev, "can't detach adapter " 233 dev_err(&adap->dev, "detach_adapter failed "
234 "while detaching driver %s: driver " 234 "for driver [%s]\n", driver->name);
235 "not detached!\n", driver->name);
236 goto out_unlock; 235 goto out_unlock;
237 } 236 }
238 } 237 }
@@ -247,9 +246,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
247 * must be deleted, as this would cause invalid states. 246 * must be deleted, as this would cause invalid states.
248 */ 247 */
249 if ((res=client->driver->detach_client(client))) { 248 if ((res=client->driver->detach_client(client))) {
250 dev_err(&adap->dev, "adapter not " 249 dev_err(&adap->dev, "detach_client failed for client "
251 "unregistered, because client at " 250 "[%s] at address 0x%02x\n", client->name,
252 "address %02x can't be detached. ",
253 client->addr); 251 client->addr);
254 goto out_unlock; 252 goto out_unlock;
255 } 253 }
@@ -270,7 +268,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
270 /* free dynamically allocated bus id */ 268 /* free dynamically allocated bus id */
271 idr_remove(&i2c_adapter_idr, adap->nr); 269 idr_remove(&i2c_adapter_idr, adap->nr);
272 270
273 dev_dbg(&adap->dev, "adapter unregistered\n"); 271 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
274 272
275 out_unlock: 273 out_unlock:
276 up(&core_lists); 274 up(&core_lists);
@@ -303,7 +301,7 @@ int i2c_add_driver(struct i2c_driver *driver)
303 goto out_unlock; 301 goto out_unlock;
304 302
305 list_add_tail(&driver->list,&drivers); 303 list_add_tail(&driver->list,&drivers);
306 pr_debug("i2c-core: driver %s registered.\n", driver->name); 304 pr_debug("i2c-core: driver [%s] registered\n", driver->name);
307 305
308 /* now look for instances of driver on our adapters */ 306 /* now look for instances of driver on our adapters */
309 if (driver->flags & I2C_DF_NOTIFY) { 307 if (driver->flags & I2C_DF_NOTIFY) {
@@ -331,21 +329,17 @@ int i2c_del_driver(struct i2c_driver *driver)
331 /* Have a look at each adapter, if clients of this driver are still 329 /* Have a look at each adapter, if clients of this driver are still
332 * attached. If so, detach them to be able to kill the driver 330 * attached. If so, detach them to be able to kill the driver
333 * afterwards. 331 * afterwards.
334 */ 332 *
335 pr_debug("i2c-core: unregister_driver - looking for clients.\n"); 333 * Removing clients does not depend on the notify flag, else
336 /* removing clients does not depend on the notify flag, else
337 * invalid operation might (will!) result, when using stale client 334 * invalid operation might (will!) result, when using stale client
338 * pointers. 335 * pointers.
339 */ 336 */
340 list_for_each(item1,&adapters) { 337 list_for_each(item1,&adapters) {
341 adap = list_entry(item1, struct i2c_adapter, list); 338 adap = list_entry(item1, struct i2c_adapter, list);
342 dev_dbg(&adap->dev, "examining adapter\n");
343 if (driver->detach_adapter) { 339 if (driver->detach_adapter) {
344 if ((res = driver->detach_adapter(adap))) { 340 if ((res = driver->detach_adapter(adap))) {
345 dev_warn(&adap->dev, "while unregistering " 341 dev_err(&adap->dev, "detach_adapter failed "
346 "dummy driver %s, adapter could " 342 "for driver [%s]\n", driver->name);
347 "not be detached properly; driver "
348 "not unloaded!",driver->name);
349 goto out_unlock; 343 goto out_unlock;
350 } 344 }
351 } else { 345 } else {
@@ -353,16 +347,13 @@ int i2c_del_driver(struct i2c_driver *driver)
353 client = list_entry(item2, struct i2c_client, list); 347 client = list_entry(item2, struct i2c_client, list);
354 if (client->driver != driver) 348 if (client->driver != driver)
355 continue; 349 continue;
356 pr_debug("i2c-core.o: detaching client %s:\n", client->name); 350 dev_dbg(&adap->dev, "detaching client [%s] "
351 "at 0x%02x\n", client->name,
352 client->addr);
357 if ((res = driver->detach_client(client))) { 353 if ((res = driver->detach_client(client))) {
358 dev_err(&adap->dev, "while " 354 dev_err(&adap->dev, "detach_client "
359 "unregistering driver " 355 "failed for client [%s] at "
360 "`%s', the client at " 356 "0x%02x\n", client->name,
361 "address %02x of "
362 "adapter could not "
363 "be detached; driver "
364 "not unloaded!",
365 driver->name,
366 client->addr); 357 client->addr);
367 goto out_unlock; 358 goto out_unlock;
368 } 359 }
@@ -372,7 +363,7 @@ int i2c_del_driver(struct i2c_driver *driver)
372 363
373 driver_unregister(&driver->driver); 364 driver_unregister(&driver->driver);
374 list_del(&driver->list); 365 list_del(&driver->list);
375 pr_debug("i2c-core: driver unregistered: %s\n", driver->name); 366 pr_debug("i2c-core: driver [%s] unregistered\n", driver->name);
376 367
377 out_unlock: 368 out_unlock:
378 up(&core_lists); 369 up(&core_lists);
@@ -417,15 +408,12 @@ int i2c_attach_client(struct i2c_client *client)
417 408
418 if (adapter->client_register) { 409 if (adapter->client_register) {
419 if (adapter->client_register(client)) { 410 if (adapter->client_register(client)) {
420 dev_warn(&adapter->dev, "warning: client_register " 411 dev_dbg(&adapter->dev, "client_register "
421 "seems to have failed for client %02x\n", 412 "failed for client [%s] at 0x%02x\n",
422 client->addr); 413 client->name, client->addr);
423 } 414 }
424 } 415 }
425 416
426 dev_dbg(&adapter->dev, "client [%s] registered to adapter\n",
427 client->name);
428
429 if (client->flags & I2C_CLIENT_ALLOW_USE) 417 if (client->flags & I2C_CLIENT_ALLOW_USE)
430 client->usage_count = 0; 418 client->usage_count = 0;
431 419
@@ -436,7 +424,8 @@ int i2c_attach_client(struct i2c_client *client)
436 424
437 snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id), 425 snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id),
438 "%d-%04x", i2c_adapter_id(adapter), client->addr); 426 "%d-%04x", i2c_adapter_id(adapter), client->addr);
439 pr_debug("registering %s\n", client->dev.bus_id); 427 dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
428 client->name, client->dev.bus_id);
440 device_register(&client->dev); 429 device_register(&client->dev);
441 device_create_file(&client->dev, &dev_attr_client_name); 430 device_create_file(&client->dev, &dev_attr_client_name);
442 431
@@ -449,8 +438,12 @@ int i2c_detach_client(struct i2c_client *client)
449 struct i2c_adapter *adapter = client->adapter; 438 struct i2c_adapter *adapter = client->adapter;
450 int res = 0; 439 int res = 0;
451 440
452 if ((client->flags & I2C_CLIENT_ALLOW_USE) && (client->usage_count > 0)) 441 if ((client->flags & I2C_CLIENT_ALLOW_USE)
442 && (client->usage_count > 0)) {
443 dev_warn(&client->dev, "Client [%s] still busy, "
444 "can't detach\n", client->name);
453 return -EBUSY; 445 return -EBUSY;
446 }
454 447
455 if (adapter->client_unregister) { 448 if (adapter->client_unregister) {
456 res = adapter->client_unregister(client); 449 res = adapter->client_unregister(client);
@@ -669,98 +662,128 @@ int i2c_control(struct i2c_client *client,
669 * Will not work for 10-bit addresses! 662 * Will not work for 10-bit addresses!
670 * ---------------------------------------------------- 663 * ----------------------------------------------------
671 */ 664 */
665static int i2c_probe_address(struct i2c_adapter *adapter, int addr, int kind,
666 int (*found_proc) (struct i2c_adapter *, int, int))
667{
668 int err;
669
670 /* Make sure the address is valid */
671 if (addr < 0x03 || addr > 0x77) {
672 dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n",
673 addr);
674 return -EINVAL;
675 }
676
677 /* Skip if already in use */
678 if (i2c_check_addr(adapter, addr))
679 return 0;
680
681 /* Make sure there is something at this address, unless forced */
682 if (kind < 0) {
683 if (i2c_smbus_xfer(adapter, addr, 0, 0, 0,
684 I2C_SMBUS_QUICK, NULL) < 0)
685 return 0;
686
687 /* prevent 24RF08 corruption */
688 if ((addr & ~0x0f) == 0x50)
689 i2c_smbus_xfer(adapter, addr, 0, 0, 0,
690 I2C_SMBUS_QUICK, NULL);
691 }
692
693 /* Finally call the custom detection function */
694 err = found_proc(adapter, addr, kind);
695
696 /* -ENODEV can be returned if there is a chip at the given address
697 but it isn't supported by this chip driver. We catch it here as
698 this isn't an error. */
699 return (err == -ENODEV) ? 0 : err;
700}
701
672int i2c_probe(struct i2c_adapter *adapter, 702int i2c_probe(struct i2c_adapter *adapter,
673 struct i2c_client_address_data *address_data, 703 struct i2c_client_address_data *address_data,
674 int (*found_proc) (struct i2c_adapter *, int, int)) 704 int (*found_proc) (struct i2c_adapter *, int, int))
675{ 705{
676 int addr,i,found,err; 706 int i, err;
677 int adap_id = i2c_adapter_id(adapter); 707 int adap_id = i2c_adapter_id(adapter);
678 708
679 /* Forget it if we can't probe using SMBUS_QUICK */ 709 /* Forget it if we can't probe using SMBUS_QUICK */
680 if (! i2c_check_functionality(adapter,I2C_FUNC_SMBUS_QUICK)) 710 if (! i2c_check_functionality(adapter,I2C_FUNC_SMBUS_QUICK))
681 return -1; 711 return -1;
682 712
683 for (addr = 0x00; addr <= 0x7f; addr++) { 713 /* Force entries are done first, and are not affected by ignore
684 714 entries */
685 /* Skip if already in use */ 715 if (address_data->forces) {
686 if (i2c_check_addr(adapter,addr)) 716 unsigned short **forces = address_data->forces;
687 continue; 717 int kind;
688 718
689 /* If it is in one of the force entries, we don't do any detection 719 for (kind = 0; forces[kind]; kind++) {
690 at all */ 720 for (i = 0; forces[kind][i] != I2C_CLIENT_END;
691 found = 0; 721 i += 2) {
692 722 if (forces[kind][i] == adap_id
693 for (i = 0; !found && (address_data->force[i] != I2C_CLIENT_END); i += 2) { 723 || forces[kind][i] == ANY_I2C_BUS) {
694 if (((adap_id == address_data->force[i]) || 724 dev_dbg(&adapter->dev, "found force "
695 (address_data->force[i] == ANY_I2C_BUS)) && 725 "parameter for adapter %d, "
696 (addr == address_data->force[i+1])) { 726 "addr 0x%02x, kind %d\n",
697 dev_dbg(&adapter->dev, "found force parameter for adapter %d, addr %04x\n", 727 adap_id, forces[kind][i + 1],
698 adap_id, addr); 728 kind);
699 if ((err = found_proc(adapter,addr,0))) 729 err = i2c_probe_address(adapter,
700 return err; 730 forces[kind][i + 1],
701 found = 1; 731 kind, found_proc);
702 } 732 if (err)
703 } 733 return err;
704 if (found) 734 }
705 continue;
706
707 /* If this address is in one of the ignores, we can forget about
708 it right now */
709 for (i = 0;
710 !found && (address_data->ignore[i] != I2C_CLIENT_END);
711 i += 2) {
712 if (((adap_id == address_data->ignore[i]) ||
713 ((address_data->ignore[i] == ANY_I2C_BUS))) &&
714 (addr == address_data->ignore[i+1])) {
715 dev_dbg(&adapter->dev, "found ignore parameter for adapter %d, "
716 "addr %04x\n", adap_id ,addr);
717 found = 1;
718 } 735 }
719 } 736 }
720 if (found) 737 }
721 continue;
722 738
723 /* Now, we will do a detection, but only if it is in the normal or 739 /* Probe entries are done second, and are not affected by ignore
724 probe entries */ 740 entries either */
725 for (i = 0; 741 for (i = 0; address_data->probe[i] != I2C_CLIENT_END; i += 2) {
726 !found && (address_data->normal_i2c[i] != I2C_CLIENT_END); 742 if (address_data->probe[i] == adap_id
727 i += 1) { 743 || address_data->probe[i] == ANY_I2C_BUS) {
728 if (addr == address_data->normal_i2c[i]) { 744 dev_dbg(&adapter->dev, "found probe parameter for "
729 found = 1; 745 "adapter %d, addr 0x%02x\n", adap_id,
730 dev_dbg(&adapter->dev, "found normal i2c entry for adapter %d, " 746 address_data->probe[i + 1]);
731 "addr %02x\n", adap_id, addr); 747 err = i2c_probe_address(adapter,
732 } 748 address_data->probe[i + 1],
749 -1, found_proc);
750 if (err)
751 return err;
733 } 752 }
753 }
734 754
735 for (i = 0; 755 /* Normal entries are done last, unless shadowed by an ignore entry */
736 !found && (address_data->probe[i] != I2C_CLIENT_END); 756 for (i = 0; address_data->normal_i2c[i] != I2C_CLIENT_END; i += 1) {
737 i += 2) { 757 int j, ignore;
738 if (((adap_id == address_data->probe[i]) || 758
739 ((address_data->probe[i] == ANY_I2C_BUS))) && 759 ignore = 0;
740 (addr == address_data->probe[i+1])) { 760 for (j = 0; address_data->ignore[j] != I2C_CLIENT_END;
741 found = 1; 761 j += 2) {
742 dev_dbg(&adapter->dev, "found probe parameter for adapter %d, " 762 if ((address_data->ignore[j] == adap_id ||
743 "addr %04x\n", adap_id,addr); 763 address_data->ignore[j] == ANY_I2C_BUS)
764 && address_data->ignore[j + 1]
765 == address_data->normal_i2c[i]) {
766 dev_dbg(&adapter->dev, "found ignore "
767 "parameter for adapter %d, "
768 "addr 0x%02x\n", adap_id,
769 address_data->ignore[j + 1]);
744 } 770 }
771 ignore = 1;
772 break;
745 } 773 }
746 if (!found) 774 if (ignore)
747 continue; 775 continue;
748 776
749 /* OK, so we really should examine this address. First check 777 dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
750 whether there is some client here at all! */ 778 "addr 0x%02x\n", adap_id,
751 if (i2c_smbus_xfer(adapter,addr,0,0,0,I2C_SMBUS_QUICK,NULL) >= 0) 779 address_data->normal_i2c[i]);
752 if ((err = found_proc(adapter,addr,-1))) 780 err = i2c_probe_address(adapter, address_data->normal_i2c[i],
753 return err; 781 -1, found_proc);
782 if (err)
783 return err;
754 } 784 }
755 return 0;
756}
757 785
758/* 786 return 0;
759 * return id number for a specific adapter
760 */
761int i2c_adapter_id(struct i2c_adapter *adap)
762{
763 return adap->nr;
764} 787}
765 788
766struct i2c_adapter* i2c_get_adapter(int id) 789struct i2c_adapter* i2c_get_adapter(int id)
@@ -1171,6 +1194,12 @@ s32 i2c_smbus_xfer(struct i2c_adapter * adapter, u16 addr, unsigned short flags,
1171} 1194}
1172 1195
1173 1196
1197/* Next four are needed by i2c-isa */
1198EXPORT_SYMBOL_GPL(i2c_adapter_dev_release);
1199EXPORT_SYMBOL_GPL(i2c_adapter_driver);
1200EXPORT_SYMBOL_GPL(i2c_adapter_class);
1201EXPORT_SYMBOL_GPL(i2c_bus_type);
1202
1174EXPORT_SYMBOL(i2c_add_adapter); 1203EXPORT_SYMBOL(i2c_add_adapter);
1175EXPORT_SYMBOL(i2c_del_adapter); 1204EXPORT_SYMBOL(i2c_del_adapter);
1176EXPORT_SYMBOL(i2c_add_driver); 1205EXPORT_SYMBOL(i2c_add_driver);
@@ -1186,7 +1215,6 @@ EXPORT_SYMBOL(i2c_master_send);
1186EXPORT_SYMBOL(i2c_master_recv); 1215EXPORT_SYMBOL(i2c_master_recv);
1187EXPORT_SYMBOL(i2c_control); 1216EXPORT_SYMBOL(i2c_control);
1188EXPORT_SYMBOL(i2c_transfer); 1217EXPORT_SYMBOL(i2c_transfer);
1189EXPORT_SYMBOL(i2c_adapter_id);
1190EXPORT_SYMBOL(i2c_get_adapter); 1218EXPORT_SYMBOL(i2c_get_adapter);
1191EXPORT_SYMBOL(i2c_put_adapter); 1219EXPORT_SYMBOL(i2c_put_adapter);
1192EXPORT_SYMBOL(i2c_probe); 1220EXPORT_SYMBOL(i2c_probe);
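
In the rewritten core, i2c_probe() now owns the ordering that i2c_detect() used to leave partly to the macros: force entries first (with their kind passed through and no presence check), then probe entries, then normal_i2c entries unless shadowed by an ignore entry, while i2c_probe_address() does the address sanity check, the busy check, the SMBUS_QUICK presence test and the 24RF08 double-quick workaround before invoking the driver callback, and swallows -ENODEV from it. From the driver's side the callback contract looks like the sketch below; everything named foo_/FOO_ is hypothetical, but the shape matches the converted chip drivers above:

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>

#define FOO_REG_ID      0x00    /* hypothetical identification register */
#define FOO_ID          0x55    /* hypothetical expected value */

struct foo_data {
        struct i2c_client client;
};

static struct i2c_driver foo_driver;    /* .attach_adapter etc. elided */

static int foo_detect(struct i2c_adapter *adapter, int address, int kind)
{
        struct i2c_client *client;
        struct foo_data *data;
        int err;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        client = &data->client;
        i2c_set_clientdata(client, data);
        client->addr = address;
        client->adapter = adapter;
        client->driver = &foo_driver;
        strlcpy(client->name, "foo", I2C_NAME_SIZE);

        /* kind < 0: the address came from probe/normal scanning, so only
         * "something answered" is known; verify the chip and return
         * -ENODEV (treated as "no error" by i2c_probe_address()) if it
         * is not ours.  kind >= 0: a force= parameter named us, trust it. */
        if (kind < 0 &&
            i2c_smbus_read_byte_data(client, FOO_REG_ID) != FOO_ID) {
                err = -ENODEV;
                goto exit_free;
        }

        err = i2c_attach_client(client);
        if (err)
                goto exit_free;
        return 0;

exit_free:
        kfree(data);
        return err;
}
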
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index bc5d557e5dd9..aa7a4fadef64 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -434,7 +434,8 @@ static int i2cdev_attach_adapter(struct i2c_adapter *adap)
434 434
435 devfs_mk_cdev(MKDEV(I2C_MAJOR, i2c_dev->minor), 435 devfs_mk_cdev(MKDEV(I2C_MAJOR, i2c_dev->minor),
436 S_IFCHR|S_IRUSR|S_IWUSR, "i2c/%d", i2c_dev->minor); 436 S_IFCHR|S_IRUSR|S_IWUSR, "i2c/%d", i2c_dev->minor);
437 dev_dbg(&adap->dev, "Registered as minor %d\n", i2c_dev->minor); 437 pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
438 adap->name, i2c_dev->minor);
438 439
439 /* register this i2c device with the driver core */ 440 /* register this i2c device with the driver core */
440 i2c_dev->adap = adap; 441 i2c_dev->adap = adap;
@@ -471,7 +472,7 @@ static int i2cdev_detach_adapter(struct i2c_adapter *adap)
471 wait_for_completion(&i2c_dev->released); 472 wait_for_completion(&i2c_dev->released);
472 kfree(i2c_dev); 473 kfree(i2c_dev);
473 474
474 dev_dbg(&adap->dev, "Adapter unregistered\n"); 475 pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
475 return 0; 476 return 0;
476} 477}
477 478
diff --git a/drivers/i2c/i2c-sensor-detect.c b/drivers/i2c/i2c-sensor-detect.c
deleted file mode 100644
index f99a8161a9f1..000000000000
--- a/drivers/i2c/i2c-sensor-detect.c
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 i2c-sensor-detect.c - Part of lm_sensors, Linux kernel modules for hardware
3 monitoring
4 Copyright (c) 1998 - 2001 Frodo Looijaard <frodol@dds.nl> and
5 Mark D. Studebaker <mdsxyz123@yahoo.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/i2c.h>
25#include <linux/i2c-sensor.h>
26
27static unsigned short empty[] = {I2C_CLIENT_END};
28static unsigned int empty_isa[] = {I2C_CLIENT_ISA_END};
29
30/* Very inefficient for ISA detects, and won't work for 10-bit addresses! */
31int i2c_detect(struct i2c_adapter *adapter,
32 struct i2c_address_data *address_data,
33 int (*found_proc) (struct i2c_adapter *, int, int))
34{
35 int addr, i, found, j, err;
36 struct i2c_force_data *this_force;
37 int is_isa = i2c_is_isa_adapter(adapter);
38 int adapter_id =
39 is_isa ? ANY_I2C_ISA_BUS : i2c_adapter_id(adapter);
40 unsigned short *normal_i2c;
41 unsigned int *normal_isa;
42 unsigned short *probe;
43 unsigned short *ignore;
44
45 /* Forget it if we can't probe using SMBUS_QUICK */
46 if ((!is_isa) &&
47 !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK))
48 return -1;
49
50 /* Use default "empty" list if the adapter doesn't specify any */
51 normal_i2c = probe = ignore = empty;
52 normal_isa = empty_isa;
53 if (address_data->normal_i2c)
54 normal_i2c = address_data->normal_i2c;
55 if (address_data->normal_isa)
56 normal_isa = address_data->normal_isa;
57 if (address_data->probe)
58 probe = address_data->probe;
59 if (address_data->ignore)
60 ignore = address_data->ignore;
61
62 for (addr = 0x00; addr <= (is_isa ? 0xffff : 0x7f); addr++) {
63 if (!is_isa && i2c_check_addr(adapter, addr))
64 continue;
65
66 /* If it is in one of the force entries, we don't do any
67 detection at all */
68 found = 0;
69 for (i = 0; !found && (this_force = address_data->forces + i, this_force->force); i++) {
70 for (j = 0; !found && (this_force->force[j] != I2C_CLIENT_END); j += 2) {
71 if ( ((adapter_id == this_force->force[j]) ||
72 ((this_force->force[j] == ANY_I2C_BUS) && !is_isa)) &&
73 (addr == this_force->force[j + 1]) ) {
74 dev_dbg(&adapter->dev, "found force parameter for adapter %d, addr %04x\n", adapter_id, addr);
75 if ((err = found_proc(adapter, addr, this_force->kind)))
76 return err;
77 found = 1;
78 }
79 }
80 }
81 if (found)
82 continue;
83
84 /* If this address is in one of the ignores, we can forget about it
85 right now */
86 for (i = 0; !found && (ignore[i] != I2C_CLIENT_END); i += 2) {
87 if ( ((adapter_id == ignore[i]) ||
88 ((ignore[i] == ANY_I2C_BUS) &&
89 !is_isa)) &&
90 (addr == ignore[i + 1])) {
91 dev_dbg(&adapter->dev, "found ignore parameter for adapter %d, addr %04x\n", adapter_id, addr);
92 found = 1;
93 }
94 }
95 if (found)
96 continue;
97
98 /* Now, we will do a detection, but only if it is in the normal or
99 probe entries */
100 if (is_isa) {
101 for (i = 0; !found && (normal_isa[i] != I2C_CLIENT_ISA_END); i += 1) {
102 if (addr == normal_isa[i]) {
103 dev_dbg(&adapter->dev, "found normal isa entry for adapter %d, addr %04x\n", adapter_id, addr);
104 found = 1;
105 }
106 }
107 } else {
108 for (i = 0; !found && (normal_i2c[i] != I2C_CLIENT_END); i += 1) {
109 if (addr == normal_i2c[i]) {
110 found = 1;
111 dev_dbg(&adapter->dev, "found normal i2c entry for adapter %d, addr %02x\n", adapter_id, addr);
112 }
113 }
114 }
115
116 for (i = 0;
117 !found && (probe[i] != I2C_CLIENT_END);
118 i += 2) {
119 if (((adapter_id == probe[i]) ||
120 ((probe[i] == ANY_I2C_BUS) && !is_isa))
121 && (addr == probe[i + 1])) {
122 dev_dbg(&adapter->dev, "found probe parameter for adapter %d, addr %04x\n", adapter_id, addr);
123 found = 1;
124 }
125 }
126 if (!found)
127 continue;
128
129 /* OK, so we really should examine this address. First check
130 whether there is some client here at all! */
131 if (is_isa ||
132 (i2c_smbus_xfer (adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) >= 0))
133 if ((err = found_proc(adapter, addr, -1)))
134 return err;
135 }
136 return 0;
137}
138
139EXPORT_SYMBOL(i2c_detect);
140
141MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
142 "Rudolf Marek <r.marek@sh.cvut.cz>");
143
144MODULE_DESCRIPTION("i2c-sensor driver");
145MODULE_LICENSE("GPL");
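
The deleted helper walked the force, ignore, normal and probe address lists for an adapter and, on plain i2c buses, confirmed a device with an SMBus quick transaction before handing the address to found_proc. A condensed sketch of that per-address decision, reduced to the quick-probe step (the wrapper name is made up; the i2c calls are the ones used in the removed code):

#include <linux/i2c.h>

/* Sketch: probe a single 7-bit address the way the removed i2c_detect()
 * did for non-ISA adapters.  Returns whatever found_proc returns, 0 if
 * nothing was found, or -1 if the adapter cannot do SMBus quick. */
static int sketch_probe_addr(struct i2c_adapter *adapter, int addr,
                             int (*found_proc)(struct i2c_adapter *, int, int))
{
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK))
		return -1;
	if (i2c_check_addr(adapter, addr))
		return 0;	/* address already claimed by another client */
	if (i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) < 0)
		return 0;	/* no device answered */
	return found_proc(adapter, addr, -1);	/* kind -1: let the driver identify the chip */
}
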
diff --git a/drivers/i2c/i2c-sensor-vid.c b/drivers/i2c/i2c-sensor-vid.c
deleted file mode 100644
index 922e22f054bb..000000000000
--- a/drivers/i2c/i2c-sensor-vid.c
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 i2c-sensor-vid.c - Part of lm_sensors, Linux kernel modules for hardware
3 monitoring
4
5 Copyright (c) 2004 Rudolf Marek <r.marek@sh.cvut.cz>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/config.h>
23#include <linux/module.h>
24#include <linux/kernel.h>
25
26struct vrm_model {
27 u8 vendor;
28 u8 eff_family;
29 u8 eff_model;
30 int vrm_type;
31};
32
33#define ANY 0xFF
34
35#ifdef CONFIG_X86
36
37static struct vrm_model vrm_models[] = {
38 {X86_VENDOR_AMD, 0x6, ANY, 90}, /* Athlon Duron etc */
39 {X86_VENDOR_AMD, 0xF, ANY, 24}, /* Athlon 64, Opteron */
40 {X86_VENDOR_INTEL, 0x6, 0x9, 85}, /* 0.13um too */
41 {X86_VENDOR_INTEL, 0x6, 0xB, 85}, /* 0xB Tualatin */
42 {X86_VENDOR_INTEL, 0x6, ANY, 82}, /* any P6 */
43 {X86_VENDOR_INTEL, 0x7, ANY, 0}, /* Itanium */
44 {X86_VENDOR_INTEL, 0xF, 0x3, 100}, /* P4 Prescott */
45 {X86_VENDOR_INTEL, 0xF, ANY, 90}, /* P4 before Prescott */
46 {X86_VENDOR_INTEL, 0x10,ANY, 0}, /* Itanium 2 */
47 {X86_VENDOR_UNKNOWN, ANY, ANY, 0} /* stop here */
48 };
49
50static int find_vrm(u8 eff_family, u8 eff_model, u8 vendor)
51{
52 int i = 0;
53
54 while (vrm_models[i].vendor!=X86_VENDOR_UNKNOWN) {
55 if (vrm_models[i].vendor==vendor)
56 if ((vrm_models[i].eff_family==eff_family)&& \
57 ((vrm_models[i].eff_model==eff_model)|| \
58 (vrm_models[i].eff_model==ANY)))
59 return vrm_models[i].vrm_type;
60 i++;
61 }
62
63 return 0;
64}
65
66int i2c_which_vrm(void)
67{
68 struct cpuinfo_x86 *c = cpu_data;
69 u32 eax;
70 u8 eff_family, eff_model;
71 int vrm_ret;
72
73 if (c->x86 < 6) return 0; /* any CPU with familly lower than 6
74 dont have VID and/or CPUID */
75 eax = cpuid_eax(1);
76 eff_family = ((eax & 0x00000F00)>>8);
77 eff_model = ((eax & 0x000000F0)>>4);
78 if (eff_family == 0xF) { /* use extended model & family */
79 eff_family += ((eax & 0x00F00000)>>20);
80 eff_model += ((eax & 0x000F0000)>>16)<<4;
81 }
82 vrm_ret = find_vrm(eff_family,eff_model,c->x86_vendor);
83 if (vrm_ret == 0)
84 printk(KERN_INFO "i2c-sensor.o: Unknown VRM version of your"
85 " x86 CPU\n");
86 return vrm_ret;
87}
88
89/* and now for something completely different for Non-x86 world*/
90#else
91int i2c_which_vrm(void)
92{
93 printk(KERN_INFO "i2c-sensor.o: Unknown VRM version of your CPU\n");
94 return 0;
95}
96#endif
97
98EXPORT_SYMBOL(i2c_which_vrm);
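
The removed i2c_which_vrm() decoded an effective CPU family and model from CPUID leaf 1, folding in the extended fields when the base family is 0xF, and then matched the pair against the vrm_models table. The decode step alone, as a small user-space program (the eax value is an invented example, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int eax = 0x00000F43;	/* example CPUID.1 EAX value */
	unsigned int eff_family = (eax >> 8) & 0xF;
	unsigned int eff_model  = (eax >> 4) & 0xF;

	if (eff_family == 0xF) {	/* fold in the extended family/model fields */
		eff_family += (eax >> 20) & 0xFF;
		eff_model  += ((eax >> 16) & 0xF) << 4;
	}
	printf("effective family 0x%x, model 0x%x\n", eff_family, eff_model);
	return 0;
}
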
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index c9d3a00a3c0c..234f5de3e929 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -754,7 +754,7 @@ static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
754 754
755 idedisk_prepare_flush(q, rq); 755 idedisk_prepare_flush(q, rq);
756 756
757 ret = blk_execute_rq(q, disk, rq); 757 ret = blk_execute_rq(q, disk, rq, 0);
758 758
759 /* 759 /*
760 * if we failed and caller wants error offset, get it 760 * if we failed and caller wants error offset, get it
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 248e3cc8b352..f174aee659e5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -150,7 +150,7 @@ static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 s
150 150
151 switch (rq->pm->pm_step) { 151 switch (rq->pm->pm_step) {
152 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) complete */ 152 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) complete */
153 if (rq->pm->pm_state == 4) 153 if (rq->pm->pm_state == PM_EVENT_FREEZE)
154 rq->pm->pm_step = ide_pm_state_completed; 154 rq->pm->pm_step = ide_pm_state_completed;
155 else 155 else
156 rq->pm->pm_step = idedisk_pm_standby; 156 rq->pm->pm_step = idedisk_pm_standby;
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index dae1bd5b8c3e..73ca8f73917d 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1229,7 +1229,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t state)
1229 rq.special = &args; 1229 rq.special = &args;
1230 rq.pm = &rqpm; 1230 rq.pm = &rqpm;
1231 rqpm.pm_step = ide_pm_state_start_suspend; 1231 rqpm.pm_step = ide_pm_state_start_suspend;
1232 rqpm.pm_state = state; 1232 rqpm.pm_state = state.event;
1233 1233
1234 return ide_do_drive_cmd(drive, &rq, ide_wait); 1234 return ide_do_drive_cmd(drive, &rq, ide_wait);
1235} 1235}
@@ -1248,7 +1248,7 @@ static int generic_ide_resume(struct device *dev)
1248 rq.special = &args; 1248 rq.special = &args;
1249 rq.pm = &rqpm; 1249 rq.pm = &rqpm;
1250 rqpm.pm_step = ide_pm_state_start_resume; 1250 rqpm.pm_step = ide_pm_state_start_resume;
1251 rqpm.pm_state = 0; 1251 rqpm.pm_state = PM_EVENT_ON;
1252 1252
1253 return ide_do_drive_cmd(drive, &rq, ide_head_wait); 1253 return ide_do_drive_cmd(drive, &rq, ide_head_wait);
1254} 1254}
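
The IDE hunks above are part of the switch from bare integers to pm_message_t: the request's pm_state field now carries state.event, the suspend-complete check compares against PM_EVENT_FREEZE rather than the magic 4, and resume uses PM_EVENT_ON. A minimal sketch of the convention, using a stand-in struct rather than the real IDE request types:

#include <linux/pm.h>
#include <linux/types.h>

/* Hypothetical container for the bits of IDE pm request state that
 * matter here; the real code keeps this in the block request. */
struct sketch_pm_req {
	int pm_step;
	u32 pm_state;		/* holds a PM_EVENT_* number, not a pm_message_t */
};

static void sketch_start_suspend(struct sketch_pm_req *rqpm, pm_message_t mesg)
{
	rqpm->pm_state = mesg.event;	/* was: rqpm->pm_state = mesg; */
}

static int sketch_suspend_is_freeze(const struct sketch_pm_req *rqpm)
{
	return rqpm->pm_state == PM_EVENT_FREEZE;	/* was: == 4 */
}
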
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 10592cec6c43..24e21b2838c1 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -350,9 +350,9 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
350{ 350{
351 ide_hwif_t *hwif = NULL; 351 ide_hwif_t *hwif = NULL;
352 352
353 printk("SC1200: suspend(%u)\n", state); 353 printk("SC1200: suspend(%u)\n", state.event);
354 354
355 if (state == 0) { 355 if (state.event == PM_EVENT_ON) {
356 // we only save state when going from full power to less 356 // we only save state when going from full power to less
357 357
358 // 358 //
@@ -386,8 +386,8 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
386 /* You don't need to iterate over disks -- sysfs should have done that for you already */ 386 /* You don't need to iterate over disks -- sysfs should have done that for you already */
387 387
388 pci_disable_device(dev); 388 pci_disable_device(dev);
389 pci_set_power_state(dev,state); 389 pci_set_power_state(dev, pci_choose_state(dev, state));
390 dev->current_state = state; 390 dev->current_state = state.event;
391 return 0; 391 return 0;
392} 392}
393 393
@@ -396,8 +396,8 @@ static int sc1200_resume (struct pci_dev *dev)
396 ide_hwif_t *hwif = NULL; 396 ide_hwif_t *hwif = NULL;
397 397
398printk("SC1200: resume\n"); 398printk("SC1200: resume\n");
399 pci_set_power_state(dev,0); // bring chip back from sleep state 399 pci_set_power_state(dev, PCI_D0); // bring chip back from sleep state
400 dev->current_state = 0; 400 dev->current_state = PM_EVENT_ON;
401 pci_enable_device(dev); 401 pci_enable_device(dev);
402 // 402 //
403 // loop over all interfaces that are part of this pci device: 403 // loop over all interfaces that are part of this pci device:
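
sc1200 now converts the pm_message_t into a PCI power state with pci_choose_state() instead of feeding the message straight to pci_set_power_state(), and resume asks for PCI_D0 explicitly. The same shape in isolation (a minimal sketch; real drivers typically also save and restore config space):

#include <linux/pci.h>
#include <linux/pm.h>

/* Minimal PCI suspend/resume pair using the pm_message_t interface. */
static int sketch_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));
	return 0;
}

static int sketch_pci_resume(struct pci_dev *dev)
{
	pci_set_power_state(dev, PCI_D0);
	pci_enable_device(dev);
	return 0;
}
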
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index ea65b070a367..87d1f8a1f41e 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1504,12 +1504,12 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1504} 1504}
1505 1505
1506static int 1506static int
1507pmac_ide_macio_suspend(struct macio_dev *mdev, u32 state) 1507pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t state)
1508{ 1508{
1509 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1509 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1510 int rc = 0; 1510 int rc = 0;
1511 1511
1512 if (state != mdev->ofdev.dev.power.power_state && state >= 2) { 1512 if (state.event != mdev->ofdev.dev.power.power_state.event && state.event >= PM_EVENT_SUSPEND) {
1513 rc = pmac_ide_do_suspend(hwif); 1513 rc = pmac_ide_do_suspend(hwif);
1514 if (rc == 0) 1514 if (rc == 0)
1515 mdev->ofdev.dev.power.power_state = state; 1515 mdev->ofdev.dev.power.power_state = state;
@@ -1524,10 +1524,10 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
1524 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1524 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1525 int rc = 0; 1525 int rc = 0;
1526 1526
1527 if (mdev->ofdev.dev.power.power_state != 0) { 1527 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
1528 rc = pmac_ide_do_resume(hwif); 1528 rc = pmac_ide_do_resume(hwif);
1529 if (rc == 0) 1529 if (rc == 0)
1530 mdev->ofdev.dev.power.power_state = 0; 1530 mdev->ofdev.dev.power.power_state = PMSG_ON;
1531 } 1531 }
1532 1532
1533 return rc; 1533 return rc;
@@ -1608,12 +1608,12 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1608} 1608}
1609 1609
1610static int 1610static int
1611pmac_ide_pci_suspend(struct pci_dev *pdev, u32 state) 1611pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1612{ 1612{
1613 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1613 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
1614 int rc = 0; 1614 int rc = 0;
1615 1615
1616 if (state != pdev->dev.power.power_state && state >= 2) { 1616 if (state.event != pdev->dev.power.power_state.event && state.event >= 2) {
1617 rc = pmac_ide_do_suspend(hwif); 1617 rc = pmac_ide_do_suspend(hwif);
1618 if (rc == 0) 1618 if (rc == 0)
1619 pdev->dev.power.power_state = state; 1619 pdev->dev.power.power_state = state;
@@ -1628,10 +1628,10 @@ pmac_ide_pci_resume(struct pci_dev *pdev)
1628 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1628 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
1629 int rc = 0; 1629 int rc = 0;
1630 1630
1631 if (pdev->dev.power.power_state != 0) { 1631 if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
1632 rc = pmac_ide_do_resume(hwif); 1632 rc = pmac_ide_do_resume(hwif);
1633 if (rc == 0) 1633 if (rc == 0)
1634 pdev->dev.power.power_state = 0; 1634 pdev->dev.power.power_state = PMSG_ON;
1635 } 1635 }
1636 1636
1637 return rc; 1637 return rc;
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index b248d89de8b4..d633770fac8e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
681 return; 681 return;
682 } 682 }
683 683
684 __skb_unlink(skb, skb->list); 684 __skb_unlink(skb, &host->pending_packet_queue);
685 685
686 if (packet->state == hpsb_queued) { 686 if (packet->state == hpsb_queued) {
687 packet->sendtime = jiffies; 687 packet->sendtime = jiffies;
@@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque)
989 packet = (struct hpsb_packet *)skb->data; 989 packet = (struct hpsb_packet *)skb->data;
990 990
991 if (time_before(packet->sendtime + expire, jiffies)) { 991 if (time_before(packet->sendtime + expire, jiffies)) {
992 __skb_unlink(skb, skb->list); 992 __skb_unlink(skb, &host->pending_packet_queue);
993 packet->state = hpsb_complete; 993 packet->state = hpsb_complete;
994 packet->ack_code = ACKX_TIMEOUT; 994 packet->ack_code = ACKX_TIMEOUT;
995 queue_packet_complete(packet); 995 queue_packet_complete(packet);
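
These two hunks follow the removal of the sk_buff->list backpointer from the networking core: __skb_unlink() callers must now name the queue the skb sits on, here the host's pending_packet_queue. The call under the new signature (locking is assumed to be handled by the caller, as in the code above):

#include <linux/skbuff.h>

/* Drop one packet skb from a pending queue it is known to be on. */
static void sketch_drop_pending(struct sk_buff_head *pending, struct sk_buff *skb)
{
	__skb_unlink(skb, pending);	/* was: __skb_unlink(skb, skb->list) */
}
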
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index bebcc47ab06c..b23322523ef5 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1068,6 +1068,8 @@ static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
1068 struct unit_directory *ud; 1068 struct unit_directory *ud;
1069 int i = 0; 1069 int i = 0;
1070 int length = 0; 1070 int length = 0;
1071 /* ieee1394:venNmoNspNverN */
1072 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
1071 1073
1072 if (!cdev) 1074 if (!cdev)
1073 return -ENODEV; 1075 return -ENODEV;
@@ -1094,6 +1096,12 @@ do { \
1094 PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid); 1096 PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid);
1095 PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id); 1097 PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id);
1096 PUT_ENVP("VERSION=%06x", ud->version); 1098 PUT_ENVP("VERSION=%06x", ud->version);
1099 snprintf(buf, sizeof(buf), "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
1100 ud->vendor_id,
1101 ud->model_id,
1102 ud->specifier_id,
1103 ud->version);
1104 PUT_ENVP("MODALIAS=%s", buf);
1097 1105
1098#undef PUT_ENVP 1106#undef PUT_ENVP
1099 1107
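
The added MODALIAS value has the fixed shape ieee1394:ven%08Xmo%08Xsp%08Xver%08X, and the buf declaration above sizes it term by term: 8 for "ieee1394", 1 for the colon, 3+8, 2+8, 2+8 and 3+8 for the four key/value pairs, plus 1 for the terminating NUL, 52 bytes in total. A stand-alone check of that arithmetic (user space; the IDs are invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* ieee1394:venNmoNspNverN -- sized exactly like the kernel buffer */
	char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
	unsigned int vendor_id = 0x00A0B8, model_id = 0x000102;	/* invented IDs */
	unsigned int specifier_id = 0x00609E, version = 0x010483;

	snprintf(buf, sizeof(buf), "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
	         vendor_id, model_id, specifier_id, version);
	printf("%s (%zu of %zu bytes used)\n", buf, strlen(buf) + 1, sizeof(buf));
	return 0;
}
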
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 36074e6eeebb..6b1ab875333b 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1464,26 +1464,6 @@ static int __devinit add_card(struct pci_dev *dev,
1464 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block } 1464 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1465 }; 1465 };
1466 1466
1467
1468#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
1469 union i2c_smbus_data data;
1470
1471 if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
1472 PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
1473 else
1474 {
1475 u16 addr;
1476 for (addr=0x00; addr < 0x100; addr++) {
1477 if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
1478 PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
1479 break;
1480 }
1481 else
1482 PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
1483 }
1484 }
1485#endif
1486
1487 /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we 1467 /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
1488 do it more efficiently in one transaction rather then using several reads */ 1468 do it more efficiently in one transaction rather then using several reads */
1489 if (i2c_transfer(i2c_ad, msg, 2) < 0) { 1469 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index fae1c2dcee51..211ba3223f65 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -463,7 +463,7 @@ alloc_group_attrs(ssize_t (*show)(struct ib_port *,
463 return NULL; 463 return NULL;
464 464
465 for (i = 0; i < len; i++) { 465 for (i = 0; i < len; i++) {
466 element = kcalloc(1, sizeof(struct port_table_attribute), 466 element = kzalloc(sizeof(struct port_table_attribute),
467 GFP_KERNEL); 467 GFP_KERNEL);
468 if (!element) 468 if (!element)
469 goto err; 469 goto err;
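
This is the first of a long run of mechanical conversions in this merge: kcalloc(1, size, flags) asks for one zeroed element, which kzalloc(size, flags) expresses directly and without the count multiplication. The pattern with a hypothetical structure (only the allocator call changes; the error handling stays the same):

#include <linux/slab.h>

struct sketch_dev {	/* hypothetical payload, stands in for port_table_attribute etc. */
	int id;
	void *priv;
};

static struct sketch_dev *sketch_alloc(void)
{
	/* was: kcalloc(1, sizeof(struct sketch_dev), GFP_KERNEL) */
	return kzalloc(sizeof(struct sketch_dev), GFP_KERNEL);
}
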
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 20e3a165989f..19c14c4beb44 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -160,6 +160,8 @@ struct input_event_compat {
160# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current)) 160# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
161#elif defined(CONFIG_ARCH_S390) 161#elif defined(CONFIG_ARCH_S390)
162# define COMPAT_TEST test_thread_flag(TIF_31BIT) 162# define COMPAT_TEST test_thread_flag(TIF_31BIT)
163#elif defined(CONFIG_MIPS)
164# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR)
163#else 165#else
164# define COMPAT_TEST test_thread_flag(TIF_32BIT) 166# define COMPAT_TEST test_thread_flag(TIF_32BIT)
165#endif 167#endif
@@ -391,6 +393,7 @@ static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
391 case EV_LED: bits = dev->ledbit; len = LED_MAX; break; 393 case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
392 case EV_SND: bits = dev->sndbit; len = SND_MAX; break; 394 case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
393 case EV_FF: bits = dev->ffbit; len = FF_MAX; break; 395 case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
396 case EV_SW: bits = dev->swbit; len = SW_MAX; break;
394 default: return -EINVAL; 397 default: return -EINVAL;
395 } 398 }
396 len = NBITS(len) * sizeof(long); 399 len = NBITS(len) * sizeof(long);
@@ -419,6 +422,13 @@ static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
419 return copy_to_user(p, dev->snd, len) ? -EFAULT : len; 422 return copy_to_user(p, dev->snd, len) ? -EFAULT : len;
420 } 423 }
421 424
425 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0))) {
426 int len;
427 len = NBITS(SW_MAX) * sizeof(long);
428 if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
429 return copy_to_user(p, dev->sw, len) ? -EFAULT : len;
430 }
431
422 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) { 432 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
423 int len; 433 int len;
424 if (!dev->name) return -ENOENT; 434 if (!dev->name) return -ENOENT;
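
With EV_SW plumbed into evdev, user space can read the current switch bitmap through the new EVIOCGSW ioctl, the same way EVIOCGKEY and EVIOCGLED work. A rough user-space sketch (the device node is an example, and it assumes headers from a kernel carrying this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	unsigned long sw[SW_MAX / (8 * sizeof(long)) + 1];
	int fd = open("/dev/input/event0", O_RDONLY);	/* example node */

	if (fd < 0)
		return 1;
	memset(sw, 0, sizeof(sw));
	if (ioctl(fd, EVIOCGSW(sizeof(sw)), sw) >= 0)
		printf("SW_0 is %s\n", (sw[0] & 1UL) ? "set" : "clear");
	close(fd);
	return 0;
}
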
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index a0118038330a..462f8d300aae 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -75,7 +75,7 @@ static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id
75 if (!request_region(ioport, iolen, "emu10k1-gp")) 75 if (!request_region(ioport, iolen, "emu10k1-gp"))
76 return -EBUSY; 76 return -EBUSY;
77 77
78 emu = kcalloc(1, sizeof(struct emu), GFP_KERNEL); 78 emu = kzalloc(sizeof(struct emu), GFP_KERNEL);
79 port = gameport_allocate_port(); 79 port = gameport_allocate_port();
80 if (!emu || !port) { 80 if (!emu || !port) {
81 printk(KERN_ERR "emu10k1-gp: Memory allocation failed\n"); 81 printk(KERN_ERR "emu10k1-gp: Memory allocation failed\n");
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 57615bc63906..47e93daa0fa7 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -83,7 +83,7 @@ static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device
83 struct fm801_gp *gp; 83 struct fm801_gp *gp;
84 struct gameport *port; 84 struct gameport *port;
85 85
86 gp = kcalloc(1, sizeof(struct fm801_gp), GFP_KERNEL); 86 gp = kzalloc(sizeof(struct fm801_gp), GFP_KERNEL);
87 port = gameport_allocate_port(); 87 port = gameport_allocate_port();
88 if (!gp || !port) { 88 if (!gp || !port) {
89 printk(KERN_ERR "fm801-gp: Memory allocation failed\n"); 89 printk(KERN_ERR "fm801-gp: Memory allocation failed\n");
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 70f051894a3c..d2e55dc956ba 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -142,7 +142,7 @@ static int ns558_isa_probe(int io)
142 return -EBUSY; 142 return -EBUSY;
143 } 143 }
144 144
145 ns558 = kcalloc(1, sizeof(struct ns558), GFP_KERNEL); 145 ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL);
146 port = gameport_allocate_port(); 146 port = gameport_allocate_port();
147 if (!ns558 || !port) { 147 if (!ns558 || !port) {
148 printk(KERN_ERR "ns558: Memory allocation failed.\n"); 148 printk(KERN_ERR "ns558: Memory allocation failed.\n");
@@ -215,7 +215,7 @@ static int ns558_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
215 if (!request_region(ioport, iolen, "ns558-pnp")) 215 if (!request_region(ioport, iolen, "ns558-pnp"))
216 return -EBUSY; 216 return -EBUSY;
217 217
218 ns558 = kcalloc(1, sizeof(struct ns558), GFP_KERNEL); 218 ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL);
219 port = gameport_allocate_port(); 219 port = gameport_allocate_port();
220 if (!ns558 || !port) { 220 if (!ns558 || !port) {
221 printk(KERN_ERR "ns558: Memory allocation failed\n"); 221 printk(KERN_ERR "ns558: Memory allocation failed\n");
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a275211c8e1e..88636a204525 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -89,6 +89,15 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
89 89
90 break; 90 break;
91 91
92 case EV_SW:
93
94 if (code > SW_MAX || !test_bit(code, dev->swbit) || !!test_bit(code, dev->sw) == value)
95 return;
96
97 change_bit(code, dev->sw);
98
99 break;
100
92 case EV_ABS: 101 case EV_ABS:
93 102
94 if (code > ABS_MAX || !test_bit(code, dev->absbit)) 103 if (code > ABS_MAX || !test_bit(code, dev->absbit))
@@ -402,6 +411,7 @@ static void input_call_hotplug(char *verb, struct input_dev *dev)
402 SPRINTF_BIT_A2(ledbit, "LED=", LED_MAX, EV_LED); 411 SPRINTF_BIT_A2(ledbit, "LED=", LED_MAX, EV_LED);
403 SPRINTF_BIT_A2(sndbit, "SND=", SND_MAX, EV_SND); 412 SPRINTF_BIT_A2(sndbit, "SND=", SND_MAX, EV_SND);
404 SPRINTF_BIT_A2(ffbit, "FF=", FF_MAX, EV_FF); 413 SPRINTF_BIT_A2(ffbit, "FF=", FF_MAX, EV_FF);
414 SPRINTF_BIT_A2(swbit, "SW=", SW_MAX, EV_SW);
405 415
406 envp[i++] = NULL; 416 envp[i++] = NULL;
407 417
@@ -490,6 +500,7 @@ static int input_devices_read(char *buf, char **start, off_t pos, int count, int
490 SPRINTF_BIT_B2(ledbit, "LED=", LED_MAX, EV_LED); 500 SPRINTF_BIT_B2(ledbit, "LED=", LED_MAX, EV_LED);
491 SPRINTF_BIT_B2(sndbit, "SND=", SND_MAX, EV_SND); 501 SPRINTF_BIT_B2(sndbit, "SND=", SND_MAX, EV_SND);
492 SPRINTF_BIT_B2(ffbit, "FF=", FF_MAX, EV_FF); 502 SPRINTF_BIT_B2(ffbit, "FF=", FF_MAX, EV_FF);
503 SPRINTF_BIT_B2(swbit, "SW=", SW_MAX, EV_SW);
493 504
494 len += sprintf(buf + len, "\n"); 505 len += sprintf(buf + len, "\n");
495 506
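
On the driver side a switch is advertised by setting EV_SW in evbit plus the individual SW_* code in swbit, and state changes are delivered with input_report_switch(); the new EV_SW case in input_event() above drops reports that do not change dev->sw. A condensed driver fragment (registration and the surrounding device setup are omitted):

#include <linux/input.h>

/* dev is assumed to be an input_dev that is about to be registered. */
static void sketch_declare_lid_switch(struct input_dev *dev)
{
	set_bit(EV_SW, dev->evbit);
	set_bit(SW_0, dev->swbit);
}

static void sketch_report_lid(struct input_dev *dev, int closed)
{
	input_report_switch(dev, SW_0, closed);
	input_sync(dev);
}
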
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index bf34f75b9467..bf65430181fa 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -269,7 +269,7 @@ static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv)
269 int i; 269 int i;
270 int err; 270 int err;
271 271
272 if (!(a3d = kcalloc(1, sizeof(struct a3d), GFP_KERNEL))) 272 if (!(a3d = kzalloc(sizeof(struct a3d), GFP_KERNEL)))
273 return -ENOMEM; 273 return -ENOMEM;
274 274
275 a3d->gameport = gameport; 275 a3d->gameport = gameport;
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 265962956c63..cf35ae638a0d 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -469,7 +469,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
469 int i; 469 int i;
470 int err; 470 int err;
471 471
472 if (!(port = kcalloc(1, sizeof(struct adi_port), GFP_KERNEL))) 472 if (!(port = kzalloc(sizeof(struct adi_port), GFP_KERNEL)))
473 return -ENOMEM; 473 return -ENOMEM;
474 474
475 port->gameport = gameport; 475 port->gameport = gameport;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c3a5739030c3..64b1313a3c66 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -655,7 +655,7 @@ static int analog_connect(struct gameport *gameport, struct gameport_driver *drv
655 int i; 655 int i;
656 int err; 656 int err;
657 657
658 if (!(port = kcalloc(1, sizeof(struct analog_port), GFP_KERNEL))) 658 if (!(port = kzalloc(sizeof(struct analog_port), GFP_KERNEL)))
659 return - ENOMEM; 659 return - ENOMEM;
660 660
661 err = analog_init_port(gameport, drv, port); 661 err = analog_init_port(gameport, drv, port);
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index a6002205328f..0b2e9fa26579 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -163,7 +163,7 @@ static int cobra_connect(struct gameport *gameport, struct gameport_driver *drv)
163 int i, j; 163 int i, j;
164 int err; 164 int err;
165 165
166 if (!(cobra = kcalloc(1, sizeof(struct cobra), GFP_KERNEL))) 166 if (!(cobra = kzalloc(sizeof(struct cobra), GFP_KERNEL)))
167 return -ENOMEM; 167 return -ENOMEM;
168 168
169 cobra->gameport = gameport; 169 cobra->gameport = gameport;
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index fbd3eed07f90..2a3e4bb2da50 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -572,7 +572,7 @@ static struct db9 __init *db9_probe(int *config, int nargs)
572 } 572 }
573 } 573 }
574 574
575 if (!(db9 = kcalloc(1, sizeof(struct db9), GFP_KERNEL))) { 575 if (!(db9 = kzalloc(sizeof(struct db9), GFP_KERNEL))) {
576 parport_put_port(pp); 576 parport_put_port(pp);
577 return NULL; 577 return NULL;
578 } 578 }
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 95bbdd302aad..5427bf9fc862 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -554,7 +554,7 @@ static struct gc __init *gc_probe(int *config, int nargs)
554 return NULL; 554 return NULL;
555 } 555 }
556 556
557 if (!(gc = kcalloc(1, sizeof(struct gc), GFP_KERNEL))) { 557 if (!(gc = kzalloc(sizeof(struct gc), GFP_KERNEL))) {
558 parport_put_port(pp); 558 parport_put_port(pp);
559 return NULL; 559 return NULL;
560 } 560 }
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 7d969420066c..8e4f92b115e6 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -242,7 +242,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
242 unsigned char data[GF2K_LENGTH]; 242 unsigned char data[GF2K_LENGTH];
243 int i, err; 243 int i, err;
244 244
245 if (!(gf2k = kcalloc(1, sizeof(struct gf2k), GFP_KERNEL))) 245 if (!(gf2k = kzalloc(sizeof(struct gf2k), GFP_KERNEL)))
246 return -ENOMEM; 246 return -ENOMEM;
247 247
248 gf2k->gameport = gameport; 248 gf2k->gameport = gameport;
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index d1500d2562d6..9d3f910dd568 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -301,7 +301,7 @@ static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
301 int i, j, t; 301 int i, j, t;
302 int err; 302 int err;
303 303
304 if (!(grip = kcalloc(1, sizeof(struct grip), GFP_KERNEL))) 304 if (!(grip = kzalloc(sizeof(struct grip), GFP_KERNEL)))
305 return -ENOMEM; 305 return -ENOMEM;
306 306
307 grip->gameport = gameport; 307 grip->gameport = gameport;
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index 0da7bd133ccf..da17eee6f574 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -607,7 +607,7 @@ static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
607 struct grip_mp *grip; 607 struct grip_mp *grip;
608 int err; 608 int err;
609 609
610 if (!(grip = kcalloc(1, sizeof(struct grip_mp), GFP_KERNEL))) 610 if (!(grip = kzalloc(sizeof(struct grip_mp), GFP_KERNEL)))
611 return -ENOMEM; 611 return -ENOMEM;
612 612
613 grip->gameport = gameport; 613 grip->gameport = gameport;
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index f93da7bc082d..6a70ec429f06 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -183,7 +183,7 @@ static int guillemot_connect(struct gameport *gameport, struct gameport_driver *
183 int i, t; 183 int i, t;
184 int err; 184 int err;
185 185
186 if (!(guillemot = kcalloc(1, sizeof(struct guillemot), GFP_KERNEL))) 186 if (!(guillemot = kzalloc(sizeof(struct guillemot), GFP_KERNEL)))
187 return -ENOMEM; 187 return -ENOMEM;
188 188
189 guillemot->gameport = gameport; 189 guillemot->gameport = gameport;
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 9d3f8c38cb09..d7b3472bd686 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -212,7 +212,7 @@ static int interact_connect(struct gameport *gameport, struct gameport_driver *d
212 int i, t; 212 int i, t;
213 int err; 213 int err;
214 214
215 if (!(interact = kcalloc(1, sizeof(struct interact), GFP_KERNEL))) 215 if (!(interact = kzalloc(sizeof(struct interact), GFP_KERNEL)))
216 return -ENOMEM; 216 return -ENOMEM;
217 217
218 interact->gameport = gameport; 218 interact->gameport = gameport;
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 47144a7ed9e7..9e0353721a35 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -590,7 +590,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
590 590
591 comment[0] = 0; 591 comment[0] = 0;
592 592
593 sw = kcalloc(1, sizeof(struct sw), GFP_KERNEL); 593 sw = kzalloc(sizeof(struct sw), GFP_KERNEL);
594 buf = kmalloc(SW_LENGTH, GFP_KERNEL); 594 buf = kmalloc(SW_LENGTH, GFP_KERNEL);
595 idbuf = kmalloc(SW_LENGTH, GFP_KERNEL); 595 idbuf = kmalloc(SW_LENGTH, GFP_KERNEL);
596 if (!sw || !buf || !idbuf) { 596 if (!sw || !buf || !idbuf) {
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 9eb9954cac6e..7431efc4330e 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -262,7 +262,7 @@ static int tmdc_connect(struct gameport *gameport, struct gameport_driver *drv)
262 int i, j, k, l, m; 262 int i, j, k, l, m;
263 int err; 263 int err;
264 264
265 if (!(tmdc = kcalloc(1, sizeof(struct tmdc), GFP_KERNEL))) 265 if (!(tmdc = kzalloc(sizeof(struct tmdc), GFP_KERNEL)))
266 return -ENOMEM; 266 return -ENOMEM;
267 267
268 tmdc->gameport = gameport; 268 tmdc->gameport = gameport;
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 28100d461cb7..0c5b9c8297cd 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -178,7 +178,7 @@ static struct tgfx __init *tgfx_probe(int *config, int nargs)
178 return NULL; 178 return NULL;
179 } 179 }
180 180
181 if (!(tgfx = kcalloc(1, sizeof(struct tgfx), GFP_KERNEL))) { 181 if (!(tgfx = kzalloc(sizeof(struct tgfx), GFP_KERNEL))) {
182 parport_put_port(pp); 182 parport_put_port(pp);
183 return NULL; 183 return NULL;
184 } 184 }
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index a8551711e8d6..cd4b6e795013 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/input.h> 17#include <linux/input.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <asm/irq.h> 22#include <asm/irq.h>
@@ -32,7 +33,6 @@
32/* zero code, 124 scancodes + 3 hinge combinations */ 33/* zero code, 124 scancodes + 3 hinge combinations */
33#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 +3 ) 34#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 +3 )
34#define SCAN_INTERVAL (HZ/10) 35#define SCAN_INTERVAL (HZ/10)
35#define CORGIKBD_PRESSED 1
36 36
37#define HINGE_SCAN_INTERVAL (HZ/4) 37#define HINGE_SCAN_INTERVAL (HZ/4)
38 38
@@ -73,25 +73,13 @@ struct corgikbd {
73 struct input_dev input; 73 struct input_dev input;
74 char phys[32]; 74 char phys[32];
75 75
76 unsigned char state[ARRAY_SIZE(corgikbd_keycode)];
77 spinlock_t lock; 76 spinlock_t lock;
78
79 struct timer_list timer; 77 struct timer_list timer;
80 struct timer_list htimer; 78 struct timer_list htimer;
81};
82 79
83static void handle_scancode(unsigned int pressed,unsigned int scancode, struct corgikbd *corgikbd_data) 80 unsigned int suspended;
84{ 81 unsigned long suspend_jiffies;
85 if (pressed && !(corgikbd_data->state[scancode] & CORGIKBD_PRESSED)) { 82};
86 corgikbd_data->state[scancode] |= CORGIKBD_PRESSED;
87 input_report_key(&corgikbd_data->input, corgikbd_data->keycode[scancode], 1);
88 if (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
89 input_event(&corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
90 } else if (!pressed && corgikbd_data->state[scancode] & CORGIKBD_PRESSED) {
91 corgikbd_data->state[scancode] &= ~CORGIKBD_PRESSED;
92 input_report_key(&corgikbd_data->input, corgikbd_data->keycode[scancode], 0);
93 }
94}
95 83
96#define KB_DISCHARGE_DELAY 10 84#define KB_DISCHARGE_DELAY 10
97#define KB_ACTIVATE_DELAY 10 85#define KB_ACTIVATE_DELAY 10
@@ -105,36 +93,36 @@ static void handle_scancode(unsigned int pressed,unsigned int scancode, struct c
105 */ 93 */
106static inline void corgikbd_discharge_all(void) 94static inline void corgikbd_discharge_all(void)
107{ 95{
108 // STROBE All HiZ 96 /* STROBE All HiZ */
109 GPCR2 = CORGI_GPIO_ALL_STROBE_BIT; 97 GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
110 GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT; 98 GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
111} 99}
112 100
113static inline void corgikbd_activate_all(void) 101static inline void corgikbd_activate_all(void)
114{ 102{
115 // STROBE ALL -> High 103 /* STROBE ALL -> High */
116 GPSR2 = CORGI_GPIO_ALL_STROBE_BIT; 104 GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
117 GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT; 105 GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
118 106
119 udelay(KB_DISCHARGE_DELAY); 107 udelay(KB_DISCHARGE_DELAY);
120 108
121 // Clear any interrupts we may have triggered when altering the GPIO lines 109 /* Clear any interrupts we may have triggered when altering the GPIO lines */
122 GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT; 110 GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
123 GEDR2 = CORGI_GPIO_LOW_SENSE_BIT; 111 GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
124} 112}
125 113
126static inline void corgikbd_activate_col(int col) 114static inline void corgikbd_activate_col(int col)
127{ 115{
128 // STROBE col -> High, not col -> HiZ 116 /* STROBE col -> High, not col -> HiZ */
129 GPSR2 = CORGI_GPIO_STROBE_BIT(col); 117 GPSR2 = CORGI_GPIO_STROBE_BIT(col);
130 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col); 118 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
131} 119}
132 120
133static inline void corgikbd_reset_col(int col) 121static inline void corgikbd_reset_col(int col)
134{ 122{
135 // STROBE col -> Low 123 /* STROBE col -> Low */
136 GPCR2 = CORGI_GPIO_STROBE_BIT(col); 124 GPCR2 = CORGI_GPIO_STROBE_BIT(col);
137 // STROBE col -> out, not col -> HiZ 125 /* STROBE col -> out, not col -> HiZ */
138 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col); 126 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
139} 127}
140 128
@@ -149,10 +137,13 @@ static inline void corgikbd_reset_col(int col)
149/* Scan the hardware keyboard and push any changes up through the input layer */ 137/* Scan the hardware keyboard and push any changes up through the input layer */
150static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs *regs) 138static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs *regs)
151{ 139{
152 unsigned int row, col, rowd, scancode; 140 unsigned int row, col, rowd;
153 unsigned long flags; 141 unsigned long flags;
154 unsigned int num_pressed; 142 unsigned int num_pressed;
155 143
144 if (corgikbd_data->suspended)
145 return;
146
156 spin_lock_irqsave(&corgikbd_data->lock, flags); 147 spin_lock_irqsave(&corgikbd_data->lock, flags);
157 148
158 if (regs) 149 if (regs)
@@ -173,10 +164,21 @@ static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs
173 164
174 rowd = GET_ROWS_STATUS(col); 165 rowd = GET_ROWS_STATUS(col);
175 for (row = 0; row < KB_ROWS; row++) { 166 for (row = 0; row < KB_ROWS; row++) {
167 unsigned int scancode, pressed;
168
176 scancode = SCANCODE(row, col); 169 scancode = SCANCODE(row, col);
177 handle_scancode((rowd & KB_ROWMASK(row)), scancode, corgikbd_data); 170 pressed = rowd & KB_ROWMASK(row);
178 if (rowd & KB_ROWMASK(row)) 171
172 input_report_key(&corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
173
174 if (pressed)
179 num_pressed++; 175 num_pressed++;
176
177 if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
178 && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
179 input_event(&corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
180 corgikbd_data->suspend_jiffies=jiffies;
181 }
180 } 182 }
181 corgikbd_reset_col(col); 183 corgikbd_reset_col(col);
182 } 184 }
@@ -221,8 +223,11 @@ static void corgikbd_timer_callback(unsigned long data)
221 * The hinge switches generate no interrupt so they need to be 223 * The hinge switches generate no interrupt so they need to be
222 * monitored by a timer. 224 * monitored by a timer.
223 * 225 *
224 * When we detect changes, we debounce it and then pass the three 226 * We debounce the switches and pass them to the input system.
225 * positions the system can take as keypresses to the input system. 227 *
228 * gprr == 0x00 - Keyboard with Landscape Screen
229 * 0x08 - No Keyboard with Portrait Screen
230 * 0x0c - Keyboard and Screen Closed
226 */ 231 */
227 232
228#define HINGE_STABLE_COUNT 2 233#define HINGE_STABLE_COUNT 2
@@ -235,7 +240,7 @@ static void corgikbd_hinge_timer(unsigned long data)
235 unsigned long gprr; 240 unsigned long gprr;
236 unsigned long flags; 241 unsigned long flags;
237 242
238 gprr = read_scoop_reg(SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB); 243 gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
239 if (gprr != sharpsl_hinge_state) { 244 if (gprr != sharpsl_hinge_state) {
240 hinge_count = 0; 245 hinge_count = 0;
241 sharpsl_hinge_state = gprr; 246 sharpsl_hinge_state = gprr;
@@ -244,9 +249,8 @@ static void corgikbd_hinge_timer(unsigned long data)
244 if (hinge_count >= HINGE_STABLE_COUNT) { 249 if (hinge_count >= HINGE_STABLE_COUNT) {
245 spin_lock_irqsave(&corgikbd_data->lock, flags); 250 spin_lock_irqsave(&corgikbd_data->lock, flags);
246 251
247 handle_scancode((sharpsl_hinge_state == 0x00), 125, corgikbd_data); /* Keyboard with Landscape Screen */ 252 input_report_switch(&corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
248 handle_scancode((sharpsl_hinge_state == 0x08), 126, corgikbd_data); /* No Keyboard with Portrait Screen */ 253 input_report_switch(&corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
249 handle_scancode((sharpsl_hinge_state == 0x0c), 127, corgikbd_data); /* Keyboard and Screen Closed */
250 input_sync(&corgikbd_data->input); 254 input_sync(&corgikbd_data->input);
251 255
252 spin_unlock_irqrestore(&corgikbd_data->lock, flags); 256 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
@@ -255,19 +259,45 @@ static void corgikbd_hinge_timer(unsigned long data)
255 mod_timer(&corgikbd_data->htimer, jiffies + HINGE_SCAN_INTERVAL); 259 mod_timer(&corgikbd_data->htimer, jiffies + HINGE_SCAN_INTERVAL);
256} 260}
257 261
262#ifdef CONFIG_PM
263static int corgikbd_suspend(struct device *dev, pm_message_t state, uint32_t level)
264{
265 if (level == SUSPEND_POWER_DOWN) {
266 struct corgikbd *corgikbd = dev_get_drvdata(dev);
267 corgikbd->suspended = 1;
268 }
269 return 0;
270}
271
272static int corgikbd_resume(struct device *dev, uint32_t level)
273{
274 if (level == RESUME_POWER_ON) {
275 struct corgikbd *corgikbd = dev_get_drvdata(dev);
276
277 /* Upon resume, ignore the suspend key for a short while */
278 corgikbd->suspend_jiffies=jiffies;
279 corgikbd->suspended = 0;
280 }
281 return 0;
282}
283#else
284#define corgikbd_suspend NULL
285#define corgikbd_resume NULL
286#endif
287
258static int __init corgikbd_probe(struct device *dev) 288static int __init corgikbd_probe(struct device *dev)
259{ 289{
260 int i; 290 int i;
261 struct corgikbd *corgikbd; 291 struct corgikbd *corgikbd;
262 292
263 corgikbd = kcalloc(1, sizeof(struct corgikbd), GFP_KERNEL); 293 corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
264 if (!corgikbd) 294 if (!corgikbd)
265 return -ENOMEM; 295 return -ENOMEM;
266 296
267 dev_set_drvdata(dev,corgikbd); 297 dev_set_drvdata(dev,corgikbd);
268 strcpy(corgikbd->phys, "corgikbd/input0"); 298 strcpy(corgikbd->phys, "corgikbd/input0");
269 299
270 spin_lock_init(corgikbd->lock); 300 spin_lock_init(&corgikbd->lock);
271 301
272 /* Init Keyboard rescan timer */ 302 /* Init Keyboard rescan timer */
273 init_timer(&corgikbd->timer); 303 init_timer(&corgikbd->timer);
@@ -279,6 +309,8 @@ static int __init corgikbd_probe(struct device *dev)
279 corgikbd->htimer.function = corgikbd_hinge_timer; 309 corgikbd->htimer.function = corgikbd_hinge_timer;
280 corgikbd->htimer.data = (unsigned long) corgikbd; 310 corgikbd->htimer.data = (unsigned long) corgikbd;
281 311
312 corgikbd->suspend_jiffies=jiffies;
313
282 init_input_dev(&corgikbd->input); 314 init_input_dev(&corgikbd->input);
283 corgikbd->input.private = corgikbd; 315 corgikbd->input.private = corgikbd;
284 corgikbd->input.name = "Corgi Keyboard"; 316 corgikbd->input.name = "Corgi Keyboard";
@@ -288,7 +320,7 @@ static int __init corgikbd_probe(struct device *dev)
288 corgikbd->input.id.vendor = 0x0001; 320 corgikbd->input.id.vendor = 0x0001;
289 corgikbd->input.id.product = 0x0001; 321 corgikbd->input.id.product = 0x0001;
290 corgikbd->input.id.version = 0x0100; 322 corgikbd->input.id.version = 0x0100;
291 corgikbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP) | BIT(EV_PWR); 323 corgikbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP) | BIT(EV_PWR) | BIT(EV_SW);
292 corgikbd->input.keycode = corgikbd->keycode; 324 corgikbd->input.keycode = corgikbd->keycode;
293 corgikbd->input.keycodesize = sizeof(unsigned char); 325 corgikbd->input.keycodesize = sizeof(unsigned char);
294 corgikbd->input.keycodemax = ARRAY_SIZE(corgikbd_keycode); 326 corgikbd->input.keycodemax = ARRAY_SIZE(corgikbd_keycode);
@@ -297,6 +329,8 @@ static int __init corgikbd_probe(struct device *dev)
297 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++) 329 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
298 set_bit(corgikbd->keycode[i], corgikbd->input.keybit); 330 set_bit(corgikbd->keycode[i], corgikbd->input.keybit);
299 clear_bit(0, corgikbd->input.keybit); 331 clear_bit(0, corgikbd->input.keybit);
332 set_bit(SW_0, corgikbd->input.swbit);
333 set_bit(SW_1, corgikbd->input.swbit);
300 334
301 input_register_device(&corgikbd->input); 335 input_register_device(&corgikbd->input);
302 mod_timer(&corgikbd->htimer, jiffies + HINGE_SCAN_INTERVAL); 336 mod_timer(&corgikbd->htimer, jiffies + HINGE_SCAN_INTERVAL);
@@ -343,6 +377,8 @@ static struct device_driver corgikbd_driver = {
343 .bus = &platform_bus_type, 377 .bus = &platform_bus_type,
344 .probe = corgikbd_probe, 378 .probe = corgikbd_probe,
345 .remove = corgikbd_remove, 379 .remove = corgikbd_remove,
380 .suspend = corgikbd_suspend,
381 .resume = corgikbd_resume,
346}; 382};
347 383
348static int __devinit corgikbd_init(void) 384static int __devinit corgikbd_init(void)
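
The corgikbd rework drops the private pressed-state array (the input core already filters repeated key reports), reports the hinge positions as SW_0/SW_1 switches, and adds suspend/resume hooks in the old driver-model style with a level argument. The suspend_jiffies field keeps the EV_PWR off-key from firing again within a second of resume; that debounce in isolation (the key code is driver specific and left as a parameter):

#include <linux/input.h>
#include <linux/jiffies.h>

/* Emit the power-off event at most once per second after resume. */
static void sketch_maybe_power_off(struct input_dev *input,
                                   unsigned long *suspend_jiffies,
                                   unsigned int off_keycode)
{
	if (time_after(jiffies, *suspend_jiffies + HZ)) {
		input_event(input, EV_PWR, off_keycode, 1);
		*suspend_jiffies = jiffies;
	}
}
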
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 2bb2fe78bdca..12bdd3eff923 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -883,7 +883,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
883 psmouse_deactivate(parent); 883 psmouse_deactivate(parent);
884 } 884 }
885 885
886 if (!(psmouse = kcalloc(1, sizeof(struct psmouse), GFP_KERNEL))) { 886 if (!(psmouse = kzalloc(sizeof(struct psmouse), GFP_KERNEL))) {
887 retval = -ENOMEM; 887 retval = -ENOMEM;
888 goto out; 888 goto out;
889 } 889 }
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 79ca38469159..1bd88fca0542 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -87,7 +87,7 @@ static int serport_ldisc_open(struct tty_struct *tty)
87 if (!capable(CAP_SYS_ADMIN)) 87 if (!capable(CAP_SYS_ADMIN))
88 return -EPERM; 88 return -EPERM;
89 89
90 serport = kcalloc(1, sizeof(struct serport), GFP_KERNEL); 90 serport = kzalloc(sizeof(struct serport), GFP_KERNEL);
91 if (!serport) 91 if (!serport)
92 return -ENOMEM; 92 return -ENOMEM;
93 93
@@ -165,7 +165,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
165 if (test_and_set_bit(SERPORT_BUSY, &serport->flags)) 165 if (test_and_set_bit(SERPORT_BUSY, &serport->flags))
166 return -EBUSY; 166 return -EBUSY;
167 167
168 serport->serio = serio = kcalloc(1, sizeof(struct serio), GFP_KERNEL); 168 serport->serio = serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
169 if (!serio) 169 if (!serio)
170 return -ENOMEM; 170 return -ENOMEM;
171 171
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 3f8b61cfbc37..5d19261b884f 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -53,11 +53,8 @@ struct corgi_ts {
53 53
54#define SyncHS() while((STATUS_HSYNC) == 0); while((STATUS_HSYNC) != 0); 54#define SyncHS() while((STATUS_HSYNC) == 0); while((STATUS_HSYNC) != 0);
55#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a)) 55#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
56#define CCNT_ON() {int pmnc = 1; asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(pmnc));} 56#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
57#define CCNT_OFF() {int pmnc = 0; asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(pmnc));} 57#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
58
59#define WAIT_HS_400_VGA 7013U // 17.615us
60#define WAIT_HS_400_QVGA 16622U // 41.750us
61 58
62 59
63/* ADS7846 Touch Screen Controller bit definitions */ 60/* ADS7846 Touch Screen Controller bit definitions */
@@ -69,41 +66,29 @@ struct corgi_ts {
69#define ADSCTRL_STS (1u << 7) /* Start Bit */ 66#define ADSCTRL_STS (1u << 7) /* Start Bit */
70 67
71/* External Functions */ 68/* External Functions */
72extern int w100fb_get_xres(void); 69extern unsigned long w100fb_get_hsynclen(struct device *dev);
73extern int w100fb_get_blanking(void);
74extern int w100fb_get_fastsysclk(void);
75extern unsigned int get_clk_frequency_khz(int info); 70extern unsigned int get_clk_frequency_khz(int info);
76 71
77static unsigned long calc_waittime(void) 72static unsigned long calc_waittime(void)
78{ 73{
79 int w100fb_xres = w100fb_get_xres(); 74 unsigned long hsync_len = w100fb_get_hsynclen(&corgifb_device.dev);
80 unsigned int waittime = 0;
81
82 if (w100fb_xres == 480 || w100fb_xres == 640) {
83 waittime = WAIT_HS_400_VGA * get_clk_frequency_khz(0) / 398131U;
84
85 if (w100fb_get_fastsysclk() == 100)
86 waittime = waittime * 75 / 100;
87
88 if (w100fb_xres == 640)
89 waittime *= 3;
90 75
91 return waittime; 76 if (hsync_len)
92 } 77 return get_clk_frequency_khz(0)*1000/hsync_len;
93 78 else
94 return WAIT_HS_400_QVGA * get_clk_frequency_khz(0) / 398131U; 79 return 0;
95} 80}
96 81
97static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int address, unsigned long wait_time) 82static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int address, unsigned long wait_time)
98{ 83{
84 unsigned long timer1 = 0, timer2, pmnc = 0;
99 int pos = 0; 85 int pos = 0;
100 unsigned long timer1 = 0, timer2;
101 int dosleep;
102 86
103 dosleep = !w100fb_get_blanking(); 87 if (wait_time && doSend) {
88 PMNC_GET(pmnc);
89 if (!(pmnc & 0x01))
90 PMNC_SET(0x01);
104 91
105 if (dosleep && doSend) {
106 CCNT_ON();
107 /* polling HSync */ 92 /* polling HSync */
108 SyncHS(); 93 SyncHS();
109 /* get CCNT */ 94 /* get CCNT */
@@ -119,11 +104,11 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
119 corgi_ssp_ads7846_put(cmd); 104 corgi_ssp_ads7846_put(cmd);
120 corgi_ssp_ads7846_get(); 105 corgi_ssp_ads7846_get();
121 106
122 if (dosleep) { 107 if (wait_time) {
123 /* Wait after HSync */ 108 /* Wait after HSync */
124 CCNT(timer2); 109 CCNT(timer2);
125 if (timer2-timer1 > wait_time) { 110 if (timer2-timer1 > wait_time) {
126 /* timeout */ 111 /* too slow - timeout, try again */
127 SyncHS(); 112 SyncHS();
128 /* get OSCR */ 113 /* get OSCR */
129 CCNT(timer1); 114 CCNT(timer1);
@@ -134,8 +119,8 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
134 CCNT(timer2); 119 CCNT(timer2);
135 } 120 }
136 corgi_ssp_ads7846_put(cmd); 121 corgi_ssp_ads7846_put(cmd);
137 if (dosleep) 122 if (wait_time && !(pmnc & 0x01))
138 CCNT_OFF(); 123 PMNC_SET(pmnc);
139 } 124 }
140 return pos; 125 return pos;
141} 126}
@@ -244,7 +229,7 @@ static irqreturn_t ts_interrupt(int irq, void *dev_id, struct pt_regs *regs)
244} 229}
245 230
246#ifdef CONFIG_PM 231#ifdef CONFIG_PM
247static int corgits_suspend(struct device *dev, uint32_t state, uint32_t level) 232static int corgits_suspend(struct device *dev, pm_message_t state, uint32_t level)
248{ 233{
249 if (level == SUSPEND_POWER_DOWN) { 234 if (level == SUSPEND_POWER_DOWN) {
250 struct corgi_ts *corgi_ts = dev_get_drvdata(dev); 235 struct corgi_ts *corgi_ts = dev_get_drvdata(dev);
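
corgi_ts stops hard-coding per-resolution HSync wait constants and instead derives the wait from the framebuffer-reported hsync figure and the current CPU clock, clk_khz * 1000 / hsync_len, with zero meaning no wait; it also saves and restores the performance monitor control register (PMNC) around its use of the cycle counter instead of switching it on and off unconditionally. The new formula on its own (a user-space sketch; the units follow whatever w100fb_get_hsynclen() reports, and the sample numbers are invented):

#include <stdio.h>

/* waittime = CPU clock in kHz, scaled to Hz, divided by the hsync figure. */
static unsigned long calc_waittime(unsigned long clk_khz, unsigned long hsync_len)
{
	if (!hsync_len)
		return 0;
	return clk_khz * 1000 / hsync_len;
}

int main(void)
{
	printf("waittime = %lu\n", calc_waittime(398131, 64000));	/* invented inputs */
	return 0;
}
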
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index afa46681f983..6ae6eb322111 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
606 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && 606 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
607 (m->msg.data_b3_req.blocknr == blocknr)) { 607 (m->msg.data_b3_req.blocknr == blocknr)) {
608 /* found corresponding DATA_B3_REQ */ 608 /* found corresponding DATA_B3_REQ */
609 skb_unlink(tmp); 609 skb_unlink(tmp, &card->ackq);
610 chan->queued -= m->msg.data_b3_req.datalen; 610 chan->queued -= m->msg.data_b3_req.datalen;
611 if (m->msg.data_b3_req.flags) 611 if (m->msg.data_b3_req.flags)
612 ret = m->msg.data_b3_req.datalen; 612 ret = m->msg.data_b3_req.datalen;
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 17cf7663c582..26c545fa223b 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -10,7 +10,6 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/major.h> 12#include <linux/major.h>
13#include <asm/segment.h>
14#include <asm/io.h> 13#include <asm/io.h>
15#include <linux/delay.h> 14#include <linux/delay.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -1242,6 +1241,8 @@ struct IsdnCardState {
1242 1241
1243#ifdef CONFIG_HISAX_ENTERNOW_PCI 1242#ifdef CONFIG_HISAX_ENTERNOW_PCI
1244#define CARD_FN_ENTERNOW_PCI 1 1243#define CARD_FN_ENTERNOW_PCI 1
1244#else
1245#define CARD_FN_ENTERNOW_PCI 0
1245#endif 1246#endif
1246 1247
1247#define TEI_PER_CARD 1 1248#define TEI_PER_CARD 1
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index f30e8e63ae0d..96c115e13389 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1786,7 +1786,6 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1786 lp->stats.rx_bytes += skb->len; 1786 lp->stats.rx_bytes += skb->len;
1787 } 1787 }
1788 skb->dev = ndev; 1788 skb->dev = ndev;
1789 skb->input_dev = ndev;
1790 skb->pkt_type = PACKET_HOST; 1789 skb->pkt_type = PACKET_HOST;
1791 skb->mac.raw = skb->data; 1790 skb->mac.raw = skb->data;
1792#ifdef ISDN_DEBUG_NET_DUMP 1791#ifdef ISDN_DEBUG_NET_DUMP
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 260a323a96d3..d97a9be5469c 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1177,7 +1177,6 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1177 mlp->huptimer = 0; 1177 mlp->huptimer = 0;
1178#endif /* CONFIG_IPPP_FILTER */ 1178#endif /* CONFIG_IPPP_FILTER */
1179 skb->dev = dev; 1179 skb->dev = dev;
1180 skb->input_dev = dev;
1181 skb->mac.raw = skb->data; 1180 skb->mac.raw = skb->data;
1182 netif_rx(skb); 1181 netif_rx(skb);
1183 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ 1182 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index f47f2b9846d8..38619e8cd823 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -516,11 +516,11 @@ buffer_full:
516} 516}
517 517
518int 518int
519isdn_v110_stat_callback(int idx, isdn_ctrl * c) 519isdn_v110_stat_callback(int idx, isdn_ctrl *c)
520{ 520{
521 isdn_v110_stream *v = NULL; 521 isdn_v110_stream *v = NULL;
522 int i; 522 int i;
523 int ret; 523 int ret = 0;
524 524
525 if (idx < 0) 525 if (idx < 0)
526 return 0; 526 return 0;
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 7c16c25fc5d4..c0712a1ea5af 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -708,7 +708,7 @@ static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
708{ 708{
709 struct media_bay_info *bay = macio_get_drvdata(mdev); 709 struct media_bay_info *bay = macio_get_drvdata(mdev);
710 710
711 if (state != mdev->ofdev.dev.power.power_state && state == PM_SUSPEND_MEM) { 711 if (state.event != mdev->ofdev.dev.power.power_state.event && state.event == PM_EVENT_SUSPEND) {
712 down(&bay->lock); 712 down(&bay->lock);
713 bay->sleeping = 1; 713 bay->sleeping = 1;
714 set_mb_power(bay, 0); 714 set_mb_power(bay, 0);
@@ -723,8 +723,8 @@ static int __pmac media_bay_resume(struct macio_dev *mdev)
723{ 723{
724 struct media_bay_info *bay = macio_get_drvdata(mdev); 724 struct media_bay_info *bay = macio_get_drvdata(mdev);
725 725
726 if (mdev->ofdev.dev.power.power_state != 0) { 726 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
727 mdev->ofdev.dev.power.power_state = 0; 727 mdev->ofdev.dev.power.power_state = PMSG_ON;
728 728
729 /* We re-enable the bay using it's previous content 729 /* We re-enable the bay using it's previous content
730 only if it did not change. Note those bozo timings, 730 only if it did not change. Note those bozo timings,
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 4a0a0ad2d03c..645a2e5c70ab 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -3065,7 +3065,7 @@ static int pmu_sys_suspended = 0;
3065 3065
3066static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state) 3066static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
3067{ 3067{
3068 if (state != PM_SUSPEND_DISK || pmu_sys_suspended) 3068 if (state.event != PM_EVENT_SUSPEND || pmu_sys_suspended)
3069 return 0; 3069 return 0;
3070 3070
3071 /* Suspend PMU event interrupts */ 3071 /* Suspend PMU event interrupts */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d0a4bab220e5..b82bc3150476 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -144,7 +144,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
144 } 144 }
145 145
146 /* Hash the cipher key with the given hash algorithm */ 146 /* Hash the cipher key with the given hash algorithm */
147 hash_tfm = crypto_alloc_tfm(opts, 0); 147 hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
148 if (hash_tfm == NULL) { 148 if (hash_tfm == NULL) {
149 ti->error = PFX "Error initializing ESSIV hash"; 149 ti->error = PFX "Error initializing ESSIV hash";
150 return -EINVAL; 150 return -EINVAL;
@@ -172,7 +172,8 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
172 172
173 /* Setup the essiv_tfm with the given salt */ 173 /* Setup the essiv_tfm with the given salt */
174 essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), 174 essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
175 CRYPTO_TFM_MODE_ECB); 175 CRYPTO_TFM_MODE_ECB |
176 CRYPTO_TFM_REQ_MAY_SLEEP);
176 if (essiv_tfm == NULL) { 177 if (essiv_tfm == NULL) {
177 ti->error = PFX "Error allocating crypto tfm for ESSIV"; 178 ti->error = PFX "Error allocating crypto tfm for ESSIV";
178 kfree(salt); 179 kfree(salt);
@@ -587,7 +588,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
587 goto bad1; 588 goto bad1;
588 } 589 }
589 590
590 tfm = crypto_alloc_tfm(cipher, crypto_flags); 591 tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
591 if (!tfm) { 592 if (!tfm) {
592 ti->error = PFX "Error allocating crypto tfm"; 593 ti->error = PFX "Error allocating crypto tfm";
593 goto bad1; 594 goto bad1;
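Every crypto_alloc_tfm() call in dm-crypt now passes CRYPTO_TFM_REQ_MAY_SLEEP, telling the crypto layer that the caller is allowed to block (and so GFP_KERNEL allocation paths may be used internally). A sketch of the usage; the algorithm names "sha1" and "aes" are examples, not taken from this patch:

#include <linux/errno.h>
#include <linux/crypto.h>

static int example_alloc_tfms(void)
{
	struct crypto_tfm *hash, *cipher;

	/* Digest transform that may sleep during setup and operation. */
	hash = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
	if (hash == NULL)
		return -EINVAL;

	/* Mode bits and request bits are simply OR-ed together. */
	cipher = crypto_alloc_tfm("aes",
				  CRYPTO_TFM_MODE_ECB | CRYPTO_TFM_REQ_MAY_SLEEP);
	if (cipher == NULL) {
		crypto_free_tfm(hash);
		return -EINVAL;
	}

	/* ... use the transforms ... */

	crypto_free_tfm(cipher);
	crypto_free_tfm(hash);
	return 0;
}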
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 45754bb6a799..9de000131a8a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -239,6 +239,11 @@ static void vm_dp_init(struct dpages *dp, void *data)
239 dp->context_ptr = data; 239 dp->context_ptr = data;
240} 240}
241 241
242static void dm_bio_destructor(struct bio *bio)
243{
244 bio_free(bio, _bios);
245}
246
242/*----------------------------------------------------------------- 247/*-----------------------------------------------------------------
243 * IO routines that accept a list of pages. 248 * IO routines that accept a list of pages.
244 *---------------------------------------------------------------*/ 249 *---------------------------------------------------------------*/
@@ -263,6 +268,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
263 bio->bi_bdev = where->bdev; 268 bio->bi_bdev = where->bdev;
264 bio->bi_end_io = endio; 269 bio->bi_end_io = endio;
265 bio->bi_private = io; 270 bio->bi_private = io;
271 bio->bi_destructor = dm_bio_destructor;
266 bio_set_region(bio, region); 272 bio_set_region(bio, region);
267 273
268 /* 274 /*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d487d9deb98e..930b9fc27953 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -399,6 +399,11 @@ struct clone_info {
399 unsigned short idx; 399 unsigned short idx;
400}; 400};
401 401
402static void dm_bio_destructor(struct bio *bio)
403{
404 bio_free(bio, dm_set);
405}
406
402/* 407/*
 403 * Creates a little bio that just does part of a bvec. 408 * Creates a little bio that just does part of a bvec.
404 */ 409 */
@@ -410,6 +415,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
410 struct bio_vec *bv = bio->bi_io_vec + idx; 415 struct bio_vec *bv = bio->bi_io_vec + idx;
411 416
412 clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set); 417 clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
418 clone->bi_destructor = dm_bio_destructor;
413 *clone->bi_io_vec = *bv; 419 *clone->bi_io_vec = *bv;
414 420
415 clone->bi_sector = sector; 421 clone->bi_sector = sector;
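Both dm-io and dm core now attach an explicit bi_destructor to the bios they allocate from their private bio_set, so that bio_put() returns each bio to the pool it came from. A condensed sketch of the pattern; my_bio_set and the function names are placeholders, and the bio_set is assumed to have been created elsewhere by the driver:

#include <linux/bio.h>

static struct bio_set *my_bio_set;		/* assumed: created at init time */

static void my_bio_destructor(struct bio *bio)
{
	/* Hand the bio back to the pool it was allocated from. */
	bio_free(bio, my_bio_set);
}

static struct bio *my_bio_alloc(int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);

	if (bio)
		bio->bi_destructor = my_bio_destructor;
	return bio;
}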
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index 781f23f0cbcc..6284894505c6 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -387,8 +387,6 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in
387 387
388/* exported algorithm data */ 388/* exported algorithm data */
389static struct i2c_algorithm saa7146_algo = { 389static struct i2c_algorithm saa7146_algo = {
390 .name = "saa7146 i2c algorithm",
391 .id = I2C_ALGO_SAA7146,
392 .master_xfer = saa7146_i2c_xfer, 390 .master_xfer = saa7146_i2c_xfer,
393 .functionality = saa7146_i2c_func, 391 .functionality = saa7146_i2c_func,
394}; 392};
@@ -412,7 +410,7 @@ int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c
412#endif 410#endif
413 i2c_adapter->algo = &saa7146_algo; 411 i2c_adapter->algo = &saa7146_algo;
414 i2c_adapter->algo_data = NULL; 412 i2c_adapter->algo_data = NULL;
415 i2c_adapter->id = I2C_ALGO_SAA7146; 413 i2c_adapter->id = I2C_HW_SAA7146;
416 i2c_adapter->timeout = SAA7146_I2C_TIMEOUT; 414 i2c_adapter->timeout = SAA7146_I2C_TIMEOUT;
417 i2c_adapter->retries = SAA7146_I2C_RETRIES; 415 i2c_adapter->retries = SAA7146_I2C_RETRIES;
418 } 416 }
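This is the first of many identical conversions in this merge: struct i2c_algorithm loses its .name and .id members, and adapter ids (where clients still need them at all) switch from composite I2C_ALGO_* values to plain I2C_HW_* hardware ids. A generic sketch with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/i2c.h>

static int foo_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	/* ... drive the bus; return the number of messages transferred ... */
	return num;
}

static u32 foo_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static struct i2c_algorithm foo_algo = {
	/* no .name and no .id fields any more */
	.master_xfer   = foo_xfer,
	.functionality = foo_func,
};

static struct i2c_adapter foo_adap = {
	.owner = THIS_MODULE,
	.name  = "foo adapter",
	.algo  = &foo_algo,
	/* adapter .id, when a client still matches on it, is now a bare
	 * I2C_HW_* constant rather than I2C_ALGO_x | I2C_HW_y */
};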
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index be4266d4ae91..56495cb6cd02 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -172,8 +172,6 @@ static u32 flexcop_i2c_func(struct i2c_adapter *adapter)
172} 172}
173 173
174static struct i2c_algorithm flexcop_algo = { 174static struct i2c_algorithm flexcop_algo = {
175 .name = "FlexCop I2C algorithm",
176 .id = I2C_ALGO_BIT,
177 .master_xfer = flexcop_master_xfer, 175 .master_xfer = flexcop_master_xfer,
178 .functionality = flexcop_i2c_func, 176 .functionality = flexcop_i2c_func,
179}; 177};
@@ -192,7 +190,6 @@ int flexcop_i2c_init(struct flexcop_device *fc)
192 fc->i2c_adap.class = I2C_CLASS_TV_DIGITAL; 190 fc->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
193 fc->i2c_adap.algo = &flexcop_algo; 191 fc->i2c_adap.algo = &flexcop_algo;
194 fc->i2c_adap.algo_data = NULL; 192 fc->i2c_adap.algo_data = NULL;
195 fc->i2c_adap.id = I2C_ALGO_BIT;
196 193
197 if ((ret = i2c_add_adapter(&fc->i2c_adap)) < 0) 194 if ((ret = i2c_add_adapter(&fc->i2c_adap)) < 0)
198 return ret; 195 return ret;
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index b12545f093f8..1e85d16491b0 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -1,5 +1,5 @@
1config DVB_BT8XX 1config DVB_BT8XX
2 tristate "Nebula/Pinnacle PCTV/Twinhan PCI cards" 2 tristate "BT8xx based PCI cards"
3 depends on DVB_CORE && PCI && VIDEO_BT848 3 depends on DVB_CORE && PCI && VIDEO_BT848
4 select DVB_MT352 4 select DVB_MT352
5 select DVB_SP887X 5 select DVB_SP887X
@@ -8,8 +8,8 @@ config DVB_BT8XX
8 select DVB_OR51211 8 select DVB_OR51211
9 help 9 help
10 Support for PCI cards based on the Bt8xx PCI bridge. Examples are 10 Support for PCI cards based on the Bt8xx PCI bridge. Examples are
11 the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards and 11 the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards,
12 pcHDTV HD2000 cards. 12 the pcHDTV HD2000 cards, and certain AVerMedia cards.
13 13
14 Since these cards have no MPEG decoder onboard, they transmit 14 Since these cards have no MPEG decoder onboard, they transmit
15 only compressed MPEG data over the PCI bus, so you need 15 only compressed MPEG data over the PCI bus, so you need
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 7d8b3cad350b..9ea5747b1211 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -888,7 +888,7 @@ static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
888 if (down_interruptible(&cinergyt2->sem)) 888 if (down_interruptible(&cinergyt2->sem))
889 return -ERESTARTSYS; 889 return -ERESTARTSYS;
890 890
891 if (state > 0) { /* state 0 seems to mean DEVICE_PM_ON */ 891 if (state.event > PM_EVENT_ON) {
892 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf); 892 struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
893#ifdef ENABLE_RC 893#ifdef ENABLE_RC
894 cancel_delayed_work(&cinergyt2->rc_query_work); 894 cancel_delayed_work(&cinergyt2->rc_query_work);
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index c3e1b661aae6..9e96a188f1e9 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -141,8 +141,6 @@ static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
141} 141}
142 142
143static struct i2c_algorithm cxusb_i2c_algo = { 143static struct i2c_algorithm cxusb_i2c_algo = {
144 .name = "Conexant USB I2C algorithm",
145 .id = I2C_ALGO_BIT,
146 .master_xfer = cxusb_i2c_xfer, 144 .master_xfer = cxusb_i2c_xfer,
147 .functionality = cxusb_i2c_func, 145 .functionality = cxusb_i2c_func,
148}; 146};
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 9b9d6f8ee74e..00b946419b40 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -156,8 +156,6 @@ static u32 dibusb_i2c_func(struct i2c_adapter *adapter)
156} 156}
157 157
158struct i2c_algorithm dibusb_i2c_algo = { 158struct i2c_algorithm dibusb_i2c_algo = {
159 .name = "DiBcom USB I2C algorithm",
160 .id = I2C_ALGO_BIT,
161 .master_xfer = dibusb_i2c_xfer, 159 .master_xfer = dibusb_i2c_xfer,
162 .functionality = dibusb_i2c_func, 160 .functionality = dibusb_i2c_func,
163}; 161};
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 9a676afc1d6e..f70e0be0920a 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -77,8 +77,6 @@ static u32 digitv_i2c_func(struct i2c_adapter *adapter)
77} 77}
78 78
79static struct i2c_algorithm digitv_i2c_algo = { 79static struct i2c_algorithm digitv_i2c_algo = {
80 .name = "Nebula DigiTV USB I2C algorithm",
81 .id = I2C_ALGO_BIT,
82 .master_xfer = digitv_i2c_xfer, 80 .master_xfer = digitv_i2c_xfer,
83 .functionality = digitv_i2c_func, 81 .functionality = digitv_i2c_func,
84}; 82};
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
index 9f0a8d90d146..da970947dfc7 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
@@ -27,7 +27,6 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
27#endif 27#endif
28 d->i2c_adap.algo = d->props.i2c_algo; 28 d->i2c_adap.algo = d->props.i2c_algo;
29 d->i2c_adap.algo_data = NULL; 29 d->i2c_adap.algo_data = NULL;
30 d->i2c_adap.id = I2C_ALGO_BIT;
31 30
32 i2c_set_adapdata(&d->i2c_adap, d); 31 i2c_set_adapdata(&d->i2c_adap, d);
33 32
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index 1f1cd7a8d500..7142b9c51dd2 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -69,8 +69,8 @@ struct lgdt330x_state
69}; 69};
70 70
71static int i2c_write_demod_bytes (struct lgdt330x_state* state, 71static int i2c_write_demod_bytes (struct lgdt330x_state* state,
72 u8 *buf, /* data bytes to send */ 72 u8 *buf, /* data bytes to send */
73 int len /* number of bytes to send */ ) 73 int len /* number of bytes to send */ )
74{ 74{
75 struct i2c_msg msg = 75 struct i2c_msg msg =
76 { .addr = state->config->demod_address, 76 { .addr = state->config->demod_address,
@@ -129,13 +129,13 @@ static int lgdt3302_SwReset(struct lgdt330x_state* state)
129 }; 129 };
130 130
131 ret = i2c_write_demod_bytes(state, 131 ret = i2c_write_demod_bytes(state,
132 reset, sizeof(reset)); 132 reset, sizeof(reset));
133 if (ret == 0) { 133 if (ret == 0) {
134 134
135 /* force reset high (inactive) and unmask interrupts */ 135 /* force reset high (inactive) and unmask interrupts */
136 reset[1] = 0x7f; 136 reset[1] = 0x7f;
137 ret = i2c_write_demod_bytes(state, 137 ret = i2c_write_demod_bytes(state,
138 reset, sizeof(reset)); 138 reset, sizeof(reset));
139 } 139 }
140 return ret; 140 return ret;
141} 141}
@@ -149,13 +149,13 @@ static int lgdt3303_SwReset(struct lgdt330x_state* state)
149 }; 149 };
150 150
151 ret = i2c_write_demod_bytes(state, 151 ret = i2c_write_demod_bytes(state,
152 reset, sizeof(reset)); 152 reset, sizeof(reset));
153 if (ret == 0) { 153 if (ret == 0) {
154 154
155 /* force reset high (inactive) */ 155 /* force reset high (inactive) */
156 reset[1] = 0x01; 156 reset[1] = 0x01;
157 ret = i2c_write_demod_bytes(state, 157 ret = i2c_write_demod_bytes(state,
158 reset, sizeof(reset)); 158 reset, sizeof(reset));
159 } 159 }
160 return ret; 160 return ret;
161} 161}
@@ -172,7 +172,6 @@ static int lgdt330x_SwReset(struct lgdt330x_state* state)
172 } 172 }
173} 173}
174 174
175
176static int lgdt330x_init(struct dvb_frontend* fe) 175static int lgdt330x_init(struct dvb_frontend* fe)
177{ 176{
178 /* Hardware reset is done using gpio[0] of cx23880x chip. 177 /* Hardware reset is done using gpio[0] of cx23880x chip.
@@ -229,13 +228,13 @@ static int lgdt330x_init(struct dvb_frontend* fe)
229 case LGDT3302: 228 case LGDT3302:
230 chip_name = "LGDT3302"; 229 chip_name = "LGDT3302";
231 err = i2c_write_demod_bytes(state, lgdt3302_init_data, 230 err = i2c_write_demod_bytes(state, lgdt3302_init_data,
232 sizeof(lgdt3302_init_data)); 231 sizeof(lgdt3302_init_data));
233 break; 232 break;
234 case LGDT3303: 233 case LGDT3303:
235 chip_name = "LGDT3303"; 234 chip_name = "LGDT3303";
236 err = i2c_write_demod_bytes(state, lgdt3303_init_data, 235 err = i2c_write_demod_bytes(state, lgdt3303_init_data,
237 sizeof(lgdt3303_init_data)); 236 sizeof(lgdt3303_init_data));
238 break; 237 break;
239 default: 238 default:
240 chip_name = "undefined"; 239 chip_name = "undefined";
241 printk (KERN_WARNING "Only LGDT3302 and LGDT3303 are supported chips.\n"); 240 printk (KERN_WARNING "Only LGDT3302 and LGDT3303 are supported chips.\n");
@@ -262,15 +261,15 @@ static int lgdt330x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
262 switch (state->config->demod_chip) { 261 switch (state->config->demod_chip) {
263 case LGDT3302: 262 case LGDT3302:
264 err = i2c_read_demod_bytes(state, LGDT3302_PACKET_ERR_COUNTER1, 263 err = i2c_read_demod_bytes(state, LGDT3302_PACKET_ERR_COUNTER1,
265 buf, sizeof(buf)); 264 buf, sizeof(buf));
266 break; 265 break;
267 case LGDT3303: 266 case LGDT3303:
268 err = i2c_read_demod_bytes(state, LGDT3303_PACKET_ERR_COUNTER1, 267 err = i2c_read_demod_bytes(state, LGDT3303_PACKET_ERR_COUNTER1,
269 buf, sizeof(buf)); 268 buf, sizeof(buf));
270 break; 269 break;
271 default: 270 default:
272 printk(KERN_WARNING 271 printk(KERN_WARNING
273 "Only LGDT3302 and LGDT3303 are supported chips.\n"); 272 "Only LGDT3302 and LGDT3303 are supported chips.\n");
274 err = -ENODEV; 273 err = -ENODEV;
275 } 274 }
276 275
@@ -330,7 +329,7 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
330 329
331 if (state->config->demod_chip == LGDT3303) { 330 if (state->config->demod_chip == LGDT3303) {
332 err = i2c_write_demod_bytes(state, lgdt3303_8vsb_44_data, 331 err = i2c_write_demod_bytes(state, lgdt3303_8vsb_44_data,
333 sizeof(lgdt3303_8vsb_44_data)); 332 sizeof(lgdt3303_8vsb_44_data));
334 } 333 }
335 break; 334 break;
336 335
@@ -378,18 +377,19 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
378 377
379 /* Select the requested mode */ 378 /* Select the requested mode */
380 i2c_write_demod_bytes(state, top_ctrl_cfg, 379 i2c_write_demod_bytes(state, top_ctrl_cfg,
381 sizeof(top_ctrl_cfg)); 380 sizeof(top_ctrl_cfg));
382 state->config->set_ts_params(fe, 0); 381 if (state->config->set_ts_params)
382 state->config->set_ts_params(fe, 0);
383 state->current_modulation = param->u.vsb.modulation; 383 state->current_modulation = param->u.vsb.modulation;
384 } 384 }
385 385
386 /* Change only if we are actually changing the channel */ 386 /* Tune to the specified frequency */
387 if (state->current_frequency != param->frequency) { 387 if (state->config->pll_set)
388 /* Tune to the new frequency */
389 state->config->pll_set(fe, param); 388 state->config->pll_set(fe, param);
390 /* Keep track of the new frequency */ 389
391 state->current_frequency = param->frequency; 390 /* Keep track of the new frequency */
392 } 391 state->current_frequency = param->frequency;
392
393 lgdt330x_SwReset(state); 393 lgdt330x_SwReset(state);
394 return 0; 394 return 0;
395} 395}
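The lgdt330x hunk above stops assuming that the board configuration always supplies set_ts_params and pll_set: optional hooks are now called only when present, and the old "only retune when the frequency changed" shortcut is dropped at the same time, so current_frequency is always refreshed. The general guard pattern, with a hypothetical ops structure for illustration:

struct example_board_ops {
	void (*tune)(void *priv, unsigned long frequency);	/* optional */
};

static void example_set_frequency(struct example_board_ops *ops, void *priv,
				  unsigned long frequency)
{
	/* A NULL hook means "this board does not need the step"; skip it
	 * rather than dereferencing a NULL function pointer. */
	if (ops->tune)
		ops->tune(priv, frequency);
}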
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 706e0bcb5ede..85b437bbddcd 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -633,7 +633,6 @@ static int __devinit pluto2_probe(struct pci_dev *pdev,
633 i2c_set_adapdata(&pluto->i2c_adap, pluto); 633 i2c_set_adapdata(&pluto->i2c_adap, pluto);
634 strcpy(pluto->i2c_adap.name, DRIVER_NAME); 634 strcpy(pluto->i2c_adap.name, DRIVER_NAME);
635 pluto->i2c_adap.owner = THIS_MODULE; 635 pluto->i2c_adap.owner = THIS_MODULE;
636 pluto->i2c_adap.id = I2C_ALGO_BIT;
637 pluto->i2c_adap.class = I2C_CLASS_TV_DIGITAL; 636 pluto->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
638 pluto->i2c_adap.dev.parent = &pdev->dev; 637 pluto->i2c_adap.dev.parent = &pdev->dev;
639 pluto->i2c_adap.algo_data = &pluto->i2c_bit; 638 pluto->i2c_adap.algo_data = &pluto->i2c_bit;
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index bf3c011d2cfb..d8bf65877897 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -102,6 +102,9 @@ config DVB_BUDGET_AV
102 select VIDEO_DEV 102 select VIDEO_DEV
103 select VIDEO_SAA7146_VV 103 select VIDEO_SAA7146_VV
104 select DVB_STV0299 104 select DVB_STV0299
105 select DVB_TDA1004X
106 select DVB_TDA10021
107 select FW_LOADER
105 help 108 help
106 Support for simple SAA7146 based DVB cards 109 Support for simple SAA7146 based DVB cards
107 (so called Budget- or Nova-PCI cards) without onboard 110 (so called Budget- or Nova-PCI cards) without onboard
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index aa43b5fcb8e7..7daf7b1598a0 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1472,8 +1472,6 @@ static void frontend_init(struct ttusb* ttusb)
1472 1472
1473 1473
1474static struct i2c_algorithm ttusb_dec_algo = { 1474static struct i2c_algorithm ttusb_dec_algo = {
1475 .name = "ttusb dec i2c algorithm",
1476 .id = I2C_ALGO_BIT,
1477 .master_xfer = master_xfer, 1475 .master_xfer = master_xfer,
1478 .functionality = functionality, 1476 .functionality = functionality,
1479}; 1477};
@@ -1525,7 +1523,6 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
1525#endif 1523#endif
1526 ttusb->i2c_adap.algo = &ttusb_dec_algo; 1524 ttusb->i2c_adap.algo = &ttusb_dec_algo;
1527 ttusb->i2c_adap.algo_data = NULL; 1525 ttusb->i2c_adap.algo_data = NULL;
1528 ttusb->i2c_adap.id = I2C_ALGO_BIT;
1529 1526
1530 result = i2c_add_adapter(&ttusb->i2c_adap); 1527 result = i2c_add_adapter(&ttusb->i2c_adap);
1531 if (result) { 1528 if (result) {
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 3f5742396096..16c85c081e6e 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -254,6 +254,7 @@ config VIDEO_SAA7134_DVB
254 select VIDEO_BUF_DVB 254 select VIDEO_BUF_DVB
255 select DVB_MT352 255 select DVB_MT352
256 select DVB_CX22702 256 select DVB_CX22702
257 select DVB_TDA1004X
257 ---help--- 258 ---help---
258 This adds support for DVB cards based on the 259 This adds support for DVB cards based on the
259 Philips saa7134 chip. 260 Philips saa7134 chip.
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 810e7aac0a53..3e6f5347da21 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_VIDEO_ZORAN_LML33R10) += saa7114.o adv7170.o zr36060.o
29obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o 29obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o
30obj-$(CONFIG_VIDEO_PMS) += pms.o 30obj-$(CONFIG_VIDEO_PMS) += pms.o
31obj-$(CONFIG_VIDEO_PLANB) += planb.o 31obj-$(CONFIG_VIDEO_PLANB) += planb.o
32obj-$(CONFIG_VIDEO_VINO) += vino.o 32obj-$(CONFIG_VIDEO_VINO) += vino.o saa7191.o indycam.o
33obj-$(CONFIG_VIDEO_STRADIS) += stradis.o 33obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
34obj-$(CONFIG_VIDEO_CPIA) += cpia.o 34obj-$(CONFIG_VIDEO_CPIA) += cpia.o
35obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o 35obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 48989eda2400..1ca2b67aedfb 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -43,7 +43,6 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <asm/segment.h>
47#include <linux/types.h> 46#include <linux/types.h>
48 47
49#include <linux/videodev.h> 48#include <linux/videodev.h>
@@ -391,7 +390,6 @@ static struct i2c_client_address_data addr_data = {
391 .normal_i2c = normal_i2c, 390 .normal_i2c = normal_i2c,
392 .probe = &ignore, 391 .probe = &ignore,
393 .ignore = &ignore, 392 .ignore = &ignore,
394 .force = &ignore,
395}; 393};
396 394
397static struct i2c_driver i2c_driver_adv7170; 395static struct i2c_driver i2c_driver_adv7170;
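The adv7170 hunk (and the adv7175/bt819/bt856/saa7110/saa7111/saa7114/saa7185 hunks below) drops the obsolete .force initialiser from the I2C address data along with the unused <asm/segment.h> include. What remains looks like this; the 0x54 address and the ignore scalar are illustrative, following the style of these drivers rather than copied from any one of them:

#include <linux/i2c.h>

static unsigned short normal_i2c[] = { 0x54 >> 1, I2C_CLIENT_END };
static unsigned short ignore = I2C_CLIENT_END;

static struct i2c_client_address_data addr_data = {
	.normal_i2c = normal_i2c,
	.probe      = &ignore,
	.ignore     = &ignore,
	/* no .force entry any more */
};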
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f898b6586374..173bca1e0295 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -39,7 +39,6 @@
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <asm/segment.h>
43#include <linux/types.h> 42#include <linux/types.h>
44 43
45#include <linux/videodev.h> 44#include <linux/videodev.h>
@@ -441,7 +440,6 @@ static struct i2c_client_address_data addr_data = {
441 .normal_i2c = normal_i2c, 440 .normal_i2c = normal_i2c,
442 .probe = &ignore, 441 .probe = &ignore,
443 .ignore = &ignore, 442 .ignore = &ignore,
444 .force = &ignore,
445}; 443};
446 444
447static struct i2c_driver i2c_driver_adv7175; 445static struct i2c_driver i2c_driver_adv7175;
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index 8733588f6db3..3ee0afca76a7 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -43,7 +43,6 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <asm/segment.h>
47#include <linux/types.h> 46#include <linux/types.h>
48 47
49#include <linux/videodev.h> 48#include <linux/videodev.h>
@@ -507,7 +506,6 @@ static struct i2c_client_address_data addr_data = {
507 .normal_i2c = normal_i2c, 506 .normal_i2c = normal_i2c,
508 .probe = &ignore, 507 .probe = &ignore,
509 .ignore = &ignore, 508 .ignore = &ignore,
510 .force = &ignore,
511}; 509};
512 510
513static struct i2c_driver i2c_driver_bt819; 511static struct i2c_driver i2c_driver_bt819;
diff --git a/drivers/media/video/bt832.c b/drivers/media/video/bt832.c
index a070417e65e6..76c1b63ebdf2 100644
--- a/drivers/media/video/bt832.c
+++ b/drivers/media/video/bt832.c
@@ -188,7 +188,7 @@ static int bt832_probe(struct i2c_adapter *adap)
188 if (adap->class & I2C_CLASS_TV_ANALOG) 188 if (adap->class & I2C_CLASS_TV_ANALOG)
189 return i2c_probe(adap, &addr_data, bt832_attach); 189 return i2c_probe(adap, &addr_data, bt832_attach);
190#else 190#else
191 if (adap->id == (I2C_ALGO_BIT | I2C_HW_B_BT848)) 191 if (adap->id == I2C_HW_B_BT848)
192 return i2c_probe(adap, &addr_data, bt832_attach); 192 return i2c_probe(adap, &addr_data, bt832_attach);
193#endif 193#endif
194 return 0; 194 return 0;
@@ -241,7 +241,7 @@ static struct i2c_driver driver = {
241}; 241};
242static struct i2c_client client_template = 242static struct i2c_client client_template =
243{ 243{
244 I2C_DEVNAME("bt832"), 244 .name = "bt832",
245 .flags = I2C_CLIENT_ALLOW_USE, 245 .flags = I2C_CLIENT_ALLOW_USE,
246 .driver = &driver, 246 .driver = &driver,
247}; 247};
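bt832 also shows the I2C_DEVNAME() removal that recurs through the remaining video drivers: client and adapter names are set with a plain .name designated initialiser and read directly as c->name instead of through i2c_clientname(). Sketch with a hypothetical client template (foo_driver is assumed to exist elsewhere):

#include <linux/kernel.h>
#include <linux/i2c.h>

static struct i2c_driver foo_driver;		/* assumed: defined elsewhere */

static struct i2c_client foo_client_template = {
	.name   = "foo client",			/* was: I2C_DEVNAME("foo client") */
	.driver = &foo_driver,
};

static void foo_announce(struct i2c_client *c)
{
	/* was: i2c_clientname(c) */
	printk(KERN_INFO "attached client %s at 0x%02x\n", c->name, c->addr);
}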
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index a5d529ccf3ad..8eb871d0e85b 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -43,7 +43,6 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <asm/segment.h>
47#include <linux/types.h> 46#include <linux/types.h>
48 47
49#include <linux/videodev.h> 48#include <linux/videodev.h>
@@ -295,7 +294,6 @@ static struct i2c_client_address_data addr_data = {
295 .normal_i2c = normal_i2c, 294 .normal_i2c = normal_i2c,
296 .probe = &ignore, 295 .probe = &ignore,
297 .ignore = &ignore, 296 .ignore = &ignore,
298 .force = &ignore,
299}; 297};
300 298
301static struct i2c_driver i2c_driver_bt856; 299static struct i2c_driver i2c_driver_bt856;
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index eee9322ce21b..087efb4dea09 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -4047,7 +4047,6 @@ static int bttv_suspend(struct pci_dev *pci_dev, pm_message_t state)
4047 struct bttv_buffer_set idle; 4047 struct bttv_buffer_set idle;
4048 unsigned long flags; 4048 unsigned long flags;
4049 4049
4050 dprintk("bttv%d: suspend %d\n", btv->c.nr, state);
4051 4050
4052 /* stop dma + irqs */ 4051 /* stop dma + irqs */
4053 spin_lock_irqsave(&btv->s_lock,flags); 4052 spin_lock_irqsave(&btv->s_lock,flags);
diff --git a/drivers/media/video/bttv-i2c.c b/drivers/media/video/bttv-i2c.c
index 234a85563769..706dc48df962 100644
--- a/drivers/media/video/bttv-i2c.c
+++ b/drivers/media/video/bttv-i2c.c
@@ -109,7 +109,7 @@ static struct i2c_adapter bttv_i2c_adap_sw_template = {
109#ifdef I2C_CLASS_TV_ANALOG 109#ifdef I2C_CLASS_TV_ANALOG
110 .class = I2C_CLASS_TV_ANALOG, 110 .class = I2C_CLASS_TV_ANALOG,
111#endif 111#endif
112 I2C_DEVNAME("bt848"), 112 .name = "bt848",
113 .id = I2C_HW_B_BT848, 113 .id = I2C_HW_B_BT848,
114 .client_register = attach_inform, 114 .client_register = attach_inform,
115}; 115};
@@ -270,8 +270,6 @@ static int bttv_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int
270} 270}
271 271
272static struct i2c_algorithm bttv_algo = { 272static struct i2c_algorithm bttv_algo = {
273 .name = "bt878",
274 .id = I2C_ALGO_BIT | I2C_HW_B_BT848 /* FIXME */,
275 .master_xfer = bttv_i2c_xfer, 273 .master_xfer = bttv_i2c_xfer,
276 .algo_control = algo_control, 274 .algo_control = algo_control,
277 .functionality = functionality, 275 .functionality = functionality,
@@ -282,8 +280,8 @@ static struct i2c_adapter bttv_i2c_adap_hw_template = {
282#ifdef I2C_CLASS_TV_ANALOG 280#ifdef I2C_CLASS_TV_ANALOG
283 .class = I2C_CLASS_TV_ANALOG, 281 .class = I2C_CLASS_TV_ANALOG,
284#endif 282#endif
285 I2C_DEVNAME("bt878"), 283 .name = "bt878",
286 .id = I2C_ALGO_BIT | I2C_HW_B_BT848 /* FIXME */, 284 .id = I2C_HW_B_BT848 /* FIXME */,
287 .algo = &bttv_algo, 285 .algo = &bttv_algo,
288 .client_register = attach_inform, 286 .client_register = attach_inform,
289}; 287};
@@ -298,7 +296,7 @@ static int attach_inform(struct i2c_client *client)
298 if (bttv_debug) 296 if (bttv_debug)
299 printk(KERN_DEBUG "bttv%d: %s i2c attach [addr=0x%x,client=%s]\n", 297 printk(KERN_DEBUG "bttv%d: %s i2c attach [addr=0x%x,client=%s]\n",
300 btv->c.nr,client->driver->name,client->addr, 298 btv->c.nr,client->driver->name,client->addr,
301 i2c_clientname(client)); 299 client->name);
302 if (!client->driver->command) 300 if (!client->driver->command)
303 return 0; 301 return 0;
304 302
@@ -326,7 +324,7 @@ void bttv_call_i2c_clients(struct bttv *btv, unsigned int cmd, void *arg)
326} 324}
327 325
328static struct i2c_client bttv_i2c_client_template = { 326static struct i2c_client bttv_i2c_client_template = {
329 I2C_DEVNAME("bttv internal"), 327 .name = "bttv internal",
330}; 328};
331 329
332 330
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index a628a55299c6..7f598039e025 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -95,7 +95,7 @@ static int attach_inform(struct i2c_client *client)
95 struct cx88_core *core = i2c_get_adapdata(client->adapter); 95 struct cx88_core *core = i2c_get_adapdata(client->adapter);
96 96
97 dprintk(1, "%s i2c attach [addr=0x%x,client=%s]\n", 97 dprintk(1, "%s i2c attach [addr=0x%x,client=%s]\n",
98 client->driver->name,client->addr,i2c_clientname(client)); 98 client->driver->name, client->addr, client->name);
99 if (!client->driver->command) 99 if (!client->driver->command)
100 return 0; 100 return 0;
101 101
@@ -128,7 +128,7 @@ static int detach_inform(struct i2c_client *client)
128{ 128{
129 struct cx88_core *core = i2c_get_adapdata(client->adapter); 129 struct cx88_core *core = i2c_get_adapdata(client->adapter);
130 130
131 dprintk(1, "i2c detach [client=%s]\n", i2c_clientname(client)); 131 dprintk(1, "i2c detach [client=%s]\n", client->name);
132 return 0; 132 return 0;
133} 133}
134 134
@@ -152,7 +152,7 @@ static struct i2c_algo_bit_data cx8800_i2c_algo_template = {
152/* ----------------------------------------------------------------------- */ 152/* ----------------------------------------------------------------------- */
153 153
154static struct i2c_adapter cx8800_i2c_adap_template = { 154static struct i2c_adapter cx8800_i2c_adap_template = {
155 I2C_DEVNAME("cx2388x"), 155 .name = "cx2388x",
156 .owner = THIS_MODULE, 156 .owner = THIS_MODULE,
157 .id = I2C_HW_B_CX2388x, 157 .id = I2C_HW_B_CX2388x,
158 .client_register = attach_inform, 158 .client_register = attach_inform,
@@ -160,7 +160,7 @@ static struct i2c_adapter cx8800_i2c_adap_template = {
160}; 160};
161 161
162static struct i2c_client cx8800_i2c_client_template = { 162static struct i2c_client cx8800_i2c_client_template = {
163 I2C_DEVNAME("cx88xx internal"), 163 .name = "cx88xx internal",
164}; 164};
165 165
166static char *i2c_devs[128] = { 166static char *i2c_devs[128] = {
diff --git a/drivers/media/video/indycam.c b/drivers/media/video/indycam.c
new file mode 100644
index 000000000000..b2b0384cd4b9
--- /dev/null
+++ b/drivers/media/video/indycam.c
@@ -0,0 +1,412 @@
1/*
2 * indycam.c - Silicon Graphics IndyCam digital camera driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/errno.h>
16#include <linux/fs.h>
17#include <linux/kernel.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/sched.h>
22
23#include <linux/videodev.h>
24/* IndyCam decodes stream of photons into digital image representation ;-) */
25#include <linux/video_decoder.h>
26#include <linux/i2c.h>
27
28#include "indycam.h"
29
30//#define INDYCAM_DEBUG
31
32#define INDYCAM_MODULE_VERSION "0.0.3"
33
34MODULE_DESCRIPTION("SGI IndyCam driver");
35MODULE_VERSION(INDYCAM_MODULE_VERSION);
36MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
37MODULE_LICENSE("GPL");
38
39#ifdef INDYCAM_DEBUG
40#define dprintk(x...) printk("IndyCam: " x);
41#define indycam_regdump(client) indycam_regdump_debug(client)
42#else
43#define dprintk(x...)
44#define indycam_regdump(client)
45#endif
46
47#define VINO_ADAPTER (I2C_ALGO_SGI | I2C_HW_SGI_VINO)
48
49struct indycam {
50 struct i2c_client *client;
51 int version;
52};
53
54static struct i2c_driver i2c_driver_indycam;
55
56static const unsigned char initseq[] = {
57 INDYCAM_CONTROL_AGCENA, /* INDYCAM_CONTROL */
58 INDYCAM_SHUTTER_DEFAULT, /* INDYCAM_SHUTTER */
59 INDYCAM_GAIN_DEFAULT, /* INDYCAM_GAIN */
60 0x00, /* INDYCAM_BRIGHTNESS (read-only) */
61 INDYCAM_RED_BALANCE_DEFAULT, /* INDYCAM_RED_BALANCE */
62 INDYCAM_BLUE_BALANCE_DEFAULT, /* INDYCAM_BLUE_BALANCE */
63 INDYCAM_RED_SATURATION_DEFAULT, /* INDYCAM_RED_SATURATION */
64 INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
65};
66
67/* IndyCam register handling */
68
69static int indycam_read_reg(struct i2c_client *client, unsigned char reg,
70 unsigned char *value)
71{
72 int ret;
73
74 if (reg == INDYCAM_RESET) {
75 dprintk("indycam_read_reg(): "
76 "skipping write-only register %d\n", reg);
77 *value = 0;
78 return 0;
79 }
80
81 ret = i2c_smbus_read_byte_data(client, reg);
82 if (ret < 0) {
83 printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
84 "register = 0x%02x\n", reg);
85 return ret;
86 }
87
88 *value = (unsigned char)ret;
89
90 return 0;
91}
92
93static int indycam_write_reg(struct i2c_client *client, unsigned char reg,
94 unsigned char value)
95{
96 int err;
97
98 if ((reg == INDYCAM_BRIGHTNESS)
99 || (reg == INDYCAM_VERSION)) {
100 dprintk("indycam_write_reg(): "
101 "skipping read-only register %d\n", reg);
102 return 0;
103 }
104
105 dprintk("Writing Reg %d = 0x%02x\n", reg, value);
106 err = i2c_smbus_write_byte_data(client, reg, value);
107 if (err) {
108 printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
109 "register = 0x%02x, value = 0x%02x\n", reg, value);
110 }
111 return err;
112}
113
114static int indycam_write_block(struct i2c_client *client, unsigned char reg,
115 unsigned char length, unsigned char *data)
116{
117 unsigned char i;
118 int err;
119
120 for (i = reg; i < length; i++) {
121 err = indycam_write_reg(client, reg + i, data[i]);
122 if (err)
123 return err;
124 }
125
126 return 0;
127}
128
129/* Helper functions */
130
131#ifdef INDYCAM_DEBUG
132static void indycam_regdump_debug(struct i2c_client *client)
133{
134 int i;
135 unsigned char val;
136
137 for (i = 0; i < 9; i++) {
138 indycam_read_reg(client, i, &val);
139 dprintk("Reg %d = 0x%02x\n", i, val);
140 }
141}
142#endif
143
144static int indycam_get_controls(struct i2c_client *client,
145 struct indycam_control *ctrl)
146{
147 unsigned char ctrl_reg;
148
149 indycam_read_reg(client, INDYCAM_CONTROL, &ctrl_reg);
150 ctrl->agc = (ctrl_reg & INDYCAM_CONTROL_AGCENA)
151 ? INDYCAM_VALUE_ENABLED
152 : INDYCAM_VALUE_DISABLED;
153 ctrl->awb = (ctrl_reg & INDYCAM_CONTROL_AWBCTL)
154 ? INDYCAM_VALUE_ENABLED
155 : INDYCAM_VALUE_DISABLED;
156 indycam_read_reg(client, INDYCAM_SHUTTER,
157 (unsigned char *)&ctrl->shutter);
158 indycam_read_reg(client, INDYCAM_GAIN,
159 (unsigned char *)&ctrl->gain);
160 indycam_read_reg(client, INDYCAM_RED_BALANCE,
161 (unsigned char *)&ctrl->red_balance);
162 indycam_read_reg(client, INDYCAM_BLUE_BALANCE,
163 (unsigned char *)&ctrl->blue_balance);
164 indycam_read_reg(client, INDYCAM_RED_SATURATION,
165 (unsigned char *)&ctrl->red_saturation);
166 indycam_read_reg(client, INDYCAM_BLUE_SATURATION,
167 (unsigned char *)&ctrl->blue_saturation);
168 indycam_read_reg(client, INDYCAM_GAMMA,
169 (unsigned char *)&ctrl->gamma);
170
171 return 0;
172}
173
174static int indycam_set_controls(struct i2c_client *client,
175 struct indycam_control *ctrl)
176{
177 unsigned char ctrl_reg;
178
179 indycam_read_reg(client, INDYCAM_CONTROL, &ctrl_reg);
180 if (ctrl->agc != INDYCAM_VALUE_UNCHANGED) {
181 if (ctrl->agc)
182 ctrl_reg |= INDYCAM_CONTROL_AGCENA;
183 else
184 ctrl_reg &= ~INDYCAM_CONTROL_AGCENA;
185 }
186 if (ctrl->awb != INDYCAM_VALUE_UNCHANGED) {
187 if (ctrl->awb)
188 ctrl_reg |= INDYCAM_CONTROL_AWBCTL;
189 else
190 ctrl_reg &= ~INDYCAM_CONTROL_AWBCTL;
191 }
192 indycam_write_reg(client, INDYCAM_CONTROL, ctrl_reg);
193
194 if (ctrl->shutter >= 0)
195 indycam_write_reg(client, INDYCAM_SHUTTER, ctrl->shutter);
196 if (ctrl->gain >= 0)
197 indycam_write_reg(client, INDYCAM_GAIN, ctrl->gain);
198 if (ctrl->red_balance >= 0)
199 indycam_write_reg(client, INDYCAM_RED_BALANCE,
200 ctrl->red_balance);
201 if (ctrl->blue_balance >= 0)
202 indycam_write_reg(client, INDYCAM_BLUE_BALANCE,
203 ctrl->blue_balance);
204 if (ctrl->red_saturation >= 0)
205 indycam_write_reg(client, INDYCAM_RED_SATURATION,
206 ctrl->red_saturation);
207 if (ctrl->blue_saturation >= 0)
208 indycam_write_reg(client, INDYCAM_BLUE_SATURATION,
209 ctrl->blue_saturation);
210 if (ctrl->gamma >= 0)
211 indycam_write_reg(client, INDYCAM_GAMMA, ctrl->gamma);
212
213 return 0;
214}
215
216/* I2C-interface */
217
218static int indycam_attach(struct i2c_adapter *adap, int addr, int kind)
219{
220 int err = 0;
221 struct indycam *camera;
222 struct i2c_client *client;
223
224 printk(KERN_INFO "SGI IndyCam driver version %s\n",
225 INDYCAM_MODULE_VERSION);
226
227 client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
228 if (!client)
229 return -ENOMEM;
230 camera = kmalloc(sizeof(struct indycam), GFP_KERNEL);
231 if (!camera) {
232 err = -ENOMEM;
233 goto out_free_client;
234 }
235
236 memset(client, 0, sizeof(struct i2c_client));
237 memset(camera, 0, sizeof(struct indycam));
238
239 client->addr = addr;
240 client->adapter = adap;
241 client->driver = &i2c_driver_indycam;
242 client->flags = 0;
243 strcpy(client->name, "IndyCam client");
244 i2c_set_clientdata(client, camera);
245
246 camera->client = client;
247
248 err = i2c_attach_client(client);
249 if (err)
250 goto out_free_camera;
251
252 camera->version = i2c_smbus_read_byte_data(client, INDYCAM_VERSION);
253 if (camera->version != CAMERA_VERSION_INDY &&
254 camera->version != CAMERA_VERSION_MOOSE) {
255 err = -ENODEV;
256 goto out_detach_client;
257 }
258 printk(KERN_INFO "IndyCam v%d.%d detected\n",
259 INDYCAM_VERSION_MAJOR(camera->version),
260 INDYCAM_VERSION_MINOR(camera->version));
261
262 indycam_regdump(client);
263
264 // initialize
265 err = indycam_write_block(client, 0, sizeof(initseq),
266 (unsigned char *)&initseq);
267 if (err) {
 268 printk(KERN_ERR "IndyCam initialization failed\n");
269 err = -EIO;
270 goto out_detach_client;
271 }
272
273 indycam_regdump(client);
274
275 // white balance
276 err = indycam_write_reg(client, INDYCAM_CONTROL,
277 INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
278 if (err) {
279 printk(KERN_ERR "IndyCam white balance "
280 "initialization failed\n");
281 err = -EIO;
282 goto out_detach_client;
283 }
284
285 indycam_regdump(client);
286
287 printk(KERN_INFO "IndyCam initialized\n");
288
289 return 0;
290
291out_detach_client:
292 i2c_detach_client(client);
293out_free_camera:
294 kfree(camera);
295out_free_client:
296 kfree(client);
297 return err;
298}
299
300static int indycam_probe(struct i2c_adapter *adap)
301{
302 /* Indy specific crap */
303 if (adap->id == VINO_ADAPTER)
304 return indycam_attach(adap, INDYCAM_ADDR, 0);
305 /* Feel free to add probe here :-) */
306 return -ENODEV;
307}
308
309static int indycam_detach(struct i2c_client *client)
310{
311 struct indycam *camera = i2c_get_clientdata(client);
312
313 i2c_detach_client(client);
314 kfree(camera);
315 kfree(client);
316 return 0;
317}
318
319static int indycam_command(struct i2c_client *client, unsigned int cmd,
320 void *arg)
321{
322 // struct indycam *camera = i2c_get_clientdata(client);
323
324 /* The old video_decoder interface just isn't enough,
325 * so we'll use some custom commands. */
326 switch (cmd) {
327 case DECODER_GET_CAPABILITIES: {
328 struct video_decoder_capability *cap = arg;
329
330 cap->flags = VIDEO_DECODER_NTSC;
331 cap->inputs = 1;
332 cap->outputs = 1;
333 break;
334 }
335 case DECODER_GET_STATUS: {
336 int *iarg = arg;
337
338 *iarg = DECODER_STATUS_GOOD | DECODER_STATUS_NTSC |
339 DECODER_STATUS_COLOR;
340 break;
341 }
342 case DECODER_SET_NORM: {
343 int *iarg = arg;
344
345 switch (*iarg) {
346 case VIDEO_MODE_NTSC:
347 break;
348 default:
349 return -EINVAL;
350 }
351 break;
352 }
353 case DECODER_SET_INPUT: {
354 int *iarg = arg;
355
356 if (*iarg != 0)
357 return -EINVAL;
358 break;
359 }
360 case DECODER_SET_OUTPUT: {
361 int *iarg = arg;
362
363 if (*iarg != 0)
364 return -EINVAL;
365 break;
366 }
367 case DECODER_ENABLE_OUTPUT: {
368 /* Always enabled */
369 break;
370 }
371 case DECODER_SET_PICTURE: {
372 // struct video_picture *pic = arg;
373 /* TODO: convert values for indycam_set_controls() */
374 break;
375 }
376 case DECODER_INDYCAM_GET_CONTROLS: {
377 struct indycam_control *ctrl = arg;
378 indycam_get_controls(client, ctrl);
379 }
380 case DECODER_INDYCAM_SET_CONTROLS: {
381 struct indycam_control *ctrl = arg;
382 indycam_set_controls(client, ctrl);
383 }
384 default:
385 return -EINVAL;
386 }
387
388 return 0;
389}
390
391static struct i2c_driver i2c_driver_indycam = {
392 .owner = THIS_MODULE,
393 .name = "indycam",
394 .id = I2C_DRIVERID_INDYCAM,
395 .flags = I2C_DF_NOTIFY,
396 .attach_adapter = indycam_probe,
397 .detach_client = indycam_detach,
398 .command = indycam_command,
399};
400
401static int __init indycam_init(void)
402{
403 return i2c_add_driver(&i2c_driver_indycam);
404}
405
406static void __exit indycam_exit(void)
407{
408 i2c_del_driver(&i2c_driver_indycam);
409}
410
411module_init(indycam_init);
412module_exit(indycam_exit);
diff --git a/drivers/media/video/indycam.h b/drivers/media/video/indycam.h
new file mode 100644
index 000000000000..d9ddb6b79a03
--- /dev/null
+++ b/drivers/media/video/indycam.h
@@ -0,0 +1,112 @@
1/*
2 * indycam.h - Silicon Graphics IndyCam digital camera driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _INDYCAM_H_
13#define _INDYCAM_H_
14
15/* I2C address for the Guinness Camera */
16#define INDYCAM_ADDR 0x56
17
18/* Camera version */
19#define CAMERA_VERSION_INDY 0x10 /* v1.0 */
20#define CAMERA_VERSION_MOOSE 0x12 /* v1.2 */
21#define INDYCAM_VERSION_MAJOR(x) (((x) & 0xf0) >> 4)
22#define INDYCAM_VERSION_MINOR(x) ((x) & 0x0f)
23
24/* Register bus addresses */
25#define INDYCAM_CONTROL 0x00
26#define INDYCAM_SHUTTER 0x01
27#define INDYCAM_GAIN 0x02
28#define INDYCAM_BRIGHTNESS 0x03 /* read-only */
29#define INDYCAM_RED_BALANCE 0x04
30#define INDYCAM_BLUE_BALANCE 0x05
31#define INDYCAM_RED_SATURATION 0x06
32#define INDYCAM_BLUE_SATURATION 0x07
33#define INDYCAM_GAMMA 0x08
34#define INDYCAM_VERSION 0x0e /* read-only */
35#define INDYCAM_RESET 0x0f /* write-only */
36
37#define INDYCAM_LED 0x46
38#define INDYCAM_ORIENTATION 0x47
39#define INDYCAM_BUTTON 0x48
40
41/* Field definitions of registers */
42#define INDYCAM_CONTROL_AGCENA (1<<0) /* automatic gain control */
43#define INDYCAM_CONTROL_AWBCTL (1<<1) /* automatic white balance */
44 /* 2-3 are reserved */
45#define INDYCAM_CONTROL_EVNFLD (1<<4) /* read-only */
46
47#define INDYCAM_SHUTTER_10000 0x02 /* 1/10000 second */
48#define INDYCAM_SHUTTER_4000 0x04 /* 1/4000 second */
49#define INDYCAM_SHUTTER_2000 0x08 /* 1/2000 second */
50#define INDYCAM_SHUTTER_1000 0x10 /* 1/1000 second */
51#define INDYCAM_SHUTTER_500 0x20 /* 1/500 second */
52#define INDYCAM_SHUTTER_250 0x3f /* 1/250 second */
53#define INDYCAM_SHUTTER_125 0x7e /* 1/125 second */
54#define INDYCAM_SHUTTER_100 0x9e /* 1/100 second */
55#define INDYCAM_SHUTTER_60 0x00 /* 1/60 second */
56
57#define INDYCAM_LED_ACTIVE 0x10
58#define INDYCAM_LED_INACTIVE 0x30
59#define INDYCAM_ORIENTATION_BOTTOM_TO_TOP 0x40
60#define INDYCAM_BUTTON_RELEASED 0x10
61
62#define INDYCAM_SHUTTER_MIN 0x00
63#define INDYCAM_SHUTTER_MAX 0xff
64#define INDYCAM_GAIN_MIN 0x00
65#define INDYCAM_GAIN_MAX 0xff
66#define INDYCAM_RED_BALANCE_MIN 0x00 /* the effect is the opposite? */
67#define INDYCAM_RED_BALANCE_MAX 0xff
68#define INDYCAM_BLUE_BALANCE_MIN 0x00 /* the effect is the opposite? */
69#define INDYCAM_BLUE_BALANCE_MAX 0xff
70#define INDYCAM_RED_SATURATION_MIN 0x00
71#define INDYCAM_RED_SATURATION_MAX 0xff
72#define INDYCAM_BLUE_SATURATION_MIN 0x00
73#define INDYCAM_BLUE_SATURATION_MAX 0xff
74#define INDYCAM_GAMMA_MIN 0x00
75#define INDYCAM_GAMMA_MAX 0xff
76
77/* Driver interface definitions */
78
79#define INDYCAM_VALUE_ENABLED 1
80#define INDYCAM_VALUE_DISABLED 0
81#define INDYCAM_VALUE_UNCHANGED -1
82
83/* When setting controls, a value of -1 leaves the control unchanged. */
84struct indycam_control {
85 int agc; /* boolean */
86 int awb; /* boolean */
87 int shutter;
88 int gain;
89 int red_balance;
90 int blue_balance;
91 int red_saturation;
92 int blue_saturation;
93 int gamma;
94};
95
96#define DECODER_INDYCAM_GET_CONTROLS _IOR('d', 193, struct indycam_control)
97#define DECODER_INDYCAM_SET_CONTROLS _IOW('d', 194, struct indycam_control)
98
99/* Default values for controls */
100
101#define INDYCAM_AGC_DEFAULT INDYCAM_VALUE_ENABLED
102#define INDYCAM_AWB_DEFAULT INDYCAM_VALUE_ENABLED
103
104#define INDYCAM_SHUTTER_DEFAULT INDYCAM_SHUTTER_60
105#define INDYCAM_GAIN_DEFAULT 0x80
106#define INDYCAM_RED_BALANCE_DEFAULT 0x18
107#define INDYCAM_BLUE_BALANCE_DEFAULT 0xa4
108#define INDYCAM_RED_SATURATION_DEFAULT 0x80
109#define INDYCAM_BLUE_SATURATION_DEFAULT 0xc0
110#define INDYCAM_GAMMA_DEFAULT 0x80
111
112#endif
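How a capture driver (VINO in this series) might drive the custom control interface declared above, going through the decoder client's command() entry point; example_set_gain and the way the client pointer is obtained are illustrative, not part of this patch:

#include <linux/i2c.h>
#include "indycam.h"

static int example_set_gain(struct i2c_client *camera_client, int gain)
{
	struct indycam_control ctrl;

	/* INDYCAM_VALUE_UNCHANGED (-1) leaves a field as it is. */
	ctrl.agc             = INDYCAM_VALUE_UNCHANGED;
	ctrl.awb             = INDYCAM_VALUE_UNCHANGED;
	ctrl.shutter         = INDYCAM_VALUE_UNCHANGED;
	ctrl.gain            = gain;
	ctrl.red_balance     = INDYCAM_VALUE_UNCHANGED;
	ctrl.blue_balance    = INDYCAM_VALUE_UNCHANGED;
	ctrl.red_saturation  = INDYCAM_VALUE_UNCHANGED;
	ctrl.blue_saturation = INDYCAM_VALUE_UNCHANGED;
	ctrl.gamma           = INDYCAM_VALUE_UNCHANGED;

	return camera_client->driver->command(camera_client,
					      DECODER_INDYCAM_SET_CONTROLS,
					      &ctrl);
}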
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 9fc5055e001c..1e273ff3f956 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -308,7 +308,7 @@ static struct i2c_driver driver = {
308 308
309static struct i2c_client client_template = 309static struct i2c_client client_template =
310{ 310{
311 I2C_DEVNAME("unset"), 311 .name = "unset",
312 .driver = &driver 312 .driver = &driver
313}; 313};
314 314
@@ -429,10 +429,10 @@ static int ir_probe(struct i2c_adapter *adap)
429 struct i2c_client c; char buf; int i,rc; 429 struct i2c_client c; char buf; int i,rc;
430 430
431 switch (adap->id) { 431 switch (adap->id) {
432 case I2C_ALGO_BIT | I2C_HW_B_BT848: 432 case I2C_HW_B_BT848:
433 probe = probe_bttv; 433 probe = probe_bttv;
434 break; 434 break;
435 case I2C_ALGO_SAA7134: 435 case I2C_HW_SAA7134:
436 probe = probe_saa7134; 436 probe = probe_saa7134;
437 break; 437 break;
438 } 438 }
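ir-kbd-i2c shows the client-side half of the id cleanup: drivers that decide whether to probe an adapter now switch on bare I2C_HW_* ids instead of I2C_ALGO_* | I2C_HW_* combinations. A generic sketch; example_attach and the empty address data are placeholders:

#include <linux/i2c.h>
#include <linux/i2c-id.h>

static struct i2c_client_address_data example_addr_data;	/* placeholder */

static int example_attach(struct i2c_adapter *adap, int addr, int kind)
{
	/* would allocate and register an i2c_client here */
	return 0;
}

static int example_probe(struct i2c_adapter *adap)
{
	switch (adap->id) {
	case I2C_HW_B_BT848:		/* bttv boards */
	case I2C_HW_SAA7134:		/* saa7134 boards */
		return i2c_probe(adap, &example_addr_data, example_attach);
	default:
		return 0;
	}
}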
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index fe194012bccf..3f2a882bc20a 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -37,6 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/dma-mapping.h>
40 41
41#include "meye.h" 42#include "meye.h"
42#include <linux/meye.h> 43#include <linux/meye.h>
@@ -121,7 +122,7 @@ static int ptable_alloc(void)
121 memset(meye.mchip_ptable, 0, sizeof(meye.mchip_ptable)); 122 memset(meye.mchip_ptable, 0, sizeof(meye.mchip_ptable));
122 123
123 /* give only 32 bit DMA addresses */ 124 /* give only 32 bit DMA addresses */
124 if (dma_set_mask(&meye.mchip_dev->dev, 0xffffffff)) 125 if (dma_set_mask(&meye.mchip_dev->dev, DMA_32BIT_MASK))
125 return -1; 126 return -1;
126 127
127 meye.mchip_ptable_toc = dma_alloc_coherent(&meye.mchip_dev->dev, 128 meye.mchip_ptable_toc = dma_alloc_coherent(&meye.mchip_dev->dev,
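meye now pulls in <linux/dma-mapping.h> and spells the mask symbolically as DMA_32BIT_MASK instead of a raw 0xffffffff. The same pattern for a hypothetical PCI driver:

#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int example_setup_dma(struct pci_dev *pdev)
{
	/* Accept only DMA addresses that fit in 32 bits. */
	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
		return -ENODEV;
	return 0;
}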
diff --git a/drivers/media/video/msp3400.c b/drivers/media/video/msp3400.c
index 62f1b8ddb98b..ca02f6f14b00 100644
--- a/drivers/media/video/msp3400.c
+++ b/drivers/media/video/msp3400.c
@@ -1416,7 +1416,7 @@ static int msp_detach(struct i2c_client *client);
1416static int msp_probe(struct i2c_adapter *adap); 1416static int msp_probe(struct i2c_adapter *adap);
1417static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg); 1417static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg);
1418 1418
1419static int msp_suspend(struct device * dev, u32 state, u32 level); 1419static int msp_suspend(struct device * dev, pm_message_t state, u32 level);
1420static int msp_resume(struct device * dev, u32 level); 1420static int msp_resume(struct device * dev, u32 level);
1421 1421
1422static void msp_wake_thread(struct i2c_client *client); 1422static void msp_wake_thread(struct i2c_client *client);
@@ -1437,7 +1437,7 @@ static struct i2c_driver driver = {
1437 1437
1438static struct i2c_client client_template = 1438static struct i2c_client client_template =
1439{ 1439{
1440 I2C_DEVNAME("(unset)"), 1440 .name = "(unset)",
1441 .flags = I2C_CLIENT_ALLOW_USE, 1441 .flags = I2C_CLIENT_ALLOW_USE,
1442 .driver = &driver, 1442 .driver = &driver,
1443}; 1443};
@@ -1509,7 +1509,7 @@ static int msp_attach(struct i2c_adapter *adap, int addr, int kind)
1509 } 1509 }
1510 1510
1511 /* hello world :-) */ 1511 /* hello world :-) */
1512 printk(KERN_INFO "msp34xx: init: chip=%s",i2c_clientname(c)); 1512 printk(KERN_INFO "msp34xx: init: chip=%s", c->name);
1513 if (HAVE_NICAM(msp)) 1513 if (HAVE_NICAM(msp))
1514 printk(" +nicam"); 1514 printk(" +nicam");
1515 if (HAVE_SIMPLE(msp)) 1515 if (HAVE_SIMPLE(msp))
@@ -1817,7 +1817,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
1817 return 0; 1817 return 0;
1818} 1818}
1819 1819
1820static int msp_suspend(struct device * dev, u32 state, u32 level) 1820static int msp_suspend(struct device * dev, pm_message_t state, u32 level)
1821{ 1821{
1822 struct i2c_client *c = container_of(dev, struct i2c_client, dev); 1822 struct i2c_client *c = container_of(dev, struct i2c_client, dev);
1823 1823
diff --git a/drivers/media/video/ovcamchip/ov6x20.c b/drivers/media/video/ovcamchip/ov6x20.c
index 3433619ad93f..b3f4d266cede 100644
--- a/drivers/media/video/ovcamchip/ov6x20.c
+++ b/drivers/media/video/ovcamchip/ov6x20.c
@@ -164,10 +164,10 @@ static int ov6x20_init(struct i2c_client *c)
164 DDEBUG(4, &c->dev, "entered"); 164 DDEBUG(4, &c->dev, "entered");
165 165
166 switch (c->adapter->id) { 166 switch (c->adapter->id) {
167 case I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV511: 167 case I2C_HW_SMBUS_OV511:
168 rc = ov_write_regvals(c, regvals_init_6x20_511); 168 rc = ov_write_regvals(c, regvals_init_6x20_511);
169 break; 169 break;
170 case I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518: 170 case I2C_HW_SMBUS_OV518:
171 rc = ov_write_regvals(c, regvals_init_6x20_518); 171 rc = ov_write_regvals(c, regvals_init_6x20_518);
172 break; 172 break;
173 default: 173 default:
@@ -338,7 +338,7 @@ static int ov6x20_mode_init(struct i2c_client *c, struct ovcamchip_window *win)
338 /******** Palette-specific regs ********/ 338 /******** Palette-specific regs ********/
339 339
340 /* OV518 needs 8 bit multiplexed in color mode, and 16 bit in B&W */ 340 /* OV518 needs 8 bit multiplexed in color mode, and 16 bit in B&W */
341 if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { 341 if (c->adapter->id == I2C_HW_SMBUS_OV518) {
342 if (win->format == VIDEO_PALETTE_GREY) 342 if (win->format == VIDEO_PALETTE_GREY)
343 ov_write_mask(c, 0x13, 0x00, 0x20); 343 ov_write_mask(c, 0x13, 0x00, 0x20);
344 else 344 else
diff --git a/drivers/media/video/ovcamchip/ov6x30.c b/drivers/media/video/ovcamchip/ov6x30.c
index 44a842379b45..6eab458ab792 100644
--- a/drivers/media/video/ovcamchip/ov6x30.c
+++ b/drivers/media/video/ovcamchip/ov6x30.c
@@ -301,7 +301,7 @@ static int ov6x30_mode_init(struct i2c_client *c, struct ovcamchip_window *win)
301 /******** Palette-specific regs ********/ 301 /******** Palette-specific regs ********/
302 302
303 if (win->format == VIDEO_PALETTE_GREY) { 303 if (win->format == VIDEO_PALETTE_GREY) {
304 if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { 304 if (c->adapter->id == I2C_HW_SMBUS_OV518) {
305 /* Do nothing - we're already in 8-bit mode */ 305 /* Do nothing - we're already in 8-bit mode */
306 } else { 306 } else {
307 ov_write_mask(c, 0x13, 0x20, 0x20); 307 ov_write_mask(c, 0x13, 0x20, 0x20);
@@ -313,7 +313,7 @@ static int ov6x30_mode_init(struct i2c_client *c, struct ovcamchip_window *win)
313 * Therefore, the OV6630 needs to be in 8-bit multiplexed 313 * Therefore, the OV6630 needs to be in 8-bit multiplexed
314 * output mode */ 314 * output mode */
315 315
316 if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { 316 if (c->adapter->id == I2C_HW_SMBUS_OV518) {
317 /* Do nothing - we want to stay in 8-bit mode */ 317 /* Do nothing - we want to stay in 8-bit mode */
318 /* Warning: Messing with reg 0x13 breaks OV518 color */ 318 /* Warning: Messing with reg 0x13 breaks OV518 color */
319 } else { 319 } else {
diff --git a/drivers/media/video/ovcamchip/ovcamchip_core.c b/drivers/media/video/ovcamchip/ovcamchip_core.c
index 54dd5612d3b8..2de34ebf0673 100644
--- a/drivers/media/video/ovcamchip/ovcamchip_core.c
+++ b/drivers/media/video/ovcamchip/ovcamchip_core.c
@@ -296,10 +296,10 @@ static int ovcamchip_attach(struct i2c_adapter *adap)
296 * attach to adapters that are known to contain OV camera chips. */ 296 * attach to adapters that are known to contain OV camera chips. */
297 297
298 switch (adap->id) { 298 switch (adap->id) {
299 case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV511): 299 case I2C_HW_SMBUS_OV511:
300 case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518): 300 case I2C_HW_SMBUS_OV518:
301 case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OVFX2): 301 case I2C_HW_SMBUS_OVFX2:
302 case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_W9968CF): 302 case I2C_HW_SMBUS_W9968CF:
303 PDEBUG(1, "Adapter ID 0x%06x accepted", adap->id); 303 PDEBUG(1, "Adapter ID 0x%06x accepted", adap->id);
304 break; 304 break;
305 default: 305 default:
@@ -314,7 +314,7 @@ static int ovcamchip_attach(struct i2c_adapter *adap)
314 } 314 }
315 memcpy(c, &client_template, sizeof *c); 315 memcpy(c, &client_template, sizeof *c);
316 c->adapter = adap; 316 c->adapter = adap;
317 strcpy(i2c_clientname(c), "OV????"); 317 strcpy(c->name, "OV????");
318 318
319 ov = kmalloc(sizeof *ov, GFP_KERNEL); 319 ov = kmalloc(sizeof *ov, GFP_KERNEL);
320 if (!ov) { 320 if (!ov) {
@@ -328,7 +328,7 @@ static int ovcamchip_attach(struct i2c_adapter *adap)
328 if (rc < 0) 328 if (rc < 0)
329 goto error; 329 goto error;
330 330
331 strcpy(i2c_clientname(c), chip_names[ov->subtype]); 331 strcpy(c->name, chip_names[ov->subtype]);
332 332
333 PDEBUG(1, "Camera chip detection complete"); 333 PDEBUG(1, "Camera chip detection complete");
334 334
@@ -421,7 +421,7 @@ static struct i2c_driver driver = {
421}; 421};
422 422
423static struct i2c_client client_template = { 423static struct i2c_client client_template = {
424 I2C_DEVNAME("(unset)"), 424 .name = "(unset)",
425 .driver = &driver, 425 .driver = &driver,
426}; 426};
427 427
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index 22d055d8a695..e116bdbed310 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -470,7 +470,6 @@ static struct i2c_client_address_data addr_data = {
470 .normal_i2c = normal_i2c, 470 .normal_i2c = normal_i2c,
471 .probe = &ignore, 471 .probe = &ignore,
472 .ignore = &ignore, 472 .ignore = &ignore,
473 .force = &ignore,
474}; 473};
475 474
476static struct i2c_driver i2c_driver_saa7110; 475static struct i2c_driver i2c_driver_saa7110;
diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c
index fcd897382fcf..fe8a5e453969 100644
--- a/drivers/media/video/saa7111.c
+++ b/drivers/media/video/saa7111.c
@@ -42,7 +42,6 @@
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
43#include <asm/page.h> 43#include <asm/page.h>
44#include <linux/sched.h> 44#include <linux/sched.h>
45#include <asm/segment.h>
46#include <linux/types.h> 45#include <linux/types.h>
47 46
48#include <linux/videodev.h> 47#include <linux/videodev.h>
@@ -489,7 +488,6 @@ static struct i2c_client_address_data addr_data = {
489 .normal_i2c = normal_i2c, 488 .normal_i2c = normal_i2c,
490 .probe = &ignore, 489 .probe = &ignore,
491 .ignore = &ignore, 490 .ignore = &ignore,
492 .force = &ignore,
493}; 491};
494 492
495static struct i2c_driver i2c_driver_saa7111; 493static struct i2c_driver i2c_driver_saa7111;
diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c
index 2ba997f5ef1d..d9f50e2f7b92 100644
--- a/drivers/media/video/saa7114.c
+++ b/drivers/media/video/saa7114.c
@@ -45,7 +45,6 @@
45#include <asm/pgtable.h> 45#include <asm/pgtable.h>
46#include <asm/page.h> 46#include <asm/page.h>
47#include <linux/sched.h> 47#include <linux/sched.h>
48#include <asm/segment.h>
49#include <linux/types.h> 48#include <linux/types.h>
50 49
51#include <linux/videodev.h> 50#include <linux/videodev.h>
@@ -827,7 +826,6 @@ static struct i2c_client_address_data addr_data = {
827 .normal_i2c = normal_i2c, 826 .normal_i2c = normal_i2c,
828 .probe = &ignore, 827 .probe = &ignore,
829 .ignore = &ignore, 828 .ignore = &ignore,
830 .force = &ignore,
831}; 829};
832 830
833static struct i2c_driver i2c_driver_saa7114; 831static struct i2c_driver i2c_driver_saa7114;
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index 79d05ea1b69b..382911c6ef22 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -598,7 +598,7 @@ static struct i2c_driver driver = {
598 598
599static struct i2c_client client_template = 599static struct i2c_client client_template =
600{ 600{
601 I2C_DEVNAME("saa6752hs"), 601 .name = "saa6752hs",
602 .flags = I2C_CLIENT_ALLOW_USE, 602 .flags = I2C_CLIENT_ALLOW_USE,
603 .driver = &driver, 603 .driver = &driver,
604}; 604};
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index 1203b93a572c..eae6b529713f 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -334,7 +334,7 @@ static int attach_inform(struct i2c_client *client)
334 struct tuner_setup tun_setup; 334 struct tuner_setup tun_setup;
335 335
336 d1printk( "%s i2c attach [addr=0x%x,client=%s]\n", 336 d1printk( "%s i2c attach [addr=0x%x,client=%s]\n",
337 client->driver->name,client->addr,i2c_clientname(client)); 337 client->driver->name, client->addr, client->name);
338 338
339 if (!client->driver->command) 339 if (!client->driver->command)
340 return 0; 340 return 0;
@@ -370,8 +370,6 @@ static int attach_inform(struct i2c_client *client)
370} 370}
371 371
372static struct i2c_algorithm saa7134_algo = { 372static struct i2c_algorithm saa7134_algo = {
373 .name = "saa7134",
374 .id = I2C_ALGO_SAA7134,
375 .master_xfer = saa7134_i2c_xfer, 373 .master_xfer = saa7134_i2c_xfer,
376 .algo_control = algo_control, 374 .algo_control = algo_control,
377 .functionality = functionality, 375 .functionality = functionality,
@@ -382,14 +380,14 @@ static struct i2c_adapter saa7134_adap_template = {
382#ifdef I2C_CLASS_TV_ANALOG 380#ifdef I2C_CLASS_TV_ANALOG
383 .class = I2C_CLASS_TV_ANALOG, 381 .class = I2C_CLASS_TV_ANALOG,
384#endif 382#endif
385 I2C_DEVNAME("saa7134"), 383 .name = "saa7134",
386 .id = I2C_ALGO_SAA7134, 384 .id = I2C_HW_SAA7134,
387 .algo = &saa7134_algo, 385 .algo = &saa7134_algo,
388 .client_register = attach_inform, 386 .client_register = attach_inform,
389}; 387};
390 388
391static struct i2c_client saa7134_client_template = { 389static struct i2c_client saa7134_client_template = {
392 I2C_DEVNAME("saa7134 internal"), 390 .name = "saa7134 internal",
393}; 391};
394 392
395/* ----------------------------------------------------------- */ 393/* ----------------------------------------------------------- */
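Two related cleanups meet in these hunks: struct i2c_algorithm no longer carries .name/.id, and adapter ids are bare I2C_HW_* constants instead of I2C_ALGO_* values. A condensed sketch of the resulting registration data, with field names taken from the hunks above (the example_ identifiers are illustrative only):

static struct i2c_algorithm example_algo = {
	/* identification fields are gone; only the transfer hooks remain */
	.master_xfer	= saa7134_i2c_xfer,
	.algo_control	= algo_control,
	.functionality	= functionality,
};

static struct i2c_adapter example_adap = {
	.name		 = "saa7134",
	.id		 = I2C_HW_SAA7134,	/* no I2C_ALGO_* bits or'd in */
	.algo		 = &example_algo,
	.client_register = attach_inform,
};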
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 108e7a4a0273..132aa7943c16 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -39,7 +39,6 @@
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <asm/segment.h>
43#include <linux/types.h> 42#include <linux/types.h>
44 43
45#include <linux/videodev.h> 44#include <linux/videodev.h>
@@ -387,7 +386,6 @@ static struct i2c_client_address_data addr_data = {
387 .normal_i2c = normal_i2c, 386 .normal_i2c = normal_i2c,
388 .probe = &ignore, 387 .probe = &ignore,
389 .ignore = &ignore, 388 .ignore = &ignore,
390 .force = &ignore,
391}; 389};
392 390
393static struct i2c_driver i2c_driver_saa7185; 391static struct i2c_driver i2c_driver_saa7185;
diff --git a/drivers/media/video/saa7191.c b/drivers/media/video/saa7191.c
new file mode 100644
index 000000000000..454f5c1199b4
--- /dev/null
+++ b/drivers/media/video/saa7191.c
@@ -0,0 +1,512 @@
1/*
2 * saa7191.c - Philips SAA7191 video decoder driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/errno.h>
16#include <linux/fs.h>
17#include <linux/kernel.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/sched.h>
22
23#include <linux/videodev.h>
24#include <linux/video_decoder.h>
25#include <linux/i2c.h>
26
27#include "saa7191.h"
28
29#define SAA7191_MODULE_VERSION "0.0.3"
30
31MODULE_DESCRIPTION("Philips SAA7191 video decoder driver");
32MODULE_VERSION(SAA7191_MODULE_VERSION);
33MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
34MODULE_LICENSE("GPL");
35
36#define VINO_ADAPTER (I2C_ALGO_SGI | I2C_HW_SGI_VINO)
37
38struct saa7191 {
39 struct i2c_client *client;
40
41 /* the register values are stored here as the actual
42 * I2C-registers are write-only */
43 unsigned char reg[25];
44
45 unsigned char norm;
46 unsigned char input;
47};
48
49static struct i2c_driver i2c_driver_saa7191;
50
51static const unsigned char initseq[] = {
52 0, /* Subaddress */
53 0x50, /* SAA7191_REG_IDEL */
54 0x30, /* SAA7191_REG_HSYB */
55 0x00, /* SAA7191_REG_HSYS */
56 0xe8, /* SAA7191_REG_HCLB */
57 0xb6, /* SAA7191_REG_HCLS */
58 0xf4, /* SAA7191_REG_HPHI */
59 0x01, /* SAA7191_REG_LUMA - chrominance trap active (CVBS) */
60 0x00, /* SAA7191_REG_HUEC */
61 0xf8, /* SAA7191_REG_CKTQ */
62 0xf8, /* SAA7191_REG_CKTS */
63 0x90, /* SAA7191_REG_PLSE */
64 0x90, /* SAA7191_REG_SESE */
65 0x00, /* SAA7191_REG_GAIN */
66 0x0c, /* SAA7191_REG_STDC - not SECAM, slow time constant */
67 0x78, /* SAA7191_REG_IOCK - chrominance from CVBS, GPSW1 & 2 off */
68 0x99, /* SAA7191_REG_CTL3 - automatic field detection */
69 0x00, /* SAA7191_REG_CTL4 */
70 0x2c, /* SAA7191_REG_CHCV */
71 0x00, /* unused */
72 0x00, /* unused */
73 0x34, /* SAA7191_REG_HS6B */
74 0x0a, /* SAA7191_REG_HS6S */
75 0xf4, /* SAA7191_REG_HC6B */
76 0xce, /* SAA7191_REG_HC6S */
77 0xf4, /* SAA7191_REG_HP6I */
78};
79
80/* SAA7191 register handling */
81
82static unsigned char saa7191_read_reg(struct i2c_client *client,
83 unsigned char reg)
84{
85 return ((struct saa7191 *)i2c_get_clientdata(client))->reg[reg];
86}
87
88static int saa7191_read_status(struct i2c_client *client,
89 unsigned char *value)
90{
91 int ret;
92
93 ret = i2c_master_recv(client, value, 1);
94 if (ret < 0) {
95 printk(KERN_ERR "SAA7191: saa7191_read_status(): read failed");
96 return ret;
97 }
98
99 return 0;
100}
101
102
103static int saa7191_write_reg(struct i2c_client *client, unsigned char reg,
104 unsigned char value)
105{
106
107 ((struct saa7191 *)i2c_get_clientdata(client))->reg[reg] = value;
108 return i2c_smbus_write_byte_data(client, reg, value);
109}
110
111/* the first byte of data must be the first subaddress number (register) */
112static int saa7191_write_block(struct i2c_client *client,
113 unsigned char length, unsigned char *data)
114{
115 int i;
116 int ret;
117
118 struct saa7191 *decoder = (struct saa7191 *)i2c_get_clientdata(client);
119 for (i = 0; i < (length - 1); i++) {
120 decoder->reg[data[0] + i] = data[i + 1];
121 }
122
123 ret = i2c_master_send(client, data, length);
124 if (ret < 0) {
125 printk(KERN_ERR "SAA7191: saa7191_write_block(): "
126 "write failed");
127 return ret;
128 }
129
130 return 0;
131}
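Because the SAA7191 registers are write-only, the driver keeps a shadow copy in decoder->reg[] and every read-modify-write cycle goes through that cache. A hypothetical helper (not part of the patch) that captures the pattern used by saa7191_set_input() and saa7191_set_norm() below:

/* Hypothetical helper: flip selected bits in a cached register value and
 * write the result back to the chip through the shadow-aware accessors. */
static int saa7191_update_bits(struct i2c_client *client, unsigned char reg,
			       unsigned char mask, unsigned char bits)
{
	unsigned char val = saa7191_read_reg(client, reg);

	val = (val & ~mask) | (bits & mask);
	return saa7191_write_reg(client, reg, val);
}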
132
133/* Helper functions */
134
135static int saa7191_set_input(struct i2c_client *client, int input)
136{
137 unsigned char luma = saa7191_read_reg(client, SAA7191_REG_LUMA);
138 unsigned char iock = saa7191_read_reg(client, SAA7191_REG_IOCK);
139 int err;
140
141 switch (input) {
142 case SAA7191_INPUT_COMPOSITE: /* Set Composite input */
143 iock &= ~(SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW1
144 | SAA7191_IOCK_GPSW2);
145 /* Chrominance trap active */
146 luma &= ~SAA7191_LUMA_BYPS;
147 break;
148 case SAA7191_INPUT_SVIDEO: /* Set S-Video input */
149 iock |= SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW2;
150 /* Chrominance trap bypassed */
151 luma |= SAA7191_LUMA_BYPS;
152 break;
153 default:
154 return -EINVAL;
155 }
156
157 err = saa7191_write_reg(client, SAA7191_REG_LUMA, luma);
158 if (err)
159 return -EIO;
160 err = saa7191_write_reg(client, SAA7191_REG_IOCK, iock);
161 if (err)
162 return -EIO;
163
164 return 0;
165}
166
167static int saa7191_set_norm(struct i2c_client *client, int norm)
168{
169 struct saa7191 *decoder = i2c_get_clientdata(client);
170 unsigned char stdc = saa7191_read_reg(client, SAA7191_REG_STDC);
171 unsigned char ctl3 = saa7191_read_reg(client, SAA7191_REG_CTL3);
172 unsigned char chcv = saa7191_read_reg(client, SAA7191_REG_CHCV);
173 int err;
174
175 switch(norm) {
176 case SAA7191_NORM_AUTO: {
177 unsigned char status;
178
179 // does status depend on current norm ?
180 if (saa7191_read_status(client, &status))
181 return -EIO;
182
183 stdc &= ~SAA7191_STDC_SECS;
184 ctl3 &= ~SAA7191_CTL3_FSEL;
185 ctl3 |= SAA7191_CTL3_AUFD;
186 chcv = (status & SAA7191_STATUS_FIDT)
187 ? SAA7191_CHCV_NTSC : SAA7191_CHCV_PAL;
188 break;
189 }
190 case SAA7191_NORM_PAL:
191 stdc &= ~SAA7191_STDC_SECS;
192 ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
193 chcv = SAA7191_CHCV_PAL;
194 break;
195 case SAA7191_NORM_NTSC:
196 stdc &= ~SAA7191_STDC_SECS;
197 ctl3 &= ~SAA7191_CTL3_AUFD;
198 ctl3 |= SAA7191_CTL3_FSEL;
199 chcv = SAA7191_CHCV_NTSC;
200 break;
201 case SAA7191_NORM_SECAM:
202 stdc |= SAA7191_STDC_SECS;
203 ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
204 chcv = SAA7191_CHCV_PAL;
205 break;
206 default:
207 return -EINVAL;
208 }
209
210 err = saa7191_write_reg(client, SAA7191_REG_CTL3, ctl3);
211 if (err)
212 return -EIO;
213 err = saa7191_write_reg(client, SAA7191_REG_STDC, stdc);
214 if (err)
215 return -EIO;
216 err = saa7191_write_reg(client, SAA7191_REG_CHCV, chcv);
217 if (err)
218 return -EIO;
219
220 decoder->norm = norm;
221
222 return 0;
223}
224
225static int saa7191_get_controls(struct i2c_client *client,
226 struct saa7191_control *ctrl)
227{
228 unsigned char hue = saa7191_read_reg(client, SAA7191_REG_HUEC);
229 unsigned char stdc = saa7191_read_reg(client, SAA7191_REG_STDC);
230
231 if (hue < 0x80) {
232 hue += 0x80;
233 } else {
234 hue -= 0x80;
235 }
236 ctrl->hue = hue;
237
238 ctrl->vtrc = (stdc & SAA7191_STDC_VTRC)
239 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
240
241 return 0;
242}
243
244static int saa7191_set_controls(struct i2c_client *client,
245 struct saa7191_control *ctrl)
246{
247 int err;
248
249 if (ctrl->hue >= 0) {
250 unsigned char hue = ctrl->hue & 0xff;
251 if (hue < 0x80) {
252 hue += 0x80;
253 } else {
254 hue -= 0x80;
255 }
256 err = saa7191_write_reg(client, SAA7191_REG_HUEC, hue);
257 if (err)
258 return -EIO;
259 }
260 if (ctrl->vtrc >= 0) {
261 unsigned char stdc =
262 saa7191_read_reg(client, SAA7191_REG_STDC);
263
264 if (ctrl->vtrc) {
265 stdc |= SAA7191_STDC_VTRC;
266 } else {
267 stdc &= ~SAA7191_STDC_VTRC;
268 }
269
270 err = saa7191_write_reg(client, SAA7191_REG_STDC, stdc);
271 if (err)
272 return -EIO;
273 }
274
275 return 0;
276}
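The +/- 0x80 branches in saa7191_get_controls() and saa7191_set_controls() convert between the 0x00..0xff API range (0x80 = no hue shift) and the register's representation, which is the same value with the top bit flipped. Both branches are equivalent to a single XOR, as this hypothetical helper shows:

static inline unsigned char saa7191_hue_flip(unsigned char hue)
{
	/* 0x00 <-> 0x80, 0x7f <-> 0xff: identical to the if/else above */
	return hue ^ 0x80;
}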
277
278/* I2C-interface */
279
280static int saa7191_attach(struct i2c_adapter *adap, int addr, int kind)
281{
282 int err = 0;
283 struct saa7191 *decoder;
284 struct i2c_client *client;
285
286 printk(KERN_INFO "Philips SAA7191 driver version %s\n",
287 SAA7191_MODULE_VERSION);
288
289 client = kmalloc(sizeof(*client), GFP_KERNEL);
290 if (!client)
291 return -ENOMEM;
292 decoder = kmalloc(sizeof(*decoder), GFP_KERNEL);
293 if (!decoder) {
294 err = -ENOMEM;
295 goto out_free_client;
296 }
297
298 memset(client, 0, sizeof(struct i2c_client));
299 memset(decoder, 0, sizeof(struct saa7191));
300
301 client->addr = addr;
302 client->adapter = adap;
303 client->driver = &i2c_driver_saa7191;
304 client->flags = 0;
305 strcpy(client->name, "saa7191 client");
306 i2c_set_clientdata(client, decoder);
307
308 decoder->client = client;
309
310 err = i2c_attach_client(client);
311 if (err)
312 goto out_free_decoder;
313
314 decoder->input = SAA7191_INPUT_COMPOSITE;
315 decoder->norm = SAA7191_NORM_AUTO;
316
317 err = saa7191_write_block(client, sizeof(initseq),
318 (unsigned char *)initseq);
319 if (err) {
320 printk(KERN_ERR "SAA7191 initialization failed\n");
321 goto out_detach_client;
322 }
323
324 printk(KERN_INFO "SAA7191 initialized\n");
325
326 return 0;
327
328out_detach_client:
329 i2c_detach_client(client);
330out_free_decoder:
331 kfree(decoder);
332out_free_client:
333 kfree(client);
334 return err;
335}
336
337static int saa7191_probe(struct i2c_adapter *adap)
338{
339 /* Always connected to VINO */
340 if (adap->id == VINO_ADAPTER)
341 return saa7191_attach(adap, SAA7191_ADDR, 0);
342 /* Feel free to add probe here :-) */
343 return -ENODEV;
344}
345
346static int saa7191_detach(struct i2c_client *client)
347{
348 struct saa7191 *decoder = i2c_get_clientdata(client);
349
350 i2c_detach_client(client);
351 kfree(decoder);
352 kfree(client);
353 return 0;
354}
355
356static int saa7191_command(struct i2c_client *client, unsigned int cmd,
357 void *arg)
358{
359 struct saa7191 *decoder = i2c_get_clientdata(client);
360
361 switch (cmd) {
362 case DECODER_GET_CAPABILITIES: {
363 struct video_decoder_capability *cap = arg;
364
365 cap->flags = VIDEO_DECODER_PAL | VIDEO_DECODER_NTSC |
366 VIDEO_DECODER_SECAM | VIDEO_DECODER_AUTO;
367 cap->inputs = (client->adapter->id == VINO_ADAPTER) ? 2 : 1;
368 cap->outputs = 1;
369 break;
370 }
371 case DECODER_GET_STATUS: {
372 int *iarg = arg;
373 unsigned char status;
374 int res = 0;
375
376 if (saa7191_read_status(client, &status)) {
377 return -EIO;
378 }
379 if ((status & SAA7191_STATUS_HLCK) == 0)
380 res |= DECODER_STATUS_GOOD;
381 if (status & SAA7191_STATUS_CODE)
382 res |= DECODER_STATUS_COLOR;
383 switch (decoder->norm) {
384 case SAA7191_NORM_NTSC:
385 res |= DECODER_STATUS_NTSC;
386 break;
387 case SAA7191_NORM_PAL:
388 res |= DECODER_STATUS_PAL;
389 break;
390 case SAA7191_NORM_SECAM:
391 res |= DECODER_STATUS_SECAM;
392 break;
393 case SAA7191_NORM_AUTO:
394 default:
395 if (status & SAA7191_STATUS_FIDT)
396 res |= DECODER_STATUS_NTSC;
397 else
398 res |= DECODER_STATUS_PAL;
399 break;
400 }
401 *iarg = res;
402 break;
403 }
404 case DECODER_SET_NORM: {
405 int *iarg = arg;
406
407 switch (*iarg) {
408 case VIDEO_MODE_AUTO:
409 return saa7191_set_norm(client, SAA7191_NORM_AUTO);
410 case VIDEO_MODE_PAL:
411 return saa7191_set_norm(client, SAA7191_NORM_PAL);
412 case VIDEO_MODE_NTSC:
413 return saa7191_set_norm(client, SAA7191_NORM_NTSC);
414 case VIDEO_MODE_SECAM:
415 return saa7191_set_norm(client, SAA7191_NORM_SECAM);
416 default:
417 return -EINVAL;
418 }
419 break;
420 }
421 case DECODER_SET_INPUT: {
422 int *iarg = arg;
423
424 switch (client->adapter->id) {
425 case VINO_ADAPTER:
426 return saa7191_set_input(client, *iarg);
427 default:
428 if (*iarg != 0)
429 return -EINVAL;
430 }
431 break;
432 }
433 case DECODER_SET_OUTPUT: {
434 int *iarg = arg;
435
436 /* not much choice of outputs */
437 if (*iarg != 0)
438 return -EINVAL;
439 break;
440 }
441 case DECODER_ENABLE_OUTPUT: {
442 /* Always enabled */
443 break;
444 }
445 case DECODER_SET_PICTURE: {
446 struct video_picture *pic = arg;
447 unsigned val;
448 int err;
449
450 val = (pic->hue >> 8) - 0x80;
451 err = saa7191_write_reg(client, SAA7191_REG_HUEC, val);
452 if (err)
453 return -EIO;
454 break;
455 }
456 case DECODER_SAA7191_GET_STATUS: {
457 struct saa7191_status *status = arg;
458 unsigned char status_reg;
459
460 if (saa7191_read_status(client, &status_reg))
461 return -EIO;
462 status->signal = ((status_reg & SAA7191_STATUS_HLCK) == 0)
463 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
464 status->ntsc = (status_reg & SAA7191_STATUS_FIDT)
465 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
466 status->color = (status_reg & SAA7191_STATUS_CODE)
467 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
468
469 status->input = decoder->input;
470 status->norm = decoder->norm;
471 }
472 case DECODER_SAA7191_SET_NORM: {
473 int *norm = arg;
474 return saa7191_set_norm(client, *norm);
475 }
476 case DECODER_SAA7191_GET_CONTROLS: {
477 struct saa7191_control *ctrl = arg;
478 return saa7191_get_controls(client, ctrl);
479 }
480 case DECODER_SAA7191_SET_CONTROLS: {
481 struct saa7191_control *ctrl = arg;
482 return saa7191_set_controls(client, ctrl);
483 }
484 default:
485 return -EINVAL;
486 }
487
488 return 0;
489}
490
491static struct i2c_driver i2c_driver_saa7191 = {
492 .owner = THIS_MODULE,
493 .name = "saa7191",
494 .id = I2C_DRIVERID_SAA7191,
495 .flags = I2C_DF_NOTIFY,
496 .attach_adapter = saa7191_probe,
497 .detach_client = saa7191_detach,
498 .command = saa7191_command
499};
500
501static int saa7191_init(void)
502{
503 return i2c_add_driver(&i2c_driver_saa7191);
504}
505
506static void saa7191_exit(void)
507{
508 i2c_del_driver(&i2c_driver_saa7191);
509}
510
511module_init(saa7191_init);
512module_exit(saa7191_exit);
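The driver exposes its functionality only through the legacy command() hook, which a bridge driver such as VINO reaches via client->driver->command(). A hypothetical in-kernel caller, using the ioctl codes defined in saa7191.h below:

static int example_decoder_set_secam(struct i2c_client *decoder)
{
	int norm = SAA7191_NORM_SECAM;

	/* dispatches to saa7191_command(decoder, DECODER_SAA7191_SET_NORM, &norm) */
	return decoder->driver->command(decoder, DECODER_SAA7191_SET_NORM, &norm);
}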
diff --git a/drivers/media/video/saa7191.h b/drivers/media/video/saa7191.h
new file mode 100644
index 000000000000..272045031435
--- /dev/null
+++ b/drivers/media/video/saa7191.h
@@ -0,0 +1,139 @@
1/*
2 * saa7191.h - Philips SAA7191 video decoder driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _SAA7191_H_
13#define _SAA7191_H_
14
15/* Philips SAA7191 DMSD I2C bus address */
16#define SAA7191_ADDR 0x8a
17
18/* Register subaddresses. */
19#define SAA7191_REG_IDEL 0x00
20#define SAA7191_REG_HSYB 0x01
21#define SAA7191_REG_HSYS 0x02
22#define SAA7191_REG_HCLB 0x03
23#define SAA7191_REG_HCLS 0x04
24#define SAA7191_REG_HPHI 0x05
25#define SAA7191_REG_LUMA 0x06
26#define SAA7191_REG_HUEC 0x07
27#define SAA7191_REG_CKTQ 0x08
28#define SAA7191_REG_CKTS 0x09
29#define SAA7191_REG_PLSE 0x0a
30#define SAA7191_REG_SESE 0x0b
31#define SAA7191_REG_GAIN 0x0c
32#define SAA7191_REG_STDC 0x0d
33#define SAA7191_REG_IOCK 0x0e
34#define SAA7191_REG_CTL3 0x0f
35#define SAA7191_REG_CTL4 0x10
36#define SAA7191_REG_CHCV 0x11
37#define SAA7191_REG_HS6B 0x14
38#define SAA7191_REG_HS6S 0x15
39#define SAA7191_REG_HC6B 0x16
40#define SAA7191_REG_HC6S 0x17
41#define SAA7191_REG_HP6I 0x18
42#define SAA7191_REG_STATUS 0xff /* not really a subaddress */
43
44/* Status Register definitions */
45#define SAA7191_STATUS_CODE 0x01 /* color detected flag */
46#define SAA7191_STATUS_FIDT 0x20 /* format type NTSC/PAL */
47#define SAA7191_STATUS_HLCK 0x40 /* PLL unlocked/locked */
48#define SAA7191_STATUS_STTC 0x80 /* tv/vtr time constant */
49
50/* Luminance Control Register definitions */
51#define SAA7191_LUMA_BYPS 0x80
52
53/* Chroma Gain Control Settings Register definitions */
54/* 0=automatic colour-killer enabled, 1=forced colour on */
55#define SAA7191_GAIN_COLO 0x80
56
57/* Standard/Mode Control Register definitions */
58/* tv/vtr mode bit: 0=TV mode (slow time constant),
59 * 1=VTR mode (fast time constant) */
60#define SAA7191_STDC_VTRC 0x80
61/* SECAM mode bit: 0=other standards, 1=SECAM */
62#define SAA7191_STDC_SECS 0x01
63/* the bit fields above must be or'd with this value */
64#define SAA7191_STDC_VALUE 0x0c
65
66/* I/O and Clock Control Register definitions */
67/* horizontal clock PLL: 0=PLL closed,
68 * 1=PLL circuit open and horizontal freq fixed */
69#define SAA7191_IOCK_HPLL 0x80
70/* S-VHS bit (chrominance from CVBS or from chrominance input):
71 * 0=controlled by BYPS-bit, 1=from chrominance input */
72#define SAA7191_IOCK_CHRS 0x04
73/* general purpose switch 2
74 * VINO-specific: 0=used with CVBS, 1=used with S-Video */
75#define SAA7191_IOCK_GPSW2 0x02
76/* general purpose switch 1 */
77/* VINO-specific: 0=always, 1=not used!*/
78#define SAA7191_IOCK_GPSW1 0x01
79
80/* Miscellaneous Control #1 Register definitions */
81/* automatic field detection (50/60Hz standard) */
82#define SAA7191_CTL3_AUFD 0x80
83/* field select: (if AUFD=0)
84 * 0=50Hz (625 lines), 1=60Hz (525 lines) */
85#define SAA7191_CTL3_FSEL 0x40
86/* the bit fields above must be or'd with this value */
87#define SAA7191_CTL3_VALUE 0x19
88
89/* Chrominance Gain Control Register definitions
90 * (nominal value for UV CCIR level) */
91#define SAA7191_CHCV_NTSC 0x2c
92#define SAA7191_CHCV_PAL 0x59
93
94/* Driver interface definitions */
95#define SAA7191_INPUT_COMPOSITE 0
96#define SAA7191_INPUT_SVIDEO 1
97
98#define SAA7191_NORM_AUTO 0
99#define SAA7191_NORM_PAL 1
100#define SAA7191_NORM_NTSC 2
101#define SAA7191_NORM_SECAM 3
102
103#define SAA7191_VALUE_ENABLED 1
104#define SAA7191_VALUE_DISABLED 0
105#define SAA7191_VALUE_UNCHANGED -1
106
107struct saa7191_status {
108 /* 0=no signal, 1=signal active*/
109 int signal;
110 /* 0=50hz (pal) signal, 1=60hz (ntsc) signal */
111 int ntsc;
112 /* 0=no color detected, 1=color detected */
113 int color;
114
115 /* current SAA7191_INPUT_ */
116 int input;
117 /* current SAA7191_NORM_ */
118 int norm;
119};
120
121#define SAA7191_HUE_MIN 0x00
122#define SAA7191_HUE_MAX 0xff
123#define SAA7191_HUE_DEFAULT 0x80
124
125#define SAA7191_VTRC_MIN 0x00
126#define SAA7191_VTRC_MAX 0x01
127#define SAA7191_VTRC_DEFAULT 0x00
128
129struct saa7191_control {
130 int hue;
131 int vtrc;
132};
133
134#define DECODER_SAA7191_GET_STATUS _IOR('d', 195, struct saa7191_status)
135#define DECODER_SAA7191_SET_NORM _IOW('d', 196, int)
136#define DECODER_SAA7191_GET_CONTROLS _IOR('d', 197, struct saa7191_control)
137#define DECODER_SAA7191_SET_CONTROLS _IOW('d', 198, struct saa7191_control)
138
139#endif
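Note the SAA7191_VALUE_UNCHANGED marker: saa7191_set_controls() only touches fields that are >= 0, so a caller can update one control at a time. A hypothetical example:

/* Set hue back to its default, leave the VTR time constant as it is. */
struct saa7191_control ctrl = {
	.hue	= SAA7191_HUE_DEFAULT,
	.vtrc	= SAA7191_VALUE_UNCHANGED,	/* -1: skipped by set_controls() */
};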
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 7cb1fb3e66f9..255b6088ebf9 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -328,7 +328,7 @@ static int tda7432_probe(struct i2c_adapter *adap)
328 if (adap->class & I2C_CLASS_TV_ANALOG) 328 if (adap->class & I2C_CLASS_TV_ANALOG)
329 return i2c_probe(adap, &addr_data, tda7432_attach); 329 return i2c_probe(adap, &addr_data, tda7432_attach);
330#else 330#else
331 if (adap->id == (I2C_ALGO_BIT | I2C_HW_B_BT848)) 331 if (adap->id == I2C_HW_B_BT848)
332 return i2c_probe(adap, &addr_data, tda7432_attach); 332 return i2c_probe(adap, &addr_data, tda7432_attach);
333#endif 333#endif
334 return 0; 334 return 0;
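The same id simplification is applied to every probe routine in this series (tda9875, tda9887, tuner-3036, tveeprom, tvmixer and tvaudio below): adapter ids no longer carry algorithm bits, so the comparison uses the bare I2C_HW_* constant. Sketch of the pattern, mirroring the hunk above:

static int example_probe(struct i2c_adapter *adap)
{
	/* was: if (adap->id == (I2C_ALGO_BIT | I2C_HW_B_BT848)) */
	if (adap->id == I2C_HW_B_BT848)
		return i2c_probe(adap, &addr_data, tda7432_attach);
	return 0;
}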
@@ -513,7 +513,7 @@ static struct i2c_driver driver = {
513 513
514static struct i2c_client client_template = 514static struct i2c_client client_template =
515{ 515{
516 I2C_DEVNAME("tda7432"), 516 .name = "tda7432",
517 .driver = &driver, 517 .driver = &driver,
518}; 518};
519 519
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index c29bdfc3244e..1794686612c6 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -205,7 +205,7 @@ static int detect(struct i2c_adapter *adapter, int address, int kind)
205static int attach(struct i2c_adapter *adapter) 205static int attach(struct i2c_adapter *adapter)
206{ 206{
207 /* let's see whether this is a known adapter we can attach to */ 207 /* let's see whether this is a known adapter we can attach to */

208 if (adapter->id != I2C_ALGO_SAA7146) { 208 if (adapter->id != I2C_HW_SAA7146) {
209 dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id); 209 dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id);
210 return -ENODEV; 210 return -ENODEV;
211 } 211 }
@@ -231,7 +231,7 @@ static struct i2c_driver driver = {
231}; 231};
232 232
233static struct i2c_client client_template = { 233static struct i2c_client client_template = {
234 I2C_DEVNAME("tda9840"), 234 .name = "tda9840",
235 .driver = &driver, 235 .driver = &driver,
236}; 236};
237 237
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
index 566e1a5ca135..7e3dcdb262b0 100644
--- a/drivers/media/video/tda9875.c
+++ b/drivers/media/video/tda9875.c
@@ -262,7 +262,7 @@ static int tda9875_probe(struct i2c_adapter *adap)
262 if (adap->class & I2C_CLASS_TV_ANALOG) 262 if (adap->class & I2C_CLASS_TV_ANALOG)
263 return i2c_probe(adap, &addr_data, tda9875_attach); 263 return i2c_probe(adap, &addr_data, tda9875_attach);
264#else 264#else
265 if (adap->id == (I2C_ALGO_BIT | I2C_HW_B_BT848)) 265 if (adap->id == I2C_HW_B_BT848)
266 return i2c_probe(adap, &addr_data, tda9875_attach); 266 return i2c_probe(adap, &addr_data, tda9875_attach);
267#endif 267#endif
268 return 0; 268 return 0;
@@ -384,7 +384,7 @@ static struct i2c_driver driver = {
384 384
385static struct i2c_client client_template = 385static struct i2c_client client_template =
386{ 386{
387 I2C_DEVNAME("tda9875"), 387 .name = "tda9875",
388 .driver = &driver, 388 .driver = &driver,
389}; 389};
390 390
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index 108c3ad7d622..d60fc562aecd 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -618,9 +618,9 @@ static int tda9887_probe(struct i2c_adapter *adap)
618 return i2c_probe(adap, &addr_data, tda9887_attach); 618 return i2c_probe(adap, &addr_data, tda9887_attach);
619#else 619#else
620 switch (adap->id) { 620 switch (adap->id) {
621 case I2C_ALGO_BIT | I2C_HW_B_BT848: 621 case I2C_HW_B_BT848:
622 case I2C_ALGO_BIT | I2C_HW_B_RIVA: 622 case I2C_HW_B_RIVA:
623 case I2C_ALGO_SAA7134: 623 case I2C_HW_SAA7134:
624 return i2c_probe(adap, &addr_data, tda9887_attach); 624 return i2c_probe(adap, &addr_data, tda9887_attach);
625 break; 625 break;
626 } 626 }
@@ -760,7 +760,7 @@ tda9887_command(struct i2c_client *client, unsigned int cmd, void *arg)
760 return 0; 760 return 0;
761} 761}
762 762
763static int tda9887_suspend(struct device * dev, u32 state, u32 level) 763static int tda9887_suspend(struct device * dev, pm_message_t state, u32 level)
764{ 764{
765 dprintk("tda9887: suspend\n"); 765 dprintk("tda9887: suspend\n");
766 return 0; 766 return 0;
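The second parameter of the driver-model suspend callback changed type from u32 to pm_message_t; the body and the trailing level argument stay as they were. Sketch, following the hunk above:

static int example_suspend(struct device *dev, pm_message_t state, u32 level)
{
	dprintk("tda9887: suspend\n");
	return 0;
}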
@@ -793,7 +793,7 @@ static struct i2c_driver driver = {
793}; 793};
794static struct i2c_client client_template = 794static struct i2c_client client_template =
795{ 795{
796 I2C_DEVNAME("tda9887"), 796 .name = "tda9887",
797 .flags = I2C_CLIENT_ALLOW_USE, 797 .flags = I2C_CLIENT_ALLOW_USE,
798 .driver = &driver, 798 .driver = &driver,
799}; 799};
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index b44db8a7b94d..ee3688348b66 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -86,7 +86,7 @@ static int detect(struct i2c_adapter *adapter, int address, int kind)
86static int attach(struct i2c_adapter *adapter) 86static int attach(struct i2c_adapter *adapter)
87{ 87{
88 /* let's see whether this is a known adapter we can attach to */ 88 /* let's see whether this is a known adapter we can attach to */
89 if (adapter->id != I2C_ALGO_SAA7146) { 89 if (adapter->id != I2C_HW_SAA7146) {
90 dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id); 90 dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id);
91 return -ENODEV; 91 return -ENODEV;
92 } 92 }
@@ -200,7 +200,7 @@ static struct i2c_driver driver = {
200}; 200};
201 201
202static struct i2c_client client_template = { 202static struct i2c_client client_template = {
203 I2C_DEVNAME("tea6415c"), 203 .name = "tea6415c",
204 .driver = &driver, 204 .driver = &driver,
205}; 205};
206 206
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index 48d4db7d507b..17975c19da5e 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -135,7 +135,7 @@ static int tea6420_detect(struct i2c_adapter *adapter, int address, int kind)
135static int attach(struct i2c_adapter *adapter) 135static int attach(struct i2c_adapter *adapter)
136{ 136{
137 /* let's see whether this is a known adapter we can attach to */ 137 /* let's see whether this is a known adapter we can attach to */
138 if (adapter->id != I2C_ALGO_SAA7146) { 138 if (adapter->id != I2C_HW_SAA7146) {
139 dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id); 139 dprintk("refusing to probe on unknown adapter [name='%s',id=0x%x]\n", adapter->name, adapter->id);
140 return -ENODEV; 140 return -ENODEV;
141 } 141 }
@@ -177,7 +177,7 @@ static struct i2c_driver driver = {
177}; 177};
178 178
179static struct i2c_client client_template = { 179static struct i2c_client client_template = {
180 I2C_DEVNAME("tea6420"), 180 .name = "tea6420",
181 .driver = &driver, 181 .driver = &driver,
182}; 182};
183 183
diff --git a/drivers/media/video/tuner-3036.c b/drivers/media/video/tuner-3036.c
index 7d825e510ffd..79203595b9c1 100644
--- a/drivers/media/video/tuner-3036.c
+++ b/drivers/media/video/tuner-3036.c
@@ -41,7 +41,6 @@ static struct i2c_client_address_data addr_data = {
41 .normal_i2c = normal_i2c, 41 .normal_i2c = normal_i2c,
42 .probe = &ignore, 42 .probe = &ignore,
43 .ignore = &ignore, 43 .ignore = &ignore,
44 .force = &ignore,
45}; 44};
46 45
47/* ---------------------------------------------------------------------- */ 46/* ---------------------------------------------------------------------- */
@@ -166,7 +165,7 @@ static int
166tuner_probe(struct i2c_adapter *adap) 165tuner_probe(struct i2c_adapter *adap)
167{ 166{
168 this_adap = 0; 167 this_adap = 0;
169 if (adap->id == (I2C_ALGO_BIT | I2C_HW_B_LP)) 168 if (adap->id == I2C_HW_B_LP)
170 return i2c_probe(adap, &addr_data, tuner_attach); 169 return i2c_probe(adap, &addr_data, tuner_attach);
171 return 0; 170 return 0;
172} 171}
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index f0a579827a24..3b1893c2ae3b 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -672,7 +672,7 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
672 return 0; 672 return 0;
673} 673}
674 674
675static int tuner_suspend(struct device *dev, u32 state, u32 level) 675static int tuner_suspend(struct device *dev, pm_message_t state, u32 level)
676{ 676{
677 struct i2c_client *c = container_of (dev, struct i2c_client, dev); 677 struct i2c_client *c = container_of (dev, struct i2c_client, dev);
678 struct tuner *t = i2c_get_clientdata (c); 678 struct tuner *t = i2c_get_clientdata (c);
@@ -709,7 +709,7 @@ static struct i2c_driver driver = {
709 }, 709 },
710}; 710};
711static struct i2c_client client_template = { 711static struct i2c_client client_template = {
712 I2C_DEVNAME("(tuner unset)"), 712 .name = "(tuner unset)",
713 .flags = I2C_CLIENT_ALLOW_USE, 713 .flags = I2C_CLIENT_ALLOW_USE,
714 .driver = &driver, 714 .driver = &driver,
715}; 715};
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index f42a1efa8fcf..258724b2d6d2 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -162,24 +162,23 @@ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
162 unsigned char buffer[2]; 162 unsigned char buffer[2];
163 163
164 if (-1 == subaddr) { 164 if (-1 == subaddr) {
165 dprintk("%s: chip_write: 0x%x\n", 165 dprintk("%s: chip_write: 0x%x\n", chip->c.name, val);
166 i2c_clientname(&chip->c), val);
167 chip->shadow.bytes[1] = val; 166 chip->shadow.bytes[1] = val;
168 buffer[0] = val; 167 buffer[0] = val;
169 if (1 != i2c_master_send(&chip->c,buffer,1)) { 168 if (1 != i2c_master_send(&chip->c,buffer,1)) {
170 printk(KERN_WARNING "%s: I/O error (write 0x%x)\n", 169 printk(KERN_WARNING "%s: I/O error (write 0x%x)\n",
171 i2c_clientname(&chip->c), val); 170 chip->c.name, val);
172 return -1; 171 return -1;
173 } 172 }
174 } else { 173 } else {
175 dprintk("%s: chip_write: reg%d=0x%x\n", 174 dprintk("%s: chip_write: reg%d=0x%x\n",
176 i2c_clientname(&chip->c), subaddr, val); 175 chip->c.name, subaddr, val);
177 chip->shadow.bytes[subaddr+1] = val; 176 chip->shadow.bytes[subaddr+1] = val;
178 buffer[0] = subaddr; 177 buffer[0] = subaddr;
179 buffer[1] = val; 178 buffer[1] = val;
180 if (2 != i2c_master_send(&chip->c,buffer,2)) { 179 if (2 != i2c_master_send(&chip->c,buffer,2)) {
181 printk(KERN_WARNING "%s: I/O error (write reg%d=0x%x)\n", 180 printk(KERN_WARNING "%s: I/O error (write reg%d=0x%x)\n",
182 i2c_clientname(&chip->c), subaddr, val); 181 chip->c.name, subaddr, val);
183 return -1; 182 return -1;
184 } 183 }
185 } 184 }
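tvaudio.c is the heaviest user of the removed i2c_clientname() helper. Presumably (an assumption about the old i2c.h, not shown in this patch) it was nothing more than an accessor for the client's name field, which is why every call site collapses to a direct chip->c.name read:

/* Presumed old definition (assumption, for reference only): */
static inline char *i2c_clientname(struct i2c_client *c)
{
	return &c->name[0];
}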
@@ -203,11 +202,10 @@ static int chip_read(struct CHIPSTATE *chip)
203 unsigned char buffer; 202 unsigned char buffer;
204 203
205 if (1 != i2c_master_recv(&chip->c,&buffer,1)) { 204 if (1 != i2c_master_recv(&chip->c,&buffer,1)) {
206 printk(KERN_WARNING "%s: I/O error (read)\n", 205 printk(KERN_WARNING "%s: I/O error (read)\n", chip->c.name);
207 i2c_clientname(&chip->c));
208 return -1; 206 return -1;
209 } 207 }
210 dprintk("%s: chip_read: 0x%x\n",i2c_clientname(&chip->c),buffer); 208 dprintk("%s: chip_read: 0x%x\n", chip->c.name, buffer);
211 return buffer; 209 return buffer;
212} 210}
213 211
@@ -222,12 +220,11 @@ static int chip_read2(struct CHIPSTATE *chip, int subaddr)
222 write[0] = subaddr; 220 write[0] = subaddr;
223 221
224 if (2 != i2c_transfer(chip->c.adapter,msgs,2)) { 222 if (2 != i2c_transfer(chip->c.adapter,msgs,2)) {
225 printk(KERN_WARNING "%s: I/O error (read2)\n", 223 printk(KERN_WARNING "%s: I/O error (read2)\n", chip->c.name);
226 i2c_clientname(&chip->c));
227 return -1; 224 return -1;
228 } 225 }
229 dprintk("%s: chip_read2: reg%d=0x%x\n", 226 dprintk("%s: chip_read2: reg%d=0x%x\n",
230 i2c_clientname(&chip->c),subaddr,read[0]); 227 chip->c.name, subaddr, read[0]);
231 return read[0]; 228 return read[0];
232} 229}
233 230
@@ -240,7 +237,7 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
240 237
241 /* update our shadow register set; print bytes if (debug > 0) */ 238 /* update our shadow register set; print bytes if (debug > 0) */
242 dprintk("%s: chip_cmd(%s): reg=%d, data:", 239 dprintk("%s: chip_cmd(%s): reg=%d, data:",
243 i2c_clientname(&chip->c),name,cmd->bytes[0]); 240 chip->c.name, name, cmd->bytes[0]);
244 for (i = 1; i < cmd->count; i++) { 241 for (i = 1; i < cmd->count; i++) {
245 dprintk(" 0x%x",cmd->bytes[i]); 242 dprintk(" 0x%x",cmd->bytes[i]);
246 chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i]; 243 chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i];
@@ -249,7 +246,7 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
249 246
250 /* send data to the chip */ 247 /* send data to the chip */
251 if (cmd->count != i2c_master_send(&chip->c,cmd->bytes,cmd->count)) { 248 if (cmd->count != i2c_master_send(&chip->c,cmd->bytes,cmd->count)) {
252 printk(KERN_WARNING "%s: I/O error (%s)\n", i2c_clientname(&chip->c), name); 249 printk(KERN_WARNING "%s: I/O error (%s)\n", chip->c.name, name);
253 return -1; 250 return -1;
254 } 251 }
255 return 0; 252 return 0;
@@ -274,9 +271,9 @@ static int chip_thread(void *data)
274 struct CHIPSTATE *chip = data; 271 struct CHIPSTATE *chip = data;
275 struct CHIPDESC *desc = chiplist + chip->type; 272 struct CHIPDESC *desc = chiplist + chip->type;
276 273
277 daemonize("%s",i2c_clientname(&chip->c)); 274 daemonize("%s", chip->c.name);
278 allow_signal(SIGTERM); 275 allow_signal(SIGTERM);
279 dprintk("%s: thread started\n", i2c_clientname(&chip->c)); 276 dprintk("%s: thread started\n", chip->c.name);
280 277
281 for (;;) { 278 for (;;) {
282 add_wait_queue(&chip->wq, &wait); 279 add_wait_queue(&chip->wq, &wait);
@@ -288,7 +285,7 @@ static int chip_thread(void *data)
288 try_to_freeze(); 285 try_to_freeze();
289 if (chip->done || signal_pending(current)) 286 if (chip->done || signal_pending(current))
290 break; 287 break;
291 dprintk("%s: thread wakeup\n", i2c_clientname(&chip->c)); 288 dprintk("%s: thread wakeup\n", chip->c.name);
292 289
293 /* don't do anything for radio or if mode != auto */ 290 /* don't do anything for radio or if mode != auto */
294 if (chip->norm == VIDEO_MODE_RADIO || chip->mode != 0) 291 if (chip->norm == VIDEO_MODE_RADIO || chip->mode != 0)
@@ -301,7 +298,7 @@ static int chip_thread(void *data)
301 mod_timer(&chip->wt, jiffies+2*HZ); 298 mod_timer(&chip->wt, jiffies+2*HZ);
302 } 299 }
303 300
304 dprintk("%s: thread exiting\n", i2c_clientname(&chip->c)); 301 dprintk("%s: thread exiting\n", chip->c.name);
305 complete_and_exit(&chip->texit, 0); 302 complete_and_exit(&chip->texit, 0);
306 return 0; 303 return 0;
307} 304}
@@ -314,7 +311,7 @@ static void generic_checkmode(struct CHIPSTATE *chip)
314 if (mode == chip->prevmode) 311 if (mode == chip->prevmode)
315 return; 312 return;
316 313
317 dprintk("%s: thread checkmode\n", i2c_clientname(&chip->c)); 314 dprintk("%s: thread checkmode\n", chip->c.name);
318 chip->prevmode = mode; 315 chip->prevmode = mode;
319 316
320 if (mode & VIDEO_SOUND_STEREO) 317 if (mode & VIDEO_SOUND_STEREO)
@@ -1098,7 +1095,7 @@ static int tda8425_initialize(struct CHIPSTATE *chip)
1098 /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF, 1095 /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF,
1099 /* off */ TDA8425_S1_OFF, /* on */ TDA8425_S1_CH2}; 1096 /* off */ TDA8425_S1_OFF, /* on */ TDA8425_S1_CH2};
1100 1097
1101 if (chip->c.adapter->id == (I2C_ALGO_BIT | I2C_HW_B_RIVA)) { 1098 if (chip->c.adapter->id == I2C_HW_B_RIVA) {
1102 memcpy (desc->inputmap, inputmap, sizeof (inputmap)); 1099 memcpy (desc->inputmap, inputmap, sizeof (inputmap));
1103 } 1100 }
1104 return 0; 1101 return 0;
@@ -1501,7 +1498,7 @@ static int chip_attach(struct i2c_adapter *adap, int addr, int kind)
1501 (desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : ""); 1498 (desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : "");
1502 1499
1503 /* fill required data structures */ 1500 /* fill required data structures */
1504 strcpy(i2c_clientname(&chip->c),desc->name); 1501 strcpy(chip->c.name, desc->name);
1505 chip->type = desc-chiplist; 1502 chip->type = desc-chiplist;
1506 chip->shadow.count = desc->registers+1; 1503 chip->shadow.count = desc->registers+1;
1507 chip->prevmode = -1; 1504 chip->prevmode = -1;
@@ -1538,7 +1535,7 @@ static int chip_attach(struct i2c_adapter *adap, int addr, int kind)
1538 chip->tpid = kernel_thread(chip_thread,(void *)chip,0); 1535 chip->tpid = kernel_thread(chip_thread,(void *)chip,0);
1539 if (chip->tpid < 0) 1536 if (chip->tpid < 0)
1540 printk(KERN_WARNING "%s: kernel_thread() failed\n", 1537 printk(KERN_WARNING "%s: kernel_thread() failed\n",
1541 i2c_clientname(&chip->c)); 1538 chip->c.name);
1542 wake_up_interruptible(&chip->wq); 1539 wake_up_interruptible(&chip->wq);
1543 } 1540 }
1544 return 0; 1541 return 0;
@@ -1548,16 +1545,16 @@ static int chip_probe(struct i2c_adapter *adap)
1548{ 1545{
1549 /* don't attach on saa7146 based cards, 1546 /* don't attach on saa7146 based cards,
1550 because dedicated drivers are used */ 1547 because dedicated drivers are used */
1551 if ((adap->id & I2C_ALGO_SAA7146)) 1548 if (adap->id == I2C_HW_SAA7146)
1552 return 0; 1549 return 0;
1553#ifdef I2C_CLASS_TV_ANALOG 1550#ifdef I2C_CLASS_TV_ANALOG
1554 if (adap->class & I2C_CLASS_TV_ANALOG) 1551 if (adap->class & I2C_CLASS_TV_ANALOG)
1555 return i2c_probe(adap, &addr_data, chip_attach); 1552 return i2c_probe(adap, &addr_data, chip_attach);
1556#else 1553#else
1557 switch (adap->id) { 1554 switch (adap->id) {
1558 case I2C_ALGO_BIT | I2C_HW_B_BT848: 1555 case I2C_HW_B_BT848:
1559 case I2C_ALGO_BIT | I2C_HW_B_RIVA: 1556 case I2C_HW_B_RIVA:
1560 case I2C_ALGO_SAA7134: 1557 case I2C_HW_SAA7134:
1561 return i2c_probe(adap, &addr_data, chip_attach); 1558 return i2c_probe(adap, &addr_data, chip_attach);
1562 } 1559 }
1563#endif 1560#endif
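The old skip test masked the adapter id against I2C_ALGO_SAA7146; with algorithm bits gone from the id, such bitwise tests no longer mean anything and the check becomes a plain equality comparison. Sketch:

static int example_is_saa7146(struct i2c_adapter *adap)
{
	/* was: if ((adap->id & I2C_ALGO_SAA7146)) */
	return adap->id == I2C_HW_SAA7146;
}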
@@ -1591,7 +1588,7 @@ static int chip_command(struct i2c_client *client,
1591 struct CHIPSTATE *chip = i2c_get_clientdata(client); 1588 struct CHIPSTATE *chip = i2c_get_clientdata(client);
1592 struct CHIPDESC *desc = chiplist + chip->type; 1589 struct CHIPDESC *desc = chiplist + chip->type;
1593 1590
1594 dprintk("%s: chip_command 0x%x\n",i2c_clientname(&chip->c),cmd); 1591 dprintk("%s: chip_command 0x%x\n", chip->c.name, cmd);
1595 1592
1596 switch (cmd) { 1593 switch (cmd) {
1597 case AUDC_SET_INPUT: 1594 case AUDC_SET_INPUT:
@@ -1702,7 +1699,7 @@ static struct i2c_driver driver = {
1702 1699
1703static struct i2c_client client_template = 1700static struct i2c_client client_template =
1704{ 1701{
1705 I2C_DEVNAME("(unset)"), 1702 .name = "(unset)",
1706 .flags = I2C_CLIENT_ALLOW_USE, 1703 .flags = I2C_CLIENT_ALLOW_USE,
1707 .driver = &driver, 1704 .driver = &driver,
1708}; 1705};
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 127ec38ebd60..3c3356a01cc6 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -534,7 +534,7 @@ static int
534tveeprom_attach_adapter (struct i2c_adapter *adapter) 534tveeprom_attach_adapter (struct i2c_adapter *adapter)
535{ 535{
536 dprintk(1,"%s: id 0x%x\n",__FUNCTION__,adapter->id); 536 dprintk(1,"%s: id 0x%x\n",__FUNCTION__,adapter->id);
537 if (adapter->id != (I2C_ALGO_BIT | I2C_HW_B_BT848)) 537 if (adapter->id != I2C_HW_B_BT848)
538 return 0; 538 return 0;
539 return i2c_probe(adapter, &addr_data, tveeprom_detect_client); 539 return i2c_probe(adapter, &addr_data, tveeprom_detect_client);
540} 540}
diff --git a/drivers/media/video/tvmixer.c b/drivers/media/video/tvmixer.c
index 51b99cdbf29e..a43301a154af 100644
--- a/drivers/media/video/tvmixer.c
+++ b/drivers/media/video/tvmixer.c
@@ -91,7 +91,7 @@ static int tvmixer_ioctl(struct inode *inode, struct file *file, unsigned int cm
91 if (cmd == SOUND_MIXER_INFO) { 91 if (cmd == SOUND_MIXER_INFO) {
92 mixer_info info; 92 mixer_info info;
93 strlcpy(info.id, "tv card", sizeof(info.id)); 93 strlcpy(info.id, "tv card", sizeof(info.id));
94 strlcpy(info.name, i2c_clientname(client), sizeof(info.name)); 94 strlcpy(info.name, client->name, sizeof(info.name));
95 info.modify_counter = 42 /* FIXME */; 95 info.modify_counter = 42 /* FIXME */;
96 if (copy_to_user(argp, &info, sizeof(info))) 96 if (copy_to_user(argp, &info, sizeof(info)))
97 return -EFAULT; 97 return -EFAULT;
@@ -100,7 +100,7 @@ static int tvmixer_ioctl(struct inode *inode, struct file *file, unsigned int cm
100 if (cmd == SOUND_OLD_MIXER_INFO) { 100 if (cmd == SOUND_OLD_MIXER_INFO) {
101 _old_mixer_info info; 101 _old_mixer_info info;
102 strlcpy(info.id, "tv card", sizeof(info.id)); 102 strlcpy(info.id, "tv card", sizeof(info.id));
103 strlcpy(info.name, i2c_clientname(client), sizeof(info.name)); 103 strlcpy(info.name, client->name, sizeof(info.name));
104 if (copy_to_user(argp, &info, sizeof(info))) 104 if (copy_to_user(argp, &info, sizeof(info)))
105 return -EFAULT; 105 return -EFAULT;
106 return 0; 106 return 0;
@@ -276,9 +276,9 @@ static int tvmixer_clients(struct i2c_client *client)
276#else 276#else
277 /* TV card ??? */ 277 /* TV card ??? */
278 switch (client->adapter->id) { 278 switch (client->adapter->id) {
279 case I2C_ALGO_BIT | I2C_HW_SMBUS_VOODOO3: 279 case I2C_HW_SMBUS_VOODOO3:
280 case I2C_ALGO_BIT | I2C_HW_B_BT848: 280 case I2C_HW_B_BT848:
281 case I2C_ALGO_BIT | I2C_HW_B_RIVA: 281 case I2C_HW_B_RIVA:
282 /* ok, have a look ... */ 282 /* ok, have a look ... */
283 break; 283 break;
284 default: 284 default:
@@ -295,7 +295,7 @@ static int tvmixer_clients(struct i2c_client *client)
295 devices[i].dev = NULL; 295 devices[i].dev = NULL;
296 devices[i].minor = -1; 296 devices[i].minor = -1;
297 printk("tvmixer: %s unregistered (#1)\n", 297 printk("tvmixer: %s unregistered (#1)\n",
298 i2c_clientname(client)); 298 client->name);
299 return 0; 299 return 0;
300 } 300 }
301 } 301 }
@@ -354,7 +354,7 @@ static void __exit tvmixer_cleanup_module(void)
354 if (devices[i].minor != -1) { 354 if (devices[i].minor != -1) {
355 unregister_sound_mixer(devices[i].minor); 355 unregister_sound_mixer(devices[i].minor);
356 printk("tvmixer: %s unregistered (#2)\n", 356 printk("tvmixer: %s unregistered (#2)\n",
357 i2c_clientname(devices[i].dev)); 357 devices[i].dev->name);
358 } 358 }
359 } 359 }
360} 360}
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 76e8681d65c6..d8a0f763ca10 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -1,80 +1,606 @@
1/* 1/*
2 * (incomplete) Driver for the VINO (Video In No Out) system found in SGI Indys. 2 * Driver for the VINO (Video In No Out) system found in SGI Indys.
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License version 2 as published by the Free Software Foundation. 5 * License version 2 as published by the Free Software Foundation.
6 * 6 *
7 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
8 *
9 * Based on the previous version of the driver for 2.4 kernels by:
7 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> 10 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
8 */ 11 */
9 12
10#include <linux/module.h> 13/*
14 * TODO:
15 * - remove "hacks" from memory allocation code and implement nopage()
16 * - check decimation, calculating and reporting image size when
17 * using decimation
18 * - check vino_acquire_input(), vino_set_input() and channel
19 * ownership handling
20 * - report VINO error-interrupts via ioctls ?
21 * - implement picture controls (all implemented?)
22 * - use macros for boolean values (?)
23 * - implement user mode buffers and overlay (?)
24 */
25
11#include <linux/init.h> 26#include <linux/init.h>
12#include <linux/types.h> 27#include <linux/module.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/wrapper.h>
16#include <linux/errno.h>
17#include <linux/irq.h>
18#include <linux/delay.h> 28#include <linux/delay.h>
19#include <linux/videodev.h> 29#include <linux/errno.h>
30#include <linux/fs.h>
31#include <linux/kernel.h>
32#include <linux/mm.h>
33#include <linux/interrupt.h>
34#include <linux/dma-mapping.h>
35#include <linux/time.h>
36#include <linux/moduleparam.h>
37
38#ifdef CONFIG_KMOD
39#include <linux/kmod.h>
40#endif
41
20#include <linux/i2c.h> 42#include <linux/i2c.h>
21#include <linux/i2c-algo-sgi.h> 43#include <linux/i2c-algo-sgi.h>
22 44
23#include <asm/addrspace.h> 45#include <linux/videodev.h>
24#include <asm/system.h> 46#include <linux/videodev2.h>
25#include <asm/bootinfo.h> 47#include <linux/video_decoder.h>
26#include <asm/pgtable.h> 48
27#include <asm/paccess.h> 49#include <asm/paccess.h>
28#include <asm/io.h> 50#include <asm/io.h>
29#include <asm/sgi/ip22.h> 51#include <asm/sgi/ip22.h>
30#include <asm/sgi/hpc3.h>
31#include <asm/sgi/mc.h> 52#include <asm/sgi/mc.h>
32 53
33#include "vino.h" 54#include "vino.h"
55#include "saa7191.h"
56#include "indycam.h"
57
58/* Uncomment the following line to get lots and lots of (mostly useless)
59 * debug info.
60 * Note that the debug output also slows down the driver significantly */
61// #define VINO_DEBUG
62
63#define VINO_MODULE_VERSION "0.0.3"
64#define VINO_VERSION_CODE KERNEL_VERSION(0, 0, 3)
65
66MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
67MODULE_VERSION(VINO_MODULE_VERSION);
68MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
69MODULE_LICENSE("GPL");
34 70
35/* debugging? */ 71#define mem_map_reserve(p) set_bit(PG_reserved, &((p)->flags))
36#if 1 72#define mem_map_unreserve(p) clear_bit(PG_reserved, &((p)->flags))
37#define DEBUG(x...) printk(x); 73
74#ifdef VINO_DEBUG
75#define dprintk(x...) printk("VINO: " x);
38#else 76#else
39#define DEBUG(x...) 77#define dprintk(x...)
40#endif 78#endif
41 79
80#define VINO_NO_CHANNEL 0
81#define VINO_CHANNEL_A 1
82#define VINO_CHANNEL_B 2
83
84#define VINO_PAL_WIDTH 768
85#define VINO_PAL_HEIGHT 576
86#define VINO_NTSC_WIDTH 640
87#define VINO_NTSC_HEIGHT 480
88
89#define VINO_MIN_WIDTH 32
90#define VINO_MIN_HEIGHT 32
91
92#define VINO_CLIPPING_START_ODD_D1 1
93#define VINO_CLIPPING_START_ODD_PAL 1
94#define VINO_CLIPPING_START_ODD_NTSC 1
95
96#define VINO_CLIPPING_START_EVEN_D1 2
97#define VINO_CLIPPING_START_EVEN_PAL 2
98#define VINO_CLIPPING_START_EVEN_NTSC 2
99
100#define VINO_INPUT_CHANNEL_COUNT 3
101
102#define VINO_INPUT_NONE -1
103#define VINO_INPUT_COMPOSITE 0
104#define VINO_INPUT_SVIDEO 1
105#define VINO_INPUT_D1 2
106
107#define VINO_PAGE_RATIO (PAGE_SIZE / VINO_PAGE_SIZE)
108
109#define VINO_FIFO_THRESHOLD_DEFAULT 512
110
111/*#define VINO_FRAMEBUFFER_SIZE (VINO_PAL_WIDTH * VINO_PAL_HEIGHT * 4 \
112 + 2 * PAGE_SIZE)*/
113#define VINO_FRAMEBUFFER_SIZE ((VINO_PAL_WIDTH \
114 * VINO_PAL_HEIGHT * 4 \
115 + 3 * PAGE_SIZE) & ~(PAGE_SIZE - 1))
116
117#define VINO_FRAMEBUFFER_MAX_COUNT 8
118
119#define VINO_FRAMEBUFFER_UNUSED 0
120#define VINO_FRAMEBUFFER_IN_USE 1
121#define VINO_FRAMEBUFFER_READY 2
122
123#define VINO_QUEUE_ERROR -1
124#define VINO_QUEUE_MAGIC 0x20050125
125
126#define VINO_MEMORY_NONE 0
127#define VINO_MEMORY_MMAP 1
128#define VINO_MEMORY_USERPTR 2
129
130#define VINO_DUMMY_DESC_COUNT 4
131#define VINO_DESC_FETCH_DELAY 5 /* microseconds */
132
133/* the number is the index for vino_data_formats */
134#define VINO_DATA_FMT_NONE -1
135#define VINO_DATA_FMT_GREY 0
136#define VINO_DATA_FMT_RGB332 1
137#define VINO_DATA_FMT_RGB32 2
138#define VINO_DATA_FMT_YUV 3
139//#define VINO_DATA_FMT_RGB24 4
140
141#define VINO_DATA_FMT_COUNT 4
142
143#define VINO_DATA_NORM_NONE -1
144#define VINO_DATA_NORM_NTSC 0
145#define VINO_DATA_NORM_PAL 1
146#define VINO_DATA_NORM_SECAM 2
147#define VINO_DATA_NORM_D1 3
148/* The following is a special entry that can be used to
149 * autodetect the norm. */
150#define VINO_DATA_NORM_AUTO 0xff
151
152#define VINO_DATA_NORM_COUNT 4
42 153
43/* VINO ASIC registers */ 154/* Internal data structure definitions */
44struct sgi_vino *vino;
45 155
46static const char *vinostr = "VINO IndyCam/TV"; 156struct vino_input {
47static int threshold_a = 512; 157 char *name;
48static int threshold_b = 512; 158 v4l2_std_id std;
159};
160
161struct vino_clipping {
162 unsigned int left, right, top, bottom;
163};
164
165struct vino_data_format {
166 /* the description */
167 char *description;
168 /* bytes per pixel */
169 unsigned int bpp;
170 /* V4L2 fourcc code */
171 __u32 pixelformat;
172 /* V4L2 colorspace (duh!) */
173 enum v4l2_colorspace colorspace;
174};
175
176struct vino_data_norm {
177 char *description;
178 unsigned int width, height;
179 struct vino_clipping odd;
180 struct vino_clipping even;
181
182 v4l2_std_id std;
183 unsigned int fps_min, fps_max;
184 __u32 framelines;
185};
186
187struct vino_descriptor_table {
188 /* the number of PAGE_SIZE sized pages in the buffer */
189 unsigned int page_count;
190 /* virtual (kmalloc'd) pointers to the actual data
191 * (in PAGE_SIZE chunks, used with mmap streaming) */
192 unsigned long *virtual;
193
194 /* cpu address for the VINO descriptor table
195 * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
196 unsigned long *dma_cpu;
197 /* dma address for the VINO descriptor table
198 * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
199 dma_addr_t dma;
200};
201
202struct vino_framebuffer {
203 /* identifier number */
204 unsigned int id;
205 /* the length of the whole buffer */
206 unsigned int size;
207 /* the length of actual data in buffer */
208 unsigned int data_size;
209 /* the data format */
210 unsigned int data_format;
211 /* the state of buffer data */
212 unsigned int state;
213 /* is the buffer mapped in user space? */
214 unsigned int map_count;
215 /* memory offset for mmap() */
216 unsigned int offset;
217 /* frame counter */
218 unsigned int frame_counter;
219 /* timestamp (written when image capture finishes) */
220 struct timeval timestamp;
221
222 struct vino_descriptor_table desc_table;
223
224 spinlock_t state_lock;
225};
49 226
50struct vino_device { 227struct vino_framebuffer_fifo {
51 struct video_device vdev; 228 unsigned int length;
52#define VINO_CHAN_A 1 229
53#define VINO_CHAN_B 2 230 unsigned int used;
54 int chan; 231 unsigned int head;
232 unsigned int tail;
233
234 unsigned int data[VINO_FRAMEBUFFER_MAX_COUNT];
235};
236
237struct vino_framebuffer_queue {
238 unsigned int magic;
239
240 /* VINO_MEMORY_NONE, VINO_MEMORY_MMAP or VINO_MEMORY_USERPTR */
241 unsigned int type;
242 unsigned int length;
243
244 /* data field of in and out contain index numbers for buffer */
245 struct vino_framebuffer_fifo in;
246 struct vino_framebuffer_fifo out;
247
248 struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_MAX_COUNT];
249
250 spinlock_t queue_lock;
251 struct semaphore queue_sem;
252 wait_queue_head_t frame_wait_queue;
253};
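Each channel queues frame buffers through two small index FIFOs ("in" holds buffers queued for capture, "out" holds captured frames waiting for user space); head and tail wrap over length, and used counts the queued entries. Hypothetical helpers (the real driver defines its own queue functions later in the file) illustrating the intended semantics:

static int example_fifo_push(struct vino_framebuffer_fifo *f, unsigned int id)
{
	if (f->used >= f->length)
		return VINO_QUEUE_ERROR;	/* FIFO full */
	f->data[f->tail] = id;
	f->tail = (f->tail + 1) % f->length;
	f->used++;
	return 0;
}

static int example_fifo_pop(struct vino_framebuffer_fifo *f, unsigned int *id)
{
	if (f->used == 0)
		return VINO_QUEUE_ERROR;	/* FIFO empty */
	*id = f->data[f->head];
	f->head = (f->head + 1) % f->length;
	f->used--;
	return 0;
}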
254
255struct vino_channel_settings {
256 unsigned int channel;
257
258 int input;
259 unsigned int data_format;
260 unsigned int data_norm;
261 struct vino_clipping clipping;
262 unsigned int decimation;
263 unsigned int line_size;
264 unsigned int alpha;
265 unsigned int fps;
266 unsigned int framert_reg;
267
268 unsigned int fifo_threshold;
269
270 struct vino_framebuffer_queue fb_queue;
271
272 /* number of the current field */
273 unsigned int field;
274
275 /* read in progress */
276 int reading;
277 /* streaming is active */
278 int streaming;
279 /* the driver is currently processing the queue */
280 int capturing;
281
282 struct semaphore sem;
283 spinlock_t capture_lock;
284
285 unsigned int users;
286
287 /* V4L support */
288 struct video_device *v4l_device;
55}; 289};
56 290
57struct vino_client { 291struct vino_client {
292 /* the channel which owns this client:
293 * VINO_NO_CHANNEL, VINO_CHANNEL_A or VINO_CHANNEL_B */
294 unsigned int owner;
58 struct i2c_client *driver; 295 struct i2c_client *driver;
59 int owner;
60}; 296};
61 297
62struct vino_video { 298struct vino_settings {
63 struct vino_device chA; 299 struct vino_channel_settings a;
64 struct vino_device chB; 300 struct vino_channel_settings b;
65 301
66 struct vino_client decoder; 302 struct vino_client decoder;
67 struct vino_client camera; 303 struct vino_client camera;
68 304
69 struct semaphore input_lock; 305 /* a lock for vino register access */
306 spinlock_t vino_lock;
307 /* a lock for channel input changes */
308 spinlock_t input_lock;
70 309
71 /* Loaded into VINO descriptors to clear End Of Descriptors table
72 * interrupt condition */
73 unsigned long dummy_page; 310 unsigned long dummy_page;
74 unsigned int dummy_buf[4] __attribute__((aligned(8))); 311 struct vino_descriptor_table dummy_desc_table;
75}; 312};
76 313
77static struct vino_video *Vino; 314/* Module parameters */
315
316/*
317 * Using vino_pixel_conversion the ARGB32-format pixels supplied
318 * by the VINO chip can be converted to more common formats
319 * like RGBA32 (or probably RGB24 in the future). This way we
320 * can give out data that can be specified correctly with
321 * the V4L2-definitions.
322 *
323 * The pixel format is specified as RGBA32 when no conversion
324 * is used.
325 *
326 * Note that this only affects the 32-bit bit depth.
327 *
328 * Use non-zero value to enable conversion.
329 */
330static int vino_pixel_conversion = 0;
331module_param_named(pixelconv, vino_pixel_conversion, int, 0);
332MODULE_PARM_DESC(pixelconv,
333 "enable pixel conversion (non-zero value enables)");
334
335/* Internal data structures */
336
337static struct sgi_vino *vino;
338
339static struct vino_settings *vino_drvdata;
340
341static const char *vino_driver_name = "vino";
342static const char *vino_driver_description = "SGI VINO";
343static const char *vino_bus_name = "GIO64 bus";
344static const char *vino_v4l_device_name_a = "SGI VINO Channel A";
345static const char *vino_v4l_device_name_b = "SGI VINO Channel B";
346
347static const struct vino_input vino_inputs[] = {
348 {
349 .name = "Composite",
350 .std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
351 },{
352 .name = "S-Video",
353 .std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
354 },{
355 .name = "D1 (IndyCam)",
356 .std = V4L2_STD_NTSC,
357 }
358};
359
360static const struct vino_data_format vino_data_formats[] = {
361 {
362 .description = "8-bit greyscale",
363 .bpp = 1,
364 .pixelformat = V4L2_PIX_FMT_GREY,
365 .colorspace = V4L2_COLORSPACE_SMPTE170M,
366 },{
367 .description = "8-bit dithered RGB 3-3-2",
368 .bpp = 1,
369 .pixelformat = V4L2_PIX_FMT_RGB332,
370 .colorspace = V4L2_COLORSPACE_SRGB,
371 },{
372 .description = "32-bit RGB",
373 .bpp = 4,
374 .pixelformat = V4L2_PIX_FMT_RGB32,
375 .colorspace = V4L2_COLORSPACE_SRGB,
376 },{
377 .description = "YUV 4:2:2",
378 .bpp = 4,
379 .pixelformat = V4L2_PIX_FMT_YUYV, // XXX: swapped?
380 .colorspace = V4L2_COLORSPACE_SMPTE170M,
381 }/*,{
382 .description = "24-bit RGB",
383 .bpp = 3,
384 .pixelformat = V4L2_PIX_FMT_RGB24,
385 .colorspace = V4L2_COLORSPACE_SRGB,
386 }*/
387};
388
389static const struct vino_data_norm vino_data_norms[] = {
390 {
391 .description = "NTSC",
392 .std = V4L2_STD_NTSC,
393 .fps_min = 6,
394 .fps_max = 30,
395 .framelines = 525,
396 .width = VINO_NTSC_WIDTH,
397 .height = VINO_NTSC_HEIGHT,
398 .odd = {
399 .top = VINO_CLIPPING_START_ODD_NTSC,
400 .left = 0,
401 .bottom = VINO_CLIPPING_START_ODD_NTSC
402 + VINO_NTSC_HEIGHT / 2 - 1,
403 .right = VINO_NTSC_WIDTH,
404 },
405 .even = {
406 .top = VINO_CLIPPING_START_EVEN_NTSC,
407 .left = 0,
408 .bottom = VINO_CLIPPING_START_EVEN_NTSC
409 + VINO_NTSC_HEIGHT / 2 - 1,
410 .right = VINO_NTSC_WIDTH,
411 },
412 },{
413 .description = "PAL",
414 .std = V4L2_STD_PAL,
415 .fps_min = 5,
416 .fps_max = 25,
417 .framelines = 625,
418 .width = VINO_PAL_WIDTH,
419 .height = VINO_PAL_HEIGHT,
420 .odd = {
421 .top = VINO_CLIPPING_START_ODD_PAL,
422 .left = 0,
423 .bottom = VINO_CLIPPING_START_ODD_PAL
424 + VINO_PAL_HEIGHT / 2 - 1,
425 .right = VINO_PAL_WIDTH,
426 },
427 .even = {
428 .top = VINO_CLIPPING_START_EVEN_PAL,
429 .left = 0,
430 .bottom = VINO_CLIPPING_START_EVEN_PAL
431 + VINO_PAL_HEIGHT / 2 - 1,
432 .right = VINO_PAL_WIDTH,
433 },
434 },{
435 .description = "SECAM",
436 .std = V4L2_STD_SECAM,
437 .fps_min = 5,
438 .fps_max = 25,
439 .framelines = 625,
440 .width = VINO_PAL_WIDTH,
441 .height = VINO_PAL_HEIGHT,
442 .odd = {
443 .top = VINO_CLIPPING_START_ODD_PAL,
444 .left = 0,
445 .bottom = VINO_CLIPPING_START_ODD_PAL
446 + VINO_PAL_HEIGHT / 2 - 1,
447 .right = VINO_PAL_WIDTH,
448 },
449 .even = {
450 .top = VINO_CLIPPING_START_EVEN_PAL,
451 .left = 0,
452 .bottom = VINO_CLIPPING_START_EVEN_PAL
453 + VINO_PAL_HEIGHT / 2 - 1,
454 .right = VINO_PAL_WIDTH,
455 },
456 },{
457 .description = "NTSC (D1 input)",
458 .std = V4L2_STD_NTSC,
459 .fps_min = 6,
460 .fps_max = 30,
461 .framelines = 525,
462 .width = VINO_NTSC_WIDTH,
463 .height = VINO_NTSC_HEIGHT,
464 .odd = {
465 .top = VINO_CLIPPING_START_ODD_D1,
466 .left = 0,
467 .bottom = VINO_CLIPPING_START_ODD_D1
468 + VINO_NTSC_HEIGHT / 2 - 1,
469 .right = VINO_NTSC_WIDTH,
470 },
471 .even = {
472 .top = VINO_CLIPPING_START_EVEN_D1,
473 .left = 0,
474 .bottom = VINO_CLIPPING_START_EVEN_D1
475 + VINO_NTSC_HEIGHT / 2 - 1,
476 .right = VINO_NTSC_WIDTH,
477 },
478 }
479};
480
481#define VINO_INDYCAM_V4L2_CONTROL_COUNT 9
482
483struct v4l2_queryctrl vino_indycam_v4l2_controls[] = {
484 {
485 .id = V4L2_CID_AUTOGAIN,
486 .type = V4L2_CTRL_TYPE_BOOLEAN,
487 .name = "Automatic Gain Control",
488 .minimum = 0,
489 .maximum = 1,
490 .step = 1,
491 .default_value = INDYCAM_AGC_DEFAULT,
492 .flags = 0,
493 .reserved = { 0, 0 },
494 },{
495 .id = V4L2_CID_AUTO_WHITE_BALANCE,
496 .type = V4L2_CTRL_TYPE_BOOLEAN,
497 .name = "Automatic White Balance",
498 .minimum = 0,
499 .maximum = 1,
500 .step = 1,
501 .default_value = INDYCAM_AWB_DEFAULT,
502 .flags = 0,
503 .reserved = { 0, 0 },
504 },{
505 .id = V4L2_CID_GAIN,
506 .type = V4L2_CTRL_TYPE_INTEGER,
507 .name = "Gain",
508 .minimum = INDYCAM_GAIN_MIN,
509 .maximum = INDYCAM_GAIN_MAX,
510 .step = 1,
511 .default_value = INDYCAM_GAIN_DEFAULT,
512 .flags = 0,
513 .reserved = { 0, 0 },
514 },{
515 .id = V4L2_CID_PRIVATE_BASE,
516 .type = V4L2_CTRL_TYPE_INTEGER,
517 .name = "Red Saturation",
518 .minimum = INDYCAM_RED_SATURATION_MIN,
519 .maximum = INDYCAM_RED_SATURATION_MAX,
520 .step = 1,
521 .default_value = INDYCAM_RED_SATURATION_DEFAULT,
522 .flags = 0,
523 .reserved = { 0, 0 },
524 },{
525 .id = V4L2_CID_PRIVATE_BASE + 1,
526 .type = V4L2_CTRL_TYPE_INTEGER,
527 .name = "Blue Saturation",
528 .minimum = INDYCAM_BLUE_SATURATION_MIN,
529 .maximum = INDYCAM_BLUE_SATURATION_MAX,
530 .step = 1,
531 .default_value = INDYCAM_BLUE_SATURATION_DEFAULT,
532 .flags = 0,
533 .reserved = { 0, 0 },
534 },{
535 .id = V4L2_CID_RED_BALANCE,
536 .type = V4L2_CTRL_TYPE_INTEGER,
537 .name = "Red Balance",
538 .minimum = INDYCAM_RED_BALANCE_MIN,
539 .maximum = INDYCAM_RED_BALANCE_MAX,
540 .step = 1,
541 .default_value = INDYCAM_RED_BALANCE_DEFAULT,
542 .flags = 0,
543 .reserved = { 0, 0 },
544 },{
545 .id = V4L2_CID_BLUE_BALANCE,
546 .type = V4L2_CTRL_TYPE_INTEGER,
547 .name = "Blue Balance",
548 .minimum = INDYCAM_BLUE_BALANCE_MIN,
549 .maximum = INDYCAM_BLUE_BALANCE_MAX,
550 .step = 1,
551 .default_value = INDYCAM_BLUE_BALANCE_DEFAULT,
552 .flags = 0,
553 .reserved = { 0, 0 },
554 },{
555 .id = V4L2_CID_EXPOSURE,
556 .type = V4L2_CTRL_TYPE_INTEGER,
557 .name = "Shutter Control",
558 .minimum = INDYCAM_SHUTTER_MIN,
559 .maximum = INDYCAM_SHUTTER_MAX,
560 .step = 1,
561 .default_value = INDYCAM_SHUTTER_DEFAULT,
562 .flags = 0,
563 .reserved = { 0, 0 },
564 },{
565 .id = V4L2_CID_GAMMA,
566 .type = V4L2_CTRL_TYPE_INTEGER,
567 .name = "Gamma",
568 .minimum = INDYCAM_GAMMA_MIN,
569 .maximum = INDYCAM_GAMMA_MAX,
570 .step = 1,
571 .default_value = INDYCAM_GAMMA_DEFAULT,
572 .flags = 0,
573 .reserved = { 0, 0 },
574 }
575};
576
577#define VINO_SAA7191_V4L2_CONTROL_COUNT 2
578
579struct v4l2_queryctrl vino_saa7191_v4l2_controls[] = {
580 {
581 .id = V4L2_CID_HUE,
582 .type = V4L2_CTRL_TYPE_INTEGER,
583 .name = "Hue",
584 .minimum = SAA7191_HUE_MIN,
585 .maximum = SAA7191_HUE_MAX,
586 .step = 1,
587 .default_value = SAA7191_HUE_DEFAULT,
588 .flags = 0,
589 .reserved = { 0, 0 },
590 },{
591 .id = V4L2_CID_PRIVATE_BASE,
592 .type = V4L2_CTRL_TYPE_BOOLEAN,
593 .name = "VTR Time Constant",
594 .minimum = SAA7191_VTRC_MIN,
595 .maximum = SAA7191_VTRC_MAX,
596 .step = 1,
597 .default_value = SAA7191_VTRC_DEFAULT,
598 .flags = 0,
599 .reserved = { 0, 0 },
600 }
601};
602
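The control tables above are what a V4L2 application reaches through the standard control ioctls. A minimal user-space sketch (not part of the driver) that queries the Gain control and reads its current value, assuming a /dev/video0 node and that the driver routes VIDIOC_QUERYCTRL/VIDIOC_G_CTRL to these tables:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_queryctrl qc;
        struct v4l2_control ctrl;
        int fd = open("/dev/video0", O_RDWR);   /* device node is an assumption */

        if (fd < 0)
                return 1;

        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CID_GAIN;
        if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
                printf("%s: min %d max %d default %d\n",
                       (char *)qc.name, qc.minimum, qc.maximum,
                       qc.default_value);

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_GAIN;
        if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) == 0)
                printf("current gain: %d\n", ctrl.value);

        return 0;
}
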
603/* VINO I2C bus functions */
 604
 605unsigned i2c_vino_getctrl(void *data)
 606{
@@ -112,49 +638,49 @@ static struct i2c_algo_sgi_data i2c_sgi_vino_data =
 638 */
 639static int i2c_vino_client_reg(struct i2c_client *client)
 640{
- int res = 0;
 641 int ret = 0;
 642
- down(&Vino->input_lock);
 643 spin_lock(&vino_drvdata->input_lock);
 644 switch (client->driver->id) {
 645 case I2C_DRIVERID_SAA7191:
- if (Vino->decoder.driver)
- res = -EBUSY;
 646 if (vino_drvdata->decoder.driver)
 647 ret = -EBUSY;
 648 else
- Vino->decoder.driver = client;
 649 vino_drvdata->decoder.driver = client;
 650 break;
 651 case I2C_DRIVERID_INDYCAM:
- if (Vino->camera.driver)
- res = -EBUSY;
 652 if (vino_drvdata->camera.driver)
 653 ret = -EBUSY;
 654 else
- Vino->camera.driver = client;
 655 vino_drvdata->camera.driver = client;
 656 break;
 657 default:
- res = -ENODEV;
 658 ret = -ENODEV;
 659 }
- up(&Vino->input_lock);
 660 spin_unlock(&vino_drvdata->input_lock);
 661
- return res;
 662 return ret;
 663}
 664
 665static int i2c_vino_client_unreg(struct i2c_client *client)
 666{
- int res = 0;
 667 int ret = 0;
 668
- down(&Vino->input_lock);
- if (client == Vino->decoder.driver) {
- if (Vino->decoder.owner)
- res = -EBUSY;
 669 spin_lock(&vino_drvdata->input_lock);
 670 if (client == vino_drvdata->decoder.driver) {
 671 if (vino_drvdata->decoder.owner != VINO_NO_CHANNEL)
 672 ret = -EBUSY;
 673 else
- Vino->decoder.driver = NULL;
- } else if (client == Vino->camera.driver) {
- if (Vino->camera.owner)
- res = -EBUSY;
 674 vino_drvdata->decoder.driver = NULL;
 675 } else if (client == vino_drvdata->camera.driver) {
 676 if (vino_drvdata->camera.owner != VINO_NO_CHANNEL)
 677 ret = -EBUSY;
 678 else
- Vino->camera.driver = NULL;
 679 vino_drvdata->camera.driver = NULL;
 680 }
- up(&Vino->input_lock);
 681 spin_unlock(&vino_drvdata->input_lock);
 682
- return res;
 683 return ret;
 684}
 685
 686static struct i2c_adapter vino_i2c_adapter =
@@ -176,172 +702,3591 @@ static int vino_i2c_del_bus(void)
 702 return i2c_sgi_del_bus(&vino_i2c_adapter);
 703}
 704
705static int i2c_camera_command(unsigned int cmd, void *arg)
706{
707 return vino_drvdata->camera.driver->
708 driver->command(vino_drvdata->camera.driver,
709 cmd, arg);
710}
711
712static int i2c_decoder_command(unsigned int cmd, void *arg)
713{
714 return vino_drvdata->decoder.driver->
715 driver->command(vino_drvdata->decoder.driver,
716 cmd, arg);
717}
718
719/* VINO framebuffer/DMA descriptor management */
720
721static void vino_free_buffer_with_count(struct vino_framebuffer *fb,
722 unsigned int count)
723{
724 unsigned int i;
725
726 dprintk("vino_free_buffer_with_count(): count = %d\n", count);
727
728 for (i = 0; i < count; i++) {
729 mem_map_unreserve(virt_to_page(fb->desc_table.virtual[i]));
730 dma_unmap_single(NULL,
731 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
732 PAGE_SIZE, DMA_FROM_DEVICE);
733 free_page(fb->desc_table.virtual[i]);
734 }
735
736 dma_free_coherent(NULL,
737 VINO_PAGE_RATIO * (fb->desc_table.page_count + 4) *
738 sizeof(dma_addr_t), (void *)fb->desc_table.dma_cpu,
739 fb->desc_table.dma);
740 kfree(fb->desc_table.virtual);
741
742 memset(fb, 0, sizeof(struct vino_framebuffer));
743}
744
745static void vino_free_buffer(struct vino_framebuffer *fb)
746{
747 vino_free_buffer_with_count(fb, fb->desc_table.page_count);
748}
749
750static int vino_allocate_buffer(struct vino_framebuffer *fb,
751 unsigned int size)
752{
753 unsigned int count, i, j;
754 int ret = 0;
755
756 dprintk("vino_allocate_buffer():\n");
757
758 if (size < 1)
759 return -EINVAL;
760
761 memset(fb, 0, sizeof(struct vino_framebuffer));
762
763 count = ((size / PAGE_SIZE) + 4) & ~3;
764
765 dprintk("vino_allocate_buffer(): size = %d, count = %d\n",
766 size, count);
767
768 /* allocate memory for table with virtual (page) addresses */
769 fb->desc_table.virtual = (unsigned long *)
770 kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
771 if (!fb->desc_table.virtual)
772 return -ENOMEM;
773
774 /* allocate memory for table with dma addresses
775 * (has space for four extra descriptors) */
776 fb->desc_table.dma_cpu =
777 dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
778 sizeof(dma_addr_t), &fb->desc_table.dma,
779 GFP_KERNEL | GFP_DMA);
780 if (!fb->desc_table.dma_cpu) {
781 ret = -ENOMEM;
782 goto out_free_virtual;
783 }
784
 785 /* allocate pages for the buffer and acquire the corresponding
786 * dma addresses */
787 for (i = 0; i < count; i++) {
788 dma_addr_t dma_data_addr;
789
790 fb->desc_table.virtual[i] =
791 get_zeroed_page(GFP_KERNEL | GFP_DMA);
792 if (!fb->desc_table.virtual[i]) {
793 ret = -ENOBUFS;
794 break;
795 }
796
797 dma_data_addr =
798 dma_map_single(NULL,
799 (void *)fb->desc_table.virtual[i],
800 PAGE_SIZE, DMA_FROM_DEVICE);
801
802 for (j = 0; j < VINO_PAGE_RATIO; j++) {
803 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
804 dma_data_addr + VINO_PAGE_SIZE * j;
805 }
806
807 mem_map_reserve(virt_to_page(fb->desc_table.virtual[i]));
808 }
809
810 /* page_count needs to be set anyway, because the descriptor table has
811 * been allocated according to this number */
812 fb->desc_table.page_count = count;
813
814 if (ret) {
815 /* the descriptor with index i doesn't contain
816 * a valid address yet */
817 vino_free_buffer_with_count(fb, i);
818 return ret;
819 }
820
821 //fb->size = size;
822 fb->size = count * PAGE_SIZE;
823 fb->data_format = VINO_DATA_FMT_NONE;
824
825 /* set the dma stop-bit for the last (count+1)th descriptor */
826 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
827 return 0;
828
829 out_free_virtual:
830 kfree(fb->desc_table.virtual);
831 return ret;
832}
833
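The descriptor-table sizing above always over-allocates: the page count is the number of whole pages in the requested size, plus slack, rounded so it stays a multiple of four (the hardware fetches descriptors in groups of four, cf. next_4_desc further down). A standalone worked example (not driver code), assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        unsigned int sizes[] = { 1, PAGE_SIZE, 3 * PAGE_SIZE + 100, 4 * PAGE_SIZE };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned int count = ((sizes[i] / PAGE_SIZE) + 4) & ~3;

                /* count is a multiple of four pages and always exceeds
                 * the number of whole pages in the requested size */
                printf("size %u -> %u pages (%u bytes)\n",
                       sizes[i], count, count * PAGE_SIZE);
        }
        return 0;
}
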
834#if 0
835/* user buffers not fully implemented yet */
836static int vino_prepare_user_buffer(struct vino_framebuffer *fb,
837 void *user,
838 unsigned int size)
839{
840 unsigned int count, i, j;
841 int ret = 0;
842
843 dprintk("vino_prepare_user_buffer():\n");
844
845 if (size < 1)
846 return -EINVAL;
847
848 memset(fb, 0, sizeof(struct vino_framebuffer));
849
850 count = ((size / PAGE_SIZE)) & ~3;
851
852 dprintk("vino_prepare_user_buffer(): size = %d, count = %d\n",
853 size, count);
854
855 /* allocate memory for table with virtual (page) addresses */
856 fb->desc_table.virtual = (unsigned long *)
857 kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
858 if (!fb->desc_table.virtual)
859 return -ENOMEM;
860
861 /* allocate memory for table with dma addresses
862 * (has space for four extra descriptors) */
863 fb->desc_table.dma_cpu =
864 dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
865 sizeof(dma_addr_t), &fb->desc_table.dma,
866 GFP_KERNEL | GFP_DMA);
867 if (!fb->desc_table.dma_cpu) {
868 ret = -ENOMEM;
869 goto out_free_virtual;
870 }
871
 872 /* allocate pages for the buffer and acquire the corresponding
873 * dma addresses */
874 for (i = 0; i < count; i++) {
875 dma_addr_t dma_data_addr;
876
877 fb->desc_table.virtual[i] =
878 get_zeroed_page(GFP_KERNEL | GFP_DMA);
879 if (!fb->desc_table.virtual[i]) {
880 ret = -ENOBUFS;
881 break;
882 }
883
884 dma_data_addr =
885 dma_map_single(NULL,
886 (void *)fb->desc_table.virtual[i],
887 PAGE_SIZE, DMA_FROM_DEVICE);
888
889 for (j = 0; j < VINO_PAGE_RATIO; j++) {
890 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
891 dma_data_addr + VINO_PAGE_SIZE * j;
892 }
893
894 mem_map_reserve(virt_to_page(fb->desc_table.virtual[i]));
895 }
896
897 /* page_count needs to be set anyway, because the descriptor table has
898 * been allocated according to this number */
899 fb->desc_table.page_count = count;
900
901 if (ret) {
902 /* the descriptor with index i doesn't contain
903 * a valid address yet */
904 vino_free_buffer_with_count(fb, i);
905 return ret;
906 }
907
908 //fb->size = size;
909 fb->size = count * PAGE_SIZE;
910
911 /* set the dma stop-bit for the last (count+1)th descriptor */
912 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
913 return 0;
914
915 out_free_virtual:
916 kfree(fb->desc_table.virtual);
917 return ret;
918}
919#endif
920
921static void vino_sync_buffer(struct vino_framebuffer *fb)
922{
923 int i;
924
925 dprintk("vino_sync_buffer():\n");
926
927 for (i = 0; i < fb->desc_table.page_count; i++)
928 dma_sync_single(NULL,
929 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
930 PAGE_SIZE, DMA_FROM_DEVICE);
931}
932
933/* Framebuffer fifo functions (need to be locked externally) */
934
935static void vino_fifo_init(struct vino_framebuffer_fifo *f,
936 unsigned int length)
937{
938 f->length = 0;
939 f->used = 0;
940 f->head = 0;
941 f->tail = 0;
942
943 if (length > VINO_FRAMEBUFFER_MAX_COUNT)
944 length = VINO_FRAMEBUFFER_MAX_COUNT;
945
946 f->length = length;
947}
948
949/* returns true/false */
950static int vino_fifo_has_id(struct vino_framebuffer_fifo *f, unsigned int id)
951{
952 unsigned int i;
 953 for (i = 0; i < f->used; i++) {
 954 if (f->data[(f->head + i) % f->length] == id)
955 return 1;
956 }
957
958 return 0;
959}
960
961/* returns true/false */
962static int vino_fifo_full(struct vino_framebuffer_fifo *f)
963{
964 return (f->used == f->length);
965}
966
967static unsigned int vino_fifo_get_used(struct vino_framebuffer_fifo *f)
968{
969 return f->used;
970}
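The helpers in this block implement a fixed-size ring: head indexes the oldest entry, tail the next free slot, and used tracks occupancy so full and empty are never ambiguous. A standalone model (not driver code) of that bookkeeping; enqueue and dequeue for the real structure follow just below:

#include <assert.h>

#define LEN 4u

struct demo_fifo {
        unsigned int data[LEN];
        unsigned int head, tail, used;
};

static int demo_enqueue(struct demo_fifo *f, unsigned int id)
{
        if (f->used == LEN)
                return -1;                      /* full */
        f->data[f->tail] = id;
        f->tail = (f->tail + 1) % LEN;
        f->used++;
        return 0;
}

static int demo_dequeue(struct demo_fifo *f, unsigned int *id)
{
        if (f->used == 0)
                return -1;                      /* empty */
        *id = f->data[f->head];
        f->head = (f->head + 1) % LEN;
        f->used--;
        return 0;
}

int main(void)
{
        struct demo_fifo f = { .head = 0, .tail = 0, .used = 0 };
        unsigned int id;

        assert(demo_enqueue(&f, 2) == 0);
        assert(demo_enqueue(&f, 0) == 0);
        assert(demo_dequeue(&f, &id) == 0 && id == 2);  /* oldest entry first */
        assert(f.used == 1);
        return 0;
}
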
-
-static void vino_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
 971
 972static int vino_fifo_enqueue(struct vino_framebuffer_fifo *f, unsigned int id)
 973{
974 if (id >= f->length) {
975 return VINO_QUEUE_ERROR;
976 }
977
978 if (vino_fifo_has_id(f, id)) {
979 return VINO_QUEUE_ERROR;
980 }
981
982 if (f->used < f->length) {
983 f->data[f->tail] = id;
984 f->tail = (f->tail + 1) % f->length;
985 f->used++;
986 } else {
987 return VINO_QUEUE_ERROR;
988 }
989
990 return 0;
-}
-
-static int vino_open(struct video_device *dev, int flags)
-{
- struct vino_device *videv = (struct vino_device *)dev;
 991}
 992
 993static int vino_fifo_peek(struct vino_framebuffer_fifo *f, unsigned int *id)
 994{
 995 if (f->used > 0) {
996 *id = f->data[f->head];
997 } else {
998 return VINO_QUEUE_ERROR;
999 }
-
- return 0;
-}
-
-static void vino_close(struct video_device *dev)
-{
- struct vino_device *videv = (struct vino_device *)dev;
1000
1001 return 0;
1002}
1003
1004static int vino_fifo_dequeue(struct vino_framebuffer_fifo *f, unsigned int *id)
1005{
1006 if (f->used > 0) {
1007 *id = f->data[f->head];
1008 f->head = (f->head + 1) % f->length;
1009 f->used--;
1010 } else {
1011 return VINO_QUEUE_ERROR;
1012 }
1013
1014 return 0;
-}
-
-static int vino_mmap(struct video_device *dev, const char *adr,
- unsigned long size)
1015}
1016
1017/* Framebuffer queue functions */
1018
1019/* execute with queue_lock locked */
1020static void vino_queue_free_with_count(struct vino_framebuffer_queue *q,
1021 unsigned int length)
-{
- struct vino_device *videv = (struct vino_device *)dev;
-
- return -EINVAL;
1022{
1023 unsigned int i;
1024
1025 q->length = 0;
1026 memset(&q->in, 0, sizeof(struct vino_framebuffer_fifo));
1027 memset(&q->out, 0, sizeof(struct vino_framebuffer_fifo));
1028 for (i = 0; i < length; i++) {
1029 dprintk("vino_queue_free_with_count(): freeing buffer %d\n",
1030 i);
1031 vino_free_buffer(q->buffer[i]);
1032 kfree(q->buffer[i]);
1033 }
1034
1035 q->type = VINO_MEMORY_NONE;
1036 q->magic = 0;
-}
-
-static int vino_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
-{
- struct vino_device *videv = (struct vino_device *)dev;
1037}
1038
1039static void vino_queue_free(struct vino_framebuffer_queue *q)
1040{
1041 dprintk("vino_queue_free():\n");
1042
1043 if (q->magic != VINO_QUEUE_MAGIC)
1044 return;
1045 if (q->type != VINO_MEMORY_MMAP)
1046 return;
1047
1048 down(&q->queue_sem);
1049
1050 vino_queue_free_with_count(q, q->length);
1051
1052 up(&q->queue_sem);
1053}
1054
1055static int vino_queue_init(struct vino_framebuffer_queue *q,
1056 unsigned int *length)
1057{
1058 unsigned int i;
1059 int ret = 0;
1060
1061 dprintk("vino_queue_init(): length = %d\n", *length);
1062
1063 if (q->magic == VINO_QUEUE_MAGIC) {
1064 dprintk("vino_queue_init(): queue already initialized!\n");
1065 return -EINVAL;
1066 }
1067
1068 if (q->type != VINO_MEMORY_NONE) {
1069 dprintk("vino_queue_init(): queue already initialized!\n");
1070 return -EINVAL;
1071 }
1072
1073 if (*length < 1)
1074 return -EINVAL;
1075
1076 down(&q->queue_sem);
1077
1078 if (*length > VINO_FRAMEBUFFER_MAX_COUNT)
1079 *length = VINO_FRAMEBUFFER_MAX_COUNT;
1080
1081 q->length = 0;
1082
1083 for (i = 0; i < *length; i++) {
1084 dprintk("vino_queue_init(): allocating buffer %d\n", i);
1085 q->buffer[i] = kmalloc(sizeof(struct vino_framebuffer),
1086 GFP_KERNEL);
1087 if (!q->buffer[i]) {
1088 dprintk("vino_queue_init(): kmalloc() failed\n");
1089 ret = -ENOMEM;
1090 break;
1091 }
1092
1093 ret = vino_allocate_buffer(q->buffer[i],
1094 VINO_FRAMEBUFFER_SIZE);
1095 if (ret) {
1096 kfree(q->buffer[i]);
1097 dprintk("vino_queue_init(): "
1098 "vino_allocate_buffer() failed\n");
1099 break;
1100 }
1101
1102 q->buffer[i]->id = i;
1103 if (i > 0) {
1104 q->buffer[i]->offset = q->buffer[i - 1]->offset +
1105 q->buffer[i - 1]->size;
1106 } else {
1107 q->buffer[i]->offset = 0;
1108 }
1109
1110 spin_lock_init(&q->buffer[i]->state_lock);
1111
1112 dprintk("vino_queue_init(): buffer = %d, offset = %d, "
1113 "size = %d\n", i, q->buffer[i]->offset,
1114 q->buffer[i]->size);
1115 }
1116
1117 if (ret) {
1118 vino_queue_free_with_count(q, i);
1119 *length = 0;
1120 } else {
1121 q->length = *length;
1122 vino_fifo_init(&q->in, q->length);
1123 vino_fifo_init(&q->out, q->length);
1124 q->type = VINO_MEMORY_MMAP;
1125 q->magic = VINO_QUEUE_MAGIC;
1126 }
1127
1128 up(&q->queue_sem);
1129
1130 return ret;
1131}
1132
1133static struct vino_framebuffer *vino_queue_add(struct
1134 vino_framebuffer_queue *q,
1135 unsigned int id)
1136{
1137 struct vino_framebuffer *ret = NULL;
1138 unsigned int total;
1139 unsigned long flags;
1140
1141 dprintk("vino_queue_add(): id = %d\n", id);
1142
1143 if (q->magic != VINO_QUEUE_MAGIC) {
1144 return ret;
1145 }
1146
1147 spin_lock_irqsave(&q->queue_lock, flags);
1148
1149 if (q->length == 0)
1150 goto out;
1151
1152 if (id >= q->length)
1153 goto out;
1154
1155 /* not needed?: if (vino_fifo_full(&q->out)) {
1156 goto out;
1157 }*/
1158 /* check that outgoing queue isn't already full
1159 * (or that it won't become full) */
1160 total = vino_fifo_get_used(&q->in) +
1161 vino_fifo_get_used(&q->out);
1162 if (total >= q->length)
1163 goto out;
1164
1165 if (vino_fifo_enqueue(&q->in, id))
1166 goto out;
1167
1168 ret = q->buffer[id];
1169
1170out:
1171 spin_unlock_irqrestore(&q->queue_lock, flags);
1172
1173 return ret;
1174}
1175
1176static struct vino_framebuffer *vino_queue_transfer(struct
1177 vino_framebuffer_queue *q)
1178{
1179 struct vino_framebuffer *ret = NULL;
1180 struct vino_framebuffer *fb;
1181 unsigned int id;
1182 unsigned long flags;
1183
1184 dprintk("vino_queue_transfer():\n");
1185
1186 if (q->magic != VINO_QUEUE_MAGIC) {
1187 return ret;
1188 }
1189
1190 spin_lock_irqsave(&q->queue_lock, flags);
1191
1192 if (q->length == 0)
1193 goto out;
1194
1195 // now this actually removes an entry from the incoming queue
1196 if (vino_fifo_dequeue(&q->in, &id)) {
1197 goto out;
1198 }
1199
1200 dprintk("vino_queue_transfer(): id = %d\n", id);
1201 fb = q->buffer[id];
1202
1203 // we have already checked that the outgoing queue is not full, but...
1204 if (vino_fifo_enqueue(&q->out, id)) {
1205 printk(KERN_ERR "vino_queue_transfer(): "
1206 "outgoing queue is full, this shouldn't happen!\n");
1207 goto out;
1208 }
1209
1210 ret = fb;
1211out:
1212 spin_unlock_irqrestore(&q->queue_lock, flags);
1213
1214 return ret;
1215}
1216
1217/* returns true/false */
1218static int vino_queue_incoming_contains(struct vino_framebuffer_queue *q,
1219 unsigned int id)
1220{
1221 int ret = 0;
1222 unsigned long flags;
1223
1224 if (q->magic != VINO_QUEUE_MAGIC) {
1225 return ret;
1226 }
1227
1228 spin_lock_irqsave(&q->queue_lock, flags);
1229
1230 if (q->length == 0)
1231 goto out;
1232
1233 ret = vino_fifo_has_id(&q->in, id);
1234
1235out:
1236 spin_unlock_irqrestore(&q->queue_lock, flags);
1237
1238 return ret;
1239}
1240
1241/* returns true/false */
1242static int vino_queue_outgoing_contains(struct vino_framebuffer_queue *q,
1243 unsigned int id)
1244{
1245 int ret = 0;
1246 unsigned long flags;
1247
1248 if (q->magic != VINO_QUEUE_MAGIC) {
1249 return ret;
1250 }
1251
1252 spin_lock_irqsave(&q->queue_lock, flags);
1253
1254 if (q->length == 0)
1255 goto out;
1256
1257 ret = vino_fifo_has_id(&q->out, id);
1258
1259out:
1260 spin_unlock_irqrestore(&q->queue_lock, flags);
1261
1262 return ret;
1263}
1264
1265static int vino_queue_get_incoming(struct vino_framebuffer_queue *q,
1266 unsigned int *used)
1267{
1268 int ret = 0;
1269 unsigned long flags;
1270
1271 if (q->magic != VINO_QUEUE_MAGIC) {
1272 return VINO_QUEUE_ERROR;
1273 }
1274
1275 spin_lock_irqsave(&q->queue_lock, flags);
1276
1277 if (q->length == 0) {
1278 ret = VINO_QUEUE_ERROR;
1279 goto out;
1280 }
1281
1282 *used = vino_fifo_get_used(&q->in);
1283
1284out:
1285 spin_unlock_irqrestore(&q->queue_lock, flags);
1286
1287 return ret;
1288}
1289
1290static int vino_queue_get_outgoing(struct vino_framebuffer_queue *q,
1291 unsigned int *used)
1292{
1293 int ret = 0;
1294 unsigned long flags;
1295
1296 if (q->magic != VINO_QUEUE_MAGIC) {
1297 return VINO_QUEUE_ERROR;
1298 }
1299
1300 spin_lock_irqsave(&q->queue_lock, flags);
1301
1302 if (q->length == 0) {
1303 ret = VINO_QUEUE_ERROR;
1304 goto out;
1305 }
1306
1307 *used = vino_fifo_get_used(&q->out);
1308
1309out:
1310 spin_unlock_irqrestore(&q->queue_lock, flags);
1311
1312 return ret;
1313}
1314
1315static int vino_queue_get_total(struct vino_framebuffer_queue *q,
1316 unsigned int *total)
1317{
1318 int ret = 0;
1319 unsigned long flags;
1320
1321 if (q->magic != VINO_QUEUE_MAGIC) {
1322 return VINO_QUEUE_ERROR;
1323 }
1324
1325 spin_lock_irqsave(&q->queue_lock, flags);
1326
1327 if (q->length == 0) {
1328 ret = VINO_QUEUE_ERROR;
1329 goto out;
1330 }
1331
1332 *total = vino_fifo_get_used(&q->in) +
1333 vino_fifo_get_used(&q->out);
1334
1335out:
1336 spin_unlock_irqrestore(&q->queue_lock, flags);
1337
1338 return ret;
1339}
1340
1341static struct vino_framebuffer *vino_queue_peek(struct
1342 vino_framebuffer_queue *q,
1343 unsigned int *id)
1344{
1345 struct vino_framebuffer *ret = NULL;
1346 unsigned long flags;
1347
1348 if (q->magic != VINO_QUEUE_MAGIC) {
1349 return ret;
1350 }
1351
1352 spin_lock_irqsave(&q->queue_lock, flags);
1353
1354 if (q->length == 0)
1355 goto out;
1356
1357 if (vino_fifo_peek(&q->in, id)) {
1358 goto out;
1359 }
1360
1361 ret = q->buffer[*id];
1362out:
1363 spin_unlock_irqrestore(&q->queue_lock, flags);
1364
1365 return ret;
1366}
1367
1368static struct vino_framebuffer *vino_queue_remove(struct
1369 vino_framebuffer_queue *q,
1370 unsigned int *id)
1371{
1372 struct vino_framebuffer *ret = NULL;
1373 unsigned long flags;
1374 dprintk("vino_queue_remove():\n");
1375
1376 if (q->magic != VINO_QUEUE_MAGIC) {
1377 return ret;
1378 }
1379
1380 spin_lock_irqsave(&q->queue_lock, flags);
1381
1382 if (q->length == 0)
1383 goto out;
1384
1385 if (vino_fifo_dequeue(&q->out, id)) {
1386 goto out;
1387 }
1388
1389 dprintk("vino_queue_remove(): id = %d\n", *id);
1390 ret = q->buffer[*id];
1391out:
1392 spin_unlock_irqrestore(&q->queue_lock, flags);
1393
1394 return ret;
1395}
1396
1397static struct
1398vino_framebuffer *vino_queue_get_buffer(struct vino_framebuffer_queue *q,
1399 unsigned int id)
1400{
1401 struct vino_framebuffer *ret = NULL;
1402 unsigned long flags;
1403
1404 if (q->magic != VINO_QUEUE_MAGIC) {
1405 return ret;
1406 }
1407
1408 spin_lock_irqsave(&q->queue_lock, flags);
1409
1410 if (q->length == 0)
1411 goto out;
1412
1413 if (id >= q->length)
1414 goto out;
1415
1416 ret = q->buffer[id];
1417 out:
1418 spin_unlock_irqrestore(&q->queue_lock, flags);
1419
1420 return ret;
1421}
1422
1423static unsigned int vino_queue_get_length(struct vino_framebuffer_queue *q)
1424{
1425 unsigned int length = 0;
1426 unsigned long flags;
1427
1428 if (q->magic != VINO_QUEUE_MAGIC) {
1429 return length;
1430 }
1431
1432 spin_lock_irqsave(&q->queue_lock, flags);
1433 length = q->length;
1434 spin_unlock_irqrestore(&q->queue_lock, flags);
1435
1436 return length;
1437}
1438
1439static int vino_queue_has_mapped_buffers(struct vino_framebuffer_queue *q)
1440{
1441 unsigned int i;
1442 int ret = 0;
1443 unsigned long flags;
1444
1445 if (q->magic != VINO_QUEUE_MAGIC) {
1446 return ret;
1447 }
1448
1449 spin_lock_irqsave(&q->queue_lock, flags);
1450 for (i = 0; i < q->length; i++) {
1451 if (q->buffer[i]->map_count > 0) {
1452 ret = 1;
1453 break;
1454 }
1455 }
1456 spin_unlock_irqrestore(&q->queue_lock, flags);
1457
1458 return ret;
1459}
1460
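Taken together, the queue above backs V4L2 mmap streaming I/O: buffers queued by user space land in the incoming FIFO, the interrupt path transfers a finished frame to the outgoing FIFO, and a dequeue hands it back. A hypothetical user-space sketch of that round trip (error handling omitted; /dev/video0 and the exact ioctl wiring in this driver are assumptions):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_requestbuffers req;
        struct v4l2_buffer buf;
        enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        int fd = open("/dev/video0", O_RDWR);
        void *map;

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.count = 4;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_REQBUFS, &req);        /* driver allocates its queue */

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = 0;
        ioctl(fd, VIDIOC_QUERYBUF, &buf);
        map = mmap(NULL, buf.length, PROT_READ, MAP_SHARED, fd, buf.m.offset);

        ioctl(fd, VIDIOC_QBUF, &buf);           /* buffer enters the incoming FIFO */
        ioctl(fd, VIDIOC_STREAMON, &type);
        ioctl(fd, VIDIOC_DQBUF, &buf);          /* filled buffer leaves the outgoing FIFO */
        ioctl(fd, VIDIOC_STREAMOFF, &type);

        munmap(map, buf.length);
        return 0;
}
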
1461/* VINO functions */
1462
1463/* execute with input_lock locked */
1464static void vino_update_line_size(struct vino_channel_settings *vcs)
1465{
1466 unsigned int w = vcs->clipping.right - vcs->clipping.left;
1467 unsigned int d = vcs->decimation;
1468 unsigned int bpp = vino_data_formats[vcs->data_format].bpp;
1469 unsigned int lsize;
1470
1471 dprintk("update_line_size(): before: w = %d, d = %d, "
1472 "line_size = %d\n", w, d, vcs->line_size);
1473 /* line size must be multiple of 8 bytes */
1474 lsize = (bpp * (w / d)) & ~7;
1475 w = (lsize / bpp) * d;
1476
1477 vcs->clipping.right = vcs->clipping.left + w;
1478 vcs->line_size = lsize;
1479 dprintk("update_line_size(): after: w = %d, d = %d, "
1480 "line_size = %d\n", w, d, vcs->line_size);
1481}
1482
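A worked example (not driver code) of the rounding above: the byte line size is clipped down to a multiple of 8 and the clipping width is then recomputed from it, so an odd request shrinks slightly.

#include <stdio.h>

int main(void)
{
        unsigned int w = 645, d = 1, bpp = 1;   /* 8-bit greyscale, no decimation */
        unsigned int lsize = (bpp * (w / d)) & ~7;

        w = (lsize / bpp) * d;
        printf("line_size = %u bytes, width trimmed to %u pixels\n", lsize, w);
        /* prints: line_size = 640 bytes, width trimmed to 640 pixels */
        return 0;
}
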
1483/* execute with input_lock locked */
1484static void vino_set_clipping(struct vino_channel_settings *vcs,
1485 unsigned int x, unsigned int y,
1486 unsigned int w, unsigned int h)
1487{
1488 unsigned int maxwidth, maxheight;
1489 unsigned int d;
1490
1491 maxwidth = vino_data_norms[vcs->data_norm].width;
1492 maxheight = vino_data_norms[vcs->data_norm].height;
1493 d = vcs->decimation;
1494
1495 y &= ~1; /* odd/even fields */
1496
1497 if (x > maxwidth) {
1498 x = 0;
1499 }
1500 if (y > maxheight) {
1501 y = 0;
1502 }
1503
1504 if (((w / d) < VINO_MIN_WIDTH)
1505 || ((h / d) < VINO_MIN_HEIGHT)) {
1506 w = VINO_MIN_WIDTH * d;
1507 h = VINO_MIN_HEIGHT * d;
1508 }
1509
1510 if ((x + w) > maxwidth) {
1511 w = maxwidth - x;
1512 if ((w / d) < VINO_MIN_WIDTH)
1513 x = maxwidth - VINO_MIN_WIDTH * d;
1514 }
1515 if ((y + h) > maxheight) {
1516 h = maxheight - y;
1517 if ((h / d) < VINO_MIN_HEIGHT)
1518 y = maxheight - VINO_MIN_HEIGHT * d;
1519 }
1520
1521 vcs->clipping.left = x;
1522 vcs->clipping.top = y;
1523 vcs->clipping.right = x + w;
1524 vcs->clipping.bottom = y + h;
1525
1526 vino_update_line_size(vcs);
1527
1528 dprintk("clipping %d, %d, %d, %d / %d - %d\n",
1529 vcs->clipping.left, vcs->clipping.top, vcs->clipping.right,
1530 vcs->clipping.bottom, vcs->decimation, vcs->line_size);
1531}
1532
1533/* execute with input_lock locked */
1534static void vino_set_default_clipping(struct vino_channel_settings *vcs)
1535{
1536 vino_set_clipping(vcs, 0, 0, vino_data_norms[vcs->data_norm].width,
1537 vino_data_norms[vcs->data_norm].height);
1538}
1539
1540/* execute with input_lock locked */
1541static void vino_set_scaling(struct vino_channel_settings *vcs,
1542 unsigned int w, unsigned int h)
1543{
1544 unsigned int x, y, curw, curh, d;
1545
1546 x = vcs->clipping.left;
1547 y = vcs->clipping.top;
1548 curw = vcs->clipping.right - vcs->clipping.left;
1549 curh = vcs->clipping.bottom - vcs->clipping.top;
1550
1551 d = max(curw / w, curh / h);
1552
1553 dprintk("scaling w: %d, h: %d, curw: %d, curh: %d, d: %d\n",
1554 w, h, curw, curh, d);
1555
1556 if (d < 1) {
1557 d = 1;
1558 }
1559 if (d > 8) {
1560 d = 8;
1561 }
1562
1563 vcs->decimation = d;
1564 vino_set_clipping(vcs, x, y, w * d, h * d);
1565
1566 dprintk("scaling %d, %d, %d, %d / %d - %d\n", vcs->clipping.left,
1567 vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom,
1568 vcs->decimation, vcs->line_size);
1569}
1570
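A worked example (not driver code) of how vino_set_scaling() picks the decimation factor: the larger of the horizontal and vertical shrink ratios is used, clamped to the hardware's 1..8 range, so scaling a 768x576 clip down to 320x240 ends up with d = 2.

#include <stdio.h>

static unsigned int pick_decimation(unsigned int curw, unsigned int curh,
                                    unsigned int w, unsigned int h)
{
        unsigned int d = (curw / w > curh / h) ? curw / w : curh / h;

        if (d < 1)
                d = 1;
        if (d > 8)
                d = 8;
        return d;
}

int main(void)
{
        printf("d = %u\n", pick_decimation(768, 576, 320, 240));        /* d = 2 */
        return 0;
}
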
1571/* execute with input_lock locked */
1572static void vino_reset_scaling(struct vino_channel_settings *vcs)
1573{
1574 vino_set_scaling(vcs, vcs->clipping.right - vcs->clipping.left,
1575 vcs->clipping.bottom - vcs->clipping.top);
1576}
1577
1578/* execute with input_lock locked */
1579static void vino_set_framerate(struct vino_channel_settings *vcs,
1580 unsigned int fps)
1581{
1582 unsigned int mask;
1583
1584 switch (vcs->data_norm) {
1585 case VINO_DATA_NORM_NTSC:
1586 case VINO_DATA_NORM_D1:
1587 fps = (unsigned int)(fps / 6) * 6; // FIXME: round!
1588
1589 if (fps < vino_data_norms[vcs->data_norm].fps_min)
1590 fps = vino_data_norms[vcs->data_norm].fps_min;
1591 if (fps > vino_data_norms[vcs->data_norm].fps_max)
1592 fps = vino_data_norms[vcs->data_norm].fps_max;
1593
1594 switch (fps) {
1595 case 6:
1596 mask = 0x003;
1597 break;
1598 case 12:
1599 mask = 0x0c3;
1600 break;
1601 case 18:
1602 mask = 0x333;
1603 break;
1604 case 24:
1605 mask = 0x3ff;
1606 break;
1607 case 30:
1608 mask = 0xfff;
1609 break;
1610 default:
1611 mask = VINO_FRAMERT_FULL;
1612 }
1613 vcs->framert_reg = VINO_FRAMERT_RT(mask);
1614 break;
1615 case VINO_DATA_NORM_PAL:
1616 case VINO_DATA_NORM_SECAM:
1617 fps = (unsigned int)(fps / 5) * 5; // FIXME: round!
1618
1619 if (fps < vino_data_norms[vcs->data_norm].fps_min)
1620 fps = vino_data_norms[vcs->data_norm].fps_min;
1621 if (fps > vino_data_norms[vcs->data_norm].fps_max)
1622 fps = vino_data_norms[vcs->data_norm].fps_max;
1623
1624 switch (fps) {
1625 case 5:
1626 mask = 0x003;
1627 break;
1628 case 10:
1629 mask = 0x0c3;
1630 break;
1631 case 15:
1632 mask = 0x333;
1633 break;
1634 case 20:
1635 mask = 0x0ff;
1636 break;
1637 case 25:
1638 mask = 0x3ff;
1639 break;
1640 default:
1641 mask = VINO_FRAMERT_FULL;
1642 }
1643 vcs->framert_reg = VINO_FRAMERT_RT(mask) | VINO_FRAMERT_PAL;
1644 break;
1645 }
1646
1647 vcs->fps = fps;
1648}
1649
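A worked example (not driver code) of the NTSC branch above: the requested rate is rounded down to a multiple of 6 and clamped to the 6..30 fps range before the field mask is looked up.

#include <stdio.h>

int main(void)
{
        unsigned int req, fps;

        for (req = 0; req <= 35; req += 7) {
                fps = (req / 6) * 6;
                if (fps < 6)
                        fps = 6;
                if (fps > 30)
                        fps = 30;
                printf("requested %2u fps -> programmed %2u fps\n", req, fps);
        }
        return 0;
}
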
1650/* execute with input_lock locked */
1651static void vino_set_default_framerate(struct vino_channel_settings *vcs)
1652{
1653 vino_set_framerate(vcs, vino_data_norms[vcs->data_norm].fps_max);
1654}
1655
1656/*
1657 * Prepare VINO for DMA transfer...
1658 * (execute only with vino_lock and input_lock locked)
1659 */
1660static int vino_dma_setup(struct vino_channel_settings *vcs,
1661 struct vino_framebuffer *fb)
1662{
1663 u32 ctrl, intr;
1664 struct sgi_vino_channel *ch;
1665 const struct vino_data_norm *norm;
1666
1667 dprintk("vino_dma_setup():\n");
1668
1669 vcs->field = 0;
1670 fb->frame_counter = 0;
1671
1672 ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
1673 norm = &vino_data_norms[vcs->data_norm];
1674
1675 ch->page_index = 0;
1676 ch->line_count = 0;
1677
1678 /* VINO line size register is set 8 bytes less than actual */
1679 ch->line_size = vcs->line_size - 8;
1680
1681 /* let VINO know where to transfer data */
1682 ch->start_desc_tbl = fb->desc_table.dma;
1683 ch->next_4_desc = fb->desc_table.dma;
1684
1685 /* give vino time to fetch the first four descriptors, 5 usec
1686 * should be more than enough time */
1687 udelay(VINO_DESC_FETCH_DELAY);
1688
1689 /* set the alpha register */
1690 ch->alpha = vcs->alpha;
1691
1692 /* set clipping registers */
1693 ch->clip_start = VINO_CLIP_ODD(norm->odd.top + vcs->clipping.top / 2) |
1694 VINO_CLIP_EVEN(norm->even.top +
1695 vcs->clipping.top / 2) |
1696 VINO_CLIP_X(vcs->clipping.left);
1697 ch->clip_end = VINO_CLIP_ODD(norm->odd.top +
1698 vcs->clipping.bottom / 2 - 1) |
1699 VINO_CLIP_EVEN(norm->even.top +
1700 vcs->clipping.bottom / 2 - 1) |
1701 VINO_CLIP_X(vcs->clipping.right);
1702 /* FIXME: end-of-field bug workaround
1703 VINO_CLIP_X(VINO_PAL_WIDTH);
1704 */
1705
1706 /* set the size of actual content in the buffer (DECIMATION !) */
1707 fb->data_size = ((vcs->clipping.right - vcs->clipping.left) /
1708 vcs->decimation) *
1709 ((vcs->clipping.bottom - vcs->clipping.top) /
1710 vcs->decimation) *
1711 vino_data_formats[vcs->data_format].bpp;
1712
1713 ch->frame_rate = vcs->framert_reg;
1714
1715 ctrl = vino->control;
1716 intr = vino->intr_status;
1717
1718 if (vcs->channel == VINO_CHANNEL_A) {
1719 /* All interrupt conditions for this channel were cleared,
1720 * so clear the interrupt status register and enable
1721 * interrupts */
1722 intr &= ~VINO_INTSTAT_A;
1723 ctrl |= VINO_CTRL_A_INT;
1724
1725 /* enable synchronization */
1726 ctrl |= VINO_CTRL_A_SYNC_ENBL;
1727
1728 /* enable frame assembly */
1729 ctrl |= VINO_CTRL_A_INTERLEAVE_ENBL;
1730
1731 /* set decimation used */
1732 if (vcs->decimation < 2)
1733 ctrl &= ~VINO_CTRL_A_DEC_ENBL;
1734 else {
1735 ctrl |= VINO_CTRL_A_DEC_ENBL;
1736 ctrl &= ~VINO_CTRL_A_DEC_SCALE_MASK;
1737 ctrl |= (vcs->decimation - 1) <<
1738 VINO_CTRL_A_DEC_SCALE_SHIFT;
1739 }
1740
1741 /* select input interface */
1742 if (vcs->input == VINO_INPUT_D1)
1743 ctrl |= VINO_CTRL_A_SELECT;
1744 else
1745 ctrl &= ~VINO_CTRL_A_SELECT;
1746
1747 /* palette */
1748 ctrl &= ~(VINO_CTRL_A_LUMA_ONLY | VINO_CTRL_A_RGB |
1749 VINO_CTRL_A_DITHER);
1750 } else {
1751 intr &= ~VINO_INTSTAT_B;
1752 ctrl |= VINO_CTRL_B_INT;
1753
1754 ctrl |= VINO_CTRL_B_SYNC_ENBL;
1755 ctrl |= VINO_CTRL_B_INTERLEAVE_ENBL;
1756
1757 if (vcs->decimation < 2)
1758 ctrl &= ~VINO_CTRL_B_DEC_ENBL;
1759 else {
1760 ctrl |= VINO_CTRL_B_DEC_ENBL;
1761 ctrl &= ~VINO_CTRL_B_DEC_SCALE_MASK;
1762 ctrl |= (vcs->decimation - 1) <<
1763 VINO_CTRL_B_DEC_SCALE_SHIFT;
1764
1765 }
1766 if (vcs->input == VINO_INPUT_D1)
1767 ctrl |= VINO_CTRL_B_SELECT;
1768 else
1769 ctrl &= ~VINO_CTRL_B_SELECT;
1770
1771 ctrl &= ~(VINO_CTRL_B_LUMA_ONLY | VINO_CTRL_B_RGB |
1772 VINO_CTRL_B_DITHER);
1773 }
1774
1775 /* set palette */
1776 fb->data_format = vcs->data_format;
1777
1778 switch (vcs->data_format) {
1779 case VINO_DATA_FMT_GREY:
1780 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1781 VINO_CTRL_A_LUMA_ONLY : VINO_CTRL_B_LUMA_ONLY;
1782 break;
1783 case VINO_DATA_FMT_RGB32:
1784 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1785 VINO_CTRL_A_RGB : VINO_CTRL_B_RGB;
1786 break;
1787 case VINO_DATA_FMT_YUV:
1788 /* nothing needs to be done */
1789 break;
1790 case VINO_DATA_FMT_RGB332:
1791 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1792 VINO_CTRL_A_RGB | VINO_CTRL_A_DITHER :
1793 VINO_CTRL_B_RGB | VINO_CTRL_B_DITHER;
1794 break;
1795 }
1796
1797 vino->intr_status = intr;
1798 vino->control = ctrl;
1799
1800 return 0;
1801}
1802
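A worked example (not driver code) of the data_size computation above: the clipping rectangle is scaled down by the decimation factor in both directions and multiplied by the format's bytes per pixel.

#include <stdio.h>

int main(void)
{
        unsigned int left = 0, right = 640, top = 0, bottom = 480;
        unsigned int decimation = 2, bpp = 4;   /* 32-bit RGB sample values */
        unsigned int data_size = ((right - left) / decimation) *
                                 ((bottom - top) / decimation) * bpp;

        printf("data_size = %u bytes\n", data_size);    /* 307200 */
        return 0;
}
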
1803/* (execute only with vino_lock locked) */
1804static void vino_dma_start(struct vino_channel_settings *vcs)
1805{
1806 u32 ctrl = vino->control;
1807
1808 dprintk("vino_dma_start():\n");
1809 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1810 VINO_CTRL_A_DMA_ENBL : VINO_CTRL_B_DMA_ENBL;
1811 vino->control = ctrl;
1812}
1813
1814/* (execute only with vino_lock locked) */
1815static void vino_dma_stop(struct vino_channel_settings *vcs)
1816{
1817 u32 ctrl = vino->control;
1818
1819 ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
1820 ~VINO_CTRL_A_DMA_ENBL : ~VINO_CTRL_B_DMA_ENBL;
1821 vino->control = ctrl;
1822 dprintk("vino_dma_stop():\n");
1823}
1824
1825/*
1826 * Load a dummy page into the descriptor registers. This prevents the generation of
1827 * spurious interrupts. (execute only with vino_lock locked)
1828 */
1829static void vino_clear_interrupt(struct vino_channel_settings *vcs)
1830{
1831 struct sgi_vino_channel *ch;
1832
1833 ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
1834
1835 ch->page_index = 0;
1836 ch->line_count = 0;
1837
1838 ch->start_desc_tbl = vino_drvdata->dummy_desc_table.dma;
1839 ch->next_4_desc = vino_drvdata->dummy_desc_table.dma;
1840
1841 udelay(VINO_DESC_FETCH_DELAY);
1842 dprintk("channel %c clear interrupt condition\n",
1843 (vcs->channel == VINO_CHANNEL_A) ? 'A':'B');
1844}
1845
1846static int vino_capture(struct vino_channel_settings *vcs,
1847 struct vino_framebuffer *fb)
1848{
1849 int err = 0;
1850 unsigned long flags, flags2;
1851
1852 spin_lock_irqsave(&fb->state_lock, flags);
1853
1854 if (fb->state == VINO_FRAMEBUFFER_IN_USE)
1855 err = -EBUSY;
1856 fb->state = VINO_FRAMEBUFFER_IN_USE;
1857
1858 spin_unlock_irqrestore(&fb->state_lock, flags);
1859
1860 if (err)
1861 return err;
1862
1863 spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
1864 spin_lock_irqsave(&vino_drvdata->input_lock, flags2);
1865
1866 vino_dma_setup(vcs, fb);
1867 vino_dma_start(vcs);
1868
1869 spin_unlock_irqrestore(&vino_drvdata->input_lock, flags2);
1870 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
1871
1872 return err;
1873}
1874
1875static
1876struct vino_framebuffer *vino_capture_enqueue(struct
1877 vino_channel_settings *vcs,
1878 unsigned int index)
1879{
1880 struct vino_framebuffer *fb;
1881 unsigned long flags;
1882
1883 dprintk("vino_capture_enqueue():\n");
1884
1885 spin_lock_irqsave(&vcs->capture_lock, flags);
1886
1887 fb = vino_queue_add(&vcs->fb_queue, index);
1888 if (fb == NULL) {
1889 dprintk("vino_capture_enqueue(): vino_queue_add() failed, "
1890 "queue full?\n");
1891 goto out;
1892 }
1893out:
1894 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1895
1896 return fb;
1897}
1898
1899static int vino_capture_next(struct vino_channel_settings *vcs, int start)
1900{
1901 struct vino_framebuffer *fb;
1902 unsigned int incoming, id;
1903 int err = 0;
1904 unsigned long flags, flags2;
1905
1906 dprintk("vino_capture_next():\n");
1907
1908 spin_lock_irqsave(&vcs->capture_lock, flags);
1909
1910 if (start) {
1911 /* start capture only if capture isn't in progress already */
1912 if (vcs->capturing) {
1913 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1914 return 0;
1915 }
1916
1917 } else {
1918 /* capture next frame:
1919 * stop capture if capturing is not set */
1920 if (!vcs->capturing) {
1921 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1922 return 0;
1923 }
1924 }
1925
1926 err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
1927 if (err) {
1928 dprintk("vino_capture_next(): vino_queue_get_incoming() "
1929 "failed\n");
1930 err = -EINVAL;
1931 goto out;
1932 }
1933 if (incoming == 0) {
1934 dprintk("vino_capture_next(): no buffers available\n");
1935 goto out;
1936 }
1937
1938 fb = vino_queue_peek(&vcs->fb_queue, &id);
1939 if (fb == NULL) {
1940 dprintk("vino_capture_next(): vino_queue_peek() failed\n");
1941 err = -EINVAL;
1942 goto out;
1943 }
1944
1945 spin_lock_irqsave(&fb->state_lock, flags2);
1946 fb->state = VINO_FRAMEBUFFER_UNUSED;
1947 spin_unlock_irqrestore(&fb->state_lock, flags2);
1948
1949 if (start) {
1950 vcs->capturing = 1;
1951 }
1952
1953 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1954
1955 err = vino_capture(vcs, fb);
1956
1957 return err;
1958
1959out:
1960 vcs->capturing = 0;
1961 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1962
1963 return err;
1964}
1965
1966static int vino_is_capturing(struct vino_channel_settings *vcs)
1967{
1968 int ret;
1969 unsigned long flags;
1970
1971 spin_lock_irqsave(&vcs->capture_lock, flags);
1972
1973 ret = vcs->capturing;
1974
1975 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1976
1977 return ret;
1978}
1979
1980/* waits until a frame is captured */
1981static int vino_wait_for_frame(struct vino_channel_settings *vcs)
1982{
1983 wait_queue_t wait;
1984 int err = 0;
1985
1986 dprintk("vino_wait_for_frame():\n");
1987
1988 init_waitqueue_entry(&wait, current);
1989 /* add ourselves into wait queue */
1990 add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
1991 /* and set current state */
1992 set_current_state(TASK_INTERRUPTIBLE);
1993
1994 /* to ensure that schedule_timeout will return immediately
1995 * if a VINO interrupt was triggered meanwhile */
1996 schedule_timeout(HZ / 10);
1997
1998 if (signal_pending(current))
1999 err = -EINTR;
2000
2001 remove_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
2002
2003 dprintk("vino_wait_for_frame(): waiting for frame %s\n",
2004 err ? "failed" : "ok");
2005
2006 return err;
2007}
2008
2009/* the function assumes that PAGE_SIZE % 4 == 0 */
2010static void vino_convert_to_rgba(struct vino_framebuffer *fb) {
2011 unsigned char *pageptr;
2012 unsigned int page, i;
2013 unsigned char a;
2014
2015 for (page = 0; page < fb->desc_table.page_count; page++) {
2016 pageptr = (unsigned char *)fb->desc_table.virtual[page];
2017
2018 for (i = 0; i < PAGE_SIZE; i += 4) {
2019 a = pageptr[i];
2020 pageptr[i] = pageptr[i + 3];
2021 pageptr[i + 3] = a;
2022 a = pageptr[i + 1];
2023 pageptr[i + 1] = pageptr[i + 2];
2024 pageptr[i + 2] = a;
2025 }
2026 }
2027}
2028
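A standalone check (not driver code) of the per-pixel conversion above: each 32-bit pixel has its four bytes mirrored in place, which is what the pixelconv module parameter enables for the 32-bit RGB format.

#include <assert.h>

int main(void)
{
        unsigned char px[4] = { 0x11, 0x22, 0x33, 0x44 };
        unsigned char a;

        a = px[0]; px[0] = px[3]; px[3] = a;
        a = px[1]; px[1] = px[2]; px[2] = a;

        assert(px[0] == 0x44 && px[1] == 0x33 &&
               px[2] == 0x22 && px[3] == 0x11);
        return 0;
}
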
2029/* checks if the buffer is in correct state and syncs data */
2030static int vino_check_buffer(struct vino_channel_settings *vcs,
2031 struct vino_framebuffer *fb)
2032{
2033 int err = 0;
2034 unsigned long flags;
2035
2036 dprintk("vino_check_buffer():\n");
2037
2038 spin_lock_irqsave(&fb->state_lock, flags);
2039 switch (fb->state) {
2040 case VINO_FRAMEBUFFER_IN_USE:
2041 err = -EIO;
2042 break;
2043 case VINO_FRAMEBUFFER_READY:
2044 vino_sync_buffer(fb);
2045 fb->state = VINO_FRAMEBUFFER_UNUSED;
2046 break;
2047 default:
2048 err = -EINVAL;
2049 }
2050 spin_unlock_irqrestore(&fb->state_lock, flags);
2051
2052 if (!err) {
2053 if (vino_pixel_conversion
2054 && (fb->data_format == VINO_DATA_FMT_RGB32)) {
2055 vino_convert_to_rgba(fb);
2056 }
2057 } else if (err && (err != -EINVAL)) {
2058 dprintk("vino_check_buffer(): buffer not ready\n");
2059
2060 spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
2061 vino_dma_stop(vcs);
2062 vino_clear_interrupt(vcs);
2063 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
2064 }
2065
2066 return err;
2067}
2068
2069/* forcefully terminates capture */
2070static void vino_capture_stop(struct vino_channel_settings *vcs)
2071{
2072 unsigned int incoming = 0, outgoing = 0, id;
2073 unsigned long flags, flags2;
2074
2075 dprintk("vino_capture_stop():\n");
2076
2077 spin_lock_irqsave(&vcs->capture_lock, flags);
2078 /* unset capturing to stop queue processing */
2079 vcs->capturing = 0;
2080
2081 spin_lock_irqsave(&vino_drvdata->vino_lock, flags2);
2082
2083 vino_dma_stop(vcs);
2084 vino_clear_interrupt(vcs);
2085
2086 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags2);
2087
2088 /* remove all items from the queue */
2089 if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
2090 dprintk("vino_capture_stop(): "
2091 "vino_queue_get_incoming() failed\n");
2092 goto out;
2093 }
2094 while (incoming > 0) {
2095 vino_queue_transfer(&vcs->fb_queue);
2096
2097 if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
2098 dprintk("vino_capture_stop(): "
2099 "vino_queue_get_incoming() failed\n");
2100 goto out;
2101 }
2102 }
2103
2104 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
2105 dprintk("vino_capture_stop(): "
2106 "vino_queue_get_outgoing() failed\n");
2107 goto out;
2108 }
2109 while (outgoing > 0) {
2110 vino_queue_remove(&vcs->fb_queue, &id);
2111
2112 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
2113 dprintk("vino_capture_stop(): "
2114 "vino_queue_get_outgoing() failed\n");
2115 goto out;
2116 }
2117 }
2118
2119out:
2120 spin_unlock_irqrestore(&vcs->capture_lock, flags);
2121}
2122
2123static int vino_capture_failed(struct vino_channel_settings *vcs)
2124{
2125 struct vino_framebuffer *fb;
2126 unsigned long flags;
2127 unsigned int i;
2128 int ret;
2129
2130 dprintk("vino_capture_failed():\n");
2131
2132 spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
2133
2134 vino_dma_stop(vcs);
2135 vino_clear_interrupt(vcs);
2136
2137 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
2138
2139 ret = vino_queue_get_incoming(&vcs->fb_queue, &i);
2140 if (ret == VINO_QUEUE_ERROR) {
2141 dprintk("vino_queue_get_incoming() failed\n");
2142 return -EINVAL;
2143 }
2144 if (i == 0) {
2145 /* no buffers to process */
2146 return 0;
2147 }
2148
2149 fb = vino_queue_peek(&vcs->fb_queue, &i);
2150 if (fb == NULL) {
2151 dprintk("vino_queue_peek() failed\n");
2152 return -EINVAL;
2153 }
2154
2155 spin_lock_irqsave(&fb->state_lock, flags);
2156 if (fb->state == VINO_FRAMEBUFFER_IN_USE) {
2157 fb->state = VINO_FRAMEBUFFER_UNUSED;
2158 vino_queue_transfer(&vcs->fb_queue);
2159 vino_queue_remove(&vcs->fb_queue, &i);
2160 /* we should actually discard the newest frame,
2161 * but who cares ... */
2162 }
2163 spin_unlock_irqrestore(&fb->state_lock, flags);
2164
2165 return 0;
2166}
2167
2168static void vino_frame_done(struct vino_channel_settings *vcs,
2169 unsigned int fc)
2170{
2171 struct vino_framebuffer *fb;
2172 unsigned long flags;
2173
2174 spin_lock_irqsave(&vcs->capture_lock, flags);
2175 fb = vino_queue_transfer(&vcs->fb_queue);
2176 if (!fb) {
2177 spin_unlock_irqrestore(&vcs->capture_lock, flags);
2178 dprintk("vino_frame_done(): vino_queue_transfer() failed!\n");
2179 return;
2180 }
2181 spin_unlock_irqrestore(&vcs->capture_lock, flags);
2182
2183 fb->frame_counter = fc;
2184 do_gettimeofday(&fb->timestamp);
2185
2186 spin_lock_irqsave(&fb->state_lock, flags);
2187 if (fb->state == VINO_FRAMEBUFFER_IN_USE)
2188 fb->state = VINO_FRAMEBUFFER_READY;
2189 spin_unlock_irqrestore(&fb->state_lock, flags);
2190
2191 wake_up(&vcs->fb_queue.frame_wait_queue);
2192
2193 vino_capture_next(vcs, 0);
2194}
2195
2196static irqreturn_t vino_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2197{
2198 u32 intr;
2199 unsigned int fc_a, fc_b;
2200 int done_a = 0;
2201 int done_b = 0;
2202
2203 spin_lock(&vino_drvdata->vino_lock);
2204
2205 intr = vino->intr_status;
2206 fc_a = vino->a.field_counter / 2;
2207 fc_b = vino->b.field_counter / 2;
2208
2209 // TODO: handle error-interrupts in some special way ?
2210
2211 if (intr & VINO_INTSTAT_A) {
2212 if (intr & VINO_INTSTAT_A_EOF) {
2213 vino_drvdata->a.field++;
2214 if (vino_drvdata->a.field > 1) {
2215 vino_dma_stop(&vino_drvdata->a);
2216 vino_clear_interrupt(&vino_drvdata->a);
2217 vino_drvdata->a.field = 0;
2218 done_a = 1;
2219 }
2220 dprintk("intr: channel A end-of-field interrupt: "
2221 "%04x\n", intr);
2222 } else {
2223 vino_dma_stop(&vino_drvdata->a);
2224 vino_clear_interrupt(&vino_drvdata->a);
2225 done_a = 1;
2226 dprintk("channel A error interrupt: %04x\n", intr);
2227 }
2228 }
2229 if (intr & VINO_INTSTAT_B) {
2230 if (intr & VINO_INTSTAT_B_EOF) {
2231 vino_drvdata->b.field++;
2232 if (vino_drvdata->b.field > 1) {
2233 vino_dma_stop(&vino_drvdata->b);
2234 vino_clear_interrupt(&vino_drvdata->b);
2235 vino_drvdata->b.field = 0;
2236 done_b = 1;
2237 }
2238 dprintk("intr: channel B end-of-field interrupt: "
2239 "%04x\n", intr);
2240 } else {
2241 vino_dma_stop(&vino_drvdata->b);
2242 vino_clear_interrupt(&vino_drvdata->b);
2243 done_b = 1;
2244 dprintk("channel B error interrupt: %04x\n", intr);
2245 }
2246 }
2247
2248 /* always remember to clear interrupt status */
2249 vino->intr_status = ~intr;
2250
2251 spin_unlock(&vino_drvdata->vino_lock);
2252
2253 if (done_a) {
2254 vino_frame_done(&vino_drvdata->a, fc_a);
2255 dprintk("channel A frame done, interrupt: %d\n", intr);
2256 }
2257 if (done_b) {
2258 vino_frame_done(&vino_drvdata->b, fc_b);
2259 dprintk("channel B frame done, interrupt: %d\n", intr);
2260 }
-
- return -EINVAL;
-}
-
-static const struct video_device vino_device = {
2261
2262 return IRQ_HANDLED;
2263}
2264
2265/* VINO video input management */
2266
2267static int vino_get_saa7191_input(int input)
2268{
2269 switch (input) {
2270 case VINO_INPUT_COMPOSITE:
2271 return SAA7191_INPUT_COMPOSITE;
2272 case VINO_INPUT_SVIDEO:
2273 return SAA7191_INPUT_SVIDEO;
2274 default:
2275 printk(KERN_ERR "VINO: vino_get_saa7191_input(): "
2276 "invalid input!\n");
2277 return -1;
2278 }
2279}
2280
2281static int vino_get_saa7191_norm(int norm)
2282{
2283 switch (norm) {
2284 case VINO_DATA_NORM_AUTO:
2285 return SAA7191_NORM_AUTO;
2286 case VINO_DATA_NORM_PAL:
2287 return SAA7191_NORM_PAL;
2288 case VINO_DATA_NORM_NTSC:
2289 return SAA7191_NORM_NTSC;
2290 case VINO_DATA_NORM_SECAM:
2291 return SAA7191_NORM_SECAM;
2292 default:
2293 printk(KERN_ERR "VINO: vino_get_saa7191_norm(): "
2294 "invalid norm!\n");
2295 return -1;
2296 }
2297}
2298
2299/* execute with input_lock locked */
2300static int vino_is_input_owner(struct vino_channel_settings *vcs)
2301{
2302 switch(vcs->input) {
2303 case VINO_INPUT_COMPOSITE:
2304 case VINO_INPUT_SVIDEO:
2305 return (vino_drvdata->decoder.owner == vcs->channel);
2306 case VINO_INPUT_D1:
2307 return (vino_drvdata->camera.owner == vcs->channel);
2308 default:
2309 return 0;
2310 }
2311}
2312
2313static int vino_acquire_input(struct vino_channel_settings *vcs)
2314{
2315 int ret = 0;
2316
2317 dprintk("vino_acquire_input():\n");
2318
2319 spin_lock(&vino_drvdata->input_lock);
2320
2321 /* First try D1 and then SAA7191 */
2322 if (vino_drvdata->camera.driver
2323 && (vino_drvdata->camera.owner == VINO_NO_CHANNEL)) {
2324 if (i2c_use_client(vino_drvdata->camera.driver)) {
2325 ret = -ENODEV;
2326 goto out;
2327 }
2328
2329 vino_drvdata->camera.owner = vcs->channel;
2330 vcs->input = VINO_INPUT_D1;
2331 vcs->data_norm = VINO_DATA_NORM_D1;
2332 } else if (vino_drvdata->decoder.driver
2333 && (vino_drvdata->decoder.owner == VINO_NO_CHANNEL)) {
2334 int saa7191_input;
2335 int saa7191_norm;
2336
2337 if (i2c_use_client(vino_drvdata->decoder.driver)) {
2338 ret = -ENODEV;
2339 goto out;
2340 }
2341
2342 vino_drvdata->decoder.owner = vcs->channel;
2343 vcs->input = VINO_INPUT_COMPOSITE;
2344 vcs->data_norm = VINO_DATA_NORM_PAL;
2345
2346 saa7191_input = vino_get_saa7191_input(vcs->input);
2347 i2c_decoder_command(DECODER_SET_INPUT, &saa7191_input);
2348
2349 saa7191_norm = vino_get_saa7191_norm(vcs->data_norm);
2350 i2c_decoder_command(DECODER_SAA7191_SET_NORM, &saa7191_norm);
2351 } else {
2352 vcs->input = (vcs->channel == VINO_CHANNEL_A) ?
2353 vino_drvdata->b.input : vino_drvdata->a.input;
2354 vcs->data_norm = (vcs->channel == VINO_CHANNEL_A) ?
2355 vino_drvdata->b.data_norm : vino_drvdata->a.data_norm;
2356 }
2357
2358 if (vcs->input == VINO_INPUT_NONE) {
2359 ret = -ENODEV;
2360 goto out;
2361 }
2362
2363 if (vino_is_input_owner(vcs)) {
2364 vino_set_default_clipping(vcs);
2365 vino_set_default_framerate(vcs);
2366 }
2367
2368 dprintk("vino_acquire_input(): %s\n", vino_inputs[vcs->input].name);
2369
2370out:
2371 spin_unlock(&vino_drvdata->input_lock);
2372
2373 return ret;
2374}
2375
2376static int vino_set_input(struct vino_channel_settings *vcs, int input)
2377{
2378 struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
2379 &vino_drvdata->b : &vino_drvdata->a;
2380 int ret = 0;
2381
2382 dprintk("vino_set_input():\n");
2383
2384 spin_lock(&vino_drvdata->input_lock);
2385
2386 if (vcs->input == input)
2387 goto out;
2388
2389 switch(input) {
2390 case VINO_INPUT_COMPOSITE:
2391 case VINO_INPUT_SVIDEO:
2392 if (!vino_drvdata->decoder.driver) {
2393 ret = -EINVAL;
2394 goto out;
2395 }
2396
2397 if (vino_drvdata->decoder.owner == VINO_NO_CHANNEL) {
2398 if (i2c_use_client(vino_drvdata->decoder.driver)) {
2399 ret = -ENODEV;
2400 goto out;
2401 }
2402 vino_drvdata->decoder.owner = vcs->channel;
2403 }
2404
2405 if (vino_drvdata->decoder.owner == vcs->channel) {
2406 int saa7191_input;
2407 int saa7191_norm;
2408
2409 vcs->input = input;
2410 vcs->data_norm = VINO_DATA_NORM_PAL;
2411
2412 saa7191_input = vino_get_saa7191_input(vcs->input);
2413 i2c_decoder_command(DECODER_SET_INPUT, &saa7191_input);
2414 saa7191_norm = vino_get_saa7191_norm(vcs->data_norm);
2415 i2c_decoder_command(DECODER_SAA7191_SET_NORM,
2416 &saa7191_norm);
2417 } else {
2418 if (vcs2->input != input) {
2419 ret = -EBUSY;
2420 goto out;
2421 }
2422
2423 vcs->input = input;
2424 vcs->data_norm = vcs2->data_norm;
2425 }
2426
2427 if (vino_drvdata->camera.owner == vcs->channel) {
2428 /* Transfer the ownership or release the input */
2429 if (vcs2->input == VINO_INPUT_D1) {
2430 vino_drvdata->camera.owner = vcs2->channel;
2431 } else {
2432 i2c_release_client(vino_drvdata->
2433 camera.driver);
2434 vino_drvdata->camera.owner = VINO_NO_CHANNEL;
2435 }
2436 }
2437 break;
2438 case VINO_INPUT_D1:
2439 if (!vino_drvdata->camera.driver) {
2440 ret = -EINVAL;
2441 goto out;
2442 }
2443
2444 if (vino_drvdata->camera.owner == VINO_NO_CHANNEL) {
2445 if (i2c_use_client(vino_drvdata->camera.driver)) {
2446 ret = -ENODEV;
2447 goto out;
2448 }
2449 vino_drvdata->camera.owner = vcs->channel;
2450 }
2451
2452 if (vino_drvdata->decoder.owner == vcs->channel) {
2453 /* Transfer the ownership or release the input */
2454 if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
2455 (vcs2->input == VINO_INPUT_SVIDEO)) {
2456 vino_drvdata->decoder.owner = vcs2->channel;
2457 } else {
2458 i2c_release_client(vino_drvdata->
2459 decoder.driver);
2460 vino_drvdata->decoder.owner = VINO_NO_CHANNEL;
2461 }
2462 }
2463
2464 vcs->input = input;
2465 vcs->data_norm = VINO_DATA_NORM_D1;
2466 break;
2467 default:
2468 ret = -EINVAL;
2469 goto out;
2470 }
2471
2472 vino_set_default_clipping(vcs);
2473 vino_set_default_framerate(vcs);
2474
2475 dprintk("vino_set_input(): %s\n", vino_inputs[vcs->input].name);
2476
2477out:
2478 spin_unlock(&vino_drvdata->input_lock);
2479
2480 return ret;
2481}
2482
2483static void vino_release_input(struct vino_channel_settings *vcs)
2484{
2485 struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
2486 &vino_drvdata->b : &vino_drvdata->a;
2487
2488 dprintk("vino_release_input():\n");
2489
2490 spin_lock(&vino_drvdata->input_lock);
2491
2492 /* Release ownership of the channel
2493 * and if the other channel takes input from
2494 * the same source, transfer the ownership */
2495 if (vino_drvdata->camera.owner == vcs->channel) {
2496 if (vcs2->input == VINO_INPUT_D1) {
2497 vino_drvdata->camera.owner = vcs2->channel;
2498 } else {
2499 i2c_release_client(vino_drvdata->camera.driver);
2500 vino_drvdata->camera.owner = VINO_NO_CHANNEL;
2501 }
2502 } else if (vino_drvdata->decoder.owner == vcs->channel) {
2503 if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
2504 (vcs2->input == VINO_INPUT_SVIDEO)) {
2505 vino_drvdata->decoder.owner = vcs2->channel;
2506 } else {
2507 i2c_release_client(vino_drvdata->decoder.driver);
2508 vino_drvdata->decoder.owner = VINO_NO_CHANNEL;
2509 }
2510 }
2511 vcs->input = VINO_INPUT_NONE;
2512
2513 spin_unlock(&vino_drvdata->input_lock);
2514}
2515
2516/* execute with input_lock locked */
2517static int vino_set_data_norm(struct vino_channel_settings *vcs,
2518 unsigned int data_norm)
2519{
2520 int saa7191_norm;
2521
2522 switch (vcs->input) {
2523 case VINO_INPUT_D1:
2524 /* only one "norm" supported */
2525 if (data_norm != VINO_DATA_NORM_D1)
2526 return -EINVAL;
2527 break;
2528 case VINO_INPUT_COMPOSITE:
2529 case VINO_INPUT_SVIDEO:
2530
2531 saa7191_norm = vino_get_saa7191_norm(data_norm);
2532
2533 i2c_decoder_command(DECODER_SAA7191_SET_NORM, &saa7191_norm);
2534 vcs->data_norm = data_norm;
2535 break;
2536 default:
2537 return -EINVAL;
2538 }
2539
2540 return 0;
2541}
2542
2543/* V4L2 helper functions */
2544
2545static int vino_find_data_format(__u32 pixelformat)
2546{
2547 int i;
2548
2549 for (i = 0; i < VINO_DATA_FMT_COUNT; i++) {
2550 if (vino_data_formats[i].pixelformat == pixelformat)
2551 return i;
2552 }
2553
2554 return VINO_DATA_FMT_NONE;
2555}
2556
2557static int vino_enum_data_norm(struct vino_channel_settings *vcs, __u32 index)
2558{
2559 int data_norm = VINO_DATA_NORM_NONE;
2560
2561 spin_lock(&vino_drvdata->input_lock);
2562 switch(vcs->input) {
2563 case VINO_INPUT_COMPOSITE:
2564 case VINO_INPUT_SVIDEO:
2565 if (index == 0) {
2566 data_norm = VINO_DATA_NORM_PAL;
2567 } else if (index == 1) {
2568 data_norm = VINO_DATA_NORM_NTSC;
2569 } else if (index == 2) {
2570 data_norm = VINO_DATA_NORM_SECAM;
2571 }
2572 break;
2573 case VINO_INPUT_D1:
2574 if (index == 0) {
2575 data_norm = VINO_DATA_NORM_D1;
2576 }
2577 break;
2578 }
2579 spin_unlock(&vino_drvdata->input_lock);
2580
2581 return data_norm;
2582}
2583
2584static int vino_enum_input(struct vino_channel_settings *vcs, __u32 index)
2585{
2586 int input = VINO_INPUT_NONE;
2587
2588 spin_lock(&vino_drvdata->input_lock);
2589 if (vino_drvdata->decoder.driver && vino_drvdata->camera.driver) {
2590 switch (index) {
2591 case 0:
2592 input = VINO_INPUT_COMPOSITE;
2593 break;
2594 case 1:
2595 input = VINO_INPUT_SVIDEO;
2596 break;
2597 case 2:
2598 input = VINO_INPUT_D1;
2599 break;
2600 }
2601 } else if (vino_drvdata->decoder.driver) {
2602 switch (index) {
2603 case 0:
2604 input = VINO_INPUT_COMPOSITE;
2605 break;
2606 case 1:
2607 input = VINO_INPUT_SVIDEO;
2608 break;
2609 }
2610 } else if (vino_drvdata->camera.driver) {
2611 switch (index) {
2612 case 0:
2613 input = VINO_INPUT_D1;
2614 break;
2615 }
2616 }
2617 spin_unlock(&vino_drvdata->input_lock);
2618
2619 return input;
2620}
2621
2622/* execute with input_lock locked */
2623static __u32 vino_find_input_index(struct vino_channel_settings *vcs)
2624{
2625 __u32 index = 0;
2626 // FIXME: detect when no inputs available
2627
2628 if (vino_drvdata->decoder.driver && vino_drvdata->camera.driver) {
2629 switch (vcs->input) {
2630 case VINO_INPUT_COMPOSITE:
2631 index = 0;
2632 break;
2633 case VINO_INPUT_SVIDEO:
2634 index = 1;
2635 break;
2636 case VINO_INPUT_D1:
2637 index = 2;
2638 break;
2639 }
2640 } else if (vino_drvdata->decoder.driver) {
2641 switch (vcs->input) {
2642 case VINO_INPUT_COMPOSITE:
2643 index = 0;
2644 break;
2645 case VINO_INPUT_SVIDEO:
2646 index = 1;
2647 break;
2648 }
2649 } else if (vino_drvdata->camera.driver) {
2650 switch (vcs->input) {
2651 case VINO_INPUT_D1:
2652 index = 0;
2653 break;
2654 }
2655 }
2656
2657 return index;
2658}
2659
2660/* V4L2 ioctls */
2661
2662static void vino_v4l2_querycap(struct v4l2_capability *cap)
2663{
2664 memset(cap, 0, sizeof(struct v4l2_capability));
2665
2666 strcpy(cap->driver, vino_driver_name);
2667 strcpy(cap->card, vino_driver_description);
2668 strcpy(cap->bus_info, vino_bus_name);
2669 cap->version = VINO_VERSION_CODE;
2670 cap->capabilities =
2671 V4L2_CAP_VIDEO_CAPTURE |
2672 V4L2_CAP_STREAMING;
2673 // V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
2674}
2675
2676static int vino_v4l2_enuminput(struct vino_channel_settings *vcs,
2677 struct v4l2_input *i)
2678{
2679 __u32 index = i->index;
2680 int input;
2681 dprintk("requested index = %d\n", index);
2682
2683 input = vino_enum_input(vcs, index);
2684 if (input == VINO_INPUT_NONE)
2685 return -EINVAL;
2686
2687 memset(i, 0, sizeof(struct v4l2_input));
2688
2689 i->index = index;
2690 i->type = V4L2_INPUT_TYPE_CAMERA;
2691 i->std = vino_inputs[input].std;
2692 strcpy(i->name, vino_inputs[input].name);
2693
2694 if ((input == VINO_INPUT_COMPOSITE)
2695 || (input == VINO_INPUT_SVIDEO)) {
2696 struct saa7191_status status;
2697 i2c_decoder_command(DECODER_SAA7191_GET_STATUS, &status);
2698 i->status |= status.signal ? 0 : V4L2_IN_ST_NO_SIGNAL;
2699 i->status |= status.color ? 0 : V4L2_IN_ST_NO_COLOR;
2700 }
2701
2702 return 0;
2703}
2704
2705static int vino_v4l2_g_input(struct vino_channel_settings *vcs,
2706 struct v4l2_input *i)
2707{
2708 __u32 index;
2709 int input;
2710
2711 spin_lock(&vino_drvdata->input_lock);
2712 input = vcs->input;
2713 index = vino_find_input_index(vcs);
2714 spin_unlock(&vino_drvdata->input_lock);
2715
2716 dprintk("input = %d\n", input);
2717
2718 if (input == VINO_INPUT_NONE) {
2719 return -EINVAL;
2720 }
2721
2722 memset(i, 0, sizeof(struct v4l2_input));
2723
2724 i->index = index;
2725 i->type = V4L2_INPUT_TYPE_CAMERA;
2726 i->std = vino_inputs[input].std;
2727 strcpy(i->name, vino_inputs[input].name);
2728
2729 return 0;
2730}
2731
2732static int vino_v4l2_s_input(struct vino_channel_settings *vcs,
2733 struct v4l2_input *i)
2734{
2735 int input;
2736 dprintk("requested input = %d\n", i->index);
2737
2738 input = vino_enum_input(vcs, i->index);
2739 if (input == VINO_INPUT_NONE)
2740 return -EINVAL;
2741
2742 return vino_set_input(vcs, input);
2743}
2744
2745static int vino_v4l2_enumstd(struct vino_channel_settings *vcs,
2746 struct v4l2_standard *s)
2747{
2748 int index = s->index;
2749 int data_norm = vino_enum_data_norm(vcs, index);
2750 dprintk("standard index = %d\n", index);
2751
2752 if (data_norm == VINO_DATA_NORM_NONE)
2753 return -EINVAL;
2754
2755 dprintk("standard name = %s\n",
2756 vino_data_norms[data_norm].description);
2757
2758 memset(s, 0, sizeof(struct v4l2_standard));
2759 s->index = index;
2760
2761 s->id = vino_data_norms[data_norm].std;
2762 s->frameperiod.numerator = 1;
2763 s->frameperiod.denominator =
2764 vino_data_norms[data_norm].fps_max;
2765 s->framelines =
2766 vino_data_norms[data_norm].framelines;
2767 strcpy(s->name,
2768 vino_data_norms[data_norm].description);
2769
2770 return 0;
2771}
2772
2773static int vino_v4l2_g_std(struct vino_channel_settings *vcs,
2774 v4l2_std_id *std)
2775{
2776 spin_lock(&vino_drvdata->input_lock);
2777 dprintk("current standard = %d\n", vcs->data_norm);
2778 *std = vino_data_norms[vcs->data_norm].std;
2779 spin_unlock(&vino_drvdata->input_lock);
2780
2781 return 0;
2782}
2783
2784static int vino_v4l2_s_std(struct vino_channel_settings *vcs,
2785 v4l2_std_id *std)
2786{
2787 int ret = 0;
2788
2789 spin_lock(&vino_drvdata->input_lock);
2790
2791 /* check if the standard is valid for the current input */
2792 if (vino_is_input_owner(vcs)
2793 && (vino_inputs[vcs->input].std & (*std))) {
2794 dprintk("standard accepted\n");
2795
2796 /* change the video norm for SAA7191
2797 * and accept NTSC for D1 (do nothing) */
2798
2799 if (vcs->input == VINO_INPUT_D1)
2800 goto out;
2801
2802 if ((*std) & V4L2_STD_PAL) {
2803 vino_set_data_norm(vcs, VINO_DATA_NORM_PAL);
2804 vcs->data_norm = VINO_DATA_NORM_PAL;
2805 } else if ((*std) & V4L2_STD_NTSC) {
2806 vino_set_data_norm(vcs, VINO_DATA_NORM_NTSC);
2807 vcs->data_norm = VINO_DATA_NORM_NTSC;
2808 } else if ((*std) & V4L2_STD_SECAM) {
2809 vino_set_data_norm(vcs, VINO_DATA_NORM_SECAM);
2810 vcs->data_norm = VINO_DATA_NORM_SECAM;
2811 } else {
2812 ret = -EINVAL;
2813 }
2814 } else {
2815 ret = -EINVAL;
2816 }
2817
2818out:
2819 spin_unlock(&vino_drvdata->input_lock);
2820
2821 return ret;
2822}
2823
2824static int vino_v4l2_enum_fmt(struct vino_channel_settings *vcs,
2825 struct v4l2_fmtdesc *fd)
2826{
2827 enum v4l2_buf_type type = fd->type;
2828 int index = fd->index;
2829 dprintk("format index = %d\n", index);
2830
2831 switch (fd->type) {
2832 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
2833 if ((fd->index < 0) ||
2834 (fd->index >= VINO_DATA_FMT_COUNT))
2835 return -EINVAL;
2836 dprintk("format name = %s\n",
2837 vino_data_formats[index].description);
2838
2839 memset(fd, 0, sizeof(struct v4l2_fmtdesc));
2840 fd->index = index;
2841 fd->type = type;
2842 fd->pixelformat = vino_data_formats[index].pixelformat;
2843 strcpy(fd->description, vino_data_formats[index].description);
2844 break;
2845 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2846 default:
2847 return -EINVAL;
2848 }
2849
2850 return 0;
2851}
2852
2853static int vino_v4l2_try_fmt(struct vino_channel_settings *vcs,
2854 struct v4l2_format *f)
2855{
2856 struct vino_channel_settings tempvcs;
2857
2858 switch (f->type) {
2859 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
2860 struct v4l2_pix_format *pf = &f->fmt.pix;
2861
2862 dprintk("requested: w = %d, h = %d\n",
2863 pf->width, pf->height);
2864
2865 spin_lock(&vino_drvdata->input_lock);
2866 memcpy(&tempvcs, vcs, sizeof(struct vino_channel_settings));
2867 spin_unlock(&vino_drvdata->input_lock);
2868
2869 tempvcs.data_format = vino_find_data_format(pf->pixelformat);
2870 if (tempvcs.data_format == VINO_DATA_FMT_NONE) {
2871 tempvcs.data_format = VINO_DATA_FMT_RGB32;
2872 pf->pixelformat =
2873 vino_data_formats[tempvcs.data_format].
2874 pixelformat;
2875 }
2876
2877 /* data format must be set before clipping/scaling */
2878 vino_set_scaling(&tempvcs, pf->width, pf->height);
2879
2880 dprintk("data format = %s\n",
2881 vino_data_formats[tempvcs.data_format].description);
2882
2883 pf->width = (tempvcs.clipping.right - tempvcs.clipping.left) /
2884 tempvcs.decimation;
2885 pf->height = (tempvcs.clipping.bottom - tempvcs.clipping.top) /
2886 tempvcs.decimation;
2887
2888 pf->field = V4L2_FIELD_INTERLACED;
2889 pf->bytesperline = tempvcs.line_size;
2890 pf->sizeimage = tempvcs.line_size *
2891 (tempvcs.clipping.bottom - tempvcs.clipping.top) /
2892 tempvcs.decimation;
2893 pf->colorspace =
2894 vino_data_formats[tempvcs.data_format].colorspace;
2895
2896 pf->priv = 0;
2897 break;
2898 }
2899 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2900 default:
2901 return -EINVAL;
2902 }
2903
2904 return 0;
2905}
2906
2907static int vino_v4l2_g_fmt(struct vino_channel_settings *vcs,
2908 struct v4l2_format *f)
2909{
2910 switch (f->type) {
2911 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
2912 struct v4l2_pix_format *pf = &f->fmt.pix;
2913 spin_lock(&vino_drvdata->input_lock);
2914
2915 pf->width = (vcs->clipping.right - vcs->clipping.left) /
2916 vcs->decimation;
2917 pf->height = (vcs->clipping.bottom - vcs->clipping.top) /
2918 vcs->decimation;
2919 pf->pixelformat =
2920 vino_data_formats[vcs->data_format].pixelformat;
2921
2922 pf->field = V4L2_FIELD_INTERLACED;
2923 pf->bytesperline = vcs->line_size;
2924 pf->sizeimage = vcs->line_size *
2925 (vcs->clipping.bottom - vcs->clipping.top) /
2926 vcs->decimation;
2927 pf->colorspace =
2928 vino_data_formats[vcs->data_format].colorspace;
2929
2930 pf->priv = 0;
2931
2932 spin_unlock(&vino_drvdata->input_lock);
2933 break;
2934 }
2935 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2936 default:
2937 return -EINVAL;
2938 }
2939
2940 return 0;
2941}
2942
2943static int vino_v4l2_s_fmt(struct vino_channel_settings *vcs,
2944 struct v4l2_format *f)
2945{
2946 int data_format;
2947
2948 switch (f->type) {
2949 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
2950 struct v4l2_pix_format *pf = &f->fmt.pix;
2951 spin_lock(&vino_drvdata->input_lock);
2952
2953 if (!vino_is_input_owner(vcs)) {
2954 spin_unlock(&vino_drvdata->input_lock);
2955 return -EINVAL;
2956 }
2957
2958 data_format = vino_find_data_format(pf->pixelformat);
2959 if (data_format == VINO_DATA_FMT_NONE) {
2960 vcs->data_format = VINO_DATA_FMT_RGB32;
2961 pf->pixelformat =
2962 vino_data_formats[vcs->data_format].
2963 pixelformat;
2964 } else {
2965 vcs->data_format = data_format;
2966 }
2967
2968 /* data format must be set before clipping/scaling */
2969 vino_set_scaling(vcs, pf->width, pf->height);
2970
2971 dprintk("data format = %s\n",
2972 vino_data_formats[vcs->data_format].description);
2973
2974 pf->width = vcs->clipping.right - vcs->clipping.left;
2975 pf->height = vcs->clipping.bottom - vcs->clipping.top;
2976
2977 pf->field = V4L2_FIELD_INTERLACED;
2978 pf->bytesperline = vcs->line_size;
2979 pf->sizeimage = vcs->line_size *
2980 (vcs->clipping.bottom - vcs->clipping.top) /
2981 vcs->decimation;
2982 pf->colorspace =
2983 vino_data_formats[vcs->data_format].colorspace;
2984
2985 pf->priv = 0;
2986
2987 spin_unlock(&vino_drvdata->input_lock);
2988 break;
2989 }
2990 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2991 default:
2992 return -EINVAL;
2993 }
2994
2995 return 0;
2996}
2997
2998static int vino_v4l2_cropcap(struct vino_channel_settings *vcs,
2999 struct v4l2_cropcap *ccap)
3000{
3001 const struct vino_data_norm *norm;
3002
3003 switch (ccap->type) {
3004 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
3005 spin_lock(&vino_drvdata->input_lock);
3006 norm = &vino_data_norms[vcs->data_norm];
3007 spin_unlock(&vino_drvdata->input_lock);
3008
3009 ccap->bounds.left = 0;
3010 ccap->bounds.top = 0;
3011 ccap->bounds.width = norm->width;
3012 ccap->bounds.height = norm->height;
3013 memcpy(&ccap->defrect, &ccap->bounds,
3014 sizeof(struct v4l2_rect));
3015
3016 ccap->pixelaspect.numerator = 1;
3017 ccap->pixelaspect.denominator = 1;
3018 break;
3019 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3020 default:
3021 return -EINVAL;
3022 }
3023
3024 return 0;
3025}
3026
3027static int vino_v4l2_g_crop(struct vino_channel_settings *vcs,
3028 struct v4l2_crop *c)
3029{
3030 switch (c->type) {
3031 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
3032 spin_lock(&vino_drvdata->input_lock);
3033
3034 c->c.left = vcs->clipping.left;
3035 c->c.top = vcs->clipping.top;
3036 c->c.width = vcs->clipping.right - vcs->clipping.left;
3037 c->c.height = vcs->clipping.bottom - vcs->clipping.top;
3038
3039 spin_unlock(&vino_drvdata->input_lock);
3040 break;
3041 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3042 default:
3043 return -EINVAL;
3044 }
3045
3046 return 0;
3047}
3048
3049static int vino_v4l2_s_crop(struct vino_channel_settings *vcs,
3050 struct v4l2_crop *c)
3051{
3052 switch (c->type) {
3053 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
3054 spin_lock(&vino_drvdata->input_lock);
3055
3056 if (!vino_is_input_owner(vcs)) {
3057 spin_unlock(&vino_drvdata->input_lock);
3058 return -EINVAL;
3059 }
3060 vino_set_clipping(vcs, c->c.left, c->c.top,
3061 c->c.width, c->c.height);
3062
3063 spin_unlock(&vino_drvdata->input_lock);
3064 break;
3065 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3066 default:
3067 return -EINVAL;
3068 }
3069
3070 return 0;
3071}
3072
3073static int vino_v4l2_g_parm(struct vino_channel_settings *vcs,
3074 struct v4l2_streamparm *sp)
3075{
3076 switch (sp->type) {
3077 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3078 struct v4l2_captureparm *cp = &sp->parm.capture;
3079 memset(cp, 0, sizeof(struct v4l2_captureparm));
3080
3081 cp->capability = V4L2_CAP_TIMEPERFRAME;
3082 cp->timeperframe.numerator = 1;
3083
3084 spin_lock(&vino_drvdata->input_lock);
3085 cp->timeperframe.denominator = vcs->fps;
3086 spin_unlock(&vino_drvdata->input_lock);
3087
3088 // TODO: cp->readbuffers = xxx;
3089 break;
3090 }
3091 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3092 default:
3093 return -EINVAL;
3094 }
3095
3096 return 0;
3097}
3098
3099static int vino_v4l2_s_parm(struct vino_channel_settings *vcs,
3100 struct v4l2_streamparm *sp)
3101{
3102 switch (sp->type) {
3103 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3104 struct v4l2_captureparm *cp = &sp->parm.capture;
3105
3106 spin_lock(&vino_drvdata->input_lock);
3107 if (!vino_is_input_owner(vcs)) {
3108 spin_unlock(&vino_drvdata->input_lock);
3109 return -EINVAL;
3110 }
3111
3112 if ((cp->timeperframe.numerator == 0) ||
3113 (cp->timeperframe.denominator == 0)) {
3114 /* reset framerate */
3115 vino_set_default_framerate(vcs);
3116 } else {
3117 vino_set_framerate(vcs, cp->timeperframe.denominator /
3118 cp->timeperframe.numerator);
3119 }
3120 spin_unlock(&vino_drvdata->input_lock);
3121
3122 // TODO: set buffers according to cp->readbuffers
3123 break;
3124 }
3125 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3126 default:
3127 return -EINVAL;
3128 }
3129
3130 return 0;
3131}
3132
3133static int vino_v4l2_reqbufs(struct vino_channel_settings *vcs,
3134 struct v4l2_requestbuffers *rb)
3135{
3136 if (vcs->reading)
3137 return -EBUSY;
3138
3139 switch (rb->type) {
3140 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3141 // TODO: check queue type
3142 if (rb->memory != V4L2_MEMORY_MMAP) {
3143 dprintk("type not mmap\n");
3144 return -EINVAL;
3145 }
3146
3147 if (vino_is_capturing(vcs)) {
3148 dprintk("busy, capturing\n");
3149 return -EBUSY;
3150 }
3151
3152 dprintk("count = %d\n", rb->count);
3153 if (rb->count > 0) {
3154 if (vino_queue_has_mapped_buffers(&vcs->fb_queue)) {
3155 dprintk("busy, buffers still mapped\n");
3156 return -EBUSY;
3157 } else {
3158 vino_queue_free(&vcs->fb_queue);
3159 vino_queue_init(&vcs->fb_queue, &rb->count);
3160 }
3161 } else {
3162 vino_capture_stop(vcs);
3163 vino_queue_free(&vcs->fb_queue);
3164 }
3165 break;
3166 }
3167 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3168 default:
3169 return -EINVAL;
3170 }
3171
3172 return 0;
3173}
3174
3175static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
3176 struct vino_framebuffer *fb,
3177 struct v4l2_buffer *b)
3178{
3179 if (vino_queue_outgoing_contains(&vcs->fb_queue,
3180 fb->id)) {
3181 b->flags &= ~V4L2_BUF_FLAG_QUEUED;
3182 b->flags |= V4L2_BUF_FLAG_DONE;
3183 } else if (vino_queue_incoming_contains(&vcs->fb_queue,
3184 fb->id)) {
3185 b->flags &= ~V4L2_BUF_FLAG_DONE;
3186 b->flags |= V4L2_BUF_FLAG_QUEUED;
3187 } else {
3188 b->flags &= ~(V4L2_BUF_FLAG_DONE |
3189 V4L2_BUF_FLAG_QUEUED);
3190 }
3191
3192 b->flags &= ~(V4L2_BUF_FLAG_TIMECODE);
3193
3194 if (fb->map_count > 0)
3195 b->flags |= V4L2_BUF_FLAG_MAPPED;
3196
3197 b->index = fb->id;
3198 b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
3199 V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
3200 b->m.offset = fb->offset;
3201 b->bytesused = fb->data_size;
3202 b->length = fb->size;
3203 b->field = V4L2_FIELD_INTERLACED;
3204 b->sequence = fb->frame_counter;
3205 memcpy(&b->timestamp, &fb->timestamp,
3206 sizeof(struct timeval));
3207 // b->input ?
3208
3209 dprintk("buffer %d: length = %d, bytesused = %d, offset = %d\n",
3210 fb->id, fb->size, fb->data_size, fb->offset);
3211}
3212
3213static int vino_v4l2_querybuf(struct vino_channel_settings *vcs,
3214 struct v4l2_buffer *b)
3215{
3216 if (vcs->reading)
3217 return -EBUSY;
3218
3219 switch (b->type) {
3220 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3221 struct vino_framebuffer *fb;
3222
3223 // TODO: check queue type
3224 if (b->index >= vino_queue_get_length(&vcs->fb_queue)) {
3225 dprintk("invalid index = %d\n",
3226 b->index);
3227 return -EINVAL;
3228 }
3229
3230 fb = vino_queue_get_buffer(&vcs->fb_queue,
3231 b->index);
3232 if (fb == NULL) {
3233			dprintk("vino_queue_get_buffer() failed\n");
3234 return -EINVAL;
3235 }
3236
3237 vino_v4l2_get_buffer_status(vcs, fb, b);
3238 break;
3239 }
3240 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3241 default:
3242 return -EINVAL;
3243 }
3244
3245 return 0;
3246}
3247
3248static int vino_v4l2_qbuf(struct vino_channel_settings *vcs,
3249 struct v4l2_buffer *b)
3250{
3251 if (vcs->reading)
3252 return -EBUSY;
3253
3254 switch (b->type) {
3255 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3256 struct vino_framebuffer *fb;
3257 int ret;
3258
3259 // TODO: check queue type
3260 if (b->memory != V4L2_MEMORY_MMAP) {
3261 dprintk("type not mmap\n");
3262 return -EINVAL;
3263 }
3264
3265 fb = vino_capture_enqueue(vcs, b->index);
3266 if (fb == NULL)
3267 return -EINVAL;
3268
3269 vino_v4l2_get_buffer_status(vcs, fb, b);
3270
3271 if (vcs->streaming) {
3272 ret = vino_capture_next(vcs, 1);
3273 if (ret)
3274 return ret;
3275 }
3276 break;
3277 }
3278 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3279 default:
3280 return -EINVAL;
3281 }
3282
3283 return 0;
3284}
3285
3286static int vino_v4l2_dqbuf(struct vino_channel_settings *vcs,
3287 struct v4l2_buffer *b,
3288 unsigned int nonblocking)
3289{
3290 if (vcs->reading)
3291 return -EBUSY;
3292
3293 switch (b->type) {
3294 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3295 struct vino_framebuffer *fb;
3296 unsigned int incoming, outgoing;
3297 int err;
3298
3299 // TODO: check queue type
3300
3301 err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
3302 if (err) {
3303 dprintk("vino_queue_get_incoming() failed\n");
3304 return -EIO;
3305 }
3306 err = vino_queue_get_outgoing(&vcs->fb_queue, &outgoing);
3307 if (err) {
3308 dprintk("vino_queue_get_outgoing() failed\n");
3309 return -EIO;
3310 }
3311
3312 dprintk("incoming = %d, outgoing = %d\n", incoming, outgoing);
3313
3314 if (outgoing == 0) {
3315 if (incoming == 0) {
3316 dprintk("no incoming or outgoing buffers\n");
3317 return -EINVAL;
3318 }
3319 if (nonblocking) {
3320 dprintk("non-blocking I/O was selected and "
3321 "there are no buffers to dequeue\n");
3322 return -EAGAIN;
3323 }
3324
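			/* wait for a finished frame; if the wait is
			 * interrupted, retry once before giving up */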
3325 err = vino_wait_for_frame(vcs);
3326 if (err) {
3327 err = vino_wait_for_frame(vcs);
3328 if (err) {
3329 /* interrupted */
3330 vino_capture_failed(vcs);
3331 return -EIO;
3332 }
3333 }
3334 }
3335
3336 fb = vino_queue_remove(&vcs->fb_queue, &b->index);
3337 if (fb == NULL) {
3338 dprintk("vino_queue_remove() failed\n");
3339 return -EINVAL;
3340 }
3341
3342 err = vino_check_buffer(vcs, fb);
3343 if (err)
3344 return -EIO;
3345
3346 vino_v4l2_get_buffer_status(vcs, fb, b);
3347 break;
3348 }
3349 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3350 default:
3351 return -EINVAL;
3352 }
3353
3354 return 0;
3355}
3356
3357static int vino_v4l2_streamon(struct vino_channel_settings *vcs)
3358{
3359 unsigned int incoming;
3360 int ret;
3361 if (vcs->reading)
3362 return -EBUSY;
3363
3364 if (vcs->streaming)
3365 return 0;
3366
3367 // TODO: check queue type
3368
3369 if (vino_queue_get_length(&vcs->fb_queue) < 1) {
3370 dprintk("no buffers allocated\n");
3371 return -EINVAL;
3372 }
3373
3374 ret = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
3375 if (ret) {
3376 dprintk("vino_queue_get_incoming() failed\n");
3377 return -EINVAL;
3378 }
3379
3380 vcs->streaming = 1;
3381
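	/* if buffers are already queued, start capturing right away;
	 * otherwise capture is started by the next VIDIOC_QBUF
	 * (vino_v4l2_qbuf() calls vino_capture_next() while streaming) */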
3382 if (incoming > 0) {
3383 ret = vino_capture_next(vcs, 1);
3384 if (ret) {
3385 vcs->streaming = 0;
3386
3387 dprintk("couldn't start capture\n");
3388 return -EINVAL;
3389 }
3390 }
3391
3392 return 0;
3393}
3394
3395static int vino_v4l2_streamoff(struct vino_channel_settings *vcs)
3396{
3397 if (vcs->reading)
3398 return -EBUSY;
3399
3400 if (!vcs->streaming)
3401 return 0;
3402
3403 vino_capture_stop(vcs);
3404 vcs->streaming = 0;
3405
3406 return 0;
3407}
3408
3409static int vino_v4l2_queryctrl(struct vino_channel_settings *vcs,
3410 struct v4l2_queryctrl *queryctrl)
3411{
3412 int i;
3413 int err = 0;
3414
3415 spin_lock(&vino_drvdata->input_lock);
3416
3417 switch (vcs->input) {
3418 case VINO_INPUT_D1:
3419 for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
3420 if (vino_indycam_v4l2_controls[i].id ==
3421 queryctrl->id) {
3422 memcpy(queryctrl,
3423 &vino_indycam_v4l2_controls[i],
3424 sizeof(struct v4l2_queryctrl));
3425 goto found;
3426 }
3427 }
3428
3429 err = -EINVAL;
3430 break;
3431 case VINO_INPUT_COMPOSITE:
3432 case VINO_INPUT_SVIDEO:
3433 for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
3434 if (vino_saa7191_v4l2_controls[i].id ==
3435 queryctrl->id) {
3436 memcpy(queryctrl,
3437 &vino_saa7191_v4l2_controls[i],
3438 sizeof(struct v4l2_queryctrl));
3439 goto found;
3440 }
3441 }
3442
3443 err = -EINVAL;
3444 break;
3445 default:
3446 err = -EINVAL;
3447 }
3448
3449 found:
3450 spin_unlock(&vino_drvdata->input_lock);
3451
3452 return err;
3453}
3454
3455static int vino_v4l2_g_ctrl(struct vino_channel_settings *vcs,
3456 struct v4l2_control *control)
3457{
3458 struct indycam_control indycam_ctrl;
3459 struct saa7191_control saa7191_ctrl;
3460 int err = 0;
3461
3462 spin_lock(&vino_drvdata->input_lock);
3463
3464 switch (vcs->input) {
3465 case VINO_INPUT_D1:
3466 i2c_camera_command(DECODER_INDYCAM_GET_CONTROLS,
3467 &indycam_ctrl);
3468
3469 switch(control->id) {
3470 case V4L2_CID_AUTOGAIN:
3471 control->value = indycam_ctrl.agc;
3472 break;
3473 case V4L2_CID_AUTO_WHITE_BALANCE:
3474 control->value = indycam_ctrl.awb;
3475 break;
3476 case V4L2_CID_GAIN:
3477 control->value = indycam_ctrl.gain;
3478 break;
3479 case V4L2_CID_PRIVATE_BASE:
3480 control->value = indycam_ctrl.red_saturation;
3481 break;
3482 case V4L2_CID_PRIVATE_BASE + 1:
3483 control->value = indycam_ctrl.blue_saturation;
3484 break;
3485 case V4L2_CID_RED_BALANCE:
3486 control->value = indycam_ctrl.red_balance;
3487 break;
3488 case V4L2_CID_BLUE_BALANCE:
3489 control->value = indycam_ctrl.blue_balance;
3490 break;
3491 case V4L2_CID_EXPOSURE:
3492 control->value = indycam_ctrl.shutter;
3493 break;
3494 case V4L2_CID_GAMMA:
3495 control->value = indycam_ctrl.gamma;
3496 break;
3497 default:
3498 err = -EINVAL;
3499 }
3500 break;
3501 case VINO_INPUT_COMPOSITE:
3502 case VINO_INPUT_SVIDEO:
3503 i2c_decoder_command(DECODER_SAA7191_GET_CONTROLS,
3504 &saa7191_ctrl);
3505
3506 switch(control->id) {
3507 case V4L2_CID_HUE:
3508 control->value = saa7191_ctrl.hue;
3509 break;
3510 case V4L2_CID_PRIVATE_BASE:
3511 control->value = saa7191_ctrl.vtrc;
3512 break;
3513 default:
3514 err = -EINVAL;
3515 }
3516 break;
3517 default:
3518 err = -EINVAL;
3519 }
3520
3521 spin_unlock(&vino_drvdata->input_lock);
3522
3523 return err;
3524}
3525
3526static int vino_v4l2_s_ctrl(struct vino_channel_settings *vcs,
3527 struct v4l2_control *control)
3528{
3529 struct indycam_control indycam_ctrl;
3530 struct saa7191_control saa7191_ctrl;
3531 int i;
3532 int err = 0;
3533
3534 spin_lock(&vino_drvdata->input_lock);
3535
3536 switch (vcs->input) {
3537 case VINO_INPUT_D1:
3538 for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
3539 if (vino_indycam_v4l2_controls[i].id ==
3540 control->id) {
3541 if ((control->value >=
3542 vino_indycam_v4l2_controls[i].minimum)
3543 && (control->value <=
3544 vino_indycam_v4l2_controls[i].
3545 maximum)) {
3546 goto ok1;
3547 } else {
3548 err = -ERANGE;
3549 goto error;
3550 }
3551 }
3552 }
3553 err = -EINVAL;
3554 goto error;
3555
3556ok1:
3557 indycam_ctrl.agc = INDYCAM_VALUE_UNCHANGED;
3558 indycam_ctrl.awb = INDYCAM_VALUE_UNCHANGED;
3559 indycam_ctrl.shutter = INDYCAM_VALUE_UNCHANGED;
3560 indycam_ctrl.gain = INDYCAM_VALUE_UNCHANGED;
3561 indycam_ctrl.red_balance = INDYCAM_VALUE_UNCHANGED;
3562 indycam_ctrl.blue_balance = INDYCAM_VALUE_UNCHANGED;
3563 indycam_ctrl.red_saturation = INDYCAM_VALUE_UNCHANGED;
3564 indycam_ctrl.blue_saturation = INDYCAM_VALUE_UNCHANGED;
3565 indycam_ctrl.gamma = INDYCAM_VALUE_UNCHANGED;
3566
3567 switch(control->id) {
3568 case V4L2_CID_AUTOGAIN:
3569 indycam_ctrl.agc = control->value;
3570 break;
3571 case V4L2_CID_AUTO_WHITE_BALANCE:
3572 indycam_ctrl.awb = control->value;
3573 break;
3574 case V4L2_CID_GAIN:
3575 indycam_ctrl.gain = control->value;
3576 break;
3577 case V4L2_CID_PRIVATE_BASE:
3578 indycam_ctrl.red_saturation = control->value;
3579 break;
3580 case V4L2_CID_PRIVATE_BASE + 1:
3581 indycam_ctrl.blue_saturation = control->value;
3582 break;
3583 case V4L2_CID_RED_BALANCE:
3584 indycam_ctrl.red_balance = control->value;
3585 break;
3586 case V4L2_CID_BLUE_BALANCE:
3587 indycam_ctrl.blue_balance = control->value;
3588 break;
3589 case V4L2_CID_EXPOSURE:
3590 indycam_ctrl.shutter = control->value;
3591 break;
3592 case V4L2_CID_GAMMA:
3593 indycam_ctrl.gamma = control->value;
3594 break;
3595 default:
3596 err = -EINVAL;
3597 }
3598
3599 if (!err)
3600 i2c_camera_command(DECODER_INDYCAM_SET_CONTROLS,
3601 &indycam_ctrl);
3602 break;
3603 case VINO_INPUT_COMPOSITE:
3604 case VINO_INPUT_SVIDEO:
3605 for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
3606 if (vino_saa7191_v4l2_controls[i].id ==
3607 control->id) {
3608 if ((control->value >=
3609 vino_saa7191_v4l2_controls[i].minimum)
3610 && (control->value <=
3611 vino_saa7191_v4l2_controls[i].
3612 maximum)) {
3613 goto ok2;
3614 } else {
3615 err = -ERANGE;
3616 goto error;
3617 }
3618 }
3619 }
3620 err = -EINVAL;
3621 goto error;
3622
3623ok2:
3624 saa7191_ctrl.hue = SAA7191_VALUE_UNCHANGED;
3625 saa7191_ctrl.vtrc = SAA7191_VALUE_UNCHANGED;
3626
3627 switch(control->id) {
3628 case V4L2_CID_HUE:
3629 saa7191_ctrl.hue = control->value;
3630 break;
3631 case V4L2_CID_PRIVATE_BASE:
3632 saa7191_ctrl.vtrc = control->value;
3633 break;
3634 default:
3635 err = -EINVAL;
3636 }
3637
3638 if (!err)
3639 i2c_decoder_command(DECODER_SAA7191_SET_CONTROLS,
3640 &saa7191_ctrl);
3641 break;
3642 default:
3643 err = -EINVAL;
3644 }
3645
3646error:
3647 spin_unlock(&vino_drvdata->input_lock);
3648
3649 return err;
3650}
3651
3652/* File operations */
3653
3654static int vino_open(struct inode *inode, struct file *file)
3655{
3656 struct video_device *dev = video_devdata(file);
3657 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3658 int ret = 0;
3659 dprintk("open(): channel = %c\n",
3660 (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B');
3661
3662 down(&vcs->sem);
3663
3664 if (vcs->users) {
3665 dprintk("open(): driver busy\n");
3666 ret = -EBUSY;
3667 goto out;
3668 }
3669
3670 ret = vino_acquire_input(vcs);
3671 if (ret) {
3672 dprintk("open(): vino_acquire_input() failed\n");
3673 goto out;
3674 }
3675
3676 vcs->users++;
3677
3678 out:
3679 up(&vcs->sem);
3680
3681 dprintk("open(): %s!\n", ret ? "failed" : "complete");
3682
3683 return ret;
3684}
3685
3686static int vino_close(struct inode *inode, struct file *file)
3687{
3688 struct video_device *dev = video_devdata(file);
3689 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3690 dprintk("close():\n");
3691
3692 down(&vcs->sem);
3693
3694 vcs->users--;
3695
3696 if (!vcs->users) {
3697 vino_release_input(vcs);
3698
3699 /* stop DMA and free buffers */
3700 vino_capture_stop(vcs);
3701 vino_queue_free(&vcs->fb_queue);
3702 }
3703
3704 up(&vcs->sem);
3705
3706 return 0;
3707}
3708
3709static void vino_vm_open(struct vm_area_struct *vma)
3710{
3711 struct vino_framebuffer *fb = vma->vm_private_data;
3712
3713 fb->map_count++;
3714 dprintk("vino_vm_open(): count = %d\n", fb->map_count);
3715}
3716
3717static void vino_vm_close(struct vm_area_struct *vma)
3718{
3719 struct vino_framebuffer *fb = vma->vm_private_data;
3720
3721 fb->map_count--;
3722 dprintk("vino_vm_close(): count = %d\n", fb->map_count);
3723}
3724
3725static struct vm_operations_struct vino_vm_ops = {
3726 .open = vino_vm_open,
3727 .close = vino_vm_close,
3728};
3729
3730static int vino_mmap(struct file *file, struct vm_area_struct *vma)
3731{
3732 struct video_device *dev = video_devdata(file);
3733 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3734
3735 unsigned long start = vma->vm_start;
3736 unsigned long size = vma->vm_end - vma->vm_start;
3737 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
3738
3739 struct vino_framebuffer *fb = NULL;
3740 unsigned int i, length;
3741 int ret = 0;
3742
3743 dprintk("mmap():\n");
3744
3745 // TODO: reject mmap if already mapped
3746
3747 if (down_interruptible(&vcs->sem))
3748 return -EINTR;
3749
3750 if (vcs->reading) {
3751 ret = -EBUSY;
3752 goto out;
3753 }
3754
3755 // TODO: check queue type
3756
3757 if (!(vma->vm_flags & VM_WRITE)) {
3758 dprintk("mmap(): app bug: PROT_WRITE please\n");
3759 ret = -EINVAL;
3760 goto out;
3761 }
3762 if (!(vma->vm_flags & VM_SHARED)) {
3763 dprintk("mmap(): app bug: MAP_SHARED please\n");
3764 ret = -EINVAL;
3765 goto out;
3766 }
3767
3768 /* find the correct buffer using offset */
3769 length = vino_queue_get_length(&vcs->fb_queue);
3770 if (length == 0) {
3771 dprintk("mmap(): queue not initialized\n");
3772 ret = -EINVAL;
3773 goto out;
3774 }
3775
3776 for (i = 0; i < length; i++) {
3777 fb = vino_queue_get_buffer(&vcs->fb_queue, i);
3778 if (fb == NULL) {
3779 dprintk("mmap(): vino_queue_get_buffer() failed\n");
3780 ret = -EINVAL;
3781 goto out;
3782 }
3783
3784 if (fb->offset == offset)
3785 goto found;
3786 }
3787
3788 dprintk("mmap(): invalid offset = %lu\n", offset);
3789 ret = -EINVAL;
3790 goto out;
3791
3792found:
3793 dprintk("mmap(): buffer = %d\n", i);
3794
3795 if (size > (fb->desc_table.page_count * PAGE_SIZE)) {
3796 dprintk("mmap(): failed: size = %lu > %lu\n",
3797 size, fb->desc_table.page_count * PAGE_SIZE);
3798 ret = -EINVAL;
3799 goto out;
3800 }
3801
3802 for (i = 0; i < fb->desc_table.page_count; i++) {
3803 unsigned long pfn =
3804 virt_to_phys((void *)fb->desc_table.virtual[i]) >>
3805 PAGE_SHIFT;
3806
3807 if (size < PAGE_SIZE)
3808 break;
3809
3810 // protection was: PAGE_READONLY
3811 if (remap_pfn_range(vma, start, pfn, PAGE_SIZE,
3812 vma->vm_page_prot)) {
3813 dprintk("mmap(): remap_pfn_range() failed\n");
3814 ret = -EAGAIN;
3815 goto out;
3816 }
3817
3818 start += PAGE_SIZE;
3819 size -= PAGE_SIZE;
3820 }
3821
3822 fb->map_count = 1;
3823
3824 vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
3825 vma->vm_flags &= ~VM_IO;
3826 vma->vm_private_data = fb;
3827 vma->vm_file = file;
3828 vma->vm_ops = &vino_vm_ops;
3829
3830out:
3831 up(&vcs->sem);
3832
3833 return ret;
3834}
3835
3836static unsigned int vino_poll(struct file *file, poll_table *pt)
3837{
3838 struct video_device *dev = video_devdata(file);
3839 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3840 unsigned int outgoing;
3841 unsigned int ret = 0;
3842
3843 // lock mutex (?)
3844 // TODO: this has to be corrected for different read modes
3845
3846 dprintk("poll():\n");
3847
3848 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
3849 dprintk("poll(): vino_queue_get_outgoing() failed\n");
3850 ret = POLLERR;
3851 goto error;
3852 }
3853 if (outgoing > 0)
3854 goto over;
3855
3856 poll_wait(file, &vcs->fb_queue.frame_wait_queue, pt);
3857
3858 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
3859 dprintk("poll(): vino_queue_get_outgoing() failed\n");
3860 ret = POLLERR;
3861 goto error;
3862 }
3863
3864over:
3865 dprintk("poll(): data %savailable\n",
3866 (outgoing > 0) ? "" : "not ");
3867 if (outgoing > 0) {
3868 ret = POLLIN | POLLRDNORM;
3869 }
3870
3871error:
3872
3873 return ret;
3874}
3875
3876static int vino_do_ioctl(struct inode *inode, struct file *file,
3877 unsigned int cmd, void *arg)
3878{
3879 struct video_device *dev = video_devdata(file);
3880 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3881
3882 switch (_IOC_TYPE(cmd)) {
3883 case 'v':
3884 dprintk("ioctl(): V4L1 unsupported (0x%08x)\n", cmd);
3885 break;
3886 case 'V':
3887 dprintk("ioctl(): V4L2 %s (0x%08x)\n",
3888 v4l2_ioctl_names[_IOC_NR(cmd)], cmd);
3889 break;
3890 default:
3891 dprintk("ioctl(): unsupported command 0x%08x\n", cmd);
3892 }
3893
3894 switch (cmd) {
3895 /* TODO: V4L1 interface (use compatibility layer?) */
3896 /* V4L2 interface */
3897 case VIDIOC_QUERYCAP: {
3898 vino_v4l2_querycap(arg);
3899 break;
3900 }
3901 case VIDIOC_ENUMINPUT: {
3902 return vino_v4l2_enuminput(vcs, arg);
3903 }
3904 case VIDIOC_G_INPUT: {
3905 return vino_v4l2_g_input(vcs, arg);
3906 }
3907 case VIDIOC_S_INPUT: {
3908 return vino_v4l2_s_input(vcs, arg);
3909 }
3910 case VIDIOC_ENUMSTD: {
3911 return vino_v4l2_enumstd(vcs, arg);
3912 }
3913 case VIDIOC_G_STD: {
3914 return vino_v4l2_g_std(vcs, arg);
3915 }
3916 case VIDIOC_S_STD: {
3917 return vino_v4l2_s_std(vcs, arg);
3918 }
3919 case VIDIOC_ENUM_FMT: {
3920 return vino_v4l2_enum_fmt(vcs, arg);
3921 }
3922 case VIDIOC_TRY_FMT: {
3923 return vino_v4l2_try_fmt(vcs, arg);
3924 }
3925 case VIDIOC_G_FMT: {
3926 return vino_v4l2_g_fmt(vcs, arg);
3927 }
3928 case VIDIOC_S_FMT: {
3929 return vino_v4l2_s_fmt(vcs, arg);
3930 }
3931 case VIDIOC_CROPCAP: {
3932 return vino_v4l2_cropcap(vcs, arg);
3933 }
3934 case VIDIOC_G_CROP: {
3935 return vino_v4l2_g_crop(vcs, arg);
3936 }
3937 case VIDIOC_S_CROP: {
3938 return vino_v4l2_s_crop(vcs, arg);
3939 }
3940 case VIDIOC_G_PARM: {
3941 return vino_v4l2_g_parm(vcs, arg);
3942 }
3943 case VIDIOC_S_PARM: {
3944 return vino_v4l2_s_parm(vcs, arg);
3945 }
3946 case VIDIOC_REQBUFS: {
3947 return vino_v4l2_reqbufs(vcs, arg);
3948 }
3949 case VIDIOC_QUERYBUF: {
3950 return vino_v4l2_querybuf(vcs, arg);
3951 }
3952 case VIDIOC_QBUF: {
3953 return vino_v4l2_qbuf(vcs, arg);
3954 }
3955 case VIDIOC_DQBUF: {
3956 return vino_v4l2_dqbuf(vcs, arg, file->f_flags & O_NONBLOCK);
3957 }
3958 case VIDIOC_STREAMON: {
3959 return vino_v4l2_streamon(vcs);
3960 }
3961 case VIDIOC_STREAMOFF: {
3962 return vino_v4l2_streamoff(vcs);
3963 }
3964 case VIDIOC_QUERYCTRL: {
3965 return vino_v4l2_queryctrl(vcs, arg);
3966 }
3967 case VIDIOC_G_CTRL: {
3968 return vino_v4l2_g_ctrl(vcs, arg);
3969 }
3970 case VIDIOC_S_CTRL: {
3971 return vino_v4l2_s_ctrl(vcs, arg);
3972 }
3973 default:
3974 return -ENOIOCTLCMD;
3975 }
3976
3977 return 0;
3978}
3979
3980static int vino_ioctl(struct inode *inode, struct file *file,
3981 unsigned int cmd, unsigned long arg)
3982{
3983 struct video_device *dev = video_devdata(file);
3984 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3985 int ret;
3986
3987 if (down_interruptible(&vcs->sem))
3988 return -EINTR;
3989
3990 ret = video_usercopy(inode, file, cmd, arg, vino_do_ioctl);
3991
3992 up(&vcs->sem);
3993
3994 return ret;
3995}
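
/*
 * Illustrative sketch only -- this user-space snippet is not part of the
 * driver or of this patch.  It shows the ioctl sequence that the V4L2
 * handlers above implement, as seen from a capture application.  The
 * device node name and the buffer count are assumptions for the example,
 * and error checking of the ioctl calls is omitted for brevity.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

static int capture_one_frame(const char *node)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *data;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;

	/* VIDIOC_REQBUFS -> vino_v4l2_reqbufs() sets up the frame queue */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	/* VIDIOC_QUERYBUF -> vino_v4l2_querybuf() reports the mmap offset;
	 * vino_mmap() insists on PROT_WRITE and MAP_SHARED */
	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	ioctl(fd, VIDIOC_QUERYBUF, &buf);
	data = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, buf.m.offset);

	/* queue the buffer, start streaming and wait for a frame */
	ioctl(fd, VIDIOC_QBUF, &buf);
	ioctl(fd, VIDIOC_STREAMON, &type);
	ioctl(fd, VIDIOC_DQBUF, &buf);	/* blocks until a frame is ready */

	/* buf.bytesused bytes of image data are now available at 'data' */

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	munmap(data, buf.length);
	close(fd);
	return 0;
}
#endif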
3996
3997/* Initialization and cleanup */
3998
3999// __initdata
4000static int vino_init_stage = 0;
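/* number of completed initialization steps; each successful step increments
 * it, and on failure it is passed to vino_module_cleanup() so that only the
 * steps already done are undone */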
4001
4002static struct file_operations vino_fops = {
212 .owner = THIS_MODULE, 4003 .owner = THIS_MODULE,
213 .type = VID_TYPE_CAPTURE | VID_TYPE_SUBCAPTURE,
214 .hardware = VID_HARDWARE_VINO,
215 .name = "VINO",
216 .open = vino_open, 4004 .open = vino_open,
217 .close = vino_close, 4005 .release = vino_close,
218 .ioctl = vino_ioctl, 4006 .ioctl = vino_ioctl,
219 .mmap = vino_mmap, 4007 .mmap = vino_mmap,
4008 .poll = vino_poll,
4009 .llseek = no_llseek,
220}; 4010};
221 4011
222static int __init vino_init(void) 4012static struct video_device v4l_device_template = {
4013 .name = "NOT SET",
4014 //.type = VID_TYPE_CAPTURE | VID_TYPE_SUBCAPTURE |
4015 // VID_TYPE_CLIPPING | VID_TYPE_SCALES, VID_TYPE_OVERLAY
4016 .hardware = VID_HARDWARE_VINO,
4017 .fops = &vino_fops,
4018 .minor = -1,
4019};
4020
4021static void vino_module_cleanup(int stage)
4022{
4023 switch(stage) {
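	/* note: the cases below fall through on purpose -- cleaning up from
	 * stage N also runs the cleanup for every earlier stage */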
4024 case 10:
4025 video_unregister_device(vino_drvdata->b.v4l_device);
4026 vino_drvdata->b.v4l_device = NULL;
4027 case 9:
4028 video_unregister_device(vino_drvdata->a.v4l_device);
4029 vino_drvdata->a.v4l_device = NULL;
4030 case 8:
4031 vino_i2c_del_bus();
4032 case 7:
4033 free_irq(SGI_VINO_IRQ, NULL);
4034 case 6:
4035 if (vino_drvdata->b.v4l_device) {
4036 video_device_release(vino_drvdata->b.v4l_device);
4037 vino_drvdata->b.v4l_device = NULL;
4038 }
4039 case 5:
4040 if (vino_drvdata->a.v4l_device) {
4041 video_device_release(vino_drvdata->a.v4l_device);
4042 vino_drvdata->a.v4l_device = NULL;
4043 }
4044 case 4:
4045 /* all entries in dma_cpu dummy table have the same address */
4046 dma_unmap_single(NULL,
4047 vino_drvdata->dummy_desc_table.dma_cpu[0],
4048 PAGE_SIZE, DMA_FROM_DEVICE);
4049 dma_free_coherent(NULL, VINO_DUMMY_DESC_COUNT
4050 * sizeof(dma_addr_t),
4051 (void *)vino_drvdata->
4052 dummy_desc_table.dma_cpu,
4053 vino_drvdata->dummy_desc_table.dma);
4054 case 3:
4055 free_page(vino_drvdata->dummy_page);
4056 case 2:
4057 kfree(vino_drvdata);
4058 case 1:
4059 iounmap(vino);
4060 case 0:
4061 break;
4062 default:
4063 dprintk("vino_module_cleanup(): invalid cleanup stage = %d\n",
4064 stage);
4065 }
4066}
4067
4068static int vino_probe(void)
223{ 4069{
224 unsigned long rev; 4070 unsigned long rev_id;
225 int i, ret = 0;
226 4071
227 /* VINO is Indy specific beast */ 4072 if (ip22_is_fullhouse()) {
228 if (ip22_is_fullhouse()) 4073 printk(KERN_ERR "VINO doesn't exist in IP22 Fullhouse\n");
229 return -ENODEV; 4074 return -ENODEV;
4075 }
230 4076
231 /*
232 * VINO is in the EISA address space, so the sysid register will tell
233 * us if the EISA_PRESENT pin on MC has been pulled low.
234 *
235 * If EISA_PRESENT is not set we definitely don't have a VINO equiped
236 * system.
237 */
238 if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) { 4077 if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
239		printk(KERN_ERR "VINO not found\n"); 4078		printk(KERN_ERR "VINO not found (EISA bus not present)\n");
240 return -ENODEV; 4079 return -ENODEV;
241 } 4080 }
242 4081
243 vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino)); 4082 vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino));
244 if (!vino) 4083 if (!vino) {
4084 printk(KERN_ERR "VINO: ioremap() failed\n");
245 return -EIO; 4085 return -EIO;
4086 }
4087 vino_init_stage++;
246 4088
247 /* Okay, once we know that VINO is present we'll read its revision 4089 if (get_dbe(rev_id, &(vino->rev_id))) {
248 * safe way. One never knows... */ 4090 printk(KERN_ERR "Failed to read VINO revision register\n");
249 if (get_dbe(rev, &(vino->rev_id))) { 4091 vino_module_cleanup(vino_init_stage);
250 printk(KERN_ERR "VINO: failed to read revision register\n"); 4092 return -ENODEV;
251 ret = -ENODEV;
252 goto out_unmap;
253 } 4093 }
254 if (VINO_ID_VALUE(rev) != VINO_CHIP_ID) { 4094
255 printk(KERN_ERR "VINO is not VINO (Rev/ID: 0x%04lx)\n", rev); 4095 if (VINO_ID_VALUE(rev_id) != VINO_CHIP_ID) {
256 ret = -ENODEV; 4096 printk(KERN_ERR "Unknown VINO chip ID (Rev/ID: 0x%02lx)\n",
257 goto out_unmap; 4097 rev_id);
4098 vino_module_cleanup(vino_init_stage);
4099 return -ENODEV;
258 } 4100 }
259 printk(KERN_INFO "VINO Rev: 0x%02lx\n", VINO_REV_NUM(rev));
260 4101
261 Vino = (struct vino_video *) 4102 printk(KERN_INFO "VINO with chip ID %ld, revision %ld found\n",
262 kmalloc(sizeof(struct vino_video), GFP_KERNEL); 4103 VINO_ID_VALUE(rev_id), VINO_REV_NUM(rev_id));
263 if (!Vino) { 4104
264 ret = -ENOMEM; 4105 return 0;
265 goto out_unmap; 4106}
4107
4108static int vino_init(void)
4109{
4110 dma_addr_t dma_dummy_address;
4111 int i;
4112
4113 vino_drvdata = (struct vino_settings *)
4114 kmalloc(sizeof(struct vino_settings), GFP_KERNEL);
4115 if (!vino_drvdata) {
4116 vino_module_cleanup(vino_init_stage);
4117 return -ENOMEM;
266 } 4118 }
4119 memset(vino_drvdata, 0, sizeof(struct vino_settings));
4120 vino_init_stage++;
267 4121
268 Vino->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA); 4122 /* create a dummy dma descriptor */
269 if (!Vino->dummy_page) { 4123 vino_drvdata->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
270 ret = -ENOMEM; 4124 if (!vino_drvdata->dummy_page) {
271 goto out_free_vino; 4125 vino_module_cleanup(vino_init_stage);
4126 return -ENOMEM;
272 } 4127 }
273 for (i = 0; i < 4; i++) 4128 vino_init_stage++;
274 Vino->dummy_buf[i] = PHYSADDR(Vino->dummy_page); 4129
4130 // TODO: use page_count in dummy_desc_table
4131
4132 vino_drvdata->dummy_desc_table.dma_cpu =
4133 dma_alloc_coherent(NULL,
4134 VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t),
4135 &vino_drvdata->dummy_desc_table.dma,
4136 GFP_KERNEL | GFP_DMA);
4137 if (!vino_drvdata->dummy_desc_table.dma_cpu) {
4138 vino_module_cleanup(vino_init_stage);
4139 return -ENOMEM;
4140 }
4141 vino_init_stage++;
4142
4143 dma_dummy_address = dma_map_single(NULL,
4144 (void *)vino_drvdata->dummy_page,
4145 PAGE_SIZE, DMA_FROM_DEVICE);
4146 for (i = 0; i < VINO_DUMMY_DESC_COUNT; i++) {
4147 vino_drvdata->dummy_desc_table.dma_cpu[i] = dma_dummy_address;
4148 }
4149
4150 /* initialize VINO */
275 4151
276 vino->control = 0; 4152 vino->control = 0;
277 /* prevent VINO from throwing spurious interrupts */ 4153 vino->a.next_4_desc = vino_drvdata->dummy_desc_table.dma;
278 vino->a.next_4_desc = PHYSADDR(Vino->dummy_buf); 4154 vino->b.next_4_desc = vino_drvdata->dummy_desc_table.dma;
279 vino->b.next_4_desc = PHYSADDR(Vino->dummy_buf); 4155 udelay(VINO_DESC_FETCH_DELAY);
280 udelay(5); 4156
281 vino->intr_status = 0; 4157 vino->intr_status = 0;
282 /* set threshold level */
283 vino->a.fifo_thres = threshold_a;
284 vino->b.fifo_thres = threshold_b;
285 4158
286 init_MUTEX(&Vino->input_lock); 4159 vino->a.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
4160 vino->b.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
4161
4162 return 0;
4163}
4164
4165static int vino_init_channel_settings(struct vino_channel_settings *vcs,
4166 unsigned int channel, const char *name)
4167{
4168 vcs->channel = channel;
4169 vcs->input = VINO_INPUT_NONE;
4170 vcs->alpha = 0;
4171 vcs->users = 0;
4172 vcs->data_format = VINO_DATA_FMT_GREY;
4173 vcs->data_norm = VINO_DATA_NORM_NTSC;
4174 vcs->decimation = 1;
4175 vino_set_default_clipping(vcs);
4176 vino_set_default_framerate(vcs);
4177
4178 vcs->capturing = 0;
4179
4180 init_MUTEX(&vcs->sem);
4181 spin_lock_init(&vcs->capture_lock);
4182
4183 init_MUTEX(&vcs->fb_queue.queue_sem);
4184 spin_lock_init(&vcs->fb_queue.queue_lock);
4185 init_waitqueue_head(&vcs->fb_queue.frame_wait_queue);
4186
4187 vcs->v4l_device = video_device_alloc();
4188 if (!vcs->v4l_device) {
4189 vino_module_cleanup(vino_init_stage);
4190 return -ENOMEM;
4191 }
4192 vino_init_stage++;
4193
4194 memcpy(vcs->v4l_device, &v4l_device_template,
4195 sizeof(struct video_device));
4196 strcpy(vcs->v4l_device->name, name);
4197 vcs->v4l_device->release = video_device_release;
4198
4199 video_set_drvdata(vcs->v4l_device, vcs);
4200
4201 return 0;
4202}
4203
4204static int __init vino_module_init(void)
4205{
4206 int ret;
4207
4208 printk(KERN_INFO "SGI VINO driver version %s\n",
4209 VINO_MODULE_VERSION);
4210
4211 ret = vino_probe();
4212 if (ret)
4213 return ret;
4214
4215 ret = vino_init();
4216 if (ret)
4217 return ret;
4218
4219 /* initialize data structures */
287 4220
288 if (request_irq(SGI_VINO_IRQ, vino_interrupt, 0, vinostr, NULL)) { 4221 spin_lock_init(&vino_drvdata->vino_lock);
289 printk(KERN_ERR "VINO: irq%02d registration failed\n", 4222 spin_lock_init(&vino_drvdata->input_lock);
4223
4224 ret = vino_init_channel_settings(&vino_drvdata->a, VINO_CHANNEL_A,
4225 vino_v4l_device_name_a);
4226 if (ret)
4227 return ret;
4228
4229 ret = vino_init_channel_settings(&vino_drvdata->b, VINO_CHANNEL_B,
4230 vino_v4l_device_name_b);
4231 if (ret)
4232 return ret;
4233
4234 /* initialize hardware and register V4L devices */
4235
4236 ret = request_irq(SGI_VINO_IRQ, vino_interrupt, 0,
4237 vino_driver_description, NULL);
4238 if (ret) {
4239 printk(KERN_ERR "VINO: requesting IRQ %02d failed\n",
290 SGI_VINO_IRQ); 4240 SGI_VINO_IRQ);
291 ret = -EAGAIN; 4241 vino_module_cleanup(vino_init_stage);
292 goto out_free_page; 4242 return -EAGAIN;
293 } 4243 }
4244 vino_init_stage++;
294 4245
295 ret = vino_i2c_add_bus(); 4246 ret = vino_i2c_add_bus();
296 if (ret) { 4247 if (ret) {
297 printk(KERN_ERR "VINO: I2C bus registration failed\n"); 4248 printk(KERN_ERR "VINO I2C bus registration failed\n");
298 goto out_free_irq; 4249 vino_module_cleanup(vino_init_stage);
4250 return ret;
299 } 4251 }
4252 vino_init_stage++;
300 4253
301 if (video_register_device(&Vino->chA.vdev, VFL_TYPE_GRABBER, -1) < 0) { 4254 ret = video_register_device(vino_drvdata->a.v4l_device,
302 printk("%s, chnl %d: device registration failed.\n", 4255 VFL_TYPE_GRABBER, -1);
303 Vino->chA.vdev.name, Vino->chA.chan); 4256 if (ret < 0) {
304 ret = -EINVAL; 4257 printk(KERN_ERR "VINO channel A Video4Linux-device "
305 goto out_i2c_del_bus; 4258 "registration failed\n");
4259 vino_module_cleanup(vino_init_stage);
4260 return -EINVAL;
306 } 4261 }
307 if (video_register_device(&Vino->chB.vdev, VFL_TYPE_GRABBER, -1) < 0) { 4262 vino_init_stage++;
308 printk("%s, chnl %d: device registration failed.\n", 4263
309 Vino->chB.vdev.name, Vino->chB.chan); 4264 ret = video_register_device(vino_drvdata->b.v4l_device,
310 ret = -EINVAL; 4265 VFL_TYPE_GRABBER, -1);
311 goto out_unregister_vdev; 4266 if (ret < 0) {
4267 printk(KERN_ERR "VINO channel B Video4Linux-device "
4268 "registration failed\n");
4269 vino_module_cleanup(vino_init_stage);
4270 return -EINVAL;
312 } 4271 }
4272 vino_init_stage++;
313 4273
314 return 0; 4274#if defined(CONFIG_KMOD) && defined(MODULE)
4275 request_module("saa7191");
4276 request_module("indycam");
4277#endif
315 4278
316out_unregister_vdev: 4279 dprintk("init complete!\n");
317 video_unregister_device(&Vino->chA.vdev);
318out_i2c_del_bus:
319 vino_i2c_del_bus();
320out_free_irq:
321 free_irq(SGI_VINO_IRQ, NULL);
322out_free_page:
323 free_page(Vino->dummy_page);
324out_free_vino:
325 kfree(Vino);
326out_unmap:
327 iounmap(vino);
328 4280
329 return ret; 4281 return 0;
330} 4282}
331 4283
332static void __exit vino_exit(void) 4284static void __exit vino_module_exit(void)
333{ 4285{
334 video_unregister_device(&Vino->chA.vdev); 4286 dprintk("exiting, stage = %d ...\n", vino_init_stage);
335 video_unregister_device(&Vino->chB.vdev); 4287 vino_module_cleanup(vino_init_stage);
336 vino_i2c_del_bus(); 4288 dprintk("cleanup complete, exit!\n");
337 free_irq(SGI_VINO_IRQ, NULL);
338 free_page(Vino->dummy_page);
339 kfree(Vino);
340 iounmap(vino);
341} 4289}
342 4290
343module_init(vino_init); 4291module_init(vino_module_init);
344module_exit(vino_exit); 4292module_exit(vino_module_exit);
345
346MODULE_DESCRIPTION("Video4Linux driver for SGI Indy VINO (IndyCam)");
347MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/vino.h b/drivers/media/video/vino.h
index d2fce472f35a..de2d615ae7c9 100644
--- a/drivers/media/video/vino.h
+++ b/drivers/media/video/vino.h
@@ -1,13 +1,19 @@
1/* 1/*
2 * Driver for the VINO (Video In No Out) system found in SGI Indys.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License version 2 as published by the Free Software Foundation.
6 *
2 * Copyright (C) 1999 Ulf Karlsson <ulfc@bun.falkenberg.se> 7 * Copyright (C) 1999 Ulf Karlsson <ulfc@bun.falkenberg.se>
3 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> 8 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
4 */ 9 */
5 10
6#ifndef VINO_H 11#ifndef _VINO_H_
7#define VINO_H 12#define _VINO_H_
8 13
9#define VINO_BASE 0x00080000 /* Vino is in the EISA address space, 14#define VINO_BASE 0x00080000 /* Vino is in the EISA address space,
10 * but it is not an EISA bus card */ 15 * but it is not an EISA bus card */
16#define VINO_PAGE_SIZE 4096
11 17
12struct sgi_vino_channel { 18struct sgi_vino_channel {
13 u32 _pad_alpha; 19 u32 _pad_alpha;
@@ -21,8 +27,9 @@ struct sgi_vino_channel {
21 u32 _pad_clip_end; 27 u32 _pad_clip_end;
22 volatile u32 clip_end; 28 volatile u32 clip_end;
23 29
30#define VINO_FRAMERT_FULL 0xfff
24#define VINO_FRAMERT_PAL (1<<0) /* 0=NTSC 1=PAL */ 31#define VINO_FRAMERT_PAL (1<<0) /* 0=NTSC 1=PAL */
25#define VINO_FRAMERT_RT(x) (((x) & 0x1fff) << 1) /* bits 1:12 */ 32#define VINO_FRAMERT_RT(x) (((x) & 0xfff) << 1) /* bits 1:12 */
26 u32 _pad_frame_rate; 33 u32 _pad_frame_rate;
27 volatile u32 frame_rate; 34 volatile u32 frame_rate;
28 35
@@ -67,18 +74,18 @@ struct sgi_vino {
67 volatile u32 rev_id; 74 volatile u32 rev_id;
68 75
69#define VINO_CTRL_LITTLE_ENDIAN (1<<0) 76#define VINO_CTRL_LITTLE_ENDIAN (1<<0)
70#define VINO_CTRL_A_FIELD_TRANS_INT (1<<1) /* Field transferred int */ 77#define VINO_CTRL_A_EOF_INT (1<<1) /* Field transferred int */
71#define VINO_CTRL_A_FIFO_OF_INT (1<<2) /* FIFO overflow int */ 78#define VINO_CTRL_A_FIFO_INT (1<<2) /* FIFO overflow int */
72#define VINO_CTRL_A_END_DESC_TBL_INT (1<<3) /* End of desc table int */ 79#define VINO_CTRL_A_EOD_INT (1<<3) /* End of desc table int */
73#define VINO_CTRL_A_INT (VINO_CTRL_A_FIELD_TRANS_INT | \ 80#define VINO_CTRL_A_INT (VINO_CTRL_A_EOF_INT | \
74 VINO_CTRL_A_FIFO_OF_INT | \ 81 VINO_CTRL_A_FIFO_INT | \
75 VINO_CTRL_A_END_DESC_TBL_INT) 82 VINO_CTRL_A_EOD_INT)
76#define VINO_CTRL_B_FIELD_TRANS_INT (1<<4) /* Field transferred int */ 83#define VINO_CTRL_B_EOF_INT (1<<4) /* Field transferred int */
77#define VINO_CTRL_B_FIFO_OF_INT (1<<5) /* FIFO overflow int */ 84#define VINO_CTRL_B_FIFO_INT (1<<5) /* FIFO overflow int */
78#define VINO_CTRL_B_END_DESC_TBL_INT (1<<6) /* End of desc table int */ 85#define VINO_CTRL_B_EOD_INT (1<<6) /* End of desc table int */
79#define VINO_CTRL_B_INT (VINO_CTRL_B_FIELD_TRANS_INT | \ 86#define VINO_CTRL_B_INT (VINO_CTRL_B_EOF_INT | \
80 VINO_CTRL_B_FIFO_OF_INT | \ 87 VINO_CTRL_B_FIFO_INT | \
81 VINO_CTRL_B_END_DESC_TBL_INT) 88 VINO_CTRL_B_EOD_INT)
82#define VINO_CTRL_A_DMA_ENBL (1<<7) 89#define VINO_CTRL_A_DMA_ENBL (1<<7)
83#define VINO_CTRL_A_INTERLEAVE_ENBL (1<<8) 90#define VINO_CTRL_A_INTERLEAVE_ENBL (1<<8)
84#define VINO_CTRL_A_SYNC_ENBL (1<<9) 91#define VINO_CTRL_A_SYNC_ENBL (1<<9)
@@ -104,18 +111,18 @@ struct sgi_vino {
104 u32 _pad_control; 111 u32 _pad_control;
105 volatile u32 control; 112 volatile u32 control;
106 113
107#define VINO_INTSTAT_A_FIELD_TRANS (1<<0) /* Field transferred int */ 114#define VINO_INTSTAT_A_EOF (1<<0) /* Field transferred int */
108#define VINO_INTSTAT_A_FIFO_OF (1<<1) /* FIFO overflow int */ 115#define VINO_INTSTAT_A_FIFO (1<<1) /* FIFO overflow int */
109#define VINO_INTSTAT_A_END_DESC_TBL (1<<2) /* End of desc table int */ 116#define VINO_INTSTAT_A_EOD (1<<2) /* End of desc table int */
110#define VINO_INTSTAT_A (VINO_INTSTAT_A_FIELD_TRANS | \ 117#define VINO_INTSTAT_A (VINO_INTSTAT_A_EOF | \
111 VINO_INTSTAT_A_FIFO_OF | \ 118 VINO_INTSTAT_A_FIFO | \
112 VINO_INTSTAT_A_END_DESC_TBL) 119 VINO_INTSTAT_A_EOD)
113#define VINO_INTSTAT_B_FIELD_TRANS (1<<3) /* Field transferred int */ 120#define VINO_INTSTAT_B_EOF (1<<3) /* Field transferred int */
114#define VINO_INTSTAT_B_FIFO_OF (1<<4) /* FIFO overflow int */ 121#define VINO_INTSTAT_B_FIFO (1<<4) /* FIFO overflow int */
115#define VINO_INTSTAT_B_END_DESC_TBL (1<<5) /* End of desc table int */ 122#define VINO_INTSTAT_B_EOD (1<<5) /* End of desc table int */
116#define VINO_INTSTAT_B (VINO_INTSTAT_B_FIELD_TRANS | \ 123#define VINO_INTSTAT_B (VINO_INTSTAT_B_EOF | \
117 VINO_INTSTAT_B_FIFO_OF | \ 124 VINO_INTSTAT_B_FIFO | \
118 VINO_INTSTAT_B_END_DESC_TBL) 125 VINO_INTSTAT_B_EOD)
119 u32 _pad_intr_status; 126 u32 _pad_intr_status;
120 volatile u32 intr_status; 127 volatile u32 intr_status;
121 128
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 5dbd9f6bf353..4437bdebe24f 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -576,7 +576,6 @@ static struct i2c_client_address_data addr_data = {
576 .normal_i2c = normal_i2c, 576 .normal_i2c = normal_i2c,
577 .probe = &ignore, 577 .probe = &ignore,
578 .ignore = &ignore, 578 .ignore = &ignore,
579 .force = &ignore,
580}; 579};
581 580
582static struct i2c_driver vpx3220_i2c_driver; 581static struct i2c_driver vpx3220_i2c_driver;
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index 25743085b2d5..eed2acea1779 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -737,7 +737,7 @@ static struct i2c_algo_bit_data zoran_i2c_bit_data_template = {
737}; 737};
738 738
739static struct i2c_adapter zoran_i2c_adapter_template = { 739static struct i2c_adapter zoran_i2c_adapter_template = {
740 I2C_DEVNAME("zr36057"), 740 .name = "zr36057",
741 .id = I2C_HW_B_ZR36067, 741 .id = I2C_HW_B_ZR36067,
742 .algo = NULL, 742 .algo = NULL,
743 .client_register = zoran_i2c_client_register, 743 .client_register = zoran_i2c_client_register,
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 9f98334e5076..b61e3d175070 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -6,7 +6,7 @@
6 * Title: MPI Message independent structures and definitions 6 * Title: MPI Message independent structures and definitions
7 * Creation Date: July 27, 2000 7 * Creation Date: July 27, 2000
8 * 8 *
9 * mpi.h Version: 01.05.07 9 * mpi.h Version: 01.05.08
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -71,6 +71,9 @@
71 * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and 71 * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
72 * TargetAssistExtended requests. 72 * TargetAssistExtended requests.
73 * Removed EEDP IOCStatus codes. 73 * Removed EEDP IOCStatus codes.
74 * 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
75 * TargetAssistExtended requests.
76 * Added EEDP IOCStatus codes.
74 * -------------------------------------------------------------------------- 77 * --------------------------------------------------------------------------
75 */ 78 */
76 79
@@ -101,7 +104,7 @@
101/* Note: The major versions of 0xe0 through 0xff are reserved */ 104/* Note: The major versions of 0xe0 through 0xff are reserved */
102 105
103/* versioning for this MPI header set */ 106/* versioning for this MPI header set */
104#define MPI_HEADER_VERSION_UNIT (0x09) 107#define MPI_HEADER_VERSION_UNIT (0x0A)
105#define MPI_HEADER_VERSION_DEV (0x00) 108#define MPI_HEADER_VERSION_DEV (0x00)
106#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00) 109#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
107#define MPI_HEADER_VERSION_UNIT_SHIFT (8) 110#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
@@ -292,10 +295,13 @@
292#define MPI_FUNCTION_DIAG_BUFFER_POST (0x1D) 295#define MPI_FUNCTION_DIAG_BUFFER_POST (0x1D)
293#define MPI_FUNCTION_DIAG_RELEASE (0x1E) 296#define MPI_FUNCTION_DIAG_RELEASE (0x1E)
294 297
298#define MPI_FUNCTION_SCSI_IO_32 (0x1F)
299
295#define MPI_FUNCTION_LAN_SEND (0x20) 300#define MPI_FUNCTION_LAN_SEND (0x20)
296#define MPI_FUNCTION_LAN_RECEIVE (0x21) 301#define MPI_FUNCTION_LAN_RECEIVE (0x21)
297#define MPI_FUNCTION_LAN_RESET (0x22) 302#define MPI_FUNCTION_LAN_RESET (0x22)
298 303
304#define MPI_FUNCTION_TARGET_ASSIST_EXTENDED (0x23)
299#define MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) 305#define MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
300#define MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) 306#define MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
301 307
@@ -681,6 +687,15 @@ typedef struct _MSG_DEFAULT_REPLY
681#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) 687#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
682 688
683/****************************************************************************/ 689/****************************************************************************/
690/* For use by SCSI Initiator and SCSI Target end-to-end data protection */
691/****************************************************************************/
692
693#define MPI_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
694#define MPI_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
695#define MPI_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
696
697
698/****************************************************************************/
684/* SCSI Target values */ 699/* SCSI Target values */
685/****************************************************************************/ 700/****************************************************************************/
686 701
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 15b12b06799d..d8339896f734 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Config message, structures, and Pages 6 * Title: MPI Config message, structures, and Pages
7 * Creation Date: July 27, 2000 7 * Creation Date: July 27, 2000
8 * 8 *
9 * mpi_cnfg.h Version: 01.05.08 9 * mpi_cnfg.h Version: 01.05.09
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -232,6 +232,23 @@
232 * New physical mapping mode in SAS IO Unit Page 2. 232 * New physical mapping mode in SAS IO Unit Page 2.
233 * Added CONFIG_PAGE_SAS_ENCLOSURE_0. 233 * Added CONFIG_PAGE_SAS_ENCLOSURE_0.
234 * Added Slot and Enclosure fields to SAS Device Page 0. 234 * Added Slot and Enclosure fields to SAS Device Page 0.
235 * 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
236 * Added more RAID type defines to IOC Page 2.
237 * Added Port Enable Delay settings to BIOS Page 1.
238 * Added Bad Block Table Full define to RAID Volume Page 0.
239 * Added Previous State defines to RAID Physical Disk
240 * Page 0.
241 * Added Max Sata Targets define for DiscoveryStatus field
242 * of SAS IO Unit Page 0.
243 * Added Device Self Test to Control Flags of SAS IO Unit
244 * Page 1.
245 * Added Direct Attach Starting Slot Number define for SAS
246 * IO Unit Page 2.
247 * Added new fields in SAS Device Page 2 for enclosure
248 * mapping.
249 * Added OwnerDevHandle and Flags field to SAS PHY Page 0.
250 * Added IOC GPIO Flags define to SAS Enclosure Page 0.
251 * Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
235 * -------------------------------------------------------------------------- 252 * --------------------------------------------------------------------------
236 */ 253 */
237 254
@@ -477,6 +494,7 @@ typedef struct _MSG_CONFIG_REPLY
477#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626) 494#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626)
478#define MPI_MANUFACTPAGE_DEVICEID_FC939X (0x0642) 495#define MPI_MANUFACTPAGE_DEVICEID_FC939X (0x0642)
479#define MPI_MANUFACTPAGE_DEVICEID_FC949X (0x0640) 496#define MPI_MANUFACTPAGE_DEVICEID_FC949X (0x0640)
497#define MPI_MANUFACTPAGE_DEVICEID_FC949ES (0x0646)
480/* SCSI */ 498/* SCSI */
481#define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030) 499#define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030)
482#define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031) 500#define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031)
@@ -769,9 +787,13 @@ typedef struct _CONFIG_PAGE_IOC_1
769} CONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1, 787} CONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1,
770 IOCPage1_t, MPI_POINTER pIOCPage1_t; 788 IOCPage1_t, MPI_POINTER pIOCPage1_t;
771 789
772#define MPI_IOCPAGE1_PAGEVERSION (0x02) 790#define MPI_IOCPAGE1_PAGEVERSION (0x03)
773 791
774/* defines for the Flags field */ 792/* defines for the Flags field */
793#define MPI_IOCPAGE1_EEDP_MODE_MASK (0x07000000)
794#define MPI_IOCPAGE1_EEDP_MODE_OFF (0x00000000)
795#define MPI_IOCPAGE1_EEDP_MODE_T10 (0x01000000)
796#define MPI_IOCPAGE1_EEDP_MODE_LSI_1 (0x02000000)
775#define MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE (0x00000010) 797#define MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE (0x00000010)
776#define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001) 798#define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001)
777 799
@@ -795,6 +817,11 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
795#define MPI_RAID_VOL_TYPE_IS (0x00) 817#define MPI_RAID_VOL_TYPE_IS (0x00)
796#define MPI_RAID_VOL_TYPE_IME (0x01) 818#define MPI_RAID_VOL_TYPE_IME (0x01)
797#define MPI_RAID_VOL_TYPE_IM (0x02) 819#define MPI_RAID_VOL_TYPE_IM (0x02)
820#define MPI_RAID_VOL_TYPE_RAID_5 (0x03)
821#define MPI_RAID_VOL_TYPE_RAID_6 (0x04)
822#define MPI_RAID_VOL_TYPE_RAID_10 (0x05)
823#define MPI_RAID_VOL_TYPE_RAID_50 (0x06)
824#define MPI_RAID_VOL_TYPE_UNKNOWN (0xFF)
798 825
799/* IOC Page 2 Volume Flags values */ 826/* IOC Page 2 Volume Flags values */
800 827
@@ -820,13 +847,17 @@ typedef struct _CONFIG_PAGE_IOC_2
820} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2, 847} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
821 IOCPage2_t, MPI_POINTER pIOCPage2_t; 848 IOCPage2_t, MPI_POINTER pIOCPage2_t;
822 849
823#define MPI_IOCPAGE2_PAGEVERSION (0x02) 850#define MPI_IOCPAGE2_PAGEVERSION (0x03)
824 851
825/* IOC Page 2 Capabilities flags */ 852/* IOC Page 2 Capabilities flags */
826 853
827#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001) 854#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001)
828#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002) 855#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002)
829#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004) 856#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004)
857#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT (0x00000008)
858#define MPI_IOCPAGE2_CAP_FLAGS_RAID_6_SUPPORT (0x00000010)
859#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000020)
860#define MPI_IOCPAGE2_CAP_FLAGS_RAID_50_SUPPORT (0x00000040)
830#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000) 861#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000)
831#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000) 862#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000)
832#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000) 863#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000)
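With the values shown in the two IOC Page 2 hunks above, each volume-type code N happens to line up with capability bit (1 << N) in CapabilitiesFlags. The sketch below leans on that observation (it is a property of these particular values, not a documented invariant); constants are copied from the hunks and the sample capabilities word is made up.

#include <stdio.h>

/* Volume type codes and capability bits copied from the IOC Page 2 hunks above. */
#define MPI_RAID_VOL_TYPE_RAID_5               (0x03)
#define MPI_RAID_VOL_TYPE_RAID_10              (0x05)
#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT      (0x00000001)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT  (0x00000008)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000020)

/* With the values above, type code N maps to capability bit (1 << N). */
static int ioc_supports_vol_type(unsigned int cap_flags, unsigned int vol_type)
{
    return (cap_flags & (1u << vol_type)) != 0;
}

int main(void)
{
    /* Hypothetical CapabilitiesFlags: IS and RAID 5 supported, RAID 10 not. */
    unsigned int caps = MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT |
                        MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT;

    printf("RAID 5:  %s\n", ioc_supports_vol_type(caps, MPI_RAID_VOL_TYPE_RAID_5)  ? "yes" : "no");
    printf("RAID 10: %s\n", ioc_supports_vol_type(caps, MPI_RAID_VOL_TYPE_RAID_10) ? "yes" : "no");
    return 0;
}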
@@ -945,7 +976,7 @@ typedef struct _CONFIG_PAGE_BIOS_1
945} CONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1, 976} CONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1,
946 BIOSPage1_t, MPI_POINTER pBIOSPage1_t; 977 BIOSPage1_t, MPI_POINTER pBIOSPage1_t;
947 978
948#define MPI_BIOSPAGE1_PAGEVERSION (0x01) 979#define MPI_BIOSPAGE1_PAGEVERSION (0x02)
949 980
950/* values for the BiosOptions field */ 981/* values for the BiosOptions field */
951#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400) 982#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400)
@@ -954,6 +985,8 @@ typedef struct _CONFIG_PAGE_BIOS_1
954#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) 985#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
955 986
956/* values for the IOCSettings field */ 987/* values for the IOCSettings field */
988#define MPI_BIOSPAGE1_IOCSET_MASK_PORT_ENABLE_DELAY (0x00F00000)
989#define MPI_BIOSPAGE1_IOCSET_SHIFT_PORT_ENABLE_DELAY (20)
957#define MPI_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) 990#define MPI_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
958#define MPI_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) 991#define MPI_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
959#define MPI_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) 992#define MPI_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
@@ -1167,6 +1200,7 @@ typedef struct _CONFIG_PAGE_BIOS_2
1167#define MPI_BIOSPAGE2_FORM_PCI_SLOT_NUMBER (0x03) 1200#define MPI_BIOSPAGE2_FORM_PCI_SLOT_NUMBER (0x03)
1168#define MPI_BIOSPAGE2_FORM_FC_WWN (0x04) 1201#define MPI_BIOSPAGE2_FORM_FC_WWN (0x04)
1169#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05) 1202#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05)
1203#define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
1170 1204
1171 1205
1172/**************************************************************************** 1206/****************************************************************************
@@ -1957,11 +1991,11 @@ typedef struct _RAID_VOL0_STATUS
1957 RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t; 1991 RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t;
1958 1992
1959/* RAID Volume Page 0 VolumeStatus defines */ 1993/* RAID Volume Page 0 VolumeStatus defines */
1960
1961#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01) 1994#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01)
1962#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02) 1995#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02)
1963#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04) 1996#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04)
1964#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08) 1997#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08)
1998#define MPI_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x10)
1965 1999
1966#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00) 2000#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00)
1967#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01) 2001#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01)
@@ -2025,7 +2059,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
2025} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0, 2059} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
2026 RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t; 2060 RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
2027 2061
2028#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x04) 2062#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x05)
2029 2063
2030/* values for RAID Volume Page 0 InactiveStatus field */ 2064/* values for RAID Volume Page 0 InactiveStatus field */
2031#define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) 2065#define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
@@ -2104,6 +2138,8 @@ typedef struct _RAID_PHYS_DISK0_STATUS
2104#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01) 2138#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
2105#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02) 2139#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
2106#define MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x04) 2140#define MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x04)
2141#define MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00)
2142#define MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x08)
2107 2143
2108#define MPI_PHYSDISK0_STATUS_ONLINE (0x00) 2144#define MPI_PHYSDISK0_STATUS_ONLINE (0x00)
2109#define MPI_PHYSDISK0_STATUS_MISSING (0x01) 2145#define MPI_PHYSDISK0_STATUS_MISSING (0x01)
@@ -2132,7 +2168,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0
2132} CONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0, 2168} CONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0,
2133 RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t; 2169 RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t;
2134 2170
2135#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x01) 2171#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x02)
2136 2172
2137 2173
2138typedef struct _RAID_PHYS_DISK1_PATH 2174typedef struct _RAID_PHYS_DISK1_PATH
@@ -2263,7 +2299,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
2263} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0, 2299} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
2264 SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t; 2300 SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
2265 2301
2266#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x02) 2302#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x03)
2267 2303
2268/* values for SAS IO Unit Page 0 PortFlags */ 2304/* values for SAS IO Unit Page 0 PortFlags */
2269#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08) 2305#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08)
@@ -2299,6 +2335,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
2299#define MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) 2335#define MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
2300#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400) 2336#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400)
2301#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) 2337#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
2338#define MPI_SAS_IOUNIT0_DS_MAX_SATA_TARGETS (0x00001000)
2302 2339
2303 2340
2304typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA 2341typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
@@ -2336,6 +2373,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
2336#define MPI_SASIOUNITPAGE1_PAGEVERSION (0x04) 2373#define MPI_SASIOUNITPAGE1_PAGEVERSION (0x04)
2337 2374
2338/* values for SAS IO Unit Page 1 ControlFlags */ 2375/* values for SAS IO Unit Page 1 ControlFlags */
2376#define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
2339#define MPI_SAS_IOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) 2377#define MPI_SAS_IOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
2340#define MPI_SAS_IOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) 2378#define MPI_SAS_IOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
2341#define MPI_SAS_IOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) 2379#define MPI_SAS_IOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
@@ -2345,9 +2383,8 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
2345#define MPI_SAS_IOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) 2383#define MPI_SAS_IOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
2346#define MPI_SAS_IOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x00) 2384#define MPI_SAS_IOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x00)
2347#define MPI_SAS_IOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x01) 2385#define MPI_SAS_IOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x01)
2348#define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x10) 2386#define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x02)
2349 2387
2350#define MPI_SAS_IOUNIT1_CONTROL_AUTO_PORT_SAME_SAS_ADDR (0x0100)
2351#define MPI_SAS_IOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) 2388#define MPI_SAS_IOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
2352#define MPI_SAS_IOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) 2389#define MPI_SAS_IOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
2353#define MPI_SAS_IOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) 2390#define MPI_SAS_IOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
@@ -2390,7 +2427,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
2390} CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2, 2427} CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
2391 SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t; 2428 SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
2392 2429
2393#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x03) 2430#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x04)
2394 2431
2395/* values for SAS IO Unit Page 2 Status field */ 2432/* values for SAS IO Unit Page 2 Status field */
2396#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02) 2433#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
@@ -2406,6 +2443,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
2406#define MPI_SAS_IOUNIT2_FLAGS_ENCLOSURE_SLOT_PHYS_MAP (0x02) 2443#define MPI_SAS_IOUNIT2_FLAGS_ENCLOSURE_SLOT_PHYS_MAP (0x02)
2407 2444
2408#define MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT (0x10) 2445#define MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT (0x10)
2446#define MPI_SAS_IOUNIT2_FLAGS_DA_STARTING_SLOT (0x20)
2409 2447
2410 2448
2411typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3 2449typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3
@@ -2584,11 +2622,19 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_2
2584{ 2622{
2585 CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */ 2623 CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
2586 U64 PhysicalIdentifier; /* 08h */ 2624 U64 PhysicalIdentifier; /* 08h */
2587 U32 Reserved1; /* 10h */ 2625 U32 EnclosureMapping; /* 10h */
2588} CONFIG_PAGE_SAS_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_2, 2626} CONFIG_PAGE_SAS_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_2,
2589 SasDevicePage2_t, MPI_POINTER pSasDevicePage2_t; 2627 SasDevicePage2_t, MPI_POINTER pSasDevicePage2_t;
2590 2628
2591#define MPI_SASDEVICE2_PAGEVERSION (0x00) 2629#define MPI_SASDEVICE2_PAGEVERSION (0x01)
2630
2631/* defines for SAS Device Page 2 EnclosureMapping field */
2632#define MPI_SASDEVICE2_ENC_MAP_MASK_MISSING_COUNT (0x0000000F)
2633#define MPI_SASDEVICE2_ENC_MAP_SHIFT_MISSING_COUNT (0)
2634#define MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS (0x000007F0)
2635#define MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS (4)
2636#define MPI_SASDEVICE2_ENC_MAP_MASK_START_INDEX (0x001FF800)
2637#define MPI_SASDEVICE2_ENC_MAP_SHIFT_START_INDEX (11)
2592 2638
2593 2639
2594/**************************************************************************** 2640/****************************************************************************
@@ -2598,7 +2644,8 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_2
2598typedef struct _CONFIG_PAGE_SAS_PHY_0 2644typedef struct _CONFIG_PAGE_SAS_PHY_0
2599{ 2645{
2600 CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */ 2646 CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
2601 U32 Reserved1; /* 08h */ 2647 U16 OwnerDevHandle; /* 08h */
2648 U16 Reserved1; /* 0Ah */
2602 U64 SASAddress; /* 0Ch */ 2649 U64 SASAddress; /* 0Ch */
2603 U16 AttachedDevHandle; /* 14h */ 2650 U16 AttachedDevHandle; /* 14h */
2604 U8 AttachedPhyIdentifier; /* 16h */ 2651 U8 AttachedPhyIdentifier; /* 16h */
@@ -2607,12 +2654,12 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
2607 U8 ProgrammedLinkRate; /* 20h */ 2654 U8 ProgrammedLinkRate; /* 20h */
2608 U8 HwLinkRate; /* 21h */ 2655 U8 HwLinkRate; /* 21h */
2609 U8 ChangeCount; /* 22h */ 2656 U8 ChangeCount; /* 22h */
2610 U8 Reserved3; /* 23h */ 2657 U8 Flags; /* 23h */
2611 U32 PhyInfo; /* 24h */ 2658 U32 PhyInfo; /* 24h */
2612} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0, 2659} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
2613 SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t; 2660 SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
2614 2661
2615#define MPI_SASPHY0_PAGEVERSION (0x00) 2662#define MPI_SASPHY0_PAGEVERSION (0x01)
2616 2663
2617/* values for SAS PHY Page 0 ProgrammedLinkRate field */ 2664/* values for SAS PHY Page 0 ProgrammedLinkRate field */
2618#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0) 2665#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0)
@@ -2632,6 +2679,9 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
2632#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08) 2679#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08)
2633#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09) 2680#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09)
2634 2681
2682/* values for SAS PHY Page 0 Flags field */
2683#define MPI_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
2684
2635/* values for SAS PHY Page 0 PhyInfo field */ 2685/* values for SAS PHY Page 0 PhyInfo field */
2636#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000) 2686#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
2637#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000) 2687#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000)
@@ -2690,7 +2740,7 @@ typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
2690} CONFIG_PAGE_SAS_ENCLOSURE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_ENCLOSURE_0, 2740} CONFIG_PAGE_SAS_ENCLOSURE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_ENCLOSURE_0,
2691 SasEnclosurePage0_t, MPI_POINTER pSasEnclosurePage0_t; 2741 SasEnclosurePage0_t, MPI_POINTER pSasEnclosurePage0_t;
2692 2742
2693#define MPI_SASENCLOSURE0_PAGEVERSION (0x00) 2743#define MPI_SASENCLOSURE0_PAGEVERSION (0x01)
2694 2744
2695/* values for SAS Enclosure Page 0 Flags field */ 2745/* values for SAS Enclosure Page 0 Flags field */
2696#define MPI_SAS_ENCLS0_FLAGS_SEP_BUS_ID_VALID (0x0020) 2746#define MPI_SAS_ENCLS0_FLAGS_SEP_BUS_ID_VALID (0x0020)
@@ -2702,6 +2752,7 @@ typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
2702#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) 2752#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
2703#define MPI_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) 2753#define MPI_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
2704#define MPI_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) 2754#define MPI_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
2755#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
2705 2756
2706 2757
2707/**************************************************************************** 2758/****************************************************************************
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index c9edbee41edf..1a30ef16adb4 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -6,17 +6,17 @@
6 Copyright (c) 2000-2005 LSI Logic Corporation. 6 Copyright (c) 2000-2005 LSI Logic Corporation.
7 7
8 --------------------------------------- 8 ---------------------------------------
9 Header Set Release Version: 01.05.09 9 Header Set Release Version: 01.05.10
10 Header Set Release Date: 03-11-05 10 Header Set Release Date: 03-11-05
11 --------------------------------------- 11 ---------------------------------------
12 12
13 Filename Current version Prior version 13 Filename Current version Prior version
14 ---------- --------------- ------------- 14 ---------- --------------- -------------
15 mpi.h 01.05.07 01.05.06 15 mpi.h 01.05.08 01.05.07
16 mpi_ioc.h 01.05.08 01.05.07 16 mpi_ioc.h 01.05.09 01.05.08
17 mpi_cnfg.h 01.05.08 01.05.07 17 mpi_cnfg.h 01.05.09 01.05.08
18 mpi_init.h 01.05.04 01.05.03 18 mpi_init.h 01.05.05 01.05.04
19 mpi_targ.h 01.05.04 01.05.03 19 mpi_targ.h 01.05.05 01.05.04
20 mpi_fc.h 01.05.01 01.05.01 20 mpi_fc.h 01.05.01 01.05.01
21 mpi_lan.h 01.05.01 01.05.01 21 mpi_lan.h 01.05.01 01.05.01
22 mpi_raid.h 01.05.02 01.05.02 22 mpi_raid.h 01.05.02 01.05.02
@@ -24,7 +24,7 @@
24 mpi_inb.h 01.05.01 01.05.01 24 mpi_inb.h 01.05.01 01.05.01
25 mpi_sas.h 01.05.01 01.05.01 25 mpi_sas.h 01.05.01 01.05.01
26 mpi_type.h 01.05.01 01.05.01 26 mpi_type.h 01.05.01 01.05.01
27 mpi_history.txt 01.05.09 01.05.08 27 mpi_history.txt 01.05.09 01.05.09
28 28
29 29
30 * Date Version Description 30 * Date Version Description
@@ -88,6 +88,9 @@ mpi.h
88 * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and 88 * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
89 * TargetAssistExtended requests. 89 * TargetAssistExtended requests.
90 * Removed EEDP IOCStatus codes. 90 * Removed EEDP IOCStatus codes.
91 * 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
92 * TargetAssistExtended requests.
93 * Added EEDP IOCStatus codes.
91 * -------------------------------------------------------------------------- 94 * --------------------------------------------------------------------------
92 95
93mpi_ioc.h 96mpi_ioc.h
@@ -159,6 +162,8 @@ mpi_ioc.h
159 * Reply and IOC Init Request. 162 * Reply and IOC Init Request.
160 * 03-11-05 01.05.08 Added family code for 1068E family. 163 * 03-11-05 01.05.08 Added family code for 1068E family.
161 * Removed IOCFacts Reply EEDP Capability bit. 164 * Removed IOCFacts Reply EEDP Capability bit.
165 * 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
166 * Added Max SATA Targets to SAS Discovery Error event.
162 * -------------------------------------------------------------------------- 167 * --------------------------------------------------------------------------
163 168
164mpi_cnfg.h 169mpi_cnfg.h
@@ -380,6 +385,23 @@ mpi_cnfg.h
380 * New physical mapping mode in SAS IO Unit Page 2. 385 * New physical mapping mode in SAS IO Unit Page 2.
381 * Added CONFIG_PAGE_SAS_ENCLOSURE_0. 386 * Added CONFIG_PAGE_SAS_ENCLOSURE_0.
382 * Added Slot and Enclosure fields to SAS Device Page 0. 387 * Added Slot and Enclosure fields to SAS Device Page 0.
388 * 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
389 * Added more RAID type defines to IOC Page 2.
390 * Added Port Enable Delay settings to BIOS Page 1.
391 * Added Bad Block Table Full define to RAID Volume Page 0.
392 * Added Previous State defines to RAID Physical Disk
393 * Page 0.
394 * Added Max Sata Targets define for DiscoveryStatus field
395 * of SAS IO Unit Page 0.
396 * Added Device Self Test to Control Flags of SAS IO Unit
397 * Page 1.
398 * Added Direct Attach Starting Slot Number define for SAS
399 * IO Unit Page 2.
400 * Added new fields in SAS Device Page 2 for enclosure
401 * mapping.
402 * Added OwnerDevHandle and Flags field to SAS PHY Page 0.
403 * Added IOC GPIO Flags define to SAS Enclosure Page 0.
404 * Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
383 * -------------------------------------------------------------------------- 405 * --------------------------------------------------------------------------
384 406
385mpi_init.h 407mpi_init.h
@@ -418,6 +440,8 @@ mpi_init.h
418 * Modified SCSI Enclosure Processor Request and Reply to 440 * Modified SCSI Enclosure Processor Request and Reply to
419 * support Enclosure/Slot addressing rather than WWID 441 * support Enclosure/Slot addressing rather than WWID
420 * addressing. 442 * addressing.
443 * 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
444 * Added four new defines for SEP SlotStatus.
421 * -------------------------------------------------------------------------- 445 * --------------------------------------------------------------------------
422 446
423mpi_targ.h 447mpi_targ.h
@@ -461,6 +485,7 @@ mpi_targ.h
461 * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added. 485 * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
462 * 02-22-05 01.05.03 Changed a comment. 486 * 02-22-05 01.05.03 Changed a comment.
463 * 03-11-05 01.05.04 Removed TargetAssistExtended Request. 487 * 03-11-05 01.05.04 Removed TargetAssistExtended Request.
488 * 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
464 * -------------------------------------------------------------------------- 489 * --------------------------------------------------------------------------
465 490
466mpi_fc.h 491mpi_fc.h
@@ -571,20 +596,20 @@ mpi_type.h
571 596
572mpi_history.txt Parts list history 597mpi_history.txt Parts list history
573 598
574Filename 01.05.09 599Filename 01.05.10 01.05.09
575---------- -------- 600---------- -------- --------
576mpi.h 01.05.07 601mpi.h 01.05.08 01.05.07
577mpi_ioc.h 01.05.08 602mpi_ioc.h 01.05.09 01.05.08
578mpi_cnfg.h 01.05.08 603mpi_cnfg.h 01.05.09 01.05.08
579mpi_init.h 01.05.04 604mpi_init.h 01.05.05 01.05.04
580mpi_targ.h 01.05.04 605mpi_targ.h 01.05.05 01.05.04
581mpi_fc.h 01.05.01 606mpi_fc.h 01.05.01 01.05.01
582mpi_lan.h 01.05.01 607mpi_lan.h 01.05.01 01.05.01
583mpi_raid.h 01.05.02 608mpi_raid.h 01.05.02 01.05.02
584mpi_tool.h 01.05.03 609mpi_tool.h 01.05.03 01.05.03
585mpi_inb.h 01.05.01 610mpi_inb.h 01.05.01 01.05.01
586mpi_sas.h 01.05.01 611mpi_sas.h 01.05.01 01.05.01
587mpi_type.h 01.05.01 612mpi_type.h 01.05.01 01.05.01
588 613
589Filename 01.05.08 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03 614Filename 01.05.08 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03
590---------- -------- -------- -------- -------- -------- -------- 615---------- -------- -------- -------- -------- -------- --------
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index aca035801a86..d5af75afbd94 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -6,7 +6,7 @@
6 * Title: MPI initiator mode messages and structures 6 * Title: MPI initiator mode messages and structures
7 * Creation Date: June 8, 2000 7 * Creation Date: June 8, 2000
8 * 8 *
9 * mpi_init.h Version: 01.05.04 9 * mpi_init.h Version: 01.05.05
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -48,6 +48,8 @@
48 * Modified SCSI Enclosure Processor Request and Reply to 48 * Modified SCSI Enclosure Processor Request and Reply to
49 * support Enclosure/Slot addressing rather than WWID 49 * support Enclosure/Slot addressing rather than WWID
50 * addressing. 50 * addressing.
51 * 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
52 * Added four new defines for SEP SlotStatus.
51 * -------------------------------------------------------------------------- 53 * --------------------------------------------------------------------------
52 */ 54 */
53 55
@@ -203,6 +205,197 @@ typedef struct _MSG_SCSI_IO_REPLY
203 205
204 206
205/****************************************************************************/ 207/****************************************************************************/
208/* SCSI IO 32 messages and associated structures */
209/****************************************************************************/
210
211typedef struct
212{
213 U8 CDB[20]; /* 00h */
214 U32 PrimaryReferenceTag; /* 14h */
215 U16 PrimaryApplicationTag; /* 18h */
216 U16 PrimaryApplicationTagMask; /* 1Ah */
217 U32 TransferLength; /* 1Ch */
218} MPI_SCSI_IO32_CDB_EEDP32, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP32,
219 MpiScsiIo32CdbEedp32_t, MPI_POINTER pMpiScsiIo32CdbEedp32_t;
220
221typedef struct
222{
223 U8 CDB[16]; /* 00h */
224 U32 DataLength; /* 10h */
225 U32 PrimaryReferenceTag; /* 14h */
226 U16 PrimaryApplicationTag; /* 18h */
227 U16 PrimaryApplicationTagMask; /* 1Ah */
228 U32 TransferLength; /* 1Ch */
229} MPI_SCSI_IO32_CDB_EEDP16, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP16,
230 MpiScsiIo32CdbEedp16_t, MPI_POINTER pMpiScsiIo32CdbEedp16_t;
231
232typedef union
233{
234 U8 CDB32[32];
235 MPI_SCSI_IO32_CDB_EEDP32 EEDP32;
236 MPI_SCSI_IO32_CDB_EEDP16 EEDP16;
237 SGE_SIMPLE_UNION SGE;
238} MPI_SCSI_IO32_CDB_UNION, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_UNION,
239 MpiScsiIo32Cdb_t, MPI_POINTER pMpiScsiIo32Cdb_t;
240
241typedef struct
242{
243 U8 TargetID; /* 00h */
244 U8 Bus; /* 01h */
245 U16 Reserved1; /* 02h */
246 U32 Reserved2; /* 04h */
247} MPI_SCSI_IO32_BUS_TARGET_ID_FORM, MPI_POINTER PTR_MPI_SCSI_IO32_BUS_TARGET_ID_FORM,
248 MpiScsiIo32BusTargetIdForm_t, MPI_POINTER pMpiScsiIo32BusTargetIdForm_t;
249
250typedef union
251{
252 MPI_SCSI_IO32_BUS_TARGET_ID_FORM SCSIID;
253 U64 WWID;
254} MPI_SCSI_IO32_ADDRESS, MPI_POINTER PTR_MPI_SCSI_IO32_ADDRESS,
255 MpiScsiIo32Address_t, MPI_POINTER pMpiScsiIo32Address_t;
256
257typedef struct _MSG_SCSI_IO32_REQUEST
258{
259 U8 Port; /* 00h */
260 U8 Reserved1; /* 01h */
261 U8 ChainOffset; /* 02h */
262 U8 Function; /* 03h */
263 U8 CDBLength; /* 04h */
264 U8 SenseBufferLength; /* 05h */
265 U8 Flags; /* 06h */
266 U8 MsgFlags; /* 07h */
267 U32 MsgContext; /* 08h */
268 U8 LUN[8]; /* 0Ch */
269 U32 Control; /* 14h */
270 MPI_SCSI_IO32_CDB_UNION CDB; /* 18h */
271 U32 DataLength; /* 38h */
272 U32 BidirectionalDataLength; /* 3Ch */
273 U32 SecondaryReferenceTag; /* 40h */
274 U16 SecondaryApplicationTag; /* 44h */
275 U16 Reserved2; /* 46h */
276 U16 EEDPFlags; /* 48h */
277 U16 ApplicationTagTranslationMask; /* 4Ah */
278 U32 EEDPBlockSize; /* 4Ch */
279 MPI_SCSI_IO32_ADDRESS DeviceAddress; /* 50h */
280 U8 SGLOffset0; /* 58h */
281 U8 SGLOffset1; /* 59h */
282 U8 SGLOffset2; /* 5Ah */
283 U8 SGLOffset3; /* 5Bh */
284 U32 Reserved3; /* 5Ch */
285 U32 Reserved4; /* 60h */
286 U32 SenseBufferLowAddr; /* 64h */
287 SGE_IO_UNION SGL; /* 68h */
288} MSG_SCSI_IO32_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO32_REQUEST,
289 SCSIIO32Request_t, MPI_POINTER pSCSIIO32Request_t;
290
291/* SCSI IO 32 MsgFlags bits */
292#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH (0x01)
293#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH_32 (0x00)
294#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH_64 (0x01)
295
296#define MPI_SCSIIO32_MSGFLGS_SENSE_LOCATION (0x02)
297#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_HOST (0x00)
298#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_IOC (0x02)
299
300#define MPI_SCSIIO32_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
301#define MPI_SCSIIO32_MSGFLGS_SGL_OFFSETS_CHAINS (0x08)
302#define MPI_SCSIIO32_MSGFLGS_MULTICAST (0x10)
303#define MPI_SCSIIO32_MSGFLGS_BIDIRECTIONAL (0x20)
304#define MPI_SCSIIO32_MSGFLGS_LARGE_CDB (0x40)
305
306/* SCSI IO 32 Flags bits */
307#define MPI_SCSIIO32_FLAGS_FORM_MASK (0x03)
308#define MPI_SCSIIO32_FLAGS_FORM_SCSIID (0x00)
309#define MPI_SCSIIO32_FLAGS_FORM_WWID (0x01)
310
311/* SCSI IO 32 LUN fields */
312#define MPI_SCSIIO32_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
313#define MPI_SCSIIO32_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
314#define MPI_SCSIIO32_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
315#define MPI_SCSIIO32_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
316#define MPI_SCSIIO32_LUN_LEVEL_1_WORD (0xFF00)
317#define MPI_SCSIIO32_LUN_LEVEL_1_DWORD (0x0000FF00)
318
319/* SCSI IO 32 Control bits */
320#define MPI_SCSIIO32_CONTROL_DATADIRECTION_MASK (0x03000000)
321#define MPI_SCSIIO32_CONTROL_NODATATRANSFER (0x00000000)
322#define MPI_SCSIIO32_CONTROL_WRITE (0x01000000)
323#define MPI_SCSIIO32_CONTROL_READ (0x02000000)
324#define MPI_SCSIIO32_CONTROL_BIDIRECTIONAL (0x03000000)
325
326#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_MASK (0xFC000000)
327#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_SHIFT (26)
328
329#define MPI_SCSIIO32_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
330#define MPI_SCSIIO32_CONTROL_SIMPLEQ (0x00000000)
331#define MPI_SCSIIO32_CONTROL_HEADOFQ (0x00000100)
332#define MPI_SCSIIO32_CONTROL_ORDEREDQ (0x00000200)
333#define MPI_SCSIIO32_CONTROL_ACAQ (0x00000400)
334#define MPI_SCSIIO32_CONTROL_UNTAGGED (0x00000500)
335#define MPI_SCSIIO32_CONTROL_NO_DISCONNECT (0x00000700)
336
337#define MPI_SCSIIO32_CONTROL_TASKMANAGE_MASK (0x00FF0000)
338#define MPI_SCSIIO32_CONTROL_OBSOLETE (0x00800000)
339#define MPI_SCSIIO32_CONTROL_CLEAR_ACA_RSV (0x00400000)
340#define MPI_SCSIIO32_CONTROL_TARGET_RESET (0x00200000)
341#define MPI_SCSIIO32_CONTROL_LUN_RESET_RSV (0x00100000)
342#define MPI_SCSIIO32_CONTROL_RESERVED (0x00080000)
343#define MPI_SCSIIO32_CONTROL_CLR_TASK_SET_RSV (0x00040000)
344#define MPI_SCSIIO32_CONTROL_ABORT_TASK_SET (0x00020000)
345#define MPI_SCSIIO32_CONTROL_RESERVED2 (0x00010000)
346
347/* SCSI IO 32 EEDPFlags */
348#define MPI_SCSIIO32_EEDPFLAGS_MASK_OP (0x0007)
349#define MPI_SCSIIO32_EEDPFLAGS_NOOP_OP (0x0000)
350#define MPI_SCSIIO32_EEDPFLAGS_CHK_OP (0x0001)
351#define MPI_SCSIIO32_EEDPFLAGS_STRIP_OP (0x0002)
352#define MPI_SCSIIO32_EEDPFLAGS_CHKRM_OP (0x0003)
353#define MPI_SCSIIO32_EEDPFLAGS_INSERT_OP (0x0004)
354#define MPI_SCSIIO32_EEDPFLAGS_REPLACE_OP (0x0006)
355#define MPI_SCSIIO32_EEDPFLAGS_CHKREGEN_OP (0x0007)
356
357#define MPI_SCSIIO32_EEDPFLAGS_PASS_REF_TAG (0x0008)
358#define MPI_SCSIIO32_EEDPFLAGS_8_9THS_MODE (0x0010)
359
360#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_MASK (0x0700)
361#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_GUARD (0x0100)
362#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_REFTAG (0x0200)
363#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_LBATAG (0x0400)
364#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_SHIFT (8)
365
366#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
367#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
368#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
369#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
370
371
372/* SCSIIO32 IO reply structure */
373typedef struct _MSG_SCSIIO32_IO_REPLY
374{
375 U8 Port; /* 00h */
376 U8 Reserved1; /* 01h */
377 U8 MsgLength; /* 02h */
378 U8 Function; /* 03h */
379 U8 CDBLength; /* 04h */
380 U8 SenseBufferLength; /* 05h */
381 U8 Flags; /* 06h */
382 U8 MsgFlags; /* 07h */
383 U32 MsgContext; /* 08h */
384 U8 SCSIStatus; /* 0Ch */
385 U8 SCSIState; /* 0Dh */
386 U16 IOCStatus; /* 0Eh */
387 U32 IOCLogInfo; /* 10h */
388 U32 TransferCount; /* 14h */
389 U32 SenseCount; /* 18h */
390 U32 ResponseInfo; /* 1Ch */
391 U16 TaskTag; /* 20h */
392 U16 Reserved2; /* 22h */
393 U32 BidirectionalTransferCount; /* 24h */
394} MSG_SCSIIO32_IO_REPLY, MPI_POINTER PTR_MSG_SCSIIO32_IO_REPLY,
395 SCSIIO32Reply_t, MPI_POINTER pSCSIIO32Reply_t;
396
397
398/****************************************************************************/
206/* SCSI Task Management messages */ 399/* SCSI Task Management messages */
207/****************************************************************************/ 400/****************************************************************************/
208 401
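The new SCSI IO 32 request in the hunk above addresses its device through a union that holds either a bus/target form or a 64-bit WWID, selected by the FORM bits in Flags. The standalone sketch below restates just those two types with stdint types standing in for the MPI U8/U16/U32/U64 typedefs; it shows the bus/target form, with the WWID form noted as the alternative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down copies of the SCSI IO 32 addressing types from the hunk above. */
typedef struct {
    uint8_t  TargetID;   /* 00h */
    uint8_t  Bus;        /* 01h */
    uint16_t Reserved1;  /* 02h */
    uint32_t Reserved2;  /* 04h */
} scsi_io32_bus_target_id_form;

typedef union {
    scsi_io32_bus_target_id_form SCSIID;
    uint64_t                     WWID;
} scsi_io32_address;

/* Flags values copied from the hunk above. */
#define MPI_SCSIIO32_FLAGS_FORM_MASK   (0x03)
#define MPI_SCSIIO32_FLAGS_FORM_SCSIID (0x00)
#define MPI_SCSIIO32_FLAGS_FORM_WWID   (0x01)

int main(void)
{
    scsi_io32_address addr;
    uint8_t flags = 0;

    /* Bus/target addressing: fill the SCSIID form and record it in Flags. */
    memset(&addr, 0, sizeof(addr));
    addr.SCSIID.TargetID = 3;
    addr.SCSIID.Bus = 0;
    flags = (flags & ~MPI_SCSIIO32_FLAGS_FORM_MASK) | MPI_SCSIIO32_FLAGS_FORM_SCSIID;

    /* WWID addressing would instead set addr.WWID and FORM_WWID. */
    printf("form=%u target=%u bus=%u\n", flags & MPI_SCSIIO32_FLAGS_FORM_MASK,
           addr.SCSIID.TargetID, addr.SCSIID.Bus);
    return 0;
}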
@@ -310,10 +503,14 @@ typedef struct _MSG_SEP_REQUEST
310#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) 503#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
311#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) 504#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
312#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) 505#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
506#define MPI_SEP_REQ_SLOTSTATUS_REQ_CONSISTENCY_CHECK (0x00001000)
507#define MPI_SEP_REQ_SLOTSTATUS_DISABLE (0x00002000)
508#define MPI_SEP_REQ_SLOTSTATUS_REQ_RESERVED_DEVICE (0x00004000)
313#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) 509#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
314#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) 510#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
315#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000) 511#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000)
316#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000) 512#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000)
513#define MPI_SEP_REQ_SLOTSTATUS_ACTIVE (0x00800000)
317#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000) 514#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
318#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000) 515#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
319#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000) 516#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000)
@@ -352,11 +549,15 @@ typedef struct _MSG_SEP_REPLY
352#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) 549#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
353#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) 550#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
354#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) 551#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
552#define MPI_SEP_REPLY_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
553#define MPI_SEP_REPLY_SLOTSTATUS_DISABLE (0x00002000)
554#define MPI_SEP_REPLY_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
355#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000) 555#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000)
356#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) 556#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
357#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) 557#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
358#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000) 558#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000)
359#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000) 559#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
560#define MPI_SEP_REPLY_SLOTSTATUS_ACTIVE (0x00800000)
360#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000) 561#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000)
361#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000) 562#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000)
362#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000) 563#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index f91eb4efe8cc..93b70e2b4266 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: August 11, 2000 7 * Creation Date: August 11, 2000
8 * 8 *
9 * mpi_ioc.h Version: 01.05.08 9 * mpi_ioc.h Version: 01.05.09
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -81,6 +81,8 @@
81 * Reply and IOC Init Request. 81 * Reply and IOC Init Request.
82 * 03-11-05 01.05.08 Added family code for 1068E family. 82 * 03-11-05 01.05.08 Added family code for 1068E family.
83 * Removed IOCFacts Reply EEDP Capability bit. 83 * Removed IOCFacts Reply EEDP Capability bit.
84 * 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
85 * Added Max SATA Targets to SAS Discovery Error event.
84 * -------------------------------------------------------------------------- 86 * --------------------------------------------------------------------------
85 */ 87 */
86 88
@@ -261,7 +263,11 @@ typedef struct _MSG_IOC_FACTS_REPLY
261#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) 263#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
262#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) 264#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
263#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) 265#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
264 266#define MPI_IOCFACTS_CAPABILITY_EEDP (0x00000040)
267#define MPI_IOCFACTS_CAPABILITY_BIDIRECTIONAL (0x00000080)
268#define MPI_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
269#define MPI_IOCFACTS_CAPABILITY_SCSIIO32 (0x00000200)
270#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 (0x00000400)
265 271
266 272
267/***************************************************************************** 273/*****************************************************************************
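Five new IOCCapabilities bits are added in the hunk above. A minimal sketch of testing a few of them against an IOCFacts reply value (the sample capabilities word is made up, and only the bit tests are shown; what a driver does with each capability is outside this hunk):

#include <stdio.h>

/* Capability bits copied from the IOCFacts hunk above. */
#define MPI_IOCFACTS_CAPABILITY_EEDP         (0x00000040)
#define MPI_IOCFACTS_CAPABILITY_SCSIIO32     (0x00000200)
#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16  (0x00000400)

int main(void)
{
    /* Hypothetical IOCCapabilities word from an IOCFacts reply. */
    unsigned int caps = MPI_IOCFACTS_CAPABILITY_EEDP |
                        MPI_IOCFACTS_CAPABILITY_SCSIIO32;

    if (caps & MPI_IOCFACTS_CAPABILITY_EEDP)
        printf("EEDP capability bit set\n");
    if (caps & MPI_IOCFACTS_CAPABILITY_SCSIIO32)
        printf("SCSI IO 32 capability bit set\n");
    if (caps & MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16)
        printf("NO_SCSIIO16 bit set\n");
    return 0;
}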
@@ -677,6 +683,7 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
677#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200) 683#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200)
678#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400) 684#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400)
679#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800) 685#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800)
686#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
680 687
681 688
682/***************************************************************************** 689/*****************************************************************************
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
index 623901fd82be..3f462859ceea 100644
--- a/drivers/message/fusion/lsi/mpi_targ.h
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -6,7 +6,7 @@
6 * Title: MPI Target mode messages and structures 6 * Title: MPI Target mode messages and structures
7 * Creation Date: June 22, 2000 7 * Creation Date: June 22, 2000
8 * 8 *
9 * mpi_targ.h Version: 01.05.04 9 * mpi_targ.h Version: 01.05.05
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -53,6 +53,7 @@
53 * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added. 53 * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
54 * 02-22-05 01.05.03 Changed a comment. 54 * 02-22-05 01.05.03 Changed a comment.
55 * 03-11-05 01.05.04 Removed TargetAssistExtended Request. 55 * 03-11-05 01.05.04 Removed TargetAssistExtended Request.
56 * 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
56 * -------------------------------------------------------------------------- 57 * --------------------------------------------------------------------------
57 */ 58 */
58 59
@@ -371,6 +372,77 @@ typedef struct _MSG_TARGET_ERROR_REPLY
371 372
372 373
373/****************************************************************************/ 374/****************************************************************************/
375/* Target Assist Extended Request */
376/****************************************************************************/
377
378typedef struct _MSG_TARGET_ASSIST_EXT_REQUEST
379{
380 U8 StatusCode; /* 00h */
381 U8 TargetAssistFlags; /* 01h */
382 U8 ChainOffset; /* 02h */
383 U8 Function; /* 03h */
384 U16 QueueTag; /* 04h */
385 U8 Reserved1; /* 06h */
386 U8 MsgFlags; /* 07h */
387 U32 MsgContext; /* 08h */
388 U32 ReplyWord; /* 0Ch */
389 U8 LUN[8]; /* 10h */
390 U32 RelativeOffset; /* 18h */
391 U32 Reserved2; /* 1Ch */
392 U32 Reserved3; /* 20h */
393 U32 PrimaryReferenceTag; /* 24h */
394 U16 PrimaryApplicationTag; /* 28h */
395 U16 PrimaryApplicationTagMask; /* 2Ah */
396 U32 Reserved4; /* 2Ch */
397 U32 DataLength; /* 30h */
398 U32 BidirectionalDataLength; /* 34h */
399 U32 SecondaryReferenceTag; /* 38h */
400 U16 SecondaryApplicationTag; /* 3Ch */
401 U16 Reserved5; /* 3Eh */
402 U16 EEDPFlags; /* 40h */
403 U16 ApplicationTagTranslationMask; /* 42h */
404 U32 EEDPBlockSize; /* 44h */
405 U8 SGLOffset0; /* 48h */
406 U8 SGLOffset1; /* 49h */
407 U8 SGLOffset2; /* 4Ah */
408 U8 SGLOffset3; /* 4Bh */
409 U32 Reserved6; /* 4Ch */
410 SGE_IO_UNION SGL[1]; /* 50h */
411} MSG_TARGET_ASSIST_EXT_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_EXT_REQUEST,
412 TargetAssistExtRequest_t, MPI_POINTER pTargetAssistExtRequest_t;
413
414/* see the defines after MSG_TARGET_ASSIST_REQUEST for TargetAssistFlags */
415
416/* defines for the MsgFlags field */
417#define TARGET_ASSIST_EXT_MSGFLAGS_BIDIRECTIONAL (0x20)
418#define TARGET_ASSIST_EXT_MSGFLAGS_MULTICAST (0x10)
419#define TARGET_ASSIST_EXT_MSGFLAGS_SGL_OFFSET_CHAINS (0x08)
420
421/* defines for the EEDPFlags field */
422#define TARGET_ASSIST_EXT_EEDP_MASK_OP (0x0007)
423#define TARGET_ASSIST_EXT_EEDP_NOOP_OP (0x0000)
424#define TARGET_ASSIST_EXT_EEDP_CHK_OP (0x0001)
425#define TARGET_ASSIST_EXT_EEDP_STRIP_OP (0x0002)
426#define TARGET_ASSIST_EXT_EEDP_CHKRM_OP (0x0003)
427#define TARGET_ASSIST_EXT_EEDP_INSERT_OP (0x0004)
428#define TARGET_ASSIST_EXT_EEDP_REPLACE_OP (0x0006)
429#define TARGET_ASSIST_EXT_EEDP_CHKREGEN_OP (0x0007)
430
431#define TARGET_ASSIST_EXT_EEDP_PASS_REF_TAG (0x0008)
432
433#define TARGET_ASSIST_EXT_EEDP_T10_CHK_MASK (0x0700)
434#define TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD (0x0100)
435#define TARGET_ASSIST_EXT_EEDP_T10_CHK_APPTAG (0x0200)
436#define TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG (0x0400)
437#define TARGET_ASSIST_EXT_EEDP_T10_CHK_SHIFT (8)
438
439#define TARGET_ASSIST_EXT_EEDP_INC_SEC_APPTAG (0x1000)
440#define TARGET_ASSIST_EXT_EEDP_INC_PRI_APPTAG (0x2000)
441#define TARGET_ASSIST_EXT_EEDP_INC_SEC_REFTAG (0x4000)
442#define TARGET_ASSIST_EXT_EEDP_INC_PRI_REFTAG (0x8000)
443
444
445/****************************************************************************/
374/* Target Status Send Request */ 446/* Target Status Send Request */
375/****************************************************************************/ 447/****************************************************************************/
376 448
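The TargetAssistExtended request added above carries an EEDPFlags word with a 3-bit operation code in the low bits plus individual T10 check bits. A minimal sketch of building and decoding such a value from the defines in this hunk (the chosen combination is illustrative only):

#include <stdio.h>

/* EEDPFlags values copied from the TargetAssistExtended hunk above. */
#define TARGET_ASSIST_EXT_EEDP_MASK_OP        (0x0007)
#define TARGET_ASSIST_EXT_EEDP_CHK_OP         (0x0001)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD  (0x0100)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG (0x0400)

int main(void)
{
    /* Request a "check" operation that verifies guard and reference tag. */
    unsigned int eedp_flags = TARGET_ASSIST_EXT_EEDP_CHK_OP |
                              TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD |
                              TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG;

    printf("EEDPFlags=0x%04x op=0x%x\n",
           eedp_flags, eedp_flags & TARGET_ASSIST_EXT_EEDP_MASK_OP);
    return 0;
}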
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index ffbe6f4720e1..f517d0692d5f 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -218,8 +218,7 @@ pci_enable_io_access(struct pci_dev *pdev)
218 * (also referred to as a IO Controller or IOC). 218 * (also referred to as a IO Controller or IOC).
219 * This routine must clear the interrupt from the adapter and does 219 * This routine must clear the interrupt from the adapter and does
220 * so by reading the reply FIFO. Multiple replies may be processed 220 * so by reading the reply FIFO. Multiple replies may be processed
221 * per single call to this routine; up to MPT_MAX_REPLIES_PER_ISR 221 * per single call to this routine.
222 * which is currently set to 32 in mptbase.h.
223 * 222 *
224 * This routine handles register-level access of the adapter but 223 * This routine handles register-level access of the adapter but
225 * dispatches (calls) a protocol-specific callback routine to handle 224 * dispatches (calls) a protocol-specific callback routine to handle
@@ -279,11 +278,11 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
279 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx; 278 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
280 mf = MPT_INDEX_2_MFPTR(ioc, req_idx); 279 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
281 280
282 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x\n", 281 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
283 ioc->name, mr, req_idx)); 282 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
284 DBG_DUMP_REPLY_FRAME(mr) 283 DBG_DUMP_REPLY_FRAME(mr)
285 284
286 /* Check/log IOC log info 285 /* Check/log IOC log info
287 */ 286 */
288 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus); 287 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
289 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { 288 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
@@ -345,7 +344,7 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
345 if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth)) 344 if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
346 || (mf < ioc->req_frames)) ) { 345 || (mf < ioc->req_frames)) ) {
347 printk(MYIOC_s_WARN_FMT 346 printk(MYIOC_s_WARN_FMT
348 "mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, (void *)mf, req_idx); 347 "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
349 cb_idx = 0; 348 cb_idx = 0;
350 pa = 0; 349 pa = 0;
351 freeme = 0; 350 freeme = 0;
@@ -399,7 +398,7 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
399 * @mf: Pointer to original MPT request frame 398 * @mf: Pointer to original MPT request frame
400 * @reply: Pointer to MPT reply frame (NULL if TurboReply) 399 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
401 * 400 *
402 * Returns 1 indicating original alloc'd request frame ptr 401 * Returns 1 indicating original alloc'd request frame ptr
403 * should be freed, or 0 if it shouldn't. 402 * should be freed, or 0 if it shouldn't.
404 */ 403 */
405static int 404static int
@@ -408,28 +407,17 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
408 int freereq = 1; 407 int freereq = 1;
409 u8 func; 408 u8 func;
410 409
411 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name)); 410 dmfprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name));
412
413 if ((mf == NULL) ||
414 (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
415 printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n",
416 ioc->name, (void *)mf);
417 return 1;
418 }
419
420 if (reply == NULL) {
421 dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) reply!\n",
422 ioc->name));
423 return 1;
424 }
425 411
412#if defined(MPT_DEBUG_MSG_FRAME)
426 if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { 413 if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
427 dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf)); 414 dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf));
428 DBG_DUMP_REQUEST_FRAME_HDR(mf) 415 DBG_DUMP_REQUEST_FRAME_HDR(mf)
429 } 416 }
417#endif
430 418
431 func = reply->u.hdr.Function; 419 func = reply->u.hdr.Function;
432 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n", 420 dmfprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n",
433 ioc->name, func)); 421 ioc->name, func));
434 422
435 if (func == MPI_FUNCTION_EVENT_NOTIFICATION) { 423 if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
@@ -448,8 +436,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
448 * Hmmm... It seems that EventNotificationReply is an exception 436 * Hmmm... It seems that EventNotificationReply is an exception
449 * to the rule of one reply per request. 437 * to the rule of one reply per request.
450 */ 438 */
451 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) 439 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
452 freereq = 0; 440 freereq = 0;
441 devtprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
442 ioc->name, pEvReply));
443 } else {
444 devtprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
445 ioc->name, pEvReply));
446 }
453 447
454#ifdef CONFIG_PROC_FS 448#ifdef CONFIG_PROC_FS
455// LogEvent(ioc, pEvReply); 449// LogEvent(ioc, pEvReply);
@@ -491,10 +485,21 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
491 485
492 pCfg->status = status; 486 pCfg->status = status;
493 if (status == MPI_IOCSTATUS_SUCCESS) { 487 if (status == MPI_IOCSTATUS_SUCCESS) {
494 pCfg->hdr->PageVersion = pReply->Header.PageVersion; 488 if ((pReply->Header.PageType &
495 pCfg->hdr->PageLength = pReply->Header.PageLength; 489 MPI_CONFIG_PAGETYPE_MASK) ==
496 pCfg->hdr->PageNumber = pReply->Header.PageNumber; 490 MPI_CONFIG_PAGETYPE_EXTENDED) {
497 pCfg->hdr->PageType = pReply->Header.PageType; 491 pCfg->cfghdr.ehdr->ExtPageLength =
492 le16_to_cpu(pReply->ExtPageLength);
493 pCfg->cfghdr.ehdr->ExtPageType =
494 pReply->ExtPageType;
495 }
496 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
497
498 /* If this is a regular header, save PageLength. */
499 /* LMP Do this better so not using a reserved field! */
500 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
501 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
502 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
498 } 503 }
499 } 504 }
500 505
@@ -705,7 +710,7 @@ mpt_device_driver_deregister(int cb_idx)
705 if (dd_cbfunc->remove) 710 if (dd_cbfunc->remove)
706 dd_cbfunc->remove(ioc->pcidev); 711 dd_cbfunc->remove(ioc->pcidev);
707 } 712 }
708 713
709 MptDeviceDriverHandlers[cb_idx] = NULL; 714 MptDeviceDriverHandlers[cb_idx] = NULL;
710} 715}
711 716
@@ -818,7 +823,7 @@ mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
818 } 823 }
819#endif 824#endif
820 825
821 mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx]; 826 mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
822 dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx])); 827 dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx]));
823 CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr); 828 CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
824} 829}
@@ -920,7 +925,7 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
920 925
921 /* Make sure there are no doorbells */ 926 /* Make sure there are no doorbells */
922 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 927 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
923 928
924 CHIPREG_WRITE32(&ioc->chip->Doorbell, 929 CHIPREG_WRITE32(&ioc->chip->Doorbell,
925 ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) | 930 ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
926 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT))); 931 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
@@ -935,14 +940,14 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
935 return -5; 940 return -5;
936 941
937 dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n", 942 dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n",
938 ioc->name, ii)); 943 ioc->name, ii));
939 944
940 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 945 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
941 946
942 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) { 947 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
943 return -2; 948 return -2;
944 } 949 }
945 950
946 /* Send request via doorbell handshake */ 951 /* Send request via doorbell handshake */
947 req_as_bytes = (u8 *) req; 952 req_as_bytes = (u8 *) req;
948 for (ii = 0; ii < reqBytes/4; ii++) { 953 for (ii = 0; ii < reqBytes/4; ii++) {
@@ -988,9 +993,9 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
988 if (ioc->id == iocid) { 993 if (ioc->id == iocid) {
989 *iocpp =ioc; 994 *iocpp =ioc;
990 return iocid; 995 return iocid;
991 } 996 }
992 } 997 }
993 998
994 *iocpp = NULL; 999 *iocpp = NULL;
995 return -1; 1000 return -1;
996} 1001}
@@ -1032,9 +1037,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1032 1037
1033 if (pci_enable_device(pdev)) 1038 if (pci_enable_device(pdev))
1034 return r; 1039 return r;
1035 1040
1036 dinitprintk((KERN_WARNING MYNAM ": mpt_adapter_install\n")); 1041 dinitprintk((KERN_WARNING MYNAM ": mpt_adapter_install\n"));
1037 1042
1038 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1043 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1039 dprintk((KERN_INFO MYNAM 1044 dprintk((KERN_INFO MYNAM
1040 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n")); 1045 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n"));
@@ -1059,7 +1064,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1059 ioc->alloc_total = sizeof(MPT_ADAPTER); 1064 ioc->alloc_total = sizeof(MPT_ADAPTER);
1060 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ 1065 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1061 ioc->reply_sz = MPT_REPLY_FRAME_SIZE; 1066 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1062 1067
1063 ioc->pcidev = pdev; 1068 ioc->pcidev = pdev;
1064 ioc->diagPending = 0; 1069 ioc->diagPending = 0;
1065 spin_lock_init(&ioc->diagLock); 1070 spin_lock_init(&ioc->diagLock);
@@ -1088,7 +1093,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1088 /* Find lookup slot. */ 1093 /* Find lookup slot. */
1089 INIT_LIST_HEAD(&ioc->list); 1094 INIT_LIST_HEAD(&ioc->list);
1090 ioc->id = mpt_ids++; 1095 ioc->id = mpt_ids++;
1091 1096
1092 mem_phys = msize = 0; 1097 mem_phys = msize = 0;
1093 port = psize = 0; 1098 port = psize = 0;
1094 for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) { 1099 for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) {
@@ -1143,7 +1148,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1143 ioc->prod_name = "LSIFC909"; 1148 ioc->prod_name = "LSIFC909";
1144 ioc->bus_type = FC; 1149 ioc->bus_type = FC;
1145 } 1150 }
1146 if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) { 1151 else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
1147 ioc->prod_name = "LSIFC929"; 1152 ioc->prod_name = "LSIFC929";
1148 ioc->bus_type = FC; 1153 ioc->bus_type = FC;
1149 } 1154 }
@@ -1322,7 +1327,7 @@ mpt_detach(struct pci_dev *pdev)
1322 remove_proc_entry(pname, NULL); 1327 remove_proc_entry(pname, NULL);
1323 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); 1328 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
1324 remove_proc_entry(pname, NULL); 1329 remove_proc_entry(pname, NULL);
1325 1330
1326 /* call per device driver remove entry point */ 1331 /* call per device driver remove entry point */
1327 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) { 1332 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
1328 if(MptDeviceDriverHandlers[ii] && 1333 if(MptDeviceDriverHandlers[ii] &&
@@ -1330,7 +1335,7 @@ mpt_detach(struct pci_dev *pdev)
1330 MptDeviceDriverHandlers[ii]->remove(pdev); 1335 MptDeviceDriverHandlers[ii]->remove(pdev);
1331 } 1336 }
1332 } 1337 }
1333 1338
1334 /* Disable interrupts! */ 1339 /* Disable interrupts! */
1335 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 1340 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1336 1341
@@ -1403,7 +1408,7 @@ mpt_resume(struct pci_dev *pdev)
1403 u32 device_state = pdev->current_state; 1408 u32 device_state = pdev->current_state;
1404 int recovery_state; 1409 int recovery_state;
1405 int ii; 1410 int ii;
1406 1411
1407 printk(MYIOC_s_INFO_FMT 1412 printk(MYIOC_s_INFO_FMT
1408 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", 1413 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
1409 ioc->name, pdev, pci_name(pdev), device_state); 1414 ioc->name, pdev, pci_name(pdev), device_state);
@@ -1534,7 +1539,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1534 if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0) 1539 if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
1535 break; 1540 break;
1536 } 1541 }
1537 1542
1538 1543
1539 if (ii == 5) { 1544 if (ii == 5) {
1540 dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc)); 1545 dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc));
@@ -1542,7 +1547,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1542 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { 1547 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
1543 MptDisplayIocCapabilities(ioc); 1548 MptDisplayIocCapabilities(ioc);
1544 } 1549 }
1545 1550
1546 if (alt_ioc_ready) { 1551 if (alt_ioc_ready) {
1547 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { 1552 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
1548 dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); 1553 dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
@@ -1613,7 +1618,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1613 1618
1614 if (reset_alt_ioc_active && ioc->alt_ioc) { 1619 if (reset_alt_ioc_active && ioc->alt_ioc) {
1615 /* (re)Enable alt-IOC! (reply interrupt) */ 1620 /* (re)Enable alt-IOC! (reply interrupt) */
1616 dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n", 1621 dinitprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
1617 ioc->alt_ioc->name)); 1622 ioc->alt_ioc->name));
1618 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM)); 1623 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
1619 ioc->alt_ioc->active = 1; 1624 ioc->alt_ioc->active = 1;
@@ -1670,7 +1675,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1670 1675
1671 /* Find IM volumes 1676 /* Find IM volumes
1672 */ 1677 */
1673 if (ioc->facts.MsgVersion >= 0x0102) 1678 if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
1674 mpt_findImVolumes(ioc); 1679 mpt_findImVolumes(ioc);
1675 1680
1676 /* Check, and possibly reset, the coalescing value 1681 /* Check, and possibly reset, the coalescing value
@@ -1700,7 +1705,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1700 } 1705 }
1701 1706
1702 if (alt_ioc_ready && MptResetHandlers[ii]) { 1707 if (alt_ioc_ready && MptResetHandlers[ii]) {
1703 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", 1708 drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
1704 ioc->name, ioc->alt_ioc->name, ii)); 1709 ioc->name, ioc->alt_ioc->name, ii));
1705 rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); 1710 rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET);
1706 handlers++; 1711 handlers++;
@@ -1733,8 +1738,8 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
1733 1738
1734 dprintk((MYIOC_s_INFO_FMT "PCI device %s devfn=%x/%x," 1739 dprintk((MYIOC_s_INFO_FMT "PCI device %s devfn=%x/%x,"
1735 " searching for devfn match on %x or %x\n", 1740 " searching for devfn match on %x or %x\n",
1736 ioc->name, pci_name(pdev), pdev->devfn, 1741 ioc->name, pci_name(pdev), pdev->bus->number,
1737 func-1, func+1)); 1742 pdev->devfn, func-1, func+1));
1738 1743
1739 peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1)); 1744 peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
1740 if (!peer) { 1745 if (!peer) {
@@ -1861,36 +1866,39 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1861static void 1866static void
1862mpt_adapter_dispose(MPT_ADAPTER *ioc) 1867mpt_adapter_dispose(MPT_ADAPTER *ioc)
1863{ 1868{
1864 if (ioc != NULL) { 1869 int sz_first, sz_last;
1865 int sz_first, sz_last;
1866 1870
1867 sz_first = ioc->alloc_total; 1871 if (ioc == NULL)
1872 return;
1868 1873
1869 mpt_adapter_disable(ioc); 1874 sz_first = ioc->alloc_total;
1870 1875
1871 if (ioc->pci_irq != -1) { 1876 mpt_adapter_disable(ioc);
1872 free_irq(ioc->pci_irq, ioc);
1873 ioc->pci_irq = -1;
1874 }
1875 1877
1876 if (ioc->memmap != NULL) 1878 if (ioc->pci_irq != -1) {
1877 iounmap(ioc->memmap); 1879 free_irq(ioc->pci_irq, ioc);
1880 ioc->pci_irq = -1;
1881 }
1882
1883 if (ioc->memmap != NULL) {
1884 iounmap(ioc->memmap);
1885 ioc->memmap = NULL;
1886 }
1878 1887
1879#if defined(CONFIG_MTRR) && 0 1888#if defined(CONFIG_MTRR) && 0
1880 if (ioc->mtrr_reg > 0) { 1889 if (ioc->mtrr_reg > 0) {
1881 mtrr_del(ioc->mtrr_reg, 0, 0); 1890 mtrr_del(ioc->mtrr_reg, 0, 0);
1882 dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name)); 1891 dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name));
1883 } 1892 }
1884#endif 1893#endif
1885 1894
1886 /* Zap the adapter lookup ptr! */ 1895 /* Zap the adapter lookup ptr! */
1887 list_del(&ioc->list); 1896 list_del(&ioc->list);
1888 1897
1889 sz_last = ioc->alloc_total; 1898 sz_last = ioc->alloc_total;
1890 dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n", 1899 dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
1891 ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first)); 1900 ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
1892 kfree(ioc); 1901 kfree(ioc);
1893 }
1894} 1902}
1895 1903
1896/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1904/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1977,7 +1985,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
1977 } 1985 }
1978 1986
1979 /* Is it already READY? */ 1987 /* Is it already READY? */
1980 if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) 1988 if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
1981 return 0; 1989 return 0;
1982 1990
1983 /* 1991 /*
@@ -1995,7 +2003,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
1995 * Hmmm... Did it get left operational? 2003 * Hmmm... Did it get left operational?
1996 */ 2004 */
1997 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) { 2005 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
1998 dinitprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n", 2006 dinitprintk((MYIOC_s_INFO_FMT "IOC operational unexpected\n",
1999 ioc->name)); 2007 ioc->name));
2000 2008
2001 /* Check WhoInit. 2009 /* Check WhoInit.
@@ -2004,8 +2012,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2004 * Else, fall through to KickStart case 2012 * Else, fall through to KickStart case
2005 */ 2013 */
2006 whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT; 2014 whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
2007 dprintk((KERN_WARNING MYNAM 2015 dinitprintk((KERN_INFO MYNAM
2008 ": whoinit 0x%x\n statefault %d force %d\n", 2016 ": whoinit 0x%x statefault %d force %d\n",
2009 whoinit, statefault, force)); 2017 whoinit, statefault, force));
2010 if (whoinit == MPI_WHOINIT_PCI_PEER) 2018 if (whoinit == MPI_WHOINIT_PCI_PEER)
2011 return -4; 2019 return -4;
@@ -2140,8 +2148,8 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2140 get_facts.Function = MPI_FUNCTION_IOC_FACTS; 2148 get_facts.Function = MPI_FUNCTION_IOC_FACTS;
2141 /* Assert: All other get_facts fields are zero! */ 2149 /* Assert: All other get_facts fields are zero! */
2142 2150
2143 dinitprintk((MYIOC_s_INFO_FMT 2151 dinitprintk((MYIOC_s_INFO_FMT
2144 "Sending get IocFacts request req_sz=%d reply_sz=%d\n", 2152 "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
2145 ioc->name, req_sz, reply_sz)); 2153 ioc->name, req_sz, reply_sz));
2146 2154
2147 /* No non-zero fields in the get_facts request are greater than 2155 /* No non-zero fields in the get_facts request are greater than
@@ -2174,7 +2182,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2174 facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions); 2182 facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
2175 facts->IOCStatus = le16_to_cpu(facts->IOCStatus); 2183 facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
2176 facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo); 2184 facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
2177 status = facts->IOCStatus & MPI_IOCSTATUS_MASK; 2185 status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
2178 /* CHECKME! IOCStatus, IOCLogInfo */ 2186 /* CHECKME! IOCStatus, IOCLogInfo */
2179 2187
2180 facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth); 2188 facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
@@ -2221,7 +2229,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2221 if ( sz & 0x02 ) 2229 if ( sz & 0x02 )
2222 sz += 2; 2230 sz += 2;
2223 facts->FWImageSize = sz; 2231 facts->FWImageSize = sz;
2224 2232
2225 if (!facts->RequestFrameSize) { 2233 if (!facts->RequestFrameSize) {
2226 /* Something is wrong! */ 2234 /* Something is wrong! */
2227 printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n", 2235 printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
@@ -2240,7 +2248,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2240 ioc->NBShiftFactor = shiftFactor; 2248 ioc->NBShiftFactor = shiftFactor;
2241 dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n", 2249 dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
2242 ioc->name, vv, shiftFactor, r)); 2250 ioc->name, vv, shiftFactor, r));
2243 2251
2244 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { 2252 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2245 /* 2253 /*
2246 * Set values for this IOC's request & reply frame sizes, 2254 * Set values for this IOC's request & reply frame sizes,
@@ -2261,7 +2269,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2261 return r; 2269 return r;
2262 } 2270 }
2263 } else { 2271 } else {
2264 printk(MYIOC_s_ERR_FMT 2272 printk(MYIOC_s_ERR_FMT
2265 "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n", 2273 "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
2266 ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t, 2274 ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
2267 RequestFrameSize)/sizeof(u32))); 2275 RequestFrameSize)/sizeof(u32)));
@@ -2413,9 +2421,11 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2413 2421
2414 dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n", 2422 dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
2415 ioc->name, &ioc_init)); 2423 ioc->name, &ioc_init));
2416 2424
2417 if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) 2425 if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
2426 printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
2418 return r; 2427 return r;
2428 }
2419 2429
2420 /* YIKES! SUPER IMPORTANT!!! 2430 /* YIKES! SUPER IMPORTANT!!!
2421 * Poll IocState until _OPERATIONAL while IOC is doing 2431 * Poll IocState until _OPERATIONAL while IOC is doing
@@ -2440,7 +2450,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2440 state = mpt_GetIocState(ioc, 1); 2450 state = mpt_GetIocState(ioc, 1);
2441 count++; 2451 count++;
2442 } 2452 }
2443 dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", 2453 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
2444 ioc->name, count)); 2454 ioc->name, count));
2445 2455
2446 return r; 2456 return r;
@@ -2529,7 +2539,7 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
2529 int sz; 2539 int sz;
2530 2540
2531 sz = ioc->facts.FWImageSize; 2541 sz = ioc->facts.FWImageSize;
2532 dinitprintk((KERN_WARNING MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n", 2542 dinitprintk((KERN_INFO MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
2533 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); 2543 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
2534 pci_free_consistent(ioc->pcidev, sz, 2544 pci_free_consistent(ioc->pcidev, sz,
2535 ioc->cached_fw, ioc->cached_fw_dma); 2545 ioc->cached_fw, ioc->cached_fw_dma);
@@ -2573,9 +2583,9 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2573 2583
2574 mpt_alloc_fw_memory(ioc, sz); 2584 mpt_alloc_fw_memory(ioc, sz);
2575 2585
2576 dinitprintk((KERN_WARNING MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n", 2586 dinitprintk((KERN_INFO MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
2577 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); 2587 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
2578 2588
2579 if (ioc->cached_fw == NULL) { 2589 if (ioc->cached_fw == NULL) {
2580 /* Major Failure. 2590 /* Major Failure.
2581 */ 2591 */
@@ -2605,14 +2615,14 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2605 mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma); 2615 mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma);
2606 2616
2607 sgeoffset += sizeof(u32) + sizeof(dma_addr_t); 2617 sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
2608 dinitprintk((KERN_WARNING MYNAM "Sending FW Upload (req @ %p) sgeoffset=%d \n", 2618 dinitprintk((KERN_INFO MYNAM ": Sending FW Upload (req @ %p) sgeoffset=%d \n",
2609 prequest, sgeoffset)); 2619 prequest, sgeoffset));
2610 DBG_DUMP_FW_REQUEST_FRAME(prequest) 2620 DBG_DUMP_FW_REQUEST_FRAME(prequest)
2611 2621
2612 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, 2622 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
2613 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); 2623 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
2614 2624
2615 dinitprintk((KERN_WARNING MYNAM "FW Upload completed rc=%x \n", ii)); 2625 dinitprintk((KERN_INFO MYNAM ": FW Upload completed rc=%x \n", ii));
2616 2626
2617 cmdStatus = -EFAULT; 2627 cmdStatus = -EFAULT;
2618 if (ii == 0) { 2628 if (ii == 0) {
@@ -2627,10 +2637,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2627 cmdStatus = 0; 2637 cmdStatus = 0;
2628 } 2638 }
2629 } 2639 }
2630 dinitprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n", 2640 dinitprintk((MYIOC_s_INFO_FMT ": do_upload cmdStatus=%d \n",
2631 ioc->name, cmdStatus)); 2641 ioc->name, cmdStatus));
2632 2642
2633 2643
2634 if (cmdStatus) { 2644 if (cmdStatus) {
2635 2645
2636 ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n", 2646 ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n",
@@ -2761,8 +2771,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2761 fwSize = (pExtImage->ImageSize + 3) >> 2; 2771 fwSize = (pExtImage->ImageSize + 3) >> 2;
2762 ptrFw = (u32 *)pExtImage; 2772 ptrFw = (u32 *)pExtImage;
2763 2773
2764 ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x bytes @ %p load_addr=%x\n", 2774 ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
2765 ioc->name, fwSize*4, ptrFw, load_addr)); 2775 ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
2766 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr); 2776 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
2767 2777
2768 while (fwSize--) { 2778 while (fwSize--) {
@@ -2845,9 +2855,9 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2845 * 0 else 2855 * 0 else
2846 * 2856 *
2847 * Returns: 2857 * Returns:
2848 * 1 - hard reset, READY 2858 * 1 - hard reset, READY
2849 * 0 - no reset due to History bit, READY 2859 * 0 - no reset due to History bit, READY
2850 * -1 - no reset due to History bit but not READY 2860 * -1 - no reset due to History bit but not READY
2851 * OR reset but failed to come READY 2861 * OR reset but failed to come READY
2852 * -2 - no reset, could not enter DIAG mode 2862 * -2 - no reset, could not enter DIAG mode
2853 * -3 - reset but bad FW bit 2863 * -3 - reset but bad FW bit
@@ -2990,7 +3000,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
2990 * 3000 *
2991 */ 3001 */
2992 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM); 3002 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
2993 mdelay (1); 3003 mdelay(1);
2994 3004
2995 /* 3005 /*
2996 * Now hit the reset bit in the Diagnostic register 3006 * Now hit the reset bit in the Diagnostic register
@@ -3170,7 +3180,7 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
3170 u32 state; 3180 u32 state;
3171 int cntdn, count; 3181 int cntdn, count;
3172 3182
3173 drsprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n", 3183 drsprintk((KERN_INFO MYNAM ": %s: Sending IOC reset(0x%02x)!\n",
3174 ioc->name, reset_type)); 3184 ioc->name, reset_type));
3175 CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT); 3185 CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
3176 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) 3186 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
@@ -3374,6 +3384,9 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
3374 ioc->reply_frames = (MPT_FRAME_HDR *) mem; 3384 ioc->reply_frames = (MPT_FRAME_HDR *) mem;
3375 ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); 3385 ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
3376 3386
3387 dinitprintk((KERN_INFO MYNAM ": %s ReplyBuffers @ %p[%p]\n",
3388 ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
3389
3377 alloc_dma += reply_sz; 3390 alloc_dma += reply_sz;
3378 mem += reply_sz; 3391 mem += reply_sz;
3379 3392
@@ -3382,7 +3395,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
3382 ioc->req_frames = (MPT_FRAME_HDR *) mem; 3395 ioc->req_frames = (MPT_FRAME_HDR *) mem;
3383 ioc->req_frames_dma = alloc_dma; 3396 ioc->req_frames_dma = alloc_dma;
3384 3397
3385 dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffers @ %p[%p]\n", 3398 dinitprintk((KERN_INFO MYNAM ": %s RequestBuffers @ %p[%p]\n",
3386 ioc->name, mem, (void *)(ulong)alloc_dma)); 3399 ioc->name, mem, (void *)(ulong)alloc_dma));
3387 3400
3388 ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); 3401 ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
@@ -3408,7 +3421,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
3408 ioc->ChainBuffer = mem; 3421 ioc->ChainBuffer = mem;
3409 ioc->ChainBufferDMA = alloc_dma; 3422 ioc->ChainBufferDMA = alloc_dma;
3410 3423
3411 dinitprintk((KERN_INFO MYNAM " :%s.ChainBuffers @ %p(%p)\n", 3424 dinitprintk((KERN_INFO MYNAM " :%s ChainBuffers @ %p(%p)\n",
3412 ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA)); 3425 ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
3413 3426
3414 /* Initialize the free chain Q. 3427 /* Initialize the free chain Q.
@@ -3513,7 +3526,7 @@ out_fail:
3513 */ 3526 */
3514static int 3527static int
3515mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, 3528mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
3516 int replyBytes, u16 *u16reply, int maxwait, int sleepFlag) 3529 int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
3517{ 3530{
3518 MPIDefaultReply_t *mptReply; 3531 MPIDefaultReply_t *mptReply;
3519 int failcnt = 0; 3532 int failcnt = 0;
@@ -3588,7 +3601,7 @@ mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
3588 */ 3601 */
3589 if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0) 3602 if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
3590 failcnt++; 3603 failcnt++;
3591 3604
3592 dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n", 3605 dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n",
3593 ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : "")); 3606 ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
3594 3607
@@ -3747,7 +3760,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3747 } 3760 }
3748 3761
3749 dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n", 3762 dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
3750 ioc->name, t, le32_to_cpu(*(u32 *)hs_reply), 3763 ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
3751 failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); 3764 failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
3752 3765
3753 /* 3766 /*
@@ -3819,7 +3832,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
3819 hdr.PageLength = 0; 3832 hdr.PageLength = 0;
3820 hdr.PageNumber = 0; 3833 hdr.PageNumber = 0;
3821 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; 3834 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
3822 cfg.hdr = &hdr; 3835 cfg.cfghdr.hdr = &hdr;
3823 cfg.physAddr = -1; 3836 cfg.physAddr = -1;
3824 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 3837 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
3825 cfg.dir = 0; 3838 cfg.dir = 0;
@@ -3863,7 +3876,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
3863 hdr.PageLength = 0; 3876 hdr.PageLength = 0;
3864 hdr.PageNumber = 1; 3877 hdr.PageNumber = 1;
3865 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; 3878 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
3866 cfg.hdr = &hdr; 3879 cfg.cfghdr.hdr = &hdr;
3867 cfg.physAddr = -1; 3880 cfg.physAddr = -1;
3868 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 3881 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
3869 cfg.dir = 0; 3882 cfg.dir = 0;
@@ -3930,7 +3943,7 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
3930 hdr.PageLength = 0; 3943 hdr.PageLength = 0;
3931 hdr.PageNumber = 0; 3944 hdr.PageNumber = 0;
3932 hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; 3945 hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
3933 cfg.hdr = &hdr; 3946 cfg.cfghdr.hdr = &hdr;
3934 cfg.physAddr = -1; 3947 cfg.physAddr = -1;
3935 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 3948 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
3936 cfg.dir = 0; 3949 cfg.dir = 0;
@@ -4012,7 +4025,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
4012 hdr.PageLength = 0; 4025 hdr.PageLength = 0;
4013 hdr.PageNumber = 2; 4026 hdr.PageNumber = 2;
4014 hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT; 4027 hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
4015 cfg.hdr = &hdr; 4028 cfg.cfghdr.hdr = &hdr;
4016 cfg.physAddr = -1; 4029 cfg.physAddr = -1;
4017 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4030 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4018 cfg.dir = 0; 4031 cfg.dir = 0;
@@ -4102,7 +4115,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4102 header.PageLength = 0; 4115 header.PageLength = 0;
4103 header.PageNumber = 0; 4116 header.PageNumber = 0;
4104 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; 4117 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
4105 cfg.hdr = &header; 4118 cfg.cfghdr.hdr = &header;
4106 cfg.physAddr = -1; 4119 cfg.physAddr = -1;
4107 cfg.pageAddr = portnum; 4120 cfg.pageAddr = portnum;
4108 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4121 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4122,6 +4135,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4122 ioc->spi_data.minSyncFactor = MPT_ASYNC; 4135 ioc->spi_data.minSyncFactor = MPT_ASYNC;
4123 ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN; 4136 ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
4124 rc = 1; 4137 rc = 1;
4138 ddvprintk((MYIOC_s_INFO_FMT "Unable to read PortPage0 minSyncFactor=%x\n",
4139 ioc->name, ioc->spi_data.minSyncFactor));
4125 } else { 4140 } else {
4126 /* Save the Port Page 0 data 4141 /* Save the Port Page 0 data
4127 */ 4142 */
@@ -4131,7 +4146,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4131 4146
4132 if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) { 4147 if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
4133 ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS; 4148 ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
4134 dinitprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n", 4149 ddvprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n",
4135 ioc->name, pPP0->Capabilities)); 4150 ioc->name, pPP0->Capabilities));
4136 } 4151 }
4137 ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0; 4152 ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
@@ -4140,6 +4155,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4140 ioc->spi_data.maxSyncOffset = (u8) (data >> 16); 4155 ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
4141 data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK; 4156 data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
4142 ioc->spi_data.minSyncFactor = (u8) (data >> 8); 4157 ioc->spi_data.minSyncFactor = (u8) (data >> 8);
4158 ddvprintk((MYIOC_s_INFO_FMT "PortPage0 minSyncFactor=%x\n",
4159 ioc->name, ioc->spi_data.minSyncFactor));
4143 } else { 4160 } else {
4144 ioc->spi_data.maxSyncOffset = 0; 4161 ioc->spi_data.maxSyncOffset = 0;
4145 ioc->spi_data.minSyncFactor = MPT_ASYNC; 4162 ioc->spi_data.minSyncFactor = MPT_ASYNC;
@@ -4152,8 +4169,11 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4152 if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) || 4169 if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
4153 (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) { 4170 (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
4154 4171
4155 if (ioc->spi_data.minSyncFactor < MPT_ULTRA) 4172 if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
4156 ioc->spi_data.minSyncFactor = MPT_ULTRA; 4173 ioc->spi_data.minSyncFactor = MPT_ULTRA;
4174 ddvprintk((MYIOC_s_INFO_FMT "HVD or SE detected, minSyncFactor=%x\n",
4175 ioc->name, ioc->spi_data.minSyncFactor));
4176 }
4157 } 4177 }
4158 } 4178 }
4159 if (pbuf) { 4179 if (pbuf) {
@@ -4168,7 +4188,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4168 header.PageLength = 0; 4188 header.PageLength = 0;
4169 header.PageNumber = 2; 4189 header.PageNumber = 2;
4170 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; 4190 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
4171 cfg.hdr = &header; 4191 cfg.cfghdr.hdr = &header;
4172 cfg.physAddr = -1; 4192 cfg.physAddr = -1;
4173 cfg.pageAddr = portnum; 4193 cfg.pageAddr = portnum;
4174 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4194 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4236,7 +4256,7 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
4236 header.PageLength = 0; 4256 header.PageLength = 0;
4237 header.PageNumber = 1; 4257 header.PageNumber = 1;
4238 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; 4258 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
4239 cfg.hdr = &header; 4259 cfg.cfghdr.hdr = &header;
4240 cfg.physAddr = -1; 4260 cfg.physAddr = -1;
4241 cfg.pageAddr = portnum; 4261 cfg.pageAddr = portnum;
4242 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4262 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4245,8 +4265,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
4245 if (mpt_config(ioc, &cfg) != 0) 4265 if (mpt_config(ioc, &cfg) != 0)
4246 return -EFAULT; 4266 return -EFAULT;
4247 4267
4248 ioc->spi_data.sdp1version = cfg.hdr->PageVersion; 4268 ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
4249 ioc->spi_data.sdp1length = cfg.hdr->PageLength; 4269 ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;
4250 4270
4251 header.PageVersion = 0; 4271 header.PageVersion = 0;
4252 header.PageLength = 0; 4272 header.PageLength = 0;
@@ -4255,8 +4275,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
4255 if (mpt_config(ioc, &cfg) != 0) 4275 if (mpt_config(ioc, &cfg) != 0)
4256 return -EFAULT; 4276 return -EFAULT;
4257 4277
4258 ioc->spi_data.sdp0version = cfg.hdr->PageVersion; 4278 ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
4259 ioc->spi_data.sdp0length = cfg.hdr->PageLength; 4279 ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;
4260 4280
4261 dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n", 4281 dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n",
4262 ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length)); 4282 ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
@@ -4298,7 +4318,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4298 header.PageLength = 0; 4318 header.PageLength = 0;
4299 header.PageNumber = 2; 4319 header.PageNumber = 2;
4300 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 4320 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4301 cfg.hdr = &header; 4321 cfg.cfghdr.hdr = &header;
4302 cfg.physAddr = -1; 4322 cfg.physAddr = -1;
4303 cfg.pageAddr = 0; 4323 cfg.pageAddr = 0;
4304 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4324 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4394,7 +4414,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4394 header.PageLength = 0; 4414 header.PageLength = 0;
4395 header.PageNumber = 3; 4415 header.PageNumber = 3;
4396 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 4416 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4397 cfg.hdr = &header; 4417 cfg.cfghdr.hdr = &header;
4398 cfg.physAddr = -1; 4418 cfg.physAddr = -1;
4399 cfg.pageAddr = 0; 4419 cfg.pageAddr = 0;
4400 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4420 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4446,7 +4466,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4446 header.PageLength = 0; 4466 header.PageLength = 0;
4447 header.PageNumber = 4; 4467 header.PageNumber = 4;
4448 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 4468 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4449 cfg.hdr = &header; 4469 cfg.cfghdr.hdr = &header;
4450 cfg.physAddr = -1; 4470 cfg.physAddr = -1;
4451 cfg.pageAddr = 0; 4471 cfg.pageAddr = 0;
4452 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4472 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4498,7 +4518,7 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
4498 header.PageLength = 0; 4518 header.PageLength = 0;
4499 header.PageNumber = 1; 4519 header.PageNumber = 1;
4500 header.PageType = MPI_CONFIG_PAGETYPE_IOC; 4520 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4501 cfg.hdr = &header; 4521 cfg.cfghdr.hdr = &header;
4502 cfg.physAddr = -1; 4522 cfg.physAddr = -1;
4503 cfg.pageAddr = 0; 4523 cfg.pageAddr = 0;
4504 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 4524 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4580,13 +4600,13 @@ SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
4580 4600
4581 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); 4601 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
4582 if (evnp == NULL) { 4602 if (evnp == NULL) {
4583 dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n", 4603 devtprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
4584 ioc->name)); 4604 ioc->name));
4585 return 0; 4605 return 0;
4586 } 4606 }
4587 memset(evnp, 0, sizeof(*evnp)); 4607 memset(evnp, 0, sizeof(*evnp));
4588 4608
4589 dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch)); 4609 devtprintk((MYIOC_s_INFO_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
4590 4610
4591 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 4611 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
4592 evnp->ChainOffset = 0; 4612 evnp->ChainOffset = 0;
@@ -4610,8 +4630,10 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
4610 EventAck_t *pAck; 4630 EventAck_t *pAck;
4611 4631
4612 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 4632 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4613 printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n", 4633 printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK "
4614 ioc->name); 4634 "request frame for Event=%x EventContext=%x EventData=%x!\n",
4635 ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
4636 le32_to_cpu(evnp->Data[0]));
4615 return -1; 4637 return -1;
4616 } 4638 }
4617 memset(pAck, 0, sizeof(*pAck)); 4639 memset(pAck, 0, sizeof(*pAck));
@@ -4647,10 +4669,11 @@ int
4647mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 4669mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
4648{ 4670{
4649 Config_t *pReq; 4671 Config_t *pReq;
4672 ConfigExtendedPageHeader_t *pExtHdr = NULL;
4650 MPT_FRAME_HDR *mf; 4673 MPT_FRAME_HDR *mf;
4651 unsigned long flags; 4674 unsigned long flags;
4652 int ii, rc; 4675 int ii, rc;
4653 u32 flagsLength; 4676 int flagsLength;
4654 int in_isr; 4677 int in_isr;
4655 4678
4656 /* Prevent calling wait_event() (below), if caller happens 4679 /* Prevent calling wait_event() (below), if caller happens
@@ -4675,16 +4698,30 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
4675 pReq->Reserved = 0; 4698 pReq->Reserved = 0;
4676 pReq->ChainOffset = 0; 4699 pReq->ChainOffset = 0;
4677 pReq->Function = MPI_FUNCTION_CONFIG; 4700 pReq->Function = MPI_FUNCTION_CONFIG;
4701
4702 /* Assume page type is not extended and clear "reserved" fields. */
4678 pReq->ExtPageLength = 0; 4703 pReq->ExtPageLength = 0;
4679 pReq->ExtPageType = 0; 4704 pReq->ExtPageType = 0;
4680 pReq->MsgFlags = 0; 4705 pReq->MsgFlags = 0;
4706
4681 for (ii=0; ii < 8; ii++) 4707 for (ii=0; ii < 8; ii++)
4682 pReq->Reserved2[ii] = 0; 4708 pReq->Reserved2[ii] = 0;
4683 4709
4684 pReq->Header.PageVersion = pCfg->hdr->PageVersion; 4710 pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
4685 pReq->Header.PageLength = pCfg->hdr->PageLength; 4711 pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
4686 pReq->Header.PageNumber = pCfg->hdr->PageNumber; 4712 pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
4687 pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 4713 pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
4714
4715 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
4716 pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
4717 pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
4718 pReq->ExtPageType = pExtHdr->ExtPageType;
4719 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
4720
4721 /* Page Length must be treated as a reserved field for the extended header. */
4722 pReq->Header.PageLength = 0;
4723 }
4724
4688 pReq->PageAddress = cpu_to_le32(pCfg->pageAddr); 4725 pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
4689 4726
4690 /* Add a SGE to the config request. 4727 /* Add a SGE to the config request.
@@ -4694,12 +4731,20 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
4694 else 4731 else
4695 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 4732 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
4696 4733
4697 flagsLength |= pCfg->hdr->PageLength * 4; 4734 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
4735 flagsLength |= pExtHdr->ExtPageLength * 4;
4698 4736
4699 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); 4737 dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
4738 ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action));
4739 }
4740 else {
4741 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
4742
4743 dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
4744 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
4745 }
4700 4746
4701 dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n", 4747 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
4702 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
4703 4748
4704 /* Append pCfg pointer to end of mf 4749 /* Append pCfg pointer to end of mf
4705 */ 4750 */
@@ -4789,8 +4834,8 @@ mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
4789 pReq->Reserved3 = 0; 4834 pReq->Reserved3 = 0;
4790 pReq->NumAddressBytes = 0x01; 4835 pReq->NumAddressBytes = 0x01;
4791 pReq->Reserved4 = 0; 4836 pReq->Reserved4 = 0;
4792 pReq->DataLength = 0x04; 4837 pReq->DataLength = cpu_to_le16(0x04);
4793 pdev = (struct pci_dev *) ioc->pcidev; 4838 pdev = ioc->pcidev;
4794 if (pdev->devfn & 1) 4839 if (pdev->devfn & 1)
4795 pReq->DeviceAddr = 0xB2; 4840 pReq->DeviceAddr = 0xB2;
4796 else 4841 else
@@ -5504,6 +5549,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5504 * If needed, send (a single) EventAck. 5549 * If needed, send (a single) EventAck.
5505 */ 5550 */
5506 if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) { 5551 if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
5552 devtprintk((MYIOC_s_WARN_FMT
5553 "EventAck required\n",ioc->name));
5507 if ((ii = SendEventAck(ioc, pEventReply)) != 0) { 5554 if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
5508 devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n", 5555 devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n",
5509 ioc->name, ii)); 5556 ioc->name, ii));
@@ -5584,7 +5631,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
5584 case 0x00080000: 5631 case 0x00080000:
5585 desc = "Outbound DMA Overrun"; 5632 desc = "Outbound DMA Overrun";
5586 break; 5633 break;
5587 5634
5588 case 0x00090000: 5635 case 0x00090000:
5589 desc = "Task Management"; 5636 desc = "Task Management";
5590 break; 5637 break;
@@ -5600,7 +5647,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
5600 case 0x000C0000: 5647 case 0x000C0000:
5601 desc = "Untagged Table Size"; 5648 desc = "Untagged Table Size";
5602 break; 5649 break;
5603 5650
5604 } 5651 }
5605 5652
5606 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc); 5653 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
@@ -5692,7 +5739,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
5692 break; 5739 break;
5693 5740
5694 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ 5741 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
5695 /* This error is checked in scsi_io_done(). Skip. 5742 /* This error is checked in scsi_io_done(). Skip.
5696 desc = "SCSI Data Underrun"; 5743 desc = "SCSI Data Underrun";
5697 */ 5744 */
5698 break; 5745 break;
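
A recurring theme in the mptbase.c hunks above (and in the mptctl.c and mptscsih.c hunks below) is byte-order correctness: reply fields such as IOCStatus, DataLength and ActionStatus are little-endian on the wire, so the patch wraps them in le16_to_cpu()/cpu_to_le16() before masking, comparing or storing them. The user-space sketch below only illustrates that conversion pattern; the helper is a stand-in for the kernel's le16_to_cpu() and the mask value is a placeholder, not the real MPI_IOCSTATUS_MASK.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for le16_to_cpu(): take the two bytes of a little-endian field
 * exactly as they sit in the reply frame and build a host-order value. */
static uint16_t example_le16_to_cpu(const uint8_t le_bytes[2])
{
	return (uint16_t)(le_bytes[0] | ((uint16_t)le_bytes[1] << 8));
}

#define EXAMPLE_IOCSTATUS_MASK 0x7FFF	/* placeholder, not the MPI constant */

int main(void)
{
	/* Pretend these two bytes are the IOCStatus field of a reply frame:
	 * the value 0x0045 stored little-endian. */
	const uint8_t raw_iocstatus[2] = { 0x45, 0x00 };

	/* Convert first, then mask -- masking the raw in-memory representation
	 * only happens to work on little-endian hosts, which is the class of
	 * bug the le16_to_cpu() additions in this patch are closing. */
	uint16_t status = example_le16_to_cpu(raw_iocstatus) & EXAMPLE_IOCSTATUS_MASK;

	printf("status = 0x%04x\n", status);
	return 0;
}
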
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 848fb236b175..f4827d923731 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -915,7 +915,10 @@ struct scsi_cmnd;
915typedef struct _x_config_parms { 915typedef struct _x_config_parms {
916 struct list_head linkage; /* linked list */ 916 struct list_head linkage; /* linked list */
917 struct timer_list timer; /* timer function for this request */ 917 struct timer_list timer; /* timer function for this request */
918 ConfigPageHeader_t *hdr; 918 union {
919 ConfigExtendedPageHeader_t *ehdr;
920 ConfigPageHeader_t *hdr;
921 } cfghdr;
919 dma_addr_t physAddr; 922 dma_addr_t physAddr;
920 int wait_done; /* wait for this request */ 923 int wait_done; /* wait for this request */
921 u32 pageAddr; /* properly formatted */ 924 u32 pageAddr; /* properly formatted */
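
The mptbase.h hunk above replaces the single ConfigPageHeader_t pointer in _x_config_parms with a cfghdr union, so one CONFIGPARMS descriptor can carry either a standard or an extended page header; the reworked mpt_config() hunk earlier in this patch then takes ExtPageLength or PageLength from the appropriate member. The sketch below only illustrates that union-selection idea with simplified stand-in types and placeholder constants -- the struct layouts and values are not the real MPI definitions, they just keep the field the driver inspects (PageType) at the same offset in both flavours, which is the layout property the driver code relies on.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for ConfigPageHeader_t / ConfigExtendedPageHeader_t. */
struct page_hdr     { uint8_t page_type; uint8_t page_length; };
struct ext_page_hdr { uint8_t page_type; uint16_t ext_page_length; };

#define EXAMPLE_PAGETYPE_MASK     0x0F	/* placeholder */
#define EXAMPLE_PAGETYPE_EXTENDED 0x0F	/* placeholder */

/* Mirrors the new cfghdr union: one descriptor, two header flavours. */
struct config_parms {
	union {
		struct page_hdr     *hdr;
		struct ext_page_hdr *ehdr;
	} cfghdr;
};

/* Pick the SGE data length the way the reworked mpt_config() does:
 * ExtPageLength for extended pages, PageLength otherwise (both in dwords). */
static unsigned int config_data_bytes(const struct config_parms *cfg)
{
	if ((cfg->cfghdr.hdr->page_type & EXAMPLE_PAGETYPE_MASK) ==
	    EXAMPLE_PAGETYPE_EXTENDED)
		return cfg->cfghdr.ehdr->ext_page_length * 4;
	return cfg->cfghdr.hdr->page_length * 4;
}

int main(void)
{
	struct page_hdr std = { .page_type = 0x01, .page_length = 6 };
	struct ext_page_hdr ext = { .page_type = EXAMPLE_PAGETYPE_EXTENDED,
				    .ext_page_length = 12 };
	struct config_parms a = { .cfghdr.hdr  = &std };
	struct config_parms b = { .cfghdr.ehdr = &ext };

	printf("standard page: %u bytes, extended page: %u bytes\n",
	       config_data_bytes(&a), config_data_bytes(&b));
	return 0;
}
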
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 05ea5944c487..7577c2417e2e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -242,7 +242,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
242 /* Set the command status to GOOD if IOC Status is GOOD 242 /* Set the command status to GOOD if IOC Status is GOOD
243 * OR if SCSI I/O cmd and data underrun or recovered error. 243 * OR if SCSI I/O cmd and data underrun or recovered error.
244 */ 244 */
245 iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK; 245 iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
246 if (iocStatus == MPI_IOCSTATUS_SUCCESS) 246 if (iocStatus == MPI_IOCSTATUS_SUCCESS)
247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; 247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
248 248
@@ -2324,7 +2324,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2324 hdr.PageLength = 0; 2324 hdr.PageLength = 0;
2325 hdr.PageNumber = 0; 2325 hdr.PageNumber = 0;
2326 hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; 2326 hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
2327 cfg.hdr = &hdr; 2327 cfg.cfghdr.hdr = &hdr;
2328 cfg.physAddr = -1; 2328 cfg.physAddr = -1;
2329 cfg.pageAddr = 0; 2329 cfg.pageAddr = 0;
2330 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 2330 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -2333,7 +2333,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2333 2333
2334 strncpy(karg.serial_number, " ", 24); 2334 strncpy(karg.serial_number, " ", 24);
2335 if (mpt_config(ioc, &cfg) == 0) { 2335 if (mpt_config(ioc, &cfg) == 0) {
2336 if (cfg.hdr->PageLength > 0) { 2336 if (cfg.cfghdr.hdr->PageLength > 0) {
2337 /* Issue the second config page request */ 2337 /* Issue the second config page request */
2338 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2338 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
2339 2339
@@ -2479,7 +2479,7 @@ mptctl_hp_targetinfo(unsigned long arg)
2479 hdr.PageNumber = 0; 2479 hdr.PageNumber = 0;
2480 hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; 2480 hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
2481 2481
2482 cfg.hdr = &hdr; 2482 cfg.cfghdr.hdr = &hdr;
2483 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2483 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
2484 cfg.dir = 0; 2484 cfg.dir = 0;
2485 cfg.timeout = 0; 2485 cfg.timeout = 0;
@@ -2527,15 +2527,15 @@ mptctl_hp_targetinfo(unsigned long arg)
2527 hdr.PageNumber = 3; 2527 hdr.PageNumber = 3;
2528 hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; 2528 hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
2529 2529
2530 cfg.hdr = &hdr; 2530 cfg.cfghdr.hdr = &hdr;
2531 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; 2531 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
2532 cfg.dir = 0; 2532 cfg.dir = 0;
2533 cfg.timeout = 0; 2533 cfg.timeout = 0;
2534 cfg.physAddr = -1; 2534 cfg.physAddr = -1;
2535 if ((mpt_config(ioc, &cfg) == 0) && (cfg.hdr->PageLength > 0)) { 2535 if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
2536 /* Issue the second config page request */ 2536 /* Issue the second config page request */
2537 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2537 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
2538 data_sz = (int) cfg.hdr->PageLength * 4; 2538 data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
2539 pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent( 2539 pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
2540 ioc->pcidev, data_sz, &page_dma); 2540 ioc->pcidev, data_sz, &page_dma);
2541 if (pg3_alloc) { 2541 if (pg3_alloc) {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index b9d4f78725b4..4a003dc5fde8 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -281,12 +281,12 @@ mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
281 offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer; 281 offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
282 chain_idx = offset / ioc->req_sz; 282 chain_idx = offset / ioc->req_sz;
283 rc = SUCCESS; 283 rc = SUCCESS;
284 dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n", 284 dsgprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer chainBuf=%p ChainBuffer=%p offset=%d chain_idx=%d\n",
285 ioc->name, *retIndex, chainBuf)); 285 ioc->name, chainBuf, ioc->ChainBuffer, offset, chain_idx));
286 } else { 286 } else {
287 rc = FAILED; 287 rc = FAILED;
288 chain_idx = MPT_HOST_NO_CHAIN; 288 chain_idx = MPT_HOST_NO_CHAIN;
289 dfailprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n", 289 dfailprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer failed\n",
290 ioc->name)); 290 ioc->name));
291 } 291 }
292 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 292 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
@@ -432,7 +432,7 @@ nextSGEset:
432 */ 432 */
433 pReq->ChainOffset = 0; 433 pReq->ChainOffset = 0;
434 RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03; 434 RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
435 dsgprintk((MYIOC_s_ERR_FMT 435 dsgprintk((MYIOC_s_INFO_FMT
436 "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset)); 436 "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
437 ioc->RequestNB[req_idx] = RequestNB; 437 ioc->RequestNB[req_idx] = RequestNB;
438 } 438 }
@@ -491,11 +491,12 @@ nextSGEset:
491 /* NOTE: psge points to the beginning of the chain element 491 /* NOTE: psge points to the beginning of the chain element
492 * in current buffer. Get a chain buffer. 492 * in current buffer. Get a chain buffer.
493 */ 493 */
494 dsgprintk((MYIOC_s_INFO_FMT 494 if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) {
495 "calling getFreeChainBuffer SCSI cmd=%02x (%p)\n", 495 dfailprintk((MYIOC_s_INFO_FMT
496 ioc->name, pReq->CDB[0], SCpnt)); 496 "getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n",
497 if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) 497 ioc->name, pReq->CDB[0], SCpnt));
498 return FAILED; 498 return FAILED;
499 }
499 500
500 /* Update the tracking arrays. 501 /* Update the tracking arrays.
501 * If chainSge == NULL, update ReqToChain, else ChainToChain 502 * If chainSge == NULL, update ReqToChain, else ChainToChain
@@ -577,14 +578,20 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
577 return 1; 578 return 1;
578 } 579 }
579 580
580 dmfprintk((MYIOC_s_INFO_FMT
581 "ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
582 ioc->name, mf, mr, sc, req_idx));
583
584 sc->result = DID_OK << 16; /* Set default reply as OK */ 581 sc->result = DID_OK << 16; /* Set default reply as OK */
585 pScsiReq = (SCSIIORequest_t *) mf; 582 pScsiReq = (SCSIIORequest_t *) mf;
586 pScsiReply = (SCSIIOReply_t *) mr; 583 pScsiReply = (SCSIIOReply_t *) mr;
587 584
585 if((ioc->facts.MsgVersion >= MPI_VERSION_01_05) && pScsiReply){
586 dmfprintk((MYIOC_s_INFO_FMT
587 "ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d,task-tag=%d)\n",
588 ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag));
589 }else{
590 dmfprintk((MYIOC_s_INFO_FMT
591 "ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
592 ioc->name, mf, mr, sc, req_idx));
593 }
594
588 if (pScsiReply == NULL) { 595 if (pScsiReply == NULL) {
589 /* special context reply handling */ 596 /* special context reply handling */
590 ; 597 ;
@@ -658,8 +665,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
658 /* Sufficient data transfer occurred */ 665 /* Sufficient data transfer occurred */
659 sc->result = (DID_OK << 16) | scsi_status; 666 sc->result = (DID_OK << 16) | scsi_status;
660 } else if ( xfer_cnt == 0 ) { 667 } else if ( xfer_cnt == 0 ) {
661 /* A CRC Error causes this condition; retry */ 668 /* A CRC Error causes this condition; retry */
662 sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) | 669 sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
663 (CHECK_CONDITION << 1); 670 (CHECK_CONDITION << 1);
664 sc->sense_buffer[0] = 0x70; 671 sc->sense_buffer[0] = 0x70;
665 sc->sense_buffer[2] = NO_SENSE; 672 sc->sense_buffer[2] = NO_SENSE;
@@ -668,7 +675,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
668 } else { 675 } else {
669 sc->result = DID_SOFT_ERROR << 16; 676 sc->result = DID_SOFT_ERROR << 16;
670 } 677 }
671 dreplyprintk((KERN_NOTICE "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->target)); 678 dreplyprintk((KERN_NOTICE
679 "RESIDUAL_MISMATCH: result=%x on id=%d\n",
680 sc->result, sc->device->id));
672 break; 681 break;
673 682
674 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ 683 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@@ -796,7 +805,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
796 return 1; 805 return 1;
797} 806}
798 807
799
800/* 808/*
801 * mptscsih_flush_running_cmds - For each command found, search 809 * mptscsih_flush_running_cmds - For each command found, search
802 * Scsi_Host instance taskQ and reply to OS. 810 * Scsi_Host instance taskQ and reply to OS.
@@ -1017,7 +1025,7 @@ mptscsih_remove(struct pci_dev *pdev)
1017 scsi_host_put(host); 1025 scsi_host_put(host);
1018 1026
1019 mpt_detach(pdev); 1027 mpt_detach(pdev);
1020 1028
1021} 1029}
1022 1030
1023/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1031/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1072,7 +1080,7 @@ mptscsih_resume(struct pci_dev *pdev)
1072 MPT_SCSI_HOST *hd; 1080 MPT_SCSI_HOST *hd;
1073 1081
1074 mpt_resume(pdev); 1082 mpt_resume(pdev);
1075 1083
1076 if(!host) 1084 if(!host)
1077 return 0; 1085 return 0;
1078 1086
@@ -1214,8 +1222,8 @@ mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t off
1214 int size = 0; 1222 int size = 0;
1215 1223
1216 if (func) { 1224 if (func) {
1217 /* 1225 /*
1218 * write is not supported 1226 * write is not supported
1219 */ 1227 */
1220 } else { 1228 } else {
1221 if (start) 1229 if (start)
@@ -1535,17 +1543,17 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
1535 */ 1543 */
1536 if (mptscsih_tm_pending_wait(hd) == FAILED) { 1544 if (mptscsih_tm_pending_wait(hd) == FAILED) {
1537 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 1545 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1538 dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler abort: " 1546 dtmprintk((KERN_INFO MYNAM ": %s: TMHandler abort: "
1539 "Timed out waiting for last TM (%d) to complete! \n", 1547 "Timed out waiting for last TM (%d) to complete! \n",
1540 hd->ioc->name, hd->tmPending)); 1548 hd->ioc->name, hd->tmPending));
1541 return FAILED; 1549 return FAILED;
1542 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { 1550 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1543 dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler target reset: " 1551 dtmprintk((KERN_INFO MYNAM ": %s: TMHandler target reset: "
1544 "Timed out waiting for last TM (%d) to complete! \n", 1552 "Timed out waiting for last TM (%d) to complete! \n",
1545 hd->ioc->name, hd->tmPending)); 1553 hd->ioc->name, hd->tmPending));
1546 return FAILED; 1554 return FAILED;
1547 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { 1555 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
1548 dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler bus reset: " 1556 dtmprintk((KERN_INFO MYNAM ": %s: TMHandler bus reset: "
1549 "Timed out waiting for last TM (%d) to complete! \n", 1557 "Timed out waiting for last TM (%d) to complete! \n",
1550 hd->ioc->name, hd->tmPending)); 1558 hd->ioc->name, hd->tmPending));
1551 if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)) 1559 if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS))
@@ -1631,8 +1639,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
1631 if ((mf = mpt_get_msg_frame(hd->ioc->TaskCtx, hd->ioc)) == NULL) { 1639 if ((mf = mpt_get_msg_frame(hd->ioc->TaskCtx, hd->ioc)) == NULL) {
1632 dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", 1640 dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
1633 hd->ioc->name)); 1641 hd->ioc->name));
1634 //return FAILED; 1642 return FAILED;
1635 return -999;
1636 } 1643 }
1637 dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n", 1644 dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
1638 hd->ioc->name, mf)); 1645 hd->ioc->name, mf));
@@ -1661,9 +1668,8 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
1661 1668
1662 pScsiTm->TaskMsgContext = ctx2abort; 1669 pScsiTm->TaskMsgContext = ctx2abort;
1663 1670
1664 dtmprintk((MYIOC_s_INFO_FMT 1671 dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
1665 "IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n", 1672 hd->ioc->name, ctx2abort, type));
1666 hd->ioc->name, ctx2abort, type));
1667 1673
1668 DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm); 1674 DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm);
1669 1675
@@ -1902,13 +1908,13 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
1902 1908
1903 /* If we can't locate the host to reset, then we failed. */ 1909 /* If we can't locate the host to reset, then we failed. */
1904 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1910 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1905 dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: " 1911 dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
1906 "Can't locate host! (sc=%p)\n", 1912 "Can't locate host! (sc=%p)\n",
1907 SCpnt ) ); 1913 SCpnt ) );
1908 return FAILED; 1914 return FAILED;
1909 } 1915 }
1910 1916
1911 printk(KERN_WARNING MYNAM ": %s: >> Attempting host reset! (sc=%p)\n", 1917 printk(KERN_WARNING MYNAM ": %s: Attempting host reset! (sc=%p)\n",
1912 hd->ioc->name, SCpnt); 1918 hd->ioc->name, SCpnt);
1913 1919
1914 /* If our attempts to reset the host failed, then return a failed 1920 /* If our attempts to reset the host failed, then return a failed
@@ -1924,7 +1930,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
1924 hd->tmState = TM_STATE_NONE; 1930 hd->tmState = TM_STATE_NONE;
1925 } 1931 }
1926 1932
1927 dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: " 1933 dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
1928 "Status = %s\n", 1934 "Status = %s\n",
1929 (status == SUCCESS) ? "SUCCESS" : "FAILED" ) ); 1935 (status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
1930 1936
@@ -1951,8 +1957,8 @@ mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
1951 if (hd->tmState == TM_STATE_NONE) { 1957 if (hd->tmState == TM_STATE_NONE) {
1952 hd->tmState = TM_STATE_IN_PROGRESS; 1958 hd->tmState = TM_STATE_IN_PROGRESS;
1953 hd->tmPending = 1; 1959 hd->tmPending = 1;
1954 status = SUCCESS;
1955 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 1960 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
1961 status = SUCCESS;
1956 break; 1962 break;
1957 } 1963 }
1958 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 1964 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
@@ -1980,7 +1986,7 @@ mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
1980 spin_lock_irqsave(&hd->ioc->FreeQlock, flags); 1986 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
1981 if(hd->tmPending == 0) { 1987 if(hd->tmPending == 0) {
1982 status = SUCCESS; 1988 status = SUCCESS;
1983 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 1989 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
1984 break; 1990 break;
1985 } 1991 }
1986 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 1992 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
@@ -2318,10 +2324,10 @@ mptscsih_slave_configure(struct scsi_device *device)
2318 if (pTarget == NULL) { 2324 if (pTarget == NULL) {
2319 /* Driver doesn't know about this device. 2325 /* Driver doesn't know about this device.
2320 * Kernel may generate a "Dummy Lun 0" which 2326 * Kernel may generate a "Dummy Lun 0" which
2321 * may become a real Lun if a 2327 * may become a real Lun if a
2322 * "scsi add-single-device" command is executed 2328 * "scsi add-single-device" command is executed
2323 * while the driver is active (hot-plug a 2329 * while the driver is active (hot-plug a
2324 * device). LSI Raid controllers need 2330 * device). LSI Raid controllers need
2325 * queue_depth set to DEV_HIGH for this reason. 2331 * queue_depth set to DEV_HIGH for this reason.
2326 */ 2332 */
2327 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG, 2333 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
@@ -2691,7 +2697,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2691 * If the peripheral qualifier filter is enabled then if the target reports a 0x1 2697 * If the peripheral qualifier filter is enabled then if the target reports a 0x1
2692 * (i.e. The target is capable of supporting the specified peripheral device type 2698 * (i.e. The target is capable of supporting the specified peripheral device type
2693 * on this logical unit; however, the physical device is not currently connected 2699 * on this logical unit; however, the physical device is not currently connected
2694 * to this logical unit) it will be converted to a 0x3 (i.e. The target is not 2700 * to this logical unit) it will be converted to a 0x3 (i.e. The target is not
2695 * capable of supporting a physical device on this logical unit). This is to work 2701 * capable of supporting a physical device on this logical unit). This is to work
2696 * around a bug in th emid-layer in some distributions in which the mid-layer will 2702 * around a bug in th emid-layer in some distributions in which the mid-layer will
2697 * continue to try to communicate to the LUN and evntually create a dummy LUN. 2703 * continue to try to communicate to the LUN and evntually create a dummy LUN.
@@ -3194,8 +3200,8 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3194 /* Get a MF for this command. 3200 /* Get a MF for this command.
3195 */ 3201 */
3196 if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { 3202 if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
3197 dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n", 3203 dfailprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
3198 ioc->name)); 3204 ioc->name));
3199 return -EAGAIN; 3205 return -EAGAIN;
3200 } 3206 }
3201 3207
@@ -3289,7 +3295,7 @@ mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus)
3289 /* Get a MF for this command. 3295 /* Get a MF for this command.
3290 */ 3296 */
3291 if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { 3297 if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
3292 dprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n", 3298 dfailprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
3293 ioc->name)); 3299 ioc->name));
3294 return -EAGAIN; 3300 return -EAGAIN;
3295 } 3301 }
@@ -3447,7 +3453,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
3447 * some type of error occurred. 3453 * some type of error occurred.
3448 */ 3454 */
3449 MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr; 3455 MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
3450 if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS) 3456 if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
3451 completionCode = MPT_SCANDV_GOOD; 3457 completionCode = MPT_SCANDV_GOOD;
3452 else 3458 else
3453 completionCode = MPT_SCANDV_SOME_ERROR; 3459 completionCode = MPT_SCANDV_SOME_ERROR;
@@ -3955,7 +3961,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
3955 header1.PageLength = ioc->spi_data.sdp1length; 3961 header1.PageLength = ioc->spi_data.sdp1length;
3956 header1.PageNumber = 1; 3962 header1.PageNumber = 1;
3957 header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; 3963 header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
3958 cfg.hdr = &header1; 3964 cfg.cfghdr.hdr = &header1;
3959 cfg.physAddr = cfg1_dma_addr; 3965 cfg.physAddr = cfg1_dma_addr;
3960 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; 3966 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
3961 cfg.dir = 1; 3967 cfg.dir = 1;
@@ -3996,9 +4002,9 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
3996 dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC " 4002 dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
3997 "offset=0 negoFlags=%x request=%x config=%x\n", 4003 "offset=0 negoFlags=%x request=%x config=%x\n",
3998 id, flags, requested, configuration)); 4004 id, flags, requested, configuration));
3999 pcfg1Data->RequestedParameters = le32_to_cpu(requested); 4005 pcfg1Data->RequestedParameters = cpu_to_le32(requested);
4000 pcfg1Data->Reserved = 0; 4006 pcfg1Data->Reserved = 0;
4001 pcfg1Data->Configuration = le32_to_cpu(configuration); 4007 pcfg1Data->Configuration = cpu_to_le32(configuration);
4002 cfg.pageAddr = (bus<<8) | id; 4008 cfg.pageAddr = (bus<<8) | id;
4003 mpt_config(hd->ioc, &cfg); 4009 mpt_config(hd->ioc, &cfg);
4004 } 4010 }
@@ -4353,7 +4359,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4353 /* Prep cfg structure 4359 /* Prep cfg structure
4354 */ 4360 */
4355 cfg.pageAddr = (bus<<8) | id; 4361 cfg.pageAddr = (bus<<8) | id;
4356 cfg.hdr = NULL; 4362 cfg.cfghdr.hdr = NULL;
4357 4363
4358 /* Prep SDP0 header 4364 /* Prep SDP0 header
4359 */ 4365 */
@@ -4399,7 +4405,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4399 pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz); 4405 pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz);
4400 cfg1_dma_addr = dvbuf_dma + sz; 4406 cfg1_dma_addr = dvbuf_dma + sz;
4401 4407
4402 /* Skip this ID? Set cfg.hdr to force config page write 4408 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write
4403 */ 4409 */
4404 { 4410 {
4405 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 4411 ScsiCfgData *pspi_data = &hd->ioc->spi_data;
@@ -4417,7 +4423,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4417 4423
4418 dv.cmd = MPT_SET_MAX; 4424 dv.cmd = MPT_SET_MAX;
4419 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); 4425 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
4420 cfg.hdr = &header1; 4426 cfg.cfghdr.hdr = &header1;
4421 4427
4422 /* Save the final negotiated settings to 4428 /* Save the final negotiated settings to
4423 * SCSI device page 1. 4429 * SCSI device page 1.
@@ -4483,7 +4489,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4483 dv.cmd = MPT_SET_MIN; 4489 dv.cmd = MPT_SET_MIN;
4484 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); 4490 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
4485 4491
4486 cfg.hdr = &header1; 4492 cfg.cfghdr.hdr = &header1;
4487 cfg.physAddr = cfg1_dma_addr; 4493 cfg.physAddr = cfg1_dma_addr;
4488 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; 4494 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
4489 cfg.dir = 1; 4495 cfg.dir = 1;
@@ -4596,8 +4602,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4596 if ((pbuf1[56] & 0x02) == 0) { 4602 if ((pbuf1[56] & 0x02) == 0) {
4597 pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS; 4603 pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
4598 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS; 4604 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
4599 ddvprintk((MYIOC_s_NOTE_FMT 4605 ddvprintk((MYIOC_s_NOTE_FMT
4600 "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n", 4606 "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
4601 ioc->name, id, pbuf1[56])); 4607 ioc->name, id, pbuf1[56]));
4602 } 4608 }
4603 } 4609 }
@@ -4637,7 +4643,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4637 u32 sdp0_info; 4643 u32 sdp0_info;
4638 u32 sdp0_nego; 4644 u32 sdp0_nego;
4639 4645
4640 cfg.hdr = &header0; 4646 cfg.cfghdr.hdr = &header0;
4641 cfg.physAddr = cfg0_dma_addr; 4647 cfg.physAddr = cfg0_dma_addr;
4642 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 4648 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4643 cfg.dir = 0; 4649 cfg.dir = 0;
@@ -4673,7 +4679,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4673 if (!firstPass) 4679 if (!firstPass)
4674 doFallback = 1; 4680 doFallback = 1;
4675 } else { 4681 } else {
4676 ddvprintk((MYIOC_s_NOTE_FMT 4682 ddvprintk((MYIOC_s_NOTE_FMT
4677 "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id)); 4683 "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
4678 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE; 4684 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
4679 mptscsih_initTarget(hd, 4685 mptscsih_initTarget(hd,
@@ -4689,8 +4695,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4689 4695
4690 } else if (rc == MPT_SCANDV_ISSUE_SENSE) 4696 } else if (rc == MPT_SCANDV_ISSUE_SENSE)
4691 doFallback = 1; /* set fallback flag */ 4697 doFallback = 1; /* set fallback flag */
4692 else if ((rc == MPT_SCANDV_DID_RESET) || 4698 else if ((rc == MPT_SCANDV_DID_RESET) ||
4693 (rc == MPT_SCANDV_SENSE) || 4699 (rc == MPT_SCANDV_SENSE) ||
4694 (rc == MPT_SCANDV_FALLBACK)) 4700 (rc == MPT_SCANDV_FALLBACK))
4695 doFallback = 1; /* set fallback flag */ 4701 doFallback = 1; /* set fallback flag */
4696 else 4702 else
@@ -4722,7 +4728,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4722 * 4) release 4728 * 4) release
4723 * 5) update nego parms to target struct 4729 * 5) update nego parms to target struct
4724 */ 4730 */
4725 cfg.hdr = &header1; 4731 cfg.cfghdr.hdr = &header1;
4726 cfg.physAddr = cfg1_dma_addr; 4732 cfg.physAddr = cfg1_dma_addr;
4727 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; 4733 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
4728 cfg.dir = 1; 4734 cfg.dir = 1;
@@ -5121,12 +5127,12 @@ target_done:
5121 5127
5122 /* Set if cfg1_dma_addr contents is valid 5128 /* Set if cfg1_dma_addr contents is valid
5123 */ 5129 */
5124 if ((cfg.hdr != NULL) && (retcode == 0)){ 5130 if ((cfg.cfghdr.hdr != NULL) && (retcode == 0)){
5125 /* If disk, not U320, disable QAS 5131 /* If disk, not U320, disable QAS
5126 */ 5132 */
5127 if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) { 5133 if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) {
5128 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS; 5134 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
5129 ddvprintk((MYIOC_s_NOTE_FMT 5135 ddvprintk((MYIOC_s_NOTE_FMT
5130 "noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor)); 5136 "noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor));
5131 } 5137 }
5132 5138
@@ -5137,7 +5143,7 @@ target_done:
5137 * skip save of the final negotiated settings to 5143 * skip save of the final negotiated settings to
5138 * SCSI device page 1. 5144 * SCSI device page 1.
5139 * 5145 *
5140 cfg.hdr = &header1; 5146 cfg.cfghdr.hdr = &header1;
5141 cfg.physAddr = cfg1_dma_addr; 5147 cfg.physAddr = cfg1_dma_addr;
5142 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; 5148 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
5143 cfg.dir = 1; 5149 cfg.dir = 1;
@@ -5248,7 +5254,7 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5248 /* Update tmax values with those from Device Page 0.*/ 5254 /* Update tmax values with those from Device Page 0.*/
5249 pPage0 = (SCSIDevicePage0_t *) pPage; 5255 pPage0 = (SCSIDevicePage0_t *) pPage;
5250 if (pPage0) { 5256 if (pPage0) {
5251 val = cpu_to_le32(pPage0->NegotiatedParameters); 5257 val = le32_to_cpu(pPage0->NegotiatedParameters);
5252 dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0; 5258 dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0;
5253 dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16; 5259 dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16;
5254 dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8; 5260 dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
@@ -5276,12 +5282,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5276 dv->now.offset, &val, &configuration, dv->now.flags); 5282 dv->now.offset, &val, &configuration, dv->now.flags);
5277 dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n", 5283 dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
5278 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration)); 5284 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
5279 pPage1->RequestedParameters = le32_to_cpu(val); 5285 pPage1->RequestedParameters = cpu_to_le32(val);
5280 pPage1->Reserved = 0; 5286 pPage1->Reserved = 0;
5281 pPage1->Configuration = le32_to_cpu(configuration); 5287 pPage1->Configuration = cpu_to_le32(configuration);
5282 } 5288 }
5283 5289
5284 ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x request=%x configuration=%x\n", 5290 ddvprintk(("id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x configuration=%x\n",
5285 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration)); 5291 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
5286 break; 5292 break;
5287 5293
@@ -5301,9 +5307,9 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5301 offset, &val, &configuration, negoFlags); 5307 offset, &val, &configuration, negoFlags);
5302 dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n", 5308 dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
5303 id, width, factor, offset, negoFlags, val, configuration)); 5309 id, width, factor, offset, negoFlags, val, configuration));
5304 pPage1->RequestedParameters = le32_to_cpu(val); 5310 pPage1->RequestedParameters = cpu_to_le32(val);
5305 pPage1->Reserved = 0; 5311 pPage1->Reserved = 0;
5306 pPage1->Configuration = le32_to_cpu(configuration); 5312 pPage1->Configuration = cpu_to_le32(configuration);
5307 } 5313 }
5308 ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n", 5314 ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n",
5309 id, width, factor, offset, val, configuration, negoFlags)); 5315 id, width, factor, offset, val, configuration, negoFlags));
@@ -5377,12 +5383,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5377 if (pPage1) { 5383 if (pPage1) {
5378 mptscsih_setDevicePage1Flags (width, factor, offset, &val, 5384 mptscsih_setDevicePage1Flags (width, factor, offset, &val,
5379 &configuration, dv->now.flags); 5385 &configuration, dv->now.flags);
5380 dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x flags=%x request=%x config=%x\n", 5386 dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x negoFlags=%x request=%x config=%x\n",
5381 id, width, offset, factor, dv->now.flags, val, configuration)); 5387 id, width, offset, factor, dv->now.flags, val, configuration));
5382 5388
5383 pPage1->RequestedParameters = le32_to_cpu(val); 5389 pPage1->RequestedParameters = cpu_to_le32(val);
5384 pPage1->Reserved = 0; 5390 pPage1->Reserved = 0;
5385 pPage1->Configuration = le32_to_cpu(configuration); 5391 pPage1->Configuration = cpu_to_le32(configuration);
5386 } 5392 }
5387 5393
5388 ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n", 5394 ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n",
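
The mptscsih.c hunks above straighten out the byte-order helpers: values stored into IOC-consumed structures (RequestedParameters, Configuration) now use cpu_to_le32(), while values read back from replies use le32_to_cpu()/le16_to_cpu() (the old raw ActionStatus compare only worked on little-endian hosts). A minimal userspace sketch of the same convention, with glibc's htole32()/le32toh() standing in for the kernel macros and a made-up stand-in struct:

	/*
	 * Sketch only: htole32()/le32toh() stand in for cpu_to_le32()/
	 * le32_to_cpu(); fake_sdp1 is a stand-in for SCSIDevicePage1_t.
	 * Store CPU -> LE, read LE -> CPU; comparing a raw little-endian
	 * reply field against a host-order constant breaks on big-endian.
	 */
	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	struct fake_sdp1 {
		uint32_t RequestedParameters;	/* little-endian in memory */
		uint32_t Configuration;
	};

	int main(void)
	{
		struct fake_sdp1 page;
		uint32_t requested = 0x00081310;	/* hypothetical value */

		page.RequestedParameters = htole32(requested);	/* CPU -> LE */
		page.Configuration = htole32(0);

		printf("read back in host order: 0x%08x\n",	/* LE -> CPU */
		       (unsigned int)le32toh(page.RequestedParameters));
		return 0;
	}
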
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index dfa8806b1e13..587d1274fd74 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -162,15 +162,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
162 u8 *mem; 162 u8 *mem;
163 int error=0; 163 int error=0;
164 int r; 164 int r;
165 165
166 if ((r = mpt_attach(pdev,id)) != 0) 166 if ((r = mpt_attach(pdev,id)) != 0)
167 return r; 167 return r;
168 168
169 ioc = pci_get_drvdata(pdev); 169 ioc = pci_get_drvdata(pdev);
170 ioc->DoneCtx = mptspiDoneCtx; 170 ioc->DoneCtx = mptspiDoneCtx;
171 ioc->TaskCtx = mptspiTaskCtx; 171 ioc->TaskCtx = mptspiTaskCtx;
172 ioc->InternalCtx = mptspiInternalCtx; 172 ioc->InternalCtx = mptspiInternalCtx;
173 173
174 /* Added sanity check on readiness of the MPT adapter. 174 /* Added sanity check on readiness of the MPT adapter.
175 */ 175 */
176 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { 176 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 914804512dba..7e98434cfa37 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -25,15 +25,15 @@
25#include <linux/termios.h> 25#include <linux/termios.h>
26#include <linux/tty.h> 26#include <linux/tty.h>
27#include <linux/serial_core.h> 27#include <linux/serial_core.h>
28#include <linux/serial.h>
29#include <linux/serial_reg.h> 28#include <linux/serial_reg.h>
29#include <linux/serial_8250.h>
30#include "ibmasm.h" 30#include "ibmasm.h"
31#include "lowlevel.h" 31#include "lowlevel.h"
32 32
33 33
34void ibmasm_register_uart(struct service_processor *sp) 34void ibmasm_register_uart(struct service_processor *sp)
35{ 35{
36 struct serial_struct serial; 36 struct uart_port uport;
37 void __iomem *iomem_base; 37 void __iomem *iomem_base;
38 38
39 iomem_base = sp->base_address + SCOUT_COM_B_BASE; 39 iomem_base = sp->base_address + SCOUT_COM_B_BASE;
@@ -47,14 +47,14 @@ void ibmasm_register_uart(struct service_processor *sp)
47 return; 47 return;
48 } 48 }
49 49
50 memset(&serial, 0, sizeof(serial)); 50 memset(&uport, 0, sizeof(struct uart_port));
51 serial.irq = sp->irq; 51 uport.irq = sp->irq;
52 serial.baud_base = 3686400 / 16; 52 uport.uartclk = 3686400;
53 serial.flags = UPF_AUTOPROBE | UPF_SHARE_IRQ; 53 uport.flags = UPF_AUTOPROBE | UPF_SHARE_IRQ;
54 serial.io_type = UPIO_MEM; 54 uport.iotype = UPIO_MEM;
55 serial.iomem_base = iomem_base; 55 uport.membase = iomem_base;
56 56
57 sp->serial_line = register_serial(&serial); 57 sp->serial_line = serial8250_register_port(&uport);
58 if (sp->serial_line < 0) { 58 if (sp->serial_line < 0) {
59 dev_err(sp->dev, "Failed to register serial port\n"); 59 dev_err(sp->dev, "Failed to register serial port\n");
60 return; 60 return;
@@ -68,5 +68,5 @@ void ibmasm_unregister_uart(struct service_processor *sp)
68 return; 68 return;
69 69
70 disable_uart_interrupts(sp->base_address); 70 disable_uart_interrupts(sp->base_address);
71 unregister_serial(sp->serial_line); 71 serial8250_unregister_port(sp->serial_line);
72} 72}
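
The ibmasm conversion above swaps the legacy register_serial()/struct serial_struct pair for serial8250_register_port()/struct uart_port. The field mapping is one to one except for the clock: serial_struct carried baud_base (the input clock already divided by 16), while uart_port carries the raw clock in uartclk, which is why 3686400 / 16 becomes 3686400. A quick stand-alone check of that relationship:

	/* Stand-alone check of the baud_base <-> uartclk relationship used in
	 * the conversion above (clock value from the ibmasm board). */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned int uartclk   = 3686400;		/* uart_port.uartclk */
		const unsigned int baud_base = uartclk / 16;	/* old serial_struct */

		assert(baud_base == 230400);
		printf("uartclk=%u -> baud_base=%u (max standard rate)\n",
		       uartclk, baud_base);
		return 0;
	}
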
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 3c5904834fe8..0a117c61cd18 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -2,6 +2,8 @@
2 * linux/drivers/mmc/mmc.c 2 * linux/drivers/mmc/mmc.c
3 * 3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * SD support Copyright (C) 2005 Pierre Ossman, All Rights Reserved.
5 * 7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -16,6 +18,8 @@
16#include <linux/delay.h> 18#include <linux/delay.h>
17#include <linux/pagemap.h> 19#include <linux/pagemap.h>
18#include <linux/err.h> 20#include <linux/err.h>
21#include <asm/scatterlist.h>
22#include <linux/scatterlist.h>
19 23
20#include <linux/mmc/card.h> 24#include <linux/mmc/card.h>
21#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
@@ -172,7 +176,81 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
172 176
173EXPORT_SYMBOL(mmc_wait_for_cmd); 177EXPORT_SYMBOL(mmc_wait_for_cmd);
174 178
179/**
180 * mmc_wait_for_app_cmd - start an application command and wait for
181 completion
182 * @host: MMC host to start command
183 * @rca: RCA to send MMC_APP_CMD to
184 * @cmd: MMC command to start
185 * @retries: maximum number of retries
186 *
187 * Sends a MMC_APP_CMD, checks the card response, sends the command
188 * in the parameter and waits for it to complete. Return any error
189 * that occurred while the command was executing. Do not attempt to
190 * parse the response.
191 */
192int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
193 struct mmc_command *cmd, int retries)
194{
195 struct mmc_request mrq;
196 struct mmc_command appcmd;
197
198 int i, err;
199
200 BUG_ON(host->card_busy == NULL);
201 BUG_ON(retries < 0);
202
203 err = MMC_ERR_INVALID;
204
205 /*
206 * We have to resend MMC_APP_CMD for each attempt so
207 * we cannot use the retries field in mmc_command.
208 */
209 for (i = 0;i <= retries;i++) {
210 memset(&mrq, 0, sizeof(struct mmc_request));
211
212 appcmd.opcode = MMC_APP_CMD;
213 appcmd.arg = rca << 16;
214 appcmd.flags = MMC_RSP_R1;
215 appcmd.retries = 0;
216 memset(appcmd.resp, 0, sizeof(appcmd.resp));
217 appcmd.data = NULL;
218
219 mrq.cmd = &appcmd;
220 appcmd.data = NULL;
221
222 mmc_wait_for_req(host, &mrq);
223
224 if (appcmd.error) {
225 err = appcmd.error;
226 continue;
227 }
228
229 /* Check that card supported application commands */
230 if (!(appcmd.resp[0] & R1_APP_CMD))
231 return MMC_ERR_FAILED;
232
233 memset(&mrq, 0, sizeof(struct mmc_request));
234
235 memset(cmd->resp, 0, sizeof(cmd->resp));
236 cmd->retries = 0;
237
238 mrq.cmd = cmd;
239 cmd->data = NULL;
240
241 mmc_wait_for_req(host, &mrq);
175 242
243 err = cmd->error;
244 if (cmd->error == MMC_ERR_NONE)
245 break;
246 }
247
248 return err;
249}
250
251EXPORT_SYMBOL(mmc_wait_for_app_cmd);
252
253static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
176 254
177/** 255/**
178 * __mmc_claim_host - exclusively claim a host 256 * __mmc_claim_host - exclusively claim a host
@@ -206,16 +284,10 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
206 spin_unlock_irqrestore(&host->lock, flags); 284 spin_unlock_irqrestore(&host->lock, flags);
207 remove_wait_queue(&host->wq, &wait); 285 remove_wait_queue(&host->wq, &wait);
208 286
209 if (card != (void *)-1 && host->card_selected != card) { 287 if (card != (void *)-1) {
210 struct mmc_command cmd; 288 err = mmc_select_card(host, card);
211 289 if (err != MMC_ERR_NONE)
212 host->card_selected = card; 290 return err;
213
214 cmd.opcode = MMC_SELECT_CARD;
215 cmd.arg = card->rca << 16;
216 cmd.flags = MMC_RSP_R1;
217
218 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
219 } 291 }
220 292
221 return err; 293 return err;
@@ -245,6 +317,63 @@ void mmc_release_host(struct mmc_host *host)
245 317
246EXPORT_SYMBOL(mmc_release_host); 318EXPORT_SYMBOL(mmc_release_host);
247 319
320static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
321{
322 int err;
323 struct mmc_command cmd;
324
325 BUG_ON(host->card_busy == NULL);
326
327 if (host->card_selected == card)
328 return MMC_ERR_NONE;
329
330 host->card_selected = card;
331
332 cmd.opcode = MMC_SELECT_CARD;
333 cmd.arg = card->rca << 16;
334 cmd.flags = MMC_RSP_R1;
335
336 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
337 if (err != MMC_ERR_NONE)
338 return err;
339
340 /*
341 * Default bus width is 1 bit.
342 */
343 host->ios.bus_width = MMC_BUS_WIDTH_1;
344
345 /*
346 * We can only change the bus width of the selected
347 * card so therefore we have to put the handling
348 * here.
349 */
350 if (host->caps & MMC_CAP_4_BIT_DATA) {
351 /*
352 * The card is in 1 bit mode by default so
353 * we only need to change if it supports the
354 * wider version.
355 */
356 if (mmc_card_sd(card) &&
357 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
358 struct mmc_command cmd;
359 cmd.opcode = SD_APP_SET_BUS_WIDTH;
360 cmd.arg = SD_BUS_WIDTH_4;
361 cmd.flags = MMC_RSP_R1;
362
363 err = mmc_wait_for_app_cmd(host, card->rca, &cmd,
364 CMD_RETRIES);
365 if (err != MMC_ERR_NONE)
366 return err;
367
368 host->ios.bus_width = MMC_BUS_WIDTH_4;
369 }
370 }
371
372 host->ops->set_ios(host, &host->ios);
373
374 return MMC_ERR_NONE;
375}
376
248/* 377/*
249 * Ensure that no card is selected. 378 * Ensure that no card is selected.
250 */ 379 */
@@ -322,48 +451,69 @@ static void mmc_decode_cid(struct mmc_card *card)
322 451
323 memset(&card->cid, 0, sizeof(struct mmc_cid)); 452 memset(&card->cid, 0, sizeof(struct mmc_cid));
324 453
325 /* 454 if (mmc_card_sd(card)) {
326 * The selection of the format here is guesswork based upon 455 /*
327 * information people have sent to date. 456 * SD doesn't currently have a version field so we will
328 */ 457 * have to assume we can parse this.
329 switch (card->csd.mmca_vsn) { 458 */
330 case 0: /* MMC v1.? */ 459 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
331 case 1: /* MMC v1.4 */ 460 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
332 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); 461 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
333 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 462 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
334 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 463 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
335 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 464 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
336 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 465 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
337 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 466 card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
338 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 467 card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
339 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); 468 card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
340 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); 469 card->cid.year = UNSTUFF_BITS(resp, 12, 8);
341 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); 470 card->cid.month = UNSTUFF_BITS(resp, 8, 4);
342 card->cid.serial = UNSTUFF_BITS(resp, 16, 24); 471
343 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 472 card->cid.year += 2000; /* SD cards year offset */
344 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 473 } else {
345 break; 474 /*
346 475 * The selection of the format here is based upon published
347 case 2: /* MMC v2.x ? */ 476 * specs from sandisk and from what people have reported.
348 case 3: /* MMC v3.x ? */ 477 */
349 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); 478 switch (card->csd.mmca_vsn) {
350 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); 479 case 0: /* MMC v1.0 - v1.2 */
351 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 480 case 1: /* MMC v1.4 */
352 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 481 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
353 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 482 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
354 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 483 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
355 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 484 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
356 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 485 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
357 card->cid.serial = UNSTUFF_BITS(resp, 16, 32); 486 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
358 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 487 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
359 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 488 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
360 break; 489 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
361 490 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
362 default: 491 card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
363 printk("%s: card has unknown MMCA version %d\n", 492 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
364 mmc_hostname(card->host), card->csd.mmca_vsn); 493 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
365 mmc_card_set_bad(card); 494 break;
366 break; 495
496 case 2: /* MMC v2.0 - v2.2 */
497 case 3: /* MMC v3.1 - v3.3 */
498 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
499 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
500 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
501 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
502 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
503 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
504 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
505 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
506 card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
507 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
508 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
509 break;
510
511 default:
512 printk("%s: card has unknown MMCA version %d\n",
513 mmc_hostname(card->host), card->csd.mmca_vsn);
514 mmc_card_set_bad(card);
515 break;
516 }
367 } 517 }
368} 518}
369 519
@@ -376,34 +526,86 @@ static void mmc_decode_csd(struct mmc_card *card)
376 unsigned int e, m, csd_struct; 526 unsigned int e, m, csd_struct;
377 u32 *resp = card->raw_csd; 527 u32 *resp = card->raw_csd;
378 528
379 /* 529 if (mmc_card_sd(card)) {
380 * We only understand CSD structure v1.1 and v2. 530 csd_struct = UNSTUFF_BITS(resp, 126, 2);
381 * v2 has extra information in bits 15, 11 and 10. 531 if (csd_struct != 0) {
382 */ 532 printk("%s: unrecognised CSD structure version %d\n",
383 csd_struct = UNSTUFF_BITS(resp, 126, 2); 533 mmc_hostname(card->host), csd_struct);
384 if (csd_struct != 1 && csd_struct != 2) { 534 mmc_card_set_bad(card);
385 printk("%s: unrecognised CSD structure version %d\n", 535 return;
386 mmc_hostname(card->host), csd_struct); 536 }
387 mmc_card_set_bad(card); 537
388 return; 538 m = UNSTUFF_BITS(resp, 115, 4);
539 e = UNSTUFF_BITS(resp, 112, 3);
540 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
541 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
542
543 m = UNSTUFF_BITS(resp, 99, 4);
544 e = UNSTUFF_BITS(resp, 96, 3);
545 csd->max_dtr = tran_exp[e] * tran_mant[m];
546 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
547
548 e = UNSTUFF_BITS(resp, 47, 3);
549 m = UNSTUFF_BITS(resp, 62, 12);
550 csd->capacity = (1 + m) << (e + 2);
551
552 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
553 } else {
554 /*
555 * We only understand CSD structure v1.1 and v1.2.
556 * v1.2 has extra information in bits 15, 11 and 10.
557 */
558 csd_struct = UNSTUFF_BITS(resp, 126, 2);
559 if (csd_struct != 1 && csd_struct != 2) {
560 printk("%s: unrecognised CSD structure version %d\n",
561 mmc_hostname(card->host), csd_struct);
562 mmc_card_set_bad(card);
563 return;
564 }
565
566 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
567 m = UNSTUFF_BITS(resp, 115, 4);
568 e = UNSTUFF_BITS(resp, 112, 3);
569 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
570 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
571
572 m = UNSTUFF_BITS(resp, 99, 4);
573 e = UNSTUFF_BITS(resp, 96, 3);
574 csd->max_dtr = tran_exp[e] * tran_mant[m];
575 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
576
577 e = UNSTUFF_BITS(resp, 47, 3);
578 m = UNSTUFF_BITS(resp, 62, 12);
579 csd->capacity = (1 + m) << (e + 2);
580
581 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
389 } 582 }
583}
584
585/*
586 * Given a 64-bit response, decode to our card SCR structure.
587 */
588static void mmc_decode_scr(struct mmc_card *card)
589{
590 struct sd_scr *scr = &card->scr;
591 unsigned int scr_struct;
592 u32 resp[4];
390 593
391 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); 594 BUG_ON(!mmc_card_sd(card));
392 m = UNSTUFF_BITS(resp, 115, 4);
393 e = UNSTUFF_BITS(resp, 112, 3);
394 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
395 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
396 595
397 m = UNSTUFF_BITS(resp, 99, 4); 596 resp[3] = card->raw_scr[1];
398 e = UNSTUFF_BITS(resp, 96, 3); 597 resp[2] = card->raw_scr[0];
399 csd->max_dtr = tran_exp[e] * tran_mant[m];
400 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
401 598
402 e = UNSTUFF_BITS(resp, 47, 3); 599 scr_struct = UNSTUFF_BITS(resp, 60, 4);
403 m = UNSTUFF_BITS(resp, 62, 12); 600 if (scr_struct != 0) {
404 csd->capacity = (1 + m) << (e + 2); 601 printk("%s: unrecognised SCR structure version %d\n",
602 mmc_hostname(card->host), scr_struct);
603 mmc_card_set_bad(card);
604 return;
605 }
405 606
406 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); 607 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
608 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
407} 609}
408 610
409/* 611/*
@@ -457,6 +659,11 @@ static void mmc_idle_cards(struct mmc_host *host)
457{ 659{
458 struct mmc_command cmd; 660 struct mmc_command cmd;
459 661
662 host->ios.chip_select = MMC_CS_HIGH;
663 host->ops->set_ios(host, &host->ios);
664
665 mmc_delay(1);
666
460 cmd.opcode = MMC_GO_IDLE_STATE; 667 cmd.opcode = MMC_GO_IDLE_STATE;
461 cmd.arg = 0; 668 cmd.arg = 0;
462 cmd.flags = MMC_RSP_NONE; 669 cmd.flags = MMC_RSP_NONE;
@@ -464,6 +671,11 @@ static void mmc_idle_cards(struct mmc_host *host)
464 mmc_wait_for_cmd(host, &cmd, 0); 671 mmc_wait_for_cmd(host, &cmd, 0);
465 672
466 mmc_delay(1); 673 mmc_delay(1);
674
675 host->ios.chip_select = MMC_CS_DONTCARE;
676 host->ops->set_ios(host, &host->ios);
677
678 mmc_delay(1);
467} 679}
468 680
469/* 681/*
@@ -475,7 +687,9 @@ static void mmc_power_up(struct mmc_host *host)
475 687
476 host->ios.vdd = bit; 688 host->ios.vdd = bit;
477 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 689 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
690 host->ios.chip_select = MMC_CS_DONTCARE;
478 host->ios.power_mode = MMC_POWER_UP; 691 host->ios.power_mode = MMC_POWER_UP;
692 host->ios.bus_width = MMC_BUS_WIDTH_1;
479 host->ops->set_ios(host, &host->ios); 693 host->ops->set_ios(host, &host->ios);
480 694
481 mmc_delay(1); 695 mmc_delay(1);
@@ -492,7 +706,9 @@ static void mmc_power_off(struct mmc_host *host)
492 host->ios.clock = 0; 706 host->ios.clock = 0;
493 host->ios.vdd = 0; 707 host->ios.vdd = 0;
494 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 708 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
709 host->ios.chip_select = MMC_CS_DONTCARE;
495 host->ios.power_mode = MMC_POWER_OFF; 710 host->ios.power_mode = MMC_POWER_OFF;
711 host->ios.bus_width = MMC_BUS_WIDTH_1;
496 host->ops->set_ios(host, &host->ios); 712 host->ops->set_ios(host, &host->ios);
497} 713}
498 714
@@ -524,6 +740,34 @@ static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
524 return err; 740 return err;
525} 741}
526 742
743static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
744{
745 struct mmc_command cmd;
746 int i, err = 0;
747
748 cmd.opcode = SD_APP_OP_COND;
749 cmd.arg = ocr;
750 cmd.flags = MMC_RSP_R3;
751
752 for (i = 100; i; i--) {
753 err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES);
754 if (err != MMC_ERR_NONE)
755 break;
756
757 if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
758 break;
759
760 err = MMC_ERR_TIMEOUT;
761
762 mmc_delay(10);
763 }
764
765 if (rocr)
766 *rocr = cmd.resp[0];
767
768 return err;
769}
770
527/* 771/*
528 * Discover cards by requesting their CID. If this command 772 * Discover cards by requesting their CID. If this command
529 * times out, it is not an error; there are no further cards 773 * times out, it is not an error; there are no further cards
@@ -567,13 +811,38 @@ static void mmc_discover_cards(struct mmc_host *host)
567 811
568 card->state &= ~MMC_STATE_DEAD; 812 card->state &= ~MMC_STATE_DEAD;
569 813
570 cmd.opcode = MMC_SET_RELATIVE_ADDR; 814 if (host->mode == MMC_MODE_SD) {
571 cmd.arg = card->rca << 16; 815 mmc_card_set_sd(card);
572 cmd.flags = MMC_RSP_R1;
573 816
574 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 817 cmd.opcode = SD_SEND_RELATIVE_ADDR;
575 if (err != MMC_ERR_NONE) 818 cmd.arg = 0;
576 mmc_card_set_dead(card); 819 cmd.flags = MMC_RSP_R1;
820
821 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
822 if (err != MMC_ERR_NONE)
823 mmc_card_set_dead(card);
824 else {
825 card->rca = cmd.resp[0] >> 16;
826
827 if (!host->ops->get_ro) {
828 printk(KERN_WARNING "%s: host does not "
829 "support reading read-only "
830 "switch. assuming write-enable.\n",
831 mmc_hostname(host));
832 } else {
833 if (host->ops->get_ro(host))
834 mmc_card_set_readonly(card);
835 }
836 }
837 } else {
838 cmd.opcode = MMC_SET_RELATIVE_ADDR;
839 cmd.arg = card->rca << 16;
840 cmd.flags = MMC_RSP_R1;
841
842 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
843 if (err != MMC_ERR_NONE)
844 mmc_card_set_dead(card);
845 }
577 } 846 }
578} 847}
579 848
@@ -605,6 +874,79 @@ static void mmc_read_csds(struct mmc_host *host)
605 } 874 }
606} 875}
607 876
877static void mmc_read_scrs(struct mmc_host *host)
878{
879 int err;
880 struct mmc_card *card;
881
882 struct mmc_request mrq;
883 struct mmc_command cmd;
884 struct mmc_data data;
885
886 struct scatterlist sg;
887
888 list_for_each_entry(card, &host->cards, node) {
889 if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
890 continue;
891 if (!mmc_card_sd(card))
892 continue;
893
894 err = mmc_select_card(host, card);
895 if (err != MMC_ERR_NONE) {
896 mmc_card_set_dead(card);
897 continue;
898 }
899
900 memset(&cmd, 0, sizeof(struct mmc_command));
901
902 cmd.opcode = MMC_APP_CMD;
903 cmd.arg = card->rca << 16;
904 cmd.flags = MMC_RSP_R1;
905
906 err = mmc_wait_for_cmd(host, &cmd, 0);
907 if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) {
908 mmc_card_set_dead(card);
909 continue;
910 }
911
912 memset(&cmd, 0, sizeof(struct mmc_command));
913
914 cmd.opcode = SD_APP_SEND_SCR;
915 cmd.arg = 0;
916 cmd.flags = MMC_RSP_R1;
917
918 memset(&data, 0, sizeof(struct mmc_data));
919
920 data.timeout_ns = card->csd.tacc_ns * 10;
921 data.timeout_clks = card->csd.tacc_clks * 10;
922 data.blksz_bits = 3;
923 data.blocks = 1;
924 data.flags = MMC_DATA_READ;
925 data.sg = &sg;
926 data.sg_len = 1;
927
928 memset(&mrq, 0, sizeof(struct mmc_request));
929
930 mrq.cmd = &cmd;
931 mrq.data = &data;
932
933 sg_init_one(&sg, (u8*)card->raw_scr, 8);
934
935 err = mmc_wait_for_req(host, &mrq);
936 if (err != MMC_ERR_NONE) {
937 mmc_card_set_dead(card);
938 continue;
939 }
940
941 card->raw_scr[0] = ntohl(card->raw_scr[0]);
942 card->raw_scr[1] = ntohl(card->raw_scr[1]);
943
944 mmc_decode_scr(card);
945 }
946
947 mmc_deselect_cards(host);
948}
949
608static unsigned int mmc_calculate_clock(struct mmc_host *host) 950static unsigned int mmc_calculate_clock(struct mmc_host *host)
609{ 951{
610 struct mmc_card *card; 952 struct mmc_card *card;
@@ -657,12 +999,24 @@ static void mmc_setup(struct mmc_host *host)
657 int err; 999 int err;
658 u32 ocr; 1000 u32 ocr;
659 1001
1002 host->mode = MMC_MODE_SD;
1003
660 mmc_power_up(host); 1004 mmc_power_up(host);
661 mmc_idle_cards(host); 1005 mmc_idle_cards(host);
662 1006
663 err = mmc_send_op_cond(host, 0, &ocr); 1007 err = mmc_send_app_op_cond(host, 0, &ocr);
664 if (err != MMC_ERR_NONE) 1008
665 return; 1009 /*
1010 * If we fail to detect any SD cards then try
1011 * searching for MMC cards.
1012 */
1013 if (err != MMC_ERR_NONE) {
1014 host->mode = MMC_MODE_MMC;
1015
1016 err = mmc_send_op_cond(host, 0, &ocr);
1017 if (err != MMC_ERR_NONE)
1018 return;
1019 }
666 1020
667 host->ocr = mmc_select_voltage(host, ocr); 1021 host->ocr = mmc_select_voltage(host, ocr);
668 1022
@@ -702,7 +1056,10 @@ static void mmc_setup(struct mmc_host *host)
702 * all get the idea that they should be ready for CMD2. 1056 * all get the idea that they should be ready for CMD2.
703 * (My SanDisk card seems to need this.) 1057 * (My SanDisk card seems to need this.)
704 */ 1058 */
705 mmc_send_op_cond(host, host->ocr, NULL); 1059 if (host->mode == MMC_MODE_SD)
1060 mmc_send_app_op_cond(host, host->ocr, NULL);
1061 else
1062 mmc_send_op_cond(host, host->ocr, NULL);
706 1063
707 mmc_discover_cards(host); 1064 mmc_discover_cards(host);
708 1065
@@ -713,6 +1070,9 @@ static void mmc_setup(struct mmc_host *host)
713 host->ops->set_ios(host, &host->ios); 1070 host->ops->set_ios(host, &host->ios);
714 1071
715 mmc_read_csds(host); 1072 mmc_read_csds(host);
1073
1074 if (host->mode == MMC_MODE_SD)
1075 mmc_read_scrs(host);
716} 1076}
717 1077
718 1078
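
The SD support added above decodes the 128-bit CID/CSD responses (and the 64-bit SCR) with the driver's UNSTUFF_BITS() macro, which treats resp[0] as the most significant word of the response. A stand-alone sketch of the same extraction, pulling the SD manufacturer ID and the date fields the new code reads at bit offsets 120, 12 and 8 (including the +2000 year offset); the helper is a simplified re-implementation and the sample CID is hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for UNSTUFF_BITS(): resp[0] holds bits 96..127. */
	static uint32_t unstuff(const uint32_t resp[4], unsigned start, unsigned size)
	{
		unsigned word  = 3 - (start / 32);
		unsigned shift = start & 31;
		uint64_t bits  = resp[word];

		if (shift + size > 32)			/* field spans two words */
			bits |= (uint64_t)resp[word - 1] << 32;
		return (uint32_t)((bits >> shift) & ((1ULL << size) - 1));
	}

	int main(void)
	{
		/* Hypothetical raw SD CID, most significant word first. */
		const uint32_t cid[4] = { 0x03534453, 0x55303847,
					  0x8012ab34, 0xd1005701 };

		printf("manfid 0x%02x, date %u/%u\n",
		       unstuff(cid, 120, 8),		/* manufacturer ID */
		       unstuff(cid, 8, 4),		/* month           */
		       unstuff(cid, 12, 8) + 2000);	/* year, SD offset */
		return 0;
	}
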
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index d4eee99c2bf6..fa83f15fdf16 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -95,6 +95,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
95 if (md->usage == 2) 95 if (md->usage == 2)
96 check_disk_change(inode->i_bdev); 96 check_disk_change(inode->i_bdev);
97 ret = 0; 97 ret = 0;
98
99 if ((filp->f_mode & FMODE_WRITE) &&
100 mmc_card_readonly(md->queue.card))
101 ret = -EROFS;
98 } 102 }
99 103
100 return ret; 104 return ret;
@@ -403,9 +407,10 @@ static int mmc_blk_probe(struct mmc_card *card)
403 if (err) 407 if (err)
404 goto out; 408 goto out;
405 409
406 printk(KERN_INFO "%s: %s %s %dKiB\n", 410 printk(KERN_INFO "%s: %s %s %dKiB %s\n",
407 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 411 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
408 (card->csd.capacity << card->csd.read_blkbits) / 1024); 412 (card->csd.capacity << card->csd.read_blkbits) / 1024,
413 mmc_card_readonly(card)?"(ro)":"");
409 414
410 mmc_set_drvdata(card, md); 415 mmc_set_drvdata(card, md);
411 add_disk(md->disk); 416 add_disk(md->disk);
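
The new capacity/read-only line printed above multiplies csd.capacity (a block count) by the block size implied by read_blkbits. With the CSD formula from mmc_decode_csd(), capacity = (1 + m) << (e + 2), a hypothetical 1 GiB card works out as follows:

	/* Stand-alone check of the KiB figure printed by mmc_blk_probe().
	 * Hypothetical CSD: C_SIZE m = 4095, C_SIZE_MULT e = 7, READ_BL_LEN 9. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int m = 4095, e = 7, read_blkbits = 9;
		unsigned long capacity = (1 + m) << (e + 2);		/* blocks */
		unsigned long kib = (capacity << read_blkbits) / 1024;

		printf("%lu blocks of %u bytes -> %lu KiB\n",
		       capacity, 1u << read_blkbits, kib);
		return 0;
	}
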
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index ad8949810fc5..3f4a66ca9555 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -34,6 +34,7 @@ MMC_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
34 card->raw_cid[2], card->raw_cid[3]); 34 card->raw_cid[2], card->raw_cid[3]);
35MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 35MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
36 card->raw_csd[2], card->raw_csd[3]); 36 card->raw_csd[2], card->raw_csd[3]);
37MMC_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
37MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 38MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
38MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev); 39MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
39MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev); 40MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
@@ -57,6 +58,8 @@ static struct device_attribute mmc_dev_attrs[] = {
57 __ATTR_NULL 58 __ATTR_NULL
58}; 59};
59 60
61static struct device_attribute mmc_dev_attr_scr = MMC_ATTR_RO(scr);
62
60 63
61static void mmc_release_card(struct device *dev) 64static void mmc_release_card(struct device *dev)
62{ 65{
@@ -207,10 +210,20 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
207 */ 210 */
208int mmc_register_card(struct mmc_card *card) 211int mmc_register_card(struct mmc_card *card)
209{ 212{
213 int ret;
214
210 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), 215 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
211 "%s:%04x", mmc_hostname(card->host), card->rca); 216 "%s:%04x", mmc_hostname(card->host), card->rca);
212 217
213 return device_add(&card->dev); 218 ret = device_add(&card->dev);
219 if (ret == 0) {
220 if (mmc_card_sd(card)) {
221 ret = device_create_file(&card->dev, &mmc_dev_attr_scr);
222 if (ret)
223 device_del(&card->dev);
224 }
225 }
226 return ret;
214} 227}
215 228
216/* 229/*
@@ -219,8 +232,12 @@ int mmc_register_card(struct mmc_card *card)
219 */ 232 */
220void mmc_remove_card(struct mmc_card *card) 233void mmc_remove_card(struct mmc_card *card)
221{ 234{
222 if (mmc_card_present(card)) 235 if (mmc_card_present(card)) {
236 if (mmc_card_sd(card))
237 device_remove_file(&card->dev, &mmc_dev_attr_scr);
238
223 device_del(&card->dev); 239 device_del(&card->dev);
240 }
224 241
225 put_device(&card->dev); 242 put_device(&card->dev);
226} 243}
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index b78beb1b0159..e99a53b09e32 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -362,6 +362,16 @@ static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
362 pxamci_start_cmd(host, mrq->cmd, cmdat); 362 pxamci_start_cmd(host, mrq->cmd, cmdat);
363} 363}
364 364
365static int pxamci_get_ro(struct mmc_host *mmc)
366{
367 struct pxamci_host *host = mmc_priv(mmc);
368
369 if (host->pdata && host->pdata->get_ro)
370 return host->pdata->get_ro(mmc->dev);
371 /* Host doesn't support read only detection so assume writeable */
372 return 0;
373}
374
365static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 375static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
366{ 376{
367 struct pxamci_host *host = mmc_priv(mmc); 377 struct pxamci_host *host = mmc_priv(mmc);
@@ -401,6 +411,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
401 411
402static struct mmc_host_ops pxamci_ops = { 412static struct mmc_host_ops pxamci_ops = {
403 .request = pxamci_request, 413 .request = pxamci_request,
414 .get_ro = pxamci_get_ro,
404 .set_ios = pxamci_set_ios, 415 .set_ios = pxamci_set_ios,
405}; 416};
406 417
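
pxamci above provides the new get_ro() host operation that the SD discovery code in the mmc.c hunk consults: a missing callback means "warn and assume write-enabled", a non-zero return marks the card read-only, and mmc_block then refuses writable opens with -EROFS. A small stand-alone model of that decision, with hypothetical types and names:

	/*
	 * Stand-alone model (hypothetical names) of how the core uses the new
	 * get_ro() host operation: no callback -> warn and assume writable;
	 * non-zero return -> treat the card as read-only, which mmc_block
	 * later maps to -EROFS on write opens.
	 */
	#include <stdio.h>

	struct host_ops { int (*get_ro)(void *host); };

	static int card_is_readonly(struct host_ops *ops, void *host)
	{
		if (!ops->get_ro) {
			fprintf(stderr, "host does not support reading "
				"read-only switch, assuming write-enable\n");
			return 0;
		}
		return ops->get_ro(host) ? 1 : 0;
	}

	static int always_protected(void *host) { (void)host; return 1; }

	int main(void)
	{
		struct host_ops no_ro  = { .get_ro = NULL };
		struct host_ops has_ro = { .get_ro = always_protected };

		printf("no callback : ro=%d\n", card_is_readonly(&no_ro, NULL));
		printf("wp switch on: ro=%d\n", card_is_readonly(&has_ro, NULL));
		return 0;
	}
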
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 402c2d661fb2..dec01d38c782 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -42,7 +42,7 @@
42#include "wbsd.h" 42#include "wbsd.h"
43 43
44#define DRIVER_NAME "wbsd" 44#define DRIVER_NAME "wbsd"
45#define DRIVER_VERSION "1.3" 45#define DRIVER_VERSION "1.4"
46 46
47#ifdef CONFIG_MMC_DEBUG 47#ifdef CONFIG_MMC_DEBUG
48#define DBG(x...) \ 48#define DBG(x...) \
@@ -720,11 +720,28 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
720 * calculate CRC. 720 * calculate CRC.
721 * 721 *
722 * Space for CRC must be included in the size. 722 * Space for CRC must be included in the size.
723 * Two bytes are needed for each data line.
723 */ 724 */
724 blksize = (1 << data->blksz_bits) + 2; 725 if (host->bus_width == MMC_BUS_WIDTH_1)
726 {
727 blksize = (1 << data->blksz_bits) + 2;
728
729 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
730 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
731 }
732 else if (host->bus_width == MMC_BUS_WIDTH_4)
733 {
734 blksize = (1 << data->blksz_bits) + 2 * 4;
725 735
726 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); 736 wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0)
727 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); 737 | WBSD_DATA_WIDTH);
738 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
739 }
740 else
741 {
742 data->error = MMC_ERR_INVALID;
743 return;
744 }
728 745
729 /* 746 /*
730 * Clear the FIFO. This is needed even for DMA 747 * Clear the FIFO. This is needed even for DMA
@@ -960,8 +977,9 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
960 struct wbsd_host* host = mmc_priv(mmc); 977 struct wbsd_host* host = mmc_priv(mmc);
961 u8 clk, setup, pwr; 978 u8 clk, setup, pwr;
962 979
963 DBGF("clock %uHz busmode %u powermode %u Vdd %u\n", 980 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
964 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd); 981 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
982 ios->vdd, ios->bus_width);
965 983
966 spin_lock_bh(&host->lock); 984 spin_lock_bh(&host->lock);
967 985
@@ -1003,30 +1021,63 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
1003 1021
1004 /* 1022 /*
1005 * MMC cards need to have pin 1 high during init. 1023 * MMC cards need to have pin 1 high during init.
1006 * Init time corresponds rather nicely with the bus mode.
1007 * It wreaks havoc with the card detection though so 1024 * It wreaks havoc with the card detection though so
1008 * that needs to be disabed. 1025 * that needs to be disabled.
1009 */ 1026 */
1010 setup = wbsd_read_index(host, WBSD_IDX_SETUP); 1027 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
1011 if ((ios->power_mode == MMC_POWER_ON) && 1028 if (ios->chip_select == MMC_CS_HIGH)
1012 (ios->bus_mode == MMC_BUSMODE_OPENDRAIN))
1013 { 1029 {
1030 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
1014 setup |= WBSD_DAT3_H; 1031 setup |= WBSD_DAT3_H;
1015 host->flags |= WBSD_FIGNORE_DETECT; 1032 host->flags |= WBSD_FIGNORE_DETECT;
1016 } 1033 }
1017 else 1034 else
1018 { 1035 {
1019 setup &= ~WBSD_DAT3_H; 1036 setup &= ~WBSD_DAT3_H;
1020 host->flags &= ~WBSD_FIGNORE_DETECT; 1037
1038 /*
1039 * We cannot resume card detection immediatly
1040 * because of capacitance and delays in the chip.
1041 */
1042 mod_timer(&host->ignore_timer, jiffies + HZ/100);
1021 } 1043 }
1022 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 1044 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1023 1045
1046 /*
1047 * Store bus width for later. Will be used when
1048 * setting up the data transfer.
1049 */
1050 host->bus_width = ios->bus_width;
1051
1052 spin_unlock_bh(&host->lock);
1053}
1054
1055static int wbsd_get_ro(struct mmc_host* mmc)
1056{
1057 struct wbsd_host* host = mmc_priv(mmc);
1058 u8 csr;
1059
1060 spin_lock_bh(&host->lock);
1061
1062 csr = inb(host->base + WBSD_CSR);
1063 csr |= WBSD_MSLED;
1064 outb(csr, host->base + WBSD_CSR);
1065
1066 mdelay(1);
1067
1068 csr = inb(host->base + WBSD_CSR);
1069 csr &= ~WBSD_MSLED;
1070 outb(csr, host->base + WBSD_CSR);
1071
1024 spin_unlock_bh(&host->lock); 1072 spin_unlock_bh(&host->lock);
1073
1074 return csr & WBSD_WRPT;
1025} 1075}
1026 1076
1027static struct mmc_host_ops wbsd_ops = { 1077static struct mmc_host_ops wbsd_ops = {
1028 .request = wbsd_request, 1078 .request = wbsd_request,
1029 .set_ios = wbsd_set_ios, 1079 .set_ios = wbsd_set_ios,
1080 .get_ro = wbsd_get_ro,
1030}; 1081};
1031 1082
1032/*****************************************************************************\ 1083/*****************************************************************************\
@@ -1036,6 +1087,31 @@ static struct mmc_host_ops wbsd_ops = {
1036\*****************************************************************************/ 1087\*****************************************************************************/
1037 1088
1038/* 1089/*
1090 * Helper function to reset detection ignore
1091 */
1092
1093static void wbsd_reset_ignore(unsigned long data)
1094{
1095 struct wbsd_host *host = (struct wbsd_host*)data;
1096
1097 BUG_ON(host == NULL);
1098
1099 DBG("Resetting card detection ignore\n");
1100
1101 spin_lock_bh(&host->lock);
1102
1103 host->flags &= ~WBSD_FIGNORE_DETECT;
1104
1105 /*
1106 * Card status might have changed during the
1107 * blackout.
1108 */
1109 tasklet_schedule(&host->card_tasklet);
1110
1111 spin_unlock_bh(&host->lock);
1112}
1113
1114/*
1039 * Helper function for card detection 1115 * Helper function for card detection
1040 */ 1116 */
1041static void wbsd_detect_card(unsigned long data) 1117static void wbsd_detect_card(unsigned long data)
@@ -1097,7 +1173,7 @@ static void wbsd_tasklet_card(unsigned long param)
1097 * Delay card detection to allow electrical connections 1173 * Delay card detection to allow electrical connections
1098 * to stabilise. 1174 * to stabilise.
1099 */ 1175 */
1100 mod_timer(&host->timer, jiffies + HZ/2); 1176 mod_timer(&host->detect_timer, jiffies + HZ/2);
1101 } 1177 }
1102 1178
1103 spin_unlock(&host->lock); 1179 spin_unlock(&host->lock);
@@ -1124,6 +1200,8 @@ static void wbsd_tasklet_card(unsigned long param)
1124 1200
1125 mmc_detect_change(host->mmc); 1201 mmc_detect_change(host->mmc);
1126 } 1202 }
1203 else
1204 spin_unlock(&host->lock);
1127} 1205}
1128 1206
1129static void wbsd_tasklet_fifo(unsigned long param) 1207static void wbsd_tasklet_fifo(unsigned long param)
@@ -1324,15 +1402,20 @@ static int __devinit wbsd_alloc_mmc(struct device* dev)
1324 mmc->f_min = 375000; 1402 mmc->f_min = 375000;
1325 mmc->f_max = 24000000; 1403 mmc->f_max = 24000000;
1326 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; 1404 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1405 mmc->caps = MMC_CAP_4_BIT_DATA;
1327 1406
1328 spin_lock_init(&host->lock); 1407 spin_lock_init(&host->lock);
1329 1408
1330 /* 1409 /*
1331 * Set up detection timer 1410 * Set up timers
1332 */ 1411 */
1333 init_timer(&host->timer); 1412 init_timer(&host->detect_timer);
1334 host->timer.data = (unsigned long)host; 1413 host->detect_timer.data = (unsigned long)host;
1335 host->timer.function = wbsd_detect_card; 1414 host->detect_timer.function = wbsd_detect_card;
1415
1416 init_timer(&host->ignore_timer);
1417 host->ignore_timer.data = (unsigned long)host;
1418 host->ignore_timer.function = wbsd_reset_ignore;
1336 1419
1337 /* 1420 /*
1338 * Maximum number of segments. Worst case is one sector per segment 1421 * Maximum number of segments. Worst case is one sector per segment
@@ -1370,7 +1453,8 @@ static void __devexit wbsd_free_mmc(struct device* dev)
1370 host = mmc_priv(mmc); 1453 host = mmc_priv(mmc);
1371 BUG_ON(host == NULL); 1454 BUG_ON(host == NULL);
1372 1455
1373 del_timer_sync(&host->timer); 1456 del_timer_sync(&host->ignore_timer);
1457 del_timer_sync(&host->detect_timer);
1374 1458
1375 mmc_free_host(mmc); 1459 mmc_free_host(mmc);
1376 1460
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
index 661a9f6a6e6f..9005b5241b3c 100644
--- a/drivers/mmc/wbsd.h
+++ b/drivers/mmc/wbsd.h
@@ -106,6 +106,8 @@
106#define WBSD_CLK_16M 0x02 106#define WBSD_CLK_16M 0x02
107#define WBSD_CLK_24M 0x03 107#define WBSD_CLK_24M 0x03
108 108
109#define WBSD_DATA_WIDTH 0x01
110
109#define WBSD_DAT3_H 0x08 111#define WBSD_DAT3_H 0x08
110#define WBSD_FIFO_RESET 0x04 112#define WBSD_FIFO_RESET 0x04
111#define WBSD_SOFT_RESET 0x02 113#define WBSD_SOFT_RESET 0x02
@@ -164,6 +166,7 @@ struct wbsd_host
164 int firsterr; /* See fifo functions */ 166 int firsterr; /* See fifo functions */
165 167
166 u8 clk; /* Current clock speed */ 168 u8 clk; /* Current clock speed */
169 unsigned char bus_width; /* Current bus width */
167 170
168 int config; /* Config port */ 171 int config; /* Config port */
169 u8 unlock_code; /* Code to unlock config */ 172 u8 unlock_code; /* Code to unlock config */
@@ -181,5 +184,6 @@ struct wbsd_host
181 struct tasklet_struct finish_tasklet; 184 struct tasklet_struct finish_tasklet;
182 struct tasklet_struct block_tasklet; 185 struct tasklet_struct block_tasklet;
183 186
184 struct timer_list timer; /* Card detection timer */ 187 struct timer_list detect_timer; /* Card detection timer */
188 struct timer_list ignore_timer; /* Ignore detection timer */
185}; 189};
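
The wbsd changes size the controller's block register as payload plus CRC, two CRC bytes per active data line (and set WBSD_DATA_WIDTH in the MSB index register for the 4-bit case). For the usual 512-byte block (blksz_bits = 9) that gives 514 bytes on a 1-bit bus and 520 bytes on a 4-bit bus:

	/* Stand-alone check of the block sizing used in wbsd_prepare_data():
	 * payload plus two CRC bytes per data line; blksz_bits = 9 -> 512 B. */
	#include <stdio.h>

	static unsigned int wbsd_blksize(unsigned int blksz_bits, unsigned int lines)
	{
		return (1u << blksz_bits) + 2 * lines;
	}

	int main(void)
	{
		printf("1-bit bus: %u bytes\n", wbsd_blksize(9, 1));	/* 514 */
		printf("4-bit bus: %u bytes\n", wbsd_blksize(9, 4));	/* 520 */
		return 0;
	}
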
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index eee5115658c8..04e54318bc6a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -526,6 +526,7 @@ static void nand_wait_ready(struct mtd_info *mtd)
526 do { 526 do {
527 if (this->dev_ready(mtd)) 527 if (this->dev_ready(mtd))
528 return; 528 return;
529 touch_softlockup_watchdog();
529 } while (time_before(jiffies, timeo)); 530 } while (time_before(jiffies, timeo));
530} 531}
531 532
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 07746b95fd83..455ba915ede7 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -973,6 +973,11 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
973 netif_device_detach(dev); 973 netif_device_detach(dev);
974 vortex_down(dev, 1); 974 vortex_down(dev, 1);
975 } 975 }
976 pci_save_state(pdev);
977 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
978 free_irq(dev->irq, dev);
979 pci_disable_device(pdev);
980 pci_set_power_state(pdev, pci_choose_state(pdev, state));
976 } 981 }
977 return 0; 982 return 0;
978} 983}
@@ -980,8 +985,19 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
980static int vortex_resume (struct pci_dev *pdev) 985static int vortex_resume (struct pci_dev *pdev)
981{ 986{
982 struct net_device *dev = pci_get_drvdata(pdev); 987 struct net_device *dev = pci_get_drvdata(pdev);
988 struct vortex_private *vp = netdev_priv(dev);
983 989
984 if (dev && dev->priv) { 990 if (dev && vp) {
991 pci_set_power_state(pdev, PCI_D0);
992 pci_restore_state(pdev);
993 pci_enable_device(pdev);
994 pci_set_master(pdev);
995 if (request_irq(dev->irq, vp->full_bus_master_rx ?
996 &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev)) {
997 printk(KERN_WARNING "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
998 pci_disable_device(pdev);
999 return -EBUSY;
1000 }
985 if (netif_running(dev)) { 1001 if (netif_running(dev)) {
986 vortex_up(dev); 1002 vortex_up(dev);
987 netif_device_attach(dev); 1003 netif_device_attach(dev);
@@ -1873,6 +1889,7 @@ vortex_timer(unsigned long data)
1873 { 1889 {
1874 spin_lock_bh(&vp->lock); 1890 spin_lock_bh(&vp->lock);
1875 mii_status = mdio_read(dev, vp->phys[0], 1); 1891 mii_status = mdio_read(dev, vp->phys[0], 1);
1892 mii_status = mdio_read(dev, vp->phys[0], 1);
1876 ok = 1; 1893 ok = 1;
1877 if (vortex_debug > 2) 1894 if (vortex_debug > 2)
1878 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", 1895 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
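
The 3c59x hunks above fill in the PCI power-management half of suspend/resume: save config space, disable wake and release the IRQ before dropping to the target D-state, then on resume power up, restore state, re-enable the device and reclaim the IRQ before bringing the interface back. A schematic of that ordering, using only the calls the patch adds; it is not compilable on its own and the foo_* names are placeholders:

	/* Schematic only; foo_* names and the "foo" IRQ label are placeholders. */
	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		/* ...netif_device_detach()/foo_down() first... */
		pci_save_state(pdev);
		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
		free_irq(pdev->irq, pdev);	/* no handler while powered down */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}

	static int foo_resume(struct pci_dev *pdev)
	{
		pci_set_power_state(pdev, PCI_D0);	/* power up first */
		pci_restore_state(pdev);
		pci_enable_device(pdev);
		pci_set_master(pdev);
		if (request_irq(pdev->irq, foo_interrupt, SA_SHIRQ, "foo", pdev)) {
			pci_disable_device(pdev);
			return -EBUSY;
		}
		/* ...foo_up()/netif_device_attach()... */
		return 0;
	}
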
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 79e8aa6f2b9e..6bb9232514b4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -397,7 +397,7 @@ config SUN3LANCE
397 If you're not building a kernel for a Sun 3, say N. 397 If you're not building a kernel for a Sun 3, say N.
398 398
399config SUN3_82586 399config SUN3_82586
400 tristate "Sun3 on-board Intel 82586 support" 400 bool "Sun3 on-board Intel 82586 support"
401 depends on NET_ETHERNET && SUN3 401 depends on NET_ETHERNET && SUN3
402 help 402 help
403 This driver enables support for the on-board Intel 82586 based 403 This driver enables support for the on-board Intel 82586 based
@@ -447,7 +447,7 @@ config NET_SB1250_MAC
447 447
448config SGI_IOC3_ETH 448config SGI_IOC3_ETH
449 bool "SGI IOC3 Ethernet" 449 bool "SGI IOC3 Ethernet"
450 depends on NET_ETHERNET && PCI && SGI_IP27 450 depends on NET_ETHERNET && PCI && SGI_IP27 && BROKEN
451 select CRC32 451 select CRC32
452 select MII 452 select MII
453 help 453 help
@@ -1923,6 +1923,20 @@ config R8169_VLAN
1923 1923
1924 If in doubt, say Y. 1924 If in doubt, say Y.
1925 1925
1926config SIS190
1927 tristate "SiS190/SiS191 gigabit ethernet support"
1928 depends on PCI
1929 select CRC32
1930 select MII
1931 ---help---
1932 Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
1933 a SiS 191 PCI Gigabit Ethernet adapter. Both are expected to
1934 appear in lan on motherboard designs which are based on SiS 965
1935 and SiS 966 south bridge.
1936
1937 To compile this driver as a module, choose M here: the module
1938 will be called sis190. This is recommended.
1939
1926config SKGE 1940config SKGE
1927 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" 1941 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
1928 depends on PCI && EXPERIMENTAL 1942 depends on PCI && EXPERIMENTAL
@@ -2044,6 +2058,13 @@ config BNX2
2044 To compile this driver as a module, choose M here: the module 2058 To compile this driver as a module, choose M here: the module
2045 will be called bnx2. This is recommended. 2059 will be called bnx2. This is recommended.
2046 2060
2061config SPIDER_NET
2062 tristate "Spider Gigabit Ethernet driver"
2063 depends on PCI && PPC_BPA
2064 help
2065 This driver supports the Gigabit Ethernet chips present on the
2066 Cell Processor-Based Blades from IBM.
2067
2047config GIANFAR 2068config GIANFAR
2048 tristate "Gianfar Ethernet" 2069 tristate "Gianfar Ethernet"
2049 depends on 85xx || 83xx 2070 depends on 85xx || 83xx
@@ -2093,6 +2114,25 @@ endmenu
2093menu "Ethernet (10000 Mbit)" 2114menu "Ethernet (10000 Mbit)"
2094 depends on !UML 2115 depends on !UML
2095 2116
2117config CHELSIO_T1
2118 tristate "Chelsio 10Gb Ethernet support"
2119 depends on PCI
2120 help
 2121	  This driver supports the Chelsio N110 and N210 10Gb Ethernet
2122 cards. More information about adapter features and performance
2123 tuning is in <file:Documentation/networking/cxgb.txt>.
2124
2125 For general information about Chelsio and our products, visit
2126 our website at <http://www.chelsio.com>.
2127
2128 For customer support, please visit our customer support page at
2129 <http://www.chelsio.com/support.htm>.
2130
2131 Please send feedback to <linux-bugs@chelsio.com>.
2132
2133 To compile this driver as a module, choose M here: the module
2134 will be called cxgb.
2135
2096config IXGB 2136config IXGB
2097 tristate "Intel(R) PRO/10GbE support" 2137 tristate "Intel(R) PRO/10GbE support"
2098 depends on PCI 2138 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a369ae284a9a..8645c843cf4d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ endif
9obj-$(CONFIG_E1000) += e1000/ 9obj-$(CONFIG_E1000) += e1000/
10obj-$(CONFIG_IBM_EMAC) += ibm_emac/ 10obj-$(CONFIG_IBM_EMAC) += ibm_emac/
11obj-$(CONFIG_IXGB) += ixgb/ 11obj-$(CONFIG_IXGB) += ixgb/
12obj-$(CONFIG_CHELSIO_T1) += chelsio/
12obj-$(CONFIG_BONDING) += bonding/ 13obj-$(CONFIG_BONDING) += bonding/
13obj-$(CONFIG_GIANFAR) += gianfar_driver.o 14obj-$(CONFIG_GIANFAR) += gianfar_driver.o
14 15
@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
42obj-$(CONFIG_E100) += e100.o 43obj-$(CONFIG_E100) += e100.o
43obj-$(CONFIG_TLAN) += tlan.o 44obj-$(CONFIG_TLAN) += tlan.o
44obj-$(CONFIG_EPIC100) += epic100.o 45obj-$(CONFIG_EPIC100) += epic100.o
46obj-$(CONFIG_SIS190) += sis190.o
45obj-$(CONFIG_SIS900) += sis900.o 47obj-$(CONFIG_SIS900) += sis900.o
46obj-$(CONFIG_YELLOWFIN) += yellowfin.o 48obj-$(CONFIG_YELLOWFIN) += yellowfin.o
47obj-$(CONFIG_ACENIC) += acenic.o 49obj-$(CONFIG_ACENIC) += acenic.o
@@ -52,6 +54,8 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
52obj-$(CONFIG_FEALNX) += fealnx.o 54obj-$(CONFIG_FEALNX) += fealnx.o
53obj-$(CONFIG_TIGON3) += tg3.o 55obj-$(CONFIG_TIGON3) += tg3.o
54obj-$(CONFIG_BNX2) += bnx2.o 56obj-$(CONFIG_BNX2) += bnx2.o
57spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o
58obj-$(CONFIG_SPIDER_NET) += spidernet.o
55obj-$(CONFIG_TC35815) += tc35815.o 59obj-$(CONFIG_TC35815) += tc35815.o
56obj-$(CONFIG_SKGE) += skge.o 60obj-$(CONFIG_SKGE) += skge.o
57obj-$(CONFIG_SK98LIN) += sk98lin/ 61obj-$(CONFIG_SK98LIN) += sk98lin/
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 91791ba37769..8a0af5453e21 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -275,7 +275,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
275 return 0; 275 return 0;
276out2: 276out2:
277 if (ei_status.reg0) 277 if (ei_status.reg0)
278 iounmap((void *)dev->mem_start); 278 iounmap(ei_status.mem);
279out1: 279out1:
280 free_irq(dev->irq, dev); 280 free_irq(dev->irq, dev);
281out: 281out:
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 4f9f69e22c1b..12ef52c193a3 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -597,7 +597,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
597 struct ArcProto *proto; 597 struct ArcProto *proto;
598 int txbuf; 598 int txbuf;
599 unsigned long flags; 599 unsigned long flags;
600 int freeskb = 0; 600 int freeskb, retval;
601 601
602 BUGMSG(D_DURING, 602 BUGMSG(D_DURING,
603 "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n", 603 "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
@@ -615,7 +615,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
615 if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) { 615 if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
616 BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n"); 616 BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n");
617 dev_kfree_skb(skb); 617 dev_kfree_skb(skb);
618 return 0; /* don't try again */ 618 return NETDEV_TX_OK; /* don't try again */
619 } 619 }
620 620
621 /* We're busy transmitting a packet... */ 621 /* We're busy transmitting a packet... */
@@ -623,8 +623,11 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
623 623
624 spin_lock_irqsave(&lp->lock, flags); 624 spin_lock_irqsave(&lp->lock, flags);
625 AINTMASK(0); 625 AINTMASK(0);
626 626 if(lp->next_tx == -1)
627 txbuf = get_arcbuf(dev); 627 txbuf = get_arcbuf(dev);
628 else {
629 txbuf = -1;
630 }
628 if (txbuf != -1) { 631 if (txbuf != -1) {
629 if (proto->prepare_tx(dev, pkt, skb->len, txbuf) && 632 if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
630 !proto->ack_tx) { 633 !proto->ack_tx) {
@@ -638,6 +641,8 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
638 lp->outgoing.skb = skb; 641 lp->outgoing.skb = skb;
639 lp->outgoing.pkt = pkt; 642 lp->outgoing.pkt = pkt;
640 643
644 freeskb = 0;
645
641 if (proto->continue_tx && 646 if (proto->continue_tx &&
642 proto->continue_tx(dev, txbuf)) { 647 proto->continue_tx(dev, txbuf)) {
643 BUGMSG(D_NORMAL, 648 BUGMSG(D_NORMAL,
@@ -645,10 +650,12 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
645 "(proto='%c')\n", proto->suffix); 650 "(proto='%c')\n", proto->suffix);
646 } 651 }
647 } 652 }
648 653 retval = NETDEV_TX_OK;
654 dev->trans_start = jiffies;
649 lp->next_tx = txbuf; 655 lp->next_tx = txbuf;
650 } else { 656 } else {
651 freeskb = 1; 657 retval = NETDEV_TX_BUSY;
658 freeskb = 0;
652 } 659 }
653 660
654 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 661 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
@@ -664,7 +671,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
664 if (freeskb) { 671 if (freeskb) {
665 dev_kfree_skb(skb); 672 dev_kfree_skb(skb);
666 } 673 }
667 return 0; /* no need to try again */ 674 return retval; /* no need to try again */
668} 675}
669 676
670 677
@@ -690,7 +697,6 @@ static int go_tx(struct net_device *dev)
690 /* start sending */ 697 /* start sending */
691 ACOMMAND(TXcmd | (lp->cur_tx << 3)); 698 ACOMMAND(TXcmd | (lp->cur_tx << 3));
692 699
693 dev->trans_start = jiffies;
694 lp->stats.tx_packets++; 700 lp->stats.tx_packets++;
695 lp->lasttrans_dest = lp->lastload_dest; 701 lp->lasttrans_dest = lp->lastload_dest;
696 lp->lastload_dest = 0; 702 lp->lastload_dest = 0;
@@ -917,6 +923,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
917 923
918 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", 924 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
919 status); 925 status);
926 /* MYRECON bit is at bit 7 of diagstatus */
 927	 if (diagstatus & 0x80)
 928	 BUGMSG(D_RECON, "Put out that recon myself\n");
920 929
921 /* is the RECON info empty or old? */ 930 /* is the RECON info empty or old? */
922 if (!lp->first_recon || !lp->last_recon || 931 if (!lp->first_recon || !lp->last_recon ||
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index ad011214c7f2..e01b6a78ec63 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -235,7 +235,7 @@ struct lance_private {
235#define MEM lp->mem 235#define MEM lp->mem
236#define DREG IO->data 236#define DREG IO->data
237#define AREG IO->addr 237#define AREG IO->addr
238#define REGA(a) ( AREG = (a), DREG ) 238#define REGA(a) (*( AREG = (a), &DREG ))
239 239
240/* Definitions for packet buffer access: */ 240/* Definitions for packet buffer access: */
241#define PKT_BUF_SZ 1544 241#define PKT_BUF_SZ 1544
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8acc655ec1e8..55a72c7ad001 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -14,8 +14,8 @@
14 14
15#define DRV_MODULE_NAME "bnx2" 15#define DRV_MODULE_NAME "bnx2"
16#define PFX DRV_MODULE_NAME ": " 16#define PFX DRV_MODULE_NAME ": "
17#define DRV_MODULE_VERSION "1.2.19" 17#define DRV_MODULE_VERSION "1.2.20"
18#define DRV_MODULE_RELDATE "May 23, 2005" 18#define DRV_MODULE_RELDATE "August 22, 2005"
19 19
20#define RUN_AT(x) (jiffies + (x)) 20#define RUN_AT(x) (jiffies + (x))
21 21
@@ -52,7 +52,6 @@ static struct {
52 { "HP NC370i Multifunction Gigabit Server Adapter" }, 52 { "HP NC370i Multifunction Gigabit Server Adapter" },
53 { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 53 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
54 { "HP NC370F Multifunction Gigabit Server Adapter" }, 54 { "HP NC370F Multifunction Gigabit Server Adapter" },
55 { 0 },
56 }; 55 };
57 56
58static struct pci_device_id bnx2_pci_tbl[] = { 57static struct pci_device_id bnx2_pci_tbl[] = {
@@ -108,6 +107,15 @@ static struct flash_spec flash_table[] =
108 107
109MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 108MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
110 109
110static inline u32 bnx2_tx_avail(struct bnx2 *bp)
111{
112 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
113
114 if (diff > MAX_TX_DESC_CNT)
115 diff = (diff & MAX_TX_DESC_CNT) - 1;
116 return (bp->tx_ring_size - diff);
117}
118
111static u32 119static u32
112bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) 120bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
113{ 121{
@@ -807,7 +815,19 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
807 bnx2_write_phy(bp, MII_ADVERTISE, new_adv); 815 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
808 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | 816 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
809 BMCR_ANENABLE); 817 BMCR_ANENABLE);
810 bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval; 818 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
819 /* Speed up link-up time when the link partner
 820	 * does not autonegotiate, which is very common
 821	 * in blade servers. Some blade servers use
 822	 * IPMI for keyboard input and it's important
823 * to minimize link disruptions. Autoneg. involves
824 * exchanging base pages plus 3 next pages and
825 * normally completes in about 120 msec.
826 */
827 bp->current_interval = SERDES_AN_TIMEOUT;
828 bp->serdes_an_pending = 1;
829 mod_timer(&bp->timer, jiffies + bp->current_interval);
830 }
811 } 831 }
812 832
813 return 0; 833 return 0;
@@ -1327,22 +1347,17 @@ bnx2_tx_int(struct bnx2 *bp)
1327 } 1347 }
1328 } 1348 }
1329 1349
1330 atomic_add(tx_free_bd, &bp->tx_avail_bd); 1350 bp->tx_cons = sw_cons;
1331 1351
1332 if (unlikely(netif_queue_stopped(bp->dev))) { 1352 if (unlikely(netif_queue_stopped(bp->dev))) {
1333 unsigned long flags; 1353 spin_lock(&bp->tx_lock);
1334
1335 spin_lock_irqsave(&bp->tx_lock, flags);
1336 if ((netif_queue_stopped(bp->dev)) && 1354 if ((netif_queue_stopped(bp->dev)) &&
1337 (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) { 1355 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1338 1356
1339 netif_wake_queue(bp->dev); 1357 netif_wake_queue(bp->dev);
1340 } 1358 }
1341 spin_unlock_irqrestore(&bp->tx_lock, flags); 1359 spin_unlock(&bp->tx_lock);
1342 } 1360 }
1343
1344 bp->tx_cons = sw_cons;
1345
1346} 1361}
1347 1362
1348static inline void 1363static inline void
@@ -1523,15 +1538,12 @@ bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1523 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 1538 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1524 1539
1525 /* Return here if interrupt is disabled. */ 1540 /* Return here if interrupt is disabled. */
1526 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1541 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1527 return IRQ_RETVAL(1); 1542 return IRQ_HANDLED;
1528 }
1529 1543
1530 if (netif_rx_schedule_prep(dev)) { 1544 netif_rx_schedule(dev);
1531 __netif_rx_schedule(dev);
1532 }
1533 1545
1534 return IRQ_RETVAL(1); 1546 return IRQ_HANDLED;
1535} 1547}
1536 1548
1537static irqreturn_t 1549static irqreturn_t
@@ -1549,22 +1561,19 @@ bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1549 if ((bp->status_blk->status_idx == bp->last_status_idx) || 1561 if ((bp->status_blk->status_idx == bp->last_status_idx) ||
1550 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & 1562 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1551 BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) 1563 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1552 return IRQ_RETVAL(0); 1564 return IRQ_NONE;
1553 1565
1554 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 1566 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1555 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 1567 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1556 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 1568 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1557 1569
1558 /* Return here if interrupt is shared and is disabled. */ 1570 /* Return here if interrupt is shared and is disabled. */
1559 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1571 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1560 return IRQ_RETVAL(1); 1572 return IRQ_HANDLED;
1561 }
1562 1573
1563 if (netif_rx_schedule_prep(dev)) { 1574 netif_rx_schedule(dev);
1564 __netif_rx_schedule(dev);
1565 }
1566 1575
1567 return IRQ_RETVAL(1); 1576 return IRQ_HANDLED;
1568} 1577}
1569 1578
1570static int 1579static int
@@ -1581,11 +1590,9 @@ bnx2_poll(struct net_device *dev, int *budget)
1581 (bp->status_blk->status_attn_bits_ack & 1590 (bp->status_blk->status_attn_bits_ack &
1582 STATUS_ATTN_BITS_LINK_STATE)) { 1591 STATUS_ATTN_BITS_LINK_STATE)) {
1583 1592
1584 unsigned long flags; 1593 spin_lock(&bp->phy_lock);
1585
1586 spin_lock_irqsave(&bp->phy_lock, flags);
1587 bnx2_phy_int(bp); 1594 bnx2_phy_int(bp);
1588 spin_unlock_irqrestore(&bp->phy_lock, flags); 1595 spin_unlock(&bp->phy_lock);
1589 } 1596 }
1590 1597
1591 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) { 1598 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
@@ -1628,9 +1635,8 @@ bnx2_set_rx_mode(struct net_device *dev)
1628 struct bnx2 *bp = dev->priv; 1635 struct bnx2 *bp = dev->priv;
1629 u32 rx_mode, sort_mode; 1636 u32 rx_mode, sort_mode;
1630 int i; 1637 int i;
1631 unsigned long flags;
1632 1638
1633 spin_lock_irqsave(&bp->phy_lock, flags); 1639 spin_lock_bh(&bp->phy_lock);
1634 1640
1635 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | 1641 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1636 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 1642 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
@@ -1691,7 +1697,7 @@ bnx2_set_rx_mode(struct net_device *dev)
1691 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); 1697 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
1692 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); 1698 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
1693 1699
1694 spin_unlock_irqrestore(&bp->phy_lock, flags); 1700 spin_unlock_bh(&bp->phy_lock);
1695} 1701}
1696 1702
1697static void 1703static void
@@ -1998,14 +2004,14 @@ bnx2_init_cpus(struct bnx2 *bp)
1998} 2004}
1999 2005
2000static int 2006static int
2001bnx2_set_power_state(struct bnx2 *bp, int state) 2007bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2002{ 2008{
2003 u16 pmcsr; 2009 u16 pmcsr;
2004 2010
2005 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 2011 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2006 2012
2007 switch (state) { 2013 switch (state) {
2008 case 0: { 2014 case PCI_D0: {
2009 u32 val; 2015 u32 val;
2010 2016
2011 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 2017 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
@@ -2026,7 +2032,7 @@ bnx2_set_power_state(struct bnx2 *bp, int state)
2026 REG_WR(bp, BNX2_RPM_CONFIG, val); 2032 REG_WR(bp, BNX2_RPM_CONFIG, val);
2027 break; 2033 break;
2028 } 2034 }
2029 case 3: { 2035 case PCI_D3hot: {
2030 int i; 2036 int i;
2031 u32 val, wol_msg; 2037 u32 val, wol_msg;
2032 2038
@@ -2960,7 +2966,6 @@ bnx2_init_tx_ring(struct bnx2 *bp)
2960 bp->tx_prod = 0; 2966 bp->tx_prod = 0;
2961 bp->tx_cons = 0; 2967 bp->tx_cons = 0;
2962 bp->tx_prod_bseq = 0; 2968 bp->tx_prod_bseq = 0;
2963 atomic_set(&bp->tx_avail_bd, bp->tx_ring_size);
2964 2969
2965 val = BNX2_L2CTX_TYPE_TYPE_L2; 2970 val = BNX2_L2CTX_TYPE_TYPE_L2;
2966 val |= BNX2_L2CTX_TYPE_SIZE_L2; 2971 val |= BNX2_L2CTX_TYPE_SIZE_L2;
@@ -3507,11 +3512,11 @@ bnx2_test_registers(struct bnx2 *bp)
3507 rw_mask = reg_tbl[i].rw_mask; 3512 rw_mask = reg_tbl[i].rw_mask;
3508 ro_mask = reg_tbl[i].ro_mask; 3513 ro_mask = reg_tbl[i].ro_mask;
3509 3514
3510 save_val = readl((u8 *) bp->regview + offset); 3515 save_val = readl(bp->regview + offset);
3511 3516
3512 writel(0, (u8 *) bp->regview + offset); 3517 writel(0, bp->regview + offset);
3513 3518
3514 val = readl((u8 *) bp->regview + offset); 3519 val = readl(bp->regview + offset);
3515 if ((val & rw_mask) != 0) { 3520 if ((val & rw_mask) != 0) {
3516 goto reg_test_err; 3521 goto reg_test_err;
3517 } 3522 }
@@ -3520,9 +3525,9 @@ bnx2_test_registers(struct bnx2 *bp)
3520 goto reg_test_err; 3525 goto reg_test_err;
3521 } 3526 }
3522 3527
3523 writel(0xffffffff, (u8 *) bp->regview + offset); 3528 writel(0xffffffff, bp->regview + offset);
3524 3529
3525 val = readl((u8 *) bp->regview + offset); 3530 val = readl(bp->regview + offset);
3526 if ((val & rw_mask) != rw_mask) { 3531 if ((val & rw_mask) != rw_mask) {
3527 goto reg_test_err; 3532 goto reg_test_err;
3528 } 3533 }
@@ -3531,11 +3536,11 @@ bnx2_test_registers(struct bnx2 *bp)
3531 goto reg_test_err; 3536 goto reg_test_err;
3532 } 3537 }
3533 3538
3534 writel(save_val, (u8 *) bp->regview + offset); 3539 writel(save_val, bp->regview + offset);
3535 continue; 3540 continue;
3536 3541
3537reg_test_err: 3542reg_test_err:
3538 writel(save_val, (u8 *) bp->regview + offset); 3543 writel(save_val, bp->regview + offset);
3539 ret = -ENODEV; 3544 ret = -ENODEV;
3540 break; 3545 break;
3541 } 3546 }
@@ -3752,10 +3757,10 @@ bnx2_test_link(struct bnx2 *bp)
3752{ 3757{
3753 u32 bmsr; 3758 u32 bmsr;
3754 3759
3755 spin_lock_irq(&bp->phy_lock); 3760 spin_lock_bh(&bp->phy_lock);
3756 bnx2_read_phy(bp, MII_BMSR, &bmsr); 3761 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3757 bnx2_read_phy(bp, MII_BMSR, &bmsr); 3762 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3758 spin_unlock_irq(&bp->phy_lock); 3763 spin_unlock_bh(&bp->phy_lock);
3759 3764
3760 if (bmsr & BMSR_LSTATUS) { 3765 if (bmsr & BMSR_LSTATUS) {
3761 return 0; 3766 return 0;
@@ -3801,6 +3806,9 @@ bnx2_timer(unsigned long data)
3801 struct bnx2 *bp = (struct bnx2 *) data; 3806 struct bnx2 *bp = (struct bnx2 *) data;
3802 u32 msg; 3807 u32 msg;
3803 3808
3809 if (!netif_running(bp->dev))
3810 return;
3811
3804 if (atomic_read(&bp->intr_sem) != 0) 3812 if (atomic_read(&bp->intr_sem) != 0)
3805 goto bnx2_restart_timer; 3813 goto bnx2_restart_timer;
3806 3814
@@ -3809,15 +3817,16 @@ bnx2_timer(unsigned long data)
3809 3817
3810 if ((bp->phy_flags & PHY_SERDES_FLAG) && 3818 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3811 (CHIP_NUM(bp) == CHIP_NUM_5706)) { 3819 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3812 unsigned long flags;
3813 3820
3814 spin_lock_irqsave(&bp->phy_lock, flags); 3821 spin_lock(&bp->phy_lock);
3815 if (bp->serdes_an_pending) { 3822 if (bp->serdes_an_pending) {
3816 bp->serdes_an_pending--; 3823 bp->serdes_an_pending--;
3817 } 3824 }
3818 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { 3825 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3819 u32 bmcr; 3826 u32 bmcr;
3820 3827
3828 bp->current_interval = bp->timer_interval;
3829
3821 bnx2_read_phy(bp, MII_BMCR, &bmcr); 3830 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3822 3831
3823 if (bmcr & BMCR_ANENABLE) { 3832 if (bmcr & BMCR_ANENABLE) {
@@ -3860,14 +3869,14 @@ bnx2_timer(unsigned long data)
3860 3869
3861 } 3870 }
3862 } 3871 }
3872 else
3873 bp->current_interval = bp->timer_interval;
3863 3874
3864 spin_unlock_irqrestore(&bp->phy_lock, flags); 3875 spin_unlock(&bp->phy_lock);
3865 } 3876 }
3866 3877
3867bnx2_restart_timer: 3878bnx2_restart_timer:
3868 bp->timer.expires = RUN_AT(bp->timer_interval); 3879 mod_timer(&bp->timer, jiffies + bp->current_interval);
3869
3870 add_timer(&bp->timer);
3871} 3880}
3872 3881
3873/* Called with rtnl_lock */ 3882/* Called with rtnl_lock */
@@ -3877,7 +3886,7 @@ bnx2_open(struct net_device *dev)
3877 struct bnx2 *bp = dev->priv; 3886 struct bnx2 *bp = dev->priv;
3878 int rc; 3887 int rc;
3879 3888
3880 bnx2_set_power_state(bp, 0); 3889 bnx2_set_power_state(bp, PCI_D0);
3881 bnx2_disable_int(bp); 3890 bnx2_disable_int(bp);
3882 3891
3883 rc = bnx2_alloc_mem(bp); 3892 rc = bnx2_alloc_mem(bp);
@@ -3920,12 +3929,7 @@ bnx2_open(struct net_device *dev)
3920 return rc; 3929 return rc;
3921 } 3930 }
3922 3931
3923 init_timer(&bp->timer); 3932 mod_timer(&bp->timer, jiffies + bp->current_interval);
3924
3925 bp->timer.expires = RUN_AT(bp->timer_interval);
3926 bp->timer.data = (unsigned long) bp;
3927 bp->timer.function = bnx2_timer;
3928 add_timer(&bp->timer);
3929 3933
3930 atomic_set(&bp->intr_sem, 0); 3934 atomic_set(&bp->intr_sem, 0);
3931 3935
@@ -3976,12 +3980,17 @@ bnx2_reset_task(void *data)
3976{ 3980{
3977 struct bnx2 *bp = data; 3981 struct bnx2 *bp = data;
3978 3982
3983 if (!netif_running(bp->dev))
3984 return;
3985
3986 bp->in_reset_task = 1;
3979 bnx2_netif_stop(bp); 3987 bnx2_netif_stop(bp);
3980 3988
3981 bnx2_init_nic(bp); 3989 bnx2_init_nic(bp);
3982 3990
3983 atomic_set(&bp->intr_sem, 1); 3991 atomic_set(&bp->intr_sem, 1);
3984 bnx2_netif_start(bp); 3992 bnx2_netif_start(bp);
3993 bp->in_reset_task = 0;
3985} 3994}
3986 3995
3987static void 3996static void
@@ -4041,9 +4050,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4041 u16 prod, ring_prod; 4050 u16 prod, ring_prod;
4042 int i; 4051 int i;
4043 4052
4044 if (unlikely(atomic_read(&bp->tx_avail_bd) < 4053 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4045 (skb_shinfo(skb)->nr_frags + 1))) {
4046
4047 netif_stop_queue(dev); 4054 netif_stop_queue(dev);
4048 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", 4055 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4049 dev->name); 4056 dev->name);
@@ -4140,8 +4147,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4140 prod = NEXT_TX_BD(prod); 4147 prod = NEXT_TX_BD(prod);
4141 bp->tx_prod_bseq += skb->len; 4148 bp->tx_prod_bseq += skb->len;
4142 4149
4143 atomic_sub(last_frag + 1, &bp->tx_avail_bd);
4144
4145 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod); 4150 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4146 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq); 4151 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4147 4152
@@ -4150,17 +4155,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4150 bp->tx_prod = prod; 4155 bp->tx_prod = prod;
4151 dev->trans_start = jiffies; 4156 dev->trans_start = jiffies;
4152 4157
4153 if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) { 4158 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4154 unsigned long flags; 4159 spin_lock(&bp->tx_lock);
4155 4160 netif_stop_queue(dev);
4156 spin_lock_irqsave(&bp->tx_lock, flags); 4161
4157 if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) { 4162 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4158 netif_stop_queue(dev); 4163 netif_wake_queue(dev);
4159 4164 spin_unlock(&bp->tx_lock);
4160 if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)
4161 netif_wake_queue(dev);
4162 }
4163 spin_unlock_irqrestore(&bp->tx_lock, flags);
4164 } 4165 }
4165 4166
4166 return NETDEV_TX_OK; 4167 return NETDEV_TX_OK;
@@ -4173,7 +4174,13 @@ bnx2_close(struct net_device *dev)
4173 struct bnx2 *bp = dev->priv; 4174 struct bnx2 *bp = dev->priv;
4174 u32 reset_code; 4175 u32 reset_code;
4175 4176
4176 flush_scheduled_work(); 4177 /* Calling flush_scheduled_work() may deadlock because
4178 * linkwatch_event() may be on the workqueue and it will try to get
 4179	 * the rtnl_lock, which we are holding.
4180 */
4181 while (bp->in_reset_task)
4182 msleep(1);
4183
4177 bnx2_netif_stop(bp); 4184 bnx2_netif_stop(bp);
4178 del_timer_sync(&bp->timer); 4185 del_timer_sync(&bp->timer);
4179 if (bp->wol) 4186 if (bp->wol)
@@ -4190,7 +4197,7 @@ bnx2_close(struct net_device *dev)
4190 bnx2_free_mem(bp); 4197 bnx2_free_mem(bp);
4191 bp->link_up = 0; 4198 bp->link_up = 0;
4192 netif_carrier_off(bp->dev); 4199 netif_carrier_off(bp->dev);
4193 bnx2_set_power_state(bp, 3); 4200 bnx2_set_power_state(bp, PCI_D3hot);
4194 return 0; 4201 return 0;
4195} 4202}
4196 4203
@@ -4390,11 +4397,11 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4390 bp->req_line_speed = req_line_speed; 4397 bp->req_line_speed = req_line_speed;
4391 bp->req_duplex = req_duplex; 4398 bp->req_duplex = req_duplex;
4392 4399
4393 spin_lock_irq(&bp->phy_lock); 4400 spin_lock_bh(&bp->phy_lock);
4394 4401
4395 bnx2_setup_phy(bp); 4402 bnx2_setup_phy(bp);
4396 4403
4397 spin_unlock_irq(&bp->phy_lock); 4404 spin_unlock_bh(&bp->phy_lock);
4398 4405
4399 return 0; 4406 return 0;
4400} 4407}
@@ -4464,19 +4471,20 @@ bnx2_nway_reset(struct net_device *dev)
4464 return -EINVAL; 4471 return -EINVAL;
4465 } 4472 }
4466 4473
4467 spin_lock_irq(&bp->phy_lock); 4474 spin_lock_bh(&bp->phy_lock);
4468 4475
4469 /* Force a link down visible on the other side */ 4476 /* Force a link down visible on the other side */
4470 if (bp->phy_flags & PHY_SERDES_FLAG) { 4477 if (bp->phy_flags & PHY_SERDES_FLAG) {
4471 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK); 4478 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4472 spin_unlock_irq(&bp->phy_lock); 4479 spin_unlock_bh(&bp->phy_lock);
4473 4480
4474 msleep(20); 4481 msleep(20);
4475 4482
4476 spin_lock_irq(&bp->phy_lock); 4483 spin_lock_bh(&bp->phy_lock);
4477 if (CHIP_NUM(bp) == CHIP_NUM_5706) { 4484 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4478 bp->serdes_an_pending = SERDES_AN_TIMEOUT / 4485 bp->current_interval = SERDES_AN_TIMEOUT;
4479 bp->timer_interval; 4486 bp->serdes_an_pending = 1;
4487 mod_timer(&bp->timer, jiffies + bp->current_interval);
4480 } 4488 }
4481 } 4489 }
4482 4490
@@ -4484,7 +4492,7 @@ bnx2_nway_reset(struct net_device *dev)
4484 bmcr &= ~BMCR_LOOPBACK; 4492 bmcr &= ~BMCR_LOOPBACK;
4485 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); 4493 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4486 4494
4487 spin_unlock_irq(&bp->phy_lock); 4495 spin_unlock_bh(&bp->phy_lock);
4488 4496
4489 return 0; 4497 return 0;
4490} 4498}
@@ -4670,11 +4678,11 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4670 bp->autoneg &= ~AUTONEG_FLOW_CTRL; 4678 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4671 } 4679 }
4672 4680
4673 spin_lock_irq(&bp->phy_lock); 4681 spin_lock_bh(&bp->phy_lock);
4674 4682
4675 bnx2_setup_phy(bp); 4683 bnx2_setup_phy(bp);
4676 4684
4677 spin_unlock_irq(&bp->phy_lock); 4685 spin_unlock_bh(&bp->phy_lock);
4678 4686
4679 return 0; 4687 return 0;
4680} 4688}
@@ -4698,7 +4706,7 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
4698 4706
4699#define BNX2_NUM_STATS 45 4707#define BNX2_NUM_STATS 45
4700 4708
4701struct { 4709static struct {
4702 char string[ETH_GSTRING_LEN]; 4710 char string[ETH_GSTRING_LEN];
4703} bnx2_stats_str_arr[BNX2_NUM_STATS] = { 4711} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4704 { "rx_bytes" }, 4712 { "rx_bytes" },
@@ -4750,7 +4758,7 @@ struct {
4750 4758
4751#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) 4759#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
4752 4760
4753unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { 4761static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4754 STATS_OFFSET32(stat_IfHCInOctets_hi), 4762 STATS_OFFSET32(stat_IfHCInOctets_hi),
4755 STATS_OFFSET32(stat_IfHCInBadOctets_hi), 4763 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4756 STATS_OFFSET32(stat_IfHCOutOctets_hi), 4764 STATS_OFFSET32(stat_IfHCOutOctets_hi),
@@ -4801,7 +4809,7 @@ unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4801/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are 4809/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
4802 * skipped because of errata. 4810 * skipped because of errata.
4803 */ 4811 */
4804u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { 4812static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
4805 8,0,8,8,8,8,8,8,8,8, 4813 8,0,8,8,8,8,8,8,8,8,
4806 4,0,4,4,4,4,4,4,4,4, 4814 4,0,4,4,4,4,4,4,4,4,
4807 4,4,4,4,4,4,4,4,4,4, 4815 4,4,4,4,4,4,4,4,4,4,
@@ -4811,7 +4819,7 @@ u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
4811 4819
4812#define BNX2_NUM_TESTS 6 4820#define BNX2_NUM_TESTS 6
4813 4821
4814struct { 4822static struct {
4815 char string[ETH_GSTRING_LEN]; 4823 char string[ETH_GSTRING_LEN];
4816} bnx2_tests_str_arr[BNX2_NUM_TESTS] = { 4824} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
4817 { "register_test (offline)" }, 4825 { "register_test (offline)" },
@@ -4910,7 +4918,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
4910 struct bnx2 *bp = dev->priv; 4918 struct bnx2 *bp = dev->priv;
4911 int i; 4919 int i;
4912 u32 *hw_stats = (u32 *) bp->stats_blk; 4920 u32 *hw_stats = (u32 *) bp->stats_blk;
4913 u8 *stats_len_arr = 0; 4921 u8 *stats_len_arr = NULL;
4914 4922
4915 if (hw_stats == NULL) { 4923 if (hw_stats == NULL) {
4916 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); 4924 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
@@ -5012,7 +5020,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
5012static int 5020static int
5013bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5021bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5014{ 5022{
5015 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data; 5023 struct mii_ioctl_data *data = if_mii(ifr);
5016 struct bnx2 *bp = dev->priv; 5024 struct bnx2 *bp = dev->priv;
5017 int err; 5025 int err;
5018 5026
@@ -5024,9 +5032,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5024 case SIOCGMIIREG: { 5032 case SIOCGMIIREG: {
5025 u32 mii_regval; 5033 u32 mii_regval;
5026 5034
5027 spin_lock_irq(&bp->phy_lock); 5035 spin_lock_bh(&bp->phy_lock);
5028 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); 5036 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5029 spin_unlock_irq(&bp->phy_lock); 5037 spin_unlock_bh(&bp->phy_lock);
5030 5038
5031 data->val_out = mii_regval; 5039 data->val_out = mii_regval;
5032 5040
@@ -5037,9 +5045,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5037 if (!capable(CAP_NET_ADMIN)) 5045 if (!capable(CAP_NET_ADMIN))
5038 return -EPERM; 5046 return -EPERM;
5039 5047
5040 spin_lock_irq(&bp->phy_lock); 5048 spin_lock_bh(&bp->phy_lock);
5041 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); 5049 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5042 spin_unlock_irq(&bp->phy_lock); 5050 spin_unlock_bh(&bp->phy_lock);
5043 5051
5044 return err; 5052 return err;
5045 5053
@@ -5057,6 +5065,9 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
5057 struct sockaddr *addr = p; 5065 struct sockaddr *addr = p;
5058 struct bnx2 *bp = dev->priv; 5066 struct bnx2 *bp = dev->priv;
5059 5067
5068 if (!is_valid_ether_addr(addr->sa_data))
5069 return -EINVAL;
5070
5060 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5071 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5061 if (netif_running(dev)) 5072 if (netif_running(dev))
5062 bnx2_set_mac_addr(bp); 5073 bnx2_set_mac_addr(bp);
@@ -5192,7 +5203,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5192 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 5203 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5193 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 5204 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5194 5205
5195 bnx2_set_power_state(bp, 0); 5206 bnx2_set_power_state(bp, PCI_D0);
5196 5207
5197 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 5208 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5198 5209
@@ -5305,6 +5316,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5305 bp->stats_ticks = 1000000 & 0xffff00; 5316 bp->stats_ticks = 1000000 & 0xffff00;
5306 5317
5307 bp->timer_interval = HZ; 5318 bp->timer_interval = HZ;
5319 bp->current_interval = HZ;
5308 5320
5309 /* Disable WOL support if we are running on a SERDES chip. */ 5321 /* Disable WOL support if we are running on a SERDES chip. */
5310 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) { 5322 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
@@ -5328,6 +5340,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5328 bp->req_line_speed = 0; 5340 bp->req_line_speed = 0;
5329 if (bp->phy_flags & PHY_SERDES_FLAG) { 5341 if (bp->phy_flags & PHY_SERDES_FLAG) {
5330 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; 5342 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5343
5344 reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE +
5345 BNX2_PORT_HW_CFG_CONFIG);
5346 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5347 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5348 bp->autoneg = 0;
5349 bp->req_line_speed = bp->line_speed = SPEED_1000;
5350 bp->req_duplex = DUPLEX_FULL;
5351 }
5331 } 5352 }
5332 else { 5353 else {
5333 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; 5354 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
@@ -5335,11 +5356,17 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5335 5356
5336 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 5357 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5337 5358
5359 init_timer(&bp->timer);
5360 bp->timer.expires = RUN_AT(bp->timer_interval);
5361 bp->timer.data = (unsigned long) bp;
5362 bp->timer.function = bnx2_timer;
5363
5338 return 0; 5364 return 0;
5339 5365
5340err_out_unmap: 5366err_out_unmap:
5341 if (bp->regview) { 5367 if (bp->regview) {
5342 iounmap(bp->regview); 5368 iounmap(bp->regview);
5369 bp->regview = NULL;
5343 } 5370 }
5344 5371
5345err_out_release: 5372err_out_release:
@@ -5454,6 +5481,8 @@ bnx2_remove_one(struct pci_dev *pdev)
5454 struct net_device *dev = pci_get_drvdata(pdev); 5481 struct net_device *dev = pci_get_drvdata(pdev);
5455 struct bnx2 *bp = dev->priv; 5482 struct bnx2 *bp = dev->priv;
5456 5483
5484 flush_scheduled_work();
5485
5457 unregister_netdev(dev); 5486 unregister_netdev(dev);
5458 5487
5459 if (bp->regview) 5488 if (bp->regview)
@@ -5466,7 +5495,7 @@ bnx2_remove_one(struct pci_dev *pdev)
5466} 5495}
5467 5496
5468static int 5497static int
5469bnx2_suspend(struct pci_dev *pdev, u32 state) 5498bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5470{ 5499{
5471 struct net_device *dev = pci_get_drvdata(pdev); 5500 struct net_device *dev = pci_get_drvdata(pdev);
5472 struct bnx2 *bp = dev->priv; 5501 struct bnx2 *bp = dev->priv;
@@ -5484,7 +5513,7 @@ bnx2_suspend(struct pci_dev *pdev, u32 state)
5484 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; 5513 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5485 bnx2_reset_chip(bp, reset_code); 5514 bnx2_reset_chip(bp, reset_code);
5486 bnx2_free_skbs(bp); 5515 bnx2_free_skbs(bp);
5487 bnx2_set_power_state(bp, state); 5516 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5488 return 0; 5517 return 0;
5489} 5518}
5490 5519
@@ -5497,7 +5526,7 @@ bnx2_resume(struct pci_dev *pdev)
5497 if (!netif_running(dev)) 5526 if (!netif_running(dev))
5498 return 0; 5527 return 0;
5499 5528
5500 bnx2_set_power_state(bp, 0); 5529 bnx2_set_power_state(bp, PCI_D0);
5501 netif_device_attach(dev); 5530 netif_device_attach(dev);
5502 bnx2_init_nic(bp); 5531 bnx2_init_nic(bp);
5503 bnx2_netif_start(bp); 5532 bnx2_netif_start(bp);
@@ -5505,12 +5534,12 @@ bnx2_resume(struct pci_dev *pdev)
5505} 5534}
5506 5535
5507static struct pci_driver bnx2_pci_driver = { 5536static struct pci_driver bnx2_pci_driver = {
5508 name: DRV_MODULE_NAME, 5537 .name = DRV_MODULE_NAME,
5509 id_table: bnx2_pci_tbl, 5538 .id_table = bnx2_pci_tbl,
5510 probe: bnx2_init_one, 5539 .probe = bnx2_init_one,
5511 remove: __devexit_p(bnx2_remove_one), 5540 .remove = __devexit_p(bnx2_remove_one),
5512 suspend: bnx2_suspend, 5541 .suspend = bnx2_suspend,
5513 resume: bnx2_resume, 5542 .resume = bnx2_resume,
5514}; 5543};
5515 5544
5516static int __init bnx2_init(void) 5545static int __init bnx2_init(void)
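
The bnx2.c hunks above drop the tx_avail_bd atomic in favour of the new bnx2_tx_avail() helper, which recomputes the free TX descriptor count from the masked producer/consumer indices. Below is a minimal, self-contained sketch of that wrap-around arithmetic; the ring size and macro names are stand-ins, not the driver's own constants.

/* Standalone model of the index arithmetic in bnx2_tx_avail().
 * RING_SIZE is a hypothetical power-of-two count standing in for
 * bp->tx_ring_size; RING_IDX() mirrors TX_RING_IDX(). */
#include <stdio.h>

#define RING_SIZE     256u
#define MAX_DESC_CNT  (RING_SIZE - 1)
#define RING_IDX(x)   ((x) & MAX_DESC_CNT)

static unsigned int tx_avail(unsigned int prod, unsigned int cons)
{
	unsigned int diff = RING_IDX(prod) - RING_IDX(cons);

	/* When the consumer index is numerically ahead of the producer
	 * index the unsigned subtraction wraps; mask it back into range
	 * and subtract one, as the driver does, before reporting the
	 * free count. */
	if (diff > MAX_DESC_CNT)
		diff = (diff & MAX_DESC_CNT) - 1;
	return RING_SIZE - diff;
}

int main(void)
{
	printf("%u\n", tx_avail(20, 10));   /* no wrap: prints 246 */
	printf("%u\n", tx_avail(260, 250)); /* both indices masked: prints 247 */
	printf("%u\n", tx_avail(2, 250));   /* wrapped subtraction: prints 249 */
	return 0;
}
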
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 8214a2853d0d..9ad3f5740cd8 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3841,12 +3841,12 @@ struct bnx2 {
3841 struct status_block *status_blk; 3841 struct status_block *status_blk;
3842 u32 last_status_idx; 3842 u32 last_status_idx;
3843 3843
3844 atomic_t tx_avail_bd;
3845 struct tx_bd *tx_desc_ring; 3844 struct tx_bd *tx_desc_ring;
3846 struct sw_bd *tx_buf_ring; 3845 struct sw_bd *tx_buf_ring;
3847 u32 tx_prod_bseq; 3846 u32 tx_prod_bseq;
3848 u16 tx_prod; 3847 u16 tx_prod;
3849 u16 tx_cons; 3848 u16 tx_cons;
3849 int tx_ring_size;
3850 3850
3851#ifdef BCM_VLAN 3851#ifdef BCM_VLAN
3852 struct vlan_group *vlgrp; 3852 struct vlan_group *vlgrp;
@@ -3872,8 +3872,10 @@ struct bnx2 {
3872 char *name; 3872 char *name;
3873 3873
3874 int timer_interval; 3874 int timer_interval;
3875 int current_interval;
3875 struct timer_list timer; 3876 struct timer_list timer;
3876 struct work_struct reset_task; 3877 struct work_struct reset_task;
3878 int in_reset_task;
3877 3879
3878 /* Used to synchronize phy accesses. */ 3880 /* Used to synchronize phy accesses. */
3879 spinlock_t phy_lock; 3881 spinlock_t phy_lock;
@@ -3927,7 +3929,6 @@ struct bnx2 {
3927 u16 fw_wr_seq; 3929 u16 fw_wr_seq;
3928 u16 fw_drv_pulse_wr_seq; 3930 u16 fw_drv_pulse_wr_seq;
3929 3931
3930 int tx_ring_size;
3931 dma_addr_t tx_desc_mapping; 3932 dma_addr_t tx_desc_mapping;
3932 3933
3933 3934
@@ -3985,7 +3986,7 @@ struct bnx2 {
3985#define PHY_LOOPBACK 2 3986#define PHY_LOOPBACK 2
3986 3987
3987 u8 serdes_an_pending; 3988 u8 serdes_an_pending;
3988#define SERDES_AN_TIMEOUT (2 * HZ) 3989#define SERDES_AN_TIMEOUT (HZ / 3)
3989 3990
3990 u8 mac_addr[8]; 3991 u8 mac_addr[8];
3991 3992
@@ -4171,6 +4172,9 @@ struct fw_info {
4171 4172
4172#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054 4173#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054
4173#define BNX2_PORT_HW_CFG_CONFIG 0x00000058 4174#define BNX2_PORT_HW_CFG_CONFIG 0x00000058
4175#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000
4176#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000
4177#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000
4174 4178
4175#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068 4179#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068
4176#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c 4180#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c
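
The in_reset_task flag added to struct bnx2 above backs the bnx2_close() change: instead of flush_scheduled_work(), which can deadlock against linkwatch_event() waiting for rtnl_lock, the close path polls the flag until the current reset task run finishes. The userspace sketch below models only that wait pattern; the names and timings are illustrative, not taken from the driver.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int in_reset_task;

/* Stands in for bnx2_reset_task(): do the work, then clear the flag. */
static void *reset_task(void *arg)
{
	usleep(50 * 1000);                 /* placeholder for the real reset work */
	atomic_store(&in_reset_task, 0);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* Raise the flag before the work starts; the driver sets it at
	 * the top of bnx2_reset_task() itself. */
	atomic_store(&in_reset_task, 1);
	pthread_create(&worker, NULL, reset_task, NULL);

	/* Mirrors bnx2_close(): while (bp->in_reset_task) msleep(1); */
	while (atomic_load(&in_reset_task))
		usleep(1000);

	printf("reset task done; safe to continue tear-down\n");
	pthread_join(worker, NULL);
	return 0;
}
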
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a2e8dda5afac..d2f34d5a8083 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2419,22 +2419,19 @@ out:
2419 return 0; 2419 return 0;
2420} 2420}
2421 2421
2422int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype) 2422int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev)
2423{ 2423{
2424 struct bonding *bond = dev->priv; 2424 struct bonding *bond = dev->priv;
2425 struct slave *slave = NULL; 2425 struct slave *slave = NULL;
2426 int ret = NET_RX_DROP; 2426 int ret = NET_RX_DROP;
2427 2427
2428 if (!(dev->flags & IFF_MASTER)) { 2428 if (!(dev->flags & IFF_MASTER))
2429 goto out; 2429 goto out;
2430 }
2431 2430
2432 read_lock(&bond->lock); 2431 read_lock(&bond->lock);
2433 slave = bond_get_slave_by_dev((struct bonding *)dev->priv, 2432 slave = bond_get_slave_by_dev((struct bonding *)dev->priv, orig_dev);
2434 skb->real_dev); 2433 if (!slave)
2435 if (slave == NULL) {
2436 goto out_unlock; 2434 goto out_unlock;
2437 }
2438 2435
2439 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2436 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
2440 2437
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f46823894187..673a30af5660 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -295,6 +295,6 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
295void bond_3ad_handle_link_change(struct slave *slave, char link); 295void bond_3ad_handle_link_change(struct slave *slave, char link);
296int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 296int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
297int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 297int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
298int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype); 298int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev);
299#endif //__BOND_3AD_H__ 299#endif //__BOND_3AD_H__
300 300
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 19e829b567d0..f8fce3961197 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -354,15 +354,14 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
354 _unlock_rx_hashtbl(bond); 354 _unlock_rx_hashtbl(bond);
355} 355}
356 356
357static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype) 357static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev)
358{ 358{
359 struct bonding *bond = bond_dev->priv; 359 struct bonding *bond = bond_dev->priv;
360 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 360 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
361 int res = NET_RX_DROP; 361 int res = NET_RX_DROP;
362 362
363 if (!(bond_dev->flags & IFF_MASTER)) { 363 if (!(bond_dev->flags & IFF_MASTER))
364 goto out; 364 goto out;
365 }
366 365
367 if (!arp) { 366 if (!arp) {
368 dprintk("Packet has no ARP data\n"); 367 dprintk("Packet has no ARP data\n");
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
index 000000000000..91e927827c43
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,11 @@
1#
2# Chelsio 10Gb NIC driver for Linux.
3#
4
5obj-$(CONFIG_CHELSIO_T1) += cxgb.o
6
7EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
8
9
10cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
11
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
index 000000000000..bf3e7b6a7a18
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,314 @@
1/*****************************************************************************
2 * *
3 * File: common.h *
4 * $Revision: 1.21 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_COMMON_H_
40#define _CXGB_COMMON_H_
41
42#include <linux/config.h>
43#include <linux/module.h>
44#include <linux/netdevice.h>
45#include <linux/types.h>
46#include <linux/delay.h>
47#include <linux/pci.h>
48#include <linux/ethtool.h>
49#include <linux/mii.h>
50#include <linux/crc32.h>
51#include <linux/init.h>
52#include <asm/io.h>
53#include <linux/pci_ids.h>
54
55#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
56#define DRV_NAME "cxgb"
57#define DRV_VERSION "2.1.1"
58#define PFX DRV_NAME ": "
59
60#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
61#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
62#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
63
64#define CH_DEVICE(devid, ssid, idx) \
65 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
66
67#define SUPPORTED_PAUSE (1 << 13)
68#define SUPPORTED_LOOPBACK (1 << 15)
69
70#define ADVERTISED_PAUSE (1 << 13)
71#define ADVERTISED_ASYM_PAUSE (1 << 14)
72
73typedef struct adapter adapter_t;
74
75void t1_elmer0_ext_intr(adapter_t *adapter);
76void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
77 int speed, int duplex, int fc);
78
79struct t1_rx_mode {
80 struct net_device *dev;
81 u32 idx;
82 struct dev_mc_list *list;
83};
84
85#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
86#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
87#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
88
89static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
90{
91 u8 *addr = NULL;
92
93 if (rm->idx++ < rm->dev->mc_count) {
94 addr = rm->list->dmi_addr;
95 rm->list = rm->list->next;
96 }
97 return addr;
98}
99
100#define MAX_NPORTS 4
101
102#define SPEED_INVALID 0xffff
103#define DUPLEX_INVALID 0xff
104
105enum {
106 CHBT_BOARD_N110,
107 CHBT_BOARD_N210
108};
109
110enum {
111 CHBT_TERM_T1,
112 CHBT_TERM_T2
113};
114
115enum {
116 CHBT_MAC_PM3393,
117};
118
119enum {
120 CHBT_PHY_88X2010,
121};
122
123enum {
124 PAUSE_RX = 1 << 0,
125 PAUSE_TX = 1 << 1,
126 PAUSE_AUTONEG = 1 << 2
127};
128
129/* Revisions of T1 chip */
130enum {
131 TERM_T1A = 0,
132 TERM_T1B = 1,
133 TERM_T2 = 3
134};
135
136struct sge_params {
137 unsigned int cmdQ_size[2];
138 unsigned int freelQ_size[2];
139 unsigned int large_buf_capacity;
140 unsigned int rx_coalesce_usecs;
141 unsigned int last_rx_coalesce_raw;
142 unsigned int default_rx_coalesce_usecs;
143 unsigned int sample_interval_usecs;
144 unsigned int coalesce_enable;
145 unsigned int polling;
146};
147
148struct chelsio_pci_params {
149 unsigned short speed;
150 unsigned char width;
151 unsigned char is_pcix;
152};
153
154struct adapter_params {
155 struct sge_params sge;
156 struct chelsio_pci_params pci;
157
158 const struct board_info *brd_info;
159
160 unsigned int nports; /* # of ethernet ports */
161 unsigned int stats_update_period;
162 unsigned short chip_revision;
163 unsigned char chip_version;
164};
165
166struct link_config {
167 unsigned int supported; /* link capabilities */
168 unsigned int advertising; /* advertised capabilities */
169 unsigned short requested_speed; /* speed user has requested */
170 unsigned short speed; /* actual link speed */
171 unsigned char requested_duplex; /* duplex user has requested */
172 unsigned char duplex; /* actual link duplex */
173 unsigned char requested_fc; /* flow control user has requested */
174 unsigned char fc; /* actual link flow control */
175 unsigned char autoneg; /* autonegotiating? */
176};
177
178struct cmac;
179struct cphy;
180
181struct port_info {
182 struct net_device *dev;
183 struct cmac *mac;
184 struct cphy *phy;
185 struct link_config link_config;
186 struct net_device_stats netstats;
187};
188
189struct sge;
190struct peespi;
191
192struct adapter {
193 u8 __iomem *regs;
194 struct pci_dev *pdev;
195 unsigned long registered_device_map;
196 unsigned long open_device_map;
197 unsigned long flags;
198
199 const char *name;
200 int msg_enable;
201 u32 mmio_len;
202
203 struct work_struct ext_intr_handler_task;
204 struct adapter_params params;
205
206 struct vlan_group *vlan_grp;
207
208 /* Terminator modules. */
209 struct sge *sge;
210 struct peespi *espi;
211
212 struct port_info port[MAX_NPORTS];
213 struct work_struct stats_update_task;
214 struct timer_list stats_update_timer;
215
216 struct semaphore mib_mutex;
217 spinlock_t tpi_lock;
218 spinlock_t work_lock;
219 /* guards async operations */
220 spinlock_t async_lock ____cacheline_aligned;
221 u32 slow_intr_mask;
222};
223
224enum { /* adapter flags */
225 FULL_INIT_DONE = 1 << 0,
226 TSO_CAPABLE = 1 << 2,
227 TCP_CSUM_CAPABLE = 1 << 3,
228 UDP_CSUM_CAPABLE = 1 << 4,
229 VLAN_ACCEL_CAPABLE = 1 << 5,
230 RX_CSUM_ENABLED = 1 << 6,
231};
232
233struct mdio_ops;
234struct gmac;
235struct gphy;
236
237struct board_info {
238 unsigned char board;
239 unsigned char port_number;
240 unsigned long caps;
241 unsigned char chip_term;
242 unsigned char chip_mac;
243 unsigned char chip_phy;
244 unsigned int clock_core;
245 unsigned int clock_mc3;
246 unsigned int clock_mc4;
247 unsigned int espi_nports;
248 unsigned int clock_cspi;
249 unsigned int clock_elmer0;
250 unsigned char mdio_mdien;
251 unsigned char mdio_mdiinv;
252 unsigned char mdio_mdc;
253 unsigned char mdio_phybaseaddr;
254 struct gmac *gmac;
255 struct gphy *gphy;
256 struct mdio_ops *mdio_ops;
257 const char *desc;
258};
259
260extern struct pci_device_id t1_pci_tbl[];
261
262static inline int adapter_matches_type(const adapter_t *adapter,
263 int version, int revision)
264{
265 return adapter->params.chip_version == version &&
266 adapter->params.chip_revision == revision;
267}
268
269#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
270#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
271
272/* Returns true if an adapter supports VLAN acceleration and TSO */
273static inline int vlan_tso_capable(const adapter_t *adapter)
274{
275 return !t1_is_T1B(adapter);
276}
277
278#define for_each_port(adapter, iter) \
279 for (iter = 0; iter < (adapter)->params.nports; ++iter)
280
281#define board_info(adapter) ((adapter)->params.brd_info)
282#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
283
284static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
285{
286 return board_info(adap)->clock_core / 1000000;
287}
288
289extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
290extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
291
292extern void t1_interrupts_enable(adapter_t *adapter);
293extern void t1_interrupts_disable(adapter_t *adapter);
294extern void t1_interrupts_clear(adapter_t *adapter);
295extern int elmer0_ext_intr_handler(adapter_t *adapter);
296extern int t1_slow_intr_handler(adapter_t *adapter);
297
298extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
299extern const struct board_info *t1_get_board_info(unsigned int board_id);
300extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
301 unsigned short ssid);
302extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
303extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
304 struct adapter_params *p);
305extern int t1_init_hw_modules(adapter_t *adapter);
306extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
307extern void t1_free_sw_modules(adapter_t *adapter);
308extern void t1_fatal_err(adapter_t *adapter);
309
310extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
311extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
312extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
313
314#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
index 000000000000..3412342f7345
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,148 @@
1/*****************************************************************************
2 * *
3 * File: cphy.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_CPHY_H_
40#define _CXGB_CPHY_H_
41
42#include "common.h"
43
44struct mdio_ops {
45 void (*init)(adapter_t *adapter, const struct board_info *bi);
46 int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
47 int reg_addr, unsigned int *val);
48 int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
49 int reg_addr, unsigned int val);
50};
51
52/* PHY interrupt types */
53enum {
54 cphy_cause_link_change = 0x1,
55 cphy_cause_error = 0x2
56};
57
58struct cphy;
59
60/* PHY operations */
61struct cphy_ops {
62 void (*destroy)(struct cphy *);
63 int (*reset)(struct cphy *, int wait);
64
65 int (*interrupt_enable)(struct cphy *);
66 int (*interrupt_disable)(struct cphy *);
67 int (*interrupt_clear)(struct cphy *);
68 int (*interrupt_handler)(struct cphy *);
69
70 int (*autoneg_enable)(struct cphy *);
71 int (*autoneg_disable)(struct cphy *);
72 int (*autoneg_restart)(struct cphy *);
73
74 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
75 int (*set_loopback)(struct cphy *, int on);
76 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
77 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
78 int *duplex, int *fc);
79};
80
81/* A PHY instance */
82struct cphy {
83 int addr; /* PHY address */
84 adapter_t *adapter; /* associated adapter */
85 struct cphy_ops *ops; /* PHY operations */
86 int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
87 int reg_addr, unsigned int *val);
88 int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
89 int reg_addr, unsigned int val);
90 struct cphy_instance *instance;
91};
92
93/* Convenience MDIO read/write wrappers */
94static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
95 unsigned int *valp)
96{
97 return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
98}
99
100static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
101 unsigned int val)
102{
103 return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
104}
105
106static inline int simple_mdio_read(struct cphy *cphy, int reg,
107 unsigned int *valp)
108{
109 return mdio_read(cphy, 0, reg, valp);
110}
111
112static inline int simple_mdio_write(struct cphy *cphy, int reg,
113 unsigned int val)
114{
115 return mdio_write(cphy, 0, reg, val);
116}
117
118/* Convenience initializer */
119static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
120 int phy_addr, struct cphy_ops *phy_ops,
121 struct mdio_ops *mdio_ops)
122{
123 phy->adapter = adapter;
124 phy->addr = phy_addr;
125 phy->ops = phy_ops;
126 if (mdio_ops) {
127 phy->mdio_read = mdio_ops->read;
128 phy->mdio_write = mdio_ops->write;
129 }
130}
131
132/* Operations of the PHY-instance factory */
133struct gphy {
134 /* Construct a PHY instance with the given PHY address */
135 struct cphy *(*create)(adapter_t *adapter, int phy_addr,
136 struct mdio_ops *mdio_ops);
137
138 /*
139 * Reset the PHY chip. This resets the whole PHY chip, not individual
140 * ports.
141 */
142 int (*reset)(adapter_t *adapter);
143};
144
145extern struct gphy t1_mv88x201x_ops;
146extern struct gphy t1_dummy_phy_ops;
147
148#endif /* _CXGB_CPHY_H_ */
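As an illustration only (not part of this patch): a PHY backend is expected to fill in a struct cphy_ops table and hand it to cphy_init(), after which the mdio_read()/mdio_write() wrappers above become usable. In the sketch below the my_phy_* names, the reset-bit layout and the use of kmalloc() (assuming the usual kernel includes such as <linux/slab.h>) are hypothetical.

/* Sketch of a hypothetical PHY backend built on the cphy interface above. */
static int my_phy_reset(struct cphy *cphy, int wait)
{
	/* Hypothetical layout: self-clearing reset bit in MMD 0, register 0. */
	return mdio_write(cphy, 0, 0, 1U << 15);
}

static struct cphy_ops my_phy_ops = {
	.reset = my_phy_reset,
	/* the remaining handlers are omitted from this sketch */
};

static struct cphy *my_phy_create(adapter_t *adapter, int phy_addr,
				  struct mdio_ops *mdio_ops)
{
	struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);

	if (cphy) {
		memset(cphy, 0, sizeof(*cphy));
		cphy_init(cphy, adapter, phy_addr, &my_phy_ops, mdio_ops);
	}
	return cphy;
}

static int my_phy_chip_reset(adapter_t *adapter)
{
	return 0;	/* nothing to do in this sketch */
}

struct gphy t1_my_phy_ops = { my_phy_create, my_phy_chip_reset };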
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
index 000000000000..27925e487bcf
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
1/*****************************************************************************
2 * *
3 * File: cpl5_cmd.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_CPL5_CMD_H_
40#define _CXGB_CPL5_CMD_H_
41
42#include <asm/byteorder.h>
43
44#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
45#error "Adjust your <asm/byteorder.h> defines"
46#endif
47
48enum CPL_opcode {
49 CPL_RX_PKT = 0xAD,
50 CPL_TX_PKT = 0xB2,
51 CPL_TX_PKT_LSO = 0xB6,
52};
53
54enum { /* TX_PKT_LSO ethernet types */
55 CPL_ETH_II,
56 CPL_ETH_II_VLAN,
57 CPL_ETH_802_3,
58 CPL_ETH_802_3_VLAN
59};
60
61struct cpl_rx_data {
62 u32 rsvd0;
63 u32 len;
64 u32 seq;
65 u16 urg;
66 u8 rsvd1;
67 u8 status;
68};
69
70/*
71 * We want this header's alignment to be no more stringent than 2-byte aligned.
72 * All fields are u8 or u16 except for the length. However, that field is not
73 * used, so we break it into two 16-bit parts to easily meet our alignment needs.
74 */
75struct cpl_tx_pkt {
76 u8 opcode;
77#if defined(__LITTLE_ENDIAN_BITFIELD)
78 u8 iff:4;
79 u8 ip_csum_dis:1;
80 u8 l4_csum_dis:1;
81 u8 vlan_valid:1;
82 u8 rsvd:1;
83#else
84 u8 rsvd:1;
85 u8 vlan_valid:1;
86 u8 l4_csum_dis:1;
87 u8 ip_csum_dis:1;
88 u8 iff:4;
89#endif
90 u16 vlan;
91 u16 len_hi;
92 u16 len_lo;
93};
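To make the comment above concrete (illustration only, not part of this patch; cpl_fill_tx_pkt() is a hypothetical helper): a sender splits the 32-bit packet length across len_hi/len_lo so the structure itself needs no more than 2-byte alignment.

/* Sketch only: filling a TX_PKT header with the split length field. */
static inline void cpl_fill_tx_pkt(struct cpl_tx_pkt *cpl, unsigned int iff,
				   u32 pkt_len)
{
	cpl->opcode = CPL_TX_PKT;
	cpl->iff = iff;
	cpl->vlan_valid = 0;
	cpl->ip_csum_dis = 0;
	cpl->l4_csum_dis = 0;
	cpl->rsvd = 0;
	/* Carry the 32-bit length as two 16-bit halves; any byte-order
	 * conversion the hardware may require is omitted from this sketch. */
	cpl->len_hi = (u16)(pkt_len >> 16);
	cpl->len_lo = (u16)pkt_len;
}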
94
95struct cpl_tx_pkt_lso {
96 u8 opcode;
97#if defined(__LITTLE_ENDIAN_BITFIELD)
98 u8 iff:4;
99 u8 ip_csum_dis:1;
100 u8 l4_csum_dis:1;
101 u8 vlan_valid:1;
102 u8 rsvd:1;
103#else
104 u8 rsvd:1;
105 u8 vlan_valid:1;
106 u8 l4_csum_dis:1;
107 u8 ip_csum_dis:1;
108 u8 iff:4;
109#endif
110 u16 vlan;
111 u32 len;
112
113 u32 rsvd2;
114 u8 rsvd3;
115#if defined(__LITTLE_ENDIAN_BITFIELD)
116 u8 tcp_hdr_words:4;
117 u8 ip_hdr_words:4;
118#else
119 u8 ip_hdr_words:4;
120 u8 tcp_hdr_words:4;
121#endif
122 u16 eth_type_mss;
123};
124
125struct cpl_rx_pkt {
126 u8 opcode;
127#if defined(__LITTLE_ENDIAN_BITFIELD)
128 u8 iff:4;
129 u8 csum_valid:1;
130 u8 bad_pkt:1;
131 u8 vlan_valid:1;
132 u8 rsvd:1;
133#else
134 u8 rsvd:1;
135 u8 vlan_valid:1;
136 u8 bad_pkt:1;
137 u8 csum_valid:1;
138 u8 iff:4;
139#endif
140 u16 csum;
141 u16 vlan;
142 u16 len;
143};
144
145#endif /* _CXGB_CPL5_CMD_H_ */
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
index 000000000000..349ebe783ed6
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1256 @@
1/*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.25 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#include "common.h"
40#include <linux/config.h>
41#include <linux/module.h>
42#include <linux/init.h>
43#include <linux/pci.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <linux/if_vlan.h>
47#include <linux/mii.h>
48#include <linux/sockios.h>
49#include <linux/proc_fs.h>
50#include <linux/dma-mapping.h>
51#include <asm/uaccess.h>
52
53#include "cpl5_cmd.h"
54#include "regs.h"
55#include "gmac.h"
56#include "cphy.h"
57#include "sge.h"
58#include "espi.h"
59
60#ifdef work_struct
61#include <linux/tqueue.h>
62#define INIT_WORK INIT_TQUEUE
63#define schedule_work schedule_task
64#define flush_scheduled_work flush_scheduled_tasks
65
66static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
67{
68 mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
69}
70
71static inline void cancel_mac_stats_update(struct adapter *ap)
72{
73 del_timer_sync(&ap->stats_update_timer);
74 flush_scheduled_tasks();
75}
76
77/*
78 * Stats update timer for 2.4. It schedules a task to do the actual update as
79 * we need to access MAC statistics in process context.
80 */
81static void mac_stats_timer(unsigned long data)
82{
83 struct adapter *ap = (struct adapter *)data;
84
85 schedule_task(&ap->stats_update_task);
86}
87#else
88#include <linux/workqueue.h>
89
90static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
91{
92 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
93}
94
95static inline void cancel_mac_stats_update(struct adapter *ap)
96{
97 cancel_delayed_work(&ap->stats_update_task);
98}
99#endif
100
101#define MAX_CMDQ_ENTRIES 16384
102#define MAX_CMDQ1_ENTRIES 1024
103#define MAX_RX_BUFFERS 16384
104#define MAX_RX_JUMBO_BUFFERS 16384
105#define MAX_TX_BUFFERS_HIGH 16384U
106#define MAX_TX_BUFFERS_LOW 1536U
107#define MIN_FL_ENTRIES 32
108
109#define PORT_MASK ((1 << MAX_NPORTS) - 1)
110
111#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
113 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
114
115/*
116 * The EEPROM is actually bigger but only the first few bytes are used so we
117 * only report those.
118 */
119#define EEPROM_SIZE 32
120
121MODULE_DESCRIPTION(DRV_DESCRIPTION);
122MODULE_AUTHOR("Chelsio Communications");
123MODULE_LICENSE("GPL");
124
125static int dflt_msg_enable = DFLT_MSG_ENABLE;
126
127MODULE_PARM(dflt_msg_enable, "i");
128MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
129
130
131static const char pci_speed[][4] = {
132 "33", "66", "100", "133"
133};
134
135/*
136 * Set up the MAC to receive the types of packets we want.
137 */
138static void t1_set_rxmode(struct net_device *dev)
139{
140 struct adapter *adapter = dev->priv;
141 struct cmac *mac = adapter->port[dev->if_port].mac;
142 struct t1_rx_mode rm;
143
144 rm.dev = dev;
145 rm.idx = 0;
146 rm.list = dev->mc_list;
147 mac->ops->set_rx_mode(mac, &rm);
148}
149
150static void link_report(struct port_info *p)
151{
152 if (!netif_carrier_ok(p->dev))
153 printk(KERN_INFO "%s: link down\n", p->dev->name);
154 else {
155 const char *s = "10Mbps";
156
157 switch (p->link_config.speed) {
158 case SPEED_10000: s = "10Gbps"; break;
159 case SPEED_1000: s = "1000Mbps"; break;
160 case SPEED_100: s = "100Mbps"; break;
161 }
162
163 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
164 p->dev->name, s,
165 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
166 }
167}
168
169void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
170 int speed, int duplex, int pause)
171{
172 struct port_info *p = &adapter->port[port_id];
173
174 if (link_stat != netif_carrier_ok(p->dev)) {
175 if (link_stat)
176 netif_carrier_on(p->dev);
177 else
178 netif_carrier_off(p->dev);
179 link_report(p);
180
181 }
182}
183
184static void link_start(struct port_info *p)
185{
186 struct cmac *mac = p->mac;
187
188 mac->ops->reset(mac);
189 if (mac->ops->macaddress_set)
190 mac->ops->macaddress_set(mac, p->dev->dev_addr);
191 t1_set_rxmode(p->dev);
192 t1_link_start(p->phy, mac, &p->link_config);
193 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
194}
195
196static void enable_hw_csum(struct adapter *adapter)
197{
198 if (adapter->flags & TSO_CAPABLE)
199 t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
200 t1_tp_set_tcp_checksum_offload(adapter, 1);
201}
202
203/*
204 * Things to do upon first use of a card.
205 * This must run with the rtnl lock held.
206 */
207static int cxgb_up(struct adapter *adapter)
208{
209 int err = 0;
210
211 if (!(adapter->flags & FULL_INIT_DONE)) {
212 err = t1_init_hw_modules(adapter);
213 if (err)
214 goto out_err;
215
216 enable_hw_csum(adapter);
217 adapter->flags |= FULL_INIT_DONE;
218 }
219
220 t1_interrupts_clear(adapter);
221 if ((err = request_irq(adapter->pdev->irq,
222 t1_select_intr_handler(adapter), SA_SHIRQ,
223 adapter->name, adapter))) {
224 goto out_err;
225 }
226 t1_sge_start(adapter->sge);
227 t1_interrupts_enable(adapter);
228 out_err:
229 return err;
230}
231
232/*
233 * Release resources when all the ports have been stopped.
234 */
235static void cxgb_down(struct adapter *adapter)
236{
237 t1_sge_stop(adapter->sge);
238 t1_interrupts_disable(adapter);
239 free_irq(adapter->pdev->irq, adapter);
240}
241
242static int cxgb_open(struct net_device *dev)
243{
244 int err;
245 struct adapter *adapter = dev->priv;
246 int other_ports = adapter->open_device_map & PORT_MASK;
247
248 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
249 return err;
250
251 __set_bit(dev->if_port, &adapter->open_device_map);
252 link_start(&adapter->port[dev->if_port]);
253 netif_start_queue(dev);
254 if (!other_ports && adapter->params.stats_update_period)
255 schedule_mac_stats_update(adapter,
256 adapter->params.stats_update_period);
257 return 0;
258}
259
260static int cxgb_close(struct net_device *dev)
261{
262 struct adapter *adapter = dev->priv;
263 struct port_info *p = &adapter->port[dev->if_port];
264 struct cmac *mac = p->mac;
265
266 netif_stop_queue(dev);
267 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
268 netif_carrier_off(dev);
269
270 clear_bit(dev->if_port, &adapter->open_device_map);
271 if (adapter->params.stats_update_period &&
272 !(adapter->open_device_map & PORT_MASK)) {
273 /* Stop statistics accumulation. */
274 smp_mb__after_clear_bit();
275 spin_lock(&adapter->work_lock); /* sync with update task */
276 spin_unlock(&adapter->work_lock);
277 cancel_mac_stats_update(adapter);
278 }
279
280 if (!adapter->open_device_map)
281 cxgb_down(adapter);
282 return 0;
283}
284
285static struct net_device_stats *t1_get_stats(struct net_device *dev)
286{
287 struct adapter *adapter = dev->priv;
288 struct port_info *p = &adapter->port[dev->if_port];
289 struct net_device_stats *ns = &p->netstats;
290 const struct cmac_statistics *pstats;
291
292 /* Do a full update of the MAC stats */
293 pstats = p->mac->ops->statistics_update(p->mac,
294 MAC_STATS_UPDATE_FULL);
295
296 ns->tx_packets = pstats->TxUnicastFramesOK +
297 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
298
299 ns->rx_packets = pstats->RxUnicastFramesOK +
300 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
301
302 ns->tx_bytes = pstats->TxOctetsOK;
303 ns->rx_bytes = pstats->RxOctetsOK;
304
305 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
306 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
307 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
308 pstats->RxFCSErrors + pstats->RxAlignErrors +
309 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
310 pstats->RxSymbolErrors + pstats->RxRuntErrors;
311
312 ns->multicast = pstats->RxMulticastFramesOK;
313 ns->collisions = pstats->TxTotalCollisions;
314
315 /* detailed rx_errors */
316 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
317 pstats->RxJabberErrors;
318 ns->rx_over_errors = 0;
319 ns->rx_crc_errors = pstats->RxFCSErrors;
320 ns->rx_frame_errors = pstats->RxAlignErrors;
321 ns->rx_fifo_errors = 0;
322 ns->rx_missed_errors = 0;
323
324 /* detailed tx_errors */
325 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
326 ns->tx_carrier_errors = 0;
327 ns->tx_fifo_errors = pstats->TxUnderrun;
328 ns->tx_heartbeat_errors = 0;
329 ns->tx_window_errors = pstats->TxLateCollisions;
330 return ns;
331}
332
333static u32 get_msglevel(struct net_device *dev)
334{
335 struct adapter *adapter = dev->priv;
336
337 return adapter->msg_enable;
338}
339
340static void set_msglevel(struct net_device *dev, u32 val)
341{
342 struct adapter *adapter = dev->priv;
343
344 adapter->msg_enable = val;
345}
346
347static char stats_strings[][ETH_GSTRING_LEN] = {
348 "TxOctetsOK",
349 "TxOctetsBad",
350 "TxUnicastFramesOK",
351 "TxMulticastFramesOK",
352 "TxBroadcastFramesOK",
353 "TxPauseFrames",
354 "TxFramesWithDeferredXmissions",
355 "TxLateCollisions",
356 "TxTotalCollisions",
357 "TxFramesAbortedDueToXSCollisions",
358 "TxUnderrun",
359 "TxLengthErrors",
360 "TxInternalMACXmitError",
361 "TxFramesWithExcessiveDeferral",
362 "TxFCSErrors",
363
364 "RxOctetsOK",
365 "RxOctetsBad",
366 "RxUnicastFramesOK",
367 "RxMulticastFramesOK",
368 "RxBroadcastFramesOK",
369 "RxPauseFrames",
370 "RxFCSErrors",
371 "RxAlignErrors",
372 "RxSymbolErrors",
373 "RxDataErrors",
374 "RxSequenceErrors",
375 "RxRuntErrors",
376 "RxJabberErrors",
377 "RxInternalMACRcvError",
378 "RxInRangeLengthErrors",
379 "RxOutOfRangeLengthField",
380 "RxFrameTooLongErrors",
381
382 "TSO",
383 "VLANextractions",
384 "VLANinsertions",
385 "RxCsumGood",
386 "TxCsumOffload",
387	"RxDrops",
388
389 "respQ_empty",
390 "respQ_overflow",
391 "freelistQ_empty",
392 "pkt_too_big",
393 "pkt_mismatch",
394 "cmdQ_full0",
395 "cmdQ_full1",
396 "tx_ipfrags",
397 "tx_reg_pkts",
398 "tx_lso_pkts",
399 "tx_do_cksum",
400
401 "espi_DIP2ParityErr",
402 "espi_DIP4Err",
403 "espi_RxDrops",
404 "espi_TxDrops",
405 "espi_RxOvfl",
406 "espi_ParityErr"
407};
408
409#define T2_REGMAP_SIZE (3 * 1024)
410
411static int get_regs_len(struct net_device *dev)
412{
413 return T2_REGMAP_SIZE;
414}
415
416static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
417{
418 struct adapter *adapter = dev->priv;
419
420 strcpy(info->driver, DRV_NAME);
421 strcpy(info->version, DRV_VERSION);
422 strcpy(info->fw_version, "N/A");
423 strcpy(info->bus_info, pci_name(adapter->pdev));
424}
425
426static int get_stats_count(struct net_device *dev)
427{
428 return ARRAY_SIZE(stats_strings);
429}
430
431static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
432{
433 if (stringset == ETH_SS_STATS)
434 memcpy(data, stats_strings, sizeof(stats_strings));
435}
436
437static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
438 u64 *data)
439{
440 struct adapter *adapter = dev->priv;
441 struct cmac *mac = adapter->port[dev->if_port].mac;
442 const struct cmac_statistics *s;
443 const struct sge_port_stats *ss;
444 const struct sge_intr_counts *t;
445
446 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
447 ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
448 t = t1_sge_get_intr_counts(adapter->sge);
449
450 *data++ = s->TxOctetsOK;
451 *data++ = s->TxOctetsBad;
452 *data++ = s->TxUnicastFramesOK;
453 *data++ = s->TxMulticastFramesOK;
454 *data++ = s->TxBroadcastFramesOK;
455 *data++ = s->TxPauseFrames;
456 *data++ = s->TxFramesWithDeferredXmissions;
457 *data++ = s->TxLateCollisions;
458 *data++ = s->TxTotalCollisions;
459 *data++ = s->TxFramesAbortedDueToXSCollisions;
460 *data++ = s->TxUnderrun;
461 *data++ = s->TxLengthErrors;
462 *data++ = s->TxInternalMACXmitError;
463 *data++ = s->TxFramesWithExcessiveDeferral;
464 *data++ = s->TxFCSErrors;
465
466 *data++ = s->RxOctetsOK;
467 *data++ = s->RxOctetsBad;
468 *data++ = s->RxUnicastFramesOK;
469 *data++ = s->RxMulticastFramesOK;
470 *data++ = s->RxBroadcastFramesOK;
471 *data++ = s->RxPauseFrames;
472 *data++ = s->RxFCSErrors;
473 *data++ = s->RxAlignErrors;
474 *data++ = s->RxSymbolErrors;
475 *data++ = s->RxDataErrors;
476 *data++ = s->RxSequenceErrors;
477 *data++ = s->RxRuntErrors;
478 *data++ = s->RxJabberErrors;
479 *data++ = s->RxInternalMACRcvError;
480 *data++ = s->RxInRangeLengthErrors;
481 *data++ = s->RxOutOfRangeLengthField;
482 *data++ = s->RxFrameTooLongErrors;
483
484 *data++ = ss->tso;
485 *data++ = ss->vlan_xtract;
486 *data++ = ss->vlan_insert;
487 *data++ = ss->rx_cso_good;
488 *data++ = ss->tx_cso;
489 *data++ = ss->rx_drops;
490
491 *data++ = (u64)t->respQ_empty;
492 *data++ = (u64)t->respQ_overflow;
493 *data++ = (u64)t->freelistQ_empty;
494 *data++ = (u64)t->pkt_too_big;
495 *data++ = (u64)t->pkt_mismatch;
496 *data++ = (u64)t->cmdQ_full[0];
497 *data++ = (u64)t->cmdQ_full[1];
498 *data++ = (u64)t->tx_ipfrags;
499 *data++ = (u64)t->tx_reg_pkts;
500 *data++ = (u64)t->tx_lso_pkts;
501 *data++ = (u64)t->tx_do_cksum;
502}
503
504static inline void reg_block_dump(struct adapter *ap, void *buf,
505 unsigned int start, unsigned int end)
506{
507 u32 *p = buf + start;
508
509 for ( ; start <= end; start += sizeof(u32))
510 *p++ = readl(ap->regs + start);
511}
512
513static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
514 void *buf)
515{
516 struct adapter *ap = dev->priv;
517
518 /*
519 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
520 */
521 regs->version = 2;
522
523 memset(buf, 0, T2_REGMAP_SIZE);
524 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
525}
526
527static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
528{
529 struct adapter *adapter = dev->priv;
530 struct port_info *p = &adapter->port[dev->if_port];
531
532 cmd->supported = p->link_config.supported;
533 cmd->advertising = p->link_config.advertising;
534
535 if (netif_carrier_ok(dev)) {
536 cmd->speed = p->link_config.speed;
537 cmd->duplex = p->link_config.duplex;
538 } else {
539 cmd->speed = -1;
540 cmd->duplex = -1;
541 }
542
543 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
544 cmd->phy_address = p->phy->addr;
545 cmd->transceiver = XCVR_EXTERNAL;
546 cmd->autoneg = p->link_config.autoneg;
547 cmd->maxtxpkt = 0;
548 cmd->maxrxpkt = 0;
549 return 0;
550}
551
552static int speed_duplex_to_caps(int speed, int duplex)
553{
554 int cap = 0;
555
556 switch (speed) {
557 case SPEED_10:
558 if (duplex == DUPLEX_FULL)
559 cap = SUPPORTED_10baseT_Full;
560 else
561 cap = SUPPORTED_10baseT_Half;
562 break;
563 case SPEED_100:
564 if (duplex == DUPLEX_FULL)
565 cap = SUPPORTED_100baseT_Full;
566 else
567 cap = SUPPORTED_100baseT_Half;
568 break;
569 case SPEED_1000:
570 if (duplex == DUPLEX_FULL)
571 cap = SUPPORTED_1000baseT_Full;
572 else
573 cap = SUPPORTED_1000baseT_Half;
574 break;
575 case SPEED_10000:
576 if (duplex == DUPLEX_FULL)
577 cap = SUPPORTED_10000baseT_Full;
578 }
579 return cap;
580}
581
582#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
583 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
584 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
585 ADVERTISED_10000baseT_Full)
586
587static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
588{
589 struct adapter *adapter = dev->priv;
590 struct port_info *p = &adapter->port[dev->if_port];
591 struct link_config *lc = &p->link_config;
592
593 if (!(lc->supported & SUPPORTED_Autoneg))
594 return -EOPNOTSUPP; /* can't change speed/duplex */
595
596 if (cmd->autoneg == AUTONEG_DISABLE) {
597 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
598
599 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
600 return -EINVAL;
601 lc->requested_speed = cmd->speed;
602 lc->requested_duplex = cmd->duplex;
603 lc->advertising = 0;
604 } else {
605 cmd->advertising &= ADVERTISED_MASK;
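		/* x & (x - 1) is non-zero iff x has more than one bit set,
		 * i.e. more than one link mode is being advertised. */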
606 if (cmd->advertising & (cmd->advertising - 1))
607 cmd->advertising = lc->supported;
608 cmd->advertising &= lc->supported;
609 if (!cmd->advertising)
610 return -EINVAL;
611 lc->requested_speed = SPEED_INVALID;
612 lc->requested_duplex = DUPLEX_INVALID;
613 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
614 }
615 lc->autoneg = cmd->autoneg;
616 if (netif_running(dev))
617 t1_link_start(p->phy, p->mac, lc);
618 return 0;
619}
620
621static void get_pauseparam(struct net_device *dev,
622 struct ethtool_pauseparam *epause)
623{
624 struct adapter *adapter = dev->priv;
625 struct port_info *p = &adapter->port[dev->if_port];
626
627 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
628 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
629 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
630}
631
632static int set_pauseparam(struct net_device *dev,
633 struct ethtool_pauseparam *epause)
634{
635 struct adapter *adapter = dev->priv;
636 struct port_info *p = &adapter->port[dev->if_port];
637 struct link_config *lc = &p->link_config;
638
639 if (epause->autoneg == AUTONEG_DISABLE)
640 lc->requested_fc = 0;
641 else if (lc->supported & SUPPORTED_Autoneg)
642 lc->requested_fc = PAUSE_AUTONEG;
643 else
644 return -EINVAL;
645
646 if (epause->rx_pause)
647 lc->requested_fc |= PAUSE_RX;
648 if (epause->tx_pause)
649 lc->requested_fc |= PAUSE_TX;
650 if (lc->autoneg == AUTONEG_ENABLE) {
651 if (netif_running(dev))
652 t1_link_start(p->phy, p->mac, lc);
653 } else {
654 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
655 if (netif_running(dev))
656 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
657 lc->fc);
658 }
659 return 0;
660}
661
662static u32 get_rx_csum(struct net_device *dev)
663{
664 struct adapter *adapter = dev->priv;
665
666 return (adapter->flags & RX_CSUM_ENABLED) != 0;
667}
668
669static int set_rx_csum(struct net_device *dev, u32 data)
670{
671 struct adapter *adapter = dev->priv;
672
673 if (data)
674 adapter->flags |= RX_CSUM_ENABLED;
675 else
676 adapter->flags &= ~RX_CSUM_ENABLED;
677 return 0;
678}
679
680static int set_tso(struct net_device *dev, u32 value)
681{
682 struct adapter *adapter = dev->priv;
683
684 if (!(adapter->flags & TSO_CAPABLE))
685 return value ? -EOPNOTSUPP : 0;
686 return ethtool_op_set_tso(dev, value);
687}
688
689static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
690{
691 struct adapter *adapter = dev->priv;
692 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
693
694 e->rx_max_pending = MAX_RX_BUFFERS;
695 e->rx_mini_max_pending = 0;
696 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
697 e->tx_max_pending = MAX_CMDQ_ENTRIES;
698
699 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
700 e->rx_mini_pending = 0;
701 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
702 e->tx_pending = adapter->params.sge.cmdQ_size[0];
703}
704
705static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
706{
707 struct adapter *adapter = dev->priv;
708 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
709
710 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
711 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
712 e->tx_pending > MAX_CMDQ_ENTRIES ||
713 e->rx_pending < MIN_FL_ENTRIES ||
714 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
715 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
716 return -EINVAL;
717
718 if (adapter->flags & FULL_INIT_DONE)
719 return -EBUSY;
720
721 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
722 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
723 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
724 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
725 MAX_CMDQ1_ENTRIES : e->tx_pending;
726 return 0;
727}
728
729static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
730{
731 struct adapter *adapter = dev->priv;
732
733 /*
734 * If RX coalescing is requested we use NAPI, otherwise interrupts.
735 * This choice can be made only when all ports and the TOE are off.
736 */
737 if (adapter->open_device_map == 0)
738 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
739
740 if (adapter->params.sge.polling) {
741 adapter->params.sge.rx_coalesce_usecs = 0;
742 } else {
743 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
744 }
745 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
746 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
747 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
748 return 0;
749}
750
751static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
752{
753 struct adapter *adapter = dev->priv;
754
755 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
756 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
757 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
758 return 0;
759}
760
761static int get_eeprom_len(struct net_device *dev)
762{
763 return EEPROM_SIZE;
764}
765
766#define EEPROM_MAGIC(ap) \
767 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
768
769static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
770 u8 *data)
771{
772 int i;
773 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
774 struct adapter *adapter = dev->priv;
775
776 e->magic = EEPROM_MAGIC(adapter);
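	/* The SEEPROM is read a whole 32-bit word at a time, so round the
	 * starting offset down to a word boundary and copy out only the
	 * requested window afterwards. */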
777 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
778 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
779 memcpy(data, buf + e->offset, e->len);
780 return 0;
781}
782
783static struct ethtool_ops t1_ethtool_ops = {
784 .get_settings = get_settings,
785 .set_settings = set_settings,
786 .get_drvinfo = get_drvinfo,
787 .get_msglevel = get_msglevel,
788 .set_msglevel = set_msglevel,
789 .get_ringparam = get_sge_param,
790 .set_ringparam = set_sge_param,
791 .get_coalesce = get_coalesce,
792 .set_coalesce = set_coalesce,
793 .get_eeprom_len = get_eeprom_len,
794 .get_eeprom = get_eeprom,
795 .get_pauseparam = get_pauseparam,
796 .set_pauseparam = set_pauseparam,
797 .get_rx_csum = get_rx_csum,
798 .set_rx_csum = set_rx_csum,
799 .get_tx_csum = ethtool_op_get_tx_csum,
800 .set_tx_csum = ethtool_op_set_tx_csum,
801 .get_sg = ethtool_op_get_sg,
802 .set_sg = ethtool_op_set_sg,
803 .get_link = ethtool_op_get_link,
804 .get_strings = get_strings,
805 .get_stats_count = get_stats_count,
806 .get_ethtool_stats = get_stats,
807 .get_regs_len = get_regs_len,
808 .get_regs = get_regs,
809 .get_tso = ethtool_op_get_tso,
810 .set_tso = set_tso,
811};
812
813static void cxgb_proc_cleanup(struct adapter *adapter,
814 struct proc_dir_entry *dir)
815{
816 const char *name;
817 name = adapter->name;
818 remove_proc_entry(name, dir);
819}
820//#define chtoe_setup_toedev(adapter) NULL
821#define update_mtu_tab(adapter)
822#define write_smt_entry(adapter, idx)
823
824static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
825{
826 struct adapter *adapter = dev->priv;
827 struct mii_ioctl_data *data = if_mii(req);
828
829 switch (cmd) {
830 case SIOCGMIIPHY:
831 data->phy_id = adapter->port[dev->if_port].phy->addr;
832 /* FALLTHRU */
833 case SIOCGMIIREG: {
834 struct cphy *phy = adapter->port[dev->if_port].phy;
835 u32 val;
836
837 if (!phy->mdio_read)
838 return -EOPNOTSUPP;
839 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
840 &val);
841 data->val_out = val;
842 break;
843 }
844 case SIOCSMIIREG: {
845 struct cphy *phy = adapter->port[dev->if_port].phy;
846
847 if (!capable(CAP_NET_ADMIN))
848 return -EPERM;
849 if (!phy->mdio_write)
850 return -EOPNOTSUPP;
851 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
852 data->val_in);
853 break;
854 }
855
856 default:
857 return -EOPNOTSUPP;
858 }
859 return 0;
860}
861
862static int t1_change_mtu(struct net_device *dev, int new_mtu)
863{
864 int ret;
865 struct adapter *adapter = dev->priv;
866 struct cmac *mac = adapter->port[dev->if_port].mac;
867
868 if (!mac->ops->set_mtu)
869 return -EOPNOTSUPP;
870 if (new_mtu < 68)
871 return -EINVAL;
872 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
873 return ret;
874 dev->mtu = new_mtu;
875 return 0;
876}
877
878static int t1_set_mac_addr(struct net_device *dev, void *p)
879{
880 struct adapter *adapter = dev->priv;
881 struct cmac *mac = adapter->port[dev->if_port].mac;
882 struct sockaddr *addr = p;
883
884 if (!mac->ops->macaddress_set)
885 return -EOPNOTSUPP;
886
887 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
888 mac->ops->macaddress_set(mac, dev->dev_addr);
889 return 0;
890}
891
892#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
893static void vlan_rx_register(struct net_device *dev,
894 struct vlan_group *grp)
895{
896 struct adapter *adapter = dev->priv;
897
898 spin_lock_irq(&adapter->async_lock);
899 adapter->vlan_grp = grp;
900 t1_set_vlan_accel(adapter, grp != NULL);
901 spin_unlock_irq(&adapter->async_lock);
902}
903
904static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
905{
906 struct adapter *adapter = dev->priv;
907
908 spin_lock_irq(&adapter->async_lock);
909 if (adapter->vlan_grp)
910 adapter->vlan_grp->vlan_devices[vid] = NULL;
911 spin_unlock_irq(&adapter->async_lock);
912}
913#endif
914
915#ifdef CONFIG_NET_POLL_CONTROLLER
916static void t1_netpoll(struct net_device *dev)
917{
918 unsigned long flags;
919 struct adapter *adapter = dev->priv;
920
921 local_irq_save(flags);
922 t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
923 local_irq_restore(flags);
924}
925#endif
926
927/*
928 * Periodic accumulation of MAC statistics. This is used only if the MAC
929 * does not have any other way to prevent stats counter overflow.
930 */
931static void mac_stats_task(void *data)
932{
933 int i;
934 struct adapter *adapter = data;
935
936 for_each_port(adapter, i) {
937 struct port_info *p = &adapter->port[i];
938
939 if (netif_running(p->dev))
940 p->mac->ops->statistics_update(p->mac,
941 MAC_STATS_UPDATE_FAST);
942 }
943
944 /* Schedule the next statistics update if any port is active. */
945 spin_lock(&adapter->work_lock);
946 if (adapter->open_device_map & PORT_MASK)
947 schedule_mac_stats_update(adapter,
948 adapter->params.stats_update_period);
949 spin_unlock(&adapter->work_lock);
950}
951
952/*
953 * Processes elmer0 external interrupts in process context.
954 */
955static void ext_intr_task(void *data)
956{
957 struct adapter *adapter = data;
958
959 elmer0_ext_intr_handler(adapter);
960
961 /* Now reenable external interrupts */
962 spin_lock_irq(&adapter->async_lock);
963 adapter->slow_intr_mask |= F_PL_INTR_EXT;
964 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
965 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
966 adapter->regs + A_PL_ENABLE);
967 spin_unlock_irq(&adapter->async_lock);
968}
969
970/*
971 * Interrupt-context handler for elmer0 external interrupts.
972 */
973void t1_elmer0_ext_intr(struct adapter *adapter)
974{
975 /*
976 * Schedule a task to handle external interrupts as we require
977 * a process context. We disable EXT interrupts in the interim
978 * and let the task reenable them when it's done.
979 */
980 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
981 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
982 adapter->regs + A_PL_ENABLE);
983 schedule_work(&adapter->ext_intr_handler_task);
984}
985
986void t1_fatal_err(struct adapter *adapter)
987{
988 if (adapter->flags & FULL_INIT_DONE) {
989 t1_sge_stop(adapter->sge);
990 t1_interrupts_disable(adapter);
991 }
992 CH_ALERT("%s: encountered fatal error, operation suspended\n",
993 adapter->name);
994}
995
996static int __devinit init_one(struct pci_dev *pdev,
997 const struct pci_device_id *ent)
998{
999 static int version_printed;
1000
1001 int i, err, pci_using_dac = 0;
1002 unsigned long mmio_start, mmio_len;
1003 const struct board_info *bi;
1004 struct adapter *adapter = NULL;
1005 struct port_info *pi;
1006
1007 if (!version_printed) {
1008 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1009 DRV_VERSION);
1010 ++version_printed;
1011 }
1012
1013 err = pci_enable_device(pdev);
1014 if (err)
1015 return err;
1016
1017 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1018 CH_ERR("%s: cannot find PCI device memory base address\n",
1019 pci_name(pdev));
1020 err = -ENODEV;
1021 goto out_disable_pdev;
1022 }
1023
1024 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1025 pci_using_dac = 1;
1026
1027 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1028			CH_ERR("%s: unable to obtain 64-bit DMA for "
1029 "consistent allocations\n", pci_name(pdev));
1030 err = -ENODEV;
1031 goto out_disable_pdev;
1032 }
1033
1034 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1035 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1036 goto out_disable_pdev;
1037 }
1038
1039 err = pci_request_regions(pdev, DRV_NAME);
1040 if (err) {
1041 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1042 goto out_disable_pdev;
1043 }
1044
1045 pci_set_master(pdev);
1046
1047 mmio_start = pci_resource_start(pdev, 0);
1048 mmio_len = pci_resource_len(pdev, 0);
1049 bi = t1_get_board_info(ent->driver_data);
1050
1051 for (i = 0; i < bi->port_number; ++i) {
1052 struct net_device *netdev;
1053
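		/* The adapter private area is allocated together with the
		 * first port's net_device; subsequent ports share it. */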
1054 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1055 if (!netdev) {
1056 err = -ENOMEM;
1057 goto out_free_dev;
1058 }
1059
1060 SET_MODULE_OWNER(netdev);
1061 SET_NETDEV_DEV(netdev, &pdev->dev);
1062
1063 if (!adapter) {
1064 adapter = netdev->priv;
1065 adapter->pdev = pdev;
1066 adapter->port[0].dev = netdev; /* so we don't leak it */
1067
1068 adapter->regs = ioremap(mmio_start, mmio_len);
1069 if (!adapter->regs) {
1070 CH_ERR("%s: cannot map device registers\n",
1071 pci_name(pdev));
1072 err = -ENOMEM;
1073 goto out_free_dev;
1074 }
1075
1076 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1077 err = -ENODEV; /* Can't handle this chip rev */
1078 goto out_free_dev;
1079 }
1080
1081 adapter->name = pci_name(pdev);
1082 adapter->msg_enable = dflt_msg_enable;
1083 adapter->mmio_len = mmio_len;
1084
1085 init_MUTEX(&adapter->mib_mutex);
1086 spin_lock_init(&adapter->tpi_lock);
1087 spin_lock_init(&adapter->work_lock);
1088 spin_lock_init(&adapter->async_lock);
1089
1090 INIT_WORK(&adapter->ext_intr_handler_task,
1091 ext_intr_task, adapter);
1092 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1093 adapter);
1094#ifdef work_struct
1095 init_timer(&adapter->stats_update_timer);
1096 adapter->stats_update_timer.function = mac_stats_timer;
1097 adapter->stats_update_timer.data =
1098 (unsigned long)adapter;
1099#endif
1100
1101 pci_set_drvdata(pdev, netdev);
1102 }
1103
1104 pi = &adapter->port[i];
1105 pi->dev = netdev;
1106 netif_carrier_off(netdev);
1107 netdev->irq = pdev->irq;
1108 netdev->if_port = i;
1109 netdev->mem_start = mmio_start;
1110 netdev->mem_end = mmio_start + mmio_len - 1;
1111 netdev->priv = adapter;
1112 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1113 netdev->features |= NETIF_F_LLTX;
1114
1115 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1116 if (pci_using_dac)
1117 netdev->features |= NETIF_F_HIGHDMA;
1118 if (vlan_tso_capable(adapter)) {
1119#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1120 adapter->flags |= VLAN_ACCEL_CAPABLE;
1121 netdev->features |=
1122 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1123 netdev->vlan_rx_register = vlan_rx_register;
1124 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1125#endif
1126 adapter->flags |= TSO_CAPABLE;
1127 netdev->features |= NETIF_F_TSO;
1128 }
1129
1130 netdev->open = cxgb_open;
1131 netdev->stop = cxgb_close;
1132 netdev->hard_start_xmit = t1_start_xmit;
1133 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1134 sizeof(struct cpl_tx_pkt_lso) :
1135 sizeof(struct cpl_tx_pkt);
1136 netdev->get_stats = t1_get_stats;
1137 netdev->set_multicast_list = t1_set_rxmode;
1138 netdev->do_ioctl = t1_ioctl;
1139 netdev->change_mtu = t1_change_mtu;
1140 netdev->set_mac_address = t1_set_mac_addr;
1141#ifdef CONFIG_NET_POLL_CONTROLLER
1142 netdev->poll_controller = t1_netpoll;
1143#endif
1144 netdev->weight = 64;
1145
1146 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1147 }
1148
1149 if (t1_init_sw_modules(adapter, bi) < 0) {
1150 err = -ENODEV;
1151 goto out_free_dev;
1152 }
1153
1154 /*
1155 * The card is now ready to go. If any errors occur during device
1156	 * registration, we do not fail the whole card but rather proceed only
1157	 * with the ports we manage to register successfully. However, we must
1158 * register at least one net device.
1159 */
1160 for (i = 0; i < bi->port_number; ++i) {
1161 err = register_netdev(adapter->port[i].dev);
1162 if (err)
1163 CH_WARN("%s: cannot register net device %s, skipping\n",
1164 pci_name(pdev), adapter->port[i].dev->name);
1165 else {
1166 /*
1167 * Change the name we use for messages to the name of
1168 * the first successfully registered interface.
1169 */
1170 if (!adapter->registered_device_map)
1171 adapter->name = adapter->port[i].dev->name;
1172
1173 __set_bit(i, &adapter->registered_device_map);
1174 }
1175 }
1176 if (!adapter->registered_device_map) {
1177 CH_ERR("%s: could not register any net devices\n",
1178 pci_name(pdev));
1179 goto out_release_adapter_res;
1180 }
1181
1182 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1183 bi->desc, adapter->params.chip_revision,
1184 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1185 adapter->params.pci.speed, adapter->params.pci.width);
1186 return 0;
1187
1188 out_release_adapter_res:
1189 t1_free_sw_modules(adapter);
1190 out_free_dev:
1191 if (adapter) {
1192 if (adapter->regs) iounmap(adapter->regs);
1193 for (i = bi->port_number - 1; i >= 0; --i)
1194 if (adapter->port[i].dev) {
1195 cxgb_proc_cleanup(adapter, proc_root_driver);
1196 kfree(adapter->port[i].dev);
1197 }
1198 }
1199 pci_release_regions(pdev);
1200 out_disable_pdev:
1201 pci_disable_device(pdev);
1202 pci_set_drvdata(pdev, NULL);
1203 return err;
1204}
1205
1206static inline void t1_sw_reset(struct pci_dev *pdev)
1207{
1208 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1209 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1210}
1211
1212static void __devexit remove_one(struct pci_dev *pdev)
1213{
1214 struct net_device *dev = pci_get_drvdata(pdev);
1215
1216 if (dev) {
1217 int i;
1218 struct adapter *adapter = dev->priv;
1219
1220 for_each_port(adapter, i)
1221 if (test_bit(i, &adapter->registered_device_map))
1222 unregister_netdev(adapter->port[i].dev);
1223
1224 t1_free_sw_modules(adapter);
1225 iounmap(adapter->regs);
1226 while (--i >= 0)
1227 if (adapter->port[i].dev) {
1228 cxgb_proc_cleanup(adapter, proc_root_driver);
1229 kfree(adapter->port[i].dev);
1230 }
1231 pci_release_regions(pdev);
1232 pci_disable_device(pdev);
1233 pci_set_drvdata(pdev, NULL);
1234 t1_sw_reset(pdev);
1235 }
1236}
1237
1238static struct pci_driver driver = {
1239 .name = DRV_NAME,
1240 .id_table = t1_pci_tbl,
1241 .probe = init_one,
1242 .remove = __devexit_p(remove_one),
1243};
1244
1245static int __init t1_init_module(void)
1246{
1247 return pci_module_init(&driver);
1248}
1249
1250static void __exit t1_cleanup_module(void)
1251{
1252 pci_unregister_driver(&driver);
1253}
1254
1255module_init(t1_init_module);
1256module_exit(t1_cleanup_module);
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
index 000000000000..5590cb2dac19
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,151 @@
1/*****************************************************************************
2 * *
3 * File: elmer0.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 22:49:43 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ELMER0_H_
40#define _CXGB_ELMER0_H_
41
42/* ELMER0 registers */
43#define A_ELMER0_VERSION 0x100000
44#define A_ELMER0_PHY_CFG 0x100004
45#define A_ELMER0_INT_ENABLE 0x100008
46#define A_ELMER0_INT_CAUSE 0x10000c
47#define A_ELMER0_GPI_CFG 0x100010
48#define A_ELMER0_GPI_STAT 0x100014
49#define A_ELMER0_GPO 0x100018
50#define A_ELMER0_PORT0_MI1_CFG 0x400000
51
52#define S_MI1_MDI_ENABLE 0
53#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
54#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
55
56#define S_MI1_MDI_INVERT 1
57#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
58#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
59
60#define S_MI1_PREAMBLE_ENABLE 2
61#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
62#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
63
64#define S_MI1_SOF 3
65#define M_MI1_SOF 0x3
66#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
67#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
68
69#define S_MI1_CLK_DIV 5
70#define M_MI1_CLK_DIV 0xff
71#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
72#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
73
74#define A_ELMER0_PORT0_MI1_ADDR 0x400004
75
76#define S_MI1_REG_ADDR 0
77#define M_MI1_REG_ADDR 0x1f
78#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
79#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
80
81#define S_MI1_PHY_ADDR 5
82#define M_MI1_PHY_ADDR 0x1f
83#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
84#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
85
86#define A_ELMER0_PORT0_MI1_DATA 0x400008
87
88#define S_MI1_DATA 0
89#define M_MI1_DATA 0xffff
90#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
91#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
92
93#define A_ELMER0_PORT0_MI1_OP 0x40000c
94
95#define S_MI1_OP 0
96#define M_MI1_OP 0x3
97#define V_MI1_OP(x) ((x) << S_MI1_OP)
98#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
99
100#define S_MI1_ADDR_AUTOINC 2
101#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
102#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
103
104#define S_MI1_OP_BUSY 31
105#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
106#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
107
108#define A_ELMER0_PORT1_MI1_CFG 0x500000
109#define A_ELMER0_PORT1_MI1_ADDR 0x500004
110#define A_ELMER0_PORT1_MI1_DATA 0x500008
111#define A_ELMER0_PORT1_MI1_OP 0x50000c
112#define A_ELMER0_PORT2_MI1_CFG 0x600000
113#define A_ELMER0_PORT2_MI1_ADDR 0x600004
114#define A_ELMER0_PORT2_MI1_DATA 0x600008
115#define A_ELMER0_PORT2_MI1_OP 0x60000c
116#define A_ELMER0_PORT3_MI1_CFG 0x700000
117#define A_ELMER0_PORT3_MI1_ADDR 0x700004
118#define A_ELMER0_PORT3_MI1_DATA 0x700008
119#define A_ELMER0_PORT3_MI1_OP 0x70000c
120
121/* Simple bit definitions for the GPI and GPO registers. */
122#define ELMER0_GP_BIT0 0x0001
123#define ELMER0_GP_BIT1 0x0002
124#define ELMER0_GP_BIT2 0x0004
125#define ELMER0_GP_BIT3 0x0008
126#define ELMER0_GP_BIT4 0x0010
127#define ELMER0_GP_BIT5 0x0020
128#define ELMER0_GP_BIT6 0x0040
129#define ELMER0_GP_BIT7 0x0080
130#define ELMER0_GP_BIT8 0x0100
131#define ELMER0_GP_BIT9 0x0200
132#define ELMER0_GP_BIT10 0x0400
133#define ELMER0_GP_BIT11 0x0800
134#define ELMER0_GP_BIT12 0x1000
135#define ELMER0_GP_BIT13 0x2000
136#define ELMER0_GP_BIT14 0x4000
137#define ELMER0_GP_BIT15 0x8000
138#define ELMER0_GP_BIT16 0x10000
139#define ELMER0_GP_BIT17 0x20000
140#define ELMER0_GP_BIT18 0x40000
141#define ELMER0_GP_BIT19 0x80000
142
143#define MI1_OP_DIRECT_WRITE 1
144#define MI1_OP_DIRECT_READ 2
145
146#define MI1_OP_INDIRECT_ADDRESS 0
147#define MI1_OP_INDIRECT_WRITE 1
148#define MI1_OP_INDIRECT_READ_INC 2
149#define MI1_OP_INDIRECT_READ 3
150
151#endif /* _CXGB_ELMER0_H_ */
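For readers unfamiliar with the S_/M_/V_/F_/G_ naming used above (shift, mask, make-value, single-bit flag, get-field), the sketch below shows how MI1 register fields are composed and decoded; mk_mi1_addr() and mi1_clk_div() are hypothetical helpers, not part of this patch.

/* Sketch only: composing and decoding ELMER0 MI1 register fields. */
static inline u32 mk_mi1_addr(unsigned int phy_addr, unsigned int reg_addr)
{
	/* Pack the PHY and register addresses for A_ELMER0_PORT0_MI1_ADDR. */
	return V_MI1_PHY_ADDR(phy_addr) | V_MI1_REG_ADDR(reg_addr);
}

static inline unsigned int mi1_clk_div(u32 mi1_cfg)
{
	/* Extract the clock divider field from an MI1 CFG value. */
	return G_MI1_CLK_DIV(mi1_cfg);
}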
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
index 000000000000..230642571c92
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,346 @@
1/*****************************************************************************
2 * *
3 * File: espi.c *
4 * $Revision: 1.14 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * Ethernet SPI functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "espi.h"
43
44struct peespi {
45 adapter_t *adapter;
46 struct espi_intr_counts intr_cnt;
47 u32 misc_ctrl;
48 spinlock_t lock;
49};
50
51#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
52 F_RAMPARITYERR | F_DIP2PARITYERR)
53#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
54 | F_MONITORED_INTERFACE)
55
56#define TRICN_CNFG 14
57#define TRICN_CMD_READ 0x11
58#define TRICN_CMD_WRITE 0x21
59#define TRICN_CMD_ATTEMPTS 10
60
61static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
62 int ch_addr, int reg_offset, u32 wr_data)
63{
64 int busy, attempts = TRICN_CMD_ATTEMPTS;
65
66 writel(V_WRITE_DATA(wr_data) |
67 V_REGISTER_OFFSET(reg_offset) |
68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
69 V_BUNDLE_ADDR(bundle_addr) |
70 V_SPI4_COMMAND(TRICN_CMD_WRITE),
71 adapter->regs + A_ESPI_CMD_ADDR);
72 writel(0, adapter->regs + A_ESPI_GOSTAT);
73
74 do {
75 busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
76 } while (busy && --attempts);
77
78 if (busy)
79 CH_ERR("%s: TRICN write timed out\n", adapter->name);
80
81 return busy;
82}
83
84/* 1. Deassert rx_reset_core. */
85/* 2. Program TRICN_CNFG registers. */
86/* 3. Deassert rx_reset_link */
87static int tricn_init(adapter_t *adapter)
88{
89 int i = 0;
90 int sme = 1;
91 int stat = 0;
92 int timeout = 0;
93 int is_ready = 0;
94 int dynamic_deskew = 0;
95
96 if (dynamic_deskew)
97 sme = 0;
98
99
100 /* 1 */
101 timeout=1000;
102 do {
103 stat = readl(adapter->regs + A_ESPI_RX_RESET);
104 is_ready = (stat & 0x4);
105 timeout--;
106 udelay(5);
107	} while (!is_ready && (timeout > 0));
108	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
109	if (!is_ready)
110	{
111		CH_ERR("ESPI: tricn_init() timed out waiting for rx_reset_core\n");
112 t1_fatal_err(adapter);
113 }
114
115 /* 2 */
116 if (sme) {
117 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
118 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
119 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
120 }
121 for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
122 for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
123 for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
124 for (i=4; i<= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
125 for (i=5; i<= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
126 for (i=6; i<= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
127 for (i=7; i<= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
128 for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
129
130 /* 3 */
131 writel(0x3, adapter->regs + A_ESPI_RX_RESET);
132
133 return 0;
134}
135
136void t1_espi_intr_enable(struct peespi *espi)
137{
138 u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
139
140 /*
141 * Cannot enable ESPI interrupts on T1B because HW asserts the
142 * interrupt incorrectly, namely the driver gets ESPI interrupts
143	 * but no data is actually dropped (this can be verified by reading the ESPI
144 * drop registers). Also, once the ESPI interrupt is asserted it
145 * cannot be cleared (HW bug).
146 */
147 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
148 writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
149 writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
150}
151
152void t1_espi_intr_clear(struct peespi *espi)
153{
154 writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
155 writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
156}
157
158void t1_espi_intr_disable(struct peespi *espi)
159{
160 u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
161
162 writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
163 writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
164}
165
166int t1_espi_intr_handler(struct peespi *espi)
167{
168 u32 cnt;
169 u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
170
171 if (status & F_DIP4ERR)
172 espi->intr_cnt.DIP4_err++;
173 if (status & F_RXDROP)
174 espi->intr_cnt.rx_drops++;
175 if (status & F_TXDROP)
176 espi->intr_cnt.tx_drops++;
177 if (status & F_RXOVERFLOW)
178 espi->intr_cnt.rx_ovflw++;
179 if (status & F_RAMPARITYERR)
180 espi->intr_cnt.parity_err++;
181 if (status & F_DIP2PARITYERR) {
182 espi->intr_cnt.DIP2_parity_err++;
183
184 /*
185 * Must read the error count to clear the interrupt
186 * that it causes.
187 */
188 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
189 }
190
191 /*
192 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
193 * write the status as is.
194 */
195 if (status && t1_is_T1B(espi->adapter))
196 status = 1;
197 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
198 return 0;
199}
200
201const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
202{
203 return &espi->intr_cnt;
204}
205
206static void espi_setup_for_pm3393(adapter_t *adapter)
207{
208 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
209
210 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
211 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
212 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
213 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
214 writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
215 writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
216 writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
217 writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
218 writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
219}
220
221/* T2 Init part -- */
222/* 1. Set T_ESPI_MISCCTRL_ADDR */
223/* 2. Init ESPI registers. */
224/* 3. Init TriCN Hard Macro */
225int t1_espi_init(struct peespi *espi, int mac_type, int nports)
226{
227 u32 cnt;
228
229 u32 status_enable_extra = 0;
230 adapter_t *adapter = espi->adapter;
231 u32 status, burstval = 0x800100;
232
233 /* Disable ESPI training. MACs that can handle it enable it below. */
234 writel(0, adapter->regs + A_ESPI_TRAIN);
235
236 if (is_T2(adapter)) {
237 writel(V_OUT_OF_SYNC_COUNT(4) |
238 V_DIP2_PARITY_ERR_THRES(3) |
239 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
240 if (nports == 4) {
241 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
242 burstval = 0x200040;
243 }
244 }
245 writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
246
247 switch (mac_type) {
248 case CHBT_MAC_PM3393:
249 espi_setup_for_pm3393(adapter);
250 break;
251 default:
252 return -1;
253 }
254
255 /*
256 * Make sure any pending interrupts from the SPI are
257	 * cleared before enabling the interrupt.
258 */
259 writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
260 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
261 if (status & F_DIP2PARITYERR) {
262 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
263 }
264
265 /*
266 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
267 * write the status as is.
268 */
269 if (status && t1_is_T1B(espi->adapter))
270 status = 1;
271 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
272
273 writel(status_enable_extra | F_RXSTATUSENABLE,
274 adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
275
276 if (is_T2(adapter)) {
277 tricn_init(adapter);
278 /*
279 * Always position the control at the 1st port egress IN
280		 * (sop, eop) counter to reduce PIOs for the T/N210 workaround.
281 */
282 espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
283 & ~MON_MASK) | (F_MONITORED_DIRECTION
284 | F_MONITORED_INTERFACE);
285 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
286 spin_lock_init(&espi->lock);
287 }
288
289 return 0;
290}
291
292void t1_espi_destroy(struct peespi *espi)
293{
294 kfree(espi);
295}
296
297struct peespi *t1_espi_create(adapter_t *adapter)
298{
299 struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
300
301	if (espi) {
302		memset(espi, 0, sizeof(*espi));
303		espi->adapter = adapter;
304	}
305 return espi;
306}
307
308void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
309{
310 struct peespi *espi = adapter->espi;
311
312 if (!is_T2(adapter))
313 return;
314 spin_lock(&espi->lock);
315 espi->misc_ctrl = (val & ~MON_MASK) |
316 (espi->misc_ctrl & MON_MASK);
317 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
318 spin_unlock(&espi->lock);
319}
320
321u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
322{
323 u32 sel;
324
325 struct peespi *espi = adapter->espi;
326
327 if (!is_T2(adapter))
328 return 0;
329 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
330	if (!wait) {
331		if (!spin_trylock(&espi->lock))
332			return 0;
333	} else {
334		spin_lock(&espi->lock);
335	}
336	if (sel != (espi->misc_ctrl & MON_MASK)) {
337		writel((espi->misc_ctrl & ~MON_MASK) | sel,
338			adapter->regs + A_ESPI_MISC_CONTROL);
339		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
340		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
341	} else {
342		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
343	}
344 spin_unlock(&espi->lock);
345 return sel;
346}
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
index 000000000000..c90e37f8457c
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,68 @@
1/*****************************************************************************
2 * *
3 * File: espi.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ESPI_H_
40#define _CXGB_ESPI_H_
41
42#include "common.h"
43
44struct espi_intr_counts {
45 unsigned int DIP4_err;
46 unsigned int rx_drops;
47 unsigned int tx_drops;
48 unsigned int rx_ovflw;
49 unsigned int parity_err;
50 unsigned int DIP2_parity_err;
51};
52
53struct peespi;
54
55struct peespi *t1_espi_create(adapter_t *adapter);
56void t1_espi_destroy(struct peespi *espi);
57int t1_espi_init(struct peespi *espi, int mac_type, int nports);
58
59void t1_espi_intr_enable(struct peespi *);
60void t1_espi_intr_clear(struct peespi *);
61void t1_espi_intr_disable(struct peespi *);
62int t1_espi_intr_handler(struct peespi *);
63const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
64
65void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
66u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
67
68#endif /* _CXGB_ESPI_H_ */
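The header above exposes the ESPI block as an opaque struct peespi with a create/init/destroy lifecycle plus interrupt hooks. A hedged sketch of how a caller might drive that lifecycle for a PM3393-based board (my_setup_espi() and the chosen error codes are illustrative assumptions, not part of this patch):

	static int my_setup_espi(adapter_t *adapter, int nports)
	{
		struct peespi *espi = t1_espi_create(adapter);

		if (!espi)
			return -ENOMEM;
		adapter->espi = espi;
		/* Programs the ESPI registers and, on T2, the TriCN macro. */
		if (t1_espi_init(espi, CHBT_MAC_PM3393, nports)) {
			adapter->espi = NULL;
			t1_espi_destroy(espi);
			return -EIO;
		}
		return 0;
	}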
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
index 000000000000..746b0eeea964
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,134 @@
1/*****************************************************************************
2 * *
3 * File: gmac.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * Generic MAC functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#ifndef _CXGB_GMAC_H_
41#define _CXGB_GMAC_H_
42
43#include "common.h"
44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
47
48struct cmac_statistics {
49 /* Transmit */
50 u64 TxOctetsOK;
51 u64 TxOctetsBad;
52 u64 TxUnicastFramesOK;
53 u64 TxMulticastFramesOK;
54 u64 TxBroadcastFramesOK;
55 u64 TxPauseFrames;
56 u64 TxFramesWithDeferredXmissions;
57 u64 TxLateCollisions;
58 u64 TxTotalCollisions;
59 u64 TxFramesAbortedDueToXSCollisions;
60 u64 TxUnderrun;
61 u64 TxLengthErrors;
62 u64 TxInternalMACXmitError;
63 u64 TxFramesWithExcessiveDeferral;
64 u64 TxFCSErrors;
65
66 /* Receive */
67 u64 RxOctetsOK;
68 u64 RxOctetsBad;
69 u64 RxUnicastFramesOK;
70 u64 RxMulticastFramesOK;
71 u64 RxBroadcastFramesOK;
72 u64 RxPauseFrames;
73 u64 RxFCSErrors;
74 u64 RxAlignErrors;
75 u64 RxSymbolErrors;
76 u64 RxDataErrors;
77 u64 RxSequenceErrors;
78 u64 RxRuntErrors;
79 u64 RxJabberErrors;
80 u64 RxInternalMACRcvError;
81 u64 RxInRangeLengthErrors;
82 u64 RxOutOfRangeLengthField;
83 u64 RxFrameTooLongErrors;
84};
85
86struct cmac_ops {
87 void (*destroy)(struct cmac *);
88 int (*reset)(struct cmac *);
89 int (*interrupt_enable)(struct cmac *);
90 int (*interrupt_disable)(struct cmac *);
91 int (*interrupt_clear)(struct cmac *);
92 int (*interrupt_handler)(struct cmac *);
93
94 int (*enable)(struct cmac *, int);
95 int (*disable)(struct cmac *, int);
96
97 int (*loopback_enable)(struct cmac *);
98 int (*loopback_disable)(struct cmac *);
99
100 int (*set_mtu)(struct cmac *, int mtu);
101 int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
102
103 int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
104 int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
105 int *fc);
106
107 const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
108
109 int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
110 int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
111};
112
113typedef struct _cmac_instance cmac_instance;
114
115struct cmac {
116 struct cmac_statistics stats;
117 adapter_t *adapter;
118 struct cmac_ops *ops;
119 cmac_instance *instance;
120};
121
122struct gmac {
123 unsigned int stats_update_period;
124 struct cmac *(*create)(adapter_t *adapter, int index);
125 int (*reset)(adapter_t *);
126};
127
128extern struct gmac t1_pm3393_ops;
129extern struct gmac t1_chelsio_mac_ops;
130extern struct gmac t1_vsc7321_ops;
131extern struct gmac t1_ixf1010_ops;
132extern struct gmac t1_dummy_mac_ops;
133
134#endif /* _CXGB_GMAC_H_ */
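gmac.h hides each external MAC behind struct cmac and its ops table, so MAC-agnostic code never calls a pm3393_* routine by name; a board picks one of the struct gmac entries above (t1_pm3393_ops and friends) and everything else goes through cmac->ops. A rough sketch of such a generic caller (my_mac_bringup() is an illustrative assumption, not driver code):

	static int my_mac_bringup(struct cmac *mac, int mtu)
	{
		/* The same calls work for any MAC type behind the ops table. */
		int ret = mac->ops->set_mtu(mac, mtu);

		if (ret)
			return ret;
		mac->ops->interrupt_enable(mac);
		return mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	}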
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
index 000000000000..db5034282782
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,252 @@
1/*****************************************************************************
2 * *
3 * File: mv88x201x.c *
4 * $Revision: 1.12 $ *
5 * $Date: 2005/04/15 19:27:14 $ *
6 * Description: *
7 * Marvell PHY (mv88x201x) functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "cphy.h"
41#include "elmer0.h"
42
43/*
44 * The 88x2010 Rev C. requires some link status registers to be read
45 * twice in order to get the right values. Future revisions will fix
46 * this problem and then this macro can disappear.
47 */
48#define MV88x2010_LINK_STATUS_BUGS 1
49
50static int led_init(struct cphy *cphy)
51{
52	/* Set up the LED registers so we can turn them on/off.
53 * Writing these bits maps control to another
54 * register. mmd(0x1) addr(0x7)
55 */
56 mdio_write(cphy, 0x3, 0x8304, 0xdddd);
57 return 0;
58}
59
60static int led_link(struct cphy *cphy, u32 do_enable)
61{
62 u32 led = 0;
63#define LINK_ENABLE_BIT 0x1
64
65 mdio_read(cphy, 0x1, 0x7, &led);
66
67 if (do_enable & LINK_ENABLE_BIT) {
68 led |= LINK_ENABLE_BIT;
69 mdio_write(cphy, 0x1, 0x7, led);
70 } else {
71 led &= ~LINK_ENABLE_BIT;
72 mdio_write(cphy, 0x1, 0x7, led);
73 }
74 return 0;
75}
76
77/* Port Reset */
78static int mv88x201x_reset(struct cphy *cphy, int wait)
79{
80 /* This can be done through registers. It is not required since
81 * a full chip reset is used.
82 */
83 return 0;
84}
85
86static int mv88x201x_interrupt_enable(struct cphy *cphy)
87{
88 u32 elmer;
89
90 /* Enable PHY LASI interrupts. */
91 mdio_write(cphy, 0x1, 0x9002, 0x1);
92
93 /* Enable Marvell interrupts through Elmer0. */
94 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
95 elmer |= ELMER0_GP_BIT6;
96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
97 return 0;
98}
99
100static int mv88x201x_interrupt_disable(struct cphy *cphy)
101{
102 u32 elmer;
103
104 /* Disable PHY LASI interrupts. */
105 mdio_write(cphy, 0x1, 0x9002, 0x0);
106
107 /* Disable Marvell interrupts through Elmer0. */
108 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
109 elmer &= ~ELMER0_GP_BIT6;
110 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
111 return 0;
112}
113
114static int mv88x201x_interrupt_clear(struct cphy *cphy)
115{
116 u32 elmer;
117 u32 val;
118
119#ifdef MV88x2010_LINK_STATUS_BUGS
120	/* Required to read twice before the clear takes effect. */
121 mdio_read(cphy, 0x1, 0x9003, &val);
122 mdio_read(cphy, 0x1, 0x9004, &val);
123 mdio_read(cphy, 0x1, 0x9005, &val);
124
125	/* Read this register after the others above it, otherwise
126 * the register doesn't clear correctly.
127 */
128 mdio_read(cphy, 0x1, 0x1, &val);
129#endif
130
131 /* Clear link status. */
132 mdio_read(cphy, 0x1, 0x1, &val);
133 /* Clear PHY LASI interrupts. */
134 mdio_read(cphy, 0x1, 0x9005, &val);
135
136#ifdef MV88x2010_LINK_STATUS_BUGS
137 /* Do it again. */
138 mdio_read(cphy, 0x1, 0x9003, &val);
139 mdio_read(cphy, 0x1, 0x9004, &val);
140#endif
141
142 /* Clear Marvell interrupts through Elmer0. */
143 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
144 elmer |= ELMER0_GP_BIT6;
145 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
146 return 0;
147}
148
149static int mv88x201x_interrupt_handler(struct cphy *cphy)
150{
151 /* Clear interrupts */
152 mv88x201x_interrupt_clear(cphy);
153
154 /* We have only enabled link change interrupts and so
155 * cphy_cause must be a link change interrupt.
156 */
157 return cphy_cause_link_change;
158}
159
160static int mv88x201x_set_loopback(struct cphy *cphy, int on)
161{
162 return 0;
163}
164
165static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
166 int *speed, int *duplex, int *fc)
167{
168 u32 val = 0;
169#define LINK_STATUS_BIT 0x4
170
171 if (link_ok) {
172 /* Read link status. */
173 mdio_read(cphy, 0x1, 0x1, &val);
174 val &= LINK_STATUS_BIT;
175 *link_ok = (val == LINK_STATUS_BIT);
176 /* Turn on/off Link LED */
177 led_link(cphy, *link_ok);
178 }
179 if (speed)
180 *speed = SPEED_10000;
181 if (duplex)
182 *duplex = DUPLEX_FULL;
183 if (fc)
184 *fc = PAUSE_RX | PAUSE_TX;
185 return 0;
186}
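get_link_status() treats every output pointer as optional, so a caller interested only in the link bit can pass NULL for the rest. A small illustrative sketch (my_poll_link() is hypothetical, not part of this patch):

	static int my_poll_link(struct cphy *phy)
	{
		int link_ok = 0;

		/* Only the link indication is wanted; skip speed/duplex/fc. */
		mv88x201x_get_link_status(phy, &link_ok, NULL, NULL, NULL);
		return link_ok;
	}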
187
188static void mv88x201x_destroy(struct cphy *cphy)
189{
190 kfree(cphy);
191}
192
193static struct cphy_ops mv88x201x_ops = {
194 .destroy = mv88x201x_destroy,
195 .reset = mv88x201x_reset,
196 .interrupt_enable = mv88x201x_interrupt_enable,
197 .interrupt_disable = mv88x201x_interrupt_disable,
198 .interrupt_clear = mv88x201x_interrupt_clear,
199 .interrupt_handler = mv88x201x_interrupt_handler,
200 .get_link_status = mv88x201x_get_link_status,
201 .set_loopback = mv88x201x_set_loopback,
202};
203
204static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
205 struct mdio_ops *mdio_ops)
206{
207 u32 val;
208 struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
209
210 if (!cphy)
211 return NULL;
212 memset(cphy, 0, sizeof(*cphy));
213 cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
214
215 /* Commands the PHY to enable XFP's clock. */
216 mdio_read(cphy, 0x3, 0x8300, &val);
217 mdio_write(cphy, 0x3, 0x8300, val | 1);
218
219 /* Clear link status. Required because of a bug in the PHY. */
220 mdio_read(cphy, 0x1, 0x8, &val);
221 mdio_read(cphy, 0x3, 0x8, &val);
222
223	/* Allows the Link,Ack LEDs to be turned on/off. */
224 led_init(cphy);
225 return cphy;
226}
227
228/* Chip Reset */
229static int mv88x201x_phy_reset(adapter_t *adapter)
230{
231 u32 val;
232
233 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
234 val &= ~4;
235 t1_tpi_write(adapter, A_ELMER0_GPO, val);
236 msleep(100);
237
238 t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
239 msleep(1000);
240
241	/* Now let's enable the laser. Delay 100us. */
242 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
243 val |= 0x8000;
244 t1_tpi_write(adapter, A_ELMER0_GPO, val);
245 udelay(100);
246 return 0;
247}
248
249struct gphy t1_mv88x201x_ops = {
250 mv88x201x_phy_create,
251 mv88x201x_phy_reset
252};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
index 000000000000..04a1404fc65e
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,826 @@
1/*****************************************************************************
2 * *
3 * File: pm3393.c *
4 * $Revision: 1.16 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "gmac.h"
43#include "elmer0.h"
44#include "suni1x10gexp_regs.h"
45
46/* 802.3ae 10Gb/s MDIO Manageable Device (MMD)
47 */
48enum {
49 MMD_RESERVED,
50 MMD_PMAPMD,
51 MMD_WIS,
52 MMD_PCS,
53 MMD_PHY_XGXS, /* XGMII Extender Sublayer */
54 MMD_DTE_XGXS,
55};
56
57enum {
58 PHY_XGXS_CTRL_1,
59 PHY_XGXS_STATUS_1
60};
61
62#define OFFSET(REG_ADDR) (REG_ADDR << 2)
63
64/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
65#define MAX_FRAME_SIZE 9600
66
67#define IPG 12
68#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
69 SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
70 SUNI1x10GEXP_BITMSK_TXXG_PADEN)
71#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
72 SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
73
74/* Update statistics every 15 minutes */
75#define STATS_TICK_SECS (15 * 60)
76
77enum { /* RMON registers */
78 RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
79 RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
80 RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
81 RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
82 RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
83 RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
84 RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
85 RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
86 RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
87 RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
88 RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
89 RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
90 RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
91
92 TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
93 TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
94 TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
95 TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
96 TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
97 TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
98 TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
99};
100
101struct _cmac_instance {
102 u8 enabled;
103 u8 fc;
104 u8 mac_addr[6];
105};
106
107static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
108{
109 t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
110 return 0;
111}
112
113static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
114{
115 t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
116 return 0;
117}
118
119/* Port reset. */
120static int pm3393_reset(struct cmac *cmac)
121{
122 return 0;
123}
124
125/*
126 * Enable interrupts for the PM3393:
127 *
128 *  1. Enable PM3393 BLOCK interrupts.
129 *  2. Enable PM3393 Master Interrupt bit (INTE).
130 *  3. Enable ELMER's PM3393 bit.
131 *  4. Enable Terminator external interrupt.
132 */
133static int pm3393_interrupt_enable(struct cmac *cmac)
134{
135 u32 pl_intr;
136
137 /* PM3393 - Enabling all hardware block interrupts.
138 */
139 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
140 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
141 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
142 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
143
144 /* Don't interrupt on statistics overflow, we are polling */
145 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
146 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
147 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
148 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
149
150 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
151 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
152 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
153 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
154 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
155 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
156 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
157 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
158 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
159
160 /* PM3393 - Global interrupt enable
161 */
162 /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
163 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
164 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
165
166 /* TERMINATOR - PL_INTERUPTS_EXT */
167 pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
168 pl_intr |= F_PL_INTR_EXT;
169 writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
170 return 0;
171}
172
173static int pm3393_interrupt_disable(struct cmac *cmac)
174{
175 u32 elmer;
176
177	/* PM3393 - Disabling HW interrupt blocks. */
178 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
179 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
180 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
181 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
182 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
183 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
184 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
185 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
186 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
187 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
188 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
189 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
190 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
191 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
192 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
193 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
194 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
195
196 /* PM3393 - Global interrupt enable */
197 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
198
199 /* ELMER - External chip interrupts. */
200 t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
201 elmer &= ~ELMER0_GP_BIT1;
202 t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
203
204 /* TERMINATOR - PL_INTERUPTS_EXT */
205 /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
206 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
207 */
208
209 return 0;
210}
211
212static int pm3393_interrupt_clear(struct cmac *cmac)
213{
214 u32 elmer;
215 u32 pl_intr;
216 u32 val32;
217
218 /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
219 * bit WCIMODE=0 for a clear-on-read.
220 */
221 pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
222 pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
223 pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
224 pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
225 pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
226 pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
227 pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
228 pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
229 pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
230 pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
231 pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
232 pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
233 &val32);
234 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
235 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
236
237 /* PM3393 - Global interrupt status
238 */
239 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
240
241 /* ELMER - External chip interrupts.
242 */
243 t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
244 elmer |= ELMER0_GP_BIT1;
245 t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
246
247 /* TERMINATOR - PL_INTERUPTS_EXT
248 */
249 pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
250 pl_intr |= F_PL_INTR_EXT;
251 writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
252
253 return 0;
254}
255
256/* Interrupt handler */
257static int pm3393_interrupt_handler(struct cmac *cmac)
258{
259 u32 master_intr_status;
260	/*
261	 * 1. Read master interrupt register.
262	 * 2. Read BLOCK's interrupt status registers.
263	 * 3. Handle BLOCK interrupts.
264	 */
265 /* Read the master interrupt status register. */
266 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
267 &master_intr_status);
268
269 /* TBD XXX Lets just clear everything for now */
270 pm3393_interrupt_clear(cmac);
271
272 return 0;
273}
274
275static int pm3393_enable(struct cmac *cmac, int which)
276{
277 if (which & MAC_DIRECTION_RX)
278 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
279 (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
280
281 if (which & MAC_DIRECTION_TX) {
282 u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
283
284 if (cmac->instance->fc & PAUSE_RX)
285 val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
286 if (cmac->instance->fc & PAUSE_TX)
287 val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
288 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
289 }
290
291 cmac->instance->enabled |= which;
292 return 0;
293}
294
295static int pm3393_enable_port(struct cmac *cmac, int which)
296{
297 /* Clear port statistics */
298 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
299 SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
300 udelay(2);
301 memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
302
303 pm3393_enable(cmac, which);
304
305 /*
306	 * XXX This should be done by the PHY and preferably not at all.
307 * The PHY doesn't give us link status indication on its own so have
308 * the link management code query it instead.
309 */
310 {
311 extern void link_changed(adapter_t *adapter, int port_id);
312
313 link_changed(cmac->adapter, 0);
314 }
315 return 0;
316}
317
318static int pm3393_disable(struct cmac *cmac, int which)
319{
320 if (which & MAC_DIRECTION_RX)
321 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
322 if (which & MAC_DIRECTION_TX)
323 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
324
325 /*
326 * The disable is graceful. Give the PM3393 time. Can't wait very
327 * long here, we may be holding locks.
328 */
329 udelay(20);
330
331 cmac->instance->enabled &= ~which;
332 return 0;
333}
334
335static int pm3393_loopback_enable(struct cmac *cmac)
336{
337 return 0;
338}
339
340static int pm3393_loopback_disable(struct cmac *cmac)
341{
342 return 0;
343}
344
345static int pm3393_set_mtu(struct cmac *cmac, int mtu)
346{
347 int enabled = cmac->instance->enabled;
348
349 /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
350 mtu += 14 + 4;
351 if (mtu > MAX_FRAME_SIZE)
352 return -EINVAL;
353
354 /* Disable Rx/Tx MAC before configuring it. */
355 if (enabled)
356 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
357
358 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
359 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
360
361 if (enabled)
362 pm3393_enable(cmac, enabled);
363 return 0;
364}
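As a concrete check of the arithmetic above: a standard 1500-byte MTU programs a 1518-byte frame limit (1500 + 14-byte Ethernet header + 4-byte FCS), and a 9000-byte jumbo MTU programs 9018 bytes, both within the 9600-byte MAX_FRAME_SIZE the PM3393 supports.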
365
366static u32 calc_crc(u8 *b, int len)
367{
368 int i;
369 u32 crc = (u32)~0;
370
371 /* calculate crc one bit at a time */
372 while (len--) {
373 crc ^= *b++;
374 for (i = 0; i < 8; i++) {
375 if (crc & 0x1)
376 crc = (crc >> 1) ^ 0xedb88320;
377 else
378 crc = (crc >> 1);
379 }
380 }
381
382 /* reverse bits */
383 crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
384 crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
385 crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
386 /* swap bytes */
387 crc = (crc >> 16) | (crc << 16);
388 crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
389
390 return crc;
391}
392
393static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
394{
395 int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
396 u32 rx_mode;
397
398 /* Disable MAC RX before reconfiguring it */
399 if (enabled)
400 pm3393_disable(cmac, MAC_DIRECTION_RX);
401
402 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
403 rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
404 SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
405 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
406 (u16)rx_mode);
407
408 if (t1_rx_mode_promisc(rm)) {
409 /* Promiscuous mode. */
410 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
411 }
412 if (t1_rx_mode_allmulti(rm)) {
413 /* Accept all multicast. */
414 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
415 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
416 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
417 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
418 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
419 } else if (t1_rx_mode_mc_cnt(rm)) {
420 /* Accept one or more multicast(s). */
421 u8 *addr;
422 int bit;
423 u16 mc_filter[4] = { 0, };
424
425 while ((addr = t1_get_next_mcaddr(rm))) {
426 bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */
427 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
428 }
429 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
430 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
431 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
432 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
433 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
434 }
435
436 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
437
438 if (enabled)
439 pm3393_enable(cmac, MAC_DIRECTION_RX);
440
441 return 0;
442}
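The allmulti and per-address branches above program a 64-bit multicast hash filter split across four 16-bit HASH registers; each address selects a single bit using six bits of its CRC. A standalone sketch of that bucket selection (hash_bucket() is an illustrative helper, not part of the driver):

	/* Which filter word and bit a multicast address selects, mirroring
	 * the shift/mask used in pm3393_set_rx_mode() above.
	 */
	static void hash_bucket(const u8 *addr, int *word, int *bit)
	{
		int b = (calc_crc((u8 *)addr, 6) >> 23) & 0x3f;	/* 6-bit bucket */

		*word = b >> 4;		/* which of the four HASH registers */
		*bit  = b & 0xf;	/* which bit within that 16-bit word */
	}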
443
444static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
445 int *duplex, int *fc)
446{
447 if (speed)
448 *speed = SPEED_10000;
449 if (duplex)
450 *duplex = DUPLEX_FULL;
451 if (fc)
452 *fc = cmac->instance->fc;
453 return 0;
454}
455
456static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
457 int fc)
458{
459 if (speed >= 0 && speed != SPEED_10000)
460 return -1;
461 if (duplex >= 0 && duplex != DUPLEX_FULL)
462 return -1;
463 if (fc & ~(PAUSE_TX | PAUSE_RX))
464 return -1;
465
466 if (fc != cmac->instance->fc) {
467 cmac->instance->fc = (u8) fc;
468 if (cmac->instance->enabled & MAC_DIRECTION_TX)
469 pm3393_enable(cmac, MAC_DIRECTION_TX);
470 }
471 return 0;
472}
473
474#define RMON_UPDATE(mac, name, stat_name) \
475 { \
476 t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
477 t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
478 t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
479 (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
480 (((u64)val1 & 0xffff) << 16) | \
481 (((u64)val2 & 0xff) << 32) | \
482 ((mac)->stats.stat_name & \
483 (~(u64)0 << 40)); \
484 if (ro & \
485 ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \
486 (mac)->stats.stat_name += ((u64)1 << 40); \
487 }
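RMON_UPDATE stitches three 16-bit TPI reads into the low 40 bits of the 64-bit software counter and, when the MSTAT rollover word indicates a wrap for that counter, carries into bit 40 so the value keeps growing monotonically. The same idea as a self-contained sketch (extend_counter40() is a hypothetical helper for illustration only):

	/* Merge a freshly read 40-bit hardware value into a 64-bit software
	 * counter, carrying into bit 40 when the hardware wrapped.
	 */
	static u64 extend_counter40(u64 sw, u64 hw40, int rolled_over)
	{
		sw = (sw & ~((1ULL << 40) - 1)) | (hw40 & ((1ULL << 40) - 1));
		if (rolled_over)
			sw += 1ULL << 40;
		return sw;
	}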
488
489static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
490 int flag)
491{
492 u64 ro;
493 u32 val0, val1, val2, val3;
494
495 /* Snap the counters */
496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
497 SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
498
499 /* Counter rollover, clear on read */
500 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
501 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
502 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
503 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
506
507 /* Rx stats */
508 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
509 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
510 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
511 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
512 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
513 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
514 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
515 RxInternalMACRcvError);
516 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
517 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
518 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
519 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
520 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
521 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
522
523 /* Tx stats */
524 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
525 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
526 TxInternalMACXmitError);
527 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
528 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
529 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
530 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
531 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
532
533 return &mac->stats;
534}
535
536static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
537{
538 memcpy(mac_addr, cmac->instance->mac_addr, 6);
539 return 0;
540}
541
542static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
543{
544 u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
545
546 /*
547 * MAC addr: 00:07:43:00:13:09
548 *
549 * ma[5] = 0x09
550 * ma[4] = 0x13
551 * ma[3] = 0x00
552 * ma[2] = 0x43
553 * ma[1] = 0x07
554 * ma[0] = 0x00
555 *
556 * The PM3393 requires byte swapping and reverse order entry
557 * when programming MAC addresses:
558 *
559 * low_bits[15:0] = ma[1]:ma[0]
560 * mid_bits[31:16] = ma[3]:ma[2]
561 * high_bits[47:32] = ma[5]:ma[4]
562 */
563
564 /* Store local copy */
565 memcpy(cmac->instance->mac_addr, ma, 6);
566
567 lo = ((u32) ma[1] << 8) | (u32) ma[0];
568 mid = ((u32) ma[3] << 8) | (u32) ma[2];
569 hi = ((u32) ma[5] << 8) | (u32) ma[4];
570
571 /* Disable Rx/Tx MAC before configuring it. */
572 if (enabled)
573 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
574
575 /* Set RXXG Station Address */
576 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
577 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
578 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
579
580 /* Set TXXG Station Address */
581 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
582 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
583 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
584
585 /* Setup Exact Match Filter 1 with our MAC address
586 *
587 * Must disable exact match filter before configuring it.
588 */
589 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
590 val &= 0xff0f;
591 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
592
593 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
594 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
595 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
596
597 val |= 0x0090;
598 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
599
600 if (enabled)
601 pm3393_enable(cmac, enabled);
602 return 0;
603}
604
605static void pm3393_destroy(struct cmac *cmac)
606{
607 kfree(cmac);
608}
609
610static struct cmac_ops pm3393_ops = {
611 .destroy = pm3393_destroy,
612 .reset = pm3393_reset,
613 .interrupt_enable = pm3393_interrupt_enable,
614 .interrupt_disable = pm3393_interrupt_disable,
615 .interrupt_clear = pm3393_interrupt_clear,
616 .interrupt_handler = pm3393_interrupt_handler,
617 .enable = pm3393_enable_port,
618 .disable = pm3393_disable,
619 .loopback_enable = pm3393_loopback_enable,
620 .loopback_disable = pm3393_loopback_disable,
621 .set_mtu = pm3393_set_mtu,
622 .set_rx_mode = pm3393_set_rx_mode,
623 .get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
624 .set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
625 .statistics_update = pm3393_update_statistics,
626 .macaddress_get = pm3393_macaddress_get,
627 .macaddress_set = pm3393_macaddress_set
628};
629
630static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
631{
632 struct cmac *cmac;
633
634 cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
635 if (!cmac)
636 return NULL;
637 memset(cmac, 0, sizeof(*cmac));
638
639 cmac->ops = &pm3393_ops;
640 cmac->instance = (cmac_instance *) (cmac + 1);
641 cmac->adapter = adapter;
642 cmac->instance->fc = PAUSE_TX | PAUSE_RX;
643
644 t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
645 t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
646 t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
647 t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
648 t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
649 t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
650 t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
651 t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
652 t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
653 t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
654 t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
655 t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
656 t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
657 t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
658 t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
659 t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
660 t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
661 t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
662 t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
663 t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
664 t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
665 t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
666
667 t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
668 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
669 t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
670 t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
671 t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
672 t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
673 t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
674 t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
675 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
676 t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
677 t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
678	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
679 t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
680
681 t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
682 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
683 t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
684 t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
685 t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
686 t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
687 t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
688 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
689 t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
690 t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
691	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
692
693 t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
694 t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
695 t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
696 t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
697 t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
698 t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
699
700 t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
701 t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
702
703 t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
704 t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
705
706 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
707 /* For T1 use timer based Mac flow control. */
708 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
709 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
710 t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
711 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
712
713 /* Setup Exact Match Filter 0 to allow broadcast packets.
714 */
715 t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
716 t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
717 t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
718 t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
719 t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
720
721 t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
722 t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
723 t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
724
725 return cmac;
726}
727
728static int pm3393_mac_reset(adapter_t * adapter)
729{
730 u32 val;
731 u32 x;
732 u32 is_pl4_reset_finished;
733 u32 is_pl4_outof_lock;
734 u32 is_xaui_mabc_pll_locked;
735 u32 successful_reset;
736 int i;
737
738 /* The following steps are required to properly reset
739 * the PM3393. This information is provided in the
740 * PM3393 datasheet (Issue 2: November 2002)
741 * section 13.1 -- Device Reset.
742 *
743 * The PM3393 has three types of components that are
744 * individually reset:
745 *
746 * DRESETB - Digital circuitry
747 * PL4_ARESETB - PL4 analog circuitry
748 * XAUI_ARESETB - XAUI bus analog circuitry
749 *
750 * Steps to reset PM3393 using RSTB pin:
751 *
752 * 1. Assert RSTB pin low ( write 0 )
753	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
754	 * 3. Wait until all external clocks and REFSEL are stable.
755	 * 4. Wait a minimum of 1ms (after external clocks and REFSEL are stable).
756	 * 5. De-assert RSTB ( write 1 )
757	 * 6. Wait until the internal timers expire after ~14ms.
758 * - Allows analog clock synthesizer(PL4CSU) to stabilize to
759 * selected reference frequency before allowing the digital
760 * portion of the device to operate.
761 * 7. Wait at least 200us for XAUI interface to stabilize.
762 * 8. Verify the PM3393 came out of reset successfully.
763	 *    Set the successful-reset flag if everything worked, else try again
764	 *    a few more times.
765 */
766
767 successful_reset = 0;
768 for (i = 0; i < 3 && !successful_reset; i++) {
769 /* 1 */
770 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
771 val &= ~1;
772 t1_tpi_write(adapter, A_ELMER0_GPO, val);
773
774 /* 2 */
775 msleep(1);
776
777 /* 3 */
778 msleep(1);
779
780 /* 4 */
781 msleep(2 /*1 extra ms for safety */ );
782
783 /* 5 */
784 val |= 1;
785 t1_tpi_write(adapter, A_ELMER0_GPO, val);
786
787 /* 6 */
788 msleep(15 /*1 extra ms for safety */ );
789
790 /* 7 */
791 msleep(1);
792
793 /* 8 */
794
795 /* Has PL4 analog block come out of reset correctly? */
796 t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
797 is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
798
799		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in
800		 * the init sequence; figure out why. */
801
802 /* Have all PL4 block clocks locked? */
803 x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
804 /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
805 SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
806 SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
807 SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
808 is_pl4_outof_lock = (val & x);
809
810		/* ??? If this fails, we might be able to soft-reset the XAUI part
811		 * and try to recover, saving us from doing another HW reset. */
812		/* Has the XAUI MABC PLL circuitry stabilized? */
813 is_xaui_mabc_pll_locked =
814 (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
815
816 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
817 && is_xaui_mabc_pll_locked);
818 }
819 return successful_reset ? 0 : 1;
820}
821
822struct gmac t1_pm3393_ops = {
823 STATS_TICK_SECS,
824 pm3393_mac_create,
825 pm3393_mac_reset
826};
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
index 000000000000..b90e11f40d1f
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,468 @@
1/*****************************************************************************
2 * *
3 * File: regs.h *
4 * $Revision: 1.8 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_REGS_H_
40#define _CXGB_REGS_H_
41
42/* SGE registers */
43#define A_SG_CONTROL 0x0
44
45#define S_CMDQ0_ENABLE 0
46#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
47#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
48
49#define S_CMDQ1_ENABLE 1
50#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
51#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
52
53#define S_FL0_ENABLE 2
54#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
55#define F_FL0_ENABLE V_FL0_ENABLE(1U)
56
57#define S_FL1_ENABLE 3
58#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
59#define F_FL1_ENABLE V_FL1_ENABLE(1U)
60
61#define S_CPL_ENABLE 4
62#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
63#define F_CPL_ENABLE V_CPL_ENABLE(1U)
64
65#define S_RESPONSE_QUEUE_ENABLE 5
66#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
67#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
68
69#define S_CMDQ_PRIORITY 6
70#define M_CMDQ_PRIORITY 0x3
71#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
72#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
73
74#define S_DISABLE_CMDQ1_GTS 9
75#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
76#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
77
78#define S_DISABLE_FL0_GTS 10
79#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
80#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
81
82#define S_DISABLE_FL1_GTS 11
83#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
84#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
85
86#define S_ENABLE_BIG_ENDIAN 12
87#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
88#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
89
90#define S_ISCSI_COALESCE 14
91#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
92#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
93
94#define S_RX_PKT_OFFSET 15
95#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
96
97#define S_VLAN_XTRACT 18
98#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
99#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
100
101#define A_SG_DOORBELL 0x4
102#define A_SG_CMD0BASELWR 0x8
103#define A_SG_CMD0BASEUPR 0xc
104#define A_SG_CMD1BASELWR 0x10
105#define A_SG_CMD1BASEUPR 0x14
106#define A_SG_FL0BASELWR 0x18
107#define A_SG_FL0BASEUPR 0x1c
108#define A_SG_FL1BASELWR 0x20
109#define A_SG_FL1BASEUPR 0x24
110#define A_SG_CMD0SIZE 0x28
111#define A_SG_FL0SIZE 0x2c
112#define A_SG_RSPSIZE 0x30
113#define A_SG_RSPBASELWR 0x34
114#define A_SG_RSPBASEUPR 0x38
115#define A_SG_FLTHRESHOLD 0x3c
116#define A_SG_RSPQUEUECREDIT 0x40
117#define A_SG_SLEEPING 0x48
118#define A_SG_INTRTIMER 0x4c
119#define A_SG_CMD1SIZE 0xb0
120#define A_SG_FL1SIZE 0xb4
121#define A_SG_INT_ENABLE 0xb8
122
123#define S_RESPQ_EXHAUSTED 0
124#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
125#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
126
127#define S_RESPQ_OVERFLOW 1
128#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
129#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
130
131#define S_FL_EXHAUSTED 2
132#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
133#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
134
135#define S_PACKET_TOO_BIG 3
136#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
137#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
138
139#define S_PACKET_MISMATCH 4
140#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
141#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
142
143#define A_SG_INT_CAUSE 0xbc
144#define A_SG_RESPACCUTIMER 0xc0
145
146/* MC3 registers */
147
148#define S_READY 1
149#define V_READY(x) ((x) << S_READY)
150#define F_READY V_READY(1U)
151
152/* MC4 registers */
153
154#define A_MC4_CFG 0x180
155#define S_MC4_SLOW 25
156#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
157#define F_MC4_SLOW V_MC4_SLOW(1U)
158
159/* TPI registers */
160
161#define A_TPI_ADDR 0x280
162#define A_TPI_WR_DATA 0x284
163#define A_TPI_RD_DATA 0x288
164#define A_TPI_CSR 0x28c
165
166#define S_TPIWR 0
167#define V_TPIWR(x) ((x) << S_TPIWR)
168#define F_TPIWR V_TPIWR(1U)
169
170#define S_TPIRDY 1
171#define V_TPIRDY(x) ((x) << S_TPIRDY)
172#define F_TPIRDY V_TPIRDY(1U)
173
174#define A_TPI_PAR 0x29c
175
176#define S_TPIPAR 0
177#define M_TPIPAR 0x7f
178#define V_TPIPAR(x) ((x) << S_TPIPAR)
179#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
180
181/* TP registers */
182
183#define A_TP_IN_CONFIG 0x300
184
185#define S_TP_IN_CSPI_CPL 3
186#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
187#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
188
189#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
190#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
191#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
192
193#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
194#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
195#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
196
197#define S_TP_IN_ESPI_ETHERNET 8
198#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
199#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
200
201#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
202#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
203#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
204
205#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
206#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
207#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
208
209#define S_OFFLOAD_DISABLE 14
210#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
211#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
212
213#define A_TP_OUT_CONFIG 0x304
214
215#define S_TP_OUT_CSPI_CPL 2
216#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
217#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
218
219#define S_TP_OUT_ESPI_ETHERNET 6
220#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
221#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
222
223#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
224#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
225#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
226
227#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
228#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
229#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
230
231#define A_TP_GLOBAL_CONFIG 0x308
232
233#define S_IP_TTL 0
234#define M_IP_TTL 0xff
235#define V_IP_TTL(x) ((x) << S_IP_TTL)
236
237#define S_TCP_CSUM 11
238#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
239#define F_TCP_CSUM V_TCP_CSUM(1U)
240
241#define S_UDP_CSUM 12
242#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
243#define F_UDP_CSUM V_UDP_CSUM(1U)
244
245#define S_IP_CSUM 13
246#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
247#define F_IP_CSUM V_IP_CSUM(1U)
248
249#define S_PATH_MTU 15
250#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
251#define F_PATH_MTU V_PATH_MTU(1U)
252
253#define S_5TUPLE_LOOKUP 17
254#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
255
256#define S_SYN_COOKIE_PARAMETER 26
257#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
258
259#define A_TP_PC_CONFIG 0x348
260#define S_DIS_TX_FILL_WIN_PUSH 12
261#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
262#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
263
264#define S_TP_PC_REV 30
265#define M_TP_PC_REV 0x3
266#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
267#define A_TP_RESET 0x44c
268#define S_TP_RESET 0
269#define V_TP_RESET(x) ((x) << S_TP_RESET)
270#define F_TP_RESET V_TP_RESET(1U)
271
272#define A_TP_INT_ENABLE 0x470
273#define A_TP_INT_CAUSE 0x474
274#define A_TP_TX_DROP_CONFIG 0x4b8
275
276#define S_ENABLE_TX_DROP 31
277#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
278#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
279
280#define S_ENABLE_TX_ERROR 30
281#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
282#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
283
284#define S_DROP_TICKS_CNT 4
285#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
286
287#define S_NUM_PKTS_DROPPED 0
288#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
289
290/* CSPI registers */
291
292#define S_DIP4ERR 0
293#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
294#define F_DIP4ERR V_DIP4ERR(1U)
295
296#define S_RXDROP 1
297#define V_RXDROP(x) ((x) << S_RXDROP)
298#define F_RXDROP V_RXDROP(1U)
299
300#define S_TXDROP 2
301#define V_TXDROP(x) ((x) << S_TXDROP)
302#define F_TXDROP V_TXDROP(1U)
303
304#define S_RXOVERFLOW 3
305#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
306#define F_RXOVERFLOW V_RXOVERFLOW(1U)
307
308#define S_RAMPARITYERR 4
309#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
310#define F_RAMPARITYERR V_RAMPARITYERR(1U)
311
312/* ESPI registers */
313
314#define A_ESPI_SCH_TOKEN0 0x880
315#define A_ESPI_SCH_TOKEN1 0x884
316#define A_ESPI_SCH_TOKEN2 0x888
317#define A_ESPI_SCH_TOKEN3 0x88c
318#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
319#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
320#define A_ESPI_CALENDAR_LENGTH 0x898
321#define A_PORT_CONFIG 0x89c
322
323#define S_RX_NPORTS 0
324#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
325
326#define S_TX_NPORTS 8
327#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
328
329#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
330
331#define S_RXSTATUSENABLE 0
332#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
333#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
334
335#define S_INTEL1010MODE 4
336#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
337#define F_INTEL1010MODE V_INTEL1010MODE(1U)
338
339#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
340#define A_ESPI_TRAIN 0x8ac
341#define A_ESPI_INTR_STATUS 0x8c8
342
343#define S_DIP2PARITYERR 5
344#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
345#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
346
347#define A_ESPI_INTR_ENABLE 0x8cc
348#define A_RX_DROP_THRESHOLD 0x8d0
349#define A_ESPI_RX_RESET 0x8ec
350#define A_ESPI_MISC_CONTROL 0x8f0
351
352#define S_OUT_OF_SYNC_COUNT 0
353#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
354
355#define S_DIP2_PARITY_ERR_THRES 5
356#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
357
358#define S_DIP4_THRES 9
359#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
360
361#define S_MONITORED_PORT_NUM 25
362#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
363
364#define S_MONITORED_DIRECTION 27
365#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
366#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
367
368#define S_MONITORED_INTERFACE 28
369#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
370#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
371
372#define A_ESPI_DIP2_ERR_COUNT 0x8f4
373#define A_ESPI_CMD_ADDR 0x8f8
374
375#define S_WRITE_DATA 0
376#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
377
378#define S_REGISTER_OFFSET 8
379#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
380
381#define S_CHANNEL_ADDR 12
382#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
383
384#define S_MODULE_ADDR 16
385#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
386
387#define S_BUNDLE_ADDR 20
388#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
389
390#define S_SPI4_COMMAND 24
391#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
392
393#define A_ESPI_GOSTAT 0x8fc
394#define S_ESPI_CMD_BUSY 8
395#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
396#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
397
398/* PL registers */
399
400#define A_PL_ENABLE 0xa00
401
402#define S_PL_INTR_SGE_ERR 0
403#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
404#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
405
406#define S_PL_INTR_SGE_DATA 1
407#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
408#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
409
410#define S_PL_INTR_TP 6
411#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
412#define F_PL_INTR_TP V_PL_INTR_TP(1U)
413
414#define S_PL_INTR_ESPI 8
415#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
416#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
417
418#define S_PL_INTR_PCIX 10
419#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
420#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
421
422#define S_PL_INTR_EXT 11
423#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
424#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
425
426#define A_PL_CAUSE 0xa04
427
428/* MC5 registers */
429
430#define A_MC5_CONFIG 0xc04
431
432#define S_TCAM_RESET 1
433#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
434#define F_TCAM_RESET V_TCAM_RESET(1U)
435
436#define S_M_BUS_ENABLE 5
437#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
438#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
439
440/* PCICFG registers */
441
442#define A_PCICFG_PM_CSR 0x44
443#define A_PCICFG_VPD_ADDR 0x4a
444
445#define S_VPD_OP_FLAG 15
446#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
447#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
448
449#define A_PCICFG_VPD_DATA 0x4c
450
451#define A_PCICFG_INTR_ENABLE 0xf4
452#define A_PCICFG_INTR_CAUSE 0xf8
453
454#define A_PCICFG_MODE 0xfc
455
456#define S_PCI_MODE_64BIT 0
457#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
458#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
459
460#define S_PCI_MODE_PCIX 5
461#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
462#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
463
464#define S_PCI_MODE_CLK 6
465#define M_PCI_MODE_CLK 0x3
466#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
467
468#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 000000000000..53b41d99b00b
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1684 @@
1/*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41
42#include <linux/config.h>
43#include <linux/types.h>
44#include <linux/errno.h>
45#include <linux/pci.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/if_vlan.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/mm.h>
52#include <linux/ip.h>
53#include <linux/in.h>
54#include <linux/if_arp.h>
55
56#include "cpl5_cmd.h"
57#include "sge.h"
58#include "regs.h"
59#include "espi.h"
60
61
62#ifdef NETIF_F_TSO
63#include <linux/tcp.h>
64#endif
65
66#define SGE_CMDQ_N 2
67#define SGE_FREELQ_N 2
68#define SGE_CMDQ0_E_N 1024
69#define SGE_CMDQ1_E_N 128
70#define SGE_FREEL_SIZE 4096
71#define SGE_JUMBO_FREEL_SIZE 512
72#define SGE_FREEL_REFILL_THRESH 16
73#define SGE_RESPQ_E_N 1024
74#define SGE_INTRTIMER_NRES 1000
75#define SGE_RX_COPY_THRES 256
76#define SGE_RX_SM_BUF_SIZE 1536
77
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81
82/*
83 * Period of the TX buffer reclaim timer. This timer does not need to run
84 * frequently as TX buffers are usually reclaimed by new TX packets.
85 */
86#define TX_RECLAIM_PERIOD (HZ / 4)
87
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
91
92#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
95#define V_CMD_GEN1(v) ((v) << 31)
96#define V_CMD_GEN2(v) (v)
97#define F_CMD_DATAVALID (1 << 1)
98#define F_CMD_SOP (1 << 2)
99#define V_CMD_EOP(v) ((v) << 3)
100
101/*
102 * Command queue, receive buffer list, and response queue descriptors.
103 */
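/*
 * Note: the bitfield layouts below are defined twice, mirrored for big- and
 * little-endian hosts, so that each field lands on the bit positions the
 * hardware expects regardless of how the compiler allocates bitfields.
 */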
104#if defined(__BIG_ENDIAN_BITFIELD)
105struct cmdQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 flags;
109 u32 addr_hi;
110};
111
112struct freelQ_e {
113 u32 addr_lo;
114 u32 len_gen;
115 u32 gen2;
116 u32 addr_hi;
117};
118
119struct respQ_e {
120 u32 Qsleeping : 4;
121 u32 Cmdq1CreditReturn : 5;
122 u32 Cmdq1DmaComplete : 5;
123 u32 Cmdq0CreditReturn : 5;
124 u32 Cmdq0DmaComplete : 5;
125 u32 FreelistQid : 2;
126 u32 CreditValid : 1;
127 u32 DataValid : 1;
128 u32 Offload : 1;
129 u32 Eop : 1;
130 u32 Sop : 1;
131 u32 GenerationBit : 1;
132 u32 BufferLength;
133};
134#elif defined(__LITTLE_ENDIAN_BITFIELD)
135struct cmdQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 flags;
140};
141
142struct freelQ_e {
143 u32 len_gen;
144 u32 addr_lo;
145 u32 addr_hi;
146 u32 gen2;
147};
148
149struct respQ_e {
150 u32 BufferLength;
151 u32 GenerationBit : 1;
152 u32 Sop : 1;
153 u32 Eop : 1;
154 u32 Offload : 1;
155 u32 DataValid : 1;
156 u32 CreditValid : 1;
157 u32 FreelistQid : 2;
158 u32 Cmdq0DmaComplete : 5;
159 u32 Cmdq0CreditReturn : 5;
160 u32 Cmdq1DmaComplete : 5;
161 u32 Cmdq1CreditReturn : 5;
162 u32 Qsleeping : 4;
163} ;
164#endif
165
166/*
167 * SW Context Command and Freelist Queue Descriptors
168 */
169struct cmdQ_ce {
170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len);
173};
174
175struct freelQ_ce {
176 struct sk_buff *skb;
177 DECLARE_PCI_UNMAP_ADDR(dma_addr);
178 DECLARE_PCI_UNMAP_LEN(dma_len);
179};
180
181/*
182 * SW command, freelist and response rings
183 */
184struct cmdQ {
185 unsigned long status; /* HW DMA fetch status */
186 unsigned int in_use; /* # of in-use command descriptors */
187 unsigned int size; /* # of descriptors */
188 unsigned int processed; /* total # of descs HW has processed */
189 unsigned int cleaned; /* total # of descs SW has reclaimed */
190 unsigned int stop_thres; /* SW TX queue suspend threshold */
191 u16 pidx; /* producer index (SW) */
192 u16 cidx; /* consumer index (HW) */
193 u8 genbit; /* current generation (=valid) bit */
194 u8 sop; /* is next entry start of packet? */
195 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
198 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
199};
200
201struct freelQ {
202 unsigned int credits; /* # of available RX buffers */
203 unsigned int size; /* free list capacity */
204 u16 pidx; /* producer index (SW) */
205 u16 cidx; /* consumer index (HW) */
206 u16 rx_buffer_size; /* Buffer size on this free list */
207 u16 dma_offset; /* DMA offset to align IP headers */
208 u16 recycleq_idx; /* skb recycle q to use */
209 u8 genbit; /* current generation (=valid) bit */
210 struct freelQ_e *entries; /* HW freelist descriptor Q */
211 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
212 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
213};
214
215struct respQ {
216 unsigned int credits; /* credits to be returned to SGE */
217 unsigned int size; /* # of response Q descriptors */
218 u16 cidx; /* consumer index (SW) */
219 u8 genbit; /* current generation(=valid) bit */
220 struct respQ_e *entries; /* HW response descriptor Q */
221 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
222};
223
224/* Bit flags for cmdQ.status */
225enum {
226 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
227 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
228};
229
230/*
231 * Main SGE data structure
232 *
233 * Interrupts are handled by a single CPU and it is likely that on an MP system
234 * the application is migrated to another CPU. In that scenario, we try to
235 * separate the RX (in irq context) and TX state in order to decrease memory
236 * contention.
237 */
238struct sge {
239 struct adapter *adapter; /* adapter backpointer */
240 struct net_device *netdev; /* netdevice backpointer */
241 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
242 struct respQ respQ; /* response Q */
243 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
245 unsigned int jumbo_fl; /* jumbo freelist Q index */
246 unsigned int intrtimer_nres; /* no-resource interrupt timer */
247 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
248 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
249 struct timer_list espibug_timer;
250 unsigned int espibug_timeout;
251 struct sk_buff *espibug_skb;
252 u32 sge_control; /* shadow value of sge control reg */
253 struct sge_intr_counts stats;
254 struct sge_port_stats port_stats[MAX_NPORTS];
255 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
256};
257
258/*
259 * PIO to indicate that memory mapped Q contains valid descriptor(s).
260 */
261static inline void doorbell_pio(struct adapter *adapter, u32 val)
262{
263 wmb();
264 writel(val, adapter->regs + A_SG_DOORBELL);
265}
266
267/*
268 * Frees all RX buffers on the freelist Q. The caller must make sure that
269 * the SGE is turned off before calling this function.
270 */
271static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
272{
273 unsigned int cidx = q->cidx;
274
275 while (q->credits--) {
276 struct freelQ_ce *ce = &q->centries[cidx];
277
278 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
279 pci_unmap_len(ce, dma_len),
280 PCI_DMA_FROMDEVICE);
281 dev_kfree_skb(ce->skb);
282 ce->skb = NULL;
283 if (++cidx == q->size)
284 cidx = 0;
285 }
286}
287
288/*
289 * Free RX free list and response queue resources.
290 */
291static void free_rx_resources(struct sge *sge)
292{
293 struct pci_dev *pdev = sge->adapter->pdev;
294 unsigned int size, i;
295
296 if (sge->respQ.entries) {
297 size = sizeof(struct respQ_e) * sge->respQ.size;
298 pci_free_consistent(pdev, size, sge->respQ.entries,
299 sge->respQ.dma_addr);
300 }
301
302 for (i = 0; i < SGE_FREELQ_N; i++) {
303 struct freelQ *q = &sge->freelQ[i];
304
305 if (q->centries) {
306 free_freelQ_buffers(pdev, q);
307 kfree(q->centries);
308 }
309 if (q->entries) {
310 size = sizeof(struct freelQ_e) * q->size;
311 pci_free_consistent(pdev, size, q->entries,
312 q->dma_addr);
313 }
314 }
315}
316
317/*
318 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
319 * response queue.
320 */
321static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
322{
323 struct pci_dev *pdev = sge->adapter->pdev;
324 unsigned int size, i;
325
326 for (i = 0; i < SGE_FREELQ_N; i++) {
327 struct freelQ *q = &sge->freelQ[i];
328
329 q->genbit = 1;
330 q->size = p->freelQ_size[i];
331 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
332 size = sizeof(struct freelQ_e) * q->size;
333 q->entries = (struct freelQ_e *)
334 pci_alloc_consistent(pdev, size, &q->dma_addr);
335 if (!q->entries)
336 goto err_no_mem;
337 memset(q->entries, 0, size);
338 size = sizeof(struct freelQ_ce) * q->size;
339 q->centries = kmalloc(size, GFP_KERNEL);
340 if (!q->centries)
341 goto err_no_mem;
342 memset(q->centries, 0, size);
343 }
344
345 /*
346 * Calculate the buffer sizes for the two free lists. FL0 accommodates
347 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
348 * including all the sk_buff overhead.
349 *
350	 * Note: For T2, FL0 and FL1 are reversed.
351 */
352 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
353 sizeof(struct cpl_rx_data) +
354 sge->freelQ[!sge->jumbo_fl].dma_offset;
355 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
356 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
357
358 /*
359 * Setup which skb recycle Q should be used when recycling buffers from
360 * each free list.
361 */
362 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
363 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
364
365 sge->respQ.genbit = 1;
366 sge->respQ.size = SGE_RESPQ_E_N;
367 sge->respQ.credits = 0;
368 size = sizeof(struct respQ_e) * sge->respQ.size;
369 sge->respQ.entries = (struct respQ_e *)
370 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
371 if (!sge->respQ.entries)
372 goto err_no_mem;
373 memset(sge->respQ.entries, 0, size);
374 return 0;
375
376err_no_mem:
377 free_rx_resources(sge);
378 return -ENOMEM;
379}
380
381/*
382 * Reclaims n TX descriptors and frees the buffers associated with them.
383 */
384static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
385{
386 struct cmdQ_ce *ce;
387 struct pci_dev *pdev = sge->adapter->pdev;
388 unsigned int cidx = q->cidx;
389
390 q->in_use -= n;
391 ce = &q->centries[cidx];
392 while (n--) {
393 if (q->sop)
394 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
395 pci_unmap_len(ce, dma_len),
396 PCI_DMA_TODEVICE);
397 else
398 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
399 pci_unmap_len(ce, dma_len),
400 PCI_DMA_TODEVICE);
401 q->sop = 0;
402 if (ce->skb) {
403 dev_kfree_skb(ce->skb);
404 q->sop = 1;
405 }
406 ce++;
407 if (++cidx == q->size) {
408 cidx = 0;
409 ce = q->centries;
410 }
411 }
412 q->cidx = cidx;
413}
414
415/*
416 * Free TX resources.
417 *
418 * Assumes that SGE is stopped and all interrupts are disabled.
419 */
420static void free_tx_resources(struct sge *sge)
421{
422 struct pci_dev *pdev = sge->adapter->pdev;
423 unsigned int size, i;
424
425 for (i = 0; i < SGE_CMDQ_N; i++) {
426 struct cmdQ *q = &sge->cmdQ[i];
427
428 if (q->centries) {
429 if (q->in_use)
430 free_cmdQ_buffers(sge, q, q->in_use);
431 kfree(q->centries);
432 }
433 if (q->entries) {
434 size = sizeof(struct cmdQ_e) * q->size;
435 pci_free_consistent(pdev, size, q->entries,
436 q->dma_addr);
437 }
438 }
439}
440
441/*
442 * Allocates basic TX resources, consisting of memory mapped command Qs.
443 */
444static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
445{
446 struct pci_dev *pdev = sge->adapter->pdev;
447 unsigned int size, i;
448
449 for (i = 0; i < SGE_CMDQ_N; i++) {
450 struct cmdQ *q = &sge->cmdQ[i];
451
452 q->genbit = 1;
453 q->sop = 1;
454 q->size = p->cmdQ_size[i];
455 q->in_use = 0;
456 q->status = 0;
457 q->processed = q->cleaned = 0;
458 q->stop_thres = 0;
459 spin_lock_init(&q->lock);
460 size = sizeof(struct cmdQ_e) * q->size;
461 q->entries = (struct cmdQ_e *)
462 pci_alloc_consistent(pdev, size, &q->dma_addr);
463 if (!q->entries)
464 goto err_no_mem;
465 memset(q->entries, 0, size);
466 size = sizeof(struct cmdQ_ce) * q->size;
467 q->centries = kmalloc(size, GFP_KERNEL);
468 if (!q->centries)
469 goto err_no_mem;
470 memset(q->centries, 0, size);
471 }
472
473 /*
474 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
475 * only. For queue 0 set the stop threshold so we can handle one more
476 * packet from each port, plus reserve an additional 24 entries for
477 * Ethernet packets only. Queue 1 never suspends nor do we reserve
478 * space for Ethernet packets.
479 */
480 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
481 (MAX_SKB_FRAGS + 1);
482 return 0;
483
484err_no_mem:
485 free_tx_resources(sge);
486 return -ENOMEM;
487}
488
489static inline void setup_ring_params(struct adapter *adapter, u64 addr,
490 u32 size, int base_reg_lo,
491 int base_reg_hi, int size_reg)
492{
493 writel((u32)addr, adapter->regs + base_reg_lo);
494 writel(addr >> 32, adapter->regs + base_reg_hi);
495 writel(size, adapter->regs + size_reg);
496}
497
498/*
499 * Enable/disable VLAN acceleration.
500 */
501void t1_set_vlan_accel(struct adapter *adapter, int on_off)
502{
503 struct sge *sge = adapter->sge;
504
505 sge->sge_control &= ~F_VLAN_XTRACT;
506 if (on_off)
507 sge->sge_control |= F_VLAN_XTRACT;
508 if (adapter->open_device_map) {
509 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
510 readl(adapter->regs + A_SG_CONTROL); /* flush */
511 }
512}
513
514/*
515 * Programs the various SGE registers. The engine is not yet enabled, but
516 * sge->sge_control is set up and ready to go.
517 */
518static void configure_sge(struct sge *sge, struct sge_params *p)
519{
520 struct adapter *ap = sge->adapter;
521
522 writel(0, ap->regs + A_SG_CONTROL);
523 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
524 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
525 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
526 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
527 setup_ring_params(ap, sge->freelQ[0].dma_addr,
528 sge->freelQ[0].size, A_SG_FL0BASELWR,
529 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
530 setup_ring_params(ap, sge->freelQ[1].dma_addr,
531 sge->freelQ[1].size, A_SG_FL1BASELWR,
532 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
533
534 /* The threshold comparison uses <. */
535 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
536
537 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
538 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
539 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
540
541 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
542 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
543 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
544 F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
545 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
546
547#if defined(__BIG_ENDIAN_BITFIELD)
548 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
549#endif
550
551 /* Initialize no-resource timer */
552 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
553
554 t1_sge_set_coalesce_params(sge, p);
555}
556
557/*
558 * Return the payload capacity of the jumbo free-list buffers.
559 */
560static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
561{
562 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
563 sge->freelQ[sge->jumbo_fl].dma_offset -
564 sizeof(struct cpl_rx_data);
565}
566
567/*
568 * Frees all SGE related resources and the sge structure itself
569 */
570void t1_sge_destroy(struct sge *sge)
571{
572 if (sge->espibug_skb)
573 kfree_skb(sge->espibug_skb);
574
575 free_tx_resources(sge);
576 free_rx_resources(sge);
577 kfree(sge);
578}
579
580/*
581 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
582 * context Q) until the Q is full or alloc_skb fails.
583 *
584 * It is possible that the generation bits already match, indicating that the
585 * buffer is already valid and nothing needs to be done. This happens when we
586 * copied a received buffer into a new sk_buff during the interrupt processing.
587 *
588 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
589 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
590 * aligned.
591 */
592static void refill_free_list(struct sge *sge, struct freelQ *q)
593{
594 struct pci_dev *pdev = sge->adapter->pdev;
595 struct freelQ_ce *ce = &q->centries[q->pidx];
596 struct freelQ_e *e = &q->entries[q->pidx];
597 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
598
599
600 while (q->credits < q->size) {
601 struct sk_buff *skb;
602 dma_addr_t mapping;
603
604 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
605 if (!skb)
606 break;
607
608 skb_reserve(skb, q->dma_offset);
609 mapping = pci_map_single(pdev, skb->data, dma_len,
610 PCI_DMA_FROMDEVICE);
611 ce->skb = skb;
612 pci_unmap_addr_set(ce, dma_addr, mapping);
613 pci_unmap_len_set(ce, dma_len, dma_len);
614 e->addr_lo = (u32)mapping;
615 e->addr_hi = (u64)mapping >> 32;
616 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
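		/* Order the descriptor writes: the address and length words
		 * must be visible before the gen2 write below completes the
		 * entry for the hardware.
		 */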
617 wmb();
618 e->gen2 = V_CMD_GEN2(q->genbit);
619
620 e++;
621 ce++;
622 if (++q->pidx == q->size) {
623 q->pidx = 0;
624 q->genbit ^= 1;
625 ce = q->centries;
626 e = q->entries;
627 }
628 q->credits++;
629 }
630
631}
632
633/*
634 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
635 * of both rings, we go into 'few interrupt mode' in order to give the system
636 * time to free up resources.
637 */
638static void freelQs_empty(struct sge *sge)
639{
640 struct adapter *adapter = sge->adapter;
641 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
642 u32 irqholdoff_reg;
643
644 refill_free_list(sge, &sge->freelQ[0]);
645 refill_free_list(sge, &sge->freelQ[1]);
646
647 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
648 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
649 irq_reg |= F_FL_EXHAUSTED;
650 irqholdoff_reg = sge->fixed_intrtimer;
651 } else {
652 /* Clear the F_FL_EXHAUSTED interrupts for now */
653 irq_reg &= ~F_FL_EXHAUSTED;
654 irqholdoff_reg = sge->intrtimer_nres;
655 }
656 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
657 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
658
659 /* We reenable the Qs to force a freelist GTS interrupt later */
660 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
661}
662
663#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
664#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
665#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
666 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
667
668/*
669 * Disable SGE Interrupts
670 */
671void t1_sge_intr_disable(struct sge *sge)
672{
673 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
674
675 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
676 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
677}
678
679/*
680 * Enable SGE interrupts.
681 */
682void t1_sge_intr_enable(struct sge *sge)
683{
684 u32 en = SGE_INT_ENABLE;
685 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
686
687 if (sge->adapter->flags & TSO_CAPABLE)
688 en &= ~F_PACKET_TOO_BIG;
689 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
690 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
691}
692
693/*
694 * Clear SGE interrupts.
695 */
696void t1_sge_intr_clear(struct sge *sge)
697{
698 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
699 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
700}
701
702/*
703 * SGE 'Error' interrupt handler
704 */
705int t1_sge_intr_error_handler(struct sge *sge)
706{
707 struct adapter *adapter = sge->adapter;
708 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
709
710 if (adapter->flags & TSO_CAPABLE)
711 cause &= ~F_PACKET_TOO_BIG;
712 if (cause & F_RESPQ_EXHAUSTED)
713 sge->stats.respQ_empty++;
714 if (cause & F_RESPQ_OVERFLOW) {
715 sge->stats.respQ_overflow++;
716 CH_ALERT("%s: SGE response queue overflow\n",
717 adapter->name);
718 }
719 if (cause & F_FL_EXHAUSTED) {
720 sge->stats.freelistQ_empty++;
721 freelQs_empty(sge);
722 }
723 if (cause & F_PACKET_TOO_BIG) {
724 sge->stats.pkt_too_big++;
725 CH_ALERT("%s: SGE max packet size exceeded\n",
726 adapter->name);
727 }
728 if (cause & F_PACKET_MISMATCH) {
729 sge->stats.pkt_mismatch++;
730 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
731 }
732 if (cause & SGE_INT_FATAL)
733 t1_fatal_err(adapter);
734
735 writel(cause, adapter->regs + A_SG_INT_CAUSE);
736 return 0;
737}
738
739const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
740{
741 return &sge->stats;
742}
743
744const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
745{
746 return &sge->port_stats[port];
747}
748
749/**
750 * recycle_fl_buf - recycle a free list buffer
751 * @fl: the free list
752 * @idx: index of buffer to recycle
753 *
754 * Recycles the specified buffer on the given free list by adding it at
755 * the next available slot on the list.
756 */
757static void recycle_fl_buf(struct freelQ *fl, int idx)
758{
759 struct freelQ_e *from = &fl->entries[idx];
760 struct freelQ_e *to = &fl->entries[fl->pidx];
761
762 fl->centries[fl->pidx] = fl->centries[idx];
763 to->addr_lo = from->addr_lo;
764 to->addr_hi = from->addr_hi;
765 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
766 wmb();
767 to->gen2 = V_CMD_GEN2(fl->genbit);
768 fl->credits++;
769
770 if (++fl->pidx == fl->size) {
771 fl->pidx = 0;
772 fl->genbit ^= 1;
773 }
774}
775
776/**
777 * get_packet - return the next ingress packet buffer
778 * @pdev: the PCI device that received the packet
779 * @fl: the SGE free list holding the packet
780 * @len: the actual packet length, excluding any SGE padding
781 * @dma_pad: padding at beginning of buffer left by SGE DMA
782 * @skb_pad: padding to be used if the packet is copied
783 * @copy_thres: length threshold under which a packet should be copied
784 * @drop_thres: # of remaining buffers before we start dropping packets
785 *
786 * Get the next packet from a free list and complete setup of the
787 * sk_buff. If the packet is small we make a copy and recycle the
788 * original buffer, otherwise we use the original buffer itself. If a
789 * positive drop threshold is supplied packets are dropped and their
790 * buffers recycled if (a) the number of remaining buffers is under the
791 * threshold and the packet is too big to copy, or (b) the packet should
792 * be copied but there is no memory for the copy.
793 */
794static inline struct sk_buff *get_packet(struct pci_dev *pdev,
795 struct freelQ *fl, unsigned int len,
796 int dma_pad, int skb_pad,
797 unsigned int copy_thres,
798 unsigned int drop_thres)
799{
800 struct sk_buff *skb;
801 struct freelQ_ce *ce = &fl->centries[fl->cidx];
802
803 if (len < copy_thres) {
804 skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
805 if (likely(skb != NULL)) {
806 skb_reserve(skb, skb_pad);
807 skb_put(skb, len);
808 pci_dma_sync_single_for_cpu(pdev,
809 pci_unmap_addr(ce, dma_addr),
810 pci_unmap_len(ce, dma_len),
811 PCI_DMA_FROMDEVICE);
812 memcpy(skb->data, ce->skb->data + dma_pad, len);
813 pci_dma_sync_single_for_device(pdev,
814 pci_unmap_addr(ce, dma_addr),
815 pci_unmap_len(ce, dma_len),
816 PCI_DMA_FROMDEVICE);
817 } else if (!drop_thres)
818 goto use_orig_buf;
819
820 recycle_fl_buf(fl, fl->cidx);
821 return skb;
822 }
823
824 if (fl->credits < drop_thres) {
825 recycle_fl_buf(fl, fl->cidx);
826 return NULL;
827 }
828
829use_orig_buf:
830 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
831 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
832 skb = ce->skb;
833 skb_reserve(skb, dma_pad);
834 skb_put(skb, len);
835 return skb;
836}
837
838/**
839 * unexpected_offload - handle an unexpected offload packet
840 * @adapter: the adapter
841 * @fl: the free list that received the packet
842 *
843 * Called when we receive an unexpected offload packet (e.g., the TOE
844 * function is disabled or the card is a NIC). Prints a message and
845 * recycles the buffer.
846 */
847static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
848{
849 struct freelQ_ce *ce = &fl->centries[fl->cidx];
850 struct sk_buff *skb = ce->skb;
851
852 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
853 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
854 CH_ERR("%s: unexpected offload packet, cmd %u\n",
855 adapter->name, *skb->data);
856 recycle_fl_buf(fl, fl->cidx);
857}
858
859/*
860 * Write the command descriptors to transmit the given skb starting at
861 * descriptor pidx with the given generation.
862 */
863static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
864 unsigned int pidx, unsigned int gen,
865 struct cmdQ *q)
866{
867 dma_addr_t mapping;
868 struct cmdQ_e *e, *e1;
869 struct cmdQ_ce *ce;
870 unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
871
872 mapping = pci_map_single(adapter->pdev, skb->data,
873 skb->len - skb->data_len, PCI_DMA_TODEVICE);
874 ce = &q->centries[pidx];
875 ce->skb = NULL;
876 pci_unmap_addr_set(ce, dma_addr, mapping);
877 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
878
879 flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
880 V_CMD_GEN2(gen);
881 e = &q->entries[pidx];
882 e->addr_lo = (u32)mapping;
883 e->addr_hi = (u64)mapping >> 32;
884 e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
885 for (e1 = e, i = 0; nfrags--; i++) {
886 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
887
888 ce++;
889 e1++;
890 if (++pidx == q->size) {
891 pidx = 0;
892 gen ^= 1;
893 ce = q->centries;
894 e1 = q->entries;
895 }
896
897 mapping = pci_map_page(adapter->pdev, frag->page,
898 frag->page_offset, frag->size,
899 PCI_DMA_TODEVICE);
900 ce->skb = NULL;
901 pci_unmap_addr_set(ce, dma_addr, mapping);
902 pci_unmap_len_set(ce, dma_len, frag->size);
903
904 e1->addr_lo = (u32)mapping;
905 e1->addr_hi = (u64)mapping >> 32;
906 e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
907 e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
908 V_CMD_GEN2(gen);
909 }
910
911 ce->skb = skb;
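	/* The first descriptor's flags word (carrying SOP and the generation
	 * bits) is written last, after all fragment descriptors, so the
	 * hardware never fetches a partially built chain.
	 */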
912 wmb();
913 e->flags = flags;
914}
915
916/*
917 * Clean up completed Tx buffers.
918 */
919static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
920{
921 unsigned int reclaim = q->processed - q->cleaned;
922
923 if (reclaim) {
924 free_cmdQ_buffers(sge, q, reclaim);
925 q->cleaned += reclaim;
926 }
927}
928
929#ifndef SET_ETHTOOL_OPS
930# define __netif_rx_complete(dev) netif_rx_complete(dev)
931#endif
932
933/*
934 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
935 * ports plus the TOE all multiplexing onto a single response queue; therefore,
936 * accepting new responses cannot depend on the state of any particular port.
937 * So define our own equivalent that omits the netif_running() test.
938 */
939static inline int napi_schedule_prep(struct net_device *dev)
940{
941 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
942}
943
944
945/**
946 * sge_rx - process an ingress ethernet packet
947 * @sge: the sge structure
948 * @fl: the free list that contains the packet buffer
949 * @len: the packet length
950 *
951 * Process an ingress Ethernet packet and deliver it to the stack.
952 */
953static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
954{
955 struct sk_buff *skb;
956 struct cpl_rx_pkt *p;
957 struct adapter *adapter = sge->adapter;
958
959 sge->stats.ethernet_pkts++;
960 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
961 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
962 SGE_RX_DROP_THRES);
963 if (!skb) {
964 sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
965 return 0;
966 }
967
968 p = (struct cpl_rx_pkt *)skb->data;
969 skb_pull(skb, sizeof(*p));
970 skb->dev = adapter->port[p->iff].dev;
971 skb->dev->last_rx = jiffies;
972 skb->protocol = eth_type_trans(skb, skb->dev);
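	/* A hardware csum value of 0xffff appears to indicate a verified
	 * TCP/UDP checksum; only in that case do we mark the skb
	 * CHECKSUM_UNNECESSARY.
	 */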
973 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
974 skb->protocol == htons(ETH_P_IP) &&
975 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
976 sge->port_stats[p->iff].rx_cso_good++;
977 skb->ip_summed = CHECKSUM_UNNECESSARY;
978 } else
979 skb->ip_summed = CHECKSUM_NONE;
980
981 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
982 sge->port_stats[p->iff].vlan_xtract++;
983 if (adapter->params.sge.polling)
984 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
985 ntohs(p->vlan));
986 else
987 vlan_hwaccel_rx(skb, adapter->vlan_grp,
988 ntohs(p->vlan));
989 } else if (adapter->params.sge.polling)
990 netif_receive_skb(skb);
991 else
992 netif_rx(skb);
993 return 0;
994}
995
996/*
997 * Returns true if a command queue has enough available descriptors that
998 * we can resume Tx operation after temporarily disabling its packet queue.
999 */
1000static inline int enough_free_Tx_descs(const struct cmdQ *q)
1001{
1002 unsigned int r = q->processed - q->cleaned;
1003
1004 return q->in_use - r < (q->size >> 1);
1005}
1006
1007/*
1008 * Called when sufficient space has become available in the SGE command queues
1009 * after the Tx packet schedulers have been suspended to restart the Tx path.
1010 */
1011static void restart_tx_queues(struct sge *sge)
1012{
1013 struct adapter *adap = sge->adapter;
1014
1015 if (enough_free_Tx_descs(&sge->cmdQ[0])) {
1016 int i;
1017
1018 for_each_port(adap, i) {
1019 struct net_device *nd = adap->port[i].dev;
1020
1021 if (test_and_clear_bit(nd->if_port,
1022 &sge->stopped_tx_queues) &&
1023 netif_running(nd)) {
1024 sge->stats.cmdQ_restarted[3]++;
1025 netif_wake_queue(nd);
1026 }
1027 }
1028 }
1029}
1030
1031/*
1032 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1033 * information.
1034 */
1035static unsigned int update_tx_info(struct adapter *adapter,
1036 unsigned int flags,
1037 unsigned int pr0)
1038{
1039 struct sge *sge = adapter->sge;
1040 struct cmdQ *cmdq = &sge->cmdQ[0];
1041
1042 cmdq->processed += pr0;
1043
1044 if (flags & F_CMDQ0_ENABLE) {
1045 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1046
1047 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1048 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1049 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1050 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1051 }
1052 flags &= ~F_CMDQ0_ENABLE;
1053 }
1054
1055 if (unlikely(sge->stopped_tx_queues != 0))
1056 restart_tx_queues(sge);
1057
1058 return flags;
1059}
1060
1061/*
1062 * Process SGE responses, up to the supplied budget. Returns the number of
1063 * responses processed. A negative budget is effectively unlimited.
1064 */
1065static int process_responses(struct adapter *adapter, int budget)
1066{
1067 struct sge *sge = adapter->sge;
1068 struct respQ *q = &sge->respQ;
1069 struct respQ_e *e = &q->entries[q->cidx];
1070 int budget_left = budget;
1071 unsigned int flags = 0;
1072 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1073
1074
1075 while (likely(budget_left && e->GenerationBit == q->genbit)) {
1076 flags |= e->Qsleeping;
1077
1078 cmdq_processed[0] += e->Cmdq0CreditReturn;
1079 cmdq_processed[1] += e->Cmdq1CreditReturn;
1080
1081 /* We batch updates to the TX side to avoid cacheline
1082 * ping-pong of TX state information on MP where the sender
1083 * might run on a different CPU than this function...
1084 */
1085 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
1086 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1087 cmdq_processed[0] = 0;
1088 }
1089 if (unlikely(cmdq_processed[1] > 16)) {
1090 sge->cmdQ[1].processed += cmdq_processed[1];
1091 cmdq_processed[1] = 0;
1092 }
1093 if (likely(e->DataValid)) {
1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095
1096 if (unlikely(!e->Sop || !e->Eop))
1097 BUG();
1098 if (unlikely(e->Offload))
1099 unexpected_offload(adapter, fl);
1100 else
1101 sge_rx(sge, fl, e->BufferLength);
1102
1103 /*
1104 * Note: this depends on each packet consuming a
1105 * single free-list buffer; cf. the BUG above.
1106 */
1107 if (++fl->cidx == fl->size)
1108 fl->cidx = 0;
1109 if (unlikely(--fl->credits <
1110 fl->size - SGE_FREEL_REFILL_THRESH))
1111 refill_free_list(sge, fl);
1112 } else
1113 sge->stats.pure_rsps++;
1114
1115 e++;
1116 if (unlikely(++q->cidx == q->size)) {
1117 q->cidx = 0;
1118 q->genbit ^= 1;
1119 e = q->entries;
1120 }
1121 prefetch(e);
1122
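		/* Return accumulated response-queue credits to the SGE in
		 * batches rather than one MMIO write per processed entry.
		 */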
1123 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1124 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1125 q->credits = 0;
1126 }
1127 --budget_left;
1128 }
1129
1130 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1131 sge->cmdQ[1].processed += cmdq_processed[1];
1132
1133 budget -= budget_left;
1134 return budget;
1135}
1136
1137/*
1138 * A simpler version of process_responses() that handles only pure (i.e.,
1139 * non data-carrying) responses. Such responses are too lightweight to justify
1140 * calling a softirq when using NAPI, so we handle them specially in hard
1141 * interrupt context. The function is called with a pointer to a response,
1142 * which the caller must ensure is a valid pure response. Returns 1 if it
1143 * encounters a valid data-carrying response, 0 otherwise.
1144 */
1145static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1146{
1147 struct sge *sge = adapter->sge;
1148 struct respQ *q = &sge->respQ;
1149 unsigned int flags = 0;
1150 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1151
1152 do {
1153 flags |= e->Qsleeping;
1154
1155 cmdq_processed[0] += e->Cmdq0CreditReturn;
1156 cmdq_processed[1] += e->Cmdq1CreditReturn;
1157
1158 e++;
1159 if (unlikely(++q->cidx == q->size)) {
1160 q->cidx = 0;
1161 q->genbit ^= 1;
1162 e = q->entries;
1163 }
1164 prefetch(e);
1165
1166 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1167 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1168 q->credits = 0;
1169 }
1170 sge->stats.pure_rsps++;
1171 } while (e->GenerationBit == q->genbit && !e->DataValid);
1172
1173 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1174 sge->cmdQ[1].processed += cmdq_processed[1];
1175
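	/* If the entry we stopped at is still valid it must carry data, since
	 * the loop above consumes all pure responses; in that case the caller
	 * should schedule NAPI.
	 */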
1176 return e->GenerationBit == q->genbit;
1177}
1178
1179/*
1180 * Handler for new data events when using NAPI. This does not need any locking
1181 * or protection from interrupts as data interrupts are off at this point and
1182 * other adapter interrupts do not interfere.
1183 */
1184static int t1_poll(struct net_device *dev, int *budget)
1185{
1186 struct adapter *adapter = dev->priv;
1187 int effective_budget = min(*budget, dev->quota);
1188
1189 int work_done = process_responses(adapter, effective_budget);
1190 *budget -= work_done;
1191 dev->quota -= work_done;
1192
1193 if (work_done >= effective_budget)
1194 return 1;
1195
1196 __netif_rx_complete(dev);
1197
1198 /*
1199 * Because we don't atomically flush the following write it is
1200 * possible that in very rare cases it can reach the device in a way
1201 * that races with a new response being written plus an error interrupt
1202 * causing the NAPI interrupt handler below to return unhandled status
1203 * to the OS. To protect against this would require flushing the write
1204 * and doing both the write and the flush with interrupts off. Way too
1205 * expensive and unjustifiable given the rarity of the race.
1206 */
1207 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1208 return 0;
1209}
1210
1211/*
1212 * Returns true if the device is already scheduled for polling.
1213 */
1214static inline int napi_is_scheduled(struct net_device *dev)
1215{
1216 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1217}
1218
1219/*
1220 * NAPI version of the main interrupt handler.
1221 */
1222static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
1223{
1224 int handled;
1225 struct adapter *adapter = data;
1226 struct sge *sge = adapter->sge;
1227 struct respQ *q = &adapter->sge->respQ;
1228
1229 /*
1230 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
1231 * handler has control of the response queue and the interrupt handler
1232 * can look at the queue reliably only once it knows NAPI is off.
1233 * We can't wait that long to clear the SGE_DATA interrupt because we
1234 * could race with t1_poll rearming the SGE interrupt, so we need to
1235 * clear the interrupt speculatively and really early on.
1236 */
1237 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1238
1239 spin_lock(&adapter->async_lock);
1240 if (!napi_is_scheduled(sge->netdev)) {
1241 struct respQ_e *e = &q->entries[q->cidx];
1242
1243 if (e->GenerationBit == q->genbit) {
1244 if (e->DataValid ||
1245 process_pure_responses(adapter, e)) {
1246 if (likely(napi_schedule_prep(sge->netdev)))
1247 __netif_rx_schedule(sge->netdev);
1248 else
1249 printk(KERN_CRIT
1250 "NAPI schedule failure!\n");
1251 } else
1252 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1253 handled = 1;
1254 goto unlock;
1255 } else
1256 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1257 } else
1258 if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
1259 printk(KERN_ERR "data interrupt while NAPI running\n");
1260
1261 handled = t1_slow_intr_handler(adapter);
1262 if (!handled)
1263 sge->stats.unhandled_irqs++;
1264 unlock:
1265 spin_unlock(&adapter->async_lock);
1266 return IRQ_RETVAL(handled != 0);
1267}
1268
1269/*
1270 * Main interrupt handler, optimized assuming that we took a 'DATA'
1271 * interrupt.
1272 *
1273 * 1. Clear the interrupt
1274 * 2. Loop while we find valid descriptors and process them; accumulate
1275 * information that can be processed after the loop
1276 * 3. Tell the SGE at which index we stopped processing descriptors
1277 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1278 * outstanding TX buffers waiting, replenish RX buffers, potentially
1279 * reenable upper layers if they were turned off due to lack of TX
1280 * resources which are available again.
1281 * 5. If we took an interrupt but no valid respQ descriptors were found, we
1282 * let the slow_intr_handler run and do error handling.
1283 */
1284static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
1285{
1286 int work_done;
1287 struct respQ_e *e;
1288 struct adapter *adapter = cookie;
1289 struct respQ *Q = &adapter->sge->respQ;
1290
1291 spin_lock(&adapter->async_lock);
1292 e = &Q->entries[Q->cidx];
1293 prefetch(e);
1294
1295 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1296
1297 if (likely(e->GenerationBit == Q->genbit))
1298 work_done = process_responses(adapter, -1);
1299 else
1300 work_done = t1_slow_intr_handler(adapter);
1301
1302 /*
1303 * The unconditional clearing of the PL_CAUSE above may have raced
1304 * with DMA completion and the corresponding generation of a response
1305 * to cause us to miss the resulting data interrupt. The next write
1306 * is also unconditional to recover the missed interrupt and render
1307 * this race harmless.
1308 */
1309 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1310
1311 if (!work_done)
1312 adapter->sge->stats.unhandled_irqs++;
1313 spin_unlock(&adapter->async_lock);
1314 return IRQ_RETVAL(work_done != 0);
1315}
1316
1317intr_handler_t t1_select_intr_handler(adapter_t *adapter)
1318{
1319 return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
1320}
1321
1322/*
1323 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1324 *
1325 * The code figures out how many entries the sk_buff will require in the
1326 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1327 * has completed. Then, it doesn't access the global structure anymore, but
1328 * uses the corresponding fields on the stack. In conjunction with a spinlock
1329 * around that code, we can make the function reentrant without holding the
1330 * lock when we actually enqueue (which might be expensive, especially on
1331 * architectures with IO MMUs).
1332 *
1333 * This runs with softirqs disabled.
1334 */
1335unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1336 unsigned int qid, struct net_device *dev)
1337{
1338 struct sge *sge = adapter->sge;
1339 struct cmdQ *q = &sge->cmdQ[qid];
1340 unsigned int credits, pidx, genbit, count;
1341
1342 spin_lock(&q->lock);
1343 reclaim_completed_tx(sge, q);
1344
1345 pidx = q->pidx;
1346 credits = q->size - q->in_use;
1347 count = 1 + skb_shinfo(skb)->nr_frags;
1348
1349 { /* Ethernet packet */
1350 if (unlikely(credits < count)) {
1351 netif_stop_queue(dev);
1352 set_bit(dev->if_port, &sge->stopped_tx_queues);
1353 sge->stats.cmdQ_full[3]++;
1354 spin_unlock(&q->lock);
1355 CH_ERR("%s: Tx ring full while queue awake!\n",
1356 adapter->name);
1357 return 1;
1358 }
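		/* Suspend the queue early once the remaining credits drop
		 * below the reserve (stop_thres) kept for one maximally
		 * fragmented packet per port.
		 */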
1359 if (unlikely(credits - count < q->stop_thres)) {
1360 sge->stats.cmdQ_full[3]++;
1361 netif_stop_queue(dev);
1362 set_bit(dev->if_port, &sge->stopped_tx_queues);
1363 }
1364 }
1365 q->in_use += count;
1366 genbit = q->genbit;
1367 q->pidx += count;
1368 if (q->pidx >= q->size) {
1369 q->pidx -= q->size;
1370 q->genbit ^= 1;
1371 }
1372 spin_unlock(&q->lock);
1373
1374 write_tx_descs(adapter, skb, pidx, genbit, q);
1375
1376 /*
1377 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1378 * the doorbell if the Q is asleep. There is a natural race, where
1379 * the hardware is going to sleep just after we checked, however,
1380 * then the interrupt handler will detect the outstanding TX packet
1381 * and ring the doorbell for us.
1382 */
1383 if (qid)
1384 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1385 else {
1386 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1387 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1388 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1389 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1390 }
1391 }
1392 return 0;
1393}
1394
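/*
 * Pack the CPL Ethernet encapsulation type and the MSS into the 16-bit
 * eth_type_mss field of the LSO header: MSS in the low 14 bits, type in
 * the top 2 bits.
 */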
1395#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
1396
1397/*
1398 * eth_hdr_len - return the length of an Ethernet header
1399 * @data: pointer to the start of the Ethernet header
1400 *
1401 * Returns the length of an Ethernet header, including optional VLAN tag.
1402 */
1403static inline int eth_hdr_len(const void *data)
1404{
1405 const struct ethhdr *e = data;
1406
1407 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1408}
1409
1410/*
1411 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1412 */
1413int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414{
1415 struct adapter *adapter = dev->priv;
1416 struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
1417 struct sge *sge = adapter->sge;
1418 struct cpl_tx_pkt *cpl;
1419
1420#ifdef NETIF_F_TSO
1421 if (skb_shinfo(skb)->tso_size) {
1422 int eth_type;
1423 struct cpl_tx_pkt_lso *hdr;
1424
1425 st->tso++;
1426
1427 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1428 CPL_ETH_II : CPL_ETH_II_VLAN;
1429
1430 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1431 hdr->opcode = CPL_TX_PKT_LSO;
1432 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1433 hdr->ip_hdr_words = skb->nh.iph->ihl;
1434 hdr->tcp_hdr_words = skb->h.th->doff;
1435 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1436 skb_shinfo(skb)->tso_size));
1437 hdr->len = htonl(skb->len - sizeof(*hdr));
1438 cpl = (struct cpl_tx_pkt *)hdr;
1439 sge->stats.tx_lso_pkts++;
1440 } else
1441#endif
1442 {
1443 /*
1444		 * Packets shorter than ETH_HLEN can break the MAC; drop them
1445		 * early. Also, we may get oversized packets because some
1446		 * parts of the kernel don't handle our unusual hard_header_len
1447		 * right; drop those too.
1448 */
1449 if (unlikely(skb->len < ETH_HLEN ||
1450 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1451 dev_kfree_skb_any(skb);
1452 return NET_XMIT_SUCCESS;
1453 }
1454
1455 /*
1456 * We are using a non-standard hard_header_len and some kernel
1457 * components, such as pktgen, do not handle it right.
1458 * Complain when this happens but try to fix things up.
1459 */
1460 if (unlikely(skb_headroom(skb) <
1461 dev->hard_header_len - ETH_HLEN)) {
1462 struct sk_buff *orig_skb = skb;
1463
1464 if (net_ratelimit())
1465 printk(KERN_ERR "%s: inadequate headroom in "
1466 "Tx packet\n", dev->name);
1467 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1468 dev_kfree_skb_any(orig_skb);
1469 if (!skb)
1470 return -ENOMEM;
1471 }
1472
1473 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1474 skb->ip_summed == CHECKSUM_HW &&
1475 skb->nh.iph->protocol == IPPROTO_UDP)
1476 if (unlikely(skb_checksum_help(skb, 0))) {
1477 dev_kfree_skb_any(skb);
1478 return -ENOMEM;
1479 }
1480
1481		/* Try to catch the gratuitous ARP here; we'll reuse it later
1482		 * to flush out stuck ESPI packets.
1483 */
1484 if (unlikely(!adapter->sge->espibug_skb)) {
1485 if (skb->protocol == htons(ETH_P_ARP) &&
1486 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
1487 adapter->sge->espibug_skb = skb;
1488 /* We want to re-use this skb later. We
1489 * simply bump the reference count and it
1490 * will not be freed...
1491 */
1492 skb = skb_get(skb);
1493 }
1494 }
1495
1496 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1497 cpl->opcode = CPL_TX_PKT;
1498 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1499 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
1500 /* the length field isn't used so don't bother setting it */
1501
1502 st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
1503 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
1504 sge->stats.tx_reg_pkts++;
1505 }
1506 cpl->iff = dev->if_port;
1507
1508#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1509 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1510 cpl->vlan_valid = 1;
1511 cpl->vlan = htons(vlan_tx_tag_get(skb));
1512 st->vlan_insert++;
1513 } else
1514#endif
1515 cpl->vlan_valid = 0;
1516
1517 dev->trans_start = jiffies;
1518 return t1_sge_tx(skb, adapter, 0, dev);
1519}
1520
1521/*
1522 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1523 */
1524static void sge_tx_reclaim_cb(unsigned long data)
1525{
1526 int i;
1527 struct sge *sge = (struct sge *)data;
1528
1529 for (i = 0; i < SGE_CMDQ_N; ++i) {
1530 struct cmdQ *q = &sge->cmdQ[i];
1531
1532 if (!spin_trylock(&q->lock))
1533 continue;
1534
1535 reclaim_completed_tx(sge, q);
1536 if (i == 0 && q->in_use) /* flush pending credits */
1537 writel(F_CMDQ0_ENABLE,
1538 sge->adapter->regs + A_SG_DOORBELL);
1539
1540 spin_unlock(&q->lock);
1541 }
1542 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1543}
1544
1545/*
1546 * Propagate changes of the SGE coalescing parameters to the HW.
1547 */
1548int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1549{
1550 sge->netdev->poll = t1_poll;
1551 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1552 core_ticks_per_usec(sge->adapter);
1553 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1554 return 0;
1555}
1556
1557/*
1558 * Allocates both RX and TX resources and configures the SGE. However,
1559 * the hardware is not enabled yet.
1560 */
1561int t1_sge_configure(struct sge *sge, struct sge_params *p)
1562{
1563 if (alloc_rx_resources(sge, p))
1564 return -ENOMEM;
1565 if (alloc_tx_resources(sge, p)) {
1566 free_rx_resources(sge);
1567 return -ENOMEM;
1568 }
1569 configure_sge(sge, p);
1570
1571 /*
1572 * Now that we have sized the free lists calculate the payload
1573 * capacity of the large buffers. Other parts of the driver use
1574 * this to set the max offload coalescing size so that RX packets
1575 * do not overflow our large buffers.
1576 */
1577 p->large_buf_capacity = jumbo_payload_capacity(sge);
1578 return 0;
1579}
1580
1581/*
1582 * Disables the DMA engine.
1583 */
1584void t1_sge_stop(struct sge *sge)
1585{
1586 writel(0, sge->adapter->regs + A_SG_CONTROL);
1587 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1588 if (is_T2(sge->adapter))
1589 del_timer_sync(&sge->espibug_timer);
1590 del_timer_sync(&sge->tx_reclaim_timer);
1591}
1592
1593/*
1594 * Enables the DMA engine.
1595 */
1596void t1_sge_start(struct sge *sge)
1597{
1598 refill_free_list(sge, &sge->freelQ[0]);
1599 refill_free_list(sge, &sge->freelQ[1]);
1600
1601 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1602 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1603 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1604
1605 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1606
1607 if (is_T2(sge->adapter))
1608 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1609}
1610
1611/*
1612 * Callback for the T2 ESPI 'stuck packet feature' workaround.
1613 */
1614static void espibug_workaround(void *data)
1615{
1616 struct adapter *adapter = (struct adapter *)data;
1617 struct sge *sge = adapter->sge;
1618
1619 if (netif_running(adapter->port[0].dev)) {
1620 struct sk_buff *skb = sge->espibug_skb;
1621
1622 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
1623
1624 if ((seop & 0xfff0fff) == 0xfff && skb) {
1625 if (!skb->cb[0]) {
1626 u8 ch_mac_addr[ETH_ALEN] =
1627 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
1628 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
1629 ch_mac_addr, ETH_ALEN);
1630 memcpy(skb->data + skb->len - 10, ch_mac_addr,
1631 ETH_ALEN);
1632 skb->cb[0] = 0xff;
1633 }
1634
1635 /* bump the reference count to avoid freeing of the
1636 * skb once the DMA has completed.
1637 */
1638 skb = skb_get(skb);
1639 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
1640 }
1641 }
1642 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1643}
1644
1645/*
1646 * Creates a t1_sge structure and returns suggested resource parameters.
1647 */
1648struct sge * __devinit t1_sge_create(struct adapter *adapter,
1649 struct sge_params *p)
1650{
1651 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
1652
1653 if (!sge)
1654 return NULL;
1655 memset(sge, 0, sizeof(*sge));
1656
1657 sge->adapter = adapter;
1658 sge->netdev = adapter->port[0].dev;
1659 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
1660 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
1661
1662 init_timer(&sge->tx_reclaim_timer);
1663 sge->tx_reclaim_timer.data = (unsigned long)sge;
1664 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
1665
1666 if (is_T2(sge->adapter)) {
1667 init_timer(&sge->espibug_timer);
1668 sge->espibug_timer.function = (void *)&espibug_workaround;
1669 sge->espibug_timer.data = (unsigned long)sge->adapter;
1670 sge->espibug_timeout = 1;
1671 }
1672
1673
1674 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
1675 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
1676 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
1677 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
1678 p->rx_coalesce_usecs = 50;
1679 p->coalesce_enable = 0;
1680 p->sample_interval_usecs = 0;
1681 p->polling = 0;
1682
1683 return sge;
1684}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
index 000000000000..434b25586851
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,105 @@
1/*****************************************************************************
2 * *
3 * File: sge.h *
4 * $Revision: 1.11 $ *
5 * $Date: 2005/06/21 22:10:55 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_SGE_H_
40#define _CXGB_SGE_H_
41
42#include <linux/types.h>
43#include <linux/interrupt.h>
44#include <asm/byteorder.h>
45
46#ifndef IRQ_RETVAL
47#define IRQ_RETVAL(x)
48typedef void irqreturn_t;
49#endif
50
51typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
52
53struct sge_intr_counts {
54 unsigned int respQ_empty; /* # times respQ empty */
55 unsigned int respQ_overflow; /* # respQ overflow (fatal) */
56 unsigned int freelistQ_empty; /* # times freelist empty */
57 unsigned int pkt_too_big; /* packet too large (fatal) */
58 unsigned int pkt_mismatch;
59 unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
60 unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
61 unsigned int ethernet_pkts; /* # of Ethernet packets received */
62 unsigned int offload_pkts; /* # of offload packets received */
63 unsigned int offload_bundles; /* # of offload pkt bundles delivered */
64 unsigned int pure_rsps; /* # of non-payload responses */
65 unsigned int unhandled_irqs; /* # of unhandled interrupts */
66 unsigned int tx_ipfrags;
67 unsigned int tx_reg_pkts;
68 unsigned int tx_lso_pkts;
69 unsigned int tx_do_cksum;
70};
71
72struct sge_port_stats {
73 unsigned long rx_cso_good; /* # of successful RX csum offloads */
74 unsigned long tx_cso; /* # of TX checksum offloads */
75 unsigned long vlan_xtract; /* # of VLAN tag extractions */
76	unsigned long vlan_insert;	/* # of VLAN tag insertions */
77 unsigned long tso; /* # of TSO requests */
78 unsigned long rx_drops; /* # of packets dropped due to no mem */
79};
80
81struct sk_buff;
82struct net_device;
83struct adapter;
84struct sge_params;
85struct sge;
86
87struct sge *t1_sge_create(struct adapter *, struct sge_params *);
88int t1_sge_configure(struct sge *, struct sge_params *);
89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
90void t1_sge_destroy(struct sge *);
91intr_handler_t t1_select_intr_handler(adapter_t *adapter);
92unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
93 unsigned int qid, struct net_device *netdev);
94int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
95void t1_set_vlan_accel(struct adapter *adapter, int on_off);
96void t1_sge_start(struct sge *);
97void t1_sge_stop(struct sge *);
98int t1_sge_intr_error_handler(struct sge *);
99void t1_sge_intr_enable(struct sge *);
100void t1_sge_intr_disable(struct sge *);
101void t1_sge_intr_clear(struct sge *);
102const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
103const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
104
105#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
index 000000000000..1ebb5d149aef
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,812 @@
1/*****************************************************************************
2 * *
3 * File: subr.c *
4 * $Revision: 1.27 $ *
5 * $Date: 2005/06/22 01:08:36 $ *
6 * Description: *
7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "elmer0.h"
42#include "regs.h"
43#include "gmac.h"
44#include "cphy.h"
45#include "sge.h"
46#include "espi.h"
47
48/**
49 * t1_wait_op_done - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 *
57 * Wait until an operation is completed by checking a bit in a register
58 * up to @attempts times. Returns %0 if the operation completes and %1
59 * otherwise.
60 */
61static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
62 int attempts, int delay)
63{
64 while (1) {
65 u32 val = readl(adapter->regs + reg) & mask;
66
67 if (!!val == polarity)
68 return 0;
69 if (--attempts == 0)
70 return 1;
71 if (delay)
72 udelay(delay);
73 }
74}
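
A minimal usage sketch for the helper above (the register and bit names here
are hypothetical placeholders, not defined by this driver): a caller passes
the completion bit and the polarity it is waiting for, and treats a non-zero
return as a timeout.

	/* Hypothetical: poll a done bit for up to 100 iterations, 5 usecs
	 * apart; t1_wait_op_done() returns 1 only if it times out.
	 */
	if (t1_wait_op_done(adapter, A_EXAMPLE_STATUS, F_EXAMPLE_DONE, 1,
			    100, 5))
		CH_ALERT("%s: example operation timed out\n", adapter->name);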
75
76#define TPI_ATTEMPTS 50
77
78/*
79 * Write a register over the TPI interface (unlocked and locked versions).
80 */
81static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
82{
83 int tpi_busy;
84
85 writel(addr, adapter->regs + A_TPI_ADDR);
86 writel(value, adapter->regs + A_TPI_WR_DATA);
87 writel(F_TPIWR, adapter->regs + A_TPI_CSR);
88
89 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
90 TPI_ATTEMPTS, 3);
91 if (tpi_busy)
92 CH_ALERT("%s: TPI write to 0x%x failed\n",
93 adapter->name, addr);
94 return tpi_busy;
95}
96
97int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
98{
99 int ret;
100
101 spin_lock(&(adapter)->tpi_lock);
102 ret = __t1_tpi_write(adapter, addr, value);
103 spin_unlock(&(adapter)->tpi_lock);
104 return ret;
105}
106
107/*
108 * Read a register over the TPI interface (unlocked and locked versions).
109 */
110static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
111{
112 int tpi_busy;
113
114 writel(addr, adapter->regs + A_TPI_ADDR);
115 writel(0, adapter->regs + A_TPI_CSR);
116
117 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
118 TPI_ATTEMPTS, 3);
119 if (tpi_busy)
120 CH_ALERT("%s: TPI read from 0x%x failed\n",
121 adapter->name, addr);
122 else
123 *valp = readl(adapter->regs + A_TPI_RD_DATA);
124 return tpi_busy;
125}
126
127int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
128{
129 int ret;
130
131 spin_lock(&(adapter)->tpi_lock);
132 ret = __t1_tpi_read(adapter, addr, valp);
133 spin_unlock(&(adapter)->tpi_lock);
134 return ret;
135}
136
137/*
138 * Called when a port's link settings change to propagate the new values to the
139 * associated PHY and MAC. After performing the common tasks it invokes an
140 * OS-specific handler.
141 */
142/* static */ void link_changed(adapter_t *adapter, int port_id)
143{
144 int link_ok, speed, duplex, fc;
145 struct cphy *phy = adapter->port[port_id].phy;
146 struct link_config *lc = &adapter->port[port_id].link_config;
147
148 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
149
150 lc->speed = speed < 0 ? SPEED_INVALID : speed;
151 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
152 if (!(lc->requested_fc & PAUSE_AUTONEG))
153 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
154
155 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
156 /* Set MAC speed, duplex, and flow control to match PHY. */
157 struct cmac *mac = adapter->port[port_id].mac;
158
159 mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
160 lc->fc = (unsigned char)fc;
161 }
162 t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
163}
164
165static int t1_pci_intr_handler(adapter_t *adapter)
166{
167 u32 pcix_cause;
168
169 pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
170
171 if (pcix_cause) {
172 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
173 pcix_cause);
174 t1_fatal_err(adapter); /* PCI errors are fatal */
175 }
176 return 0;
177}
178
179
180/*
181 * Wait until Elmer's MI1 interface is ready for new operations.
182 */
183static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
184{
185 int attempts = 100, busy;
186
187 do {
188 u32 val;
189
190 __t1_tpi_read(adapter, mi1_reg, &val);
191 busy = val & F_MI1_OP_BUSY;
192 if (busy)
193 udelay(10);
194 } while (busy && --attempts);
195 if (busy)
196 CH_ALERT("%s: MDIO operation timed out\n",
197 adapter->name);
198 return busy;
199}
200
201/*
202 * MI1 MDIO initialization.
203 */
204static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
205{
206 u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
207 u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
208 V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
209
210 if (!(bi->caps & SUPPORTED_10000baseT_Full))
211 val |= V_MI1_SOF(1);
212 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
213}
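
A quick worked example of the clock-divider formula above, using the N110/N210
board-table values defined later in this file (clock_elmer0 = 44, mdio_mdc = 1):

	clkdiv = 44 / (2 * 1) - 1 = 21

so V_MI1_CLK_DIV(21) is the value that ends up in A_ELMER0_PORT0_MI1_CFG for
those boards.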
214
215static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
216 int reg_addr, unsigned int *valp)
217{
218 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
219
220 spin_lock(&(adapter)->tpi_lock);
221
222 /* Write the address we want. */
223 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
224 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
225 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
226 MI1_OP_INDIRECT_ADDRESS);
227 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
228
229 /* Write the operation we want. */
230 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
231 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
232
233 /* Read the data. */
234 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
235 spin_unlock(&(adapter)->tpi_lock);
236 return 0;
237}
238
239static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
240 int reg_addr, unsigned int val)
241{
242 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
243
244 spin_lock(&(adapter)->tpi_lock);
245
246 /* Write the address we want. */
247 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
248 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
249 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
250 MI1_OP_INDIRECT_ADDRESS);
251 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
252
253 /* Write the data. */
254 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
255 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
256 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
257 spin_unlock(&(adapter)->tpi_lock);
258 return 0;
259}
260
261static struct mdio_ops mi1_mdio_ext_ops = {
262 mi1_mdio_init,
263 mi1_mdio_ext_read,
264 mi1_mdio_ext_write
265};
266
267enum {
268 CH_BRD_N110_1F,
269 CH_BRD_N210_1F,
270};
271
272static struct board_info t1_board[] = {
273
274{ CHBT_BOARD_N110, 1/*ports#*/,
275 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
276 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
277 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
278 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
279 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
280 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
281 "Chelsio N110 1x10GBaseX NIC" },
282
283{ CHBT_BOARD_N210, 1/*ports#*/,
284 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
285 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
286 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
287 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
288 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
289 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
290 "Chelsio N210 1x10GBaseX NIC" },
291
292};
293
294struct pci_device_id t1_pci_tbl[] = {
295 CH_DEVICE(7, 0, CH_BRD_N110_1F),
296 CH_DEVICE(10, 1, CH_BRD_N210_1F),
297 { 0, }
298};
299
300MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
301
302/*
303 * Return the board_info structure with a given index. Out-of-range indices
304 * return NULL.
305 */
306const struct board_info *t1_get_board_info(unsigned int board_id)
307{
308 return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
309}
310
311struct chelsio_vpd_t {
312 u32 format_version;
313 u8 serial_number[16];
314 u8 mac_base_address[6];
315 u8 pad[2]; /* make multiple-of-4 size requirement explicit */
316};
317
318#define EEPROMSIZE (8 * 1024)
319#define EEPROM_MAX_POLL 4
320
321/*
322 * Read SEEPROM. A zero is written to the flag register when the address is
323 * written to the Control register. The hardware device will set the flag to a
324 * one when 4B have been transferred to the Data register.
325 */
326int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
327{
328 int i = EEPROM_MAX_POLL;
329 u16 val;
330
331 if (addr >= EEPROMSIZE || (addr & 3))
332 return -EINVAL;
333
334 pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
335 do {
336 udelay(50);
337 pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
338 } while (!(val & F_VPD_OP_FLAG) && --i);
339
340 if (!(val & F_VPD_OP_FLAG)) {
341 CH_ERR("%s: reading EEPROM address 0x%x failed\n",
342 adapter->name, addr);
343 return -EIO;
344 }
345 pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
346 *data = le32_to_cpu(*data);
347 return 0;
348}
349
350static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
351{
352 int addr, ret = 0;
353
354 for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
355 ret = t1_seeprom_read(adapter, addr,
356 (u32 *)((u8 *)vpd + addr));
357
358 return ret;
359}
360
361/*
362 * Read a port's MAC address from the VPD ROM.
363 */
364static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
365{
366 struct chelsio_vpd_t vpd;
367
368 if (t1_eeprom_vpd_get(adapter, &vpd))
369 return 1;
370 memcpy(mac_addr, vpd.mac_base_address, 5);
371 mac_addr[5] = vpd.mac_base_address[5] + index;
372 return 0;
373}
374
375/*
376 * Set up the MAC/PHY according to the requested link settings.
377 *
378 * If the PHY can auto-negotiate, first decide what to advertise, then
379 * enable/disable auto-negotiation as desired and reset.
380 *
381 * If the PHY does not auto-negotiate we just reset it.
382 *
383 * If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
384 * otherwise do it later based on the outcome of auto-negotiation.
385 */
386int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
387{
388 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
389
390 if (lc->supported & SUPPORTED_Autoneg) {
391 lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
392 if (fc) {
393 lc->advertising |= ADVERTISED_ASYM_PAUSE;
394 if (fc == (PAUSE_RX | PAUSE_TX))
395 lc->advertising |= ADVERTISED_PAUSE;
396 }
397 phy->ops->advertise(phy, lc->advertising);
398
399 if (lc->autoneg == AUTONEG_DISABLE) {
400 lc->speed = lc->requested_speed;
401 lc->duplex = lc->requested_duplex;
402 lc->fc = (unsigned char)fc;
403 mac->ops->set_speed_duplex_fc(mac, lc->speed,
404 lc->duplex, fc);
405 /* Also disables autoneg */
406 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
407 phy->ops->reset(phy, 0);
408 } else
409 phy->ops->autoneg_enable(phy); /* also resets PHY */
410 } else {
411 mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
412 lc->fc = (unsigned char)fc;
413 phy->ops->reset(phy, 0);
414 }
415 return 0;
416}
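
A minimal sketch of forcing a fixed link configuration through this routine.
The struct link_config fields are the ones manipulated above; SPEED_10000 and
DUPLEX_FULL are assumed to be the usual ethtool constants, and in practice
these values would come from the OS-specific ethtool code rather than being
hard-coded:

	struct link_config *lc = &adapter->port[0].link_config;

	lc->autoneg = AUTONEG_DISABLE;
	lc->requested_speed = SPEED_10000;	/* assumed ethtool constant */
	lc->requested_duplex = DUPLEX_FULL;	/* assumed ethtool constant */
	lc->requested_fc = PAUSE_RX | PAUSE_TX;	/* no PAUSE_AUTONEG bit */
	t1_link_start(adapter->port[0].phy, adapter->port[0].mac, lc);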
417
418/*
419 * External interrupt handler for boards using elmer0.
420 */
421int elmer0_ext_intr_handler(adapter_t *adapter)
422{
423 struct cphy *phy;
424 int phy_cause;
425 u32 cause;
426
427 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
428
429 switch (board_info(adapter)->board) {
430 case CHBT_BOARD_N210:
431 case CHBT_BOARD_N110:
432 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
433 phy = adapter->port[0].phy;
434 phy_cause = phy->ops->interrupt_handler(phy);
435 if (phy_cause & cphy_cause_link_change)
436 link_changed(adapter, 0);
437 }
438 break;
439 }
440 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
441 return 0;
442}
443
444/* Enables all interrupts. */
445void t1_interrupts_enable(adapter_t *adapter)
446{
447 unsigned int i;
448 u32 pl_intr;
449
450 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
451
452 t1_sge_intr_enable(adapter->sge);
453 if (adapter->espi) {
454 adapter->slow_intr_mask |= F_PL_INTR_ESPI;
455 t1_espi_intr_enable(adapter->espi);
456 }
457
458 /* Enable MAC/PHY interrupts for each port. */
459 for_each_port(adapter, i) {
460 adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
461 adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
462 }
463
464 /* Enable PCIX & external chip interrupts on ASIC boards. */
465 pl_intr = readl(adapter->regs + A_PL_ENABLE);
466
467 /* PCI-X interrupts */
468 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
469 0xffffffff);
470
471 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
472 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
473 writel(pl_intr, adapter->regs + A_PL_ENABLE);
474}
475
476/* Disables all interrupts. */
477void t1_interrupts_disable(adapter_t* adapter)
478{
479 unsigned int i;
480
481 t1_sge_intr_disable(adapter->sge);
482 if (adapter->espi)
483 t1_espi_intr_disable(adapter->espi);
484
485 /* Disable MAC/PHY interrupts for each port. */
486 for_each_port(adapter, i) {
487 adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
488 adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
489 }
490
491 /* Disable PCIX & external chip interrupts. */
492 writel(0, adapter->regs + A_PL_ENABLE);
493
494 /* PCI-X interrupts */
495 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
496
497 adapter->slow_intr_mask = 0;
498}
499
500/* Clears all interrupts */
501void t1_interrupts_clear(adapter_t* adapter)
502{
503 unsigned int i;
504 u32 pl_intr;
505
506
507 t1_sge_intr_clear(adapter->sge);
508 if (adapter->espi)
509 t1_espi_intr_clear(adapter->espi);
510
511 /* Clear MAC/PHY interrupts for each port. */
512 for_each_port(adapter, i) {
513 adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
514 adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
515 }
516
517	/* Clear PCIX & external chip interrupts. */
518 pl_intr = readl(adapter->regs + A_PL_CAUSE);
519
520 writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
521 adapter->regs + A_PL_CAUSE);
522
523 /* PCI-X interrupts */
524 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
525}
526
527/*
528 * Slow path interrupt handler for ASICs.
529 */
530int t1_slow_intr_handler(adapter_t *adapter)
531{
532 u32 cause = readl(adapter->regs + A_PL_CAUSE);
533
534 cause &= adapter->slow_intr_mask;
535 if (!cause)
536 return 0;
537 if (cause & F_PL_INTR_SGE_ERR)
538 t1_sge_intr_error_handler(adapter->sge);
539 if (cause & F_PL_INTR_ESPI)
540 t1_espi_intr_handler(adapter->espi);
541 if (cause & F_PL_INTR_PCIX)
542 t1_pci_intr_handler(adapter);
543 if (cause & F_PL_INTR_EXT)
544 t1_elmer0_ext_intr(adapter);
545
546 /* Clear the interrupts just processed. */
547 writel(cause, adapter->regs + A_PL_CAUSE);
548 (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
549 return 1;
550}
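
For context, a hypothetical top-level ISR wrapping this slow path could look
like the sketch below; the driver's real handler is the one returned by
t1_select_intr_handler() in sge.c, so this is illustration only:

	static irqreturn_t example_slow_isr(int irq, void *data,
					    struct pt_regs *regs)
	{
		adapter_t *adapter = data;

		return IRQ_RETVAL(t1_slow_intr_handler(adapter));
	}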
551
552/* Pause deadlock avoidance parameters */
553#define DROP_MSEC 16
554#define DROP_PKTS_CNT 1
555
556static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
557{
558 u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
559
560 if (enable)
561 val |= csum_bit;
562 else
563 val &= ~csum_bit;
564 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
565}
566
567void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
568{
569 set_csum_offload(adapter, F_IP_CSUM, enable);
570}
571
572void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
573{
574 set_csum_offload(adapter, F_UDP_CSUM, enable);
575}
576
577void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
578{
579 set_csum_offload(adapter, F_TCP_CSUM, enable);
580}
581
582static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
583{
584 u32 val;
585
586 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
587 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
588 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
589 F_TP_IN_ESPI_CHECK_TCP_CSUM;
590 writel(val, adapter->regs + A_TP_IN_CONFIG);
591 writel(F_TP_OUT_CSPI_CPL |
592 F_TP_OUT_ESPI_ETHERNET |
593 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
594 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
595 adapter->regs + A_TP_OUT_CONFIG);
596
597 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
598 val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
599 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
600
601 /*
602 * Enable pause frame deadlock prevention.
603 */
604 if (is_T2(adapter)) {
605 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
606
607 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
608 V_DROP_TICKS_CNT(drop_ticks) |
609 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
610 adapter->regs + A_TP_TX_DROP_CONFIG);
611 }
612
613 writel(F_TP_RESET, adapter->regs + A_TP_RESET);
614}
615
616int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
617 struct adapter_params *p)
618{
619 p->chip_version = bi->chip_term;
620 if (p->chip_version == CHBT_TERM_T1 ||
621 p->chip_version == CHBT_TERM_T2) {
622 u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
623
624 val = G_TP_PC_REV(val);
625 if (val == 2)
626 p->chip_revision = TERM_T1B;
627 else if (val == 3)
628 p->chip_revision = TERM_T2;
629 else
630 return -1;
631 } else
632 return -1;
633 return 0;
634}
635
636/*
637 * Enable board components other than the Chelsio chip, such as external MAC
638 * and PHY.
639 */
640static int board_init(adapter_t *adapter, const struct board_info *bi)
641{
642 switch (bi->board) {
643 case CHBT_BOARD_N110:
644 case CHBT_BOARD_N210:
645 writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
647 break;
648 }
649 return 0;
650}
651
652/*
653 * Initialize and configure the Terminator HW modules. Note that external
654 * MAC and PHYs are initialized separately.
655 */
656int t1_init_hw_modules(adapter_t *adapter)
657{
658 int err = -EIO;
659 const struct board_info *bi = board_info(adapter);
660
661 if (!bi->clock_mc4) {
662 u32 val = readl(adapter->regs + A_MC4_CFG);
663
664 writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
665 writel(F_M_BUS_ENABLE | F_TCAM_RESET,
666 adapter->regs + A_MC5_CONFIG);
667 }
668
669 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
670 bi->espi_nports))
671 goto out_err;
672
673 t1_tp_reset(adapter, bi->clock_core);
674
675 err = t1_sge_configure(adapter->sge, &adapter->params.sge);
676 if (err)
677 goto out_err;
678
679 err = 0;
680 out_err:
681 return err;
682}
683
684/*
685 * Determine a card's PCI mode.
686 */
687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
688{
689 static unsigned short speed_map[] = { 33, 66, 100, 133 };
690 u32 pci_mode;
691
692 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
693 p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
694 p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
695 p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
696}
697
698/*
699 * Release the structures holding the SW per-Terminator-HW-module state.
700 */
701void t1_free_sw_modules(adapter_t *adapter)
702{
703 unsigned int i;
704
705 for_each_port(adapter, i) {
706 struct cmac *mac = adapter->port[i].mac;
707 struct cphy *phy = adapter->port[i].phy;
708
709 if (mac)
710 mac->ops->destroy(mac);
711 if (phy)
712 phy->ops->destroy(phy);
713 }
714
715 if (adapter->sge)
716 t1_sge_destroy(adapter->sge);
717 if (adapter->espi)
718 t1_espi_destroy(adapter->espi);
719}
720
721static void __devinit init_link_config(struct link_config *lc,
722 const struct board_info *bi)
723{
724 lc->supported = bi->caps;
725 lc->requested_speed = lc->speed = SPEED_INVALID;
726 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
727 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
728 if (lc->supported & SUPPORTED_Autoneg) {
729 lc->advertising = lc->supported;
730 lc->autoneg = AUTONEG_ENABLE;
731 lc->requested_fc |= PAUSE_AUTONEG;
732 } else {
733 lc->advertising = 0;
734 lc->autoneg = AUTONEG_DISABLE;
735 }
736}
737
738
739/*
740 * Allocate and initialize the data structures that hold the SW state of
741 * the Terminator HW modules.
742 */
743int __devinit t1_init_sw_modules(adapter_t *adapter,
744 const struct board_info *bi)
745{
746 unsigned int i;
747
748 adapter->params.brd_info = bi;
749 adapter->params.nports = bi->port_number;
750 adapter->params.stats_update_period = bi->gmac->stats_update_period;
751
752 adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
753 if (!adapter->sge) {
754 CH_ERR("%s: SGE initialization failed\n",
755 adapter->name);
756 goto error;
757 }
758
759 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
760 CH_ERR("%s: ESPI initialization failed\n",
761 adapter->name);
762 goto error;
763 }
764
765 board_init(adapter, bi);
766 bi->mdio_ops->init(adapter, bi);
767 if (bi->gphy->reset)
768 bi->gphy->reset(adapter);
769 if (bi->gmac->reset)
770 bi->gmac->reset(adapter);
771
772 for_each_port(adapter, i) {
773 u8 hw_addr[6];
774 struct cmac *mac;
775 int phy_addr = bi->mdio_phybaseaddr + i;
776
777 adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
778 bi->mdio_ops);
779 if (!adapter->port[i].phy) {
780 CH_ERR("%s: PHY %d initialization failed\n",
781 adapter->name, i);
782 goto error;
783 }
784
785 adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
786 if (!mac) {
787 CH_ERR("%s: MAC %d initialization failed\n",
788 adapter->name, i);
789 goto error;
790 }
791
792 /*
793		 * Get the port's MAC address from the EEPROM if one exists,
794		 * otherwise use the one hardcoded in the MAC.
795 */
796 if (vpd_macaddress_get(adapter, i, hw_addr)) {
797 CH_ERR("%s: could not read MAC address from VPD ROM\n",
798 adapter->port[i].dev->name);
799 goto error;
800 }
801 memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
802 init_link_config(&adapter->port[i].link_config, bi);
803 }
804
805 get_pci_mode(adapter, &adapter->params.pci);
806 t1_interrupts_clear(adapter);
807 return 0;
808
809 error:
810 t1_free_sw_modules(adapter);
811 return -1;
812}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
index 000000000000..81816c2b708a
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,213 @@
1/*****************************************************************************
2 * *
3 * File: suni1x10gexp_regs.h *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/06/22 00:17:04 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Maintainers: maintainers@chelsio.com *
25 * *
26 * Authors: PMC/SIERRA *
27 * *
28 * History: *
29 * *
30 ****************************************************************************/
31
32#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
33#define _CXGB_SUNI1x10GEXP_REGS_H_
34
35/******************************************************************************/
36/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
37/******************************************************************************/
38/* Refer to the Register Bit Masks below for the naming of each register and */
39/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit */
40/******************************************************************************/
41
42#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
43#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
44#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
45#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
46#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
47#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
48#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
49#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
50#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
51#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
52#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
53#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
54#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
55#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
56#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
57#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
58#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
59#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
60#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
61#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
62#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
63#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
64#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
65#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
66#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
67#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
68#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
69#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
70#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
71#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
72#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
73#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
74#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
75#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
76#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
77#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
78#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
79#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
80#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
81#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
82#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
83#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
84#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
85#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
86#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
87#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
88#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
89#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
90#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
91#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
92#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
93#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
94#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
95#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
96#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
97#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
98#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
99#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
100#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
101#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
102#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
103#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
104#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
105#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
106#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
107#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
108#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
109#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
110#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
111#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
112#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
113#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
114#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
115#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
116#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
117#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
118#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
119#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
120#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
121
122/******************************************************************************/
123/* -- End register offset definitions -- */
124/******************************************************************************/
125
126/******************************************************************************/
127/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
128/******************************************************************************/
129
130/*----------------------------------------------------------------------------
131 * Register 0x0004: S/UNI-1x10GE-XP Device Status
132 * Bit 9 TOP_SXRA_EXPIRED
133 * Bit 8 TOP_MDIO_BUSY
134 * Bit 7 TOP_DTRB
135 * Bit 6 TOP_EXPIRED
136 * Bit 5 TOP_PAUSED
137 * Bit 4 TOP_PL4_ID_DOOL
138 * Bit 3 TOP_PL4_IS_DOOL
139 * Bit 2 TOP_PL4_ID_ROOL
140 * Bit 1 TOP_PL4_IS_ROOL
141 * Bit 0 TOP_PL4_OUT_ROOL
142 *----------------------------------------------------------------------------*/
143#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
144#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
145#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
146#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
147#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
148#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
149#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
150
151/*----------------------------------------------------------------------------
152 * Register 0x000E: PM3393 Global interrupt enable
153 * Bit 15 TOP_INTE
154 *----------------------------------------------------------------------------*/
155#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
156
157/*----------------------------------------------------------------------------
158 * Register 0x2040: RXXG Configuration 1
159 * Bit 15 RXXG_RXEN
160 * Bit 14 RXXG_ROCF
161 * Bit 13 RXXG_PAD_STRIP
162 * Bit 10 RXXG_PUREP
163 * Bit 9 RXXG_LONGP
164 * Bit 8 RXXG_PARF
165 * Bit 7 RXXG_FLCHK
166 * Bit 5 RXXG_PASS_CTRL
167 * Bit 3 RXXG_CRC_STRIP
168 * Bit 2-0 RXXG_MIFG
169 *----------------------------------------------------------------------------*/
170#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
171#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
172#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
173#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
174
175/*----------------------------------------------------------------------------
176 * Register 0x2070: RXXG Address Filter Control 2
177 * Bit 1 RXXG_PMODE
178 * Bit 0 RXXG_MHASH_EN
179 *----------------------------------------------------------------------------*/
180#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
181#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
182
183/*----------------------------------------------------------------------------
184 * Register 0x2100: MSTAT Control
185 * Bit 2 MSTAT_WRITE
186 * Bit 1 MSTAT_CLEAR
187 * Bit 0 MSTAT_SNAP
188 *----------------------------------------------------------------------------*/
189#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
190#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
191
192/*----------------------------------------------------------------------------
193 * Register 0x3040: TXXG Configuration Register 1
194 * Bit 15 TXXG_TXEN0
195 * Bit 13 TXXG_HOSTPAUSE
196 * Bit 12-7 TXXG_IPGT
197 * Bit 5 TXXG_32BIT_ALIGN
198 * Bit 4 TXXG_CRCEN
199 * Bit 3 TXXG_FCTX
200 * Bit 2 TXXG_FCRX
201 * Bit 1 TXXG_PADEN
202 * Bit 0 TXXG_SPRE
203 *----------------------------------------------------------------------------*/
204#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
205#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
206#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
207#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
208#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
209#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
210#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
211
212#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
213
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 6440a892bb81..e54fc10f6846 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1140,7 +1140,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1140} 1140}
1141 1141
1142static int 1142static int
1143dm9000_drv_suspend(struct device *dev, u32 state, u32 level) 1143dm9000_drv_suspend(struct device *dev, pm_message_t state, u32 level)
1144{ 1144{
1145 struct net_device *ndev = dev_get_drvdata(dev); 1145 struct net_device *ndev = dev_get_drvdata(dev);
1146 1146
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d0fa2448761d..25cc20e415da 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -156,7 +156,7 @@
156 156
157#define DRV_NAME "e100" 157#define DRV_NAME "e100"
158#define DRV_EXT "-NAPI" 158#define DRV_EXT "-NAPI"
159#define DRV_VERSION "3.4.8-k2"DRV_EXT 159#define DRV_VERSION "3.4.14-k2"DRV_EXT
160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
162#define PFX DRV_NAME ": " 162#define PFX DRV_NAME ": "
@@ -785,6 +785,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
785} 785}
786 786
787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
788#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
788static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 789static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
789{ 790{
790 unsigned long flags; 791 unsigned long flags;
@@ -798,7 +799,7 @@ static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
798 if(likely(!readb(&nic->csr->scb.cmd_lo))) 799 if(likely(!readb(&nic->csr->scb.cmd_lo)))
799 break; 800 break;
800 cpu_relax(); 801 cpu_relax();
801 if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1))) 802 if(unlikely(i > E100_WAIT_SCB_FAST))
802 udelay(5); 803 udelay(5);
803 } 804 }
804 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) { 805 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
@@ -902,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
902 903
903static void e100_get_defaults(struct nic *nic) 904static void e100_get_defaults(struct nic *nic)
904{ 905{
905 struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; 906 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
906 struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; 907 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
907 908
908 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); 909 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
909 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ 910 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1007,213 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1006 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1007 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1007} 1008}
1008 1009
1010/********************************************************/
1011/* Micro code for 8086:1229 Rev 8 */
1012/********************************************************/
1013
1014/* Parameter values for the D101M B-step */
1015#define D101M_CPUSAVER_TIMER_DWORD 78
1016#define D101M_CPUSAVER_BUNDLE_DWORD 65
1017#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1018
1019#define D101M_B_RCVBUNDLE_UCODE \
1020{\
10210x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
10220x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
10230x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
10240x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
10250x00380438, 0x00000000, 0x00140000, 0x00380555, \
10260x00308000, 0x00100662, 0x00100561, 0x000E0408, \
10270x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10280x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10290x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
10300x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
10310x00000000, 0x00000000, 0x00000000, 0x00000000, \
10320x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
10330x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
10340x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
10350x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
10360x00041000, 0x00010004, 0x00130826, 0x000C0006, \
10370x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
10380x00000000, 0x00000000, 0x00000000, 0x00000000, \
10390x00000000, 0x00000000, 0x00000000, 0x00000000, \
10400x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10410x00101210, 0x00380C34, 0x00000000, 0x00000000, \
10420x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
10430x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
10440x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
10450x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
10460x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
10470x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
10480x00130826, 0x000C0001, 0x00220559, 0x00101313, \
10490x00380559, 0x00000000, 0x00000000, 0x00000000, \
10500x00000000, 0x00000000, 0x00000000, 0x00000000, \
10510x00000000, 0x00130831, 0x0010090B, 0x00124813, \
10520x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
10530x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1054}
1055
1056/********************************************************/
1057/* Micro code for 8086:1229 Rev 9 */
1058/********************************************************/
1059
1060/* Parameter values for the D101S */
1061#define D101S_CPUSAVER_TIMER_DWORD 78
1062#define D101S_CPUSAVER_BUNDLE_DWORD 67
1063#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1064
1065#define D101S_RCVBUNDLE_UCODE \
1066{\
10670x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
10680x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
10690x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
10700x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
10710x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
10720x00308000, 0x00100610, 0x00100561, 0x000E0408, \
10730x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10740x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10750x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
10760x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
10770x00000000, 0x00000000, 0x00000000, 0x00000000, \
10780x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
10790x003A047E, 0x00044010, 0x00380819, 0x00000000, \
10800x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
10810x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
10820x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
10830x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
10840x00101313, 0x00380700, 0x00000000, 0x00000000, \
10850x00000000, 0x00000000, 0x00000000, 0x00000000, \
10860x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10870x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
10880x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
10890x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
10900x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
10910x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
10920x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
10930x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
10940x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
10950x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
10960x00000000, 0x00000000, 0x00000000, 0x00000000, \
10970x00000000, 0x00000000, 0x00000000, 0x00130831, \
10980x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
10990x00041000, 0x00010004, 0x00380700 \
1100}
1101
1102/********************************************************/
1103/* Micro code for the 8086:1229 Rev F/10 */
1104/********************************************************/
1105
1106/* Parameter values for the D102 E-step */
1107#define D102_E_CPUSAVER_TIMER_DWORD 42
1108#define D102_E_CPUSAVER_BUNDLE_DWORD 54
1109#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1110
1111#define D102_E_RCVBUNDLE_UCODE \
1112{\
11130x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
11140x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
11150x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
11160x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
11170x00000000, 0x00000000, 0x00000000, 0x00000000, \
11180x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
11190x00000000, 0x00000000, 0x00000000, 0x00000000, \
11200x00000000, 0x00000000, 0x00000000, 0x00000000, \
11210x00000000, 0x00000000, 0x00000000, 0x00000000, \
11220x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
11230x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
11240x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
11250x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
11260x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
11270x00000000, 0x00000000, 0x00000000, 0x00000000, \
11280x00000000, 0x00000000, 0x00000000, 0x00000000, \
11290x00000000, 0x00000000, 0x00000000, 0x00000000, \
11300x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
11310x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
11320x00000000, 0x00000000, 0x00000000, 0x00000000, \
11330x00000000, 0x00000000, 0x00000000, 0x00000000, \
11340x00000000, 0x00000000, 0x00000000, 0x00000000, \
11350x00000000, 0x00000000, 0x00000000, 0x00000000, \
11360x00000000, 0x00000000, 0x00000000, 0x00000000, \
11370x00000000, 0x00000000, 0x00000000, 0x00000000, \
11380x00000000, 0x00000000, 0x00000000, 0x00000000, \
11390x00000000, 0x00000000, 0x00000000, 0x00000000, \
11400x00000000, 0x00000000, 0x00000000, 0x00000000, \
11410x00000000, 0x00000000, 0x00000000, 0x00000000, \
11420x00000000, 0x00000000, 0x00000000, 0x00000000, \
11430x00000000, 0x00000000, 0x00000000, 0x00000000, \
11440x00000000, 0x00000000, 0x00000000, 0x00000000, \
11450x00000000, 0x00000000, 0x00000000, 0x00000000, \
1146}
1147
1009static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1148static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1010{ 1149{
1011 int i; 1150/* *INDENT-OFF* */
1012 static const u32 ucode[UCODE_SIZE] = { 1151 static struct {
1013 /* NFS packets are misinterpreted as TCO packets and 1152 u32 ucode[UCODE_SIZE + 1];
1014 * incorrectly routed to the BMC over SMBus. This 1153 u8 mac;
1015 * microcode patch checks the fragmented IP bit in the 1154 u8 timer_dword;
1016 * NFS/UDP header to distinguish between NFS and TCO. */ 1155 u8 bundle_dword;
1017 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 1156 u8 min_size_dword;
1018 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, 1157 } ucode_opts[] = {
1019 0x00906EFD, 0x00900EFD, 0x00E00EF8, 1158 { D101M_B_RCVBUNDLE_UCODE,
1020 }; 1159 mac_82559_D101M,
1160 D101M_CPUSAVER_TIMER_DWORD,
1161 D101M_CPUSAVER_BUNDLE_DWORD,
1162 D101M_CPUSAVER_MIN_SIZE_DWORD },
1163 { D101S_RCVBUNDLE_UCODE,
1164 mac_82559_D101S,
1165 D101S_CPUSAVER_TIMER_DWORD,
1166 D101S_CPUSAVER_BUNDLE_DWORD,
1167 D101S_CPUSAVER_MIN_SIZE_DWORD },
1168 { D102_E_RCVBUNDLE_UCODE,
1169 mac_82551_F,
1170 D102_E_CPUSAVER_TIMER_DWORD,
1171 D102_E_CPUSAVER_BUNDLE_DWORD,
1172 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1173 { D102_E_RCVBUNDLE_UCODE,
1174 mac_82551_10,
1175 D102_E_CPUSAVER_TIMER_DWORD,
1176 D102_E_CPUSAVER_BUNDLE_DWORD,
1177 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1178 { {0}, 0, 0, 0, 0}
1179 }, *opts;
1180/* *INDENT-ON* */
1181
1182#define BUNDLESMALL 1
1183#define BUNDLEMAX 50
1184#define INTDELAY 15000
1185
1186 opts = ucode_opts;
1187
1188 /* do not load u-code for ICH devices */
1189 if (nic->flags & ich)
1190 return;
1191
1192 /* Search for ucode match against h/w rev_id */
1193 while (opts->mac) {
1194 if (nic->mac == opts->mac) {
1195 int i;
1196 u32 *ucode = opts->ucode;
1197
1198 /* Insert user-tunable settings */
1199 ucode[opts->timer_dword] &= 0xFFFF0000;
1200 ucode[opts->timer_dword] |=
1201 (u16) INTDELAY;
1202 ucode[opts->bundle_dword] &= 0xFFFF0000;
1203 ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
1204 ucode[opts->min_size_dword] &= 0xFFFF0000;
1205 ucode[opts->min_size_dword] |=
1206 (BUNDLESMALL) ? 0xFFFF : 0xFF80;
1207
1208 for(i = 0; i < UCODE_SIZE; i++)
1209 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1210 cb->command = cpu_to_le16(cb_ucode);
1211 return;
1212 }
1213 opts++;
1214 }
1021 1215
1022 if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) { 1216 cb->command = cpu_to_le16(cb_nop);
1023 for(i = 0; i < UCODE_SIZE; i++)
1024 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1025 cb->command = cpu_to_le16(cb_ucode);
1026 } else
1027 cb->command = cpu_to_le16(cb_nop);
1028} 1217}
1029 1218
1030static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1219static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1307,14 +1496,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1307{ 1496{
1308 cb->command = nic->tx_command; 1497 cb->command = nic->tx_command;
1309 /* interrupt every 16 packets regardless of delay */ 1498 /* interrupt every 16 packets regardless of delay */
1310 if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i; 1499 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1500 cb->command |= cpu_to_le16(cb_i);
1311 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); 1501 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1312 cb->u.tcb.tcb_byte_count = 0; 1502 cb->u.tcb.tcb_byte_count = 0;
1313 cb->u.tcb.threshold = nic->tx_threshold; 1503 cb->u.tcb.threshold = nic->tx_threshold;
1314 cb->u.tcb.tbd_count = 1; 1504 cb->u.tcb.tbd_count = 1;
1315 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1505 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1316 skb->data, skb->len, PCI_DMA_TODEVICE)); 1506 skb->data, skb->len, PCI_DMA_TODEVICE));
1317 // check for mapping failure? 1507 /* check for mapping failure? */
1318 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1508 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1319} 1509}
1320 1510
@@ -1539,7 +1729,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1539 /* Don't indicate if hardware indicates errors */ 1729 /* Don't indicate if hardware indicates errors */
1540 nic->net_stats.rx_dropped++; 1730 nic->net_stats.rx_dropped++;
1541 dev_kfree_skb_any(skb); 1731 dev_kfree_skb_any(skb);
1542 } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) { 1732 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1543 /* Don't indicate oversized frames */ 1733 /* Don't indicate oversized frames */
1544 nic->rx_over_length_errors++; 1734 nic->rx_over_length_errors++;
1545 nic->net_stats.rx_dropped++; 1735 nic->net_stats.rx_dropped++;
@@ -1706,6 +1896,7 @@ static int e100_poll(struct net_device *netdev, int *budget)
1706static void e100_netpoll(struct net_device *netdev) 1896static void e100_netpoll(struct net_device *netdev)
1707{ 1897{
1708 struct nic *nic = netdev_priv(netdev); 1898 struct nic *nic = netdev_priv(netdev);
1899
1709 e100_disable_irq(nic); 1900 e100_disable_irq(nic);
1710 e100_intr(nic->pdev->irq, netdev, NULL); 1901 e100_intr(nic->pdev->irq, netdev, NULL);
1711 e100_tx_clean(nic); 1902 e100_tx_clean(nic);
@@ -2108,6 +2299,8 @@ static void e100_diag_test(struct net_device *netdev,
2108 } 2299 }
2109 for(i = 0; i < E100_TEST_LEN; i++) 2300 for(i = 0; i < E100_TEST_LEN; i++)
2110 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; 2301 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2302
2303 msleep_interruptible(4 * 1000);
2111} 2304}
2112 2305
2113static int e100_phys_id(struct net_device *netdev, u32 data) 2306static int e100_phys_id(struct net_device *netdev, u32 data)
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 93e9f8788751..51c2b3a18b6f 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1270,7 +1270,7 @@ struct e1000_hw_stats {
1270 1270
1271/* Structure containing variables used by the shared code (e1000_hw.c) */ 1271/* Structure containing variables used by the shared code (e1000_hw.c) */
1272struct e1000_hw { 1272struct e1000_hw {
1273 uint8_t *hw_addr; 1273 uint8_t __iomem *hw_addr;
1274 uint8_t *flash_address; 1274 uint8_t *flash_address;
1275 e1000_mac_type mac_type; 1275 e1000_mac_type mac_type;
1276 e1000_phy_type phy_type; 1276 e1000_phy_type phy_type;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 9b596e0bbf95..7c8a0a22dcd5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -162,7 +162,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
162static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 162static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
163static void e1000_restore_vlan(struct e1000_adapter *adapter); 163static void e1000_restore_vlan(struct e1000_adapter *adapter);
164 164
165static int e1000_suspend(struct pci_dev *pdev, uint32_t state); 165static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
166#ifdef CONFIG_PM 166#ifdef CONFIG_PM
167static int e1000_resume(struct pci_dev *pdev); 167static int e1000_resume(struct pci_dev *pdev);
168#endif 168#endif
@@ -3642,7 +3642,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
3642} 3642}
3643 3643
3644static int 3644static int
3645e1000_suspend(struct pci_dev *pdev, uint32_t state) 3645e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3646{ 3646{
3647 struct net_device *netdev = pci_get_drvdata(pdev); 3647 struct net_device *netdev = pci_get_drvdata(pdev);
3648 struct e1000_adapter *adapter = netdev_priv(netdev); 3648 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3726,9 +3726,7 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
3726 } 3726 }
3727 3727
3728 pci_disable_device(pdev); 3728 pci_disable_device(pdev);
3729 3729 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3730 state = (state > 0) ? 3 : 0;
3731 pci_set_power_state(pdev, state);
3732 3730
3733 return 0; 3731 return 0;
3734} 3732}
@@ -3741,13 +3739,13 @@ e1000_resume(struct pci_dev *pdev)
3741 struct e1000_adapter *adapter = netdev_priv(netdev); 3739 struct e1000_adapter *adapter = netdev_priv(netdev);
3742 uint32_t manc, ret_val, swsm; 3740 uint32_t manc, ret_val, swsm;
3743 3741
3744 pci_set_power_state(pdev, 0); 3742 pci_set_power_state(pdev, PCI_D0);
3745 pci_restore_state(pdev); 3743 pci_restore_state(pdev);
3746 ret_val = pci_enable_device(pdev); 3744 ret_val = pci_enable_device(pdev);
3747 pci_set_master(pdev); 3745 pci_set_master(pdev);
3748 3746
3749 pci_enable_wake(pdev, 3, 0); 3747 pci_enable_wake(pdev, PCI_D3hot, 0);
3750 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ 3748 pci_enable_wake(pdev, PCI_D3cold, 0);
3751 3749
3752 e1000_reset(adapter); 3750 e1000_reset(adapter);
3753 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 3751 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7d93948aec83..d6eefdb71c17 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1372,7 +1372,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1372 1372
1373 /* synchronized against open : rtnl_lock() held by caller */ 1373 /* synchronized against open : rtnl_lock() held by caller */
1374 if (netif_running(dev)) { 1374 if (netif_running(dev)) {
1375 u8 *base = get_hwbase(dev); 1375 u8 __iomem *base = get_hwbase(dev);
1376 /* 1376 /*
1377 * It seems that the nic preloads valid ring entries into an 1377 * It seems that the nic preloads valid ring entries into an
1378 * internal buffer. The procedure for flushing everything is 1378 * internal buffer. The procedure for flushing everything is
@@ -1423,7 +1423,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1423 1423
1424static void nv_copy_mac_to_hw(struct net_device *dev) 1424static void nv_copy_mac_to_hw(struct net_device *dev)
1425{ 1425{
1426 u8 *base = get_hwbase(dev); 1426 u8 __iomem *base = get_hwbase(dev);
1427 u32 mac[2]; 1427 u32 mac[2];
1428 1428
1429 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 1429 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
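
The e1000_hw.h and forcedeth.c hunks add the __iomem address-space qualifier so that sparse can flag plain dereferences of MMIO pointers. The convention is to keep the cookie returned by ioremap() in a __iomem-qualified pointer and to touch the hardware only through readl()/writel() and friends; a small sketch with a made-up register offset and foo_* names:

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>

#define FOO_REG_CTRL    0x00    /* made-up register offset */

static int foo_map_and_poke(struct pci_dev *pdev)
{
        u8 __iomem *base;
        u32 ctrl;

        base = ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
        if (!base)
                return -ENOMEM;

        /* Never dereference 'base' directly; always use the accessors,
         * otherwise sparse (and some architectures) will complain. */
        ctrl = readl(base + FOO_REG_CTRL);
        writel(ctrl | 0x1, base + FOO_REG_CTRL);

        iounmap(base);
        return 0;
}
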
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index d9df1d9a5739..bc9a3bf8d560 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -204,6 +204,10 @@ KERN_INFO " Further modifications by Keith Underwood <keithu@parl.clemson.edu>
204 204
205#define RUN_AT(x) (jiffies + (x)) 205#define RUN_AT(x) (jiffies + (x))
206 206
207#ifndef ADDRLEN
208#define ADDRLEN 32
209#endif
210
207/* Condensed bus+endian portability operations. */ 211/* Condensed bus+endian portability operations. */
208#if ADDRLEN == 64 212#if ADDRLEN == 64
209#define cpu_to_leXX(addr) cpu_to_le64(addr) 213#define cpu_to_leXX(addr) cpu_to_le64(addr)
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ba9f0580e1f9..2946e037a9b1 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -98,7 +98,7 @@ static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
98 98
99static char bpq_eth_addr[6]; 99static char bpq_eth_addr[6];
100 100
101static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *); 101static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
102static int bpq_device_event(struct notifier_block *, unsigned long, void *); 102static int bpq_device_event(struct notifier_block *, unsigned long, void *);
103static const char *bpq_print_ethaddr(const unsigned char *); 103static const char *bpq_print_ethaddr(const unsigned char *);
104 104
@@ -165,7 +165,7 @@ static inline int dev_is_ethdev(struct net_device *dev)
165/* 165/*
166 * Receive an AX.25 frame via an ethernet interface. 166 * Receive an AX.25 frame via an ethernet interface.
167 */ 167 */
168static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) 168static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
169{ 169{
170 int len; 170 int len;
171 char * ptr; 171 char * ptr;
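
The bpqether.c hunks track a networking-core interface change: packet_type receive handlers now take a fourth argument, the original ingress device (orig_dev), which can differ from dev when frames are re-targeted, for example by bonding. A sketch of registering such a handler under the new signature; the foo_* names and the ETH_P_ALL protocol choice are purely illustrative.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* New-style receive handler: orig_dev names the device the frame
 * physically arrived on. */
static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *ptype, struct net_device *orig_dev)
{
        /* dev and orig_dev differ e.g. when bonding re-targets the skb */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type foo_packet_type = {
        .type = __constant_htons(ETH_P_ALL),    /* illustrative protocol */
        .func = foo_rcv,
};

static int __init foo_init(void)
{
        dev_add_pack(&foo_packet_type);
        return 0;
}

static void __exit foo_exit(void)
{
        dev_remove_pack(&foo_packet_type);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
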
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c39b0609742a..32d5fabd4b10 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1144,7 +1144,7 @@ static void ibmveth_proc_unregister_driver(void)
1144 1144
1145static struct vio_device_id ibmveth_device_table[] __devinitdata= { 1145static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1146 { "network", "IBM,l-lan"}, 1146 { "network", "IBM,l-lan"},
1147 { 0,} 1147 { "", "" }
1148}; 1148};
1149 1149
1150MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 1150MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
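
The ibmveth.c hunk changes the table sentinel from a bare 0 to a pair of empty strings because the vio_device_id fields are fixed-size character arrays and the vio bus match code stops at the first entry whose type string is empty. Roughly, assuming the ppc64 header layout of this era (the foo_* name is illustrative):

#include <linux/module.h>
#include <asm/vio.h>            /* struct vio_device_id in the ppc64 tree of this era */

static struct vio_device_id foo_device_table[] __devinitdata = {
        { "network", "IBM,l-lan" },
        { "", "" }              /* empty-string sentinel terminates the table */
};
MODULE_DEVICE_TABLE(vio, foo_device_table);
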
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 10125a1dba22..dd89bda1f131 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -4,10 +4,10 @@
4 * Description: Driver for the SMC Infrared Communications Controller 4 * Description: Driver for the SMC Infrared Communications Controller
5 * Status: Experimental. 5 * Status: Experimental.
6 * Author: Daniele Peri (peri@csai.unipa.it) 6 * Author: Daniele Peri (peri@csai.unipa.it)
7 * Created at: 7 * Created at:
8 * Modified at: 8 * Modified at:
9 * Modified by: 9 * Modified by:
10 * 10 *
11 * Copyright (c) 2002 Daniele Peri 11 * Copyright (c) 2002 Daniele Peri
12 * All Rights Reserved. 12 * All Rights Reserved.
13 * Copyright (c) 2002 Jean Tourrilhes 13 * Copyright (c) 2002 Jean Tourrilhes
@@ -17,26 +17,26 @@
17 * 17 *
18 * Copyright (c) 2001 Stefani Seibold 18 * Copyright (c) 2001 Stefani Seibold
19 * Copyright (c) 1999-2001 Dag Brattli 19 * Copyright (c) 1999-2001 Dag Brattli
20 * Copyright (c) 1998-1999 Thomas Davis, 20 * Copyright (c) 1998-1999 Thomas Davis,
21 * 21 *
22 * and irport.c: 22 * and irport.c:
23 * 23 *
24 * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved. 24 * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
25 * 25 *
26 * 26 *
27 * This program is free software; you can redistribute it and/or 27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License as 28 * modify it under the terms of the GNU General Public License as
29 * published by the Free Software Foundation; either version 2 of 29 * published by the Free Software Foundation; either version 2 of
30 * the License, or (at your option) any later version. 30 * the License, or (at your option) any later version.
31 * 31 *
32 * This program is distributed in the hope that it will be useful, 32 * This program is distributed in the hope that it will be useful,
33 * but WITHOUT ANY WARRANTY; without even the implied warranty of 33 * but WITHOUT ANY WARRANTY; without even the implied warranty of
34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
35 * GNU General Public License for more details. 35 * GNU General Public License for more details.
36 * 36 *
37 * You should have received a copy of the GNU General Public License 37 * You should have received a copy of the GNU General Public License
38 * along with this program; if not, write to the Free Software 38 * along with this program; if not, write to the Free Software
39 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 39 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
40 * MA 02111-1307 USA 40 * MA 02111-1307 USA
41 * 41 *
42 ********************************************************************/ 42 ********************************************************************/
@@ -68,24 +68,42 @@
68#include "smsc-ircc2.h" 68#include "smsc-ircc2.h"
69#include "smsc-sio.h" 69#include "smsc-sio.h"
70 70
71
72MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
73MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
74MODULE_LICENSE("GPL");
75
76static int ircc_dma = 255;
77module_param(ircc_dma, int, 0);
78MODULE_PARM_DESC(ircc_dma, "DMA channel");
79
80static int ircc_irq = 255;
81module_param(ircc_irq, int, 0);
82MODULE_PARM_DESC(ircc_irq, "IRQ line");
83
84static int ircc_fir;
85module_param(ircc_fir, int, 0);
86MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
87
88static int ircc_sir;
89module_param(ircc_sir, int, 0);
90MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
91
92static int ircc_cfg;
93module_param(ircc_cfg, int, 0);
94MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
95
96static int ircc_transceiver;
97module_param(ircc_transceiver, int, 0);
98MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
99
71/* Types */ 100/* Types */
72 101
73struct smsc_transceiver { 102struct smsc_transceiver {
74 char *name; 103 char *name;
75 void (*set_for_speed)(int fir_base, u32 speed); 104 void (*set_for_speed)(int fir_base, u32 speed);
76 int (*probe)(int fir_base); 105 int (*probe)(int fir_base);
77}; 106};
78typedef struct smsc_transceiver smsc_transceiver_t;
79
80#if 0
81struct smc_chip {
82 char *name;
83 u16 flags;
84 u8 devid;
85 u8 rev;
86};
87typedef struct smc_chip smc_chip_t;
88#endif
89 107
90struct smsc_chip { 108struct smsc_chip {
91 char *name; 109 char *name;
@@ -96,20 +114,18 @@ struct smsc_chip {
96 u8 devid; 114 u8 devid;
97 u8 rev; 115 u8 rev;
98}; 116};
99typedef struct smsc_chip smsc_chip_t;
100 117
101struct smsc_chip_address { 118struct smsc_chip_address {
102 unsigned int cfg_base; 119 unsigned int cfg_base;
103 unsigned int type; 120 unsigned int type;
104}; 121};
105typedef struct smsc_chip_address smsc_chip_address_t;
106 122
107/* Private data for each instance */ 123/* Private data for each instance */
108struct smsc_ircc_cb { 124struct smsc_ircc_cb {
109 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 125 struct net_device *netdev; /* Yes! we are some kind of netdevice */
110 struct net_device_stats stats; 126 struct net_device_stats stats;
111 struct irlap_cb *irlap; /* The link layer we are bound to */ 127 struct irlap_cb *irlap; /* The link layer we are bound to */
112 128
113 chipio_t io; /* IrDA controller information */ 129 chipio_t io; /* IrDA controller information */
114 iobuff_t tx_buff; /* Transmit buffer */ 130 iobuff_t tx_buff; /* Transmit buffer */
115 iobuff_t rx_buff; /* Receive buffer */ 131 iobuff_t rx_buff; /* Receive buffer */
@@ -119,7 +135,7 @@ struct smsc_ircc_cb {
119 struct qos_info qos; /* QoS capabilities for this device */ 135 struct qos_info qos; /* QoS capabilities for this device */
120 136
121 spinlock_t lock; /* For serializing operations */ 137 spinlock_t lock; /* For serializing operations */
122 138
123 __u32 new_speed; 139 __u32 new_speed;
124 __u32 flags; /* Interface flags */ 140 __u32 flags; /* Interface flags */
125 141
@@ -127,18 +143,20 @@ struct smsc_ircc_cb {
127 int tx_len; /* Number of frames in tx_buff */ 143 int tx_len; /* Number of frames in tx_buff */
128 144
129 int transceiver; 145 int transceiver;
130 struct pm_dev *pmdev; 146 struct platform_device *pldev;
131}; 147};
132 148
133/* Constants */ 149/* Constants */
134 150
135static const char *driver_name = "smsc-ircc2"; 151#define SMSC_IRCC2_DRIVER_NAME "smsc-ircc2"
136#define DIM(x) (sizeof(x)/(sizeof(*(x)))) 152
137#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600 153#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600
138#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1 154#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1
139#define SMSC_IRCC2_C_NET_TIMEOUT 0 155#define SMSC_IRCC2_C_NET_TIMEOUT 0
140#define SMSC_IRCC2_C_SIR_STOP 0 156#define SMSC_IRCC2_C_SIR_STOP 0
141 157
158static const char *driver_name = SMSC_IRCC2_DRIVER_NAME;
159
142/* Prototypes */ 160/* Prototypes */
143 161
144static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq); 162static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq);
@@ -147,15 +165,15 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, unsigned int fir_base,
147static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self); 165static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self);
148static void smsc_ircc_init_chip(struct smsc_ircc_cb *self); 166static void smsc_ircc_init_chip(struct smsc_ircc_cb *self);
149static int __exit smsc_ircc_close(struct smsc_ircc_cb *self); 167static int __exit smsc_ircc_close(struct smsc_ircc_cb *self);
150static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase); 168static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self);
151static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase); 169static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self);
152static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self); 170static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self);
153static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev); 171static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev);
154static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev); 172static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
155static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs); 173static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs);
156static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase); 174static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self);
157static void smsc_ircc_change_speed(void *priv, u32 speed); 175static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed);
158static void smsc_ircc_set_sir_speed(void *priv, u32 speed); 176static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, u32 speed);
159static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs); 177static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
160static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev); 178static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev);
161static void smsc_ircc_sir_start(struct smsc_ircc_cb *self); 179static void smsc_ircc_sir_start(struct smsc_ircc_cb *self);
@@ -171,7 +189,6 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cm
171static void smsc_ircc_timeout(struct net_device *dev); 189static void smsc_ircc_timeout(struct net_device *dev);
172#endif 190#endif
173static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev); 191static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
174static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
175static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self); 192static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
176static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self); 193static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
177static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed); 194static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
@@ -179,9 +196,9 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
179 196
180/* Probing */ 197/* Probing */
181static int __init smsc_ircc_look_for_chips(void); 198static int __init smsc_ircc_look_for_chips(void);
182static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type); 199static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
183static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg_base, char *type); 200static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
184static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type); 201static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
185static int __init smsc_superio_fdc(unsigned short cfg_base); 202static int __init smsc_superio_fdc(unsigned short cfg_base);
186static int __init smsc_superio_lpc(unsigned short cfg_base); 203static int __init smsc_superio_lpc(unsigned short cfg_base);
187 204
@@ -196,21 +213,26 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
196 213
197/* Power Management */ 214/* Power Management */
198 215
199static void smsc_ircc_suspend(struct smsc_ircc_cb *self); 216static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level);
200static void smsc_ircc_wakeup(struct smsc_ircc_cb *self); 217static int smsc_ircc_resume(struct device *dev, u32 level);
201static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
202 218
219static struct device_driver smsc_ircc_driver = {
220 .name = SMSC_IRCC2_DRIVER_NAME,
221 .bus = &platform_bus_type,
222 .suspend = smsc_ircc_suspend,
223 .resume = smsc_ircc_resume,
224};
203 225
204/* Transceivers for SMSC-ircc */ 226/* Transceivers for SMSC-ircc */
205 227
206static smsc_transceiver_t smsc_transceivers[]= 228static struct smsc_transceiver smsc_transceivers[] =
207{ 229{
208 { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800}, 230 { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800 },
209 { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select}, 231 { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select },
210 { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc}, 232 { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc },
211 { NULL, NULL} 233 { NULL, NULL }
212}; 234};
213#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (DIM(smsc_transceivers)-1) 235#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (ARRAY_SIZE(smsc_transceivers) - 1)
214 236
215/* SMC SuperIO chipsets definitions */ 237/* SMC SuperIO chipsets definitions */
216 238
@@ -221,7 +243,7 @@ static smsc_transceiver_t smsc_transceivers[]=
221#define FIR 4 /* SuperIO Chip has fast IRDA */ 243#define FIR 4 /* SuperIO Chip has fast IRDA */
222#define SERx4 8 /* SuperIO Chip supports 115,2 KBaud * 4=460,8 KBaud */ 244#define SERx4 8 /* SuperIO Chip supports 115,2 KBaud * 4=460,8 KBaud */
223 245
224static smsc_chip_t __initdata fdc_chips_flat[]= 246static struct smsc_chip __initdata fdc_chips_flat[] =
225{ 247{
226 /* Base address 0x3f0 or 0x370 */ 248 /* Base address 0x3f0 or 0x370 */
227 { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */ 249 { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */
@@ -235,7 +257,7 @@ static smsc_chip_t __initdata fdc_chips_flat[]=
235 { NULL } 257 { NULL }
236}; 258};
237 259
238static smsc_chip_t __initdata fdc_chips_paged[]= 260static struct smsc_chip __initdata fdc_chips_paged[] =
239{ 261{
240 /* Base address 0x3f0 or 0x370 */ 262 /* Base address 0x3f0 or 0x370 */
241 { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 }, 263 { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 },
@@ -254,7 +276,7 @@ static smsc_chip_t __initdata fdc_chips_paged[]=
254 { NULL } 276 { NULL }
255}; 277};
256 278
257static smsc_chip_t __initdata lpc_chips_flat[]= 279static struct smsc_chip __initdata lpc_chips_flat[] =
258{ 280{
259 /* Base address 0x2E or 0x4E */ 281 /* Base address 0x2E or 0x4E */
260 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, 282 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
@@ -262,7 +284,7 @@ static smsc_chip_t __initdata lpc_chips_flat[]=
262 { NULL } 284 { NULL }
263}; 285};
264 286
265static smsc_chip_t __initdata lpc_chips_paged[]= 287static struct smsc_chip __initdata lpc_chips_paged[] =
266{ 288{
267 /* Base address 0x2E or 0x4E */ 289 /* Base address 0x2E or 0x4E */
268 { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 }, 290 { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 },
@@ -281,33 +303,25 @@ static smsc_chip_t __initdata lpc_chips_paged[]=
281#define SMSCSIO_TYPE_FLAT 4 303#define SMSCSIO_TYPE_FLAT 4
282#define SMSCSIO_TYPE_PAGED 8 304#define SMSCSIO_TYPE_PAGED 8
283 305
284static smsc_chip_address_t __initdata possible_addresses[]= 306static struct smsc_chip_address __initdata possible_addresses[] =
285{ 307{
286 {0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 308 { 0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
287 {0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 309 { 0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
288 {0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 310 { 0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
289 {0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 311 { 0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
290 {0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 312 { 0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
291 {0,0} 313 { 0, 0 }
292}; 314};
293 315
294/* Globals */ 316/* Globals */
295 317
296static struct smsc_ircc_cb *dev_self[] = { NULL, NULL}; 318static struct smsc_ircc_cb *dev_self[] = { NULL, NULL };
297 319static unsigned short dev_count;
298static int ircc_irq=255;
299static int ircc_dma=255;
300static int ircc_fir=0;
301static int ircc_sir=0;
302static int ircc_cfg=0;
303static int ircc_transceiver=0;
304
305static unsigned short dev_count=0;
306 320
307static inline void register_bank(int iobase, int bank) 321static inline void register_bank(int iobase, int bank)
308{ 322{
309 outb(((inb(iobase+IRCC_MASTER) & 0xf0) | (bank & 0x07)), 323 outb(((inb(iobase + IRCC_MASTER) & 0xf0) | (bank & 0x07)),
310 iobase+IRCC_MASTER); 324 iobase + IRCC_MASTER);
311} 325}
312 326
313 327
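
register_bank(), reindented above, is the usual read-modify-write bank-select idiom for SuperIO-style parts: preserve the control bits in the top nibble of the master register and rewrite only the 3-bit bank field. The same operation spelled out step by step; the helper name is illustrative and the IRCC_MASTER offset value below is an assumption, not taken from smsc-ircc2.h.

#include <asm/io.h>

#define IRCC_MASTER     0x07    /* master register offset: assumed for illustration */

/* Select one of the chip's register banks without disturbing the
 * control bits kept in the top nibble of the master register. */
static inline void select_bank(int iobase, int bank)
{
        unsigned char master;

        master = inb(iobase + IRCC_MASTER);     /* read current value       */
        master &= 0xf0;                         /* keep control bits        */
        master |= bank & 0x07;                  /* insert 3-bit bank number */
        outb(master, iobase + IRCC_MASTER);     /* write it back            */
}
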
@@ -327,34 +341,44 @@ static inline void register_bank(int iobase, int bank)
327 */ 341 */
328static int __init smsc_ircc_init(void) 342static int __init smsc_ircc_init(void)
329{ 343{
330 int ret=-ENODEV; 344 int ret;
331 345
332 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 346 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
333 347
334 dev_count=0; 348 ret = driver_register(&smsc_ircc_driver);
335 349 if (ret) {
336 if ((ircc_fir>0)&&(ircc_sir>0)) { 350 IRDA_ERROR("%s, Can't register driver!\n", driver_name);
351 return ret;
352 }
353
354 dev_count = 0;
355
356 if (ircc_fir > 0 && ircc_sir > 0) {
337 IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir); 357 IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
338 IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir); 358 IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
339 359
340 if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq) == 0) 360 if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
341 return 0; 361 ret = -ENODEV;
342 362 } else {
343 return -ENODEV; 363 ret = -ENODEV;
344 } 364
365 /* try user provided configuration register base address */
366 if (ircc_cfg > 0) {
367 IRDA_MESSAGE(" Overriding configuration address "
368 "0x%04x\n", ircc_cfg);
369 if (!smsc_superio_fdc(ircc_cfg))
370 ret = 0;
371 if (!smsc_superio_lpc(ircc_cfg))
372 ret = 0;
373 }
345 374
346 /* try user provided configuration register base address */ 375 if (smsc_ircc_look_for_chips() > 0)
347 if (ircc_cfg>0) {
348 IRDA_MESSAGE(" Overriding configuration address 0x%04x\n",
349 ircc_cfg);
350 if (!smsc_superio_fdc(ircc_cfg))
351 ret = 0;
352 if (!smsc_superio_lpc(ircc_cfg))
353 ret = 0; 376 ret = 0;
354 } 377 }
355 378
356 if(smsc_ircc_look_for_chips()>0) ret = 0; 379 if (ret)
357 380 driver_unregister(&smsc_ircc_driver);
381
358 return ret; 382 return ret;
359} 383}
360 384
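
The rewritten smsc_ircc_init() registers a struct device_driver on the platform bus before any probing, so the platform devices created later have a driver to bind to and power management goes through the driver-model suspend/resume hooks rather than the removed pm_register() path. The registration pattern in isolation, using the driver-model callback signatures of this era (foo_* names are illustrative):

#include <linux/device.h>
#include <linux/init.h>

/* Driver-model callbacks of this era take a pm_message_t/level pair. */
static int foo_suspend(struct device *dev, pm_message_t state, u32 level)
{
        return 0;
}

static int foo_resume(struct device *dev, u32 level)
{
        return 0;
}

static struct device_driver foo_driver = {
        .name           = "foo",
        .bus            = &platform_bus_type,
        .suspend        = foo_suspend,
        .resume         = foo_resume,
};

static int __init foo_init(void)
{
        int ret = driver_register(&foo_driver);

        if (ret)
                return ret;

        /* ... probe the hardware, register matching platform devices ... */

        return 0;
}
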
@@ -369,15 +393,15 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
369 struct smsc_ircc_cb *self; 393 struct smsc_ircc_cb *self;
370 struct net_device *dev; 394 struct net_device *dev;
371 int err; 395 int err;
372 396
373 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 397 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
374 398
375 err = smsc_ircc_present(fir_base, sir_base); 399 err = smsc_ircc_present(fir_base, sir_base);
376 if(err) 400 if (err)
377 goto err_out; 401 goto err_out;
378 402
379 err = -ENOMEM; 403 err = -ENOMEM;
380 if (dev_count > DIM(dev_self)) { 404 if (dev_count >= ARRAY_SIZE(dev_self)) {
381 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__); 405 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__);
382 goto err_out1; 406 goto err_out1;
383 } 407 }
@@ -396,14 +420,14 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
396 dev->hard_start_xmit = smsc_ircc_hard_xmit_sir; 420 dev->hard_start_xmit = smsc_ircc_hard_xmit_sir;
397#if SMSC_IRCC2_C_NET_TIMEOUT 421#if SMSC_IRCC2_C_NET_TIMEOUT
398 dev->tx_timeout = smsc_ircc_timeout; 422 dev->tx_timeout = smsc_ircc_timeout;
399 dev->watchdog_timeo = HZ*2; /* Allow enough time for speed change */ 423 dev->watchdog_timeo = HZ * 2; /* Allow enough time for speed change */
400#endif 424#endif
401 dev->open = smsc_ircc_net_open; 425 dev->open = smsc_ircc_net_open;
402 dev->stop = smsc_ircc_net_close; 426 dev->stop = smsc_ircc_net_close;
403 dev->do_ioctl = smsc_ircc_net_ioctl; 427 dev->do_ioctl = smsc_ircc_net_ioctl;
404 dev->get_stats = smsc_ircc_net_get_stats; 428 dev->get_stats = smsc_ircc_net_get_stats;
405 429
406 self = dev->priv; 430 self = netdev_priv(dev);
407 self->netdev = dev; 431 self->netdev = dev;
408 432
409 /* Make ifconfig display some details */ 433 /* Make ifconfig display some details */
@@ -411,10 +435,10 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
411 dev->irq = self->io.irq = irq; 435 dev->irq = self->io.irq = irq;
412 436
413 /* Need to store self somewhere */ 437 /* Need to store self somewhere */
414 dev_self[dev_count++] = self; 438 dev_self[dev_count] = self;
415 spin_lock_init(&self->lock); 439 spin_lock_init(&self->lock);
416 440
417 self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE; 441 self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
418 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE; 442 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
419 443
420 self->rx_buff.head = 444 self->rx_buff.head =
@@ -442,33 +466,40 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
442 self->rx_buff.state = OUTSIDE_FRAME; 466 self->rx_buff.state = OUTSIDE_FRAME;
443 self->tx_buff.data = self->tx_buff.head; 467 self->tx_buff.data = self->tx_buff.head;
444 self->rx_buff.data = self->rx_buff.head; 468 self->rx_buff.data = self->rx_buff.head;
445
446 smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
447 469
470 smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
448 smsc_ircc_setup_qos(self); 471 smsc_ircc_setup_qos(self);
449
450 smsc_ircc_init_chip(self); 472 smsc_ircc_init_chip(self);
451 473
452 if(ircc_transceiver > 0 && 474 if (ircc_transceiver > 0 &&
453 ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS) 475 ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS)
454 self->transceiver = ircc_transceiver; 476 self->transceiver = ircc_transceiver;
455 else 477 else
456 smsc_ircc_probe_transceiver(self); 478 smsc_ircc_probe_transceiver(self);
457 479
458 err = register_netdev(self->netdev); 480 err = register_netdev(self->netdev);
459 if(err) { 481 if (err) {
460 IRDA_ERROR("%s, Network device registration failed!\n", 482 IRDA_ERROR("%s, Network device registration failed!\n",
461 driver_name); 483 driver_name);
462 goto err_out4; 484 goto err_out4;
463 } 485 }
464 486
465 self->pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, smsc_ircc_pmproc); 487 self->pldev = platform_device_register_simple(SMSC_IRCC2_DRIVER_NAME,
466 if (self->pmdev) 488 dev_count, NULL, 0);
467 self->pmdev->data = self; 489 if (IS_ERR(self->pldev)) {
490 err = PTR_ERR(self->pldev);
491 goto err_out5;
492 }
493 dev_set_drvdata(&self->pldev->dev, self);
468 494
469 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 495 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
496 dev_count++;
470 497
471 return 0; 498 return 0;
499
500 err_out5:
501 unregister_netdev(self->netdev);
502
472 err_out4: 503 err_out4:
473 dma_free_coherent(NULL, self->tx_buff.truesize, 504 dma_free_coherent(NULL, self->tx_buff.truesize,
474 self->tx_buff.head, self->tx_buff_dma); 505 self->tx_buff.head, self->tx_buff_dma);
@@ -477,7 +508,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
477 self->rx_buff.head, self->rx_buff_dma); 508 self->rx_buff.head, self->rx_buff_dma);
478 err_out2: 509 err_out2:
479 free_netdev(self->netdev); 510 free_netdev(self->netdev);
480 dev_self[--dev_count] = NULL; 511 dev_self[dev_count] = NULL;
481 err_out1: 512 err_out1:
482 release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT); 513 release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
483 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT); 514 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
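
smsc_ircc_open() now pairs each detected chip with a platform device and stores the private structure with dev_set_drvdata(), so the suspend/resume callbacks can recover it later with dev_get_drvdata(). The create/teardown pairing in a condensed sketch (foo_* names are illustrative, error handling trimmed):

#include <linux/device.h>
#include <linux/err.h>

struct foo_instance {
        struct platform_device *pldev;
        /* ... per-device state ... */
};

static int foo_attach(struct foo_instance *self, int id)
{
        self->pldev = platform_device_register_simple("foo", id, NULL, 0);
        if (IS_ERR(self->pldev))
                return PTR_ERR(self->pldev);

        /* Lets suspend/resume map the struct device back to 'self'. */
        dev_set_drvdata(&self->pldev->dev, self);
        return 0;
}

static void foo_detach(struct foo_instance *self)
{
        dev_set_drvdata(&self->pldev->dev, NULL);
        platform_device_unregister(self->pldev);
}
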
@@ -511,16 +542,16 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
511 542
512 register_bank(fir_base, 3); 543 register_bank(fir_base, 3);
513 544
514 high = inb(fir_base+IRCC_ID_HIGH); 545 high = inb(fir_base + IRCC_ID_HIGH);
515 low = inb(fir_base+IRCC_ID_LOW); 546 low = inb(fir_base + IRCC_ID_LOW);
516 chip = inb(fir_base+IRCC_CHIP_ID); 547 chip = inb(fir_base + IRCC_CHIP_ID);
517 version = inb(fir_base+IRCC_VERSION); 548 version = inb(fir_base + IRCC_VERSION);
518 config = inb(fir_base+IRCC_INTERFACE); 549 config = inb(fir_base + IRCC_INTERFACE);
519 dma = config & IRCC_INTERFACE_DMA_MASK; 550 dma = config & IRCC_INTERFACE_DMA_MASK;
520 irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4; 551 irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
521 552
522 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) { 553 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
523 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n", 554 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
524 __FUNCTION__, fir_base); 555 __FUNCTION__, fir_base);
525 goto out3; 556 goto out3;
526 } 557 }
@@ -529,6 +560,7 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
529 chip & 0x0f, version, fir_base, sir_base, dma, irq); 560 chip & 0x0f, version, fir_base, sir_base, dma, irq);
530 561
531 return 0; 562 return 0;
563
532 out3: 564 out3:
533 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT); 565 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
534 out2: 566 out2:
@@ -543,16 +575,16 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
543 * Setup I/O 575 * Setup I/O
544 * 576 *
545 */ 577 */
546static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, 578static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
547 unsigned int fir_base, unsigned int sir_base, 579 unsigned int fir_base, unsigned int sir_base,
548 u8 dma, u8 irq) 580 u8 dma, u8 irq)
549{ 581{
550 unsigned char config, chip_dma, chip_irq; 582 unsigned char config, chip_dma, chip_irq;
551 583
552 register_bank(fir_base, 3); 584 register_bank(fir_base, 3);
553 config = inb(fir_base+IRCC_INTERFACE); 585 config = inb(fir_base + IRCC_INTERFACE);
554 chip_dma = config & IRCC_INTERFACE_DMA_MASK; 586 chip_dma = config & IRCC_INTERFACE_DMA_MASK;
555 chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4; 587 chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
556 588
557 self->io.fir_base = fir_base; 589 self->io.fir_base = fir_base;
558 self->io.sir_base = sir_base; 590 self->io.sir_base = sir_base;
@@ -566,17 +598,15 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
566 IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n", 598 IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
567 driver_name, chip_irq, irq); 599 driver_name, chip_irq, irq);
568 self->io.irq = irq; 600 self->io.irq = irq;
569 } 601 } else
570 else
571 self->io.irq = chip_irq; 602 self->io.irq = chip_irq;
572 603
573 if (dma < 255) { 604 if (dma < 255) {
574 if (dma != chip_dma) 605 if (dma != chip_dma)
575 IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n", 606 IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
576 driver_name, chip_dma, dma); 607 driver_name, chip_dma, dma);
577 self->io.dma = dma; 608 self->io.dma = dma;
578 } 609 } else
579 else
580 self->io.dma = chip_dma; 610 self->io.dma = chip_dma;
581 611
582} 612}
@@ -591,7 +621,7 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
591{ 621{
592 /* Initialize QoS for this device */ 622 /* Initialize QoS for this device */
593 irda_init_max_qos_capabilies(&self->qos); 623 irda_init_max_qos_capabilies(&self->qos);
594 624
595 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| 625 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
596 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); 626 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
597 627
@@ -608,43 +638,43 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
608 */ 638 */
609static void smsc_ircc_init_chip(struct smsc_ircc_cb *self) 639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
610{ 640{
611 int iobase, ir_mode, ctrl, fast; 641 int iobase, ir_mode, ctrl, fast;
612 642
613 IRDA_ASSERT( self != NULL, return; ); 643 IRDA_ASSERT(self != NULL, return;);
614 iobase = self->io.fir_base;
615 644
645 iobase = self->io.fir_base;
616 ir_mode = IRCC_CFGA_IRDA_SIR_A; 646 ir_mode = IRCC_CFGA_IRDA_SIR_A;
617 ctrl = 0; 647 ctrl = 0;
618 fast = 0; 648 fast = 0;
619 649
620 register_bank(iobase, 0); 650 register_bank(iobase, 0);
621 outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER); 651 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
622 outb(0x00, iobase+IRCC_MASTER); 652 outb(0x00, iobase + IRCC_MASTER);
623 653
624 register_bank(iobase, 1); 654 register_bank(iobase, 1);
625 outb(((inb(iobase+IRCC_SCE_CFGA) & 0x87) | ir_mode), 655 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | ir_mode),
626 iobase+IRCC_SCE_CFGA); 656 iobase + IRCC_SCE_CFGA);
627 657
628#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */ 658#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
629 outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM), 659 outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
630 iobase+IRCC_SCE_CFGB); 660 iobase + IRCC_SCE_CFGB);
631#else 661#else
632 outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR), 662 outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
633 iobase+IRCC_SCE_CFGB); 663 iobase + IRCC_SCE_CFGB);
634#endif 664#endif
635 (void) inb(iobase+IRCC_FIFO_THRESHOLD); 665 (void) inb(iobase + IRCC_FIFO_THRESHOLD);
636 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase+IRCC_FIFO_THRESHOLD); 666 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
637 667
638 register_bank(iobase, 4); 668 register_bank(iobase, 4);
639 outb((inb(iobase+IRCC_CONTROL) & 0x30) | ctrl, iobase+IRCC_CONTROL); 669 outb((inb(iobase + IRCC_CONTROL) & 0x30) | ctrl, iobase + IRCC_CONTROL);
640 670
641 register_bank(iobase, 0); 671 register_bank(iobase, 0);
642 outb(fast, iobase+IRCC_LCR_A); 672 outb(fast, iobase + IRCC_LCR_A);
643 673
644 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 674 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
645 675
646 /* Power on device */ 676 /* Power on device */
647 outb(0x00, iobase+IRCC_MASTER); 677 outb(0x00, iobase + IRCC_MASTER);
648} 678}
649 679
650/* 680/*
@@ -662,12 +692,12 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
662 692
663 IRDA_ASSERT(dev != NULL, return -1;); 693 IRDA_ASSERT(dev != NULL, return -1;);
664 694
665 self = dev->priv; 695 self = netdev_priv(dev);
666 696
667 IRDA_ASSERT(self != NULL, return -1;); 697 IRDA_ASSERT(self != NULL, return -1;);
668 698
669 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 699 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
670 700
671 switch (cmd) { 701 switch (cmd) {
672 case SIOCSBANDWIDTH: /* Set bandwidth */ 702 case SIOCSBANDWIDTH: /* Set bandwidth */
673 if (!capable(CAP_NET_ADMIN)) 703 if (!capable(CAP_NET_ADMIN))
@@ -703,14 +733,14 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
703 default: 733 default:
704 ret = -EOPNOTSUPP; 734 ret = -EOPNOTSUPP;
705 } 735 }
706 736
707 return ret; 737 return ret;
708} 738}
709 739
710static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev) 740static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
711{ 741{
712 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) dev->priv; 742 struct smsc_ircc_cb *self = netdev_priv(dev);
713 743
714 return &self->stats; 744 return &self->stats;
715} 745}
716 746
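
The repeated dev->priv to netdev_priv(dev) conversions rely on the private area being allocated together with the net_device (alloc_netdev() and wrappers such as alloc_irdadev() do this), so netdev_priv() simply computes its address. A minimal sketch of the allocation side of that contract, using a plain Ethernet setup for brevity (foo_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct foo_priv {
        int speed;
        /* ... */
};

static void foo_setup(struct net_device *dev)
{
        ether_setup(dev);
}

static struct net_device *foo_create(void)
{
        struct net_device *dev;
        struct foo_priv *priv;

        /* The private area is allocated together with the net_device,
         * so netdev_priv() can simply compute its address. */
        dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
        if (!dev)
                return NULL;

        priv = netdev_priv(dev);
        priv->speed = 9600;

        return dev;
}
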
@@ -724,11 +754,9 @@ static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
724 754
725static void smsc_ircc_timeout(struct net_device *dev) 755static void smsc_ircc_timeout(struct net_device *dev)
726{ 756{
727 struct smsc_ircc_cb *self; 757 struct smsc_ircc_cb *self = netdev_priv(dev);
728 unsigned long flags; 758 unsigned long flags;
729 759
730 self = (struct smsc_ircc_cb *) dev->priv;
731
732 IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n", 760 IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
733 dev->name, self->io.speed); 761 dev->name, self->io.speed);
734 spin_lock_irqsave(&self->lock, flags); 762 spin_lock_irqsave(&self->lock, flags);
@@ -751,26 +779,23 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
751{ 779{
752 struct smsc_ircc_cb *self; 780 struct smsc_ircc_cb *self;
753 unsigned long flags; 781 unsigned long flags;
754 int iobase;
755 s32 speed; 782 s32 speed;
756 783
757 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 784 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
758 785
759 IRDA_ASSERT(dev != NULL, return 0;); 786 IRDA_ASSERT(dev != NULL, return 0;);
760
761 self = (struct smsc_ircc_cb *) dev->priv;
762 IRDA_ASSERT(self != NULL, return 0;);
763 787
764 iobase = self->io.sir_base; 788 self = netdev_priv(dev);
789 IRDA_ASSERT(self != NULL, return 0;);
765 790
766 netif_stop_queue(dev); 791 netif_stop_queue(dev);
767 792
768 /* Make sure test of self->io.speed & speed change are atomic */ 793 /* Make sure test of self->io.speed & speed change are atomic */
769 spin_lock_irqsave(&self->lock, flags); 794 spin_lock_irqsave(&self->lock, flags);
770 795
771 /* Check if we need to change the speed */ 796 /* Check if we need to change the speed */
772 speed = irda_get_next_speed(skb); 797 speed = irda_get_next_speed(skb);
773 if ((speed != self->io.speed) && (speed != -1)) { 798 if (speed != self->io.speed && speed != -1) {
774 /* Check for empty frame */ 799 /* Check for empty frame */
775 if (!skb->len) { 800 if (!skb->len) {
776 /* 801 /*
@@ -787,27 +812,26 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
787 spin_unlock_irqrestore(&self->lock, flags); 812 spin_unlock_irqrestore(&self->lock, flags);
788 dev_kfree_skb(skb); 813 dev_kfree_skb(skb);
789 return 0; 814 return 0;
790 } else {
791 self->new_speed = speed;
792 } 815 }
816 self->new_speed = speed;
793 } 817 }
794 818
795 /* Init tx buffer */ 819 /* Init tx buffer */
796 self->tx_buff.data = self->tx_buff.head; 820 self->tx_buff.data = self->tx_buff.head;
797 821
798 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */ 822 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
799 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 823 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
800 self->tx_buff.truesize); 824 self->tx_buff.truesize);
801 825
802 self->stats.tx_bytes += self->tx_buff.len; 826 self->stats.tx_bytes += self->tx_buff.len;
803 827
804 /* Turn on transmit finished interrupt. Will fire immediately! */ 828 /* Turn on transmit finished interrupt. Will fire immediately! */
805 outb(UART_IER_THRI, iobase+UART_IER); 829 outb(UART_IER_THRI, self->io.sir_base + UART_IER);
806 830
807 spin_unlock_irqrestore(&self->lock, flags); 831 spin_unlock_irqrestore(&self->lock, flags);
808 832
809 dev_kfree_skb(skb); 833 dev_kfree_skb(skb);
810 834
811 return 0; 835 return 0;
812} 836}
813 837
@@ -826,9 +850,9 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
826 850
827 self->io.speed = speed; 851 self->io.speed = speed;
828 852
829 switch(speed) { 853 switch (speed) {
830 default: 854 default:
831 case 576000: 855 case 576000:
832 ir_mode = IRCC_CFGA_IRDA_HDLC; 856 ir_mode = IRCC_CFGA_IRDA_HDLC;
833 ctrl = IRCC_CRC; 857 ctrl = IRCC_CRC;
834 fast = 0; 858 fast = 0;
@@ -853,14 +877,14 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
853 Now in transceiver! 877 Now in transceiver!
854 /* This causes an interrupt */ 878 /* This causes an interrupt */
855 register_bank(fir_base, 0); 879 register_bank(fir_base, 0);
856 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast, fir_base+IRCC_LCR_A); 880 outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast, fir_base + IRCC_LCR_A);
857 #endif 881 #endif
858 882
859 register_bank(fir_base, 1); 883 register_bank(fir_base, 1);
860 outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base+IRCC_SCE_CFGA); 884 outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base + IRCC_SCE_CFGA);
861 885
862 register_bank(fir_base, 4); 886 register_bank(fir_base, 4);
863 outb((inb(fir_base+IRCC_CONTROL) & 0x30) | ctrl, fir_base+IRCC_CONTROL); 887 outb((inb(fir_base + IRCC_CONTROL) & 0x30) | ctrl, fir_base + IRCC_CONTROL);
864} 888}
865 889
866/* 890/*
@@ -885,31 +909,31 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
885 /* Reset everything */ 909 /* Reset everything */
886 910
887 /* Install FIR transmit handler */ 911 /* Install FIR transmit handler */
888 dev->hard_start_xmit = smsc_ircc_hard_xmit_fir; 912 dev->hard_start_xmit = smsc_ircc_hard_xmit_fir;
889 913
890 /* Clear FIFO */ 914 /* Clear FIFO */
891 outb(inb(fir_base+IRCC_LCR_A)|IRCC_LCR_A_FIFO_RESET, fir_base+IRCC_LCR_A); 915 outb(inb(fir_base + IRCC_LCR_A) | IRCC_LCR_A_FIFO_RESET, fir_base + IRCC_LCR_A);
892 916
893 /* Enable interrupt */ 917 /* Enable interrupt */
894 /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER);*/ 918 /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base + IRCC_IER);*/
895 919
896 register_bank(fir_base, 1); 920 register_bank(fir_base, 1);
897 921
898 /* Select the TX/RX interface */ 922 /* Select the TX/RX interface */
899#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */ 923#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */
900 outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM), 924 outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
901 fir_base+IRCC_SCE_CFGB); 925 fir_base + IRCC_SCE_CFGB);
902#else 926#else
903 outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR), 927 outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
904 fir_base+IRCC_SCE_CFGB); 928 fir_base + IRCC_SCE_CFGB);
905#endif 929#endif
906 (void) inb(fir_base+IRCC_FIFO_THRESHOLD); 930 (void) inb(fir_base + IRCC_FIFO_THRESHOLD);
907 931
908 /* Enable SCE interrupts */ 932 /* Enable SCE interrupts */
909 outb(0, fir_base+IRCC_MASTER); 933 outb(0, fir_base + IRCC_MASTER);
910 register_bank(fir_base, 0); 934 register_bank(fir_base, 0);
911 outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER); 935 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, fir_base + IRCC_IER);
912 outb(IRCC_MASTER_INT_EN, fir_base+IRCC_MASTER); 936 outb(IRCC_MASTER_INT_EN, fir_base + IRCC_MASTER);
913} 937}
914 938
915/* 939/*
@@ -923,13 +947,13 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
923 int fir_base; 947 int fir_base;
924 948
925 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 949 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
926 950
927 IRDA_ASSERT(self != NULL, return;); 951 IRDA_ASSERT(self != NULL, return;);
928 952
929 fir_base = self->io.fir_base; 953 fir_base = self->io.fir_base;
930 register_bank(fir_base, 0); 954 register_bank(fir_base, 0);
931 /*outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER);*/ 955 /*outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);*/
932 outb(inb(fir_base+IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base+IRCC_LCR_B); 956 outb(inb(fir_base + IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base + IRCC_LCR_B);
933} 957}
934 958
935 959
@@ -941,18 +965,15 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
941 * This function *must* be called with spinlock held, because it may 965 * This function *must* be called with spinlock held, because it may
942 * be called from the irq handler. - Jean II 966 * be called from the irq handler. - Jean II
943 */ 967 */
944static void smsc_ircc_change_speed(void *priv, u32 speed) 968static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
945{ 969{
946 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv;
947 struct net_device *dev; 970 struct net_device *dev;
948 int iobase;
949 int last_speed_was_sir; 971 int last_speed_was_sir;
950 972
951 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed); 973 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed);
952 974
953 IRDA_ASSERT(self != NULL, return;); 975 IRDA_ASSERT(self != NULL, return;);
954 dev = self->netdev; 976 dev = self->netdev;
955 iobase = self->io.fir_base;
956 977
957 last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED; 978 last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;
958 979
@@ -961,30 +982,30 @@ static void smsc_ircc_change_speed(void *priv, u32 speed)
961 speed= 1152000; 982 speed= 1152000;
962 self->io.speed = speed; 983 self->io.speed = speed;
963 last_speed_was_sir = 0; 984 last_speed_was_sir = 0;
964 smsc_ircc_fir_start(self); 985 smsc_ircc_fir_start(self);
965 #endif 986 #endif
966 987
967 if(self->io.speed == 0) 988 if (self->io.speed == 0)
968 smsc_ircc_sir_start(self); 989 smsc_ircc_sir_start(self);
969 990
970 #if 0 991 #if 0
971 if(!last_speed_was_sir) speed = self->io.speed; 992 if (!last_speed_was_sir) speed = self->io.speed;
972 #endif 993 #endif
973 994
974 if(self->io.speed != speed) smsc_ircc_set_transceiver_for_speed(self, speed); 995 if (self->io.speed != speed)
996 smsc_ircc_set_transceiver_for_speed(self, speed);
975 997
976 self->io.speed = speed; 998 self->io.speed = speed;
977 999
978 if(speed <= SMSC_IRCC2_MAX_SIR_SPEED) { 1000 if (speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
979 if(!last_speed_was_sir) { 1001 if (!last_speed_was_sir) {
980 smsc_ircc_fir_stop(self); 1002 smsc_ircc_fir_stop(self);
981 smsc_ircc_sir_start(self); 1003 smsc_ircc_sir_start(self);
982 } 1004 }
983 smsc_ircc_set_sir_speed(self, speed); 1005 smsc_ircc_set_sir_speed(self, speed);
984 } 1006 } else {
985 else { 1007 if (last_speed_was_sir) {
986 if(last_speed_was_sir) { 1008 #if SMSC_IRCC2_C_SIR_STOP
987 #if SMSC_IRCC2_C_SIR_STOP
988 smsc_ircc_sir_stop(self); 1009 smsc_ircc_sir_stop(self);
989 #endif 1010 #endif
990 smsc_ircc_fir_start(self); 1011 smsc_ircc_fir_start(self);
@@ -994,13 +1015,13 @@ static void smsc_ircc_change_speed(void *priv, u32 speed)
994 #if 0 1015 #if 0
995 self->tx_buff.len = 10; 1016 self->tx_buff.len = 10;
996 self->tx_buff.data = self->tx_buff.head; 1017 self->tx_buff.data = self->tx_buff.head;
997 1018
998 smsc_ircc_dma_xmit(self, iobase, 4000); 1019 smsc_ircc_dma_xmit(self, 4000);
999 #endif 1020 #endif
1000 /* Be ready for incoming frames */ 1021 /* Be ready for incoming frames */
1001 smsc_ircc_dma_receive(self, iobase); 1022 smsc_ircc_dma_receive(self);
1002 } 1023 }
1003 1024
1004 netif_wake_queue(dev); 1025 netif_wake_queue(dev);
1005} 1026}
1006 1027
@@ -1010,10 +1031,9 @@ static void smsc_ircc_change_speed(void *priv, u32 speed)
1010 * Set speed of IrDA port to specified baudrate 1031 * Set speed of IrDA port to specified baudrate
1011 * 1032 *
1012 */ 1033 */
1013void smsc_ircc_set_sir_speed(void *priv, __u32 speed) 1034void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1014{ 1035{
1015 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv; 1036 int iobase;
1016 int iobase;
1017 int fcr; /* FIFO control reg */ 1037 int fcr; /* FIFO control reg */
1018 int lcr; /* Line control reg */ 1038 int lcr; /* Line control reg */
1019 int divisor; 1039 int divisor;
@@ -1022,38 +1042,36 @@ void smsc_ircc_set_sir_speed(void *priv, __u32 speed)
1022 1042
1023 IRDA_ASSERT(self != NULL, return;); 1043 IRDA_ASSERT(self != NULL, return;);
1024 iobase = self->io.sir_base; 1044 iobase = self->io.sir_base;
1025 1045
1026 /* Update accounting for new speed */ 1046 /* Update accounting for new speed */
1027 self->io.speed = speed; 1047 self->io.speed = speed;
1028 1048
1029 /* Turn off interrupts */ 1049 /* Turn off interrupts */
1030 outb(0, iobase+UART_IER); 1050 outb(0, iobase + UART_IER);
1051
1052 divisor = SMSC_IRCC2_MAX_SIR_SPEED / speed;
1031 1053
1032 divisor = SMSC_IRCC2_MAX_SIR_SPEED/speed;
1033
1034 fcr = UART_FCR_ENABLE_FIFO; 1054 fcr = UART_FCR_ENABLE_FIFO;
1035 1055
1036 /* 1056 /*
1037 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and 1057 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and
1038 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget 1058 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget
1039 * about this timeout since it will always be fast enough. 1059 * about this timeout since it will always be fast enough.
1040 */ 1060 */
1041 if (self->io.speed < 38400) 1061 fcr |= self->io.speed < 38400 ?
1042 fcr |= UART_FCR_TRIGGER_1; 1062 UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
1043 else 1063
1044 fcr |= UART_FCR_TRIGGER_14;
1045
1046 /* IrDA ports use 8N1 */ 1064 /* IrDA ports use 8N1 */
1047 lcr = UART_LCR_WLEN8; 1065 lcr = UART_LCR_WLEN8;
1048 1066
1049 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */ 1067 outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */
1050 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */ 1068 outb(divisor & 0xff, iobase + UART_DLL); /* Set speed */
1051 outb(divisor >> 8, iobase+UART_DLM); 1069 outb(divisor >> 8, iobase + UART_DLM);
1052 outb(lcr, iobase+UART_LCR); /* Set 8N1 */ 1070 outb(lcr, iobase + UART_LCR); /* Set 8N1 */
1053 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */ 1071 outb(fcr, iobase + UART_FCR); /* Enable FIFO's */
1054 1072
1055 /* Turn on interrupts */ 1073 /* Turn on interrupts */
1056 outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, iobase+UART_IER); 1074 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
1057 1075
1058 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed); 1076 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed);
1059} 1077}
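
smsc_ircc_set_sir_speed() above is standard 16550 programming: derive the divisor from the maximum SIR rate, latch it through DLAB, then restore 8N1 and re-enable the FIFOs and interrupts. The same sequence as a standalone sketch; the 115200 base rate is an assumption standing in for SMSC_IRCC2_MAX_SIR_SPEED and the function name is illustrative.

#include <linux/serial_reg.h>
#include <asm/io.h>

/* Program a 16550-style UART for 'speed' baud, 8N1, FIFOs on. */
static void foo_set_uart_speed(int iobase, unsigned int speed)
{
        unsigned int divisor = 115200 / speed;  /* e.g. 12 for 9600 baud */
        unsigned char lcr = UART_LCR_WLEN8;     /* 8N1 */
        unsigned char fcr = UART_FCR_ENABLE_FIFO |
                (speed < 38400 ? UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14);

        outb(0, iobase + UART_IER);                     /* interrupts off     */
        outb(UART_LCR_DLAB | lcr, iobase + UART_LCR);   /* open divisor latch */
        outb(divisor & 0xff, iobase + UART_DLL);
        outb(divisor >> 8, iobase + UART_DLM);
        outb(lcr, iobase + UART_LCR);                   /* close latch, 8N1   */
        outb(fcr, iobase + UART_FCR);                   /* enable FIFOs       */
        outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI,
             iobase + UART_IER);                        /* interrupts back on */
}
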
@@ -1070,15 +1088,12 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1070 struct smsc_ircc_cb *self; 1088 struct smsc_ircc_cb *self;
1071 unsigned long flags; 1089 unsigned long flags;
1072 s32 speed; 1090 s32 speed;
1073 int iobase;
1074 int mtt; 1091 int mtt;
1075 1092
1076 IRDA_ASSERT(dev != NULL, return 0;); 1093 IRDA_ASSERT(dev != NULL, return 0;);
1077 self = (struct smsc_ircc_cb *) dev->priv; 1094 self = netdev_priv(dev);
1078 IRDA_ASSERT(self != NULL, return 0;); 1095 IRDA_ASSERT(self != NULL, return 0;);
1079 1096
1080 iobase = self->io.fir_base;
1081
1082 netif_stop_queue(dev); 1097 netif_stop_queue(dev);
1083 1098
1084 /* Make sure test of self->io.speed & speed change are atomic */ 1099 /* Make sure test of self->io.speed & speed change are atomic */
@@ -1086,30 +1101,31 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1086 1101
1087 /* Check if we need to change the speed after this frame */ 1102 /* Check if we need to change the speed after this frame */
1088 speed = irda_get_next_speed(skb); 1103 speed = irda_get_next_speed(skb);
1089 if ((speed != self->io.speed) && (speed != -1)) { 1104 if (speed != self->io.speed && speed != -1) {
1090 /* Check for empty frame */ 1105 /* Check for empty frame */
1091 if (!skb->len) { 1106 if (!skb->len) {
1092 /* Note : you should make sure that speed changes 1107 /* Note : you should make sure that speed changes
1093 * are not going to corrupt any outgoing frame. 1108 * are not going to corrupt any outgoing frame.
1094 * Look at nsc-ircc for the gory details - Jean II */ 1109 * Look at nsc-ircc for the gory details - Jean II */
1095 smsc_ircc_change_speed(self, speed); 1110 smsc_ircc_change_speed(self, speed);
1096 spin_unlock_irqrestore(&self->lock, flags); 1111 spin_unlock_irqrestore(&self->lock, flags);
1097 dev_kfree_skb(skb); 1112 dev_kfree_skb(skb);
1098 return 0; 1113 return 0;
1099 } else 1114 }
1100 self->new_speed = speed; 1115
1116 self->new_speed = speed;
1101 } 1117 }
1102 1118
1103 memcpy(self->tx_buff.head, skb->data, skb->len); 1119 memcpy(self->tx_buff.head, skb->data, skb->len);
1104 1120
1105 self->tx_buff.len = skb->len; 1121 self->tx_buff.len = skb->len;
1106 self->tx_buff.data = self->tx_buff.head; 1122 self->tx_buff.data = self->tx_buff.head;
1107 1123
1108 mtt = irda_get_mtt(skb); 1124 mtt = irda_get_mtt(skb);
1109 if (mtt) { 1125 if (mtt) {
1110 int bofs; 1126 int bofs;
1111 1127
1112 /* 1128 /*
1113 * Compute how many BOFs (STA or PA's) we need to waste the 1129 * Compute how many BOFs (STA or PA's) we need to waste the
1114 * min turn time given the speed of the link. 1130 * min turn time given the speed of the link.
1115 */ 1131 */
@@ -1117,11 +1133,12 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1117 if (bofs > 4095) 1133 if (bofs > 4095)
1118 bofs = 4095; 1134 bofs = 4095;
1119 1135
1120 smsc_ircc_dma_xmit(self, iobase, bofs); 1136 smsc_ircc_dma_xmit(self, bofs);
1121 } else { 1137 } else {
1122 /* Transmit frame */ 1138 /* Transmit frame */
1123 smsc_ircc_dma_xmit(self, iobase, 0); 1139 smsc_ircc_dma_xmit(self, 0);
1124 } 1140 }
1141
1125 spin_unlock_irqrestore(&self->lock, flags); 1142 spin_unlock_irqrestore(&self->lock, flags);
1126 dev_kfree_skb(skb); 1143 dev_kfree_skb(skb);
1127 1144
@@ -1129,43 +1146,44 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1129} 1146}
1130 1147
1131/* 1148/*
1132 * Function smsc_ircc_dma_xmit (self, iobase) 1149 * Function smsc_ircc_dma_xmit (self, bofs)
1133 * 1150 *
1134 * Transmit data using DMA 1151 * Transmit data using DMA
1135 * 1152 *
1136 */ 1153 */
1137static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs) 1154static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
1138{ 1155{
1156 int iobase = self->io.fir_base;
1139 u8 ctrl; 1157 u8 ctrl;
1140 1158
1141 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1159 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1142#if 1 1160#if 1
1143 /* Disable Rx */ 1161 /* Disable Rx */
1144 register_bank(iobase, 0); 1162 register_bank(iobase, 0);
1145 outb(0x00, iobase+IRCC_LCR_B); 1163 outb(0x00, iobase + IRCC_LCR_B);
1146#endif 1164#endif
1147 register_bank(iobase, 1); 1165 register_bank(iobase, 1);
1148 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1166 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1149 iobase+IRCC_SCE_CFGB); 1167 iobase + IRCC_SCE_CFGB);
1150 1168
1151 self->io.direction = IO_XMIT; 1169 self->io.direction = IO_XMIT;
1152 1170
1153 /* Set BOF additional count for generating the min turn time */ 1171 /* Set BOF additional count for generating the min turn time */
1154 register_bank(iobase, 4); 1172 register_bank(iobase, 4);
1155 outb(bofs & 0xff, iobase+IRCC_BOF_COUNT_LO); 1173 outb(bofs & 0xff, iobase + IRCC_BOF_COUNT_LO);
1156 ctrl = inb(iobase+IRCC_CONTROL) & 0xf0; 1174 ctrl = inb(iobase + IRCC_CONTROL) & 0xf0;
1157 outb(ctrl | ((bofs >> 8) & 0x0f), iobase+IRCC_BOF_COUNT_HI); 1175 outb(ctrl | ((bofs >> 8) & 0x0f), iobase + IRCC_BOF_COUNT_HI);
1158 1176
1159 /* Set max Tx frame size */ 1177 /* Set max Tx frame size */
1160 outb(self->tx_buff.len >> 8, iobase+IRCC_TX_SIZE_HI); 1178 outb(self->tx_buff.len >> 8, iobase + IRCC_TX_SIZE_HI);
1161 outb(self->tx_buff.len & 0xff, iobase+IRCC_TX_SIZE_LO); 1179 outb(self->tx_buff.len & 0xff, iobase + IRCC_TX_SIZE_LO);
1162 1180
1163 /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/ 1181 /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/
1164 1182
1165 /* Enable burst mode chip Tx DMA */ 1183 /* Enable burst mode chip Tx DMA */
1166 register_bank(iobase, 1); 1184 register_bank(iobase, 1);
1167 outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE | 1185 outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
1168 IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB); 1186 IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
1169 1187
1170 /* Setup DMA controller (must be done after enabling chip DMA) */ 1188 /* Setup DMA controller (must be done after enabling chip DMA) */
1171 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, 1189 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
@@ -1174,50 +1192,52 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs)
1174 /* Enable interrupt */ 1192 /* Enable interrupt */
1175 1193
1176 register_bank(iobase, 0); 1194 register_bank(iobase, 0);
1177 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER); 1195 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
1178 outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER); 1196 outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
1179 1197
1180 /* Enable transmit */ 1198 /* Enable transmit */
1181 outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase+IRCC_LCR_B); 1199 outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);
1182} 1200}
1183 1201
1184/* 1202/*
1185 * Function smsc_ircc_dma_xmit_complete (self) 1203 * Function smsc_ircc_dma_xmit_complete (self)
1186 * 1204 *
1187 * The transfer of a frame is finished. This function will only be called 1205 * The transfer of a frame is finished. This function will only be called
1188 * by the interrupt handler 1206 * by the interrupt handler
1189 * 1207 *
1190 */ 1208 */
1191static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase) 1209static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
1192{ 1210{
1211 int iobase = self->io.fir_base;
1212
1193 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1213 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1194#if 0 1214#if 0
1195 /* Disable Tx */ 1215 /* Disable Tx */
1196 register_bank(iobase, 0); 1216 register_bank(iobase, 0);
1197 outb(0x00, iobase+IRCC_LCR_B); 1217 outb(0x00, iobase + IRCC_LCR_B);
1198#endif 1218#endif
1199 register_bank(self->io.fir_base, 1); 1219 register_bank(iobase, 1);
1200 outb(inb(self->io.fir_base+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1220 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1201 self->io.fir_base+IRCC_SCE_CFGB); 1221 iobase + IRCC_SCE_CFGB);
1202 1222
1203 /* Check for underrun! */ 1223 /* Check for underrun! */
1204 register_bank(iobase, 0); 1224 register_bank(iobase, 0);
1205 if (inb(iobase+IRCC_LSR) & IRCC_LSR_UNDERRUN) { 1225 if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
1206 self->stats.tx_errors++; 1226 self->stats.tx_errors++;
1207 self->stats.tx_fifo_errors++; 1227 self->stats.tx_fifo_errors++;
1208 1228
1209 /* Reset error condition */ 1229 /* Reset error condition */
1210 register_bank(iobase, 0); 1230 register_bank(iobase, 0);
1211 outb(IRCC_MASTER_ERROR_RESET, iobase+IRCC_MASTER); 1231 outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
1212 outb(0x00, iobase+IRCC_MASTER); 1232 outb(0x00, iobase + IRCC_MASTER);
1213 } else { 1233 } else {
1214 self->stats.tx_packets++; 1234 self->stats.tx_packets++;
1215 self->stats.tx_bytes += self->tx_buff.len; 1235 self->stats.tx_bytes += self->tx_buff.len;
1216 } 1236 }
1217 1237
1218 /* Check if it's time to change the speed */ 1238 /* Check if it's time to change the speed */
1219 if (self->new_speed) { 1239 if (self->new_speed) {
1220 smsc_ircc_change_speed(self, self->new_speed); 1240 smsc_ircc_change_speed(self, self->new_speed);
1221 self->new_speed = 0; 1241 self->new_speed = 0;
1222 } 1242 }
1223 1243
@@ -1231,31 +1251,32 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase)
1231 * if it starts to receive a frame. 1251 * if it starts to receive a frame.
1232 * 1252 *
1233 */ 1253 */
1234static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase) 1254static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self)
1235{ 1255{
1256 int iobase = self->io.fir_base;
1236#if 0 1257#if 0
1237 /* Turn off chip DMA */ 1258 /* Turn off chip DMA */
1238 register_bank(iobase, 1); 1259 register_bank(iobase, 1);
1239 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1260 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1240 iobase+IRCC_SCE_CFGB); 1261 iobase + IRCC_SCE_CFGB);
1241#endif 1262#endif
1242 1263
1243 /* Disable Tx */ 1264 /* Disable Tx */
1244 register_bank(iobase, 0); 1265 register_bank(iobase, 0);
1245 outb(0x00, iobase+IRCC_LCR_B); 1266 outb(0x00, iobase + IRCC_LCR_B);
1246 1267
1247 /* Turn off chip DMA */ 1268 /* Turn off chip DMA */
1248 register_bank(iobase, 1); 1269 register_bank(iobase, 1);
1249 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1270 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1250 iobase+IRCC_SCE_CFGB); 1271 iobase + IRCC_SCE_CFGB);
1251 1272
1252 self->io.direction = IO_RECV; 1273 self->io.direction = IO_RECV;
1253 self->rx_buff.data = self->rx_buff.head; 1274 self->rx_buff.data = self->rx_buff.head;
1254 1275
1255 /* Set max Rx frame size */ 1276 /* Set max Rx frame size */
1256 register_bank(iobase, 4); 1277 register_bank(iobase, 4);
1257 outb((2050 >> 8) & 0x0f, iobase+IRCC_RX_SIZE_HI); 1278 outb((2050 >> 8) & 0x0f, iobase + IRCC_RX_SIZE_HI);
1258 outb(2050 & 0xff, iobase+IRCC_RX_SIZE_LO); 1279 outb(2050 & 0xff, iobase + IRCC_RX_SIZE_LO);
1259 1280
1260 /* Setup DMA controller */ 1281 /* Setup DMA controller */
1261 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, 1282 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
@@ -1263,83 +1284,83 @@ static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase)
1263 1284
1264 /* Enable burst mode chip Rx DMA */ 1285 /* Enable burst mode chip Rx DMA */
1265 register_bank(iobase, 1); 1286 register_bank(iobase, 1);
1266 outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE | 1287 outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
1267 IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB); 1288 IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
1268 1289
1269 /* Enable interrupt */ 1290 /* Enable interrupt */
1270 register_bank(iobase, 0); 1291 register_bank(iobase, 0);
1271 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER); 1292 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
1272 outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER); 1293 outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
1273
1274 1294
1275 /* Enable receiver */ 1295 /* Enable receiver */
1276 register_bank(iobase, 0); 1296 register_bank(iobase, 0);
1277 outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE, 1297 outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
1278 iobase+IRCC_LCR_B); 1298 iobase + IRCC_LCR_B);
1279 1299
1280 return 0; 1300 return 0;
1281} 1301}
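
[Editor's note] The receive setup programs the 2050-byte maximum Rx frame size as a high/low register pair, keeping only the low four bits of the high byte, before handing the buffer to the DMA controller. A tiny standalone sketch of that split; the register names in the comments refer to the IRCC_RX_SIZE_* offsets used above:

    #include <stdio.h>

    #define RX_SIZE_MAX 2050   /* max Rx frame size used by the driver */

    int main(void)
    {
        unsigned char hi = (RX_SIZE_MAX >> 8) & 0x0f;  /* -> IRCC_RX_SIZE_HI */
        unsigned char lo = RX_SIZE_MAX & 0xff;         /* -> IRCC_RX_SIZE_LO */

        printf("hi=0x%02x lo=0x%02x -> %d\n", hi, lo, (hi << 8) | lo);
        return 0;
    }
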
1282 1302
1283/* 1303/*
1284 * Function smsc_ircc_dma_receive_complete(self, iobase) 1304 * Function smsc_ircc_dma_receive_complete(self)
1285 * 1305 *
1286 * Finished with receiving frames 1306 * Finished with receiving frames
1287 * 1307 *
1288 */ 1308 */
1289static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase) 1309static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1290{ 1310{
1291 struct sk_buff *skb; 1311 struct sk_buff *skb;
1292 int len, msgcnt, lsr; 1312 int len, msgcnt, lsr;
1293 1313 int iobase = self->io.fir_base;
1314
1294 register_bank(iobase, 0); 1315 register_bank(iobase, 0);
1295 1316
1296 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1317 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1297#if 0 1318#if 0
1298 /* Disable Rx */ 1319 /* Disable Rx */
1299 register_bank(iobase, 0); 1320 register_bank(iobase, 0);
1300 outb(0x00, iobase+IRCC_LCR_B); 1321 outb(0x00, iobase + IRCC_LCR_B);
1301#endif 1322#endif
1302 register_bank(iobase, 0); 1323 register_bank(iobase, 0);
1303 outb(inb(iobase+IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase+IRCC_LSAR); 1324 outb(inb(iobase + IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase + IRCC_LSAR);
1304 lsr= inb(iobase+IRCC_LSR); 1325 lsr= inb(iobase + IRCC_LSR);
1305 msgcnt = inb(iobase+IRCC_LCR_B) & 0x08; 1326 msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
1306 1327
1307 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__, 1328 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__,
1308 get_dma_residue(self->io.dma)); 1329 get_dma_residue(self->io.dma));
1309 1330
1310 len = self->rx_buff.truesize - get_dma_residue(self->io.dma); 1331 len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
1311 1332
1312 /* Look for errors 1333 /* Look for errors */
1313 */ 1334 if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
1314
1315 if(lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
1316 self->stats.rx_errors++; 1335 self->stats.rx_errors++;
1317 if(lsr & IRCC_LSR_FRAME_ERROR) self->stats.rx_frame_errors++; 1336 if (lsr & IRCC_LSR_FRAME_ERROR)
1318 if(lsr & IRCC_LSR_CRC_ERROR) self->stats.rx_crc_errors++; 1337 self->stats.rx_frame_errors++;
1319 if(lsr & IRCC_LSR_SIZE_ERROR) self->stats.rx_length_errors++; 1338 if (lsr & IRCC_LSR_CRC_ERROR)
1320 if(lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN)) self->stats.rx_length_errors++; 1339 self->stats.rx_crc_errors++;
1340 if (lsr & IRCC_LSR_SIZE_ERROR)
1341 self->stats.rx_length_errors++;
1342 if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
1343 self->stats.rx_length_errors++;
1321 return; 1344 return;
1322 } 1345 }
1346
1323 /* Remove CRC */ 1347 /* Remove CRC */
1324 if (self->io.speed < 4000000) 1348 len -= self->io.speed < 4000000 ? 2 : 4;
1325 len -= 2;
1326 else
1327 len -= 4;
1328 1349
1329 if ((len < 2) || (len > 2050)) { 1350 if (len < 2 || len > 2050) {
1330 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len); 1351 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
1331 return; 1352 return;
1332 } 1353 }
1333 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len); 1354 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);
1334 1355
1335 skb = dev_alloc_skb(len+1); 1356 skb = dev_alloc_skb(len + 1);
1336 if (!skb) { 1357 if (!skb) {
1337 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n", 1358 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
1338 __FUNCTION__); 1359 __FUNCTION__);
1339 return; 1360 return;
1340 } 1361 }
1341 /* Make sure IP header gets aligned */ 1362 /* Make sure IP header gets aligned */
1342 skb_reserve(skb, 1); 1363 skb_reserve(skb, 1);
1343 1364
1344 memcpy(skb_put(skb, len), self->rx_buff.data, len); 1365 memcpy(skb_put(skb, len), self->rx_buff.data, len);
1345 self->stats.rx_packets++; 1366 self->stats.rx_packets++;
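
[Editor's note] The completion handler now folds the CRC adjustment into a single expression: frames below 4 Mb/s carry a 16-bit CRC, 4 Mb/s FIR frames a 32-bit one, and anything outside 2..2050 bytes after stripping it is rejected as bogus. A small standalone sketch of that length arithmetic; the buffer size and the sample residues are made up for illustration:

    #include <stdio.h>

    #define RX_TRUESIZE 4096   /* illustrative DMA buffer size */
    #define RX_MAX      2050   /* max Rx frame size programmed above */

    /* Returns the usable payload length, or -1 for a bogus frame. */
    static int rx_payload_len(int dma_residue, unsigned int speed)
    {
        int len = RX_TRUESIZE - dma_residue;   /* bytes actually transferred */

        len -= speed < 4000000 ? 2 : 4;        /* strip 16- or 32-bit CRC */

        if (len < 2 || len > RX_MAX)
            return -1;
        return len;
    }

    int main(void)
    {
        printf("%d\n", rx_payload_len(4096 - 60, 115200));   /* 58 */
        printf("%d\n", rx_payload_len(4096 - 60, 4000000));  /* 56 */
        printf("%d\n", rx_payload_len(4096 - 1, 4000000));   /* -1: too short */
        return 0;
    }
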
@@ -1357,7 +1378,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase
1357 * Receive one frame from the infrared port 1378 * Receive one frame from the infrared port
1358 * 1379 *
1359 */ 1380 */
1360static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self) 1381static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1361{ 1382{
1362 int boguscount = 0; 1383 int boguscount = 0;
1363 int iobase; 1384 int iobase;
@@ -1366,20 +1387,20 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1366 1387
1367 iobase = self->io.sir_base; 1388 iobase = self->io.sir_base;
1368 1389
1369 /* 1390 /*
1370 * Receive all characters in Rx FIFO, unwrap and unstuff them. 1391 * Receive all characters in Rx FIFO, unwrap and unstuff them.
1371 * async_unwrap_char will deliver all found frames 1392 * async_unwrap_char will deliver all found frames
1372 */ 1393 */
1373 do { 1394 do {
1374 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 1395 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
1375 inb(iobase+UART_RX)); 1396 inb(iobase + UART_RX));
1376 1397
1377 /* Make sure we don't stay here too long */ 1398 /* Make sure we don't stay here too long */
1378 if (boguscount++ > 32) { 1399 if (boguscount++ > 32) {
1379 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__); 1400 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
1380 break; 1401 break;
1381 } 1402 }
1382 } while (inb(iobase+UART_LSR) & UART_LSR_DR); 1403 } while (inb(iobase + UART_LSR) & UART_LSR_DR);
1383} 1404}
1384 1405
1385 1406
@@ -1397,18 +1418,19 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1397 irqreturn_t ret = IRQ_NONE; 1418 irqreturn_t ret = IRQ_NONE;
1398 1419
1399 if (dev == NULL) { 1420 if (dev == NULL) {
1400 printk(KERN_WARNING "%s: irq %d for unknown device.\n", 1421 printk(KERN_WARNING "%s: irq %d for unknown device.\n",
1401 driver_name, irq); 1422 driver_name, irq);
1402 goto irq_ret; 1423 goto irq_ret;
1403 } 1424 }
1404 self = (struct smsc_ircc_cb *) dev->priv; 1425
1426 self = netdev_priv(dev);
1405 IRDA_ASSERT(self != NULL, return IRQ_NONE;); 1427 IRDA_ASSERT(self != NULL, return IRQ_NONE;);
1406 1428
1407 /* Serialise the interrupt handler in various CPUs, stop Tx path */ 1429 /* Serialise the interrupt handler in various CPUs, stop Tx path */
1408 spin_lock(&self->lock); 1430 spin_lock(&self->lock);
1409 1431
1410 /* Check if we should use the SIR interrupt handler */ 1432 /* Check if we should use the SIR interrupt handler */
1411 if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) { 1433 if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
1412 ret = smsc_ircc_interrupt_sir(dev); 1434 ret = smsc_ircc_interrupt_sir(dev);
1413 goto irq_ret_unlock; 1435 goto irq_ret_unlock;
1414 } 1436 }
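
[Editor's note] Throughout the interrupt paths the open-coded dev->priv cast is replaced by netdev_priv(dev), the accessor for the private area allocated together with the net_device. A hedged standalone sketch of the same layout idea; mock_netdev and mock_netdev_priv() are stand-ins, not the definitions from netdevice.h:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mock of the "private data allocated right behind the public struct"
     * layout that netdev_priv() relies on in the kernel. */
    struct mock_netdev { char name[16]; /* private area follows */ };
    struct mock_priv   { int speed; };

    static void *mock_netdev_priv(struct mock_netdev *dev)
    {
        return (char *)dev + sizeof(*dev);
    }

    int main(void)
    {
        struct mock_netdev *dev = calloc(1, sizeof(*dev) + sizeof(struct mock_priv));
        struct mock_priv *self;

        if (!dev)
            return 1;
        strcpy(dev->name, "irda0");

        self = mock_netdev_priv(dev);   /* instead of: self = dev->priv */
        self->speed = 115200;

        printf("%s: %d\n", dev->name, self->speed);
        free(dev);
        return 0;
    }
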
@@ -1416,25 +1438,25 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1416 iobase = self->io.fir_base; 1438 iobase = self->io.fir_base;
1417 1439
1418 register_bank(iobase, 0); 1440 register_bank(iobase, 0);
1419 iir = inb(iobase+IRCC_IIR); 1441 iir = inb(iobase + IRCC_IIR);
1420 if (iir == 0) 1442 if (iir == 0)
1421 goto irq_ret_unlock; 1443 goto irq_ret_unlock;
1422 ret = IRQ_HANDLED; 1444 ret = IRQ_HANDLED;
1423 1445
1424 /* Disable interrupts */ 1446 /* Disable interrupts */
1425 outb(0, iobase+IRCC_IER); 1447 outb(0, iobase + IRCC_IER);
1426 lcra = inb(iobase+IRCC_LCR_A); 1448 lcra = inb(iobase + IRCC_LCR_A);
1427 lsr = inb(iobase+IRCC_LSR); 1449 lsr = inb(iobase + IRCC_LSR);
1428 1450
1429 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir); 1451 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir);
1430 1452
1431 if (iir & IRCC_IIR_EOM) { 1453 if (iir & IRCC_IIR_EOM) {
1432 if (self->io.direction == IO_RECV) 1454 if (self->io.direction == IO_RECV)
1433 smsc_ircc_dma_receive_complete(self, iobase); 1455 smsc_ircc_dma_receive_complete(self);
1434 else 1456 else
1435 smsc_ircc_dma_xmit_complete(self, iobase); 1457 smsc_ircc_dma_xmit_complete(self);
1436 1458
1437 smsc_ircc_dma_receive(self, iobase); 1459 smsc_ircc_dma_receive(self);
1438 } 1460 }
1439 1461
1440 if (iir & IRCC_IIR_ACTIVE_FRAME) { 1462 if (iir & IRCC_IIR_ACTIVE_FRAME) {
@@ -1444,7 +1466,7 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1444 /* Enable interrupts again */ 1466 /* Enable interrupts again */
1445 1467
1446 register_bank(iobase, 0); 1468 register_bank(iobase, 0);
1447 outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, iobase+IRCC_IER); 1469 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
1448 1470
1449 irq_ret_unlock: 1471 irq_ret_unlock:
1450 spin_unlock(&self->lock); 1472 spin_unlock(&self->lock);
@@ -1459,7 +1481,7 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1459 */ 1481 */
1460static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev) 1482static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1461{ 1483{
1462 struct smsc_ircc_cb *self = dev->priv; 1484 struct smsc_ircc_cb *self = netdev_priv(dev);
1463 int boguscount = 0; 1485 int boguscount = 0;
1464 int iobase; 1486 int iobase;
1465 int iir, lsr; 1487 int iir, lsr;
@@ -1469,14 +1491,14 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1469 1491
1470 iobase = self->io.sir_base; 1492 iobase = self->io.sir_base;
1471 1493
1472 iir = inb(iobase+UART_IIR) & UART_IIR_ID; 1494 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
1473 if (iir == 0) 1495 if (iir == 0)
1474 return IRQ_NONE; 1496 return IRQ_NONE;
1475 while (iir) { 1497 while (iir) {
1476 /* Clear interrupt */ 1498 /* Clear interrupt */
1477 lsr = inb(iobase+UART_LSR); 1499 lsr = inb(iobase + UART_LSR);
1478 1500
1479 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", 1501 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
1480 __FUNCTION__, iir, lsr, iobase); 1502 __FUNCTION__, iir, lsr, iobase);
1481 1503
1482 switch (iir) { 1504 switch (iir) {
@@ -1496,13 +1518,13 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1496 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", 1518 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
1497 __FUNCTION__, iir); 1519 __FUNCTION__, iir);
1498 break; 1520 break;
1499 } 1521 }
1500 1522
1501 /* Make sure we don't stay here too long */ 1523 /* Make sure we don't stay here too long */
1502 if (boguscount++ > 100) 1524 if (boguscount++ > 100)
1503 break; 1525 break;
1504 1526
1505 iir = inb(iobase + UART_IIR) & UART_IIR_ID; 1527 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
1506 } 1528 }
1507 /*spin_unlock(&self->lock);*/ 1529 /*spin_unlock(&self->lock);*/
1508 return IRQ_HANDLED; 1530 return IRQ_HANDLED;
@@ -1529,7 +1551,7 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1529 get_dma_residue(self->io.dma)); 1551 get_dma_residue(self->io.dma));
1530 1552
1531 status = (self->rx_buff.state != OUTSIDE_FRAME); 1553 status = (self->rx_buff.state != OUTSIDE_FRAME);
1532 1554
1533 return status; 1555 return status;
1534} 1556}
1535#endif /* unused */ 1557#endif /* unused */
@@ -1544,19 +1566,16 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1544static int smsc_ircc_net_open(struct net_device *dev) 1566static int smsc_ircc_net_open(struct net_device *dev)
1545{ 1567{
1546 struct smsc_ircc_cb *self; 1568 struct smsc_ircc_cb *self;
1547 int iobase;
1548 char hwname[16]; 1569 char hwname[16];
1549 unsigned long flags; 1570 unsigned long flags;
1550 1571
1551 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1572 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1552 1573
1553 IRDA_ASSERT(dev != NULL, return -1;); 1574 IRDA_ASSERT(dev != NULL, return -1;);
1554 self = (struct smsc_ircc_cb *) dev->priv; 1575 self = netdev_priv(dev);
1555 IRDA_ASSERT(self != NULL, return 0;); 1576 IRDA_ASSERT(self != NULL, return 0;);
1556
1557 iobase = self->io.fir_base;
1558 1577
1559 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1578 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1560 (void *) dev)) { 1579 (void *) dev)) {
1561 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1580 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
1562 __FUNCTION__, self->io.irq); 1581 __FUNCTION__, self->io.irq);
@@ -1568,14 +1587,14 @@ static int smsc_ircc_net_open(struct net_device *dev)
1568 self->io.speed = 0; 1587 self->io.speed = 0;
1569 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 1588 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1570 spin_unlock_irqrestore(&self->lock, flags); 1589 spin_unlock_irqrestore(&self->lock, flags);
1571 1590
1572 /* Give self a hardware name */ 1591 /* Give self a hardware name */
1573 /* It would be cool to offer the chip revision here - Jean II */ 1592 /* It would be cool to offer the chip revision here - Jean II */
1574 sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base); 1593 sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);
1575 1594
1576 /* 1595 /*
1577 * Open new IrLAP layer instance, now that everything should be 1596 * Open new IrLAP layer instance, now that everything should be
1578 * initialized properly 1597 * initialized properly
1579 */ 1598 */
1580 self->irlap = irlap_open(dev, &self->qos, hwname); 1599 self->irlap = irlap_open(dev, &self->qos, hwname);
1581 1600
@@ -1590,7 +1609,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1590 __FUNCTION__, self->io.dma); 1609 __FUNCTION__, self->io.dma);
1591 return -EAGAIN; 1610 return -EAGAIN;
1592 } 1611 }
1593 1612
1594 netif_start_queue(dev); 1613 netif_start_queue(dev);
1595 1614
1596 return 0; 1615 return 0;
@@ -1605,73 +1624,53 @@ static int smsc_ircc_net_open(struct net_device *dev)
1605static int smsc_ircc_net_close(struct net_device *dev) 1624static int smsc_ircc_net_close(struct net_device *dev)
1606{ 1625{
1607 struct smsc_ircc_cb *self; 1626 struct smsc_ircc_cb *self;
1608 int iobase;
1609 1627
1610 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1628 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1611 1629
1612 IRDA_ASSERT(dev != NULL, return -1;); 1630 IRDA_ASSERT(dev != NULL, return -1;);
1613 self = (struct smsc_ircc_cb *) dev->priv; 1631 self = netdev_priv(dev);
1614 IRDA_ASSERT(self != NULL, return 0;); 1632 IRDA_ASSERT(self != NULL, return 0;);
1615
1616 iobase = self->io.fir_base;
1617 1633
1618 /* Stop device */ 1634 /* Stop device */
1619 netif_stop_queue(dev); 1635 netif_stop_queue(dev);
1620 1636
1621 /* Stop and remove instance of IrLAP */ 1637 /* Stop and remove instance of IrLAP */
1622 if (self->irlap) 1638 if (self->irlap)
1623 irlap_close(self->irlap); 1639 irlap_close(self->irlap);
1624 self->irlap = NULL; 1640 self->irlap = NULL;
1625 1641
1626 free_irq(self->io.irq, dev); 1642 free_irq(self->io.irq, dev);
1627
1628 disable_dma(self->io.dma); 1643 disable_dma(self->io.dma);
1629
1630 free_dma(self->io.dma); 1644 free_dma(self->io.dma);
1631 1645
1632 return 0; 1646 return 0;
1633} 1647}
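
[Editor's note] net_open() and net_close() also lose their unused iobase locals, which makes the resource pairing easier to see: the IRQ (and, a little further down, the DMA channel) is claimed on open, and free_irq(), disable_dma() and free_dma() undo it on close. A tiny standalone sketch of that symmetric acquire/release pattern, with the kernel calls mocked out:

    #include <stdio.h>

    /* Hypothetical stand-ins for request_irq()/request_dma() and friends. */
    static int  grab_irq(int irq)  { printf("grab irq %d\n", irq);  return 0; }
    static int  grab_dma(int dma)  { printf("grab dma %d\n", dma);  return 0; }
    static void drop_irq(int irq)  { printf("drop irq %d\n", irq); }
    static void drop_dma(int dma)  { printf("drop dma %d\n", dma); }

    static int dev_open(int irq, int dma)
    {
        if (grab_irq(irq))
            return -1;
        if (grab_dma(dma)) {
            drop_irq(irq);          /* undo in reverse order on failure */
            return -1;
        }
        return 0;
    }

    static void dev_close(int irq, int dma)
    {
        drop_irq(irq);              /* mirror of dev_open */
        drop_dma(dma);
    }

    int main(void)
    {
        if (dev_open(7, 3) == 0)
            dev_close(7, 3);
        return 0;
    }
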
1634 1648
1635 1649static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level)
1636static void smsc_ircc_suspend(struct smsc_ircc_cb *self)
1637{ 1650{
1638 IRDA_MESSAGE("%s, Suspending\n", driver_name); 1651 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1639 1652
1640 if (self->io.suspended) 1653 IRDA_MESSAGE("%s, Suspending\n", driver_name);
1641 return;
1642 1654
1643 smsc_ircc_net_close(self->netdev); 1655 if (level == SUSPEND_DISABLE && !self->io.suspended) {
1656 smsc_ircc_net_close(self->netdev);
1657 self->io.suspended = 1;
1658 }
1644 1659
1645 self->io.suspended = 1; 1660 return 0;
1646} 1661}
1647 1662
1648static void smsc_ircc_wakeup(struct smsc_ircc_cb *self) 1663static int smsc_ircc_resume(struct device *dev, u32 level)
1649{ 1664{
1650 if (!self->io.suspended) 1665 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1651 return;
1652 1666
1653 /* The code was doing a "cli()" here, but this can't be right. 1667 if (level == RESUME_ENABLE && self->io.suspended) {
1654 * If you need protection, do it in net_open with a spinlock
1655 * or give a good reason. - Jean II */
1656 1668
1657 smsc_ircc_net_open(self->netdev); 1669 smsc_ircc_net_open(self->netdev);
1658 1670 self->io.suspended = 0;
1659 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1660}
1661 1671
1662static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data) 1672 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1663{ 1673 }
1664 struct smsc_ircc_cb *self = (struct smsc_ircc_cb*) dev->data;
1665 if (self) {
1666 switch (rqst) {
1667 case PM_SUSPEND:
1668 smsc_ircc_suspend(self);
1669 break;
1670 case PM_RESUME:
1671 smsc_ircc_wakeup(self);
1672 break;
1673 }
1674 }
1675 return 0; 1674 return 0;
1676} 1675}
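
[Editor's note] This is the main functional change in the file: the old pm_register()/pm_dev callback is replaced by driver-model suspend/resume handlers that fetch the control block with dev_get_drvdata() and only act at the SUSPEND_DISABLE and RESUME_ENABLE levels. A hedged standalone sketch of that level-gated, idempotent logic; the level constants and the mock_self structure are illustrative stand-ins, not the kernel definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the PM level constants and the driver's
     * "already suspended" bookkeeping; not the kernel definitions. */
    enum { SUSPEND_DISABLE = 1, RESUME_ENABLE = 1 };

    struct mock_self { int suspended; };

    static int mock_suspend(struct mock_self *self, int level)
    {
        if (level == SUSPEND_DISABLE && !self->suspended) {
            printf("closing device\n");      /* smsc_ircc_net_close() here */
            self->suspended = 1;
        }
        return 0;                            /* other levels: nothing to do */
    }

    static int mock_resume(struct mock_self *self, int level)
    {
        if (level == RESUME_ENABLE && self->suspended) {
            printf("reopening device\n");    /* smsc_ircc_net_open() here */
            self->suspended = 0;
        }
        return 0;
    }

    int main(void)
    {
        struct mock_self self = { 0 };
        mock_suspend(&self, SUSPEND_DISABLE);
        mock_suspend(&self, SUSPEND_DISABLE);   /* repeated suspend is harmless */
        mock_resume(&self, RESUME_ENABLE);
        return 0;
    }

The second suspend call in main() is the point of the io.suspended flag: repeated suspend requests must not close the device twice.
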
1677 1676
@@ -1690,10 +1689,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1690 1689
1691 IRDA_ASSERT(self != NULL, return -1;); 1690 IRDA_ASSERT(self != NULL, return -1;);
1692 1691
1693 iobase = self->io.fir_base; 1692 platform_device_unregister(self->pldev);
1694
1695 if (self->pmdev)
1696 pm_unregister(self->pmdev);
1697 1693
1698 /* Remove netdevice */ 1694 /* Remove netdevice */
1699 unregister_netdev(self->netdev); 1695 unregister_netdev(self->netdev);
@@ -1702,15 +1698,16 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1702 spin_lock_irqsave(&self->lock, flags); 1698 spin_lock_irqsave(&self->lock, flags);
1703 1699
1704 /* Stop interrupts */ 1700 /* Stop interrupts */
1701 iobase = self->io.fir_base;
1705 register_bank(iobase, 0); 1702 register_bank(iobase, 0);
1706 outb(0, iobase+IRCC_IER); 1703 outb(0, iobase + IRCC_IER);
1707 outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER); 1704 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1708 outb(0x00, iobase+IRCC_MASTER); 1705 outb(0x00, iobase + IRCC_MASTER);
1709#if 0 1706#if 0
1710 /* Reset to SIR mode */ 1707 /* Reset to SIR mode */
1711 register_bank(iobase, 1); 1708 register_bank(iobase, 1);
1712 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase+IRCC_SCE_CFGA); 1709 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase + IRCC_SCE_CFGA);
1713 outb(IRCC_CFGB_IR, iobase+IRCC_SCE_CFGB); 1710 outb(IRCC_CFGB_IR, iobase + IRCC_SCE_CFGB);
1714#endif 1711#endif
1715 spin_unlock_irqrestore(&self->lock, flags); 1712 spin_unlock_irqrestore(&self->lock, flags);
1716 1713
@@ -1720,7 +1717,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1720 1717
1721 release_region(self->io.fir_base, self->io.fir_ext); 1718 release_region(self->io.fir_base, self->io.fir_ext);
1722 1719
1723 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1720 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
1724 self->io.sir_base); 1721 self->io.sir_base);
1725 1722
1726 release_region(self->io.sir_base, self->io.sir_ext); 1723 release_region(self->io.sir_base, self->io.sir_ext);
@@ -1728,7 +1725,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1728 if (self->tx_buff.head) 1725 if (self->tx_buff.head)
1729 dma_free_coherent(NULL, self->tx_buff.truesize, 1726 dma_free_coherent(NULL, self->tx_buff.truesize,
1730 self->tx_buff.head, self->tx_buff_dma); 1727 self->tx_buff.head, self->tx_buff_dma);
1731 1728
1732 if (self->rx_buff.head) 1729 if (self->rx_buff.head)
1733 dma_free_coherent(NULL, self->rx_buff.truesize, 1730 dma_free_coherent(NULL, self->rx_buff.truesize,
1734 self->rx_buff.head, self->rx_buff_dma); 1731 self->rx_buff.head, self->rx_buff_dma);
@@ -1744,10 +1741,12 @@ static void __exit smsc_ircc_cleanup(void)
1744 1741
1745 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1742 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1746 1743
1747 for (i=0; i < 2; i++) { 1744 for (i = 0; i < 2; i++) {
1748 if (dev_self[i]) 1745 if (dev_self[i])
1749 smsc_ircc_close(dev_self[i]); 1746 smsc_ircc_close(dev_self[i]);
1750 } 1747 }
1748
1749 driver_unregister(&smsc_ircc_driver);
1751} 1750}
1752 1751
1753/* 1752/*
@@ -1763,34 +1762,34 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1763 1762
1764 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1763 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1765 1764
1766 IRDA_ASSERT(self != NULL, return;); 1765 IRDA_ASSERT(self != NULL, return;);
1767 dev= self->netdev; 1766 dev = self->netdev;
1768 IRDA_ASSERT(dev != NULL, return;); 1767 IRDA_ASSERT(dev != NULL, return;);
1769 dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir; 1768 dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir;
1770 1769
1771 fir_base = self->io.fir_base; 1770 fir_base = self->io.fir_base;
1772 sir_base = self->io.sir_base; 1771 sir_base = self->io.sir_base;
1773 1772
1774 /* Reset everything */ 1773 /* Reset everything */
1775 outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER); 1774 outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);
1776 1775
1777 #if SMSC_IRCC2_C_SIR_STOP 1776 #if SMSC_IRCC2_C_SIR_STOP
1778 /*smsc_ircc_sir_stop(self);*/ 1777 /*smsc_ircc_sir_stop(self);*/
1779 #endif 1778 #endif
1780 1779
1781 register_bank(fir_base, 1); 1780 register_bank(fir_base, 1);
1782 outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base+IRCC_SCE_CFGA); 1781 outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base + IRCC_SCE_CFGA);
1783 1782
1784 /* Initialize UART */ 1783 /* Initialize UART */
1785 outb(UART_LCR_WLEN8, sir_base+UART_LCR); /* Reset DLAB */ 1784 outb(UART_LCR_WLEN8, sir_base + UART_LCR); /* Reset DLAB */
1786 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base+UART_MCR); 1785 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base + UART_MCR);
1787 1786
1788 /* Turn on interrupts */ 1787 /* Turn on interrupts */
1789 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base+UART_IER); 1788 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
1790 1789
1791 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__); 1790 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__);
1792 1791
1793 outb(0x00, fir_base+IRCC_MASTER); 1792 outb(0x00, fir_base + IRCC_MASTER);
1794} 1793}
1795 1794
1796#if SMSC_IRCC2_C_SIR_STOP 1795#if SMSC_IRCC2_C_SIR_STOP
@@ -1802,10 +1801,10 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
1802 iobase = self->io.sir_base; 1801 iobase = self->io.sir_base;
1803 1802
1804 /* Reset UART */ 1803 /* Reset UART */
1805 outb(0, iobase+UART_MCR); 1804 outb(0, iobase + UART_MCR);
1806 1805
1807 /* Turn off interrupts */ 1806 /* Turn off interrupts */
1808 outb(0, iobase+UART_IER); 1807 outb(0, iobase + UART_IER);
1809} 1808}
1810#endif 1809#endif
1811 1810
@@ -1831,16 +1830,16 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1831 /* Finished with frame? */ 1830 /* Finished with frame? */
1832 if (self->tx_buff.len > 0) { 1831 if (self->tx_buff.len > 0) {
1833 /* Write data left in transmit buffer */ 1832 /* Write data left in transmit buffer */
1834 actual = smsc_ircc_sir_write(iobase, self->io.fifo_size, 1833 actual = smsc_ircc_sir_write(iobase, self->io.fifo_size,
1835 self->tx_buff.data, self->tx_buff.len); 1834 self->tx_buff.data, self->tx_buff.len);
1836 self->tx_buff.data += actual; 1835 self->tx_buff.data += actual;
1837 self->tx_buff.len -= actual; 1836 self->tx_buff.len -= actual;
1838 } else { 1837 } else {
1839 1838
1840 /*if (self->tx_buff.len ==0) {*/ 1839 /*if (self->tx_buff.len ==0) {*/
1841 1840
1842 /* 1841 /*
1843 * Now serial buffer is almost free & we can start 1842 * Now serial buffer is almost free & we can start
1844 * transmission of another packet. But first we must check 1843 * transmission of another packet. But first we must check
1845 * if we need to change the speed of the hardware 1844 * if we need to change the speed of the hardware
1846 */ 1845 */
@@ -1856,21 +1855,19 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1856 } 1855 }
1857 self->stats.tx_packets++; 1856 self->stats.tx_packets++;
1858 1857
1859 if(self->io.speed <= 115200) { 1858 if (self->io.speed <= 115200) {
1860 /* 1859 /*
1861 * Reset Rx FIFO to make sure that all reflected transmit data 1860 * Reset Rx FIFO to make sure that all reflected transmit data
1862 * is discarded. This is needed for half duplex operation 1861 * is discarded. This is needed for half duplex operation
1863 */ 1862 */
1864 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR; 1863 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
1865 if (self->io.speed < 38400) 1864 fcr |= self->io.speed < 38400 ?
1866 fcr |= UART_FCR_TRIGGER_1; 1865 UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
1867 else
1868 fcr |= UART_FCR_TRIGGER_14;
1869 1866
1870 outb(fcr, iobase+UART_FCR); 1867 outb(fcr, iobase + UART_FCR);
1871 1868
1872 /* Turn on receive interrupts */ 1869 /* Turn on receive interrupts */
1873 outb(UART_IER_RDI, iobase+UART_IER); 1870 outb(UART_IER_RDI, iobase + UART_IER);
1874 } 1871 }
1875 } 1872 }
1876} 1873}
@@ -1884,17 +1881,17 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1884static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) 1881static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1885{ 1882{
1886 int actual = 0; 1883 int actual = 0;
1887 1884
1888 /* Tx FIFO should be empty! */ 1885 /* Tx FIFO should be empty! */
1889 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { 1886 if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
1890 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__); 1887 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__);
1891 return 0; 1888 return 0;
1892 } 1889 }
1893 1890
1894 /* Fill FIFO with current frame */ 1891 /* Fill FIFO with current frame */
1895 while ((fifo_size-- > 0) && (actual < len)) { 1892 while (fifo_size-- > 0 && actual < len) {
1896 /* Transmit next byte */ 1893 /* Transmit next byte */
1897 outb(buf[actual], iobase+UART_TX); 1894 outb(buf[actual], iobase + UART_TX);
1898 actual++; 1895 actual++;
1899 } 1896 }
1900 return actual; 1897 return actual;
@@ -1921,20 +1918,21 @@ static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
1921static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self) 1918static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
1922{ 1919{
1923 unsigned int i; 1920 unsigned int i;
1924 1921
1925 IRDA_ASSERT(self != NULL, return;); 1922 IRDA_ASSERT(self != NULL, return;);
1926 1923
1927 for(i=0; smsc_transceivers[i].name!=NULL; i++) 1924 for (i = 0; smsc_transceivers[i].name != NULL; i++)
1928 if((*smsc_transceivers[i].probe)(self->io.fir_base)) { 1925 if (smsc_transceivers[i].probe(self->io.fir_base)) {
1929 IRDA_MESSAGE(" %s transceiver found\n", 1926 IRDA_MESSAGE(" %s transceiver found\n",
1930 smsc_transceivers[i].name); 1927 smsc_transceivers[i].name);
1931 self->transceiver= i+1; 1928 self->transceiver= i + 1;
1932 return; 1929 return;
1933 } 1930 }
1931
1934 IRDA_MESSAGE("No transceiver found. Defaulting to %s\n", 1932 IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
1935 smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name); 1933 smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
1936 1934
1937 self->transceiver= SMSC_IRCC2_C_DEFAULT_TRANSCEIVER; 1935 self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
1938} 1936}
1939 1937
1940 1938
@@ -1947,9 +1945,10 @@ static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
1947static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed) 1945static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed)
1948{ 1946{
1949 unsigned int trx; 1947 unsigned int trx;
1950 1948
1951 trx = self->transceiver; 1949 trx = self->transceiver;
1952 if(trx>0) (*smsc_transceivers[trx-1].set_for_speed)(self->io.fir_base, speed); 1950 if (trx > 0)
1951 smsc_transceivers[trx - 1].set_for_speed(self->io.fir_base, speed);
1953} 1952}
1954 1953
1955/* 1954/*
@@ -1977,16 +1976,14 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
1977 1976
1978static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self) 1977static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
1979{ 1978{
1980 int iobase; 1979 int iobase = self->io.sir_base;
1981 int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US; 1980 int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US;
1982 1981
1983 iobase = self->io.sir_base;
1984
1985 /* Calibrated busy loop */ 1982 /* Calibrated busy loop */
1986 while((count-- > 0) && !(inb(iobase+UART_LSR) & UART_LSR_TEMT)) 1983 while (count-- > 0 && !(inb(iobase + UART_LSR) & UART_LSR_TEMT))
1987 udelay(1); 1984 udelay(1);
1988 1985
1989 if(count == 0) 1986 if (count == 0)
1990 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__); 1987 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
1991} 1988}
1992 1989
@@ -1998,40 +1995,42 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
1998 1995
1999static int __init smsc_ircc_look_for_chips(void) 1996static int __init smsc_ircc_look_for_chips(void)
2000{ 1997{
2001 smsc_chip_address_t *address; 1998 struct smsc_chip_address *address;
2002 char *type; 1999 char *type;
2003 unsigned int cfg_base, found; 2000 unsigned int cfg_base, found;
2004 2001
2005 found = 0; 2002 found = 0;
2006 address = possible_addresses; 2003 address = possible_addresses;
2007 2004
2008 while(address->cfg_base){ 2005 while (address->cfg_base) {
2009 cfg_base = address->cfg_base; 2006 cfg_base = address->cfg_base;
2010 2007
2011 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/ 2008 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/
2012 2009
2013 if( address->type & SMSCSIO_TYPE_FDC){ 2010 if (address->type & SMSCSIO_TYPE_FDC) {
2014 type = "FDC"; 2011 type = "FDC";
2015 if((address->type) & SMSCSIO_TYPE_FLAT) { 2012 if (address->type & SMSCSIO_TYPE_FLAT)
2016 if(!smsc_superio_flat(fdc_chips_flat,cfg_base, type)) found++; 2013 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, type))
2017 } 2014 found++;
2018 if((address->type) & SMSCSIO_TYPE_PAGED) { 2015
2019 if(!smsc_superio_paged(fdc_chips_paged,cfg_base, type)) found++; 2016 if (address->type & SMSCSIO_TYPE_PAGED)
2020 } 2017 if (!smsc_superio_paged(fdc_chips_paged, cfg_base, type))
2018 found++;
2021 } 2019 }
2022 if( address->type & SMSCSIO_TYPE_LPC){ 2020 if (address->type & SMSCSIO_TYPE_LPC) {
2023 type = "LPC"; 2021 type = "LPC";
2024 if((address->type) & SMSCSIO_TYPE_FLAT) { 2022 if (address->type & SMSCSIO_TYPE_FLAT)
2025 if(!smsc_superio_flat(lpc_chips_flat,cfg_base,type)) found++; 2023 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, type))
2026 } 2024 found++;
2027 if((address->type) & SMSCSIO_TYPE_PAGED) { 2025
2028 if(!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC")) found++; 2026 if (address->type & SMSCSIO_TYPE_PAGED)
2029 } 2027 if (!smsc_superio_paged(lpc_chips_paged, cfg_base, type))
2028 found++;
2030 } 2029 }
2031 address++; 2030 address++;
2032 } 2031 }
2033 return found; 2032 return found;
2034} 2033}
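
[Editor's note] smsc_ircc_look_for_chips() walks a table of candidate configuration ports and, for each entry, tries the flat and paged register models for the FDC- and LPC-style parts, counting the chips that answer. A standalone sketch of the same table-driven probe; the addresses, type bits and probe results are invented for illustration:

    #include <stdio.h>

    #define TYPE_FDC   0x01
    #define TYPE_LPC   0x02
    #define TYPE_FLAT  0x04
    #define TYPE_PAGED 0x08

    struct probe_addr { unsigned short cfg_base; unsigned char type; };

    /* Illustrative candidate list; a zero cfg_base terminates the table. */
    static const struct probe_addr candidates[] = {
        { 0x3f0, TYPE_FDC | TYPE_FLAT },
        { 0x370, TYPE_FDC | TYPE_PAGED },
        { 0x2e,  TYPE_LPC | TYPE_FLAT | TYPE_PAGED },
        { 0 }
    };

    /* Pretend only the 0x2e port answers to the flat probe. */
    static int try_flat(unsigned short base)  { return base == 0x2e ? 0 : -1; }
    static int try_paged(unsigned short base) { (void)base; return -1; }

    int main(void)
    {
        const struct probe_addr *a;
        int found = 0;

        for (a = candidates; a->cfg_base; a++) {
            if ((a->type & TYPE_FLAT)  && try_flat(a->cfg_base) == 0)
                found++;
            if ((a->type & TYPE_PAGED) && try_paged(a->cfg_base) == 0)
                found++;
        }
        printf("found %d chip(s)\n", found);
        return 0;
    }
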
2035 2034
2036/* 2035/*
2037 * Function smsc_superio_flat (chip, base, type) 2036 * Function smsc_superio_flat (chip, base, type)
@@ -2039,7 +2038,7 @@ static int __init smsc_ircc_look_for_chips(void)
2039 * Try to get configuration of a smc SuperIO chip with flat register model 2038 * Try to get configuration of a smc SuperIO chip with flat register model
2040 * 2039 *
2041 */ 2040 */
2042static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfgbase, char *type) 2041static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfgbase, char *type)
2043{ 2042{
2044 unsigned short firbase, sirbase; 2043 unsigned short firbase, sirbase;
2045 u8 mode, dma, irq; 2044 u8 mode, dma, irq;
@@ -2047,39 +2046,37 @@ static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg
2047 2046
2048 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2047 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2049 2048
2050 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type)==NULL) 2049 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
2051 return ret; 2050 return ret;
2052 2051
2053 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase); 2052 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
2054 mode = inb(cfgbase+1); 2053 mode = inb(cfgbase + 1);
2055 2054
2056 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/ 2055 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/
2057 2056
2058 if(!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA)) 2057 if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
2059 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__); 2058 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__);
2060 2059
2061 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase); 2060 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
2062 sirbase = inb(cfgbase+1) << 2; 2061 sirbase = inb(cfgbase + 1) << 2;
2063 2062
2064 /* FIR iobase */ 2063 /* FIR iobase */
2065 outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase); 2064 outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase);
2066 firbase = inb(cfgbase+1) << 3; 2065 firbase = inb(cfgbase + 1) << 3;
2067 2066
2068 /* DMA */ 2067 /* DMA */
2069 outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase); 2068 outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase);
2070 dma = inb(cfgbase+1) & SMSCSIOFLAT_FIRDMASELECT_MASK; 2069 dma = inb(cfgbase + 1) & SMSCSIOFLAT_FIRDMASELECT_MASK;
2071 2070
2072 /* IRQ */ 2071 /* IRQ */
2073 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase); 2072 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
2074 irq = inb(cfgbase+1) & SMSCSIOFLAT_UART2IRQSELECT_MASK; 2073 irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
2075 2074
2076 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode); 2075 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode);
2077 2076
2078 if (firbase) { 2077 if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
2079 if (smsc_ircc_open(firbase, sirbase, dma, irq) == 0) 2078 ret = 0;
2080 ret=0; 2079
2081 }
2082
2083 /* Exit configuration */ 2080 /* Exit configuration */
2084 outb(SMSCSIO_CFGEXITKEY, cfgbase); 2081 outb(SMSCSIO_CFGEXITKEY, cfgbase);
2085 2082
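
[Editor's note] smsc_superio_flat() reads the chip's resources through the usual SuperIO index/data pair: write a register index to cfgbase, read the value at cfgbase + 1, and shift the stored SIR and FIR bases back up (<< 2 and << 3). A standalone sketch of that access pattern; the register indices and stored values are invented and the port I/O is mocked:

    #include <stdio.h>

    /* Hypothetical register file standing in for the SuperIO config space;
     * 0x25 and 0x2b are illustrative indices, not the real SMSC ones. */
    static unsigned char regs[256] = { [0x25] = 0xbe, [0x2b] = 0x5d };
    static unsigned char index_reg;

    static void mock_outb(unsigned char v, int port, int cfgbase)
    {
        if (port == cfgbase)
            index_reg = v;          /* select register */
    }

    static unsigned char mock_inb(int port, int cfgbase)
    {
        return port == cfgbase + 1 ? regs[index_reg] : 0xff;
    }

    int main(void)
    {
        int cfgbase = 0x2e;
        unsigned short sirbase, firbase;

        mock_outb(0x25, cfgbase, cfgbase);              /* UART2 base index */
        sirbase = mock_inb(cfgbase + 1, cfgbase) << 2;  /* hardware stores it >> 2 */

        mock_outb(0x2b, cfgbase, cfgbase);              /* FIR base index */
        firbase = mock_inb(cfgbase + 1, cfgbase) << 3;  /* hardware stores it >> 3 */

        printf("sir 0x%03x, fir 0x%03x\n", sirbase, firbase);
        return 0;
    }
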
@@ -2092,26 +2089,26 @@ static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg
2092 * Try to get configuration of a smc SuperIO chip with paged register model 2089 * Try to get configuration of a smc SuperIO chip with paged register model
2093 * 2090 *
2094 */ 2091 */
2095static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type) 2092static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type)
2096{ 2093{
2097 unsigned short fir_io, sir_io; 2094 unsigned short fir_io, sir_io;
2098 int ret = -ENODEV; 2095 int ret = -ENODEV;
2099 2096
2100 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2097 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2101 2098
2102 if (smsc_ircc_probe(cfg_base,0x20,chips,type)==NULL) 2099 if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
2103 return ret; 2100 return ret;
2104 2101
2105 /* Select logical device (UART2) */ 2102 /* Select logical device (UART2) */
2106 outb(0x07, cfg_base); 2103 outb(0x07, cfg_base);
2107 outb(0x05, cfg_base + 1); 2104 outb(0x05, cfg_base + 1);
2108 2105
2109 /* SIR iobase */ 2106 /* SIR iobase */
2110 outb(0x60, cfg_base); 2107 outb(0x60, cfg_base);
2111 sir_io = inb(cfg_base + 1) << 8; 2108 sir_io = inb(cfg_base + 1) << 8;
2112 outb(0x61, cfg_base); 2109 outb(0x61, cfg_base);
2113 sir_io |= inb(cfg_base + 1); 2110 sir_io |= inb(cfg_base + 1);
2114 2111
2115 /* Read FIR base */ 2112 /* Read FIR base */
2116 outb(0x62, cfg_base); 2113 outb(0x62, cfg_base);
2117 fir_io = inb(cfg_base + 1) << 8; 2114 fir_io = inb(cfg_base + 1) << 8;
@@ -2119,11 +2116,9 @@ static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cf
2119 fir_io |= inb(cfg_base + 1); 2116 fir_io |= inb(cfg_base + 1);
2120 outb(0x2b, cfg_base); /* ??? */ 2117 outb(0x2b, cfg_base); /* ??? */
2121 2118
2122 if (fir_io) { 2119 if (fir_io && smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0)
2123 if (smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0) 2120 ret = 0;
2124 ret=0; 2121
2125 }
2126
2127 /* Exit configuration */ 2122 /* Exit configuration */
2128 outb(SMSCSIO_CFGEXITKEY, cfg_base); 2123 outb(SMSCSIO_CFGEXITKEY, cfg_base);
2129 2124
@@ -2131,21 +2126,17 @@ static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cf
2131} 2126}
2132 2127
2133 2128
2134static int __init smsc_access(unsigned short cfg_base,unsigned char reg) 2129static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
2135{ 2130{
2136 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2131 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2137 2132
2138 outb(reg, cfg_base); 2133 outb(reg, cfg_base);
2139 2134 return inb(cfg_base) != reg ? -1 : 0;
2140 if (inb(cfg_base)!=reg)
2141 return -1;
2142
2143 return 0;
2144} 2135}
2145 2136
2146static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type) 2137static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
2147{ 2138{
2148 u8 devid,xdevid,rev; 2139 u8 devid, xdevid, rev;
2149 2140
2150 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2141 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2151 2142
@@ -2158,7 +2149,7 @@ static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg
2158 2149
2159 outb(reg, cfg_base); 2150 outb(reg, cfg_base);
2160 2151
2161 xdevid=inb(cfg_base+1); 2152 xdevid = inb(cfg_base + 1);
2162 2153
2163 /* Enter configuration */ 2154 /* Enter configuration */
2164 2155
@@ -2168,51 +2159,49 @@ static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg
2168 if (smsc_access(cfg_base,0x55)) /* send second key and check */ 2159 if (smsc_access(cfg_base,0x55)) /* send second key and check */
2169 return NULL; 2160 return NULL;
2170 #endif 2161 #endif
2171 2162
2172 /* probe device ID */ 2163 /* probe device ID */
2173 2164
2174 if (smsc_access(cfg_base,reg)) 2165 if (smsc_access(cfg_base, reg))
2175 return NULL; 2166 return NULL;
2176 2167
2177 devid=inb(cfg_base+1); 2168 devid = inb(cfg_base + 1);
2178
2179 if (devid==0) /* typical value for unused port */
2180 return NULL;
2181 2169
2182 if (devid==0xff) /* typical value for unused port */ 2170 if (devid == 0 || devid == 0xff) /* typical values for unused port */
2183 return NULL; 2171 return NULL;
2184 2172
2185 /* probe revision ID */ 2173 /* probe revision ID */
2186 2174
2187 if (smsc_access(cfg_base,reg+1)) 2175 if (smsc_access(cfg_base, reg + 1))
2188 return NULL; 2176 return NULL;
2189 2177
2190 rev=inb(cfg_base+1); 2178 rev = inb(cfg_base + 1);
2191 2179
2192 if (rev>=128) /* i think this will make no sense */ 2180 if (rev >= 128) /* i think this will make no sense */
2193 return NULL; 2181 return NULL;
2194 2182
2195 if (devid==xdevid) /* protection against false positives */ 2183 if (devid == xdevid) /* protection against false positives */
2196 return NULL; 2184 return NULL;
2197 2185
2198 /* Check for expected device ID; are there others? */ 2186 /* Check for expected device ID; are there others? */
2199 2187
2200 while(chip->devid!=devid) { 2188 while (chip->devid != devid) {
2201 2189
2202 chip++; 2190 chip++;
2203 2191
2204 if (chip->name==NULL) 2192 if (chip->name == NULL)
2205 return NULL; 2193 return NULL;
2206 } 2194 }
2207 2195
2208 IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",devid,rev,cfg_base,type,chip->name); 2196 IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
2197 devid, rev, cfg_base, type, chip->name);
2209 2198
2210 if (chip->rev>rev){ 2199 if (chip->rev > rev) {
2211 IRDA_MESSAGE("Revision higher than expected\n"); 2200 IRDA_MESSAGE("Revision higher than expected\n");
2212 return NULL; 2201 return NULL;
2213 } 2202 }
2214 2203
2215 if (chip->flags&NoIRDA) 2204 if (chip->flags & NoIRDA)
2216 IRDA_MESSAGE("chipset does not support IRDA\n"); 2205 IRDA_MESSAGE("chipset does not support IRDA\n");
2217 2206
2218 return chip; 2207 return chip;
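
[Editor's note] smsc_ircc_probe() now folds the two "unused port" checks into one (a device ID of 0x00 or 0xff), keeps the xdevid comparison as a guard against reading the same bus value in and out of configuration mode, and then walks the chip table for a matching ID. A standalone sketch of that identification logic with invented IDs and placeholder chip names:

    #include <stdio.h>

    struct chip { const char *name; unsigned char devid; };

    /* Illustrative ID table; a NULL name terminates it. */
    static const struct chip chips[] = {
        { "example-flat-chip", 0x03 },
        { "example-lpc-chip",  0x5a },
        { NULL, 0 }
    };

    static const struct chip *identify(unsigned char devid, unsigned char xdevid)
    {
        const struct chip *c;

        if (devid == 0x00 || devid == 0xff)   /* typical values for an unused port */
            return NULL;
        if (devid == xdevid)                  /* same value outside config mode: bogus */
            return NULL;

        for (c = chips; c->name; c++)
            if (c->devid == devid)
                return c;
        return NULL;
    }

    int main(void)
    {
        const struct chip *c = identify(0x5a, 0xff);
        printf("%s\n", c ? c->name : "no chip");
        return 0;
    }
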
@@ -2226,8 +2215,8 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
2226 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2215 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2227 __FUNCTION__, cfg_base); 2216 __FUNCTION__, cfg_base);
2228 } else { 2217 } else {
2229 if (!smsc_superio_flat(fdc_chips_flat,cfg_base,"FDC") 2218 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
2230 ||!smsc_superio_paged(fdc_chips_paged,cfg_base,"FDC")) 2219 !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
2231 ret = 0; 2220 ret = 0;
2232 2221
2233 release_region(cfg_base, 2); 2222 release_region(cfg_base, 2);
@@ -2244,9 +2233,10 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2244 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2233 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2245 __FUNCTION__, cfg_base); 2234 __FUNCTION__, cfg_base);
2246 } else { 2235 } else {
2247 if (!smsc_superio_flat(lpc_chips_flat,cfg_base,"LPC") 2236 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
2248 ||!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC")) 2237 !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
2249 ret = 0; 2238 ret = 0;
2239
2250 release_region(cfg_base, 2); 2240 release_region(cfg_base, 2);
2251 } 2241 }
2252 return ret; 2242 return ret;
@@ -2269,18 +2259,23 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2269static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed) 2259static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
2270{ 2260{
2271 unsigned long jiffies_now, jiffies_timeout; 2261 unsigned long jiffies_now, jiffies_timeout;
2272 u8 val; 2262 u8 val;
2273 2263
2274 jiffies_now= jiffies; 2264 jiffies_now = jiffies;
2275 jiffies_timeout= jiffies+SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES; 2265 jiffies_timeout = jiffies + SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES;
2276 2266
2277 /* ATC */ 2267 /* ATC */
2278 register_bank(fir_base, 4); 2268 register_bank(fir_base, 4);
2279 outb((inb(fir_base+IRCC_ATC) & IRCC_ATC_MASK) |IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE, fir_base+IRCC_ATC); 2269 outb((inb(fir_base + IRCC_ATC) & IRCC_ATC_MASK) | IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE,
2280 while((val=(inb(fir_base+IRCC_ATC) & IRCC_ATC_nPROGREADY)) && !time_after(jiffies, jiffies_timeout)); 2270 fir_base + IRCC_ATC);
2281 if(val) 2271
2272 while ((val = (inb(fir_base + IRCC_ATC) & IRCC_ATC_nPROGREADY)) &&
2273 !time_after(jiffies, jiffies_timeout))
2274 /* empty */;
2275
2276 if (val)
2282 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__, 2277 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__,
2283 inb(fir_base+IRCC_ATC)); 2278 inb(fir_base + IRCC_ATC));
2284} 2279}
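
[Editor's note] The ATC programming wait keeps its shape after the reformat: poll the nPROGREADY bit until it clears or a jiffies deadline passes, then warn if it never cleared. A standalone sketch of the same bounded-poll idiom, using wall-clock time in place of jiffies and a simulated ready bit:

    #include <stdio.h>
    #include <time.h>

    /* Simulated hardware bit: pretend it clears after a few polls. */
    static int polls;
    static int prog_ready_busy(void) { return ++polls < 5; }

    int main(void)
    {
        /* Deadline roughly 10 ms from now; stands in for
         * jiffies + SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES. */
        clock_t deadline = clock() + CLOCKS_PER_SEC / 100;
        int busy;

        while ((busy = prog_ready_busy()) && clock() < deadline)
            ;   /* empty body, as in the driver's while loop */

        if (busy)
            printf("timed out waiting for ATC\n");
        else
            printf("ATC ready after %d polls\n", polls);
        return 0;
    }
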
2285 2280
2286/* 2281/*
@@ -2298,34 +2293,32 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base)
2298/* 2293/*
2299 * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed) 2294 * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed)
2300 * 2295 *
2301 * Set transceiver 2296 * Set transceiver
2302 * 2297 *
2303 */ 2298 */
2304 2299
2305static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed) 2300static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed)
2306{ 2301{
2307 u8 fast_mode; 2302 u8 fast_mode;
2308 2303
2309 switch(speed) 2304 switch (speed) {
2310 { 2305 default:
2311 default: 2306 case 576000 :
2312 case 576000 : 2307 fast_mode = 0;
2313 fast_mode = 0;
2314 break; 2308 break;
2315 case 1152000 : 2309 case 1152000 :
2316 case 4000000 : 2310 case 4000000 :
2317 fast_mode = IRCC_LCR_A_FAST; 2311 fast_mode = IRCC_LCR_A_FAST;
2318 break; 2312 break;
2319
2320 } 2313 }
2321 register_bank(fir_base, 0); 2314 register_bank(fir_base, 0);
2322 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A); 2315 outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
2323} 2316}
2324 2317
2325/* 2318/*
2326 * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base) 2319 * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base)
2327 * 2320 *
2328 * Probe transceiver 2321 * Probe transceiver
2329 * 2322 *
2330 */ 2323 */
2331 2324
@@ -2337,35 +2330,34 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base)
2337/* 2330/*
2338 * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed) 2331 * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed)
2339 * 2332 *
2340 * Set transceiver 2333 * Set transceiver
2341 * 2334 *
2342 */ 2335 */
2343 2336
2344static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed) 2337static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed)
2345{ 2338{
2346 u8 fast_mode; 2339 u8 fast_mode;
2347 2340
2348 switch(speed) 2341 switch (speed) {
2349 { 2342 default:
2350 default: 2343 case 576000 :
2351 case 576000 : 2344 fast_mode = 0;
2352 fast_mode = 0;
2353 break; 2345 break;
2354 case 1152000 : 2346 case 1152000 :
2355 case 4000000 : 2347 case 4000000 :
2356 fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA; 2348 fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA;
2357 break; 2349 break;
2358 2350
2359 } 2351 }
2360 /* This causes an interrupt */ 2352 /* This causes an interrupt */
2361 register_bank(fir_base, 0); 2353 register_bank(fir_base, 0);
2362 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A); 2354 outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
2363} 2355}
2364 2356
2365/* 2357/*
2366 * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base) 2358 * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base)
2367 * 2359 *
2368 * Probe transceiver 2360 * Probe transceiver
2369 * 2361 *
2370 */ 2362 */
2371 2363
@@ -2377,20 +2369,3 @@ static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base)
2377 2369
2378module_init(smsc_ircc_init); 2370module_init(smsc_ircc_init);
2379module_exit(smsc_ircc_cleanup); 2371module_exit(smsc_ircc_cleanup);
2380
2381MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
2382MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
2383MODULE_LICENSE("GPL");
2384
2385module_param(ircc_dma, int, 0);
2386MODULE_PARM_DESC(ircc_dma, "DMA channel");
2387module_param(ircc_irq, int, 0);
2388MODULE_PARM_DESC(ircc_irq, "IRQ line");
2389module_param(ircc_fir, int, 0);
2390MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
2391module_param(ircc_sir, int, 0);
2392MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
2393module_param(ircc_cfg, int, 0);
2394MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
2395module_param(ircc_transceiver, int, 0);
2396MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 458611cc0d40..0c36286d87f7 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $ 2 * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $
3 * 3 *
4 * Description: Definitions for the SMC IrCC chipset 4 * Description: Definitions for the SMC IrCC chipset
5 * Status: Experimental. 5 * Status: Experimental.
@@ -9,25 +9,25 @@
9 * All Rights Reserved. 9 * All Rights Reserved.
10 * 10 *
11 * Based on smc-ircc.h: 11 * Based on smc-ircc.h:
12 * 12 *
13 * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no> 13 * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no>
14 * Copyright (c) 1998-1999, Thomas Davis (tadavis@jps.net> 14 * Copyright (c) 1998-1999, Thomas Davis (tadavis@jps.net>
15 * All Rights Reserved 15 * All Rights Reserved
16 * 16 *
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as 19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of 20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version. 21 * the License, or (at your option) any later version.
22 * 22 *
23 * This program is distributed in the hope that it will be useful, 23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of 24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details. 26 * GNU General Public License for more details.
27 * 27 *
28 * You should have received a copy of the GNU General Public License 28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
31 * MA 02111-1307 USA 31 * MA 02111-1307 USA
32 * 32 *
33 ********************************************************************/ 33 ********************************************************************/
@@ -112,10 +112,10 @@
112 112
113#define IRCC_CFGA_COM 0x00 113#define IRCC_CFGA_COM 0x00
114#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87 114#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87
115#define IRCC_CFGA_IRDA_SIR_A 0x08 115#define IRCC_CFGA_IRDA_SIR_A 0x08
116#define IRCC_CFGA_ASK_SIR 0x10 116#define IRCC_CFGA_ASK_SIR 0x10
117#define IRCC_CFGA_IRDA_SIR_B 0x18 117#define IRCC_CFGA_IRDA_SIR_B 0x18
118#define IRCC_CFGA_IRDA_HDLC 0x20 118#define IRCC_CFGA_IRDA_HDLC 0x20
119#define IRCC_CFGA_IRDA_4PPM 0x28 119#define IRCC_CFGA_IRDA_4PPM 0x28
120#define IRCC_CFGA_CONSUMER 0x30 120#define IRCC_CFGA_CONSUMER 0x30
121#define IRCC_CFGA_RAW_IR 0x38 121#define IRCC_CFGA_RAW_IR 0x38
@@ -130,7 +130,7 @@
130#define IRCC_CFGB_LPBCK_TX_CRC 0x10 130#define IRCC_CFGB_LPBCK_TX_CRC 0x10
131#define IRCC_CFGB_NOWAIT 0x08 131#define IRCC_CFGB_NOWAIT 0x08
132#define IRCC_CFGB_STRING_MOVE 0x04 132#define IRCC_CFGB_STRING_MOVE 0x04
133#define IRCC_CFGB_DMA_BURST 0x02 133#define IRCC_CFGB_DMA_BURST 0x02
134#define IRCC_CFGB_DMA_ENABLE 0x01 134#define IRCC_CFGB_DMA_ENABLE 0x01
135 135
136#define IRCC_CFGB_MUX_COM 0x00 136#define IRCC_CFGB_MUX_COM 0x00
@@ -141,11 +141,11 @@
141/* Register block 3 - Identification Registers! */ 141/* Register block 3 - Identification Registers! */
142#define IRCC_ID_HIGH 0x00 /* 0x10 */ 142#define IRCC_ID_HIGH 0x00 /* 0x10 */
143#define IRCC_ID_LOW 0x01 /* 0xB8 */ 143#define IRCC_ID_LOW 0x01 /* 0xB8 */
144#define IRCC_CHIP_ID 0x02 /* 0xF1 */ 144#define IRCC_CHIP_ID 0x02 /* 0xF1 */
145#define IRCC_VERSION 0x03 /* 0x01 */ 145#define IRCC_VERSION 0x03 /* 0x01 */
146#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */ 146#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
147#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */ 147#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */
148#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */ 148#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */
149 149
150/* Register block 4 - IrDA */ 150/* Register block 4 - IrDA */
151#define IRCC_CONTROL 0x00 151#define IRCC_CONTROL 0x00
@@ -163,10 +163,10 @@
163 163
164/* Register block 5 - IrDA */ 164/* Register block 5 - IrDA */
165#define IRCC_ATC 0x00 165#define IRCC_ATC 0x00
166#define IRCC_ATC_nPROGREADY 0x80 166#define IRCC_ATC_nPROGREADY 0x80
167#define IRCC_ATC_SPEED 0x40 167#define IRCC_ATC_SPEED 0x40
168#define IRCC_ATC_ENABLE 0x20 168#define IRCC_ATC_ENABLE 0x20
169#define IRCC_ATC_MASK 0xE0 169#define IRCC_ATC_MASK 0xE0
170 170
171 171
172#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01 172#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01
@@ -178,8 +178,8 @@
178 */ 178 */
179 179
180#define SMSC_IRCC2_MAX_SIR_SPEED 115200 180#define SMSC_IRCC2_MAX_SIR_SPEED 115200
181#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8 181#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8
182#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8 182#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8
183#define SMSC_IRCC2_FIFO_SIZE 16 183#define SMSC_IRCC2_FIFO_SIZE 16
184#define SMSC_IRCC2_FIFO_THRESHOLD 64 184#define SMSC_IRCC2_FIFO_THRESHOLD 64
185/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */ 185/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
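For readers decoding the identification block above: IRCC_INTERFACE packs the DMA channel into the low nibble and the IRQ line into the high nibble, as the two masks indicate. A minimal sketch of how those masks would typically be applied (illustrative only, not code from this patch; the register value is assumed to have already been read from register block 3):

static void ircc_decode_interface(unsigned int iface,
                                  unsigned int *dma, unsigned int *irq)
{
        /* Low four bits select the DMA channel ... */
        *dma = iface & IRCC_INTERFACE_DMA_MASK;
        /* ... high four bits select the IRQ line. */
        *irq = (iface & IRCC_INTERFACE_IRQ_MASK) >> 4;
}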
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 006e4f575606..6d9de626c967 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1749,11 +1749,6 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1749 struct net_device *ndev = pci_get_drvdata(pdev); 1749 struct net_device *ndev = pci_get_drvdata(pdev);
1750 vlsi_irda_dev_t *idev; 1750 vlsi_irda_dev_t *idev;
1751 1751
1752 if (state < 1 || state > 3 ) {
1753 IRDA_ERROR("%s - %s: invalid pm state request: %u\n",
1754 __FUNCTION__, PCIDEV_NAME(pdev), state);
1755 return 0;
1756 }
1757 if (!ndev) { 1752 if (!ndev) {
1758 IRDA_ERROR("%s - %s: no netdevice \n", 1753 IRDA_ERROR("%s - %s: no netdevice \n",
1759 __FUNCTION__, PCIDEV_NAME(pdev)); 1754 __FUNCTION__, PCIDEV_NAME(pdev));
@@ -1762,12 +1757,12 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1762 idev = ndev->priv; 1757 idev = ndev->priv;
1763 down(&idev->sem); 1758 down(&idev->sem);
1764 if (pdev->current_state != 0) { /* already suspended */ 1759 if (pdev->current_state != 0) { /* already suspended */
1765 if (state > pdev->current_state) { /* simply go deeper */ 1760 if (state.event > pdev->current_state) { /* simply go deeper */
1766 pci_set_power_state(pdev,state); 1761 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1767 pdev->current_state = state; 1762 pdev->current_state = state.event;
1768 } 1763 }
1769 else 1764 else
1770 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, PCIDEV_NAME(pdev), pdev->current_state, state); 1765 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, PCIDEV_NAME(pdev), pdev->current_state, state.event);
1771 up(&idev->sem); 1766 up(&idev->sem);
1772 return 0; 1767 return 0;
1773 } 1768 }
@@ -1781,8 +1776,8 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1781 idev->new_baud = idev->baud; 1776 idev->new_baud = idev->baud;
1782 } 1777 }
1783 1778
1784 pci_set_power_state(pdev,state); 1779 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1785 pdev->current_state = state; 1780 pdev->current_state = state.event;
1786 idev->resume_ok = 1; 1781 idev->resume_ok = 1;
1787 up(&idev->sem); 1782 up(&idev->sem);
1788 return 0; 1783 return 0;
@@ -1807,8 +1802,8 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1807 return 0; 1802 return 0;
1808 } 1803 }
1809 1804
1810 pci_set_power_state(pdev, 0); 1805 pci_set_power_state(pdev, PCI_D0);
1811 pdev->current_state = 0; 1806 pdev->current_state = PM_EVENT_ON;
1812 1807
1813 if (!idev->resume_ok) { 1808 if (!idev->resume_ok) {
1814 /* should be obsolete now - but used to happen due to: 1809 /* should be obsolete now - but used to happen due to:
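The two vlsi_ir.c hunks above replace raw integer power states with the pm_message_t API: the numeric range check is dropped, comparisons use state.event, and the PCI power state is derived with pci_choose_state(). A condensed sketch of that idiom for a generic 2.6-era PCI driver (my_suspend/my_resume are placeholder names, not functions from this file):

#include <linux/pci.h>

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
        /* Translate the PM event into a concrete PCI power state. */
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        pdev->current_state = state.event;
        return 0;
}

static int my_resume(struct pci_dev *pdev)
{
        /* Back to full power; record that the device is running again. */
        pci_set_power_state(pdev, PCI_D0);
        pdev->current_state = PM_EVENT_ON;
        return 0;
}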
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 55af32e9bf08..3d56cf5a4e23 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp. 4 * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.
5 * Substantially cleaned up by: 5 * Substantially cleaned up by:
6 * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation. 6 * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
7 * Copyright (C) 2004-2005 Michael Ellerman, IBM Corporation.
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as 10 * modify it under the terms of the GNU General Public License as
@@ -79,12 +80,55 @@
79#include <asm/iommu.h> 80#include <asm/iommu.h>
80#include <asm/vio.h> 81#include <asm/vio.h>
81 82
82#include "iseries_veth.h" 83#undef DEBUG
83 84
84MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>"); 85MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
85MODULE_DESCRIPTION("iSeries Virtual ethernet driver"); 86MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
86MODULE_LICENSE("GPL"); 87MODULE_LICENSE("GPL");
87 88
89#define VETH_EVENT_CAP (0)
90#define VETH_EVENT_FRAMES (1)
91#define VETH_EVENT_MONITOR (2)
92#define VETH_EVENT_FRAMES_ACK (3)
93
94#define VETH_MAX_ACKS_PER_MSG (20)
95#define VETH_MAX_FRAMES_PER_MSG (6)
96
97struct veth_frames_data {
98 u32 addr[VETH_MAX_FRAMES_PER_MSG];
99 u16 len[VETH_MAX_FRAMES_PER_MSG];
100 u32 eofmask;
101};
102#define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG)
103
104struct veth_frames_ack_data {
105 u16 token[VETH_MAX_ACKS_PER_MSG];
106};
107
108struct veth_cap_data {
109 u8 caps_version;
110 u8 rsvd1;
111 u16 num_buffers;
112 u16 ack_threshold;
113 u16 rsvd2;
114 u32 ack_timeout;
115 u32 rsvd3;
116 u64 rsvd4[3];
117};
118
119struct veth_lpevent {
120 struct HvLpEvent base_event;
121 union {
122 struct veth_cap_data caps_data;
123 struct veth_frames_data frames_data;
124 struct veth_frames_ack_data frames_ack_data;
125 } u;
126
127};
128
129#define DRV_NAME "iseries_veth"
130#define DRV_VERSION "2.0"
131
88#define VETH_NUMBUFFERS (120) 132#define VETH_NUMBUFFERS (120)
89#define VETH_ACKTIMEOUT (1000000) /* microseconds */ 133#define VETH_ACKTIMEOUT (1000000) /* microseconds */
90#define VETH_MAX_MCAST (12) 134#define VETH_MAX_MCAST (12)
@@ -113,9 +157,9 @@ MODULE_LICENSE("GPL");
113 157
114struct veth_msg { 158struct veth_msg {
115 struct veth_msg *next; 159 struct veth_msg *next;
116 struct VethFramesData data; 160 struct veth_frames_data data;
117 int token; 161 int token;
118 unsigned long in_use; 162 int in_use;
119 struct sk_buff *skb; 163 struct sk_buff *skb;
120 struct device *dev; 164 struct device *dev;
121}; 165};
@@ -125,23 +169,28 @@ struct veth_lpar_connection {
125 struct work_struct statemachine_wq; 169 struct work_struct statemachine_wq;
126 struct veth_msg *msgs; 170 struct veth_msg *msgs;
127 int num_events; 171 int num_events;
128 struct VethCapData local_caps; 172 struct veth_cap_data local_caps;
129 173
174 struct kobject kobject;
130 struct timer_list ack_timer; 175 struct timer_list ack_timer;
131 176
177 struct timer_list reset_timer;
178 unsigned int reset_timeout;
179 unsigned long last_contact;
180 int outstanding_tx;
181
132 spinlock_t lock; 182 spinlock_t lock;
133 unsigned long state; 183 unsigned long state;
134 HvLpInstanceId src_inst; 184 HvLpInstanceId src_inst;
135 HvLpInstanceId dst_inst; 185 HvLpInstanceId dst_inst;
136 struct VethLpEvent cap_event, cap_ack_event; 186 struct veth_lpevent cap_event, cap_ack_event;
137 u16 pending_acks[VETH_MAX_ACKS_PER_MSG]; 187 u16 pending_acks[VETH_MAX_ACKS_PER_MSG];
138 u32 num_pending_acks; 188 u32 num_pending_acks;
139 189
140 int num_ack_events; 190 int num_ack_events;
141 struct VethCapData remote_caps; 191 struct veth_cap_data remote_caps;
142 u32 ack_timeout; 192 u32 ack_timeout;
143 193
144 spinlock_t msg_stack_lock;
145 struct veth_msg *msg_stack_head; 194 struct veth_msg *msg_stack_head;
146}; 195};
147 196
@@ -151,15 +200,17 @@ struct veth_port {
151 u64 mac_addr; 200 u64 mac_addr;
152 HvLpIndexMap lpar_map; 201 HvLpIndexMap lpar_map;
153 202
154 spinlock_t pending_gate; 203 /* queue_lock protects the stopped_map and dev's queue. */
155 struct sk_buff *pending_skb; 204 spinlock_t queue_lock;
156 HvLpIndexMap pending_lpmask; 205 HvLpIndexMap stopped_map;
157 206
207 /* mcast_gate protects promiscuous, num_mcast & mcast_addr. */
158 rwlock_t mcast_gate; 208 rwlock_t mcast_gate;
159 int promiscuous; 209 int promiscuous;
160 int all_mcast;
161 int num_mcast; 210 int num_mcast;
162 u64 mcast_addr[VETH_MAX_MCAST]; 211 u64 mcast_addr[VETH_MAX_MCAST];
212
213 struct kobject kobject;
163}; 214};
164 215
165static HvLpIndex this_lp; 216static HvLpIndex this_lp;
@@ -168,44 +219,56 @@ static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */
168 219
169static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev); 220static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
170static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *); 221static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *);
171static void veth_flush_pending(struct veth_lpar_connection *cnx); 222static void veth_wake_queues(struct veth_lpar_connection *cnx);
172static void veth_receive(struct veth_lpar_connection *, struct VethLpEvent *); 223static void veth_stop_queues(struct veth_lpar_connection *cnx);
173static void veth_timed_ack(unsigned long connectionPtr); 224static void veth_receive(struct veth_lpar_connection *, struct veth_lpevent *);
225static void veth_release_connection(struct kobject *kobject);
226static void veth_timed_ack(unsigned long ptr);
227static void veth_timed_reset(unsigned long ptr);
174 228
175/* 229/*
176 * Utility functions 230 * Utility functions
177 */ 231 */
178 232
179#define veth_printk(prio, fmt, args...) \ 233#define veth_info(fmt, args...) \
180 printk(prio "%s: " fmt, __FILE__, ## args) 234 printk(KERN_INFO DRV_NAME ": " fmt, ## args)
181 235
182#define veth_error(fmt, args...) \ 236#define veth_error(fmt, args...) \
183 printk(KERN_ERR "(%s:%3.3d) ERROR: " fmt, __FILE__, __LINE__ , ## args) 237 printk(KERN_ERR DRV_NAME ": Error: " fmt, ## args)
238
239#ifdef DEBUG
240#define veth_debug(fmt, args...) \
241 printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
242#else
243#define veth_debug(fmt, args...) do {} while (0)
244#endif
184 245
246/* You must hold the connection's lock when you call this function. */
185static inline void veth_stack_push(struct veth_lpar_connection *cnx, 247static inline void veth_stack_push(struct veth_lpar_connection *cnx,
186 struct veth_msg *msg) 248 struct veth_msg *msg)
187{ 249{
188 unsigned long flags;
189
190 spin_lock_irqsave(&cnx->msg_stack_lock, flags);
191 msg->next = cnx->msg_stack_head; 250 msg->next = cnx->msg_stack_head;
192 cnx->msg_stack_head = msg; 251 cnx->msg_stack_head = msg;
193 spin_unlock_irqrestore(&cnx->msg_stack_lock, flags);
194} 252}
195 253
254/* You must hold the connection's lock when you call this function. */
196static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx) 255static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx)
197{ 256{
198 unsigned long flags;
199 struct veth_msg *msg; 257 struct veth_msg *msg;
200 258
201 spin_lock_irqsave(&cnx->msg_stack_lock, flags);
202 msg = cnx->msg_stack_head; 259 msg = cnx->msg_stack_head;
203 if (msg) 260 if (msg)
204 cnx->msg_stack_head = cnx->msg_stack_head->next; 261 cnx->msg_stack_head = cnx->msg_stack_head->next;
205 spin_unlock_irqrestore(&cnx->msg_stack_lock, flags); 262
206 return msg; 263 return msg;
207} 264}
208 265
266/* You must hold the connection's lock when you call this function. */
267static inline int veth_stack_is_empty(struct veth_lpar_connection *cnx)
268{
269 return cnx->msg_stack_head == NULL;
270}
271
209static inline HvLpEvent_Rc 272static inline HvLpEvent_Rc
210veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype, 273veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype,
211 HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype, 274 HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype,
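With msg_stack_lock removed, the message stack above is serialized by cnx->lock alone, which is why push/pop/is_empty now carry the "you must hold the connection's lock" comments. A hypothetical caller would look roughly like this (sketch only; example_get_msg is not a function in this patch):

static struct veth_msg *example_get_msg(struct veth_lpar_connection *cnx)
{
        struct veth_msg *msg;
        unsigned long flags;

        spin_lock_irqsave(&cnx->lock, flags);
        msg = veth_stack_pop(cnx);      /* safe: cnx->lock is held */
        spin_unlock_irqrestore(&cnx->lock, flags);

        return msg;
}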
@@ -249,7 +312,7 @@ static int veth_allocate_events(HvLpIndex rlp, int number)
249 struct veth_allocation vc = { COMPLETION_INITIALIZER(vc.c), 0 }; 312 struct veth_allocation vc = { COMPLETION_INITIALIZER(vc.c), 0 };
250 313
251 mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan, 314 mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan,
252 sizeof(struct VethLpEvent), number, 315 sizeof(struct veth_lpevent), number,
253 &veth_complete_allocation, &vc); 316 &veth_complete_allocation, &vc);
254 wait_for_completion(&vc.c); 317 wait_for_completion(&vc.c);
255 318
@@ -257,6 +320,137 @@ static int veth_allocate_events(HvLpIndex rlp, int number)
257} 320}
258 321
259/* 322/*
323 * sysfs support
324 */
325
326struct veth_cnx_attribute {
327 struct attribute attr;
328 ssize_t (*show)(struct veth_lpar_connection *, char *buf);
329 ssize_t (*store)(struct veth_lpar_connection *, const char *buf);
330};
331
332static ssize_t veth_cnx_attribute_show(struct kobject *kobj,
333 struct attribute *attr, char *buf)
334{
335 struct veth_cnx_attribute *cnx_attr;
336 struct veth_lpar_connection *cnx;
337
338 cnx_attr = container_of(attr, struct veth_cnx_attribute, attr);
339 cnx = container_of(kobj, struct veth_lpar_connection, kobject);
340
341 if (!cnx_attr->show)
342 return -EIO;
343
344 return cnx_attr->show(cnx, buf);
345}
346
347#define CUSTOM_CNX_ATTR(_name, _format, _expression) \
348static ssize_t _name##_show(struct veth_lpar_connection *cnx, char *buf)\
349{ \
350 return sprintf(buf, _format, _expression); \
351} \
352struct veth_cnx_attribute veth_cnx_attr_##_name = __ATTR_RO(_name)
353
354#define SIMPLE_CNX_ATTR(_name) \
355 CUSTOM_CNX_ATTR(_name, "%lu\n", (unsigned long)cnx->_name)
356
357SIMPLE_CNX_ATTR(outstanding_tx);
358SIMPLE_CNX_ATTR(remote_lp);
359SIMPLE_CNX_ATTR(num_events);
360SIMPLE_CNX_ATTR(src_inst);
361SIMPLE_CNX_ATTR(dst_inst);
362SIMPLE_CNX_ATTR(num_pending_acks);
363SIMPLE_CNX_ATTR(num_ack_events);
364CUSTOM_CNX_ATTR(ack_timeout, "%d\n", jiffies_to_msecs(cnx->ack_timeout));
365CUSTOM_CNX_ATTR(reset_timeout, "%d\n", jiffies_to_msecs(cnx->reset_timeout));
366CUSTOM_CNX_ATTR(state, "0x%.4lX\n", cnx->state);
367CUSTOM_CNX_ATTR(last_contact, "%d\n", cnx->last_contact ?
368 jiffies_to_msecs(jiffies - cnx->last_contact) : 0);
369
370#define GET_CNX_ATTR(_name) (&veth_cnx_attr_##_name.attr)
371
372static struct attribute *veth_cnx_default_attrs[] = {
373 GET_CNX_ATTR(outstanding_tx),
374 GET_CNX_ATTR(remote_lp),
375 GET_CNX_ATTR(num_events),
376 GET_CNX_ATTR(reset_timeout),
377 GET_CNX_ATTR(last_contact),
378 GET_CNX_ATTR(state),
379 GET_CNX_ATTR(src_inst),
380 GET_CNX_ATTR(dst_inst),
381 GET_CNX_ATTR(num_pending_acks),
382 GET_CNX_ATTR(num_ack_events),
383 GET_CNX_ATTR(ack_timeout),
384 NULL
385};
386
387static struct sysfs_ops veth_cnx_sysfs_ops = {
388 .show = veth_cnx_attribute_show
389};
390
391static struct kobj_type veth_lpar_connection_ktype = {
392 .release = veth_release_connection,
393 .sysfs_ops = &veth_cnx_sysfs_ops,
394 .default_attrs = veth_cnx_default_attrs
395};
396
397struct veth_port_attribute {
398 struct attribute attr;
399 ssize_t (*show)(struct veth_port *, char *buf);
400 ssize_t (*store)(struct veth_port *, const char *buf);
401};
402
403static ssize_t veth_port_attribute_show(struct kobject *kobj,
404 struct attribute *attr, char *buf)
405{
406 struct veth_port_attribute *port_attr;
407 struct veth_port *port;
408
409 port_attr = container_of(attr, struct veth_port_attribute, attr);
410 port = container_of(kobj, struct veth_port, kobject);
411
412 if (!port_attr->show)
413 return -EIO;
414
415 return port_attr->show(port, buf);
416}
417
418#define CUSTOM_PORT_ATTR(_name, _format, _expression) \
419static ssize_t _name##_show(struct veth_port *port, char *buf) \
420{ \
421 return sprintf(buf, _format, _expression); \
422} \
423struct veth_port_attribute veth_port_attr_##_name = __ATTR_RO(_name)
424
425#define SIMPLE_PORT_ATTR(_name) \
426 CUSTOM_PORT_ATTR(_name, "%lu\n", (unsigned long)port->_name)
427
428SIMPLE_PORT_ATTR(promiscuous);
429SIMPLE_PORT_ATTR(num_mcast);
430CUSTOM_PORT_ATTR(lpar_map, "0x%X\n", port->lpar_map);
431CUSTOM_PORT_ATTR(stopped_map, "0x%X\n", port->stopped_map);
432CUSTOM_PORT_ATTR(mac_addr, "0x%lX\n", port->mac_addr);
433
434#define GET_PORT_ATTR(_name) (&veth_port_attr_##_name.attr)
435static struct attribute *veth_port_default_attrs[] = {
436 GET_PORT_ATTR(mac_addr),
437 GET_PORT_ATTR(lpar_map),
438 GET_PORT_ATTR(stopped_map),
439 GET_PORT_ATTR(promiscuous),
440 GET_PORT_ATTR(num_mcast),
441 NULL
442};
443
444static struct sysfs_ops veth_port_sysfs_ops = {
445 .show = veth_port_attribute_show
446};
447
448static struct kobj_type veth_port_ktype = {
449 .sysfs_ops = &veth_port_sysfs_ops,
450 .default_attrs = veth_port_default_attrs
451};
452
453/*
260 * LPAR connection code 454 * LPAR connection code
261 */ 455 */
262 456
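As a reading aid for the attribute macros above: SIMPLE_CNX_ATTR(outstanding_tx) expands to roughly the following read-only sysfs attribute, which veth_cnx_attribute_show() then dispatches to via container_of():

static ssize_t outstanding_tx_show(struct veth_lpar_connection *cnx, char *buf)
{
        return sprintf(buf, "%lu\n", (unsigned long)cnx->outstanding_tx);
}
struct veth_cnx_attribute veth_cnx_attr_outstanding_tx =
        __ATTR_RO(outstanding_tx);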
@@ -266,7 +460,7 @@ static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
266} 460}
267 461
268static void veth_take_cap(struct veth_lpar_connection *cnx, 462static void veth_take_cap(struct veth_lpar_connection *cnx,
269 struct VethLpEvent *event) 463 struct veth_lpevent *event)
270{ 464{
271 unsigned long flags; 465 unsigned long flags;
272 466
@@ -278,7 +472,7 @@ static void veth_take_cap(struct veth_lpar_connection *cnx,
278 HvLpEvent_Type_VirtualLan); 472 HvLpEvent_Type_VirtualLan);
279 473
280 if (cnx->state & VETH_STATE_GOTCAPS) { 474 if (cnx->state & VETH_STATE_GOTCAPS) {
281 veth_error("Received a second capabilities from lpar %d\n", 475 veth_error("Received a second capabilities from LPAR %d.\n",
282 cnx->remote_lp); 476 cnx->remote_lp);
283 event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable; 477 event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable;
284 HvCallEvent_ackLpEvent((struct HvLpEvent *) event); 478 HvCallEvent_ackLpEvent((struct HvLpEvent *) event);
@@ -291,13 +485,13 @@ static void veth_take_cap(struct veth_lpar_connection *cnx,
291} 485}
292 486
293static void veth_take_cap_ack(struct veth_lpar_connection *cnx, 487static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
294 struct VethLpEvent *event) 488 struct veth_lpevent *event)
295{ 489{
296 unsigned long flags; 490 unsigned long flags;
297 491
298 spin_lock_irqsave(&cnx->lock, flags); 492 spin_lock_irqsave(&cnx->lock, flags);
299 if (cnx->state & VETH_STATE_GOTCAPACK) { 493 if (cnx->state & VETH_STATE_GOTCAPACK) {
300 veth_error("Received a second capabilities ack from lpar %d\n", 494 veth_error("Received a second capabilities ack from LPAR %d.\n",
301 cnx->remote_lp); 495 cnx->remote_lp);
302 } else { 496 } else {
303 memcpy(&cnx->cap_ack_event, event, 497 memcpy(&cnx->cap_ack_event, event,
@@ -309,19 +503,24 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
309} 503}
310 504
311static void veth_take_monitor_ack(struct veth_lpar_connection *cnx, 505static void veth_take_monitor_ack(struct veth_lpar_connection *cnx,
312 struct VethLpEvent *event) 506 struct veth_lpevent *event)
313{ 507{
314 unsigned long flags; 508 unsigned long flags;
315 509
316 spin_lock_irqsave(&cnx->lock, flags); 510 spin_lock_irqsave(&cnx->lock, flags);
317 veth_printk(KERN_DEBUG, "Monitor ack returned for lpar %d\n", 511 veth_debug("cnx %d: lost connection.\n", cnx->remote_lp);
318 cnx->remote_lp); 512
319 cnx->state |= VETH_STATE_RESET; 513 /* Avoid kicking the statemachine once we're shutdown.
320 veth_kick_statemachine(cnx); 514 * It's unnecessary and it could break veth_stop_connection(). */
515
516 if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
517 cnx->state |= VETH_STATE_RESET;
518 veth_kick_statemachine(cnx);
519 }
321 spin_unlock_irqrestore(&cnx->lock, flags); 520 spin_unlock_irqrestore(&cnx->lock, flags);
322} 521}
323 522
324static void veth_handle_ack(struct VethLpEvent *event) 523static void veth_handle_ack(struct veth_lpevent *event)
325{ 524{
326 HvLpIndex rlp = event->base_event.xTargetLp; 525 HvLpIndex rlp = event->base_event.xTargetLp;
327 struct veth_lpar_connection *cnx = veth_cnx[rlp]; 526 struct veth_lpar_connection *cnx = veth_cnx[rlp];
@@ -329,58 +528,67 @@ static void veth_handle_ack(struct VethLpEvent *event)
329 BUG_ON(! cnx); 528 BUG_ON(! cnx);
330 529
331 switch (event->base_event.xSubtype) { 530 switch (event->base_event.xSubtype) {
332 case VethEventTypeCap: 531 case VETH_EVENT_CAP:
333 veth_take_cap_ack(cnx, event); 532 veth_take_cap_ack(cnx, event);
334 break; 533 break;
335 case VethEventTypeMonitor: 534 case VETH_EVENT_MONITOR:
336 veth_take_monitor_ack(cnx, event); 535 veth_take_monitor_ack(cnx, event);
337 break; 536 break;
338 default: 537 default:
339 veth_error("Unknown ack type %d from lpar %d\n", 538 veth_error("Unknown ack type %d from LPAR %d.\n",
340 event->base_event.xSubtype, rlp); 539 event->base_event.xSubtype, rlp);
341 }; 540 };
342} 541}
343 542
344static void veth_handle_int(struct VethLpEvent *event) 543static void veth_handle_int(struct veth_lpevent *event)
345{ 544{
346 HvLpIndex rlp = event->base_event.xSourceLp; 545 HvLpIndex rlp = event->base_event.xSourceLp;
347 struct veth_lpar_connection *cnx = veth_cnx[rlp]; 546 struct veth_lpar_connection *cnx = veth_cnx[rlp];
348 unsigned long flags; 547 unsigned long flags;
349 int i; 548 int i, acked = 0;
350 549
351 BUG_ON(! cnx); 550 BUG_ON(! cnx);
352 551
353 switch (event->base_event.xSubtype) { 552 switch (event->base_event.xSubtype) {
354 case VethEventTypeCap: 553 case VETH_EVENT_CAP:
355 veth_take_cap(cnx, event); 554 veth_take_cap(cnx, event);
356 break; 555 break;
357 case VethEventTypeMonitor: 556 case VETH_EVENT_MONITOR:
358 /* do nothing... this'll hang out here til we're dead, 557 /* do nothing... this'll hang out here til we're dead,
359 * and the hypervisor will return it for us. */ 558 * and the hypervisor will return it for us. */
360 break; 559 break;
361 case VethEventTypeFramesAck: 560 case VETH_EVENT_FRAMES_ACK:
362 spin_lock_irqsave(&cnx->lock, flags); 561 spin_lock_irqsave(&cnx->lock, flags);
562
363 for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) { 563 for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) {
364 u16 msgnum = event->u.frames_ack_data.token[i]; 564 u16 msgnum = event->u.frames_ack_data.token[i];
365 565
366 if (msgnum < VETH_NUMBUFFERS) 566 if (msgnum < VETH_NUMBUFFERS) {
367 veth_recycle_msg(cnx, cnx->msgs + msgnum); 567 veth_recycle_msg(cnx, cnx->msgs + msgnum);
568 cnx->outstanding_tx--;
569 acked++;
570 }
571 }
572
573 if (acked > 0) {
574 cnx->last_contact = jiffies;
575 veth_wake_queues(cnx);
368 } 576 }
577
369 spin_unlock_irqrestore(&cnx->lock, flags); 578 spin_unlock_irqrestore(&cnx->lock, flags);
370 veth_flush_pending(cnx);
371 break; 579 break;
372 case VethEventTypeFrames: 580 case VETH_EVENT_FRAMES:
373 veth_receive(cnx, event); 581 veth_receive(cnx, event);
374 break; 582 break;
375 default: 583 default:
376 veth_error("Unknown interrupt type %d from lpar %d\n", 584 veth_error("Unknown interrupt type %d from LPAR %d.\n",
377 event->base_event.xSubtype, rlp); 585 event->base_event.xSubtype, rlp);
378 }; 586 };
379} 587}
380 588
381static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs) 589static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs)
382{ 590{
383 struct VethLpEvent *veth_event = (struct VethLpEvent *)event; 591 struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
384 592
385 if (event->xFlags.xFunction == HvLpEvent_Function_Ack) 593 if (event->xFlags.xFunction == HvLpEvent_Function_Ack)
386 veth_handle_ack(veth_event); 594 veth_handle_ack(veth_event);
@@ -390,7 +598,7 @@ static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs)
390 598
391static int veth_process_caps(struct veth_lpar_connection *cnx) 599static int veth_process_caps(struct veth_lpar_connection *cnx)
392{ 600{
393 struct VethCapData *remote_caps = &cnx->remote_caps; 601 struct veth_cap_data *remote_caps = &cnx->remote_caps;
394 int num_acks_needed; 602 int num_acks_needed;
395 603
396 /* Convert timer to jiffies */ 604 /* Convert timer to jiffies */
@@ -400,8 +608,8 @@ static int veth_process_caps(struct veth_lpar_connection *cnx)
400 || (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) 608 || (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG)
401 || (remote_caps->ack_threshold == 0) 609 || (remote_caps->ack_threshold == 0)
402 || (cnx->ack_timeout == 0) ) { 610 || (cnx->ack_timeout == 0) ) {
403 veth_error("Received incompatible capabilities from lpar %d\n", 611 veth_error("Received incompatible capabilities from LPAR %d.\n",
404 cnx->remote_lp); 612 cnx->remote_lp);
405 return HvLpEvent_Rc_InvalidSubtypeData; 613 return HvLpEvent_Rc_InvalidSubtypeData;
406 } 614 }
407 615
@@ -418,8 +626,8 @@ static int veth_process_caps(struct veth_lpar_connection *cnx)
418 cnx->num_ack_events += num; 626 cnx->num_ack_events += num;
419 627
420 if (cnx->num_ack_events < num_acks_needed) { 628 if (cnx->num_ack_events < num_acks_needed) {
421 veth_error("Couldn't allocate enough ack events for lpar %d\n", 629 veth_error("Couldn't allocate enough ack events "
422 cnx->remote_lp); 630 "for LPAR %d.\n", cnx->remote_lp);
423 631
424 return HvLpEvent_Rc_BufferNotAvailable; 632 return HvLpEvent_Rc_BufferNotAvailable;
425 } 633 }
@@ -440,15 +648,15 @@ static void veth_statemachine(void *p)
440 648
441 restart: 649 restart:
442 if (cnx->state & VETH_STATE_RESET) { 650 if (cnx->state & VETH_STATE_RESET) {
443 int i;
444
445 del_timer(&cnx->ack_timer);
446
447 if (cnx->state & VETH_STATE_OPEN) 651 if (cnx->state & VETH_STATE_OPEN)
448 HvCallEvent_closeLpEventPath(cnx->remote_lp, 652 HvCallEvent_closeLpEventPath(cnx->remote_lp,
449 HvLpEvent_Type_VirtualLan); 653 HvLpEvent_Type_VirtualLan);
450 654
451 /* reset ack data */ 655 /*
656 * Reset ack data. This prevents the ack_timer actually
657 * doing anything, even if it runs one more time when
658 * we drop the lock below.
659 */
452 memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks)); 660 memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
453 cnx->num_pending_acks = 0; 661 cnx->num_pending_acks = 0;
454 662
@@ -458,14 +666,32 @@ static void veth_statemachine(void *p)
458 | VETH_STATE_SENTCAPACK | VETH_STATE_READY); 666 | VETH_STATE_SENTCAPACK | VETH_STATE_READY);
459 667
460 /* Clean up any leftover messages */ 668 /* Clean up any leftover messages */
461 if (cnx->msgs) 669 if (cnx->msgs) {
670 int i;
462 for (i = 0; i < VETH_NUMBUFFERS; ++i) 671 for (i = 0; i < VETH_NUMBUFFERS; ++i)
463 veth_recycle_msg(cnx, cnx->msgs + i); 672 veth_recycle_msg(cnx, cnx->msgs + i);
673 }
674
675 cnx->outstanding_tx = 0;
676 veth_wake_queues(cnx);
677
678 /* Drop the lock so we can do stuff that might sleep or
679 * take other locks. */
464 spin_unlock_irq(&cnx->lock); 680 spin_unlock_irq(&cnx->lock);
465 veth_flush_pending(cnx); 681
682 del_timer_sync(&cnx->ack_timer);
683 del_timer_sync(&cnx->reset_timer);
684
466 spin_lock_irq(&cnx->lock); 685 spin_lock_irq(&cnx->lock);
686
467 if (cnx->state & VETH_STATE_RESET) 687 if (cnx->state & VETH_STATE_RESET)
468 goto restart; 688 goto restart;
689
690 /* Hack, wait for the other end to reset itself. */
691 if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
692 schedule_delayed_work(&cnx->statemachine_wq, 5 * HZ);
693 goto out;
694 }
469 } 695 }
470 696
471 if (cnx->state & VETH_STATE_SHUTDOWN) 697 if (cnx->state & VETH_STATE_SHUTDOWN)
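The reset path above drops cnx->lock before the del_timer_sync() calls because the timer handlers, such as the new veth_timed_reset() below, take cnx->lock themselves; waiting for a running handler while holding the lock it needs would deadlock. The shape of that pattern, isolated (sketch only; assumes the caller entered with cnx->lock held, as veth_statemachine() does):

static void example_sync_timers(struct veth_lpar_connection *cnx)
{
        spin_unlock_irq(&cnx->lock);            /* handlers may block on cnx->lock */

        del_timer_sync(&cnx->ack_timer);        /* waits for any running handler */
        del_timer_sync(&cnx->reset_timer);

        spin_lock_irq(&cnx->lock);              /* state may have changed; recheck */
}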
@@ -488,7 +714,7 @@ static void veth_statemachine(void *p)
488 714
489 if ( (cnx->state & VETH_STATE_OPEN) 715 if ( (cnx->state & VETH_STATE_OPEN)
490 && !(cnx->state & VETH_STATE_SENTMON) ) { 716 && !(cnx->state & VETH_STATE_SENTMON) ) {
491 rc = veth_signalevent(cnx, VethEventTypeMonitor, 717 rc = veth_signalevent(cnx, VETH_EVENT_MONITOR,
492 HvLpEvent_AckInd_DoAck, 718 HvLpEvent_AckInd_DoAck,
493 HvLpEvent_AckType_DeferredAck, 719 HvLpEvent_AckType_DeferredAck,
494 0, 0, 0, 0, 0, 0); 720 0, 0, 0, 0, 0, 0);
@@ -498,9 +724,8 @@ static void veth_statemachine(void *p)
498 } else { 724 } else {
499 if ( (rc != HvLpEvent_Rc_PartitionDead) 725 if ( (rc != HvLpEvent_Rc_PartitionDead)
500 && (rc != HvLpEvent_Rc_PathClosed) ) 726 && (rc != HvLpEvent_Rc_PathClosed) )
501 veth_error("Error sending monitor to " 727 veth_error("Error sending monitor to LPAR %d, "
502 "lpar %d, rc=%x\n", 728 "rc = %d\n", rlp, rc);
503 rlp, (int) rc);
504 729
505 /* Oh well, hope we get a cap from the other 730 /* Oh well, hope we get a cap from the other
506 * end and do better when that kicks us */ 731 * end and do better when that kicks us */
@@ -512,7 +737,7 @@ static void veth_statemachine(void *p)
512 && !(cnx->state & VETH_STATE_SENTCAPS)) { 737 && !(cnx->state & VETH_STATE_SENTCAPS)) {
513 u64 *rawcap = (u64 *)&cnx->local_caps; 738 u64 *rawcap = (u64 *)&cnx->local_caps;
514 739
515 rc = veth_signalevent(cnx, VethEventTypeCap, 740 rc = veth_signalevent(cnx, VETH_EVENT_CAP,
516 HvLpEvent_AckInd_DoAck, 741 HvLpEvent_AckInd_DoAck,
517 HvLpEvent_AckType_ImmediateAck, 742 HvLpEvent_AckType_ImmediateAck,
518 0, rawcap[0], rawcap[1], rawcap[2], 743 0, rawcap[0], rawcap[1], rawcap[2],
@@ -523,9 +748,9 @@ static void veth_statemachine(void *p)
523 } else { 748 } else {
524 if ( (rc != HvLpEvent_Rc_PartitionDead) 749 if ( (rc != HvLpEvent_Rc_PartitionDead)
525 && (rc != HvLpEvent_Rc_PathClosed) ) 750 && (rc != HvLpEvent_Rc_PathClosed) )
526 veth_error("Error sending caps to " 751 veth_error("Error sending caps to LPAR %d, "
527 "lpar %d, rc=%x\n", 752 "rc = %d\n", rlp, rc);
528 rlp, (int) rc); 753
529 /* Oh well, hope we get a cap from the other 754 /* Oh well, hope we get a cap from the other
530 * end and do better when that kicks us */ 755 * end and do better when that kicks us */
531 goto out; 756 goto out;
@@ -534,7 +759,7 @@ static void veth_statemachine(void *p)
534 759
535 if ((cnx->state & VETH_STATE_GOTCAPS) 760 if ((cnx->state & VETH_STATE_GOTCAPS)
536 && !(cnx->state & VETH_STATE_SENTCAPACK)) { 761 && !(cnx->state & VETH_STATE_SENTCAPACK)) {
537 struct VethCapData *remote_caps = &cnx->remote_caps; 762 struct veth_cap_data *remote_caps = &cnx->remote_caps;
538 763
539 memcpy(remote_caps, &cnx->cap_event.u.caps_data, 764 memcpy(remote_caps, &cnx->cap_event.u.caps_data,
540 sizeof(*remote_caps)); 765 sizeof(*remote_caps));
@@ -565,10 +790,8 @@ static void veth_statemachine(void *p)
565 add_timer(&cnx->ack_timer); 790 add_timer(&cnx->ack_timer);
566 cnx->state |= VETH_STATE_READY; 791 cnx->state |= VETH_STATE_READY;
567 } else { 792 } else {
568 veth_printk(KERN_ERR, "Caps rejected (rc=%d) by " 793 veth_error("Caps rejected by LPAR %d, rc = %d\n",
569 "lpar %d\n", 794 rlp, cnx->cap_ack_event.base_event.xRc);
570 cnx->cap_ack_event.base_event.xRc,
571 rlp);
572 goto cant_cope; 795 goto cant_cope;
573 } 796 }
574 } 797 }
@@ -581,8 +804,8 @@ static void veth_statemachine(void *p)
581 /* FIXME: we get here if something happens we really can't 804 /* FIXME: we get here if something happens we really can't
582 * cope with. The link will never work once we get here, and 805 * cope with. The link will never work once we get here, and
583 * all we can do is not lock the rest of the system up */ 806 * all we can do is not lock the rest of the system up */
584 veth_error("Badness on connection to lpar %d (state=%04lx) " 807 veth_error("Unrecoverable error on connection to LPAR %d, shutting down"
585 " - shutting down\n", rlp, cnx->state); 808 " (state = 0x%04lx)\n", rlp, cnx->state);
586 cnx->state |= VETH_STATE_SHUTDOWN; 809 cnx->state |= VETH_STATE_SHUTDOWN;
587 spin_unlock_irq(&cnx->lock); 810 spin_unlock_irq(&cnx->lock);
588} 811}
@@ -591,7 +814,7 @@ static int veth_init_connection(u8 rlp)
591{ 814{
592 struct veth_lpar_connection *cnx; 815 struct veth_lpar_connection *cnx;
593 struct veth_msg *msgs; 816 struct veth_msg *msgs;
594 int i; 817 int i, rc;
595 818
596 if ( (rlp == this_lp) 819 if ( (rlp == this_lp)
597 || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) ) 820 || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
@@ -605,22 +828,36 @@ static int veth_init_connection(u8 rlp)
605 cnx->remote_lp = rlp; 828 cnx->remote_lp = rlp;
606 spin_lock_init(&cnx->lock); 829 spin_lock_init(&cnx->lock);
607 INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); 830 INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
831
608 init_timer(&cnx->ack_timer); 832 init_timer(&cnx->ack_timer);
609 cnx->ack_timer.function = veth_timed_ack; 833 cnx->ack_timer.function = veth_timed_ack;
610 cnx->ack_timer.data = (unsigned long) cnx; 834 cnx->ack_timer.data = (unsigned long) cnx;
835
836 init_timer(&cnx->reset_timer);
837 cnx->reset_timer.function = veth_timed_reset;
838 cnx->reset_timer.data = (unsigned long) cnx;
839 cnx->reset_timeout = 5 * HZ * (VETH_ACKTIMEOUT / 1000000);
840
611 memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks)); 841 memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
612 842
613 veth_cnx[rlp] = cnx; 843 veth_cnx[rlp] = cnx;
614 844
845 /* This gets us 1 reference, which is held on behalf of the driver
846 * infrastructure. It's released at module unload. */
847 kobject_init(&cnx->kobject);
848 cnx->kobject.ktype = &veth_lpar_connection_ktype;
849 rc = kobject_set_name(&cnx->kobject, "cnx%.2d", rlp);
850 if (rc != 0)
851 return rc;
852
615 msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL); 853 msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL);
616 if (! msgs) { 854 if (! msgs) {
617 veth_error("Can't allocate buffers for lpar %d\n", rlp); 855 veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
618 return -ENOMEM; 856 return -ENOMEM;
619 } 857 }
620 858
621 cnx->msgs = msgs; 859 cnx->msgs = msgs;
622 memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg)); 860 memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg));
623 spin_lock_init(&cnx->msg_stack_lock);
624 861
625 for (i = 0; i < VETH_NUMBUFFERS; i++) { 862 for (i = 0; i < VETH_NUMBUFFERS; i++) {
626 msgs[i].token = i; 863 msgs[i].token = i;
@@ -630,8 +867,7 @@ static int veth_init_connection(u8 rlp)
630 cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS); 867 cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS);
631 868
632 if (cnx->num_events < (2 + VETH_NUMBUFFERS)) { 869 if (cnx->num_events < (2 + VETH_NUMBUFFERS)) {
633 veth_error("Can't allocate events for lpar %d, only got %d\n", 870 veth_error("Can't allocate enough events for LPAR %d.\n", rlp);
634 rlp, cnx->num_events);
635 return -ENOMEM; 871 return -ENOMEM;
636 } 872 }
637 873
@@ -642,11 +878,9 @@ static int veth_init_connection(u8 rlp)
642 return 0; 878 return 0;
643} 879}
644 880
645static void veth_stop_connection(u8 rlp) 881static void veth_stop_connection(struct veth_lpar_connection *cnx)
646{ 882{
647 struct veth_lpar_connection *cnx = veth_cnx[rlp]; 883 if (!cnx)
648
649 if (! cnx)
650 return; 884 return;
651 885
652 spin_lock_irq(&cnx->lock); 886 spin_lock_irq(&cnx->lock);
@@ -654,12 +888,23 @@ static void veth_stop_connection(u8 rlp)
654 veth_kick_statemachine(cnx); 888 veth_kick_statemachine(cnx);
655 spin_unlock_irq(&cnx->lock); 889 spin_unlock_irq(&cnx->lock);
656 890
891 /* There's a slim chance the reset code has just queued the
892 * statemachine to run in five seconds. If so we need to cancel
893 * that and requeue the work to run now. */
894 if (cancel_delayed_work(&cnx->statemachine_wq)) {
895 spin_lock_irq(&cnx->lock);
896 veth_kick_statemachine(cnx);
897 spin_unlock_irq(&cnx->lock);
898 }
899
900 /* Wait for the state machine to run. */
657 flush_scheduled_work(); 901 flush_scheduled_work();
902}
658 903
659 /* FIXME: not sure if this is necessary - will already have 904static void veth_destroy_connection(struct veth_lpar_connection *cnx)
660 * been deleted by the state machine, just want to make sure 905{
661 * its not running any more */ 906 if (!cnx)
662 del_timer_sync(&cnx->ack_timer); 907 return;
663 908
664 if (cnx->num_events > 0) 909 if (cnx->num_events > 0)
665 mf_deallocate_lp_events(cnx->remote_lp, 910 mf_deallocate_lp_events(cnx->remote_lp,
@@ -671,18 +916,18 @@ static void veth_stop_connection(u8 rlp)
671 HvLpEvent_Type_VirtualLan, 916 HvLpEvent_Type_VirtualLan,
672 cnx->num_ack_events, 917 cnx->num_ack_events,
673 NULL, NULL); 918 NULL, NULL);
674}
675
676static void veth_destroy_connection(u8 rlp)
677{
678 struct veth_lpar_connection *cnx = veth_cnx[rlp];
679
680 if (! cnx)
681 return;
682 919
683 kfree(cnx->msgs); 920 kfree(cnx->msgs);
921 veth_cnx[cnx->remote_lp] = NULL;
684 kfree(cnx); 922 kfree(cnx);
685 veth_cnx[rlp] = NULL; 923}
924
925static void veth_release_connection(struct kobject *kobj)
926{
927 struct veth_lpar_connection *cnx;
928 cnx = container_of(kobj, struct veth_lpar_connection, kobject);
929 veth_stop_connection(cnx);
930 veth_destroy_connection(cnx);
686} 931}
687 932
688/* 933/*
@@ -726,17 +971,15 @@ static void veth_set_multicast_list(struct net_device *dev)
726 971
727 write_lock_irqsave(&port->mcast_gate, flags); 972 write_lock_irqsave(&port->mcast_gate, flags);
728 973
729 if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ 974 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
730 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", 975 (dev->mc_count > VETH_MAX_MCAST)) {
731 dev->name);
732 port->promiscuous = 1; 976 port->promiscuous = 1;
733 } else if ( (dev->flags & IFF_ALLMULTI)
734 || (dev->mc_count > VETH_MAX_MCAST) ) {
735 port->all_mcast = 1;
736 } else { 977 } else {
737 struct dev_mc_list *dmi = dev->mc_list; 978 struct dev_mc_list *dmi = dev->mc_list;
738 int i; 979 int i;
739 980
981 port->promiscuous = 0;
982
740 /* Update table */ 983 /* Update table */
741 port->num_mcast = 0; 984 port->num_mcast = 0;
742 985
@@ -758,9 +1001,10 @@ static void veth_set_multicast_list(struct net_device *dev)
758 1001
759static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1002static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
760{ 1003{
761 strncpy(info->driver, "veth", sizeof(info->driver) - 1); 1004 strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
762 info->driver[sizeof(info->driver) - 1] = '\0'; 1005 info->driver[sizeof(info->driver) - 1] = '\0';
763 strncpy(info->version, "1.0", sizeof(info->version) - 1); 1006 strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
1007 info->version[sizeof(info->version) - 1] = '\0';
764} 1008}
765 1009
766static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 1010static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -791,49 +1035,6 @@ static struct ethtool_ops ops = {
791 .get_link = veth_get_link, 1035 .get_link = veth_get_link,
792}; 1036};
793 1037
794static void veth_tx_timeout(struct net_device *dev)
795{
796 struct veth_port *port = (struct veth_port *)dev->priv;
797 struct net_device_stats *stats = &port->stats;
798 unsigned long flags;
799 int i;
800
801 stats->tx_errors++;
802
803 spin_lock_irqsave(&port->pending_gate, flags);
804
805 if (!port->pending_lpmask) {
806 spin_unlock_irqrestore(&port->pending_gate, flags);
807 return;
808 }
809
810 printk(KERN_WARNING "%s: Tx timeout! Resetting lp connections: %08x\n",
811 dev->name, port->pending_lpmask);
812
813 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
814 struct veth_lpar_connection *cnx = veth_cnx[i];
815
816 if (! (port->pending_lpmask & (1<<i)))
817 continue;
818
819 /* If we're pending on it, we must be connected to it,
820 * so we should certainly have a structure for it. */
821 BUG_ON(! cnx);
822
823 /* Theoretically we could be kicking a connection
824 * which doesn't deserve it, but in practice if we've
825 * had a Tx timeout, the pending_lpmask will have
826 * exactly one bit set - the connection causing the
827 * problem. */
828 spin_lock(&cnx->lock);
829 cnx->state |= VETH_STATE_RESET;
830 veth_kick_statemachine(cnx);
831 spin_unlock(&cnx->lock);
832 }
833
834 spin_unlock_irqrestore(&port->pending_gate, flags);
835}
836
837static struct net_device * __init veth_probe_one(int vlan, struct device *vdev) 1038static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
838{ 1039{
839 struct net_device *dev; 1040 struct net_device *dev;
@@ -848,8 +1049,9 @@ static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
848 1049
849 port = (struct veth_port *) dev->priv; 1050 port = (struct veth_port *) dev->priv;
850 1051
851 spin_lock_init(&port->pending_gate); 1052 spin_lock_init(&port->queue_lock);
852 rwlock_init(&port->mcast_gate); 1053 rwlock_init(&port->mcast_gate);
1054 port->stopped_map = 0;
853 1055
854 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { 1056 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
855 HvLpVirtualLanIndexMap map; 1057 HvLpVirtualLanIndexMap map;
@@ -882,22 +1084,24 @@ static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
882 dev->set_multicast_list = veth_set_multicast_list; 1084 dev->set_multicast_list = veth_set_multicast_list;
883 SET_ETHTOOL_OPS(dev, &ops); 1085 SET_ETHTOOL_OPS(dev, &ops);
884 1086
885 dev->watchdog_timeo = 2 * (VETH_ACKTIMEOUT * HZ / 1000000);
886 dev->tx_timeout = veth_tx_timeout;
887
888 SET_NETDEV_DEV(dev, vdev); 1087 SET_NETDEV_DEV(dev, vdev);
889 1088
890 rc = register_netdev(dev); 1089 rc = register_netdev(dev);
891 if (rc != 0) { 1090 if (rc != 0) {
892 veth_printk(KERN_ERR, 1091 veth_error("Failed registering net device for vlan%d.\n", vlan);
893 "Failed to register ethernet device for vlan %d\n",
894 vlan);
895 free_netdev(dev); 1092 free_netdev(dev);
896 return NULL; 1093 return NULL;
897 } 1094 }
898 1095
899 veth_printk(KERN_DEBUG, "%s attached to iSeries vlan %d (lpar_map=0x%04x)\n", 1096 kobject_init(&port->kobject);
900 dev->name, vlan, port->lpar_map); 1097 port->kobject.parent = &dev->class_dev.kobj;
1098 port->kobject.ktype = &veth_port_ktype;
1099 kobject_set_name(&port->kobject, "veth_port");
1100 if (0 != kobject_add(&port->kobject))
1101 veth_error("Failed adding port for %s to sysfs.\n", dev->name);
1102
1103 veth_info("%s attached to iSeries vlan %d (LPAR map = 0x%.4X)\n",
1104 dev->name, vlan, port->lpar_map);
901 1105
902 return dev; 1106 return dev;
903} 1107}
@@ -912,98 +1116,95 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
912 struct veth_lpar_connection *cnx = veth_cnx[rlp]; 1116 struct veth_lpar_connection *cnx = veth_cnx[rlp];
913 struct veth_port *port = (struct veth_port *) dev->priv; 1117 struct veth_port *port = (struct veth_port *) dev->priv;
914 HvLpEvent_Rc rc; 1118 HvLpEvent_Rc rc;
915 u32 dma_address, dma_length;
916 struct veth_msg *msg = NULL; 1119 struct veth_msg *msg = NULL;
917 int err = 0;
918 unsigned long flags; 1120 unsigned long flags;
919 1121
920 if (! cnx) { 1122 if (! cnx)
921 port->stats.tx_errors++;
922 dev_kfree_skb(skb);
923 return 0; 1123 return 0;
924 }
925 1124
926 spin_lock_irqsave(&cnx->lock, flags); 1125 spin_lock_irqsave(&cnx->lock, flags);
927 1126
928 if (! (cnx->state & VETH_STATE_READY)) 1127 if (! (cnx->state & VETH_STATE_READY))
929 goto drop; 1128 goto no_error;
930 1129
931 if ((skb->len - 14) > VETH_MAX_MTU) 1130 if ((skb->len - ETH_HLEN) > VETH_MAX_MTU)
932 goto drop; 1131 goto drop;
933 1132
934 msg = veth_stack_pop(cnx); 1133 msg = veth_stack_pop(cnx);
935 1134 if (! msg)
936 if (! msg) {
937 err = 1;
938 goto drop; 1135 goto drop;
939 }
940 1136
941 dma_length = skb->len; 1137 msg->in_use = 1;
942 dma_address = dma_map_single(port->dev, skb->data, 1138 msg->skb = skb_get(skb);
943 dma_length, DMA_TO_DEVICE); 1139
1140 msg->data.addr[0] = dma_map_single(port->dev, skb->data,
1141 skb->len, DMA_TO_DEVICE);
944 1142
945 if (dma_mapping_error(dma_address)) 1143 if (dma_mapping_error(msg->data.addr[0]))
946 goto recycle_and_drop; 1144 goto recycle_and_drop;
947 1145
948 /* Is it really necessary to check the length and address
949 * fields of the first entry here? */
950 msg->skb = skb;
951 msg->dev = port->dev; 1146 msg->dev = port->dev;
952 msg->data.addr[0] = dma_address; 1147 msg->data.len[0] = skb->len;
953 msg->data.len[0] = dma_length;
954 msg->data.eofmask = 1 << VETH_EOF_SHIFT; 1148 msg->data.eofmask = 1 << VETH_EOF_SHIFT;
955 set_bit(0, &(msg->in_use)); 1149
956 rc = veth_signaldata(cnx, VethEventTypeFrames, msg->token, &msg->data); 1150 rc = veth_signaldata(cnx, VETH_EVENT_FRAMES, msg->token, &msg->data);
957 1151
958 if (rc != HvLpEvent_Rc_Good) 1152 if (rc != HvLpEvent_Rc_Good)
959 goto recycle_and_drop; 1153 goto recycle_and_drop;
960 1154
1155 /* If the timer's not already running, start it now. */
1156 if (0 == cnx->outstanding_tx)
1157 mod_timer(&cnx->reset_timer, jiffies + cnx->reset_timeout);
1158
1159 cnx->last_contact = jiffies;
1160 cnx->outstanding_tx++;
1161
1162 if (veth_stack_is_empty(cnx))
1163 veth_stop_queues(cnx);
1164
1165 no_error:
961 spin_unlock_irqrestore(&cnx->lock, flags); 1166 spin_unlock_irqrestore(&cnx->lock, flags);
962 return 0; 1167 return 0;
963 1168
964 recycle_and_drop: 1169 recycle_and_drop:
965 msg->skb = NULL;
966 /* need to set in use to make veth_recycle_msg in case this
967 * was a mapping failure */
968 set_bit(0, &msg->in_use);
969 veth_recycle_msg(cnx, msg); 1170 veth_recycle_msg(cnx, msg);
970 drop: 1171 drop:
971 port->stats.tx_errors++;
972 dev_kfree_skb(skb);
973 spin_unlock_irqrestore(&cnx->lock, flags); 1172 spin_unlock_irqrestore(&cnx->lock, flags);
974 return err; 1173 return 1;
975} 1174}
976 1175
977static HvLpIndexMap veth_transmit_to_many(struct sk_buff *skb, 1176static void veth_transmit_to_many(struct sk_buff *skb,
978 HvLpIndexMap lpmask, 1177 HvLpIndexMap lpmask,
979 struct net_device *dev) 1178 struct net_device *dev)
980{ 1179{
981 struct veth_port *port = (struct veth_port *) dev->priv; 1180 struct veth_port *port = (struct veth_port *) dev->priv;
982 int i; 1181 int i, success, error;
983 int rc; 1182
1183 success = error = 0;
984 1184
985 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { 1185 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
986 if ((lpmask & (1 << i)) == 0) 1186 if ((lpmask & (1 << i)) == 0)
987 continue; 1187 continue;
988 1188
989 rc = veth_transmit_to_one(skb_get(skb), i, dev); 1189 if (veth_transmit_to_one(skb, i, dev))
990 if (! rc) 1190 error = 1;
991 lpmask &= ~(1<<i); 1191 else
1192 success = 1;
992 } 1193 }
993 1194
994 if (! lpmask) { 1195 if (error)
1196 port->stats.tx_errors++;
1197
1198 if (success) {
995 port->stats.tx_packets++; 1199 port->stats.tx_packets++;
996 port->stats.tx_bytes += skb->len; 1200 port->stats.tx_bytes += skb->len;
997 } 1201 }
998
999 return lpmask;
1000} 1202}
1001 1203
1002static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) 1204static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1003{ 1205{
1004 unsigned char *frame = skb->data; 1206 unsigned char *frame = skb->data;
1005 struct veth_port *port = (struct veth_port *) dev->priv; 1207 struct veth_port *port = (struct veth_port *) dev->priv;
1006 unsigned long flags;
1007 HvLpIndexMap lpmask; 1208 HvLpIndexMap lpmask;
1008 1209
1009 if (! (frame[0] & 0x01)) { 1210 if (! (frame[0] & 0x01)) {
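The rewritten transmit path above maps the skb, refuses to hand the buffer to the hypervisor if the mapping failed, and relies on veth_recycle_msg() to unmap only addresses that mapped cleanly. Reduced to its essentials (sketch; example_map_frame is illustrative and 'dev' stands in for port->dev):

static int example_map_frame(struct device *dev, struct sk_buff *skb,
                             struct veth_msg *msg)
{
        msg->data.addr[0] = dma_map_single(dev, skb->data, skb->len,
                                           DMA_TO_DEVICE);
        if (dma_mapping_error(msg->data.addr[0]))
                return -ENOMEM;         /* caller recycles the message and drops the skb */

        msg->data.len[0] = skb->len;
        msg->data.eofmask = 1 << VETH_EOF_SHIFT;
        return 0;
}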
@@ -1020,44 +1221,27 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1020 lpmask = port->lpar_map; 1221 lpmask = port->lpar_map;
1021 } 1222 }
1022 1223
1023 spin_lock_irqsave(&port->pending_gate, flags); 1224 veth_transmit_to_many(skb, lpmask, dev);
1024
1025 lpmask = veth_transmit_to_many(skb, lpmask, dev);
1026
1027 dev->trans_start = jiffies;
1028 1225
1029 if (! lpmask) { 1226 dev_kfree_skb(skb);
1030 dev_kfree_skb(skb);
1031 } else {
1032 if (port->pending_skb) {
1033 veth_error("%s: Tx while skb was pending!\n",
1034 dev->name);
1035 dev_kfree_skb(skb);
1036 spin_unlock_irqrestore(&port->pending_gate, flags);
1037 return 1;
1038 }
1039
1040 port->pending_skb = skb;
1041 port->pending_lpmask = lpmask;
1042 netif_stop_queue(dev);
1043 }
1044
1045 spin_unlock_irqrestore(&port->pending_gate, flags);
1046 1227
1047 return 0; 1228 return 0;
1048} 1229}
1049 1230
1231/* You must hold the connection's lock when you call this function. */
1050static void veth_recycle_msg(struct veth_lpar_connection *cnx, 1232static void veth_recycle_msg(struct veth_lpar_connection *cnx,
1051 struct veth_msg *msg) 1233 struct veth_msg *msg)
1052{ 1234{
1053 u32 dma_address, dma_length; 1235 u32 dma_address, dma_length;
1054 1236
1055 if (test_and_clear_bit(0, &msg->in_use)) { 1237 if (msg->in_use) {
1238 msg->in_use = 0;
1056 dma_address = msg->data.addr[0]; 1239 dma_address = msg->data.addr[0];
1057 dma_length = msg->data.len[0]; 1240 dma_length = msg->data.len[0];
1058 1241
1059 dma_unmap_single(msg->dev, dma_address, dma_length, 1242 if (!dma_mapping_error(dma_address))
1060 DMA_TO_DEVICE); 1243 dma_unmap_single(msg->dev, dma_address, dma_length,
1244 DMA_TO_DEVICE);
1061 1245
1062 if (msg->skb) { 1246 if (msg->skb) {
1063 dev_kfree_skb_any(msg->skb); 1247 dev_kfree_skb_any(msg->skb);
@@ -1066,15 +1250,16 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
1066 1250
1067 memset(&msg->data, 0, sizeof(msg->data)); 1251 memset(&msg->data, 0, sizeof(msg->data));
1068 veth_stack_push(cnx, msg); 1252 veth_stack_push(cnx, msg);
1069 } else 1253 } else if (cnx->state & VETH_STATE_OPEN) {
1070 if (cnx->state & VETH_STATE_OPEN) 1254 veth_error("Non-pending frame (# %d) acked by LPAR %d.\n",
1071 veth_error("Bogus frames ack from lpar %d (#%d)\n", 1255 cnx->remote_lp, msg->token);
1072 cnx->remote_lp, msg->token); 1256 }
1073} 1257}
1074 1258
1075static void veth_flush_pending(struct veth_lpar_connection *cnx) 1259static void veth_wake_queues(struct veth_lpar_connection *cnx)
1076{ 1260{
1077 int i; 1261 int i;
1262
1078 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { 1263 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
1079 struct net_device *dev = veth_dev[i]; 1264 struct net_device *dev = veth_dev[i];
1080 struct veth_port *port; 1265 struct veth_port *port;
@@ -1088,20 +1273,77 @@ static void veth_flush_pending(struct veth_lpar_connection *cnx)
1088 if (! (port->lpar_map & (1<<cnx->remote_lp))) 1273 if (! (port->lpar_map & (1<<cnx->remote_lp)))
1089 continue; 1274 continue;
1090 1275
1091 spin_lock_irqsave(&port->pending_gate, flags); 1276 spin_lock_irqsave(&port->queue_lock, flags);
1092 if (port->pending_skb) { 1277
1093 port->pending_lpmask = 1278 port->stopped_map &= ~(1 << cnx->remote_lp);
1094 veth_transmit_to_many(port->pending_skb, 1279
1095 port->pending_lpmask, 1280 if (0 == port->stopped_map && netif_queue_stopped(dev)) {
1096 dev); 1281 veth_debug("cnx %d: woke queue for %s.\n",
1097 if (! port->pending_lpmask) { 1282 cnx->remote_lp, dev->name);
1098 dev_kfree_skb_any(port->pending_skb); 1283 netif_wake_queue(dev);
1099 port->pending_skb = NULL; 1284 }
1100 netif_wake_queue(dev); 1285 spin_unlock_irqrestore(&port->queue_lock, flags);
1101 } 1286 }
1287}
1288
1289static void veth_stop_queues(struct veth_lpar_connection *cnx)
1290{
1291 int i;
1292
1293 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
1294 struct net_device *dev = veth_dev[i];
1295 struct veth_port *port;
1296
1297 if (! dev)
1298 continue;
1299
1300 port = (struct veth_port *)dev->priv;
1301
1302 /* If this cnx is not on the vlan for this port, continue */
1303 if (! (port->lpar_map & (1 << cnx->remote_lp)))
1304 continue;
1305
1306 spin_lock(&port->queue_lock);
1307
1308 netif_stop_queue(dev);
1309 port->stopped_map |= (1 << cnx->remote_lp);
1310
1311 veth_debug("cnx %d: stopped queue for %s, map = 0x%x.\n",
1312 cnx->remote_lp, dev->name, port->stopped_map);
1313
1314 spin_unlock(&port->queue_lock);
1315 }
1316}
1317
1318static void veth_timed_reset(unsigned long ptr)
1319{
1320 struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)ptr;
1321 unsigned long trigger_time, flags;
1322
1323 /* FIXME is it possible this fires after veth_stop_connection()?
1324 * That would reschedule the statemachine for 5 seconds and probably
1325 * execute it after the module's been unloaded. Hmm. */
1326
1327 spin_lock_irqsave(&cnx->lock, flags);
1328
1329 if (cnx->outstanding_tx > 0) {
1330 trigger_time = cnx->last_contact + cnx->reset_timeout;
1331
1332 if (trigger_time < jiffies) {
1333 cnx->state |= VETH_STATE_RESET;
1334 veth_kick_statemachine(cnx);
1335 veth_error("%d packets not acked by LPAR %d within %d "
1336 "seconds, resetting.\n",
1337 cnx->outstanding_tx, cnx->remote_lp,
1338 cnx->reset_timeout / HZ);
1339 } else {
1340 /* Reschedule the timer */
1341 trigger_time = jiffies + cnx->reset_timeout;
1342 mod_timer(&cnx->reset_timer, trigger_time);
1102 } 1343 }
1103 spin_unlock_irqrestore(&port->pending_gate, flags);
1104 } 1344 }
1345
1346 spin_unlock_irqrestore(&cnx->lock, flags);
1105} 1347}
1106 1348
1107/* 1349/*
@@ -1117,12 +1359,9 @@ static inline int veth_frame_wanted(struct veth_port *port, u64 mac_addr)
1117 if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) ) 1359 if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) )
1118 return 1; 1360 return 1;
1119 1361
1120 if (! (((char *) &mac_addr)[0] & 0x01))
1121 return 0;
1122
1123 read_lock_irqsave(&port->mcast_gate, flags); 1362 read_lock_irqsave(&port->mcast_gate, flags);
1124 1363
1125 if (port->promiscuous || port->all_mcast) { 1364 if (port->promiscuous) {
1126 wanted = 1; 1365 wanted = 1;
1127 goto out; 1366 goto out;
1128 } 1367 }
@@ -1175,21 +1414,21 @@ static void veth_flush_acks(struct veth_lpar_connection *cnx)
1175{ 1414{
1176 HvLpEvent_Rc rc; 1415 HvLpEvent_Rc rc;
1177 1416
1178 rc = veth_signaldata(cnx, VethEventTypeFramesAck, 1417 rc = veth_signaldata(cnx, VETH_EVENT_FRAMES_ACK,
1179 0, &cnx->pending_acks); 1418 0, &cnx->pending_acks);
1180 1419
1181 if (rc != HvLpEvent_Rc_Good) 1420 if (rc != HvLpEvent_Rc_Good)
1182 veth_error("Error 0x%x acking frames from lpar %d!\n", 1421 veth_error("Failed acking frames from LPAR %d, rc = %d\n",
1183 (unsigned)rc, cnx->remote_lp); 1422 cnx->remote_lp, (int)rc);
1184 1423
1185 cnx->num_pending_acks = 0; 1424 cnx->num_pending_acks = 0;
1186 memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks)); 1425 memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks));
1187} 1426}
1188 1427
1189static void veth_receive(struct veth_lpar_connection *cnx, 1428static void veth_receive(struct veth_lpar_connection *cnx,
1190 struct VethLpEvent *event) 1429 struct veth_lpevent *event)
1191{ 1430{
1192 struct VethFramesData *senddata = &event->u.frames_data; 1431 struct veth_frames_data *senddata = &event->u.frames_data;
1193 int startchunk = 0; 1432 int startchunk = 0;
1194 int nchunks; 1433 int nchunks;
1195 unsigned long flags; 1434 unsigned long flags;
@@ -1216,9 +1455,10 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1216 /* make sure that we have at least 1 EOF entry in the 1455 /* make sure that we have at least 1 EOF entry in the
1217 * remaining entries */ 1456 * remaining entries */
1218 if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) { 1457 if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) {
1219 veth_error("missing EOF frag in event " 1458 veth_error("Missing EOF fragment in event "
1220 "eofmask=0x%x startchunk=%d\n", 1459 "eofmask = 0x%x startchunk = %d\n",
1221 (unsigned) senddata->eofmask, startchunk); 1460 (unsigned)senddata->eofmask,
1461 startchunk);
1222 break; 1462 break;
1223 } 1463 }
1224 1464
@@ -1237,8 +1477,9 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1237 /* nchunks == # of chunks in this frame */ 1477 /* nchunks == # of chunks in this frame */
1238 1478
1239 if ((length - ETH_HLEN) > VETH_MAX_MTU) { 1479 if ((length - ETH_HLEN) > VETH_MAX_MTU) {
1240 veth_error("Received oversize frame from lpar %d " 1480 veth_error("Received oversize frame from LPAR %d "
1241 "(length=%d)\n", cnx->remote_lp, length); 1481 "(length = %d)\n",
1482 cnx->remote_lp, length);
1242 continue; 1483 continue;
1243 } 1484 }
1244 1485
@@ -1331,15 +1572,33 @@ static void veth_timed_ack(unsigned long ptr)
1331 1572
1332static int veth_remove(struct vio_dev *vdev) 1573static int veth_remove(struct vio_dev *vdev)
1333{ 1574{
1334 int i = vdev->unit_address; 1575 struct veth_lpar_connection *cnx;
1335 struct net_device *dev; 1576 struct net_device *dev;
1577 struct veth_port *port;
1578 int i;
1336 1579
1337 dev = veth_dev[i]; 1580 dev = veth_dev[vdev->unit_address];
1338 if (dev != NULL) { 1581
1339 veth_dev[i] = NULL; 1582 if (! dev)
1340 unregister_netdev(dev); 1583 return 0;
1341 free_netdev(dev); 1584
1585 port = netdev_priv(dev);
1586
1587 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
1588 cnx = veth_cnx[i];
1589
1590 if (cnx && (port->lpar_map & (1 << i))) {
1591 /* Drop our reference to connections on our VLAN */
1592 kobject_put(&cnx->kobject);
1593 }
1342 } 1594 }
1595
1596 veth_dev[vdev->unit_address] = NULL;
1597 kobject_del(&port->kobject);
1598 kobject_put(&port->kobject);
1599 unregister_netdev(dev);
1600 free_netdev(dev);
1601
1343 return 0; 1602 return 0;
1344} 1603}
1345 1604
@@ -1347,6 +1606,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1347{ 1606{
1348 int i = vdev->unit_address; 1607 int i = vdev->unit_address;
1349 struct net_device *dev; 1608 struct net_device *dev;
1609 struct veth_port *port;
1350 1610
1351 dev = veth_probe_one(i, &vdev->dev); 1611 dev = veth_probe_one(i, &vdev->dev);
1352 if (dev == NULL) { 1612 if (dev == NULL) {
@@ -1355,11 +1615,23 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1355 } 1615 }
1356 veth_dev[i] = dev; 1616 veth_dev[i] = dev;
1357 1617
1358 /* Start the state machine on each connection, to commence 1618 port = (struct veth_port*)netdev_priv(dev);
1359 * link negotiation */ 1619
1360 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) 1620 /* Start the state machine on each connection on this vlan. If we're
1361 if (veth_cnx[i]) 1621 * the first dev to do so this will commence link negotiation */
1362 veth_kick_statemachine(veth_cnx[i]); 1622 for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
1623 struct veth_lpar_connection *cnx;
1624
1625 if (! (port->lpar_map & (1 << i)))
1626 continue;
1627
1628 cnx = veth_cnx[i];
1629 if (!cnx)
1630 continue;
1631
1632 kobject_get(&cnx->kobject);
1633 veth_kick_statemachine(cnx);
1634 }
1363 1635
1364 return 0; 1636 return 0;
1365} 1637}
@@ -1370,12 +1642,12 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1370 */ 1642 */
1371static struct vio_device_id veth_device_table[] __devinitdata = { 1643static struct vio_device_id veth_device_table[] __devinitdata = {
1372 { "vlan", "" }, 1644 { "vlan", "" },
1373 { NULL, NULL } 1645 { "", "" }
1374}; 1646};
1375MODULE_DEVICE_TABLE(vio, veth_device_table); 1647MODULE_DEVICE_TABLE(vio, veth_device_table);
1376 1648
1377static struct vio_driver veth_driver = { 1649static struct vio_driver veth_driver = {
1378 .name = "iseries_veth", 1650 .name = DRV_NAME,
1379 .id_table = veth_device_table, 1651 .id_table = veth_device_table,
1380 .probe = veth_probe, 1652 .probe = veth_probe,
1381 .remove = veth_remove 1653 .remove = veth_remove
@@ -1388,29 +1660,29 @@ static struct vio_driver veth_driver = {
1388void __exit veth_module_cleanup(void) 1660void __exit veth_module_cleanup(void)
1389{ 1661{
1390 int i; 1662 int i;
1663 struct veth_lpar_connection *cnx;
1391 1664
1392 /* Stop the queues first to stop any new packets being sent. */ 1665 /* Disconnect our "irq" to stop events coming from the Hypervisor. */
1393 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++)
1394 if (veth_dev[i])
1395 netif_stop_queue(veth_dev[i]);
1396
1397 /* Stop the connections before we unregister the driver. This
1398 * ensures there's no skbs lying around holding the device open. */
1399 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
1400 veth_stop_connection(i);
1401
1402 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan); 1666 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
1403 1667
1404 /* Hypervisor callbacks may have scheduled more work while we 1668 /* Make sure any work queued from Hypervisor callbacks is finished. */
1405 * were stoping connections. Now that we've disconnected from
1406 * the hypervisor make sure everything's finished. */
1407 flush_scheduled_work(); 1669 flush_scheduled_work();
1408 1670
1409 vio_unregister_driver(&veth_driver); 1671 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
1672 cnx = veth_cnx[i];
1673
1674 if (!cnx)
1675 continue;
1410 1676
1411 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) 1677 /* Remove the connection from sysfs */
1412 veth_destroy_connection(i); 1678 kobject_del(&cnx->kobject);
1679 /* Drop the driver's reference to the connection */
1680 kobject_put(&cnx->kobject);
1681 }
1413 1682
1683 /* Unregister the driver, which will close all the netdevs and stop
1684 * the connections when they're no longer referenced. */
1685 vio_unregister_driver(&veth_driver);
1414} 1686}
1415module_exit(veth_module_cleanup); 1687module_exit(veth_module_cleanup);
1416 1688
@@ -1423,15 +1695,37 @@ int __init veth_module_init(void)
1423 1695
1424 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { 1696 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
1425 rc = veth_init_connection(i); 1697 rc = veth_init_connection(i);
1426 if (rc != 0) { 1698 if (rc != 0)
1427 veth_module_cleanup(); 1699 goto error;
1428 return rc;
1429 }
1430 } 1700 }
1431 1701
1432 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan, 1702 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
1433 &veth_handle_event); 1703 &veth_handle_event);
1434 1704
1435 return vio_register_driver(&veth_driver); 1705 rc = vio_register_driver(&veth_driver);
1706 if (rc != 0)
1707 goto error;
1708
1709 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
1710 struct kobject *kobj;
1711
1712 if (!veth_cnx[i])
1713 continue;
1714
1715 kobj = &veth_cnx[i]->kobject;
1716 kobj->parent = &veth_driver.driver.kobj;
 1717 /* If the add fails, complain but otherwise continue */
1718 if (0 != kobject_add(kobj))
1719 veth_error("cnx %d: Failed adding to sysfs.\n", i);
1720 }
1721
1722 return 0;
1723
1724error:
1725 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
1726 veth_destroy_connection(veth_cnx[i]);
1727 }
1728
1729 return rc;
1436} 1730}
1437module_init(veth_module_init); 1731module_init(veth_module_init);
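
The iseries_veth hunks above rework connection teardown around kobject reference counting: veth_probe() takes a reference on every connection used by the new port, veth_remove() drops those references again, and module exit only removes each connection from sysfs and drops the driver's own reference, leaving actual destruction to the kobject release callback (not visible in this hunk). A minimal sketch of that pattern, with hypothetical names and assuming a release hook wired up through a kobj_type:

/* Sketch only -- hypothetical names, not the driver's code. */
struct conn {
	struct kobject kobject;
	/* ... per-connection state ... */
};

static void conn_release(struct kobject *kobj)
{
	/* Runs once the last kobject_put() drops the refcount to zero. */
	kfree(container_of(kobj, struct conn, kobject));
}

static struct kobj_type conn_ktype = {
	.release	= conn_release,
};

/* probe:       kobject_get(&cnx->kobject)  -- each user pins the connection */
/* remove:      kobject_put(&cnx->kobject)  -- a user drops its reference    */
/* module exit: kobject_del() + kobject_put() -- unpublish, drop owner's ref */
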
diff --git a/drivers/net/iseries_veth.h b/drivers/net/iseries_veth.h
deleted file mode 100644
index d9370f79b83e..000000000000
--- a/drivers/net/iseries_veth.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/* File veth.h created by Kyle A. Lucke on Mon Aug 7 2000. */
2
3#ifndef _ISERIES_VETH_H
4#define _ISERIES_VETH_H
5
6#define VethEventTypeCap (0)
7#define VethEventTypeFrames (1)
8#define VethEventTypeMonitor (2)
9#define VethEventTypeFramesAck (3)
10
11#define VETH_MAX_ACKS_PER_MSG (20)
12#define VETH_MAX_FRAMES_PER_MSG (6)
13
14struct VethFramesData {
15 u32 addr[VETH_MAX_FRAMES_PER_MSG];
16 u16 len[VETH_MAX_FRAMES_PER_MSG];
17 u32 eofmask;
18};
19#define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG)
20
21struct VethFramesAckData {
22 u16 token[VETH_MAX_ACKS_PER_MSG];
23};
24
25struct VethCapData {
26 u8 caps_version;
27 u8 rsvd1;
28 u16 num_buffers;
29 u16 ack_threshold;
30 u16 rsvd2;
31 u32 ack_timeout;
32 u32 rsvd3;
33 u64 rsvd4[3];
34};
35
36struct VethLpEvent {
37 struct HvLpEvent base_event;
38 union {
39 struct VethCapData caps_data;
40 struct VethFramesData frames_data;
41 struct VethFramesAckData frames_ack_data;
42 } u;
43
44};
45
46#endif /* _ISERIES_VETH_H */
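
The deleted header also pins down the EOF-mask layout that veth_receive() relies on: with VETH_MAX_FRAMES_PER_MSG = 6, VETH_EOF_SHIFT works out to 32 - 6 = 26, which puts chunk n's EOF flag at bit n + 26 of eofmask. The check quoted in the veth_receive() hunk above then just shifts the mask down and tests whether any EOF bit is left for the chunks not yet consumed; a small sketch under those constants:

/* Sketch of the eofmask test used by veth_receive() above. */
#define VETH_MAX_FRAMES_PER_MSG	6
#define VETH_EOF_SHIFT		(32 - VETH_MAX_FRAMES_PER_MSG)	/* 26 */

static int remaining_chunks_have_eof(u32 eofmask, int startchunk)
{
	/* Non-zero iff some chunk >= startchunk is flagged as end-of-frame. */
	return (eofmask >> (startchunk + VETH_EOF_SHIFT)) != 0;
}
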
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index fb6b232069d6..7c9dbc8c9423 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -58,11 +58,10 @@
58 58
59#define INT_CAUSE_UNMASK_ALL 0x0007ffff 59#define INT_CAUSE_UNMASK_ALL 0x0007ffff
60#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff 60#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
61#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
62#define INT_CAUSE_MASK_ALL 0x00000000 61#define INT_CAUSE_MASK_ALL 0x00000000
62#define INT_CAUSE_MASK_ALL_EXT 0x00000000
63#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL 63#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
64#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT 64#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
65#endif
66 65
67#ifdef MV643XX_CHECKSUM_OFFLOAD_TX 66#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
68#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) 67#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
@@ -259,14 +258,13 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev)
259static void mv643xx_eth_set_rx_mode(struct net_device *dev) 258static void mv643xx_eth_set_rx_mode(struct net_device *dev)
260{ 259{
261 struct mv643xx_private *mp = netdev_priv(dev); 260 struct mv643xx_private *mp = netdev_priv(dev);
262 u32 config_reg;
263 261
264 config_reg = ethernet_get_config_reg(mp->port_num);
265 if (dev->flags & IFF_PROMISC) 262 if (dev->flags & IFF_PROMISC)
266 config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 263 mp->port_config |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
267 else 264 else
268 config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; 265 mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
269 ethernet_set_config_reg(mp->port_num, config_reg); 266
267 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
270} 268}
271 269
272/* 270/*
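
Instead of reading the hardware register back on every change, the reworked mv643xx_eth_set_rx_mode() keeps the port configuration cached in mp->port_config, flips the promiscuous bit there and writes the whole word out with one mv_write(). (Note that the removed helpers further down were mismatched anyway: ethernet_get_config_reg() read the EXTEND register while ethernet_set_config_reg() wrote the base config register.) A sketch of the shadow-register idiom, with a hypothetical helper:

/* Hypothetical helper illustrating the cached-register approach. */
static void port_config_update(struct mv643xx_private *mp, u32 set, u32 clear)
{
	mp->port_config = (mp->port_config & ~clear) | set;
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
}
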
@@ -369,15 +367,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
369 367
370 dev_kfree_skb_irq(pkt_info.return_info); 368 dev_kfree_skb_irq(pkt_info.return_info);
371 released = 0; 369 released = 0;
372
373 /*
374 * Decrement the number of outstanding skbs counter on
375 * the TX queue.
376 */
377 if (mp->tx_ring_skbs == 0)
378 panic("ERROR - TX outstanding SKBs"
379 " counter is corrupted");
380 mp->tx_ring_skbs--;
381 } else 370 } else
382 dma_unmap_page(NULL, pkt_info.buf_ptr, 371 dma_unmap_page(NULL, pkt_info.buf_ptr,
383 pkt_info.byte_cnt, DMA_TO_DEVICE); 372 pkt_info.byte_cnt, DMA_TO_DEVICE);
@@ -412,15 +401,13 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
412 struct pkt_info pkt_info; 401 struct pkt_info pkt_info;
413 402
414#ifdef MV643XX_NAPI 403#ifdef MV643XX_NAPI
415 while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) { 404 while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
416#else 405#else
417 while (eth_port_receive(mp, &pkt_info) == ETH_OK) { 406 while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
418#endif 407#endif
419 mp->rx_ring_skbs--; 408 mp->rx_ring_skbs--;
420 received_packets++; 409 received_packets++;
421#ifdef MV643XX_NAPI 410
422 budget--;
423#endif
424 /* Update statistics. Note byte count includes 4 byte CRC count */ 411 /* Update statistics. Note byte count includes 4 byte CRC count */
425 stats->rx_packets++; 412 stats->rx_packets++;
426 stats->rx_bytes += pkt_info.byte_cnt; 413 stats->rx_bytes += pkt_info.byte_cnt;
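
Folding the quota test into the loop condition also fixes the ordering: with budget-- > 0 evaluated first, short-circuiting guarantees that no descriptor is pulled off the ring once the NAPI budget is spent, whereas the old code received a packet and only then decremented the budget in a separate #ifdef block. Condensed:

/* Sketch: budget is checked (and consumed) before a descriptor is fetched. */
while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
	mp->rx_ring_skbs--;
	received_packets++;
	/* ... statistics and netif_receive_skb() as before ... */
}
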
@@ -1044,9 +1031,6 @@ static void mv643xx_tx(struct net_device *dev)
1044 DMA_TO_DEVICE); 1031 DMA_TO_DEVICE);
1045 1032
1046 dev_kfree_skb_irq(pkt_info.return_info); 1033 dev_kfree_skb_irq(pkt_info.return_info);
1047
1048 if (mp->tx_ring_skbs)
1049 mp->tx_ring_skbs--;
1050 } else 1034 } else
1051 dma_unmap_page(NULL, pkt_info.buf_ptr, 1035 dma_unmap_page(NULL, pkt_info.buf_ptr,
1052 pkt_info.byte_cnt, DMA_TO_DEVICE); 1036 pkt_info.byte_cnt, DMA_TO_DEVICE);
@@ -1189,7 +1173,6 @@ linear:
1189 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, 1173 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
1190 DMA_TO_DEVICE); 1174 DMA_TO_DEVICE);
1191 pkt_info.return_info = skb; 1175 pkt_info.return_info = skb;
1192 mp->tx_ring_skbs++;
1193 status = eth_port_send(mp, &pkt_info); 1176 status = eth_port_send(mp, &pkt_info);
1194 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) 1177 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
1195 printk(KERN_ERR "%s: Error on transmitting packet\n", 1178 printk(KERN_ERR "%s: Error on transmitting packet\n",
@@ -1274,7 +1257,6 @@ linear:
1274 pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | 1257 pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
1275 ETH_TX_LAST_DESC; 1258 ETH_TX_LAST_DESC;
1276 pkt_info.return_info = skb; 1259 pkt_info.return_info = skb;
1277 mp->tx_ring_skbs++;
1278 } else { 1260 } else {
1279 pkt_info.return_info = 0; 1261 pkt_info.return_info = 0;
1280 } 1262 }
@@ -1311,7 +1293,6 @@ linear:
1311 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, 1293 pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len,
1312 DMA_TO_DEVICE); 1294 DMA_TO_DEVICE);
1313 pkt_info.return_info = skb; 1295 pkt_info.return_info = skb;
1314 mp->tx_ring_skbs++;
1315 status = eth_port_send(mp, &pkt_info); 1296 status = eth_port_send(mp, &pkt_info);
1316 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) 1297 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
1317 printk(KERN_ERR "%s: Error on transmitting packet\n", 1298 printk(KERN_ERR "%s: Error on transmitting packet\n",
@@ -1356,6 +1337,43 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1356 return &mp->stats; 1337 return &mp->stats;
1357} 1338}
1358 1339
1340#ifdef CONFIG_NET_POLL_CONTROLLER
1341static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
1342{
1343 int port_num = mp->port_num;
1344 unsigned long flags;
1345
1346 spin_lock_irqsave(&mp->lock, flags);
1347 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1348 INT_CAUSE_UNMASK_ALL);
1349 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1350 INT_CAUSE_UNMASK_ALL_EXT);
1351 spin_unlock_irqrestore(&mp->lock, flags);
1352}
1353
1354static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
1355{
1356 int port_num = mp->port_num;
1357 unsigned long flags;
1358
1359 spin_lock_irqsave(&mp->lock, flags);
1360 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1361 INT_CAUSE_MASK_ALL);
1362 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1363 INT_CAUSE_MASK_ALL_EXT);
1364 spin_unlock_irqrestore(&mp->lock, flags);
1365}
1366
1367static void mv643xx_netpoll(struct net_device *netdev)
1368{
1369 struct mv643xx_private *mp = netdev_priv(netdev);
1370
1371 mv643xx_disable_irq(mp);
1372 mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
1373 mv643xx_enable_irq(mp);
1374}
1375#endif
1376
1359/*/ 1377/*/
1360 * mv643xx_eth_probe 1378 * mv643xx_eth_probe
1361 * 1379 *
@@ -1406,6 +1424,10 @@ static int mv643xx_eth_probe(struct device *ddev)
1406 dev->weight = 64; 1424 dev->weight = 64;
1407#endif 1425#endif
1408 1426
1427#ifdef CONFIG_NET_POLL_CONTROLLER
1428 dev->poll_controller = mv643xx_netpoll;
1429#endif
1430
1409 dev->watchdog_timeo = 2 * HZ; 1431 dev->watchdog_timeo = 2 * HZ;
1410 dev->tx_queue_len = mp->tx_ring_size; 1432 dev->tx_queue_len = mp->tx_ring_size;
1411 dev->base_addr = 0; 1433 dev->base_addr = 0;
@@ -1883,6 +1905,9 @@ static void eth_port_start(struct mv643xx_private *mp)
1883 /* Enable port Rx. */ 1905 /* Enable port Rx. */
1884 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 1906 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
1885 mp->port_rx_queue_command); 1907 mp->port_rx_queue_command);
1908
1909 /* Disable port bandwidth limits by clearing MTU register */
1910 mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
1886} 1911}
1887 1912
1888/* 1913/*
@@ -2292,34 +2317,6 @@ static void eth_port_reset(unsigned int port_num)
2292 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2317 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2293} 2318}
2294 2319
2295/*
2296 * ethernet_set_config_reg - Set specified bits in configuration register.
2297 *
2298 * DESCRIPTION:
2299 * This function sets specified bits in the given ethernet
2300 * configuration register.
2301 *
2302 * INPUT:
2303 * unsigned int eth_port_num Ethernet Port number.
2304 * unsigned int value 32 bit value.
2305 *
2306 * OUTPUT:
2307 * The set bits in the value parameter are set in the configuration
2308 * register.
2309 *
2310 * RETURN:
2311 * None.
2312 *
2313 */
2314static void ethernet_set_config_reg(unsigned int eth_port_num,
2315 unsigned int value)
2316{
2317 unsigned int eth_config_reg;
2318
2319 eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num));
2320 eth_config_reg |= value;
2321 mv_write(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num), eth_config_reg);
2322}
2323 2320
2324static int eth_port_autoneg_supported(unsigned int eth_port_num) 2321static int eth_port_autoneg_supported(unsigned int eth_port_num)
2325{ 2322{
@@ -2346,31 +2343,6 @@ static int eth_port_link_is_up(unsigned int eth_port_num)
2346} 2343}
2347 2344
2348/* 2345/*
2349 * ethernet_get_config_reg - Get the port configuration register
2350 *
2351 * DESCRIPTION:
2352 * This function returns the configuration register value of the given
2353 * ethernet port.
2354 *
2355 * INPUT:
2356 * unsigned int eth_port_num Ethernet Port number.
2357 *
2358 * OUTPUT:
2359 * None.
2360 *
2361 * RETURN:
2362 * Port configuration register value.
2363 */
2364static unsigned int ethernet_get_config_reg(unsigned int eth_port_num)
2365{
2366 unsigned int eth_config_reg;
2367
2368 eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_EXTEND_REG
2369 (eth_port_num));
2370 return eth_config_reg;
2371}
2372
2373/*
2374 * eth_port_read_smi_reg - Read PHY registers 2346 * eth_port_read_smi_reg - Read PHY registers
2375 * 2347 *
2376 * DESCRIPTION: 2348 * DESCRIPTION:
@@ -2528,6 +2500,9 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2528 return ETH_ERROR; 2500 return ETH_ERROR;
2529 } 2501 }
2530 2502
2503 mp->tx_ring_skbs++;
2504 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2505
2531 /* Get the Tx Desc ring indexes */ 2506 /* Get the Tx Desc ring indexes */
2532 tx_desc_curr = mp->tx_curr_desc_q; 2507 tx_desc_curr = mp->tx_curr_desc_q;
2533 tx_desc_used = mp->tx_used_desc_q; 2508 tx_desc_used = mp->tx_used_desc_q;
@@ -2594,6 +2569,9 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
2594 if (mp->tx_resource_err) 2569 if (mp->tx_resource_err)
2595 return ETH_QUEUE_FULL; 2570 return ETH_QUEUE_FULL;
2596 2571
2572 mp->tx_ring_skbs++;
2573 BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
2574
2597 /* Get the Tx Desc ring indexes */ 2575 /* Get the Tx Desc ring indexes */
2598 tx_desc_curr = mp->tx_curr_desc_q; 2576 tx_desc_curr = mp->tx_curr_desc_q;
2599 tx_desc_used = mp->tx_used_desc_q; 2577 tx_desc_used = mp->tx_used_desc_q;
@@ -2694,6 +2672,9 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
2694 /* Any Tx return cancels the Tx resource error status */ 2672 /* Any Tx return cancels the Tx resource error status */
2695 mp->tx_resource_err = 0; 2673 mp->tx_resource_err = 0;
2696 2674
2675 BUG_ON(mp->tx_ring_skbs == 0);
2676 mp->tx_ring_skbs--;
2677
2697 return ETH_OK; 2678 return ETH_OK;
2698} 2679}
2699 2680
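
Across these mv643xx hunks the tx_ring_skbs bookkeeping moves from the transmit and reclaim call sites into eth_port_send() and eth_tx_return_desc(), the two functions that actually touch the descriptor ring, and the old recovery code (a panic() on one path, a silent clamp on the other) becomes a pair of hard invariants:

/* Sketch of the invariant now enforced at the ring boundaries. */
/* eth_port_send() -- a descriptor is queued: */
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);	/* never exceeds the ring */

/* eth_tx_return_desc() -- a descriptor is completed: */
BUG_ON(mp->tx_ring_skbs == 0);			/* never returns more than queued */
mp->tx_ring_skbs--;
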
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 7678b59c2952..bcfda5192da0 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -408,10 +408,6 @@ static void eth_port_init(struct mv643xx_private *mp);
408static void eth_port_reset(unsigned int eth_port_num); 408static void eth_port_reset(unsigned int eth_port_num);
409static void eth_port_start(struct mv643xx_private *mp); 409static void eth_port_start(struct mv643xx_private *mp);
410 410
411static void ethernet_set_config_reg(unsigned int eth_port_num,
412 unsigned int value);
413static unsigned int ethernet_get_config_reg(unsigned int eth_port_num);
414
415/* Port MAC address routines */ 411/* Port MAC address routines */
416static void eth_port_uc_addr_set(unsigned int eth_port_num, 412static void eth_port_uc_addr_set(unsigned int eth_port_num,
417 unsigned char *p_addr); 413 unsigned char *p_addr);
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index 6c92f0969015..73501d846588 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -26,9 +26,6 @@
26 Updated to EISA probing API 5/2003 by Marc Zyngier. 26 Updated to EISA probing API 5/2003 by Marc Zyngier.
27*/ 27*/
28 28
29static const char *version =
30 "ne3210.c: Driver revision v0.03, 30/09/98\n";
31
32#include <linux/module.h> 29#include <linux/module.h>
33#include <linux/eisa.h> 30#include <linux/eisa.h>
34#include <linux/kernel.h> 31#include <linux/kernel.h>
@@ -197,7 +194,7 @@ static int __init ne3210_eisa_probe (struct device *device)
197 ei_status.priv = phys_mem; 194 ei_status.priv = phys_mem;
198 195
199 if (ei_debug > 0) 196 if (ei_debug > 0)
200 printk(version); 197 printk("ne3210 loaded.\n");
201 198
202 ei_status.reset_8390 = &ne3210_reset_8390; 199 ei_status.reset_8390 = &ne3210_reset_8390;
203 ei_status.block_input = &ne3210_block_input; 200 ei_status.block_input = &ne3210_block_input;
@@ -360,12 +357,12 @@ MODULE_DESCRIPTION("NE3210 EISA Ethernet driver");
360MODULE_LICENSE("GPL"); 357MODULE_LICENSE("GPL");
361MODULE_DEVICE_TABLE(eisa, ne3210_ids); 358MODULE_DEVICE_TABLE(eisa, ne3210_ids);
362 359
363int ne3210_init(void) 360static int ne3210_init(void)
364{ 361{
365 return eisa_driver_register (&ne3210_eisa_driver); 362 return eisa_driver_register (&ne3210_eisa_driver);
366} 363}
367 364
368void ne3210_cleanup(void) 365static void ne3210_cleanup(void)
369{ 366{
370 eisa_driver_unregister (&ne3210_eisa_driver); 367 eisa_driver_unregister (&ne3210_eisa_driver);
371} 368}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6a2fe3583478..14f4de1a8180 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -6,7 +6,7 @@ menu "PHY device support"
6 6
7config PHYLIB 7config PHYLIB
8 tristate "PHY Device support and infrastructure" 8 tristate "PHY Device support and infrastructure"
9 depends on NET_ETHERNET 9 depends on NET_ETHERNET && (BROKEN || !ARCH_S390)
10 help 10 help
11 Ethernet controllers are usually attached to PHY 11 Ethernet controllers are usually attached to PHY
12 devices. This option provides infrastructure for 12 devices. This option provides infrastructure for
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 41f62c0c5fcb..90630672703d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -128,7 +128,7 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
128/* Suspend and resume. Copied from platform_suspend and 128/* Suspend and resume. Copied from platform_suspend and
129 * platform_resume 129 * platform_resume
130 */ 130 */
131static int mdio_bus_suspend(struct device * dev, u32 state) 131static int mdio_bus_suspend(struct device * dev, pm_message_t state)
132{ 132{
133 int ret = 0; 133 int ret = 0;
134 struct device_driver *drv = dev->driver; 134 struct device_driver *drv = dev->driver;
@@ -170,7 +170,7 @@ int __init mdio_bus_init(void)
170 return bus_register(&mdio_bus_type); 170 return bus_register(&mdio_bus_type);
171} 171}
172 172
173void __exit mdio_bus_exit(void) 173void mdio_bus_exit(void)
174{ 174{
175 bus_unregister(&mdio_bus_type); 175 bus_unregister(&mdio_bus_type);
176} 176}
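
mdio_bus_suspend() switches to the pm_message_t type that bus- and driver-level suspend hooks take in this kernel generation, rather than a bare u32 state value. Illustrative signature only:

/* Illustrative stub -- not part of the patch. */
static int example_bus_suspend(struct device *dev, pm_message_t state)
{
	/* typically forwards to dev->driver->suspend(dev, state) if set */
	return 0;
}
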
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index a32668e88e09..bb71638a7c44 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1657,7 +1657,6 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1657 skb->dev = ppp->dev; 1657 skb->dev = ppp->dev;
1658 skb->protocol = htons(npindex_to_ethertype[npi]); 1658 skb->protocol = htons(npindex_to_ethertype[npi]);
1659 skb->mac.raw = skb->data; 1659 skb->mac.raw = skb->data;
1660 skb->input_dev = ppp->dev;
1661 netif_rx(skb); 1660 netif_rx(skb);
1662 ppp->dev->last_rx = jiffies; 1661 ppp->dev->last_rx = jiffies;
1663 } 1662 }
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index ce1a9bf7b9a7..82f236cc3b9b 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -377,7 +377,8 @@ abort_kfree:
377 ***********************************************************************/ 377 ***********************************************************************/
378static int pppoe_rcv(struct sk_buff *skb, 378static int pppoe_rcv(struct sk_buff *skb,
379 struct net_device *dev, 379 struct net_device *dev,
380 struct packet_type *pt) 380 struct packet_type *pt,
381 struct net_device *orig_dev)
381 382
382{ 383{
383 struct pppoe_hdr *ph; 384 struct pppoe_hdr *ph;
@@ -426,7 +427,8 @@ out:
426 ***********************************************************************/ 427 ***********************************************************************/
427static int pppoe_disc_rcv(struct sk_buff *skb, 428static int pppoe_disc_rcv(struct sk_buff *skb,
428 struct net_device *dev, 429 struct net_device *dev,
429 struct packet_type *pt) 430 struct packet_type *pt,
431 struct net_device *orig_dev)
430 432
431{ 433{
432 struct pppoe_hdr *ph; 434 struct pppoe_hdr *ph;
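
Both PPPoE receive hooks gain the extra orig_dev argument that struct packet_type handlers now receive, apparently in step with dropping skb->input_dev in the ppp_generic hunk above: the original ingress device is handed to the handler instead of being stashed in the skb. An illustrative stub with the new prototype:

/* Illustrative stub -- not part of the patch. */
static int example_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt,
			   struct net_device *orig_dev)
{
	/* orig_dev is the device the frame originally arrived on; dev may
	 * already have been rewritten (e.g. to a bonding master). */
	kfree_skb(skb);
	return NET_RX_DROP;
}
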
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 12a86f96d973..ec1a18d189a1 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1429,6 +1429,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
1429{ 1429{
1430 struct rr_private *rrpriv = netdev_priv(dev); 1430 struct rr_private *rrpriv = netdev_priv(dev);
1431 struct rr_regs __iomem *regs = rrpriv->regs; 1431 struct rr_regs __iomem *regs = rrpriv->regs;
1432 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
1432 struct ring_ctrl *txctrl; 1433 struct ring_ctrl *txctrl;
1433 unsigned long flags; 1434 unsigned long flags;
1434 u32 index, len = skb->len; 1435 u32 index, len = skb->len;
@@ -1460,7 +1461,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
1460 ifield = (u32 *)skb_push(skb, 8); 1461 ifield = (u32 *)skb_push(skb, 8);
1461 1462
1462 ifield[0] = 0; 1463 ifield[0] = 0;
1463 ifield[1] = skb->private.ifield; 1464 ifield[1] = hcb->ifield;
1464 1465
1465 /* 1466 /*
1466 * We don't need the lock before we are actually going to start 1467 * We don't need the lock before we are actually going to start
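
rr_start_xmit() now reads the HIPPI ifield out of the skb's control buffer rather than a dedicated sk_buff member: per-protocol scratch data lives in skb->cb[], accessed through a cast to the protocol's cb structure (struct hippi_cb here). A sketch of the idiom, with hypothetical accessors:

/* Sketch of the skb->cb idiom; the struct mirrors the HIPPI cb layout. */
struct hippi_cb {
	__u32 ifield;
};

static inline void stash_ifield(struct sk_buff *skb, u32 ifield)
{
	((struct hippi_cb *)skb->cb)->ifield = ifield;
}

static inline u32 fetch_ifield(const struct sk_buff *skb)
{
	return ((const struct hippi_cb *)skb->cb)->ifield;
}
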
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 2234a8f05eb2..7cefe5507b9e 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -1,5 +1,5 @@
1/************************************************************************ 1/************************************************************************
2 * regs.h: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC 2 * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc. 3 * Copyright(c) 2002-2005 Neterion Inc.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
@@ -713,13 +713,16 @@ typedef struct _XENA_dev_config {
713 u64 mc_err_reg; 713 u64 mc_err_reg;
714#define MC_ERR_REG_ECC_DB_ERR_L BIT(14) 714#define MC_ERR_REG_ECC_DB_ERR_L BIT(14)
715#define MC_ERR_REG_ECC_DB_ERR_U BIT(15) 715#define MC_ERR_REG_ECC_DB_ERR_U BIT(15)
716#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 BIT(18)
717#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 BIT(20)
716#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22) 718#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
717#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23) 719#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
718#define MC_ERR_REG_SM_ERR BIT(31) 720#define MC_ERR_REG_SM_ERR BIT(31)
719#define MC_ERR_REG_ECC_ALL_SNG (BIT(6) | \ 721#define MC_ERR_REG_ECC_ALL_SNG (BIT(2) | BIT(3) | BIT(4) | BIT(5) |\
720 BIT(7) | BIT(17) | BIT(19)) 722 BIT(6) | BIT(7) | BIT(17) | BIT(19))
721#define MC_ERR_REG_ECC_ALL_DBL (BIT(14) | \ 723#define MC_ERR_REG_ECC_ALL_DBL (BIT(10) | BIT(11) | BIT(12) |\
722 BIT(15) | BIT(18) | BIT(20)) 724 BIT(13) | BIT(14) | BIT(15) |\
725 BIT(18) | BIT(20))
723 u64 mc_err_mask; 726 u64 mc_err_mask;
724 u64 mc_err_alarm; 727 u64 mc_err_alarm;
725 728
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 7ca78228b104..c829e6a2e8a6 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1,5 +1,5 @@
1/************************************************************************ 1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc. 3 * Copyright(c) 2002-2005 Neterion Inc.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
@@ -28,7 +28,7 @@
28 * explaination of all the variables. 28 * explaination of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver. 30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This 31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8. 32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. 33 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
@@ -67,7 +67,7 @@
67 67
68/* S2io Driver name & version. */ 68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "Neterion"; 69static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 2.0.3.1"; 70static char s2io_driver_version[] = "Version 2.0.8.1";
71 71
72static inline int RXD_IS_UP2DT(RxD_t *rxdp) 72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{ 73{
@@ -354,7 +354,7 @@ static int init_shared_mem(struct s2io_nic *nic)
354 int lst_size, lst_per_page; 354 int lst_size, lst_per_page;
355 struct net_device *dev = nic->dev; 355 struct net_device *dev = nic->dev;
356#ifdef CONFIG_2BUFF_MODE 356#ifdef CONFIG_2BUFF_MODE
357 u64 tmp; 357 unsigned long tmp;
358 buffAdd_t *ba; 358 buffAdd_t *ba;
359#endif 359#endif
360 360
@@ -404,7 +404,7 @@ static int init_shared_mem(struct s2io_nic *nic)
404 config->tx_cfg[i].fifo_len - 1; 404 config->tx_cfg[i].fifo_len - 1;
405 mac_control->fifos[i].fifo_no = i; 405 mac_control->fifos[i].fifo_no = i;
406 mac_control->fifos[i].nic = nic; 406 mac_control->fifos[i].nic = nic;
407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS; 407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
408 408
409 for (j = 0; j < page_num; j++) { 409 for (j = 0; j < page_num; j++) {
410 int k = 0; 410 int k = 0;
@@ -418,6 +418,26 @@ static int init_shared_mem(struct s2io_nic *nic)
418 DBG_PRINT(ERR_DBG, "failed for TxDL\n"); 418 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
419 return -ENOMEM; 419 return -ENOMEM;
420 } 420 }
421 /* If we got a zero DMA address(can happen on
422 * certain platforms like PPC), reallocate.
423 * Store virtual address of page we don't want,
424 * to be freed later.
425 */
426 if (!tmp_p) {
427 mac_control->zerodma_virt_addr = tmp_v;
428 DBG_PRINT(INIT_DBG,
429 "%s: Zero DMA address for TxDL. ", dev->name);
430 DBG_PRINT(INIT_DBG,
431 "Virtual address %llx\n", (u64)tmp_v);
432 tmp_v = pci_alloc_consistent(nic->pdev,
433 PAGE_SIZE, &tmp_p);
434 if (!tmp_v) {
435 DBG_PRINT(ERR_DBG,
436 "pci_alloc_consistent ");
437 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
438 return -ENOMEM;
439 }
440 }
421 while (k < lst_per_page) { 441 while (k < lst_per_page) {
422 int l = (j * lst_per_page) + k; 442 int l = (j * lst_per_page) + k;
423 if (l == config->tx_cfg[i].fifo_len) 443 if (l == config->tx_cfg[i].fifo_len)
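
init_shared_mem() now copes with pci_alloc_consistent() handing back a mapping whose bus address is 0 (which the comment says can happen on platforms such as PPC): the driver avoids using that page, parks its virtual address in zerodma_virt_addr and retries the allocation, and free_shared_mem() releases the parked page later (see the matching hunk below). Condensed:

/* Sketch of the retry path added above. */
tmp_v = pci_alloc_consistent(nic->pdev, PAGE_SIZE, &tmp_p);
if (tmp_v && !tmp_p) {
	/* Valid mapping but DMA address 0 -- keep it aside and try again. */
	mac_control->zerodma_virt_addr = tmp_v;
	tmp_v = pci_alloc_consistent(nic->pdev, PAGE_SIZE, &tmp_p);
}
if (!tmp_v)
	return -ENOMEM;
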
@@ -542,18 +562,18 @@ static int init_shared_mem(struct s2io_nic *nic)
542 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 562 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
543 if (!ba->ba_0_org) 563 if (!ba->ba_0_org)
544 return -ENOMEM; 564 return -ENOMEM;
545 tmp = (u64) ba->ba_0_org; 565 tmp = (unsigned long) ba->ba_0_org;
546 tmp += ALIGN_SIZE; 566 tmp += ALIGN_SIZE;
547 tmp &= ~((u64) ALIGN_SIZE); 567 tmp &= ~((unsigned long) ALIGN_SIZE);
548 ba->ba_0 = (void *) tmp; 568 ba->ba_0 = (void *) tmp;
549 569
550 ba->ba_1_org = (void *) kmalloc 570 ba->ba_1_org = (void *) kmalloc
551 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); 571 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
552 if (!ba->ba_1_org) 572 if (!ba->ba_1_org)
553 return -ENOMEM; 573 return -ENOMEM;
554 tmp = (u64) ba->ba_1_org; 574 tmp = (unsigned long) ba->ba_1_org;
555 tmp += ALIGN_SIZE; 575 tmp += ALIGN_SIZE;
556 tmp &= ~((u64) ALIGN_SIZE); 576 tmp &= ~((unsigned long) ALIGN_SIZE);
557 ba->ba_1 = (void *) tmp; 577 ba->ba_1 = (void *) tmp;
558 k++; 578 k++;
559 } 579 }
@@ -600,7 +620,7 @@ static void free_shared_mem(struct s2io_nic *nic)
600 mac_info_t *mac_control; 620 mac_info_t *mac_control;
601 struct config_param *config; 621 struct config_param *config;
602 int lst_size, lst_per_page; 622 int lst_size, lst_per_page;
603 623 struct net_device *dev = nic->dev;
604 624
605 if (!nic) 625 if (!nic)
606 return; 626 return;
@@ -616,9 +636,10 @@ static void free_shared_mem(struct s2io_nic *nic)
616 lst_per_page); 636 lst_per_page);
617 for (j = 0; j < page_num; j++) { 637 for (j = 0; j < page_num; j++) {
618 int mem_blks = (j * lst_per_page); 638 int mem_blks = (j * lst_per_page);
619 if ((!mac_control->fifos[i].list_info) || 639 if (!mac_control->fifos[i].list_info)
620 (!mac_control->fifos[i].list_info[mem_blks]. 640 return;
621 list_virt_addr)) 641 if (!mac_control->fifos[i].list_info[mem_blks].
642 list_virt_addr)
622 break; 643 break;
623 pci_free_consistent(nic->pdev, PAGE_SIZE, 644 pci_free_consistent(nic->pdev, PAGE_SIZE,
624 mac_control->fifos[i]. 645 mac_control->fifos[i].
@@ -628,6 +649,18 @@ static void free_shared_mem(struct s2io_nic *nic)
628 list_info[mem_blks]. 649 list_info[mem_blks].
629 list_phy_addr); 650 list_phy_addr);
630 } 651 }
652 /* If we got a zero DMA address during allocation,
653 * free the page now
654 */
655 if (mac_control->zerodma_virt_addr) {
656 pci_free_consistent(nic->pdev, PAGE_SIZE,
657 mac_control->zerodma_virt_addr,
658 (dma_addr_t)0);
659 DBG_PRINT(INIT_DBG,
660 "%s: Freeing TxDL with zero DMA addr. ", dev->name);
661 DBG_PRINT(INIT_DBG, "Virtual address %llx\n",
662 (u64)(mac_control->zerodma_virt_addr));
663 }
631 kfree(mac_control->fifos[i].list_info); 664 kfree(mac_control->fifos[i].list_info);
632 } 665 }
633 666
@@ -686,7 +719,7 @@ static void free_shared_mem(struct s2io_nic *nic)
686 719
687static int s2io_verify_pci_mode(nic_t *nic) 720static int s2io_verify_pci_mode(nic_t *nic)
688{ 721{
689 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 722 XENA_dev_config_t __iomem *bar0 = nic->bar0;
690 register u64 val64 = 0; 723 register u64 val64 = 0;
691 int mode; 724 int mode;
692 725
@@ -704,7 +737,7 @@ static int s2io_verify_pci_mode(nic_t *nic)
704 */ 737 */
705static int s2io_print_pci_mode(nic_t *nic) 738static int s2io_print_pci_mode(nic_t *nic)
706{ 739{
707 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 740 XENA_dev_config_t __iomem *bar0 = nic->bar0;
708 register u64 val64 = 0; 741 register u64 val64 = 0;
709 int mode; 742 int mode;
710 struct config_param *config = &nic->config; 743 struct config_param *config = &nic->config;
@@ -1403,7 +1436,7 @@ static int init_nic(struct s2io_nic *nic)
1403 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); 1436 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1404 1437
1405 /* Disable RMAC PAD STRIPPING */ 1438 /* Disable RMAC PAD STRIPPING */
1406 add = (void *) &bar0->mac_cfg; 1439 add = &bar0->mac_cfg;
1407 val64 = readq(&bar0->mac_cfg); 1440 val64 = readq(&bar0->mac_cfg);
1408 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD); 1441 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1409 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1442 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
@@ -1934,7 +1967,7 @@ static int start_nic(struct s2io_nic *nic)
1934 val64 |= 0x0000800000000000ULL; 1967 val64 |= 0x0000800000000000ULL;
1935 writeq(val64, &bar0->gpio_control); 1968 writeq(val64, &bar0->gpio_control);
1936 val64 = 0x0411040400000000ULL; 1969 val64 = 0x0411040400000000ULL;
1937 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700)); 1970 writeq(val64, (void __iomem *)bar0 + 0x2700);
1938 } 1971 }
1939 1972
1940 /* 1973 /*
@@ -2395,7 +2428,7 @@ static int s2io_poll(struct net_device *dev, int *budget)
2395 int pkt_cnt = 0, org_pkts_to_process; 2428 int pkt_cnt = 0, org_pkts_to_process;
2396 mac_info_t *mac_control; 2429 mac_info_t *mac_control;
2397 struct config_param *config; 2430 struct config_param *config;
2398 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 2431 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2399 u64 val64; 2432 u64 val64;
2400 int i; 2433 int i;
2401 2434
@@ -2479,9 +2512,10 @@ static void rx_intr_handler(ring_info_t *ring_data)
2479#endif 2512#endif
2480 spin_lock(&nic->rx_lock); 2513 spin_lock(&nic->rx_lock);
2481 if (atomic_read(&nic->card_state) == CARD_DOWN) { 2514 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2482 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n", 2515 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2483 __FUNCTION__, dev->name); 2516 __FUNCTION__, dev->name);
2484 spin_unlock(&nic->rx_lock); 2517 spin_unlock(&nic->rx_lock);
2518 return;
2485 } 2519 }
2486 2520
2487 get_info = ring_data->rx_curr_get_info; 2521 get_info = ring_data->rx_curr_get_info;
@@ -2596,8 +2630,14 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2596 if (txdlp->Control_1 & TXD_T_CODE) { 2630 if (txdlp->Control_1 & TXD_T_CODE) {
2597 unsigned long long err; 2631 unsigned long long err;
2598 err = txdlp->Control_1 & TXD_T_CODE; 2632 err = txdlp->Control_1 & TXD_T_CODE;
2599 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", 2633 if ((err >> 48) == 0xA) {
2600 err); 2634 DBG_PRINT(TX_DBG, "TxD returned due \
2635 to loss of link\n");
2636 }
2637 else {
2638 DBG_PRINT(ERR_DBG, "***TxD error \
2639 %llx\n", err);
2640 }
2601 } 2641 }
2602 2642
2603 skb = (struct sk_buff *) ((unsigned long) 2643 skb = (struct sk_buff *) ((unsigned long)
@@ -2689,12 +2729,16 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2689 if (val64 & MC_ERR_REG_ECC_ALL_DBL) { 2729 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2690 nic->mac_control.stats_info->sw_stat. 2730 nic->mac_control.stats_info->sw_stat.
2691 double_ecc_errs++; 2731 double_ecc_errs++;
2692 DBG_PRINT(ERR_DBG, "%s: Device indicates ", 2732 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2693 dev->name); 2733 dev->name);
2694 DBG_PRINT(ERR_DBG, "double ECC error!!\n"); 2734 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2695 if (nic->device_type != XFRAME_II_DEVICE) { 2735 if (nic->device_type != XFRAME_II_DEVICE) {
2696 netif_stop_queue(dev); 2736 /* Reset XframeI only if critical error */
2697 schedule_work(&nic->rst_timer_task); 2737 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2738 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2739 netif_stop_queue(dev);
2740 schedule_work(&nic->rst_timer_task);
2741 }
2698 } 2742 }
2699 } else { 2743 } else {
2700 nic->mac_control.stats_info->sw_stat. 2744 nic->mac_control.stats_info->sw_stat.
@@ -2706,7 +2750,8 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2706 val64 = readq(&bar0->serr_source); 2750 val64 = readq(&bar0->serr_source);
2707 if (val64 & SERR_SOURCE_ANY) { 2751 if (val64 & SERR_SOURCE_ANY) {
2708 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name); 2752 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2709 DBG_PRINT(ERR_DBG, "serious error!!\n"); 2753 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2754 (unsigned long long)val64);
2710 netif_stop_queue(dev); 2755 netif_stop_queue(dev);
2711 schedule_work(&nic->rst_timer_task); 2756 schedule_work(&nic->rst_timer_task);
2712 } 2757 }
@@ -2831,7 +2876,7 @@ void s2io_reset(nic_t * sp)
2831 val64 |= 0x0000800000000000ULL; 2876 val64 |= 0x0000800000000000ULL;
2832 writeq(val64, &bar0->gpio_control); 2877 writeq(val64, &bar0->gpio_control);
2833 val64 = 0x0411040400000000ULL; 2878 val64 = 0x0411040400000000ULL;
2834 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700)); 2879 writeq(val64, (void __iomem *)bar0 + 0x2700);
2835 } 2880 }
2836 2881
2837 /* 2882 /*
@@ -3130,7 +3175,7 @@ int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3130 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; 3175 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3131 /* Avoid "put" pointer going beyond "get" pointer */ 3176 /* Avoid "put" pointer going beyond "get" pointer */
3132 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) { 3177 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3133 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n"); 3178 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3134 netif_stop_queue(dev); 3179 netif_stop_queue(dev);
3135 dev_kfree_skb(skb); 3180 dev_kfree_skb(skb);
3136 spin_unlock_irqrestore(&sp->tx_lock, flags); 3181 spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -3234,7 +3279,7 @@ s2io_alarm_handle(unsigned long data)
3234 3279
3235static void s2io_txpic_intr_handle(nic_t *sp) 3280static void s2io_txpic_intr_handle(nic_t *sp)
3236{ 3281{
3237 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0; 3282 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3238 u64 val64; 3283 u64 val64;
3239 3284
3240 val64 = readq(&bar0->pic_int_status); 3285 val64 = readq(&bar0->pic_int_status);
@@ -3528,7 +3573,7 @@ static void s2io_set_multicast(struct net_device *dev)
3528 3573
3529 val64 = readq(&bar0->mac_cfg); 3574 val64 = readq(&bar0->mac_cfg);
3530 sp->promisc_flg = 1; 3575 sp->promisc_flg = 1;
3531 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n", 3576 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
3532 dev->name); 3577 dev->name);
3533 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) { 3578 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3534 /* Remove the NIC from promiscuous mode */ 3579 /* Remove the NIC from promiscuous mode */
@@ -3543,7 +3588,7 @@ static void s2io_set_multicast(struct net_device *dev)
3543 3588
3544 val64 = readq(&bar0->mac_cfg); 3589 val64 = readq(&bar0->mac_cfg);
3545 sp->promisc_flg = 0; 3590 sp->promisc_flg = 0;
3546 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n", 3591 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3547 dev->name); 3592 dev->name);
3548 } 3593 }
3549 3594
@@ -5325,7 +5370,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5325 break; 5370 break;
5326 } 5371 }
5327 } 5372 }
5328 config->max_txds = MAX_SKB_FRAGS; 5373 config->max_txds = MAX_SKB_FRAGS + 1;
5329 5374
5330 /* Rx side parameters. */ 5375 /* Rx side parameters. */
5331 if (rx_ring_sz[0] == 0) 5376 if (rx_ring_sz[0] == 0)
@@ -5525,9 +5570,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5525 if (sp->device_type & XFRAME_II_DEVICE) { 5570 if (sp->device_type & XFRAME_II_DEVICE) {
5526 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ", 5571 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5527 dev->name); 5572 dev->name);
5528 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n", 5573 DBG_PRINT(ERR_DBG, "(rev %d), %s",
5529 get_xena_rev_id(sp->pdev), 5574 get_xena_rev_id(sp->pdev),
5530 s2io_driver_version); 5575 s2io_driver_version);
5576#ifdef CONFIG_2BUFF_MODE
5577 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5578#endif
5579
5580 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5531 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", 5581 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5532 sp->def_mac_addr[0].mac_addr[0], 5582 sp->def_mac_addr[0].mac_addr[0],
5533 sp->def_mac_addr[0].mac_addr[1], 5583 sp->def_mac_addr[0].mac_addr[1],
@@ -5544,9 +5594,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5544 } else { 5594 } else {
5545 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ", 5595 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5546 dev->name); 5596 dev->name);
5547 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n", 5597 DBG_PRINT(ERR_DBG, "(rev %d), %s",
5548 get_xena_rev_id(sp->pdev), 5598 get_xena_rev_id(sp->pdev),
5549 s2io_driver_version); 5599 s2io_driver_version);
5600#ifdef CONFIG_2BUFF_MODE
5601 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5602#endif
5603 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5550 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", 5604 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5551 sp->def_mac_addr[0].mac_addr[0], 5605 sp->def_mac_addr[0].mac_addr[0],
5552 sp->def_mac_addr[0].mac_addr[1], 5606 sp->def_mac_addr[0].mac_addr[1],
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 5d9270730ca2..89151cb52181 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1,5 +1,5 @@
1/************************************************************************ 1/************************************************************************
2 * s2io.h: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC 2 * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc. 3 * Copyright(c) 2002-2005 Neterion Inc.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
@@ -622,6 +622,9 @@ typedef struct mac_info {
622 /* Fifo specific structure */ 622 /* Fifo specific structure */
623 fifo_info_t fifos[MAX_TX_FIFOS]; 623 fifo_info_t fifos[MAX_TX_FIFOS];
624 624
625 /* Save virtual address of TxD page with zero DMA addr(if any) */
626 void *zerodma_virt_addr;
627
625/* rx side stuff */ 628/* rx side stuff */
626 /* Ring specific structure */ 629 /* Ring specific structure */
627 ring_info_t rings[MAX_RX_RINGS]; 630 ring_info_t rings[MAX_RX_RINGS];
@@ -762,8 +765,8 @@ static inline u64 readq(void __iomem *addr)
762{ 765{
763 u64 ret = 0; 766 u64 ret = 0;
764 ret = readl(addr + 4); 767 ret = readl(addr + 4);
765 (u64) ret <<= 32; 768 ret <<= 32;
766 (u64) ret |= readl(addr); 769 ret |= readl(addr);
767 770
768 return ret; 771 return ret;
769} 772}
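
The s2io.h change stops shifting through a cast used as an lvalue ((u64) ret <<= 32, a GCC extension newer compilers reject) and simply shifts the u64 variable when stitching a 64-bit register value out of two 32-bit MMIO reads. The corrected helper, annotated:

static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;

	ret = readl(addr + 4);	/* high dword */
	ret <<= 32;
	ret |= readl(addr);	/* low dword */

	return ret;
}
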
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 3ad0b6751f6f..221354eea21f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
156 156
157 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb); 157 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
158 158
159#ifdef SHAPER_COMPLEX /* and broken.. */
160
161 while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
162 {
163 if(ptr->pri<skb->pri
164 && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
165 {
166 struct sk_buff *tmp=ptr->prev;
167
168 /*
169 * It goes before us therefore we slip the length
170 * of the new frame.
171 */
172
173 SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
174 SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
175
176 /*
177 * The packet may have slipped so far back it
178 * fell off.
179 */
180 if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
181 {
182 skb_unlink(ptr);
183 dev_kfree_skb(ptr);
184 }
185 ptr=tmp;
186 }
187 else
188 break;
189 }
190 if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
191 skb_queue_head(&shaper->sendq,skb);
192 else
193 {
194 struct sk_buff *tmp;
195 /*
196 * Set the packet clock out time according to the
197 * frames ahead. Im sure a bit of thought could drop
198 * this loop.
199 */
200 for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
201 SHAPERCB(skb)->shapeclock+=tmp->shapelen;
202 skb_append(ptr,skb);
203 }
204#else
205 { 159 {
206 struct sk_buff *tmp; 160 struct sk_buff *tmp;
207 /* 161 /*
@@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
220 } else 174 } else
221 skb_queue_tail(&shaper->sendq, skb); 175 skb_queue_tail(&shaper->sendq, skb);
222 } 176 }
223#endif 177
224 if(sh_debug) 178 if(sh_debug)
225 printk("Frame queued.\n"); 179 printk("Frame queued.\n");
226 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN) 180 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
@@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper)
302 * Pull the frame and get interrupts back on. 256 * Pull the frame and get interrupts back on.
303 */ 257 */
304 258
305 skb_unlink(skb); 259 skb_unlink(skb, &shaper->sendq);
306 if (shaper->recovery < 260 if (shaper->recovery <
307 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen) 261 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
308 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen; 262 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
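
shaper_kick() now tells skb_unlink() which queue the buffer sits on; around this kernel the sk_buff stopped carrying a back-pointer to its list, so removal helpers need the owning sk_buff_head passed explicitly. Typical usage:

/* Sketch: queue and remove skbs against an explicit sk_buff_head. */
struct sk_buff_head sendq;

skb_queue_head_init(&sendq);
skb_queue_tail(&sendq, skb);		/* enqueue at the tail */
/* ... later, drop one specific buffer from that queue ... */
skb_unlink(skb, &sendq);
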
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644
index 000000000000..92f75529eff8
--- /dev/null
+++ b/drivers/net/sis190.c
@@ -0,0 +1,1884 @@
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/pci.h>
29#include <linux/mii.h>
30#include <linux/delay.h>
31#include <linux/crc32.h>
32#include <linux/dma-mapping.h>
33#include <asm/irq.h>
34
35#define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37#define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39#define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41#define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46#define PHY_MAX_ADDR 32
47#define PHY_ID_ANY 0x1f
48#define MII_REG_ANY 0x1f
49
50#ifdef CONFIG_SIS190_NAPI
51#define NAPI_SUFFIX "-NAPI"
52#else
53#define NAPI_SUFFIX ""
54#endif
55
56#define DRV_VERSION "1.2" NAPI_SUFFIX
57#define DRV_NAME "sis190"
58#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59#define PFX DRV_NAME ": "
60
61#ifdef CONFIG_SIS190_NAPI
62#define sis190_rx_skb netif_receive_skb
63#define sis190_rx_quota(count, quota) min(count, quota)
64#else
65#define sis190_rx_skb netif_rx
66#define sis190_rx_quota(count, quota) count
67#endif
68
69#define MAC_ADDR_LEN 6
70
71#define NUM_TX_DESC 64 /* [8..1024] */
72#define NUM_RX_DESC 64 /* [8..8192] */
73#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75#define RX_BUF_SIZE 1536
76#define RX_BUF_MASK 0xfff8
77
78#define SIS190_REGS_SIZE 0x80
79#define SIS190_TX_TIMEOUT (6*HZ)
80#define SIS190_PHY_TIMEOUT (10*HZ)
81#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83 NETIF_MSG_IFDOWN)
84
85/* Enhanced PHY access register bit definitions */
86#define EhnMIIread 0x0000
87#define EhnMIIwrite 0x0020
88#define EhnMIIdataShift 16
89#define EhnMIIpmdShift 6 /* 7016 only */
90#define EhnMIIregShift 11
91#define EhnMIIreq 0x0010
92#define EhnMIInotDone 0x0010
93
94/* Write/read MMIO register */
95#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98#define SIS_R8(reg) readb (ioaddr + (reg))
99#define SIS_R16(reg) readw (ioaddr + (reg))
100#define SIS_R32(reg) readl (ioaddr + (reg))
101
102#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
103
104enum sis190_registers {
105 TxControl = 0x00,
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
109 RxControl = 0x10,
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
113 IntrStatus = 0x20,
114 IntrMask = 0x24,
115 IntrControl = 0x28,
 116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
119 ROMControl = 0x38,
120 ROMInterface = 0x3c,
121 StationControl = 0x40,
122 GMIIControl = 0x44,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
125 TxMacControl = 0x50,
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
129 RxMacControl = 0x60,
130 RxMacAddr = 0x62,
131 RxHashTable = 0x68,
132 // Undocumented = 0x6c,
133 RxWolCtrl = 0x70,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
137};
138
139enum sis190_register_content {
140 /* IntrStatus */
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
148 RxQInt = 0x00000040,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
153 RxHalt = 0x00000002,
154 TxHalt = 0x00000001,
155
156 /* {Rx/Tx}CmdBits */
157 CmdReset = 0x10,
158 CmdRxEnb = 0x08, // unused
159 CmdTxEnb = 0x01,
160 RxBufEmpty = 0x01, // unused
161
162 /* Cfg9346Bits */
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
165
166 /* RxMacControl */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
173
174 /* RxConfigBits */
175 RxCfgFIFOShift = 13,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
177
178 /* TxConfigBits */
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
181
182 LinkStatus = 0x02, // unused
183 FullDup = 0x01, // unused
184
185 /* TBICSRBit */
186 TBILinkOK = 0x02000000, // unused
187};
188
189struct TxDesc {
190 __le32 PSize;
191 __le32 status;
192 __le32 addr;
193 __le32 size;
194};
195
196struct RxDesc {
197 __le32 PSize;
198 __le32 status;
199 __le32 addr;
200 __le32 size;
201};
202
203enum _DescStatusBit {
204 /* _Desc.status */
205 OWNbit = 0x80000000, // RXOWN/TXOWN
206 INTbit = 0x40000000, // RXINT/TXINT
207 CRCbit = 0x00020000, // CRCOFF/CRCEN
208 PADbit = 0x00010000, // PREADD/PADEN
209 /* _Desc.size */
210 RingEnd = 0x80000000,
211 /* TxDesc.status */
212 LSEN = 0x08000000, // TSO ? -- FR
213 IPCS = 0x04000000,
214 TCPCS = 0x02000000,
215 UDPCS = 0x01000000,
216 BSTEN = 0x00800000,
217 EXTEN = 0x00400000,
218 DEFEN = 0x00200000,
219 BKFEN = 0x00100000,
220 CRSEN = 0x00080000,
221 COLEN = 0x00040000,
222 THOL3 = 0x30000000,
223 THOL2 = 0x20000000,
224 THOL1 = 0x10000000,
225 THOL0 = 0x00000000,
226 /* RxDesc.status */
227 IPON = 0x20000000,
228 TCPON = 0x10000000,
229 UDPON = 0x08000000,
230 Wakup = 0x00400000,
231 Magic = 0x00200000,
232 Pause = 0x00100000,
233 DEFbit = 0x00200000,
234 BCAST = 0x000c0000,
235 MCAST = 0x00080000,
236 UCAST = 0x00040000,
237 /* RxDesc.PSize */
238 TAGON = 0x80000000,
239 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
240 ABORT = 0x00800000,
241 SHORT = 0x00400000,
242 LIMIT = 0x00200000,
243 MIIER = 0x00100000,
244 OVRUN = 0x00080000,
245 NIBON = 0x00040000,
246 COLON = 0x00020000,
247 CRCOK = 0x00010000,
248 RxSizeMask = 0x0000ffff
249 /*
250 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
 251 * provide two (unused with Linux) Tx queues. No publicly
252 * available documentation alas.
253 */
254};
255
256enum sis190_eeprom_access_register_bits {
257 EECS = 0x00000001, // unused
258 EECLK = 0x00000002, // unused
259 EEDO = 0x00000008, // unused
260 EEDI = 0x00000004, // unused
261 EEREQ = 0x00000080,
262 EEROP = 0x00000200,
263 EEWOP = 0x00000100 // unused
264};
265
266/* EEPROM Addresses */
267enum sis190_eeprom_address {
268 EEPROMSignature = 0x00,
269 EEPROMCLK = 0x01, // unused
270 EEPROMInfo = 0x02,
271 EEPROMMACAddr = 0x03
272};
273
274enum sis190_feature {
275 F_HAS_RGMII = 1,
276 F_PHY_88E1111 = 2,
277 F_PHY_BCM5461 = 4
278};
279
280struct sis190_private {
281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev;
283 struct net_device_stats stats;
284 spinlock_t lock;
285 u32 rx_buf_sz;
286 u32 cur_rx;
287 u32 cur_tx;
288 u32 dirty_rx;
289 u32 dirty_tx;
290 dma_addr_t rx_dma;
291 dma_addr_t tx_dma;
292 struct RxDesc *RxDescRing;
293 struct TxDesc *TxDescRing;
294 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
295 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
296 struct work_struct phy_task;
297 struct timer_list timer;
298 u32 msg_enable;
299 struct mii_if_info mii_if;
300 struct list_head first_phy;
301 u32 features;
302};
303
304struct sis190_phy {
305 struct list_head list;
306 int phy_id;
307 u16 id[2];
308 u16 status;
309 u8 type;
310};
311
312enum sis190_phy_type {
313 UNKNOWN = 0x00,
314 HOME = 0x01,
315 LAN = 0x02,
316 MIX = 0x03
317};
318
319static struct mii_chip_info {
320 const char *name;
321 u16 id[2];
322 unsigned int type;
323 u32 feature;
324} mii_chip_table[] = {
325 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
326 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
327 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
328 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
329 { NULL, }
330};
331
332static const struct {
333 const char *name;
334} sis_chip_info[] = {
335 { "SiS 190 PCI Fast Ethernet adapter" },
336 { "SiS 191 PCI Gigabit Ethernet adapter" },
337};
338
339static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
340 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
341 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
342 { 0, },
343};
344
345MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
346
347static int rx_copybreak = 200;
348
349static struct {
350 u32 msg_enable;
351} debug = { -1 };
352
353MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
354module_param(rx_copybreak, int, 0);
355MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
356module_param_named(debug, debug.msg_enable, int, 0);
357MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
358MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
359MODULE_VERSION(DRV_VERSION);
360MODULE_LICENSE("GPL");
361
362static const u32 sis190_intr_mask =
363 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
364
365/*
366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
367 * The chips use a 64 element hash table based on the Ethernet CRC.
368 */
369static int multicast_filter_limit = 32;
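/*
 * The hash bit used for a given multicast address is derived from the
 * Ethernet CRC, as done in sis190_set_rx_mode() below:
 *	bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */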
370
371static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
372{
373 unsigned int i;
374
375 SIS_W32(GMIIControl, ctl);
376
377 msleep(1);
378
379 for (i = 0; i < 100; i++) {
380 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
381 break;
382 msleep(1);
383 }
384
385 if (i > 99)
386 printk(KERN_ERR PFX "PHY command failed !\n");
387}
388
389static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
390{
391 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
392 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
393 (((u32) val) << EhnMIIdataShift));
394}
395
396static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
397{
398 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
399 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
400
401 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
402}
403
404static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
405{
406 struct sis190_private *tp = netdev_priv(dev);
407
408 mdio_write(tp->mmio_addr, phy_id, reg, val);
409}
410
411static int __mdio_read(struct net_device *dev, int phy_id, int reg)
412{
413 struct sis190_private *tp = netdev_priv(dev);
414
415 return mdio_read(tp->mmio_addr, phy_id, reg);
416}
417
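/*
 * Several MII status bits (e.g. the BMSR link status) are latched: the
 * first read returns and clears the latched value, the second read
 * returns the current state. Hence the back-to-back reads.
 */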
418static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
419{
420 mdio_read(ioaddr, phy_id, reg);
421 return mdio_read(ioaddr, phy_id, reg);
422}
423
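/*
 * EEPROM read handshake: post EEREQ | EEROP with the word address in
 * bits 10 and above, poll until the chip clears EEREQ, then fetch the
 * data from the upper 16 bits of ROMInterface.
 */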
424static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
425{
426 u16 data = 0xffff;
427 unsigned int i;
428
429 if (!(SIS_R32(ROMControl) & 0x0002))
430 return 0;
431
432 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
433
434 for (i = 0; i < 200; i++) {
435 if (!(SIS_R32(ROMInterface) & EEREQ)) {
436 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
437 break;
438 }
439 msleep(1);
440 }
441
442 return data;
443}
444
445static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
446{
447 SIS_W32(IntrMask, 0x00);
448 SIS_W32(IntrStatus, 0xffffffff);
449 SIS_PCI_COMMIT();
450}
451
452static void sis190_asic_down(void __iomem *ioaddr)
453{
454 /* Stop the chip's Tx and Rx DMA processes. */
455
456 SIS_W32(TxControl, 0x1a00);
457 SIS_W32(RxControl, 0x1a00);
458
459 sis190_irq_mask_and_ack(ioaddr);
460}
461
462static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
463{
464 desc->size |= cpu_to_le32(RingEnd);
465}
466
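/*
 * Descriptor hand-off to the chip: the size/PSize fields are written
 * first, then wmb() makes them visible before the OWN bit transfers
 * ownership of the descriptor to the NIC.
 */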
467static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
468{
469 u32 eor = le32_to_cpu(desc->size) & RingEnd;
470
471 desc->PSize = 0x0;
472 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
473 wmb();
474 desc->status = cpu_to_le32(OWNbit | INTbit);
475}
476
477static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
478 u32 rx_buf_sz)
479{
480 desc->addr = cpu_to_le32(mapping);
481 sis190_give_to_asic(desc, rx_buf_sz);
482}
483
484static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
485{
486 desc->PSize = 0x0;
487 desc->addr = 0xdeadbeef;
488 desc->size &= cpu_to_le32(RingEnd);
489 wmb();
490 desc->status = 0x0;
491}
492
493static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
494 struct RxDesc *desc, u32 rx_buf_sz)
495{
496 struct sk_buff *skb;
497 dma_addr_t mapping;
498 int ret = 0;
499
500 skb = dev_alloc_skb(rx_buf_sz);
501 if (!skb)
502 goto err_out;
503
504 *sk_buff = skb;
505
506 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
507 PCI_DMA_FROMDEVICE);
508
509 sis190_map_to_asic(desc, mapping, rx_buf_sz);
510out:
511 return ret;
512
513err_out:
514 ret = -ENOMEM;
515 sis190_make_unusable_by_asic(desc);
516 goto out;
517}
518
519static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
520 u32 start, u32 end)
521{
522 u32 cur;
523
524 for (cur = start; cur < end; cur++) {
525 int ret, i = cur % NUM_RX_DESC;
526
527 if (tp->Rx_skbuff[i])
528 continue;
529
530 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
531 tp->RxDescRing + i, tp->rx_buf_sz);
532 if (ret < 0)
533 break;
534 }
535 return cur - start;
536}
537
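/*
 * Copybreak: frames shorter than rx_copybreak are copied into a small
 * freshly allocated skb so the full-sized receive buffer can be handed
 * straight back to the chip. Larger frames keep the original skb and a
 * replacement buffer is allocated later by sis190_rx_fill().
 */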
538static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
539 struct RxDesc *desc, int rx_buf_sz)
540{
541 int ret = -1;
542
543 if (pkt_size < rx_copybreak) {
544 struct sk_buff *skb;
545
546 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
547 if (skb) {
548 skb_reserve(skb, NET_IP_ALIGN);
549 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
550 *sk_buff = skb;
551 sis190_give_to_asic(desc, rx_buf_sz);
552 ret = 0;
553 }
554 }
555 return ret;
556}
557
558static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
559{
560#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
561
562 if ((status & CRCOK) && !(status & ErrMask))
563 return 0;
564
565 if (!(status & CRCOK))
566 stats->rx_crc_errors++;
567 else if (status & OVRUN)
568 stats->rx_over_errors++;
569 else if (status & (SHORT | LIMIT))
570 stats->rx_length_errors++;
571 else if (status & (MIIER | NIBON | COLON))
572 stats->rx_frame_errors++;
573
574 stats->rx_errors++;
575 return -1;
576}
577
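/*
 * Rx ring bookkeeping: cur_rx indexes the next descriptor to examine,
 * dirty_rx the next one to refill with a fresh skb. At most
 * NUM_RX_DESC + dirty_rx - cur_rx descriptors can be pending, which,
 * capped by the device quota, bounds the work done per call.
 */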
578static int sis190_rx_interrupt(struct net_device *dev,
579 struct sis190_private *tp, void __iomem *ioaddr)
580{
581 struct net_device_stats *stats = &tp->stats;
582 u32 rx_left, cur_rx = tp->cur_rx;
583 u32 delta, count;
584
585 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
586 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
587
588 for (; rx_left > 0; rx_left--, cur_rx++) {
589 unsigned int entry = cur_rx % NUM_RX_DESC;
590 struct RxDesc *desc = tp->RxDescRing + entry;
591 u32 status;
592
593 if (desc->status & OWNbit)
594 break;
595
596 status = le32_to_cpu(desc->PSize);
597
598 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
599 // status);
600
601 if (sis190_rx_pkt_err(status, stats) < 0)
602 sis190_give_to_asic(desc, tp->rx_buf_sz);
603 else {
604 struct sk_buff *skb = tp->Rx_skbuff[entry];
605 int pkt_size = (status & RxSizeMask) - 4;
606 void (*pci_action)(struct pci_dev *, dma_addr_t,
607 size_t, int) = pci_dma_sync_single_for_device;
608
609 if (unlikely(pkt_size > tp->rx_buf_sz)) {
610 net_intr(tp, KERN_INFO
611 "%s: (frag) status = %08x.\n",
612 dev->name, status);
613 stats->rx_dropped++;
614 stats->rx_length_errors++;
615 sis190_give_to_asic(desc, tp->rx_buf_sz);
616 continue;
617 }
618
619 pci_dma_sync_single_for_cpu(tp->pci_dev,
620 le32_to_cpu(desc->addr), tp->rx_buf_sz,
621 PCI_DMA_FROMDEVICE);
622
623 if (sis190_try_rx_copy(&skb, pkt_size, desc,
624 tp->rx_buf_sz)) {
625 pci_action = pci_unmap_single;
626 tp->Rx_skbuff[entry] = NULL;
627 sis190_make_unusable_by_asic(desc);
628 }
629
630 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
631 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
632
633 skb->dev = dev;
634 skb_put(skb, pkt_size);
635 skb->protocol = eth_type_trans(skb, dev);
636
637 sis190_rx_skb(skb);
638
639 dev->last_rx = jiffies;
640 stats->rx_packets++;
641 stats->rx_bytes += pkt_size;
642 if ((status & BCAST) == MCAST)
643 stats->multicast++;
644 }
645 }
646 count = cur_rx - tp->cur_rx;
647 tp->cur_rx = cur_rx;
648
649 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
650 if (!delta && count && netif_msg_intr(tp))
651 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
652 tp->dirty_rx += delta;
653
654 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
655 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
656
657 return count;
658}
659
660static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
661 struct TxDesc *desc)
662{
663 unsigned int len;
664
665 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
666
667 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
668
669 memset(desc, 0x00, sizeof(*desc));
670}
671
672static void sis190_tx_interrupt(struct net_device *dev,
673 struct sis190_private *tp, void __iomem *ioaddr)
674{
675 u32 pending, dirty_tx = tp->dirty_tx;
676 /*
677 * Needed so that the queue is not woken up again too early
678 * (hint: think preempt and unclocked smp systems).
679 */
680 unsigned int queue_stopped;
681
682 smp_rmb();
683 pending = tp->cur_tx - dirty_tx;
684 queue_stopped = (pending == NUM_TX_DESC);
685
686 for (; pending; pending--, dirty_tx++) {
687 unsigned int entry = dirty_tx % NUM_TX_DESC;
688 struct TxDesc *txd = tp->TxDescRing + entry;
689 struct sk_buff *skb;
690
691 if (le32_to_cpu(txd->status) & OWNbit)
692 break;
693
694 skb = tp->Tx_skbuff[entry];
695
696 tp->stats.tx_packets++;
697 tp->stats.tx_bytes += skb->len;
698
699 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
700 tp->Tx_skbuff[entry] = NULL;
701 dev_kfree_skb_irq(skb);
702 }
703
704 if (tp->dirty_tx != dirty_tx) {
705 tp->dirty_tx = dirty_tx;
706 smp_wmb();
707 if (queue_stopped)
708 netif_wake_queue(dev);
709 }
710}
711
712/*
713 * The interrupt handler does all of the Rx thread work and cleans up after
714 * the Tx thread.
715 */
716static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
717{
718 struct net_device *dev = __dev;
719 struct sis190_private *tp = netdev_priv(dev);
720 void __iomem *ioaddr = tp->mmio_addr;
721 unsigned int handled = 0;
722 u32 status;
723
724 status = SIS_R32(IntrStatus);
725
726 if ((status == 0xffffffff) || !status)
727 goto out;
728
729 handled = 1;
730
731 if (unlikely(!netif_running(dev))) {
732 sis190_asic_down(ioaddr);
733 goto out;
734 }
735
736 SIS_W32(IntrStatus, status);
737
738 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
739
740 if (status & LinkChange) {
741 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
742 schedule_work(&tp->phy_task);
743 }
744
745 if (status & RxQInt)
746 sis190_rx_interrupt(dev, tp, ioaddr);
747
748 if (status & TxQ0Int)
749 sis190_tx_interrupt(dev, tp, ioaddr);
750out:
751 return IRQ_RETVAL(handled);
752}
753
754#ifdef CONFIG_NET_POLL_CONTROLLER
755static void sis190_netpoll(struct net_device *dev)
756{
757 struct sis190_private *tp = netdev_priv(dev);
758 struct pci_dev *pdev = tp->pci_dev;
759
760 disable_irq(pdev->irq);
761 sis190_interrupt(pdev->irq, dev, NULL);
762 enable_irq(pdev->irq);
763}
764#endif
765
766static void sis190_free_rx_skb(struct sis190_private *tp,
767 struct sk_buff **sk_buff, struct RxDesc *desc)
768{
769 struct pci_dev *pdev = tp->pci_dev;
770
771 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
772 PCI_DMA_FROMDEVICE);
773 dev_kfree_skb(*sk_buff);
774 *sk_buff = NULL;
775 sis190_make_unusable_by_asic(desc);
776}
777
778static void sis190_rx_clear(struct sis190_private *tp)
779{
780 unsigned int i;
781
782 for (i = 0; i < NUM_RX_DESC; i++) {
783 if (!tp->Rx_skbuff[i])
784 continue;
785 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
786 }
787}
788
789static void sis190_init_ring_indexes(struct sis190_private *tp)
790{
791 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
792}
793
794static int sis190_init_ring(struct net_device *dev)
795{
796 struct sis190_private *tp = netdev_priv(dev);
797
798 sis190_init_ring_indexes(tp);
799
800 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
801 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
802
803 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
804 goto err_rx_clear;
805
806 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
807
808 return 0;
809
810err_rx_clear:
811 sis190_rx_clear(tp);
812 return -ENOMEM;
813}
814
815static void sis190_set_rx_mode(struct net_device *dev)
816{
817 struct sis190_private *tp = netdev_priv(dev);
818 void __iomem *ioaddr = tp->mmio_addr;
819 unsigned long flags;
820 u32 mc_filter[2]; /* Multicast hash filter */
821 u16 rx_mode;
822
823 if (dev->flags & IFF_PROMISC) {
824 /* Unconditionally log net taps. */
825 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
826 dev->name);
827 rx_mode =
828 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
829 AcceptAllPhys;
830 mc_filter[1] = mc_filter[0] = 0xffffffff;
831 } else if ((dev->mc_count > multicast_filter_limit) ||
832 (dev->flags & IFF_ALLMULTI)) {
833 /* Too many to filter perfectly -- accept all multicasts. */
834 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
835 mc_filter[1] = mc_filter[0] = 0xffffffff;
836 } else {
837 struct dev_mc_list *mclist;
838 unsigned int i;
839
840 rx_mode = AcceptBroadcast | AcceptMyPhys;
841 mc_filter[1] = mc_filter[0] = 0;
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) {
844 int bit_nr =
845 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast;
848 }
849 }
850
851 spin_lock_irqsave(&tp->lock, flags);
852
853 SIS_W16(RxMacControl, rx_mode | 0x2);
854 SIS_W32(RxHashTable, mc_filter[0]);
855 SIS_W32(RxHashTable + 4, mc_filter[1]);
856
857 spin_unlock_irqrestore(&tp->lock, flags);
858}
859
860static void sis190_soft_reset(void __iomem *ioaddr)
861{
862 SIS_W32(IntrControl, 0x8000);
863 SIS_PCI_COMMIT();
864 msleep(1);
865 SIS_W32(IntrControl, 0x0);
866 sis190_asic_down(ioaddr);
867 msleep(1);
868}
869
870static void sis190_hw_start(struct net_device *dev)
871{
872 struct sis190_private *tp = netdev_priv(dev);
873 void __iomem *ioaddr = tp->mmio_addr;
874
875 sis190_soft_reset(ioaddr);
876
877 SIS_W32(TxDescStartAddr, tp->tx_dma);
878 SIS_W32(RxDescStartAddr, tp->rx_dma);
879
880 SIS_W32(IntrStatus, 0xffffffff);
881 SIS_W32(IntrMask, 0x0);
882 SIS_W32(GMIIControl, 0x0);
883 SIS_W32(TxMacControl, 0x60);
884 SIS_W16(RxMacControl, 0x02);
885 SIS_W32(RxHashTable, 0x0);
886 SIS_W32(0x6c, 0x0);
887 SIS_W32(RxWolCtrl, 0x0);
888 SIS_W32(RxWolData, 0x0);
889
890 SIS_PCI_COMMIT();
891
892 sis190_set_rx_mode(dev);
893
894 /* Enable all known interrupts by setting the interrupt mask. */
895 SIS_W32(IntrMask, sis190_intr_mask);
896
897 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
898 SIS_W32(RxControl, 0x1a1d);
899
900 netif_start_queue(dev);
901}
902
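/*
 * Resolves the negotiated link parameters: the partner ability (MII_LPA)
 * is ANDed with the local advertisement (MII_ADVERTISE) and matched
 * against the reg31[] table below; the selected ctl value is then merged
 * into StationControl.
 */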
903static void sis190_phy_task(void * data)
904{
905 struct net_device *dev = data;
906 struct sis190_private *tp = netdev_priv(dev);
907 void __iomem *ioaddr = tp->mmio_addr;
908 int phy_id = tp->mii_if.phy_id;
909 u16 val;
910
911 rtnl_lock();
912
913 val = mdio_read(ioaddr, phy_id, MII_BMCR);
914 if (val & BMCR_RESET) {
915 // FIXME: needlessly high ? -- FR 02/07/2005
916 mod_timer(&tp->timer, jiffies + HZ/10);
917 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
918 BMSR_ANEGCOMPLETE)) {
919 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
920 dev->name);
921 netif_carrier_off(dev);
922 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
923 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
924 } else {
925 /* Rejoice ! */
926 struct {
927 int val;
928 u32 ctl;
929 const char *msg;
930 } reg31[] = {
931 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
932 "1000 Mbps Full Duplex" },
933 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
934 "1000 Mbps Half Duplex" },
935 { LPA_100FULL, 0x04000800 | 0x00001000,
936 "100 Mbps Full Duplex" },
937 { LPA_100HALF, 0x04000800,
938 "100 Mbps Half Duplex" },
939 { LPA_10FULL, 0x04000400 | 0x00001000,
940 "10 Mbps Full Duplex" },
941 { LPA_10HALF, 0x04000400,
942 "10 Mbps Half Duplex" },
943 { 0, 0x04000400, "unknown" }
944 }, *p;
945 u16 adv;
946
947 val = mdio_read(ioaddr, phy_id, 0x1f);
948 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
949
950 val = mdio_read(ioaddr, phy_id, MII_LPA);
951 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
952 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
953 dev->name, val, adv);
954
955 val &= adv;
956
957 for (p = reg31; p->val; p++) {
958 if ((val & p->val) == p->val)
959 break;
960 }
961
962 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
963
964 if ((tp->features & F_HAS_RGMII) &&
965 (tp->features & F_PHY_BCM5461)) {
966 // Set Tx Delay in RGMII mode.
967 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
968 udelay(200);
969 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
970 p->ctl |= 0x03000000;
971 }
972
973 SIS_W32(StationControl, p->ctl);
974
975 if (tp->features & F_HAS_RGMII) {
976 SIS_W32(RGDelay, 0x0441);
977 SIS_W32(RGDelay, 0x0440);
978 }
979
980 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
981 p->msg);
982 netif_carrier_on(dev);
983 }
984
985 rtnl_unlock();
986}
987
988static void sis190_phy_timer(unsigned long __opaque)
989{
990 struct net_device *dev = (struct net_device *)__opaque;
991 struct sis190_private *tp = netdev_priv(dev);
992
993 if (likely(netif_running(dev)))
994 schedule_work(&tp->phy_task);
995}
996
997static inline void sis190_delete_timer(struct net_device *dev)
998{
999 struct sis190_private *tp = netdev_priv(dev);
1000
1001 del_timer_sync(&tp->timer);
1002}
1003
1004static inline void sis190_request_timer(struct net_device *dev)
1005{
1006 struct sis190_private *tp = netdev_priv(dev);
1007 struct timer_list *timer = &tp->timer;
1008
1009 init_timer(timer);
1010 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1011 timer->data = (unsigned long)dev;
1012 timer->function = sis190_phy_timer;
1013 add_timer(timer);
1014}
1015
1016static void sis190_set_rxbufsize(struct sis190_private *tp,
1017 struct net_device *dev)
1018{
1019 unsigned int mtu = dev->mtu;
1020
1021 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1022 /* RxDesc->size has a licence to kill the lower bits */
1023 if (tp->rx_buf_sz & 0x07) {
1024 tp->rx_buf_sz += 8;
1025 tp->rx_buf_sz &= RX_BUF_MASK;
1026 }
1027}
1028
1029static int sis190_open(struct net_device *dev)
1030{
1031 struct sis190_private *tp = netdev_priv(dev);
1032 struct pci_dev *pdev = tp->pci_dev;
1033 int rc = -ENOMEM;
1034
1035 sis190_set_rxbufsize(tp, dev);
1036
1037 /*
1038 * Rx and Tx descriptors need 256 bytes alignment.
1039 * pci_alloc_consistent() guarantees a stronger alignment.
1040 */
1041 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1042 if (!tp->TxDescRing)
1043 goto out;
1044
1045 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1046 if (!tp->RxDescRing)
1047 goto err_free_tx_0;
1048
1049 rc = sis190_init_ring(dev);
1050 if (rc < 0)
1051 goto err_free_rx_1;
1052
1053 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1054
1055 sis190_request_timer(dev);
1056
1057 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
1058 if (rc < 0)
1059 goto err_release_timer_2;
1060
1061 sis190_hw_start(dev);
1062out:
1063 return rc;
1064
1065err_release_timer_2:
1066 sis190_delete_timer(dev);
1067 sis190_rx_clear(tp);
1068err_free_rx_1:
1069 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1070 tp->rx_dma);
1071err_free_tx_0:
1072 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1073 tp->tx_dma);
1074 goto out;
1075}
1076
1077static void sis190_tx_clear(struct sis190_private *tp)
1078{
1079 unsigned int i;
1080
1081 for (i = 0; i < NUM_TX_DESC; i++) {
1082 struct sk_buff *skb = tp->Tx_skbuff[i];
1083
1084 if (!skb)
1085 continue;
1086
1087 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1088 tp->Tx_skbuff[i] = NULL;
1089 dev_kfree_skb(skb);
1090
1091 tp->stats.tx_dropped++;
1092 }
1093 tp->cur_tx = tp->dirty_tx = 0;
1094}
1095
1096static void sis190_down(struct net_device *dev)
1097{
1098 struct sis190_private *tp = netdev_priv(dev);
1099 void __iomem *ioaddr = tp->mmio_addr;
1100 unsigned int poll_locked = 0;
1101
1102 sis190_delete_timer(dev);
1103
1104 netif_stop_queue(dev);
1105
1106 flush_scheduled_work();
1107
1108 do {
1109 spin_lock_irq(&tp->lock);
1110
1111 sis190_asic_down(ioaddr);
1112
1113 spin_unlock_irq(&tp->lock);
1114
1115 synchronize_irq(dev->irq);
1116
1117 if (!poll_locked) {
1118 netif_poll_disable(dev);
1119 poll_locked++;
1120 }
1121
1122 synchronize_sched();
1123
1124 } while (SIS_R32(IntrMask));
1125
1126 sis190_tx_clear(tp);
1127 sis190_rx_clear(tp);
1128}
1129
1130static int sis190_close(struct net_device *dev)
1131{
1132 struct sis190_private *tp = netdev_priv(dev);
1133 struct pci_dev *pdev = tp->pci_dev;
1134
1135 sis190_down(dev);
1136
1137 free_irq(dev->irq, dev);
1138
1139 netif_poll_enable(dev);
1140
1141 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1142 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1143
1144 tp->TxDescRing = NULL;
1145 tp->RxDescRing = NULL;
1146
1147 return 0;
1148}
1149
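/*
 * Tx path: fill in the descriptor, let wmb() commit the address/size
 * fields before the OWN bit passes the descriptor to the chip, then kick
 * TxControl. The queue is stopped when cur_tx catches up with dirty_tx
 * plus the ring size; sis190_tx_interrupt() wakes it again.
 */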
1150static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1151{
1152 struct sis190_private *tp = netdev_priv(dev);
1153 void __iomem *ioaddr = tp->mmio_addr;
1154 u32 len, entry, dirty_tx;
1155 struct TxDesc *desc;
1156 dma_addr_t mapping;
1157
1158 if (unlikely(skb->len < ETH_ZLEN)) {
1159 skb = skb_padto(skb, ETH_ZLEN);
1160 if (!skb) {
1161 tp->stats.tx_dropped++;
1162 goto out;
1163 }
1164 len = ETH_ZLEN;
1165 } else {
1166 len = skb->len;
1167 }
1168
1169 entry = tp->cur_tx % NUM_TX_DESC;
1170 desc = tp->TxDescRing + entry;
1171
1172 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1173 netif_stop_queue(dev);
1174 net_tx_err(tp, KERN_ERR PFX
1175 "%s: BUG! Tx Ring full when queue awake!\n",
1176 dev->name);
1177 return NETDEV_TX_BUSY;
1178 }
1179
1180 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1181
1182 tp->Tx_skbuff[entry] = skb;
1183
1184 desc->PSize = cpu_to_le32(len);
1185 desc->addr = cpu_to_le32(mapping);
1186
1187 desc->size = cpu_to_le32(len);
1188 if (entry == (NUM_TX_DESC - 1))
1189 desc->size |= cpu_to_le32(RingEnd);
1190
1191 wmb();
1192
1193 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1194
1195 tp->cur_tx++;
1196
1197 smp_wmb();
1198
1199 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1200
1201 dev->trans_start = jiffies;
1202
1203 dirty_tx = tp->dirty_tx;
1204 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1205 netif_stop_queue(dev);
1206 smp_rmb();
1207 if (dirty_tx != tp->dirty_tx)
1208 netif_wake_queue(dev);
1209 }
1210out:
1211 return NETDEV_TX_OK;
1212}
1213
1214static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1215{
1216 struct sis190_private *tp = netdev_priv(dev);
1217
1218 return &tp->stats;
1219}
1220
1221static void sis190_free_phy(struct list_head *first_phy)
1222{
1223 struct sis190_phy *cur, *next;
1224
1225 list_for_each_entry_safe(cur, next, first_phy, list) {
1226 kfree(cur);
1227 }
1228}
1229
1230/**
1231 * sis190_default_phy - Select default PHY for sis190 mac.
1232 * @dev: the net device to probe for
1233 *
1234 * Select first detected PHY with link as default.
1235 * If none has link up, select the PHY whose type is HOME as default.
1236 * If no HOME PHY exists, select a LAN PHY.
1237 */
1238static u16 sis190_default_phy(struct net_device *dev)
1239{
1240 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1241 struct sis190_private *tp = netdev_priv(dev);
1242 struct mii_if_info *mii_if = &tp->mii_if;
1243 void __iomem *ioaddr = tp->mmio_addr;
1244 u16 status;
1245
1246 phy_home = phy_default = phy_lan = NULL;
1247
1248 list_for_each_entry(phy, &tp->first_phy, list) {
1249 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1250
1251 // Link ON & Not select default PHY & not ghost PHY.
1252 if ((status & BMSR_LSTATUS) &&
1253 !phy_default &&
1254 (phy->type != UNKNOWN)) {
1255 phy_default = phy;
1256 } else {
1257 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1258 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1259 status | BMCR_ANENABLE | BMCR_ISOLATE);
1260 if (phy->type == HOME)
1261 phy_home = phy;
1262 else if (phy->type == LAN)
1263 phy_lan = phy;
1264 }
1265 }
1266
1267 if (!phy_default) {
1268 if (phy_home)
1269 phy_default = phy_home;
1270 else if (phy_lan)
1271 phy_default = phy_lan;
1272 else
1273 phy_default = list_entry(&tp->first_phy,
1274 struct sis190_phy, list);
1275 }
1276
1277 if (mii_if->phy_id != phy_default->phy_id) {
1278 mii_if->phy_id = phy_default->phy_id;
1279 net_probe(tp, KERN_INFO
1280 "%s: Using transceiver at address %d as default.\n",
1281 pci_name(tp->pci_dev), mii_if->phy_id);
1282 }
1283
1284 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1285 status &= (~BMCR_ISOLATE);
1286
1287 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1288 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1289
1290 return status;
1291}
1292
1293static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1294 struct sis190_phy *phy, unsigned int phy_id,
1295 u16 mii_status)
1296{
1297 void __iomem *ioaddr = tp->mmio_addr;
1298 struct mii_chip_info *p;
1299
1300 INIT_LIST_HEAD(&phy->list);
1301 phy->status = mii_status;
1302 phy->phy_id = phy_id;
1303
1304 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1305 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1306
1307 for (p = mii_chip_table; p->type; p++) {
1308 if ((p->id[0] == phy->id[0]) &&
1309 (p->id[1] == (phy->id[1] & 0xfff0))) {
1310 break;
1311 }
1312 }
1313
1314 if (p->id[1]) {
1315 phy->type = (p->type == MIX) ?
1316 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1317 LAN : HOME) : p->type;
1318 tp->features |= p->feature;
1319 } else
1320 phy->type = UNKNOWN;
1321
1322 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1323 pci_name(tp->pci_dev),
1324 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1325}
1326
1327static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1328{
1329 if (tp->features & F_PHY_88E1111) {
1330 void __iomem *ioaddr = tp->mmio_addr;
1331 int phy_id = tp->mii_if.phy_id;
1332 u16 reg[2][2] = {
1333 { 0x808b, 0x0ce1 },
1334 { 0x808f, 0x0c60 }
1335 }, *p;
1336
1337 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1338
1339 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1340 udelay(200);
1341 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1342 udelay(200);
1343 }
1344}
1345
1346/**
1347 * sis190_mii_probe - Probe MII PHY for sis190
1348 * @dev: the net device to probe for
1349 *
1350 * Search all 32 possible MII PHY addresses.
1351 * Identify and select the current PHY if one is found,
1352 * return an error if none is found.
1353 */
1354static int __devinit sis190_mii_probe(struct net_device *dev)
1355{
1356 struct sis190_private *tp = netdev_priv(dev);
1357 struct mii_if_info *mii_if = &tp->mii_if;
1358 void __iomem *ioaddr = tp->mmio_addr;
1359 int phy_id;
1360 int rc = 0;
1361
1362 INIT_LIST_HEAD(&tp->first_phy);
1363
1364 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1365 struct sis190_phy *phy;
1366 u16 status;
1367
1368 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1369
1370 // Try next mii if the current one is not accessible.
1371 if (status == 0xffff || status == 0x0000)
1372 continue;
1373
1374 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1375 if (!phy) {
1376 sis190_free_phy(&tp->first_phy);
1377 rc = -ENOMEM;
1378 goto out;
1379 }
1380
1381 sis190_init_phy(dev, tp, phy, phy_id, status);
1382
1383 list_add(&tp->first_phy, &phy->list);
1384 }
1385
1386 if (list_empty(&tp->first_phy)) {
1387 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1388 pci_name(tp->pci_dev));
1389 rc = -EIO;
1390 goto out;
1391 }
1392
1393 /* Select default PHY for mac */
1394 sis190_default_phy(dev);
1395
1396 sis190_mii_probe_88e1111_fixup(tp);
1397
1398 mii_if->dev = dev;
1399 mii_if->mdio_read = __mdio_read;
1400 mii_if->mdio_write = __mdio_write;
1401 mii_if->phy_id_mask = PHY_ID_ANY;
1402 mii_if->reg_num_mask = MII_REG_ANY;
1403out:
1404 return rc;
1405}
1406
1407static void __devexit sis190_mii_remove(struct net_device *dev)
1408{
1409 struct sis190_private *tp = netdev_priv(dev);
1410
1411 sis190_free_phy(&tp->first_phy);
1412}
1413
1414static void sis190_release_board(struct pci_dev *pdev)
1415{
1416 struct net_device *dev = pci_get_drvdata(pdev);
1417 struct sis190_private *tp = netdev_priv(dev);
1418
1419 iounmap(tp->mmio_addr);
1420 pci_release_regions(pdev);
1421 pci_disable_device(pdev);
1422 free_netdev(dev);
1423}
1424
1425static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1426{
1427 struct sis190_private *tp;
1428 struct net_device *dev;
1429 void __iomem *ioaddr;
1430 int rc;
1431
1432 dev = alloc_etherdev(sizeof(*tp));
1433 if (!dev) {
1434 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1435 rc = -ENOMEM;
1436 goto err_out_0;
1437 }
1438
1439 SET_MODULE_OWNER(dev);
1440 SET_NETDEV_DEV(dev, &pdev->dev);
1441
1442 tp = netdev_priv(dev);
1443 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1444
1445 rc = pci_enable_device(pdev);
1446 if (rc < 0) {
1447 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1448 goto err_free_dev_1;
1449 }
1450
1451 rc = -ENODEV;
1452
1453 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1454 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1455 pci_name(pdev));
1456 goto err_pci_disable_2;
1457 }
1458 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1459 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1460 pci_name(pdev));
1461 goto err_pci_disable_2;
1462 }
1463
1464 rc = pci_request_regions(pdev, DRV_NAME);
1465 if (rc < 0) {
1466 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1467 pci_name(pdev));
1468 goto err_pci_disable_2;
1469 }
1470
1471 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1472 if (rc < 0) {
1473 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1474 pci_name(pdev));
1475 goto err_free_res_3;
1476 }
1477
1478 pci_set_master(pdev);
1479
1480 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1481 if (!ioaddr) {
1482 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1483 pci_name(pdev));
1484 rc = -EIO;
1485 goto err_free_res_3;
1486 }
1487
1488 tp->pci_dev = pdev;
1489 tp->mmio_addr = ioaddr;
1490
1491 sis190_irq_mask_and_ack(ioaddr);
1492
1493 sis190_soft_reset(ioaddr);
1494out:
1495 return dev;
1496
1497err_free_res_3:
1498 pci_release_regions(pdev);
1499err_pci_disable_2:
1500 pci_disable_device(pdev);
1501err_free_dev_1:
1502 free_netdev(dev);
1503err_out_0:
1504 dev = ERR_PTR(rc);
1505 goto out;
1506}
1507
1508static void sis190_tx_timeout(struct net_device *dev)
1509{
1510 struct sis190_private *tp = netdev_priv(dev);
1511 void __iomem *ioaddr = tp->mmio_addr;
1512 u8 tmp8;
1513
1514 /* Disable Tx, if not already */
1515 tmp8 = SIS_R8(TxControl);
1516 if (tmp8 & CmdTxEnb)
1517 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1518
1519
1520 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1521 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1522
1523 /* Disable interrupts by clearing the interrupt mask. */
1524 SIS_W32(IntrMask, 0x0000);
1525
1526 /* Stop a shared interrupt from scavenging while we are. */
1527 spin_lock_irq(&tp->lock);
1528 sis190_tx_clear(tp);
1529 spin_unlock_irq(&tp->lock);
1530
1531 /* ...and finally, reset everything. */
1532 sis190_hw_start(dev);
1533
1534 netif_wake_queue(dev);
1535}
1536
1537static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1538{
1539 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1540}
1541
1542static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1543 struct net_device *dev)
1544{
1545 struct sis190_private *tp = netdev_priv(dev);
1546 void __iomem *ioaddr = tp->mmio_addr;
1547 u16 sig;
1548 int i;
1549
1550 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1551 pci_name(pdev));
1552
1553 /* Check to see if there is a sane EEPROM */
1554 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1555
1556 if ((sig == 0xffff) || (sig == 0x0000)) {
1557 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1558 pci_name(pdev), sig);
1559 return -EIO;
1560 }
1561
1562 /* Get MAC address from EEPROM */
1563 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1564 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1565
1566 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
1567 }
1568
1569 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1570
1571 return 0;
1572}
1573
1574/**
1575 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1576 * @pdev: PCI device
1577 * @dev: network device to get address for
1578 *
1579 * The SiS965 model stores the MAC address in APC CMOS RAM,
1580 * which is accessed through the ISA bridge.
1581 * The MAC address is read into @dev->dev_addr.
1582 */
1583static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1584 struct net_device *dev)
1585{
1586 struct sis190_private *tp = netdev_priv(dev);
1587 struct pci_dev *isa_bridge;
1588 u8 reg, tmp8;
1589 int i;
1590
1591 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1592 pci_name(pdev));
1593
1594 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1595 if (!isa_bridge) {
1596 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1597 pci_name(pdev));
1598 return -EIO;
1599 }
1600
1601 /* Enable port 78h & 79h to access APC Registers. */
1602 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1603 reg = (tmp8 & ~0x02);
1604 pci_write_config_byte(isa_bridge, 0x48, reg);
1605 udelay(50);
1606 pci_read_config_byte(isa_bridge, 0x48, &reg);
1607
1608 for (i = 0; i < MAC_ADDR_LEN; i++) {
1609 outb(0x9 + i, 0x78);
1610 dev->dev_addr[i] = inb(0x79);
1611 }
1612
1613 outb(0x12, 0x78);
1614 reg = inb(0x79);
1615
1616 sis190_set_rgmii(tp, reg);
1617
1618 /* Restore the value to ISA Bridge */
1619 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1620 pci_dev_put(isa_bridge);
1621
1622 return 0;
1623}
1624
1625/**
1626 * sis190_init_rxfilter - Initialize the Rx filter
1627 * @dev: network device to initialize
1628 *
1629 * Set receive filter address to our MAC address
1630 * and enable packet filtering.
1631 */
1632static inline void sis190_init_rxfilter(struct net_device *dev)
1633{
1634 struct sis190_private *tp = netdev_priv(dev);
1635 void __iomem *ioaddr = tp->mmio_addr;
1636 u16 ctl;
1637 int i;
1638
1639 ctl = SIS_R16(RxMacControl);
1640 /*
1641 * Disable packet filtering before setting filter.
1642 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1643 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1644 */
1645 SIS_W16(RxMacControl, ctl & ~0x0f00);
1646
1647 for (i = 0; i < MAC_ADDR_LEN; i++)
1648 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1649
1650 SIS_W16(RxMacControl, ctl);
1651 SIS_PCI_COMMIT();
1652}
1653
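/*
 * Bit 0 of PCI config register 0x73 selects where the MAC address lives:
 * APC CMOS RAM (behind the SiS965 ISA bridge) when set, the serial
 * EEPROM otherwise.
 */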
1654static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1655{
1656 u8 from;
1657
1658 pci_read_config_byte(pdev, 0x73, &from);
1659
1660 return (from & 0x00000001) ?
1661 sis190_get_mac_addr_from_apc(pdev, dev) :
1662 sis190_get_mac_addr_from_eeprom(pdev, dev);
1663}
1664
1665static void sis190_set_speed_auto(struct net_device *dev)
1666{
1667 struct sis190_private *tp = netdev_priv(dev);
1668 void __iomem *ioaddr = tp->mmio_addr;
1669 int phy_id = tp->mii_if.phy_id;
1670 int val;
1671
1672 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1673
1674 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1675
1676 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1677 // unchanged.
1678 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1679 ADVERTISE_100FULL | ADVERTISE_10FULL |
1680 ADVERTISE_100HALF | ADVERTISE_10HALF);
1681
1682 // Enable 1000 Full Mode.
1683 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1684
1685 // Enable auto-negotiation and restart auto-negotiation.
1686 mdio_write(ioaddr, phy_id, MII_BMCR,
1687 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1688}
1689
1690static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1691{
1692 struct sis190_private *tp = netdev_priv(dev);
1693
1694 return mii_ethtool_gset(&tp->mii_if, cmd);
1695}
1696
1697static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1698{
1699 struct sis190_private *tp = netdev_priv(dev);
1700
1701 return mii_ethtool_sset(&tp->mii_if, cmd);
1702}
1703
1704static void sis190_get_drvinfo(struct net_device *dev,
1705 struct ethtool_drvinfo *info)
1706{
1707 struct sis190_private *tp = netdev_priv(dev);
1708
1709 strcpy(info->driver, DRV_NAME);
1710 strcpy(info->version, DRV_VERSION);
1711 strcpy(info->bus_info, pci_name(tp->pci_dev));
1712}
1713
1714static int sis190_get_regs_len(struct net_device *dev)
1715{
1716 return SIS190_REGS_SIZE;
1717}
1718
1719static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1720 void *p)
1721{
1722 struct sis190_private *tp = netdev_priv(dev);
1723 unsigned long flags;
1724
1725 if (regs->len > SIS190_REGS_SIZE)
1726 regs->len = SIS190_REGS_SIZE;
1727
1728 spin_lock_irqsave(&tp->lock, flags);
1729 memcpy_fromio(p, tp->mmio_addr, regs->len);
1730 spin_unlock_irqrestore(&tp->lock, flags);
1731}
1732
1733static int sis190_nway_reset(struct net_device *dev)
1734{
1735 struct sis190_private *tp = netdev_priv(dev);
1736
1737 return mii_nway_restart(&tp->mii_if);
1738}
1739
1740static u32 sis190_get_msglevel(struct net_device *dev)
1741{
1742 struct sis190_private *tp = netdev_priv(dev);
1743
1744 return tp->msg_enable;
1745}
1746
1747static void sis190_set_msglevel(struct net_device *dev, u32 value)
1748{
1749 struct sis190_private *tp = netdev_priv(dev);
1750
1751 tp->msg_enable = value;
1752}
1753
1754static struct ethtool_ops sis190_ethtool_ops = {
1755 .get_settings = sis190_get_settings,
1756 .set_settings = sis190_set_settings,
1757 .get_drvinfo = sis190_get_drvinfo,
1758 .get_regs_len = sis190_get_regs_len,
1759 .get_regs = sis190_get_regs,
1760 .get_link = ethtool_op_get_link,
1761 .get_msglevel = sis190_get_msglevel,
1762 .set_msglevel = sis190_set_msglevel,
1763 .nway_reset = sis190_nway_reset,
1764};
1765
1766static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1767{
1768 struct sis190_private *tp = netdev_priv(dev);
1769
1770 return !netif_running(dev) ? -EINVAL :
1771 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1772}
1773
1774static int __devinit sis190_init_one(struct pci_dev *pdev,
1775 const struct pci_device_id *ent)
1776{
1777 static int printed_version = 0;
1778 struct sis190_private *tp;
1779 struct net_device *dev;
1780 void __iomem *ioaddr;
1781 int rc;
1782
1783 if (!printed_version) {
1784 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1785 printed_version = 1;
1786 }
1787
1788 dev = sis190_init_board(pdev);
1789 if (IS_ERR(dev)) {
1790 rc = PTR_ERR(dev);
1791 goto out;
1792 }
1793
1794 tp = netdev_priv(dev);
1795 ioaddr = tp->mmio_addr;
1796
1797 rc = sis190_get_mac_addr(pdev, dev);
1798 if (rc < 0)
1799 goto err_release_board;
1800
1801 sis190_init_rxfilter(dev);
1802
1803 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1804
1805 dev->open = sis190_open;
1806 dev->stop = sis190_close;
1807 dev->do_ioctl = sis190_ioctl;
1808 dev->get_stats = sis190_get_stats;
1809 dev->tx_timeout = sis190_tx_timeout;
1810 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1811 dev->hard_start_xmit = sis190_start_xmit;
1812#ifdef CONFIG_NET_POLL_CONTROLLER
1813 dev->poll_controller = sis190_netpoll;
1814#endif
1815 dev->set_multicast_list = sis190_set_rx_mode;
1816 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1817 dev->irq = pdev->irq;
1818 dev->base_addr = (unsigned long) 0xdead;
1819
1820 spin_lock_init(&tp->lock);
1821
1822 rc = sis190_mii_probe(dev);
1823 if (rc < 0)
1824 goto err_release_board;
1825
1826 rc = register_netdev(dev);
1827 if (rc < 0)
1828 goto err_remove_mii;
1829
1830 pci_set_drvdata(pdev, dev);
1831
1832 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1833 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1834 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1835 ioaddr, dev->irq,
1836 dev->dev_addr[0], dev->dev_addr[1],
1837 dev->dev_addr[2], dev->dev_addr[3],
1838 dev->dev_addr[4], dev->dev_addr[5]);
1839
1840 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1841 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1842
1843 netif_carrier_off(dev);
1844
1845 sis190_set_speed_auto(dev);
1846out:
1847 return rc;
1848
1849err_remove_mii:
1850 sis190_mii_remove(dev);
1851err_release_board:
1852 sis190_release_board(pdev);
1853 goto out;
1854}
1855
1856static void __devexit sis190_remove_one(struct pci_dev *pdev)
1857{
1858 struct net_device *dev = pci_get_drvdata(pdev);
1859
1860 sis190_mii_remove(dev);
1861 unregister_netdev(dev);
1862 sis190_release_board(pdev);
1863 pci_set_drvdata(pdev, NULL);
1864}
1865
1866static struct pci_driver sis190_pci_driver = {
1867 .name = DRV_NAME,
1868 .id_table = sis190_pci_tbl,
1869 .probe = sis190_init_one,
1870 .remove = __devexit_p(sis190_remove_one),
1871};
1872
1873static int __init sis190_init_module(void)
1874{
1875 return pci_module_init(&sis190_pci_driver);
1876}
1877
1878static void __exit sis190_cleanup_module(void)
1879{
1880 pci_unregister_driver(&sis190_pci_driver);
1881}
1882
1883module_init(sis190_init_module);
1884module_exit(sis190_cleanup_module);
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index a9b06b8d8e3f..ac9ce6509eee 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -986,7 +986,7 @@ static const char * chip_ids[ 16 ] = {
 986 })
 987#endif
 988
-989#if SMC_CAN_USE_DATACS
+989#ifdef SMC_CAN_USE_DATACS
 990#define SMC_PUSH_DATA(p, l) \
 991 if ( lp->datacs ) { \
 992 unsigned char *__ptr = (p); \
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
new file mode 100644
index 000000000000..4e19220473d0
--- /dev/null
+++ b/drivers/net/spider_net.c
@@ -0,0 +1,2334 @@
1/*
2 * Network device driver for Cell Processor-Based Blade
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/config.h>
25
26#include <linux/compiler.h>
27#include <linux/crc32.h>
28#include <linux/delay.h>
29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
31#include <linux/firmware.h>
32#include <linux/if_vlan.h>
33#include <linux/init.h>
34#include <linux/ioport.h>
35#include <linux/ip.h>
36#include <linux/kernel.h>
37#include <linux/mii.h>
38#include <linux/module.h>
39#include <linux/netdevice.h>
40#include <linux/device.h>
41#include <linux/pci.h>
42#include <linux/skbuff.h>
43#include <linux/slab.h>
44#include <linux/tcp.h>
45#include <linux/types.h>
46#include <linux/wait.h>
47#include <linux/workqueue.h>
48#include <asm/bitops.h>
49#include <asm/pci-bridge.h>
50#include <net/checksum.h>
51
52#include "spider_net.h"
53
54MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
55 "<Jens.Osterkamp@de.ibm.com>");
56MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
57MODULE_LICENSE("GPL");
58
59static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
60static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
61
62module_param(rx_descriptors, int, 0644);
63module_param(tx_descriptors, int, 0644);
64
65MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
66 "in rx chains");
67MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
68 "in tx chain");
69
70char spider_net_driver_name[] = "spidernet";
71
72static struct pci_device_id spider_net_pci_tbl[] = {
73 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
74 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
75 { 0, }
76};
77
78MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
79
80/**
81 * spider_net_read_reg - reads an SMMIO register of a card
82 * @card: device structure
83 * @reg: register to read from
84 *
85 * returns the content of the specified SMMIO register.
86 */
87static u32
88spider_net_read_reg(struct spider_net_card *card, u32 reg)
89{
90 u32 value;
91
92 value = readl(card->regs + reg);
93 value = le32_to_cpu(value);
94
95 return value;
96}
97
98/**
99 * spider_net_write_reg - writes to an SMMIO register of a card
100 * @card: device structure
101 * @reg: register to write to
102 * @value: value to write into the specified SMMIO register
103 */
104static void
105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
106{
107 value = cpu_to_le32(value);
108 writel(value, card->regs + reg);
109}
110
111/**
112 * spider_net_write_reg_sync - writes to an SMMIO register of a card
113 * @card: device structure
114 * @reg: register to write to
115 * @value: value to write into the specified SMMIO register
116 *
117 * Unlike spider_net_write_reg, this will also make sure the
118 * data arrives on the card by reading the reg again.
119 */
120static void
121spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
122{
123 value = cpu_to_le32(value);
124 writel(value, card->regs + reg);
125 (void)readl(card->regs + reg);
126}
127
128/**
129 * spider_net_rx_irq_off - switch off rx irq on this spider card
130 * @card: device structure
131 *
132 * switches off rx irq by masking them out in the GHIINTnMSK register
133 */
134static void
135spider_net_rx_irq_off(struct spider_net_card *card)
136{
137 u32 regvalue;
138 unsigned long flags;
139
140 spin_lock_irqsave(&card->intmask_lock, flags);
141 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
142 regvalue &= ~SPIDER_NET_RXINT;
143 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
144 spin_unlock_irqrestore(&card->intmask_lock, flags);
145}
146
147/** spider_net_write_phy - write to phy register
148 * @netdev: adapter to be written to
149 * @mii_id: id of MII
150 * @reg: PHY register
151 * @val: value to be written to phy register
152 *
153 * spider_net_write_phy writes to an arbitrary PHY
154 * register via the spider GPCWOPCMD register. We assume the queue does
155 * not run full (not more than 15 commands outstanding).
156 **/
157static void
158spider_net_write_phy(struct net_device *netdev, int mii_id,
159 int reg, int val)
160{
161 struct spider_net_card *card = netdev_priv(netdev);
162 u32 writevalue;
163
164 writevalue = ((u32)mii_id << 21) |
165 ((u32)reg << 16) | ((u32)val);
166
167 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
168}
169
170/** spider_net_read_phy - read from phy register
171 * @netdev: network device to be read from
172 * @mii_id: id of MII
173 * @reg: PHY register
174 *
175 * Returns value read from PHY register
176 *
177 * spider_net_read_phy reads from an arbitrary PHY
178 * register via the spider GPCROPCMD register
179 **/
180static int
181spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
182{
183 struct spider_net_card *card = netdev_priv(netdev);
184 u32 readvalue;
185
186 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
187 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
188
189 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
190 * interrupt, as we poll for the completion of the read operation
191 * in spider_net_read_phy. Should take about 50 us */
192 do {
193 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
194 } while (readvalue & SPIDER_NET_GPREXEC);
195
196 readvalue &= SPIDER_NET_GPRDAT_MASK;
197
198 return readvalue;
199}
200
201/**
202 * spider_net_rx_irq_on - switch on rx irq on this spider card
203 * @card: device structure
204 *
205 * switches on rx irq by enabling them in the GHIINTnMSK register
206 */
207static void
208spider_net_rx_irq_on(struct spider_net_card *card)
209{
210 u32 regvalue;
211 unsigned long flags;
212
213 spin_lock_irqsave(&card->intmask_lock, flags);
214 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
215 regvalue |= SPIDER_NET_RXINT;
216 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
217 spin_unlock_irqrestore(&card->intmask_lock, flags);
218}
219
220/**
221 * spider_net_tx_irq_off - switch off tx irq on this spider card
222 * @card: device structure
223 *
224 * switches off tx irq by masking them out in the GHIINTnMSK register
225 */
226static void
227spider_net_tx_irq_off(struct spider_net_card *card)
228{
229 u32 regvalue;
230 unsigned long flags;
231
232 spin_lock_irqsave(&card->intmask_lock, flags);
233 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
234 regvalue &= ~SPIDER_NET_TXINT;
235 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
236 spin_unlock_irqrestore(&card->intmask_lock, flags);
237}
238
239/**
240 * spider_net_tx_irq_on - switch on tx irq on this spider card
241 * @card: device structure
242 *
243 * switches on tx irq by enabling them in the GHIINTnMSK register
244 */
245static void
246spider_net_tx_irq_on(struct spider_net_card *card)
247{
248 u32 regvalue;
249 unsigned long flags;
250
251 spin_lock_irqsave(&card->intmask_lock, flags);
252 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
253 regvalue |= SPIDER_NET_TXINT;
254 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
255 spin_unlock_irqrestore(&card->intmask_lock, flags);
256}
257
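/*
 * The unicast filter entry packs the MAC address into two registers:
 * macu carries dev_addr[0..1] (plus the SPIDER_NET_UA_DESCR_VALUE flags),
 * macl carries dev_addr[2..5].
 */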
258/**
259 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
260 * @card: card structure
261 *
262 * spider_net_set_promisc sets the unicast destination address filter and
263 * thus either allows for non-promisc mode or promisc mode
264 */
265static void
266spider_net_set_promisc(struct spider_net_card *card)
267{
268 u32 macu, macl;
269 struct net_device *netdev = card->netdev;
270
271 if (netdev->flags & IFF_PROMISC) {
272 /* clear destination entry 0 */
273 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
274 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
275 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
276 SPIDER_NET_PROMISC_VALUE);
277 } else {
278 macu = netdev->dev_addr[0];
279 macu <<= 8;
280 macu |= netdev->dev_addr[1];
281 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
282
283 macu |= SPIDER_NET_UA_DESCR_VALUE;
284 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
285 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
286 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
287 SPIDER_NET_NONPROMISC_VALUE);
288 }
289}
290
291/**
292 * spider_net_get_mac_address - read mac address from spider card
293 * @netdev: network device structure
294 *
295 * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
296 */
297static int
298spider_net_get_mac_address(struct net_device *netdev)
299{
300 struct spider_net_card *card = netdev_priv(netdev);
301 u32 macl, macu;
302
303 macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
304 macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
305
306 netdev->dev_addr[0] = (macu >> 24) & 0xff;
307 netdev->dev_addr[1] = (macu >> 16) & 0xff;
308 netdev->dev_addr[2] = (macu >> 8) & 0xff;
309 netdev->dev_addr[3] = macu & 0xff;
310 netdev->dev_addr[4] = (macl >> 8) & 0xff;
311 netdev->dev_addr[5] = macl & 0xff;
312
313 if (!is_valid_ether_addr(&netdev->dev_addr[0]))
314 return -EINVAL;
315
316 return 0;
317}
318
319/**
320 * spider_net_get_descr_status -- returns the status of a descriptor
321 * @descr: descriptor to look at
322 *
323 * returns the status as in the dmac_cmd_status field of the descriptor
324 */
325static enum spider_net_descr_status
326spider_net_get_descr_status(struct spider_net_descr *descr)
327{
328 u32 cmd_status;
329 rmb();
330 cmd_status = descr->dmac_cmd_status;
331 rmb();
332 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
333 /* no need to mask out any bits, as cmd_status is 32 bits wide only
334 * (and unsigned) */
335 return cmd_status;
336}
337
338/**
339 * spider_net_set_descr_status -- sets the status of a descriptor
340 * @descr: descriptor to change
341 * @status: status to set in the descriptor
342 *
343 * changes the status to the specified value. Doesn't change other bits
344 * in the status
345 */
346static void
347spider_net_set_descr_status(struct spider_net_descr *descr,
348 enum spider_net_descr_status status)
349{
350 u32 cmd_status;
351 /* read the status */
352 mb();
353 cmd_status = descr->dmac_cmd_status;
354 /* clean the upper 4 bits */
355 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
356 /* add the status to it */
357 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
358 /* and write it back */
359 descr->dmac_cmd_status = cmd_status;
360 wmb();
361}
362
363/**
364 * spider_net_free_chain - free descriptor chain
365 * @card: card structure
366 * @chain: address of chain
367 *
368 */
369static void
370spider_net_free_chain(struct spider_net_card *card,
371 struct spider_net_descr_chain *chain)
372{
373 struct spider_net_descr *descr;
374
375 for (descr = chain->tail; descr->bus_addr; descr = descr->next) {
376 pci_unmap_single(card->pdev, descr->bus_addr,
377 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
378 descr->bus_addr = 0;
379 }
380}
381
382/**
383 * spider_net_init_chain - links descriptor chain
384 * @card: card structure
385 * @chain: address of chain
386 * @start_descr: address of descriptor array
387 * @no: number of descriptors
388 *
389 * we manage a circular list that mirrors the hardware structure,
390 * except that the hardware uses bus addresses.
391 *
392 * returns 0 on success, <0 on failure
393 */
394static int
395spider_net_init_chain(struct spider_net_card *card,
396 struct spider_net_descr_chain *chain,
397 struct spider_net_descr *start_descr, int no)
398{
399 int i;
400 struct spider_net_descr *descr;
401
402 spin_lock_init(&card->chain_lock);
403
404 descr = start_descr;
405 memset(descr, 0, sizeof(*descr) * no);
406
407 /* set up the hardware pointers in each descriptor */
408 for (i=0; i<no; i++, descr++) {
409 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
410
411 descr->bus_addr =
412 pci_map_single(card->pdev, descr,
413 SPIDER_NET_DESCR_SIZE,
414 PCI_DMA_BIDIRECTIONAL);
415
416 if (descr->bus_addr == DMA_ERROR_CODE)
417 goto iommu_error;
418
419 descr->next = descr + 1;
420 descr->prev = descr - 1;
421
422 }
423 /* do actual circular list */
424 (descr-1)->next = start_descr;
425 start_descr->prev = descr-1;
426
427 descr = start_descr;
428 for (i=0; i < no; i++, descr++) {
429 descr->next_descr_addr = descr->next->bus_addr;
430 }
431
432 chain->head = start_descr;
433 chain->tail = start_descr;
434
435 return 0;
436
437iommu_error:
438 descr = start_descr;
439 for (i=0; i < no; i++, descr++)
440 if (descr->bus_addr)
441 pci_unmap_single(card->pdev, descr->bus_addr,
442 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
443 return -ENOMEM;
444}
445
446/**
447 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
448 * @card: card structure
449 *
450 * frees the skbs attached to the rx chain and unmaps their buffers
451 */
452static void
453spider_net_free_rx_chain_contents(struct spider_net_card *card)
454{
455 struct spider_net_descr *descr;
456
457 descr = card->rx_chain.head;
458 while (descr->next != card->rx_chain.head) {
459 if (descr->skb) {
460 dev_kfree_skb(descr->skb);
461 pci_unmap_single(card->pdev, descr->buf_addr,
462 SPIDER_NET_MAX_MTU,
463 PCI_DMA_BIDIRECTIONAL);
464 }
465 descr = descr->next;
466 }
467}
468
469/**
470 * spider_net_prepare_rx_descr - reinitializes a rx descriptor
471 * @card: card structure
472 * @descr: descriptor to re-init
473 *
474 * returns 0 on success, <0 on failure
475 *
476 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
477 * Activate the descriptor state-wise
478 */
479static int
480spider_net_prepare_rx_descr(struct spider_net_card *card,
481 struct spider_net_descr *descr)
482{
483 int error = 0;
484 int offset;
485 int bufsize;
486
487 /* we need to round up the buffer size to a multiple of 128 */
488 bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
489 (~(SPIDER_NET_RXBUF_ALIGN - 1));
490
491 /* and we need to have it 128 byte aligned, therefore we allocate a
492 * bit more */
493 /* allocate an skb */
494 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
495 if (!descr->skb) {
496 if (net_ratelimit())
497 if (netif_msg_rx_err(card))
498 pr_err("Not enough memory to allocate "
499 "rx buffer\n");
500 return -ENOMEM;
501 }
502 descr->buf_size = bufsize;
503 descr->result_size = 0;
504 descr->valid_size = 0;
505 descr->data_status = 0;
506 descr->data_error = 0;
507
508 offset = ((unsigned long)descr->skb->data) &
509 (SPIDER_NET_RXBUF_ALIGN - 1);
510 if (offset)
511 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
512 /* io-mmu-map the skb */
513 descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
514 SPIDER_NET_MAX_MTU,
515 PCI_DMA_BIDIRECTIONAL);
516 if (descr->buf_addr == DMA_ERROR_CODE) {
517 dev_kfree_skb_any(descr->skb);
518 if (netif_msg_rx_err(card))
519 pr_err("Could not iommu-map rx buffer\n");
520 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
521 } else {
522 descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED;
523 }
524
525 return error;
526}
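/*
 * Worked example for the rounding above: with SPIDER_NET_MAX_MTU == 2308
 * and SPIDER_NET_RXBUF_ALIGN == 128,
 *
 *	bufsize = (2308 + 127) & ~127 = 2432
 *
 * and dev_alloc_skb() is asked for 2432 + 127 bytes, so that skb_reserve()
 * can later push skb->data up to the next 128-byte boundary.
 */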
527
528/**
529 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
530 * @card: card structure
531 *
532 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
533 * chip by writing to the appropriate register. DMA is enabled in
534 * spider_net_enable_rxdmac.
535 */
536static void
537spider_net_enable_rxchtails(struct spider_net_card *card)
538{
539 /* assume chain is aligned correctly */
540 spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
541 card->rx_chain.tail->bus_addr);
542}
543
544/**
545 * spider_net_enable_rxdmac - enables a receive DMA controller
546 * @card: card structure
547 *
548 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
549 * in the GDADMACCNTR register
550 */
551static void
552spider_net_enable_rxdmac(struct spider_net_card *card)
553{
554 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
555 SPIDER_NET_DMA_RX_VALUE);
556}
557
558/**
559 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
560 * @card: card structure
561 *
562 * refills descriptors in all chains (last used chain first): allocates skbs
563 * and iommu-maps them.
564 */
565static void
566spider_net_refill_rx_chain(struct spider_net_card *card)
567{
568 struct spider_net_descr_chain *chain;
569 int count = 0;
570 unsigned long flags;
571
572 chain = &card->rx_chain;
573
574 spin_lock_irqsave(&card->chain_lock, flags);
575 while (spider_net_get_descr_status(chain->head) ==
576 SPIDER_NET_DESCR_NOT_IN_USE) {
577 if (spider_net_prepare_rx_descr(card, chain->head))
578 break;
579 count++;
580 chain->head = chain->head->next;
581 }
582 spin_unlock_irqrestore(&card->chain_lock, flags);
583
584 /* could be optimized: only do this if we know the DMA processing
585 * has terminated */
586 if (count)
587 spider_net_enable_rxdmac(card);
588}
589
590/**
591 * spider_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains
592 * @card: card structure
593 *
594 * returns 0 on success, <0 on failure
595 */
596static int
597spider_net_alloc_rx_skbs(struct spider_net_card *card)
598{
599 int result;
600 struct spider_net_descr_chain *chain;
601
602 result = -ENOMEM;
603
604 chain = &card->rx_chain;
605 /* put at least one buffer into the chain. if this fails,
606 * we've got a problem. if not, spider_net_refill_rx_chain
607 * will do the rest at the end of this function */
608 if (spider_net_prepare_rx_descr(card, chain->head))
609 goto error;
610 else
611 chain->head = chain->head->next;
612
613 /* this will allocate the rest of the rx buffers; if not, it's
614 * business as usual later on */
615 spider_net_refill_rx_chain(card);
616 return 0;
617
618error:
619 spider_net_free_rx_chain_contents(card);
620 return result;
621}
622
623/**
624 * spider_net_release_tx_descr - processes a used tx descriptor
625 * @card: card structure
626 * @descr: descriptor to release
627 *
628 * releases a used tx descriptor (unmapping, freeing of skb)
629 */
630static void
631spider_net_release_tx_descr(struct spider_net_card *card,
632 struct spider_net_descr *descr)
633{
634 struct sk_buff *skb;
635
636 /* unmap the skb */
637 skb = descr->skb;
638 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
639 PCI_DMA_BIDIRECTIONAL);
640
641 dev_kfree_skb_any(skb);
642
643 /* set status to not used */
644 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
645}
646
647/**
648 * spider_net_release_tx_chain - processes sent tx descriptors
649 * @card: adapter structure
650 * @brutal: if set, don't care about whether descriptor seems to be in use
651 *
652 * releases the tx descriptors that spider has finished with (if non-brutal)
653 * or simply release tx descriptors (if brutal)
654 */
655static void
656spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
657{
658 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
659 enum spider_net_descr_status status;
660
661 spider_net_tx_irq_off(card);
662
663 /* no lock needed for the chain, as this is only executed once at a time */
664again:
665 for (;;) {
666 status = spider_net_get_descr_status(tx_chain->tail);
667 switch (status) {
668 case SPIDER_NET_DESCR_CARDOWNED:
669 if (!brutal) goto out;
670 /* fallthrough, if we release the descriptors
671 * brutally (then we don't care about
672 * SPIDER_NET_DESCR_CARDOWNED) */
673 case SPIDER_NET_DESCR_RESPONSE_ERROR:
674 case SPIDER_NET_DESCR_PROTECTION_ERROR:
675 case SPIDER_NET_DESCR_FORCE_END:
676 if (netif_msg_tx_err(card))
677 pr_err("%s: forcing end of tx descriptor "
678 "with status x%02x\n",
679 card->netdev->name, status);
680 card->netdev_stats.tx_dropped++;
681 break;
682
683 case SPIDER_NET_DESCR_COMPLETE:
684 card->netdev_stats.tx_packets++;
685 card->netdev_stats.tx_bytes +=
686 tx_chain->tail->skb->len;
687 break;
688
689 default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
690 goto out;
691 }
692 spider_net_release_tx_descr(card, tx_chain->tail);
693 tx_chain->tail = tx_chain->tail->next;
694 }
695out:
696 netif_wake_queue(card->netdev);
697
698 if (!brutal) {
699 /* switch on tx irqs (while we are still in the interrupt
700 * handler, so we don't get an interrupt), check again
701 * for done descriptors. This results in fewer interrupts */
702 spider_net_tx_irq_on(card);
703 status = spider_net_get_descr_status(tx_chain->tail);
704 switch (status) {
705 case SPIDER_NET_DESCR_RESPONSE_ERROR:
706 case SPIDER_NET_DESCR_PROTECTION_ERROR:
707 case SPIDER_NET_DESCR_FORCE_END:
708 case SPIDER_NET_DESCR_COMPLETE:
709 goto again;
710 default:
711 break;
712 }
713 }
714
715}
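/*
 * For illustration: the tx-irq bracketing above narrows the usual
 * missed-completion window. TX interrupts are masked while the tail is
 * walked; in the non-brutal case the interrupt is re-enabled first and the
 * tail is checked once more, so a descriptor that completed in between is
 * handled via the "goto again" path rather than lingering until the next
 * transmission triggers another interrupt.
 */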
716
717/**
718 * spider_net_get_multicast_hash - generates hash for multicast filter table
719 * @addr: multicast address
720 *
721 * returns the hash value.
722 *
723 * spider_net_get_multicast_hash calculates a hash value for a given multicast
724 * address, that is used to set the multicast filter tables
725 */
726static u8
727spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
728{
729 /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
730 * ff:ff:ff:ff:ff:ff must result in 0xfd */
731 u32 crc;
732 u8 hash;
733
734 crc = crc32_be(~0, addr, netdev->addr_len);
735
736 hash = (crc >> 27);
737 hash <<= 3;
738 hash |= crc & 7;
739
740 return hash;
741}
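/*
 * For illustration: the hash is derived from the big-endian CRC32 of the
 * address -- the top five CRC bits become bits 7..3 of the result and the
 * lowest three CRC bits become bits 2..0, giving an index into the
 * 256-entry multicast filter table (SPIDER_NET_MULTICAST_HASHES). Per the
 * FIXME in the function body, the expected reference values are
 *
 *	01:00:5e:00:00:01 -> 0xa9
 *	ff:ff:ff:ff:ff:ff -> 0xfd
 */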
742
743/**
744 * spider_net_set_multi - sets multicast addresses and promisc flags
745 * @netdev: interface device structure
746 *
747 * spider_net_set_multi configures multicast addresses as needed for the
748 * netdev interface. It also sets up multicast, allmulti and promisc
749 * flags appropriately
750 */
751static void
752spider_net_set_multi(struct net_device *netdev)
753{
754 struct dev_mc_list *mc;
755 u8 hash;
756 int i;
757 u32 reg;
758 struct spider_net_card *card = netdev_priv(netdev);
759 unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
760 {0, };
761
762 spider_net_set_promisc(card);
763
764 if (netdev->flags & IFF_ALLMULTI) {
765 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
766 set_bit(i, bitmask);
767 }
768 goto write_hash;
769 }
770
771 /* we know what the broadcast hash value is: it's 0xfd
772 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
773 set_bit(0xfd, bitmask);
774
775 for (mc = netdev->mc_list; mc; mc = mc->next) {
776 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
777 set_bit(hash, bitmask);
778 }
779
780write_hash:
781 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
782 reg = 0;
783 if (test_bit(i * 4, bitmask))
784 reg += 0x08;
785 reg <<= 8;
786 if (test_bit(i * 4 + 1, bitmask))
787 reg += 0x08;
788 reg <<= 8;
789 if (test_bit(i * 4 + 2, bitmask))
790 reg += 0x08;
791 reg <<= 8;
792 if (test_bit(i * 4 + 3, bitmask))
793 reg += 0x08;
794
795 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
796 }
797}
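/*
 * For illustration: the write_hash loop above packs four hash bits into
 * each 32-bit filter register, one per byte (value 0x08), with hash 4*i in
 * the most significant byte; the 256 possible hash values thus map onto 64
 * consecutive registers starting at SPIDER_NET_GMRMHFILnR. Setting only
 * the broadcast hash 0xfd, for example, writes register 63 (0xfd / 4) with
 * 0x00080000, since 0xfd % 4 == 1 selects the second-highest byte.
 */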
798
799/**
800 * spider_net_disable_rxdmac - disables the receive DMA controller
801 * @card: card structure
802 *
803 * spider_net_disable_rxdmac terminates processing on the DMA controller by
804 * turning off DMA and issuing a force end
805 */
806static void
807spider_net_disable_rxdmac(struct spider_net_card *card)
808{
809 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
810 SPIDER_NET_DMA_RX_FEND_VALUE);
811}
812
813/**
814 * spider_net_stop - called upon ifconfig down
815 * @netdev: interface device structure
816 *
817 * always returns 0
818 */
819int
820spider_net_stop(struct net_device *netdev)
821{
822 struct spider_net_card *card = netdev_priv(netdev);
823
824 netif_poll_disable(netdev);
825 netif_carrier_off(netdev);
826 netif_stop_queue(netdev);
827
828 /* disable/mask all interrupts */
829 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
830 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
831 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
832
833 /* free_irq(netdev->irq, netdev);*/
834 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
835
836 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
837 SPIDER_NET_DMA_TX_FEND_VALUE);
838
839 /* turn off DMA, force end */
840 spider_net_disable_rxdmac(card);
841
842 /* release chains */
843 spider_net_release_tx_chain(card, 1);
844
845 spider_net_free_chain(card, &card->tx_chain);
846 spider_net_free_chain(card, &card->rx_chain);
847
848 return 0;
849}
850
851/**
852 * spider_net_get_next_tx_descr - returns the next available tx descriptor
853 * @card: device structure to get descriptor from
854 *
855 * returns the address of the next descriptor, or NULL if not available.
856 */
857static struct spider_net_descr *
858spider_net_get_next_tx_descr(struct spider_net_card *card)
859{
860 /* check whether head points to a not-in-use descriptor */
861 if ( spider_net_get_descr_status(card->tx_chain.head) ==
862 SPIDER_NET_DESCR_NOT_IN_USE ) {
863 return card->tx_chain.head;
864 } else {
865 return NULL;
866 }
867}
868
869/**
870 * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
871 * @descr: descriptor structure to fill out
872 * @skb: packet to consider
873 *
874 * fills out the command and status field of the descriptor structure,
875 * depending on hardware checksum settings. This function assumes a wmb()
876 * has executed before.
877 */
878static void
879spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
880 struct sk_buff *skb)
881{
882 if (skb->ip_summed != CHECKSUM_HW) {
883 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
884 return;
885 }
886
887 /* is packet ip?
888 * if yes: tcp? udp? */
889 if (skb->protocol == htons(ETH_P_IP)) {
890 if (skb->nh.iph->protocol == IPPROTO_TCP) {
891 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
892 } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
893 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
894 } else { /* the stack should checksum non-tcp and non-udp
895 packets on its own: NETIF_F_IP_CSUM */
896 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
897 }
898 }
899}
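/*
 * For illustration, the resulting cmd_status mapping:
 *
 *	ip_summed != CHECKSUM_HW  -> SPIDER_NET_DMAC_CMDSTAT_NOCS
 *	IPv4 + TCP                -> SPIDER_NET_DMAC_CMDSTAT_TCPCS
 *	IPv4 + UDP                -> SPIDER_NET_DMAC_CMDSTAT_UDPCS
 *	IPv4 + other protocol     -> SPIDER_NET_DMAC_CMDSTAT_NOCS
 *
 * Non-IPv4 packets with CHECKSUM_HW fall through without setting
 * dmac_cmd_status, relying on the stack to request hardware checksumming
 * only for IPv4 (NETIF_F_IP_CSUM).
 */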
900
901/**
902 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
903 * @card: card structure
904 * @descr: descriptor structure to fill out
905 * @skb: packet to use
906 *
907 * returns 0 on success, <0 on failure.
908 *
909 * fills out the descriptor structure with skb data and len. Copies data,
910 * if needed (32bit DMA!)
911 */
912static int
913spider_net_prepare_tx_descr(struct spider_net_card *card,
914 struct spider_net_descr *descr,
915 struct sk_buff *skb)
916{
917 descr->buf_addr = pci_map_single(card->pdev, skb->data,
918 skb->len, PCI_DMA_BIDIRECTIONAL);
919 if (descr->buf_addr == DMA_ERROR_CODE) {
920 if (netif_msg_tx_err(card))
921 pr_err("could not iommu-map packet (%p, %i). "
922 "Dropping packet\n", skb->data, skb->len);
923 return -ENOMEM;
924 }
925
926 descr->buf_size = skb->len;
927 descr->skb = skb;
928 descr->data_status = 0;
929
930 /* make sure the above values are in memory before we change the
931 * status */
932 wmb();
933
934 spider_net_set_txdescr_cmdstat(descr,skb);
935
936 return 0;
937}
938
939/**
940 * spider_net_kick_tx_dma - enables TX DMA processing
941 * @card: card structure
942 * @descr: descriptor address to enable TX processing at
943 *
944 * spider_net_kick_tx_dma writes the current tx chain head as start address
945 * of the tx descriptor chain and enables the transmission DMA engine
946 */
947static void
948spider_net_kick_tx_dma(struct spider_net_card *card,
949 struct spider_net_descr *descr)
950{
951 /* this is the only descriptor in the output chain.
952 * Enable TX DMA */
953
954 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
955 descr->bus_addr);
956
957 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
958 SPIDER_NET_DMA_TX_VALUE);
959}
960
961/**
962 * spider_net_xmit - transmits a frame over the device
963 * @skb: packet to send out
964 * @netdev: interface device structure
965 *
966 * returns 0 on success, <0 on failure
967 */
968static int
969spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
970{
971 struct spider_net_card *card = netdev_priv(netdev);
972 struct spider_net_descr *descr;
973 int result;
974
975 descr = spider_net_get_next_tx_descr(card);
976
977 if (!descr) {
978 netif_stop_queue(netdev);
979
980 descr = spider_net_get_next_tx_descr(card);
981 if (!descr)
982 goto error;
983 else
984 netif_start_queue(netdev);
985 }
986
987 result = spider_net_prepare_tx_descr(card, descr, skb);
988 if (result)
989 goto error;
990
991 card->tx_chain.head = card->tx_chain.head->next;
992
993 /* make sure the status from spider_net_prepare_tx_descr is in
994 * memory before we check out the previous descriptor */
995 wmb();
996
997 if (spider_net_get_descr_status(descr->prev) !=
998 SPIDER_NET_DESCR_CARDOWNED)
999 spider_net_kick_tx_dma(card, descr);
1000
1001 return NETDEV_TX_OK;
1002
1003error:
1004 card->netdev_stats.tx_dropped++;
1005 return NETDEV_TX_LOCKED;
1006}
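/*
 * For illustration: the check on descr->prev above decides whether the TX
 * DMA engine needs a kick. If the previous descriptor is still card-owned,
 * the engine is presumed to be running and will follow next_descr_addr
 * into the freshly prepared descriptor on its own; only when the previous
 * descriptor is no longer card-owned does spider_net_kick_tx_dma() rewrite
 * GDTDCHA and re-enable TX DMA.
 */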
1007
1008/**
1009 * spider_net_do_ioctl - called for device ioctls
1010 * @netdev: interface device structure
1011 * @ifr: request parameter structure for ioctl
1012 * @cmd: command code for ioctl
1013 *
1014 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
1015 * -EOPNOTSUPP is returned if an unknown ioctl is requested
1016 */
1017static int
1018spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1019{
1020 switch (cmd) {
1021 default:
1022 return -EOPNOTSUPP;
1023 }
1024}
1025
1026/**
1027 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
1028 * @descr: descriptor to process
1029 * @card: card structure
1030 *
1031 * returns 1 on success, 0 if no packet was passed to the stack
1032 *
1033 * iommu-unmaps the skb, fills out skb structure and passes the data to the
1034 * stack. The descriptor state is not changed.
1035 */
1036static int
1037spider_net_pass_skb_up(struct spider_net_descr *descr,
1038 struct spider_net_card *card)
1039{
1040 struct sk_buff *skb;
1041 struct net_device *netdev;
1042 u32 data_status, data_error;
1043
1044 data_status = descr->data_status;
1045 data_error = descr->data_error;
1046
1047 netdev = card->netdev;
1048
1049 /* check for errors in the data_error flag */
1050 if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
1051 netif_msg_rx_err(card))
1052 pr_err("error in received descriptor found, "
1053 "data_status=x%08x, data_error=x%08x\n",
1054 data_status, data_error);
1055
1056 /* prepare skb, unmap descriptor */
1057 skb = descr->skb;
1058 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
1059 PCI_DMA_BIDIRECTIONAL);
1060
1061 /* the cases we'll throw away the packet immediately */
1062 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
1063 return 0;
1064
1065 skb->dev = netdev;
1066 skb_put(skb, descr->valid_size);
1067
1068 /* the card seems to add 2 bytes of junk in front
1069 * of the ethernet frame */
1070#define SPIDER_MISALIGN 2
1071 skb_pull(skb, SPIDER_MISALIGN);
1072 skb->protocol = eth_type_trans(skb, netdev);
1073
1074 /* checksum offload */
1075 if (card->options.rx_csum) {
1076 if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
1077 (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
1078 skb->ip_summed = CHECKSUM_UNNECESSARY;
1079 else
1080 skb->ip_summed = CHECKSUM_NONE;
1081 } else {
1082 skb->ip_summed = CHECKSUM_NONE;
1083 }
1084
1085 if (data_status & SPIDER_NET_VLAN_PACKET) {
1086 /* further enhancements: HW-accel VLAN
1087 * vlan_hwaccel_receive_skb
1088 */
1089 }
1090
1091 /* pass skb up to stack */
1092 netif_receive_skb(skb);
1093
1094 /* update netdevice statistics */
1095 card->netdev_stats.rx_packets++;
1096 card->netdev_stats.rx_bytes += skb->len;
1097
1098 return 1;
1099}
1100
1101/**
1102 * spider_net_decode_descr - processes an rx descriptor
1103 * @card: card structure
1104 *
1105 * returns 1 if a packet has been sent to the stack, otherwise 0
1106 *
1107 * processes an rx descriptor by iommu-unmapping the data buffer and passing
1108 * the packet up to the stack
1109 */
1110static int
1111spider_net_decode_one_descr(struct spider_net_card *card)
1112{
1113 enum spider_net_descr_status status;
1114 struct spider_net_descr *descr;
1115 struct spider_net_descr_chain *chain;
1116 int result;
1117
1118 chain = &card->rx_chain;
1119 descr = chain->tail;
1120
1121 status = spider_net_get_descr_status(descr);
1122
1123 if (status == SPIDER_NET_DESCR_CARDOWNED) {
1124 /* nothing in the descriptor yet */
1125 return 0;
1126 }
1127
1128 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1129 /* not initialized yet, I bet chain->tail == chain->head
1130 * and the ring is empty */
1131 spider_net_refill_rx_chain(card);
1132 return 0;
1133 }
1134
1135 /* descriptor definitely used -- advance the tail */
1136 chain->tail = descr->next;
1137
1138 result = 0;
1139 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1140 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1141 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1142 if (netif_msg_rx_err(card))
1143 pr_err("%s: dropping RX descriptor with state %d\n",
1144 card->netdev->name, status);
1145 card->netdev_stats.rx_dropped++;
1146 goto refill;
1147 }
1148
1149 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1150 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1151 if (netif_msg_rx_err(card))
1152 pr_err("%s: RX descriptor with state %d\n",
1153 card->netdev->name, status);
1154 goto refill;
1155 }
1156
1157 /* ok, we've got a packet in descr */
1158 result = spider_net_pass_skb_up(descr, card);
1159refill:
1160 /* change the descriptor state: */
1161 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
1162 spider_net_refill_rx_chain(card);
1163
1164 return result;
1165}
1166
1167/**
1168 * spider_net_poll - NAPI poll function called by the stack to return packets
1169 * @netdev: interface device structure
1170 * @budget: number of packets we can pass to the stack at most
1171 *
1172 * returns 0 if no more packets are available to the driver/stack. Returns 1
1173 * if the quota is exceeded but the driver still has packets pending.
1174 *
1175 * spider_net_poll returns all packets from the rx descriptors to the stack
1176 * (using netif_receive_skb). If all/enough packets are up, the driver
1177 * reenables interrupts and returns 0. If not, 1 is returned.
1178 */
1179static int
1180spider_net_poll(struct net_device *netdev, int *budget)
1181{
1182 struct spider_net_card *card = netdev_priv(netdev);
1183 int packets_to_do, packets_done = 0;
1184 int no_more_packets = 0;
1185
1186 packets_to_do = min(*budget, netdev->quota);
1187
1188 while (packets_to_do) {
1189 if (spider_net_decode_one_descr(card)) {
1190 packets_done++;
1191 packets_to_do--;
1192 } else {
1193 /* no more packets for the stack */
1194 no_more_packets = 1;
1195 break;
1196 }
1197 }
1198
1199 netdev->quota -= packets_done;
1200 *budget -= packets_done;
1201
1202 /* if all packets are in the stack, enable interrupts and return 0 */
1203 /* if not, return 1 */
1204 if (no_more_packets) {
1205 netif_rx_complete(netdev);
1206 spider_net_rx_irq_on(card);
1207 return 0;
1208 }
1209
1210 return 1;
1211}
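/*
 * For illustration: this follows the NAPI contract of this kernel
 * generation (netdev->poll with quota/budget) -- consume up to the budget,
 * subtract the work done from both *budget and netdev->quota, and either
 * return 1 to stay on the poll list or call netif_rx_complete(), re-enable
 * the RX interrupt and return 0.
 */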
1212
1213/**
1214 * spider_net_vlan_rx_reg - initializes VLAN structures in the driver and card
1215 * @netdev: interface device structure
1216 * @grp: vlan_group structure that is registered (NULL on destroying interface)
1217 */
1218static void
1219spider_net_vlan_rx_reg(struct net_device *netdev, struct vlan_group *grp)
1220{
1221 /* further enhancement... yet to do */
1222 return;
1223}
1224
1225/**
1226 * spider_net_vlan_rx_add - adds VLAN id to the card filter
1227 * @netdev: interface device structure
1228 * @vid: VLAN id to add
1229 */
1230static void
1231spider_net_vlan_rx_add(struct net_device *netdev, uint16_t vid)
1232{
1233 /* further enhancement... yet to do */
1234 /* add vid to card's VLAN filter table */
1235 return;
1236}
1237
1238/**
1239 * spider_net_vlan_rx_kill - removes VLAN id from the card filter
1240 * @netdev: interface device structure
1241 * @vid: VLAN id to remove
1242 */
1243static void
1244spider_net_vlan_rx_kill(struct net_device *netdev, uint16_t vid)
1245{
1246 /* further enhancement... yet to do */
1247 /* remove vid from card's VLAN filter table */
1248}
1249
1250/**
1251 * spider_net_get_stats - get interface statistics
1252 * @netdev: interface device structure
1253 *
1254 * returns the interface statistics residing in the spider_net_card struct
1255 */
1256static struct net_device_stats *
1257spider_net_get_stats(struct net_device *netdev)
1258{
1259 struct spider_net_card *card = netdev_priv(netdev);
1260 struct net_device_stats *stats = &card->netdev_stats;
1261 return stats;
1262}
1263
1264/**
1265 * spider_net_change_mtu - changes the MTU of an interface
1266 * @netdev: interface device structure
1267 * @new_mtu: new MTU value
1268 *
1269 * returns 0 on success, <0 on failure
1270 */
1271static int
1272spider_net_change_mtu(struct net_device *netdev, int new_mtu)
1273{
1274 /* no need to re-allocate skbs -- the max mtu is about 2.3k
1275 * and the mtu only affects the outbound path anyway */
1276 if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
1277 (new_mtu > SPIDER_NET_MAX_MTU) )
1278 return -EINVAL;
1279 netdev->mtu = new_mtu;
1280 return 0;
1281}
1282
1283/**
1284 * spider_net_set_mac - sets the MAC of an interface
1285 * @netdev: interface device structure
1286 * @ptr: pointer to new MAC address
1287 *
1288 * Returns 0 on success, <0 on failure. The new address is written to the
1289 * unicast MAC registers and verified by reading it back.
1290 */
1291static int
1292spider_net_set_mac(struct net_device *netdev, void *p)
1293{
1294 struct spider_net_card *card = netdev_priv(netdev);
1295 u32 macl, macu, regvalue;
1296 struct sockaddr *addr = p;
1297
1298 if (!is_valid_ether_addr(addr->sa_data))
1299 return -EADDRNOTAVAIL;
1300
1301 /* switch off GMACTPE and GMACRPE */
1302 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1303 regvalue &= ~((1 << 5) | (1 << 6));
1304 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1305
1306 /* write mac */
1307 macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
1308 (addr->sa_data[2]<<8) + (addr->sa_data[3]);
1309 macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
1310 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1311 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1312
1313 /* switch GMACTPE and GMACRPE back on */
1314 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1315 regvalue |= ((1 << 5) | (1 << 6));
1316 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1317
1318 spider_net_set_promisc(card);
1319
1320 /* check whether we have been successful */
1321 if (spider_net_get_mac_address(netdev))
1322 return -EADDRNOTAVAIL;
1323 if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
1324 return -EADDRNOTAVAIL;
1325
1326 return 0;
1327}
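/*
 * For illustration: the MAC address is split across two registers, the
 * first four bytes packed big-endian into GMACUNIMACU and the last two
 * into GMACUNIMACL. A hypothetical address 00:11:22:33:44:55 would thus
 * give macu == 0x00112233 and macl == 0x00004455.
 */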
1328
1329/**
1330 * spider_net_enable_txdmac - enables a TX DMA controller
1331 * @card: card structure
1332 *
1333 * spider_net_enable_txdmac enables the TX DMA controller by setting the
1334 * descriptor chain tail address
1335 */
1336static void
1337spider_net_enable_txdmac(struct spider_net_card *card)
1338{
1339 /* assume chain is aligned correctly */
1340 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
1341 card->tx_chain.tail->bus_addr);
1342}
1343
1344/**
1345 * spider_net_handle_error_irq - handles errors raised by an interrupt
1346 * @card: card structure
1347 * @status_reg: interrupt status register 0 (GHIINT0STS)
1348 *
1349 * spider_net_handle_error_irq treats or ignores all error conditions
1350 * found when an interrupt is presented
1351 */
1352static void
1353spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1354{
1355 u32 error_reg1, error_reg2;
1356 u32 i;
1357 int show_error = 1;
1358
1359 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1360 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1361
1362 /* check GHIINT0STS ************************************/
1363 if (status_reg)
1364 for (i = 0; i < 32; i++)
1365 if (status_reg & (1<<i))
1366 switch (i)
1367 {
1368 /* let error_reg1 and error_reg2 evaluation decide, what to do
1369 case SPIDER_NET_PHYINT:
1370 case SPIDER_NET_GMAC2INT:
1371 case SPIDER_NET_GMAC1INT:
1372 case SPIDER_NET_GIPSINT:
1373 case SPIDER_NET_GFIFOINT:
1374 case SPIDER_NET_DMACINT:
1375 case SPIDER_NET_GSYSINT:
1376 break; */
1377
1378 case SPIDER_NET_GPWOPCMPINT:
1379 /* PHY write operation completed */
1380 show_error = 0;
1381 break;
1382 case SPIDER_NET_GPROPCMPINT:
1383 /* PHY read operation completed */
1384 /* we don't use semaphores, as we poll for the completion
1385 * of the read operation in spider_net_read_phy. Should take
1386 * about 50 us */
1387 show_error = 0;
1388 break;
1389 case SPIDER_NET_GPWFFINT:
1390 /* PHY command queue full */
1391 if (netif_msg_intr(card))
1392 pr_err("PHY write queue full\n");
1393 show_error = 0;
1394 break;
1395
1396 /* case SPIDER_NET_GRMDADRINT: not used. print a message */
1397 /* case SPIDER_NET_GRMARPINT: not used. print a message */
1398 /* case SPIDER_NET_GRMMPINT: not used. print a message */
1399
1400 case SPIDER_NET_GDTDEN0INT:
1401 /* someone has set TX_DMA_EN to 0 */
1402 show_error = 0;
1403 break;
1404
1405 case SPIDER_NET_GDDDEN0INT: /* fallthrough */
1406 case SPIDER_NET_GDCDEN0INT: /* fallthrough */
1407 case SPIDER_NET_GDBDEN0INT: /* fallthrough */
1408 case SPIDER_NET_GDADEN0INT:
1409 /* someone has set RX_DMA_EN to 0 */
1410 show_error = 0;
1411 break;
1412
1413 /* RX interrupts */
1414 case SPIDER_NET_GDDFDCINT:
1415 case SPIDER_NET_GDCFDCINT:
1416 case SPIDER_NET_GDBFDCINT:
1417 case SPIDER_NET_GDAFDCINT:
1418 /* case SPIDER_NET_GDNMINT: not used. print a message */
1419 /* case SPIDER_NET_GCNMINT: not used. print a message */
1420 /* case SPIDER_NET_GBNMINT: not used. print a message */
1421 /* case SPIDER_NET_GANMINT: not used. print a message */
1422 /* case SPIDER_NET_GRFNMINT: not used. print a message */
1423 show_error = 0;
1424 break;
1425
1426 /* TX interrupts */
1427 case SPIDER_NET_GDTFDCINT:
1428 show_error = 0;
1429 break;
1430 case SPIDER_NET_GTTEDINT:
1431 show_error = 0;
1432 break;
1433 case SPIDER_NET_GDTDCEINT:
1434 /* chain end. If a descriptor should be sent, kick off
1435 * tx dma
1436 if (card->tx_chain.tail == card->tx_chain.head)
1437 spider_net_kick_tx_dma(card);
1438 show_error = 0; */
1439 break;
1440
1441 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1442 /* case SPIDER_NET_GFREECNTINT: not used. print a message */
1443 }
1444
1445 /* check GHIINT1STS ************************************/
1446 if (error_reg1)
1447 for (i = 0; i < 32; i++)
1448 if (error_reg1 & (1<<i))
1449 switch (i)
1450 {
1451 case SPIDER_NET_GTMFLLINT:
1452 if (netif_msg_intr(card))
1453 pr_err("Spider TX RAM full\n");
1454 show_error = 0;
1455 break;
1456 case SPIDER_NET_GRMFLLINT:
1457 if (netif_msg_intr(card))
1458 pr_err("Spider RX RAM full, incoming packets "
1459 "might be discarded !\n");
1460 netif_rx_schedule(card->netdev);
1461 spider_net_enable_rxchtails(card);
1462 spider_net_enable_rxdmac(card);
1463 break;
1464
1465 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
1466 case SPIDER_NET_GDTINVDINT:
1467 /* all right, tx from previous descr ok */
1468 show_error = 0;
1469 break;
1470 /* case SPIDER_NET_GRFDFLLINT: print a message down there */
1471 /* case SPIDER_NET_GRFCFLLINT: print a message down there */
1472 /* case SPIDER_NET_GRFBFLLINT: print a message down there */
1473 /* case SPIDER_NET_GRFAFLLINT: print a message down there */
1474
1475 /* chain end */
1476 case SPIDER_NET_GDDDCEINT: /* fallthrough */
1477 case SPIDER_NET_GDCDCEINT: /* fallthrough */
1478 case SPIDER_NET_GDBDCEINT: /* fallthrough */
1479 case SPIDER_NET_GDADCEINT:
1480 if (netif_msg_intr(card))
1481 pr_err("got descriptor chain end interrupt, "
1482 "restarting DMAC %c.\n",
1483 'D'+i-SPIDER_NET_GDDDCEINT);
1484 spider_net_refill_rx_chain(card);
1485 show_error = 0;
1486 break;
1487
1488 /* invalid descriptor */
1489 case SPIDER_NET_GDDINVDINT: /* fallthrough */
1490 case SPIDER_NET_GDCINVDINT: /* fallthrough */
1491 case SPIDER_NET_GDBINVDINT: /* fallthrough */
1492 case SPIDER_NET_GDAINVDINT:
1493 /* could happen when rx chain is full */
1494 spider_net_refill_rx_chain(card);
1495 show_error = 0;
1496 break;
1497
1498 /* case SPIDER_NET_GDTRSERINT: problem, print a message */
1499 /* case SPIDER_NET_GDDRSERINT: problem, print a message */
1500 /* case SPIDER_NET_GDCRSERINT: problem, print a message */
1501 /* case SPIDER_NET_GDBRSERINT: problem, print a message */
1502 /* case SPIDER_NET_GDARSERINT: problem, print a message */
1503 /* case SPIDER_NET_GDSERINT: problem, print a message */
1504 /* case SPIDER_NET_GDTPTERINT: problem, print a message */
1505 /* case SPIDER_NET_GDDPTERINT: problem, print a message */
1506 /* case SPIDER_NET_GDCPTERINT: problem, print a message */
1507 /* case SPIDER_NET_GDBPTERINT: problem, print a message */
1508 /* case SPIDER_NET_GDAPTERINT: problem, print a message */
1509 default:
1510 show_error = 1;
1511 break;
1512 }
1513
1514 /* check GHIINT2STS ************************************/
1515 if (error_reg2)
1516 for (i = 0; i < 32; i++)
1517 if (error_reg2 & (1<<i))
1518 switch (i)
1519 {
1520 /* there is nothing we can (want to) do at this time. Log a
1521 * message, we can switch on and off the specific values later on
1522 case SPIDER_NET_GPROPERINT:
1523 case SPIDER_NET_GMCTCRSNGINT:
1524 case SPIDER_NET_GMCTLCOLINT:
1525 case SPIDER_NET_GMCTTMOTINT:
1526 case SPIDER_NET_GMCRCAERINT:
1527 case SPIDER_NET_GMCRCALERINT:
1528 case SPIDER_NET_GMCRALNERINT:
1529 case SPIDER_NET_GMCROVRINT:
1530 case SPIDER_NET_GMCRRNTINT:
1531 case SPIDER_NET_GMCRRXERINT:
1532 case SPIDER_NET_GTITCSERINT:
1533 case SPIDER_NET_GTIFMTERINT:
1534 case SPIDER_NET_GTIPKTRVKINT:
1535 case SPIDER_NET_GTISPINGINT:
1536 case SPIDER_NET_GTISADNGINT:
1537 case SPIDER_NET_GTISPDNGINT:
1538 case SPIDER_NET_GRIFMTERINT:
1539 case SPIDER_NET_GRIPKTRVKINT:
1540 case SPIDER_NET_GRISPINGINT:
1541 case SPIDER_NET_GRISADNGINT:
1542 case SPIDER_NET_GRISPDNGINT:
1543 break;
1544 */
1545 default:
1546 break;
1547 }
1548
1549 if ((show_error) && (netif_msg_intr(card)))
1550 pr_err("Got error interrupt, GHIINT0STS = 0x%08x, "
1551 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1552 status_reg, error_reg1, error_reg2);
1553
1554 /* clear interrupt sources */
1555 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1556 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1557}
1558
1559/**
1560 * spider_net_interrupt - interrupt handler for spider_net
1561 * @irq: interrupt number
1562 * @ptr: pointer to net_device
1563 * @regs: PU registers
1564 *
1565 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1566 * interrupt found raised by card.
1567 *
1568 * This is the interrupt handler that turns off
1569 * interrupts for this device and makes the stack poll the driver
1570 */
1571static irqreturn_t
1572spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
1573{
1574 struct net_device *netdev = ptr;
1575 struct spider_net_card *card = netdev_priv(netdev);
1576 u32 status_reg;
1577
1578 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1579
1580 if (!status_reg)
1581 return IRQ_NONE;
1582
1583 if (status_reg & SPIDER_NET_TXINT)
1584 spider_net_release_tx_chain(card, 0);
1585
1586 if (status_reg & SPIDER_NET_RXINT ) {
1587 spider_net_rx_irq_off(card);
1588 netif_rx_schedule(netdev);
1589 }
1590
1591 /* we do this after rx and tx processing, as we want the tx chain
1592 * processed to see whether we should restart tx dma processing */
1593 spider_net_handle_error_irq(card, status_reg);
1594
1595 /* clear interrupt sources */
1596 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1597
1598 return IRQ_HANDLED;
1599}
1600
1601#ifdef CONFIG_NET_POLL_CONTROLLER
1602/**
1603 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1604 * @netdev: interface device structure
1605 *
1606 * see Documentation/networking/netconsole.txt
1607 */
1608static void
1609spider_net_poll_controller(struct net_device *netdev)
1610{
1611 disable_irq(netdev->irq);
1612 spider_net_interrupt(netdev->irq, netdev, NULL);
1613 enable_irq(netdev->irq);
1614}
1615#endif /* CONFIG_NET_POLL_CONTROLLER */
1616
1617/**
1618 * spider_net_init_card - initializes the card
1619 * @card: card structure
1620 *
1621 * spider_net_init_card initializes the card so that other registers can
1622 * be used
1623 */
1624static void
1625spider_net_init_card(struct spider_net_card *card)
1626{
1627 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1628 SPIDER_NET_CKRCTRL_STOP_VALUE);
1629
1630 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1631 SPIDER_NET_CKRCTRL_RUN_VALUE);
1632}
1633
1634/**
1635 * spider_net_enable_card - enables the card by setting all kinds of regs
1636 * @card: card structure
1637 *
1638 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1639 */
1640static void
1641spider_net_enable_card(struct spider_net_card *card)
1642{
1643 int i;
1644 /* the following array consists of (register),(value) pairs
1645 * that are set in this function. A register of 0 ends the list */
1646 u32 regs[][2] = {
1647 { SPIDER_NET_GRESUMINTNUM, 0 },
1648 { SPIDER_NET_GREINTNUM, 0 },
1649
1650 /* set interrupt frame number registers */
1651 /* clear the single DMA engine registers first */
1652 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1653 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1654 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1655 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1656 /* then set what we really need */
1657 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1658
1659 /* timer counter registers and stuff */
1660 { SPIDER_NET_GFREECNNUM, 0 },
1661 { SPIDER_NET_GONETIMENUM, 0 },
1662 { SPIDER_NET_GTOUTFRMNUM, 0 },
1663
1664 /* RX mode setting */
1665 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1666 /* TX mode setting */
1667 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1668 /* IPSEC mode setting */
1669 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1670
1671 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1672
1673 { SPIDER_NET_GMRWOLCTRL, 0 },
1674 { SPIDER_NET_GTESTMD, 0 },
1675
1676 { SPIDER_NET_GMACINTEN, 0 },
1677
1678 /* flow control stuff */
1679 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1680 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1681
1682 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1683 { 0, 0}
1684 };
1685
1686 i = 0;
1687 while (regs[i][0]) {
1688 spider_net_write_reg(card, regs[i][0], regs[i][1]);
1689 i++;
1690 }
1691
1692 /* clear unicast filter table entries 1 to 14 */
1693 for (i = 1; i <= 14; i++) {
1694 spider_net_write_reg(card,
1695 SPIDER_NET_GMRUAFILnR + i * 8,
1696 0x00080000);
1697 spider_net_write_reg(card,
1698 SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1699 0x00000000);
1700 }
1701
1702 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1703
1704 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1705
1706 /* set chain tail address for RX chains and
1707 * enable DMA */
1708 spider_net_enable_rxchtails(card);
1709 spider_net_enable_rxdmac(card);
1710
1711 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1712
1713 /* set chain tail address for TX chain */
1714 spider_net_enable_txdmac(card);
1715
1716 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1717 SPIDER_NET_LENLMT_VALUE);
1718 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
1719 SPIDER_NET_MACMODE_VALUE);
1720 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1721 SPIDER_NET_OPMODE_VALUE);
1722
1723 /* set interrupt mask registers */
1724 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1725 SPIDER_NET_INT0_MASK_VALUE);
1726 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1727 SPIDER_NET_INT1_MASK_VALUE);
1728 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1729 SPIDER_NET_INT2_MASK_VALUE);
1730}
1731
1732/**
1733 * spider_net_open - called upon ifconfig up
1734 * @netdev: interface device structure
1735 *
1736 * returns 0 on success, <0 on failure
1737 *
1738 * spider_net_open allocates all the descriptors and memory needed for
1739 * operation, sets up multicast list and enables interrupts
1740 */
1741int
1742spider_net_open(struct net_device *netdev)
1743{
1744 struct spider_net_card *card = netdev_priv(netdev);
1745 int result;
1746
1747 result = -ENOMEM;
1748 if (spider_net_init_chain(card, &card->tx_chain,
1749 card->descr, tx_descriptors))
1750 goto alloc_tx_failed;
1751 if (spider_net_init_chain(card, &card->rx_chain,
1752 card->descr + tx_descriptors, rx_descriptors))
1753 goto alloc_rx_failed;
1754
1755 /* allocate rx skbs */
1756 if (spider_net_alloc_rx_skbs(card))
1757 goto alloc_skbs_failed;
1758
1759 spider_net_set_multi(netdev);
1760
1761 /* further enhancement: setup hw vlan, if needed */
1762
1763 result = -EBUSY;
1764 if (request_irq(netdev->irq, spider_net_interrupt,
1765 SA_SHIRQ, netdev->name, netdev))
1766 goto register_int_failed;
1767
1768 spider_net_enable_card(card);
1769
1770 netif_start_queue(netdev);
1771 netif_carrier_on(netdev);
1772 netif_poll_enable(netdev);
1773
1774 return 0;
1775
1776register_int_failed:
1777 spider_net_free_rx_chain_contents(card);
1778alloc_skbs_failed:
1779 spider_net_free_chain(card, &card->rx_chain);
1780alloc_rx_failed:
1781 spider_net_free_chain(card, &card->tx_chain);
1782alloc_tx_failed:
1783 return result;
1784}
1785
1786/**
1787 * spider_net_setup_phy - setup PHY
1788 * @card: card structure
1789 *
1790 * returns 0 on success, <0 on failure
1791 *
1792 * spider_net_setup_phy is used as part of spider_net_probe. Sets
1793 * the PHY to 1000 Mbps
1794 **/
1795static int
1796spider_net_setup_phy(struct spider_net_card *card)
1797{
1798 struct mii_phy *phy = &card->phy;
1799
1800 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
1801 SPIDER_NET_DMASEL_VALUE);
1802 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
1803 SPIDER_NET_PHY_CTRL_VALUE);
1804 phy->mii_id = 1;
1805 phy->dev = card->netdev;
1806 phy->mdio_read = spider_net_read_phy;
1807 phy->mdio_write = spider_net_write_phy;
1808
1809 mii_phy_probe(phy, phy->mii_id);
1810
1811 if (phy->def->ops->setup_forced)
1812 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1813
1814 /* the following two writes could be moved to sungem_phy.c */
1815 /* enable fiber mode */
1816 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
1817 /* LEDs active in both modes, autosense prio = fiber */
1818 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1819
1820 phy->def->ops->read_link(phy);
1821 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
1822 phy->speed, phy->duplex==1 ? "Full" : "Half");
1823
1824 return 0;
1825}
1826
1827/**
1828 * spider_net_download_firmware - loads firmware into the adapter
1829 * @card: card structure
1830 * @firmware: firmware pointer
1831 *
1832 * spider_net_download_firmware loads the firmware opened by
1833 * spider_net_init_firmware into the adapter.
1834 */
1835static void
1836spider_net_download_firmware(struct spider_net_card *card,
1837 const struct firmware *firmware)
1838{
1839 int sequencer, i;
1840 u32 *fw_ptr = (u32 *)firmware->data;
1841
1842 /* stop sequencers */
1843 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1844 SPIDER_NET_STOP_SEQ_VALUE);
1845
1846 for (sequencer = 0; sequencer < 6; sequencer++) {
1847 spider_net_write_reg(card,
1848 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1849 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
1850 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1851 sequencer * 8, *fw_ptr);
1852 fw_ptr++;
1853 }
1854 }
1855
1856 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1857 SPIDER_NET_RUN_SEQ_VALUE);
1858}
1859
1860/**
1861 * spider_net_init_firmware - reads in firmware parts
1862 * @card: card structure
1863 *
1864 * Returns 0 on success, <0 on failure
1865 *
1866 * spider_net_init_firmware opens the sequencer firmware and does some basic
1867 * checks. This function opens and releases the firmware structure. A call
1868 * to download the firmware is performed before the release.
1869 *
1870 * Firmware format
1871 * ===============
1872 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1873 * the program for each sequencer. Use the command
1874 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1875 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1876 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1877 *
1878 * to generate spider_fw.bin, if you have sequencer programs with something
1879 * like the following contents for each sequencer:
1880 * <ONE LINE COMMENT>
1881 * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1882 * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1883 * ...
1884 * <1024th 4-BYTES-WORD FOR SEQUENCER>
1885 */
1886static int
1887spider_net_init_firmware(struct spider_net_card *card)
1888{
1889 const struct firmware *firmware;
1890 int err = -EIO;
1891
1892 if (request_firmware(&firmware,
1893 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
1894 if (netif_msg_probe(card))
1895 pr_err("Couldn't read in sequencer data file %s.\n",
1896 SPIDER_NET_FIRMWARE_NAME);
1897 firmware = NULL;
1898 goto out;
1899 }
1900
1901 if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
1902 if (netif_msg_probe(card))
1903 pr_err("Invalid size of sequencer data file %s.\n",
1904 SPIDER_NET_FIRMWARE_NAME);
1905 goto out;
1906 }
1907
1908 spider_net_download_firmware(card, firmware);
1909
1910 err = 0;
1911out:
1912 release_firmware(firmware);
1913
1914 return err;
1915}
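/*
 * For illustration: the size check above expects exactly
 * 6 sequencers * SPIDER_NET_FIRMWARE_LEN (1024) words * 4 bytes == 24576
 * bytes, matching the spider_fw.bin layout described in the comment block
 * before this function.
 */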
1916
1917/**
1918 * spider_net_workaround_rxramfull - work around firmware bug
1919 * @card: card structure
1920 *
1921 * no return value
1922 **/
1923static void
1924spider_net_workaround_rxramfull(struct spider_net_card *card)
1925{
1926 int i, sequencer = 0;
1927
1928 /* cancel reset */
1929 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1930 SPIDER_NET_CKRCTRL_RUN_VALUE);
1931
1932 /* empty sequencer data */
1933 for (sequencer = 0; sequencer < 6; sequencer++) {
1934 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1935 sequencer * 8, 0x0);
1936 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
1937 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1938 sequencer * 8, 0x0);
1939 }
1940 }
1941
1942 /* set sequencer operation */
1943 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
1944
1945 /* reset */
1946 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1947 SPIDER_NET_CKRCTRL_STOP_VALUE);
1948}
1949
1950/**
1951 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
1952 * function (runs in process context, not in interrupt context)
1953 * @data: pointer to the interface device structure
1954 *
1955 * called as task when tx hangs, resets interface (if interface is up)
1956 */
1957static void
1958spider_net_tx_timeout_task(void *data)
1959{
1960 struct net_device *netdev = data;
1961 struct spider_net_card *card = netdev_priv(netdev);
1962
1963 if (!(netdev->flags & IFF_UP))
1964 goto out;
1965
1966 netif_device_detach(netdev);
1967 spider_net_stop(netdev);
1968
1969 spider_net_workaround_rxramfull(card);
1970 spider_net_init_card(card);
1971
1972 if (spider_net_setup_phy(card))
1973 goto out;
1974 if (spider_net_init_firmware(card))
1975 goto out;
1976
1977 spider_net_open(netdev);
1978 spider_net_kick_tx_dma(card, card->tx_chain.head);
1979 netif_device_attach(netdev);
1980
1981out:
1982 atomic_dec(&card->tx_timeout_task_counter);
1983}
1984
1985/**
1986 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
1987 * @netdev: interface device structure
1988 *
1989 * called if tx hangs. Schedules a task that resets the interface
1990 */
1991static void
1992spider_net_tx_timeout(struct net_device *netdev)
1993{
1994 struct spider_net_card *card;
1995
1996 card = netdev_priv(netdev);
1997 atomic_inc(&card->tx_timeout_task_counter);
1998 if (netdev->flags & IFF_UP)
1999 schedule_work(&card->tx_timeout_task);
2000 else
2001 atomic_dec(&card->tx_timeout_task_counter);
2002}
2003
2004/**
2005 * spider_net_setup_netdev_ops - initialization of net_device operations
2006 * @netdev: net_device structure
2007 *
2008 * fills out function pointers in the net_device structure
2009 */
2010static void
2011spider_net_setup_netdev_ops(struct net_device *netdev)
2012{
2013 netdev->open = &spider_net_open;
2014 netdev->stop = &spider_net_stop;
2015 netdev->hard_start_xmit = &spider_net_xmit;
2016 netdev->get_stats = &spider_net_get_stats;
2017 netdev->set_multicast_list = &spider_net_set_multi;
2018 netdev->set_mac_address = &spider_net_set_mac;
2019 netdev->change_mtu = &spider_net_change_mtu;
2020 netdev->do_ioctl = &spider_net_do_ioctl;
2021 /* tx watchdog */
2022 netdev->tx_timeout = &spider_net_tx_timeout;
2023 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2024 /* NAPI */
2025 netdev->poll = &spider_net_poll;
2026 netdev->weight = SPIDER_NET_NAPI_WEIGHT;
2027 /* HW VLAN */
2028 netdev->vlan_rx_register = &spider_net_vlan_rx_reg;
2029 netdev->vlan_rx_add_vid = &spider_net_vlan_rx_add;
2030 netdev->vlan_rx_kill_vid = &spider_net_vlan_rx_kill;
2031#ifdef CONFIG_NET_POLL_CONTROLLER
2032 /* poll controller */
2033 netdev->poll_controller = &spider_net_poll_controller;
2034#endif /* CONFIG_NET_POLL_CONTROLLER */
2035 /* ethtool ops */
2036 netdev->ethtool_ops = &spider_net_ethtool_ops;
2037}
2038
2039/**
2040 * spider_net_setup_netdev - initialization of net_device
2041 * @card: card structure
2042 *
2043 * Returns 0 on success or <0 on failure
2044 *
2045 * spider_net_setup_netdev initializes the net_device structure
2046 **/
2047static int
2048spider_net_setup_netdev(struct spider_net_card *card)
2049{
2050 int result;
2051 struct net_device *netdev = card->netdev;
2052 struct device_node *dn;
2053 struct sockaddr addr;
2054 u8 *mac;
2055
2056 SET_MODULE_OWNER(netdev);
2057 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2058
2059 pci_set_drvdata(card->pdev, netdev);
2060 spin_lock_init(&card->intmask_lock);
2061 netdev->irq = card->pdev->irq;
2062
2063 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2064
2065 spider_net_setup_netdev_ops(netdev);
2066
2067 netdev->features = 0;
2068 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2069 * NETIF_F_HW_VLAN_FILTER */
2070
2071 netdev->irq = card->pdev->irq;
2072
2073 dn = pci_device_to_OF_node(card->pdev);
2074 if (!dn)
2075 return -EIO;
2076
2077 mac = (u8 *)get_property(dn, "local-mac-address", NULL);
2078 if (!mac)
2079 return -EIO;
2080 memcpy(addr.sa_data, mac, ETH_ALEN);
2081
2082 result = spider_net_set_mac(netdev, &addr);
2083 if ((result) && (netif_msg_probe(card)))
2084 pr_err("Failed to set MAC address: %i\n", result);
2085
2086 result = register_netdev(netdev);
2087 if (result) {
2088 if (netif_msg_probe(card))
2089 pr_err("Couldn't register net_device: %i\n",
2090 result);
2091 return result;
2092 }
2093
2094 if (netif_msg_probe(card))
2095 pr_info("Initialized device %s.\n", netdev->name);
2096
2097 return 0;
2098}
2099
2100/**
2101 * spider_net_alloc_card - allocates net_device and card structure
2102 *
2103 * returns the card structure or NULL in case of errors
2104 *
2105 * the card and net_device structures are linked to each other
2106 */
2107static struct spider_net_card *
2108spider_net_alloc_card(void)
2109{
2110 struct net_device *netdev;
2111 struct spider_net_card *card;
2112 size_t alloc_size;
2113
2114 alloc_size = sizeof (*card) +
2115 sizeof (struct spider_net_descr) * rx_descriptors +
2116 sizeof (struct spider_net_descr) * tx_descriptors;
2117 netdev = alloc_etherdev(alloc_size);
2118 if (!netdev)
2119 return NULL;
2120
2121 card = netdev_priv(netdev);
2122 card->netdev = netdev;
2123 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2124 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
2125 init_waitqueue_head(&card->waitq);
2126 atomic_set(&card->tx_timeout_task_counter, 0);
2127
2128 return card;
2129}
2130
2131/**
2132 * spider_net_undo_pci_setup - releases PCI resources
2133 * @card: card structure
2134 *
2135 * spider_net_undo_pci_setup releases the mapped regions
2136 */
2137static void
2138spider_net_undo_pci_setup(struct spider_net_card *card)
2139{
2140 iounmap(card->regs);
2141 pci_release_regions(card->pdev);
2142}
2143
2144/**
2145 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2146 * @card: card structure
2147 * @pdev: PCI device
2148 *
2149 * Returns the card structure or NULL if any errors occur
2150 *
2151 * spider_net_setup_pci_dev initializes pdev and together with the
2152 * functions called in spider_net_open configures the device so that
2153 * data can be transferred over it
2154 * The net_device structure is attached to the card structure, if the
2155 * function returns without error.
2156 **/
2157static struct spider_net_card *
2158spider_net_setup_pci_dev(struct pci_dev *pdev)
2159{
2160 struct spider_net_card *card;
2161 unsigned long mmio_start, mmio_len;
2162
2163 if (pci_enable_device(pdev)) {
2164 pr_err("Couldn't enable PCI device\n");
2165 return NULL;
2166 }
2167
2168 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2169 pr_err("Couldn't find proper PCI device base address.\n");
2170 goto out_disable_dev;
2171 }
2172
2173 if (pci_request_regions(pdev, spider_net_driver_name)) {
2174 pr_err("Couldn't obtain PCI resources, aborting.\n");
2175 goto out_disable_dev;
2176 }
2177
2178 pci_set_master(pdev);
2179
2180 card = spider_net_alloc_card();
2181 if (!card) {
2182 pr_err("Couldn't allocate net_device structure, "
2183 "aborting.\n");
2184 goto out_release_regions;
2185 }
2186 card->pdev = pdev;
2187
2188 /* fetch base address and length of first resource */
2189 mmio_start = pci_resource_start(pdev, 0);
2190 mmio_len = pci_resource_len(pdev, 0);
2191
2192 card->netdev->mem_start = mmio_start;
2193 card->netdev->mem_end = mmio_start + mmio_len;
2194 card->regs = ioremap(mmio_start, mmio_len);
2195
2196 if (!card->regs) {
2197 pr_err("Couldn't obtain PCI resources, aborting.\n");
2198 goto out_release_regions;
2199 }
2200
2201 return card;
2202
2203out_release_regions:
2204 pci_release_regions(pdev);
2205out_disable_dev:
2206 pci_disable_device(pdev);
2207 pci_set_drvdata(pdev, NULL);
2208 return NULL;
2209}
2210
2211/**
2212 * spider_net_probe - initialization of a device
2213 * @pdev: PCI device
2214 * @ent: entry in the device id list
2215 *
2216 * Returns 0 on success, <0 on failure
2217 *
2218 * spider_net_probe initializes pdev and registers a net_device
2219 * structure for it. After that, the device can be ifconfig'ed up
2220 **/
2221static int __devinit
2222spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2223{
2224 int err = -EIO;
2225 struct spider_net_card *card;
2226
2227 card = spider_net_setup_pci_dev(pdev);
2228 if (!card)
2229 goto out;
2230
2231 spider_net_workaround_rxramfull(card);
2232 spider_net_init_card(card);
2233
2234 err = spider_net_setup_phy(card);
2235 if (err)
2236 goto out_undo_pci;
2237
2238 err = spider_net_init_firmware(card);
2239 if (err)
2240 goto out_undo_pci;
2241
2242 err = spider_net_setup_netdev(card);
2243 if (err)
2244 goto out_undo_pci;
2245
2246 return 0;
2247
2248out_undo_pci:
2249 spider_net_undo_pci_setup(card);
2250 free_netdev(card->netdev);
2251out:
2252 return err;
2253}
2254
2255/**
2256 * spider_net_remove - removal of a device
2257 * @pdev: PCI device
2258 *
2259 * no return value
2260 *
2261 * spider_net_remove is called to remove the device and unregisters the
2262 * net_device
2263 **/
2264static void __devexit
2265spider_net_remove(struct pci_dev *pdev)
2266{
2267 struct net_device *netdev;
2268 struct spider_net_card *card;
2269
2270 netdev = pci_get_drvdata(pdev);
2271 card = netdev_priv(netdev);
2272
2273 wait_event(card->waitq,
2274 atomic_read(&card->tx_timeout_task_counter) == 0);
2275
2276 unregister_netdev(netdev);
2277
2278 /* switch off card */
2279 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2280 SPIDER_NET_CKRCTRL_STOP_VALUE);
2281 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2282 SPIDER_NET_CKRCTRL_RUN_VALUE);
2283
2284 spider_net_undo_pci_setup(card);
2285 free_netdev(netdev);
2286}
2287
2288static struct pci_driver spider_net_driver = {
2289 .owner = THIS_MODULE,
2290 .name = spider_net_driver_name,
2291 .id_table = spider_net_pci_tbl,
2292 .probe = spider_net_probe,
2293 .remove = __devexit_p(spider_net_remove)
2294};
2295
2296/**
2297 * spider_net_init - init function when the driver is loaded
2298 *
2299 * spider_net_init registers the device driver
2300 */
2301static int __init spider_net_init(void)
2302{
2303 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2304 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2305 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2306 }
2307 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2308 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2309 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2310 }
2311 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2312 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2313 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2314 }
2315 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2316 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2317 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2318 }
2319
2320 return pci_register_driver(&spider_net_driver);
2321}
2322
2323/**
2324 * spider_net_cleanup - exit function when driver is unloaded
2325 *
2326 * spider_net_cleanup unregisters the device driver
2327 */
2328static void __exit spider_net_cleanup(void)
2329{
2330 pci_unregister_driver(&spider_net_driver);
2331}
2332
2333module_init(spider_net_init);
2334module_exit(spider_net_cleanup);
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
new file mode 100644
index 000000000000..22b2f2347351
--- /dev/null
+++ b/drivers/net/spider_net.h
@@ -0,0 +1,469 @@
1/*
2 * Network device driver for Cell Processor-Based Blade
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H
26
27#include "sungem_phy.h"
28
29extern int spider_net_stop(struct net_device *netdev);
30extern int spider_net_open(struct net_device *netdev);
31
32extern struct ethtool_ops spider_net_ethtool_ops;
33
34extern char spider_net_driver_name[];
35
36#define SPIDER_NET_MAX_MTU 2308
37#define SPIDER_NET_MIN_MTU 64
38
39#define SPIDER_NET_RXBUF_ALIGN 128
40
41#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64
42#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
43#define SPIDER_NET_RX_DESCRIPTORS_MAX 256
44
45#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64
46#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
47#define SPIDER_NET_TX_DESCRIPTORS_MAX 256
48
49#define SPIDER_NET_RX_CSUM_DEFAULT 1
50
 51#define SPIDER_NET_WATCHDOG_TIMEOUT (5*HZ)
52#define SPIDER_NET_NAPI_WEIGHT 64
53
54#define SPIDER_NET_FIRMWARE_LEN 1024
55#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
56
57/** spider_net SMMIO registers */
58#define SPIDER_NET_GHIINT0STS 0x00000000
59#define SPIDER_NET_GHIINT1STS 0x00000004
60#define SPIDER_NET_GHIINT2STS 0x00000008
61#define SPIDER_NET_GHIINT0MSK 0x00000010
62#define SPIDER_NET_GHIINT1MSK 0x00000014
63#define SPIDER_NET_GHIINT2MSK 0x00000018
64
65#define SPIDER_NET_GRESUMINTNUM 0x00000020
66#define SPIDER_NET_GREINTNUM 0x00000024
67
68#define SPIDER_NET_GFFRMNUM 0x00000028
69#define SPIDER_NET_GFAFRMNUM 0x0000002c
70#define SPIDER_NET_GFBFRMNUM 0x00000030
71#define SPIDER_NET_GFCFRMNUM 0x00000034
72#define SPIDER_NET_GFDFRMNUM 0x00000038
73
 74/* clear them (don't use them) */
75#define SPIDER_NET_GFREECNNUM 0x0000003c
76#define SPIDER_NET_GONETIMENUM 0x00000040
77
78#define SPIDER_NET_GTOUTFRMNUM 0x00000044
79
80#define SPIDER_NET_GTXMDSET 0x00000050
81#define SPIDER_NET_GPCCTRL 0x00000054
82#define SPIDER_NET_GRXMDSET 0x00000058
83#define SPIDER_NET_GIPSECINIT 0x0000005c
84#define SPIDER_NET_GFTRESTRT 0x00000060
85#define SPIDER_NET_GRXDMAEN 0x00000064
86#define SPIDER_NET_GMRWOLCTRL 0x00000068
87#define SPIDER_NET_GPCWOPCMD 0x0000006c
88#define SPIDER_NET_GPCROPCMD 0x00000070
89#define SPIDER_NET_GTTFRMCNT 0x00000078
90#define SPIDER_NET_GTESTMD 0x0000007c
91
92#define SPIDER_NET_GSINIT 0x00000080
93#define SPIDER_NET_GSnPRGADR 0x00000084
94#define SPIDER_NET_GSnPRGDAT 0x00000088
95
96#define SPIDER_NET_GMACOPEMD 0x00000100
97#define SPIDER_NET_GMACLENLMT 0x00000108
98#define SPIDER_NET_GMACINTEN 0x00000118
99#define SPIDER_NET_GMACPHYCTRL 0x00000120
100
101#define SPIDER_NET_GMACAPAUSE 0x00000154
102#define SPIDER_NET_GMACTXPAUSE 0x00000164
103
104#define SPIDER_NET_GMACMODE 0x000001b0
105#define SPIDER_NET_GMACBSTLMT 0x000001b4
106
107#define SPIDER_NET_GMACUNIMACU 0x000001c0
108#define SPIDER_NET_GMACUNIMACL 0x000001c8
109
110#define SPIDER_NET_GMRMHFILnR 0x00000400
111#define SPIDER_NET_MULTICAST_HASHES 256
112
113#define SPIDER_NET_GMRUAFILnR 0x00000500
114#define SPIDER_NET_GMRUA0FIL15R 0x00000578
115
116/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
117 * 0x00000b.. for DMA controller B, etc. */
118#define SPIDER_NET_GDADCHA 0x00000a00
119#define SPIDER_NET_GDADMACCNTR 0x00000a04
120#define SPIDER_NET_GDACTDPA 0x00000a08
121#define SPIDER_NET_GDACTDCNT 0x00000a0c
122#define SPIDER_NET_GDACDBADDR 0x00000a20
123#define SPIDER_NET_GDACDBSIZE 0x00000a24
124#define SPIDER_NET_GDACNEXTDA 0x00000a28
125#define SPIDER_NET_GDACCOMST 0x00000a2c
126#define SPIDER_NET_GDAWBCOMST 0x00000a30
127#define SPIDER_NET_GDAWBRSIZE 0x00000a34
128#define SPIDER_NET_GDAWBVSIZE 0x00000a38
129#define SPIDER_NET_GDAWBTRST 0x00000a3c
130#define SPIDER_NET_GDAWBTRERR 0x00000a40
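/*
 * Per the comment above, each RX DMA controller's register block is 0x100
 * apart (0x00000a.. = controller A, 0x00000b.. = controller B, ...). A
 * hypothetical helper, not part of the driver, to reach controller 'n's
 * copy of one of these registers:
 */
#define SPIDER_NET_SKETCH_DMA_REG(reg, n)      ((reg) + (n) * 0x100)
/* e.g. SPIDER_NET_SKETCH_DMA_REG(SPIDER_NET_GDADMACCNTR, 1) == 0x00000b04,
 * i.e. controller B's copy of GDADMACCNTR, per the layout stated above. */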
131
132/* TX DMA controller registers */
133#define SPIDER_NET_GDTDCHA 0x00000e00
134#define SPIDER_NET_GDTDMACCNTR 0x00000e04
135#define SPIDER_NET_GDTCDPA 0x00000e08
136#define SPIDER_NET_GDTDMASEL 0x00000e14
137
138#define SPIDER_NET_ECMODE 0x00000f00
139/* clock and reset control register */
140#define SPIDER_NET_CKRCTRL 0x00000ff0
141
142/** SCONFIG registers */
143#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
144
145/** hardcoded register values */
146#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff
147#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff
148/* no MAC aborts -> auto retransmission */
149#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1
150
151/* clear counter when interrupt sources are cleared
152#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
153/* we rely on flagged descriptor interrupts */
154#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
155/* set this first, then the FRAMENUM_VALUE */
156#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
157
158#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
159#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
160
161#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
162/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
163#define SPIDER_NET_RXMODE_VALUE 0x00000011
164/* auto retransmission in case of MAC aborts */
165#define SPIDER_NET_TXMODE_VALUE 0x00010000
166#define SPIDER_NET_RESTART_VALUE 0x00000000
167#define SPIDER_NET_WOL_VALUE 0x00001111
168#if 0
169#define SPIDER_NET_WOL_VALUE 0x00000000
170#endif
171#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8
172
173/* pause frames: automatic, no upper retransmission count */
 174/* outside loopback mode: the ETOMOD signal doesn't matter, not connected */
175#define SPIDER_NET_OPMODE_VALUE 0x00000063
176/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
177#define SPIDER_NET_LENLMT_VALUE 0x00000908
178
179#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
180#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
181
182#define SPIDER_NET_MACMODE_VALUE 0x00000001
183#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
184
185/* 1(0) enable r/tx dma
186 * 0000000 fixed to 0
187 *
188 * 000000 fixed to 0
189 * 0(1) en/disable descr writeback on force end
190 * 0(1) force end
191 *
192 * 000000 fixed to 0
193 * 00 burst alignment: 128 bytes
194 *
195 * 00000 fixed to 0
196 * 0 descr writeback size 32 bytes
197 * 0(1) descr chain end interrupt enable
198 * 0(1) descr status writeback enable */
199
200/* to set RX_DMA_EN */
201#define SPIDER_NET_DMA_RX_VALUE 0x80000000
202#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
203/* to set TX_DMA_EN */
204#define SPIDER_NET_DMA_TX_VALUE 0x80000000
205#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
206
207/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
208#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
209#define SPIDER_NET_PROMISC_VALUE 0x00080000
210#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
211
212#define SPIDER_NET_DMASEL_VALUE 0x00000001
213
214#define SPIDER_NET_ECMODE_VALUE 0x00000000
215
216#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
217#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
218
219#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
220#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
221
222/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
223 * with 1 << SPIDER_NET_... */
224enum spider_net_int0_status {
225 SPIDER_NET_GPHYINT = 0,
226 SPIDER_NET_GMAC2INT,
227 SPIDER_NET_GMAC1INT,
228 SPIDER_NET_GIPSINT,
229 SPIDER_NET_GFIFOINT,
230 SPIDER_NET_GDMACINT,
231 SPIDER_NET_GSYSINT,
232 SPIDER_NET_GPWOPCMPINT,
233 SPIDER_NET_GPROPCMPINT,
234 SPIDER_NET_GPWFFINT,
235 SPIDER_NET_GRMDADRINT,
236 SPIDER_NET_GRMARPINT,
237 SPIDER_NET_GRMMPINT,
238 SPIDER_NET_GDTDEN0INT,
239 SPIDER_NET_GDDDEN0INT,
240 SPIDER_NET_GDCDEN0INT,
241 SPIDER_NET_GDBDEN0INT,
242 SPIDER_NET_GDADEN0INT,
243 SPIDER_NET_GDTFDCINT,
244 SPIDER_NET_GDDFDCINT,
245 SPIDER_NET_GDCFDCINT,
246 SPIDER_NET_GDBFDCINT,
247 SPIDER_NET_GDAFDCINT,
248 SPIDER_NET_GTTEDINT,
249 SPIDER_NET_GDTDCEINT,
250 SPIDER_NET_GRFDNMINT,
251 SPIDER_NET_GRFCNMINT,
252 SPIDER_NET_GRFBNMINT,
253 SPIDER_NET_GRFANMINT,
254 SPIDER_NET_GRFNMINT,
255 SPIDER_NET_G1TMCNTINT,
256 SPIDER_NET_GFREECNTINT
257};
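/*
 * Hedged illustration of the "1 << SPIDER_NET_..." usage described in the
 * comment above GHIINT0STS; the interrupt handler itself is not part of
 * this hunk and the helper below is made up:
 */
static inline int spider_net_sketch_int0_pending(u32 status, unsigned int bit)
{
        /* 'bit' is one of enum spider_net_int0_status,
         * e.g. SPIDER_NET_GDAFDCINT */
        return (status & (1 << bit)) != 0;
}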
258/* GHIINT1STS bits */
259enum spider_net_int1_status {
260 SPIDER_NET_GTMFLLINT = 0,
261 SPIDER_NET_GRMFLLINT,
262 SPIDER_NET_GTMSHTINT,
263 SPIDER_NET_GDTINVDINT,
264 SPIDER_NET_GRFDFLLINT,
265 SPIDER_NET_GDDDCEINT,
266 SPIDER_NET_GDDINVDINT,
267 SPIDER_NET_GRFCFLLINT,
268 SPIDER_NET_GDCDCEINT,
269 SPIDER_NET_GDCINVDINT,
270 SPIDER_NET_GRFBFLLINT,
271 SPIDER_NET_GDBDCEINT,
272 SPIDER_NET_GDBINVDINT,
273 SPIDER_NET_GRFAFLLINT,
274 SPIDER_NET_GDADCEINT,
275 SPIDER_NET_GDAINVDINT,
276 SPIDER_NET_GDTRSERINT,
277 SPIDER_NET_GDDRSERINT,
278 SPIDER_NET_GDCRSERINT,
279 SPIDER_NET_GDBRSERINT,
280 SPIDER_NET_GDARSERINT,
281 SPIDER_NET_GDSERINT,
282 SPIDER_NET_GDTPTERINT,
283 SPIDER_NET_GDDPTERINT,
284 SPIDER_NET_GDCPTERINT,
285 SPIDER_NET_GDBPTERINT,
286 SPIDER_NET_GDAPTERINT
287};
288/* GHIINT2STS bits */
289enum spider_net_int2_status {
290 SPIDER_NET_GPROPERINT = 0,
291 SPIDER_NET_GMCTCRSNGINT,
292 SPIDER_NET_GMCTLCOLINT,
293 SPIDER_NET_GMCTTMOTINT,
294 SPIDER_NET_GMCRCAERINT,
295 SPIDER_NET_GMCRCALERINT,
296 SPIDER_NET_GMCRALNERINT,
297 SPIDER_NET_GMCROVRINT,
298 SPIDER_NET_GMCRRNTINT,
299 SPIDER_NET_GMCRRXERINT,
300 SPIDER_NET_GTITCSERINT,
301 SPIDER_NET_GTIFMTERINT,
302 SPIDER_NET_GTIPKTRVKINT,
303 SPIDER_NET_GTISPINGINT,
304 SPIDER_NET_GTISADNGINT,
305 SPIDER_NET_GTISPDNGINT,
306 SPIDER_NET_GRIFMTERINT,
307 SPIDER_NET_GRIPKTRVKINT,
308 SPIDER_NET_GRISPINGINT,
309 SPIDER_NET_GRISADNGINT,
310 SPIDER_NET_GRISPDNGINT
311};
312
313#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GTTEDINT) | \
314 (1 << SPIDER_NET_GDTDCEINT) | \
315 (1 << SPIDER_NET_GDTFDCINT) )
316
 317/* we rely on flagged descriptor interrupts */
318#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
319 (1 << SPIDER_NET_GRMFLLINT) )
320
321#define SPIDER_NET_GPREXEC 0x80000000
322#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
323
324/* descriptor bits
325 *
326 * 1010 descriptor ready
327 * 0 descr in middle of chain
328 * 000 fixed to 0
329 *
330 * 0 no interrupt on completion
331 * 000 fixed to 0
332 * 1 no ipsec processing
333 * 1 last descriptor for this frame
334 * 00 no checksum
335 * 10 tcp checksum
336 * 11 udp checksum
337 *
338 * 00 fixed to 0
339 * 0 fixed to 0
340 * 0 no interrupt on response errors
341 * 0 no interrupt on invalid descr
342 * 0 no interrupt on dma process termination
343 * 0 no interrupt on descr chain end
344 * 0 no interrupt on descr complete
345 *
346 * 000 fixed to 0
347 * 0 response error interrupt status
348 * 0 invalid descr status
349 * 0 dma termination status
350 * 0 descr chain end status
351 * 0 descr complete status */
352#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
353#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
354#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
355#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
356#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
357
358/* descr ready, descr is in middle of chain, get interrupt on completion */
359#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
360
361/* multicast is no problem */
362#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
363
364enum spider_net_descr_status {
365 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
366 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
367 SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
368 SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
369 SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
370 SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
371 SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
372};
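/*
 * Hedged helper, not part of the driver, showing how the IND_PROC
 * shift/mask above relate dmac_cmd_status to enum spider_net_descr_status:
 */
static inline int spider_net_sketch_get_descr_status(u32 dmac_cmd_status)
{
        return dmac_cmd_status >> SPIDER_NET_DESCR_IND_PROC_SHIFT;
}
/* e.g. SPIDER_NET_DMAC_RX_CARDOWNED (0xa0800000) yields
 * SPIDER_NET_DESCR_CARDOWNED (0x0a). */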
373
374struct spider_net_descr {
375 /* as defined by the hardware */
376 dma_addr_t buf_addr;
377 u32 buf_size;
378 dma_addr_t next_descr_addr;
379 u32 dmac_cmd_status;
380 u32 result_size;
381 u32 valid_size; /* all zeroes for tx */
382 u32 data_status;
383 u32 data_error; /* all zeroes for tx */
384
385 /* used in the driver */
386 struct sk_buff *skb;
387 dma_addr_t bus_addr;
388 struct spider_net_descr *next;
389 struct spider_net_descr *prev;
390} __attribute__((aligned(32)));
391
392struct spider_net_descr_chain {
393 /* we walk from tail to head */
394 struct spider_net_descr *head;
395 struct spider_net_descr *tail;
396};
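/*
 * "we walk from tail to head": a hedged sketch, not the driver's actual
 * completion code, of reclaiming finished descriptors with the status
 * helper sketched above. Unmapping, skb handling and the check for
 * descriptors that are simply not in use are all omitted here.
 */
static void spider_net_sketch_reclaim(struct spider_net_descr_chain *chain)
{
        while (spider_net_sketch_get_descr_status(chain->tail->dmac_cmd_status) !=
               SPIDER_NET_DESCR_CARDOWNED) {
                /* ... unmap the buffer, pass or free the skb ... */
                chain->tail = chain->tail->next;
        }
}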
397
398/* descriptor data_status bits */
399#define SPIDER_NET_RXIPCHK 29
400#define SPIDER_NET_TCPUDPIPCHK 28
401#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
402 1 << SPIDER_NET_TCPUDPIPCHK)
403
404#define SPIDER_NET_VLAN_PACKET 21
405
406/* descriptor data_error bits */
407#define SPIDER_NET_RXIPCHKERR 27
408#define SPIDER_NET_RXTCPCHKERR 26
409#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
410 1 << SPIDER_NET_RXTCPCHKERR)
411
 412/* the cases in which we don't pass the packet to the stack */
413#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000
414
415#define SPIDER_NET_DESCR_SIZE 32
416
417/* this will be bigger some time */
418struct spider_net_options {
419 int rx_csum; /* for rx: if 0 ip_summed=NONE,
420 if 1 and hw has verified, ip_summed=UNNECESSARY */
421};
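/*
 * Hedged sketch of how the rx_csum option and the checksum bits defined
 * above could be combined when handing a packet to the stack, following
 * the comment in struct spider_net_options; this is not the driver's
 * actual RX path.
 */
static inline void spider_net_sketch_set_csum(struct sk_buff *skb,
                                              const struct spider_net_descr *descr,
                                              const struct spider_net_options *opts)
{
        if (opts->rx_csum &&
            (descr->data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
            !(descr->data_error & SPIDER_NET_DATA_ERROR_CHK_MASK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE;
}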
422
423#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
424 NETIF_MSG_PROBE | \
425 NETIF_MSG_LINK | \
426 NETIF_MSG_TIMER | \
427 NETIF_MSG_IFDOWN | \
428 NETIF_MSG_IFUP | \
429 NETIF_MSG_RX_ERR | \
430 NETIF_MSG_TX_ERR | \
431 NETIF_MSG_TX_QUEUED | \
432 NETIF_MSG_INTR | \
433 NETIF_MSG_TX_DONE | \
434 NETIF_MSG_RX_STATUS | \
435 NETIF_MSG_PKTDATA | \
436 NETIF_MSG_HW | \
437 NETIF_MSG_WOL )
438
439struct spider_net_card {
440 struct net_device *netdev;
441 struct pci_dev *pdev;
442 struct mii_phy phy;
443
444 void __iomem *regs;
445
446 struct spider_net_descr_chain tx_chain;
447 struct spider_net_descr_chain rx_chain;
448 spinlock_t chain_lock;
449
450 struct net_device_stats netdev_stats;
451
452 struct spider_net_options options;
453
454 spinlock_t intmask_lock;
455
456 struct work_struct tx_timeout_task;
457 atomic_t tx_timeout_task_counter;
458 wait_queue_head_t waitq;
459
460 /* for ethtool */
461 int msg_enable;
462
463 struct spider_net_descr descr[0];
464};
465
466#define pr_err(fmt,arg...) \
467 printk(KERN_ERR fmt ,##arg)
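/*
 * Hedged example of how msg_enable and the NETIF_MSG_* bits above are meant
 * to be consumed via the netif_msg_*() helpers from <linux/netdevice.h>;
 * the function itself is made up for illustration:
 */
static inline void spider_net_sketch_log_rx_error(struct spider_net_card *card)
{
        if (netif_msg_rx_err(card))
                pr_err("spider_net: RX descriptor error\n");
}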
468
469#endif
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
new file mode 100644
index 000000000000..d42e60ba74ce
--- /dev/null
+++ b/drivers/net/spider_net_ethtool.c
@@ -0,0 +1,126 @@
1/*
2 * Network device driver for Cell Processor-Based Blade
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/netdevice.h>
25#include <linux/ethtool.h>
26#include <linux/pci.h>
27
28#include "spider_net.h"
29
30static int
31spider_net_ethtool_get_settings(struct net_device *netdev,
32 struct ethtool_cmd *cmd)
33{
34 struct spider_net_card *card;
35 card = netdev_priv(netdev);
36
37 cmd->supported = (SUPPORTED_1000baseT_Full |
38 SUPPORTED_FIBRE);
39 cmd->advertising = (ADVERTISED_1000baseT_Full |
40 ADVERTISED_FIBRE);
41 cmd->port = PORT_FIBRE;
42 cmd->speed = card->phy.speed;
43 cmd->duplex = DUPLEX_FULL;
44
45 return 0;
46}
47
48static void
49spider_net_ethtool_get_drvinfo(struct net_device *netdev,
50 struct ethtool_drvinfo *drvinfo)
51{
52 struct spider_net_card *card;
53 card = netdev_priv(netdev);
54
55 /* clear and fill out info */
56 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
57 strncpy(drvinfo->driver, spider_net_driver_name, 32);
58 strncpy(drvinfo->version, "0.1", 32);
59 strcpy(drvinfo->fw_version, "no information");
60 strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
61}
62
63static void
64spider_net_ethtool_get_wol(struct net_device *netdev,
65 struct ethtool_wolinfo *wolinfo)
66{
67 /* no support for wol */
68 wolinfo->supported = 0;
69 wolinfo->wolopts = 0;
70}
71
72static u32
73spider_net_ethtool_get_msglevel(struct net_device *netdev)
74{
75 struct spider_net_card *card;
76 card = netdev_priv(netdev);
77 return card->msg_enable;
78}
79
80static void
81spider_net_ethtool_set_msglevel(struct net_device *netdev,
82 u32 level)
83{
84 struct spider_net_card *card;
85 card = netdev_priv(netdev);
86 card->msg_enable = level;
87}
88
89static int
90spider_net_ethtool_nway_reset(struct net_device *netdev)
91{
92 if (netif_running(netdev)) {
93 spider_net_stop(netdev);
94 spider_net_open(netdev);
95 }
96 return 0;
97}
98
99static u32
100spider_net_ethtool_get_rx_csum(struct net_device *netdev)
101{
 102 struct spider_net_card *card = netdev_priv(netdev);
103
104 return card->options.rx_csum;
105}
106
107static int
108spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
109{
 110 struct spider_net_card *card = netdev_priv(netdev);
111
112 card->options.rx_csum = n;
113 return 0;
114}
115
116struct ethtool_ops spider_net_ethtool_ops = {
117 .get_settings = spider_net_ethtool_get_settings,
118 .get_drvinfo = spider_net_ethtool_get_drvinfo,
119 .get_wol = spider_net_ethtool_get_wol,
120 .get_msglevel = spider_net_ethtool_get_msglevel,
121 .set_msglevel = spider_net_ethtool_set_msglevel,
122 .nway_reset = spider_net_ethtool_nway_reset,
123 .get_rx_csum = spider_net_ethtool_get_rx_csum,
124 .set_rx_csum = spider_net_ethtool_set_rx_csum,
125};
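/*
 * Hedged note: these ops get hooked into the net_device in the driver's
 * netdev setup path (spider_net_setup_netdev, not shown in this hunk),
 * roughly:
 *
 *      netdev->ethtool_ops = &spider_net_ethtool_ops;
 */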
126
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1f43bbfbc1c7..5c8fcd40ef4d 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -162,7 +162,7 @@ struct lance_private {
162#define MEM lp->mem 162#define MEM lp->mem
163#define DREG lp->iobase[0] 163#define DREG lp->iobase[0]
164#define AREG lp->iobase[1] 164#define AREG lp->iobase[1]
165#define REGA(a) ( AREG = (a), DREG ) 165#define REGA(a) (*( AREG = (a), &DREG ))
166 166
167/* Definitions for the Lance */ 167/* Definitions for the Lance */
168 168
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 2608e7a3d214..3f67a42e8503 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -948,6 +948,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
948 u32 gem_status = readl(gp->regs + GREG_STAT); 948 u32 gem_status = readl(gp->regs + GREG_STAT);
949 949
950 if (gem_status == 0) { 950 if (gem_status == 0) {
951 netif_poll_enable(dev);
951 spin_unlock_irqrestore(&gp->lock, flags); 952 spin_unlock_irqrestore(&gp->lock, flags);
952 return IRQ_NONE; 953 return IRQ_NONE;
953 } 954 }
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 7143fd7cf3f8..ff8ae5f79970 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1020,7 +1020,7 @@ struct gem {
1020 1020
1021 struct gem_init_block *init_block; 1021 struct gem_init_block *init_block;
1022 struct sk_buff *rx_skbs[RX_RING_SIZE]; 1022 struct sk_buff *rx_skbs[RX_RING_SIZE];
1023 struct sk_buff *tx_skbs[RX_RING_SIZE]; 1023 struct sk_buff *tx_skbs[TX_RING_SIZE];
1024 dma_addr_t gblock_dvma; 1024 dma_addr_t gblock_dvma;
1025 1025
1026 struct pci_dev *pdev; 1026 struct pci_dev *pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6d4ab1e333b5..dc57352e5a97 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -36,6 +36,7 @@
36#include <linux/ip.h> 36#include <linux/ip.h>
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/prefetch.h>
39 40
40#include <net/checksum.h> 41#include <net/checksum.h>
41 42
@@ -66,8 +67,8 @@
66 67
67#define DRV_MODULE_NAME "tg3" 68#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": " 69#define PFX DRV_MODULE_NAME ": "
69#define DRV_MODULE_VERSION "3.37" 70#define DRV_MODULE_VERSION "3.39"
70#define DRV_MODULE_RELDATE "August 25, 2005" 71#define DRV_MODULE_RELDATE "September 5, 2005"
71 72
72#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -121,12 +122,9 @@
121 TG3_RX_RCB_RING_SIZE(tp)) 122 TG3_RX_RCB_RING_SIZE(tp))
122#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 123#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
123 TG3_TX_RING_SIZE) 124 TG3_TX_RING_SIZE)
124#define TX_RING_GAP(TP) \
125 (TG3_TX_RING_SIZE - (TP)->tx_pending)
126#define TX_BUFFS_AVAIL(TP) \ 125#define TX_BUFFS_AVAIL(TP) \
127 (((TP)->tx_cons <= (TP)->tx_prod) ? \ 126 ((TP)->tx_pending - \
128 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \ 127 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 128#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131 129
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) 130#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
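The rewritten TX_BUFFS_AVAIL() above replaces the old two-case comparison with
power-of-two ring arithmetic: the in-flight count is (tx_prod - tx_cons) masked
to the ring size, and the free count is tx_pending minus that. A standalone
illustration of the same arithmetic (values made up; RING_SIZE stands in for
TG3_TX_RING_SIZE, which must be a power of two):

#include <stdio.h>

#define RING_SIZE 512   /* stand-in for TG3_TX_RING_SIZE */

int main(void)
{
        unsigned int tx_prod = 5, tx_cons = 510, tx_pending = 511;
        unsigned int in_flight = (tx_prod - tx_cons) & (RING_SIZE - 1);

        /* prints "in flight 7, available 504" */
        printf("in flight %u, available %u\n", in_flight,
               tx_pending - in_flight);
        return 0;
}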
@@ -340,41 +338,92 @@ static struct {
340 338
341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 339static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342{ 340{
343 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 341 unsigned long flags;
344 spin_lock_bh(&tp->indirect_lock); 342
345 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 343 spin_lock_irqsave(&tp->indirect_lock, flags);
346 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 344 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
347 spin_unlock_bh(&tp->indirect_lock); 345 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
348 } else { 346 spin_unlock_irqrestore(&tp->indirect_lock, flags);
349 writel(val, tp->regs + off); 347}
350 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0) 348
351 readl(tp->regs + off); 349static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
350{
351 writel(val, tp->regs + off);
352 readl(tp->regs + off);
353}
354
355static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
356{
357 unsigned long flags;
358 u32 val;
359
360 spin_lock_irqsave(&tp->indirect_lock, flags);
361 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
362 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
363 spin_unlock_irqrestore(&tp->indirect_lock, flags);
364 return val;
365}
366
367static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
368{
369 unsigned long flags;
370
371 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
372 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
373 TG3_64BIT_REG_LOW, val);
374 return;
352 } 375 }
376 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
377 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
378 TG3_64BIT_REG_LOW, val);
379 return;
380 }
381
382 spin_lock_irqsave(&tp->indirect_lock, flags);
383 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
384 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
385 spin_unlock_irqrestore(&tp->indirect_lock, flags);
386
387 /* In indirect mode when disabling interrupts, we also need
388 * to clear the interrupt bit in the GRC local ctrl register.
389 */
390 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
391 (val == 0x1)) {
392 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
393 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
394 }
395}
396
397static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
398{
399 unsigned long flags;
400 u32 val;
401
402 spin_lock_irqsave(&tp->indirect_lock, flags);
403 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
404 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
405 spin_unlock_irqrestore(&tp->indirect_lock, flags);
406 return val;
353} 407}
354 408
355static void _tw32_flush(struct tg3 *tp, u32 off, u32 val) 409static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356{ 410{
357 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 411 tp->write32(tp, off, val);
358 spin_lock_bh(&tp->indirect_lock); 412 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 413 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 414 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
361 spin_unlock_bh(&tp->indirect_lock); 415 tp->read32(tp, off); /* flush */
362 } else {
363 void __iomem *dest = tp->regs + off;
364 writel(val, dest);
365 readl(dest); /* always flush PCI write */
366 }
367} 416}
368 417
369static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val) 418static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
370{ 419{
371 void __iomem *mbox = tp->regs + off; 420 tp->write32_mbox(tp, off, val);
372 writel(val, mbox); 421 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
373 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 422 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
374 readl(mbox); 423 tp->read32_mbox(tp, off);
375} 424}
376 425
377static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val) 426static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378{ 427{
379 void __iomem *mbox = tp->regs + off; 428 void __iomem *mbox = tp->regs + off;
380 writel(val, mbox); 429 writel(val, mbox);
@@ -384,51 +433,63 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
384 readl(mbox); 433 readl(mbox);
385} 434}
386 435
387#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg)) 436static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
388#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val) 437{
389#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val) 438 writel(val, tp->regs + off);
439}
440
441static u32 tg3_read32(struct tg3 *tp, u32 off)
442{
443 return (readl(tp->regs + off));
444}
390 445
391#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val)) 446#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
447#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
448#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
449#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
450#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
451
452#define tw32(reg,val) tp->write32(tp, reg, val)
392#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val)) 453#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
393#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg)) 454#define tr32(reg) tp->read32(tp, reg)
394#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
395#define tr32(reg) readl(tp->regs + (reg))
396#define tr16(reg) readw(tp->regs + (reg))
397#define tr8(reg) readb(tp->regs + (reg))
398 455
399static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) 456static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400{ 457{
401 spin_lock_bh(&tp->indirect_lock); 458 unsigned long flags;
459
460 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 461 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 462 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404 463
405 /* Always leave this as zero. */ 464 /* Always leave this as zero. */
406 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 465 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407 spin_unlock_bh(&tp->indirect_lock); 466 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408} 467}
409 468
410static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 469static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411{ 470{
412 spin_lock_bh(&tp->indirect_lock); 471 unsigned long flags;
472
473 spin_lock_irqsave(&tp->indirect_lock, flags);
413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 475 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415 476
416 /* Always leave this as zero. */ 477 /* Always leave this as zero. */
417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418 spin_unlock_bh(&tp->indirect_lock); 479 spin_unlock_irqrestore(&tp->indirect_lock, flags);
419} 480}
420 481
421static void tg3_disable_ints(struct tg3 *tp) 482static void tg3_disable_ints(struct tg3 *tp)
422{ 483{
423 tw32(TG3PCI_MISC_HOST_CTRL, 484 tw32(TG3PCI_MISC_HOST_CTRL,
424 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); 485 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 486 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427} 487}
428 488
429static inline void tg3_cond_int(struct tg3 *tp) 489static inline void tg3_cond_int(struct tg3 *tp)
430{ 490{
431 if (tp->hw_status->status & SD_STATUS_UPDATED) 491 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
492 (tp->hw_status->status & SD_STATUS_UPDATED))
432 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 493 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
433} 494}
434 495
@@ -439,9 +500,8 @@ static void tg3_enable_ints(struct tg3 *tp)
439 500
440 tw32(TG3PCI_MISC_HOST_CTRL, 501 tw32(TG3PCI_MISC_HOST_CTRL,
441 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 502 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
442 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 503 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443 (tp->last_tag << 24)); 504 (tp->last_tag << 24));
444 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
445 tg3_cond_int(tp); 505 tg3_cond_int(tp);
446} 506}
447 507
@@ -472,8 +532,6 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
472 */ 532 */
473static void tg3_restart_ints(struct tg3 *tp) 533static void tg3_restart_ints(struct tg3 *tp)
474{ 534{
475 tw32(TG3PCI_MISC_HOST_CTRL,
476 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 535 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478 tp->last_tag << 24); 536 tp->last_tag << 24);
479 mmiowb(); 537 mmiowb();
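The hunk above replaces per-call flag tests with per-device accessor pointers
(tp->read32/tp->write32 and the mailbox variants) that tg3_get_invariants()
fills in once, later in this patch; tw32()/tr32() then simply dispatch through
them. A standalone sketch of that dispatch pattern (names made up, not tg3
code):

/* Pick the register-write method once at init instead of testing a quirk
 * flag on every access. */
struct sketch_dev {
        void (*write32)(struct sketch_dev *dev, unsigned int off,
                        unsigned int val);
        unsigned int quirky;
};

static void sketch_write32_direct(struct sketch_dev *dev, unsigned int off,
                                  unsigned int val)
{
        /* a plain MMIO write would go here */
}

static void sketch_write32_workaround(struct sketch_dev *dev, unsigned int off,
                                      unsigned int val)
{
        /* an indirect, config-space based write with flush would go here */
}

static void sketch_select_accessors(struct sketch_dev *dev)
{
        dev->write32 = dev->quirky ? sketch_write32_workaround
                                   : sketch_write32_direct;
}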
@@ -2821,9 +2879,13 @@ static void tg3_tx(struct tg3 *tp)
2821 2879
2822 tp->tx_cons = sw_idx; 2880 tp->tx_cons = sw_idx;
2823 2881
2824 if (netif_queue_stopped(tp->dev) && 2882 if (unlikely(netif_queue_stopped(tp->dev))) {
2825 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) 2883 spin_lock(&tp->tx_lock);
2826 netif_wake_queue(tp->dev); 2884 if (netif_queue_stopped(tp->dev) &&
2885 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2886 netif_wake_queue(tp->dev);
2887 spin_unlock(&tp->tx_lock);
2888 }
2827} 2889}
2828 2890
2829/* Returns size of skb allocated or < 0 on error. 2891/* Returns size of skb allocated or < 0 on error.
@@ -3139,9 +3201,7 @@ static int tg3_poll(struct net_device *netdev, int *budget)
3139 3201
3140 /* run TX completion thread */ 3202 /* run TX completion thread */
3141 if (sblk->idx[0].tx_consumer != tp->tx_cons) { 3203 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3142 spin_lock(&tp->tx_lock);
3143 tg3_tx(tp); 3204 tg3_tx(tp);
3144 spin_unlock(&tp->tx_lock);
3145 } 3205 }
3146 3206
3147 /* run RX thread, within the bounds set by NAPI. 3207 /* run RX thread, within the bounds set by NAPI.
@@ -3161,18 +3221,17 @@ static int tg3_poll(struct net_device *netdev, int *budget)
3161 netdev->quota -= work_done; 3221 netdev->quota -= work_done;
3162 } 3222 }
3163 3223
3164 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 3224 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3165 tp->last_tag = sblk->status_tag; 3225 tp->last_tag = sblk->status_tag;
3166 rmb(); 3226 rmb();
3167 sblk->status &= ~SD_STATUS_UPDATED; 3227 } else
3228 sblk->status &= ~SD_STATUS_UPDATED;
3168 3229
3169 /* if no more work, tell net stack and NIC we're done */ 3230 /* if no more work, tell net stack and NIC we're done */
3170 done = !tg3_has_work(tp); 3231 done = !tg3_has_work(tp);
3171 if (done) { 3232 if (done) {
3172 spin_lock(&tp->lock);
3173 netif_rx_complete(netdev); 3233 netif_rx_complete(netdev);
3174 tg3_restart_ints(tp); 3234 tg3_restart_ints(tp);
3175 spin_unlock(&tp->lock);
3176 } 3235 }
3177 3236
3178 return (done ? 0 : 1); 3237 return (done ? 0 : 1);
@@ -3220,8 +3279,9 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3220{ 3279{
3221 struct net_device *dev = dev_id; 3280 struct net_device *dev = dev_id;
3222 struct tg3 *tp = netdev_priv(dev); 3281 struct tg3 *tp = netdev_priv(dev);
3223 struct tg3_hw_status *sblk = tp->hw_status;
3224 3282
3283 prefetch(tp->hw_status);
3284 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3225 /* 3285 /*
3226 * Writing any value to intr-mbox-0 clears PCI INTA# and 3286 * Writing any value to intr-mbox-0 clears PCI INTA# and
3227 * chip-internal interrupt pending events. 3287 * chip-internal interrupt pending events.
@@ -3230,19 +3290,9 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3230 * event coalescing. 3290 * event coalescing.
3231 */ 3291 */
3232 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 3292 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3233 tp->last_tag = sblk->status_tag; 3293 if (likely(!tg3_irq_sync(tp)))
3234 rmb();
3235 if (tg3_irq_sync(tp))
3236 goto out;
3237 sblk->status &= ~SD_STATUS_UPDATED;
3238 if (likely(tg3_has_work(tp)))
3239 netif_rx_schedule(dev); /* schedule NAPI poll */ 3294 netif_rx_schedule(dev); /* schedule NAPI poll */
3240 else { 3295
3241 /* No work, re-enable interrupts. */
3242 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3243 tp->last_tag << 24);
3244 }
3245out:
3246 return IRQ_RETVAL(1); 3296 return IRQ_RETVAL(1);
3247} 3297}
3248 3298
@@ -3272,15 +3322,15 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3272 if (tg3_irq_sync(tp)) 3322 if (tg3_irq_sync(tp))
3273 goto out; 3323 goto out;
3274 sblk->status &= ~SD_STATUS_UPDATED; 3324 sblk->status &= ~SD_STATUS_UPDATED;
3275 if (likely(tg3_has_work(tp))) 3325 if (likely(tg3_has_work(tp))) {
3326 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3276 netif_rx_schedule(dev); /* schedule NAPI poll */ 3327 netif_rx_schedule(dev); /* schedule NAPI poll */
3277 else { 3328 } else {
3278 /* No work, shared interrupt perhaps? re-enable 3329 /* No work, shared interrupt perhaps? re-enable
3279 * interrupts, and flush that PCI write 3330 * interrupts, and flush that PCI write
3280 */ 3331 */
3281 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3332 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3282 0x00000000); 3333 0x00000000);
3283 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3284 } 3334 }
3285 } else { /* shared interrupt */ 3335 } else { /* shared interrupt */
3286 handled = 0; 3336 handled = 0;
@@ -3301,7 +3351,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
3301 * Reading the PCI State register will confirm whether the 3351 * Reading the PCI State register will confirm whether the
3302 * interrupt is ours and will flush the status block. 3352 * interrupt is ours and will flush the status block.
3303 */ 3353 */
3304 if ((sblk->status & SD_STATUS_UPDATED) || 3354 if ((sblk->status_tag != tp->last_tag) ||
3305 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 3355 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3306 /* 3356 /*
3307 * writing any value to intr-mbox-0 clears PCI INTA# and 3357 * writing any value to intr-mbox-0 clears PCI INTA# and
@@ -3312,20 +3362,17 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
3312 */ 3362 */
3313 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3363 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3314 0x00000001); 3364 0x00000001);
3315 tp->last_tag = sblk->status_tag;
3316 rmb();
3317 if (tg3_irq_sync(tp)) 3365 if (tg3_irq_sync(tp))
3318 goto out; 3366 goto out;
3319 sblk->status &= ~SD_STATUS_UPDATED; 3367 if (netif_rx_schedule_prep(dev)) {
3320 if (likely(tg3_has_work(tp))) 3368 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3321 netif_rx_schedule(dev); /* schedule NAPI poll */ 3369 /* Update last_tag to mark that this status has been
3322 else { 3370 * seen. Because interrupt may be shared, we may be
3323 /* no work, shared interrupt perhaps? re-enable 3371 * racing with tg3_poll(), so only update last_tag
3324 * interrupts, and flush that PCI write 3372 * if tg3_poll() is not scheduled.
3325 */ 3373 */
3326 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3374 tp->last_tag = sblk->status_tag;
3327 tp->last_tag << 24); 3375 __netif_rx_schedule(dev);
3328 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3329 } 3376 }
3330 } else { /* shared interrupt */ 3377 } else { /* shared interrupt */
3331 handled = 0; 3378 handled = 0;
@@ -3659,8 +3706,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3659 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); 3706 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3660 3707
3661 tp->tx_prod = entry; 3708 tp->tx_prod = entry;
3662 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) 3709 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3663 netif_stop_queue(dev); 3710 netif_stop_queue(dev);
3711 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3712 netif_wake_queue(tp->dev);
3713 }
3664 3714
3665out_unlock: 3715out_unlock:
3666 mmiowb(); 3716 mmiowb();
@@ -4216,7 +4266,7 @@ static void tg3_stop_fw(struct tg3 *);
4216static int tg3_chip_reset(struct tg3 *tp) 4266static int tg3_chip_reset(struct tg3 *tp)
4217{ 4267{
4218 u32 val; 4268 u32 val;
4219 u32 flags_save; 4269 void (*write_op)(struct tg3 *, u32, u32);
4220 int i; 4270 int i;
4221 4271
4222 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4272 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
@@ -4228,8 +4278,9 @@ static int tg3_chip_reset(struct tg3 *tp)
4228 * fun things. So, temporarily disable the 5701 4278 * fun things. So, temporarily disable the 5701
4229 * hardware workaround, while we do the reset. 4279 * hardware workaround, while we do the reset.
4230 */ 4280 */
4231 flags_save = tp->tg3_flags; 4281 write_op = tp->write32;
4232 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG; 4282 if (write_op == tg3_write_flush_reg32)
4283 tp->write32 = tg3_write32;
4233 4284
4234 /* do the reset */ 4285 /* do the reset */
4235 val = GRC_MISC_CFG_CORECLK_RESET; 4286 val = GRC_MISC_CFG_CORECLK_RESET;
@@ -4248,8 +4299,8 @@ static int tg3_chip_reset(struct tg3 *tp)
4248 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 4299 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4249 tw32(GRC_MISC_CFG, val); 4300 tw32(GRC_MISC_CFG, val);
4250 4301
4251 /* restore 5701 hardware bug workaround flag */ 4302 /* restore 5701 hardware bug workaround write method */
4252 tp->tg3_flags = flags_save; 4303 tp->write32 = write_op;
4253 4304
4254 /* Unfortunately, we have to delay before the PCI read back. 4305 /* Unfortunately, we have to delay before the PCI read back.
4255 * Some 575X chips even will not respond to a PCI cfg access 4306 * Some 575X chips even will not respond to a PCI cfg access
@@ -4635,7 +4686,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4635 int cpu_scratch_size, struct fw_info *info) 4686 int cpu_scratch_size, struct fw_info *info)
4636{ 4687{
4637 int err, i; 4688 int err, i;
4638 u32 orig_tg3_flags = tp->tg3_flags;
4639 void (*write_op)(struct tg3 *, u32, u32); 4689 void (*write_op)(struct tg3 *, u32, u32);
4640 4690
4641 if (cpu_base == TX_CPU_BASE && 4691 if (cpu_base == TX_CPU_BASE &&
@@ -4651,11 +4701,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4651 else 4701 else
4652 write_op = tg3_write_indirect_reg32; 4702 write_op = tg3_write_indirect_reg32;
4653 4703
4654 /* Force use of PCI config space for indirect register
4655 * write calls.
4656 */
4657 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4658
4659 /* It is possible that bootcode is still loading at this point. 4704 /* It is possible that bootcode is still loading at this point.
4660 * Get the nvram lock first before halting the cpu. 4705 * Get the nvram lock first before halting the cpu.
4661 */ 4706 */
@@ -4691,7 +4736,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4691 err = 0; 4736 err = 0;
4692 4737
4693out: 4738out:
4694 tp->tg3_flags = orig_tg3_flags;
4695 return err; 4739 return err;
4696} 4740}
4697 4741
@@ -5808,8 +5852,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5808 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 5852 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5809 udelay(100); 5853 udelay(100);
5810 5854
5811 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 5855 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5812 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5813 tp->last_tag = 0; 5856 tp->last_tag = 0;
5814 5857
5815 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5858 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
@@ -5910,7 +5953,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5910 tw32(MAC_LED_CTRL, tp->led_ctrl); 5953 tw32(MAC_LED_CTRL, tp->led_ctrl);
5911 5954
5912 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 5955 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5913 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 5956 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
5914 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 5957 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5915 udelay(10); 5958 udelay(10);
5916 } 5959 }
@@ -6192,13 +6235,15 @@ static int tg3_test_interrupt(struct tg3 *tp)
6192 if (err) 6235 if (err)
6193 return err; 6236 return err;
6194 6237
6238 tp->hw_status->status &= ~SD_STATUS_UPDATED;
6195 tg3_enable_ints(tp); 6239 tg3_enable_ints(tp);
6196 6240
6197 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 6241 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6198 HOSTCC_MODE_NOW); 6242 HOSTCC_MODE_NOW);
6199 6243
6200 for (i = 0; i < 5; i++) { 6244 for (i = 0; i < 5; i++) {
6201 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); 6245 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6246 TG3_64BIT_REG_LOW);
6202 if (int_mbox != 0) 6247 if (int_mbox != 0)
6203 break; 6248 break;
6204 msleep(10); 6249 msleep(10);
@@ -6598,10 +6643,10 @@ static int tg3_open(struct net_device *dev)
6598 6643
6599 /* Mailboxes */ 6644 /* Mailboxes */
6600 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", 6645 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6601 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), 6646 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6602 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), 6647 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6603 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), 6648 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6604 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); 6649 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6605 6650
6606 /* NIC side send descriptors. */ 6651 /* NIC side send descriptors. */
6607 for (i = 0; i < 6; i++) { 6652 for (i = 0; i < 6; i++) {
@@ -7506,6 +7551,38 @@ static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7506 } 7551 }
7507} 7552}
7508 7553
7554static int tg3_phys_id(struct net_device *dev, u32 data)
7555{
7556 struct tg3 *tp = netdev_priv(dev);
7557 int i;
7558
7559 if (!netif_running(tp->dev))
7560 return -EAGAIN;
7561
7562 if (data == 0)
7563 data = 2;
7564
7565 for (i = 0; i < (data * 2); i++) {
7566 if ((i % 2) == 0)
7567 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7568 LED_CTRL_1000MBPS_ON |
7569 LED_CTRL_100MBPS_ON |
7570 LED_CTRL_10MBPS_ON |
7571 LED_CTRL_TRAFFIC_OVERRIDE |
7572 LED_CTRL_TRAFFIC_BLINK |
7573 LED_CTRL_TRAFFIC_LED);
7574
7575 else
7576 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7577 LED_CTRL_TRAFFIC_OVERRIDE);
7578
7579 if (msleep_interruptible(500))
7580 break;
7581 }
7582 tw32(MAC_LED_CTRL, tp->led_ctrl);
7583 return 0;
7584}
7585
7509static void tg3_get_ethtool_stats (struct net_device *dev, 7586static void tg3_get_ethtool_stats (struct net_device *dev,
7510 struct ethtool_stats *estats, u64 *tmp_stats) 7587 struct ethtool_stats *estats, u64 *tmp_stats)
7511{ 7588{
@@ -7565,7 +7642,7 @@ static int tg3_test_link(struct tg3 *tp)
7565 if (!netif_running(tp->dev)) 7642 if (!netif_running(tp->dev))
7566 return -ENODEV; 7643 return -ENODEV;
7567 7644
7568 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 7645 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7569 max = TG3_SERDES_TIMEOUT_SEC; 7646 max = TG3_SERDES_TIMEOUT_SEC;
7570 else 7647 else
7571 max = TG3_COPPER_TIMEOUT_SEC; 7648 max = TG3_COPPER_TIMEOUT_SEC;
@@ -7850,9 +7927,12 @@ static int tg3_test_memory(struct tg3 *tp)
7850 return err; 7927 return err;
7851} 7928}
7852 7929
7853static int tg3_test_loopback(struct tg3 *tp) 7930#define TG3_MAC_LOOPBACK 0
7931#define TG3_PHY_LOOPBACK 1
7932
7933static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7854{ 7934{
7855 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key; 7935 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
7856 u32 desc_idx; 7936 u32 desc_idx;
7857 struct sk_buff *skb, *rx_skb; 7937 struct sk_buff *skb, *rx_skb;
7858 u8 *tx_data; 7938 u8 *tx_data;
@@ -7860,18 +7940,26 @@ static int tg3_test_loopback(struct tg3 *tp)
7860 int num_pkts, tx_len, rx_len, i, err; 7940 int num_pkts, tx_len, rx_len, i, err;
7861 struct tg3_rx_buffer_desc *desc; 7941 struct tg3_rx_buffer_desc *desc;
7862 7942
7863 if (!netif_running(tp->dev)) 7943 if (loopback_mode == TG3_MAC_LOOPBACK) {
7864 return -ENODEV; 7944 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7945 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7946 MAC_MODE_PORT_MODE_GMII;
7947 tw32(MAC_MODE, mac_mode);
7948 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7949 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7950 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7951 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7952 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7953 tw32(MAC_MODE, mac_mode);
7954
7955 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7956 BMCR_SPEED1000);
7957 }
7958 else
7959 return -EINVAL;
7865 7960
7866 err = -EIO; 7961 err = -EIO;
7867 7962
7868 tg3_reset_hw(tp);
7869
7870 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7871 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7872 MAC_MODE_PORT_MODE_GMII;
7873 tw32(MAC_MODE, mac_mode);
7874
7875 tx_len = 1514; 7963 tx_len = 1514;
7876 skb = dev_alloc_skb(tx_len); 7964 skb = dev_alloc_skb(tx_len);
7877 tx_data = skb_put(skb, tx_len); 7965 tx_data = skb_put(skb, tx_len);
@@ -7892,16 +7980,16 @@ static int tg3_test_loopback(struct tg3 *tp)
7892 7980
7893 rx_start_idx = tp->hw_status->idx[0].rx_producer; 7981 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7894 7982
7895 send_idx = 0;
7896 num_pkts = 0; 7983 num_pkts = 0;
7897 7984
7898 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1); 7985 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
7899 7986
7900 send_idx++; 7987 tp->tx_prod++;
7901 num_pkts++; 7988 num_pkts++;
7902 7989
7903 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx); 7990 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
7904 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); 7991 tp->tx_prod);
7992 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7905 7993
7906 udelay(10); 7994 udelay(10);
7907 7995
@@ -7913,7 +8001,7 @@ static int tg3_test_loopback(struct tg3 *tp)
7913 8001
7914 tx_idx = tp->hw_status->idx[0].tx_consumer; 8002 tx_idx = tp->hw_status->idx[0].tx_consumer;
7915 rx_idx = tp->hw_status->idx[0].rx_producer; 8003 rx_idx = tp->hw_status->idx[0].rx_producer;
7916 if ((tx_idx == send_idx) && 8004 if ((tx_idx == tp->tx_prod) &&
7917 (rx_idx == (rx_start_idx + num_pkts))) 8005 (rx_idx == (rx_start_idx + num_pkts)))
7918 break; 8006 break;
7919 } 8007 }
@@ -7921,7 +8009,7 @@ static int tg3_test_loopback(struct tg3 *tp)
7921 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 8009 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7922 dev_kfree_skb(skb); 8010 dev_kfree_skb(skb);
7923 8011
7924 if (tx_idx != send_idx) 8012 if (tx_idx != tp->tx_prod)
7925 goto out; 8013 goto out;
7926 8014
7927 if (rx_idx != rx_start_idx + num_pkts) 8015 if (rx_idx != rx_start_idx + num_pkts)
@@ -7957,6 +8045,30 @@ out:
7957 return err; 8045 return err;
7958} 8046}
7959 8047
8048#define TG3_MAC_LOOPBACK_FAILED 1
8049#define TG3_PHY_LOOPBACK_FAILED 2
8050#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8051 TG3_PHY_LOOPBACK_FAILED)
8052
8053static int tg3_test_loopback(struct tg3 *tp)
8054{
8055 int err = 0;
8056
8057 if (!netif_running(tp->dev))
8058 return TG3_LOOPBACK_FAILED;
8059
8060 tg3_reset_hw(tp);
8061
8062 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8063 err |= TG3_MAC_LOOPBACK_FAILED;
8064 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8065 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8066 err |= TG3_PHY_LOOPBACK_FAILED;
8067 }
8068
8069 return err;
8070}
8071
7960static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 8072static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7961 u64 *data) 8073 u64 *data)
7962{ 8074{
@@ -7997,10 +8109,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7997 etest->flags |= ETH_TEST_FL_FAILED; 8109 etest->flags |= ETH_TEST_FL_FAILED;
7998 data[3] = 1; 8110 data[3] = 1;
7999 } 8111 }
8000 if (tg3_test_loopback(tp) != 0) { 8112 if ((data[4] = tg3_test_loopback(tp)) != 0)
8001 etest->flags |= ETH_TEST_FL_FAILED; 8113 etest->flags |= ETH_TEST_FL_FAILED;
8002 data[4] = 1;
8003 }
8004 8114
8005 tg3_full_unlock(tp); 8115 tg3_full_unlock(tp);
8006 8116
@@ -8188,6 +8298,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
8188 .self_test_count = tg3_get_test_count, 8298 .self_test_count = tg3_get_test_count,
8189 .self_test = tg3_self_test, 8299 .self_test = tg3_self_test,
8190 .get_strings = tg3_get_strings, 8300 .get_strings = tg3_get_strings,
8301 .phys_id = tg3_phys_id,
8191 .get_stats_count = tg3_get_stats_count, 8302 .get_stats_count = tg3_get_stats_count,
8192 .get_ethtool_stats = tg3_get_ethtool_stats, 8303 .get_ethtool_stats = tg3_get_ethtool_stats,
8193 .get_coalesce = tg3_get_coalesce, 8304 .get_coalesce = tg3_get_coalesce,
@@ -8252,7 +8363,8 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8252 tw32(NVRAM_CFG1, nvcfg1); 8363 tw32(NVRAM_CFG1, nvcfg1);
8253 } 8364 }
8254 8365
8255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { 8366 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8367 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)) {
8256 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 8368 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8257 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 8369 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8258 tp->nvram_jedecnum = JEDEC_ATMEL; 8370 tp->nvram_jedecnum = JEDEC_ATMEL;
@@ -8666,8 +8778,9 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8666 if (i == (len - 4)) 8778 if (i == (len - 4))
8667 nvram_cmd |= NVRAM_CMD_LAST; 8779 nvram_cmd |= NVRAM_CMD_LAST;
8668 8780
8669 if ((tp->nvram_jedecnum == JEDEC_ST) && 8781 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8670 (nvram_cmd & NVRAM_CMD_FIRST)) { 8782 (tp->nvram_jedecnum == JEDEC_ST) &&
8783 (nvram_cmd & NVRAM_CMD_FIRST)) {
8671 8784
8672 if ((ret = tg3_nvram_exec_cmd(tp, 8785 if ((ret = tg3_nvram_exec_cmd(tp,
8673 NVRAM_CMD_WREN | NVRAM_CMD_GO | 8786 NVRAM_CMD_WREN | NVRAM_CMD_GO |
@@ -9153,14 +9266,6 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9153static int __devinit tg3_get_invariants(struct tg3 *tp) 9266static int __devinit tg3_get_invariants(struct tg3 *tp)
9154{ 9267{
9155 static struct pci_device_id write_reorder_chipsets[] = { 9268 static struct pci_device_id write_reorder_chipsets[] = {
9156 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9157 PCI_DEVICE_ID_INTEL_82801AA_8) },
9158 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9159 PCI_DEVICE_ID_INTEL_82801AB_8) },
9160 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9161 PCI_DEVICE_ID_INTEL_82801BA_11) },
9162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9163 PCI_DEVICE_ID_INTEL_82801BA_6) },
9164 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9269 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9165 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9270 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9166 { }, 9271 { },
@@ -9177,7 +9282,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9177 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9282 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9178#endif 9283#endif
9179 9284
9180 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write 9285 /* If we have an AMD 762 chipset, write
9181 * reordering to the mailbox registers done by the host 9286 * reordering to the mailbox registers done by the host
9182 * controller can cause major troubles. We read back from 9287 * controller can cause major troubles. We read back from
9183 * every mailbox register write to force the writes to be 9288 * every mailbox register write to force the writes to be
@@ -9215,6 +9320,69 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9215 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) 9320 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9216 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 9321 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9217 9322
9323 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9324 * we need to disable memory and use config. cycles
9325 * only to access all registers. The 5702/03 chips
9326 * can mistakenly decode the special cycles from the
9327 * ICH chipsets as memory write cycles, causing corruption
9328 * of register and memory space. Only certain ICH bridges
9329 * will drive special cycles with non-zero data during the
9330 * address phase which can fall within the 5703's address
9331 * range. This is not an ICH bug as the PCI spec allows
9332 * non-zero address during special cycles. However, only
9333 * these ICH bridges are known to drive non-zero addresses
9334 * during special cycles.
9335 *
9336 * Since special cycles do not cross PCI bridges, we only
9337 * enable this workaround if the 5703 is on the secondary
9338 * bus of these ICH bridges.
9339 */
9340 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9341 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9342 static struct tg3_dev_id {
9343 u32 vendor;
9344 u32 device;
9345 u32 rev;
9346 } ich_chipsets[] = {
9347 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9348 PCI_ANY_ID },
9349 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9350 PCI_ANY_ID },
9351 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9352 0xa },
9353 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9354 PCI_ANY_ID },
9355 { },
9356 };
9357 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9358 struct pci_dev *bridge = NULL;
9359
9360 while (pci_id->vendor != 0) {
9361 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9362 bridge);
9363 if (!bridge) {
9364 pci_id++;
9365 continue;
9366 }
9367 if (pci_id->rev != PCI_ANY_ID) {
9368 u8 rev;
9369
9370 pci_read_config_byte(bridge, PCI_REVISION_ID,
9371 &rev);
9372 if (rev > pci_id->rev)
9373 continue;
9374 }
9375 if (bridge->subordinate &&
9376 (bridge->subordinate->number ==
9377 tp->pdev->bus->number)) {
9378
9379 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9380 pci_dev_put(bridge);
9381 break;
9382 }
9383 }
9384 }
9385
9218 /* Find msi capability. */ 9386 /* Find msi capability. */
9219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9220 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 9388 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
@@ -9302,6 +9470,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9302 } 9470 }
9303 } 9471 }
9304 9472
9473 /* 5700 BX chips need to have their TX producer index mailboxes
9474 * written twice to workaround a bug.
9475 */
9476 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9477 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9478
9305 /* Back to back register writes can cause problems on this chip, 9479 /* Back to back register writes can cause problems on this chip,
9306 * the workaround is to read back all reg writes except those to 9480 * the workaround is to read back all reg writes except those to
9307 * mailbox regs. See tg3_write_indirect_reg32(). 9481 * mailbox regs. See tg3_write_indirect_reg32().
@@ -9325,6 +9499,43 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9325 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 9499 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9326 } 9500 }
9327 9501
9502 /* Default fast path register access methods */
9503 tp->read32 = tg3_read32;
9504 tp->write32 = tg3_write32;
9505 tp->read32_mbox = tg3_read32;
9506 tp->write32_mbox = tg3_write32;
9507 tp->write32_tx_mbox = tg3_write32;
9508 tp->write32_rx_mbox = tg3_write32;
9509
9510 /* Various workaround register access methods */
9511 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9512 tp->write32 = tg3_write_indirect_reg32;
9513 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9514 tp->write32 = tg3_write_flush_reg32;
9515
9516 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9517 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9518 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9519 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9520 tp->write32_rx_mbox = tg3_write_flush_reg32;
9521 }
9522
9523 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9524 tp->read32 = tg3_read_indirect_reg32;
9525 tp->write32 = tg3_write_indirect_reg32;
9526 tp->read32_mbox = tg3_read_indirect_mbox;
9527 tp->write32_mbox = tg3_write_indirect_mbox;
9528 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9529 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9530
9531 iounmap(tp->regs);
9532 tp->regs = 0;
9533
9534 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9535 pci_cmd &= ~PCI_COMMAND_MEMORY;
9536 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9537 }
9538
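With the access methods selected once at probe time as above, the rest of the driver can dispatch through the per-device function pointers instead of re-testing workaround flags on every register access (tg3 wraps these pointers in tw32()/tr32()-style helpers elsewhere in tg3.h). A minimal sketch of the dispatch, for illustration only:

	/* Sketch only: callers go through the method pointers chosen at probe
	 * time, so only chips that need flushing or indirect access pay the
	 * extra cost while unaffected chips keep the fast path. */
	static inline void example_tw32(struct tg3 *tp, u32 reg, u32 val)
	{
		tp->write32(tp, reg, val);
	}

	static inline u32 example_tr32(struct tg3 *tp, u32 reg)
	{
		return tp->read32(tp, reg);
	}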
9328 /* Get eeprom hw config before calling tg3_set_power_state(). 9539 /* Get eeprom hw config before calling tg3_set_power_state().
9329 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be 9540 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9330 * determined before calling tg3_set_power_state() so that 9541 * determined before calling tg3_set_power_state() so that
@@ -9539,14 +9750,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9539 else 9750 else
9540 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 9751 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9541 9752
9542 /* 5700 BX chips need to have their TX producer index mailboxes
9543 * written twice to workaround a bug.
9544 */
9545 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9546 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9547 else
9548 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9549
9550 /* It seems all chips can get confused if TX buffers 9753 /* It seems all chips can get confused if TX buffers
9551 * straddle the 4GB address boundary in some cases. 9754 * straddle the 4GB address boundary in some cases.
9552 */ 9755 */
@@ -10469,7 +10672,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10469 return 0; 10672 return 0;
10470 10673
10471err_out_iounmap: 10674err_out_iounmap:
10472 iounmap(tp->regs); 10675 if (tp->regs) {
10676 iounmap(tp->regs);
10677 tp->regs = 0;
10678 }
10473 10679
10474err_out_free_dev: 10680err_out_free_dev:
10475 free_netdev(dev); 10681 free_netdev(dev);
@@ -10491,7 +10697,10 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
10491 struct tg3 *tp = netdev_priv(dev); 10697 struct tg3 *tp = netdev_priv(dev);
10492 10698
10493 unregister_netdev(dev); 10699 unregister_netdev(dev);
10494 iounmap(tp->regs); 10700 if (tp->regs) {
10701 iounmap(tp->regs);
10702 tp->regs = 0;
10703 }
10495 free_netdev(dev); 10704 free_netdev(dev);
10496 pci_release_regions(pdev); 10705 pci_release_regions(pdev);
10497 pci_disable_device(pdev); 10706 pci_disable_device(pdev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5c4433c147fa..c184b773e585 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2049,6 +2049,11 @@ struct tg3 {
2049 spinlock_t lock; 2049 spinlock_t lock;
2050 spinlock_t indirect_lock; 2050 spinlock_t indirect_lock;
2051 2051
2052 u32 (*read32) (struct tg3 *, u32);
2053 void (*write32) (struct tg3 *, u32, u32);
2054 u32 (*read32_mbox) (struct tg3 *, u32);
2055 void (*write32_mbox) (struct tg3 *, u32,
2056 u32);
2052 void __iomem *regs; 2057 void __iomem *regs;
2053 struct net_device *dev; 2058 struct net_device *dev;
2054 struct pci_dev *pdev; 2059 struct pci_dev *pdev;
@@ -2060,6 +2065,8 @@ struct tg3 {
2060 u32 msg_enable; 2065 u32 msg_enable;
2061 2066
2062 /* begin "tx thread" cacheline section */ 2067 /* begin "tx thread" cacheline section */
2068 void (*write32_tx_mbox) (struct tg3 *, u32,
2069 u32);
2063 u32 tx_prod; 2070 u32 tx_prod;
2064 u32 tx_cons; 2071 u32 tx_cons;
2065 u32 tx_pending; 2072 u32 tx_pending;
@@ -2071,6 +2078,8 @@ struct tg3 {
2071 dma_addr_t tx_desc_mapping; 2078 dma_addr_t tx_desc_mapping;
2072 2079
2073 /* begin "rx thread" cacheline section */ 2080 /* begin "rx thread" cacheline section */
2081 void (*write32_rx_mbox) (struct tg3 *, u32,
2082 u32);
2074 u32 rx_rcb_ptr; 2083 u32 rx_rcb_ptr;
2075 u32 rx_std_ptr; 2084 u32 rx_std_ptr;
2076 u32 rx_jumbo_ptr; 2085 u32 rx_jumbo_ptr;
@@ -2165,6 +2174,7 @@ struct tg3 {
2165#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \ 2174#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \
2166 TG3_FLG2_MII_SERDES) 2175 TG3_FLG2_MII_SERDES)
2167#define TG3_FLG2_PARALLEL_DETECT 0x01000000 2176#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2177#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2168 2178
2169 u32 split_mode_max_reqs; 2179 u32 split_mode_max_reqs;
2170#define SPLIT_MODE_5704_MAX_REQ 3 2180#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index e2cdaf876201..8c9634a98c11 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -135,6 +135,18 @@ config DM9102
135 <file:Documentation/networking/net-modules.txt>. The module will 135 <file:Documentation/networking/net-modules.txt>. The module will
136 be called dmfe. 136 be called dmfe.
137 137
138config ULI526X
139 tristate "ULi M526x controller support"
140 depends on NET_TULIP && PCI
141 select CRC32
142 ---help---
143 This driver is for ULi M5261/M5263 10/100M Ethernet Controller
144 (<http://www.uli.com.tw/>).
145
146 To compile this driver as a module, choose M here and read
147 <file:Documentation/networking/net-modules.txt>. The module will
148 be called uli526x.
149
138config PCMCIA_XIRCOM 150config PCMCIA_XIRCOM
139 tristate "Xircom CardBus support (new driver)" 151 tristate "Xircom CardBus support (new driver)"
140 depends on NET_TULIP && CARDBUS 152 depends on NET_TULIP && CARDBUS
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 8bb9b4683979..451090d6fcca 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840) += winbond-840.o
9obj-$(CONFIG_DE2104X) += de2104x.o 9obj-$(CONFIG_DE2104X) += de2104x.o
10obj-$(CONFIG_TULIP) += tulip.o 10obj-$(CONFIG_TULIP) += tulip.o
11obj-$(CONFIG_DE4X5) += de4x5.o 11obj-$(CONFIG_DE4X5) += de4x5.o
12obj-$(CONFIG_ULI526X) += uli526x.o
12 13
13# Declare multi-part drivers. 14# Declare multi-part drivers.
14 15
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index fc353e348f9a..a22d00198e4d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1934,7 +1934,7 @@ static int __init de_init_one (struct pci_dev *pdev,
1934 struct de_private *de; 1934 struct de_private *de;
1935 int rc; 1935 int rc;
1936 void __iomem *regs; 1936 void __iomem *regs;
1937 long pciaddr; 1937 unsigned long pciaddr;
1938 static int board_idx = -1; 1938 static int board_idx = -1;
1939 1939
1940 board_idx++; 1940 board_idx++;
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index e26c31f944bf..f53396fe79c9 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
81 return retval & 0xffff; 81 return retval & 0xffff;
82 } 82 }
83 83
84 if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
85 int value;
86 int i = 1000;
87
88 value = ioread32(ioaddr + CSR9);
89 iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
90
91 value = (phy_id << 21) | (location << 16) | 0x08000000;
92 iowrite32(value, ioaddr + CSR10);
93
94 while(--i > 0) {
95 mdio_delay();
96 if(ioread32(ioaddr + CSR10) & 0x10000000)
97 break;
98 }
99 retval = ioread32(ioaddr + CSR10);
100 spin_unlock_irqrestore(&tp->mii_lock, flags);
101 return retval & 0xFFFF;
102 }
103 /* Establish sync by sending at least 32 logic ones. */ 84 /* Establish sync by sending at least 32 logic ones. */
104 for (i = 32; i >= 0; i--) { 85 for (i = 32; i >= 0; i--) {
105 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); 86 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
159 spin_unlock_irqrestore(&tp->mii_lock, flags); 140 spin_unlock_irqrestore(&tp->mii_lock, flags);
160 return; 141 return;
161 } 142 }
162 if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
163 int value;
164 int i = 1000;
165
166 value = ioread32(ioaddr + CSR9);
167 iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
168
169 value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
170 iowrite32(value, ioaddr + CSR10);
171
172 while(--i > 0) {
173 if (ioread32(ioaddr + CSR10) & 0x10000000)
174 break;
175 }
176 spin_unlock_irqrestore(&tp->mii_lock, flags);
177 return;
178 }
179 143
180 /* Establish sync by sending 32 logic ones. */ 144 /* Establish sync by sending 32 logic ones. */
181 for (i = 32; i >= 0; i--) { 145 for (i = 32; i >= 0; i--) {
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 691568283553..e058a9fbfe88 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data)
39 case MX98713: 39 case MX98713:
40 case COMPEX9881: 40 case COMPEX9881:
41 case DM910X: 41 case DM910X:
42 case ULI526X:
43 default: { 42 default: {
44 struct medialeaf *mleaf; 43 struct medialeaf *mleaf;
45 unsigned char *p; 44 unsigned char *p;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 20346d847d9e..05d2d96f7be2 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -88,7 +88,6 @@ enum chips {
88 I21145, 88 I21145,
89 DM910X, 89 DM910X,
90 CONEXANT, 90 CONEXANT,
91 ULI526X
92}; 91};
93 92
94 93
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
482 481
483static inline void tulip_restart_rxtx(struct tulip_private *tp) 482static inline void tulip_restart_rxtx(struct tulip_private *tp)
484{ 483{
485 if(!(tp->chip_id == ULI526X && 484 tulip_stop_rxtx(tp);
486 (tp->revision == 0x40 || tp->revision == 0x50))) { 485 udelay(5);
487 tulip_stop_rxtx(tp);
488 udelay(5);
489 }
490 tulip_start_rxtx(tp); 486 tulip_start_rxtx(tp);
491} 487}
492 488
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index d45d8f56e5b4..6266a9a7e6e3 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = {
199 { "Conexant LANfinity", 256, 0x0001ebef, 199 { "Conexant LANfinity", 256, 0x0001ebef,
200 HAS_MII | HAS_ACPI, tulip_timer }, 200 HAS_MII | HAS_ACPI, tulip_timer },
201 201
202 /* ULi526X */
203 { "ULi M5261/M5263", 128, 0x0001ebef,
204 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
205}; 202};
206 203
207 204
@@ -239,10 +236,9 @@ static struct pci_device_id tulip_pci_tbl[] = {
239 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 236 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 237 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 238 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
243 { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
244 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ 239 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
245 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ 240 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
241 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 { } /* terminate list */ 242 { } /* terminate list */
247}; 243};
248MODULE_DEVICE_TABLE(pci, tulip_pci_tbl); 244MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
@@ -522,7 +518,7 @@ static void tulip_tx_timeout(struct net_device *dev)
522 dev->name); 518 dev->name);
523 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 519 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
524 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 520 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
525 || tp->chip_id == DM910X || tp->chip_id == ULI526X) { 521 || tp->chip_id == DM910X) {
526 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " 522 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
527 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", 523 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
528 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), 524 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1099,16 @@ static void set_rx_mode(struct net_device *dev)
1103 entry = tp->cur_tx++ % TX_RING_SIZE; 1099 entry = tp->cur_tx++ % TX_RING_SIZE;
1104 1100
1105 if (entry != 0) { 1101 if (entry != 0) {
1106 /* Avoid a chip errata by prefixing a dummy entry. Don't do 1102 /* Avoid a chip errata by prefixing a dummy entry. */
1107 this on the ULI526X as it triggers a different problem */ 1103 tp->tx_buffers[entry].skb = NULL;
1108 if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) { 1104 tp->tx_buffers[entry].mapping = 0;
1109 tp->tx_buffers[entry].skb = NULL; 1105 tp->tx_ring[entry].length =
1110 tp->tx_buffers[entry].mapping = 0; 1106 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1111 tp->tx_ring[entry].length = 1107 tp->tx_ring[entry].buffer1 = 0;
1112 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; 1108 /* Must set DescOwned later to avoid race with chip */
1113 tp->tx_ring[entry].buffer1 = 0; 1109 dummy = entry;
1114 /* Must set DescOwned later to avoid race with chip */ 1110 entry = tp->cur_tx++ % TX_RING_SIZE;
1115 dummy = entry; 1111
1116 entry = tp->cur_tx++ % TX_RING_SIZE;
1117 }
1118 } 1112 }
1119 1113
1120 tp->tx_buffers[entry].skb = NULL; 1114 tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1229,6 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1235{ 1229{
1236 if (pdev->vendor == 0x1282 && pdev->device == 0x9102) 1230 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1237 return 1; 1231 return 1;
1238 if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
1239 return 1;
1240 if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
1241 return 1;
1242 return 0; 1232 return 0;
1243} 1233}
1244 1234
@@ -1680,7 +1670,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1680 switch (chip_idx) { 1670 switch (chip_idx) {
1681 case DC21140: 1671 case DC21140:
1682 case DM910X: 1672 case DM910X:
1683 case ULI526X:
1684 default: 1673 default:
1685 if (tp->mtable) 1674 if (tp->mtable)
1686 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); 1675 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
new file mode 100644
index 000000000000..1a4316336256
--- /dev/null
+++ b/drivers/net/tulip/uli526x.c
@@ -0,0 +1,1749 @@
1/*
2 This program is free software; you can redistribute it and/or
3 modify it under the terms of the GNU General Public License
4 as published by the Free Software Foundation; either version 2
5 of the License, or (at your option) any later version.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12
13*/
14
15#define DRV_NAME "uli526x"
16#define DRV_VERSION "0.9.3"
17#define DRV_RELDATE "2005-7-29"
18
19#include <linux/module.h>
20
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/timer.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ethtool.h>
33#include <linux/skbuff.h>
34#include <linux/delay.h>
35#include <linux/spinlock.h>
36#include <linux/dma-mapping.h>
37
38#include <asm/processor.h>
39#include <asm/bitops.h>
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/uaccess.h>
43
44
45/* Board/System/Debug information/definition ---------------- */
46#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
47#define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/
48
49#define ULI526X_IO_SIZE 0x100
50#define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */
51#define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */
52#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
53#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
54#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
55#define TX_BUF_ALLOC 0x600
56#define RX_ALLOC_SIZE 0x620
57#define ULI526X_RESET 1
58#define CR0_DEFAULT 0
59#define CR6_DEFAULT 0x22200000
60#define CR7_DEFAULT 0x180c1
61#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
62#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
63#define MAX_PACKET_SIZE 1514
64#define ULI5261_MAX_MULTICAST 14
65#define RX_COPY_SIZE 100
66#define MAX_CHECK_PACKET 0x8000
67
68#define ULI526X_10MHF 0
69#define ULI526X_100MHF 1
70#define ULI526X_10MFD 4
71#define ULI526X_100MFD 5
72#define ULI526X_AUTO 8
73
74#define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */
75#define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */
76#define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */
77#define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */
78#define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */
79#define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */
80
81#define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
 82#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time: 8 s */
 83#define ULI526X_TX_KICK (4*HZ/2) /* tx packet kick-out time: 2 s */
84
85#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
86
87#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
88
89
90/* CR9 definition: SROM/MII */
91#define CR9_SROM_READ 0x4800
92#define CR9_SRCS 0x1
93#define CR9_SRCLK 0x2
94#define CR9_CRDOUT 0x8
95#define SROM_DATA_0 0x0
96#define SROM_DATA_1 0x4
97#define PHY_DATA_1 0x20000
98#define PHY_DATA_0 0x00000
99#define MDCLKH 0x10000
100
101#define PHY_POWER_DOWN 0x800
102
103#define SROM_V41_CODE 0x14
104
105#define SROM_CLK_WRITE(data, ioaddr) \
106 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
107 udelay(5); \
108 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
109 udelay(5); \
110 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
111 udelay(5);
112
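SROM_CLK_WRITE above expands to three outl()/udelay() pairs and is only used below as a full statement (it is not wrapped in do { } while (0)). Purely as an illustration of what one read-clock cycle on the serial ROM interface does, an equivalent helper could look like the sketch below (example_srom_clk_write is a hypothetical name, not part of the patch):

	/* Sketch only: one SROM read-clock cycle -- present the data bit with
	 * the clock low, raise SRCLK, then drop it again, with settle delays. */
	static inline void example_srom_clk_write(u32 data, long cr9_ioaddr)
	{
		outl(data | CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
		outl(data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		outl(data | CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}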
113/* Structure/enum declaration ------------------------------- */
114struct tx_desc {
115 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
116 char *tx_buf_ptr; /* Data for us */
117 struct tx_desc *next_tx_desc;
118} __attribute__(( aligned(32) ));
119
120struct rx_desc {
121 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
122 struct sk_buff *rx_skb_ptr; /* Data for us */
123 struct rx_desc *next_rx_desc;
124} __attribute__(( aligned(32) ));
125
126struct uli526x_board_info {
127 u32 chip_id; /* Chip vendor/Device ID */
128 struct net_device *next_dev; /* next device */
129 struct pci_dev *pdev; /* PCI device */
130 spinlock_t lock;
131
132 long ioaddr; /* I/O base address */
133 u32 cr0_data;
134 u32 cr5_data;
135 u32 cr6_data;
136 u32 cr7_data;
137 u32 cr15_data;
138
139 /* pointer for memory physical address */
140 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
141 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
142 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
143 dma_addr_t first_tx_desc_dma;
144 dma_addr_t first_rx_desc_dma;
145
146 /* descriptor pointer */
147 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
148 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
149 unsigned char *desc_pool_ptr; /* descriptor pool memory */
150 struct tx_desc *first_tx_desc;
151 struct tx_desc *tx_insert_ptr;
152 struct tx_desc *tx_remove_ptr;
153 struct rx_desc *first_rx_desc;
154 struct rx_desc *rx_insert_ptr;
 155 struct rx_desc *rx_ready_ptr; /* next packet ready to process */
156 unsigned long tx_packet_cnt; /* transmitted packet count */
157 unsigned long rx_avail_cnt; /* available rx descriptor count */
 158 unsigned long interval_rx_cnt; /* rx packets counted per timer interval */
159
160 u16 dbug_cnt;
161 u16 NIC_capability; /* NIC media capability */
162 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
163
 164 u8 media_mode; /* user-specified media mode */
 165 u8 op_mode; /* actual working media mode */
166 u8 phy_addr;
 167 u8 link_failed; /* link failure flag */
168 u8 wait_reset; /* Hardware failed, need to reset */
169 struct timer_list timer;
170
171 /* System defined statistic counter */
172 struct net_device_stats stats;
173
174 /* Driver defined statistic counter */
175 unsigned long tx_fifo_underrun;
176 unsigned long tx_loss_carrier;
177 unsigned long tx_no_carrier;
178 unsigned long tx_late_collision;
179 unsigned long tx_excessive_collision;
180 unsigned long tx_jabber_timeout;
181 unsigned long reset_count;
182 unsigned long reset_cr8;
183 unsigned long reset_fatal;
184 unsigned long reset_TXtimeout;
185
186 /* NIC SROM data */
187 unsigned char srom[128];
188 u8 init;
189};
190
191enum uli526x_offsets {
192 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
193 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
194 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
195 DCR15 = 0x78
196};
197
198enum uli526x_CR6_bits {
199 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
200 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
201 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
202};
203
204/* Global variable declaration ----------------------------- */
205static int __devinitdata printed_version;
206static char version[] __devinitdata =
207 KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
208 DRV_VERSION " (" DRV_RELDATE ")\n";
209
210static int uli526x_debug;
211static unsigned char uli526x_media_mode = ULI526X_AUTO;
212static u32 uli526x_cr6_user_set;
213
214/* For module input parameter */
215static int debug;
216static u32 cr6set;
217static unsigned char mode = 8;
218
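debug, cr6set and mode above are module knobs; their module_param() registration is not among the lines quoted here. A hedged sketch of the typical wiring for such knobs (the permissions and description strings below are assumptions for illustration, not text of the patch):

	/* Sketch only: typical module-parameter registration for the knobs above. */
	module_param(debug, int, 0);
	module_param(mode, byte, 0);
	module_param(cr6set, uint, 0);
	MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
	MODULE_PARM_DESC(mode, "ULi M5261/M5263 media mode (0:10M half, 1:10M full, 4:100M half, 5:100M full, 8:autonegotiate)");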
219/* function declaration ------------------------------------- */
220static int uli526x_open(struct net_device *);
221static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
222static int uli526x_stop(struct net_device *);
223static struct net_device_stats * uli526x_get_stats(struct net_device *);
224static void uli526x_set_filter_mode(struct net_device *);
225static struct ethtool_ops netdev_ethtool_ops;
226static u16 read_srom_word(long, int);
227static irqreturn_t uli526x_interrupt(int, void *, struct pt_regs *);
228static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
229static void allocate_rx_buffer(struct uli526x_board_info *);
230static void update_cr6(u32, unsigned long);
231static void send_filter_frame(struct net_device *, int);
232static u16 phy_read(unsigned long, u8, u8, u32);
233static u16 phy_readby_cr10(unsigned long, u8, u8);
234static void phy_write(unsigned long, u8, u8, u16, u32);
235static void phy_writeby_cr10(unsigned long, u8, u8, u16);
236static void phy_write_1bit(unsigned long, u32, u32);
237static u16 phy_read_1bit(unsigned long, u32);
238static u8 uli526x_sense_speed(struct uli526x_board_info *);
239static void uli526x_process_mode(struct uli526x_board_info *);
240static void uli526x_timer(unsigned long);
241static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
242static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
243static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
244static void uli526x_dynamic_reset(struct net_device *);
245static void uli526x_free_rxbuffer(struct uli526x_board_info *);
246static void uli526x_init(struct net_device *);
247static void uli526x_set_phyxcer(struct uli526x_board_info *);
248
249/* ULI526X network board routine ---------------------------- */
250
251/*
252 * Search ULI526X board, allocate space and register it
253 */
254
255static int __devinit uli526x_init_one (struct pci_dev *pdev,
256 const struct pci_device_id *ent)
257{
258 struct uli526x_board_info *db; /* board information structure */
259 struct net_device *dev;
260 int i, err;
261
262 ULI526X_DBUG(0, "uli526x_init_one()", 0);
263
264 if (!printed_version++)
265 printk(version);
266
267 /* Init network device */
268 dev = alloc_etherdev(sizeof(*db));
269 if (dev == NULL)
270 return -ENOMEM;
271 SET_MODULE_OWNER(dev);
272 SET_NETDEV_DEV(dev, &pdev->dev);
273
274 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
275 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
276 err = -ENODEV;
277 goto err_out_free;
278 }
279
280 /* Enable Master/IO access, Disable memory access */
281 err = pci_enable_device(pdev);
282 if (err)
283 goto err_out_free;
284
285 if (!pci_resource_start(pdev, 0)) {
286 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
287 err = -ENODEV;
288 goto err_out_disable;
289 }
290
291 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
292 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
293 err = -ENODEV;
294 goto err_out_disable;
295 }
296
297 if (pci_request_regions(pdev, DRV_NAME)) {
298 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
299 err = -ENODEV;
300 goto err_out_disable;
301 }
302
303 /* Init system & device */
304 db = netdev_priv(dev);
305
306 /* Allocate Tx/Rx descriptor memory */
307 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
308 if(db->desc_pool_ptr == NULL)
309 {
310 err = -ENOMEM;
311 goto err_out_nomem;
312 }
313 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
314 if(db->buf_pool_ptr == NULL)
315 {
316 err = -ENOMEM;
317 goto err_out_nomem;
318 }
319
320 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
321 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
322 db->buf_pool_start = db->buf_pool_ptr;
323 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
324
325 db->chip_id = ent->driver_data;
326 db->ioaddr = pci_resource_start(pdev, 0);
327
328 db->pdev = pdev;
329 db->init = 1;
330
331 dev->base_addr = db->ioaddr;
332 dev->irq = pdev->irq;
333 pci_set_drvdata(pdev, dev);
334
335 /* Register some necessary functions */
336 dev->open = &uli526x_open;
337 dev->hard_start_xmit = &uli526x_start_xmit;
338 dev->stop = &uli526x_stop;
339 dev->get_stats = &uli526x_get_stats;
340 dev->set_multicast_list = &uli526x_set_filter_mode;
341 dev->ethtool_ops = &netdev_ethtool_ops;
342 spin_lock_init(&db->lock);
343
344
345 /* read 64 word srom data */
346 for (i = 0; i < 64; i++)
347 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
348
349 /* Set Node address */
350 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
351 {
352 outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode
 353 outl(0x1c0, db->ioaddr + DCR13); //Reset diagnostic pointer port
354 outl(0, db->ioaddr + DCR14); //Clear reset port
355 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer
356 outl(0, db->ioaddr + DCR14); //Clear reset port
357 outl(0, db->ioaddr + DCR13); //Clear CR13
358 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port
359 //Read MAC address from CR14
360 for (i = 0; i < 6; i++)
361 dev->dev_addr[i] = inl(db->ioaddr + DCR14);
362 //Read end
363 outl(0, db->ioaddr + DCR13); //Clear CR13
364 outl(0, db->ioaddr + DCR0); //Clear CR0
365 udelay(10);
366 }
 367 else /* SROM present */
368 {
369 for (i = 0; i < 6; i++)
370 dev->dev_addr[i] = db->srom[20 + i];
371 }
372 err = register_netdev (dev);
373 if (err)
374 goto err_out_res;
375
376 printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
377
378 for (i = 0; i < 6; i++)
379 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
380 printk(", irq %d.\n", dev->irq);
381
382 pci_set_master(pdev);
383
384 return 0;
385
386err_out_res:
387 pci_release_regions(pdev);
388err_out_nomem:
389 if(db->desc_pool_ptr)
390 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
391 db->desc_pool_ptr, db->desc_pool_dma_ptr);
392
393 if(db->buf_pool_ptr != NULL)
394 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
395 db->buf_pool_ptr, db->buf_pool_dma_ptr);
396err_out_disable:
397 pci_disable_device(pdev);
398err_out_free:
399 pci_set_drvdata(pdev, NULL);
400 free_netdev(dev);
401
402 return err;
403}
404
405
406static void __devexit uli526x_remove_one (struct pci_dev *pdev)
407{
408 struct net_device *dev = pci_get_drvdata(pdev);
409 struct uli526x_board_info *db = netdev_priv(dev);
410
411 ULI526X_DBUG(0, "uli526x_remove_one()", 0);
412
413 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
414 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
415 db->desc_pool_dma_ptr);
416 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
417 db->buf_pool_ptr, db->buf_pool_dma_ptr);
418 unregister_netdev(dev);
419 pci_release_regions(pdev);
420 free_netdev(dev); /* free board information */
421 pci_set_drvdata(pdev, NULL);
422 pci_disable_device(pdev);
423 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
424}
425
426
427/*
428 * Open the interface.
429 * The interface is opened whenever "ifconfig" activates it.
430 */
431
432static int uli526x_open(struct net_device *dev)
433{
434 int ret;
435 struct uli526x_board_info *db = netdev_priv(dev);
436
437 ULI526X_DBUG(0, "uli526x_open", 0);
438
439 ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
440 if (ret)
441 return ret;
442
443 /* system variable init */
444 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
445 db->tx_packet_cnt = 0;
446 db->rx_avail_cnt = 0;
447 db->link_failed = 1;
448 netif_carrier_off(dev);
449 db->wait_reset = 0;
450
451 db->NIC_capability = 0xf; /* All capability*/
452 db->PHY_reg4 = 0x1e0;
453
454 /* CR6 operation mode decision */
455 db->cr6_data |= ULI526X_TXTH_256;
456 db->cr0_data = CR0_DEFAULT;
457
458 /* Initialize ULI526X board */
459 uli526x_init(dev);
460
461 /* Active System Interface */
462 netif_wake_queue(dev);
463
 464 /* set and activate a timer process */
465 init_timer(&db->timer);
466 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
467 db->timer.data = (unsigned long)dev;
468 db->timer.function = &uli526x_timer;
469 add_timer(&db->timer);
470
471 return 0;
472}
473
474
475/* Initialize ULI526X board
476 * Reset ULI526X board
477 * Initialize TX/Rx descriptor chain structure
478 * Send the set-up frame
479 * Enable Tx/Rx machine
480 */
481
482static void uli526x_init(struct net_device *dev)
483{
484 struct uli526x_board_info *db = netdev_priv(dev);
485 unsigned long ioaddr = db->ioaddr;
486 u8 phy_tmp;
487 u16 phy_value;
488 u16 phy_reg_reset;
489
490 ULI526X_DBUG(0, "uli526x_init()", 0);
491
492 /* Reset M526x MAC controller */
493 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */
494 udelay(100);
495 outl(db->cr0_data, ioaddr + DCR0);
496 udelay(5);
497
498 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */
499 db->phy_addr = 1;
500 for(phy_tmp=0;phy_tmp<32;phy_tmp++)
501 {
 502 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//read PHY ID register to find a responding address
503 if(phy_value != 0xffff&&phy_value!=0)
504 {
505 db->phy_addr = phy_tmp;
506 break;
507 }
508 }
509 if(phy_tmp == 32)
510 printk(KERN_WARNING "Can not find the phy address!!!");
511 /* Parser SROM and media mode */
512 db->media_mode = uli526x_media_mode;
513
514 /* Phyxcer capability setting */
515 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
516 phy_reg_reset = (phy_reg_reset | 0x8000);
517 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
518 udelay(500);
519
520 /* Process Phyxcer Media Mode */
521 uli526x_set_phyxcer(db);
522
523 /* Media Mode Process */
524 if ( !(db->media_mode & ULI526X_AUTO) )
525 db->op_mode = db->media_mode; /* Force Mode */
526
 527 /* Initialize Transmit/Receive descriptor and CR3/4 */
528 uli526x_descriptor_init(db, ioaddr);
529
530 /* Init CR6 to program M526X operation */
531 update_cr6(db->cr6_data, ioaddr);
532
533 /* Send setup frame */
534 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
535
536 /* Init CR7, interrupt active bit */
537 db->cr7_data = CR7_DEFAULT;
538 outl(db->cr7_data, ioaddr + DCR7);
539
540 /* Init CR15, Tx jabber and Rx watchdog timer */
541 outl(db->cr15_data, ioaddr + DCR15);
542
543 /* Enable ULI526X Tx/Rx function */
544 db->cr6_data |= CR6_RXSC | CR6_TXSC;
545 update_cr6(db->cr6_data, ioaddr);
546}
547
548
549/*
550 * Hardware start transmission.
551 * Send a packet to media from the upper layer.
552 */
553
554static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
555{
556 struct uli526x_board_info *db = netdev_priv(dev);
557 struct tx_desc *txptr;
558 unsigned long flags;
559
560 ULI526X_DBUG(0, "uli526x_start_xmit", 0);
561
562 /* Resource flag check */
563 netif_stop_queue(dev);
564
565 /* Too large packet check */
566 if (skb->len > MAX_PACKET_SIZE) {
567 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
568 dev_kfree_skb(skb);
569 return 0;
570 }
571
572 spin_lock_irqsave(&db->lock, flags);
573
 574 /* No Tx resource check; this should never happen normally */
575 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
576 spin_unlock_irqrestore(&db->lock, flags);
577 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
578 return 1;
579 }
580
581 /* Disable NIC interrupt */
582 outl(0, dev->base_addr + DCR7);
583
584 /* transmit this packet */
585 txptr = db->tx_insert_ptr;
586 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
587 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
588
589 /* Point to next transmit free descriptor */
590 db->tx_insert_ptr = txptr->next_tx_desc;
591
592 /* Transmit Packet Process */
593 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
594 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
595 db->tx_packet_cnt++; /* Ready to send */
596 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
597 dev->trans_start = jiffies; /* saved time stamp */
598 }
599
600 /* Tx resource check */
601 if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
602 netif_wake_queue(dev);
603
604 /* Restore CR7 to enable interrupt */
605 spin_unlock_irqrestore(&db->lock, flags);
606 outl(db->cr7_data, dev->base_addr + DCR7);
607
608 /* free this SKB */
609 dev_kfree_skb(skb);
610
611 return 0;
612}
613
614
615/*
616 * Stop the interface.
 617 * The interface is stopped when it is brought down.
618 */
619
620static int uli526x_stop(struct net_device *dev)
621{
622 struct uli526x_board_info *db = netdev_priv(dev);
623 unsigned long ioaddr = dev->base_addr;
624
625 ULI526X_DBUG(0, "uli526x_stop", 0);
626
627 /* disable system */
628 netif_stop_queue(dev);
629
 630 /* delete the timer */
631 del_timer_sync(&db->timer);
632
633 /* Reset & stop ULI526X board */
634 outl(ULI526X_RESET, ioaddr + DCR0);
635 udelay(5);
636 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
637
638 /* free interrupt */
639 free_irq(dev->irq, dev);
640
641 /* free allocated rx buffer */
642 uli526x_free_rxbuffer(db);
643
644#if 0
645 /* show statistic counter */
646 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
647 db->tx_fifo_underrun, db->tx_excessive_collision,
648 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
649 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
650 db->reset_fatal, db->reset_TXtimeout);
651#endif
652
653 return 0;
654}
655
656
657/*
 658 * M5261/M5263 interrupt handler
 659 * pass received packets to the upper layer, free transmitted packets
660 */
661
662static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
663{
664 struct net_device *dev = dev_id;
665 struct uli526x_board_info *db = netdev_priv(dev);
666 unsigned long ioaddr = dev->base_addr;
667 unsigned long flags;
668
669 if (!dev) {
670 ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0);
671 return IRQ_NONE;
672 }
673
674 spin_lock_irqsave(&db->lock, flags);
675 outl(0, ioaddr + DCR7);
676
677 /* Got ULI526X status */
678 db->cr5_data = inl(ioaddr + DCR5);
679 outl(db->cr5_data, ioaddr + DCR5);
680 if ( !(db->cr5_data & 0x180c1) ) {
681 spin_unlock_irqrestore(&db->lock, flags);
682 outl(db->cr7_data, ioaddr + DCR7);
683 return IRQ_HANDLED;
684 }
685
686 /* Check system status */
687 if (db->cr5_data & 0x2000) {
688 /* system bus error happen */
689 ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
690 db->reset_fatal++;
691 db->wait_reset = 1; /* Need to RESET */
692 spin_unlock_irqrestore(&db->lock, flags);
693 return IRQ_HANDLED;
694 }
695
696 /* Received the coming packet */
697 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
698 uli526x_rx_packet(dev, db);
699
700 /* reallocate rx descriptor buffer */
701 if (db->rx_avail_cnt<RX_DESC_CNT)
702 allocate_rx_buffer(db);
703
704 /* Free the transmitted descriptor */
705 if ( db->cr5_data & 0x01)
706 uli526x_free_tx_pkt(dev, db);
707
708 /* Restore CR7 to enable interrupt mask */
709 outl(db->cr7_data, ioaddr + DCR7);
710
711 spin_unlock_irqrestore(&db->lock, flags);
712 return IRQ_HANDLED;
713}
714
715
716/*
717 * Free TX resource after TX complete
718 */
719
720static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db)
721{
722 struct tx_desc *txptr;
723 u32 tdes0;
724
725 txptr = db->tx_remove_ptr;
726 while(db->tx_packet_cnt) {
727 tdes0 = le32_to_cpu(txptr->tdes0);
728 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
729 if (tdes0 & 0x80000000)
730 break;
731
732 /* A packet sent completed */
733 db->tx_packet_cnt--;
734 db->stats.tx_packets++;
735
736 /* Transmit statistic counter */
737 if ( tdes0 != 0x7fffffff ) {
738 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
739 db->stats.collisions += (tdes0 >> 3) & 0xf;
740 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
741 if (tdes0 & TDES0_ERR_MASK) {
742 db->stats.tx_errors++;
743 if (tdes0 & 0x0002) { /* UnderRun */
744 db->tx_fifo_underrun++;
745 if ( !(db->cr6_data & CR6_SFT) ) {
746 db->cr6_data = db->cr6_data | CR6_SFT;
747 update_cr6(db->cr6_data, db->ioaddr);
748 }
749 }
750 if (tdes0 & 0x0100)
751 db->tx_excessive_collision++;
752 if (tdes0 & 0x0200)
753 db->tx_late_collision++;
754 if (tdes0 & 0x0400)
755 db->tx_no_carrier++;
756 if (tdes0 & 0x0800)
757 db->tx_loss_carrier++;
758 if (tdes0 & 0x4000)
759 db->tx_jabber_timeout++;
760 }
761 }
762
763 txptr = txptr->next_tx_desc;
764 }/* End of while */
765
766 /* Update TX remove pointer to next */
767 db->tx_remove_ptr = txptr;
768
769 /* Resource available check */
770 if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
771 netif_wake_queue(dev); /* Active upper layer, send again */
772}
773
774
775/*
 776 * Receive incoming packets and pass them to the upper layer
777 */
778
779static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
780{
781 struct rx_desc *rxptr;
782 struct sk_buff *skb;
783 int rxlen;
784 u32 rdes0;
785
786 rxptr = db->rx_ready_ptr;
787
788 while(db->rx_avail_cnt) {
789 rdes0 = le32_to_cpu(rxptr->rdes0);
790 if (rdes0 & 0x80000000) /* packet owner check */
791 {
792 break;
793 }
794
795 db->rx_avail_cnt--;
796 db->interval_rx_cnt++;
797
798 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
799 if ( (rdes0 & 0x300) != 0x300) {
800 /* A packet without First/Last flag */
801 /* reuse this SKB */
802 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
803 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
804 } else {
805 /* A packet with First/Last flag */
806 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
807
808 /* error summary bit check */
809 if (rdes0 & 0x8000) {
 810 /* This is an error packet */
811 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
812 db->stats.rx_errors++;
813 if (rdes0 & 1)
814 db->stats.rx_fifo_errors++;
815 if (rdes0 & 2)
816 db->stats.rx_crc_errors++;
817 if (rdes0 & 0x80)
818 db->stats.rx_length_errors++;
819 }
820
821 if ( !(rdes0 & 0x8000) ||
822 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
823 skb = rxptr->rx_skb_ptr;
824
825 /* Good packet, send to upper layer */
 826 /* Short packets are copied into a new SKB */
827 if ( (rxlen < RX_COPY_SIZE) &&
828 ( (skb = dev_alloc_skb(rxlen + 2) )
829 != NULL) ) {
830 /* size less than COPY_SIZE, allocate a rxlen SKB */
831 skb->dev = dev;
832 skb_reserve(skb, 2); /* 16byte align */
833 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
834 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
835 } else {
836 skb->dev = dev;
837 skb_put(skb, rxlen);
838 }
839 skb->protocol = eth_type_trans(skb, dev);
840 netif_rx(skb);
841 dev->last_rx = jiffies;
842 db->stats.rx_packets++;
843 db->stats.rx_bytes += rxlen;
844
845 } else {
846 /* Reuse SKB buffer when the packet is error */
847 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
848 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
849 }
850 }
851
852 rxptr = rxptr->next_rx_desc;
853 }
854
855 db->rx_ready_ptr = rxptr;
856}
857
858
859/*
860 * Get statistics from driver.
861 */
862
863static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
864{
865 struct uli526x_board_info *db = netdev_priv(dev);
866
867 ULI526X_DBUG(0, "uli526x_get_stats", 0);
868 return &db->stats;
869}
870
871
872/*
873 * Set ULI526X multicast address
874 */
875
876static void uli526x_set_filter_mode(struct net_device * dev)
877{
878 struct uli526x_board_info *db = dev->priv;
879 unsigned long flags;
880
881 ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
882 spin_lock_irqsave(&db->lock, flags);
883
884 if (dev->flags & IFF_PROMISC) {
885 ULI526X_DBUG(0, "Enable PROM Mode", 0);
886 db->cr6_data |= CR6_PM | CR6_PBF;
887 update_cr6(db->cr6_data, db->ioaddr);
888 spin_unlock_irqrestore(&db->lock, flags);
889 return;
890 }
891
892 if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
893 ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
894 db->cr6_data &= ~(CR6_PM | CR6_PBF);
895 db->cr6_data |= CR6_PAM;
896 spin_unlock_irqrestore(&db->lock, flags);
897 return;
898 }
899
900 ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
901 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
902 spin_unlock_irqrestore(&db->lock, flags);
903}
904
905static void
906ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
907{
908 ecmd->supported = (SUPPORTED_10baseT_Half |
909 SUPPORTED_10baseT_Full |
910 SUPPORTED_100baseT_Half |
911 SUPPORTED_100baseT_Full |
912 SUPPORTED_Autoneg |
913 SUPPORTED_MII);
914
915 ecmd->advertising = (ADVERTISED_10baseT_Half |
916 ADVERTISED_10baseT_Full |
917 ADVERTISED_100baseT_Half |
918 ADVERTISED_100baseT_Full |
919 ADVERTISED_Autoneg |
920 ADVERTISED_MII);
921
922
923 ecmd->port = PORT_MII;
924 ecmd->phy_address = db->phy_addr;
925
926 ecmd->transceiver = XCVR_EXTERNAL;
927
928 ecmd->speed = 10;
929 ecmd->duplex = DUPLEX_HALF;
930
931 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
932 {
933 ecmd->speed = 100;
934 }
935 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
936 {
937 ecmd->duplex = DUPLEX_FULL;
938 }
939 if(db->link_failed)
940 {
941 ecmd->speed = -1;
942 ecmd->duplex = -1;
943 }
944
945 if (db->media_mode & ULI526X_AUTO)
946 {
947 ecmd->autoneg = AUTONEG_ENABLE;
948 }
949}
950
951static void netdev_get_drvinfo(struct net_device *dev,
952 struct ethtool_drvinfo *info)
953{
954 struct uli526x_board_info *np = netdev_priv(dev);
955
956 strcpy(info->driver, DRV_NAME);
957 strcpy(info->version, DRV_VERSION);
958 if (np->pdev)
959 strcpy(info->bus_info, pci_name(np->pdev));
960 else
961 sprintf(info->bus_info, "EISA 0x%lx %d",
962 dev->base_addr, dev->irq);
963}
964
965static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
966 struct uli526x_board_info *np = netdev_priv(dev);
967
968 ULi_ethtool_gset(np, cmd);
969
970 return 0;
971}
972
973static u32 netdev_get_link(struct net_device *dev) {
974 struct uli526x_board_info *np = netdev_priv(dev);
975
976 if(np->link_failed)
977 return 0;
978 else
979 return 1;
980}
981
982static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
983{
984 wol->supported = WAKE_PHY | WAKE_MAGIC;
985 wol->wolopts = 0;
986}
987
988static struct ethtool_ops netdev_ethtool_ops = {
989 .get_drvinfo = netdev_get_drvinfo,
990 .get_settings = netdev_get_settings,
991 .get_link = netdev_get_link,
992 .get_wol = uli526x_get_wol,
993};
994
995/*
996 * A periodic timer routine
997 * Dynamic media sense, allocate Rx buffer...
998 */
999
1000static void uli526x_timer(unsigned long data)
1001{
1002 u32 tmp_cr8;
1003 unsigned char tmp_cr12=0;
1004 struct net_device *dev = (struct net_device *) data;
1005 struct uli526x_board_info *db = netdev_priv(dev);
1006 unsigned long flags;
1007 u8 TmpSpeed=10;
1008
1009 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1010 spin_lock_irqsave(&db->lock, flags);
1011
1012
1013 /* Dynamic reset ULI526X : system error or transmit time-out */
1014 tmp_cr8 = inl(db->ioaddr + DCR8);
1015 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1016 db->reset_cr8++;
1017 db->wait_reset = 1;
1018 }
1019 db->interval_rx_cnt = 0;
1020
1021 /* TX polling kick monitor */
1022 if ( db->tx_packet_cnt &&
1023 time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
1024 outl(0x1, dev->base_addr + DCR1); // Tx polling again
1025
1026 // TX Timeout
1027 if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
1028 db->reset_TXtimeout++;
1029 db->wait_reset = 1;
1030 printk( "%s: Tx timeout - resetting\n",
1031 dev->name);
1032 }
1033 }
1034
1035 if (db->wait_reset) {
1036 ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1037 db->reset_count++;
1038 uli526x_dynamic_reset(dev);
1039 db->timer.expires = ULI526X_TIMER_WUT;
1040 add_timer(&db->timer);
1041 spin_unlock_irqrestore(&db->lock, flags);
1042 return;
1043 }
1044
1045 /* Link status check, Dynamic media type change */
1046 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
1047 tmp_cr12 = 3;
1048
1049 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1050 /* Link Failed */
1051 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1052 netif_carrier_off(dev);
1053 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
1054 db->link_failed = 1;
1055
1056 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
 1057 /* not needed in AUTO mode */
1058 if ( !(db->media_mode & 0x8) )
1059 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1060
1061 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1062 if (db->media_mode & ULI526X_AUTO) {
1063 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1064 update_cr6(db->cr6_data, db->ioaddr);
1065 }
1066 } else
1067 if ((tmp_cr12 & 0x3) && db->link_failed) {
1068 ULI526X_DBUG(0, "Link link OK", tmp_cr12);
1069 db->link_failed = 0;
1070
1071 /* Auto Sense Speed */
1072 if ( (db->media_mode & ULI526X_AUTO) &&
1073 uli526x_sense_speed(db) )
1074 db->link_failed = 1;
1075 uli526x_process_mode(db);
1076
1077 if(db->link_failed==0)
1078 {
1079 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
1080 {
1081 TmpSpeed = 100;
1082 }
1083 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
1084 {
1085 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
1086 }
1087 else
1088 {
1089 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
1090 }
1091 netif_carrier_on(dev);
1092 }
1093 /* SHOW_MEDIA_TYPE(db->op_mode); */
1094 }
1095 else if(!(tmp_cr12 & 0x3) && db->link_failed)
1096 {
1097 if(db->init==1)
1098 {
1099 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
1100 netif_carrier_off(dev);
1101 }
1102 }
1103 db->init=0;
1104
1105 /* Timer active again */
1106 db->timer.expires = ULI526X_TIMER_WUT;
1107 add_timer(&db->timer);
1108 spin_unlock_irqrestore(&db->lock, flags);
1109}
1110
1111
1112/*
1113 * Dynamic reset the ULI526X board
1114 * Stop ULI526X board
1115 * Free Tx/Rx allocated memory
1116 * Reset ULI526X board
1117 * Re-initialize ULI526X board
1118 */
1119
1120static void uli526x_dynamic_reset(struct net_device *dev)
1121{
1122 struct uli526x_board_info *db = netdev_priv(dev);
1123
1124 ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
1125
 1126 /* Stop MAC controller */
1127 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1128 update_cr6(db->cr6_data, dev->base_addr);
1129 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1130 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1131
1132 /* Disable upper layer interface */
1133 netif_stop_queue(dev);
1134
1135 /* Free Rx Allocate buffer */
1136 uli526x_free_rxbuffer(db);
1137
1138 /* system variable init */
1139 db->tx_packet_cnt = 0;
1140 db->rx_avail_cnt = 0;
1141 db->link_failed = 1;
1142 db->init=1;
1143 db->wait_reset = 0;
1144
1145 /* Re-initialize ULI526X board */
1146 uli526x_init(dev);
1147
1148 /* Restart upper layer interface */
1149 netif_wake_queue(dev);
1150}
1151
1152
1153/*
 1154 * free all allocated rx buffers
1155 */
1156
1157static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
1158{
1159 ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
1160
1161 /* free allocated rx buffer */
1162 while (db->rx_avail_cnt) {
1163 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1164 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1165 db->rx_avail_cnt--;
1166 }
1167}
1168
1169
1170/*
1171 * Reuse the SK buffer
1172 */
1173
1174static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
1175{
1176 struct rx_desc *rxptr = db->rx_insert_ptr;
1177
1178 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1179 rxptr->rx_skb_ptr = skb;
1180 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1181 wmb();
1182 rxptr->rdes0 = cpu_to_le32(0x80000000);
1183 db->rx_avail_cnt++;
1184 db->rx_insert_ptr = rxptr->next_rx_desc;
1185 } else
1186 ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1187}
1188
1189
1190/*
1191 * Initialize transmit/Receive descriptor
1192 * Using Chain structure, and allocate Tx/Rx buffer
1193 */
1194
1195static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
1196{
1197 struct tx_desc *tmp_tx;
1198 struct rx_desc *tmp_rx;
1199 unsigned char *tmp_buf;
1200 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1201 dma_addr_t tmp_buf_dma;
1202 int i;
1203
1204 ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
1205
1206 /* tx descriptor start pointer */
1207 db->tx_insert_ptr = db->first_tx_desc;
1208 db->tx_remove_ptr = db->first_tx_desc;
1209 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1210
1211 /* rx descriptor start pointer */
1212 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1213 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1214 db->rx_insert_ptr = db->first_rx_desc;
1215 db->rx_ready_ptr = db->first_rx_desc;
1216 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1217
1218 /* Init Transmit chain */
1219 tmp_buf = db->buf_pool_start;
1220 tmp_buf_dma = db->buf_pool_dma_start;
1221 tmp_tx_dma = db->first_tx_desc_dma;
1222 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1223 tmp_tx->tx_buf_ptr = tmp_buf;
1224 tmp_tx->tdes0 = cpu_to_le32(0);
1225 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1226 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1227 tmp_tx_dma += sizeof(struct tx_desc);
1228 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1229 tmp_tx->next_tx_desc = tmp_tx + 1;
1230 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1231 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1232 }
1233 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1234 tmp_tx->next_tx_desc = db->first_tx_desc;
1235
1236 /* Init Receive descriptor chain */
1237 tmp_rx_dma=db->first_rx_desc_dma;
1238 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1239 tmp_rx->rdes0 = cpu_to_le32(0);
1240 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1241 tmp_rx_dma += sizeof(struct rx_desc);
1242 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1243 tmp_rx->next_rx_desc = tmp_rx + 1;
1244 }
1245 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1246 tmp_rx->next_rx_desc = db->first_rx_desc;
1247
1248 /* pre-allocate Rx buffer */
1249 allocate_rx_buffer(db);
1250}
1251
1252
1253/*
1254 * Update CR6 value
 1255 * Write the new CR6 value and wait briefly for it to take effect
1256 */
1257
1258static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1259{
1260
1261 outl(cr6_data, ioaddr + DCR6);
1262 udelay(5);
1263}
1264
1265
1266/*
1267 * Send a setup frame for M5261/M5263
1268 * This setup frame initialize ULI526X address filter mode
1269 */
1270
1271static void send_filter_frame(struct net_device *dev, int mc_cnt)
1272{
1273 struct uli526x_board_info *db = netdev_priv(dev);
1274 struct dev_mc_list *mcptr;
1275 struct tx_desc *txptr;
1276 u16 * addrptr;
1277 u32 * suptr;
1278 int i;
1279
1280 ULI526X_DBUG(0, "send_filter_frame()", 0);
1281
1282 txptr = db->tx_insert_ptr;
1283 suptr = (u32 *) txptr->tx_buf_ptr;
1284
1285 /* Node address */
1286 addrptr = (u16 *) dev->dev_addr;
1287 *suptr++ = addrptr[0];
1288 *suptr++ = addrptr[1];
1289 *suptr++ = addrptr[2];
1290
1291 /* broadcast address */
1292 *suptr++ = 0xffff;
1293 *suptr++ = 0xffff;
1294 *suptr++ = 0xffff;
1295
1296 /* fit the multicast address */
1297 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1298 addrptr = (u16 *) mcptr->dmi_addr;
1299 *suptr++ = addrptr[0];
1300 *suptr++ = addrptr[1];
1301 *suptr++ = addrptr[2];
1302 }
1303
1304 for (; i<14; i++) {
1305 *suptr++ = 0xffff;
1306 *suptr++ = 0xffff;
1307 *suptr++ = 0xffff;
1308 }
1309
1310 /* prepare the setup frame */
1311 db->tx_insert_ptr = txptr->next_tx_desc;
1312 txptr->tdes1 = cpu_to_le32(0x890000c0);
1313
1314 /* Resource Check and Send the setup packet */
1315 if (db->tx_packet_cnt < TX_DESC_CNT) {
 1316 /* Tx resource available */
1317 db->tx_packet_cnt++;
1318 txptr->tdes0 = cpu_to_le32(0x80000000);
1319 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1320 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1321 update_cr6(db->cr6_data, dev->base_addr);
1322 dev->trans_start = jiffies;
1323 } else
1324 printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
1325}
1326
1327
1328/*
 1329 * Allocate rx buffers,
 1330 * filling as many Rx descriptors as possible
1331 */
1332
1333static void allocate_rx_buffer(struct uli526x_board_info *db)
1334{
1335 struct rx_desc *rxptr;
1336 struct sk_buff *skb;
1337
1338 rxptr = db->rx_insert_ptr;
1339
1340 while(db->rx_avail_cnt < RX_DESC_CNT) {
1341 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1342 break;
1343 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1344 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1345 wmb();
1346 rxptr->rdes0 = cpu_to_le32(0x80000000);
1347 rxptr = rxptr->next_rx_desc;
1348 db->rx_avail_cnt++;
1349 }
1350
1351 db->rx_insert_ptr = rxptr;
1352}
1353
1354
1355/*
1356 * Read one word data from the serial ROM
1357 */
1358
1359static u16 read_srom_word(long ioaddr, int offset)
1360{
1361 int i;
1362 u16 srom_data = 0;
1363 long cr9_ioaddr = ioaddr + DCR9;
1364
1365 outl(CR9_SROM_READ, cr9_ioaddr);
1366 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1367
1368 /* Send the Read Command 110b */
1369 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1370 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1371 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1372
1373 /* Send the offset */
1374 for (i = 5; i >= 0; i--) {
1375 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1376 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1377 }
1378
1379 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1380
1381 for (i = 16; i > 0; i--) {
1382 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1383 udelay(5);
1384 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1385 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1386 udelay(5);
1387 }
1388
1389 outl(CR9_SROM_READ, cr9_ioaddr);
1390 return srom_data;
1391}
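/*
 * Illustration only: the offset argument above is clocked out as six address
 * bits, so the serial ROM holds at most 64 16-bit words.  A hypothetical
 * caller that dumps the whole ROM (the buffer handling is an assumption, not
 * driver code) could look like:
 *
 *	static void dump_srom(long ioaddr, u16 *buf)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 64; i++)
 *			buf[i] = read_srom_word(ioaddr, i);
 *	}
 */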
1392
1393
1394/*
1395 * Auto sense the media mode
1396 */
1397
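/*
 * For reference, the decoding below works as follows: MII register 1 is read
 * twice (the link bit is latched) and the 0x24 test checks both "link up"
 * (bit 2) and "auto-negotiation complete" (bit 5).  MII register 5 is the
 * link partner ability word; its bits 5-8 advertise 10M half, 10M full,
 * 100M half and 100M full duplex, and masking with 0x01e0 then shifting left
 * by 7 turns them into the 0x1000/0x2000/0x4000/0x8000 codes used by the
 * switch statement.
 */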
1398static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1399{
1400 u8 ErrFlag = 0;
1401 u16 phy_mode;
1402
1403 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1404 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1405
1406 if ( (phy_mode & 0x24) == 0x24 ) {
1407
1408 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
1409 if(phy_mode&0x8000)
1410 phy_mode = 0x8000;
1411 else if(phy_mode&0x4000)
1412 phy_mode = 0x4000;
1413 else if(phy_mode&0x2000)
1414 phy_mode = 0x2000;
1415 else
1416 phy_mode = 0x1000;
1417
1418 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1419 switch (phy_mode) {
1420 case 0x1000: db->op_mode = ULI526X_10MHF; break;
1421 case 0x2000: db->op_mode = ULI526X_10MFD; break;
1422 case 0x4000: db->op_mode = ULI526X_100MHF; break;
1423 case 0x8000: db->op_mode = ULI526X_100MFD; break;
1424 default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
1425 }
1426 } else {
1427 db->op_mode = ULI526X_10MHF;
1428 ULI526X_DBUG(0, "Link Failed :", phy_mode);
1429 ErrFlag = 1;
1430 }
1431
1432 return ErrFlag;
1433}
1434
1435
1436/*
1437 * Set 10/100 phyxcer capability
1438 * AUTO mode : phyxcer register 4 advertises the NIC's full capability
1439 * Force mode: phyxcer register 4 advertises only the forced media
1440 */
1441
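/*
 * For reference, the values ORed into register 4 (the auto-negotiation
 * advertisement register) below are the standard MII ability bits:
 * 0x20 = 10M half, 0x40 = 10M full, 0x80 = 100M half, 0x100 = 100M full.
 * If none of these bits ends up set, the code falls back to advertising the
 * NIC's full capability and re-enables auto-negotiation.
 */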
1442static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1443{
1444 u16 phy_reg;
1445
1446 /* Phyxcer capability setting */
1447 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1448
1449 if (db->media_mode & ULI526X_AUTO) {
1450 /* AUTO Mode */
1451 phy_reg |= db->PHY_reg4;
1452 } else {
1453 /* Force Mode */
1454 switch(db->media_mode) {
1455 case ULI526X_10MHF: phy_reg |= 0x20; break;
1456 case ULI526X_10MFD: phy_reg |= 0x40; break;
1457 case ULI526X_100MHF: phy_reg |= 0x80; break;
1458 case ULI526X_100MFD: phy_reg |= 0x100; break;
1459 }
1460
1461 }
1462
1463 /* Write new capability to Phyxcer Reg4 */
1464 if ( !(phy_reg & 0x01e0)) {
1465 phy_reg|=db->PHY_reg4;
1466 db->media_mode|=ULI526X_AUTO;
1467 }
1468 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1469
1470 /* Restart Auto-Negotiation */
1471 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1472 udelay(50);
1473}
1474
1475
1476/*
1477 * Process op-mode
1478 * AUTO mode : PHY controller in auto-negotiation mode
1479 * Force mode: PHY controller forced to a fixed media when attached to a HUB,
1480 * or N-way with forced capability when attached to a SWITCH
1481 */
1482
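/*
 * For reference, the register 0 (BMCR) values written below in force mode
 * combine two standard MII control bits: 0x2000 selects 100 Mb/s and 0x100
 * selects full duplex, giving 0x0, 0x100, 0x2000 and 0x2100 for the four
 * media types.  Register 6 bit 0 indicates whether the link partner is
 * N-way (auto-negotiation) capable.
 */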
1483static void uli526x_process_mode(struct uli526x_board_info *db)
1484{
1485 u16 phy_reg;
1486
1487 /* Full Duplex Mode Check */
1488 if (db->op_mode & 0x4)
1489 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1490 else
1491 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1492
1493 update_cr6(db->cr6_data, db->ioaddr);
1494
1495 /* Extra handling is needed when the 10/100M phyxcer runs in force mode */
1496 if ( !(db->media_mode & 0x8)) {
1497 /* Force mode */
1498 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1499 if ( !(phy_reg & 0x1) ) {
1500 /* partner without N-Way capability */
1501 phy_reg = 0x0;
1502 switch(db->op_mode) {
1503 case ULI526X_10MHF: phy_reg = 0x0; break;
1504 case ULI526X_10MFD: phy_reg = 0x100; break;
1505 case ULI526X_100MHF: phy_reg = 0x2000; break;
1506 case ULI526X_100MFD: phy_reg = 0x2100; break;
1507 }
1508 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1509 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1510 }
1511 }
1512}
1513
1514
1515/*
1516 * Write a word to Phy register
1517 */
1518
1519static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1520{
1521 u16 i;
1522 unsigned long ioaddr;
1523
1524 if(chip_id == PCI_ULI5263_ID)
1525 {
1526 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1527 return;
1528 }
1529 /* M5261/M5263 Chip */
1530 ioaddr = iobase + DCR9;
1531
1532 /* Send 35 synchronization (preamble) clocks to the PHY controller */
1533 for (i = 0; i < 35; i++)
1534 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1535
1536 /* Send start command(01) to Phy */
1537 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1538 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1539
1540 /* Send write command(01) to Phy */
1541 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1542 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1543
1544 /* Send Phy address */
1545 for (i = 0x10; i > 0; i = i >> 1)
1546 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1547
1548 /* Send register address */
1549 for (i = 0x10; i > 0; i = i >> 1)
1550 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1551
1552 /* write turnaround bits */
1553 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1554 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1555
1556 /* Write a word data to PHY controller */
1557 for ( i = 0x8000; i > 0; i >>= 1)
1558 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1559
1560}
1561
1562
1563/*
1564 * Read a word data from phy register
1565 */
1566
1567static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1568{
1569 int i;
1570 u16 phy_data;
1571 unsigned long ioaddr;
1572
1573 if(chip_id == PCI_ULI5263_ID)
1574 return phy_readby_cr10(iobase, phy_addr, offset);
1575 /* M5261/M5263 Chip */
1576 ioaddr = iobase + DCR9;
1577
1578 /* Send 35 synchronization (preamble) clocks to the PHY controller */
1579 for (i = 0; i < 35; i++)
1580 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1581
1582 /* Send start command(01) to Phy */
1583 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1584 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1585
1586 /* Send read command(10) to Phy */
1587 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1588 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1589
1590 /* Send Phy address */
1591 for (i = 0x10; i > 0; i = i >> 1)
1592 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1593
1594 /* Send register address */
1595 for (i = 0x10; i > 0; i = i >> 1)
1596 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1597
1598 /* Skip transition state */
1599 phy_read_1bit(ioaddr, chip_id);
1600
1601 /* read 16bit data */
1602 for (phy_data = 0, i = 0; i < 16; i++) {
1603 phy_data <<= 1;
1604 phy_data |= phy_read_1bit(ioaddr, chip_id);
1605 }
1606
1607 return phy_data;
1608}
1609
1610static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1611{
1612 unsigned long ioaddr,cr10_value;
1613
1614 ioaddr = iobase + DCR10;
1615 cr10_value = phy_addr;
1616 cr10_value = (cr10_value<<5) + offset;
1617 cr10_value = (cr10_value<<16) + 0x08000000;
1618 outl(cr10_value,ioaddr);
1619 udelay(1);
1620 while(1)
1621 {
1622 cr10_value = inl(ioaddr);
1623 if(cr10_value&0x10000000)
1624 break;
1625 }
1626 return (cr10_value&0x0ffff);
1627}
1628
1629static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
1630{
1631 unsigned long ioaddr,cr10_value;
1632
1633 ioaddr = iobase + DCR10;
1634 cr10_value = phy_addr;
1635 cr10_value = (cr10_value<<5) + offset;
1636 cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
1637 outl(cr10_value,ioaddr);
1638 udelay(1);
1639}
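/*
 * For reference, the CR10 command word assembled by the two helpers above
 * encodes: bits 15:0 = write data, bits 20:16 = register offset,
 * bits 25:21 = PHY address, bit 26 (0x04000000) = start a write,
 * bit 27 (0x08000000) = start a read, and bit 28 (0x10000000) is the
 * completion flag polled by phy_readby_cr10() before the data in the low
 * 16 bits is returned.
 */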
1640/*
1641 * Write one bit data to Phy Controller
1642 */
1643
1644static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1645{
1646 outl(phy_data , ioaddr); /* MII Clock Low */
1647 udelay(1);
1648 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1649 udelay(1);
1650 outl(phy_data , ioaddr); /* MII Clock Low */
1651 udelay(1);
1652}
1653
1654
1655/*
1656 * Read one bit phy data from PHY controller
1657 */
1658
1659static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1660{
1661 u16 phy_data;
1662
1663 outl(0x50000 , ioaddr);
1664 udelay(1);
1665 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1666 outl(0x40000 , ioaddr);
1667 udelay(1);
1668
1669 return phy_data;
1670}
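/*
 * For reference, phy_read()/phy_write() above bit-bang a standard MDIO
 * management frame through DCR9: a preamble of ones, a start pattern (01),
 * an opcode (01 for write, 10 for read), the 5-bit PHY address and 5-bit
 * register address sent MSB first, a turnaround, and finally 16 data bits
 * MSB first.  phy_write_1bit()/phy_read_1bit() generate one MDC clock cycle
 * per call.
 */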
1671
1672
1673static struct pci_device_id uli526x_pci_tbl[] = {
1674 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1675 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1676 { 0, }
1677};
1678MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
1679
1680
1681static struct pci_driver uli526x_driver = {
1682 .name = "uli526x",
1683 .id_table = uli526x_pci_tbl,
1684 .probe = uli526x_init_one,
1685 .remove = __devexit_p(uli526x_remove_one),
1686};
1687
1688MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
1689MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
1690MODULE_LICENSE("GPL");
1691
1692MODULE_PARM(debug, "i");
1693MODULE_PARM(mode, "i");
1694MODULE_PARM(cr6set, "i");
1695MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
1696MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
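/*
 * Example (illustrative, not from the original source): the media can be
 * forced at load time with something like
 *	modprobe uli526x mode=<one of the ULI526X_* media values>
 * Any other value, or omitting the parameter, selects auto-negotiation, as
 * handled in uli526x_init_module() below.
 */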
1697
1698/* Description:
1699 * When the user loads the module with insmod, the kernel invokes this
1700 * init routine to register the PCI driver.
1701 */
1702
1703static int __init uli526x_init_module(void)
1704{
1705 int rc;
1706
1707 printk(version);
1708 printed_version = 1;
1709
1710 ULI526X_DBUG(0, "init_module() ", debug);
1711
1712 if (debug)
1713 uli526x_debug = debug; /* set debug flag */
1714 if (cr6set)
1715 uli526x_cr6_user_set = cr6set;
1716
1717 switch(mode) {
1718 case ULI526X_10MHF:
1719 case ULI526X_100MHF:
1720 case ULI526X_10MFD:
1721 case ULI526X_100MFD:
1722 uli526x_media_mode = mode;
1723 break;
1724 default:uli526x_media_mode = ULI526X_AUTO;
1725 break;
1726 }
1727
1728 rc = pci_module_init(&uli526x_driver);
1729 if (rc < 0)
1730 return rc;
1731
1732 return 0;
1733}
1734
1735
1736/*
1737 * Description:
1738 * When the user removes the module with rmmod, the kernel invokes this
1739 * exit routine to unregister the PCI driver.
1740 */
1741
1742static void __exit uli526x_cleanup_module(void)
1743{
1744 ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
1745 pci_unregister_driver(&uli526x_driver);
1746}
1747
1748module_init(uli526x_init_module);
1749module_exit(uli526x_cleanup_module);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index effab0b9adca..50b8c6754b1e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -18,6 +18,9 @@
18/* 18/*
19 * Changes: 19 * Changes:
20 * 20 *
21 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22 * Add TUNSETLINK ioctl to set the link encapsulation
23 *
21 * Mark Smith <markzzzsmith@yahoo.com.au> 24 * Mark Smith <markzzzsmith@yahoo.com.au>
22 * Use random_ether_addr() for tap MAC address. 25 * Use random_ether_addr() for tap MAC address.
23 * 26 *
@@ -612,6 +615,18 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
612 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 615 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
613 break; 616 break;
614 617
618 case TUNSETLINK:
619 /* Only allow setting the type when the interface is down */
620 if (tun->dev->flags & IFF_UP) {
621 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
622 tun->dev->name);
623 return -EBUSY;
624 } else {
625 tun->dev->type = (int) arg;
626 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
627 }
628 break;
629
615#ifdef TUN_DEBUG 630#ifdef TUN_DEBUG
616 case TUNSETDEBUG: 631 case TUNSETDEBUG:
617 tun->debug = arg; 632 tun->debug = arg;
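For context, the new TUNSETLINK ioctl may only be issued while the interface is down. A minimal userspace sketch follows (the helper name is illustrative, the caller chooses the ARPHRD_* type, and TUNSETLINK is assumed to be exported through <linux/if_tun.h> as part of this change):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/if.h>
	#include <linux/if_tun.h>

	/* Open a tun device and set its link encapsulation before it is brought up. */
	static int open_tun_with_linktype(const char *name, int type)
	{
		struct ifreq ifr;
		int fd = open("/dev/net/tun", O_RDWR);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
		if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
		    ioctl(fd, TUNSETLINK, type) < 0) {	/* fails with -EBUSY if the interface is up */
			close(fd);
			return -1;
		}
		return fd;
	}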
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index a63f6a2cc4f7..cdd4c09c2d90 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -61,7 +61,7 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
61 61
62 62
63static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, 63static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
64 struct packet_type *p) 64 struct packet_type *p, struct net_device *orig_dev)
65{ 65{
66 hdlc_device *hdlc = dev_to_hdlc(dev); 66 hdlc_device *hdlc = dev_to_hdlc(dev);
67 if (hdlc->proto.netif_rx) 67 if (hdlc->proto.netif_rx)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 7f2e3653c5e5..6c302e9dbca2 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -86,7 +86,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
86/* 86/*
87 * Receive a LAPB frame via an ethernet interface. 87 * Receive a LAPB frame via an ethernet interface.
88 */ 88 */
89static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) 89static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
90{ 90{
91 int len, err; 91 int len, err;
92 struct lapbethdev *lapbeth; 92 struct lapbethdev *lapbeth;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index c5f5e62aab8b..0497dbdb8631 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
@@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
445void s508_s514_lock(sdla_t *card, unsigned long *smp_flags); 445void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
446 446
447unsigned short calc_checksum (char *, int); 447unsigned short calc_checksum (char *, int);
448static int setup_fr_header(struct sk_buff** skb, 448static int setup_fr_header(struct sk_buff *skb,
449 struct net_device* dev, char op_mode); 449 struct net_device* dev, char op_mode);
450 450
451 451
@@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev)
1372 /* Move the if_header() code to here. By inserting frame 1372 /* Move the if_header() code to here. By inserting frame
1373 * relay header in if_header() we would break the 1373 * relay header in if_header() we would break the
1374 * tcpdump and other packet sniffers */ 1374 * tcpdump and other packet sniffers */
1375 chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby); 1375 chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby);
1376 if (chan->fr_header_len < 0 ){ 1376 if (chan->fr_header_len < 0 ){
1377 ++chan->ifstats.tx_dropped; 1377 ++chan->ifstats.tx_dropped;
1378 ++card->wandev.stats.tx_dropped; 1378 ++card->wandev.stats.tx_dropped;
@@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev,
1597 return 1; 1597 return 1;
1598 } 1598 }
1599 1599
1600 skb_unlink(skb);
1601
1602 chan->transmit_length = len; 1600 chan->transmit_length = len;
1603 chan->delay_skb = skb; 1601 chan->delay_skb = skb;
1604 1602
@@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card)
4871 } 4869 }
4872} 4870}
4873 4871
4874static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, 4872static int setup_fr_header(struct sk_buff *skb, struct net_device* dev,
4875 char op_mode) 4873 char op_mode)
4876{ 4874{
4877 struct sk_buff *skb = *skb_orig;
4878 fr_channel_t *chan=dev->priv; 4875 fr_channel_t *chan=dev->priv;
4879 4876
4880 if (op_mode == WANPIPE){ 4877 if (op_mode == WANPIPE) {
4881
4882 chan->fr_header[0]=Q922_UI; 4878 chan->fr_header[0]=Q922_UI;
4883 4879
4884 switch (htons(skb->protocol)){ 4880 switch (htons(skb->protocol)){
4885
4886 case ETH_P_IP: 4881 case ETH_P_IP:
4887 chan->fr_header[1]=NLPID_IP; 4882 chan->fr_header[1]=NLPID_IP;
4888 break; 4883 break;
@@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
4894 } 4889 }
4895 4890
4896 /* If we are in bridging mode, we must apply 4891 /* If we are in bridging mode, we must apply
4897 * an Ethernet header */ 4892 * an Ethernet header
4898 if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){ 4893 */
4899 4894 if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) {
4900
4901 /* Encapsulate the packet as a bridged Ethernet frame. */ 4895 /* Encapsulate the packet as a bridged Ethernet frame. */
4902#ifdef DEBUG 4896#ifdef DEBUG
4903 printk(KERN_INFO "%s: encapsulating skb for frame relay\n", 4897 printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
4904 dev->name); 4898 dev->name);
4905#endif 4899#endif
4906
4907 chan->fr_header[0] = 0x03; 4900 chan->fr_header[0] = 0x03;
4908 chan->fr_header[1] = 0x00; 4901 chan->fr_header[1] = 0x00;
4909 chan->fr_header[2] = 0x80; 4902 chan->fr_header[2] = 0x80;
@@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
4916 /* Yuck. */ 4909 /* Yuck. */
4917 skb->protocol = ETH_P_802_3; 4910 skb->protocol = ETH_P_802_3;
4918 return 8; 4911 return 8;
4919
4920 } 4912 }
4921 4913
4922 return 0; 4914 return 0;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 84b65c60c799..b56a7b516d24 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -1440,6 +1440,7 @@ static void sppp_print_bytes (u_char *p, u16 len)
1440 * @skb: The buffer to process 1440 * @skb: The buffer to process
1441 * @dev: The device it arrived on 1441 * @dev: The device it arrived on
1442 * @p: Unused 1442 * @p: Unused
1443 * @orig_dev: Unused
1443 * 1444 *
1444 * Protocol glue. This drives the deferred processing mode the poorer 1445 * Protocol glue. This drives the deferred processing mode the poorer
1445 * cards use. This can be called directly by cards that do not have 1446 * cards use. This can be called directly by cards that do not have
@@ -1447,7 +1448,7 @@ static void sppp_print_bytes (u_char *p, u16 len)
1447 * after interrupt servicing to process frames queued via netif_rx. 1448 * after interrupt servicing to process frames queued via netif_rx.
1448 */ 1449 */
1449 1450
1450static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p) 1451static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
1451{ 1452{
1452 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 1453 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1453 return NET_RX_DROP; 1454 return NET_RX_DROP;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index ec3f75a030d2..00a07f32a81e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -137,6 +137,110 @@ config PCMCIA_RAYCS
137comment "Wireless 802.11b ISA/PCI cards support" 137comment "Wireless 802.11b ISA/PCI cards support"
138 depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA) 138 depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
139 139
140config IPW2100
141 tristate "Intel PRO/Wireless 2100 Network Connection"
142 depends on NET_RADIO && PCI && IEEE80211
143 select FW_LOADER
144 ---help---
145 A driver for the Intel PRO/Wireless 2100 Network
146 Connection 802.11b wireless network adapter.
147
148 See <file:Documentation/networking/README.ipw2100> for information on
149 the capabilities currently enabled in this driver and for tips
150 for debugging issues and problems.
151
152 In order to use this driver, you will need a firmware image for it.
153 You can obtain the firmware from
154 <http://ipw2100.sf.net/>. Once you have the firmware image, you
155 will need to place it in /etc/firmware.
156
157 You will also very likely need the Wireless Tools in order to
158 configure your card:
159
160 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
161
162 If you want to compile the driver as a module ( = code which can be
163 inserted in and removed from the running kernel whenever you want),
164 say M here and read <file:Documentation/modules.txt>. The module
165 will be called ipw2100.ko.
166
167config IPW2100_MONITOR
168 bool "Enable promiscuous mode"
169 depends on IPW2100
170 ---help---
171 Enables promiscuous/monitor mode support for the ipw2100 driver.
172 With this feature compiled into the driver, you can switch to
172 promiscuous mode via the Wireless Tools' Monitor mode. While in this
174 mode, no packets can be sent.
175
176config IPW_DEBUG
177 bool "Enable full debugging output in IPW2100 module."
178 depends on IPW2100
179 ---help---
180 This option will enable debug tracing output for the IPW2100.
181
182 This will result in the kernel module being ~60k larger. You can
183 control which debug output is sent to the kernel log by setting the
184 value in
185
186 /sys/bus/pci/drivers/ipw2100/debug_level
187
188 This entry will only exist if this option is enabled.
189
190 If you are not trying to debug or develop the IPW2100 driver, you
191 most likely want to say N here.
192
193config IPW2200
194 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
195 depends on IEEE80211 && PCI
196 select FW_LOADER
197 ---help---
198 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
199 Connection adapters.
200
201 See <file:Documentation/networking/README.ipw2200> for
202 information on the capabilities currently enabled in this
203 driver and for tips for debugging issues and problems.
204
205 In order to use this driver, you will need a firmware image for it.
206 You can obtain the firmware from
207 <http://ipw2200.sf.net/>. See the above referenced README.ipw2200
208 for information on where to install the firmware images.
209
210 You will also very likely need the Wireless Tools in order to
211 configure your card:
212
213 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
214
215 If you want to compile the driver as a module ( = code which can be
216 inserted in and removed from the running kernel whenever you want),
217 say M here and read <file:Documentation/modules.txt>. The module
218 will be called ipw2200.ko.
219
220config IPW_DEBUG
221 bool "Enable full debugging output in IPW2200 module."
222 depends on IPW2200
223 ---help---
224 This option will enable debug tracing output for the IPW2200.
225
226 This will result in the kernel module being ~100k larger. You can
227 control which debug output is sent to the kernel log by setting the
228 value in
229
230 /sys/bus/pci/drivers/ipw2200/debug_level
231
232 This entry will only exist if this option is enabled.
233
234 To set a value, simply echo an 8-digit hex value to the same file:
235
236 % echo 0x00000FF0 > /sys/bus/pci/drivers/ipw2200/debug_level
237
238 You can find the list of debug mask values in
239 drivers/net/wireless/ipw2200.h
240
241 If you are not trying to debug or develop the IPW2200 driver, you
242 most likely want to say N here.
243
140config AIRO 244config AIRO
141 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 245 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
142 depends on NET_RADIO && ISA && (PCI || BROKEN) 246 depends on NET_RADIO && ISA && (PCI || BROKEN)
@@ -185,8 +289,8 @@ config APPLE_AIRPORT
185 a non-standard interface 289 a non-standard interface
186 290
187config PLX_HERMES 291config PLX_HERMES
188 tristate "Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.) (EXPERIMENTAL)" 292 tristate "Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.)"
189 depends on PCI && HERMES && EXPERIMENTAL 293 depends on PCI && HERMES
190 help 294 help
191 Enable support for PCMCIA cards supported by the "Hermes" (aka 295 Enable support for PCMCIA cards supported by the "Hermes" (aka
192 orinoco) driver when used in PLX9052 based PCI adaptors. These 296 orinoco) driver when used in PLX9052 based PCI adaptors. These
@@ -195,12 +299,9 @@ config PLX_HERMES
195 802.11b PCMCIA cards can be used in desktop machines. The Netgear 299 802.11b PCMCIA cards can be used in desktop machines. The Netgear
196 MA301 is such an adaptor. 300 MA301 is such an adaptor.
197 301
198 Support for these adaptors is so far still incomplete and buggy.
199 You have been warned.
200
201config TMD_HERMES 302config TMD_HERMES
202 tristate "Hermes in TMD7160 based PCI adaptor support (EXPERIMENTAL)" 303 tristate "Hermes in TMD7160 based PCI adaptor support"
203 depends on PCI && HERMES && EXPERIMENTAL 304 depends on PCI && HERMES
204 help 305 help
205 Enable support for PCMCIA cards supported by the "Hermes" (aka 306 Enable support for PCMCIA cards supported by the "Hermes" (aka
206 orinoco) driver when used in TMD7160 based PCI adaptors. These 307 orinoco) driver when used in TMD7160 based PCI adaptors. These
@@ -208,12 +309,18 @@ config TMD_HERMES
208 PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that 309 PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
209 802.11b PCMCIA cards can be used in desktop machines. 310 802.11b PCMCIA cards can be used in desktop machines.
210 311
211 Support for these adaptors is so far still incomplete and buggy. 312config NORTEL_HERMES
212 You have been warned. 313 tristate "Nortel emobility PCI adaptor support"
314 depends on PCI && HERMES
315 help
316 Enable support for PCMCIA cards supported by the "Hermes" (aka
317 orinoco) driver when used in Nortel emobility PCI adaptors. These
318 adaptors are not full PCMCIA controllers, but act as a more limited
319 PCI <-> PCMCIA bridge.
213 320
214config PCI_HERMES 321config PCI_HERMES
215 tristate "Prism 2.5 PCI 802.11b adaptor support (EXPERIMENTAL)" 322 tristate "Prism 2.5 PCI 802.11b adaptor support"
216 depends on PCI && HERMES && EXPERIMENTAL 323 depends on PCI && HERMES
217 help 324 help
218 Enable support for PCI and mini-PCI 802.11b wireless NICs based on 325 Enable support for PCI and mini-PCI 802.11b wireless NICs based on
219 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b 326 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
@@ -268,6 +375,19 @@ config PCMCIA_HERMES
268 configure your card and that /etc/pcmcia/wireless.opts works: 375 configure your card and that /etc/pcmcia/wireless.opts works:
269 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 376 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
270 377
378config PCMCIA_SPECTRUM
379 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
380 depends on NET_RADIO && PCMCIA && HERMES
381 ---help---
382
383 This is a driver for 802.11b cards using RAM-loadable Symbol
384 firmware, such as Symbol Wireless Networker LA4100, CompactFlash
385 cards by Socket Communications and Intel PRO/Wireless 2011B.
386
387 This driver requires firmware download on startup. Utilities
388 for downloading Symbol firmware are available at
389 <http://sourceforge.net/projects/orinoco/>
390
271config AIRO_CS 391config AIRO_CS
272 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 392 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
273 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 393 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
@@ -355,6 +475,8 @@ config PRISM54
355 say M here and read <file:Documentation/modules.txt>. The module 475 say M here and read <file:Documentation/modules.txt>. The module
356 will be called prism54.ko. 476 will be called prism54.ko.
357 477
478source "drivers/net/wireless/hostap/Kconfig"
479
358# yes, this works even when no drivers are selected 480# yes, this works even when no drivers are selected
359config NET_WIRELESS 481config NET_WIRELESS
360 bool 482 bool
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 2b87841322cc..3a6f7ba326ca 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -2,6 +2,10 @@
2# Makefile for the Linux Wireless network device drivers. 2# Makefile for the Linux Wireless network device drivers.
3# 3#
4 4
5obj-$(CONFIG_IPW2100) += ipw2100.o
6
7obj-$(CONFIG_IPW2200) += ipw2200.o
8
5obj-$(CONFIG_STRIP) += strip.o 9obj-$(CONFIG_STRIP) += strip.o
6obj-$(CONFIG_ARLAN) += arlan.o 10obj-$(CONFIG_ARLAN) += arlan.o
7 11
@@ -18,6 +22,8 @@ obj-$(CONFIG_APPLE_AIRPORT) += airport.o
18obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o 22obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
19obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o 23obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
20obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o 24obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
25obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
26obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
21 27
22obj-$(CONFIG_AIRO) += airo.o 28obj-$(CONFIG_AIRO) += airo.o
23obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o 29obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
@@ -28,6 +34,8 @@ obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o
28 34
29obj-$(CONFIG_PRISM54) += prism54/ 35obj-$(CONFIG_PRISM54) += prism54/
30 36
37obj-$(CONFIG_HOSTAP) += hostap/
38
31# 16-bit wireless PCMCIA client drivers 39# 16-bit wireless PCMCIA client drivers
32obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o 40obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
33obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o 41obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index df20adcd0730..2be65d308fbe 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1040,7 +1040,7 @@ typedef struct {
1040 u16 status; 1040 u16 status;
1041} WifiCtlHdr; 1041} WifiCtlHdr;
1042 1042
1043WifiCtlHdr wifictlhdr8023 = { 1043static WifiCtlHdr wifictlhdr8023 = {
1044 .ctlhdr = { 1044 .ctlhdr = {
1045 .ctl = HOST_DONT_RLSE, 1045 .ctl = HOST_DONT_RLSE,
1046 } 1046 }
@@ -1111,13 +1111,13 @@ static int airo_thread(void *data);
1111static void timer_func( struct net_device *dev ); 1111static void timer_func( struct net_device *dev );
1112static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 1112static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
1113#ifdef WIRELESS_EXT 1113#ifdef WIRELESS_EXT
1114struct iw_statistics *airo_get_wireless_stats (struct net_device *dev); 1114static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
1115static void airo_read_wireless_stats (struct airo_info *local); 1115static void airo_read_wireless_stats (struct airo_info *local);
1116#endif /* WIRELESS_EXT */ 1116#endif /* WIRELESS_EXT */
1117#ifdef CISCO_EXT 1117#ifdef CISCO_EXT
1118static int readrids(struct net_device *dev, aironet_ioctl *comp); 1118static int readrids(struct net_device *dev, aironet_ioctl *comp);
1119static int writerids(struct net_device *dev, aironet_ioctl *comp); 1119static int writerids(struct net_device *dev, aironet_ioctl *comp);
1120int flashcard(struct net_device *dev, aironet_ioctl *comp); 1120static int flashcard(struct net_device *dev, aironet_ioctl *comp);
1121#endif /* CISCO_EXT */ 1121#endif /* CISCO_EXT */
1122#ifdef MICSUPPORT 1122#ifdef MICSUPPORT
1123static void micinit(struct airo_info *ai); 1123static void micinit(struct airo_info *ai);
@@ -1226,6 +1226,12 @@ static int setup_proc_entry( struct net_device *dev,
1226static int takedown_proc_entry( struct net_device *dev, 1226static int takedown_proc_entry( struct net_device *dev,
1227 struct airo_info *apriv ); 1227 struct airo_info *apriv );
1228 1228
1229static int cmdreset(struct airo_info *ai);
1230static int setflashmode (struct airo_info *ai);
1231static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
1232static int flashputbuf(struct airo_info *ai);
1233static int flashrestart(struct airo_info *ai,struct net_device *dev);
1234
1229#ifdef MICSUPPORT 1235#ifdef MICSUPPORT
1230/*********************************************************************** 1236/***********************************************************************
1231 * MIC ROUTINES * 1237 * MIC ROUTINES *
@@ -1234,10 +1240,11 @@ static int takedown_proc_entry( struct net_device *dev,
1234 1240
1235static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); 1241static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
1236static void MoveWindow(miccntx *context, u32 micSeq); 1242static void MoveWindow(miccntx *context, u32 micSeq);
1237void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *); 1243static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
1238void emmh32_init(emmh32_context *context); 1244static void emmh32_init(emmh32_context *context);
1239void emmh32_update(emmh32_context *context, u8 *pOctets, int len); 1245static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
1240void emmh32_final(emmh32_context *context, u8 digest[4]); 1246static void emmh32_final(emmh32_context *context, u8 digest[4]);
1247static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
1241 1248
1242/* micinit - Initialize mic seed */ 1249/* micinit - Initialize mic seed */
1243 1250
@@ -1301,7 +1308,7 @@ static int micsetup(struct airo_info *ai) {
1301 int i; 1308 int i;
1302 1309
1303 if (ai->tfm == NULL) 1310 if (ai->tfm == NULL)
1304 ai->tfm = crypto_alloc_tfm("aes", 0); 1311 ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);
1305 1312
1306 if (ai->tfm == NULL) { 1313 if (ai->tfm == NULL) {
1307 printk(KERN_ERR "airo: failed to load transform for AES\n"); 1314 printk(KERN_ERR "airo: failed to load transform for AES\n");
@@ -1315,7 +1322,7 @@ static int micsetup(struct airo_info *ai) {
1315 return SUCCESS; 1322 return SUCCESS;
1316} 1323}
1317 1324
1318char micsnap[]= {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02}; 1325static char micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
1319 1326
1320/*=========================================================================== 1327/*===========================================================================
1321 * Description: Mic a packet 1328 * Description: Mic a packet
@@ -1570,7 +1577,7 @@ static void MoveWindow(miccntx *context, u32 micSeq)
1570static unsigned char aes_counter[16]; 1577static unsigned char aes_counter[16];
1571 1578
1572/* expand the key to fill the MMH coefficient array */ 1579/* expand the key to fill the MMH coefficient array */
1573void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm) 1580static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
1574{ 1581{
1575 /* take the keying material, expand if necessary, truncate at 16-bytes */ 1582 /* take the keying material, expand if necessary, truncate at 16-bytes */
1576 /* run through AES counter mode to generate context->coeff[] */ 1583 /* run through AES counter mode to generate context->coeff[] */
@@ -1602,7 +1609,7 @@ void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto
1602} 1609}
1603 1610
1604/* prepare for calculation of a new mic */ 1611/* prepare for calculation of a new mic */
1605void emmh32_init(emmh32_context *context) 1612static void emmh32_init(emmh32_context *context)
1606{ 1613{
1607 /* prepare for new mic calculation */ 1614 /* prepare for new mic calculation */
1608 context->accum = 0; 1615 context->accum = 0;
@@ -1610,7 +1617,7 @@ void emmh32_init(emmh32_context *context)
1610} 1617}
1611 1618
1612/* add some bytes to the mic calculation */ 1619/* add some bytes to the mic calculation */
1613void emmh32_update(emmh32_context *context, u8 *pOctets, int len) 1620static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
1614{ 1621{
1615 int coeff_position, byte_position; 1622 int coeff_position, byte_position;
1616 1623
@@ -1652,7 +1659,7 @@ void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
1652static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L }; 1659static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L };
1653 1660
1654/* calculate the mic */ 1661/* calculate the mic */
1655void emmh32_final(emmh32_context *context, u8 digest[4]) 1662static void emmh32_final(emmh32_context *context, u8 digest[4])
1656{ 1663{
1657 int coeff_position, byte_position; 1664 int coeff_position, byte_position;
1658 u32 val; 1665 u32 val;
@@ -2232,7 +2239,7 @@ static void airo_read_stats(struct airo_info *ai) {
2232 u32 *vals = stats_rid.vals; 2239 u32 *vals = stats_rid.vals;
2233 2240
2234 clear_bit(JOB_STATS, &ai->flags); 2241 clear_bit(JOB_STATS, &ai->flags);
2235 if (ai->power) { 2242 if (ai->power.event) {
2236 up(&ai->sem); 2243 up(&ai->sem);
2237 return; 2244 return;
2238 } 2245 }
@@ -2255,7 +2262,7 @@ static void airo_read_stats(struct airo_info *ai) {
2255 ai->stats.rx_fifo_errors = vals[0]; 2262 ai->stats.rx_fifo_errors = vals[0];
2256} 2263}
2257 2264
2258struct net_device_stats *airo_get_stats(struct net_device *dev) 2265static struct net_device_stats *airo_get_stats(struct net_device *dev)
2259{ 2266{
2260 struct airo_info *local = dev->priv; 2267 struct airo_info *local = dev->priv;
2261 2268
@@ -2403,8 +2410,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
2403 } 2410 }
2404 } 2411 }
2405#ifdef MICSUPPORT 2412#ifdef MICSUPPORT
2406 if (ai->tfm) 2413 crypto_free_tfm(ai->tfm);
2407 crypto_free_tfm(ai->tfm);
2408#endif 2414#endif
2409 del_airo_dev( dev ); 2415 del_airo_dev( dev );
2410 free_netdev( dev ); 2416 free_netdev( dev );
@@ -2414,7 +2420,7 @@ EXPORT_SYMBOL(stop_airo_card);
2414 2420
2415static int add_airo_dev( struct net_device *dev ); 2421static int add_airo_dev( struct net_device *dev );
2416 2422
2417int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) 2423static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
2418{ 2424{
2419 memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); 2425 memcpy(haddr, skb->mac.raw + 10, ETH_ALEN);
2420 return ETH_ALEN; 2426 return ETH_ALEN;
@@ -2681,7 +2687,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
2681 return dev; 2687 return dev;
2682} 2688}
2683 2689
2684int reset_card( struct net_device *dev , int lock) { 2690static int reset_card( struct net_device *dev , int lock) {
2685 struct airo_info *ai = dev->priv; 2691 struct airo_info *ai = dev->priv;
2686 2692
2687 if (lock && down_interruptible(&ai->sem)) 2693 if (lock && down_interruptible(&ai->sem))
@@ -2696,9 +2702,9 @@ int reset_card( struct net_device *dev , int lock) {
2696 return 0; 2702 return 0;
2697} 2703}
2698 2704
2699struct net_device *_init_airo_card( unsigned short irq, int port, 2705static struct net_device *_init_airo_card( unsigned short irq, int port,
2700 int is_pcmcia, struct pci_dev *pci, 2706 int is_pcmcia, struct pci_dev *pci,
2701 struct device *dmdev ) 2707 struct device *dmdev )
2702{ 2708{
2703 struct net_device *dev; 2709 struct net_device *dev;
2704 struct airo_info *ai; 2710 struct airo_info *ai;
@@ -2962,7 +2968,7 @@ static int airo_thread(void *data) {
2962 break; 2968 break;
2963 } 2969 }
2964 2970
2965 if (ai->power || test_bit(FLAG_FLASHING, &ai->flags)) { 2971 if (ai->power.event || test_bit(FLAG_FLASHING, &ai->flags)) {
2966 up(&ai->sem); 2972 up(&ai->sem);
2967 continue; 2973 continue;
2968 } 2974 }
@@ -3252,7 +3258,7 @@ badrx:
3252 wstats.noise = apriv->wstats.qual.noise; 3258 wstats.noise = apriv->wstats.qual.noise;
3253 wstats.updated = IW_QUAL_LEVEL_UPDATED 3259 wstats.updated = IW_QUAL_LEVEL_UPDATED
3254 | IW_QUAL_QUAL_UPDATED 3260 | IW_QUAL_QUAL_UPDATED
3255 | IW_QUAL_NOISE_UPDATED; 3261 | IW_QUAL_DBM;
3256 /* Update spy records */ 3262 /* Update spy records */
3257 wireless_spy_update(dev, sa, &wstats); 3263 wireless_spy_update(dev, sa, &wstats);
3258 } 3264 }
@@ -3598,7 +3604,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
3598 wstats.noise = ai->wstats.qual.noise; 3604 wstats.noise = ai->wstats.qual.noise;
3599 wstats.updated = IW_QUAL_QUAL_UPDATED 3605 wstats.updated = IW_QUAL_QUAL_UPDATED
3600 | IW_QUAL_LEVEL_UPDATED 3606 | IW_QUAL_LEVEL_UPDATED
3601 | IW_QUAL_NOISE_UPDATED; 3607 | IW_QUAL_DBM;
3602 /* Update spy records */ 3608 /* Update spy records */
3603 wireless_spy_update(ai->dev, sa, &wstats); 3609 wireless_spy_update(ai->dev, sa, &wstats);
3604 } 3610 }
@@ -5514,7 +5520,7 @@ static int airo_pci_resume(struct pci_dev *pdev)
5514 pci_restore_state(pdev); 5520 pci_restore_state(pdev);
5515 pci_enable_wake(pdev, pci_choose_state(pdev, ai->power), 0); 5521 pci_enable_wake(pdev, pci_choose_state(pdev, ai->power), 0);
5516 5522
5517 if (ai->power > 1) { 5523 if (ai->power.event > 1) {
5518 reset_card(dev, 0); 5524 reset_card(dev, 0);
5519 mpi_init_descriptors(ai); 5525 mpi_init_descriptors(ai);
5520 setup_card(ai, dev->dev_addr, 0); 5526 setup_card(ai, dev->dev_addr, 0);
@@ -6483,22 +6489,20 @@ static int airo_get_range(struct net_device *dev,
6483 range->max_qual.qual = 100; /* % */ 6489 range->max_qual.qual = 100; /* % */
6484 else 6490 else
6485 range->max_qual.qual = airo_get_max_quality(&cap_rid); 6491 range->max_qual.qual = airo_get_max_quality(&cap_rid);
6486 range->max_qual.level = 0; /* 0 means we use dBm */ 6492 range->max_qual.level = 0x100 - 120; /* -120 dBm */
6487 range->max_qual.noise = 0; 6493 range->max_qual.noise = 0x100 - 120; /* -120 dBm */
6488 range->max_qual.updated = 0;
6489 6494
6490 /* Experimental measurements - boundary 11/5.5 Mb/s */ 6495 /* Experimental measurements - boundary 11/5.5 Mb/s */
6491 /* Note : with or without the (local->rssi), results 6496 /* Note : with or without the (local->rssi), results
6492 * are somewhat different. - Jean II */ 6497 * are somewhat different. - Jean II */
6493 if (local->rssi) { 6498 if (local->rssi) {
6494 range->avg_qual.qual = 50; /* % */ 6499 range->avg_qual.qual = 50; /* % */
6495 range->avg_qual.level = 186; /* -70 dBm */ 6500 range->avg_qual.level = 0x100 - 70; /* -70 dBm */
6496 } else { 6501 } else {
6497 range->avg_qual.qual = airo_get_avg_quality(&cap_rid); 6502 range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
6498 range->avg_qual.level = 176; /* -80 dBm */ 6503 range->avg_qual.level = 0x100 - 80; /* -80 dBm */
6499 } 6504 }
6500 range->avg_qual.noise = 0; 6505 range->avg_qual.noise = 0x100 - 85; /* -85 dBm */
6501 range->avg_qual.updated = 0;
6502 6506
6503 for(i = 0 ; i < 8 ; i++) { 6507 for(i = 0 ; i < 8 ; i++) {
6504 range->bitrate[i] = cap_rid.supportedRates[i] * 500000; 6508 range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
@@ -6721,15 +6725,17 @@ static int airo_get_aplist(struct net_device *dev,
6721 if (local->rssi) { 6725 if (local->rssi) {
6722 qual[i].level = 0x100 - BSSList.dBm; 6726 qual[i].level = 0x100 - BSSList.dBm;
6723 qual[i].qual = airo_dbm_to_pct( local->rssi, BSSList.dBm ); 6727 qual[i].qual = airo_dbm_to_pct( local->rssi, BSSList.dBm );
6724 qual[i].updated = IW_QUAL_QUAL_UPDATED; 6728 qual[i].updated = IW_QUAL_QUAL_UPDATED
6729 | IW_QUAL_LEVEL_UPDATED
6730 | IW_QUAL_DBM;
6725 } else { 6731 } else {
6726 qual[i].level = (BSSList.dBm + 321) / 2; 6732 qual[i].level = (BSSList.dBm + 321) / 2;
6727 qual[i].qual = 0; 6733 qual[i].qual = 0;
6728 qual[i].updated = IW_QUAL_QUAL_INVALID; 6734 qual[i].updated = IW_QUAL_QUAL_INVALID
6735 | IW_QUAL_LEVEL_UPDATED
6736 | IW_QUAL_DBM;
6729 } 6737 }
6730 qual[i].noise = local->wstats.qual.noise; 6738 qual[i].noise = local->wstats.qual.noise;
6731 qual[i].updated = IW_QUAL_LEVEL_UPDATED
6732 | IW_QUAL_NOISE_UPDATED;
6733 if (BSSList.index == 0xffff) 6739 if (BSSList.index == 0xffff)
6734 break; 6740 break;
6735 } 6741 }
@@ -6855,15 +6861,17 @@ static inline char *airo_translate_scan(struct net_device *dev,
6855 if (ai->rssi) { 6861 if (ai->rssi) {
6856 iwe.u.qual.level = 0x100 - bss->dBm; 6862 iwe.u.qual.level = 0x100 - bss->dBm;
6857 iwe.u.qual.qual = airo_dbm_to_pct( ai->rssi, bss->dBm ); 6863 iwe.u.qual.qual = airo_dbm_to_pct( ai->rssi, bss->dBm );
6858 iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED; 6864 iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
6865 | IW_QUAL_LEVEL_UPDATED
6866 | IW_QUAL_DBM;
6859 } else { 6867 } else {
6860 iwe.u.qual.level = (bss->dBm + 321) / 2; 6868 iwe.u.qual.level = (bss->dBm + 321) / 2;
6861 iwe.u.qual.qual = 0; 6869 iwe.u.qual.qual = 0;
6862 iwe.u.qual.updated = IW_QUAL_QUAL_INVALID; 6870 iwe.u.qual.updated = IW_QUAL_QUAL_INVALID
6871 | IW_QUAL_LEVEL_UPDATED
6872 | IW_QUAL_DBM;
6863 } 6873 }
6864 iwe.u.qual.noise = ai->wstats.qual.noise; 6874 iwe.u.qual.noise = ai->wstats.qual.noise;
6865 iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
6866 | IW_QUAL_NOISE_UPDATED;
6867 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); 6875 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
6868 6876
6869 /* Add encryption capability */ 6877 /* Add encryption capability */
@@ -7116,7 +7124,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
7116 int rc = 0; 7124 int rc = 0;
7117 struct airo_info *ai = (struct airo_info *)dev->priv; 7125 struct airo_info *ai = (struct airo_info *)dev->priv;
7118 7126
7119 if (ai->power) 7127 if (ai->power.event)
7120 return 0; 7128 return 0;
7121 7129
7122 switch (cmd) { 7130 switch (cmd) {
@@ -7195,7 +7203,7 @@ static void airo_read_wireless_stats(struct airo_info *local)
7195 7203
7196 /* Get stats out of the card */ 7204 /* Get stats out of the card */
7197 clear_bit(JOB_WSTATS, &local->flags); 7205 clear_bit(JOB_WSTATS, &local->flags);
7198 if (local->power) { 7206 if (local->power.event) {
7199 up(&local->sem); 7207 up(&local->sem);
7200 return; 7208 return;
7201 } 7209 }
@@ -7216,13 +7224,12 @@ static void airo_read_wireless_stats(struct airo_info *local)
7216 local->wstats.qual.level = (status_rid.normalizedSignalStrength + 321) / 2; 7224 local->wstats.qual.level = (status_rid.normalizedSignalStrength + 321) / 2;
7217 local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid); 7225 local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
7218 } 7226 }
7219 local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED;
7220 if (status_rid.len >= 124) { 7227 if (status_rid.len >= 124) {
7221 local->wstats.qual.noise = 0x100 - status_rid.noisedBm; 7228 local->wstats.qual.noise = 0x100 - status_rid.noisedBm;
7222 local->wstats.qual.updated |= IW_QUAL_NOISE_UPDATED; 7229 local->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
7223 } else { 7230 } else {
7224 local->wstats.qual.noise = 0; 7231 local->wstats.qual.noise = 0;
7225 local->wstats.qual.updated |= IW_QUAL_NOISE_INVALID; 7232 local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_DBM;
7226 } 7233 }
7227 7234
7228 /* Packets discarded in the wireless adapter due to wireless 7235 /* Packets discarded in the wireless adapter due to wireless
@@ -7235,7 +7242,7 @@ static void airo_read_wireless_stats(struct airo_info *local)
7235 local->wstats.miss.beacon = vals[34]; 7242 local->wstats.miss.beacon = vals[34];
7236} 7243}
7237 7244
7238struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) 7245static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
7239{ 7246{
7240 struct airo_info *local = dev->priv; 7247 struct airo_info *local = dev->priv;
7241 7248
@@ -7450,14 +7457,8 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
7450 * Flash command switch table 7457 * Flash command switch table
7451 */ 7458 */
7452 7459
7453int flashcard(struct net_device *dev, aironet_ioctl *comp) { 7460static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
7454 int z; 7461 int z;
7455 int cmdreset(struct airo_info *);
7456 int setflashmode(struct airo_info *);
7457 int flashgchar(struct airo_info *,int,int);
7458 int flashpchar(struct airo_info *,int,int);
7459 int flashputbuf(struct airo_info *);
7460 int flashrestart(struct airo_info *,struct net_device *);
7461 7462
7462 /* Only super-user can modify flash */ 7463 /* Only super-user can modify flash */
7463 if (!capable(CAP_NET_ADMIN)) 7464 if (!capable(CAP_NET_ADMIN))
@@ -7515,7 +7516,7 @@ int flashcard(struct net_device *dev, aironet_ioctl *comp) {
7515 * card. 7516 * card.
7516 */ 7517 */
7517 7518
7518int cmdreset(struct airo_info *ai) { 7519static int cmdreset(struct airo_info *ai) {
7519 disable_MAC(ai, 1); 7520 disable_MAC(ai, 1);
7520 7521
7521 if(!waitbusy (ai)){ 7522 if(!waitbusy (ai)){
@@ -7539,7 +7540,7 @@ int cmdreset(struct airo_info *ai) {
7539 * mode 7540 * mode
7540 */ 7541 */
7541 7542
7542int setflashmode (struct airo_info *ai) { 7543static int setflashmode (struct airo_info *ai) {
7543 set_bit (FLAG_FLASHING, &ai->flags); 7544 set_bit (FLAG_FLASHING, &ai->flags);
7544 7545
7545 OUT4500(ai, SWS0, FLASH_COMMAND); 7546 OUT4500(ai, SWS0, FLASH_COMMAND);
@@ -7566,7 +7567,7 @@ int setflashmode (struct airo_info *ai) {
7566 * x 50us for echo . 7567 * x 50us for echo .
7567 */ 7568 */
7568 7569
7569int flashpchar(struct airo_info *ai,int byte,int dwelltime) { 7570static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
7570 int echo; 7571 int echo;
7571 int waittime; 7572 int waittime;
7572 7573
@@ -7606,7 +7607,7 @@ int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
7606 * Get a character from the card matching matchbyte 7607 * Get a character from the card matching matchbyte
7607 * Step 3) 7608 * Step 3)
7608 */ 7609 */
7609int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){ 7610static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
7610 int rchar; 7611 int rchar;
7611 unsigned char rbyte=0; 7612 unsigned char rbyte=0;
7612 7613
@@ -7637,7 +7638,7 @@ int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
7637 * send to the card 7638 * send to the card
7638 */ 7639 */
7639 7640
7640int flashputbuf(struct airo_info *ai){ 7641static int flashputbuf(struct airo_info *ai){
7641 int nwords; 7642 int nwords;
7642 7643
7643 /* Write stuff */ 7644 /* Write stuff */
@@ -7659,7 +7660,7 @@ int flashputbuf(struct airo_info *ai){
7659/* 7660/*
7660 * 7661 *
7661 */ 7662 */
7662int flashrestart(struct airo_info *ai,struct net_device *dev){ 7663static int flashrestart(struct airo_info *ai,struct net_device *dev){
7663 int i,status; 7664 int i,status;
7664 7665
7665 ssleep(1); /* Added 12/7/00 */ 7666 ssleep(1); /* Added 12/7/00 */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 18a7d38d2a13..587869d86eee 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -68,7 +68,7 @@
68#include <linux/device.h> 68#include <linux/device.h>
69#include <linux/moduleparam.h> 69#include <linux/moduleparam.h>
70#include <linux/firmware.h> 70#include <linux/firmware.h>
71#include "ieee802_11.h" 71#include <net/ieee80211.h>
72#include "atmel.h" 72#include "atmel.h"
73 73
74#define DRIVER_MAJOR 0 74#define DRIVER_MAJOR 0
@@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_private *priv);
618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data); 618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
619static void atmel_command_irq(struct atmel_private *priv); 619static void atmel_command_irq(struct atmel_private *priv);
620static int atmel_validate_channel(struct atmel_private *priv, int channel); 620static int atmel_validate_channel(struct atmel_private *priv, int channel);
621static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 621static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
622 u16 frame_len, u8 rssi); 622 u16 frame_len, u8 rssi);
623static void atmel_management_timer(u_long a); 623static void atmel_management_timer(u_long a);
624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size); 624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size); 625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
627 u8 *body, int body_len); 627 u8 *body, int body_len);
628 628
629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); 629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -827,7 +827,7 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
827static int start_tx (struct sk_buff *skb, struct net_device *dev) 827static int start_tx (struct sk_buff *skb, struct net_device *dev)
828{ 828{
829 struct atmel_private *priv = netdev_priv(dev); 829 struct atmel_private *priv = netdev_priv(dev);
830 struct ieee802_11_hdr header; 830 struct ieee80211_hdr header;
831 unsigned long flags; 831 unsigned long flags;
832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; 832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; 833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
@@ -863,17 +863,17 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
863 return 1; 863 return 1;
864 } 864 }
865 865
866 frame_ctl = IEEE802_11_FTYPE_DATA; 866 frame_ctl = IEEE80211_FTYPE_DATA;
867 header.duration_id = 0; 867 header.duration_id = 0;
868 header.seq_ctl = 0; 868 header.seq_ctl = 0;
 	if (priv->wep_is_on)
-		frame_ctl |= IEEE802_11_FCTL_WEP;
+		frame_ctl |= IEEE80211_FCTL_PROTECTED;
 	if (priv->operating_mode == IW_MODE_ADHOC) {
 		memcpy(&header.addr1, skb->data, 6);
 		memcpy(&header.addr2, dev->dev_addr, 6);
 		memcpy(&header.addr3, priv->BSSID, 6);
 	} else {
-		frame_ctl |= IEEE802_11_FCTL_TODS;
+		frame_ctl |= IEEE80211_FCTL_TODS;
 		memcpy(&header.addr1, priv->CurrentBSSID, 6);
 		memcpy(&header.addr2, dev->dev_addr, 6);
 		memcpy(&header.addr3, skb->data, 6);
@@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
 }
 
 static void atmel_transmit_management_frame(struct atmel_private *priv,
-					    struct ieee802_11_hdr *header,
+					    struct ieee80211_hdr *header,
 					    u8 *body, int body_len)
 {
 	u16 buff;
@@ -917,7 +917,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
 	tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
 }
 
-static void fast_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header,
+static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header,
 			 u16 msdu_size, u16 rx_packet_loc, u32 crc)
 {
 	/* fast path: unfragmented packet copy directly into skbuf */
@@ -955,7 +955,7 @@ static void fast_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *head
 	}
 
 	memcpy(skbp, header->addr1, 6); /* destination address */
-	if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+	if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
 		memcpy(&skbp[6], header->addr3, 6);
 	else
 		memcpy(&skbp[6], header->addr2, 6); /* source address */
@@ -990,14 +990,14 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
 	return (crc ^ 0xffffffff) == netcrc;
 }
 
-static void frag_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header,
+static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header,
 			 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
 {
 	u8 mac4[6];
 	u8 source[6];
 	struct sk_buff *skb;
 
-	if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+	if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
 		memcpy(source, header->addr3, 6);
 	else
 		memcpy(source, header->addr2, 6);
@@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *head
 static void rx_done_irq(struct atmel_private *priv)
 {
 	int i;
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 
 	for (i = 0;
 	     atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -1117,7 +1117,7 @@ static void rx_done_irq(struct atmel_private *priv)
 		/* probe for CRC use here if needed once five packets have arrived with
 		   the same crc status, we assume we know what's happening and stop probing */
 		if (priv->probe_crc) {
-			if (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP)) {
+			if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) {
 				priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
 			} else {
 				priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24);
@@ -1132,16 +1132,16 @@ static void rx_done_irq(struct atmel_private *priv)
 		}
 
 		/* don't CRC header when WEP in use */
-		if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP))) {
+		if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) {
 			crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
 		}
 		msdu_size -= 24; /* header */
 
-		if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_DATA) {
+		if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
 
-			int more_fragments = frame_ctl & IEEE802_11_FCTL_MOREFRAGS;
-			u8 packet_fragment_no = seq_control & IEEE802_11_SCTL_FRAG;
-			u16 packet_sequence_no = (seq_control & IEEE802_11_SCTL_SEQ) >> 4;
+			int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
+			u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
+			u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
 
 			if (!more_fragments && packet_fragment_no == 0 ) {
 				fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
@@ -1151,7 +1151,7 @@ static void rx_done_irq(struct atmel_private *priv)
 			}
 		}
 
-		if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_MGMT) {
+		if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
 			/* copy rest of packet into buffer */
 			atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
@@ -1593,7 +1593,6 @@ struct net_device *init_atmel_card( unsigned short irq, int port, const AtmelFWT
 	dev->set_mac_address = atmel_set_mac_address;
 	dev->hard_start_xmit = start_tx;
 	dev->get_stats = atmel_get_stats;
-	dev->get_wireless_stats = atmel_get_wireless_stats;
 	dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def;
 	dev->do_ioctl = atmel_ioctl;
 	dev->irq = irq;
@@ -2411,7 +2410,8 @@ static const struct iw_handler_def atmel_handler_def =
 	.num_private_args	= sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
 	.standard	= (iw_handler *) atmel_handler,
 	.private	= (iw_handler *) atmel_private_handler,
-	.private_args	= (struct iw_priv_args *) atmel_private_args
+	.private_args	= (struct iw_priv_args *) atmel_private_args,
+	.get_wireless_stats = atmel_get_wireless_stats
 };
 
 static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2424,19 +2424,6 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	char domain[REGDOMAINSZ+1];
 
 	switch (cmd) {
-	case SIOCGIWPRIV:
-		if(wrq->u.data.pointer) {
-			/* Set the number of ioctl available */
-			wrq->u.data.length = sizeof(atmel_private_args) / sizeof(atmel_private_args[0]);
-
-			/* Copy structure to the user buffer */
-			if (copy_to_user(wrq->u.data.pointer,
-					 (u_char *) atmel_private_args,
-					 sizeof(atmel_private_args)))
-				rc = -EFAULT;
-		}
-		break;
-
 	case ATMELIDIFC:
 		wrq->u.param.value = ATMELMAGIC;
 		break;
@@ -2663,10 +2650,10 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
 
 static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
 {
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 	struct auth_body auth;
 
-	header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | IEEE802_11_STYPE_AUTH);
+	header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
 	header.duration_id = cpu_to_le16(0x8000);
 	header.seq_ctl = 0;
 	memcpy(header.addr1, priv->CurrentBSSID, 6);
@@ -2677,7 +2664,7 @@ static void send_authentication_request(struct atmel_private *priv, u8 *challeng
 		auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY);
 		/* no WEP for authentication frames with TrSeqNo 1 */
 		if (priv->CurrentAuthentTransactionSeqNum != 1)
-			header.frame_ctl |= cpu_to_le16(IEEE802_11_FCTL_WEP);
+			header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 	} else {
 		auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM);
 	}
@@ -2701,7 +2688,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 {
 	u8 *ssid_el_p;
 	int bodysize;
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 	struct ass_req_format {
 		u16 capability;
 		u16 listen_interval;
@@ -2714,8 +2701,8 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 		u8 rates[4];
 	} body;
 
-	header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT |
-		(is_reassoc ? IEEE802_11_STYPE_REASSOC_REQ : IEEE802_11_STYPE_ASSOC_REQ));
+	header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+		(is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
 	header.duration_id = cpu_to_le16(0x8000);
 	header.seq_ctl = 0;
 
@@ -2751,9 +2738,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 	atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
 }
 
-static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee802_11_hdr *header)
+static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header)
 {
-	if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+	if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
 		return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
 	else
 		return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
@@ -2801,7 +2788,7 @@ static int retrieve_bss(struct atmel_private *priv)
 }
 
 
-static void store_bss_info(struct atmel_private *priv, struct ieee802_11_hdr *header,
+static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header,
 			   u16 capability, u16 beacon_period, u8 channel, u8 rssi,
 			   u8 ssid_len, u8 *ssid, int is_beacon)
 {
@@ -3085,12 +3072,12 @@ static void atmel_smooth_qual(struct atmel_private *priv)
 }
 
 /* deals with incoming managment frames. */
-static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header,
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
 				   u16 frame_len, u8 rssi)
 {
 	u16 subtype;
 
-	switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_STYPE) {
+	switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE) {
 	case C80211_SUBTYPE_MGMT_BEACON :
 	case C80211_SUBTYPE_MGMT_ProbeResponse:
 
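The atmel hunks above do two things: the driver-local IEEE802_11_* frame-control names are replaced by the shared IEEE80211_* constants from <net/ieee80211.h> (IEEE802_11_FCTL_WEP becoming IEEE80211_FCTL_PROTECTED), and the get_wireless_stats hook moves off struct net_device and into the Wireless Extensions handler table, which also lets the hand-rolled SIOCGIWPRIV case in atmel_ioctl() go away since the WE core can answer it from .private_args. As a minimal sketch, not taken verbatim from the patch and abridged to the fields of interest, the new-style registration looks like this:

static const struct iw_handler_def example_handler_def = {
	/* ...num_standard/num_private/num_private_args as before... */
	.standard		= (iw_handler *) atmel_handler,
	.private		= (iw_handler *) atmel_private_handler,
	.private_args		= (struct iw_priv_args *) atmel_private_args,
	/* formerly: dev->get_wireless_stats = atmel_get_wireless_stats; */
	.get_wireless_stats	= atmel_get_wireless_stats,
};

	/* at init time only the handler table is hooked up */
	dev->wireless_handlers = (struct iw_handler_def *) &example_handler_def;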
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
new file mode 100644
index 000000000000..56f41c714d38
--- /dev/null
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -0,0 +1,73 @@
1config HOSTAP
2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
3 depends on NET_RADIO
4 select IEEE80211
5 select IEEE80211_CRYPT_WEP
6 ---help---
7	  Shared driver code for IEEE 802.11b wireless cards based on the
8	  Intersil Prism2/2.5/3 chipset. This driver supports the so-called
9	  Host AP mode, which allows the card to act as an IEEE 802.11
10	  access point.
11
12 See <http://hostap.epitest.fi/> for more information about the
13 Host AP driver configuration and tools. This site includes
14 information and tools (hostapd and wpa_supplicant) for WPA/WPA2
15 support.
16
17 This option includes the base Host AP driver code that is shared by
18 different hardware models. You will also need to enable support for
19 PLX/PCI/CS version of the driver to actually use the driver.
20
21 The driver can be compiled as a module and it will be called
22 "hostap.ko".
23
24config HOSTAP_FIRMWARE
25 bool "Support downloading firmware images with Host AP driver"
26 depends on HOSTAP
27 ---help---
28	  Configure the Host AP driver to include support for firmware image
29	  download. The current version supports downloading only to volatile
30	  (RAM) memory; flash upgrade is not yet supported.
31
32	  Firmware image downloading needs a user space tool, prism2_srec. It is
33 available from http://hostap.epitest.fi/.
34
35config HOSTAP_PLX
36 tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
37 depends on PCI && HOSTAP
38 ---help---
39 Host AP driver's version for Prism2/2.5/3 PC Cards in PLX9052 based
40 PCI adaptors.
41
42	  "Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
43	  driver, and its help text includes more information about the Host AP
44	  driver.
45
46 The driver can be compiled as a module and will be named
47 "hostap_plx.ko".
48
49config HOSTAP_PCI
50 tristate "Host AP driver for Prism2.5 PCI adaptors"
51 depends on PCI && HOSTAP
52 ---help---
53 Host AP driver's version for Prism2.5 PCI adaptors.
54
55	  "Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
56	  driver, and its help text includes more information about the Host AP
57	  driver.
58
59 The driver can be compiled as a module and will be named
60 "hostap_pci.ko".
61
62config HOSTAP_CS
63 tristate "Host AP driver for Prism2/2.5/3 PC Cards"
64 depends on PCMCIA!=n && HOSTAP
65 ---help---
66 Host AP driver's version for Prism2/2.5/3 PC Cards.
67
68	  "Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
69	  driver, and its help text includes more information about the Host AP
70	  driver.
71
72 The driver can be compiled as a module and will be named
73 "hostap_cs.ko".
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
new file mode 100644
index 000000000000..fc62235bfc24
--- /dev/null
+++ b/drivers/net/wireless/hostap/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_HOSTAP) += hostap.o
2
3obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
4obj-$(CONFIG_HOSTAP_PLX) += hostap_plx.o
5obj-$(CONFIG_HOSTAP_PCI) += hostap_pci.o
diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap.c
new file mode 100644
index 000000000000..e7f5821b4942
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap.c
@@ -0,0 +1,1198 @@
1/*
2 * Host AP (software wireless LAN access point) driver for
3 * Intersil Prism2/2.5/3 - hostap.o module, common routines
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <jkmaline@cc.hut.fi>
7 * Copyright (c) 2002-2005, Jouni Malinen <jkmaline@cc.hut.fi>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. See README and COPYING for
12 * more details.
13 */
14
15#include <linux/config.h>
16#include <linux/version.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/proc_fs.h>
21#include <linux/if_arp.h>
22#include <linux/delay.h>
23#include <linux/random.h>
24#include <linux/workqueue.h>
25#include <linux/kmod.h>
26#include <linux/rtnetlink.h>
27#include <linux/wireless.h>
28#include <net/iw_handler.h>
29#include <net/ieee80211.h>
30#include <net/ieee80211_crypt.h>
31#include <asm/uaccess.h>
32
33#include "hostap_wlan.h"
34#include "hostap_80211.h"
35#include "hostap_ap.h"
36#include "hostap.h"
37
38MODULE_AUTHOR("Jouni Malinen");
39MODULE_DESCRIPTION("Host AP common routines");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(PRISM2_VERSION);
42
43#define TX_TIMEOUT (2 * HZ)
44
45#define PRISM2_MAX_FRAME_SIZE 2304
46#define PRISM2_MIN_MTU 256
47/* FIX: */
48#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
49
50
51/* hostap.c */
52static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
53 int rtnl_locked);
54static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
55 int rtnl_locked, int do_not_remove);
56
57/* hostap_ap.c */
58static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
59 struct iw_quality qual[], int buf_size,
60 int aplist);
61static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
62static int prism2_hostapd(struct ap_data *ap,
63 struct prism2_hostapd_param *param);
64static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
65 struct ieee80211_crypt_data ***crypt);
66static void ap_control_kickall(struct ap_data *ap);
67#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
68static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
69 u8 *mac);
70static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
71 u8 *mac);
72static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
73static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
74 u8 *mac);
75#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
76
77
78static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
79 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
80#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
81
82
83/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
84/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
85static unsigned char rfc1042_header[] =
86{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
87/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
88static unsigned char bridge_tunnel_header[] =
89{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
90/* No encapsulation header if EtherType < 0x600 (=length) */
91
92
93/* FIX: these could be compiled separately and linked together to hostap.o */
94#include "hostap_ap.c"
95#include "hostap_info.c"
96#include "hostap_ioctl.c"
97#include "hostap_proc.c"
98#include "hostap_80211_rx.c"
99#include "hostap_80211_tx.c"
100
101
102struct net_device * hostap_add_interface(struct local_info *local,
103 int type, int rtnl_locked,
104 const char *prefix,
105 const char *name)
106{
107 struct net_device *dev, *mdev;
108 struct hostap_interface *iface;
109 int ret;
110
111 dev = alloc_etherdev(sizeof(struct hostap_interface));
112 if (dev == NULL)
113 return NULL;
114
115 iface = netdev_priv(dev);
116 iface->dev = dev;
117 iface->local = local;
118 iface->type = type;
119 list_add(&iface->list, &local->hostap_interfaces);
120
121 mdev = local->dev;
122 memcpy(dev->dev_addr, mdev->dev_addr, ETH_ALEN);
123 dev->base_addr = mdev->base_addr;
124 dev->irq = mdev->irq;
125 dev->mem_start = mdev->mem_start;
126 dev->mem_end = mdev->mem_end;
127
128 hostap_setup_dev(dev, local, 0);
129 dev->destructor = free_netdev;
130
131 sprintf(dev->name, "%s%s", prefix, name);
132 if (!rtnl_locked)
133 rtnl_lock();
134
135 ret = 0;
136 if (strchr(dev->name, '%'))
137 ret = dev_alloc_name(dev, dev->name);
138
139 SET_NETDEV_DEV(dev, mdev->class_dev.dev);
140 if (ret >= 0)
141 ret = register_netdevice(dev);
142
143 if (!rtnl_locked)
144 rtnl_unlock();
145
146 if (ret < 0) {
147 printk(KERN_WARNING "%s: failed to add new netdevice!\n",
148 dev->name);
149 free_netdev(dev);
150 return NULL;
151 }
152
153 printk(KERN_DEBUG "%s: registered netdevice %s\n",
154 mdev->name, dev->name);
155
156 return dev;
157}
158
159
160void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
161 int remove_from_list)
162{
163 struct hostap_interface *iface;
164
165 if (!dev)
166 return;
167
168 iface = netdev_priv(dev);
169
170 if (remove_from_list) {
171 list_del(&iface->list);
172 }
173
174 if (dev == iface->local->ddev)
175 iface->local->ddev = NULL;
176 else if (dev == iface->local->apdev)
177 iface->local->apdev = NULL;
178 else if (dev == iface->local->stadev)
179 iface->local->stadev = NULL;
180
181 if (rtnl_locked)
182 unregister_netdevice(dev);
183 else
184 unregister_netdev(dev);
185
186 /* dev->destructor = free_netdev() will free the device data, including
187 * private data, when removing the device */
188}
189
190
191static inline int prism2_wds_special_addr(u8 *addr)
192{
193 if (addr[0] || addr[1] || addr[2] || addr[3] || addr[4] || addr[5])
194 return 0;
195
196 return 1;
197}
198
199
200static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
201 int rtnl_locked)
202{
203 struct net_device *dev;
204 struct list_head *ptr;
205 struct hostap_interface *iface, *empty, *match;
206
207 empty = match = NULL;
208 read_lock_bh(&local->iface_lock);
209 list_for_each(ptr, &local->hostap_interfaces) {
210 iface = list_entry(ptr, struct hostap_interface, list);
211 if (iface->type != HOSTAP_INTERFACE_WDS)
212 continue;
213
214 if (prism2_wds_special_addr(iface->u.wds.remote_addr))
215 empty = iface;
216 else if (memcmp(iface->u.wds.remote_addr, remote_addr,
217 ETH_ALEN) == 0) {
218 match = iface;
219 break;
220 }
221 }
222 if (!match && empty && !prism2_wds_special_addr(remote_addr)) {
223 /* take pre-allocated entry into use */
224 memcpy(empty->u.wds.remote_addr, remote_addr, ETH_ALEN);
225 read_unlock_bh(&local->iface_lock);
226 printk(KERN_DEBUG "%s: using pre-allocated WDS netdevice %s\n",
227 local->dev->name, empty->dev->name);
228 return 0;
229 }
230 read_unlock_bh(&local->iface_lock);
231
232 if (!prism2_wds_special_addr(remote_addr)) {
233 if (match)
234 return -EEXIST;
235 hostap_add_sta(local->ap, remote_addr);
236 }
237
238 if (local->wds_connections >= local->wds_max_connections)
239 return -ENOBUFS;
240
241 /* verify that there is room for wds# postfix in the interface name */
242 if (strlen(local->dev->name) > IFNAMSIZ - 5) {
243 printk(KERN_DEBUG "'%s' too long base device name\n",
244 local->dev->name);
245 return -EINVAL;
246 }
247
248 dev = hostap_add_interface(local, HOSTAP_INTERFACE_WDS, rtnl_locked,
249 local->ddev->name, "wds%d");
250 if (dev == NULL)
251 return -ENOMEM;
252
253 iface = netdev_priv(dev);
254 memcpy(iface->u.wds.remote_addr, remote_addr, ETH_ALEN);
255
256 local->wds_connections++;
257
258 return 0;
259}
260
261
262static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
263 int rtnl_locked, int do_not_remove)
264{
265 unsigned long flags;
266 struct list_head *ptr;
267 struct hostap_interface *iface, *selected = NULL;
268
269 write_lock_irqsave(&local->iface_lock, flags);
270 list_for_each(ptr, &local->hostap_interfaces) {
271 iface = list_entry(ptr, struct hostap_interface, list);
272 if (iface->type != HOSTAP_INTERFACE_WDS)
273 continue;
274
275 if (memcmp(iface->u.wds.remote_addr, remote_addr,
276 ETH_ALEN) == 0) {
277 selected = iface;
278 break;
279 }
280 }
281 if (selected && !do_not_remove)
282 list_del(&selected->list);
283 write_unlock_irqrestore(&local->iface_lock, flags);
284
285 if (selected) {
286 if (do_not_remove)
287 memset(selected->u.wds.remote_addr, 0, ETH_ALEN);
288 else {
289 hostap_remove_interface(selected->dev, rtnl_locked, 0);
290 local->wds_connections--;
291 }
292 }
293
294 return selected ? 0 : -ENODEV;
295}
296
297
298u16 hostap_tx_callback_register(local_info_t *local,
299 void (*func)(struct sk_buff *, int ok, void *),
300 void *data)
301{
302 unsigned long flags;
303 struct hostap_tx_callback_info *entry;
304
305 entry = (struct hostap_tx_callback_info *) kmalloc(sizeof(*entry),
306 GFP_ATOMIC);
307 if (entry == NULL)
308 return 0;
309
310 entry->func = func;
311 entry->data = data;
312
313 spin_lock_irqsave(&local->lock, flags);
314 entry->idx = local->tx_callback ? local->tx_callback->idx + 1 : 1;
315 entry->next = local->tx_callback;
316 local->tx_callback = entry;
317 spin_unlock_irqrestore(&local->lock, flags);
318
319 return entry->idx;
320}
321
322
323int hostap_tx_callback_unregister(local_info_t *local, u16 idx)
324{
325 unsigned long flags;
326 struct hostap_tx_callback_info *cb, *prev = NULL;
327
328 spin_lock_irqsave(&local->lock, flags);
329 cb = local->tx_callback;
330 while (cb != NULL && cb->idx != idx) {
331 prev = cb;
332 cb = cb->next;
333 }
334 if (cb) {
335 if (prev == NULL)
336 local->tx_callback = cb->next;
337 else
338 prev->next = cb->next;
339 kfree(cb);
340 }
341 spin_unlock_irqrestore(&local->lock, flags);
342
343 return cb ? 0 : -1;
344}
345
346
347/* val is in host byte order */
348int hostap_set_word(struct net_device *dev, int rid, u16 val)
349{
350 struct hostap_interface *iface;
351 u16 tmp = cpu_to_le16(val);
352 iface = netdev_priv(dev);
353 return iface->local->func->set_rid(dev, rid, &tmp, 2);
354}
355
356
357int hostap_set_string(struct net_device *dev, int rid, const char *val)
358{
359 struct hostap_interface *iface;
360 char buf[MAX_SSID_LEN + 2];
361 int len;
362
363 iface = netdev_priv(dev);
364 len = strlen(val);
365 if (len > MAX_SSID_LEN)
366 return -1;
367 memset(buf, 0, sizeof(buf));
368 buf[0] = len; /* little endian 16 bit word */
369 memcpy(buf + 2, val, len);
370
371 return iface->local->func->set_rid(dev, rid, &buf, MAX_SSID_LEN + 2);
372}
373
374
375u16 hostap_get_porttype(local_info_t *local)
376{
377 if (local->iw_mode == IW_MODE_ADHOC && local->pseudo_adhoc)
378 return HFA384X_PORTTYPE_PSEUDO_IBSS;
379 if (local->iw_mode == IW_MODE_ADHOC)
380 return HFA384X_PORTTYPE_IBSS;
381 if (local->iw_mode == IW_MODE_INFRA)
382 return HFA384X_PORTTYPE_BSS;
383 if (local->iw_mode == IW_MODE_REPEAT)
384 return HFA384X_PORTTYPE_WDS;
385 if (local->iw_mode == IW_MODE_MONITOR)
386 return HFA384X_PORTTYPE_PSEUDO_IBSS;
387 return HFA384X_PORTTYPE_HOSTAP;
388}
389
390
391int hostap_set_encryption(local_info_t *local)
392{
393 u16 val, old_val;
394 int i, keylen, len, idx;
395 char keybuf[WEP_KEY_LEN + 1];
396 enum { NONE, WEP, OTHER } encrypt_type;
397
398 idx = local->tx_keyidx;
399 if (local->crypt[idx] == NULL || local->crypt[idx]->ops == NULL)
400 encrypt_type = NONE;
401 else if (strcmp(local->crypt[idx]->ops->name, "WEP") == 0)
402 encrypt_type = WEP;
403 else
404 encrypt_type = OTHER;
405
406 if (local->func->get_rid(local->dev, HFA384X_RID_CNFWEPFLAGS, &val, 2,
407 1) < 0) {
408 printk(KERN_DEBUG "Could not read current WEP flags.\n");
409 goto fail;
410 }
411 le16_to_cpus(&val);
412 old_val = val;
413
414 if (encrypt_type != NONE || local->privacy_invoked)
415 val |= HFA384X_WEPFLAGS_PRIVACYINVOKED;
416 else
417 val &= ~HFA384X_WEPFLAGS_PRIVACYINVOKED;
418
419 if (local->open_wep || encrypt_type == NONE ||
420 ((local->ieee_802_1x || local->wpa) && local->host_decrypt))
421 val &= ~HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED;
422 else
423 val |= HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED;
424
425 if ((encrypt_type != NONE || local->privacy_invoked) &&
426 (encrypt_type == OTHER || local->host_encrypt))
427 val |= HFA384X_WEPFLAGS_HOSTENCRYPT;
428 else
429 val &= ~HFA384X_WEPFLAGS_HOSTENCRYPT;
430 if ((encrypt_type != NONE || local->privacy_invoked) &&
431 (encrypt_type == OTHER || local->host_decrypt))
432 val |= HFA384X_WEPFLAGS_HOSTDECRYPT;
433 else
434 val &= ~HFA384X_WEPFLAGS_HOSTDECRYPT;
435
436 if (val != old_val &&
437 hostap_set_word(local->dev, HFA384X_RID_CNFWEPFLAGS, val)) {
438 printk(KERN_DEBUG "Could not write new WEP flags (0x%x)\n",
439 val);
440 goto fail;
441 }
442
443 if (encrypt_type != WEP)
444 return 0;
445
446 /* 104-bit support seems to require that all the keys are set to the
447 * same keylen */
448 keylen = 6; /* first 5 octets */
449 len = local->crypt[idx]->ops->get_key(keybuf, sizeof(keybuf),
450 NULL, local->crypt[idx]->priv);
451 if (idx >= 0 && idx < WEP_KEYS && len > 5)
452 keylen = WEP_KEY_LEN + 1; /* first 13 octets */
453
454 for (i = 0; i < WEP_KEYS; i++) {
455 memset(keybuf, 0, sizeof(keybuf));
456 if (local->crypt[i]) {
457 (void) local->crypt[i]->ops->get_key(
458 keybuf, sizeof(keybuf),
459 NULL, local->crypt[i]->priv);
460 }
461 if (local->func->set_rid(local->dev,
462 HFA384X_RID_CNFDEFAULTKEY0 + i,
463 keybuf, keylen)) {
464 printk(KERN_DEBUG "Could not set key %d (len=%d)\n",
465 i, keylen);
466 goto fail;
467 }
468 }
469 if (hostap_set_word(local->dev, HFA384X_RID_CNFWEPDEFAULTKEYID, idx)) {
470 printk(KERN_DEBUG "Could not set default keyid %d\n", idx);
471 goto fail;
472 }
473
474 return 0;
475
476 fail:
477 printk(KERN_DEBUG "%s: encryption setup failed\n", local->dev->name);
478 return -1;
479}
480
481
482int hostap_set_antsel(local_info_t *local)
483{
484 u16 val;
485 int ret = 0;
486
487 if (local->antsel_tx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
488 local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
489 HFA386X_CR_TX_CONFIGURE,
490 NULL, &val) == 0) {
491 val &= ~(BIT(2) | BIT(1));
492 switch (local->antsel_tx) {
493 case HOSTAP_ANTSEL_DIVERSITY:
494 val |= BIT(1);
495 break;
496 case HOSTAP_ANTSEL_LOW:
497 break;
498 case HOSTAP_ANTSEL_HIGH:
499 val |= BIT(2);
500 break;
501 }
502
503 if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
504 HFA386X_CR_TX_CONFIGURE, &val, NULL)) {
505 printk(KERN_INFO "%s: setting TX AntSel failed\n",
506 local->dev->name);
507 ret = -1;
508 }
509 }
510
511 if (local->antsel_rx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
512 local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
513 HFA386X_CR_RX_CONFIGURE,
514 NULL, &val) == 0) {
515 val &= ~(BIT(1) | BIT(0));
516 switch (local->antsel_rx) {
517 case HOSTAP_ANTSEL_DIVERSITY:
518 break;
519 case HOSTAP_ANTSEL_LOW:
520 val |= BIT(0);
521 break;
522 case HOSTAP_ANTSEL_HIGH:
523 val |= BIT(0) | BIT(1);
524 break;
525 }
526
527 if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
528 HFA386X_CR_RX_CONFIGURE, &val, NULL)) {
529 printk(KERN_INFO "%s: setting RX AntSel failed\n",
530 local->dev->name);
531 ret = -1;
532 }
533 }
534
535 return ret;
536}
537
538
539int hostap_set_roaming(local_info_t *local)
540{
541 u16 val;
542
543 switch (local->host_roaming) {
544 case 1:
545 val = HFA384X_ROAMING_HOST;
546 break;
547 case 2:
548 val = HFA384X_ROAMING_DISABLED;
549 break;
550 case 0:
551 default:
552 val = HFA384X_ROAMING_FIRMWARE;
553 break;
554 }
555
556 return hostap_set_word(local->dev, HFA384X_RID_CNFROAMINGMODE, val);
557}
558
559
560int hostap_set_auth_algs(local_info_t *local)
561{
562 int val = local->auth_algs;
563 /* At least STA f/w v0.6.2 seems to have issues with cnfAuthentication
564 * set to include both Open and Shared Key flags. It tries to use
565 * Shared Key authentication in that case even if WEP keys are not
566 * configured.. STA f/w v0.7.6 is able to handle such configuration,
567 * but it is unknown when this was fixed between 0.6.2 .. 0.7.6. */
568 if (local->sta_fw_ver < PRISM2_FW_VER(0,7,0) &&
569 val != PRISM2_AUTH_OPEN && val != PRISM2_AUTH_SHARED_KEY)
570 val = PRISM2_AUTH_OPEN;
571
572 if (hostap_set_word(local->dev, HFA384X_RID_CNFAUTHENTICATION, val)) {
573 printk(KERN_INFO "%s: cnfAuthentication setting to 0x%x "
574 "failed\n", local->dev->name, local->auth_algs);
575 return -EINVAL;
576 }
577
578 return 0;
579}
580
581
582void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx)
583{
584 u16 status, fc;
585
586 status = __le16_to_cpu(rx->status);
587
588 printk(KERN_DEBUG "%s: RX status=0x%04x (port=%d, type=%d, "
589 "fcserr=%d) silence=%d signal=%d rate=%d rxflow=%d; "
590 "jiffies=%ld\n",
591 name, status, (status >> 8) & 0x07, status >> 13, status & 1,
592 rx->silence, rx->signal, rx->rate, rx->rxflow, jiffies);
593
594 fc = __le16_to_cpu(rx->frame_control);
595 printk(KERN_DEBUG " FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x "
596 "data_len=%d%s%s\n",
597 fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
598 __le16_to_cpu(rx->duration_id), __le16_to_cpu(rx->seq_ctrl),
599 __le16_to_cpu(rx->data_len),
600 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
601 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
602
603 printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4="
604 MACSTR "\n",
605 MAC2STR(rx->addr1), MAC2STR(rx->addr2), MAC2STR(rx->addr3),
606 MAC2STR(rx->addr4));
607
608 printk(KERN_DEBUG " dst=" MACSTR " src=" MACSTR " len=%d\n",
609 MAC2STR(rx->dst_addr), MAC2STR(rx->src_addr),
610 __be16_to_cpu(rx->len));
611}
612
613
614void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
615{
616 u16 fc;
617
618 printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d "
619 "tx_control=0x%04x; jiffies=%ld\n",
620 name, __le16_to_cpu(tx->status), tx->retry_count, tx->tx_rate,
621 __le16_to_cpu(tx->tx_control), jiffies);
622
623 fc = __le16_to_cpu(tx->frame_control);
624 printk(KERN_DEBUG " FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x "
625 "data_len=%d%s%s\n",
626 fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
627 __le16_to_cpu(tx->duration_id), __le16_to_cpu(tx->seq_ctrl),
628 __le16_to_cpu(tx->data_len),
629 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
630 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
631
632 printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4="
633 MACSTR "\n",
634 MAC2STR(tx->addr1), MAC2STR(tx->addr2), MAC2STR(tx->addr3),
635 MAC2STR(tx->addr4));
636
637 printk(KERN_DEBUG " dst=" MACSTR " src=" MACSTR " len=%d\n",
638 MAC2STR(tx->dst_addr), MAC2STR(tx->src_addr),
639 __be16_to_cpu(tx->len));
640}
641
642
643int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr)
644{
645 memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */
646 return ETH_ALEN;
647}
648
649
650int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr)
651{
652 if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) {
653 memcpy(haddr, skb->mac.raw +
654 sizeof(struct linux_wlan_ng_prism_hdr) + 10,
655 ETH_ALEN); /* addr2 */
656 } else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */
657 memcpy(haddr, skb->mac.raw +
658 sizeof(struct linux_wlan_ng_cap_hdr) + 10,
659 ETH_ALEN); /* addr2 */
660 }
661 return ETH_ALEN;
662}
663
664
665int hostap_80211_get_hdrlen(u16 fc)
666{
667 int hdrlen = 24;
668
669 switch (WLAN_FC_GET_TYPE(fc)) {
670 case IEEE80211_FTYPE_DATA:
671 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
672 hdrlen = 30; /* Addr4 */
673 break;
674 case IEEE80211_FTYPE_CTL:
675 switch (WLAN_FC_GET_STYPE(fc)) {
676 case IEEE80211_STYPE_CTS:
677 case IEEE80211_STYPE_ACK:
678 hdrlen = 10;
679 break;
680 default:
681 hdrlen = 16;
682 break;
683 }
684 break;
685 }
686
687 return hdrlen;
688}
689
690
691struct net_device_stats *hostap_get_stats(struct net_device *dev)
692{
693 struct hostap_interface *iface;
694 iface = netdev_priv(dev);
695 return &iface->stats;
696}
697
698
699static int prism2_close(struct net_device *dev)
700{
701 struct hostap_interface *iface;
702 local_info_t *local;
703
704 PDEBUG(DEBUG_FLOW, "%s: prism2_close\n", dev->name);
705
706 iface = netdev_priv(dev);
707 local = iface->local;
708
709 if (dev == local->ddev) {
710 prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING);
711 }
712#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
713 if (!local->hostapd && dev == local->dev &&
714 (!local->func->card_present || local->func->card_present(local)) &&
715 local->hw_ready && local->ap && local->iw_mode == IW_MODE_MASTER)
716 hostap_deauth_all_stas(dev, local->ap, 1);
717#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
718
719 if (local->func->dev_close && local->func->dev_close(local))
720 return 0;
721
722 if (dev == local->dev) {
723 local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL);
724 }
725
726 if (netif_running(dev)) {
727 netif_stop_queue(dev);
728 netif_device_detach(dev);
729 }
730
731 flush_scheduled_work();
732
733 module_put(local->hw_module);
734
735 local->num_dev_open--;
736
737 if (dev != local->dev && local->dev->flags & IFF_UP &&
738 local->master_dev_auto_open && local->num_dev_open == 1) {
739 /* Close master radio interface automatically if it was also
740 * opened automatically and we are now closing the last
741 * remaining non-master device. */
742 dev_close(local->dev);
743 }
744
745 return 0;
746}
747
748
749static int prism2_open(struct net_device *dev)
750{
751 struct hostap_interface *iface;
752 local_info_t *local;
753
754 PDEBUG(DEBUG_FLOW, "%s: prism2_open\n", dev->name);
755
756 iface = netdev_priv(dev);
757 local = iface->local;
758
759 if (local->no_pri) {
760 printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
761 "f/w\n", dev->name);
762 return 1;
763 }
764
765 if ((local->func->card_present && !local->func->card_present(local)) ||
766 local->hw_downloading)
767 return -ENODEV;
768
769 if (local->func->dev_open && local->func->dev_open(local))
770 return 1;
771
772 if (!try_module_get(local->hw_module))
773 return -ENODEV;
774 local->num_dev_open++;
775
776 if (!local->dev_enabled && local->func->hw_enable(dev, 1)) {
777 printk(KERN_WARNING "%s: could not enable MAC port\n",
778 dev->name);
779 prism2_close(dev);
780 return 1;
781 }
782 if (!local->dev_enabled)
783 prism2_callback(local, PRISM2_CALLBACK_ENABLE);
784 local->dev_enabled = 1;
785
786 if (dev != local->dev && !(local->dev->flags & IFF_UP)) {
787 /* Master radio interface is needed for all operation, so open
788 * it automatically when any virtual net_device is opened. */
789 local->master_dev_auto_open = 1;
790 dev_open(local->dev);
791 }
792
793 netif_device_attach(dev);
794 netif_start_queue(dev);
795
796 return 0;
797}
798
799
800static int prism2_set_mac_address(struct net_device *dev, void *p)
801{
802 struct hostap_interface *iface;
803 local_info_t *local;
804 struct list_head *ptr;
805 struct sockaddr *addr = p;
806
807 iface = netdev_priv(dev);
808 local = iface->local;
809
810 if (local->func->set_rid(dev, HFA384X_RID_CNFOWNMACADDR, addr->sa_data,
811 ETH_ALEN) < 0 || local->func->reset_port(dev))
812 return -EINVAL;
813
814 read_lock_bh(&local->iface_lock);
815 list_for_each(ptr, &local->hostap_interfaces) {
816 iface = list_entry(ptr, struct hostap_interface, list);
817 memcpy(iface->dev->dev_addr, addr->sa_data, ETH_ALEN);
818 }
819 memcpy(local->dev->dev_addr, addr->sa_data, ETH_ALEN);
820 read_unlock_bh(&local->iface_lock);
821
822 return 0;
823}
824
825
826/* TODO: to be further implemented as soon as Prism2 fully supports
827 * GroupAddresses and correct documentation is available */
828void hostap_set_multicast_list_queue(void *data)
829{
830 struct net_device *dev = (struct net_device *) data;
831 struct hostap_interface *iface;
832 local_info_t *local;
833
834 iface = netdev_priv(dev);
835 local = iface->local;
836 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
837 local->is_promisc)) {
838 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
839 dev->name, local->is_promisc ? "en" : "dis");
840 }
841}
842
843
844static void hostap_set_multicast_list(struct net_device *dev)
845{
846#if 0
847 /* FIX: promiscuous mode seems to be causing a lot of problems with
848 * some station firmware versions (FCSErr frames, invalid MACPort, etc.
849 * corrupted incoming frames). This code is now commented out while the
850 * problems are investigated. */
851 struct hostap_interface *iface;
852 local_info_t *local;
853
854 iface = netdev_priv(dev);
855 local = iface->local;
856 if ((dev->flags & IFF_ALLMULTI) || (dev->flags & IFF_PROMISC)) {
857 local->is_promisc = 1;
858 } else {
859 local->is_promisc = 0;
860 }
861
862 schedule_work(&local->set_multicast_list_queue);
863#endif
864}
865
866
867static int prism2_change_mtu(struct net_device *dev, int new_mtu)
868{
869 if (new_mtu < PRISM2_MIN_MTU || new_mtu > PRISM2_MAX_MTU)
870 return -EINVAL;
871
872 dev->mtu = new_mtu;
873 return 0;
874}
875
876
877static void prism2_tx_timeout(struct net_device *dev)
878{
879 struct hostap_interface *iface;
880 local_info_t *local;
881 struct hfa384x_regs regs;
882
883 iface = netdev_priv(dev);
884 local = iface->local;
885
886 printk(KERN_WARNING "%s Tx timed out! Resetting card\n", dev->name);
887 netif_stop_queue(local->dev);
888
889 local->func->read_regs(dev, &regs);
890 printk(KERN_DEBUG "%s: CMD=%04x EVSTAT=%04x "
891 "OFFSET0=%04x OFFSET1=%04x SWSUPPORT0=%04x\n",
892 dev->name, regs.cmd, regs.evstat, regs.offset0, regs.offset1,
893 regs.swsupport0);
894
895 local->func->schedule_reset(local);
896}
897
898
899void hostap_setup_dev(struct net_device *dev, local_info_t *local,
900 int main_dev)
901{
902 struct hostap_interface *iface;
903
904 iface = netdev_priv(dev);
905 ether_setup(dev);
906
907 /* kernel callbacks */
908 dev->get_stats = hostap_get_stats;
909 if (iface) {
910 /* Currently, we point to the proper spy_data only on
911 * the main_dev. This could be fixed. Jean II */
912 iface->wireless_data.spy_data = &iface->spy_data;
913 dev->wireless_data = &iface->wireless_data;
914 }
915 dev->wireless_handlers =
916 (struct iw_handler_def *) &hostap_iw_handler_def;
917 dev->do_ioctl = hostap_ioctl;
918 dev->open = prism2_open;
919 dev->stop = prism2_close;
920 dev->hard_start_xmit = hostap_data_start_xmit;
921 dev->set_mac_address = prism2_set_mac_address;
922 dev->set_multicast_list = hostap_set_multicast_list;
923 dev->change_mtu = prism2_change_mtu;
924 dev->tx_timeout = prism2_tx_timeout;
925 dev->watchdog_timeo = TX_TIMEOUT;
926
927 dev->mtu = local->mtu;
928 if (!main_dev) {
929 /* use main radio device queue */
930 dev->tx_queue_len = 0;
931 }
932
933 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
934
935 netif_stop_queue(dev);
936}
937
938
939static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
940{
941 struct net_device *dev = local->dev;
942
943 if (local->apdev)
944 return -EEXIST;
945
946 printk(KERN_DEBUG "%s: enabling hostapd mode\n", dev->name);
947
948 local->apdev = hostap_add_interface(local, HOSTAP_INTERFACE_AP,
949 rtnl_locked, local->ddev->name,
950 "ap");
951 if (local->apdev == NULL)
952 return -ENOMEM;
953
954 local->apdev->hard_start_xmit = hostap_mgmt_start_xmit;
955 local->apdev->type = ARPHRD_IEEE80211;
956 local->apdev->hard_header_parse = hostap_80211_header_parse;
957
958 return 0;
959}
960
961
962static int hostap_disable_hostapd(local_info_t *local, int rtnl_locked)
963{
964 struct net_device *dev = local->dev;
965
966 printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name);
967
968 hostap_remove_interface(local->apdev, rtnl_locked, 1);
969 local->apdev = NULL;
970
971 return 0;
972}
973
974
975static int hostap_enable_hostapd_sta(local_info_t *local, int rtnl_locked)
976{
977 struct net_device *dev = local->dev;
978
979 if (local->stadev)
980 return -EEXIST;
981
982 printk(KERN_DEBUG "%s: enabling hostapd STA mode\n", dev->name);
983
984 local->stadev = hostap_add_interface(local, HOSTAP_INTERFACE_STA,
985 rtnl_locked, local->ddev->name,
986 "sta");
987 if (local->stadev == NULL)
988 return -ENOMEM;
989
990 return 0;
991}
992
993
994static int hostap_disable_hostapd_sta(local_info_t *local, int rtnl_locked)
995{
996 struct net_device *dev = local->dev;
997
998 printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name);
999
1000 hostap_remove_interface(local->stadev, rtnl_locked, 1);
1001 local->stadev = NULL;
1002
1003 return 0;
1004}
1005
1006
1007int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked)
1008{
1009 int ret;
1010
1011 if (val < 0 || val > 1)
1012 return -EINVAL;
1013
1014 if (local->hostapd == val)
1015 return 0;
1016
1017 if (val) {
1018 ret = hostap_enable_hostapd(local, rtnl_locked);
1019 if (ret == 0)
1020 local->hostapd = 1;
1021 } else {
1022 local->hostapd = 0;
1023 ret = hostap_disable_hostapd(local, rtnl_locked);
1024 if (ret != 0)
1025 local->hostapd = 1;
1026 }
1027
1028 return ret;
1029}
1030
1031
1032int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked)
1033{
1034 int ret;
1035
1036 if (val < 0 || val > 1)
1037 return -EINVAL;
1038
1039 if (local->hostapd_sta == val)
1040 return 0;
1041
1042 if (val) {
1043 ret = hostap_enable_hostapd_sta(local, rtnl_locked);
1044 if (ret == 0)
1045 local->hostapd_sta = 1;
1046 } else {
1047 local->hostapd_sta = 0;
1048 ret = hostap_disable_hostapd_sta(local, rtnl_locked);
1049 if (ret != 0)
1050 local->hostapd_sta = 1;
1051 }
1052
1053
1054 return ret;
1055}
1056
1057
1058int prism2_update_comms_qual(struct net_device *dev)
1059{
1060 struct hostap_interface *iface;
1061 local_info_t *local;
1062 int ret = 0;
1063 struct hfa384x_comms_quality sq;
1064
1065 iface = netdev_priv(dev);
1066 local = iface->local;
1067 if (!local->sta_fw_ver)
1068 ret = -1;
1069 else if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
1070 if (local->func->get_rid(local->dev,
1071 HFA384X_RID_DBMCOMMSQUALITY,
1072 &sq, sizeof(sq), 1) >= 0) {
1073 local->comms_qual = (s16) le16_to_cpu(sq.comm_qual);
1074 local->avg_signal = (s16) le16_to_cpu(sq.signal_level);
1075 local->avg_noise = (s16) le16_to_cpu(sq.noise_level);
1076 local->last_comms_qual_update = jiffies;
1077 } else
1078 ret = -1;
1079 } else {
1080 if (local->func->get_rid(local->dev, HFA384X_RID_COMMSQUALITY,
1081 &sq, sizeof(sq), 1) >= 0) {
1082 local->comms_qual = le16_to_cpu(sq.comm_qual);
1083 local->avg_signal = HFA384X_LEVEL_TO_dBm(
1084 le16_to_cpu(sq.signal_level));
1085 local->avg_noise = HFA384X_LEVEL_TO_dBm(
1086 le16_to_cpu(sq.noise_level));
1087 local->last_comms_qual_update = jiffies;
1088 } else
1089 ret = -1;
1090 }
1091
1092 return ret;
1093}
1094
1095
1096int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
1097 u8 *body, size_t bodylen)
1098{
1099 struct sk_buff *skb;
1100 struct hostap_ieee80211_mgmt *mgmt;
1101 struct hostap_skb_tx_data *meta;
1102 struct net_device *dev = local->dev;
1103
1104 skb = dev_alloc_skb(IEEE80211_MGMT_HDR_LEN + bodylen);
1105 if (skb == NULL)
1106 return -ENOMEM;
1107
1108 mgmt = (struct hostap_ieee80211_mgmt *)
1109 skb_put(skb, IEEE80211_MGMT_HDR_LEN);
1110 memset(mgmt, 0, IEEE80211_MGMT_HDR_LEN);
1111 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
1112 memcpy(mgmt->da, dst, ETH_ALEN);
1113 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1114 memcpy(mgmt->bssid, dst, ETH_ALEN);
1115 if (body)
1116 memcpy(skb_put(skb, bodylen), body, bodylen);
1117
1118 meta = (struct hostap_skb_tx_data *) skb->cb;
1119 memset(meta, 0, sizeof(*meta));
1120 meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
1121 meta->iface = netdev_priv(dev);
1122
1123 skb->dev = dev;
1124 skb->mac.raw = skb->nh.raw = skb->data;
1125 dev_queue_xmit(skb);
1126
1127 return 0;
1128}
1129
1130
1131int prism2_sta_deauth(local_info_t *local, u16 reason)
1132{
1133 union iwreq_data wrqu;
1134 int ret;
1135
1136 if (local->iw_mode != IW_MODE_INFRA ||
1137 memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 ||
1138 memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
1139 return 0;
1140
1141 reason = cpu_to_le16(reason);
1142 ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
1143 (u8 *) &reason, 2);
1144 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1145 wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
1146 return ret;
1147}
1148
1149
1150struct proc_dir_entry *hostap_proc;
1151
1152static int __init hostap_init(void)
1153{
1154 if (proc_net != NULL) {
1155 hostap_proc = proc_mkdir("hostap", proc_net);
1156 if (!hostap_proc)
1157 printk(KERN_WARNING "Failed to mkdir "
1158 "/proc/net/hostap\n");
1159 } else
1160 hostap_proc = NULL;
1161
1162 return 0;
1163}
1164
1165
1166static void __exit hostap_exit(void)
1167{
1168 if (hostap_proc != NULL) {
1169 hostap_proc = NULL;
1170 remove_proc_entry("hostap", proc_net);
1171 }
1172}
1173
1174
1175EXPORT_SYMBOL(hostap_set_word);
1176EXPORT_SYMBOL(hostap_set_string);
1177EXPORT_SYMBOL(hostap_get_porttype);
1178EXPORT_SYMBOL(hostap_set_encryption);
1179EXPORT_SYMBOL(hostap_set_antsel);
1180EXPORT_SYMBOL(hostap_set_roaming);
1181EXPORT_SYMBOL(hostap_set_auth_algs);
1182EXPORT_SYMBOL(hostap_dump_rx_header);
1183EXPORT_SYMBOL(hostap_dump_tx_header);
1184EXPORT_SYMBOL(hostap_80211_header_parse);
1185EXPORT_SYMBOL(hostap_80211_prism_header_parse);
1186EXPORT_SYMBOL(hostap_80211_get_hdrlen);
1187EXPORT_SYMBOL(hostap_get_stats);
1188EXPORT_SYMBOL(hostap_setup_dev);
1189EXPORT_SYMBOL(hostap_proc);
1190EXPORT_SYMBOL(hostap_set_multicast_list_queue);
1191EXPORT_SYMBOL(hostap_set_hostapd);
1192EXPORT_SYMBOL(hostap_set_hostapd_sta);
1193EXPORT_SYMBOL(hostap_add_interface);
1194EXPORT_SYMBOL(hostap_remove_interface);
1195EXPORT_SYMBOL(prism2_update_comms_qual);
1196
1197module_init(hostap_init);
1198module_exit(hostap_exit);
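Of the helpers exported above, the TX-callback registry (hostap_tx_callback_register()/hostap_tx_callback_unregister()) hands out a non-zero index that the caller later passes back to unregister, with 0 signalling an allocation failure. A short usage sketch, assuming a hypothetical consumer module; the callback name and its data pointer are illustrative only:

static void example_tx_cb(struct sk_buff *skb, int ok, void *data)
{
	/* 'ok' reports whether the frame made it out; 'data' is the
	 * pointer supplied at registration time */
}

static u16 example_cb_idx;

static void example_attach(local_info_t *local)
{
	example_cb_idx = hostap_tx_callback_register(local, example_tx_cb, NULL);
	if (example_cb_idx == 0)
		printk(KERN_DEBUG "example: tx callback registration failed\n");
}

static void example_detach(local_info_t *local)
{
	if (example_cb_idx)
		hostap_tx_callback_unregister(local, example_cb_idx);
}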
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
new file mode 100644
index 000000000000..5fac89b8ce3a
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -0,0 +1,57 @@
1#ifndef HOSTAP_H
2#define HOSTAP_H
3
4/* hostap.c */
5
6extern struct proc_dir_entry *hostap_proc;
7
8u16 hostap_tx_callback_register(local_info_t *local,
9 void (*func)(struct sk_buff *, int ok, void *),
10 void *data);
11int hostap_tx_callback_unregister(local_info_t *local, u16 idx);
12int hostap_set_word(struct net_device *dev, int rid, u16 val);
13int hostap_set_string(struct net_device *dev, int rid, const char *val);
14u16 hostap_get_porttype(local_info_t *local);
15int hostap_set_encryption(local_info_t *local);
16int hostap_set_antsel(local_info_t *local);
17int hostap_set_roaming(local_info_t *local);
18int hostap_set_auth_algs(local_info_t *local);
19void hostap_dump_rx_header(const char *name,
20 const struct hfa384x_rx_frame *rx);
21void hostap_dump_tx_header(const char *name,
22 const struct hfa384x_tx_frame *tx);
23int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr);
24int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr);
25int hostap_80211_get_hdrlen(u16 fc);
26struct net_device_stats *hostap_get_stats(struct net_device *dev);
27void hostap_setup_dev(struct net_device *dev, local_info_t *local,
28 int main_dev);
29void hostap_set_multicast_list_queue(void *data);
30int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
31int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
32void hostap_cleanup(local_info_t *local);
33void hostap_cleanup_handler(void *data);
34struct net_device * hostap_add_interface(struct local_info *local,
35 int type, int rtnl_locked,
36 const char *prefix, const char *name);
37void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
38 int remove_from_list);
39int prism2_update_comms_qual(struct net_device *dev);
40int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
41 u8 *body, size_t bodylen);
42int prism2_sta_deauth(local_info_t *local, u16 reason);
43
44
45/* hostap_proc.c */
46
47void hostap_init_proc(local_info_t *local);
48void hostap_remove_proc(local_info_t *local);
49
50
51/* hostap_info.c */
52
53void hostap_info_init(local_info_t *local);
54void hostap_info_process(local_info_t *local, struct sk_buff *skb);
55
56
57#endif /* HOSTAP_H */
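Among the declarations above, hostap_80211_get_hdrlen() is the one the RX path leans on most; its return value depends only on the 802.11 frame-control word. A brief illustration of the mapping, read off the function body earlier in this patch (the 'hdr' variable is hypothetical):

	/* management frames                    -> 24-byte header
	 * data frames (at most one of ToDS/FromDS) -> 24-byte header
	 * data frames with ToDS and FromDS set -> 30 bytes (4-address WDS header)
	 * control frames: CTS, ACK             -> 10 bytes
	 * other control frames                 -> 16 bytes
	 */
	u16 fc = le16_to_cpu(hdr->frame_ctl);	/* 'hdr' points at a received ieee80211_hdr */
	int hdrlen = hostap_80211_get_hdrlen(fc);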
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
new file mode 100644
index 000000000000..bf506f50d722
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -0,0 +1,96 @@
1#ifndef HOSTAP_80211_H
2#define HOSTAP_80211_H
3
4struct hostap_ieee80211_mgmt {
5 u16 frame_control;
6 u16 duration;
7 u8 da[6];
8 u8 sa[6];
9 u8 bssid[6];
10 u16 seq_ctrl;
11 union {
12 struct {
13 u16 auth_alg;
14 u16 auth_transaction;
15 u16 status_code;
16 /* possibly followed by Challenge text */
17 u8 variable[0];
18 } __attribute__ ((packed)) auth;
19 struct {
20 u16 reason_code;
21 } __attribute__ ((packed)) deauth;
22 struct {
23 u16 capab_info;
24 u16 listen_interval;
25 /* followed by SSID and Supported rates */
26 u8 variable[0];
27 } __attribute__ ((packed)) assoc_req;
28 struct {
29 u16 capab_info;
30 u16 status_code;
31 u16 aid;
32 /* followed by Supported rates */
33 u8 variable[0];
34 } __attribute__ ((packed)) assoc_resp, reassoc_resp;
35 struct {
36 u16 capab_info;
37 u16 listen_interval;
38 u8 current_ap[6];
39 /* followed by SSID and Supported rates */
40 u8 variable[0];
41 } __attribute__ ((packed)) reassoc_req;
42 struct {
43 u16 reason_code;
44 } __attribute__ ((packed)) disassoc;
45 struct {
46 } __attribute__ ((packed)) probe_req;
47 struct {
48 u8 timestamp[8];
49 u16 beacon_int;
50 u16 capab_info;
51 /* followed by some of SSID, Supported rates,
52 * FH Params, DS Params, CF Params, IBSS Params, TIM */
53 u8 variable[0];
54 } __attribute__ ((packed)) beacon, probe_resp;
55 } u;
56} __attribute__ ((packed));
57
58
59#define IEEE80211_MGMT_HDR_LEN 24
60#define IEEE80211_DATA_HDR3_LEN 24
61#define IEEE80211_DATA_HDR4_LEN 30
62
63
64struct hostap_80211_rx_status {
65 u32 mac_time;
66 u8 signal;
67 u8 noise;
68 u16 rate; /* in 100 kbps */
69};
70
71
72void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
73 struct hostap_80211_rx_status *rx_stats);
74
75
76/* prism2_rx_80211 'type' argument */
77enum {
78 PRISM2_RX_MONITOR, PRISM2_RX_MGMT, PRISM2_RX_NON_ASSOC,
79 PRISM2_RX_NULLFUNC_ACK
80};
81
82int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
83 struct hostap_80211_rx_status *rx_stats, int type);
84void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
85 struct hostap_80211_rx_status *rx_stats);
86void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
87 struct hostap_80211_rx_status *rx_stats);
88
89void hostap_dump_tx_80211(const char *name, struct sk_buff *skb);
90int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev);
91int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev);
92struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
93 struct ieee80211_crypt_data *crypt);
94int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
95
96#endif /* HOSTAP_80211_H */
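struct hostap_ieee80211_mgmt above mirrors the on-air layout of a management frame: a fixed 24-byte header (IEEE80211_MGMT_HDR_LEN) followed by a subtype-specific body in the union. As a sketch of how the union is meant to be filled in, here is a hypothetical deauthentication frame along the lines of prism2_sta_send_mgmt() earlier in this patch; 'skb', 'dst' and 'bssid' are assumed to be set up by the caller:

	struct hostap_ieee80211_mgmt *mgmt;

	mgmt = (struct hostap_ieee80211_mgmt *)
		skb_put(skb, IEEE80211_MGMT_HDR_LEN + 2);
	memset(mgmt, 0, IEEE80211_MGMT_HDR_LEN + 2);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_DEAUTH);
	memcpy(mgmt->da, dst, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	memcpy(mgmt->bssid, bssid, ETH_ALEN);
	/* the two-octet body carries the reason code */
	mgmt->u.deauth.reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);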
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
new file mode 100644
index 000000000000..b0501243b175
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -0,0 +1,1091 @@
1#include <linux/etherdevice.h>
2
3#include "hostap_80211.h"
4#include "hostap.h"
5
6void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
7 struct hostap_80211_rx_status *rx_stats)
8{
9 struct ieee80211_hdr *hdr;
10 u16 fc;
11
12 hdr = (struct ieee80211_hdr *) skb->data;
13
14 printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
15 "jiffies=%ld\n",
16 name, rx_stats->signal, rx_stats->noise, rx_stats->rate,
17 skb->len, jiffies);
18
19 if (skb->len < 2)
20 return;
21
22 fc = le16_to_cpu(hdr->frame_ctl);
23 printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s",
24 fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
25 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
26 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
27
28 if (skb->len < IEEE80211_DATA_HDR3_LEN) {
29 printk("\n");
30 return;
31 }
32
33 printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
34 le16_to_cpu(hdr->seq_ctl));
35
36 printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR,
37 MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3));
38 if (skb->len >= 30)
39 printk(" A4=" MACSTR, MAC2STR(hdr->addr4));
40 printk("\n");
41}
42
43
44/* Send RX frame to netif with 802.11 (and possible prism) header.
45 * Called from hardware or software IRQ context. */
46int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
47 struct hostap_80211_rx_status *rx_stats, int type)
48{
49 struct hostap_interface *iface;
50 local_info_t *local;
51 int hdrlen, phdrlen, head_need, tail_need;
52 u16 fc;
53 int prism_header, ret;
54 struct ieee80211_hdr *hdr;
55
56 iface = netdev_priv(dev);
57 local = iface->local;
58 dev->last_rx = jiffies;
59
60 if (dev->type == ARPHRD_IEEE80211_PRISM) {
61 if (local->monitor_type == PRISM2_MONITOR_PRISM) {
62 prism_header = 1;
63 phdrlen = sizeof(struct linux_wlan_ng_prism_hdr);
64 } else { /* local->monitor_type == PRISM2_MONITOR_CAPHDR */
65 prism_header = 2;
66 phdrlen = sizeof(struct linux_wlan_ng_cap_hdr);
67 }
68 } else {
69 prism_header = 0;
70 phdrlen = 0;
71 }
72
73 hdr = (struct ieee80211_hdr *) skb->data;
74 fc = le16_to_cpu(hdr->frame_ctl);
75
76 if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
77 printk(KERN_DEBUG "%s: dropped management frame with header "
78 "version %d\n", dev->name, fc & IEEE80211_FCTL_VERS);
79 dev_kfree_skb_any(skb);
80 return 0;
81 }
82
83 hdrlen = hostap_80211_get_hdrlen(fc);
84
85 /* check if there is enough room for extra data; if not, expand skb
86 * buffer to be large enough for the changes */
87 head_need = phdrlen;
88 tail_need = 0;
89#ifdef PRISM2_ADD_BOGUS_CRC
90 tail_need += 4;
91#endif /* PRISM2_ADD_BOGUS_CRC */
92
93 head_need -= skb_headroom(skb);
94 tail_need -= skb_tailroom(skb);
95
96 if (head_need > 0 || tail_need > 0) {
97 if (pskb_expand_head(skb, head_need > 0 ? head_need : 0,
98 tail_need > 0 ? tail_need : 0,
99 GFP_ATOMIC)) {
100 printk(KERN_DEBUG "%s: prism2_rx_80211 failed to "
101 "reallocate skb buffer\n", dev->name);
102 dev_kfree_skb_any(skb);
103 return 0;
104 }
105 }
106
107 /* We now have an skb with enough head and tail room, so just insert
108 * the extra data */
109
110#ifdef PRISM2_ADD_BOGUS_CRC
111 memset(skb_put(skb, 4), 0xff, 4); /* Prism2 strips CRC */
112#endif /* PRISM2_ADD_BOGUS_CRC */
113
114 if (prism_header == 1) {
115 struct linux_wlan_ng_prism_hdr *hdr;
116 hdr = (struct linux_wlan_ng_prism_hdr *)
117 skb_push(skb, phdrlen);
118 memset(hdr, 0, phdrlen);
119 hdr->msgcode = LWNG_CAP_DID_BASE;
120 hdr->msglen = sizeof(*hdr);
121 memcpy(hdr->devname, dev->name, sizeof(hdr->devname));
122#define LWNG_SETVAL(f,i,s,l,d) \
123hdr->f.did = LWNG_CAP_DID_BASE | (i << 12); \
124hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
125 LWNG_SETVAL(hosttime, 1, 0, 4, jiffies);
126 LWNG_SETVAL(mactime, 2, 0, 4, rx_stats->mac_time);
127 LWNG_SETVAL(channel, 3, 1 /* no value */, 4, 0);
128 LWNG_SETVAL(rssi, 4, 1 /* no value */, 4, 0);
129 LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0);
130 LWNG_SETVAL(signal, 6, 0, 4, rx_stats->signal);
131 LWNG_SETVAL(noise, 7, 0, 4, rx_stats->noise);
132 LWNG_SETVAL(rate, 8, 0, 4, rx_stats->rate / 5);
133 LWNG_SETVAL(istx, 9, 0, 4, 0);
134 LWNG_SETVAL(frmlen, 10, 0, 4, skb->len - phdrlen);
135#undef LWNG_SETVAL
136 } else if (prism_header == 2) {
137 struct linux_wlan_ng_cap_hdr *hdr;
138 hdr = (struct linux_wlan_ng_cap_hdr *)
139 skb_push(skb, phdrlen);
140 memset(hdr, 0, phdrlen);
141 hdr->version = htonl(LWNG_CAPHDR_VERSION);
142 hdr->length = htonl(phdrlen);
143 hdr->mactime = __cpu_to_be64(rx_stats->mac_time);
144 hdr->hosttime = __cpu_to_be64(jiffies);
145 hdr->phytype = htonl(4); /* dss_dot11_b */
146 hdr->channel = htonl(local->channel);
147 hdr->datarate = htonl(rx_stats->rate);
148 hdr->antenna = htonl(0); /* unknown */
149 hdr->priority = htonl(0); /* unknown */
150 hdr->ssi_type = htonl(3); /* raw */
151 hdr->ssi_signal = htonl(rx_stats->signal);
152 hdr->ssi_noise = htonl(rx_stats->noise);
153 hdr->preamble = htonl(0); /* unknown */
154 hdr->encoding = htonl(1); /* cck */
155 }
156
157 ret = skb->len - phdrlen;
158 skb->dev = dev;
159 skb->mac.raw = skb->data;
160 skb_pull(skb, hdrlen);
161 if (prism_header)
162 skb_pull(skb, phdrlen);
163 skb->pkt_type = PACKET_OTHERHOST;
164 skb->protocol = __constant_htons(ETH_P_802_2);
165 memset(skb->cb, 0, sizeof(skb->cb));
166 netif_rx(skb);
167
168 return ret;
169}
170
171
172/* Called only as a tasklet (software IRQ) */
173static void monitor_rx(struct net_device *dev, struct sk_buff *skb,
174 struct hostap_80211_rx_status *rx_stats)
175{
176 struct net_device_stats *stats;
177 int len;
178
179 len = prism2_rx_80211(dev, skb, rx_stats, PRISM2_RX_MONITOR);
180 stats = hostap_get_stats(dev);
181 stats->rx_packets++;
182 stats->rx_bytes += len;
183}
184
185
186/* Called only as a tasklet (software IRQ) */
187static struct prism2_frag_entry *
188prism2_frag_cache_find(local_info_t *local, unsigned int seq,
189 unsigned int frag, u8 *src, u8 *dst)
190{
191 struct prism2_frag_entry *entry;
192 int i;
193
194 for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
195 entry = &local->frag_cache[i];
196 if (entry->skb != NULL &&
197 time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
198 printk(KERN_DEBUG "%s: expiring fragment cache entry "
199 "seq=%u last_frag=%u\n",
200 local->dev->name, entry->seq, entry->last_frag);
201 dev_kfree_skb(entry->skb);
202 entry->skb = NULL;
203 }
204
205 if (entry->skb != NULL && entry->seq == seq &&
206 (entry->last_frag + 1 == frag || frag == -1) &&
207 memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
208 memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
209 return entry;
210 }
211
212 return NULL;
213}
214
215
216/* Called only as a tasklet (software IRQ) */
217static struct sk_buff *
218prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
219{
220 struct sk_buff *skb = NULL;
221 u16 sc;
222 unsigned int frag, seq;
223 struct prism2_frag_entry *entry;
224
225 sc = le16_to_cpu(hdr->seq_ctl);
226 frag = WLAN_GET_SEQ_FRAG(sc);
227 seq = WLAN_GET_SEQ_SEQ(sc) >> 4;
228
229 if (frag == 0) {
230 /* Reserve enough space to fit maximum frame length */
231 skb = dev_alloc_skb(local->dev->mtu +
232 sizeof(struct ieee80211_hdr) +
233 8 /* LLC */ +
234 2 /* alignment */ +
235 8 /* WEP */ + ETH_ALEN /* WDS */);
236 if (skb == NULL)
237 return NULL;
238
239 entry = &local->frag_cache[local->frag_next_idx];
240 local->frag_next_idx++;
241 if (local->frag_next_idx >= PRISM2_FRAG_CACHE_LEN)
242 local->frag_next_idx = 0;
243
244 if (entry->skb != NULL)
245 dev_kfree_skb(entry->skb);
246
247 entry->first_frag_time = jiffies;
248 entry->seq = seq;
249 entry->last_frag = frag;
250 entry->skb = skb;
251 memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
252 memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
253 } else {
254 /* received a fragment of a frame for which the head fragment
255 * should have already been received */
256 entry = prism2_frag_cache_find(local, seq, frag, hdr->addr2,
257 hdr->addr1);
258 if (entry != NULL) {
259 entry->last_frag = frag;
260 skb = entry->skb;
261 }
262 }
263
264 return skb;
265}
266
267
268/* Called only as a tasklet (software IRQ) */
269static int prism2_frag_cache_invalidate(local_info_t *local,
270 struct ieee80211_hdr *hdr)
271{
272 u16 sc;
273 unsigned int seq;
274 struct prism2_frag_entry *entry;
275
276 sc = le16_to_cpu(hdr->seq_ctl);
277 seq = WLAN_GET_SEQ_SEQ(sc) >> 4;
278
279 entry = prism2_frag_cache_find(local, seq, -1, hdr->addr2, hdr->addr1);
280
281 if (entry == NULL) {
282 printk(KERN_DEBUG "%s: could not invalidate fragment cache "
283 "entry (seq=%u)\n",
284 local->dev->name, seq);
285 return -1;
286 }
287
288 entry->skb = NULL;
289 return 0;
290}
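/*
 * Editor's sketch (not part of the driver): the IEEE 802.11 sequence
 * control layout assumed by the fragment cache code above. The low four
 * bits carry the fragment number and the upper twelve bits the sequence
 * number, which matches the >> 4 applied to the WLAN_GET_SEQ_SEQ() result
 * (that macro apparently returns the masked but unshifted field here).
 */
#include <stdint.h>

static inline unsigned int seqctl_frag(uint16_t sc)
{
	return sc & 0x000f;		/* fragment number */
}

static inline unsigned int seqctl_seq(uint16_t sc)
{
	return (sc >> 4) & 0x0fff;	/* sequence number */
}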
291
292
293static struct hostap_bss_info *__hostap_get_bss(local_info_t *local, u8 *bssid,
294 u8 *ssid, size_t ssid_len)
295{
296 struct list_head *ptr;
297 struct hostap_bss_info *bss;
298
299 list_for_each(ptr, &local->bss_list) {
300 bss = list_entry(ptr, struct hostap_bss_info, list);
301 if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0 &&
302 (ssid == NULL ||
303 (ssid_len == bss->ssid_len &&
304 memcmp(ssid, bss->ssid, ssid_len) == 0))) {
305 list_move(&bss->list, &local->bss_list);
306 return bss;
307 }
308 }
309
310 return NULL;
311}
312
313
314static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
315 u8 *ssid, size_t ssid_len)
316{
317 struct hostap_bss_info *bss;
318
319 if (local->num_bss_info >= HOSTAP_MAX_BSS_COUNT) {
320 bss = list_entry(local->bss_list.prev,
321 struct hostap_bss_info, list);
322 list_del(&bss->list);
323 local->num_bss_info--;
324 } else {
325 bss = (struct hostap_bss_info *)
326 kmalloc(sizeof(*bss), GFP_ATOMIC);
327 if (bss == NULL)
328 return NULL;
329 }
330
331 memset(bss, 0, sizeof(*bss));
332 memcpy(bss->bssid, bssid, ETH_ALEN);
333 memcpy(bss->ssid, ssid, ssid_len);
334 bss->ssid_len = ssid_len;
335 local->num_bss_info++;
336 list_add(&bss->list, &local->bss_list);
337 return bss;
338}
339
340
341static void __hostap_expire_bss(local_info_t *local)
342{
343 struct hostap_bss_info *bss;
344
345 while (local->num_bss_info > 0) {
346 bss = list_entry(local->bss_list.prev,
347 struct hostap_bss_info, list);
348 if (!time_after(jiffies, bss->last_update + 60 * HZ))
349 break;
350
351 list_del(&bss->list);
352 local->num_bss_info--;
353 kfree(bss);
354 }
355}
356
357
358/* Both IEEE 802.11 Beacon and Probe Response frames have similar structure, so
359 * the same routine can be used to parse both of them. */
360static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
361 int stype)
362{
363 struct hostap_ieee80211_mgmt *mgmt;
364 int left, chan = 0;
365 u8 *pos;
366 u8 *ssid = NULL, *wpa = NULL, *rsn = NULL;
367 size_t ssid_len = 0, wpa_len = 0, rsn_len = 0;
368 struct hostap_bss_info *bss;
369
370 if (skb->len < IEEE80211_MGMT_HDR_LEN + sizeof(mgmt->u.beacon))
371 return;
372
373 mgmt = (struct hostap_ieee80211_mgmt *) skb->data;
374 pos = mgmt->u.beacon.variable;
375 left = skb->len - (pos - skb->data);
376
377 while (left >= 2) {
378 if (2 + pos[1] > left)
379 return; /* parse failed */
380 switch (*pos) {
381 case WLAN_EID_SSID:
382 ssid = pos + 2;
383 ssid_len = pos[1];
384 break;
385 case WLAN_EID_GENERIC:
386 if (pos[1] >= 4 &&
387 pos[2] == 0x00 && pos[3] == 0x50 &&
388 pos[4] == 0xf2 && pos[5] == 1) {
389 wpa = pos;
390 wpa_len = pos[1] + 2;
391 }
392 break;
393 case WLAN_EID_RSN:
394 rsn = pos;
395 rsn_len = pos[1] + 2;
396 break;
397 case WLAN_EID_DS_PARAMS:
398 if (pos[1] >= 1)
399 chan = pos[2];
400 break;
401 }
402 left -= 2 + pos[1];
403 pos += 2 + pos[1];
404 }
405
406 if (wpa_len > MAX_WPA_IE_LEN)
407 wpa_len = MAX_WPA_IE_LEN;
408 if (rsn_len > MAX_WPA_IE_LEN)
409 rsn_len = MAX_WPA_IE_LEN;
410 if (ssid_len > sizeof(bss->ssid))
411 ssid_len = sizeof(bss->ssid);
412
413 spin_lock(&local->lock);
414 bss = __hostap_get_bss(local, mgmt->bssid, ssid, ssid_len);
415 if (bss == NULL)
416 bss = __hostap_add_bss(local, mgmt->bssid, ssid, ssid_len);
417 if (bss) {
418 bss->last_update = jiffies;
419 bss->count++;
420 bss->capab_info = le16_to_cpu(mgmt->u.beacon.capab_info);
421 if (wpa) {
422 memcpy(bss->wpa_ie, wpa, wpa_len);
423 bss->wpa_ie_len = wpa_len;
424 } else
425 bss->wpa_ie_len = 0;
426 if (rsn) {
427 memcpy(bss->rsn_ie, rsn, rsn_len);
428 bss->rsn_ie_len = rsn_len;
429 } else
430 bss->rsn_ie_len = 0;
431 bss->chan = chan;
432 }
433 __hostap_expire_bss(local);
434 spin_unlock(&local->lock);
435}
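/*
 * Editor's sketch (not part of the driver): the generic information
 * element walk used by hostap_rx_sta_beacon() above, shown standalone.
 * Each element is one ID byte, one length byte and 'length' bytes of
 * payload; the walk aborts as soon as an element would overrun the
 * remaining buffer, exactly like the "parse failed" check above.
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t *find_info_element(const uint8_t *pos, size_t left,
					 uint8_t eid)
{
	while (left >= 2) {
		size_t elen = pos[1];

		if (2 + elen > left)
			return NULL;		/* truncated element */
		if (pos[0] == eid)
			return pos;		/* points at the ID byte */
		left -= 2 + elen;
		pos += 2 + elen;
	}
	return NULL;
}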
436
437
438static inline int
439hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
440 struct hostap_80211_rx_status *rx_stats, u16 type,
441 u16 stype)
442{
443 if (local->iw_mode == IW_MODE_MASTER) {
444 hostap_update_sta_ps(local, (struct ieee80211_hdr *)
445 skb->data);
446 }
447
448 if (local->hostapd && type == IEEE80211_FTYPE_MGMT) {
449 if (stype == IEEE80211_STYPE_BEACON &&
450 local->iw_mode == IW_MODE_MASTER) {
451 struct sk_buff *skb2;
452 /* Process beacon frames also in kernel driver to
453 * update STA(AP) table statistics */
454 skb2 = skb_clone(skb, GFP_ATOMIC);
455 if (skb2)
456 hostap_rx(skb2->dev, skb2, rx_stats);
457 }
458
459 /* send management frames to the user space daemon for
460 * processing */
461 local->apdevstats.rx_packets++;
462 local->apdevstats.rx_bytes += skb->len;
463 if (local->apdev == NULL)
464 return -1;
465 prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT);
466 return 0;
467 }
468
469 if (local->iw_mode == IW_MODE_MASTER) {
470 if (type != IEEE80211_FTYPE_MGMT &&
471 type != IEEE80211_FTYPE_CTL) {
472 printk(KERN_DEBUG "%s: unknown management frame "
473 "(type=0x%02x, stype=0x%02x) dropped\n",
474 skb->dev->name, type >> 2, stype >> 4);
475 return -1;
476 }
477
478 hostap_rx(skb->dev, skb, rx_stats);
479 return 0;
480 } else if (type == IEEE80211_FTYPE_MGMT &&
481 (stype == IEEE80211_STYPE_BEACON ||
482 stype == IEEE80211_STYPE_PROBE_RESP)) {
483 hostap_rx_sta_beacon(local, skb, stype);
484 return -1;
485 } else if (type == IEEE80211_FTYPE_MGMT &&
486 (stype == IEEE80211_STYPE_ASSOC_RESP ||
487 stype == IEEE80211_STYPE_REASSOC_RESP)) {
488 /* Ignore (Re)AssocResp silently since these are not currently
489 * needed but are still received when WPA/RSN mode is enabled.
490 */
491 return -1;
492 } else {
493 printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: dropped unhandled"
494 " management frame in non-Host AP mode (type=%d:%d)\n",
495 skb->dev->name, type >> 2, stype >> 4);
496 return -1;
497 }
498}
499
500
501/* Called only as a tasklet (software IRQ) */
502static inline struct net_device *prism2_rx_get_wds(local_info_t *local,
503 u8 *addr)
504{
505 struct hostap_interface *iface = NULL;
506 struct list_head *ptr;
507
508 read_lock_bh(&local->iface_lock);
509 list_for_each(ptr, &local->hostap_interfaces) {
510 iface = list_entry(ptr, struct hostap_interface, list);
511 if (iface->type == HOSTAP_INTERFACE_WDS &&
512 memcmp(iface->u.wds.remote_addr, addr, ETH_ALEN) == 0)
513 break;
514 iface = NULL;
515 }
516 read_unlock_bh(&local->iface_lock);
517
518 return iface ? iface->dev : NULL;
519}
520
521
522static inline int
523hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr,
524 u16 fc, struct net_device **wds)
525{
526 /* FIX: is this really supposed to accept WDS frames only in Master
527 * mode? What about Repeater or Managed with WDS frames? */
528 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) !=
529 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS) &&
530 (local->iw_mode != IW_MODE_MASTER || !(fc & IEEE80211_FCTL_TODS)))
531 return 0; /* not a WDS frame */
532
533 /* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
534 * or own non-standard frame with 4th address after payload */
535 if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
536 (hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
537 hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
538 hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
539 /* RA (or BSSID) is not ours - drop */
540 PDEBUG(DEBUG_EXTRA, "%s: received WDS frame with "
541 "not own or broadcast %s=" MACSTR "\n",
542 local->dev->name,
543 fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID",
544 MAC2STR(hdr->addr1));
545 return -1;
546 }
547
548 /* check if the frame came from a registered WDS connection */
549 *wds = prism2_rx_get_wds(local, hdr->addr2);
550 if (*wds == NULL && fc & IEEE80211_FCTL_FROMDS &&
551 (local->iw_mode != IW_MODE_INFRA ||
552 !(local->wds_type & HOSTAP_WDS_AP_CLIENT) ||
553 memcmp(hdr->addr2, local->bssid, ETH_ALEN) != 0)) {
554 /* require that WDS link has been registered with TA or the
555 * frame is from current AP when using 'AP client mode' */
556 PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame "
557 "from unknown TA=" MACSTR "\n",
558 local->dev->name, MAC2STR(hdr->addr2));
559 if (local->ap && local->ap->autom_ap_wds)
560 hostap_wds_link_oper(local, hdr->addr2, WDS_ADD);
561 return -1;
562 }
563
564 if (*wds && !(fc & IEEE80211_FCTL_FROMDS) && local->ap &&
565 hostap_is_sta_assoc(local->ap, hdr->addr2)) {
566 /* STA is actually associated with us even though it has a
567 * registered WDS link. Assume it is in 'AP client' mode.
568 * Since this is a 3-addr frame, assume it is not (bogus) WDS
569 * frame and process it like any normal ToDS frame from
570 * associated STA. */
571 *wds = NULL;
572 }
573
574 return 0;
575}
576
577
578static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
579{
580 struct net_device *dev = local->dev;
581 u16 fc, ethertype;
582 struct ieee80211_hdr *hdr;
583 u8 *pos;
584
585 if (skb->len < 24)
586 return 0;
587
588 hdr = (struct ieee80211_hdr *) skb->data;
589 fc = le16_to_cpu(hdr->frame_ctl);
590
591 /* check that the frame is unicast frame to us */
592 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
593 IEEE80211_FCTL_TODS &&
594 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
595 memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
596 /* ToDS frame with own addr BSSID and DA */
597 } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
598 IEEE80211_FCTL_FROMDS &&
599 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
600 /* FromDS frame with own addr as DA */
601 } else
602 return 0;
603
604 if (skb->len < 24 + 8)
605 return 0;
606
607 /* check for port access entity Ethernet type */
608 pos = skb->data + 24;
609 ethertype = (pos[6] << 8) | pos[7];
610 if (ethertype == ETH_P_PAE)
611 return 1;
612
613 return 0;
614}
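/*
 * Editor's sketch (not part of the driver): why hostap_is_eapol_frame()
 * above reads the EtherType at offset 24 + 6. A three-address data frame
 * has a 24-byte 802.11 header followed by an 8-byte LLC/SNAP
 * encapsulation (AA AA 03, a 3-byte OUI, then the 2-byte EtherType), so
 * the EtherType sits in payload bytes 6 and 7.
 */
#include <stdint.h>

static uint16_t snap_ethertype(const uint8_t *payload /* frame + 24 */)
{
	return (uint16_t)((payload[6] << 8) | payload[7]);
}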
615
616
617/* Called only as a tasklet (software IRQ) */
618static inline int
619hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
620 struct ieee80211_crypt_data *crypt)
621{
622 struct ieee80211_hdr *hdr;
623 int res, hdrlen;
624
625 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
626 return 0;
627
628 hdr = (struct ieee80211_hdr *) skb->data;
629 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
630
631 if (local->tkip_countermeasures &&
632 strcmp(crypt->ops->name, "TKIP") == 0) {
633 if (net_ratelimit()) {
634 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
635 "received packet from " MACSTR "\n",
636 local->dev->name, MAC2STR(hdr->addr2));
637 }
638 return -1;
639 }
640
641 atomic_inc(&crypt->refcnt);
642 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
643 atomic_dec(&crypt->refcnt);
644 if (res < 0) {
645 printk(KERN_DEBUG "%s: decryption failed (SA=" MACSTR
646 ") res=%d\n",
647 local->dev->name, MAC2STR(hdr->addr2), res);
648 local->comm_tallies.rx_discards_wep_undecryptable++;
649 return -1;
650 }
651
652 return res;
653}
654
655
656/* Called only as a tasklet (software IRQ) */
657static inline int
658hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
659 int keyidx, struct ieee80211_crypt_data *crypt)
660{
661 struct ieee80211_hdr *hdr;
662 int res, hdrlen;
663
664 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
665 return 0;
666
667 hdr = (struct ieee80211_hdr *) skb->data;
668 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
669
670 atomic_inc(&crypt->refcnt);
671 res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
672 atomic_dec(&crypt->refcnt);
673 if (res < 0) {
674 printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
675 " (SA=" MACSTR " keyidx=%d)\n",
676 local->dev->name, MAC2STR(hdr->addr2), keyidx);
677 return -1;
678 }
679
680 return 0;
681}
682
683
684/* All received frames are sent to this function. @skb contains the frame in
685 * IEEE 802.11 format, i.e., in the format it was sent over the air.
686 * This function is called only as a tasklet (software IRQ). */
687void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
688 struct hostap_80211_rx_status *rx_stats)
689{
690 struct hostap_interface *iface;
691 local_info_t *local;
692 struct ieee80211_hdr *hdr;
693 size_t hdrlen;
694 u16 fc, type, stype, sc;
695 struct net_device *wds = NULL;
696 struct net_device_stats *stats;
697 unsigned int frag;
698 u8 *payload;
699 struct sk_buff *skb2 = NULL;
700 u16 ethertype;
701 int frame_authorized = 0;
702 int from_assoc_ap = 0;
703 u8 dst[ETH_ALEN];
704 u8 src[ETH_ALEN];
705 struct ieee80211_crypt_data *crypt = NULL;
706 void *sta = NULL;
707 int keyidx = 0;
708
709 iface = netdev_priv(dev);
710 local = iface->local;
711 iface->stats.rx_packets++;
712 iface->stats.rx_bytes += skb->len;
713
714 /* dev is the master radio device; change this to be the default
715 * virtual interface (this may be changed to WDS device below) */
716 dev = local->ddev;
717 iface = netdev_priv(dev);
718
719 hdr = (struct ieee80211_hdr *) skb->data;
720 stats = hostap_get_stats(dev);
721
722 if (skb->len < 10)
723 goto rx_dropped;
724
725 fc = le16_to_cpu(hdr->frame_ctl);
726 type = WLAN_FC_GET_TYPE(fc);
727 stype = WLAN_FC_GET_STYPE(fc);
728 sc = le16_to_cpu(hdr->seq_ctl);
729 frag = WLAN_GET_SEQ_FRAG(sc);
730 hdrlen = hostap_80211_get_hdrlen(fc);
731
732 /* Put this code here so that we avoid duplicating it in all
733 * Rx paths. - Jean II */
734#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
735 /* If spy monitoring on */
736 if (iface->spy_data.spy_number > 0) {
737 struct iw_quality wstats;
738 wstats.level = rx_stats->signal;
739 wstats.noise = rx_stats->noise;
740 wstats.updated = 6; /* No qual value */
741 /* Update spy records */
742 wireless_spy_update(dev, hdr->addr2, &wstats);
743 }
744#endif /* IW_WIRELESS_SPY */
745 hostap_update_rx_stats(local->ap, hdr, rx_stats);
746
747 if (local->iw_mode == IW_MODE_MONITOR) {
748 monitor_rx(dev, skb, rx_stats);
749 return;
750 }
751
752 if (local->host_decrypt) {
753 int idx = 0;
754 if (skb->len >= hdrlen + 3)
755 idx = skb->data[hdrlen + 3] >> 6;
756 crypt = local->crypt[idx];
757 sta = NULL;
758
759 /* Use station specific key to override default keys if the
760 * receiver address is a unicast address ("individual RA"). If
761 * bcrx_sta_key parameter is set, station specific key is used
762 * even with broad/multicast targets (this is against IEEE
763 * 802.11, but makes it easier to use different keys with
764 * stations that do not support WEP key mapping). */
765
766 if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
767 (void) hostap_handle_sta_crypto(local, hdr, &crypt,
768 &sta);
769
770		/* allow NULL decrypt to indicate a station-specific override
771 * for default encryption */
772 if (crypt && (crypt->ops == NULL ||
773 crypt->ops->decrypt_mpdu == NULL))
774 crypt = NULL;
775
776 if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) {
777#if 0
778 /* This seems to be triggered by some (multicast?)
779 * frames from other than current BSS, so just drop the
780 * frames silently instead of filling system log with
781 * these reports. */
782 printk(KERN_DEBUG "%s: WEP decryption failed (not set)"
783 " (SA=" MACSTR ")\n",
784 local->dev->name, MAC2STR(hdr->addr2));
785#endif
786 local->comm_tallies.rx_discards_wep_undecryptable++;
787 goto rx_dropped;
788 }
789 }
790
791 if (type != IEEE80211_FTYPE_DATA) {
792 if (type == IEEE80211_FTYPE_MGMT &&
793 stype == IEEE80211_STYPE_AUTH &&
794 fc & IEEE80211_FCTL_PROTECTED && local->host_decrypt &&
795 (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
796 {
797 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
798 "from " MACSTR "\n", dev->name,
799 MAC2STR(hdr->addr2));
800 /* TODO: could inform hostapd about this so that it
801 * could send auth failure report */
802 goto rx_dropped;
803 }
804
805 if (hostap_rx_frame_mgmt(local, skb, rx_stats, type, stype))
806 goto rx_dropped;
807 else
808 goto rx_exit;
809 }
810
811 /* Data frame - extract src/dst addresses */
812 if (skb->len < IEEE80211_DATA_HDR3_LEN)
813 goto rx_dropped;
814
815 switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
816 case IEEE80211_FCTL_FROMDS:
817 memcpy(dst, hdr->addr1, ETH_ALEN);
818 memcpy(src, hdr->addr3, ETH_ALEN);
819 break;
820 case IEEE80211_FCTL_TODS:
821 memcpy(dst, hdr->addr3, ETH_ALEN);
822 memcpy(src, hdr->addr2, ETH_ALEN);
823 break;
824 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
825 if (skb->len < IEEE80211_DATA_HDR4_LEN)
826 goto rx_dropped;
827 memcpy(dst, hdr->addr3, ETH_ALEN);
828 memcpy(src, hdr->addr4, ETH_ALEN);
829 break;
830 case 0:
831 memcpy(dst, hdr->addr1, ETH_ALEN);
832 memcpy(src, hdr->addr2, ETH_ALEN);
833 break;
834 }
835
836 if (hostap_rx_frame_wds(local, hdr, fc, &wds))
837 goto rx_dropped;
838 if (wds) {
839 skb->dev = dev = wds;
840 stats = hostap_get_stats(dev);
841 }
842
843 if (local->iw_mode == IW_MODE_MASTER && !wds &&
844 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
845 IEEE80211_FCTL_FROMDS &&
846 local->stadev &&
847 memcmp(hdr->addr2, local->assoc_ap_addr, ETH_ALEN) == 0) {
848 /* Frame from BSSID of the AP for which we are a client */
849 skb->dev = dev = local->stadev;
850 stats = hostap_get_stats(dev);
851 from_assoc_ap = 1;
852 }
853
854 dev->last_rx = jiffies;
855
856 if ((local->iw_mode == IW_MODE_MASTER ||
857 local->iw_mode == IW_MODE_REPEAT) &&
858 !from_assoc_ap) {
859 switch (hostap_handle_sta_rx(local, dev, skb, rx_stats,
860 wds != NULL)) {
861 case AP_RX_CONTINUE_NOT_AUTHORIZED:
862 frame_authorized = 0;
863 break;
864 case AP_RX_CONTINUE:
865 frame_authorized = 1;
866 break;
867 case AP_RX_DROP:
868 goto rx_dropped;
869 case AP_RX_EXIT:
870 goto rx_exit;
871 }
872 }
873
874 /* Nullfunc frames may have PS-bit set, so they must be passed to
875 * hostap_handle_sta_rx() before being dropped here. */
876 if (stype != IEEE80211_STYPE_DATA &&
877 stype != IEEE80211_STYPE_DATA_CFACK &&
878 stype != IEEE80211_STYPE_DATA_CFPOLL &&
879 stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
880 if (stype != IEEE80211_STYPE_NULLFUNC)
881 printk(KERN_DEBUG "%s: RX: dropped data frame "
882 "with no data (type=0x%02x, subtype=0x%02x)\n",
883 dev->name, type >> 2, stype >> 4);
884 goto rx_dropped;
885 }
886
887 /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
888
889 if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
890 (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
891 goto rx_dropped;
892 hdr = (struct ieee80211_hdr *) skb->data;
893
894 /* skb: hdr + (possibly fragmented) plaintext payload */
895
896 if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
897 (frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
898 int flen;
899 struct sk_buff *frag_skb =
900 prism2_frag_cache_get(local, hdr);
901 if (!frag_skb) {
902 printk(KERN_DEBUG "%s: Rx cannot get skb from "
903 "fragment cache (morefrag=%d seq=%u frag=%u)\n",
904 dev->name, (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
905 WLAN_GET_SEQ_SEQ(sc) >> 4, frag);
906 goto rx_dropped;
907 }
908
909 flen = skb->len;
910 if (frag != 0)
911 flen -= hdrlen;
912
913 if (frag_skb->tail + flen > frag_skb->end) {
914 printk(KERN_WARNING "%s: host decrypted and "
915 "reassembled frame did not fit skb\n",
916 dev->name);
917 prism2_frag_cache_invalidate(local, hdr);
918 goto rx_dropped;
919 }
920
921 if (frag == 0) {
922 /* copy first fragment (including full headers) into
923 * beginning of the fragment cache skb */
924 memcpy(skb_put(frag_skb, flen), skb->data, flen);
925 } else {
926 /* append frame payload to the end of the fragment
927 * cache skb */
928 memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
929 flen);
930 }
931 dev_kfree_skb(skb);
932 skb = NULL;
933
934 if (fc & IEEE80211_FCTL_MOREFRAGS) {
935 /* more fragments expected - leave the skb in fragment
936 * cache for now; it will be delivered to upper layers
937 * after all fragments have been received */
938 goto rx_exit;
939 }
940
941 /* this was the last fragment and the frame will be
942 * delivered, so remove skb from fragment cache */
943 skb = frag_skb;
944 hdr = (struct ieee80211_hdr *) skb->data;
945 prism2_frag_cache_invalidate(local, hdr);
946 }
947
948 /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
949 * encrypted/authenticated */
950
951 if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
952 hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
953 goto rx_dropped;
954
955 hdr = (struct ieee80211_hdr *) skb->data;
956 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) {
957 if (local->ieee_802_1x &&
958 hostap_is_eapol_frame(local, skb)) {
959 /* pass unencrypted EAPOL frames even if encryption is
960 * configured */
961 PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X - passing "
962 "unencrypted EAPOL frame\n", local->dev->name);
963 } else {
964 printk(KERN_DEBUG "%s: encryption configured, but RX "
965 "frame not encrypted (SA=" MACSTR ")\n",
966 local->dev->name, MAC2STR(hdr->addr2));
967 goto rx_dropped;
968 }
969 }
970
971 if (local->drop_unencrypted && !(fc & IEEE80211_FCTL_PROTECTED) &&
972 !hostap_is_eapol_frame(local, skb)) {
973 if (net_ratelimit()) {
974 printk(KERN_DEBUG "%s: dropped unencrypted RX data "
975 "frame from " MACSTR " (drop_unencrypted=1)\n",
976 dev->name, MAC2STR(hdr->addr2));
977 }
978 goto rx_dropped;
979 }
980
981 /* skb: hdr + (possible reassembled) full plaintext payload */
982
983 payload = skb->data + hdrlen;
984 ethertype = (payload[6] << 8) | payload[7];
985
986 /* If IEEE 802.1X is used, check whether the port is authorized to send
987 * the received frame. */
988 if (local->ieee_802_1x && local->iw_mode == IW_MODE_MASTER) {
989 if (ethertype == ETH_P_PAE) {
990 PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X frame\n",
991 dev->name);
992 if (local->hostapd && local->apdev) {
993 /* Send IEEE 802.1X frames to the user
994 * space daemon for processing */
995 prism2_rx_80211(local->apdev, skb, rx_stats,
996 PRISM2_RX_MGMT);
997 local->apdevstats.rx_packets++;
998 local->apdevstats.rx_bytes += skb->len;
999 goto rx_exit;
1000 }
1001 } else if (!frame_authorized) {
1002 printk(KERN_DEBUG "%s: dropped frame from "
1003 "unauthorized port (IEEE 802.1X): "
1004 "ethertype=0x%04x\n",
1005 dev->name, ethertype);
1006 goto rx_dropped;
1007 }
1008 }
1009
1010 /* convert hdr + possible LLC headers into Ethernet header */
1011 if (skb->len - hdrlen >= 8 &&
1012 ((memcmp(payload, rfc1042_header, 6) == 0 &&
1013 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1014 memcmp(payload, bridge_tunnel_header, 6) == 0)) {
1015 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1016 * replace EtherType */
1017 skb_pull(skb, hdrlen + 6);
1018 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1019 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1020 } else {
1021 u16 len;
1022 /* Leave Ethernet header part of hdr and full payload */
1023 skb_pull(skb, hdrlen);
1024 len = htons(skb->len);
1025 memcpy(skb_push(skb, 2), &len, 2);
1026 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1027 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1028 }
1029
1030 if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
1031 IEEE80211_FCTL_TODS) &&
1032 skb->len >= ETH_HLEN + ETH_ALEN) {
1033 /* Non-standard frame: get addr4 from its bogus location after
1034 * the payload */
1035 memcpy(skb->data + ETH_ALEN,
1036 skb->data + skb->len - ETH_ALEN, ETH_ALEN);
1037 skb_trim(skb, skb->len - ETH_ALEN);
1038 }
1039
1040 stats->rx_packets++;
1041 stats->rx_bytes += skb->len;
1042
1043 if (local->iw_mode == IW_MODE_MASTER && !wds &&
1044 local->ap->bridge_packets) {
1045 if (dst[0] & 0x01) {
1046 /* copy multicast frame both to the higher layers and
1047 * to the wireless media */
1048 local->ap->bridged_multicast++;
1049 skb2 = skb_clone(skb, GFP_ATOMIC);
1050 if (skb2 == NULL)
1051 printk(KERN_DEBUG "%s: skb_clone failed for "
1052 "multicast frame\n", dev->name);
1053 } else if (hostap_is_sta_authorized(local->ap, dst)) {
1054 /* send frame directly to the associated STA using
1055 * wireless media and not passing to higher layers */
1056 local->ap->bridged_unicast++;
1057 skb2 = skb;
1058 skb = NULL;
1059 }
1060 }
1061
1062 if (skb2 != NULL) {
1063 /* send to wireless media */
1064 skb2->protocol = __constant_htons(ETH_P_802_3);
1065 skb2->mac.raw = skb2->nh.raw = skb2->data;
1066 /* skb2->nh.raw = skb2->data + ETH_HLEN; */
1067 skb2->dev = dev;
1068 dev_queue_xmit(skb2);
1069 }
1070
1071 if (skb) {
1072 skb->protocol = eth_type_trans(skb, dev);
1073 memset(skb->cb, 0, sizeof(skb->cb));
1074 skb->dev = dev;
1075 netif_rx(skb);
1076 }
1077
1078 rx_exit:
1079 if (sta)
1080 hostap_handle_sta_release(sta);
1081 return;
1082
1083 rx_dropped:
1084 dev_kfree_skb(skb);
1085
1086 stats->rx_dropped++;
1087 goto rx_exit;
1088}
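/*
 * Editor's sketch (not part of the driver): the DA/SA extraction performed
 * by the ToDS/FromDS switch inside hostap_80211_rx() above, written out as
 * a standalone helper. a1-a4 are the four 802.11 header addresses.
 */
#include <stdint.h>

struct addr4 {
	const uint8_t *a1, *a2, *a3, *a4;
};

static void rx_extract_da_sa(unsigned int to_ds, unsigned int from_ds,
			     const struct addr4 *h,
			     const uint8_t **da, const uint8_t **sa)
{
	if (to_ds && from_ds) {		/* 4-address WDS frame */
		*da = h->a3;
		*sa = h->a4;
	} else if (from_ds) {		/* AP -> STA */
		*da = h->a1;
		*sa = h->a3;
	} else if (to_ds) {		/* STA -> AP */
		*da = h->a3;
		*sa = h->a2;
	} else {			/* IBSS / no DS */
		*da = h->a1;
		*sa = h->a2;
	}
}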
1089
1090
1091EXPORT_SYMBOL(hostap_80211_rx);
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
new file mode 100644
index 000000000000..6358015f6526
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -0,0 +1,524 @@
1void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
2{
3 struct ieee80211_hdr *hdr;
4 u16 fc;
5
6 hdr = (struct ieee80211_hdr *) skb->data;
7
8 printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
9 name, skb->len, jiffies);
10
11 if (skb->len < 2)
12 return;
13
14 fc = le16_to_cpu(hdr->frame_ctl);
15 printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s",
16 fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
17 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
18 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
19
20 if (skb->len < IEEE80211_DATA_HDR3_LEN) {
21 printk("\n");
22 return;
23 }
24
25 printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
26 le16_to_cpu(hdr->seq_ctl));
27
28 printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR,
29 MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3));
30 if (skb->len >= 30)
31 printk(" A4=" MACSTR, MAC2STR(hdr->addr4));
32 printk("\n");
33}
34
35
36/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
37 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
38 * device configuration. */
39int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
40{
41 struct hostap_interface *iface;
42 local_info_t *local;
43 int need_headroom, need_tailroom = 0;
44 struct ieee80211_hdr hdr;
45 u16 fc, ethertype = 0;
46 enum {
47 WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
48 } use_wds = WDS_NO;
49 u8 *encaps_data;
50 int hdr_len, encaps_len, skip_header_bytes;
51 int to_assoc_ap = 0;
52 struct hostap_skb_tx_data *meta;
53
54 iface = netdev_priv(dev);
55 local = iface->local;
56
57 if (skb->len < ETH_HLEN) {
58 printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
59 "(len=%d)\n", dev->name, skb->len);
60 kfree_skb(skb);
61 return 0;
62 }
63
64 if (local->ddev != dev) {
65 use_wds = (local->iw_mode == IW_MODE_MASTER &&
66 !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
67 WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
68 if (dev == local->stadev) {
69 to_assoc_ap = 1;
70 use_wds = WDS_NO;
71 } else if (dev == local->apdev) {
72 printk(KERN_DEBUG "%s: prism2_tx: trying to use "
73 "AP device with Ethernet net dev\n", dev->name);
74 kfree_skb(skb);
75 return 0;
76 }
77 } else {
78 if (local->iw_mode == IW_MODE_REPEAT) {
79 printk(KERN_DEBUG "%s: prism2_tx: trying to use "
80 "non-WDS link in Repeater mode\n", dev->name);
81 kfree_skb(skb);
82 return 0;
83 } else if (local->iw_mode == IW_MODE_INFRA &&
84 (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
85 memcmp(skb->data + ETH_ALEN, dev->dev_addr,
86 ETH_ALEN) != 0) {
87 /* AP client mode: send frames with foreign src addr
88 * using 4-addr WDS frames */
89 use_wds = WDS_COMPLIANT_FRAME;
90 }
91 }
92
93 /* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
94 * ==>
95 * Prism2 TX frame with 802.11 header:
96 * txdesc (address order depending on used mode; includes dst_addr and
97 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
98 * proto[2], payload {, possible addr4[6]} */
99
100 ethertype = (skb->data[12] << 8) | skb->data[13];
101
102 memset(&hdr, 0, sizeof(hdr));
103
104 /* Length of data after IEEE 802.11 header */
105 encaps_data = NULL;
106 encaps_len = 0;
107 skip_header_bytes = ETH_HLEN;
108 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
109 encaps_data = bridge_tunnel_header;
110 encaps_len = sizeof(bridge_tunnel_header);
111 skip_header_bytes -= 2;
112 } else if (ethertype >= 0x600) {
113 encaps_data = rfc1042_header;
114 encaps_len = sizeof(rfc1042_header);
115 skip_header_bytes -= 2;
116 }
117
118 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
119 hdr_len = IEEE80211_DATA_HDR3_LEN;
120
121 if (use_wds != WDS_NO) {
122 /* Note! Prism2 station firmware has problems with sending real
123 * 802.11 frames with four addresses; until these problems can
124		 * be fixed or worked around, 4-addr frames needed for WDS use an
125		 * incompatible format: the FromDS flag is not set and the
126		 * fourth address is added after the frame payload; it is
127		 * assumed that the receiving station knows how to handle this
128 * frame format */
129
130 if (use_wds == WDS_COMPLIANT_FRAME) {
131 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
132 /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
133 * Addr4 = SA */
134 memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
135 hdr_len += ETH_ALEN;
136 } else {
137 /* bogus 4-addr format to workaround Prism2 station
138 * f/w bug */
139 fc |= IEEE80211_FCTL_TODS;
140 /* From DS: Addr1 = DA (used as RA),
141 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
142 */
143
144 /* SA from skb->data + ETH_ALEN will be added after
145 * frame payload; use hdr.addr4 as a temporary buffer
146 */
147 memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
148 need_tailroom += ETH_ALEN;
149 }
150
151 /* send broadcast and multicast frames to broadcast RA, if
152 * configured; otherwise, use unicast RA of the WDS link */
153 if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
154 skb->data[0] & 0x01)
155 memset(&hdr.addr1, 0xff, ETH_ALEN);
156 else if (iface->type == HOSTAP_INTERFACE_WDS)
157 memcpy(&hdr.addr1, iface->u.wds.remote_addr,
158 ETH_ALEN);
159 else
160 memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
161 memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
162 memcpy(&hdr.addr3, skb->data, ETH_ALEN);
163 } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
164 fc |= IEEE80211_FCTL_FROMDS;
165 /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
166 memcpy(&hdr.addr1, skb->data, ETH_ALEN);
167 memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
168 memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
169 } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
170 fc |= IEEE80211_FCTL_TODS;
171 /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
172 memcpy(&hdr.addr1, to_assoc_ap ?
173 local->assoc_ap_addr : local->bssid, ETH_ALEN);
174 memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
175 memcpy(&hdr.addr3, skb->data, ETH_ALEN);
176 } else if (local->iw_mode == IW_MODE_ADHOC) {
177 /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
178 memcpy(&hdr.addr1, skb->data, ETH_ALEN);
179 memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
180 memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
181 }
182
183 hdr.frame_ctl = cpu_to_le16(fc);
184
185 skb_pull(skb, skip_header_bytes);
186 need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
187 if (skb_tailroom(skb) < need_tailroom) {
188 skb = skb_unshare(skb, GFP_ATOMIC);
189 if (skb == NULL) {
190 iface->stats.tx_dropped++;
191 return 0;
192 }
193 if (pskb_expand_head(skb, need_headroom, need_tailroom,
194 GFP_ATOMIC)) {
195 kfree_skb(skb);
196 iface->stats.tx_dropped++;
197 return 0;
198 }
199 } else if (skb_headroom(skb) < need_headroom) {
200 struct sk_buff *tmp = skb;
201 skb = skb_realloc_headroom(skb, need_headroom);
202 kfree_skb(tmp);
203 if (skb == NULL) {
204 iface->stats.tx_dropped++;
205 return 0;
206 }
207 } else {
208 skb = skb_unshare(skb, GFP_ATOMIC);
209 if (skb == NULL) {
210 iface->stats.tx_dropped++;
211 return 0;
212 }
213 }
214
215 if (encaps_data)
216 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
217 memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
218 if (use_wds == WDS_OWN_FRAME) {
219 memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
220 }
221
222 iface->stats.tx_packets++;
223 iface->stats.tx_bytes += skb->len;
224
225 skb->mac.raw = skb->data;
226 meta = (struct hostap_skb_tx_data *) skb->cb;
227 memset(meta, 0, sizeof(*meta));
228 meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
229 if (use_wds)
230 meta->flags |= HOSTAP_TX_FLAGS_WDS;
231 meta->ethertype = ethertype;
232 meta->iface = iface;
233
234 /* Send IEEE 802.11 encapsulated frame using the master radio device */
235 skb->dev = local->dev;
236 dev_queue_xmit(skb);
237 return 0;
238}
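/*
 * Editor's sketch (not part of the driver): the encapsulation choice made
 * near the top of hostap_data_start_xmit() above. AppleTalk ARP and IPX
 * keep the Bridge-Tunnel SNAP header, any other EtherType-framed payload
 * (EtherType >= 0x600) gets RFC 1042, and length-framed 802.3 payloads get
 * no extra prefix at all. The returned bytes are prepended in front of the
 * original 2-byte EtherType.
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t rfc1042_prefix[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_prefix[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

static const uint8_t *tx_llc_prefix(uint16_t ethertype, size_t *len)
{
	if (ethertype == 0x80f3 /* ETH_P_AARP */ ||
	    ethertype == 0x8137 /* ETH_P_IPX */) {
		*len = sizeof(bridge_tunnel_prefix);
		return bridge_tunnel_prefix;
	}
	if (ethertype >= 0x600) {
		*len = sizeof(rfc1042_prefix);
		return rfc1042_prefix;
	}
	*len = 0;
	return NULL;			/* raw 802.3, no encapsulation */
}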
239
240
241/* hard_start_xmit function for hostapd wlan#ap interfaces */
242int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
243{
244 struct hostap_interface *iface;
245 local_info_t *local;
246 struct hostap_skb_tx_data *meta;
247 struct ieee80211_hdr *hdr;
248 u16 fc;
249
250 iface = netdev_priv(dev);
251 local = iface->local;
252
253 if (skb->len < 10) {
254 printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb "
255 "(len=%d)\n", dev->name, skb->len);
256 kfree_skb(skb);
257 return 0;
258 }
259
260 iface->stats.tx_packets++;
261 iface->stats.tx_bytes += skb->len;
262
263 meta = (struct hostap_skb_tx_data *) skb->cb;
264 memset(meta, 0, sizeof(*meta));
265 meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
266 meta->iface = iface;
267
268 if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
269 hdr = (struct ieee80211_hdr *) skb->data;
270 fc = le16_to_cpu(hdr->frame_ctl);
271 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
272 WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) {
273 u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN +
274 sizeof(rfc1042_header)];
275 meta->ethertype = (pos[0] << 8) | pos[1];
276 }
277 }
278
279 /* Send IEEE 802.11 encapsulated frame using the master radio device */
280 skb->dev = local->dev;
281 dev_queue_xmit(skb);
282 return 0;
283}
284
285
286/* Called only from software IRQ */
287struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
288 struct ieee80211_crypt_data *crypt)
289{
290 struct hostap_interface *iface;
291 local_info_t *local;
292 struct ieee80211_hdr *hdr;
293 u16 fc;
294 int hdr_len, res;
295
296 iface = netdev_priv(skb->dev);
297 local = iface->local;
298
299 if (skb->len < IEEE80211_DATA_HDR3_LEN) {
300 kfree_skb(skb);
301 return NULL;
302 }
303
304 if (local->tkip_countermeasures &&
305 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
306 hdr = (struct ieee80211_hdr *) skb->data;
307 if (net_ratelimit()) {
308 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
309 "TX packet to " MACSTR "\n",
310 local->dev->name, MAC2STR(hdr->addr1));
311 }
312 kfree_skb(skb);
313 return NULL;
314 }
315
316 skb = skb_unshare(skb, GFP_ATOMIC);
317 if (skb == NULL)
318 return NULL;
319
320 if ((skb_headroom(skb) < crypt->ops->extra_prefix_len ||
321 skb_tailroom(skb) < crypt->ops->extra_postfix_len) &&
322 pskb_expand_head(skb, crypt->ops->extra_prefix_len,
323 crypt->ops->extra_postfix_len, GFP_ATOMIC)) {
324 kfree_skb(skb);
325 return NULL;
326 }
327
328 hdr = (struct ieee80211_hdr *) skb->data;
329 fc = le16_to_cpu(hdr->frame_ctl);
330 hdr_len = hostap_80211_get_hdrlen(fc);
331
332 /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
333 * call both MSDU and MPDU encryption functions from here. */
334 atomic_inc(&crypt->refcnt);
335 res = 0;
336 if (crypt->ops->encrypt_msdu)
337 res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
338 if (res == 0 && crypt->ops->encrypt_mpdu)
339 res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
340 atomic_dec(&crypt->refcnt);
341 if (res < 0) {
342 kfree_skb(skb);
343 return NULL;
344 }
345
346 return skb;
347}
348
349
350/* hard_start_xmit function for master radio interface wifi#.
351 * AP processing (TX rate control, power save buffering, etc.).
352 * Use hardware TX function to send the frame. */
353int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
354{
355 struct hostap_interface *iface;
356 local_info_t *local;
357 int ret = 1;
358 u16 fc;
359 struct hostap_tx_data tx;
360 ap_tx_ret tx_ret;
361 struct hostap_skb_tx_data *meta;
362 int no_encrypt = 0;
363 struct ieee80211_hdr *hdr;
364
365 iface = netdev_priv(dev);
366 local = iface->local;
367
368 tx.skb = skb;
369 tx.sta_ptr = NULL;
370
371 meta = (struct hostap_skb_tx_data *) skb->cb;
372 if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
373 printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
374 "expected 0x%08x)\n",
375 dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
376 ret = 0;
377 iface->stats.tx_dropped++;
378 goto fail;
379 }
380
381 if (local->host_encrypt) {
382 /* Set crypt to default algorithm and key; will be replaced in
383 * AP code if STA has own alg/key */
384 tx.crypt = local->crypt[local->tx_keyidx];
385 tx.host_encrypt = 1;
386 } else {
387 tx.crypt = NULL;
388 tx.host_encrypt = 0;
389 }
390
391 if (skb->len < 24) {
392 printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
393 "(len=%d)\n", dev->name, skb->len);
394 ret = 0;
395 iface->stats.tx_dropped++;
396 goto fail;
397 }
398
399 /* FIX (?):
400 * Wi-Fi 802.11b test plan suggests that AP should ignore power save
401	 * bit in authentication and (re)association frames and assume that
402	 * the STA remains awake for the response. */
403 tx_ret = hostap_handle_sta_tx(local, &tx);
404 skb = tx.skb;
405 meta = (struct hostap_skb_tx_data *) skb->cb;
406 hdr = (struct ieee80211_hdr *) skb->data;
407 fc = le16_to_cpu(hdr->frame_ctl);
408 switch (tx_ret) {
409 case AP_TX_CONTINUE:
410 break;
411 case AP_TX_CONTINUE_NOT_AUTHORIZED:
412 if (local->ieee_802_1x &&
413 WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
414 meta->ethertype != ETH_P_PAE &&
415 !(meta->flags & HOSTAP_TX_FLAGS_WDS)) {
416 printk(KERN_DEBUG "%s: dropped frame to unauthorized "
417 "port (IEEE 802.1X): ethertype=0x%04x\n",
418 dev->name, meta->ethertype);
419 hostap_dump_tx_80211(dev->name, skb);
420
421 ret = 0; /* drop packet */
422 iface->stats.tx_dropped++;
423 goto fail;
424 }
425 break;
426 case AP_TX_DROP:
427 ret = 0; /* drop packet */
428 iface->stats.tx_dropped++;
429 goto fail;
430 case AP_TX_RETRY:
431 goto fail;
432 case AP_TX_BUFFERED:
433 /* do not free skb here, it will be freed when the
434 * buffered frame is sent/timed out */
435 ret = 0;
436 goto tx_exit;
437 }
438
439	/* Request TX callback if the protocol version in the 802.11 header is
440	 * 2; this version 2 is a special case used between hostapd and the
441	 * kernel driver */
442 if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) &&
443 local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) {
444 meta->tx_cb_idx = local->ap->tx_callback_idx;
445
446 /* remove special version from the frame header */
447 fc &= ~IEEE80211_FCTL_VERS;
448 hdr->frame_ctl = cpu_to_le16(fc);
449 }
450
451 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_DATA) {
452 no_encrypt = 1;
453 tx.crypt = NULL;
454 }
455
456 if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt &&
457 !(fc & IEEE80211_FCTL_VERS)) {
458 no_encrypt = 1;
459 PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing "
460 "unencrypted EAPOL frame\n", dev->name);
461 tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */
462 }
463
464 if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
465 tx.crypt = NULL;
466 else if ((tx.crypt || local->crypt[local->tx_keyidx]) && !no_encrypt) {
467 /* Add ISWEP flag both for firmware and host based encryption
468 */
469 fc |= IEEE80211_FCTL_PROTECTED;
470 hdr->frame_ctl = cpu_to_le16(fc);
471 } else if (local->drop_unencrypted &&
472 WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
473 meta->ethertype != ETH_P_PAE) {
474 if (net_ratelimit()) {
475 printk(KERN_DEBUG "%s: dropped unencrypted TX data "
476 "frame (drop_unencrypted=1)\n", dev->name);
477 }
478 iface->stats.tx_dropped++;
479 ret = 0;
480 goto fail;
481 }
482
483 if (tx.crypt) {
484 skb = hostap_tx_encrypt(skb, tx.crypt);
485 if (skb == NULL) {
486 printk(KERN_DEBUG "%s: TX - encryption failed\n",
487 dev->name);
488 ret = 0;
489 goto fail;
490 }
491 meta = (struct hostap_skb_tx_data *) skb->cb;
492 if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
493 printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
494 "expected 0x%08x) after hostap_tx_encrypt\n",
495 dev->name, meta->magic,
496 HOSTAP_SKB_TX_DATA_MAGIC);
497 ret = 0;
498 iface->stats.tx_dropped++;
499 goto fail;
500 }
501 }
502
503 if (local->func->tx == NULL || local->func->tx(skb, dev)) {
504 ret = 0;
505 iface->stats.tx_dropped++;
506 } else {
507 ret = 0;
508 iface->stats.tx_packets++;
509 iface->stats.tx_bytes += skb->len;
510 }
511
512 fail:
513 if (!ret && skb)
514 dev_kfree_skb(skb);
515 tx_exit:
516 if (tx.sta_ptr)
517 hostap_handle_sta_release(tx.sta_ptr);
518 return ret;
519}
520
521
522EXPORT_SYMBOL(hostap_dump_tx_80211);
523EXPORT_SYMBOL(hostap_tx_encrypt);
524EXPORT_SYMBOL(hostap_master_start_xmit);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
new file mode 100644
index 000000000000..930cef8367f2
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -0,0 +1,3288 @@
1/*
2 * Intersil Prism2 driver with Host AP (software access point) support
3 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
4 * <jkmaline@cc.hut.fi>
5 * Copyright (c) 2002-2005, Jouni Malinen <jkmaline@cc.hut.fi>
6 *
7 * This file is to be included into hostap.c when S/W AP functionality is
8 * compiled.
9 *
10 * AP: FIX:
11 * - if unicast Class 2 (assoc,reassoc,disassoc) frame received from
12 * unauthenticated STA, send deauth. frame (8802.11: 5.5)
13 * - if unicast Class 3 (data with to/from DS,deauth,pspoll) frame received
14 * from authenticated, but unassoc STA, send disassoc frame (8802.11: 5.5)
15 * - if unicast Class 3 received from unauthenticated STA, send deauth. frame
16 * (8802.11: 5.5)
17 */
18
19static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
20 DEF_INTS };
21module_param_array(other_ap_policy, int, NULL, 0444);
22MODULE_PARM_DESC(other_ap_policy, "Other AP beacon monitoring policy (0-3)");
23
24static int ap_max_inactivity[MAX_PARM_DEVICES] = { AP_MAX_INACTIVITY_SEC,
25 DEF_INTS };
26module_param_array(ap_max_inactivity, int, NULL, 0444);
27MODULE_PARM_DESC(ap_max_inactivity, "AP timeout (in seconds) for station "
28 "inactivity");
29
30static int ap_bridge_packets[MAX_PARM_DEVICES] = { 1, DEF_INTS };
31module_param_array(ap_bridge_packets, int, NULL, 0444);
32MODULE_PARM_DESC(ap_bridge_packets, "Bridge packets directly between "
33 "stations");
34
35static int autom_ap_wds[MAX_PARM_DEVICES] = { 0, DEF_INTS };
36module_param_array(autom_ap_wds, int, NULL, 0444);
37MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
38 "automatically");
39
40
41static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
42static void hostap_event_expired_sta(struct net_device *dev,
43 struct sta_info *sta);
44static void handle_add_proc_queue(void *data);
45
46#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
47static void handle_wds_oper_queue(void *data);
48static void prism2_send_mgmt(struct net_device *dev,
49 u16 type_subtype, char *body,
50 int body_len, u8 *addr, u16 tx_cb_idx);
51#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
52
53
54#ifndef PRISM2_NO_PROCFS_DEBUG
55static int ap_debug_proc_read(char *page, char **start, off_t off,
56 int count, int *eof, void *data)
57{
58 char *p = page;
59 struct ap_data *ap = (struct ap_data *) data;
60
61 if (off != 0) {
62 *eof = 1;
63 return 0;
64 }
65
66 p += sprintf(p, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
67 p += sprintf(p, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
68 p += sprintf(p, "max_inactivity=%u\n", ap->max_inactivity / HZ);
69 p += sprintf(p, "bridge_packets=%u\n", ap->bridge_packets);
70 p += sprintf(p, "nullfunc_ack=%u\n", ap->nullfunc_ack);
71 p += sprintf(p, "autom_ap_wds=%u\n", ap->autom_ap_wds);
72 p += sprintf(p, "auth_algs=%u\n", ap->local->auth_algs);
73 p += sprintf(p, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
74
75 return (p - page);
76}
77#endif /* PRISM2_NO_PROCFS_DEBUG */
78
79
80static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
81{
82 sta->hnext = ap->sta_hash[STA_HASH(sta->addr)];
83 ap->sta_hash[STA_HASH(sta->addr)] = sta;
84}
85
86static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
87{
88 struct sta_info *s;
89
90 s = ap->sta_hash[STA_HASH(sta->addr)];
91 if (s == NULL) return;
92 if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) {
93 ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
94 return;
95 }
96
97 while (s->hnext != NULL && memcmp(s->hnext->addr, sta->addr, ETH_ALEN)
98 != 0)
99 s = s->hnext;
100 if (s->hnext != NULL)
101 s->hnext = s->hnext->hnext;
102 else
103 printk("AP: could not remove STA " MACSTR " from hash table\n",
104 MAC2STR(sta->addr));
105}
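/*
 * Editor's sketch (not part of the driver): the singly linked hash-bucket
 * unlink that ap_sta_hash_del() above performs, rewritten with the
 * pointer-to-pointer idiom so the head-of-bucket and middle-of-chain
 * cases collapse into one.
 */
#include <stdint.h>
#include <string.h>

struct hnode {
	uint8_t addr[6];
	struct hnode *hnext;
};

static void bucket_del(struct hnode **head, const uint8_t *addr)
{
	struct hnode **pp;

	for (pp = head; *pp != NULL; pp = &(*pp)->hnext) {
		if (memcmp((*pp)->addr, addr, 6) == 0) {
			*pp = (*pp)->hnext;	/* unlink; no special case for head */
			return;
		}
	}
}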
106
107static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
108{
109 if (sta->ap && sta->local)
110 hostap_event_expired_sta(sta->local->dev, sta);
111
112 if (ap->proc != NULL) {
113 char name[20];
114 sprintf(name, MACSTR, MAC2STR(sta->addr));
115 remove_proc_entry(name, ap->proc);
116 }
117
118 if (sta->crypt) {
119 sta->crypt->ops->deinit(sta->crypt->priv);
120 kfree(sta->crypt);
121 sta->crypt = NULL;
122 }
123
124 skb_queue_purge(&sta->tx_buf);
125
126 ap->num_sta--;
127#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
128 if (sta->aid > 0)
129 ap->sta_aid[sta->aid - 1] = NULL;
130
131 if (!sta->ap && sta->u.sta.challenge)
132 kfree(sta->u.sta.challenge);
133 del_timer(&sta->timer);
134#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
135
136 kfree(sta);
137}
138
139
140static void hostap_set_tim(local_info_t *local, int aid, int set)
141{
142 if (local->func->set_tim)
143 local->func->set_tim(local->dev, aid, set);
144}
145
146
147static void hostap_event_new_sta(struct net_device *dev, struct sta_info *sta)
148{
149 union iwreq_data wrqu;
150 memset(&wrqu, 0, sizeof(wrqu));
151 memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
152 wrqu.addr.sa_family = ARPHRD_ETHER;
153 wireless_send_event(dev, IWEVREGISTERED, &wrqu, NULL);
154}
155
156
157static void hostap_event_expired_sta(struct net_device *dev,
158 struct sta_info *sta)
159{
160 union iwreq_data wrqu;
161 memset(&wrqu, 0, sizeof(wrqu));
162 memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
163 wrqu.addr.sa_family = ARPHRD_ETHER;
164 wireless_send_event(dev, IWEVEXPIRED, &wrqu, NULL);
165}
166
167
168#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
169
170static void ap_handle_timer(unsigned long data)
171{
172 struct sta_info *sta = (struct sta_info *) data;
173 local_info_t *local;
174 struct ap_data *ap;
175 unsigned long next_time = 0;
176 int was_assoc;
177
178 if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) {
179 PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n");
180 return;
181 }
182
183 local = sta->local;
184 ap = local->ap;
185 was_assoc = sta->flags & WLAN_STA_ASSOC;
186
187 if (atomic_read(&sta->users) != 0)
188 next_time = jiffies + HZ;
189 else if ((sta->flags & WLAN_STA_PERM) && !(sta->flags & WLAN_STA_AUTH))
190 next_time = jiffies + ap->max_inactivity;
191
192 if (time_before(jiffies, sta->last_rx + ap->max_inactivity)) {
193 /* station activity detected; reset timeout state */
194 sta->timeout_next = STA_NULLFUNC;
195 next_time = sta->last_rx + ap->max_inactivity;
196 } else if (sta->timeout_next == STA_DISASSOC &&
197 !(sta->flags & WLAN_STA_PENDING_POLL)) {
198 /* STA ACKed data nullfunc frame poll */
199 sta->timeout_next = STA_NULLFUNC;
200 next_time = jiffies + ap->max_inactivity;
201 }
202
203 if (next_time) {
204 sta->timer.expires = next_time;
205 add_timer(&sta->timer);
206 return;
207 }
208
209 if (sta->ap)
210 sta->timeout_next = STA_DEAUTH;
211
212 if (sta->timeout_next == STA_DEAUTH && !(sta->flags & WLAN_STA_PERM)) {
213 spin_lock(&ap->sta_table_lock);
214 ap_sta_hash_del(ap, sta);
215 list_del(&sta->list);
216 spin_unlock(&ap->sta_table_lock);
217 sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
218 } else if (sta->timeout_next == STA_DISASSOC)
219 sta->flags &= ~WLAN_STA_ASSOC;
220
221 if (was_assoc && !(sta->flags & WLAN_STA_ASSOC) && !sta->ap)
222 hostap_event_expired_sta(local->dev, sta);
223
224 if (sta->timeout_next == STA_DEAUTH && sta->aid > 0 &&
225 !skb_queue_empty(&sta->tx_buf)) {
226 hostap_set_tim(local, sta->aid, 0);
227 sta->flags &= ~WLAN_STA_TIM;
228 }
229
230 if (sta->ap) {
231 if (ap->autom_ap_wds) {
232 PDEBUG(DEBUG_AP, "%s: removing automatic WDS "
233 "connection to AP " MACSTR "\n",
234 local->dev->name, MAC2STR(sta->addr));
235 hostap_wds_link_oper(local, sta->addr, WDS_DEL);
236 }
237 } else if (sta->timeout_next == STA_NULLFUNC) {
238 /* send data frame to poll STA and check whether this frame
239 * is ACKed */
240 /* FIX: IEEE80211_STYPE_NULLFUNC would be more appropriate, but
241 * it is apparently not retried so TX Exc events are not
242 * received for it */
243 sta->flags |= WLAN_STA_PENDING_POLL;
244 prism2_send_mgmt(local->dev, IEEE80211_FTYPE_DATA |
245 IEEE80211_STYPE_DATA, NULL, 0,
246 sta->addr, ap->tx_callback_poll);
247 } else {
248 int deauth = sta->timeout_next == STA_DEAUTH;
249 u16 resp;
250 PDEBUG(DEBUG_AP, "%s: sending %s info to STA " MACSTR
251 "(last=%lu, jiffies=%lu)\n",
252 local->dev->name,
253 deauth ? "deauthentication" : "disassociation",
254 MAC2STR(sta->addr), sta->last_rx, jiffies);
255
256 resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID :
257 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
258 prism2_send_mgmt(local->dev, IEEE80211_FTYPE_MGMT |
259 (deauth ? IEEE80211_STYPE_DEAUTH :
260 IEEE80211_STYPE_DISASSOC),
261 (char *) &resp, 2, sta->addr, 0);
262 }
263
264 if (sta->timeout_next == STA_DEAUTH) {
265 if (sta->flags & WLAN_STA_PERM) {
266 PDEBUG(DEBUG_AP, "%s: STA " MACSTR " would have been "
267 "removed, but it has 'perm' flag\n",
268 local->dev->name, MAC2STR(sta->addr));
269 } else
270 ap_free_sta(ap, sta);
271 return;
272 }
273
274 if (sta->timeout_next == STA_NULLFUNC) {
275 sta->timeout_next = STA_DISASSOC;
276 sta->timer.expires = jiffies + AP_DISASSOC_DELAY;
277 } else {
278 sta->timeout_next = STA_DEAUTH;
279 sta->timer.expires = jiffies + AP_DEAUTH_DELAY;
280 }
281
282 add_timer(&sta->timer);
283}
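/*
 * Editor's sketch (not part of the driver): the inactivity escalation that
 * ap_handle_timer() above implements. An idle station is first polled with
 * an empty data frame (the comment above notes a real nullfunc would not
 * be retried); if the poll goes unanswered, a disassociation follows after
 * AP_DISASSOC_DELAY and finally a deauthentication after AP_DEAUTH_DELAY.
 * The names below are illustrative, not the driver's own
 * STA_NULLFUNC/STA_DISASSOC/STA_DEAUTH constants.
 */
enum idle_step { STEP_POLL_DATA, STEP_DISASSOC, STEP_DEAUTH };

static enum idle_step next_idle_step(enum idle_step cur)
{
	return cur == STEP_POLL_DATA ? STEP_DISASSOC : STEP_DEAUTH;
}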
284
285
286void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
287 int resend)
288{
289 u8 addr[ETH_ALEN];
290 u16 resp;
291 int i;
292
293 PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
294 memset(addr, 0xff, ETH_ALEN);
295
296 resp = __constant_cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
297
298	/* deauth message sent; try to resend it a few times; the message is
299 * broadcast, so it may be delayed until next DTIM; there is not much
300 * else we can do at this point since the driver is going to be shut
301 * down */
302 for (i = 0; i < 5; i++) {
303 prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
304 IEEE80211_STYPE_DEAUTH,
305 (char *) &resp, 2, addr, 0);
306
307 if (!resend || ap->num_sta <= 0)
308 return;
309
310 mdelay(50);
311 }
312}
313
314
315static int ap_control_proc_read(char *page, char **start, off_t off,
316 int count, int *eof, void *data)
317{
318 char *p = page;
319 struct ap_data *ap = (struct ap_data *) data;
320 char *policy_txt;
321 struct list_head *ptr;
322 struct mac_entry *entry;
323
324 if (off != 0) {
325 *eof = 1;
326 return 0;
327 }
328
329 switch (ap->mac_restrictions.policy) {
330 case MAC_POLICY_OPEN:
331 policy_txt = "open";
332 break;
333 case MAC_POLICY_ALLOW:
334 policy_txt = "allow";
335 break;
336 case MAC_POLICY_DENY:
337 policy_txt = "deny";
338 break;
339 default:
340 policy_txt = "unknown";
341 break;
342 };
343 p += sprintf(p, "MAC policy: %s\n", policy_txt);
344 p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
345 p += sprintf(p, "MAC list:\n");
346 spin_lock_bh(&ap->mac_restrictions.lock);
347 for (ptr = ap->mac_restrictions.mac_list.next;
348 ptr != &ap->mac_restrictions.mac_list; ptr = ptr->next) {
349 if (p - page > PAGE_SIZE - 80) {
350			p += sprintf(p, "All entries did not fit on one page.\n");
351 break;
352 }
353
354 entry = list_entry(ptr, struct mac_entry, list);
355 p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr));
356 }
357 spin_unlock_bh(&ap->mac_restrictions.lock);
358
359 return (p - page);
360}
361
362
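/* Add the given MAC address to the access control list. Returns 0 on
 * success, -1 if memory allocation fails. */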
363static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
364 u8 *mac)
365{
366 struct mac_entry *entry;
367
368 entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL);
369 if (entry == NULL)
370 return -1;
371
372 memcpy(entry->addr, mac, ETH_ALEN);
373
374 spin_lock_bh(&mac_restrictions->lock);
375 list_add_tail(&entry->list, &mac_restrictions->mac_list);
376 mac_restrictions->entries++;
377 spin_unlock_bh(&mac_restrictions->lock);
378
379 return 0;
380}
381
382
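/* Remove the given MAC address from the access control list. Returns 0 if
 * the entry was found and freed, -1 otherwise. */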
383static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
384 u8 *mac)
385{
386 struct list_head *ptr;
387 struct mac_entry *entry;
388
389 spin_lock_bh(&mac_restrictions->lock);
390 for (ptr = mac_restrictions->mac_list.next;
391 ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
392 entry = list_entry(ptr, struct mac_entry, list);
393
394 if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
395 list_del(ptr);
396 kfree(entry);
397 mac_restrictions->entries--;
398 spin_unlock_bh(&mac_restrictions->lock);
399 return 0;
400 }
401 }
402 spin_unlock_bh(&mac_restrictions->lock);
403 return -1;
404}
405
406
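/* Check the given MAC address against the current access control policy:
 * with MAC_POLICY_ALLOW the address must be on the list, with
 * MAC_POLICY_DENY it must not be. Returns non-zero if access should be
 * denied. */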
407static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
408 u8 *mac)
409{
410 struct list_head *ptr;
411 struct mac_entry *entry;
412 int found = 0;
413
414 if (mac_restrictions->policy == MAC_POLICY_OPEN)
415 return 0;
416
417 spin_lock_bh(&mac_restrictions->lock);
418 for (ptr = mac_restrictions->mac_list.next;
419 ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
420 entry = list_entry(ptr, struct mac_entry, list);
421
422 if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
423 found = 1;
424 break;
425 }
426 }
427 spin_unlock_bh(&mac_restrictions->lock);
428
429 if (mac_restrictions->policy == MAC_POLICY_ALLOW)
430 return !found;
431 else
432 return found;
433}
434
435
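/* Free all entries from the access control list. */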
436static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
437{
438 struct list_head *ptr, *n;
439 struct mac_entry *entry;
440
441 if (mac_restrictions->entries == 0)
442 return;
443
444 spin_lock_bh(&mac_restrictions->lock);
445 for (ptr = mac_restrictions->mac_list.next, n = ptr->next;
446 ptr != &mac_restrictions->mac_list;
447 ptr = n, n = ptr->next) {
448 entry = list_entry(ptr, struct mac_entry, list);
449 list_del(ptr);
450 kfree(entry);
451 }
452 mac_restrictions->entries = 0;
453 spin_unlock_bh(&mac_restrictions->lock);
454}
455
456
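/* Force disconnection of the STA with the given MAC address: remove it from
 * the STA table, send a deauthentication frame to it and free its data.
 * Returns -EINVAL if no matching STA is found. */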
457static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
458 u8 *mac)
459{
460 struct sta_info *sta;
461 u16 resp;
462
463 spin_lock_bh(&ap->sta_table_lock);
464 sta = ap_get_sta(ap, mac);
465 if (sta) {
466 ap_sta_hash_del(ap, sta);
467 list_del(&sta->list);
468 }
469 spin_unlock_bh(&ap->sta_table_lock);
470
471 if (!sta)
472 return -EINVAL;
473
474 resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
475 prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH,
476 (char *) &resp, 2, sta->addr, 0);
477
478 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
479 hostap_event_expired_sta(dev, sta);
480
481 ap_free_sta(ap, sta);
482
483 return 0;
484}
485
486#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
487
488
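/* Remove and free all entries from the STA table. */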
489static void ap_control_kickall(struct ap_data *ap)
490{
491 struct list_head *ptr, *n;
492 struct sta_info *sta;
493
494 spin_lock_bh(&ap->sta_table_lock);
495 for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list;
496 ptr = n, n = ptr->next) {
497 sta = list_entry(ptr, struct sta_info, list);
498 ap_sta_hash_del(ap, sta);
499 list_del(&sta->list);
500 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
501 hostap_event_expired_sta(sta->local->dev, sta);
502 ap_free_sta(ap, sta);
503 }
504 spin_unlock_bh(&ap->sta_table_lock);
505}
506
507
508#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
509
510#define PROC_LIMIT (PAGE_SIZE - 80)
511
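/* procfs read handler for the "ap" entry: list the other access points seen
 * by this interface (BSSID, channel, signal, noise, rate, SSID, flags). */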
512static int prism2_ap_proc_read(char *page, char **start, off_t off,
513 int count, int *eof, void *data)
514{
515 char *p = page;
516 struct ap_data *ap = (struct ap_data *) data;
517 struct list_head *ptr;
518 int i;
519
520 if (off > PROC_LIMIT) {
521 *eof = 1;
522 return 0;
523 }
524
525 p += sprintf(p, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
526 spin_lock_bh(&ap->sta_table_lock);
527 for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) {
528 struct sta_info *sta = (struct sta_info *) ptr;
529
530 if (!sta->ap)
531 continue;
532
533 p += sprintf(p, MACSTR " %d %d %d %d '", MAC2STR(sta->addr),
534 sta->u.ap.channel, sta->last_rx_signal,
535 sta->last_rx_silence, sta->last_rx_rate);
536 for (i = 0; i < sta->u.ap.ssid_len; i++)
537 p += sprintf(p, ((sta->u.ap.ssid[i] >= 32 &&
538 sta->u.ap.ssid[i] < 127) ?
539 "%c" : "<%02x>"),
540 sta->u.ap.ssid[i]);
541 p += sprintf(p, "'");
542 if (sta->capability & WLAN_CAPABILITY_ESS)
543 p += sprintf(p, " [ESS]");
544 if (sta->capability & WLAN_CAPABILITY_IBSS)
545 p += sprintf(p, " [IBSS]");
546 if (sta->capability & WLAN_CAPABILITY_PRIVACY)
547 p += sprintf(p, " [WEP]");
548 p += sprintf(p, "\n");
549
550 if ((p - page) > PROC_LIMIT) {
551 printk(KERN_DEBUG "hostap: ap proc did not fit\n");
552 break;
553 }
554 }
555 spin_unlock_bh(&ap->sta_table_lock);
556
557 if ((p - page) <= off) {
558 *eof = 1;
559 return 0;
560 }
561
562 *start = page + off;
563
564 return (p - page - off);
565}
566#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
567
568
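/* Record station firmware specific quirks: f/w 0.8.0 needs the
 * data::nullfunc ACK workaround and f/w 1.4.2 is reported not to work in
 * Host AP mode. */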
569void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver)
570{
571 if (!ap)
572 return;
573
574 if (sta_fw_ver == PRISM2_FW_VER(0,8,0)) {
575 PDEBUG(DEBUG_AP, "Using data::nullfunc ACK workaround - "
576 "firmware upgrade recommended\n");
577 ap->nullfunc_ack = 1;
578 } else
579 ap->nullfunc_ack = 0;
580
581 if (sta_fw_ver == PRISM2_FW_VER(1,4,2)) {
582 printk(KERN_WARNING "%s: Warning: secondary station firmware "
583 "version 1.4.2 does not seem to work in Host AP mode\n",
584 ap->local->dev->name);
585 }
586}
587
588
589/* Called only as a tasklet (software IRQ) */
590static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
591{
592 struct ap_data *ap = data;
593 u16 fc;
594 struct ieee80211_hdr *hdr;
595
596 if (!ap->local->hostapd || !ap->local->apdev) {
597 dev_kfree_skb(skb);
598 return;
599 }
600
601 hdr = (struct ieee80211_hdr *) skb->data;
602 fc = le16_to_cpu(hdr->frame_ctl);
603
604 /* Pass the TX callback frame to the hostapd; use 802.11 header version
605	 * 1 to indicate failure (no ACK) and 2 to indicate success (frame ACKed) */
606
607 fc &= ~IEEE80211_FCTL_VERS;
608 fc |= ok ? BIT(1) : BIT(0);
609 hdr->frame_ctl = cpu_to_le16(fc);
610
611 skb->dev = ap->local->apdev;
612 skb_pull(skb, hostap_80211_get_hdrlen(fc));
613 skb->pkt_type = PACKET_OTHERHOST;
614 skb->protocol = __constant_htons(ETH_P_802_2);
615 memset(skb->cb, 0, sizeof(skb->cb));
616 netif_rx(skb);
617}
618
619
620#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
621/* Called only as a tasklet (software IRQ) */
622static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
623{
624 struct ap_data *ap = data;
625 struct net_device *dev = ap->local->dev;
626 struct ieee80211_hdr *hdr;
627 u16 fc, *pos, auth_alg, auth_transaction, status;
628 struct sta_info *sta = NULL;
629 char *txt = NULL;
630
631 if (ap->local->hostapd) {
632 dev_kfree_skb(skb);
633 return;
634 }
635
636 hdr = (struct ieee80211_hdr *) skb->data;
637 fc = le16_to_cpu(hdr->frame_ctl);
638 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
639 WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH ||
640 skb->len < IEEE80211_MGMT_HDR_LEN + 6) {
641 printk(KERN_DEBUG "%s: hostap_ap_tx_cb_auth received invalid "
642 "frame\n", dev->name);
643 dev_kfree_skb(skb);
644 return;
645 }
646
647 pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
648 auth_alg = le16_to_cpu(*pos++);
649 auth_transaction = le16_to_cpu(*pos++);
650 status = le16_to_cpu(*pos++);
651
652 if (!ok) {
653 txt = "frame was not ACKed";
654 goto done;
655 }
656
657 spin_lock(&ap->sta_table_lock);
658 sta = ap_get_sta(ap, hdr->addr1);
659 if (sta)
660 atomic_inc(&sta->users);
661 spin_unlock(&ap->sta_table_lock);
662
663 if (!sta) {
664 txt = "STA not found";
665 goto done;
666 }
667
668 if (status == WLAN_STATUS_SUCCESS &&
669 ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 2) ||
670 (auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 4))) {
671 txt = "STA authenticated";
672 sta->flags |= WLAN_STA_AUTH;
673 sta->last_auth = jiffies;
674 } else if (status != WLAN_STATUS_SUCCESS)
675 txt = "authentication failed";
676
677 done:
678 if (sta)
679 atomic_dec(&sta->users);
680 if (txt) {
681 PDEBUG(DEBUG_AP, "%s: " MACSTR " auth_cb - alg=%d trans#=%d "
682 "status=%d - %s\n",
683 dev->name, MAC2STR(hdr->addr1), auth_alg,
684 auth_transaction, status, txt);
685 }
686 dev_kfree_skb(skb);
687}
688
689
690/* Called only as a tasklet (software IRQ) */
691static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
692{
693 struct ap_data *ap = data;
694 struct net_device *dev = ap->local->dev;
695 struct ieee80211_hdr *hdr;
696 u16 fc, *pos, status;
697 struct sta_info *sta = NULL;
698 char *txt = NULL;
699
700 if (ap->local->hostapd) {
701 dev_kfree_skb(skb);
702 return;
703 }
704
705 hdr = (struct ieee80211_hdr *) skb->data;
706 fc = le16_to_cpu(hdr->frame_ctl);
707 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
708 (WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP &&
709 WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_REASSOC_RESP) ||
710 skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
711 printk(KERN_DEBUG "%s: hostap_ap_tx_cb_assoc received invalid "
712 "frame\n", dev->name);
713 dev_kfree_skb(skb);
714 return;
715 }
716
717 if (!ok) {
718 txt = "frame was not ACKed";
719 goto done;
720 }
721
722 spin_lock(&ap->sta_table_lock);
723 sta = ap_get_sta(ap, hdr->addr1);
724 if (sta)
725 atomic_inc(&sta->users);
726 spin_unlock(&ap->sta_table_lock);
727
728 if (!sta) {
729 txt = "STA not found";
730 goto done;
731 }
732
733 pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
734 pos++;
735 status = le16_to_cpu(*pos++);
736 if (status == WLAN_STATUS_SUCCESS) {
737 if (!(sta->flags & WLAN_STA_ASSOC))
738 hostap_event_new_sta(dev, sta);
739 txt = "STA associated";
740 sta->flags |= WLAN_STA_ASSOC;
741 sta->last_assoc = jiffies;
742 } else
743 txt = "association failed";
744
745 done:
746 if (sta)
747 atomic_dec(&sta->users);
748 if (txt) {
749 PDEBUG(DEBUG_AP, "%s: " MACSTR " assoc_cb - %s\n",
750 dev->name, MAC2STR(hdr->addr1), txt);
751 }
752 dev_kfree_skb(skb);
753}
754
755/* Called only as a tasklet (software IRQ); TX callback for poll frames used
756 * in verifying whether the STA is still present. */
757static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
758{
759 struct ap_data *ap = data;
760 struct ieee80211_hdr *hdr;
761 struct sta_info *sta;
762
763 if (skb->len < 24)
764 goto fail;
765 hdr = (struct ieee80211_hdr *) skb->data;
766 if (ok) {
767 spin_lock(&ap->sta_table_lock);
768 sta = ap_get_sta(ap, hdr->addr1);
769 if (sta)
770 sta->flags &= ~WLAN_STA_PENDING_POLL;
771 spin_unlock(&ap->sta_table_lock);
772 } else {
773 PDEBUG(DEBUG_AP, "%s: STA " MACSTR " did not ACK activity "
774 "poll frame\n", ap->local->dev->name,
775 MAC2STR(hdr->addr1));
776 }
777
778 fail:
779 dev_kfree_skb(skb);
780}
781#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
782
783
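/* Initialize the per-device AP state: module parameter based defaults, STA
 * list and lock, deferred work handlers and TX callbacks. */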
784void hostap_init_data(local_info_t *local)
785{
786 struct ap_data *ap = local->ap;
787
788 if (ap == NULL) {
789 printk(KERN_WARNING "hostap_init_data: ap == NULL\n");
790 return;
791 }
792 memset(ap, 0, sizeof(struct ap_data));
793 ap->local = local;
794
795 ap->ap_policy = GET_INT_PARM(other_ap_policy, local->card_idx);
796 ap->bridge_packets = GET_INT_PARM(ap_bridge_packets, local->card_idx);
797 ap->max_inactivity =
798 GET_INT_PARM(ap_max_inactivity, local->card_idx) * HZ;
799 ap->autom_ap_wds = GET_INT_PARM(autom_ap_wds, local->card_idx);
800
801 spin_lock_init(&ap->sta_table_lock);
802 INIT_LIST_HEAD(&ap->sta_list);
803
804 /* Initialize task queue structure for AP management */
805 INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
806
807 ap->tx_callback_idx =
808 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
809 if (ap->tx_callback_idx == 0)
810 printk(KERN_WARNING "%s: failed to register TX callback for "
811 "AP\n", local->dev->name);
812#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
813 INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
814
815 ap->tx_callback_auth =
816 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
817 ap->tx_callback_assoc =
818 hostap_tx_callback_register(local, hostap_ap_tx_cb_assoc, ap);
819 ap->tx_callback_poll =
820 hostap_tx_callback_register(local, hostap_ap_tx_cb_poll, ap);
821 if (ap->tx_callback_auth == 0 || ap->tx_callback_assoc == 0 ||
822 ap->tx_callback_poll == 0)
823 printk(KERN_WARNING "%s: failed to register TX callback for "
824 "AP\n", local->dev->name);
825
826 spin_lock_init(&ap->mac_restrictions.lock);
827 INIT_LIST_HEAD(&ap->mac_restrictions.mac_list);
828#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
829
830 ap->initialized = 1;
831}
832
833
834void hostap_init_ap_proc(local_info_t *local)
835{
836 struct ap_data *ap = local->ap;
837
838 ap->proc = local->proc;
839 if (ap->proc == NULL)
840 return;
841
842#ifndef PRISM2_NO_PROCFS_DEBUG
843 create_proc_read_entry("ap_debug", 0, ap->proc,
844 ap_debug_proc_read, ap);
845#endif /* PRISM2_NO_PROCFS_DEBUG */
846
847#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
848 create_proc_read_entry("ap_control", 0, ap->proc,
849 ap_control_proc_read, ap);
850 create_proc_read_entry("ap", 0, ap->proc,
851 prism2_ap_proc_read, ap);
852#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
853
854}
855
856
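/* Release AP resources: the challenge WEP context, all remaining STA
 * entries, the procfs entries and the MAC access control list. */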
857void hostap_free_data(struct ap_data *ap)
858{
859 struct list_head *n, *ptr;
860
861 if (ap == NULL || !ap->initialized) {
862 printk(KERN_DEBUG "hostap_free_data: ap has not yet been "
863 "initialized - skip resource freeing\n");
864 return;
865 }
866
867#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
868 if (ap->crypt)
869 ap->crypt->deinit(ap->crypt_priv);
870 ap->crypt = ap->crypt_priv = NULL;
871#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
872
873 list_for_each_safe(ptr, n, &ap->sta_list) {
874 struct sta_info *sta = list_entry(ptr, struct sta_info, list);
875 ap_sta_hash_del(ap, sta);
876 list_del(&sta->list);
877 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
878 hostap_event_expired_sta(sta->local->dev, sta);
879 ap_free_sta(ap, sta);
880 }
881
882#ifndef PRISM2_NO_PROCFS_DEBUG
883 if (ap->proc != NULL) {
884 remove_proc_entry("ap_debug", ap->proc);
885 }
886#endif /* PRISM2_NO_PROCFS_DEBUG */
887
888#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
889 if (ap->proc != NULL) {
890 remove_proc_entry("ap", ap->proc);
891 remove_proc_entry("ap_control", ap->proc);
892 }
893 ap_control_flush_macs(&ap->mac_restrictions);
894#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
895
896 ap->initialized = 0;
897}
898
899
900/* caller must hold ap->sta_table_lock for AP STA list handling */
901static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
902{
903 struct sta_info *s;
904
905 s = ap->sta_hash[STA_HASH(sta)];
906 while (s != NULL && memcmp(s->addr, sta, ETH_ALEN) != 0)
907 s = s->hnext;
908 return s;
909}
910
911
912#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
913
914/* Called from timer handler and from scheduled AP queue handlers */
915static void prism2_send_mgmt(struct net_device *dev,
916 u16 type_subtype, char *body,
917 int body_len, u8 *addr, u16 tx_cb_idx)
918{
919 struct hostap_interface *iface;
920 local_info_t *local;
921 struct ieee80211_hdr *hdr;
922 u16 fc;
923 struct sk_buff *skb;
924 struct hostap_skb_tx_data *meta;
925 int hdrlen;
926
927 iface = netdev_priv(dev);
928 local = iface->local;
929 dev = local->dev; /* always use master radio device */
930 iface = netdev_priv(dev);
931
932 if (!(dev->flags & IFF_UP)) {
933 PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt - device is not UP - "
934 "cannot send frame\n", dev->name);
935 return;
936 }
937
938 skb = dev_alloc_skb(sizeof(*hdr) + body_len);
939 if (skb == NULL) {
940 PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt failed to allocate "
941 "skb\n", dev->name);
942 return;
943 }
944
945 fc = type_subtype;
946 hdrlen = hostap_80211_get_hdrlen(fc);
947 hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen);
948 if (body)
949 memcpy(skb_put(skb, body_len), body, body_len);
950
951 memset(hdr, 0, hdrlen);
952
953 /* FIX: ctrl::ack sending used special HFA384X_TX_CTRL_802_11
954 * tx_control instead of using local->tx_control */
955
956
957 memcpy(hdr->addr1, addr, ETH_ALEN); /* DA / RA */
958 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) {
959 fc |= IEEE80211_FCTL_FROMDS;
960 memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* BSSID */
961 memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
962 } else if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_CTL) {
963 /* control:ACK does not have addr2 or addr3 */
964 memset(hdr->addr2, 0, ETH_ALEN);
965 memset(hdr->addr3, 0, ETH_ALEN);
966 } else {
967 memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
968 memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
969 }
970
971 hdr->frame_ctl = cpu_to_le16(fc);
972
973 meta = (struct hostap_skb_tx_data *) skb->cb;
974 memset(meta, 0, sizeof(*meta));
975 meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
976 meta->iface = iface;
977 meta->tx_cb_idx = tx_cb_idx;
978
979 skb->dev = dev;
980 skb->mac.raw = skb->nh.raw = skb->data;
981 dev_queue_xmit(skb);
982}
983#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
984
985
986static int prism2_sta_proc_read(char *page, char **start, off_t off,
987 int count, int *eof, void *data)
988{
989 char *p = page;
990 struct sta_info *sta = (struct sta_info *) data;
991 int i;
992
993 /* FIX: possible race condition.. the STA data could have just expired,
994 * but proc entry was still here so that the read could have started;
995 * some locking should be done here.. */
996
997 if (off != 0) {
998 *eof = 1;
999 return 0;
1000 }
1001
1002 p += sprintf(p, "%s=" MACSTR "\nusers=%d\naid=%d\n"
1003 "flags=0x%04x%s%s%s%s%s%s%s\n"
1004 "capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
1005 sta->ap ? "AP" : "STA",
1006 MAC2STR(sta->addr), atomic_read(&sta->users), sta->aid,
1007 sta->flags,
1008 sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
1009 sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
1010 sta->flags & WLAN_STA_PS ? " PS" : "",
1011 sta->flags & WLAN_STA_TIM ? " TIM" : "",
1012 sta->flags & WLAN_STA_PERM ? " PERM" : "",
1013 sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "",
1014 sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "",
1015 sta->capability, sta->listen_interval);
1016 /* supported_rates: 500 kbit/s units with msb ignored */
1017 for (i = 0; i < sizeof(sta->supported_rates); i++)
1018 if (sta->supported_rates[i] != 0)
1019 p += sprintf(p, "%d%sMbps ",
1020 (sta->supported_rates[i] & 0x7f) / 2,
1021 sta->supported_rates[i] & 1 ? ".5" : "");
1022 p += sprintf(p, "\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n"
1023 "last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n"
1024 "tx_packets=%lu\n"
1025 "rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n"
1026 "last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n"
1027 "tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n"
1028 "tx[11M]=%d\n"
1029 "rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n",
1030 jiffies, sta->last_auth, sta->last_assoc, sta->last_rx,
1031 sta->last_tx,
1032 sta->rx_packets, sta->tx_packets, sta->rx_bytes,
1033 sta->tx_bytes, skb_queue_len(&sta->tx_buf),
1034 sta->last_rx_silence,
1035 sta->last_rx_signal, sta->last_rx_rate / 10,
1036 sta->last_rx_rate % 10 ? ".5" : "",
1037 sta->tx_rate, sta->tx_count[0], sta->tx_count[1],
1038 sta->tx_count[2], sta->tx_count[3], sta->rx_count[0],
1039 sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]);
1040 if (sta->crypt && sta->crypt->ops && sta->crypt->ops->print_stats)
1041 p = sta->crypt->ops->print_stats(p, sta->crypt->priv);
1042#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
1043 if (sta->ap) {
1044 if (sta->u.ap.channel >= 0)
1045 p += sprintf(p, "channel=%d\n", sta->u.ap.channel);
1046 p += sprintf(p, "ssid=");
1047 for (i = 0; i < sta->u.ap.ssid_len; i++)
1048 p += sprintf(p, ((sta->u.ap.ssid[i] >= 32 &&
1049 sta->u.ap.ssid[i] < 127) ?
1050 "%c" : "<%02x>"),
1051 sta->u.ap.ssid[i]);
1052 p += sprintf(p, "\n");
1053 }
1054#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
1055
1056 return (p - page);
1057}
1058
1059
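/* Deferred work handler that creates the per-STA procfs entries queued by
 * ap_add_sta(); runs in process context so that GFP_KERNEL allocations are
 * safe. */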
1060static void handle_add_proc_queue(void *data)
1061{
1062 struct ap_data *ap = (struct ap_data *) data;
1063 struct sta_info *sta;
1064 char name[20];
1065 struct add_sta_proc_data *entry, *prev;
1066
1067 entry = ap->add_sta_proc_entries;
1068 ap->add_sta_proc_entries = NULL;
1069
1070 while (entry) {
1071 spin_lock_bh(&ap->sta_table_lock);
1072 sta = ap_get_sta(ap, entry->addr);
1073 if (sta)
1074 atomic_inc(&sta->users);
1075 spin_unlock_bh(&ap->sta_table_lock);
1076
1077 if (sta) {
1078 sprintf(name, MACSTR, MAC2STR(sta->addr));
1079 sta->proc = create_proc_read_entry(
1080 name, 0, ap->proc,
1081 prism2_sta_proc_read, sta);
1082
1083 atomic_dec(&sta->users);
1084 }
1085
1086 prev = entry;
1087 entry = entry->next;
1088 kfree(prev);
1089 }
1090}
1091
1092
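/* Allocate and initialize a new STA entry and add it to the STA list and
 * hash table; creation of its procfs entry is deferred to process context. */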
1093static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
1094{
1095 struct sta_info *sta;
1096
1097 sta = (struct sta_info *)
1098 kmalloc(sizeof(struct sta_info), GFP_ATOMIC);
1099 if (sta == NULL) {
1100 PDEBUG(DEBUG_AP, "AP: kmalloc failed\n");
1101 return NULL;
1102 }
1103
1104 /* initialize STA info data */
1105 memset(sta, 0, sizeof(struct sta_info));
1106 sta->local = ap->local;
1107 skb_queue_head_init(&sta->tx_buf);
1108 memcpy(sta->addr, addr, ETH_ALEN);
1109
1110 atomic_inc(&sta->users);
1111 spin_lock_bh(&ap->sta_table_lock);
1112 list_add(&sta->list, &ap->sta_list);
1113 ap->num_sta++;
1114 ap_sta_hash_add(ap, sta);
1115 spin_unlock_bh(&ap->sta_table_lock);
1116
1117 if (ap->proc) {
1118 struct add_sta_proc_data *entry;
1119 /* schedule a non-interrupt context process to add a procfs
1120		 * entry for the STA since procfs code uses GFP_KERNEL */
1121 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1122 if (entry) {
1123 memcpy(entry->addr, sta->addr, ETH_ALEN);
1124 entry->next = ap->add_sta_proc_entries;
1125 ap->add_sta_proc_entries = entry;
1126 schedule_work(&ap->add_sta_proc_queue);
1127 } else
1128 printk(KERN_DEBUG "Failed to add STA proc data\n");
1129 }
1130
1131#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
1132 init_timer(&sta->timer);
1133 sta->timer.expires = jiffies + ap->max_inactivity;
1134 sta->timer.data = (unsigned long) sta;
1135 sta->timer.function = ap_handle_timer;
1136 if (!ap->local->hostapd)
1137 add_timer(&sta->timer);
1138#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
1139
1140 return sta;
1141}
1142
1143
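/* Return 1 if the given rate index is usable for TX to this STA: it must not
 * exceed the STA's maximum rate, must be in the STA's supported rate mask
 * and must be allowed by the local TX rate control mask. */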
1144static int ap_tx_rate_ok(int rateidx, struct sta_info *sta,
1145 local_info_t *local)
1146{
1147 if (rateidx > sta->tx_max_rate ||
1148 !(sta->tx_supp_rates & (1 << rateidx)))
1149 return 0;
1150
1151 if (local->tx_rate_control != 0 &&
1152 !(local->tx_rate_control & (1 << rateidx)))
1153 return 0;
1154
1155 return 1;
1156}
1157
1158
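/* Build the STA's tx_supp_rates bitmask from its supported rates element and
 * select the highest usable TX rate (1, 2, 5.5 or 11 Mbps). */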
1159static void prism2_check_tx_rates(struct sta_info *sta)
1160{
1161 int i;
1162
1163 sta->tx_supp_rates = 0;
1164 for (i = 0; i < sizeof(sta->supported_rates); i++) {
1165 if ((sta->supported_rates[i] & 0x7f) == 2)
1166 sta->tx_supp_rates |= WLAN_RATE_1M;
1167 if ((sta->supported_rates[i] & 0x7f) == 4)
1168 sta->tx_supp_rates |= WLAN_RATE_2M;
1169 if ((sta->supported_rates[i] & 0x7f) == 11)
1170 sta->tx_supp_rates |= WLAN_RATE_5M5;
1171 if ((sta->supported_rates[i] & 0x7f) == 22)
1172 sta->tx_supp_rates |= WLAN_RATE_11M;
1173 }
1174 sta->tx_max_rate = sta->tx_rate = sta->tx_rate_idx = 0;
1175 if (sta->tx_supp_rates & WLAN_RATE_1M) {
1176 sta->tx_max_rate = 0;
1177 if (ap_tx_rate_ok(0, sta, sta->local)) {
1178 sta->tx_rate = 10;
1179 sta->tx_rate_idx = 0;
1180 }
1181 }
1182 if (sta->tx_supp_rates & WLAN_RATE_2M) {
1183 sta->tx_max_rate = 1;
1184 if (ap_tx_rate_ok(1, sta, sta->local)) {
1185 sta->tx_rate = 20;
1186 sta->tx_rate_idx = 1;
1187 }
1188 }
1189 if (sta->tx_supp_rates & WLAN_RATE_5M5) {
1190 sta->tx_max_rate = 2;
1191 if (ap_tx_rate_ok(2, sta, sta->local)) {
1192 sta->tx_rate = 55;
1193 sta->tx_rate_idx = 2;
1194 }
1195 }
1196 if (sta->tx_supp_rates & WLAN_RATE_11M) {
1197 sta->tx_max_rate = 3;
1198 if (ap_tx_rate_ok(3, sta, sta->local)) {
1199 sta->tx_rate = 110;
1200 sta->tx_rate_idx = 3;
1201 }
1202 }
1203}
1204
1205
1206#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
1207
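/* Set up a WEP crypto context with a random key; this context is used only
 * for generating shared key authentication challenges. */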
1208static void ap_crypt_init(struct ap_data *ap)
1209{
1210 ap->crypt = ieee80211_get_crypto_ops("WEP");
1211
1212 if (ap->crypt) {
1213 if (ap->crypt->init) {
1214 ap->crypt_priv = ap->crypt->init(0);
1215 if (ap->crypt_priv == NULL)
1216 ap->crypt = NULL;
1217 else {
1218 u8 key[WEP_KEY_LEN];
1219 get_random_bytes(key, WEP_KEY_LEN);
1220 ap->crypt->set_key(key, WEP_KEY_LEN, NULL,
1221 ap->crypt_priv);
1222 }
1223 }
1224 }
1225
1226 if (ap->crypt == NULL) {
1227 printk(KERN_WARNING "AP could not initialize WEP: load module "
1228 "ieee80211_crypt_wep.ko\n");
1229 }
1230}
1231
1232
1233/* Generate challenge data for shared key authentication. IEEE 802.11 specifies
1234 * that the WEP algorithm is used for generating the challenge. This should
1235 * be unique, but otherwise there is no real need for randomness. Initialize
1236 * WEP with a pseudo random key and then use an increasing IV to get unique
1237 * challenge streams.
1238 *
1239 * Called only as a scheduled task for pending AP frames.
1240 */
1241static char * ap_auth_make_challenge(struct ap_data *ap)
1242{
1243 char *tmpbuf;
1244 struct sk_buff *skb;
1245
1246 if (ap->crypt == NULL) {
1247 ap_crypt_init(ap);
1248 if (ap->crypt == NULL)
1249 return NULL;
1250 }
1251
1252 tmpbuf = (char *) kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC);
1253 if (tmpbuf == NULL) {
1254 PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n");
1255 return NULL;
1256 }
1257
1258 skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
1259 ap->crypt->extra_prefix_len +
1260 ap->crypt->extra_postfix_len);
1261 if (skb == NULL) {
1262 kfree(tmpbuf);
1263 return NULL;
1264 }
1265
1266 skb_reserve(skb, ap->crypt->extra_prefix_len);
1267 memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
1268 WLAN_AUTH_CHALLENGE_LEN);
1269 if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
1270 dev_kfree_skb(skb);
1271 kfree(tmpbuf);
1272 return NULL;
1273 }
1274
1275 memcpy(tmpbuf, skb->data + ap->crypt->extra_prefix_len,
1276 WLAN_AUTH_CHALLENGE_LEN);
1277 dev_kfree_skb(skb);
1278
1279 return tmpbuf;
1280}
1281
1282
1283/* Called only as a scheduled task for pending AP frames. */
1284static void handle_authen(local_info_t *local, struct sk_buff *skb,
1285 struct hostap_80211_rx_status *rx_stats)
1286{
1287 struct net_device *dev = local->dev;
1288 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1289 size_t hdrlen;
1290 struct ap_data *ap = local->ap;
1291 char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
1292 int len, olen;
1293 u16 auth_alg, auth_transaction, status_code, *pos;
1294 u16 resp = WLAN_STATUS_SUCCESS, fc;
1295 struct sta_info *sta = NULL;
1296 struct ieee80211_crypt_data *crypt;
1297 char *txt = "";
1298
1299 len = skb->len - IEEE80211_MGMT_HDR_LEN;
1300
1301 fc = le16_to_cpu(hdr->frame_ctl);
1302 hdrlen = hostap_80211_get_hdrlen(fc);
1303
1304 if (len < 6) {
1305 PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload "
1306 "(len=%d) from " MACSTR "\n", dev->name, len,
1307 MAC2STR(hdr->addr2));
1308 return;
1309 }
1310
1311 spin_lock_bh(&local->ap->sta_table_lock);
1312 sta = ap_get_sta(local->ap, hdr->addr2);
1313 if (sta)
1314 atomic_inc(&sta->users);
1315 spin_unlock_bh(&local->ap->sta_table_lock);
1316
1317 if (sta && sta->crypt)
1318 crypt = sta->crypt;
1319 else {
1320 int idx = 0;
1321 if (skb->len >= hdrlen + 3)
1322 idx = skb->data[hdrlen + 3] >> 6;
1323 crypt = local->crypt[idx];
1324 }
1325
1326 pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
1327 auth_alg = __le16_to_cpu(*pos);
1328 pos++;
1329 auth_transaction = __le16_to_cpu(*pos);
1330 pos++;
1331 status_code = __le16_to_cpu(*pos);
1332 pos++;
1333
1334 if (memcmp(dev->dev_addr, hdr->addr2, ETH_ALEN) == 0 ||
1335 ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
1336 txt = "authentication denied";
1337 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1338 goto fail;
1339 }
1340
1341 if (((local->auth_algs & PRISM2_AUTH_OPEN) &&
1342 auth_alg == WLAN_AUTH_OPEN) ||
1343 ((local->auth_algs & PRISM2_AUTH_SHARED_KEY) &&
1344 crypt && auth_alg == WLAN_AUTH_SHARED_KEY)) {
1345 } else {
1346 txt = "unsupported algorithm";
1347 resp = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
1348 goto fail;
1349 }
1350
1351 if (len >= 8) {
1352 u8 *u = (u8 *) pos;
1353 if (*u == WLAN_EID_CHALLENGE) {
1354 if (*(u + 1) != WLAN_AUTH_CHALLENGE_LEN) {
1355 txt = "invalid challenge len";
1356 resp = WLAN_STATUS_CHALLENGE_FAIL;
1357 goto fail;
1358 }
1359 if (len - 8 < WLAN_AUTH_CHALLENGE_LEN) {
1360 txt = "challenge underflow";
1361 resp = WLAN_STATUS_CHALLENGE_FAIL;
1362 goto fail;
1363 }
1364 challenge = (char *) (u + 2);
1365 }
1366 }
1367
1368 if (sta && sta->ap) {
1369 if (time_after(jiffies, sta->u.ap.last_beacon +
1370 (10 * sta->listen_interval * HZ) / 1024)) {
1371 PDEBUG(DEBUG_AP, "%s: no beacons received for a while,"
1372 " assuming AP " MACSTR " is now STA\n",
1373 dev->name, MAC2STR(sta->addr));
1374 sta->ap = 0;
1375 sta->flags = 0;
1376 sta->u.sta.challenge = NULL;
1377 } else {
1378 txt = "AP trying to authenticate?";
1379 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1380 goto fail;
1381 }
1382 }
1383
1384 if ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) ||
1385 (auth_alg == WLAN_AUTH_SHARED_KEY &&
1386 (auth_transaction == 1 ||
1387 (auth_transaction == 3 && sta != NULL &&
1388 sta->u.sta.challenge != NULL)))) {
1389 } else {
1390 txt = "unknown authentication transaction number";
1391 resp = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
1392 goto fail;
1393 }
1394
1395 if (sta == NULL) {
1396 txt = "new STA";
1397
1398 if (local->ap->num_sta >= MAX_STA_COUNT) {
1399 /* FIX: might try to remove some old STAs first? */
1400 txt = "no more room for new STAs";
1401 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1402 goto fail;
1403 }
1404
1405 sta = ap_add_sta(local->ap, hdr->addr2);
1406 if (sta == NULL) {
1407 txt = "ap_add_sta failed";
1408 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1409 goto fail;
1410 }
1411 }
1412
1413 switch (auth_alg) {
1414 case WLAN_AUTH_OPEN:
1415 txt = "authOK";
1416 /* IEEE 802.11 standard is not completely clear about
1417 * whether STA is considered authenticated after
1418		 * authentication OK frame has been sent or after it
1419 * has been ACKed. In order to reduce interoperability
1420 * issues, mark the STA authenticated before ACK. */
1421 sta->flags |= WLAN_STA_AUTH;
1422 break;
1423
1424 case WLAN_AUTH_SHARED_KEY:
1425 if (auth_transaction == 1) {
1426 if (sta->u.sta.challenge == NULL) {
1427 sta->u.sta.challenge =
1428 ap_auth_make_challenge(local->ap);
1429 if (sta->u.sta.challenge == NULL) {
1430 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1431 goto fail;
1432 }
1433 }
1434 } else {
1435 if (sta->u.sta.challenge == NULL ||
1436 challenge == NULL ||
1437 memcmp(sta->u.sta.challenge, challenge,
1438 WLAN_AUTH_CHALLENGE_LEN) != 0 ||
1439 !(fc & IEEE80211_FCTL_PROTECTED)) {
1440 txt = "challenge response incorrect";
1441 resp = WLAN_STATUS_CHALLENGE_FAIL;
1442 goto fail;
1443 }
1444
1445 txt = "challenge OK - authOK";
1446 /* IEEE 802.11 standard is not completely clear about
1447 * whether STA is considered authenticated after
1448			 * authentication OK frame has been sent or after it
1449 * has been ACKed. In order to reduce interoperability
1450 * issues, mark the STA authenticated before ACK. */
1451 sta->flags |= WLAN_STA_AUTH;
1452 kfree(sta->u.sta.challenge);
1453 sta->u.sta.challenge = NULL;
1454 }
1455 break;
1456 }
1457
1458 fail:
1459 pos = (u16 *) body;
1460 *pos = cpu_to_le16(auth_alg);
1461 pos++;
1462 *pos = cpu_to_le16(auth_transaction + 1);
1463 pos++;
1464 *pos = cpu_to_le16(resp); /* status_code */
1465 pos++;
1466 olen = 6;
1467
1468 if (resp == WLAN_STATUS_SUCCESS && sta != NULL &&
1469 sta->u.sta.challenge != NULL &&
1470 auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 1) {
1471 u8 *tmp = (u8 *) pos;
1472 *tmp++ = WLAN_EID_CHALLENGE;
1473 *tmp++ = WLAN_AUTH_CHALLENGE_LEN;
1474 pos++;
1475 memcpy(pos, sta->u.sta.challenge, WLAN_AUTH_CHALLENGE_LEN);
1476 olen += 2 + WLAN_AUTH_CHALLENGE_LEN;
1477 }
1478
1479 prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH,
1480 body, olen, hdr->addr2, ap->tx_callback_auth);
1481
1482 if (sta) {
1483 sta->last_rx = jiffies;
1484 atomic_dec(&sta->users);
1485 }
1486
1487 if (resp) {
1488 PDEBUG(DEBUG_AP, "%s: " MACSTR " auth (alg=%d trans#=%d "
1489 "stat=%d len=%d fc=%04x) ==> %d (%s)\n",
1490 dev->name, MAC2STR(hdr->addr2), auth_alg,
1491 auth_transaction, status_code, len, fc, resp, txt);
1492 }
1493}
1494
1495
1496/* Called only as a scheduled task for pending AP frames. */
1497static void handle_assoc(local_info_t *local, struct sk_buff *skb,
1498 struct hostap_80211_rx_status *rx_stats, int reassoc)
1499{
1500 struct net_device *dev = local->dev;
1501 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1502 char body[12], *p, *lpos;
1503 int len, left;
1504 u16 *pos;
1505 u16 resp = WLAN_STATUS_SUCCESS;
1506 struct sta_info *sta = NULL;
1507 int send_deauth = 0;
1508 char *txt = "";
1509 u8 prev_ap[ETH_ALEN];
1510
1511 left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
1512
1513 if (len < (reassoc ? 10 : 4)) {
1514 PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload "
1515 "(len=%d, reassoc=%d) from " MACSTR "\n",
1516 dev->name, len, reassoc, MAC2STR(hdr->addr2));
1517 return;
1518 }
1519
1520 spin_lock_bh(&local->ap->sta_table_lock);
1521 sta = ap_get_sta(local->ap, hdr->addr2);
1522 if (sta == NULL || (sta->flags & WLAN_STA_AUTH) == 0) {
1523 spin_unlock_bh(&local->ap->sta_table_lock);
1524 txt = "trying to associate before authentication";
1525 send_deauth = 1;
1526 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1527 sta = NULL; /* do not decrement sta->users */
1528 goto fail;
1529 }
1530 atomic_inc(&sta->users);
1531 spin_unlock_bh(&local->ap->sta_table_lock);
1532
1533 pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
1534 sta->capability = __le16_to_cpu(*pos);
1535 pos++; left -= 2;
1536 sta->listen_interval = __le16_to_cpu(*pos);
1537 pos++; left -= 2;
1538
1539 if (reassoc) {
1540 memcpy(prev_ap, pos, ETH_ALEN);
1541 pos++; pos++; pos++; left -= 6;
1542 } else
1543 memset(prev_ap, 0, ETH_ALEN);
1544
1545 if (left >= 2) {
1546 unsigned int ileft;
1547 unsigned char *u = (unsigned char *) pos;
1548
1549 if (*u == WLAN_EID_SSID) {
1550 u++; left--;
1551 ileft = *u;
1552 u++; left--;
1553
1554 if (ileft > left || ileft > MAX_SSID_LEN) {
1555 txt = "SSID overflow";
1556 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1557 goto fail;
1558 }
1559
1560 if (ileft != strlen(local->essid) ||
1561 memcmp(local->essid, u, ileft) != 0) {
1562 txt = "not our SSID";
1563 resp = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
1564 goto fail;
1565 }
1566
1567 u += ileft;
1568 left -= ileft;
1569 }
1570
1571 if (left >= 2 && *u == WLAN_EID_SUPP_RATES) {
1572 u++; left--;
1573 ileft = *u;
1574 u++; left--;
1575
1576 if (ileft > left || ileft == 0 ||
1577 ileft > WLAN_SUPP_RATES_MAX) {
1578 txt = "SUPP_RATES len error";
1579 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1580 goto fail;
1581 }
1582
1583 memset(sta->supported_rates, 0,
1584 sizeof(sta->supported_rates));
1585 memcpy(sta->supported_rates, u, ileft);
1586 prism2_check_tx_rates(sta);
1587
1588 u += ileft;
1589 left -= ileft;
1590 }
1591
1592 if (left > 0) {
1593 PDEBUG(DEBUG_AP, "%s: assoc from " MACSTR " with extra"
1594 " data (%d bytes) [",
1595 dev->name, MAC2STR(hdr->addr2), left);
1596 while (left > 0) {
1597 PDEBUG2(DEBUG_AP, "<%02x>", *u);
1598 u++; left--;
1599 }
1600 PDEBUG2(DEBUG_AP, "]\n");
1601 }
1602 } else {
1603 txt = "frame underflow";
1604 resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
1605 goto fail;
1606 }
1607
1608 /* get a unique AID */
1609 if (sta->aid > 0)
1610 txt = "OK, old AID";
1611 else {
1612 spin_lock_bh(&local->ap->sta_table_lock);
1613 for (sta->aid = 1; sta->aid <= MAX_AID_TABLE_SIZE; sta->aid++)
1614 if (local->ap->sta_aid[sta->aid - 1] == NULL)
1615 break;
1616 if (sta->aid > MAX_AID_TABLE_SIZE) {
1617 sta->aid = 0;
1618 spin_unlock_bh(&local->ap->sta_table_lock);
1619 resp = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
1620 txt = "no room for more AIDs";
1621 } else {
1622 local->ap->sta_aid[sta->aid - 1] = sta;
1623 spin_unlock_bh(&local->ap->sta_table_lock);
1624 txt = "OK, new AID";
1625 }
1626 }
1627
1628 fail:
1629 pos = (u16 *) body;
1630
1631 if (send_deauth) {
1632 *pos = __constant_cpu_to_le16(
1633 WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH);
1634 pos++;
1635 } else {
1636 /* FIX: CF-Pollable and CF-PollReq should be set to match the
1637 * values in beacons/probe responses */
1638 /* FIX: how about privacy and WEP? */
1639 /* capability */
1640 *pos = __constant_cpu_to_le16(WLAN_CAPABILITY_ESS);
1641 pos++;
1642
1643 /* status_code */
1644 *pos = __cpu_to_le16(resp);
1645 pos++;
1646
1647 *pos = __cpu_to_le16((sta && sta->aid > 0 ? sta->aid : 0) |
1648 BIT(14) | BIT(15)); /* AID */
1649 pos++;
1650
1651 /* Supported rates (Information element) */
1652 p = (char *) pos;
1653 *p++ = WLAN_EID_SUPP_RATES;
1654 lpos = p;
1655 *p++ = 0; /* len */
1656 if (local->tx_rate_control & WLAN_RATE_1M) {
1657 *p++ = local->basic_rates & WLAN_RATE_1M ? 0x82 : 0x02;
1658 (*lpos)++;
1659 }
1660 if (local->tx_rate_control & WLAN_RATE_2M) {
1661 *p++ = local->basic_rates & WLAN_RATE_2M ? 0x84 : 0x04;
1662 (*lpos)++;
1663 }
1664 if (local->tx_rate_control & WLAN_RATE_5M5) {
1665 *p++ = local->basic_rates & WLAN_RATE_5M5 ?
1666 0x8b : 0x0b;
1667 (*lpos)++;
1668 }
1669 if (local->tx_rate_control & WLAN_RATE_11M) {
1670 *p++ = local->basic_rates & WLAN_RATE_11M ?
1671 0x96 : 0x16;
1672 (*lpos)++;
1673 }
1674 pos = (u16 *) p;
1675 }
1676
1677 prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
1678 (send_deauth ? IEEE80211_STYPE_DEAUTH :
1679 (reassoc ? IEEE80211_STYPE_REASSOC_RESP :
1680 IEEE80211_STYPE_ASSOC_RESP)),
1681 body, (u8 *) pos - (u8 *) body,
1682 hdr->addr2,
1683 send_deauth ? 0 : local->ap->tx_callback_assoc);
1684
1685 if (sta) {
1686 if (resp == WLAN_STATUS_SUCCESS) {
1687 sta->last_rx = jiffies;
1688 /* STA will be marked associated from TX callback, if
1689 * AssocResp is ACKed */
1690 }
1691 atomic_dec(&sta->users);
1692 }
1693
1694#if 0
1695 PDEBUG(DEBUG_AP, "%s: " MACSTR " %sassoc (len=%d prev_ap=" MACSTR
1696 ") => %d(%d) (%s)\n",
1697 dev->name, MAC2STR(hdr->addr2), reassoc ? "re" : "", len,
1698 MAC2STR(prev_ap), resp, send_deauth, txt);
1699#endif
1700}
1701
1702
1703/* Called only as a scheduled task for pending AP frames. */
1704static void handle_deauth(local_info_t *local, struct sk_buff *skb,
1705 struct hostap_80211_rx_status *rx_stats)
1706{
1707 struct net_device *dev = local->dev;
1708 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1709 char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
1710 int len;
1711 u16 reason_code, *pos;
1712 struct sta_info *sta = NULL;
1713
1714 len = skb->len - IEEE80211_MGMT_HDR_LEN;
1715
1716 if (len < 2) {
1717 printk("handle_deauth - too short payload (len=%d)\n", len);
1718 return;
1719 }
1720
1721 pos = (u16 *) body;
1722 reason_code = __le16_to_cpu(*pos);
1723
1724 PDEBUG(DEBUG_AP, "%s: deauthentication: " MACSTR " len=%d, "
1725 "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len,
1726 reason_code);
1727
1728 spin_lock_bh(&local->ap->sta_table_lock);
1729 sta = ap_get_sta(local->ap, hdr->addr2);
1730 if (sta != NULL) {
1731 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
1732 hostap_event_expired_sta(local->dev, sta);
1733 sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
1734 }
1735 spin_unlock_bh(&local->ap->sta_table_lock);
1736 if (sta == NULL) {
1737 printk("%s: deauthentication from " MACSTR ", "
1738 "reason_code=%d, but STA not authenticated\n", dev->name,
1739 MAC2STR(hdr->addr2), reason_code);
1740 }
1741}
1742
1743
1744/* Called only as a scheduled task for pending AP frames. */
1745static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
1746 struct hostap_80211_rx_status *rx_stats)
1747{
1748 struct net_device *dev = local->dev;
1749 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1750 char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
1751 int len;
1752 u16 reason_code, *pos;
1753 struct sta_info *sta = NULL;
1754
1755 len = skb->len - IEEE80211_MGMT_HDR_LEN;
1756
1757 if (len < 2) {
1758 printk("handle_disassoc - too short payload (len=%d)\n", len);
1759 return;
1760 }
1761
1762 pos = (u16 *) body;
1763 reason_code = __le16_to_cpu(*pos);
1764
1765 PDEBUG(DEBUG_AP, "%s: disassociation: " MACSTR " len=%d, "
1766 "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len,
1767 reason_code);
1768
1769 spin_lock_bh(&local->ap->sta_table_lock);
1770 sta = ap_get_sta(local->ap, hdr->addr2);
1771 if (sta != NULL) {
1772 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
1773 hostap_event_expired_sta(local->dev, sta);
1774 sta->flags &= ~WLAN_STA_ASSOC;
1775 }
1776 spin_unlock_bh(&local->ap->sta_table_lock);
1777 if (sta == NULL) {
1778 printk("%s: disassociation from " MACSTR ", "
1779 "reason_code=%d, but STA not authenticated\n",
1780 dev->name, MAC2STR(hdr->addr2), reason_code);
1781 }
1782}
1783
1784
1785/* Called only as a scheduled task for pending AP frames. */
1786static void ap_handle_data_nullfunc(local_info_t *local,
1787 struct ieee80211_hdr *hdr)
1788{
1789 struct net_device *dev = local->dev;
1790
1791 /* some STA f/w's seem to require control::ACK frame for
1792 * data::nullfunc, but at least Prism2 station f/w version 0.8.0 does
1793 * not send this..
1794 * send control::ACK for the data::nullfunc */
1795
1796 printk(KERN_DEBUG "Sending control::ACK for data::nullfunc\n");
1797 prism2_send_mgmt(dev, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK,
1798 NULL, 0, hdr->addr2, 0);
1799}
1800
1801
1802/* Called only as a scheduled task for pending AP frames. */
1803static void ap_handle_dropped_data(local_info_t *local,
1804 struct ieee80211_hdr *hdr)
1805{
1806 struct net_device *dev = local->dev;
1807 struct sta_info *sta;
1808 u16 reason;
1809
1810 spin_lock_bh(&local->ap->sta_table_lock);
1811 sta = ap_get_sta(local->ap, hdr->addr2);
1812 if (sta)
1813 atomic_inc(&sta->users);
1814 spin_unlock_bh(&local->ap->sta_table_lock);
1815
1816 if (sta != NULL && (sta->flags & WLAN_STA_ASSOC)) {
1817 PDEBUG(DEBUG_AP, "ap_handle_dropped_data: STA is now okay?\n");
1818 atomic_dec(&sta->users);
1819 return;
1820 }
1821
1822 reason = __constant_cpu_to_le16(
1823 WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
1824 prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
1825 ((sta == NULL || !(sta->flags & WLAN_STA_ASSOC)) ?
1826 IEEE80211_STYPE_DEAUTH : IEEE80211_STYPE_DISASSOC),
1827 (char *) &reason, sizeof(reason), hdr->addr2, 0);
1828
1829 if (sta)
1830 atomic_dec(&sta->users);
1831}
1832
1833#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
1834
1835
1836/* Called only as a scheduled task for pending AP frames. */
1837static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
1838 struct sk_buff *skb)
1839{
1840 struct hostap_skb_tx_data *meta;
1841
1842 if (!(sta->flags & WLAN_STA_PS)) {
1843 /* Station has moved to non-PS mode, so send all buffered
1844 * frames using normal device queue. */
1845 dev_queue_xmit(skb);
1846 return;
1847 }
1848
1849 /* add a flag for hostap_handle_sta_tx() to know that this skb should
1850 * be passed through even though STA is using PS */
1851 meta = (struct hostap_skb_tx_data *) skb->cb;
1852 meta->flags |= HOSTAP_TX_FLAGS_BUFFERED_FRAME;
1853 if (!skb_queue_empty(&sta->tx_buf)) {
1854 /* indicate to STA that more frames follow */
1855 meta->flags |= HOSTAP_TX_FLAGS_ADD_MOREDATA;
1856 }
1857 dev_queue_xmit(skb);
1858}
1859
1860
1861/* Called only as a scheduled task for pending AP frames. */
1862static void handle_pspoll(local_info_t *local,
1863 struct ieee80211_hdr *hdr,
1864 struct hostap_80211_rx_status *rx_stats)
1865{
1866 struct net_device *dev = local->dev;
1867 struct sta_info *sta;
1868 u16 aid;
1869 struct sk_buff *skb;
1870
1871 PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=" MACSTR ", TA=" MACSTR
1872 " PWRMGT=%d\n",
1873 MAC2STR(hdr->addr1), MAC2STR(hdr->addr2),
1874 !!(le16_to_cpu(hdr->frame_ctl) & IEEE80211_FCTL_PM));
1875
1876 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
1877 PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=" MACSTR
1878 " not own MAC\n", MAC2STR(hdr->addr1));
1879 return;
1880 }
1881
1882 aid = __le16_to_cpu(hdr->duration_id);
1883 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) {
1884 PDEBUG(DEBUG_PS, " PSPOLL and AID[15:14] not set\n");
1885 return;
1886 }
1887 aid &= ~BIT(15) & ~BIT(14);
1888 if (aid == 0 || aid > MAX_AID_TABLE_SIZE) {
1889 PDEBUG(DEBUG_PS, " invalid aid=%d\n", aid);
1890 return;
1891 }
1892 PDEBUG(DEBUG_PS2, " aid=%d\n", aid);
1893
1894 spin_lock_bh(&local->ap->sta_table_lock);
1895 sta = ap_get_sta(local->ap, hdr->addr2);
1896 if (sta)
1897 atomic_inc(&sta->users);
1898 spin_unlock_bh(&local->ap->sta_table_lock);
1899
1900 if (sta == NULL) {
1901 PDEBUG(DEBUG_PS, " STA not found\n");
1902 return;
1903 }
1904 if (sta->aid != aid) {
1905 PDEBUG(DEBUG_PS, " received aid=%i does not match with "
1906 "assoc.aid=%d\n", aid, sta->aid);
1907 return;
1908 }
1909
1910 /* FIX: todo:
1911 * - add timeout for buffering (clear aid in TIM vector if buffer timed
1912 * out (expiry time must be longer than ListenInterval for
1913	 *   the corresponding STA; "8802-11: 11.2.1.9 AP aging function"))
1914 * - what to do, if buffered, pspolled, and sent frame is not ACKed by
1915 * sta; store buffer for later use and leave TIM aid bit set? use
1916 * TX event to check whether frame was ACKed?
1917 */
1918
1919 while ((skb = skb_dequeue(&sta->tx_buf)) != NULL) {
1920 /* send buffered frame .. */
1921 PDEBUG(DEBUG_PS2, "Sending buffered frame to STA after PS POLL"
1922 " (buffer_count=%d)\n", skb_queue_len(&sta->tx_buf));
1923
1924 pspoll_send_buffered(local, sta, skb);
1925
1926 if (sta->flags & WLAN_STA_PS) {
1927 /* send only one buffered packet per PS Poll */
1928 /* FIX: should ignore further PS Polls until the
1929 * buffered packet that was just sent is acknowledged
1930 * (Tx or TxExc event) */
1931 break;
1932 }
1933 }
1934
1935 if (skb_queue_empty(&sta->tx_buf)) {
1936 /* try to clear aid from TIM */
1937 if (!(sta->flags & WLAN_STA_TIM))
1938 PDEBUG(DEBUG_PS2, "Re-unsetting TIM for aid %d\n",
1939 aid);
1940 hostap_set_tim(local, aid, 0);
1941 sta->flags &= ~WLAN_STA_TIM;
1942 }
1943
1944 atomic_dec(&sta->users);
1945}
1946
1947
1948#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
1949
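/* Deferred work handler that processes queued requests to add or remove
 * automatic WDS links. */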
1950static void handle_wds_oper_queue(void *data)
1951{
1952 local_info_t *local = data;
1953 struct wds_oper_data *entry, *prev;
1954
1955 spin_lock_bh(&local->lock);
1956 entry = local->ap->wds_oper_entries;
1957 local->ap->wds_oper_entries = NULL;
1958 spin_unlock_bh(&local->lock);
1959
1960 while (entry) {
1961 PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection "
1962 "to AP " MACSTR "\n",
1963 local->dev->name,
1964 entry->type == WDS_ADD ? "adding" : "removing",
1965 MAC2STR(entry->addr));
1966 if (entry->type == WDS_ADD)
1967 prism2_wds_add(local, entry->addr, 0);
1968 else if (entry->type == WDS_DEL)
1969 prism2_wds_del(local, entry->addr, 0, 1);
1970
1971 prev = entry;
1972 entry = entry->next;
1973 kfree(prev);
1974 }
1975}
1976
1977
1978/* Called only as a scheduled task for pending AP frames. */
1979static void handle_beacon(local_info_t *local, struct sk_buff *skb,
1980 struct hostap_80211_rx_status *rx_stats)
1981{
1982 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1983 char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
1984 int len, left;
1985 u16 *pos, beacon_int, capability;
1986 char *ssid = NULL;
1987 unsigned char *supp_rates = NULL;
1988 int ssid_len = 0, supp_rates_len = 0;
1989 struct sta_info *sta = NULL;
1990 int new_sta = 0, channel = -1;
1991
1992 len = skb->len - IEEE80211_MGMT_HDR_LEN;
1993
1994 if (len < 8 + 2 + 2) {
1995 printk(KERN_DEBUG "handle_beacon - too short payload "
1996 "(len=%d)\n", len);
1997 return;
1998 }
1999
2000 pos = (u16 *) body;
2001 left = len;
2002
2003 /* Timestamp (8 octets) */
2004 pos += 4; left -= 8;
2005 /* Beacon interval (2 octets) */
2006 beacon_int = __le16_to_cpu(*pos);
2007 pos++; left -= 2;
2008 /* Capability information (2 octets) */
2009 capability = __le16_to_cpu(*pos);
2010 pos++; left -= 2;
2011
2012 if (local->ap->ap_policy != AP_OTHER_AP_EVEN_IBSS &&
2013 capability & WLAN_CAPABILITY_IBSS)
2014 return;
2015
2016 if (left >= 2) {
2017 unsigned int ileft;
2018 unsigned char *u = (unsigned char *) pos;
2019
2020 if (*u == WLAN_EID_SSID) {
2021 u++; left--;
2022 ileft = *u;
2023 u++; left--;
2024
2025 if (ileft > left || ileft > MAX_SSID_LEN) {
2026 PDEBUG(DEBUG_AP, "SSID: overflow\n");
2027 return;
2028 }
2029
2030 if (local->ap->ap_policy == AP_OTHER_AP_SAME_SSID &&
2031 (ileft != strlen(local->essid) ||
2032 memcmp(local->essid, u, ileft) != 0)) {
2033 /* not our SSID */
2034 return;
2035 }
2036
2037 ssid = u;
2038 ssid_len = ileft;
2039
2040 u += ileft;
2041 left -= ileft;
2042 }
2043
2044 if (*u == WLAN_EID_SUPP_RATES) {
2045 u++; left--;
2046 ileft = *u;
2047 u++; left--;
2048
2049 if (ileft > left || ileft == 0 || ileft > 8) {
2050 PDEBUG(DEBUG_AP, " - SUPP_RATES len error\n");
2051 return;
2052 }
2053
2054 supp_rates = u;
2055 supp_rates_len = ileft;
2056
2057 u += ileft;
2058 left -= ileft;
2059 }
2060
2061 if (*u == WLAN_EID_DS_PARAMS) {
2062 u++; left--;
2063 ileft = *u;
2064 u++; left--;
2065
2066 if (ileft > left || ileft != 1) {
2067 PDEBUG(DEBUG_AP, " - DS_PARAMS len error\n");
2068 return;
2069 }
2070
2071 channel = *u;
2072
2073 u += ileft;
2074 left -= ileft;
2075 }
2076 }
2077
2078 spin_lock_bh(&local->ap->sta_table_lock);
2079 sta = ap_get_sta(local->ap, hdr->addr2);
2080 if (sta != NULL)
2081 atomic_inc(&sta->users);
2082 spin_unlock_bh(&local->ap->sta_table_lock);
2083
2084 if (sta == NULL) {
2085 /* add new AP */
2086 new_sta = 1;
2087 sta = ap_add_sta(local->ap, hdr->addr2);
2088 if (sta == NULL) {
2089 printk(KERN_INFO "prism2: kmalloc failed for AP "
2090 "data structure\n");
2091 return;
2092 }
2093 hostap_event_new_sta(local->dev, sta);
2094
2095 /* mark APs authentication and associated for pseudo ad-hoc
2096 * style communication */
2097 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
2098
2099 if (local->ap->autom_ap_wds) {
2100 hostap_wds_link_oper(local, sta->addr, WDS_ADD);
2101 }
2102 }
2103
2104 sta->ap = 1;
2105 if (ssid) {
2106 sta->u.ap.ssid_len = ssid_len;
2107 memcpy(sta->u.ap.ssid, ssid, ssid_len);
2108 sta->u.ap.ssid[ssid_len] = '\0';
2109 } else {
2110 sta->u.ap.ssid_len = 0;
2111 sta->u.ap.ssid[0] = '\0';
2112 }
2113 sta->u.ap.channel = channel;
2114 sta->rx_packets++;
2115 sta->rx_bytes += len;
2116 sta->u.ap.last_beacon = sta->last_rx = jiffies;
2117 sta->capability = capability;
2118 sta->listen_interval = beacon_int;
2119
2120 atomic_dec(&sta->users);
2121
2122 if (new_sta) {
2123 memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
2124 memcpy(sta->supported_rates, supp_rates, supp_rates_len);
2125 prism2_check_tx_rates(sta);
2126 }
2127}
2128
2129#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2130
2131
2132/* Called only as a tasklet. */
2133static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
2134 struct hostap_80211_rx_status *rx_stats)
2135{
2136#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
2137 struct net_device *dev = local->dev;
2138#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2139 u16 fc, type, stype;
2140 struct ieee80211_hdr *hdr;
2141
2142 /* FIX: should give skb->len to handler functions and check that the
2143 * buffer is long enough */
2144 hdr = (struct ieee80211_hdr *) skb->data;
2145 fc = le16_to_cpu(hdr->frame_ctl);
2146 type = WLAN_FC_GET_TYPE(fc);
2147 stype = WLAN_FC_GET_STYPE(fc);
2148
2149#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
2150 if (!local->hostapd && type == IEEE80211_FTYPE_DATA) {
2151 PDEBUG(DEBUG_AP, "handle_ap_item - data frame\n");
2152
2153 if (!(fc & IEEE80211_FCTL_TODS) ||
2154 (fc & IEEE80211_FCTL_FROMDS)) {
2155 if (stype == IEEE80211_STYPE_NULLFUNC) {
2156 /* no ToDS nullfunc seems to be used to check
2157 * AP association; so send reject message to
2158 * speed up re-association */
2159 ap_handle_dropped_data(local, hdr);
2160 goto done;
2161 }
2162 PDEBUG(DEBUG_AP, " not ToDS frame (fc=0x%04x)\n",
2163 fc);
2164 goto done;
2165 }
2166
2167 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
2168 PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)="
2169 MACSTR " not own MAC\n",
2170 MAC2STR(hdr->addr1));
2171 goto done;
2172 }
2173
2174 if (local->ap->nullfunc_ack &&
2175 stype == IEEE80211_STYPE_NULLFUNC)
2176 ap_handle_data_nullfunc(local, hdr);
2177 else
2178 ap_handle_dropped_data(local, hdr);
2179 goto done;
2180 }
2181
2182 if (type == IEEE80211_FTYPE_MGMT && stype == IEEE80211_STYPE_BEACON) {
2183 handle_beacon(local, skb, rx_stats);
2184 goto done;
2185 }
2186#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2187
2188 if (type == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL) {
2189 handle_pspoll(local, hdr, rx_stats);
2190 goto done;
2191 }
2192
2193 if (local->hostapd) {
2194 PDEBUG(DEBUG_AP, "Unknown frame in AP queue: type=0x%02x "
2195 "subtype=0x%02x\n", type, stype);
2196 goto done;
2197 }
2198
2199#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
2200 if (type != IEEE80211_FTYPE_MGMT) {
2201 PDEBUG(DEBUG_AP, "handle_ap_item - not a management frame?\n");
2202 goto done;
2203 }
2204
2205 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
2206 PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=" MACSTR
2207 " not own MAC\n", MAC2STR(hdr->addr1));
2208 goto done;
2209 }
2210
2211 if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
2212 PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=" MACSTR
2213 " not own MAC\n", MAC2STR(hdr->addr3));
2214 goto done;
2215 }
2216
2217 switch (stype) {
2218 case IEEE80211_STYPE_ASSOC_REQ:
2219 handle_assoc(local, skb, rx_stats, 0);
2220 break;
2221 case IEEE80211_STYPE_ASSOC_RESP:
2222 PDEBUG(DEBUG_AP, "==> ASSOC RESP (ignored)\n");
2223 break;
2224 case IEEE80211_STYPE_REASSOC_REQ:
2225 handle_assoc(local, skb, rx_stats, 1);
2226 break;
2227 case IEEE80211_STYPE_REASSOC_RESP:
2228 PDEBUG(DEBUG_AP, "==> REASSOC RESP (ignored)\n");
2229 break;
2230 case IEEE80211_STYPE_ATIM:
2231 PDEBUG(DEBUG_AP, "==> ATIM (ignored)\n");
2232 break;
2233 case IEEE80211_STYPE_DISASSOC:
2234 handle_disassoc(local, skb, rx_stats);
2235 break;
2236 case IEEE80211_STYPE_AUTH:
2237 handle_authen(local, skb, rx_stats);
2238 break;
2239 case IEEE80211_STYPE_DEAUTH:
2240 handle_deauth(local, skb, rx_stats);
2241 break;
2242 default:
2243 PDEBUG(DEBUG_AP, "Unknown mgmt frame subtype 0x%02x\n",
2244 stype >> 4);
2245 break;
2246 }
2247#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2248
2249 done:
2250 dev_kfree_skb(skb);
2251}
2252
2253
2254/* Called only as a tasklet (software IRQ) */
2255void hostap_rx(struct net_device *dev, struct sk_buff *skb,
2256 struct hostap_80211_rx_status *rx_stats)
2257{
2258 struct hostap_interface *iface;
2259 local_info_t *local;
2260 u16 fc;
2261 struct ieee80211_hdr *hdr;
2262
2263 iface = netdev_priv(dev);
2264 local = iface->local;
2265
2266 if (skb->len < 16)
2267 goto drop;
2268
2269 local->stats.rx_packets++;
2270
2271 hdr = (struct ieee80211_hdr *) skb->data;
2272 fc = le16_to_cpu(hdr->frame_ctl);
2273
2274 if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
2275 WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_MGMT &&
2276 WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_BEACON)
2277 goto drop;
2278
2279 skb->protocol = __constant_htons(ETH_P_HOSTAP);
2280 handle_ap_item(local, skb, rx_stats);
2281 return;
2282
2283 drop:
2284 dev_kfree_skb(skb);
2285}
2286
2287
2288/* Called only as a tasklet (software IRQ) */
2289static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2290{
2291 struct sk_buff *skb;
2292 struct ieee80211_hdr *hdr;
2293 struct hostap_80211_rx_status rx_stats;
2294
2295 if (skb_queue_empty(&sta->tx_buf))
2296 return;
2297
2298 skb = dev_alloc_skb(16);
2299 if (skb == NULL) {
2300 printk(KERN_DEBUG "%s: schedule_packet_send: skb alloc "
2301 "failed\n", local->dev->name);
2302 return;
2303 }
2304
2305 hdr = (struct ieee80211_hdr *) skb_put(skb, 16);
2306
2307 /* Generate a fake pspoll frame to start packet delivery */
2308 hdr->frame_ctl = __constant_cpu_to_le16(
2309 IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
2310 memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN);
2311 memcpy(hdr->addr2, sta->addr, ETH_ALEN);
2312 hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14));
2313
2314 PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for "
2315 "STA " MACSTR "\n", local->dev->name, MAC2STR(sta->addr));
2316
2317 skb->dev = local->dev;
2318
2319 memset(&rx_stats, 0, sizeof(rx_stats));
2320 hostap_rx(local->dev, skb, &rx_stats);
2321}
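
The duration/ID field written above follows the PS-Poll encoding: the station's AID with the two most significant bits set. A small sketch of that encoding (the helper name is illustrative only, not part of the driver):

static inline u16 example_pspoll_duration_id(u16 aid)
{
	/* AID with BIT(15) and BIT(14) set, e.g. AID 5 -> 0xc005 */
	return aid | BIT(15) | BIT(14);
}
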
2322
2323
2324static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
2325 struct iw_quality qual[], int buf_size,
2326 int aplist)
2327{
2328 struct ap_data *ap = local->ap;
2329 struct list_head *ptr;
2330 int count = 0;
2331
2332 spin_lock_bh(&ap->sta_table_lock);
2333
2334 for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
2335 ptr = ptr->next) {
2336 struct sta_info *sta = (struct sta_info *) ptr;
2337
2338 if (aplist && !sta->ap)
2339 continue;
2340 addr[count].sa_family = ARPHRD_ETHER;
2341 memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
2342 if (sta->last_rx_silence == 0)
2343 qual[count].qual = sta->last_rx_signal < 27 ?
2344 0 : (sta->last_rx_signal - 27) * 92 / 127;
2345 else
2346 qual[count].qual = sta->last_rx_signal -
2347 sta->last_rx_silence - 35;
2348 qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
2349 qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
2350 qual[count].updated = sta->last_rx_updated;
2351
2352 sta->last_rx_updated = 0;
2353
2354 count++;
2355 if (count >= buf_size)
2356 break;
2357 }
2358 spin_unlock_bh(&ap->sta_table_lock);
2359
2360 return count;
2361}
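
For reference, the quality calculation above maps the firmware's raw comms-quality ranges (signal level 27..154, quality 0..92, see struct hfa384x_comms_quality in hostap_common.h): a signal of 27 maps to 0 and 154 maps to (154 - 27) * 92 / 127 = 92. A standalone sketch of the same arithmetic, with an illustrative helper name:

static inline int example_signal_to_qual(int signal, int silence)
{
	/* No noise reading available: scale raw signal 27..154 to 0..92 */
	if (silence == 0)
		return signal < 27 ? 0 : (signal - 27) * 92 / 127;
	/* Otherwise report the signal-to-noise difference with a fixed offset */
	return signal - silence - 35;
}
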
2362
2363
2364/* Translate our list of Access Points & Stations to a card independent
2365 * format that the Wireless Tools will understand - Jean II */
2366static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
2367{
2368 struct hostap_interface *iface;
2369 local_info_t *local;
2370 struct ap_data *ap;
2371 struct list_head *ptr;
2372 struct iw_event iwe;
2373 char *current_ev = buffer;
2374 char *end_buf = buffer + IW_SCAN_MAX_DATA;
2375#if !defined(PRISM2_NO_KERNEL_IEEE80211_MGMT)
2376 char buf[64];
2377#endif
2378
2379 iface = netdev_priv(dev);
2380 local = iface->local;
2381 ap = local->ap;
2382
2383 spin_lock_bh(&ap->sta_table_lock);
2384
2385 for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
2386 ptr = ptr->next) {
2387 struct sta_info *sta = (struct sta_info *) ptr;
2388
2389 /* First entry *MUST* be the AP MAC address */
2390 memset(&iwe, 0, sizeof(iwe));
2391 iwe.cmd = SIOCGIWAP;
2392 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
2393 memcpy(iwe.u.ap_addr.sa_data, sta->addr, ETH_ALEN);
2394 iwe.len = IW_EV_ADDR_LEN;
2395 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
2396 IW_EV_ADDR_LEN);
2397
2398 /* Use the mode to indicate if it's a station or
2399 * an Access Point */
2400 memset(&iwe, 0, sizeof(iwe));
2401 iwe.cmd = SIOCGIWMODE;
2402 if (sta->ap)
2403 iwe.u.mode = IW_MODE_MASTER;
2404 else
2405 iwe.u.mode = IW_MODE_INFRA;
2406 iwe.len = IW_EV_UINT_LEN;
2407 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
2408 IW_EV_UINT_LEN);
2409
2410 /* Some quality */
2411 memset(&iwe, 0, sizeof(iwe));
2412 iwe.cmd = IWEVQUAL;
2413 if (sta->last_rx_silence == 0)
2414 iwe.u.qual.qual = sta->last_rx_signal < 27 ?
2415 0 : (sta->last_rx_signal - 27) * 92 / 127;
2416 else
2417 iwe.u.qual.qual = sta->last_rx_signal -
2418 sta->last_rx_silence - 35;
2419 iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
2420 iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
2421 iwe.u.qual.updated = sta->last_rx_updated;
2422 iwe.len = IW_EV_QUAL_LEN;
2423 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
2424 IW_EV_QUAL_LEN);
2425
2426#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
2427 if (sta->ap) {
2428 memset(&iwe, 0, sizeof(iwe));
2429 iwe.cmd = SIOCGIWESSID;
2430 iwe.u.data.length = sta->u.ap.ssid_len;
2431 iwe.u.data.flags = 1;
2432 current_ev = iwe_stream_add_point(current_ev, end_buf,
2433 &iwe,
2434 sta->u.ap.ssid);
2435
2436 memset(&iwe, 0, sizeof(iwe));
2437 iwe.cmd = SIOCGIWENCODE;
2438 if (sta->capability & WLAN_CAPABILITY_PRIVACY)
2439 iwe.u.data.flags =
2440 IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
2441 else
2442 iwe.u.data.flags = IW_ENCODE_DISABLED;
2443 current_ev = iwe_stream_add_point(current_ev, end_buf,
2444 &iwe,
2445 sta->u.ap.ssid
2446 /* 0 byte memcpy */);
2447
2448 if (sta->u.ap.channel > 0 &&
2449 sta->u.ap.channel <= FREQ_COUNT) {
2450 memset(&iwe, 0, sizeof(iwe));
2451 iwe.cmd = SIOCGIWFREQ;
2452 iwe.u.freq.m = freq_list[sta->u.ap.channel - 1]
2453 * 100000;
2454 iwe.u.freq.e = 1;
2455 current_ev = iwe_stream_add_event(
2456 current_ev, end_buf, &iwe,
2457 IW_EV_FREQ_LEN);
2458 }
2459
2460 memset(&iwe, 0, sizeof(iwe));
2461 iwe.cmd = IWEVCUSTOM;
2462 sprintf(buf, "beacon_interval=%d",
2463 sta->listen_interval);
2464 iwe.u.data.length = strlen(buf);
2465 current_ev = iwe_stream_add_point(current_ev, end_buf,
2466 &iwe, buf);
2467 }
2468#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2469
2470 sta->last_rx_updated = 0;
2471
2472 /* To be continued, we should make good use of IWEVCUSTOM */
2473 }
2474
2475 spin_unlock_bh(&ap->sta_table_lock);
2476
2477 return current_ev - buffer;
2478}
2479
2480
2481static int prism2_hostapd_add_sta(struct ap_data *ap,
2482 struct prism2_hostapd_param *param)
2483{
2484 struct sta_info *sta;
2485
2486 spin_lock_bh(&ap->sta_table_lock);
2487 sta = ap_get_sta(ap, param->sta_addr);
2488 if (sta)
2489 atomic_inc(&sta->users);
2490 spin_unlock_bh(&ap->sta_table_lock);
2491
2492 if (sta == NULL) {
2493 sta = ap_add_sta(ap, param->sta_addr);
2494 if (sta == NULL)
2495 return -1;
2496 }
2497
2498 if (!(sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
2499 hostap_event_new_sta(sta->local->dev, sta);
2500
2501 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC;
2502 sta->last_rx = jiffies;
2503 sta->aid = param->u.add_sta.aid;
2504 sta->capability = param->u.add_sta.capability;
2505 sta->tx_supp_rates = param->u.add_sta.tx_supp_rates;
2506 if (sta->tx_supp_rates & WLAN_RATE_1M)
2507 sta->supported_rates[0] = 2;
2508 if (sta->tx_supp_rates & WLAN_RATE_2M)
2509 sta->supported_rates[1] = 4;
2510 if (sta->tx_supp_rates & WLAN_RATE_5M5)
2511 sta->supported_rates[2] = 11;
2512 if (sta->tx_supp_rates & WLAN_RATE_11M)
2513 sta->supported_rates[3] = 22;
2514 prism2_check_tx_rates(sta);
2515 atomic_dec(&sta->users);
2516 return 0;
2517}
2518
2519
2520static int prism2_hostapd_remove_sta(struct ap_data *ap,
2521 struct prism2_hostapd_param *param)
2522{
2523 struct sta_info *sta;
2524
2525 spin_lock_bh(&ap->sta_table_lock);
2526 sta = ap_get_sta(ap, param->sta_addr);
2527 if (sta) {
2528 ap_sta_hash_del(ap, sta);
2529 list_del(&sta->list);
2530 }
2531 spin_unlock_bh(&ap->sta_table_lock);
2532
2533 if (!sta)
2534 return -ENOENT;
2535
2536 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
2537 hostap_event_expired_sta(sta->local->dev, sta);
2538 ap_free_sta(ap, sta);
2539
2540 return 0;
2541}
2542
2543
2544static int prism2_hostapd_get_info_sta(struct ap_data *ap,
2545 struct prism2_hostapd_param *param)
2546{
2547 struct sta_info *sta;
2548
2549 spin_lock_bh(&ap->sta_table_lock);
2550 sta = ap_get_sta(ap, param->sta_addr);
2551 if (sta)
2552 atomic_inc(&sta->users);
2553 spin_unlock_bh(&ap->sta_table_lock);
2554
2555 if (!sta)
2556 return -ENOENT;
2557
2558 param->u.get_info_sta.inactive_sec = (jiffies - sta->last_rx) / HZ;
2559
2560 atomic_dec(&sta->users);
2561
2562 return 1;
2563}
2564
2565
2566static int prism2_hostapd_set_flags_sta(struct ap_data *ap,
2567 struct prism2_hostapd_param *param)
2568{
2569 struct sta_info *sta;
2570
2571 spin_lock_bh(&ap->sta_table_lock);
2572 sta = ap_get_sta(ap, param->sta_addr);
2573 if (sta) {
2574 sta->flags |= param->u.set_flags_sta.flags_or;
2575 sta->flags &= param->u.set_flags_sta.flags_and;
2576 }
2577 spin_unlock_bh(&ap->sta_table_lock);
2578
2579 if (!sta)
2580 return -ENOENT;
2581
2582 return 0;
2583}
2584
2585
2586static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
2587 struct prism2_hostapd_param *param)
2588{
2589 struct sta_info *sta;
2590 int rate;
2591
2592 spin_lock_bh(&ap->sta_table_lock);
2593 sta = ap_get_sta(ap, param->sta_addr);
2594 if (sta) {
2595 sta->rx_packets = sta->tx_packets = 0;
2596 sta->rx_bytes = sta->tx_bytes = 0;
2597 for (rate = 0; rate < WLAN_RATE_COUNT; rate++) {
2598 sta->tx_count[rate] = 0;
2599 sta->rx_count[rate] = 0;
2600 }
2601 }
2602 spin_unlock_bh(&ap->sta_table_lock);
2603
2604 if (!sta)
2605 return -ENOENT;
2606
2607 return 0;
2608}
2609
2610
2611static int prism2_hostapd(struct ap_data *ap,
2612 struct prism2_hostapd_param *param)
2613{
2614 switch (param->cmd) {
2615 case PRISM2_HOSTAPD_FLUSH:
2616 ap_control_kickall(ap);
2617 return 0;
2618 case PRISM2_HOSTAPD_ADD_STA:
2619 return prism2_hostapd_add_sta(ap, param);
2620 case PRISM2_HOSTAPD_REMOVE_STA:
2621 return prism2_hostapd_remove_sta(ap, param);
2622 case PRISM2_HOSTAPD_GET_INFO_STA:
2623 return prism2_hostapd_get_info_sta(ap, param);
2624 case PRISM2_HOSTAPD_SET_FLAGS_STA:
2625 return prism2_hostapd_set_flags_sta(ap, param);
2626 case PRISM2_HOSTAPD_STA_CLEAR_STATS:
2627 return prism2_hostapd_sta_clear_stats(ap, param);
2628 default:
2629 printk(KERN_WARNING "prism2_hostapd: unknown cmd=%d\n",
2630 param->cmd);
2631 return -EOPNOTSUPP;
2632 }
2633}
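
As an illustration of the dispatcher above, a caller inside this file would fill a struct prism2_hostapd_param (defined in hostap_common.h) and pass it in. The following is only a sketch: the function name is made up, and locking and error handling are omitted.

static void example_hostapd_requests(struct ap_data *ap, u8 *addr)
{
	struct prism2_hostapd_param param;

	/* Drop all current station entries */
	memset(&param, 0, sizeof(param));
	param.cmd = PRISM2_HOSTAPD_FLUSH;
	prism2_hostapd(ap, &param);

	/* Re-add one station with AID 1 supporting 1 and 2 Mbps */
	memset(&param, 0, sizeof(param));
	param.cmd = PRISM2_HOSTAPD_ADD_STA;
	memcpy(param.sta_addr, addr, ETH_ALEN);
	param.u.add_sta.aid = 1;
	param.u.add_sta.tx_supp_rates = WLAN_RATE_1M | WLAN_RATE_2M;
	prism2_hostapd(ap, &param);
}
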
2634
2635
2636/* Update station info for host-based TX rate control and return current
2637 * TX rate */
2638static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev)
2639{
2640 int ret = sta->tx_rate;
2641 struct hostap_interface *iface;
2642 local_info_t *local;
2643
2644 iface = netdev_priv(dev);
2645 local = iface->local;
2646
2647 sta->tx_count[sta->tx_rate_idx]++;
2648 sta->tx_since_last_failure++;
2649 sta->tx_consecutive_exc = 0;
2650 if (sta->tx_since_last_failure >= WLAN_RATE_UPDATE_COUNT &&
2651 sta->tx_rate_idx < sta->tx_max_rate) {
2652 /* use next higher rate */
2653 int old_rate, new_rate;
2654 old_rate = new_rate = sta->tx_rate_idx;
2655 while (new_rate < sta->tx_max_rate) {
2656 new_rate++;
2657 if (ap_tx_rate_ok(new_rate, sta, local)) {
2658 sta->tx_rate_idx = new_rate;
2659 break;
2660 }
2661 }
2662 if (old_rate != sta->tx_rate_idx) {
2663 switch (sta->tx_rate_idx) {
2664 case 0: sta->tx_rate = 10; break;
2665 case 1: sta->tx_rate = 20; break;
2666 case 2: sta->tx_rate = 55; break;
2667 case 3: sta->tx_rate = 110; break;
2668 default: sta->tx_rate = 0; break;
2669 }
2670 PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate raised to"
2671 " %d\n", dev->name, MAC2STR(sta->addr),
2672 sta->tx_rate);
2673 }
2674 sta->tx_since_last_failure = 0;
2675 }
2676
2677 return ret;
2678}
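
The switch above (and the identical one in hostap_handle_sta_tx_exc() below) maps a WLAN_RATE_* index to a rate in 0.1 Mbps units: 0 -> 10, 1 -> 20, 2 -> 55, 3 -> 110, i.e. 1, 2, 5.5 and 11 Mbps. A table-driven sketch that could factor out the duplication (illustrative only):

static inline u8 example_rate_idx_to_tx_rate(int idx)
{
	/* Rates in 0.1 Mbps units, indexed by WLAN_RATE_* index */
	static const u8 rates[WLAN_RATE_COUNT] = { 10, 20, 55, 110 };

	return (idx >= 0 && idx < WLAN_RATE_COUNT) ? rates[idx] : 0;
}
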
2679
2680
2681/* Called only from software IRQ. Called for each TX frame prior to possible
2682 * encryption and transmission. */
2683ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2684{
2685 struct sta_info *sta = NULL;
2686 struct sk_buff *skb = tx->skb;
2687 int set_tim, ret;
2688 struct ieee80211_hdr *hdr;
2689 struct hostap_skb_tx_data *meta;
2690
2691 meta = (struct hostap_skb_tx_data *) skb->cb;
2692 ret = AP_TX_CONTINUE;
2693 if (local->ap == NULL || skb->len < 10 ||
2694 meta->iface->type == HOSTAP_INTERFACE_STA)
2695 goto out;
2696
2697 hdr = (struct ieee80211_hdr *) skb->data;
2698
2699 if (hdr->addr1[0] & 0x01) {
2700 /* broadcast/multicast frame - no AP related processing */
2701 goto out;
2702 }
2703
2704 /* unicast packet - check whether destination STA is associated */
2705 spin_lock(&local->ap->sta_table_lock);
2706 sta = ap_get_sta(local->ap, hdr->addr1);
2707 if (sta)
2708 atomic_inc(&sta->users);
2709 spin_unlock(&local->ap->sta_table_lock);
2710
2711 if (local->iw_mode == IW_MODE_MASTER && sta == NULL &&
2712 !(meta->flags & HOSTAP_TX_FLAGS_WDS) &&
2713 meta->iface->type != HOSTAP_INTERFACE_MASTER &&
2714 meta->iface->type != HOSTAP_INTERFACE_AP) {
2715#if 0
2716 /* This can happen, e.g., when wlan0 is added to a bridge and
2717 * bridging code does not know which port is the correct target
2718		 * for a unicast frame. In this case, the packet is sent to all
2719 * ports of the bridge. Since this is a valid scenario, do not
2720 * print out any errors here. */
2721 if (net_ratelimit()) {
2722 printk(KERN_DEBUG "AP: drop packet to non-associated "
2723 "STA " MACSTR "\n", MAC2STR(hdr->addr1));
2724 }
2725#endif
2726 local->ap->tx_drop_nonassoc++;
2727 ret = AP_TX_DROP;
2728 goto out;
2729 }
2730
2731 if (sta == NULL)
2732 goto out;
2733
2734 if (!(sta->flags & WLAN_STA_AUTHORIZED))
2735 ret = AP_TX_CONTINUE_NOT_AUTHORIZED;
2736
2737 /* Set tx_rate if using host-based TX rate control */
2738 if (!local->fw_tx_rate_control)
2739 local->ap->last_tx_rate = meta->rate =
2740 ap_update_sta_tx_rate(sta, local->dev);
2741
2742 if (local->iw_mode != IW_MODE_MASTER)
2743 goto out;
2744
2745 if (!(sta->flags & WLAN_STA_PS))
2746 goto out;
2747
2748 if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) {
2749 /* indicate to STA that more frames follow */
2750 hdr->frame_ctl |=
2751 __constant_cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2752 }
2753
2754 if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) {
2755		/* packet was already buffered and now sent due to
2756 * PS poll, so do not rebuffer it */
2757 goto out;
2758 }
2759
2760 if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) {
2761 PDEBUG(DEBUG_PS, "%s: No more space in STA (" MACSTR ")'s PS "
2762 "mode buffer\n", local->dev->name, MAC2STR(sta->addr));
2763 /* Make sure that TIM is set for the station (it might not be
2764 * after AP wlan hw reset). */
2765 /* FIX: should fix hw reset to restore bits based on STA
2766 * buffer state.. */
2767 hostap_set_tim(local, sta->aid, 1);
2768 sta->flags |= WLAN_STA_TIM;
2769 ret = AP_TX_DROP;
2770 goto out;
2771 }
2772
2773 /* STA in PS mode, buffer frame for later delivery */
2774 set_tim = skb_queue_empty(&sta->tx_buf);
2775 skb_queue_tail(&sta->tx_buf, skb);
2776 /* FIX: could save RX time to skb and expire buffered frames after
2777 * some time if STA does not poll for them */
2778
2779 if (set_tim) {
2780 if (sta->flags & WLAN_STA_TIM)
2781 PDEBUG(DEBUG_PS2, "Re-setting TIM for aid %d\n",
2782 sta->aid);
2783 hostap_set_tim(local, sta->aid, 1);
2784 sta->flags |= WLAN_STA_TIM;
2785 }
2786
2787 ret = AP_TX_BUFFERED;
2788
2789 out:
2790 if (sta != NULL) {
2791 if (ret == AP_TX_CONTINUE ||
2792 ret == AP_TX_CONTINUE_NOT_AUTHORIZED) {
2793 sta->tx_packets++;
2794 sta->tx_bytes += skb->len;
2795 sta->last_tx = jiffies;
2796 }
2797
2798 if ((ret == AP_TX_CONTINUE ||
2799 ret == AP_TX_CONTINUE_NOT_AUTHORIZED) &&
2800 sta->crypt && tx->host_encrypt) {
2801 tx->crypt = sta->crypt;
2802 tx->sta_ptr = sta; /* hostap_handle_sta_release() will
2803 * be called to release sta info
2804 * later */
2805 } else
2806 atomic_dec(&sta->users);
2807 }
2808
2809 return ret;
2810}
2811
2812
2813void hostap_handle_sta_release(void *ptr)
2814{
2815 struct sta_info *sta = ptr;
2816 atomic_dec(&sta->users);
2817}
2818
2819
2820/* Called only as a tasklet (software IRQ) */
2821void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
2822{
2823 struct sta_info *sta;
2824 struct ieee80211_hdr *hdr;
2825 struct hostap_skb_tx_data *meta;
2826
2827 hdr = (struct ieee80211_hdr *) skb->data;
2828 meta = (struct hostap_skb_tx_data *) skb->cb;
2829
2830 spin_lock(&local->ap->sta_table_lock);
2831 sta = ap_get_sta(local->ap, hdr->addr1);
2832 if (!sta) {
2833 spin_unlock(&local->ap->sta_table_lock);
2834 PDEBUG(DEBUG_AP, "%s: Could not find STA " MACSTR " for this "
2835 "TX error (@%lu)\n",
2836 local->dev->name, MAC2STR(hdr->addr1), jiffies);
2837 return;
2838 }
2839
2840 sta->tx_since_last_failure = 0;
2841 sta->tx_consecutive_exc++;
2842
2843 if (sta->tx_consecutive_exc >= WLAN_RATE_DECREASE_THRESHOLD &&
2844 sta->tx_rate_idx > 0 && meta->rate <= sta->tx_rate) {
2845 /* use next lower rate */
2846 int old, rate;
2847 old = rate = sta->tx_rate_idx;
2848 while (rate > 0) {
2849 rate--;
2850 if (ap_tx_rate_ok(rate, sta, local)) {
2851 sta->tx_rate_idx = rate;
2852 break;
2853 }
2854 }
2855 if (old != sta->tx_rate_idx) {
2856 switch (sta->tx_rate_idx) {
2857 case 0: sta->tx_rate = 10; break;
2858 case 1: sta->tx_rate = 20; break;
2859 case 2: sta->tx_rate = 55; break;
2860 case 3: sta->tx_rate = 110; break;
2861 default: sta->tx_rate = 0; break;
2862 }
2863 PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate lowered "
2864 "to %d\n", local->dev->name, MAC2STR(sta->addr),
2865 sta->tx_rate);
2866 }
2867 sta->tx_consecutive_exc = 0;
2868 }
2869 spin_unlock(&local->ap->sta_table_lock);
2870}
2871
2872
2873static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
2874 int pwrmgt, int type, int stype)
2875{
2876 if (pwrmgt && !(sta->flags & WLAN_STA_PS)) {
2877 sta->flags |= WLAN_STA_PS;
2878 PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to use PS "
2879 "mode (type=0x%02X, stype=0x%02X)\n",
2880 MAC2STR(sta->addr), type >> 2, stype >> 4);
2881 } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) {
2882 sta->flags &= ~WLAN_STA_PS;
2883 PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to not use "
2884 "PS mode (type=0x%02X, stype=0x%02X)\n",
2885 MAC2STR(sta->addr), type >> 2, stype >> 4);
2886 if (type != IEEE80211_FTYPE_CTL ||
2887 stype != IEEE80211_STYPE_PSPOLL)
2888 schedule_packet_send(local, sta);
2889 }
2890}
2891
2892
2893/* Called only as a tasklet (software IRQ). Called for each RX frame to update
2894 * STA power saving state. pwrmgt is a flag from 802.11 frame_ctl field. */
2895int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr)
2896{
2897 struct sta_info *sta;
2898 u16 fc;
2899
2900 spin_lock(&local->ap->sta_table_lock);
2901 sta = ap_get_sta(local->ap, hdr->addr2);
2902 if (sta)
2903 atomic_inc(&sta->users);
2904 spin_unlock(&local->ap->sta_table_lock);
2905
2906 if (!sta)
2907 return -1;
2908
2909 fc = le16_to_cpu(hdr->frame_ctl);
2910 hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
2911 WLAN_FC_GET_TYPE(fc), WLAN_FC_GET_STYPE(fc));
2912
2913 atomic_dec(&sta->users);
2914 return 0;
2915}
2916
2917
2918/* Called only as a tasklet (software IRQ). Called for each RX frame after
2919 * getting RX header and payload from hardware. */
2920ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
2921 struct sk_buff *skb,
2922 struct hostap_80211_rx_status *rx_stats,
2923 int wds)
2924{
2925 int ret;
2926 struct sta_info *sta;
2927 u16 fc, type, stype;
2928 struct ieee80211_hdr *hdr;
2929
2930 if (local->ap == NULL)
2931 return AP_RX_CONTINUE;
2932
2933 hdr = (struct ieee80211_hdr *) skb->data;
2934
2935 fc = le16_to_cpu(hdr->frame_ctl);
2936 type = WLAN_FC_GET_TYPE(fc);
2937 stype = WLAN_FC_GET_STYPE(fc);
2938
2939 spin_lock(&local->ap->sta_table_lock);
2940 sta = ap_get_sta(local->ap, hdr->addr2);
2941 if (sta)
2942 atomic_inc(&sta->users);
2943 spin_unlock(&local->ap->sta_table_lock);
2944
2945 if (sta && !(sta->flags & WLAN_STA_AUTHORIZED))
2946 ret = AP_RX_CONTINUE_NOT_AUTHORIZED;
2947 else
2948 ret = AP_RX_CONTINUE;
2949
2950
2951 if (fc & IEEE80211_FCTL_TODS) {
2952 if (!wds && (sta == NULL || !(sta->flags & WLAN_STA_ASSOC))) {
2953 if (local->hostapd) {
2954 prism2_rx_80211(local->apdev, skb, rx_stats,
2955 PRISM2_RX_NON_ASSOC);
2956#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
2957 } else {
2958 printk(KERN_DEBUG "%s: dropped received packet"
2959 " from non-associated STA " MACSTR
2960 " (type=0x%02x, subtype=0x%02x)\n",
2961 dev->name, MAC2STR(hdr->addr2),
2962 type >> 2, stype >> 4);
2963 hostap_rx(dev, skb, rx_stats);
2964#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2965 }
2966 ret = AP_RX_EXIT;
2967 goto out;
2968 }
2969 } else if (fc & IEEE80211_FCTL_FROMDS) {
2970 if (!wds) {
2971 /* FromDS frame - not for us; probably
2972 * broadcast/multicast in another BSS - drop */
2973 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
2974 printk(KERN_DEBUG "Odd.. FromDS packet "
2975 "received with own BSSID\n");
2976 hostap_dump_rx_80211(dev->name, skb, rx_stats);
2977 }
2978 ret = AP_RX_DROP;
2979 goto out;
2980 }
2981 } else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
2982 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
2983
2984 if (local->hostapd) {
2985 prism2_rx_80211(local->apdev, skb, rx_stats,
2986 PRISM2_RX_NON_ASSOC);
2987#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
2988 } else {
2989 /* At least Lucent f/w seems to send data::nullfunc
2990 * frames with no ToDS flag when the current AP returns
2991 * after being unavailable for some time. Speed up
2992 * re-association by informing the station about it not
2993 * being associated. */
2994 printk(KERN_DEBUG "%s: rejected received nullfunc "
2995 "frame without ToDS from not associated STA "
2996 MACSTR "\n",
2997 dev->name, MAC2STR(hdr->addr2));
2998 hostap_rx(dev, skb, rx_stats);
2999#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
3000 }
3001 ret = AP_RX_EXIT;
3002 goto out;
3003 } else if (stype == IEEE80211_STYPE_NULLFUNC) {
3004 /* At least Lucent cards seem to send periodic nullfunc
3005 * frames with ToDS. Let these through to update SQ
3006 * stats and PS state. Nullfunc frames do not contain
3007 * any data and they will be dropped below. */
3008 } else {
3009 /* If BSSID (Addr3) is foreign, this frame is a normal
3010 * broadcast frame from an IBSS network. Drop it silently.
3011 * If BSSID is own, report the dropping of this frame. */
3012 if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
3013 printk(KERN_DEBUG "%s: dropped received packet from "
3014 MACSTR " with no ToDS flag (type=0x%02x, "
3015 "subtype=0x%02x)\n", dev->name,
3016 MAC2STR(hdr->addr2), type >> 2, stype >> 4);
3017 hostap_dump_rx_80211(dev->name, skb, rx_stats);
3018 }
3019 ret = AP_RX_DROP;
3020 goto out;
3021 }
3022
3023 if (sta) {
3024 hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
3025 type, stype);
3026
3027 sta->rx_packets++;
3028 sta->rx_bytes += skb->len;
3029 sta->last_rx = jiffies;
3030 }
3031
3032 if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC &&
3033 fc & IEEE80211_FCTL_TODS) {
3034 if (local->hostapd) {
3035 prism2_rx_80211(local->apdev, skb, rx_stats,
3036 PRISM2_RX_NULLFUNC_ACK);
3037#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
3038 } else {
3039 /* some STA f/w's seem to require control::ACK frame
3040 * for data::nullfunc, but Prism2 f/w 0.8.0 (at least
3041 * from Compaq) does not send this.. Try to generate
3042 * ACK for these frames from the host driver to make
3043 * power saving work with, e.g., Lucent WaveLAN f/w */
3044 hostap_rx(dev, skb, rx_stats);
3045#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
3046 }
3047 ret = AP_RX_EXIT;
3048 goto out;
3049 }
3050
3051 out:
3052 if (sta)
3053 atomic_dec(&sta->users);
3054
3055 return ret;
3056}
3057
3058
3059/* Called only as a tasklet (software IRQ) */
3060int hostap_handle_sta_crypto(local_info_t *local,
3061 struct ieee80211_hdr *hdr,
3062 struct ieee80211_crypt_data **crypt,
3063 void **sta_ptr)
3064{
3065 struct sta_info *sta;
3066
3067 spin_lock(&local->ap->sta_table_lock);
3068 sta = ap_get_sta(local->ap, hdr->addr2);
3069 if (sta)
3070 atomic_inc(&sta->users);
3071 spin_unlock(&local->ap->sta_table_lock);
3072
3073 if (!sta)
3074 return -1;
3075
3076 if (sta->crypt) {
3077 *crypt = sta->crypt;
3078 *sta_ptr = sta;
3079 /* hostap_handle_sta_release() will be called to release STA
3080 * info */
3081 } else
3082 atomic_dec(&sta->users);
3083
3084 return 0;
3085}
3086
3087
3088/* Called only as a tasklet (software IRQ) */
3089int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr)
3090{
3091 struct sta_info *sta;
3092 int ret = 0;
3093
3094 spin_lock(&ap->sta_table_lock);
3095 sta = ap_get_sta(ap, sta_addr);
3096 if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap)
3097 ret = 1;
3098 spin_unlock(&ap->sta_table_lock);
3099
3100 return ret;
3101}
3102
3103
3104/* Called only as a tasklet (software IRQ) */
3105int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr)
3106{
3107 struct sta_info *sta;
3108 int ret = 0;
3109
3110 spin_lock(&ap->sta_table_lock);
3111 sta = ap_get_sta(ap, sta_addr);
3112 if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap &&
3113 ((sta->flags & WLAN_STA_AUTHORIZED) ||
3114 ap->local->ieee_802_1x == 0))
3115 ret = 1;
3116 spin_unlock(&ap->sta_table_lock);
3117
3118 return ret;
3119}
3120
3121
3122/* Called only as a tasklet (software IRQ) */
3123int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
3124{
3125 struct sta_info *sta;
3126 int ret = 1;
3127
3128 if (!ap)
3129 return -1;
3130
3131 spin_lock(&ap->sta_table_lock);
3132 sta = ap_get_sta(ap, sta_addr);
3133 if (sta)
3134 ret = 0;
3135 spin_unlock(&ap->sta_table_lock);
3136
3137 if (ret == 1) {
3138 sta = ap_add_sta(ap, sta_addr);
3139 if (!sta)
3140			return -1;
3141 sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
3142 sta->ap = 1;
3143 memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
3144 /* No way of knowing which rates are supported since we did not
3145 * get supported rates element from beacon/assoc req. Assume
3146 * that remote end supports all 802.11b rates. */
3147 sta->supported_rates[0] = 0x82;
3148 sta->supported_rates[1] = 0x84;
3149 sta->supported_rates[2] = 0x0b;
3150 sta->supported_rates[3] = 0x16;
3151 sta->tx_supp_rates = WLAN_RATE_1M | WLAN_RATE_2M |
3152 WLAN_RATE_5M5 | WLAN_RATE_11M;
3153 sta->tx_rate = 110;
3154 sta->tx_max_rate = sta->tx_rate_idx = 3;
3155 }
3156
3157 return ret;
3158}
3159
3160
3161/* Called only as a tasklet (software IRQ) */
3162int hostap_update_rx_stats(struct ap_data *ap,
3163 struct ieee80211_hdr *hdr,
3164 struct hostap_80211_rx_status *rx_stats)
3165{
3166 struct sta_info *sta;
3167
3168 if (!ap)
3169 return -1;
3170
3171 spin_lock(&ap->sta_table_lock);
3172 sta = ap_get_sta(ap, hdr->addr2);
3173 if (sta) {
3174 sta->last_rx_silence = rx_stats->noise;
3175 sta->last_rx_signal = rx_stats->signal;
3176 sta->last_rx_rate = rx_stats->rate;
3177 sta->last_rx_updated = 7;
3178 if (rx_stats->rate == 10)
3179 sta->rx_count[0]++;
3180 else if (rx_stats->rate == 20)
3181 sta->rx_count[1]++;
3182 else if (rx_stats->rate == 55)
3183 sta->rx_count[2]++;
3184 else if (rx_stats->rate == 110)
3185 sta->rx_count[3]++;
3186 }
3187 spin_unlock(&ap->sta_table_lock);
3188
3189 return sta ? 0 : -1;
3190}
3191
3192
3193void hostap_update_rates(local_info_t *local)
3194{
3195 struct list_head *ptr;
3196 struct ap_data *ap = local->ap;
3197
3198 if (!ap)
3199 return;
3200
3201 spin_lock_bh(&ap->sta_table_lock);
3202 for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) {
3203 struct sta_info *sta = (struct sta_info *) ptr;
3204 prism2_check_tx_rates(sta);
3205 }
3206 spin_unlock_bh(&ap->sta_table_lock);
3207}
3208
3209
3210static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
3211 struct ieee80211_crypt_data ***crypt)
3212{
3213 struct sta_info *sta;
3214
3215 spin_lock_bh(&ap->sta_table_lock);
3216 sta = ap_get_sta(ap, addr);
3217 if (sta)
3218 atomic_inc(&sta->users);
3219 spin_unlock_bh(&ap->sta_table_lock);
3220
3221 if (!sta && permanent)
3222 sta = ap_add_sta(ap, addr);
3223
3224 if (!sta)
3225 return NULL;
3226
3227 if (permanent)
3228 sta->flags |= WLAN_STA_PERM;
3229
3230 *crypt = &sta->crypt;
3231
3232 return sta;
3233}
3234
3235
3236void hostap_add_wds_links(local_info_t *local)
3237{
3238 struct ap_data *ap = local->ap;
3239 struct list_head *ptr;
3240
3241 spin_lock_bh(&ap->sta_table_lock);
3242 list_for_each(ptr, &ap->sta_list) {
3243 struct sta_info *sta = list_entry(ptr, struct sta_info, list);
3244 if (sta->ap)
3245 hostap_wds_link_oper(local, sta->addr, WDS_ADD);
3246 }
3247 spin_unlock_bh(&ap->sta_table_lock);
3248
3249 schedule_work(&local->ap->wds_oper_queue);
3250}
3251
3252
3253void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type)
3254{
3255 struct wds_oper_data *entry;
3256
3257 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
3258 if (!entry)
3259 return;
3260 memcpy(entry->addr, addr, ETH_ALEN);
3261 entry->type = type;
3262 spin_lock_bh(&local->lock);
3263 entry->next = local->ap->wds_oper_entries;
3264 local->ap->wds_oper_entries = entry;
3265 spin_unlock_bh(&local->lock);
3266
3267 schedule_work(&local->ap->wds_oper_queue);
3268}
3269
3270
3271EXPORT_SYMBOL(hostap_init_data);
3272EXPORT_SYMBOL(hostap_init_ap_proc);
3273EXPORT_SYMBOL(hostap_free_data);
3274EXPORT_SYMBOL(hostap_check_sta_fw_version);
3275EXPORT_SYMBOL(hostap_handle_sta_tx);
3276EXPORT_SYMBOL(hostap_handle_sta_release);
3277EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
3278EXPORT_SYMBOL(hostap_update_sta_ps);
3279EXPORT_SYMBOL(hostap_handle_sta_rx);
3280EXPORT_SYMBOL(hostap_is_sta_assoc);
3281EXPORT_SYMBOL(hostap_is_sta_authorized);
3282EXPORT_SYMBOL(hostap_add_sta);
3283EXPORT_SYMBOL(hostap_update_rates);
3284EXPORT_SYMBOL(hostap_add_wds_links);
3285EXPORT_SYMBOL(hostap_wds_link_oper);
3286#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
3287EXPORT_SYMBOL(hostap_deauth_all_stas);
3288#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
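
The symbols exported above are intended for the other hostap modules (RX/TX paths and ioctl handling). As a rough sketch of the calling convention, assuming an existing local_info_t and a peer MAC address (the function name is only an example):

static void example_register_peer(local_info_t *local, u8 *peer_addr)
{
	/* Make sure a station entry exists; negative means failure */
	if (hostap_add_sta(local->ap, peer_addr) < 0)
		return;

	/* Forward data only once 802.1X (if enabled) has authorized
	 * the station */
	if (!hostap_is_sta_authorized(local->ap, peer_addr))
		return;

	/* ... continue with the normal data path ... */
}
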
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
new file mode 100644
index 000000000000..816a52bcea8f
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -0,0 +1,261 @@
1#ifndef HOSTAP_AP_H
2#define HOSTAP_AP_H
3
4/* AP data structures for STAs */
5
6/* maximum number of frames to buffer per STA */
7#define STA_MAX_TX_BUFFER 32
8
9/* STA flags */
10#define WLAN_STA_AUTH BIT(0)
11#define WLAN_STA_ASSOC BIT(1)
12#define WLAN_STA_PS BIT(2)
13#define WLAN_STA_TIM BIT(3) /* TIM bit is on for PS stations */
14#define WLAN_STA_PERM BIT(4) /* permanent; do not remove entry on expiration */
15#define WLAN_STA_AUTHORIZED BIT(5) /* If 802.1X is used, this flag is
16 * controlling whether STA is authorized to
17 * send and receive non-IEEE 802.1X frames
18 */
19#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
20
21#define WLAN_RATE_1M BIT(0)
22#define WLAN_RATE_2M BIT(1)
23#define WLAN_RATE_5M5 BIT(2)
24#define WLAN_RATE_11M BIT(3)
25#define WLAN_RATE_COUNT 4
26
27/* Maximum size of Supported Rates info element. IEEE 802.11 has a limit of 8,
28 * but some pre-standard IEEE 802.11g products use longer elements. */
29#define WLAN_SUPP_RATES_MAX 32
30
31/* Try to increase TX rate after # successfully sent consecutive packets */
32#define WLAN_RATE_UPDATE_COUNT 50
33
34/* Decrease TX rate after # consecutive dropped packets */
35#define WLAN_RATE_DECREASE_THRESHOLD 2
36
37struct sta_info {
38 struct list_head list;
39 struct sta_info *hnext; /* next entry in hash table list */
40 atomic_t users; /* number of users (do not remove if > 0) */
41 struct proc_dir_entry *proc;
42
43 u8 addr[6];
44 u16 aid; /* STA's unique AID (1 .. 2007) or 0 if not yet assigned */
45 u32 flags;
46 u16 capability;
47 u16 listen_interval; /* or beacon_int for APs */
48 u8 supported_rates[WLAN_SUPP_RATES_MAX];
49
50 unsigned long last_auth;
51 unsigned long last_assoc;
52 unsigned long last_rx;
53 unsigned long last_tx;
54 unsigned long rx_packets, tx_packets;
55 unsigned long rx_bytes, tx_bytes;
56 struct sk_buff_head tx_buf;
57 /* FIX: timeout buffers with an expiry time somehow derived from
58 * listen_interval */
59
60 s8 last_rx_silence; /* Noise in dBm */
61 s8 last_rx_signal; /* Signal strength in dBm */
62	u8 last_rx_rate; /* rate of last received frame, in 0.1 Mbps */
63 u8 last_rx_updated; /* IWSPY's struct iw_quality::updated */
64
65 u8 tx_supp_rates; /* bit field of supported TX rates */
66 u8 tx_rate; /* current TX rate (in 0.1 Mbps) */
67 u8 tx_rate_idx; /* current TX rate (WLAN_RATE_*) */
68 u8 tx_max_rate; /* max TX rate (WLAN_RATE_*) */
69 u32 tx_count[WLAN_RATE_COUNT]; /* number of frames sent (per rate) */
70 u32 rx_count[WLAN_RATE_COUNT]; /* number of frames received (per rate)
71 */
72 u32 tx_since_last_failure;
73 u32 tx_consecutive_exc;
74
75 struct ieee80211_crypt_data *crypt;
76
77 int ap; /* whether this station is an AP */
78
79 local_info_t *local;
80
81#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
82 union {
83 struct {
84 char *challenge; /* shared key authentication
85 * challenge */
86 } sta;
87 struct {
88 int ssid_len;
89 unsigned char ssid[MAX_SSID_LEN + 1]; /* AP's ssid */
90 int channel;
91 unsigned long last_beacon; /* last RX beacon time */
92 } ap;
93 } u;
94
95 struct timer_list timer;
96 enum { STA_NULLFUNC = 0, STA_DISASSOC, STA_DEAUTH } timeout_next;
97#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
98};
99
100
101#define MAX_STA_COUNT 1024
102
103/* Maximum number of AIDs to use for STAs; must be 2007 or lower
104 * (8802.11 limitation) */
105#define MAX_AID_TABLE_SIZE 128
106
107#define STA_HASH_SIZE 256
108#define STA_HASH(sta) (sta[5])
109
110
111/* Default value for maximum station inactivity. After AP_MAX_INACTIVITY_SEC
112 * has passed since last received frame from the station, a nullfunc data
113 * frame is sent to the station. If this frame is not acknowledged and no other
114 * frames have been received, the station will be disassociated after
115 * AP_DISASSOC_DELAY. Similarly, the station will be deauthenticated after
116 * AP_DEAUTH_DELAY. AP_TIMEOUT_RESOLUTION is the resolution that is used with
117 * max inactivity timer. */
118#define AP_MAX_INACTIVITY_SEC (5 * 60)
119#define AP_DISASSOC_DELAY (HZ)
120#define AP_DEAUTH_DELAY (HZ)
121
122/* ap_policy: whether to accept frames to/from other APs/IBSS */
123typedef enum {
124 AP_OTHER_AP_SKIP_ALL = 0,
125 AP_OTHER_AP_SAME_SSID = 1,
126 AP_OTHER_AP_ALL = 2,
127 AP_OTHER_AP_EVEN_IBSS = 3
128} ap_policy_enum;
129
130#define PRISM2_AUTH_OPEN BIT(0)
131#define PRISM2_AUTH_SHARED_KEY BIT(1)
132
133
134/* MAC address-based restrictions */
135struct mac_entry {
136 struct list_head list;
137 u8 addr[6];
138};
139
140struct mac_restrictions {
141 enum { MAC_POLICY_OPEN = 0, MAC_POLICY_ALLOW, MAC_POLICY_DENY } policy;
142 unsigned int entries;
143 struct list_head mac_list;
144 spinlock_t lock;
145};
146
147
148struct add_sta_proc_data {
149 u8 addr[ETH_ALEN];
150 struct add_sta_proc_data *next;
151};
152
153
154typedef enum { WDS_ADD, WDS_DEL } wds_oper_type;
155struct wds_oper_data {
156 wds_oper_type type;
157 u8 addr[ETH_ALEN];
158 struct wds_oper_data *next;
159};
160
161
162struct ap_data {
163 int initialized; /* whether ap_data has been initialized */
164 local_info_t *local;
165 int bridge_packets; /* send packet to associated STAs directly to the
166 * wireless media instead of higher layers in the
167 * kernel */
168 unsigned int bridged_unicast; /* number of unicast frames bridged on
169 * wireless media */
170 unsigned int bridged_multicast; /* number of non-unicast frames
171 * bridged on wireless media */
172 unsigned int tx_drop_nonassoc; /* number of unicast TX packets dropped
173 * because they were to an address that
174 * was not associated */
175 int nullfunc_ack; /* use workaround for nullfunc frame ACKs */
176
177 spinlock_t sta_table_lock;
178 int num_sta; /* number of entries in sta_list */
179 struct list_head sta_list; /* STA info list head */
180 struct sta_info *sta_hash[STA_HASH_SIZE];
181
182 struct proc_dir_entry *proc;
183
184 ap_policy_enum ap_policy;
185 unsigned int max_inactivity;
186 int autom_ap_wds;
187
188 struct mac_restrictions mac_restrictions; /* MAC-based auth */
189 int last_tx_rate;
190
191 struct work_struct add_sta_proc_queue;
192 struct add_sta_proc_data *add_sta_proc_entries;
193
194 struct work_struct wds_oper_queue;
195 struct wds_oper_data *wds_oper_entries;
196
197 u16 tx_callback_idx;
198
199#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
200 /* pointers to STA info; based on allocated AID or NULL if AID free
201 * AID is in the range 1-2007, so sta_aid[0] corresponds to AID 1
202 * and so on
203 */
204 struct sta_info *sta_aid[MAX_AID_TABLE_SIZE];
205
206 u16 tx_callback_auth, tx_callback_assoc, tx_callback_poll;
207
208 /* WEP operations for generating challenges to be used with shared key
209 * authentication */
210 struct ieee80211_crypto_ops *crypt;
211 void *crypt_priv;
212#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
213};
214
215
216void hostap_rx(struct net_device *dev, struct sk_buff *skb,
217 struct hostap_80211_rx_status *rx_stats);
218void hostap_init_data(local_info_t *local);
219void hostap_init_ap_proc(local_info_t *local);
220void hostap_free_data(struct ap_data *ap);
221void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver);
222
223typedef enum {
224 AP_TX_CONTINUE, AP_TX_DROP, AP_TX_RETRY, AP_TX_BUFFERED,
225 AP_TX_CONTINUE_NOT_AUTHORIZED
226} ap_tx_ret;
227struct hostap_tx_data {
228 struct sk_buff *skb;
229 int host_encrypt;
230 struct ieee80211_crypt_data *crypt;
231 void *sta_ptr;
232};
233ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
234void hostap_handle_sta_release(void *ptr);
235void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
236int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr);
237typedef enum {
238 AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED
239} ap_rx_ret;
240ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
241 struct sk_buff *skb,
242 struct hostap_80211_rx_status *rx_stats,
243 int wds);
244int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr,
245 struct ieee80211_crypt_data **crypt,
246 void **sta_ptr);
247int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
248int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
249int hostap_add_sta(struct ap_data *ap, u8 *sta_addr);
250int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr,
251 struct hostap_80211_rx_status *rx_stats);
252void hostap_update_rates(local_info_t *local);
253void hostap_add_wds_links(local_info_t *local);
254void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type);
255
256#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
257void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
258 int resend);
259#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
260
261#endif /* HOSTAP_AP_H */
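
The station table declared above is a simple chained hash: sta_hash[] is indexed by STA_HASH() (the last octet of the MAC address) and collisions are linked through sta_info::hnext. A lookup therefore looks roughly like the following sketch; the real ap_get_sta() lives in hostap_ap.c and must be called with sta_table_lock held:

static struct sta_info *example_get_sta(struct ap_data *ap, u8 *addr)
{
	struct sta_info *s = ap->sta_hash[STA_HASH(addr)];

	/* Walk the per-bucket chain until the full address matches */
	while (s != NULL && memcmp(s->addr, addr, ETH_ALEN) != 0)
		s = s->hnext;
	return s;
}
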
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
new file mode 100644
index 000000000000..6f4fa9dc308f
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -0,0 +1,435 @@
1#ifndef HOSTAP_COMMON_H
2#define HOSTAP_COMMON_H
3
4#define BIT(x) (1 << (x))
5
6#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
7#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
8
9
10/* IEEE 802.11 defines */
11
12/* Information Element IDs */
13#define WLAN_EID_SSID 0
14#define WLAN_EID_SUPP_RATES 1
15#define WLAN_EID_FH_PARAMS 2
16#define WLAN_EID_DS_PARAMS 3
17#define WLAN_EID_CF_PARAMS 4
18#define WLAN_EID_TIM 5
19#define WLAN_EID_IBSS_PARAMS 6
20#define WLAN_EID_CHALLENGE 16
21#define WLAN_EID_RSN 48
22#define WLAN_EID_GENERIC 221
23
24
25/* HFA384X Configuration RIDs */
26#define HFA384X_RID_CNFPORTTYPE 0xFC00
27#define HFA384X_RID_CNFOWNMACADDR 0xFC01
28#define HFA384X_RID_CNFDESIREDSSID 0xFC02
29#define HFA384X_RID_CNFOWNCHANNEL 0xFC03
30#define HFA384X_RID_CNFOWNSSID 0xFC04
31#define HFA384X_RID_CNFOWNATIMWINDOW 0xFC05
32#define HFA384X_RID_CNFSYSTEMSCALE 0xFC06
33#define HFA384X_RID_CNFMAXDATALEN 0xFC07
34#define HFA384X_RID_CNFWDSADDRESS 0xFC08
35#define HFA384X_RID_CNFPMENABLED 0xFC09
36#define HFA384X_RID_CNFPMEPS 0xFC0A
37#define HFA384X_RID_CNFMULTICASTRECEIVE 0xFC0B
38#define HFA384X_RID_CNFMAXSLEEPDURATION 0xFC0C
39#define HFA384X_RID_CNFPMHOLDOVERDURATION 0xFC0D
40#define HFA384X_RID_CNFOWNNAME 0xFC0E
41#define HFA384X_RID_CNFOWNDTIMPERIOD 0xFC10
42#define HFA384X_RID_CNFWDSADDRESS1 0xFC11 /* AP f/w only */
43#define HFA384X_RID_CNFWDSADDRESS2 0xFC12 /* AP f/w only */
44#define HFA384X_RID_CNFWDSADDRESS3 0xFC13 /* AP f/w only */
45#define HFA384X_RID_CNFWDSADDRESS4 0xFC14 /* AP f/w only */
46#define HFA384X_RID_CNFWDSADDRESS5 0xFC15 /* AP f/w only */
47#define HFA384X_RID_CNFWDSADDRESS6 0xFC16 /* AP f/w only */
48#define HFA384X_RID_CNFMULTICASTPMBUFFERING 0xFC17 /* AP f/w only */
49#define HFA384X_RID_UNKNOWN1 0xFC20
50#define HFA384X_RID_UNKNOWN2 0xFC21
51#define HFA384X_RID_CNFWEPDEFAULTKEYID 0xFC23
52#define HFA384X_RID_CNFDEFAULTKEY0 0xFC24
53#define HFA384X_RID_CNFDEFAULTKEY1 0xFC25
54#define HFA384X_RID_CNFDEFAULTKEY2 0xFC26
55#define HFA384X_RID_CNFDEFAULTKEY3 0xFC27
56#define HFA384X_RID_CNFWEPFLAGS 0xFC28
57#define HFA384X_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
58#define HFA384X_RID_CNFAUTHENTICATION 0xFC2A
59#define HFA384X_RID_CNFMAXASSOCSTA 0xFC2B /* AP f/w only */
60#define HFA384X_RID_CNFTXCONTROL 0xFC2C
61#define HFA384X_RID_CNFROAMINGMODE 0xFC2D
62#define HFA384X_RID_CNFHOSTAUTHENTICATION 0xFC2E /* AP f/w only */
63#define HFA384X_RID_CNFRCVCRCERROR 0xFC30
64#define HFA384X_RID_CNFMMLIFE 0xFC31
65#define HFA384X_RID_CNFALTRETRYCOUNT 0xFC32
66#define HFA384X_RID_CNFBEACONINT 0xFC33
67#define HFA384X_RID_CNFAPPCFINFO 0xFC34 /* AP f/w only */
68#define HFA384X_RID_CNFSTAPCFINFO 0xFC35
69#define HFA384X_RID_CNFPRIORITYQUSAGE 0xFC37
70#define HFA384X_RID_CNFTIMCTRL 0xFC40
71#define HFA384X_RID_UNKNOWN3 0xFC41 /* added in STA f/w 0.7.x */
72#define HFA384X_RID_CNFTHIRTY2TALLY 0xFC42 /* added in STA f/w 0.8.0 */
73#define HFA384X_RID_CNFENHSECURITY 0xFC43 /* AP f/w or STA f/w >= 1.6.3 */
74#define HFA384X_RID_CNFDBMADJUST 0xFC46 /* added in STA f/w 1.3.1 */
75#define HFA384X_RID_GENERICELEMENT 0xFC48 /* added in STA f/w 1.7.0;
76 * write only */
77#define HFA384X_RID_PROPAGATIONDELAY 0xFC49 /* added in STA f/w 1.7.6 */
78#define HFA384X_RID_GROUPADDRESSES 0xFC80
79#define HFA384X_RID_CREATEIBSS 0xFC81
80#define HFA384X_RID_FRAGMENTATIONTHRESHOLD 0xFC82
81#define HFA384X_RID_RTSTHRESHOLD 0xFC83
82#define HFA384X_RID_TXRATECONTROL 0xFC84
83#define HFA384X_RID_PROMISCUOUSMODE 0xFC85
84#define HFA384X_RID_FRAGMENTATIONTHRESHOLD0 0xFC90 /* AP f/w only */
85#define HFA384X_RID_FRAGMENTATIONTHRESHOLD1 0xFC91 /* AP f/w only */
86#define HFA384X_RID_FRAGMENTATIONTHRESHOLD2 0xFC92 /* AP f/w only */
87#define HFA384X_RID_FRAGMENTATIONTHRESHOLD3 0xFC93 /* AP f/w only */
88#define HFA384X_RID_FRAGMENTATIONTHRESHOLD4 0xFC94 /* AP f/w only */
89#define HFA384X_RID_FRAGMENTATIONTHRESHOLD5 0xFC95 /* AP f/w only */
90#define HFA384X_RID_FRAGMENTATIONTHRESHOLD6 0xFC96 /* AP f/w only */
91#define HFA384X_RID_RTSTHRESHOLD0 0xFC97 /* AP f/w only */
92#define HFA384X_RID_RTSTHRESHOLD1 0xFC98 /* AP f/w only */
93#define HFA384X_RID_RTSTHRESHOLD2 0xFC99 /* AP f/w only */
94#define HFA384X_RID_RTSTHRESHOLD3 0xFC9A /* AP f/w only */
95#define HFA384X_RID_RTSTHRESHOLD4 0xFC9B /* AP f/w only */
96#define HFA384X_RID_RTSTHRESHOLD5 0xFC9C /* AP f/w only */
97#define HFA384X_RID_RTSTHRESHOLD6 0xFC9D /* AP f/w only */
98#define HFA384X_RID_TXRATECONTROL0 0xFC9E /* AP f/w only */
99#define HFA384X_RID_TXRATECONTROL1 0xFC9F /* AP f/w only */
100#define HFA384X_RID_TXRATECONTROL2 0xFCA0 /* AP f/w only */
101#define HFA384X_RID_TXRATECONTROL3 0xFCA1 /* AP f/w only */
102#define HFA384X_RID_TXRATECONTROL4 0xFCA2 /* AP f/w only */
103#define HFA384X_RID_TXRATECONTROL5 0xFCA3 /* AP f/w only */
104#define HFA384X_RID_TXRATECONTROL6 0xFCA4 /* AP f/w only */
105#define HFA384X_RID_CNFSHORTPREAMBLE 0xFCB0
106#define HFA384X_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
107#define HFA384X_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
108#define HFA384X_RID_CNFBASICRATES 0xFCB3
109#define HFA384X_RID_CNFSUPPORTEDRATES 0xFCB4
110#define HFA384X_RID_CNFFALLBACKCTRL 0xFCB5 /* added in STA f/w 1.3.1 */
111#define HFA384X_RID_WEPKEYDISABLE 0xFCB6 /* added in STA f/w 1.3.1 */
112#define HFA384X_RID_WEPKEYMAPINDEX 0xFCB7 /* ? */
113#define HFA384X_RID_BROADCASTKEYID 0xFCB8 /* ? */
114#define HFA384X_RID_ENTSECFLAGEYID 0xFCB9 /* ? */
115#define HFA384X_RID_CNFPASSIVESCANCTRL 0xFCBA /* added in STA f/w 1.5.0 */
116#define HFA384X_RID_SSNHANDLINGMODE 0xFCBB /* added in STA f/w 1.7.0 */
117#define HFA384X_RID_MDCCONTROL 0xFCBC /* added in STA f/w 1.7.0 */
118#define HFA384X_RID_MDCCOUNTRY 0xFCBD /* added in STA f/w 1.7.0 */
119#define HFA384X_RID_TXPOWERMAX 0xFCBE /* added in STA f/w 1.7.0 */
120#define HFA384X_RID_CNFLFOENABLED 0xFCBF /* added in STA f/w 1.6.3 */
121#define HFA384X_RID_CAPINFO 0xFCC0 /* added in STA f/w 1.7.0 */
122#define HFA384X_RID_LISTENINTERVAL 0xFCC1 /* added in STA f/w 1.7.0 */
123#define HFA384X_RID_SW_ANT_DIV 0xFCC2 /* added in STA f/w 1.7.0; Prism3 */
124#define HFA384X_RID_LED_CTRL 0xFCC4 /* added in STA f/w 1.7.6 */
125#define HFA384X_RID_HFODELAY 0xFCC5 /* added in STA f/w 1.7.6 */
126#define HFA384X_RID_DISALLOWEDBSSID 0xFCC6 /* added in STA f/w 1.8.0 */
127#define HFA384X_RID_TICKTIME 0xFCE0
128#define HFA384X_RID_SCANREQUEST 0xFCE1
129#define HFA384X_RID_JOINREQUEST 0xFCE2
130#define HFA384X_RID_AUTHENTICATESTATION 0xFCE3 /* AP f/w only */
131#define HFA384X_RID_CHANNELINFOREQUEST 0xFCE4 /* AP f/w only */
132#define HFA384X_RID_HOSTSCAN 0xFCE5 /* added in STA f/w 1.3.1 */
133
134/* HFA384X Information RIDs */
135#define HFA384X_RID_MAXLOADTIME 0xFD00
136#define HFA384X_RID_DOWNLOADBUFFER 0xFD01
137#define HFA384X_RID_PRIID 0xFD02
138#define HFA384X_RID_PRISUPRANGE 0xFD03
139#define HFA384X_RID_CFIACTRANGES 0xFD04
140#define HFA384X_RID_NICSERNUM 0xFD0A
141#define HFA384X_RID_NICID 0xFD0B
142#define HFA384X_RID_MFISUPRANGE 0xFD0C
143#define HFA384X_RID_CFISUPRANGE 0xFD0D
144#define HFA384X_RID_CHANNELLIST 0xFD10
145#define HFA384X_RID_REGULATORYDOMAINS 0xFD11
146#define HFA384X_RID_TEMPTYPE 0xFD12
147#define HFA384X_RID_CIS 0xFD13
148#define HFA384X_RID_STAID 0xFD20
149#define HFA384X_RID_STASUPRANGE 0xFD21
150#define HFA384X_RID_MFIACTRANGES 0xFD22
151#define HFA384X_RID_CFIACTRANGES2 0xFD23
152#define HFA384X_RID_PRODUCTNAME 0xFD24 /* added in STA f/w 1.3.1;
153 * only Prism2.5(?) */
154#define HFA384X_RID_PORTSTATUS 0xFD40
155#define HFA384X_RID_CURRENTSSID 0xFD41
156#define HFA384X_RID_CURRENTBSSID 0xFD42
157#define HFA384X_RID_COMMSQUALITY 0xFD43
158#define HFA384X_RID_CURRENTTXRATE 0xFD44
159#define HFA384X_RID_CURRENTBEACONINTERVAL 0xFD45
160#define HFA384X_RID_CURRENTSCALETHRESHOLDS 0xFD46
161#define HFA384X_RID_PROTOCOLRSPTIME 0xFD47
162#define HFA384X_RID_SHORTRETRYLIMIT 0xFD48
163#define HFA384X_RID_LONGRETRYLIMIT 0xFD49
164#define HFA384X_RID_MAXTRANSMITLIFETIME 0xFD4A
165#define HFA384X_RID_MAXRECEIVELIFETIME 0xFD4B
166#define HFA384X_RID_CFPOLLABLE 0xFD4C
167#define HFA384X_RID_AUTHENTICATIONALGORITHMS 0xFD4D
168#define HFA384X_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
169#define HFA384X_RID_DBMCOMMSQUALITY 0xFD51 /* added in STA f/w 1.3.1 */
170#define HFA384X_RID_CURRENTTXRATE1 0xFD80 /* AP f/w only */
171#define HFA384X_RID_CURRENTTXRATE2 0xFD81 /* AP f/w only */
172#define HFA384X_RID_CURRENTTXRATE3 0xFD82 /* AP f/w only */
173#define HFA384X_RID_CURRENTTXRATE4 0xFD83 /* AP f/w only */
174#define HFA384X_RID_CURRENTTXRATE5 0xFD84 /* AP f/w only */
175#define HFA384X_RID_CURRENTTXRATE6 0xFD85 /* AP f/w only */
176#define HFA384X_RID_OWNMACADDR 0xFD86 /* AP f/w only */
177#define HFA384X_RID_SCANRESULTSTABLE 0xFD88 /* added in STA f/w 0.8.3 */
178#define HFA384X_RID_HOSTSCANRESULTS 0xFD89 /* added in STA f/w 1.3.1 */
179#define HFA384X_RID_AUTHENTICATIONUSED 0xFD8A /* added in STA f/w 1.3.4 */
180#define HFA384X_RID_CNFFAASWITCHCTRL 0xFD8B /* added in STA f/w 1.6.3 */
181#define HFA384X_RID_ASSOCIATIONFAILURE 0xFD8D /* added in STA f/w 1.8.0 */
182#define HFA384X_RID_PHYTYPE 0xFDC0
183#define HFA384X_RID_CURRENTCHANNEL 0xFDC1
184#define HFA384X_RID_CURRENTPOWERSTATE 0xFDC2
185#define HFA384X_RID_CCAMODE 0xFDC3
186#define HFA384X_RID_SUPPORTEDDATARATES 0xFDC6
187#define HFA384X_RID_LFO_VOLT_REG_TEST_RES 0xFDC7 /* added in STA f/w 1.7.1 */
188#define HFA384X_RID_BUILDSEQ 0xFFFE
189#define HFA384X_RID_FWID 0xFFFF
190
191
192struct hfa384x_comp_ident
193{
194 u16 id;
195 u16 variant;
196 u16 major;
197 u16 minor;
198} __attribute__ ((packed));
199
200#define HFA384X_COMP_ID_PRI 0x15
201#define HFA384X_COMP_ID_STA 0x1f
202#define HFA384X_COMP_ID_FW_AP 0x14b
203
204struct hfa384x_sup_range
205{
206 u16 role;
207 u16 id;
208 u16 variant;
209 u16 bottom;
210 u16 top;
211} __attribute__ ((packed));
212
213
214struct hfa384x_build_id
215{
216 u16 pri_seq;
217 u16 sec_seq;
218} __attribute__ ((packed));
219
220/* FD01 - Download Buffer */
221struct hfa384x_rid_download_buffer
222{
223 u16 page;
224 u16 offset;
225 u16 length;
226} __attribute__ ((packed));
227
228/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
229struct hfa384x_comms_quality {
230 u16 comm_qual; /* 0 .. 92 */
231 u16 signal_level; /* 27 .. 154 */
232 u16 noise_level; /* 27 .. 154 */
233} __attribute__ ((packed));
234
235
236/* netdevice private ioctls (used, e.g., with iwpriv from user space) */
237
238/* New wireless extensions API - SET/GET convention (even ioctl numbers are
239 * root only)
240 */
241#define PRISM2_IOCTL_PRISM2_PARAM (SIOCIWFIRSTPRIV + 0)
242#define PRISM2_IOCTL_GET_PRISM2_PARAM (SIOCIWFIRSTPRIV + 1)
243#define PRISM2_IOCTL_WRITEMIF (SIOCIWFIRSTPRIV + 2)
244#define PRISM2_IOCTL_READMIF (SIOCIWFIRSTPRIV + 3)
245#define PRISM2_IOCTL_MONITOR (SIOCIWFIRSTPRIV + 4)
246#define PRISM2_IOCTL_RESET (SIOCIWFIRSTPRIV + 6)
247#define PRISM2_IOCTL_INQUIRE (SIOCIWFIRSTPRIV + 8)
248#define PRISM2_IOCTL_WDS_ADD (SIOCIWFIRSTPRIV + 10)
249#define PRISM2_IOCTL_WDS_DEL (SIOCIWFIRSTPRIV + 12)
250#define PRISM2_IOCTL_SET_RID_WORD (SIOCIWFIRSTPRIV + 14)
251#define PRISM2_IOCTL_MACCMD (SIOCIWFIRSTPRIV + 16)
252#define PRISM2_IOCTL_ADDMAC (SIOCIWFIRSTPRIV + 18)
253#define PRISM2_IOCTL_DELMAC (SIOCIWFIRSTPRIV + 20)
254#define PRISM2_IOCTL_KICKMAC (SIOCIWFIRSTPRIV + 22)
255
256/* following are not in SIOCGIWPRIV list; check permission in the driver code
257 */
258#define PRISM2_IOCTL_DOWNLOAD (SIOCDEVPRIVATE + 13)
259#define PRISM2_IOCTL_HOSTAPD (SIOCDEVPRIVATE + 14)
260
261
262/* PRISM2_IOCTL_PRISM2_PARAM ioctl() subtypes: */
263enum {
264 /* PRISM2_PARAM_PTYPE = 1, */ /* REMOVED 2003-10-22 */
265 PRISM2_PARAM_TXRATECTRL = 2,
266 PRISM2_PARAM_BEACON_INT = 3,
267 PRISM2_PARAM_PSEUDO_IBSS = 4,
268 PRISM2_PARAM_ALC = 5,
269 /* PRISM2_PARAM_TXPOWER = 6, */ /* REMOVED 2003-10-22 */
270 PRISM2_PARAM_DUMP = 7,
271 PRISM2_PARAM_OTHER_AP_POLICY = 8,
272 PRISM2_PARAM_AP_MAX_INACTIVITY = 9,
273 PRISM2_PARAM_AP_BRIDGE_PACKETS = 10,
274 PRISM2_PARAM_DTIM_PERIOD = 11,
275 PRISM2_PARAM_AP_NULLFUNC_ACK = 12,
276 PRISM2_PARAM_MAX_WDS = 13,
277 PRISM2_PARAM_AP_AUTOM_AP_WDS = 14,
278 PRISM2_PARAM_AP_AUTH_ALGS = 15,
279 PRISM2_PARAM_MONITOR_ALLOW_FCSERR = 16,
280 PRISM2_PARAM_HOST_ENCRYPT = 17,
281 PRISM2_PARAM_HOST_DECRYPT = 18,
282 /* PRISM2_PARAM_BUS_MASTER_THRESHOLD_RX = 19, REMOVED 2005-08-14 */
283 /* PRISM2_PARAM_BUS_MASTER_THRESHOLD_TX = 20, REMOVED 2005-08-14 */
284 PRISM2_PARAM_HOST_ROAMING = 21,
285 PRISM2_PARAM_BCRX_STA_KEY = 22,
286 PRISM2_PARAM_IEEE_802_1X = 23,
287 PRISM2_PARAM_ANTSEL_TX = 24,
288 PRISM2_PARAM_ANTSEL_RX = 25,
289 PRISM2_PARAM_MONITOR_TYPE = 26,
290 PRISM2_PARAM_WDS_TYPE = 27,
291 PRISM2_PARAM_HOSTSCAN = 28,
292 PRISM2_PARAM_AP_SCAN = 29,
293 PRISM2_PARAM_ENH_SEC = 30,
294 PRISM2_PARAM_IO_DEBUG = 31,
295 PRISM2_PARAM_BASIC_RATES = 32,
296 PRISM2_PARAM_OPER_RATES = 33,
297 PRISM2_PARAM_HOSTAPD = 34,
298 PRISM2_PARAM_HOSTAPD_STA = 35,
299 PRISM2_PARAM_WPA = 36,
300 PRISM2_PARAM_PRIVACY_INVOKED = 37,
301 PRISM2_PARAM_TKIP_COUNTERMEASURES = 38,
302 PRISM2_PARAM_DROP_UNENCRYPTED = 39,
303 PRISM2_PARAM_SCAN_CHANNEL_MASK = 40,
304};
305
306enum { HOSTAP_ANTSEL_DO_NOT_TOUCH = 0, HOSTAP_ANTSEL_DIVERSITY = 1,
307 HOSTAP_ANTSEL_LOW = 2, HOSTAP_ANTSEL_HIGH = 3 };
308
309
310/* PRISM2_IOCTL_MACCMD ioctl() subcommands: */
311enum { AP_MAC_CMD_POLICY_OPEN = 0, AP_MAC_CMD_POLICY_ALLOW = 1,
312 AP_MAC_CMD_POLICY_DENY = 2, AP_MAC_CMD_FLUSH = 3,
313 AP_MAC_CMD_KICKALL = 4 };
314
315
316/* PRISM2_IOCTL_DOWNLOAD ioctl() dl_cmd: */
317enum {
318 PRISM2_DOWNLOAD_VOLATILE = 1 /* RAM */,
319 /* Note! Old versions of prism2_srec have a fatal error in CRC-16
320 * calculation, which will corrupt all non-volatile downloads.
321 * PRISM2_DOWNLOAD_NON_VOLATILE used to be 2, but it is now 3 to
322 * prevent use of old versions of prism2_srec for non-volatile
323 * download. */
324 PRISM2_DOWNLOAD_NON_VOLATILE = 3 /* FLASH */,
325 PRISM2_DOWNLOAD_VOLATILE_GENESIS = 4 /* RAM in Genesis mode */,
326 /* Persistent versions of volatile download commands (keep firmware
327	 * data in memory and automatically re-download after hw_reset) */
328 PRISM2_DOWNLOAD_VOLATILE_PERSISTENT = 5,
329 PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT = 6,
330};
331
332struct prism2_download_param {
333 u32 dl_cmd;
334 u32 start_addr;
335 u32 num_areas;
336 struct prism2_download_area {
337 u32 addr; /* wlan card address */
338 u32 len;
339 void __user *ptr; /* pointer to data in user space */
340 } data[0];
341};
342
343#define PRISM2_MAX_DOWNLOAD_AREA_LEN 131072
344#define PRISM2_MAX_DOWNLOAD_LEN 262144
345
346
347/* PRISM2_IOCTL_HOSTAPD ioctl() cmd: */
348enum {
349 PRISM2_HOSTAPD_FLUSH = 1,
350 PRISM2_HOSTAPD_ADD_STA = 2,
351 PRISM2_HOSTAPD_REMOVE_STA = 3,
352 PRISM2_HOSTAPD_GET_INFO_STA = 4,
353 /* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
354 PRISM2_SET_ENCRYPTION = 6,
355 PRISM2_GET_ENCRYPTION = 7,
356 PRISM2_HOSTAPD_SET_FLAGS_STA = 8,
357 PRISM2_HOSTAPD_GET_RID = 9,
358 PRISM2_HOSTAPD_SET_RID = 10,
359 PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
360 PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
361 PRISM2_HOSTAPD_MLME = 13,
362 PRISM2_HOSTAPD_SCAN_REQ = 14,
363 PRISM2_HOSTAPD_STA_CLEAR_STATS = 15,
364};
365
366#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
367#define PRISM2_HOSTAPD_RID_HDR_LEN \
368((int) (&((struct prism2_hostapd_param *) 0)->u.rid.data))
369#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
370((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
371
372/* Maximum length for algorithm names (-1 for nul termination) used in ioctl()
373 */
374#define HOSTAP_CRYPT_ALG_NAME_LEN 16
375
376
377struct prism2_hostapd_param {
378 u32 cmd;
379 u8 sta_addr[ETH_ALEN];
380 union {
381 struct {
382 u16 aid;
383 u16 capability;
384 u8 tx_supp_rates;
385 } add_sta;
386 struct {
387 u32 inactive_sec;
388 } get_info_sta;
389 struct {
390 u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
391 u32 flags;
392 u32 err;
393 u8 idx;
394 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
395 u16 key_len;
396 u8 key[0];
397 } crypt;
398 struct {
399 u32 flags_and;
400 u32 flags_or;
401 } set_flags_sta;
402 struct {
403 u16 rid;
404 u16 len;
405 u8 data[0];
406 } rid;
407 struct {
408 u8 len;
409 u8 data[0];
410 } generic_elem;
411 struct {
412#define MLME_STA_DEAUTH 0
413#define MLME_STA_DISASSOC 1
414 u16 cmd;
415 u16 reason_code;
416 } mlme;
417 struct {
418 u8 ssid_len;
419 u8 ssid[32];
420 } scan_req;
421 } u;
422};
423
424#define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT(0)
425#define HOSTAP_CRYPT_FLAG_PERMANENT BIT(1)
426
427#define HOSTAP_CRYPT_ERR_UNKNOWN_ALG 2
428#define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3
429#define HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED 4
430#define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5
431#define HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED 6
432#define HOSTAP_CRYPT_ERR_CARD_CONF_FAILED 7
433
434
435#endif /* HOSTAP_COMMON_H */
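
The MACSTR/MAC2STR pair defined at the top of this header is the driver's convention for printing MAC addresses: MACSTR supplies the format string and MAC2STR expands to the six byte arguments. Example usage (the function name is illustrative only):

static void example_print_peer(const u8 *addr)
{
	printk(KERN_DEBUG "hostap: peer " MACSTR "\n", MAC2STR(addr));
}
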
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
new file mode 100644
index 000000000000..7ed3425d08c1
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -0,0 +1,55 @@
1#ifndef HOSTAP_CONFIG_H
2#define HOSTAP_CONFIG_H
3
4#define PRISM2_VERSION "0.4.4-kernel"
5
6/* In previous versions of the Host AP driver, support for the user space version
7 * of IEEE 802.11 management (hostapd) used to be disabled in the default
8 * configuration. From now on, support for hostapd is always included and it is
9 * possible to disable kernel driver version of IEEE 802.11 management with a
10 * separate define, PRISM2_NO_KERNEL_IEEE80211_MGMT. */
11/* #define PRISM2_NO_KERNEL_IEEE80211_MGMT */
12
13/* Maximum number of events handled per interrupt */
14#define PRISM2_MAX_INTERRUPT_EVENTS 20
15
16/* Include code for downloading firmware images into volatile RAM. */
17#define PRISM2_DOWNLOAD_SUPPORT
18
19/* Allow kernel configuration to enable download support. */
20#if !defined(PRISM2_DOWNLOAD_SUPPORT) && defined(CONFIG_HOSTAP_FIRMWARE)
21#define PRISM2_DOWNLOAD_SUPPORT
22#endif
23
24#ifdef PRISM2_DOWNLOAD_SUPPORT
25/* Allow writing firmware images into flash, i.e., to non-volatile storage.
26 * Before you enable this option, you should make absolutely sure that you are
27 * using the prism2_srec utility that comes with THIS version of the driver!
28 * In addition, please note that it is possible to kill your card with
29 * non-volatile download if you are using an incorrect image. This feature has not
30 * been fully tested, so please be careful with it. */
31/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
32#endif /* PRISM2_DOWNLOAD_SUPPORT */
33
34/* Save low-level I/O for debugging. This should not be enabled in normal use.
35 */
36/* #define PRISM2_IO_DEBUG */
37
38/* The following defines can be used to remove unneeded parts of the driver,
39 * e.g., to limit the size of the kernel module. Definitions can be added here
40 * in hostap_config.h or they can be passed to the make command with EXTRA_CFLAGS,
41 * e.g.,
42 * 'make pccard EXTRA_CFLAGS="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
43 */
44
45/* Do not include debug messages into the driver */
46/* #define PRISM2_NO_DEBUG */
47
48/* Do not include /proc/net/prism2/wlan#/{registers,debug} */
49/* #define PRISM2_NO_PROCFS_DEBUG */
50
51/* Do not include station functionality (i.e., allow only Master (Host AP) mode)
52 */
53/* #define PRISM2_NO_STATION_MODES */
54
55#endif /* HOSTAP_CONFIG_H */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
new file mode 100644
index 000000000000..faa83badf0a1
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -0,0 +1,1030 @@
1#define PRISM2_PCCARD
2
3#include <linux/config.h>
4#include <linux/module.h>
5#include <linux/init.h>
6#include <linux/if.h>
7#include <linux/wait.h>
8#include <linux/timer.h>
9#include <linux/skbuff.h>
10#include <linux/netdevice.h>
11#include <linux/workqueue.h>
12#include <linux/wireless.h>
13#include <net/iw_handler.h>
14
15#include <pcmcia/cs_types.h>
16#include <pcmcia/cs.h>
17#include <pcmcia/cistpl.h>
18#include <pcmcia/cisreg.h>
19#include <pcmcia/ds.h>
20
21#include <asm/io.h>
22
23#include "hostap_wlan.h"
24
25
26static char *version = PRISM2_VERSION " (Jouni Malinen <jkmaline@cc.hut.fi>)";
27static dev_info_t dev_info = "hostap_cs";
28static dev_link_t *dev_list = NULL;
29
30MODULE_AUTHOR("Jouni Malinen");
31MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
32 "cards (PC Card).");
33MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");
34MODULE_LICENSE("GPL");
35MODULE_VERSION(PRISM2_VERSION);
36
37
38static int ignore_cis_vcc;
39module_param(ignore_cis_vcc, int, 0444);
40MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
41
42
43/* struct local_info::hw_priv */
44struct hostap_cs_priv {
45 dev_node_t node;
46 dev_link_t *link;
47 int sandisk_connectplus;
48};
49
50
51#ifdef PRISM2_IO_DEBUG
52
53static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
54{
55 struct hostap_interface *iface;
56 local_info_t *local;
57 unsigned long flags;
58
59 iface = netdev_priv(dev);
60 local = iface->local;
61 spin_lock_irqsave(&local->lock, flags);
62 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
63 outb(v, dev->base_addr + a);
64 spin_unlock_irqrestore(&local->lock, flags);
65}
66
67static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
68{
69 struct hostap_interface *iface;
70 local_info_t *local;
71 unsigned long flags;
72 u8 v;
73
74 iface = netdev_priv(dev);
75 local = iface->local;
76 spin_lock_irqsave(&local->lock, flags);
77 v = inb(dev->base_addr + a);
78 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
79 spin_unlock_irqrestore(&local->lock, flags);
80 return v;
81}
82
83static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
84{
85 struct hostap_interface *iface;
86 local_info_t *local;
87 unsigned long flags;
88
89 iface = netdev_priv(dev);
90 local = iface->local;
91 spin_lock_irqsave(&local->lock, flags);
92 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
93 outw(v, dev->base_addr + a);
94 spin_unlock_irqrestore(&local->lock, flags);
95}
96
97static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
98{
99 struct hostap_interface *iface;
100 local_info_t *local;
101 unsigned long flags;
102 u16 v;
103
104 iface = netdev_priv(dev);
105 local = iface->local;
106 spin_lock_irqsave(&local->lock, flags);
107 v = inw(dev->base_addr + a);
108 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
109 spin_unlock_irqrestore(&local->lock, flags);
110 return v;
111}
112
113static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
114 u8 *buf, int wc)
115{
116 struct hostap_interface *iface;
117 local_info_t *local;
118 unsigned long flags;
119
120 iface = netdev_priv(dev);
121 local = iface->local;
122 spin_lock_irqsave(&local->lock, flags);
123 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
124 outsw(dev->base_addr + a, buf, wc);
125 spin_unlock_irqrestore(&local->lock, flags);
126}
127
128static inline void hfa384x_insw_debug(struct net_device *dev, int a,
129 u8 *buf, int wc)
130{
131 struct hostap_interface *iface;
132 local_info_t *local;
133 unsigned long flags;
134
135 iface = netdev_priv(dev);
136 local = iface->local;
137 spin_lock_irqsave(&local->lock, flags);
138 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
139 insw(dev->base_addr + a, buf, wc);
140 spin_unlock_irqrestore(&local->lock, flags);
141}
142
143#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
144#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
145#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
146#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
147#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
148#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))
149
150#else /* PRISM2_IO_DEBUG */
151
152#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
153#define HFA384X_INB(a) inb(dev->base_addr + (a))
154#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
155#define HFA384X_INW(a) inw(dev->base_addr + (a))
156#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
157#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)
158
159#endif /* PRISM2_IO_DEBUG */
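/* Note: both the debug and non-debug variants of the HFA384X_* macros above
 * expect a variable named 'dev' (struct net_device *) to be in scope at the
 * call site, since they all reference dev->base_addr (directly or via the
 * *_debug helpers). */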
160
161
162static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
163 int len)
164{
165 u16 d_off;
166 u16 *pos;
167
168 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
169 pos = (u16 *) buf;
170
171 if (len / 2)
172 HFA384X_INSW(d_off, buf, len / 2);
173 pos += len / 2;
174
175 if (len & 1)
176 *((char *) pos) = HFA384X_INB(d_off);
177
178 return 0;
179}
180
181
182static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
183{
184 u16 d_off;
185 u16 *pos;
186
187 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
188 pos = (u16 *) buf;
189
190 if (len / 2)
191 HFA384X_OUTSW(d_off, buf, len / 2);
192 pos += len / 2;
193
194 if (len & 1)
195 HFA384X_OUTB(*((char *) pos), d_off);
196
197 return 0;
198}
199
200
201/* FIX: This might change at some point.. */
202#include "hostap_hw.c"
203
204
205
206static void prism2_detach(dev_link_t *link);
207static void prism2_release(u_long arg);
208static int prism2_event(event_t event, int priority,
209 event_callback_args_t *args);
210
211
212static int prism2_pccard_card_present(local_info_t *local)
213{
214 struct hostap_cs_priv *hw_priv = local->hw_priv;
215 if (hw_priv != NULL && hw_priv->link != NULL &&
216 ((hw_priv->link->state & (DEV_PRESENT | DEV_CONFIG)) ==
217 (DEV_PRESENT | DEV_CONFIG)))
218 return 1;
219 return 0;
220}
221
222
223/*
224 * SanDisk CompactFlash WLAN Flashcard - Product Manual v1.0
225 * Document No. 20-10-00058, January 2004
226 * http://www.sandisk.com/pdf/industrial/ProdManualCFWLANv1.0.pdf
227 */
228#define SANDISK_WLAN_ACTIVATION_OFF 0x40
229#define SANDISK_HCR_OFF 0x42
230
231
232static void sandisk_set_iobase(local_info_t *local)
233{
234 int res;
235 conf_reg_t reg;
236 struct hostap_cs_priv *hw_priv = local->hw_priv;
237
238 reg.Function = 0;
239 reg.Action = CS_WRITE;
240 reg.Offset = 0x10; /* 0x3f0 IO base 1 */
241 reg.Value = hw_priv->link->io.BasePort1 & 0x00ff;
242 res = pcmcia_access_configuration_register(hw_priv->link->handle,
243 &reg);
244 if (res != CS_SUCCESS) {
245 printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
246 " res=%d\n", res);
247 }
248 udelay(10);
249
250 reg.Function = 0;
251 reg.Action = CS_WRITE;
252 reg.Offset = 0x12; /* 0x3f2 IO base 2 */
253 reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8;
254 res = pcmcia_access_configuration_register(hw_priv->link->handle,
255 &reg);
256 if (res != CS_SUCCESS) {
257 printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
258 " res=%d\n", res);
259 }
260}
261
262
263static void sandisk_write_hcr(local_info_t *local, int hcr)
264{
265 struct net_device *dev = local->dev;
266 int i;
267
268 HFA384X_OUTB(0x80, SANDISK_WLAN_ACTIVATION_OFF);
269 udelay(50);
270 for (i = 0; i < 10; i++) {
271 HFA384X_OUTB(hcr, SANDISK_HCR_OFF);
272 }
273 udelay(55);
274 HFA384X_OUTB(0x45, SANDISK_WLAN_ACTIVATION_OFF);
275}
276
277
278static int sandisk_enable_wireless(struct net_device *dev)
279{
280 int res, ret = 0;
281 conf_reg_t reg;
282 struct hostap_interface *iface = dev->priv;
283 local_info_t *local = iface->local;
284 tuple_t tuple;
285 cisparse_t *parse = NULL;
286 u_char buf[64];
287 struct hostap_cs_priv *hw_priv = local->hw_priv;
288
289 if (hw_priv->link->io.NumPorts1 < 0x42) {
290 /* Not enough ports to be SanDisk multi-function card */
291 ret = -ENODEV;
292 goto done;
293 }
294
295 parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
296 if (parse == NULL) {
297 ret = -ENOMEM;
298 goto done;
299 }
300
301 tuple.DesiredTuple = CISTPL_MANFID;
302 tuple.Attributes = TUPLE_RETURN_COMMON;
303 tuple.TupleData = buf;
304 tuple.TupleDataMax = sizeof(buf);
305 tuple.TupleOffset = 0;
306 if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) ||
307 pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) ||
308 pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) ||
309 parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
310 /* No SanDisk manfid found */
311 ret = -ENODEV;
312 goto done;
313 }
314
315 tuple.DesiredTuple = CISTPL_LONGLINK_MFC;
316 if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) ||
317 pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) ||
318 pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) ||
319 parse->longlink_mfc.nfn < 2) {
320 /* No multi-function links found */
321 ret = -ENODEV;
322 goto done;
323 }
324
325 printk(KERN_DEBUG "%s: Multi-function SanDisk ConnectPlus detected"
326 " - using vendor-specific initialization\n", dev->name);
327 hw_priv->sandisk_connectplus = 1;
328
329 reg.Function = 0;
330 reg.Action = CS_WRITE;
331 reg.Offset = CISREG_COR;
332 reg.Value = COR_SOFT_RESET;
333 res = pcmcia_access_configuration_register(hw_priv->link->handle,
334 &reg);
335 if (res != CS_SUCCESS) {
336 printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
337 dev->name, res);
338 goto done;
339 }
340 mdelay(5);
341
342 reg.Function = 0;
343 reg.Action = CS_WRITE;
344 reg.Offset = CISREG_COR;
345 /*
346 * Do not enable interrupts here to avoid some bogus events. Interrupts
347 * will be enabled during the first cor_sreset call.
348 */
349 reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA;
350 res = pcmcia_access_configuration_register(hw_priv->link->handle,
351 &reg);
352 if (res != CS_SUCCESS) {
353 printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
354 dev->name, res);
355 goto done;
356 }
357 mdelay(5);
358
359 sandisk_set_iobase(local);
360
361 HFA384X_OUTB(0xc5, SANDISK_WLAN_ACTIVATION_OFF);
362 udelay(10);
363 HFA384X_OUTB(0x4b, SANDISK_WLAN_ACTIVATION_OFF);
364 udelay(10);
365
366done:
367 kfree(parse);
368 return ret;
369}
370
371
372static void prism2_pccard_cor_sreset(local_info_t *local)
373{
374 int res;
375 conf_reg_t reg;
376 struct hostap_cs_priv *hw_priv = local->hw_priv;
377
378 if (!prism2_pccard_card_present(local))
379 return;
380
381 reg.Function = 0;
382 reg.Action = CS_READ;
383 reg.Offset = CISREG_COR;
384 reg.Value = 0;
385 res = pcmcia_access_configuration_register(hw_priv->link->handle,
386 &reg);
387 if (res != CS_SUCCESS) {
388 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
389 res);
390 return;
391 }
392 printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n",
393 reg.Value);
394
395 reg.Action = CS_WRITE;
396 reg.Value |= COR_SOFT_RESET;
397 res = pcmcia_access_configuration_register(hw_priv->link->handle,
398 &reg);
399 if (res != CS_SUCCESS) {
400 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
401 res);
402 return;
403 }
404
405 mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
406
407 reg.Value &= ~COR_SOFT_RESET;
408 if (hw_priv->sandisk_connectplus)
409 reg.Value |= COR_IREQ_ENA;
410 res = pcmcia_access_configuration_register(hw_priv->link->handle,
411 &reg);
412 if (res != CS_SUCCESS) {
413 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
414 res);
415 return;
416 }
417
418 mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
419
420 if (hw_priv->sandisk_connectplus)
421 sandisk_set_iobase(local);
422}
423
424
425static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
426{
427 int res;
428 conf_reg_t reg;
429 int old_cor;
430 struct hostap_cs_priv *hw_priv = local->hw_priv;
431
432 if (!prism2_pccard_card_present(local))
433 return;
434
435 if (hw_priv->sandisk_connectplus) {
436 sandisk_write_hcr(local, hcr);
437 return;
438 }
439
440 reg.Function = 0;
441 reg.Action = CS_READ;
442 reg.Offset = CISREG_COR;
443 reg.Value = 0;
444 res = pcmcia_access_configuration_register(hw_priv->link->handle,
445 &reg);
446 if (res != CS_SUCCESS) {
447 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
448 "(%d)\n", res);
449 return;
450 }
451 printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
452 reg.Value);
453 old_cor = reg.Value;
454
455 reg.Action = CS_WRITE;
456 reg.Value |= COR_SOFT_RESET;
457 res = pcmcia_access_configuration_register(hw_priv->link->handle,
458 &reg);
459 if (res != CS_SUCCESS) {
460 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
461 "(%d)\n", res);
462 return;
463 }
464
465 mdelay(10);
466
467 /* Setup Genesis mode */
468 reg.Action = CS_WRITE;
469 reg.Value = hcr;
470 reg.Offset = CISREG_CCSR;
471 res = pcmcia_access_configuration_register(hw_priv->link->handle,
472 &reg);
473 if (res != CS_SUCCESS) {
474 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
475 "(%d)\n", res);
476 return;
477 }
478 mdelay(10);
479
480 reg.Action = CS_WRITE;
481 reg.Offset = CISREG_COR;
482 reg.Value = old_cor & ~COR_SOFT_RESET;
483 res = pcmcia_access_configuration_register(hw_priv->link->handle,
484 &reg);
485 if (res != CS_SUCCESS) {
486 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
487 "(%d)\n", res);
488 return;
489 }
490
491 mdelay(10);
492}
493
494
495static int prism2_pccard_dev_open(local_info_t *local)
496{
497 struct hostap_cs_priv *hw_priv = local->hw_priv;
498 hw_priv->link->open++;
499 return 0;
500}
501
502
503static int prism2_pccard_dev_close(local_info_t *local)
504{
505 struct hostap_cs_priv *hw_priv;
506
507 if (local == NULL || local->hw_priv == NULL)
508 return 1;
509 hw_priv = local->hw_priv;
510 if (hw_priv->link == NULL)
511 return 1;
512
513 if (!hw_priv->link->open) {
514 printk(KERN_WARNING "%s: prism2_pccard_dev_close(): "
515 "link not open?!\n", local->dev->name);
516 return 1;
517 }
518
519 hw_priv->link->open--;
520
521 return 0;
522}
523
524
525static struct prism2_helper_functions prism2_pccard_funcs =
526{
527 .card_present = prism2_pccard_card_present,
528 .cor_sreset = prism2_pccard_cor_sreset,
529 .dev_open = prism2_pccard_dev_open,
530 .dev_close = prism2_pccard_dev_close,
531 .genesis_reset = prism2_pccard_genesis_reset,
532 .hw_type = HOSTAP_HW_PCCARD,
533};
534
535
536/* allocate local data and register with CardServices
537 * initialize dev_link structure, but do not configure the card yet */
538static dev_link_t *prism2_attach(void)
539{
540 dev_link_t *link;
541 client_reg_t client_reg;
542 int ret;
543
544 link = kmalloc(sizeof(dev_link_t), GFP_KERNEL);
545 if (link == NULL)
546 return NULL;
547
548 memset(link, 0, sizeof(dev_link_t));
549
550 PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info);
551 link->conf.Vcc = 33;
552 link->conf.IntType = INT_MEMORY_AND_IO;
553
554 /* register with CardServices */
555 link->next = dev_list;
556 dev_list = link;
557 client_reg.dev_info = &dev_info;
558 client_reg.Version = 0x0210;
559 client_reg.event_callback_args.client_data = link;
560 ret = pcmcia_register_client(&link->handle, &client_reg);
561 if (ret != CS_SUCCESS) {
562 cs_error(link->handle, RegisterClient, ret);
563 prism2_detach(link);
564 return NULL;
565 }
566 return link;
567}
568
569
570static void prism2_detach(dev_link_t *link)
571{
572 dev_link_t **linkp;
573
574 PDEBUG(DEBUG_FLOW, "prism2_detach\n");
575
576 for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
577 if (*linkp == link)
578 break;
579 if (*linkp == NULL) {
580 printk(KERN_WARNING "%s: Attempt to detach non-existing "
581 "PCMCIA client\n", dev_info);
582 return;
583 }
584
585 if (link->state & DEV_CONFIG) {
586 prism2_release((u_long)link);
587 }
588
589 if (link->handle) {
590 int res = pcmcia_deregister_client(link->handle);
591 if (res) {
592 printk("CardService(DeregisterClient) => %d\n", res);
593 cs_error(link->handle, DeregisterClient, res);
594 }
595 }
596
597 *linkp = link->next;
598 /* release net devices */
599 if (link->priv) {
600 struct net_device *dev;
601 struct hostap_interface *iface;
602 dev = link->priv;
603 iface = netdev_priv(dev);
604 kfree(iface->local->hw_priv);
605 iface->local->hw_priv = NULL;
606 prism2_free_local_data(dev);
607 }
608 kfree(link);
609}
610
611
612#define CS_CHECK(fn, ret) \
613do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
614
615#define CFG_CHECK2(fn, retf) \
616do { int ret = (retf); \
617if (ret != 0) { \
618 PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \
619 cs_error(link->handle, fn, ret); \
620 goto next_entry; \
621} \
622} while (0)
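/* Note: CS_CHECK() relies on 'last_fn', 'last_ret' and a 'cs_failed:' label in
 * the calling function, and CFG_CHECK2() relies on a 'link' variable and a
 * 'next_entry:' label; both are used only from prism2_config() below. */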
623
624
625/* run after a CARD_INSERTION event is received to configure the PCMCIA
626 * socket and make the device available to the system */
627static int prism2_config(dev_link_t *link)
628{
629 struct net_device *dev;
630 struct hostap_interface *iface;
631 local_info_t *local;
632 int ret = 1;
633 tuple_t tuple;
634 cisparse_t *parse;
635 int last_fn, last_ret;
636 u_char buf[64];
637 config_info_t conf;
638 cistpl_cftable_entry_t dflt = { 0 };
639 struct hostap_cs_priv *hw_priv;
640
641 PDEBUG(DEBUG_FLOW, "prism2_config()\n");
642
643 parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
644 hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
645 if (parse == NULL || hw_priv == NULL) {
646 kfree(parse);
647 kfree(hw_priv);
648 ret = -ENOMEM;
649 goto failed;
650 }
651 memset(hw_priv, 0, sizeof(*hw_priv));
652
653 tuple.DesiredTuple = CISTPL_CONFIG;
654 tuple.Attributes = 0;
655 tuple.TupleData = buf;
656 tuple.TupleDataMax = sizeof(buf);
657 tuple.TupleOffset = 0;
658 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple));
659 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link->handle, &tuple));
660 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link->handle, &tuple, parse));
661 link->conf.ConfigBase = parse->config.base;
662 link->conf.Present = parse->config.rmask[0];
663
664 CS_CHECK(GetConfigurationInfo,
665 pcmcia_get_configuration_info(link->handle, &conf));
666 PDEBUG(DEBUG_HW, "%s: %s Vcc=%d (from config)\n", dev_info,
667 ignore_cis_vcc ? "ignoring" : "setting", conf.Vcc);
668 link->conf.Vcc = conf.Vcc;
669
670 /* Look for an appropriate configuration table entry in the CIS */
671 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
672 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple));
673 for (;;) {
674 cistpl_cftable_entry_t *cfg = &(parse->cftable_entry);
675 CFG_CHECK2(GetTupleData,
676 pcmcia_get_tuple_data(link->handle, &tuple));
677 CFG_CHECK2(ParseTuple,
678 pcmcia_parse_tuple(link->handle, &tuple, parse));
679
680 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
681 dflt = *cfg;
682 if (cfg->index == 0)
683 goto next_entry;
684 link->conf.ConfigIndex = cfg->index;
685 PDEBUG(DEBUG_EXTRA, "Checking CFTABLE_ENTRY 0x%02X "
686 "(default 0x%02X)\n", cfg->index, dflt.index);
687
688 /* Does this card need audio output? */
689 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
690 link->conf.Attributes |= CONF_ENABLE_SPKR;
691 link->conf.Status = CCSR_AUDIO_ENA;
692 }
693
694 /* Use power settings for Vcc and Vpp if present */
695 /* Note that the CIS values need to be rescaled */
696 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
697 if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] /
698 10000 && !ignore_cis_vcc) {
699 PDEBUG(DEBUG_EXTRA, " Vcc mismatch - skipping"
700 " this entry\n");
701 goto next_entry;
702 }
703 } else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
704 if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] /
705 10000 && !ignore_cis_vcc) {
706 PDEBUG(DEBUG_EXTRA, " Vcc (default) mismatch "
707 "- skipping this entry\n");
708 goto next_entry;
709 }
710 }
711
712 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
713 link->conf.Vpp1 = link->conf.Vpp2 =
714 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
715 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
716 link->conf.Vpp1 = link->conf.Vpp2 =
717 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
718
719 /* Do we need to allocate an interrupt? */
720 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
721 link->conf.Attributes |= CONF_ENABLE_IRQ;
722 else if (!(link->conf.Attributes & CONF_ENABLE_IRQ)) {
723 /* At least Compaq WL200 does not have IRQInfo1 set,
724 * but it does not work without interrupts.. */
725 printk("Config has no IRQ info, but trying to enable "
726 "IRQ anyway..\n");
727 link->conf.Attributes |= CONF_ENABLE_IRQ;
728 }
729
730 /* IO window settings */
731 PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
732 "dflt.io.nwin=%d\n",
733 cfg->io.nwin, dflt.io.nwin);
734 link->io.NumPorts1 = link->io.NumPorts2 = 0;
735 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
736 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
737 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
738 PDEBUG(DEBUG_EXTRA, "io->flags = 0x%04X, "
739 "io.base=0x%04x, len=%d\n", io->flags,
740 io->win[0].base, io->win[0].len);
741 if (!(io->flags & CISTPL_IO_8BIT))
742 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
743 if (!(io->flags & CISTPL_IO_16BIT))
744 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
745 link->io.IOAddrLines = io->flags &
746 CISTPL_IO_LINES_MASK;
747 link->io.BasePort1 = io->win[0].base;
748 link->io.NumPorts1 = io->win[0].len;
749 if (io->nwin > 1) {
750 link->io.Attributes2 = link->io.Attributes1;
751 link->io.BasePort2 = io->win[1].base;
752 link->io.NumPorts2 = io->win[1].len;
753 }
754 }
755
756 /* This reserves IO space but doesn't actually enable it */
757 CFG_CHECK2(RequestIO,
758 pcmcia_request_io(link->handle, &link->io));
759
760 /* This configuration table entry is OK */
761 break;
762
763 next_entry:
764 CS_CHECK(GetNextTuple,
765 pcmcia_get_next_tuple(link->handle, &tuple));
766 }
767
768 /* Need to allocate net_device before requesting IRQ handler */
769 dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
770 &handle_to_dev(link->handle));
771 if (dev == NULL)
772 goto failed;
773 link->priv = dev;
774
775 iface = netdev_priv(dev);
776 local = iface->local;
777 local->hw_priv = hw_priv;
778 hw_priv->link = link;
779 strcpy(hw_priv->node.dev_name, dev->name);
780 link->dev = &hw_priv->node;
781
782 /*
783 * Allocate an interrupt line. Note that this does not assign a
784 * handler to the interrupt, unless the 'Handler' member of the
785 * irq structure is initialized.
786 */
787 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
788 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
789 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
790 link->irq.Handler = prism2_interrupt;
791 link->irq.Instance = dev;
792 CS_CHECK(RequestIRQ,
793 pcmcia_request_irq(link->handle, &link->irq));
794 }
795
796 /*
797 * This actually configures the PCMCIA socket -- setting up
798 * the I/O windows and the interrupt mapping, and putting the
799 * card and host interface into "Memory and IO" mode.
800 */
801 CS_CHECK(RequestConfiguration,
802 pcmcia_request_configuration(link->handle, &link->conf));
803
804 dev->irq = link->irq.AssignedIRQ;
805 dev->base_addr = link->io.BasePort1;
806
807 /* Finally, report what we've done */
808 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
809 dev_info, link->conf.ConfigIndex,
810 link->conf.Vcc / 10, link->conf.Vcc % 10);
811 if (link->conf.Vpp1)
812 printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
813 link->conf.Vpp1 % 10);
814 if (link->conf.Attributes & CONF_ENABLE_IRQ)
815 printk(", irq %d", link->irq.AssignedIRQ);
816 if (link->io.NumPorts1)
817 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
818 link->io.BasePort1+link->io.NumPorts1-1);
819 if (link->io.NumPorts2)
820 printk(" & 0x%04x-0x%04x", link->io.BasePort2,
821 link->io.BasePort2+link->io.NumPorts2-1);
822 printk("\n");
823
824 link->state |= DEV_CONFIG;
825 link->state &= ~DEV_CONFIG_PENDING;
826
827 local->shutdown = 0;
828
829 sandisk_enable_wireless(dev);
830
831 ret = prism2_hw_config(dev, 1);
832 if (!ret) {
833 ret = hostap_hw_ready(dev);
834 if (ret == 0 && local->ddev)
835 strcpy(hw_priv->node.dev_name, local->ddev->name);
836 }
837 kfree(parse);
838 return ret;
839
840 cs_failed:
841 cs_error(link->handle, last_fn, last_ret);
842
843 failed:
844 kfree(parse);
845 kfree(hw_priv);
846 prism2_release((u_long)link);
847 return ret;
848}
849
850
851static void prism2_release(u_long arg)
852{
853 dev_link_t *link = (dev_link_t *)arg;
854
855 PDEBUG(DEBUG_FLOW, "prism2_release\n");
856
857 if (link->priv) {
858 struct net_device *dev = link->priv;
859 struct hostap_interface *iface;
860
861 iface = netdev_priv(dev);
862 if (link->state & DEV_CONFIG)
863 prism2_hw_shutdown(dev, 0);
864 iface->local->shutdown = 1;
865 }
866
867 if (link->win)
868 pcmcia_release_window(link->win);
869 pcmcia_release_configuration(link->handle);
870 if (link->io.NumPorts1)
871 pcmcia_release_io(link->handle, &link->io);
872 if (link->irq.AssignedIRQ)
873 pcmcia_release_irq(link->handle, &link->irq);
874
875 link->state &= ~DEV_CONFIG;
876
877 PDEBUG(DEBUG_FLOW, "release - done\n");
878}
879
880
881static int prism2_event(event_t event, int priority,
882 event_callback_args_t *args)
883{
884 dev_link_t *link = args->client_data;
885 struct net_device *dev = (struct net_device *) link->priv;
886
887 switch (event) {
888 case CS_EVENT_CARD_INSERTION:
889 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_CARD_INSERTION\n", dev_info);
890 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
891 if (prism2_config(link)) {
892 PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n");
893 }
894 break;
895
896 case CS_EVENT_CARD_REMOVAL:
897 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_CARD_REMOVAL\n", dev_info);
898 link->state &= ~DEV_PRESENT;
899 if (link->state & DEV_CONFIG) {
900 netif_stop_queue(dev);
901 netif_device_detach(dev);
902 prism2_release((u_long) link);
903 }
904 break;
905
906 case CS_EVENT_PM_SUSPEND:
907 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
908 link->state |= DEV_SUSPEND;
909 /* fall through */
910
911 case CS_EVENT_RESET_PHYSICAL:
912 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info);
913 if (link->state & DEV_CONFIG) {
914 if (link->open) {
915 netif_stop_queue(dev);
916 netif_device_detach(dev);
917 }
918 prism2_suspend(dev);
919 pcmcia_release_configuration(link->handle);
920 }
921 break;
922
923 case CS_EVENT_PM_RESUME:
924 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
925 link->state &= ~DEV_SUSPEND;
926 /* fall through */
927
928 case CS_EVENT_CARD_RESET:
929 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_CARD_RESET\n", dev_info);
930 if (link->state & DEV_CONFIG) {
931 pcmcia_request_configuration(link->handle,
932 &link->conf);
933 prism2_hw_shutdown(dev, 1);
934 prism2_hw_config(dev, link->open ? 0 : 1);
935 if (link->open) {
936 netif_device_attach(dev);
937 netif_start_queue(dev);
938 }
939 }
940 break;
941
942 default:
943 PDEBUG(DEBUG_EXTRA, "%s: prism2_event() - unknown event %d\n",
944 dev_info, event);
945 break;
946 }
947 return 0;
948}
949
950
951static struct pcmcia_device_id hostap_cs_ids[] = {
952 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
953 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
954 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
955 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
956 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
957 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
958 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002),
959 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b),
960 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612),
961 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613),
962 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002),
963 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002),
964 PCMCIA_DEVICE_MANF_CARD(0x02d2, 0x0001),
965 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
966 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
967 PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000),
968 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
969 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
970 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
971 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
972 0x7a954bd9, 0x74be00c6),
973 PCMCIA_DEVICE_PROD_ID1234(
974 "Intersil", "PRISM 2_5 PCMCIA ADAPTER", "ISL37300P",
975 "Eval-RevA",
976 0x4b801a17, 0x6345a0bf, 0xc9049a39, 0xc23adc0e),
977 PCMCIA_DEVICE_PROD_ID123(
978 "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
979 0xe6ec52ce, 0x08649af2, 0x4b74baa0),
980 PCMCIA_DEVICE_PROD_ID123(
981 "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
982 0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
983 PCMCIA_DEVICE_PROD_ID123(
984 "Instant Wireless ", " Network PC CARD", "Version 01.02",
985 0x11d901af, 0x6e9bd926, 0x4b74baa0),
986 PCMCIA_DEVICE_PROD_ID123(
987 "SMC", "SMC2632W", "Version 01.02",
988 0xc4f8b18b, 0x474a1f2a, 0x4b74baa0),
989 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G",
990 0x2decece3, 0x82067c18),
991 PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card",
992 0x54f7c49c, 0x15a75e5b),
993 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE",
994 0x74c5e40d, 0xdb472a18),
995 PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card",
996 0x0733cc81, 0x0c52f395),
997 PCMCIA_DEVICE_PROD_ID12(
998 "ZoomAir 11Mbps High", "Rate wireless Networking",
999 0x273fe3db, 0x32a1eaee),
1000 PCMCIA_DEVICE_NULL
1001};
1002MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
1003
1004
1005static struct pcmcia_driver hostap_driver = {
1006 .drv = {
1007 .name = "hostap_cs",
1008 },
1009 .attach = prism2_attach,
1010 .detach = prism2_detach,
1011 .owner = THIS_MODULE,
1012 .event = prism2_event,
1013 .id_table = hostap_cs_ids,
1014};
1015
1016static int __init init_prism2_pccard(void)
1017{
1018 printk(KERN_INFO "%s: %s\n", dev_info, version);
1019 return pcmcia_register_driver(&hostap_driver);
1020}
1021
1022static void __exit exit_prism2_pccard(void)
1023{
1024 pcmcia_unregister_driver(&hostap_driver);
1025 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
1026}
1027
1028
1029module_init(init_prism2_pccard);
1030module_exit(exit_prism2_pccard);
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
new file mode 100644
index 000000000000..ab26b52b3e76
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -0,0 +1,766 @@
1static int prism2_enable_aux_port(struct net_device *dev, int enable)
2{
3 u16 val, reg;
4 int i, tries;
5 unsigned long flags;
6 struct hostap_interface *iface;
7 local_info_t *local;
8
9 iface = netdev_priv(dev);
10 local = iface->local;
11
12 if (local->no_pri) {
13 if (enable) {
14 PDEBUG(DEBUG_EXTRA2, "%s: no PRI f/w - assuming Aux "
15 "port is already enabled\n", dev->name);
16 }
17 return 0;
18 }
19
20 spin_lock_irqsave(&local->cmdlock, flags);
21
22 /* wait until busy bit is clear */
23 tries = HFA384X_CMD_BUSY_TIMEOUT;
24 while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
25 tries--;
26 udelay(1);
27 }
28 if (tries == 0) {
29 reg = HFA384X_INW(HFA384X_CMD_OFF);
30 spin_unlock_irqrestore(&local->cmdlock, flags);
31 printk("%s: prism2_enable_aux_port - timeout - reg=0x%04x\n",
32 dev->name, reg);
33 return -ETIMEDOUT;
34 }
35
36 val = HFA384X_INW(HFA384X_CONTROL_OFF);
37
38 if (enable) {
39 HFA384X_OUTW(HFA384X_AUX_MAGIC0, HFA384X_PARAM0_OFF);
40 HFA384X_OUTW(HFA384X_AUX_MAGIC1, HFA384X_PARAM1_OFF);
41 HFA384X_OUTW(HFA384X_AUX_MAGIC2, HFA384X_PARAM2_OFF);
42
43 if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_DISABLED)
44 printk("prism2_enable_aux_port: was not disabled!?\n");
45 val &= ~HFA384X_AUX_PORT_MASK;
46 val |= HFA384X_AUX_PORT_ENABLE;
47 } else {
48 HFA384X_OUTW(0, HFA384X_PARAM0_OFF);
49 HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
50 HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
51
52 if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_ENABLED)
53 printk("prism2_enable_aux_port: was not enabled!?\n");
54 val &= ~HFA384X_AUX_PORT_MASK;
55 val |= HFA384X_AUX_PORT_DISABLE;
56 }
57 HFA384X_OUTW(val, HFA384X_CONTROL_OFF);
58
59 udelay(5);
60
61 i = 10000;
62 while (i > 0) {
63 val = HFA384X_INW(HFA384X_CONTROL_OFF);
64 val &= HFA384X_AUX_PORT_MASK;
65
66 if ((enable && val == HFA384X_AUX_PORT_ENABLED) ||
67 (!enable && val == HFA384X_AUX_PORT_DISABLED))
68 break;
69
70 udelay(10);
71 i--;
72 }
73
74 spin_unlock_irqrestore(&local->cmdlock, flags);
75
76 if (i == 0) {
77 printk("prism2_enable_aux_port(%d) timed out\n",
78 enable);
79 return -ETIMEDOUT;
80 }
81
82 return 0;
83}
84
85
86static int hfa384x_from_aux(struct net_device *dev, unsigned int addr, int len,
87 void *buf)
88{
89 u16 page, offset;
90 if (addr & 1 || len & 1)
91 return -1;
92
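	/* The AUX port views card memory in 128-byte pages: the upper bits of
	 * addr select the page and the low 7 bits the offset within it. */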
93 page = addr >> 7;
94 offset = addr & 0x7f;
95
96 HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
97 HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
98
99 udelay(5);
100
101#ifdef PRISM2_PCI
102 {
103 u16 *pos = (u16 *) buf;
104 while (len > 0) {
105 *pos++ = HFA384X_INW_DATA(HFA384X_AUXDATA_OFF);
106 len -= 2;
107 }
108 }
109#else /* PRISM2_PCI */
110 HFA384X_INSW(HFA384X_AUXDATA_OFF, buf, len / 2);
111#endif /* PRISM2_PCI */
112
113 return 0;
114}
115
116
117static int hfa384x_to_aux(struct net_device *dev, unsigned int addr, int len,
118 void *buf)
119{
120 u16 page, offset;
121 if (addr & 1 || len & 1)
122 return -1;
123
124 page = addr >> 7;
125 offset = addr & 0x7f;
126
127 HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
128 HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
129
130 udelay(5);
131
132#ifdef PRISM2_PCI
133 {
134 u16 *pos = (u16 *) buf;
135 while (len > 0) {
136 HFA384X_OUTW_DATA(*pos++, HFA384X_AUXDATA_OFF);
137 len -= 2;
138 }
139 }
140#else /* PRISM2_PCI */
141 HFA384X_OUTSW(HFA384X_AUXDATA_OFF, buf, len / 2);
142#endif /* PRISM2_PCI */
143
144 return 0;
145}
146
147
148static int prism2_pda_ok(u8 *buf)
149{
150 u16 *pda = (u16 *) buf;
151 int pos;
152 u16 len, pdr;
153
154 if (buf[0] == 0xff && buf[1] == 0x00 && buf[2] == 0xff &&
155 buf[3] == 0x00)
156 return 0;
157
158 pos = 0;
159 while (pos + 1 < PRISM2_PDA_SIZE / 2) {
160 len = le16_to_cpu(pda[pos]);
161 pdr = le16_to_cpu(pda[pos + 1]);
162 if (len == 0 || pos + len > PRISM2_PDA_SIZE / 2)
163 return 0;
164
165 if (pdr == 0x0000 && len == 2) {
166 /* PDA end found */
167 return 1;
168 }
169
170 pos += len + 1;
171 }
172
173 return 0;
174}
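/* Illustrative example: the shortest buffer prism2_pda_ok() accepts is a lone
 * end record, len=0x0002 followed by pdr=0x0000, i.e. the little-endian bytes
 * 02 00 00 00 at the start of the PDA. */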
175
176
177static int prism2_download_aux_dump(struct net_device *dev,
178 unsigned int addr, int len, u8 *buf)
179{
180 int res;
181
182 prism2_enable_aux_port(dev, 1);
183 res = hfa384x_from_aux(dev, addr, len, buf);
184 prism2_enable_aux_port(dev, 0);
185 if (res)
186 return -1;
187
188 return 0;
189}
190
191
192static u8 * prism2_read_pda(struct net_device *dev)
193{
194 u8 *buf;
195 int res, i, found = 0;
196#define NUM_PDA_ADDRS 4
197 unsigned int pda_addr[NUM_PDA_ADDRS] = {
198 0x7f0000 /* cards other than HFA3841 */,
199 0x3f0000 /* HFA3841 */,
200 0x390000 /* apparently used in older cards */,
201 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */,
202 };
203
204 buf = (u8 *) kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL);
205 if (buf == NULL)
206 return NULL;
207
208 /* Note: wlan card should be in initial state (just after init cmd)
209 * and no other operations should be performed concurrently. */
210
211 prism2_enable_aux_port(dev, 1);
212
213 for (i = 0; i < NUM_PDA_ADDRS; i++) {
214 PDEBUG(DEBUG_EXTRA2, "%s: trying to read PDA from 0x%08x",
215 dev->name, pda_addr[i]);
216 res = hfa384x_from_aux(dev, pda_addr[i], PRISM2_PDA_SIZE, buf);
217 if (res)
218 continue;
219 if (res == 0 && prism2_pda_ok(buf)) {
220 PDEBUG2(DEBUG_EXTRA2, ": OK\n");
221 found = 1;
222 break;
223 } else {
224 PDEBUG2(DEBUG_EXTRA2, ": failed\n");
225 }
226 }
227
228 prism2_enable_aux_port(dev, 0);
229
230 if (!found) {
231 printk(KERN_DEBUG "%s: valid PDA not found\n", dev->name);
232 kfree(buf);
233 buf = NULL;
234 }
235
236 return buf;
237}
238
239
240static int prism2_download_volatile(local_info_t *local,
241 struct prism2_download_data *param)
242{
243 struct net_device *dev = local->dev;
244 int ret = 0, i;
245 u16 param0, param1;
246
247 if (local->hw_downloading) {
248 printk(KERN_WARNING "%s: Already downloading - aborting new "
249 "request\n", dev->name);
250 return -1;
251 }
252
253 local->hw_downloading = 1;
254 if (local->pri_only) {
255 hfa384x_disable_interrupts(dev);
256 } else {
257 prism2_hw_shutdown(dev, 0);
258
259 if (prism2_hw_init(dev, 0)) {
260 printk(KERN_WARNING "%s: Could not initialize card for"
261 " download\n", dev->name);
262 ret = -1;
263 goto out;
264 }
265 }
266
267 if (prism2_enable_aux_port(dev, 1)) {
268 printk(KERN_WARNING "%s: Could not enable AUX port\n",
269 dev->name);
270 ret = -1;
271 goto out;
272 }
273
274 param0 = param->start_addr & 0xffff;
275 param1 = param->start_addr >> 16;
276
277 HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
278 HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
279 if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
280 (HFA384X_PROGMODE_ENABLE_VOLATILE << 8),
281 param0)) {
282 printk(KERN_WARNING "%s: Download command execution failed\n",
283 dev->name);
284 ret = -1;
285 goto out;
286 }
287
288 for (i = 0; i < param->num_areas; i++) {
289 PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
290 dev->name, param->data[i].len, param->data[i].addr);
291 if (hfa384x_to_aux(dev, param->data[i].addr,
292 param->data[i].len, param->data[i].data)) {
293 printk(KERN_WARNING "%s: RAM download at 0x%08x "
294 "(len=%d) failed\n", dev->name,
295 param->data[i].addr, param->data[i].len);
296 ret = -1;
297 goto out;
298 }
299 }
300
301 HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
302 HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
303 if (hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
304 (HFA384X_PROGMODE_DISABLE << 8), param0)) {
305 printk(KERN_WARNING "%s: Download command execution failed\n",
306 dev->name);
307 ret = -1;
308 goto out;
309 }
310 /* ProgMode disable causes the hardware to restart itself from the
311 * given starting address. Give hw some time and ACK command just in
312 * case restart did not happen. */
313 mdelay(5);
314 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
315
316 if (prism2_enable_aux_port(dev, 0)) {
317 printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
318 dev->name);
319 /* continue anyway.. restart should have taken care of this */
320 }
321
322 mdelay(5);
323 local->hw_downloading = 0;
324 if (prism2_hw_config(dev, 2)) {
325 printk(KERN_WARNING "%s: Card configuration after RAM "
326 "download failed\n", dev->name);
327 ret = -1;
328 goto out;
329 }
330
331 out:
332 local->hw_downloading = 0;
333 return ret;
334}
335
336
337static int prism2_enable_genesis(local_info_t *local, int hcr)
338{
339 struct net_device *dev = local->dev;
340 u8 initseq[4] = { 0x00, 0xe1, 0xa1, 0xff };
341 u8 readbuf[4];
342
343 printk(KERN_DEBUG "%s: test Genesis mode with HCR 0x%02x\n",
344 dev->name, hcr);
345 local->func->cor_sreset(local);
346 hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
347 local->func->genesis_reset(local, hcr);
348
349 /* Readback test */
350 hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
351 hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
352 hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
353
354 if (memcmp(initseq, readbuf, sizeof(initseq)) == 0) {
355 printk(KERN_DEBUG "Readback test succeeded, HCR 0x%02x\n",
356 hcr);
357 return 0;
358 } else {
359 printk(KERN_DEBUG "Readback test failed, HCR 0x%02x "
360 "write %02x %02x %02x %02x read %02x %02x %02x %02x\n",
361 hcr, initseq[0], initseq[1], initseq[2], initseq[3],
362 readbuf[0], readbuf[1], readbuf[2], readbuf[3]);
363 return 1;
364 }
365}
366
367
368static int prism2_get_ram_size(local_info_t *local)
369{
370 int ret;
371
372 /* Try to enable genesis mode; 0x1F for x8 SRAM or 0x0F for x16 SRAM */
373 if (prism2_enable_genesis(local, 0x1f) == 0)
374 ret = 8;
375 else if (prism2_enable_genesis(local, 0x0f) == 0)
376 ret = 16;
377 else
378 ret = -1;
379
380 /* Disable genesis mode */
381 local->func->genesis_reset(local, ret == 16 ? 0x07 : 0x17);
382
383 return ret;
384}
385
386
387static int prism2_download_genesis(local_info_t *local,
388 struct prism2_download_data *param)
389{
390 struct net_device *dev = local->dev;
391 int ram16 = 0, i;
392 int ret = 0;
393
394 if (local->hw_downloading) {
395 printk(KERN_WARNING "%s: Already downloading - aborting new "
396 "request\n", dev->name);
397 return -EBUSY;
398 }
399
400 if (!local->func->genesis_reset || !local->func->cor_sreset) {
401 printk(KERN_INFO "%s: Genesis mode downloading not supported "
402 "with this hwmodel\n", dev->name);
403 return -EOPNOTSUPP;
404 }
405
406 local->hw_downloading = 1;
407
408 if (prism2_enable_aux_port(dev, 1)) {
409 printk(KERN_DEBUG "%s: failed to enable AUX port\n",
410 dev->name);
411 ret = -EIO;
412 goto out;
413 }
414
415 if (local->sram_type == -1) {
416 /* 0x1F for x8 SRAM or 0x0F for x16 SRAM */
417 if (prism2_enable_genesis(local, 0x1f) == 0) {
418 ram16 = 0;
419 PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x8 "
420 "SRAM\n", dev->name);
421 } else if (prism2_enable_genesis(local, 0x0f) == 0) {
422 ram16 = 1;
423 PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x16 "
424 "SRAM\n", dev->name);
425 } else {
426 printk(KERN_DEBUG "%s: Could not initiate genesis "
427 "mode\n", dev->name);
428 ret = -EIO;
429 goto out;
430 }
431 } else {
432 if (prism2_enable_genesis(local, local->sram_type == 8 ?
433 0x1f : 0x0f)) {
434 printk(KERN_DEBUG "%s: Failed to set Genesis "
435 "mode (sram_type=%d)\n", dev->name,
436 local->sram_type);
437 ret = -EIO;
438 goto out;
439 }
440 ram16 = local->sram_type != 8;
441 }
442
443 for (i = 0; i < param->num_areas; i++) {
444 PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
445 dev->name, param->data[i].len, param->data[i].addr);
446 if (hfa384x_to_aux(dev, param->data[i].addr,
447 param->data[i].len, param->data[i].data)) {
448 printk(KERN_WARNING "%s: RAM download at 0x%08x "
449 "(len=%d) failed\n", dev->name,
450 param->data[i].addr, param->data[i].len);
451 ret = -EIO;
452 goto out;
453 }
454 }
455
456 PDEBUG(DEBUG_EXTRA2, "Disable genesis mode\n");
457 local->func->genesis_reset(local, ram16 ? 0x07 : 0x17);
458 if (prism2_enable_aux_port(dev, 0)) {
459 printk(KERN_DEBUG "%s: Failed to disable AUX port\n",
460 dev->name);
461 }
462
463 mdelay(5);
464 local->hw_downloading = 0;
465
466 PDEBUG(DEBUG_EXTRA2, "Trying to initialize card\n");
467 /*
468 * Make sure the INIT command does not generate a command completion
469 * event by disabling interrupts.
470 */
471 hfa384x_disable_interrupts(dev);
472 if (prism2_hw_init(dev, 1)) {
473 printk(KERN_DEBUG "%s: Initialization after genesis mode "
474 "download failed\n", dev->name);
475 ret = -EIO;
476 goto out;
477 }
478
479 PDEBUG(DEBUG_EXTRA2, "Card initialized - running PRI only\n");
480 if (prism2_hw_init2(dev, 1)) {
481 printk(KERN_DEBUG "%s: Initialization(2) after genesis mode "
482 "download failed\n", dev->name);
483 ret = -EIO;
484 goto out;
485 }
486
487 out:
488 local->hw_downloading = 0;
489 return ret;
490}
491
492
493#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
494/* Note! Non-volatile downloading functionality has not yet been tested
495 * thoroughly and it may corrupt flash image and effectively kill the card that
496 * is being updated. You have been warned. */
497
498static inline int prism2_download_block(struct net_device *dev,
499 u32 addr, u8 *data,
500 u32 bufaddr, int rest_len)
501{
502 u16 param0, param1;
503 int block_len;
504
505 block_len = rest_len < 4096 ? rest_len : 4096;
506
507 param0 = addr & 0xffff;
508 param1 = addr >> 16;
509
510 HFA384X_OUTW(block_len, HFA384X_PARAM2_OFF);
511 HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
512
513 if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
514 (HFA384X_PROGMODE_ENABLE_NON_VOLATILE << 8),
515 param0)) {
516 printk(KERN_WARNING "%s: Flash download command execution "
517 "failed\n", dev->name);
518 return -1;
519 }
520
521 if (hfa384x_to_aux(dev, bufaddr, block_len, data)) {
522 printk(KERN_WARNING "%s: flash download at 0x%08x "
523 "(len=%d) failed\n", dev->name, addr, block_len);
524 return -1;
525 }
526
527 HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
528 HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
529 if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
530 (HFA384X_PROGMODE_PROGRAM_NON_VOLATILE << 8),
531 0)) {
532 printk(KERN_WARNING "%s: Flash write command execution "
533 "failed\n", dev->name);
534 return -1;
535 }
536
537 return block_len;
538}
539
540
541static int prism2_download_nonvolatile(local_info_t *local,
542 struct prism2_download_data *dl)
543{
544 struct net_device *dev = local->dev;
545 int ret = 0, i;
546 struct {
547 u16 page;
548 u16 offset;
549 u16 len;
550 } dlbuffer;
551 u32 bufaddr;
552
553 if (local->hw_downloading) {
554 printk(KERN_WARNING "%s: Already downloading - aborting new "
555 "request\n", dev->name);
556 return -1;
557 }
558
559 ret = local->func->get_rid(dev, HFA384X_RID_DOWNLOADBUFFER,
560 &dlbuffer, 6, 0);
561
562 if (ret < 0) {
563 printk(KERN_WARNING "%s: Could not read download buffer "
564 "parameters\n", dev->name);
565 goto out;
566 }
567
568 dlbuffer.page = le16_to_cpu(dlbuffer.page);
569 dlbuffer.offset = le16_to_cpu(dlbuffer.offset);
570 dlbuffer.len = le16_to_cpu(dlbuffer.len);
571
572 printk(KERN_DEBUG "Download buffer: %d bytes at 0x%04x:0x%04x\n",
573 dlbuffer.len, dlbuffer.page, dlbuffer.offset);
574
575 bufaddr = (dlbuffer.page << 7) + dlbuffer.offset;
576
577 local->hw_downloading = 1;
578
579 if (!local->pri_only) {
580 prism2_hw_shutdown(dev, 0);
581
582 if (prism2_hw_init(dev, 0)) {
583 printk(KERN_WARNING "%s: Could not initialize card for"
584 " download\n", dev->name);
585 ret = -1;
586 goto out;
587 }
588 }
589
590 hfa384x_disable_interrupts(dev);
591
592 if (prism2_enable_aux_port(dev, 1)) {
593 printk(KERN_WARNING "%s: Could not enable AUX port\n",
594 dev->name);
595 ret = -1;
596 goto out;
597 }
598
599 printk(KERN_DEBUG "%s: starting flash download\n", dev->name);
600 for (i = 0; i < dl->num_areas; i++) {
601 int rest_len = dl->data[i].len;
602 int data_off = 0;
603
604 while (rest_len > 0) {
605 int block_len;
606
607 block_len = prism2_download_block(
608 dev, dl->data[i].addr + data_off,
609 dl->data[i].data + data_off, bufaddr,
610 rest_len);
611
612 if (block_len < 0) {
613 ret = -1;
614 goto out;
615 }
616
617 rest_len -= block_len;
618 data_off += block_len;
619 }
620 }
621
622 HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
623 HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
624 if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
625 (HFA384X_PROGMODE_DISABLE << 8), 0)) {
626 printk(KERN_WARNING "%s: Download command execution failed\n",
627 dev->name);
628 ret = -1;
629 goto out;
630 }
631
632 if (prism2_enable_aux_port(dev, 0)) {
633 printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
634 dev->name);
635 /* continue anyway.. restart should have taken care of this */
636 }
637
638 mdelay(5);
639
640 local->func->hw_reset(dev);
641 local->hw_downloading = 0;
642 if (prism2_hw_config(dev, 2)) {
643 printk(KERN_WARNING "%s: Card configuration after flash "
644 "download failed\n", dev->name);
645 ret = -1;
646 } else {
647 printk(KERN_INFO "%s: Card initialized successfully after "
648 "flash download\n", dev->name);
649 }
650
651 out:
652 local->hw_downloading = 0;
653 return ret;
654}
655#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
656
657
658static void prism2_download_free_data(struct prism2_download_data *dl)
659{
660 int i;
661
662 if (dl == NULL)
663 return;
664
665 for (i = 0; i < dl->num_areas; i++)
666 kfree(dl->data[i].data);
667 kfree(dl);
668}
669
670
671static int prism2_download(local_info_t *local,
672 struct prism2_download_param *param)
673{
674 int ret = 0;
675 int i;
676 u32 total_len = 0;
677 struct prism2_download_data *dl = NULL;
678
679 printk(KERN_DEBUG "prism2_download: dl_cmd=%d start_addr=0x%08x "
680 "num_areas=%d\n",
681 param->dl_cmd, param->start_addr, param->num_areas);
682
683 if (param->num_areas > 100) {
684 ret = -EINVAL;
685 goto out;
686 }
687
688 dl = kmalloc(sizeof(*dl) + param->num_areas *
689 sizeof(struct prism2_download_data_area), GFP_KERNEL);
690 if (dl == NULL) {
691 ret = -ENOMEM;
692 goto out;
693 }
694 memset(dl, 0, sizeof(*dl) + param->num_areas *
695 sizeof(struct prism2_download_data_area));
696 dl->dl_cmd = param->dl_cmd;
697 dl->start_addr = param->start_addr;
698 dl->num_areas = param->num_areas;
699 for (i = 0; i < param->num_areas; i++) {
700 PDEBUG(DEBUG_EXTRA2,
701 " area %d: addr=0x%08x len=%d ptr=0x%p\n",
702 i, param->data[i].addr, param->data[i].len,
703 param->data[i].ptr);
704
705 dl->data[i].addr = param->data[i].addr;
706 dl->data[i].len = param->data[i].len;
707
708 total_len += param->data[i].len;
709 if (param->data[i].len > PRISM2_MAX_DOWNLOAD_AREA_LEN ||
710 total_len > PRISM2_MAX_DOWNLOAD_LEN) {
711 ret = -E2BIG;
712 goto out;
713 }
714
715 dl->data[i].data = kmalloc(dl->data[i].len, GFP_KERNEL);
716 if (dl->data[i].data == NULL) {
717 ret = -ENOMEM;
718 goto out;
719 }
720
721 if (copy_from_user(dl->data[i].data, param->data[i].ptr,
722 param->data[i].len)) {
723 ret = -EFAULT;
724 goto out;
725 }
726 }
727
728 switch (param->dl_cmd) {
729 case PRISM2_DOWNLOAD_VOLATILE:
730 case PRISM2_DOWNLOAD_VOLATILE_PERSISTENT:
731 ret = prism2_download_volatile(local, dl);
732 break;
733 case PRISM2_DOWNLOAD_VOLATILE_GENESIS:
734 case PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT:
735 ret = prism2_download_genesis(local, dl);
736 break;
737 case PRISM2_DOWNLOAD_NON_VOLATILE:
738#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
739 ret = prism2_download_nonvolatile(local, dl);
740#else /* PRISM2_NON_VOLATILE_DOWNLOAD */
741 printk(KERN_INFO "%s: non-volatile downloading not enabled\n",
742 local->dev->name);
743 ret = -EOPNOTSUPP;
744#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
745 break;
746 default:
747 printk(KERN_DEBUG "%s: unsupported download command %d\n",
748 local->dev->name, param->dl_cmd);
749 ret = -EINVAL;
750 break;
751 }
752
753 out:
754 if (ret == 0 && dl &&
755 param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT) {
756 prism2_download_free_data(local->dl_pri);
757 local->dl_pri = dl;
758 } else if (ret == 0 && dl &&
759 param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_PERSISTENT) {
760 prism2_download_free_data(local->dl_sec);
761 local->dl_sec = dl;
762 } else
763 prism2_download_free_data(dl);
764
765 return ret;
766}
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
new file mode 100644
index 000000000000..e533a663deda
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -0,0 +1,3445 @@
1/*
2 * Host AP (software wireless LAN access point) driver for
3 * Intersil Prism2/2.5/3.
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <jkmaline@cc.hut.fi>
7 * Copyright (c) 2002-2005, Jouni Malinen <jkmaline@cc.hut.fi>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. See README and COPYING for
12 * more details.
13 *
14 * FIX:
15 * - there is currently no way of associating TX packets with the correct WDS
16 * device when a TX Exc/OK event occurs, so all tx_packets and some
17 * tx_errors/tx_dropped are added to the main netdevice; the sw_support
18 * field in txdesc might be used to fix this (using the Alloc event to
19 * increment tx_packets would need some further info in the txfid table)
20 *
21 * Buffer Access Path (BAP) usage:
22 * Prism2 cards have two separate BAPs for accessing the card memory. These
23 * should allow concurrent access to two different frames and the driver
24 * previously used BAP0 for sending data and BAP1 for receiving data.
25 * However, there seems to be number of issues with concurrent access and at
26 * least one know hardware bug in using BAP0 and BAP1 concurrently with PCI
27 * Prism2.5. Therefore, the driver now only uses BAP0 for moving data between
28 * host and card memories. BAP0 accesses are protected with local->baplock
29 * (spin_lock_bh) to prevent concurrent use.
30 */
31
32
33#include <linux/config.h>
34#include <linux/version.h>
35
36#include <asm/delay.h>
37#include <asm/uaccess.h>
38
39#include <linux/slab.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/proc_fs.h>
43#include <linux/if_arp.h>
44#include <linux/delay.h>
45#include <linux/random.h>
46#include <linux/wait.h>
47#include <linux/sched.h>
48#include <linux/rtnetlink.h>
49#include <linux/wireless.h>
50#include <net/iw_handler.h>
51#include <net/ieee80211.h>
52#include <net/ieee80211_crypt.h>
53#include <asm/irq.h>
54
55#include "hostap_80211.h"
56#include "hostap.h"
57#include "hostap_ap.h"
58
59
60/* #define final_version */
61
62static int mtu = 1500;
63module_param(mtu, int, 0444);
64MODULE_PARM_DESC(mtu, "Maximum transfer unit");
65
66static int channel[MAX_PARM_DEVICES] = { 3, DEF_INTS };
67module_param_array(channel, int, NULL, 0444);
68MODULE_PARM_DESC(channel, "Initial channel");
69
70static char essid[33] = "test";
71module_param_string(essid, essid, sizeof(essid), 0444);
72MODULE_PARM_DESC(essid, "Host AP's ESSID");
73
74static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_MASTER, DEF_INTS };
75module_param_array(iw_mode, int, NULL, 0444);
76MODULE_PARM_DESC(iw_mode, "Initial operation mode");
77
78static int beacon_int[MAX_PARM_DEVICES] = { 100, DEF_INTS };
79module_param_array(beacon_int, int, NULL, 0444);
80MODULE_PARM_DESC(beacon_int, "Beacon interval (1 = 1024 usec)");
81
82static int dtim_period[MAX_PARM_DEVICES] = { 1, DEF_INTS };
83module_param_array(dtim_period, int, NULL, 0444);
84MODULE_PARM_DESC(dtim_period, "DTIM period");
85
86static char dev_template[16] = "wlan%d";
87module_param_string(dev_template, dev_template, sizeof(dev_template), 0444);
88MODULE_PARM_DESC(dev_template, "Prefix for network device name (default: "
89 "wlan%d)");
90
91#ifdef final_version
92#define EXTRA_EVENTS_WTERR 0
93#else
94/* check WTERR events (Wait Time-out) in development versions */
95#define EXTRA_EVENTS_WTERR HFA384X_EV_WTERR
96#endif
97
98/* Events that will be using BAP0 */
99#define HFA384X_BAP0_EVENTS \
100 (HFA384X_EV_TXEXC | HFA384X_EV_RX | HFA384X_EV_INFO | HFA384X_EV_TX)
101
102/* event mask, i.e., events that will result in an interrupt */
103#define HFA384X_EVENT_MASK \
104 (HFA384X_BAP0_EVENTS | HFA384X_EV_ALLOC | HFA384X_EV_INFDROP | \
105 HFA384X_EV_CMD | HFA384X_EV_TICK | \
106 EXTRA_EVENTS_WTERR)
107
108/* Default TX control flags: use 802.11 headers and request interrupt for
109 * failed transmits. Frames that request an ACK callback will add the _TX_OK
110 * flag; the _ALT_RTRY flag may be used to select a different retry policy.
111 */
112#define HFA384X_TX_CTRL_FLAGS \
113 (HFA384X_TX_CTRL_802_11 | HFA384X_TX_CTRL_TX_EX)
114
115
116/* ca. 1 usec */
117#define HFA384X_CMD_BUSY_TIMEOUT 5000
118#define HFA384X_BAP_BUSY_TIMEOUT 50000
119
120/* ca. 10 usec */
121#define HFA384X_CMD_COMPL_TIMEOUT 20000
122#define HFA384X_DL_COMPL_TIMEOUT 1000000
123
124/* Wait times for initialization; yield to other processes to avoid busy
125 * waiting for a long time. */
126#define HFA384X_INIT_TIMEOUT (HZ / 2) /* 500 ms */
127#define HFA384X_ALLOC_COMPL_TIMEOUT (HZ / 20) /* 50 ms */
128
129
130static void prism2_hw_reset(struct net_device *dev);
131static void prism2_check_sta_fw_version(local_info_t *local);
132
133#ifdef PRISM2_DOWNLOAD_SUPPORT
134/* hostap_download.c */
135static int prism2_download_aux_dump(struct net_device *dev,
136 unsigned int addr, int len, u8 *buf);
137static u8 * prism2_read_pda(struct net_device *dev);
138static int prism2_download(local_info_t *local,
139 struct prism2_download_param *param);
140static void prism2_download_free_data(struct prism2_download_data *dl);
141static int prism2_download_volatile(local_info_t *local,
142 struct prism2_download_data *param);
143static int prism2_download_genesis(local_info_t *local,
144 struct prism2_download_data *param);
145static int prism2_get_ram_size(local_info_t *local);
146#endif /* PRISM2_DOWNLOAD_SUPPORT */
147
148
149
150
151#ifndef final_version
152/* magic value written to SWSUPPORT0 reg. for detecting whether card is still
153 * present */
154#define HFA384X_MAGIC 0x8A32
155#endif
156
157
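/* Read a single 16-bit value from the given hardware register. */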
158static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
159{
160 return HFA384X_INW(reg);
161}
162
163
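/* Read a snapshot of the command, event status, BAP offset, and SWSUPPORT0
 * registers into @regs. */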
164static void hfa384x_read_regs(struct net_device *dev,
165 struct hfa384x_regs *regs)
166{
167 regs->cmd = HFA384X_INW(HFA384X_CMD_OFF);
168 regs->evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
169 regs->offset0 = HFA384X_INW(HFA384X_OFFSET0_OFF);
170 regs->offset1 = HFA384X_INW(HFA384X_OFFSET1_OFF);
171 regs->swsupport0 = HFA384X_INW(HFA384X_SWSUPPORT0_OFF);
172}
173
174
175/**
176 * __hostap_cmd_queue_free - Free Prism2 command queue entry (private)
177 * @local: pointer to private Host AP driver data
178 * @entry: Prism2 command queue entry to be freed
179 * @del_req: request the entry to be removed
180 *
181 * Internal helper function for freeing Prism2 command queue entries.
182 * Caller must have acquired local->cmdlock before calling this function.
183 */
184static inline void __hostap_cmd_queue_free(local_info_t *local,
185 struct hostap_cmd_queue *entry,
186 int del_req)
187{
188 if (del_req) {
189 entry->del_req = 1;
190 if (!list_empty(&entry->list)) {
191 list_del_init(&entry->list);
192 local->cmd_queue_len--;
193 }
194 }
195
196 if (atomic_dec_and_test(&entry->usecnt) && entry->del_req)
197 kfree(entry);
198}
199
200
201/**
202 * hostap_cmd_queue_free - Free Prism2 command queue entry
203 * @local: pointer to private Host AP driver data
204 * @entry: Prism2 command queue entry to be freed
205 * @del_req: request the entry to be removed
206 *
207 * Free a Prism2 command queue entry.
208 */
209static inline void hostap_cmd_queue_free(local_info_t *local,
210 struct hostap_cmd_queue *entry,
211 int del_req)
212{
213 unsigned long flags;
214
215 spin_lock_irqsave(&local->cmdlock, flags);
216 __hostap_cmd_queue_free(local, entry, del_req);
217 spin_unlock_irqrestore(&local->cmdlock, flags);
218}
219
220
221/**
222 * prism2_clear_cmd_queue - Free all pending Prism2 command queue entries
223 * @local: pointer to private Host AP driver data
224 */
225static void prism2_clear_cmd_queue(local_info_t *local)
226{
227 struct list_head *ptr, *n;
228 unsigned long flags;
229 struct hostap_cmd_queue *entry;
230
231 spin_lock_irqsave(&local->cmdlock, flags);
232 list_for_each_safe(ptr, n, &local->cmd_queue) {
233 entry = list_entry(ptr, struct hostap_cmd_queue, list);
234 atomic_inc(&entry->usecnt);
235 printk(KERN_DEBUG "%s: removed pending cmd_queue entry "
236 "(type=%d, cmd=0x%04x, param0=0x%04x)\n",
237 local->dev->name, entry->type, entry->cmd,
238 entry->param0);
239 __hostap_cmd_queue_free(local, entry, 1);
240 }
241 if (local->cmd_queue_len) {
242 /* This should not happen; print debug message and clear
243 * queue length. */
244 printk(KERN_DEBUG "%s: cmd_queue_len (%d) not zero after "
245 "flush\n", local->dev->name, local->cmd_queue_len);
246 local->cmd_queue_len = 0;
247 }
248 spin_unlock_irqrestore(&local->cmdlock, flags);
249}
250
251
252/**
253 * hfa384x_cmd_issue - Issue a Prism2 command to the hardware
254 * @dev: pointer to net_device
255 * @entry: Prism2 command queue entry to be issued
256 */
257static inline int hfa384x_cmd_issue(struct net_device *dev,
258 struct hostap_cmd_queue *entry)
259{
260 struct hostap_interface *iface;
261 local_info_t *local;
262 int tries;
263 u16 reg;
264 unsigned long flags;
265
266 iface = netdev_priv(dev);
267 local = iface->local;
268
269 if (local->func->card_present && !local->func->card_present(local))
270 return -ENODEV;
271
272 if (entry->issued) {
273 printk(KERN_DEBUG "%s: driver bug - re-issuing command @%p\n",
274 dev->name, entry);
275 }
276
277 /* wait until busy bit is clear; this should always be clear since the
278 * commands are serialized */
279 tries = HFA384X_CMD_BUSY_TIMEOUT;
280 while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
281 tries--;
282 udelay(1);
283 }
284#ifndef final_version
285 if (tries != HFA384X_CMD_BUSY_TIMEOUT) {
286 prism2_io_debug_error(dev, 1);
287 printk(KERN_DEBUG "%s: hfa384x_cmd_issue: cmd reg was busy "
288 "for %d usec\n", dev->name,
289 HFA384X_CMD_BUSY_TIMEOUT - tries);
290 }
291#endif
292 if (tries == 0) {
293 reg = HFA384X_INW(HFA384X_CMD_OFF);
294 prism2_io_debug_error(dev, 2);
295 printk(KERN_DEBUG "%s: hfa384x_cmd_issue - timeout - "
296 "reg=0x%04x\n", dev->name, reg);
297 return -ETIMEDOUT;
298 }
299
300 /* write command */
301 spin_lock_irqsave(&local->cmdlock, flags);
302 HFA384X_OUTW(entry->param0, HFA384X_PARAM0_OFF);
303 HFA384X_OUTW(entry->param1, HFA384X_PARAM1_OFF);
304 HFA384X_OUTW(entry->cmd, HFA384X_CMD_OFF);
305 entry->issued = 1;
306 spin_unlock_irqrestore(&local->cmdlock, flags);
307
308 return 0;
309}
310
311
312/**
313 * hfa384x_cmd - Issue a Prism2 command and wait (sleep) for completion
314 * @dev: pointer to net_device
315 * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
316 * @param0: value for Param0 register
317 * @param1: value for Param1 register (pointer; %NULL if not used)
318 * @resp0: pointer for Resp0 data or %NULL if Resp0 is not needed
319 *
320 * Issue given command (possibly after waiting in command queue) and sleep
321 * until the command is completed (or timed out or interrupted). This can be
322 * called only from user process context.
323 */
324static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
325 u16 *param1, u16 *resp0)
326{
327 struct hostap_interface *iface;
328 local_info_t *local;
329 int err, res, issue, issued = 0;
330 unsigned long flags;
331 struct hostap_cmd_queue *entry;
332 DECLARE_WAITQUEUE(wait, current);
333
334 iface = netdev_priv(dev);
335 local = iface->local;
336
337 if (in_interrupt()) {
338 printk(KERN_DEBUG "%s: hfa384x_cmd called from interrupt "
339 "context\n", dev->name);
340 return -1;
341 }
342
343 if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) {
344 printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
345 dev->name);
346 return -1;
347 }
348
349 if (signal_pending(current))
350 return -EINTR;
351
352 entry = (struct hostap_cmd_queue *)
353 kmalloc(sizeof(*entry), GFP_ATOMIC);
354 if (entry == NULL) {
355 printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
356 dev->name);
357 return -ENOMEM;
358 }
359 memset(entry, 0, sizeof(*entry));
360 atomic_set(&entry->usecnt, 1);
361 entry->type = CMD_SLEEP;
362 entry->cmd = cmd;
363 entry->param0 = param0;
364 if (param1)
365 entry->param1 = *param1;
366 init_waitqueue_head(&entry->compl);
367
368 /* prepare to wait for command completion event, but do not sleep yet
369 */
370 add_wait_queue(&entry->compl, &wait);
371 set_current_state(TASK_INTERRUPTIBLE);
372
373 spin_lock_irqsave(&local->cmdlock, flags);
374 issue = list_empty(&local->cmd_queue);
375 if (issue)
376 entry->issuing = 1;
377 list_add_tail(&entry->list, &local->cmd_queue);
378 local->cmd_queue_len++;
379 spin_unlock_irqrestore(&local->cmdlock, flags);
380
381 err = 0;
382 if (!issue)
383 goto wait_completion;
384
385 if (signal_pending(current))
386 err = -EINTR;
387
388 if (!err) {
389 if (hfa384x_cmd_issue(dev, entry))
390 err = -ETIMEDOUT;
391 else
392 issued = 1;
393 }
394
395 wait_completion:
396 if (!err && entry->type != CMD_COMPLETED) {
397 /* sleep until command is completed or timed out */
398 res = schedule_timeout(2 * HZ);
399 } else
400 res = -1;
401
402 if (!err && signal_pending(current))
403 err = -EINTR;
404
405 if (err && issued) {
406 /* the command was issued, so a CmdCompl event should occur
407 * soon; however, there's a pending signal and
408 * schedule_timeout() would be interrupted; wait a short period
409 * of time to avoid removing entry from the list before
410 * CmdCompl event */
411 udelay(300);
412 }
413
414 set_current_state(TASK_RUNNING);
415 remove_wait_queue(&entry->compl, &wait);
416
417 /* If entry->list is still in the list, it must be removed
418 * first and in this case prism2_cmd_ev() does not yet have
419 * local reference to it, and the data can be kfree()'d
420 * here. If the command completion event is still generated,
421 * it will be assigned to next (possibly) pending command, but
422 * the driver will reset the card anyway due to timeout
423 *
424 * If the entry is not in the list prism2_cmd_ev() has a local
425 * reference to it, but keeps cmdlock as long as the data is
426 * needed, so the data can be kfree()'d here. */
427
428 /* FIX: if the entry->list is in the list, it has not been completed
429 * yet, so removing it here is somewhat wrong.. this could cause
430 * references to freed memory and next list_del() causing NULL pointer
431 * dereference.. it would probably be better to leave the entry in the
432 * list and the list should be emptied during hw reset */
433
434 spin_lock_irqsave(&local->cmdlock, flags);
435 if (!list_empty(&entry->list)) {
436 printk(KERN_DEBUG "%s: hfa384x_cmd: entry still in list? "
437 "(entry=%p, type=%d, res=%d)\n", dev->name, entry,
438 entry->type, res);
439 list_del_init(&entry->list);
440 local->cmd_queue_len--;
441 }
442 spin_unlock_irqrestore(&local->cmdlock, flags);
443
444 if (err) {
445 printk(KERN_DEBUG "%s: hfa384x_cmd: interrupted; err=%d\n",
446 dev->name, err);
447 res = err;
448 goto done;
449 }
450
451 if (entry->type != CMD_COMPLETED) {
452 u16 reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
453 printk(KERN_DEBUG "%s: hfa384x_cmd: command was not "
454 "completed (res=%d, entry=%p, type=%d, cmd=0x%04x, "
455 "param0=0x%04x, EVSTAT=%04x INTEN=%04x)\n", dev->name,
456 res, entry, entry->type, entry->cmd, entry->param0, reg,
457 HFA384X_INW(HFA384X_INTEN_OFF));
458 if (reg & HFA384X_EV_CMD) {
459 /* Command completion event is pending, but the
460 * interrupt was not delivered - probably an issue
461 * with pcmcia-cs configuration. */
462 printk(KERN_WARNING "%s: interrupt delivery does not "
463 "seem to work\n", dev->name);
464 }
465 prism2_io_debug_error(dev, 3);
466 res = -ETIMEDOUT;
467 goto done;
468 }
469
470 if (resp0 != NULL)
471 *resp0 = entry->resp0;
472#ifndef final_version
473 if (entry->res) {
474 printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x, "
475 "resp0=0x%04x\n",
476 dev->name, cmd, entry->res, entry->resp0);
477 }
478#endif /* final_version */
479
480 res = entry->res;
481 done:
482 hostap_cmd_queue_free(local, entry, 1);
483 return res;
484}
485
486
487/**
488 * hfa384x_cmd_callback - Issue a Prism2 command; callback when completed
489 * @dev: pointer to net_device
490 * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
491 * @param0: value for Param0 register
492 * @callback: command completion callback function (%NULL = no callback)
493 * @context: context data to be given to the callback function
494 *
495 * Issue given command (possibly after waiting in command queue) and use
496 * callback function to indicate command completion. This can be called both
497 * from user and interrupt context. The callback function will be called in
498 * hardware IRQ context. It can be %NULL, when no function is called when
499 * command is completed.
500 */
501static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
502 void (*callback)(struct net_device *dev,
503 long context, u16 resp0,
504 u16 status),
505 long context)
506{
507 struct hostap_interface *iface;
508 local_info_t *local;
509 int issue, ret;
510 unsigned long flags;
511 struct hostap_cmd_queue *entry;
512
513 iface = netdev_priv(dev);
514 local = iface->local;
515
516 if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN + 2) {
517 printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
518 dev->name);
519 return -1;
520 }
521
522 entry = (struct hostap_cmd_queue *)
523 kmalloc(sizeof(*entry), GFP_ATOMIC);
524 if (entry == NULL) {
525 printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
526 "failed\n", dev->name);
527 return -ENOMEM;
528 }
529 memset(entry, 0, sizeof(*entry));
530 atomic_set(&entry->usecnt, 1);
531 entry->type = CMD_CALLBACK;
532 entry->cmd = cmd;
533 entry->param0 = param0;
534 entry->callback = callback;
535 entry->context = context;
536
537 spin_lock_irqsave(&local->cmdlock, flags);
538 issue = list_empty(&local->cmd_queue);
539 if (issue)
540 entry->issuing = 1;
541 list_add_tail(&entry->list, &local->cmd_queue);
542 local->cmd_queue_len++;
543 spin_unlock_irqrestore(&local->cmdlock, flags);
544
545 if (issue && hfa384x_cmd_issue(dev, entry))
546 ret = -ETIMEDOUT;
547 else
548 ret = 0;
549
550 hostap_cmd_queue_free(local, entry, ret);
551
552 return ret;
553}
554
555
556/**
557 * __hfa384x_cmd_no_wait - Issue a Prism2 command (private)
558 * @dev: pointer to net_device
559 * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
560 * @param0: value for Param0 register
561 * @io_debug_num: I/O debug error number
562 *
563 * Shared helper function for hfa384x_cmd_wait() and hfa384x_cmd_no_wait().
564 */
565static int __hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd, u16 param0,
566 int io_debug_num)
567{
568 int tries;
569 u16 reg;
570
571 /* wait until busy bit is clear; this should always be clear since the
572 * commands are serialized */
573 tries = HFA384X_CMD_BUSY_TIMEOUT;
574 while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
575 tries--;
576 udelay(1);
577 }
578 if (tries == 0) {
579 reg = HFA384X_INW(HFA384X_CMD_OFF);
580 prism2_io_debug_error(dev, io_debug_num);
581 printk(KERN_DEBUG "%s: __hfa384x_cmd_no_wait(%d) - timeout - "
582 "reg=0x%04x\n", dev->name, io_debug_num, reg);
583 return -ETIMEDOUT;
584 }
585
586 /* write command */
587 HFA384X_OUTW(param0, HFA384X_PARAM0_OFF);
588 HFA384X_OUTW(cmd, HFA384X_CMD_OFF);
589
590 return 0;
591}
592
593
594/**
595 * hfa384x_cmd_wait - Issue a Prism2 command and busy wait for completion
596 * @dev: pointer to net_device
597 * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
598 * @param0: value for Param0 register
599 */
600static int hfa384x_cmd_wait(struct net_device *dev, u16 cmd, u16 param0)
601{
602 int res, tries;
603 u16 reg;
604
605 res = __hfa384x_cmd_no_wait(dev, cmd, param0, 4);
606 if (res)
607 return res;
608
609 /* wait for command completion */
610 if ((cmd & HFA384X_CMDCODE_MASK) == HFA384X_CMDCODE_DOWNLOAD)
611 tries = HFA384X_DL_COMPL_TIMEOUT;
612 else
613 tries = HFA384X_CMD_COMPL_TIMEOUT;
614
615 while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
616 tries > 0) {
617 tries--;
618 udelay(10);
619 }
620 if (tries == 0) {
621 reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
622 prism2_io_debug_error(dev, 5);
623 printk(KERN_DEBUG "%s: hfa384x_cmd_wait - timeout2 - "
624 "reg=0x%04x\n", dev->name, reg);
625 return -ETIMEDOUT;
626 }
627
628 res = (HFA384X_INW(HFA384X_STATUS_OFF) &
629 (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) |
630 BIT(8))) >> 8;
631#ifndef final_version
632 if (res) {
633 printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x\n",
634 dev->name, cmd, res);
635 }
636#endif
637
638 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
639
640 return res;
641}
642
643
644/**
645 * hfa384x_cmd_no_wait - Issue a Prism2 command; do not wait for completion
646 * @dev: pointer to net_device
647 * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
648 * @param0: value for Param0 register
649 */
650static inline int hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd,
651 u16 param0)
652{
653 return __hfa384x_cmd_no_wait(dev, cmd, param0, 6);
654}
655
656
657/**
658 * prism2_cmd_ev - Prism2 command completion event handler
659 * @dev: pointer to net_device
660 *
661 * Interrupt handler for command completion events. Called by the main
662 * interrupt handler in hardware IRQ context. Read Resp0 and status registers
663 * from the hardware and ACK the event. Depending on the issued command type
664 * either wake up the sleeping process that is waiting for command completion
665 * or call the callback function. Issue the next command, if one is pending.
666 */
667static void prism2_cmd_ev(struct net_device *dev)
668{
669 struct hostap_interface *iface;
670 local_info_t *local;
671 struct hostap_cmd_queue *entry = NULL;
672
673 iface = netdev_priv(dev);
674 local = iface->local;
675
676 spin_lock(&local->cmdlock);
677 if (!list_empty(&local->cmd_queue)) {
678 entry = list_entry(local->cmd_queue.next,
679 struct hostap_cmd_queue, list);
680 atomic_inc(&entry->usecnt);
681 list_del_init(&entry->list);
682 local->cmd_queue_len--;
683
684 if (!entry->issued) {
685 printk(KERN_DEBUG "%s: Command completion event, but "
686 "cmd not issued\n", dev->name);
687 __hostap_cmd_queue_free(local, entry, 1);
688 entry = NULL;
689 }
690 }
691 spin_unlock(&local->cmdlock);
692
693 if (!entry) {
694 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
695 printk(KERN_DEBUG "%s: Command completion event, but no "
696 "pending commands\n", dev->name);
697 return;
698 }
699
700 entry->resp0 = HFA384X_INW(HFA384X_RESP0_OFF);
701 entry->res = (HFA384X_INW(HFA384X_STATUS_OFF) &
702 (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) |
703 BIT(9) | BIT(8))) >> 8;
704 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
705
706 /* TODO: rest of the CmdEv handling could be moved to tasklet */
707 if (entry->type == CMD_SLEEP) {
708 entry->type = CMD_COMPLETED;
709 wake_up_interruptible(&entry->compl);
710 } else if (entry->type == CMD_CALLBACK) {
711 if (entry->callback)
712 entry->callback(dev, entry->context, entry->resp0,
713 entry->res);
714 } else {
715 printk(KERN_DEBUG "%s: Invalid command completion type %d\n",
716 dev->name, entry->type);
717 }
718 hostap_cmd_queue_free(local, entry, 1);
719
720 /* issue next command, if pending */
721 entry = NULL;
722 spin_lock(&local->cmdlock);
723 if (!list_empty(&local->cmd_queue)) {
724 entry = list_entry(local->cmd_queue.next,
725 struct hostap_cmd_queue, list);
726 if (entry->issuing) {
727 /* hfa384x_cmd() has already started issuing this
728 * command, so do not start here */
729 entry = NULL;
730 }
731 if (entry)
732 atomic_inc(&entry->usecnt);
733 }
734 spin_unlock(&local->cmdlock);
735
736 if (entry) {
737 /* issue next command; if command issuing fails, remove the
738 * entry from cmd_queue */
739 int res = hfa384x_cmd_issue(dev, entry);
740 spin_lock(&local->cmdlock);
741 __hostap_cmd_queue_free(local, entry, res);
742 spin_unlock(&local->cmdlock);
743 }
744}
745
746
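/* Busy-wait for the BAP offset register to clear its busy bit; returns
 * non-zero if the register is still busy after the timeout. */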
747static inline int hfa384x_wait_offset(struct net_device *dev, u16 o_off)
748{
749 int tries = HFA384X_BAP_BUSY_TIMEOUT;
750 int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
751
752 while (res && tries > 0) {
753 tries--;
754 udelay(1);
755 res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
756 }
757 return res;
758}
759
760
761/* Offset must be even */
762static int hfa384x_setup_bap(struct net_device *dev, u16 bap, u16 id,
763 int offset)
764{
765 u16 o_off, s_off;
766 int ret = 0;
767
768 if (offset % 2 || bap > 1)
769 return -EINVAL;
770
771 if (bap == BAP1) {
772 o_off = HFA384X_OFFSET1_OFF;
773 s_off = HFA384X_SELECT1_OFF;
774 } else {
775 o_off = HFA384X_OFFSET0_OFF;
776 s_off = HFA384X_SELECT0_OFF;
777 }
778
779 if (hfa384x_wait_offset(dev, o_off)) {
780 prism2_io_debug_error(dev, 7);
781 printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout before\n",
782 dev->name);
783 ret = -ETIMEDOUT;
784 goto out;
785 }
786
787 HFA384X_OUTW(id, s_off);
788 HFA384X_OUTW(offset, o_off);
789
790 if (hfa384x_wait_offset(dev, o_off)) {
791 prism2_io_debug_error(dev, 8);
792 printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout after\n",
793 dev->name);
794 ret = -ETIMEDOUT;
795 goto out;
796 }
797#ifndef final_version
798 if (HFA384X_INW(o_off) & HFA384X_OFFSET_ERR) {
799 prism2_io_debug_error(dev, 9);
800 printk(KERN_DEBUG "%s: hfa384x_setup_bap - offset error "
801 "(%d,0x04%x,%d); reg=0x%04x\n",
802 dev->name, bap, id, offset, HFA384X_INW(o_off));
803 ret = -EINVAL;
804 }
805#endif
806
807 out:
808 return ret;
809}
810
811
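/* Read RID @rid into @buf using BAP0; returns the RID data length in bytes on
 * success or a negative error code. With @exact_len, the reported length must
 * match @len. */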
812static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
813 int exact_len)
814{
815 struct hostap_interface *iface;
816 local_info_t *local;
817 int res, rlen = 0;
818 struct hfa384x_rid_hdr rec;
819
820 iface = netdev_priv(dev);
821 local = iface->local;
822
823 if (local->no_pri) {
824 printk(KERN_DEBUG "%s: cannot get RID %04x (len=%d) - no PRI "
825 "f/w\n", dev->name, rid, len);
826 return -ENOTTY; /* Well.. not really correct, but return
827 * something unique enough.. */
828 }
829
830 if ((local->func->card_present && !local->func->card_present(local)) ||
831 local->hw_downloading)
832 return -ENODEV;
833
834 res = down_interruptible(&local->rid_bap_sem);
835 if (res)
836 return res;
837
838 res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS, rid, NULL, NULL);
839 if (res) {
840 printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed "
841 "(res=%d, rid=%04x, len=%d)\n",
842 dev->name, res, rid, len);
843 up(&local->rid_bap_sem);
844 return res;
845 }
846
847 spin_lock_bh(&local->baplock);
848
849 res = hfa384x_setup_bap(dev, BAP0, rid, 0);
850 if (!res)
851 res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
852
853 if (!res && le16_to_cpu(rec.len) == 0) {
854 /* RID not available */
855 res = -ENODATA;
856 }
857
858 rlen = (le16_to_cpu(rec.len) - 1) * 2;
859 if (!res && exact_len && rlen != len) {
860 printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
861 "rid=0x%04x, len=%d (expected %d)\n",
862 dev->name, rid, rlen, len);
863 res = -ENODATA;
864 }
865
866 if (!res)
867 res = hfa384x_from_bap(dev, BAP0, buf, len);
868
869 spin_unlock_bh(&local->baplock);
870 up(&local->rid_bap_sem);
871
872 if (res) {
873 if (res != -ENODATA)
874 printk(KERN_DEBUG "%s: hfa384x_get_rid (rid=%04x, "
875 "len=%d) - failed - res=%d\n", dev->name, rid,
876 len, res);
877 if (res == -ETIMEDOUT)
878 prism2_hw_reset(dev);
879 return res;
880 }
881
882 return rlen;
883}
884
885
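/* Write @len bytes from @buf to RID @rid using BAP0 and commit the change
 * with the ACCESS_WRITE command. */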
886static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len)
887{
888 struct hostap_interface *iface;
889 local_info_t *local;
890 struct hfa384x_rid_hdr rec;
891 int res;
892
893 iface = netdev_priv(dev);
894 local = iface->local;
895
896 if (local->no_pri) {
897 printk(KERN_DEBUG "%s: cannot set RID %04x (len=%d) - no PRI "
898 "f/w\n", dev->name, rid, len);
899 return -ENOTTY; /* Well.. not really correct, but return
900 * something unique enough.. */
901 }
902
903 if ((local->func->card_present && !local->func->card_present(local)) ||
904 local->hw_downloading)
905 return -ENODEV;
906
907 rec.rid = cpu_to_le16(rid);
908 /* RID len in words and +1 for rec.rid */
909 rec.len = cpu_to_le16(len / 2 + len % 2 + 1);
910
911 res = down_interruptible(&local->rid_bap_sem);
912 if (res)
913 return res;
914
915 spin_lock_bh(&local->baplock);
916 res = hfa384x_setup_bap(dev, BAP0, rid, 0);
917 if (!res)
918 res = hfa384x_to_bap(dev, BAP0, &rec, sizeof(rec));
919 if (!res)
920 res = hfa384x_to_bap(dev, BAP0, buf, len);
921 spin_unlock_bh(&local->baplock);
922
923 if (res) {
924 printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - "
925 "failed - res=%d\n", dev->name, rid, len, res);
926 up(&local->rid_bap_sem);
927 return res;
928 }
929
930 res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL);
931 up(&local->rid_bap_sem);
932 if (res) {
933 printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE "
934 "failed (res=%d, rid=%04x, len=%d)\n",
935 dev->name, res, rid, len);
936 /* reset the card if the ACCESS_WRITE command timed out */
937 if (res == -ETIMEDOUT)
938 prism2_hw_reset(dev);
939 return res;
940 }
941
942 return res;
943}
944
945
946static void hfa384x_disable_interrupts(struct net_device *dev)
947{
948 /* disable interrupts and clear event status */
949 HFA384X_OUTW(0, HFA384X_INTEN_OFF);
950 HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
951}
952
953
954static void hfa384x_enable_interrupts(struct net_device *dev)
955{
956 /* ack pending events and enable interrupts from selected events */
957 HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
958 HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
959}
960
961
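/* Disable interrupts for events that require BAP0 access; keep the rest of
 * the event mask enabled. */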
962static void hfa384x_events_no_bap0(struct net_device *dev)
963{
964 HFA384X_OUTW(HFA384X_EVENT_MASK & ~HFA384X_BAP0_EVENTS,
965 HFA384X_INTEN_OFF);
966}
967
968
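/* Enable interrupts for the full event mask. */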
969static void hfa384x_events_all(struct net_device *dev)
970{
971 HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
972}
973
974
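/* Enable interrupts only for command completion events. */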
975static void hfa384x_events_only_cmd(struct net_device *dev)
976{
977 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_INTEN_OFF);
978}
979
980
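/* Allocate a buffer of @len bytes from the NIC; wait (yielding) for the Alloc
 * event and return the new FID, or 0xffff on failure. */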
981static u16 hfa384x_allocate_fid(struct net_device *dev, int len)
982{
983 u16 fid;
984 unsigned long delay;
985
986 /* FIX: this could be replaced with hfa384x_cmd() if the Alloc event
987 * below were handled like the CmdCompl event (sleep here, wake up from
988 * the interrupt handler) */
989 if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_ALLOC, len)) {
990 printk(KERN_DEBUG "%s: cannot allocate fid, len=%d\n",
991 dev->name, len);
992 return 0xffff;
993 }
994
995 delay = jiffies + HFA384X_ALLOC_COMPL_TIMEOUT;
996 while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC) &&
997 time_before(jiffies, delay))
998 yield();
999 if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC)) {
1000 printk("%s: fid allocate, len=%d - timeout\n", dev->name, len);
1001 return 0xffff;
1002 }
1003
1004 fid = HFA384X_INW(HFA384X_ALLOCFID_OFF);
1005 HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
1006
1007 return fid;
1008}
1009
1010
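/* Reset MAC port 0 by disabling and re-enabling it; restore a non-default
 * fragmentation threshold that some station firmware resets on enable. */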
1011static int prism2_reset_port(struct net_device *dev)
1012{
1013 struct hostap_interface *iface;
1014 local_info_t *local;
1015 int res;
1016
1017 iface = netdev_priv(dev);
1018 local = iface->local;
1019
1020 if (!local->dev_enabled)
1021 return 0;
1022
1023 res = hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0,
1024 NULL, NULL);
1025 if (res)
1026 printk(KERN_DEBUG "%s: reset port failed to disable port\n",
1027 dev->name);
1028 else {
1029 res = hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0,
1030 NULL, NULL);
1031 if (res)
1032 printk(KERN_DEBUG "%s: reset port failed to enable "
1033 "port\n", dev->name);
1034 }
1035
1036 /* It looks like at least some STA firmware versions reset
1037 * fragmentation threshold back to 2346 after enable command. Restore
1038 * the configured value, if it differs from this default. */
1039 if (local->fragm_threshold != 2346 &&
1040 hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
1041 local->fragm_threshold)) {
1042 printk(KERN_DEBUG "%s: failed to restore fragmentation "
1043 "threshold (%d) after Port0 enable\n",
1044 dev->name, local->fragm_threshold);
1045 }
1046
1047 return res;
1048}
1049
1050
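/* Read a component identity RID and print the component id and version;
 * returns -1 if PRI f/w is missing or the RID read fails. */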
1051static int prism2_get_version_info(struct net_device *dev, u16 rid,
1052 const char *txt)
1053{
1054 struct hfa384x_comp_ident comp;
1055 struct hostap_interface *iface;
1056 local_info_t *local;
1057
1058 iface = netdev_priv(dev);
1059 local = iface->local;
1060
1061 if (local->no_pri) {
1062 /* PRI f/w not yet available - cannot read RIDs */
1063 return -1;
1064 }
1065 if (hfa384x_get_rid(dev, rid, &comp, sizeof(comp), 1) < 0) {
1066 printk(KERN_DEBUG "Could not get RID for component %s\n", txt);
1067 return -1;
1068 }
1069
1070 printk(KERN_INFO "%s: %s: id=0x%02x v%d.%d.%d\n", dev->name, txt,
1071 __le16_to_cpu(comp.id), __le16_to_cpu(comp.major),
1072 __le16_to_cpu(comp.minor), __le16_to_cpu(comp.variant));
1073 return 0;
1074}
1075
1076
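/* Configure firmware RIDs (port type, SSID, channel, beacon/DTIM intervals,
 * TX rates, encryption, thresholds, etc.) from the current driver settings. */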
1077static int prism2_setup_rids(struct net_device *dev)
1078{
1079 struct hostap_interface *iface;
1080 local_info_t *local;
1081 u16 tmp;
1082 int ret = 0;
1083
1084 iface = netdev_priv(dev);
1085 local = iface->local;
1086
1087 hostap_set_word(dev, HFA384X_RID_TICKTIME, 2000);
1088
1089 if (!local->fw_ap) {
1090 tmp = hostap_get_porttype(local);
1091 ret = hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, tmp);
1092 if (ret) {
1093 printk("%s: Port type setting to %d failed\n",
1094 dev->name, tmp);
1095 goto fail;
1096 }
1097 }
1098
1099 /* Setting SSID to empty string seems to kill the card in Host AP mode
1100 */
1101 if (local->iw_mode != IW_MODE_MASTER || local->essid[0] != '\0') {
1102 ret = hostap_set_string(dev, HFA384X_RID_CNFOWNSSID,
1103 local->essid);
1104 if (ret) {
1105 printk("%s: AP own SSID setting failed\n", dev->name);
1106 goto fail;
1107 }
1108 }
1109
1110 ret = hostap_set_word(dev, HFA384X_RID_CNFMAXDATALEN,
1111 PRISM2_DATA_MAXLEN);
1112 if (ret) {
1113 printk("%s: MAC data length setting to %d failed\n",
1114 dev->name, PRISM2_DATA_MAXLEN);
1115 goto fail;
1116 }
1117
1118 if (hfa384x_get_rid(dev, HFA384X_RID_CHANNELLIST, &tmp, 2, 1) < 0) {
1119 printk("%s: Channel list read failed\n", dev->name);
1120 ret = -EINVAL;
1121 goto fail;
1122 }
1123 local->channel_mask = __le16_to_cpu(tmp);
1124
1125 if (local->channel < 1 || local->channel > 14 ||
1126 !(local->channel_mask & (1 << (local->channel - 1)))) {
1127 printk(KERN_WARNING "%s: Channel setting out of range "
1128 "(%d)!\n", dev->name, local->channel);
1129 ret = -EBUSY;
1130 goto fail;
1131 }
1132
1133 ret = hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel);
1134 if (ret) {
1135 printk("%s: Channel setting to %d failed\n",
1136 dev->name, local->channel);
1137 goto fail;
1138 }
1139
1140 ret = hostap_set_word(dev, HFA384X_RID_CNFBEACONINT,
1141 local->beacon_int);
1142 if (ret) {
1143 printk("%s: Beacon interval setting to %d failed\n",
1144 dev->name, local->beacon_int);
1145 /* this may fail with Symbol/Lucent firmware */
1146 if (ret == -ETIMEDOUT)
1147 goto fail;
1148 }
1149
1150 ret = hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD,
1151 local->dtim_period);
1152 if (ret) {
1153 printk("%s: DTIM period setting to %d failed\n",
1154 dev->name, local->dtim_period);
1155 /* this may fail with Symbol/Lucent firmware */
1156 if (ret == -ETIMEDOUT)
1157 goto fail;
1158 }
1159
1160 ret = hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
1161 local->is_promisc);
1162 if (ret)
1163 printk(KERN_INFO "%s: Setting promiscuous mode (%d) failed\n",
1164 dev->name, local->is_promisc);
1165
1166 if (!local->fw_ap) {
1167 ret = hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID,
1168 local->essid);
1169 if (ret) {
1170 printk("%s: Desired SSID setting failed\n", dev->name);
1171 goto fail;
1172 }
1173 }
1174
1175 /* Setup TXRateControl, defaults to allow use of 1, 2, 5.5, and
1176 * 11 Mbps in automatic TX rate fallback and 1 and 2 Mbps as basic
1177 * rates */
1178 if (local->tx_rate_control == 0) {
1179 local->tx_rate_control =
1180 HFA384X_RATES_1MBPS |
1181 HFA384X_RATES_2MBPS |
1182 HFA384X_RATES_5MBPS |
1183 HFA384X_RATES_11MBPS;
1184 }
1185 if (local->basic_rates == 0)
1186 local->basic_rates = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS;
1187
1188 if (!local->fw_ap) {
1189 ret = hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
1190 local->tx_rate_control);
1191 if (ret) {
1192 printk("%s: TXRateControl setting to %d failed\n",
1193 dev->name, local->tx_rate_control);
1194 goto fail;
1195 }
1196
1197 ret = hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
1198 local->tx_rate_control);
1199 if (ret) {
1200 printk("%s: cnfSupportedRates setting to %d failed\n",
1201 dev->name, local->tx_rate_control);
1202 }
1203
1204 ret = hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
1205 local->basic_rates);
1206 if (ret) {
1207 printk("%s: cnfBasicRates setting to %d failed\n",
1208 dev->name, local->basic_rates);
1209 }
1210
1211 ret = hostap_set_word(dev, HFA384X_RID_CREATEIBSS, 1);
1212 if (ret) {
1213 printk("%s: Create IBSS setting to 1 failed\n",
1214 dev->name);
1215 }
1216 }
1217
1218 if (local->name_set)
1219 (void) hostap_set_string(dev, HFA384X_RID_CNFOWNNAME,
1220 local->name);
1221
1222 if (hostap_set_encryption(local)) {
1223 printk(KERN_INFO "%s: could not configure encryption\n",
1224 dev->name);
1225 }
1226
1227 (void) hostap_set_antsel(local);
1228
1229 if (hostap_set_roaming(local)) {
1230 printk(KERN_INFO "%s: could not set host roaming\n",
1231 dev->name);
1232 }
1233
1234 if (local->sta_fw_ver >= PRISM2_FW_VER(1,6,3) &&
1235 hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY, local->enh_sec))
1236 printk(KERN_INFO "%s: cnfEnhSecurity setting to 0x%x failed\n",
1237 dev->name, local->enh_sec);
1238
1239 /* 32-bit tallies were added in STA f/w 0.8.0, but they were apparently
1240 * not working correctly (last seven counters report bogus values).
1241 * This has been fixed in 0.8.2, so enable 32-bit tallies only
1242 * beginning with that firmware version. Another bug fix for 32-bit
1243 * tallies in 1.4.0; should 16-bit tallies be used for some other
1244 * versions, too? */
1245 if (local->sta_fw_ver >= PRISM2_FW_VER(0,8,2)) {
1246 if (hostap_set_word(dev, HFA384X_RID_CNFTHIRTY2TALLY, 1)) {
1247 printk(KERN_INFO "%s: cnfThirty2Tally setting "
1248 "failed\n", dev->name);
1249 local->tallies32 = 0;
1250 } else
1251 local->tallies32 = 1;
1252 } else
1253 local->tallies32 = 0;
1254
1255 hostap_set_auth_algs(local);
1256
1257 if (hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
1258 local->fragm_threshold)) {
1259 printk(KERN_INFO "%s: setting FragmentationThreshold to %d "
1260 "failed\n", dev->name, local->fragm_threshold);
1261 }
1262
1263 if (hostap_set_word(dev, HFA384X_RID_RTSTHRESHOLD,
1264 local->rts_threshold)) {
1265 printk(KERN_INFO "%s: setting RTSThreshold to %d failed\n",
1266 dev->name, local->rts_threshold);
1267 }
1268
1269 if (local->manual_retry_count >= 0 &&
1270 hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
1271 local->manual_retry_count)) {
1272 printk(KERN_INFO "%s: setting cnfAltRetryCount to %d failed\n",
1273 dev->name, local->manual_retry_count);
1274 }
1275
1276 if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1) &&
1277 hfa384x_get_rid(dev, HFA384X_RID_CNFDBMADJUST, &tmp, 2, 1) == 2) {
1278 local->rssi_to_dBm = le16_to_cpu(tmp);
1279 }
1280
1281 if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->wpa &&
1282 hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1)) {
1283 printk(KERN_INFO "%s: setting ssnHandlingMode to 1 failed\n",
1284 dev->name);
1285 }
1286
1287 if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->generic_elem &&
1288 hfa384x_set_rid(dev, HFA384X_RID_GENERICELEMENT,
1289 local->generic_elem, local->generic_elem_len)) {
1290 printk(KERN_INFO "%s: setting genericElement failed\n",
1291 dev->name);
1292 }
1293
1294 fail:
1295 return ret;
1296}
1297
1298
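/* Issue the Initialize command and wait for it to complete; returns non-zero
 * (and sets local->no_pri) if the card does not seem to have primary
 * firmware. */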
1299static int prism2_hw_init(struct net_device *dev, int initial)
1300{
1301 struct hostap_interface *iface;
1302 local_info_t *local;
1303 int ret, first = 1;
1304 unsigned long start, delay;
1305
1306 PDEBUG(DEBUG_FLOW, "prism2_hw_init()\n");
1307
1308 iface = netdev_priv(dev);
1309 local = iface->local;
1310
1311 clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits);
1312
1313 init:
1314 /* initialize HFA 384x */
1315 ret = hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_INIT, 0);
1316 if (ret) {
1317 printk(KERN_INFO "%s: first command failed - assuming card "
1318 "does not have primary firmware\n", dev_info);
1319 }
1320
1321 if (first && (HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
1322 /* EvStat has Cmd bit set in some cases, so retry once if no
1323 * wait was needed */
1324 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
1325 printk(KERN_DEBUG "%s: init command completed too quickly - "
1326 "retrying\n", dev->name);
1327 first = 0;
1328 goto init;
1329 }
1330
1331 start = jiffies;
1332 delay = jiffies + HFA384X_INIT_TIMEOUT;
1333 while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
1334 time_before(jiffies, delay))
1335 yield();
1336 if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
1337 printk(KERN_DEBUG "%s: assuming no Primary image in "
1338 "flash - card initialization not completed\n",
1339 dev_info);
1340 local->no_pri = 1;
1341#ifdef PRISM2_DOWNLOAD_SUPPORT
1342 if (local->sram_type == -1)
1343 local->sram_type = prism2_get_ram_size(local);
1344#endif /* PRISM2_DOWNLOAD_SUPPORT */
1345 return 1;
1346 }
1347 local->no_pri = 0;
1348 printk(KERN_DEBUG "prism2_hw_init: initialized in %lu ms\n",
1349 (jiffies - start) * 1000 / HZ);
1350 HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
1351 return 0;
1352}
1353
1354
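/* Second phase of hardware initialization: verify register access with a
 * magic write/read, read version info, allocate TX FIDs, and set up RIDs. */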
1355static int prism2_hw_init2(struct net_device *dev, int initial)
1356{
1357 struct hostap_interface *iface;
1358 local_info_t *local;
1359 int i;
1360
1361 iface = netdev_priv(dev);
1362 local = iface->local;
1363
1364#ifdef PRISM2_DOWNLOAD_SUPPORT
1365 kfree(local->pda);
1366 if (local->no_pri)
1367 local->pda = NULL;
1368 else
1369 local->pda = prism2_read_pda(dev);
1370#endif /* PRISM2_DOWNLOAD_SUPPORT */
1371
1372 hfa384x_disable_interrupts(dev);
1373
1374#ifndef final_version
1375 HFA384X_OUTW(HFA384X_MAGIC, HFA384X_SWSUPPORT0_OFF);
1376 if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
1377 printk("SWSUPPORT0 write/read failed: %04X != %04X\n",
1378 HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC);
1379 goto failed;
1380 }
1381#endif
1382
1383 if (initial || local->pri_only) {
1384 hfa384x_events_only_cmd(dev);
1385 /* get card version information */
1386 if (prism2_get_version_info(dev, HFA384X_RID_NICID, "NIC") ||
1387 prism2_get_version_info(dev, HFA384X_RID_PRIID, "PRI")) {
1388 hfa384x_disable_interrupts(dev);
1389 goto failed;
1390 }
1391
1392 if (prism2_get_version_info(dev, HFA384X_RID_STAID, "STA")) {
1393 printk(KERN_DEBUG "%s: Failed to read STA f/w version "
1394 "- only Primary f/w present\n", dev->name);
1395 local->pri_only = 1;
1396 return 0;
1397 }
1398 local->pri_only = 0;
1399 hfa384x_disable_interrupts(dev);
1400 }
1401
1402 /* FIX: could convert allocate_fid to use sleeping CmdCompl wait and
1403 * enable interrupts before this. This would also require some sort of
1404 * sleeping AllocEv waiting */
1405
1406 /* allocate TX FIDs */
1407 local->txfid_len = PRISM2_TXFID_LEN;
1408 for (i = 0; i < PRISM2_TXFID_COUNT; i++) {
1409 local->txfid[i] = hfa384x_allocate_fid(dev, local->txfid_len);
1410 if (local->txfid[i] == 0xffff && local->txfid_len > 1600) {
1411 local->txfid[i] = hfa384x_allocate_fid(dev, 1600);
1412 if (local->txfid[i] != 0xffff) {
1413 printk(KERN_DEBUG "%s: Using shorter TX FID "
1414 "(1600 bytes)\n", dev->name);
1415 local->txfid_len = 1600;
1416 }
1417 }
1418 if (local->txfid[i] == 0xffff)
1419 goto failed;
1420 local->intransmitfid[i] = PRISM2_TXFID_EMPTY;
1421 }
1422
1423 hfa384x_events_only_cmd(dev);
1424
1425 if (initial) {
1426 struct list_head *ptr;
1427 prism2_check_sta_fw_version(local);
1428
1429 if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR,
1430 &dev->dev_addr, 6, 1) < 0) {
1431 printk("%s: could not get own MAC address\n",
1432 dev->name);
1433 }
1434 list_for_each(ptr, &local->hostap_interfaces) {
1435 iface = list_entry(ptr, struct hostap_interface, list);
1436 memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN);
1437 }
1438 } else if (local->fw_ap)
1439 prism2_check_sta_fw_version(local);
1440
1441 prism2_setup_rids(dev);
1442
1443 /* MAC is now configured, but port 0 is not yet enabled */
1444 return 0;
1445
1446 failed:
1447 if (!local->no_pri)
1448 printk(KERN_WARNING "%s: Initialization failed\n", dev_info);
1449 return 1;
1450}
1451
1452
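/* Enable MAC port 0 and interrupts; wake the netif queue if a hardware reset
 * had stopped it. */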
1453static int prism2_hw_enable(struct net_device *dev, int initial)
1454{
1455 struct hostap_interface *iface;
1456 local_info_t *local;
1457 int was_resetting;
1458
1459 iface = netdev_priv(dev);
1460 local = iface->local;
1461 was_resetting = local->hw_resetting;
1462
1463 if (hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL)) {
1464 printk("%s: MAC port 0 enabling failed\n", dev->name);
1465 return 1;
1466 }
1467
1468 local->hw_ready = 1;
1469 local->hw_reset_tries = 0;
1470 local->hw_resetting = 0;
1471 hfa384x_enable_interrupts(dev);
1472
1473 /* at least D-Link DWL-650 seems to require additional port reset
1474 * before it starts acting as an AP, so reset port automatically
1475 * here just in case */
1476 if (initial && prism2_reset_port(dev)) {
1477 printk("%s: MAC port 0 reseting failed\n", dev->name);
1478 return 1;
1479 }
1480
1481 if (was_resetting && netif_queue_stopped(dev)) {
1482 /* If hw_reset() was called during a pending transmit, the netif
1483 * queue was stopped. Wake it up now since the wlan card has
1484 * been reset. */
1485 netif_wake_queue(dev);
1486 }
1487
1488 return 0;
1489}
1490
1491
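/* Full hardware bring-up: run both initialization phases and, if a secondary
 * firmware image is present and a netdevice is up, enable the firmware. */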
1492static int prism2_hw_config(struct net_device *dev, int initial)
1493{
1494 struct hostap_interface *iface;
1495 local_info_t *local;
1496
1497 iface = netdev_priv(dev);
1498 local = iface->local;
1499
1500 if (local->hw_downloading)
1501 return 1;
1502
1503 if (prism2_hw_init(dev, initial)) {
1504 return local->no_pri ? 0 : 1;
1505 }
1506
1507 if (prism2_hw_init2(dev, initial))
1508 return 1;
1509
1510 /* Enable firmware if secondary image is loaded and at least one of the
1511 * netdevices is up. */
1512 if (!local->pri_only &&
1513 (initial == 0 || (initial == 2 && local->num_dev_open > 0))) {
1514 if (!local->dev_enabled)
1515 prism2_callback(local, PRISM2_CALLBACK_ENABLE);
1516 local->dev_enabled = 1;
1517 return prism2_hw_enable(dev, initial);
1518 }
1519
1520 return 0;
1521}
1522
1523
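/* Shut down the hardware: notify the disable callback, disable MAC port 0
 * (unless HOSTAP_HW_NO_DISABLE is set), and mask interrupts. */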
1524static void prism2_hw_shutdown(struct net_device *dev, int no_disable)
1525{
1526 struct hostap_interface *iface;
1527 local_info_t *local;
1528
1529 iface = netdev_priv(dev);
1530 local = iface->local;
1531
1532 /* Allow only command completion events during disable */
1533 hfa384x_events_only_cmd(dev);
1534
1535 local->hw_ready = 0;
1536 if (local->dev_enabled)
1537 prism2_callback(local, PRISM2_CALLBACK_DISABLE);
1538 local->dev_enabled = 0;
1539
1540 if (local->func->card_present && !local->func->card_present(local)) {
1541 printk(KERN_DEBUG "%s: card already removed or not configured "
1542 "during shutdown\n", dev->name);
1543 return;
1544 }
1545
1546 if ((no_disable & HOSTAP_HW_NO_DISABLE) == 0 &&
1547 hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL))
1548 printk(KERN_WARNING "%s: Shutdown failed\n", dev_info);
1549
1550 hfa384x_disable_interrupts(dev);
1551
1552 if (no_disable & HOSTAP_HW_ENABLE_CMDCOMPL)
1553 hfa384x_events_only_cmd(dev);
1554 else
1555 prism2_clear_cmd_queue(local);
1556}
1557
1558
1559static void prism2_hw_reset(struct net_device *dev)
1560{
1561 struct hostap_interface *iface;
1562 local_info_t *local;
1563
1564#if 0
1565 static long last_reset = 0;
1566
1567 /* do not reset the card more than once per second to avoid ending up
1568 * in a busy loop resetting the card */
1569 if (time_before_eq(jiffies, last_reset + HZ))
1570 return;
1571 last_reset = jiffies;
1572#endif
1573
1574 iface = netdev_priv(dev);
1575 local = iface->local;
1576
1577 if (in_interrupt()) {
1578 printk(KERN_DEBUG "%s: driver bug - prism2_hw_reset() called "
1579 "in interrupt context\n", dev->name);
1580 return;
1581 }
1582
1583 if (local->hw_downloading)
1584 return;
1585
1586 if (local->hw_resetting) {
1587 printk(KERN_WARNING "%s: %s: already resetting card - "
1588 "ignoring reset request\n", dev_info, dev->name);
1589 return;
1590 }
1591
1592 local->hw_reset_tries++;
1593 if (local->hw_reset_tries > 10) {
1594 printk(KERN_WARNING "%s: too many reset tries, skipping\n",
1595 dev->name);
1596 return;
1597 }
1598
1599 printk(KERN_WARNING "%s: %s: resetting card\n", dev_info, dev->name);
1600 hfa384x_disable_interrupts(dev);
1601 local->hw_resetting = 1;
1602 if (local->func->cor_sreset) {
1603 /* Host system seems to hang in some cases with high traffic
1604 * load or shared interrupts during COR sreset. Disable shared
1605 * interrupts during reset to avoid these crashes. COR sreset
1606 * takes quite a long time, so it is unfortunate that this
1607 * seems to be needed. Anyway, I do not know of any better way
1608 * of avoiding the crash. */
1609 disable_irq(dev->irq);
1610 local->func->cor_sreset(local);
1611 enable_irq(dev->irq);
1612 }
1613 prism2_hw_shutdown(dev, 1);
1614 prism2_hw_config(dev, 0);
1615 local->hw_resetting = 0;
1616
1617#ifdef PRISM2_DOWNLOAD_SUPPORT
1618 if (local->dl_pri) {
1619 printk(KERN_DEBUG "%s: persistent download of primary "
1620 "firmware\n", dev->name);
1621 if (prism2_download_genesis(local, local->dl_pri) < 0)
1622 printk(KERN_WARNING "%s: download (PRI) failed\n",
1623 dev->name);
1624 }
1625
1626 if (local->dl_sec) {
1627 printk(KERN_DEBUG "%s: persistent download of secondary "
1628 "firmware\n", dev->name);
1629 if (prism2_download_volatile(local, local->dl_sec) < 0)
1630 printk(KERN_WARNING "%s: download (SEC) failed\n",
1631 dev->name);
1632 }
1633#endif /* PRISM2_DOWNLOAD_SUPPORT */
1634
1635 /* TODO: restore beacon TIM bits for STAs that have buffered frames */
1636}
1637
1638
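/* Schedule the card reset work to be run from process context. */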
1639static void prism2_schedule_reset(local_info_t *local)
1640{
1641 schedule_work(&local->reset_queue);
1642}
1643
1644
1645/* Called only as scheduled task after noticing card timeout in interrupt
1646 * context */
1647static void handle_reset_queue(void *data)
1648{
1649 local_info_t *local = (local_info_t *) data;
1650
1651 printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
1652 prism2_hw_reset(local->dev);
1653
1654 if (netif_queue_stopped(local->dev)) {
1655 int i;
1656
1657 for (i = 0; i < PRISM2_TXFID_COUNT; i++)
1658 if (local->intransmitfid[i] == PRISM2_TXFID_EMPTY) {
1659 PDEBUG(DEBUG_EXTRA, "prism2_tx_timeout: "
1660 "wake up queue\n");
1661 netif_wake_queue(local->dev);
1662 break;
1663 }
1664 }
1665}
1666
1667
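/* Reserve a free TX FID slot; returns its index, or -1 (counting a dropped
 * packet) if all slots are in use. */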
1668static int prism2_get_txfid_idx(local_info_t *local)
1669{
1670 int idx, end;
1671 unsigned long flags;
1672
1673 spin_lock_irqsave(&local->txfidlock, flags);
1674 end = idx = local->next_txfid;
1675 do {
1676 if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
1677 local->intransmitfid[idx] = PRISM2_TXFID_RESERVED;
1678 spin_unlock_irqrestore(&local->txfidlock, flags);
1679 return idx;
1680 }
1681 idx++;
1682 if (idx >= PRISM2_TXFID_COUNT)
1683 idx = 0;
1684 } while (idx != end);
1685 spin_unlock_irqrestore(&local->txfidlock, flags);
1686
1687 PDEBUG(DEBUG_EXTRA2, "prism2_get_txfid_idx: no room in txfid buf: "
1688 "packet dropped\n");
1689 local->stats.tx_dropped++;
1690
1691 return -1;
1692}
1693
1694
1695/* Called only from hardware IRQ */
1696static void prism2_transmit_cb(struct net_device *dev, long context,
1697 u16 resp0, u16 res)
1698{
1699 struct hostap_interface *iface;
1700 local_info_t *local;
1701 int idx = (int) context;
1702
1703 iface = netdev_priv(dev);
1704 local = iface->local;
1705
1706 if (res) {
1707 printk(KERN_DEBUG "%s: prism2_transmit_cb - res=0x%02x\n",
1708 dev->name, res);
1709 return;
1710 }
1711
1712 if (idx < 0 || idx >= PRISM2_TXFID_COUNT) {
1713 printk(KERN_DEBUG "%s: prism2_transmit_cb called with invalid "
1714 "idx=%d\n", dev->name, idx);
1715 return;
1716 }
1717
1718 if (!test_and_clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
1719 printk(KERN_DEBUG "%s: driver bug: prism2_transmit_cb called "
1720 "with no pending transmit\n", dev->name);
1721 }
1722
1723 if (netif_queue_stopped(dev)) {
1724 /* ready for next TX, so wake up queue that was stopped in
1725 * prism2_transmit() */
1726 netif_wake_queue(dev);
1727 }
1728
1729 spin_lock(&local->txfidlock);
1730
1731 /* With reclaim, Resp0 contains new txfid for transmit; the old txfid
1732 * will be automatically allocated for the next TX frame */
1733 local->intransmitfid[idx] = resp0;
1734
1735 PDEBUG(DEBUG_FID, "%s: prism2_transmit_cb: txfid[%d]=0x%04x, "
1736 "resp0=0x%04x, transmit_txfid=0x%04x\n",
1737 dev->name, idx, local->txfid[idx],
1738 resp0, local->intransmitfid[local->next_txfid]);
1739
1740 idx++;
1741 if (idx >= PRISM2_TXFID_COUNT)
1742 idx = 0;
1743 local->next_txfid = idx;
1744
1745 /* check if all TX buffers are occupied */
1746 do {
1747 if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
1748 spin_unlock(&local->txfidlock);
1749 return;
1750 }
1751 idx++;
1752 if (idx >= PRISM2_TXFID_COUNT)
1753 idx = 0;
1754 } while (idx != local->next_txfid);
1755 spin_unlock(&local->txfidlock);
1756
1757 /* no empty TX buffers, stop queue */
1758 netif_stop_queue(dev);
1759}
1760
1761
1762/* Called only from software IRQ if PCI bus master is not used (with bus master
1763 * this can be called both from software and hardware IRQ) */
1764static int prism2_transmit(struct net_device *dev, int idx)
1765{
1766 struct hostap_interface *iface;
1767 local_info_t *local;
1768 int res;
1769
1770 iface = netdev_priv(dev);
1771 local = iface->local;
1772
1773 /* The driver tries to stop the netif queue so that there is never
1774 * more than one frame transmit attempt in progress; check that this
1775 * is really the case */
1776
1777 if (test_and_set_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
1778 printk(KERN_DEBUG "%s: driver bug - prism2_transmit() called "
1779 "when previous TX was pending\n", dev->name);
1780 return -1;
1781 }
1782
1783 /* stop the queue for the time that transmit is pending */
1784 netif_stop_queue(dev);
1785
1786 /* transmit packet */
1787 res = hfa384x_cmd_callback(
1788 dev,
1789 HFA384X_CMDCODE_TRANSMIT | HFA384X_CMD_TX_RECLAIM,
1790 local->txfid[idx],
1791 prism2_transmit_cb, (long) idx);
1792
1793 if (res) {
1794 struct net_device_stats *stats;
1795 printk(KERN_DEBUG "%s: prism2_transmit: CMDCODE_TRANSMIT "
1796 "failed (res=%d)\n", dev->name, res);
1797 stats = hostap_get_stats(dev);
1798 stats->tx_dropped++;
1799 netif_wake_queue(dev);
1800 return -1;
1801 }
1802 dev->trans_start = jiffies;
1803
1804 /* Since we did not wait for command completion, the card continues
1805 * processing in the background and we will finish handling when the
1806 * command completion event is handled (prism2_cmd_ev() function) */
1807
1808 return 0;
1809}
1810
1811
1812/* Send IEEE 802.11 frame (convert the header into Prism2 TX descriptor and
1813 * send the payload with this descriptor) */
1814/* Called only from software IRQ */
1815static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
1816{
1817 struct hostap_interface *iface;
1818 local_info_t *local;
1819 struct hfa384x_tx_frame txdesc;
1820 struct hostap_skb_tx_data *meta;
1821 int hdr_len, data_len, idx, res, ret = -1;
1822 u16 tx_control, fc;
1823
1824 iface = netdev_priv(dev);
1825 local = iface->local;
1826
1827 meta = (struct hostap_skb_tx_data *) skb->cb;
1828
1829 prism2_callback(local, PRISM2_CALLBACK_TX_START);
1830
1831 if ((local->func->card_present && !local->func->card_present(local)) ||
1832 !local->hw_ready || local->hw_downloading || local->pri_only) {
1833 if (net_ratelimit()) {
1834 printk(KERN_DEBUG "%s: prism2_tx_80211: hw not ready -"
1835 " skipping\n", dev->name);
1836 }
1837 goto fail;
1838 }
1839
1840 memset(&txdesc, 0, sizeof(txdesc));
1841
1842 /* skb->data starts with txdesc->frame_control */
1843 hdr_len = 24;
1844 memcpy(&txdesc.frame_control, skb->data, hdr_len);
1845 fc = le16_to_cpu(txdesc.frame_control);
1846 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
1847 (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) &&
1848 skb->len >= 30) {
1849 /* Addr4 */
1850 memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN);
1851 hdr_len += ETH_ALEN;
1852 }
1853
1854 tx_control = local->tx_control;
1855 if (meta->tx_cb_idx) {
1856 tx_control |= HFA384X_TX_CTRL_TX_OK;
1857 txdesc.sw_support = cpu_to_le16(meta->tx_cb_idx);
1858 }
1859 txdesc.tx_control = cpu_to_le16(tx_control);
1860 txdesc.tx_rate = meta->rate;
1861
1862 data_len = skb->len - hdr_len;
1863 txdesc.data_len = cpu_to_le16(data_len);
1864 txdesc.len = cpu_to_be16(data_len);
1865
1866 idx = prism2_get_txfid_idx(local);
1867 if (idx < 0)
1868 goto fail;
1869
1870 if (local->frame_dump & PRISM2_DUMP_TX_HDR)
1871 hostap_dump_tx_header(dev->name, &txdesc);
1872
1873 spin_lock(&local->baplock);
1874 res = hfa384x_setup_bap(dev, BAP0, local->txfid[idx], 0);
1875
1876 if (!res)
1877 res = hfa384x_to_bap(dev, BAP0, &txdesc, sizeof(txdesc));
1878 if (!res)
1879 res = hfa384x_to_bap(dev, BAP0, skb->data + hdr_len,
1880 skb->len - hdr_len);
1881 spin_unlock(&local->baplock);
1882
1883 if (!res)
1884 res = prism2_transmit(dev, idx);
1885 if (res) {
1886 printk(KERN_DEBUG "%s: prism2_tx_80211 - to BAP0 failed\n",
1887 dev->name);
1888 local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
1889 schedule_work(&local->reset_queue);
1890 goto fail;
1891 }
1892
1893 ret = 0;
1894
1895fail:
1896 prism2_callback(local, PRISM2_CALLBACK_TX_END);
1897 return ret;
1898}
1899
1900
1901/* Some SMP systems have reported a number of odd errors with hostap_pci: the
1902 * fid register has changed value between consecutive reads for an unknown
1903 * reason. This should really not happen, so more debugging is needed. This
1904 * test version is a bit slower, but it will detect most such register changes
1905 * and will try to get the correct fid eventually. */
1906#define EXTRA_FID_READ_TESTS
1907
1908static inline u16 prism2_read_fid_reg(struct net_device *dev, u16 reg)
1909{
1910#ifdef EXTRA_FID_READ_TESTS
1911 u16 val, val2, val3;
1912 int i;
1913
1914 for (i = 0; i < 10; i++) {
1915 val = HFA384X_INW(reg);
1916 val2 = HFA384X_INW(reg);
1917 val3 = HFA384X_INW(reg);
1918
1919 if (val == val2 && val == val3)
1920 return val;
1921
1922 printk(KERN_DEBUG "%s: detected fid change (try=%d, reg=%04x):"
1923 " %04x %04x %04x\n",
1924 dev->name, i, reg, val, val2, val3);
1925 if ((val == val2 || val == val3) && val != 0)
1926 return val;
1927 if (val2 == val3 && val2 != 0)
1928 return val2;
1929 }
1930 printk(KERN_WARNING "%s: Uhhuh.. could not read good fid from reg "
1931 "%04x (%04x %04x %04x)\n", dev->name, reg, val, val2, val3);
1932 return val;
1933#else /* EXTRA_FID_READ_TESTS */
1934 return HFA384X_INW(reg);
1935#endif /* EXTRA_FID_READ_TESTS */
1936}
1937
1938
1939/* Called only as a tasklet (software IRQ) */
1940static void prism2_rx(local_info_t *local)
1941{
1942 struct net_device *dev = local->dev;
1943 int res, rx_pending = 0;
1944 u16 len, hdr_len, rxfid, status, macport;
1945 struct net_device_stats *stats;
1946 struct hfa384x_rx_frame rxdesc;
1947 struct sk_buff *skb = NULL;
1948
1949 prism2_callback(local, PRISM2_CALLBACK_RX_START);
1950 stats = hostap_get_stats(dev);
1951
1952 rxfid = prism2_read_fid_reg(dev, HFA384X_RXFID_OFF);
1953#ifndef final_version
1954 if (rxfid == 0) {
1955 rxfid = HFA384X_INW(HFA384X_RXFID_OFF);
1956 printk(KERN_DEBUG "prism2_rx: rxfid=0 (next 0x%04x)\n",
1957 rxfid);
1958 if (rxfid == 0) {
1959 schedule_work(&local->reset_queue);
1960 goto rx_dropped;
1961 }
1962 /* try to continue with the new rxfid value */
1963 }
1964#endif
1965
1966 spin_lock(&local->baplock);
1967 res = hfa384x_setup_bap(dev, BAP0, rxfid, 0);
1968 if (!res)
1969 res = hfa384x_from_bap(dev, BAP0, &rxdesc, sizeof(rxdesc));
1970
1971 if (res) {
1972 spin_unlock(&local->baplock);
1973 printk(KERN_DEBUG "%s: copy from BAP0 failed %d\n", dev->name,
1974 res);
1975 if (res == -ETIMEDOUT) {
1976 schedule_work(&local->reset_queue);
1977 }
1978 goto rx_dropped;
1979 }
1980
1981 len = le16_to_cpu(rxdesc.data_len);
1982 hdr_len = sizeof(rxdesc);
1983 status = le16_to_cpu(rxdesc.status);
1984 macport = (status >> 8) & 0x07;
1985
1986 /* Drop frames with too large a reported payload length. Monitor mode
1987 * seems to sometimes pass frames (e.g., ctrl::ack) with a signed,
1988 * negative length value, so also allow values 65522 .. 65534 (-14 .. -2)
1989 * for macport 7 */
1990 if (len > PRISM2_DATA_MAXLEN + 8 /* WEP */) {
1991 if (macport == 7 && local->iw_mode == IW_MODE_MONITOR) {
1992 if (len >= (u16) -14) {
1993 hdr_len -= 65535 - len;
1994 hdr_len--;
1995 }
1996 len = 0;
1997 } else {
1998 spin_unlock(&local->baplock);
1999 printk(KERN_DEBUG "%s: Received frame with invalid "
2000 "length 0x%04x\n", dev->name, len);
2001 hostap_dump_rx_header(dev->name, &rxdesc);
2002 goto rx_dropped;
2003 }
2004 }
2005
2006 skb = dev_alloc_skb(len + hdr_len);
2007 if (!skb) {
2008 spin_unlock(&local->baplock);
2009 printk(KERN_DEBUG "%s: RX failed to allocate skb\n",
2010 dev->name);
2011 goto rx_dropped;
2012 }
2013 skb->dev = dev;
2014 memcpy(skb_put(skb, hdr_len), &rxdesc, hdr_len);
2015
2016 if (len > 0)
2017 res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len);
2018 spin_unlock(&local->baplock);
2019 if (res) {
2020 printk(KERN_DEBUG "%s: RX failed to read "
2021 "frame data\n", dev->name);
2022 goto rx_dropped;
2023 }
2024
2025 skb_queue_tail(&local->rx_list, skb);
2026 tasklet_schedule(&local->rx_tasklet);
2027
2028 rx_exit:
2029 prism2_callback(local, PRISM2_CALLBACK_RX_END);
2030 if (!rx_pending) {
2031 HFA384X_OUTW(HFA384X_EV_RX, HFA384X_EVACK_OFF);
2032 }
2033
2034 return;
2035
2036 rx_dropped:
2037 stats->rx_dropped++;
2038 if (skb)
2039 dev_kfree_skb(skb);
2040 goto rx_exit;
2041}
2042
2043
2044/* Called only as a tasklet (software IRQ) */
2045static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
2046{
2047 struct hfa384x_rx_frame *rxdesc;
2048 struct net_device *dev = skb->dev;
2049 struct hostap_80211_rx_status stats;
2050 int hdrlen, rx_hdrlen;
2051
2052 rx_hdrlen = sizeof(*rxdesc);
2053 if (skb->len < sizeof(*rxdesc)) {
2054 /* Allow monitor mode to receive shorter frames */
2055 if (local->iw_mode == IW_MODE_MONITOR &&
2056 skb->len >= sizeof(*rxdesc) - 30) {
2057 rx_hdrlen = skb->len;
2058 } else {
2059 dev_kfree_skb(skb);
2060 return;
2061 }
2062 }
2063
2064 rxdesc = (struct hfa384x_rx_frame *) skb->data;
2065
2066 if (local->frame_dump & PRISM2_DUMP_RX_HDR &&
2067 skb->len >= sizeof(*rxdesc))
2068 hostap_dump_rx_header(dev->name, rxdesc);
2069
2070 if (le16_to_cpu(rxdesc->status) & HFA384X_RX_STATUS_FCSERR &&
2071 (!local->monitor_allow_fcserr ||
2072 local->iw_mode != IW_MODE_MONITOR))
2073 goto drop;
2074
2075 if (skb->len > PRISM2_DATA_MAXLEN) {
2076 printk(KERN_DEBUG "%s: RX: len(%d) > MAX(%d)\n",
2077 dev->name, skb->len, PRISM2_DATA_MAXLEN);
2078 goto drop;
2079 }
2080
2081 stats.mac_time = le32_to_cpu(rxdesc->time);
2082 stats.signal = rxdesc->signal - local->rssi_to_dBm;
2083 stats.noise = rxdesc->silence - local->rssi_to_dBm;
2084 stats.rate = rxdesc->rate;
2085
2086 /* Convert Prism2 RX structure into IEEE 802.11 header */
2087 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(rxdesc->frame_control));
2088 if (hdrlen > rx_hdrlen)
2089 hdrlen = rx_hdrlen;
2090
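	/* Strip the Prism2-specific part of the RX descriptor: pull the skb so
	 * that only the IEEE 802.11 header remains in front of the payload and
	 * copy the header fields into place. */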
2091 memmove(skb_pull(skb, rx_hdrlen - hdrlen),
2092 &rxdesc->frame_control, hdrlen);
2093
2094 hostap_80211_rx(dev, skb, &stats);
2095 return;
2096
2097 drop:
2098 dev_kfree_skb(skb);
2099}
2100
2101
2102/* Called only as a tasklet (software IRQ) */
2103static void hostap_rx_tasklet(unsigned long data)
2104{
2105 local_info_t *local = (local_info_t *) data;
2106 struct sk_buff *skb;
2107
2108 while ((skb = skb_dequeue(&local->rx_list)) != NULL)
2109 hostap_rx_skb(local, skb);
2110}
2111
2112
2113/* Called only from hardware IRQ */
2114static void prism2_alloc_ev(struct net_device *dev)
2115{
2116 struct hostap_interface *iface;
2117 local_info_t *local;
2118 int idx;
2119 u16 fid;
2120
2121 iface = netdev_priv(dev);
2122 local = iface->local;
2123
2124 fid = prism2_read_fid_reg(dev, HFA384X_ALLOCFID_OFF);
2125
2126 PDEBUG(DEBUG_FID, "FID: interrupt: ALLOC - fid=0x%04x\n", fid);
2127
2128 spin_lock(&local->txfidlock);
2129 idx = local->next_alloc;
2130
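	/* Walk the txfid ring starting at next_alloc until the FID reported by
	 * the Alloc event is found; that slot is marked empty and next_alloc
	 * is advanced past it. */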
2131 do {
2132 if (local->txfid[idx] == fid) {
2133 PDEBUG(DEBUG_FID, "FID: found matching txfid[%d]\n",
2134 idx);
2135
2136#ifndef final_version
2137 if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY)
2138 printk("Already released txfid found at idx "
2139 "%d\n", idx);
2140 if (local->intransmitfid[idx] == PRISM2_TXFID_RESERVED)
2141 printk("Already reserved txfid found at idx "
2142 "%d\n", idx);
2143#endif
2144 local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
2145 idx++;
2146 local->next_alloc = idx >= PRISM2_TXFID_COUNT ? 0 :
2147 idx;
2148
2149 if (!test_bit(HOSTAP_BITS_TRANSMIT, &local->bits) &&
2150 netif_queue_stopped(dev))
2151 netif_wake_queue(dev);
2152
2153 spin_unlock(&local->txfidlock);
2154 return;
2155 }
2156
2157 idx++;
2158 if (idx >= PRISM2_TXFID_COUNT)
2159 idx = 0;
2160 } while (idx != local->next_alloc);
2161
2162 printk(KERN_WARNING "%s: could not find matching txfid (0x%04x, new "
2163 "read 0x%04x) for alloc event\n", dev->name, fid,
2164 HFA384X_INW(HFA384X_ALLOCFID_OFF));
2165 printk(KERN_DEBUG "TXFIDs:");
2166 for (idx = 0; idx < PRISM2_TXFID_COUNT; idx++)
2167 printk(" %04x[%04x]", local->txfid[idx],
2168 local->intransmitfid[idx]);
2169 printk("\n");
2170 spin_unlock(&local->txfidlock);
2171
 2172	/* FIX: should probably schedule a reset; the reference to one txfid was
 2173	 * lost completely. Bad things will happen if we run out of txfids.
 2174	 * Actually, this will cause the netdev watchdog to notice a TX timeout
 2175	 * and then reset the card after all txfids have been leaked. */
2176}
2177
2178
2179/* Called only as a tasklet (software IRQ) */
2180static void hostap_tx_callback(local_info_t *local,
2181 struct hfa384x_tx_frame *txdesc, int ok,
2182 char *payload)
2183{
2184 u16 sw_support, hdrlen, len;
2185 struct sk_buff *skb;
2186 struct hostap_tx_callback_info *cb;
2187
2188 /* Make sure that frame was from us. */
2189 if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
2190 printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
2191 local->dev->name);
2192 return;
2193 }
2194
2195 sw_support = le16_to_cpu(txdesc->sw_support);
2196
2197 spin_lock(&local->lock);
2198 cb = local->tx_callback;
2199 while (cb != NULL && cb->idx != sw_support)
2200 cb = cb->next;
2201 spin_unlock(&local->lock);
2202
2203 if (cb == NULL) {
2204 printk(KERN_DEBUG "%s: could not find TX callback (idx %d)\n",
2205 local->dev->name, sw_support);
2206 return;
2207 }
2208
2209 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(txdesc->frame_control));
2210 len = le16_to_cpu(txdesc->data_len);
2211 skb = dev_alloc_skb(hdrlen + len);
2212 if (skb == NULL) {
2213 printk(KERN_DEBUG "%s: hostap_tx_callback failed to allocate "
2214 "skb\n", local->dev->name);
2215 return;
2216 }
2217
2218 memcpy(skb_put(skb, hdrlen), (void *) &txdesc->frame_control, hdrlen);
2219 if (payload)
2220 memcpy(skb_put(skb, len), payload, len);
2221
2222 skb->dev = local->dev;
2223 skb->mac.raw = skb->data;
2224
2225 cb->func(skb, ok, cb->data);
2226}
2227
2228
2229/* Called only as a tasklet (software IRQ) */
2230static int hostap_tx_compl_read(local_info_t *local, int error,
2231 struct hfa384x_tx_frame *txdesc,
2232 char **payload)
2233{
2234 u16 fid, len;
2235 int res, ret = 0;
2236 struct net_device *dev = local->dev;
2237
2238 fid = prism2_read_fid_reg(dev, HFA384X_TXCOMPLFID_OFF);
2239
 2240	PDEBUG(DEBUG_FID, "interrupt: TX (err=%d) - fid=0x%04x\n", error, fid);
2241
2242 spin_lock(&local->baplock);
2243 res = hfa384x_setup_bap(dev, BAP0, fid, 0);
2244 if (!res)
2245 res = hfa384x_from_bap(dev, BAP0, txdesc, sizeof(*txdesc));
2246 if (res) {
2247 PDEBUG(DEBUG_EXTRA, "%s: TX (err=%d) - fid=0x%04x - could not "
2248 "read txdesc\n", dev->name, error, fid);
2249 if (res == -ETIMEDOUT) {
2250 schedule_work(&local->reset_queue);
2251 }
2252 ret = -1;
2253 goto fail;
2254 }
2255 if (txdesc->sw_support) {
2256 len = le16_to_cpu(txdesc->data_len);
2257 if (len < PRISM2_DATA_MAXLEN) {
2258 *payload = (char *) kmalloc(len, GFP_ATOMIC);
2259 if (*payload == NULL ||
2260 hfa384x_from_bap(dev, BAP0, *payload, len)) {
2261 PDEBUG(DEBUG_EXTRA, "%s: could not read TX "
2262 "frame payload\n", dev->name);
2263 kfree(*payload);
2264 *payload = NULL;
2265 ret = -1;
2266 goto fail;
2267 }
2268 }
2269 }
2270
2271 fail:
2272 spin_unlock(&local->baplock);
2273
2274 return ret;
2275}
2276
2277
2278/* Called only as a tasklet (software IRQ) */
2279static void prism2_tx_ev(local_info_t *local)
2280{
2281 struct net_device *dev = local->dev;
2282 char *payload = NULL;
2283 struct hfa384x_tx_frame txdesc;
2284
2285 if (hostap_tx_compl_read(local, 0, &txdesc, &payload))
2286 goto fail;
2287
2288 if (local->frame_dump & PRISM2_DUMP_TX_HDR) {
2289 PDEBUG(DEBUG_EXTRA, "%s: TX - status=0x%04x "
2290 "retry_count=%d tx_rate=%d seq_ctrl=%d "
2291 "duration_id=%d\n",
2292 dev->name, le16_to_cpu(txdesc.status),
2293 txdesc.retry_count, txdesc.tx_rate,
2294 le16_to_cpu(txdesc.seq_ctrl),
2295 le16_to_cpu(txdesc.duration_id));
2296 }
2297
2298 if (txdesc.sw_support)
2299 hostap_tx_callback(local, &txdesc, 1, payload);
2300 kfree(payload);
2301
2302 fail:
2303 HFA384X_OUTW(HFA384X_EV_TX, HFA384X_EVACK_OFF);
2304}
2305
2306
2307/* Called only as a tasklet (software IRQ) */
2308static void hostap_sta_tx_exc_tasklet(unsigned long data)
2309{
2310 local_info_t *local = (local_info_t *) data;
2311 struct sk_buff *skb;
2312
2313 while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
2314 struct hfa384x_tx_frame *txdesc =
2315 (struct hfa384x_tx_frame *) skb->data;
2316
2317 if (skb->len >= sizeof(*txdesc)) {
 2318			/* Convert Prism2 TX frame structure into IEEE 802.11
 2319			 * header */
2320 u16 fc = le16_to_cpu(txdesc->frame_control);
2321 int hdrlen = hostap_80211_get_hdrlen(fc);
2322 memmove(skb_pull(skb, sizeof(*txdesc) - hdrlen),
2323 &txdesc->frame_control, hdrlen);
2324
2325 hostap_handle_sta_tx_exc(local, skb);
2326 }
2327 dev_kfree_skb(skb);
2328 }
2329}
2330
2331
2332/* Called only as a tasklet (software IRQ) */
2333static void prism2_txexc(local_info_t *local)
2334{
2335 struct net_device *dev = local->dev;
2336 u16 status, fc;
2337 int show_dump, res;
2338 char *payload = NULL;
2339 struct hfa384x_tx_frame txdesc;
2340
2341 show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR;
2342 local->stats.tx_errors++;
2343
2344 res = hostap_tx_compl_read(local, 1, &txdesc, &payload);
2345 HFA384X_OUTW(HFA384X_EV_TXEXC, HFA384X_EVACK_OFF);
2346 if (res)
2347 return;
2348
2349 status = le16_to_cpu(txdesc.status);
2350
 2351	/* We produce a TXDROP event only for retry or lifetime
 2352	 * exceeded, because those are the only statuses that really mean
 2353	 * that this particular node went away.
 2354	 * Other errors mean that *we* screwed up. - Jean II */
2355 if (status & (HFA384X_TX_STATUS_RETRYERR | HFA384X_TX_STATUS_AGEDERR))
2356 {
2357 union iwreq_data wrqu;
2358
2359 /* Copy 802.11 dest address. */
2360 memcpy(wrqu.addr.sa_data, txdesc.addr1, ETH_ALEN);
2361 wrqu.addr.sa_family = ARPHRD_ETHER;
2362 wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
2363 } else
2364 show_dump = 1;
2365
2366 if (local->iw_mode == IW_MODE_MASTER ||
2367 local->iw_mode == IW_MODE_REPEAT ||
2368 local->wds_type & HOSTAP_WDS_AP_CLIENT) {
2369 struct sk_buff *skb;
2370 skb = dev_alloc_skb(sizeof(txdesc));
2371 if (skb) {
2372 memcpy(skb_put(skb, sizeof(txdesc)), &txdesc,
2373 sizeof(txdesc));
2374 skb_queue_tail(&local->sta_tx_exc_list, skb);
2375 tasklet_schedule(&local->sta_tx_exc_tasklet);
2376 }
2377 }
2378
2379 if (txdesc.sw_support)
2380 hostap_tx_callback(local, &txdesc, 0, payload);
2381 kfree(payload);
2382
2383 if (!show_dump)
2384 return;
2385
2386 PDEBUG(DEBUG_EXTRA, "%s: TXEXC - status=0x%04x (%s%s%s%s)"
2387 " tx_control=%04x\n",
2388 dev->name, status,
2389 status & HFA384X_TX_STATUS_RETRYERR ? "[RetryErr]" : "",
2390 status & HFA384X_TX_STATUS_AGEDERR ? "[AgedErr]" : "",
2391 status & HFA384X_TX_STATUS_DISCON ? "[Discon]" : "",
2392 status & HFA384X_TX_STATUS_FORMERR ? "[FormErr]" : "",
2393 le16_to_cpu(txdesc.tx_control));
2394
2395 fc = le16_to_cpu(txdesc.frame_control);
2396 PDEBUG(DEBUG_EXTRA, " retry_count=%d tx_rate=%d fc=0x%04x "
2397 "(%s%s%s::%d%s%s)\n",
2398 txdesc.retry_count, txdesc.tx_rate, fc,
2399 WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_MGMT ? "Mgmt" : "",
2400 WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_CTL ? "Ctrl" : "",
2401 WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA ? "Data" : "",
2402 WLAN_FC_GET_STYPE(fc) >> 4,
2403 fc & IEEE80211_FCTL_TODS ? " ToDS" : "",
2404 fc & IEEE80211_FCTL_FROMDS ? " FromDS" : "");
2405 PDEBUG(DEBUG_EXTRA, " A1=" MACSTR " A2=" MACSTR " A3="
2406 MACSTR " A4=" MACSTR "\n",
2407 MAC2STR(txdesc.addr1), MAC2STR(txdesc.addr2),
2408 MAC2STR(txdesc.addr3), MAC2STR(txdesc.addr4));
2409}
2410
2411
2412/* Called only as a tasklet (software IRQ) */
2413static void hostap_info_tasklet(unsigned long data)
2414{
2415 local_info_t *local = (local_info_t *) data;
2416 struct sk_buff *skb;
2417
2418 while ((skb = skb_dequeue(&local->info_list)) != NULL) {
2419 hostap_info_process(local, skb);
2420 dev_kfree_skb(skb);
2421 }
2422}
2423
2424
2425/* Called only as a tasklet (software IRQ) */
2426static void prism2_info(local_info_t *local)
2427{
2428 struct net_device *dev = local->dev;
2429 u16 fid;
2430 int res, left;
2431 struct hfa384x_info_frame info;
2432 struct sk_buff *skb;
2433
2434 fid = HFA384X_INW(HFA384X_INFOFID_OFF);
2435
2436 spin_lock(&local->baplock);
2437 res = hfa384x_setup_bap(dev, BAP0, fid, 0);
2438 if (!res)
2439 res = hfa384x_from_bap(dev, BAP0, &info, sizeof(info));
2440 if (res) {
2441 spin_unlock(&local->baplock);
2442 printk(KERN_DEBUG "Could not get info frame (fid=0x%04x)\n",
2443 fid);
2444 if (res == -ETIMEDOUT) {
2445 schedule_work(&local->reset_queue);
2446 }
2447 goto out;
2448 }
2449
2450 le16_to_cpus(&info.len);
2451 le16_to_cpus(&info.type);
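	/* info.len is in 16-bit words and covers the type word but not the
	 * length word itself, so (len - 1) * 2 is the number of payload bytes
	 * still to be read from the BAP. */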
2452 left = (info.len - 1) * 2;
2453
2454 if (info.len & 0x8000 || info.len == 0 || left > 2060) {
 2455		/* The data register seems to give 0x8000 in some error cases even
 2456		 * though the busy bit is not set in the offset register;
 2457		 * in addition, the length must be at least 1 due to the type field */
2458 spin_unlock(&local->baplock);
2459 printk(KERN_DEBUG "%s: Received info frame with invalid "
2460 "length 0x%04x (type 0x%04x)\n", dev->name, info.len,
2461 info.type);
2462 goto out;
2463 }
2464
2465 skb = dev_alloc_skb(sizeof(info) + left);
2466 if (skb == NULL) {
2467 spin_unlock(&local->baplock);
2468 printk(KERN_DEBUG "%s: Could not allocate skb for info "
2469 "frame\n", dev->name);
2470 goto out;
2471 }
2472
2473 memcpy(skb_put(skb, sizeof(info)), &info, sizeof(info));
2474 if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left))
2475 {
2476 spin_unlock(&local->baplock);
2477 printk(KERN_WARNING "%s: Info frame read failed (fid=0x%04x, "
2478 "len=0x%04x, type=0x%04x\n",
2479 dev->name, fid, info.len, info.type);
2480 dev_kfree_skb(skb);
2481 goto out;
2482 }
2483 spin_unlock(&local->baplock);
2484
2485 skb_queue_tail(&local->info_list, skb);
2486 tasklet_schedule(&local->info_tasklet);
2487
2488 out:
2489 HFA384X_OUTW(HFA384X_EV_INFO, HFA384X_EVACK_OFF);
2490}
2491
2492
2493/* Called only as a tasklet (software IRQ) */
2494static void hostap_bap_tasklet(unsigned long data)
2495{
2496 local_info_t *local = (local_info_t *) data;
2497 struct net_device *dev = local->dev;
2498 u16 ev;
2499 int frames = 30;
2500
2501 if (local->func->card_present && !local->func->card_present(local))
2502 return;
2503
2504 set_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
2505
2506 /* Process all pending BAP events without generating new interrupts
2507 * for them */
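	/* The frames counter bounds how many events are handled in one tasklet
	 * run, presumably so that a busy card cannot monopolize softirq time. */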
2508 while (frames-- > 0) {
2509 ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
2510 if (ev == 0xffff || !(ev & HFA384X_BAP0_EVENTS))
2511 break;
2512 if (ev & HFA384X_EV_RX)
2513 prism2_rx(local);
2514 if (ev & HFA384X_EV_INFO)
2515 prism2_info(local);
2516 if (ev & HFA384X_EV_TX)
2517 prism2_tx_ev(local);
2518 if (ev & HFA384X_EV_TXEXC)
2519 prism2_txexc(local);
2520 }
2521
2522 set_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
2523 clear_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
2524
2525 /* Enable interrupts for new BAP events */
2526 hfa384x_events_all(dev);
2527 clear_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
2528}
2529
2530
2531/* Called only from hardware IRQ */
2532static void prism2_infdrop(struct net_device *dev)
2533{
2534 static unsigned long last_inquire = 0;
2535
2536 PDEBUG(DEBUG_EXTRA, "%s: INFDROP event\n", dev->name);
2537
 2538	/* Some firmware versions seem to get stuck with
 2539	 * full CommTallies under high traffic load; every
 2540	 * packet then causes an INFDROP event and the CommTallies
 2541	 * info frame is not sent automatically. Try to
 2542	 * get out of this state by inquiring CommTallies. */
2543 if (!last_inquire || time_after(jiffies, last_inquire + HZ)) {
2544 hfa384x_cmd_callback(dev, HFA384X_CMDCODE_INQUIRE,
2545 HFA384X_INFO_COMMTALLIES, NULL, 0);
2546 last_inquire = jiffies;
2547 }
2548}
2549
2550
2551/* Called only from hardware IRQ */
2552static void prism2_ev_tick(struct net_device *dev)
2553{
2554 struct hostap_interface *iface;
2555 local_info_t *local;
2556 u16 evstat, inten;
2557 static int prev_stuck = 0;
2558
2559 iface = netdev_priv(dev);
2560 local = iface->local;
2561
2562 if (time_after(jiffies, local->last_tick_timer + 5 * HZ) &&
2563 local->last_tick_timer) {
2564 evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
2565 inten = HFA384X_INW(HFA384X_INTEN_OFF);
2566 if (!prev_stuck) {
2567 printk(KERN_INFO "%s: SW TICK stuck? "
2568 "bits=0x%lx EvStat=%04x IntEn=%04x\n",
2569 dev->name, local->bits, evstat, inten);
2570 }
2571 local->sw_tick_stuck++;
2572 if ((evstat & HFA384X_BAP0_EVENTS) &&
2573 (inten & HFA384X_BAP0_EVENTS)) {
2574 printk(KERN_INFO "%s: trying to recover from IRQ "
2575 "hang\n", dev->name);
2576 hfa384x_events_no_bap0(dev);
2577 }
2578 prev_stuck = 1;
2579 } else
2580 prev_stuck = 0;
2581}
2582
2583
2584/* Called only from hardware IRQ */
2585static inline void prism2_check_magic(local_info_t *local)
2586{
 2587	/* At least PCI Prism2.5 with bus mastering seems to sometimes
 2588	 * return 0x0000 in SWSUPPORT0 for an unknown reason, but re-reading the
 2589	 * register once or twice seems to get the correct value. PCI cards
 2590	 * cannot be removed during normal operation anyway, so there is not
 2591	 * really any need for this verification with them. */
2592
2593#ifndef PRISM2_PCI
2594#ifndef final_version
2595 static unsigned long last_magic_err = 0;
2596 struct net_device *dev = local->dev;
2597
2598 if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
2599 if (!local->hw_ready)
2600 return;
2601 HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
2602 if (time_after(jiffies, last_magic_err + 10 * HZ)) {
2603 printk("%s: Interrupt, but SWSUPPORT0 does not match: "
2604 "%04X != %04X - card removed?\n", dev->name,
2605 HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
2606 HFA384X_MAGIC);
2607 last_magic_err = jiffies;
2608 } else if (net_ratelimit()) {
2609 printk(KERN_DEBUG "%s: interrupt - SWSUPPORT0=%04x "
2610 "MAGIC=%04x\n", dev->name,
2611 HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
2612 HFA384X_MAGIC);
2613 }
2614 if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != 0xffff)
2615 schedule_work(&local->reset_queue);
2616 return;
2617 }
2618#endif /* final_version */
2619#endif /* !PRISM2_PCI */
2620}
2621
2622
2623/* Called only from hardware IRQ */
2624static irqreturn_t prism2_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2625{
2626 struct net_device *dev = (struct net_device *) dev_id;
2627 struct hostap_interface *iface;
2628 local_info_t *local;
2629 int events = 0;
2630 u16 ev;
2631
2632 iface = netdev_priv(dev);
2633 local = iface->local;
2634
2635 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
2636
2637 if (local->func->card_present && !local->func->card_present(local)) {
2638 if (net_ratelimit()) {
2639 printk(KERN_DEBUG "%s: Interrupt, but dev not OK\n",
2640 dev->name);
2641 }
2642 return IRQ_HANDLED;
2643 }
2644
2645 prism2_check_magic(local);
2646
2647 for (;;) {
2648 ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
2649 if (ev == 0xffff) {
2650 if (local->shutdown)
2651 return IRQ_HANDLED;
2652 HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
2653 printk(KERN_DEBUG "%s: prism2_interrupt: ev=0xffff\n",
2654 dev->name);
2655 return IRQ_HANDLED;
2656 }
2657
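		/* Consider only events that are currently enabled in the
		 * interrupt enable register; masked events are not handled
		 * here. */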
2658 ev &= HFA384X_INW(HFA384X_INTEN_OFF);
2659 if (ev == 0)
2660 break;
2661
2662 if (ev & HFA384X_EV_CMD) {
2663 prism2_cmd_ev(dev);
2664 }
2665
2666 /* Above events are needed even before hw is ready, but other
2667 * events should be skipped during initialization. This may
2668 * change for AllocEv if allocate_fid is implemented without
2669 * busy waiting. */
2670 if (!local->hw_ready || local->hw_resetting ||
2671 !local->dev_enabled) {
2672 ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
2673 if (ev & HFA384X_EV_CMD)
2674 goto next_event;
2675 if ((ev & HFA384X_EVENT_MASK) == 0)
2676 return IRQ_HANDLED;
2677 if (local->dev_enabled && (ev & ~HFA384X_EV_TICK) &&
2678 net_ratelimit()) {
2679 printk(KERN_DEBUG "%s: prism2_interrupt: hw "
2680 "not ready; skipping events 0x%04x "
2681 "(IntEn=0x%04x)%s%s%s\n",
2682 dev->name, ev,
2683 HFA384X_INW(HFA384X_INTEN_OFF),
2684 !local->hw_ready ? " (!hw_ready)" : "",
2685 local->hw_resetting ?
2686 " (hw_resetting)" : "",
2687 !local->dev_enabled ?
2688 " (!dev_enabled)" : "");
2689 }
2690 HFA384X_OUTW(ev, HFA384X_EVACK_OFF);
2691 return IRQ_HANDLED;
2692 }
2693
2694 if (ev & HFA384X_EV_TICK) {
2695 prism2_ev_tick(dev);
2696 HFA384X_OUTW(HFA384X_EV_TICK, HFA384X_EVACK_OFF);
2697 }
2698
2699 if (ev & HFA384X_EV_ALLOC) {
2700 prism2_alloc_ev(dev);
2701 HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
2702 }
2703
 2704		/* Reading data from the card is quite time-consuming, so do it
 2705		 * in tasklets. TX, TXEXC, RX, and INFO events will be ACKed
 2706		 * and unmasked after the needed data has been read completely. */
2707 if (ev & HFA384X_BAP0_EVENTS) {
2708 hfa384x_events_no_bap0(dev);
2709 tasklet_schedule(&local->bap_tasklet);
2710 }
2711
2712#ifndef final_version
2713 if (ev & HFA384X_EV_WTERR) {
2714 PDEBUG(DEBUG_EXTRA, "%s: WTERR event\n", dev->name);
2715 HFA384X_OUTW(HFA384X_EV_WTERR, HFA384X_EVACK_OFF);
2716 }
2717#endif /* final_version */
2718
2719 if (ev & HFA384X_EV_INFDROP) {
2720 prism2_infdrop(dev);
2721 HFA384X_OUTW(HFA384X_EV_INFDROP, HFA384X_EVACK_OFF);
2722 }
2723
2724 next_event:
2725 events++;
2726 if (events >= PRISM2_MAX_INTERRUPT_EVENTS) {
2727 PDEBUG(DEBUG_EXTRA, "prism2_interrupt: >%d events "
2728 "(EvStat=0x%04x)\n",
2729 PRISM2_MAX_INTERRUPT_EVENTS,
2730 HFA384X_INW(HFA384X_EVSTAT_OFF));
2731 break;
2732 }
2733 }
2734 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 1);
2735 return IRQ_RETVAL(events);
2736}
2737
2738
2739static void prism2_check_sta_fw_version(local_info_t *local)
2740{
2741 struct hfa384x_comp_ident comp;
2742 int id, variant, major, minor;
2743
2744 if (hfa384x_get_rid(local->dev, HFA384X_RID_STAID,
2745 &comp, sizeof(comp), 1) < 0)
2746 return;
2747
2748 local->fw_ap = 0;
2749 id = le16_to_cpu(comp.id);
2750 if (id != HFA384X_COMP_ID_STA) {
2751 if (id == HFA384X_COMP_ID_FW_AP)
2752 local->fw_ap = 1;
2753 return;
2754 }
2755
2756 major = __le16_to_cpu(comp.major);
2757 minor = __le16_to_cpu(comp.minor);
2758 variant = __le16_to_cpu(comp.variant);
2759 local->sta_fw_ver = PRISM2_FW_VER(major, minor, variant);
2760
 2761	/* Station firmware versions before 1.4.x seem to have a bug in
 2762	 * firmware-based WEP encryption when using Host AP mode, so use
 2763	 * host_encrypt as a default for them. Firmware version 1.4.9 is the
 2764	 * first one that has been seen to produce correct encryption, but the
 2765	 * bug might have been fixed before that (although at least 1.4.2 is
 2766	 * broken). */
2767 local->fw_encrypt_ok = local->sta_fw_ver >= PRISM2_FW_VER(1,4,9);
2768
2769 if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
2770 !local->fw_encrypt_ok) {
2771 printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
2772 "a workaround for firmware bug in Host AP mode WEP\n",
2773 local->dev->name);
2774 local->host_encrypt = 1;
2775 }
2776
 2777	/* IEEE 802.11 standard-compliant WDS frames (4 addresses) were broken
 2778	 * in station firmware versions before 1.5.x. With these versions, the
 2779	 * driver uses a workaround with a bogus frame format (4th address after
 2780	 * the payload). This is not compatible with other AP devices. Since
 2781	 * the firmware bug is fixed in the latest station firmware versions,
 2782	 * automatically enable standard-compliant mode for cards using station
 2783	 * firmware version 1.5.0 or newer. */
2784 if (local->sta_fw_ver >= PRISM2_FW_VER(1,5,0))
2785 local->wds_type |= HOSTAP_WDS_STANDARD_FRAME;
2786 else {
2787 printk(KERN_DEBUG "%s: defaulting to bogus WDS frame as a "
2788 "workaround for firmware bug in Host AP mode WDS\n",
2789 local->dev->name);
2790 }
2791
2792 hostap_check_sta_fw_version(local->ap, local->sta_fw_ver);
2793}
2794
2795
2796static void prism2_crypt_deinit_entries(local_info_t *local, int force)
2797{
2798 struct list_head *ptr, *n;
2799 struct ieee80211_crypt_data *entry;
2800
2801 for (ptr = local->crypt_deinit_list.next, n = ptr->next;
2802 ptr != &local->crypt_deinit_list; ptr = n, n = ptr->next) {
2803 entry = list_entry(ptr, struct ieee80211_crypt_data, list);
2804
2805 if (atomic_read(&entry->refcnt) != 0 && !force)
2806 continue;
2807
2808 list_del(ptr);
2809
2810 if (entry->ops)
2811 entry->ops->deinit(entry->priv);
2812 kfree(entry);
2813 }
2814}
2815
2816
2817static void prism2_crypt_deinit_handler(unsigned long data)
2818{
2819 local_info_t *local = (local_info_t *) data;
2820 unsigned long flags;
2821
2822 spin_lock_irqsave(&local->lock, flags);
2823 prism2_crypt_deinit_entries(local, 0);
2824 if (!list_empty(&local->crypt_deinit_list)) {
2825 printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
2826 "deletion list\n", local->dev->name);
2827 local->crypt_deinit_timer.expires = jiffies + HZ;
2828 add_timer(&local->crypt_deinit_timer);
2829 }
2830 spin_unlock_irqrestore(&local->lock, flags);
2831
2832}
2833
2834
2835static void hostap_passive_scan(unsigned long data)
2836{
2837 local_info_t *local = (local_info_t *) data;
2838 struct net_device *dev = local->dev;
2839 u16 channel;
2840
2841 if (local->passive_scan_interval <= 0)
2842 return;
2843
2844 if (local->passive_scan_state == PASSIVE_SCAN_LISTEN) {
2845 int max_tries = 16;
2846
 2847		/* Even though the host system does not really know when the WLAN
 2848		 * MAC is sending frames, try to avoid changing channels for
 2849		 * passive scanning while a host-generated frame is being
 2850		 * transmitted */
2851 if (test_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
2852 printk(KERN_DEBUG "%s: passive scan detected pending "
2853 "TX - delaying\n", dev->name);
2854 local->passive_scan_timer.expires = jiffies + HZ / 10;
2855 add_timer(&local->passive_scan_timer);
2856 return;
2857 }
2858
2859 do {
2860 local->passive_scan_channel++;
2861 if (local->passive_scan_channel > 14)
2862 local->passive_scan_channel = 1;
2863 max_tries--;
2864 } while (!(local->channel_mask &
2865 (1 << (local->passive_scan_channel - 1))) &&
2866 max_tries > 0);
2867
2868 if (max_tries == 0) {
2869 printk(KERN_INFO "%s: no allowed passive scan channels"
2870 " found\n", dev->name);
2871 return;
2872 }
2873
2874 printk(KERN_DEBUG "%s: passive scan channel %d\n",
2875 dev->name, local->passive_scan_channel);
2876 channel = local->passive_scan_channel;
2877 local->passive_scan_state = PASSIVE_SCAN_WAIT;
2878 local->passive_scan_timer.expires = jiffies + HZ / 10;
2879 } else {
2880 channel = local->channel;
2881 local->passive_scan_state = PASSIVE_SCAN_LISTEN;
2882 local->passive_scan_timer.expires = jiffies +
2883 local->passive_scan_interval * HZ;
2884 }
2885
2886 if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST |
2887 (HFA384X_TEST_CHANGE_CHANNEL << 8),
2888 channel, NULL, 0))
2889 printk(KERN_ERR "%s: passive scan channel set %d "
2890 "failed\n", dev->name, channel);
2891
2892 add_timer(&local->passive_scan_timer);
2893}
2894
2895
2896/* Called only as a scheduled task when communications quality values should
2897 * be updated. */
2898static void handle_comms_qual_update(void *data)
2899{
2900 local_info_t *local = data;
2901 prism2_update_comms_qual(local->dev);
2902}
2903
2904
2905/* Software watchdog - called as a timer. Hardware interrupt (Tick event) is
2906 * used to monitor that local->last_tick_timer is being updated. If not,
 2907 * an interrupt busy-loop is assumed and the driver tries to recover by
 2908 * masking out some events. */
2909static void hostap_tick_timer(unsigned long data)
2910{
2911 static unsigned long last_inquire = 0;
2912 local_info_t *local = (local_info_t *) data;
2913 local->last_tick_timer = jiffies;
2914
2915 /* Inquire CommTallies every 10 seconds to keep the statistics updated
2916 * more often during low load and when using 32-bit tallies. */
2917 if ((!last_inquire || time_after(jiffies, last_inquire + 10 * HZ)) &&
2918 !local->hw_downloading && local->hw_ready &&
2919 !local->hw_resetting && local->dev_enabled) {
2920 hfa384x_cmd_callback(local->dev, HFA384X_CMDCODE_INQUIRE,
2921 HFA384X_INFO_COMMTALLIES, NULL, 0);
2922 last_inquire = jiffies;
2923 }
2924
2925 if ((local->last_comms_qual_update == 0 ||
2926 time_after(jiffies, local->last_comms_qual_update + 10 * HZ)) &&
2927 (local->iw_mode == IW_MODE_INFRA ||
2928 local->iw_mode == IW_MODE_ADHOC)) {
2929 schedule_work(&local->comms_qual_update);
2930 }
2931
2932 local->tick_timer.expires = jiffies + 2 * HZ;
2933 add_timer(&local->tick_timer);
2934}
2935
2936
2937#ifndef PRISM2_NO_PROCFS_DEBUG
2938static int prism2_registers_proc_read(char *page, char **start, off_t off,
2939 int count, int *eof, void *data)
2940{
2941 char *p = page;
2942 local_info_t *local = (local_info_t *) data;
2943
2944 if (off != 0) {
2945 *eof = 1;
2946 return 0;
2947 }
2948
2949#define SHOW_REG(n) \
2950p += sprintf(p, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
2951
2952 SHOW_REG(CMD);
2953 SHOW_REG(PARAM0);
2954 SHOW_REG(PARAM1);
2955 SHOW_REG(PARAM2);
2956 SHOW_REG(STATUS);
2957 SHOW_REG(RESP0);
2958 SHOW_REG(RESP1);
2959 SHOW_REG(RESP2);
2960 SHOW_REG(INFOFID);
2961 SHOW_REG(CONTROL);
2962 SHOW_REG(SELECT0);
2963 SHOW_REG(SELECT1);
2964 SHOW_REG(OFFSET0);
2965 SHOW_REG(OFFSET1);
2966 SHOW_REG(RXFID);
2967 SHOW_REG(ALLOCFID);
2968 SHOW_REG(TXCOMPLFID);
2969 SHOW_REG(SWSUPPORT0);
2970 SHOW_REG(SWSUPPORT1);
2971 SHOW_REG(SWSUPPORT2);
2972 SHOW_REG(EVSTAT);
2973 SHOW_REG(INTEN);
2974 SHOW_REG(EVACK);
2975 /* Do not read data registers, because they change the state of the
2976 * MAC (offset += 2) */
2977 /* SHOW_REG(DATA0); */
2978 /* SHOW_REG(DATA1); */
2979 SHOW_REG(AUXPAGE);
2980 SHOW_REG(AUXOFFSET);
2981 /* SHOW_REG(AUXDATA); */
2982#ifdef PRISM2_PCI
2983 SHOW_REG(PCICOR);
2984 SHOW_REG(PCIHCR);
2985 SHOW_REG(PCI_M0_ADDRH);
2986 SHOW_REG(PCI_M0_ADDRL);
2987 SHOW_REG(PCI_M0_LEN);
2988 SHOW_REG(PCI_M0_CTL);
2989 SHOW_REG(PCI_STATUS);
2990 SHOW_REG(PCI_M1_ADDRH);
2991 SHOW_REG(PCI_M1_ADDRL);
2992 SHOW_REG(PCI_M1_LEN);
2993 SHOW_REG(PCI_M1_CTL);
2994#endif /* PRISM2_PCI */
2995
2996 return (p - page);
2997}
2998#endif /* PRISM2_NO_PROCFS_DEBUG */
2999
3000
3001struct set_tim_data {
3002 struct list_head list;
3003 int aid;
3004 int set;
3005};
3006
3007static int prism2_set_tim(struct net_device *dev, int aid, int set)
3008{
3009 struct list_head *ptr;
3010 struct set_tim_data *new_entry;
3011 struct hostap_interface *iface;
3012 local_info_t *local;
3013
3014 iface = netdev_priv(dev);
3015 local = iface->local;
3016
3017 new_entry = (struct set_tim_data *)
3018 kmalloc(sizeof(*new_entry), GFP_ATOMIC);
3019 if (new_entry == NULL) {
3020 printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
3021 local->dev->name);
3022 return -ENOMEM;
3023 }
3024 memset(new_entry, 0, sizeof(*new_entry));
3025 new_entry->aid = aid;
3026 new_entry->set = set;
3027
3028 spin_lock_bh(&local->set_tim_lock);
3029 list_for_each(ptr, &local->set_tim_list) {
3030 struct set_tim_data *entry =
3031 list_entry(ptr, struct set_tim_data, list);
3032 if (entry->aid == aid) {
3033 PDEBUG(DEBUG_PS2, "%s: prism2_set_tim: aid=%d "
3034 "set=%d ==> %d\n",
3035 local->dev->name, aid, entry->set, set);
3036 entry->set = set;
3037 kfree(new_entry);
3038 new_entry = NULL;
3039 break;
3040 }
3041 }
3042 if (new_entry)
3043 list_add_tail(&new_entry->list, &local->set_tim_list);
3044 spin_unlock_bh(&local->set_tim_lock);
3045
3046 schedule_work(&local->set_tim_queue);
3047
3048 return 0;
3049}
3050
3051
3052static void handle_set_tim_queue(void *data)
3053{
3054 local_info_t *local = (local_info_t *) data;
3055 struct set_tim_data *entry;
3056 u16 val;
3057
3058 for (;;) {
3059 entry = NULL;
3060 spin_lock_bh(&local->set_tim_lock);
3061 if (!list_empty(&local->set_tim_list)) {
3062 entry = list_entry(local->set_tim_list.next,
3063 struct set_tim_data, list);
3064 list_del(&entry->list);
3065 }
3066 spin_unlock_bh(&local->set_tim_lock);
3067 if (!entry)
3068 break;
3069
3070 PDEBUG(DEBUG_PS2, "%s: handle_set_tim_queue: aid=%d set=%d\n",
3071 local->dev->name, entry->aid, entry->set);
3072
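		/* The TIM control RID takes the AID in the low bits; bit
		 * 0x8000 apparently selects whether the TIM bit is set (1)
		 * or cleared (0). */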
3073 val = entry->aid;
3074 if (entry->set)
3075 val |= 0x8000;
3076 if (hostap_set_word(local->dev, HFA384X_RID_CNFTIMCTRL, val)) {
3077 printk(KERN_DEBUG "%s: set_tim failed (aid=%d "
3078 "set=%d)\n",
3079 local->dev->name, entry->aid, entry->set);
3080 }
3081
3082 kfree(entry);
3083 }
3084}
3085
3086
3087static void prism2_clear_set_tim_queue(local_info_t *local)
3088{
3089 struct list_head *ptr, *n;
3090
3091 list_for_each_safe(ptr, n, &local->set_tim_list) {
3092 struct set_tim_data *entry;
3093 entry = list_entry(ptr, struct set_tim_data, list);
3094 list_del(&entry->list);
3095 kfree(entry);
3096 }
3097}
3098
3099
3100static struct net_device *
3101prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
3102 struct device *sdev)
3103{
3104 struct net_device *dev;
3105 struct hostap_interface *iface;
3106 struct local_info *local;
3107 int len, i, ret;
3108
3109 if (funcs == NULL)
3110 return NULL;
3111
3112 len = strlen(dev_template);
3113 if (len >= IFNAMSIZ || strstr(dev_template, "%d") == NULL) {
3114 printk(KERN_WARNING "hostap: Invalid dev_template='%s'\n",
3115 dev_template);
3116 return NULL;
3117 }
3118
3119 len = sizeof(struct hostap_interface) +
3120 3 + sizeof(struct local_info) +
3121 3 + sizeof(struct ap_data);
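	/* The extra 3 bytes per structure leave room for the 4-byte alignment
	 * rounding applied below. */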
3122
3123 dev = alloc_etherdev(len);
3124 if (dev == NULL)
3125 return NULL;
3126
3127 iface = netdev_priv(dev);
3128 local = (struct local_info *) ((((long) (iface + 1)) + 3) & ~3);
3129 local->ap = (struct ap_data *) ((((long) (local + 1)) + 3) & ~3);
3130 local->dev = iface->dev = dev;
3131 iface->local = local;
3132 iface->type = HOSTAP_INTERFACE_MASTER;
3133 INIT_LIST_HEAD(&local->hostap_interfaces);
3134
3135 local->hw_module = THIS_MODULE;
3136
3137#ifdef PRISM2_IO_DEBUG
3138 local->io_debug_enabled = 1;
3139#endif /* PRISM2_IO_DEBUG */
3140
3141 local->func = funcs;
3142 local->func->cmd = hfa384x_cmd;
3143 local->func->read_regs = hfa384x_read_regs;
3144 local->func->get_rid = hfa384x_get_rid;
3145 local->func->set_rid = hfa384x_set_rid;
3146 local->func->hw_enable = prism2_hw_enable;
3147 local->func->hw_config = prism2_hw_config;
3148 local->func->hw_reset = prism2_hw_reset;
3149 local->func->hw_shutdown = prism2_hw_shutdown;
3150 local->func->reset_port = prism2_reset_port;
3151 local->func->schedule_reset = prism2_schedule_reset;
3152#ifdef PRISM2_DOWNLOAD_SUPPORT
3153 local->func->read_aux = prism2_download_aux_dump;
3154 local->func->download = prism2_download;
3155#endif /* PRISM2_DOWNLOAD_SUPPORT */
3156 local->func->tx = prism2_tx_80211;
3157 local->func->set_tim = prism2_set_tim;
 3158	local->func->need_tx_headroom = 0; /* no need to add txdesc in
 3159					    * skb->data (FIX: maybe needed for
 3160					    * DMA bus mastering?) */
3161
3162 local->mtu = mtu;
3163
3164 rwlock_init(&local->iface_lock);
3165 spin_lock_init(&local->txfidlock);
3166 spin_lock_init(&local->cmdlock);
3167 spin_lock_init(&local->baplock);
3168 spin_lock_init(&local->lock);
3169 init_MUTEX(&local->rid_bap_sem);
3170
3171 if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
3172 card_idx = 0;
3173 local->card_idx = card_idx;
3174
3175 len = strlen(essid);
3176 memcpy(local->essid, essid,
3177 len > MAX_SSID_LEN ? MAX_SSID_LEN : len);
3178 local->essid[MAX_SSID_LEN] = '\0';
3179 i = GET_INT_PARM(iw_mode, card_idx);
3180 if ((i >= IW_MODE_ADHOC && i <= IW_MODE_REPEAT) ||
3181 i == IW_MODE_MONITOR) {
3182 local->iw_mode = i;
3183 } else {
3184 printk(KERN_WARNING "prism2: Unknown iw_mode %d; using "
3185 "IW_MODE_MASTER\n", i);
3186 local->iw_mode = IW_MODE_MASTER;
3187 }
3188 local->channel = GET_INT_PARM(channel, card_idx);
3189 local->beacon_int = GET_INT_PARM(beacon_int, card_idx);
3190 local->dtim_period = GET_INT_PARM(dtim_period, card_idx);
3191 local->wds_max_connections = 16;
3192 local->tx_control = HFA384X_TX_CTRL_FLAGS;
3193 local->manual_retry_count = -1;
3194 local->rts_threshold = 2347;
3195 local->fragm_threshold = 2346;
 3196	local->rssi_to_dBm = 100; /* default; to be overridden by
 3197				   * cnfDbmAdjust, if available */
3198 local->auth_algs = PRISM2_AUTH_OPEN | PRISM2_AUTH_SHARED_KEY;
3199 local->sram_type = -1;
3200 local->scan_channel_mask = 0xffff;
3201
3202 /* Initialize task queue structures */
3203 INIT_WORK(&local->reset_queue, handle_reset_queue, local);
3204 INIT_WORK(&local->set_multicast_list_queue,
3205 hostap_set_multicast_list_queue, local->dev);
3206
3207 INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
3208 INIT_LIST_HEAD(&local->set_tim_list);
3209 spin_lock_init(&local->set_tim_lock);
3210
3211 INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
3212
3213 /* Initialize tasklets for handling hardware IRQ related operations
3214 * outside hw IRQ handler */
3215#define HOSTAP_TASKLET_INIT(q, f, d) \
3216do { memset((q), 0, sizeof(*(q))); (q)->func = (f); (q)->data = (d); } \
3217while (0)
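/* Effectively open-coded tasklet_init(): zeroing the struct leaves state
 * and count at 0 (tasklet enabled) before func and data are filled in. */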
3218 HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet,
3219 (unsigned long) local);
3220
3221 HOSTAP_TASKLET_INIT(&local->info_tasklet, hostap_info_tasklet,
3222 (unsigned long) local);
3223 hostap_info_init(local);
3224
3225 HOSTAP_TASKLET_INIT(&local->rx_tasklet,
3226 hostap_rx_tasklet, (unsigned long) local);
3227 skb_queue_head_init(&local->rx_list);
3228
3229 HOSTAP_TASKLET_INIT(&local->sta_tx_exc_tasklet,
3230 hostap_sta_tx_exc_tasklet, (unsigned long) local);
3231 skb_queue_head_init(&local->sta_tx_exc_list);
3232
3233 INIT_LIST_HEAD(&local->cmd_queue);
3234 init_waitqueue_head(&local->hostscan_wq);
3235 INIT_LIST_HEAD(&local->crypt_deinit_list);
3236 init_timer(&local->crypt_deinit_timer);
3237 local->crypt_deinit_timer.data = (unsigned long) local;
3238 local->crypt_deinit_timer.function = prism2_crypt_deinit_handler;
3239
3240 init_timer(&local->passive_scan_timer);
3241 local->passive_scan_timer.data = (unsigned long) local;
3242 local->passive_scan_timer.function = hostap_passive_scan;
3243
3244 init_timer(&local->tick_timer);
3245 local->tick_timer.data = (unsigned long) local;
3246 local->tick_timer.function = hostap_tick_timer;
3247 local->tick_timer.expires = jiffies + 2 * HZ;
3248 add_timer(&local->tick_timer);
3249
3250 INIT_LIST_HEAD(&local->bss_list);
3251
3252 hostap_setup_dev(dev, local, 1);
3253 local->saved_eth_header_parse = dev->hard_header_parse;
3254
3255 dev->hard_start_xmit = hostap_master_start_xmit;
3256 dev->type = ARPHRD_IEEE80211;
3257 dev->hard_header_parse = hostap_80211_header_parse;
3258
3259 rtnl_lock();
3260 ret = dev_alloc_name(dev, "wifi%d");
3261 SET_NETDEV_DEV(dev, sdev);
3262 if (ret >= 0)
3263 ret = register_netdevice(dev);
3264 rtnl_unlock();
3265 if (ret < 0) {
3266 printk(KERN_WARNING "%s: register netdevice failed!\n",
3267 dev_info);
3268 goto fail;
3269 }
3270 printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
3271
3272#ifndef PRISM2_NO_PROCFS_DEBUG
3273 create_proc_read_entry("registers", 0, local->proc,
3274 prism2_registers_proc_read, local);
3275#endif /* PRISM2_NO_PROCFS_DEBUG */
3276
3277 hostap_init_data(local);
3278 return dev;
3279
3280 fail:
3281 free_netdev(dev);
3282 return NULL;
3283}
3284
3285
3286static int hostap_hw_ready(struct net_device *dev)
3287{
3288 struct hostap_interface *iface;
3289 struct local_info *local;
3290
3291 iface = netdev_priv(dev);
3292 local = iface->local;
3293 local->ddev = hostap_add_interface(local, HOSTAP_INTERFACE_MAIN, 0,
3294 "", dev_template);
3295
3296 if (local->ddev) {
3297 if (local->iw_mode == IW_MODE_INFRA ||
3298 local->iw_mode == IW_MODE_ADHOC) {
3299 netif_carrier_off(local->dev);
3300 netif_carrier_off(local->ddev);
3301 }
3302 hostap_init_proc(local);
3303 hostap_init_ap_proc(local);
3304 return 0;
3305 }
3306
3307 return -1;
3308}
3309
3310
3311static void prism2_free_local_data(struct net_device *dev)
3312{
3313 struct hostap_tx_callback_info *tx_cb, *tx_cb_prev;
3314 int i;
3315 struct hostap_interface *iface;
3316 struct local_info *local;
3317 struct list_head *ptr, *n;
3318
3319 if (dev == NULL)
3320 return;
3321
3322 iface = netdev_priv(dev);
3323 local = iface->local;
3324
3325 flush_scheduled_work();
3326
3327 if (timer_pending(&local->crypt_deinit_timer))
3328 del_timer(&local->crypt_deinit_timer);
3329 prism2_crypt_deinit_entries(local, 1);
3330
3331 if (timer_pending(&local->passive_scan_timer))
3332 del_timer(&local->passive_scan_timer);
3333
3334 if (timer_pending(&local->tick_timer))
3335 del_timer(&local->tick_timer);
3336
3337 prism2_clear_cmd_queue(local);
3338
3339 skb_queue_purge(&local->info_list);
3340 skb_queue_purge(&local->rx_list);
3341 skb_queue_purge(&local->sta_tx_exc_list);
3342
3343 if (local->dev_enabled)
3344 prism2_callback(local, PRISM2_CALLBACK_DISABLE);
3345
3346 for (i = 0; i < WEP_KEYS; i++) {
3347 struct ieee80211_crypt_data *crypt = local->crypt[i];
3348 if (crypt) {
3349 if (crypt->ops)
3350 crypt->ops->deinit(crypt->priv);
3351 kfree(crypt);
3352 local->crypt[i] = NULL;
3353 }
3354 }
3355
3356 if (local->ap != NULL)
3357 hostap_free_data(local->ap);
3358
3359#ifndef PRISM2_NO_PROCFS_DEBUG
3360 if (local->proc != NULL)
3361 remove_proc_entry("registers", local->proc);
3362#endif /* PRISM2_NO_PROCFS_DEBUG */
3363 hostap_remove_proc(local);
3364
3365 tx_cb = local->tx_callback;
3366 while (tx_cb != NULL) {
3367 tx_cb_prev = tx_cb;
3368 tx_cb = tx_cb->next;
3369 kfree(tx_cb_prev);
3370 }
3371
3372 hostap_set_hostapd(local, 0, 0);
3373 hostap_set_hostapd_sta(local, 0, 0);
3374
3375 for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
3376 if (local->frag_cache[i].skb != NULL)
3377 dev_kfree_skb(local->frag_cache[i].skb);
3378 }
3379
3380#ifdef PRISM2_DOWNLOAD_SUPPORT
3381 prism2_download_free_data(local->dl_pri);
3382 prism2_download_free_data(local->dl_sec);
3383#endif /* PRISM2_DOWNLOAD_SUPPORT */
3384
3385 list_for_each_safe(ptr, n, &local->hostap_interfaces) {
3386 iface = list_entry(ptr, struct hostap_interface, list);
3387 if (iface->type == HOSTAP_INTERFACE_MASTER) {
3388 /* special handling for this interface below */
3389 continue;
3390 }
3391 hostap_remove_interface(iface->dev, 0, 1);
3392 }
3393
3394 prism2_clear_set_tim_queue(local);
3395
3396 list_for_each_safe(ptr, n, &local->bss_list) {
3397 struct hostap_bss_info *bss =
3398 list_entry(ptr, struct hostap_bss_info, list);
3399 kfree(bss);
3400 }
3401
3402 kfree(local->pda);
3403 kfree(local->last_scan_results);
3404 kfree(local->generic_elem);
3405
3406 unregister_netdev(local->dev);
3407 free_netdev(local->dev);
3408}
3409
3410
3411#ifndef PRISM2_PLX
3412static void prism2_suspend(struct net_device *dev)
3413{
3414 struct hostap_interface *iface;
3415 struct local_info *local;
3416 union iwreq_data wrqu;
3417
3418 iface = dev->priv;
3419 local = iface->local;
3420
3421 /* Send disconnect event, e.g., to trigger reassociation after resume
3422 * if wpa_supplicant is used. */
3423 memset(&wrqu, 0, sizeof(wrqu));
3424 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
3425 wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
3426
3427 /* Disable hardware and firmware */
3428 prism2_hw_shutdown(dev, 0);
3429}
3430#endif /* PRISM2_PLX */
3431
3432
3433/* These might at some point be compiled separately and used as separate
3434 * kernel modules or linked into one */
3435#ifdef PRISM2_DOWNLOAD_SUPPORT
3436#include "hostap_download.c"
3437#endif /* PRISM2_DOWNLOAD_SUPPORT */
3438
3439#ifdef PRISM2_CALLBACK
 3440/* The external hostap_callback.c file can be used to, e.g., blink an
 3441 * activity LED. It can use platform-specific code and must define the
 3442 * prism2_callback() function (if PRISM2_CALLBACK is not defined, these
 3443 * function calls are not used). */
3444#include "hostap_callback.c"
3445#endif /* PRISM2_CALLBACK */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
new file mode 100644
index 000000000000..5aa998fdf1c4
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -0,0 +1,499 @@
1/* Host AP driver Info Frame processing (part of hostap.o module) */
2
3
4/* Called only as a tasklet (software IRQ) */
5static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
6 int left)
7{
8 struct hfa384x_comm_tallies *tallies;
9
10 if (left < sizeof(struct hfa384x_comm_tallies)) {
11 printk(KERN_DEBUG "%s: too short (len=%d) commtallies "
12 "info frame\n", local->dev->name, left);
13 return;
14 }
15
16 tallies = (struct hfa384x_comm_tallies *) buf;
17#define ADD_COMM_TALLIES(name) \
18local->comm_tallies.name += le16_to_cpu(tallies->name)
19 ADD_COMM_TALLIES(tx_unicast_frames);
20 ADD_COMM_TALLIES(tx_multicast_frames);
21 ADD_COMM_TALLIES(tx_fragments);
22 ADD_COMM_TALLIES(tx_unicast_octets);
23 ADD_COMM_TALLIES(tx_multicast_octets);
24 ADD_COMM_TALLIES(tx_deferred_transmissions);
25 ADD_COMM_TALLIES(tx_single_retry_frames);
26 ADD_COMM_TALLIES(tx_multiple_retry_frames);
27 ADD_COMM_TALLIES(tx_retry_limit_exceeded);
28 ADD_COMM_TALLIES(tx_discards);
29 ADD_COMM_TALLIES(rx_unicast_frames);
30 ADD_COMM_TALLIES(rx_multicast_frames);
31 ADD_COMM_TALLIES(rx_fragments);
32 ADD_COMM_TALLIES(rx_unicast_octets);
33 ADD_COMM_TALLIES(rx_multicast_octets);
34 ADD_COMM_TALLIES(rx_fcs_errors);
35 ADD_COMM_TALLIES(rx_discards_no_buffer);
36 ADD_COMM_TALLIES(tx_discards_wrong_sa);
37 ADD_COMM_TALLIES(rx_discards_wep_undecryptable);
38 ADD_COMM_TALLIES(rx_message_in_msg_fragments);
39 ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments);
40#undef ADD_COMM_TALLIES
41}
42
43
44/* Called only as a tasklet (software IRQ) */
45static void prism2_info_commtallies32(local_info_t *local, unsigned char *buf,
46 int left)
47{
48 struct hfa384x_comm_tallies32 *tallies;
49
50 if (left < sizeof(struct hfa384x_comm_tallies32)) {
51 printk(KERN_DEBUG "%s: too short (len=%d) commtallies32 "
52 "info frame\n", local->dev->name, left);
53 return;
54 }
55
56 tallies = (struct hfa384x_comm_tallies32 *) buf;
57#define ADD_COMM_TALLIES(name) \
58local->comm_tallies.name += le32_to_cpu(tallies->name)
59 ADD_COMM_TALLIES(tx_unicast_frames);
60 ADD_COMM_TALLIES(tx_multicast_frames);
61 ADD_COMM_TALLIES(tx_fragments);
62 ADD_COMM_TALLIES(tx_unicast_octets);
63 ADD_COMM_TALLIES(tx_multicast_octets);
64 ADD_COMM_TALLIES(tx_deferred_transmissions);
65 ADD_COMM_TALLIES(tx_single_retry_frames);
66 ADD_COMM_TALLIES(tx_multiple_retry_frames);
67 ADD_COMM_TALLIES(tx_retry_limit_exceeded);
68 ADD_COMM_TALLIES(tx_discards);
69 ADD_COMM_TALLIES(rx_unicast_frames);
70 ADD_COMM_TALLIES(rx_multicast_frames);
71 ADD_COMM_TALLIES(rx_fragments);
72 ADD_COMM_TALLIES(rx_unicast_octets);
73 ADD_COMM_TALLIES(rx_multicast_octets);
74 ADD_COMM_TALLIES(rx_fcs_errors);
75 ADD_COMM_TALLIES(rx_discards_no_buffer);
76 ADD_COMM_TALLIES(tx_discards_wrong_sa);
77 ADD_COMM_TALLIES(rx_discards_wep_undecryptable);
78 ADD_COMM_TALLIES(rx_message_in_msg_fragments);
79 ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments);
80#undef ADD_COMM_TALLIES
81}
82
83
84/* Called only as a tasklet (software IRQ) */
85static void prism2_info_commtallies(local_info_t *local, unsigned char *buf,
86 int left)
87{
88 if (local->tallies32)
89 prism2_info_commtallies32(local, buf, left);
90 else
91 prism2_info_commtallies16(local, buf, left);
92}
93
94
95#ifndef PRISM2_NO_STATION_MODES
96#ifndef PRISM2_NO_DEBUG
97static const char* hfa384x_linkstatus_str(u16 linkstatus)
98{
99 switch (linkstatus) {
100 case HFA384X_LINKSTATUS_CONNECTED:
101 return "Connected";
102 case HFA384X_LINKSTATUS_DISCONNECTED:
103 return "Disconnected";
104 case HFA384X_LINKSTATUS_AP_CHANGE:
105 return "Access point change";
106 case HFA384X_LINKSTATUS_AP_OUT_OF_RANGE:
107 return "Access point out of range";
108 case HFA384X_LINKSTATUS_AP_IN_RANGE:
109 return "Access point in range";
110 case HFA384X_LINKSTATUS_ASSOC_FAILED:
111 return "Association failed";
112 default:
113 return "Unknown";
114 }
115}
116#endif /* PRISM2_NO_DEBUG */
117
118
119/* Called only as a tasklet (software IRQ) */
120static void prism2_info_linkstatus(local_info_t *local, unsigned char *buf,
121 int left)
122{
123 u16 val;
124 int non_sta_mode;
125
 126	/* Allow new JoinRequests to occur since the LinkStatus for the previous
 127	 * one has been received */
128 local->last_join_time = 0;
129
130 if (left != 2) {
131 printk(KERN_DEBUG "%s: invalid linkstatus info frame "
132 "length %d\n", local->dev->name, left);
133 return;
134 }
135
136 non_sta_mode = local->iw_mode == IW_MODE_MASTER ||
137 local->iw_mode == IW_MODE_REPEAT ||
138 local->iw_mode == IW_MODE_MONITOR;
139
140 val = buf[0] | (buf[1] << 8);
141 if (!non_sta_mode || val != HFA384X_LINKSTATUS_DISCONNECTED) {
142 PDEBUG(DEBUG_EXTRA, "%s: LinkStatus=%d (%s)\n",
143 local->dev->name, val, hfa384x_linkstatus_str(val));
144 }
145
146 if (non_sta_mode) {
147 netif_carrier_on(local->dev);
148 netif_carrier_on(local->ddev);
149 return;
150 }
151
152 /* Get current BSSID later in scheduled task */
153 set_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info);
154 local->prev_link_status = val;
155 schedule_work(&local->info_queue);
156}
157
158
159static void prism2_host_roaming(local_info_t *local)
160{
161 struct hfa384x_join_request req;
162 struct net_device *dev = local->dev;
163 struct hfa384x_hostscan_result *selected, *entry;
164 int i;
165 unsigned long flags;
166
167 if (local->last_join_time &&
168 time_before(jiffies, local->last_join_time + 10 * HZ)) {
169 PDEBUG(DEBUG_EXTRA, "%s: last join request has not yet been "
170 "completed - waiting for it before issuing new one\n",
171 dev->name);
172 return;
173 }
174
 175	/* ScanResults are sorted: first ESS results in decreasing signal
 176	 * quality, then IBSS results in similar order.
 177	 * Trivial roaming policy: just select the first entry.
 178	 * This could probably be improved by adding hysteresis to limit the
 179	 * number of handoffs, etc.
180 *
181 * Could do periodic RID_SCANREQUEST or Inquire F101 to get new
182 * ScanResults */
183 spin_lock_irqsave(&local->lock, flags);
184 if (local->last_scan_results == NULL ||
185 local->last_scan_results_count == 0) {
186 spin_unlock_irqrestore(&local->lock, flags);
187 PDEBUG(DEBUG_EXTRA, "%s: no scan results for host roaming\n",
188 dev->name);
189 return;
190 }
191
192 selected = &local->last_scan_results[0];
193
194 if (local->preferred_ap[0] || local->preferred_ap[1] ||
195 local->preferred_ap[2] || local->preferred_ap[3] ||
196 local->preferred_ap[4] || local->preferred_ap[5]) {
197 /* Try to find preferred AP */
198 PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " MACSTR "\n",
199 dev->name, MAC2STR(local->preferred_ap));
200 for (i = 0; i < local->last_scan_results_count; i++) {
201 entry = &local->last_scan_results[i];
202 if (memcmp(local->preferred_ap, entry->bssid, 6) == 0)
203 {
204 PDEBUG(DEBUG_EXTRA, "%s: using preferred AP "
205 "selection\n", dev->name);
206 selected = entry;
207 break;
208 }
209 }
210 }
211
212 memcpy(req.bssid, selected->bssid, 6);
213 req.channel = selected->chid;
214 spin_unlock_irqrestore(&local->lock, flags);
215
216 PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=" MACSTR " channel=%d\n",
217 dev->name, MAC2STR(req.bssid), le16_to_cpu(req.channel));
218 if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
219 sizeof(req))) {
220 printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name);
221 }
222 local->last_join_time = jiffies;
223}
224
225
226static void hostap_report_scan_complete(local_info_t *local)
227{
228 union iwreq_data wrqu;
229
 230	/* Inform user space about new scan results (just an empty event;
 231	 * SIOCGIWSCAN can be used to fetch the data) */
232 wrqu.data.length = 0;
233 wrqu.data.flags = 0;
234 wireless_send_event(local->dev, SIOCGIWSCAN, &wrqu, NULL);
235
 236	/* Allow SIOCGIWSCAN handling to occur since we have received a
 237	 * scanning result */
238 local->scan_timestamp = 0;
239}
240
241
242/* Called only as a tasklet (software IRQ) */
243static void prism2_info_scanresults(local_info_t *local, unsigned char *buf,
244 int left)
245{
246 u16 *pos;
247 int new_count, i;
248 unsigned long flags;
249 struct hfa384x_scan_result *res;
250 struct hfa384x_hostscan_result *results, *prev;
251
252 if (left < 4) {
253 printk(KERN_DEBUG "%s: invalid scanresult info frame "
254 "length %d\n", local->dev->name, left);
255 return;
256 }
257
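	/* Skip the first two 16-bit words of the frame body; the array of
	 * scan result entries follows. */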
258 pos = (u16 *) buf;
259 pos++;
260 pos++;
261 left -= 4;
262
263 new_count = left / sizeof(struct hfa384x_scan_result);
264 results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
265 GFP_ATOMIC);
266 if (results == NULL)
267 return;
268
269 /* Convert to hostscan result format. */
270 res = (struct hfa384x_scan_result *) pos;
271 for (i = 0; i < new_count; i++) {
272 memcpy(&results[i], &res[i],
273 sizeof(struct hfa384x_scan_result));
274 results[i].atim = 0;
275 }
276
277 spin_lock_irqsave(&local->lock, flags);
278 local->last_scan_type = PRISM2_SCAN;
279 prev = local->last_scan_results;
280 local->last_scan_results = results;
281 local->last_scan_results_count = new_count;
282 spin_unlock_irqrestore(&local->lock, flags);
283 kfree(prev);
284
285 hostap_report_scan_complete(local);
286
287 /* Perform rest of ScanResults handling later in scheduled task */
288 set_bit(PRISM2_INFO_PENDING_SCANRESULTS, &local->pending_info);
289 schedule_work(&local->info_queue);
290}
291
292
293/* Called only as a tasklet (software IRQ) */
294static void prism2_info_hostscanresults(local_info_t *local,
295 unsigned char *buf, int left)
296{
297 int i, result_size, copy_len, new_count;
298 struct hfa384x_hostscan_result *results, *prev;
299 unsigned long flags;
300 u16 *pos;
301 u8 *ptr;
302
303 wake_up_interruptible(&local->hostscan_wq);
304
305 if (left < 4) {
306 printk(KERN_DEBUG "%s: invalid hostscanresult info frame "
307 "length %d\n", local->dev->name, left);
308 return;
309 }
310
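	/* The first word gives the size of one result entry as reported by
	 * the firmware; at most sizeof(struct hfa384x_hostscan_result) bytes
	 * of each entry are copied into the zeroed results array. */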
311 pos = (u16 *) buf;
312 copy_len = result_size = le16_to_cpu(*pos);
313 if (result_size == 0) {
314 printk(KERN_DEBUG "%s: invalid result_size (0) in "
315 "hostscanresults\n", local->dev->name);
316 return;
317 }
318 if (copy_len > sizeof(struct hfa384x_hostscan_result))
319 copy_len = sizeof(struct hfa384x_hostscan_result);
320
321 pos++;
322 pos++;
323 left -= 4;
324 ptr = (u8 *) pos;
325
326 new_count = left / result_size;
327 results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
328 GFP_ATOMIC);
329 if (results == NULL)
330 return;
331 memset(results, 0, new_count * sizeof(struct hfa384x_hostscan_result));
332
333 for (i = 0; i < new_count; i++) {
334 memcpy(&results[i], ptr, copy_len);
335 ptr += result_size;
336 left -= result_size;
337 }
338
339 if (left) {
340 printk(KERN_DEBUG "%s: short HostScan result entry (%d/%d)\n",
341 local->dev->name, left, result_size);
342 }
343
344 spin_lock_irqsave(&local->lock, flags);
345 local->last_scan_type = PRISM2_HOSTSCAN;
346 prev = local->last_scan_results;
347 local->last_scan_results = results;
348 local->last_scan_results_count = new_count;
349 spin_unlock_irqrestore(&local->lock, flags);
350 kfree(prev);
351
352 hostap_report_scan_complete(local);
353}
354#endif /* PRISM2_NO_STATION_MODES */
355
356
357/* Called only as a tasklet (software IRQ) */
358void hostap_info_process(local_info_t *local, struct sk_buff *skb)
359{
360 struct hfa384x_info_frame *info;
361 unsigned char *buf;
362 int left;
363#ifndef PRISM2_NO_DEBUG
364 int i;
365#endif /* PRISM2_NO_DEBUG */
366
367 info = (struct hfa384x_info_frame *) skb->data;
368 buf = skb->data + sizeof(*info);
369 left = skb->len - sizeof(*info);
370
371 switch (info->type) {
372 case HFA384X_INFO_COMMTALLIES:
373 prism2_info_commtallies(local, buf, left);
374 break;
375
376#ifndef PRISM2_NO_STATION_MODES
377 case HFA384X_INFO_LINKSTATUS:
378 prism2_info_linkstatus(local, buf, left);
379 break;
380
381 case HFA384X_INFO_SCANRESULTS:
382 prism2_info_scanresults(local, buf, left);
383 break;
384
385 case HFA384X_INFO_HOSTSCANRESULTS:
386 prism2_info_hostscanresults(local, buf, left);
387 break;
388#endif /* PRISM2_NO_STATION_MODES */
389
390#ifndef PRISM2_NO_DEBUG
391 default:
392 PDEBUG(DEBUG_EXTRA, "%s: INFO - len=%d type=0x%04x\n",
393 local->dev->name, info->len, info->type);
394 PDEBUG(DEBUG_EXTRA, "Unknown info frame:");
395 for (i = 0; i < (left < 100 ? left : 100); i++)
396 PDEBUG2(DEBUG_EXTRA, " %02x", buf[i]);
397 PDEBUG2(DEBUG_EXTRA, "\n");
398 break;
399#endif /* PRISM2_NO_DEBUG */
400 }
401}
402
403
404#ifndef PRISM2_NO_STATION_MODES
405static void handle_info_queue_linkstatus(local_info_t *local)
406{
407 int val = local->prev_link_status;
408 int connected;
409 union iwreq_data wrqu;
410
411 connected =
412 val == HFA384X_LINKSTATUS_CONNECTED ||
413 val == HFA384X_LINKSTATUS_AP_CHANGE ||
414 val == HFA384X_LINKSTATUS_AP_IN_RANGE;
415
416 if (local->func->get_rid(local->dev, HFA384X_RID_CURRENTBSSID,
417 local->bssid, ETH_ALEN, 1) < 0) {
418 printk(KERN_DEBUG "%s: could not read CURRENTBSSID after "
419 "LinkStatus event\n", local->dev->name);
420 } else {
421 PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=" MACSTR "\n",
422 local->dev->name,
423 MAC2STR((unsigned char *) local->bssid));
424 if (local->wds_type & HOSTAP_WDS_AP_CLIENT)
425 hostap_add_sta(local->ap, local->bssid);
426 }
427
428 /* Get BSSID if we have a valid AP address */
429 if (connected) {
430 netif_carrier_on(local->dev);
431 netif_carrier_on(local->ddev);
432 memcpy(wrqu.ap_addr.sa_data, local->bssid, ETH_ALEN);
433 } else {
434 netif_carrier_off(local->dev);
435 netif_carrier_off(local->ddev);
436 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
437 }
438 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
439
440 /*
441 * Filter out sequential disconnect events in order not to cause a
442 * flood of SIOCGIWAP events that have a race condition with EAPOL
443 * frames and can confuse wpa_supplicant about the current association
444 * status.
445 */
446 if (connected || local->prev_linkstatus_connected)
447 wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
448 local->prev_linkstatus_connected = connected;
449}
450
451
452static void handle_info_queue_scanresults(local_info_t *local)
453{
454 if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA)
455 prism2_host_roaming(local);
456
457 if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
458 memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00",
459 ETH_ALEN) != 0) {
460 /*
 461		 * The firmware seems to get into an odd state in host_roaming
 462		 * mode 2 when hostscan is used without a join command, so try
463 * to fix this by re-joining the current AP. This does not
464 * actually trigger a new association if the current AP is
465 * still in the scan results.
466 */
467 prism2_host_roaming(local);
468 }
469}
470
471
472/* Called only as a scheduled task after receiving info frames (used to avoid
473 * spending too much time in the HW IRQ handler). */
474static void handle_info_queue(void *data)
475{
476 local_info_t *local = (local_info_t *) data;
477
478 if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
479 &local->pending_info))
480 handle_info_queue_linkstatus(local);
481
482 if (test_and_clear_bit(PRISM2_INFO_PENDING_SCANRESULTS,
483 &local->pending_info))
484 handle_info_queue_scanresults(local);
485}
486#endif /* PRISM2_NO_STATION_MODES */
487
488
489void hostap_info_init(local_info_t *local)
490{
491 skb_queue_head_init(&local->info_list);
492#ifndef PRISM2_NO_STATION_MODES
493 INIT_WORK(&local->info_queue, handle_info_queue, local);
494#endif /* PRISM2_NO_STATION_MODES */
495}
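/*
 * Illustrative sketch (not part of the driver): with the 2.6-era workqueue
 * API used above, INIT_WORK() binds handle_info_queue() and its data pointer
 * once at init time, so the interrupt-path info handlers can defer processing
 * to process context simply by queueing the work. A caller elsewhere in the
 * driver would do roughly:
 *
 *	set_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info);
 *	schedule_work(&local->info_queue);
 *
 * schedule_work() runs the handler from the shared kernel workqueue, where
 * sleeping RID reads such as the CURRENTBSSID fetch above are allowed.
 */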
496
497
498EXPORT_SYMBOL(hostap_info_init);
499EXPORT_SYMBOL(hostap_info_process);
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
new file mode 100644
index 000000000000..e720369a3515
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -0,0 +1,4102 @@
1/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
2
3#ifdef in_atomic
4/* Get kernel_locked() for in_atomic() */
5#include <linux/smp_lock.h>
6#endif
7#include <linux/ethtool.h>
8
9
10static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
11{
12 struct hostap_interface *iface;
13 local_info_t *local;
14 struct iw_statistics *wstats;
15
16 iface = netdev_priv(dev);
17 local = iface->local;
18
19 /* Why are we doing that ? Jean II */
20 if (iface->type != HOSTAP_INTERFACE_MAIN)
21 return NULL;
22
23 wstats = &local->wstats;
24
25 wstats->status = 0;
26 wstats->discard.code =
27 local->comm_tallies.rx_discards_wep_undecryptable;
28 wstats->discard.misc =
29 local->comm_tallies.rx_fcs_errors +
30 local->comm_tallies.rx_discards_no_buffer +
31 local->comm_tallies.tx_discards_wrong_sa;
32
33 wstats->discard.retries =
34 local->comm_tallies.tx_retry_limit_exceeded;
35 wstats->discard.fragment =
36 local->comm_tallies.rx_message_in_bad_msg_fragments;
37
38 if (local->iw_mode != IW_MODE_MASTER &&
39 local->iw_mode != IW_MODE_REPEAT) {
40 int update = 1;
41#ifdef in_atomic
42		/* RID reading might sleep, so it must not be called in
43		 * interrupt context or while atomic. However, this
44		 * function seems to be called while atomic (at least in Linux
45		 * 2.5.59). Update the signal quality values only in a suitable
46		 * context; otherwise, the previous values read by the tick
47		 * timer will be used. */
48 if (in_atomic())
49 update = 0;
50#endif /* in_atomic */
51
52 if (update && prism2_update_comms_qual(dev) == 0)
53 wstats->qual.updated = 7;
54
55 wstats->qual.qual = local->comms_qual;
56 wstats->qual.level = local->avg_signal;
57 wstats->qual.noise = local->avg_noise;
58 } else {
59 wstats->qual.qual = 0;
60 wstats->qual.level = 0;
61 wstats->qual.noise = 0;
62 wstats->qual.updated = 0;
63 }
64
65 return wstats;
66}
67
68
69static int prism2_get_datarates(struct net_device *dev, u8 *rates)
70{
71 struct hostap_interface *iface;
72 local_info_t *local;
73 u8 buf[12];
74 int len;
75 u16 val;
76
77 iface = netdev_priv(dev);
78 local = iface->local;
79
80 len = local->func->get_rid(dev, HFA384X_RID_SUPPORTEDDATARATES, buf,
81 sizeof(buf), 0);
82 if (len < 2)
83 return 0;
84
85 val = le16_to_cpu(*(u16 *) buf); /* string length */
86
87 if (len - 2 < val || val > 10)
88 return 0;
89
90 memcpy(rates, buf + 2, val);
91 return val;
92}
93
94
95static int prism2_get_name(struct net_device *dev,
96 struct iw_request_info *info,
97 char *name, char *extra)
98{
99 u8 rates[10];
100 int len, i, over2 = 0;
101
102 len = prism2_get_datarates(dev, rates);
103
104 for (i = 0; i < len; i++) {
105 if (rates[i] == 0x0b || rates[i] == 0x16) {
106 over2 = 1;
107 break;
108 }
109 }
110
111 strcpy(name, over2 ? "IEEE 802.11b" : "IEEE 802.11-DS");
112
113 return 0;
114}
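/*
 * Illustrative note (assumed encoding): the SUPPORTEDDATARATES RID returns
 * rates in IEEE 802.11 units of 500 kb/s, so 0x02 = 1 Mb/s, 0x04 = 2 Mb/s,
 * 0x0b = 5.5 Mb/s and 0x16 = 11 Mb/s. The 0x0b/0x16 test above therefore
 * detects the 802.11b high-rate modes and selects the "IEEE 802.11b" name
 * instead of the DSSS-only "IEEE 802.11-DS".
 */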
115
116
117static void prism2_crypt_delayed_deinit(local_info_t *local,
118 struct ieee80211_crypt_data **crypt)
119{
120 struct ieee80211_crypt_data *tmp;
121 unsigned long flags;
122
123 tmp = *crypt;
124 *crypt = NULL;
125
126 if (tmp == NULL)
127 return;
128
129 /* must not run ops->deinit() while there may be pending encrypt or
130 * decrypt operations. Use a list of delayed deinits to avoid needing
131 * locking. */
132
133 spin_lock_irqsave(&local->lock, flags);
134 list_add(&tmp->list, &local->crypt_deinit_list);
135 if (!timer_pending(&local->crypt_deinit_timer)) {
136 local->crypt_deinit_timer.expires = jiffies + HZ;
137 add_timer(&local->crypt_deinit_timer);
138 }
139 spin_unlock_irqrestore(&local->lock, flags);
140}
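/*
 * Illustrative sketch (not driver code): the timer armed above is expected to
 * drain crypt_deinit_list once no encrypt/decrypt operations can still
 * reference the entries. A minimal handler could look roughly like this;
 * names other than the list/timer fields are hypothetical:
 *
 *	static void crypt_deinit_handler(unsigned long data)
 *	{
 *		local_info_t *local = (local_info_t *) data;
 *		struct ieee80211_crypt_data *entry, *next;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&local->lock, flags);
 *		list_for_each_entry_safe(entry, next,
 *					 &local->crypt_deinit_list, list) {
 *			list_del(&entry->list);
 *			if (entry->ops)
 *				entry->ops->deinit(entry->priv);
 *			kfree(entry);
 *		}
 *		spin_unlock_irqrestore(&local->lock, flags);
 *	}
 */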
141
142
143static int prism2_ioctl_siwencode(struct net_device *dev,
144 struct iw_request_info *info,
145 struct iw_point *erq, char *keybuf)
146{
147 struct hostap_interface *iface;
148 local_info_t *local;
149 int i;
150 struct ieee80211_crypt_data **crypt;
151
152 iface = netdev_priv(dev);
153 local = iface->local;
154
155 i = erq->flags & IW_ENCODE_INDEX;
156 if (i < 1 || i > 4)
157 i = local->tx_keyidx;
158 else
159 i--;
160 if (i < 0 || i >= WEP_KEYS)
161 return -EINVAL;
162
163 crypt = &local->crypt[i];
164
165 if (erq->flags & IW_ENCODE_DISABLED) {
166 if (*crypt)
167 prism2_crypt_delayed_deinit(local, crypt);
168 goto done;
169 }
170
171 if (*crypt != NULL && (*crypt)->ops != NULL &&
172 strcmp((*crypt)->ops->name, "WEP") != 0) {
173 /* changing to use WEP; deinit previously used algorithm */
174 prism2_crypt_delayed_deinit(local, crypt);
175 }
176
177 if (*crypt == NULL) {
178 struct ieee80211_crypt_data *new_crypt;
179
180 /* take WEP into use */
181 new_crypt = (struct ieee80211_crypt_data *)
182 kmalloc(sizeof(struct ieee80211_crypt_data),
183 GFP_KERNEL);
184 if (new_crypt == NULL)
185 return -ENOMEM;
186 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
187 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
188 if (!new_crypt->ops) {
189 request_module("ieee80211_crypt_wep");
190 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
191 }
192 if (new_crypt->ops)
193 new_crypt->priv = new_crypt->ops->init(i);
194 if (!new_crypt->ops || !new_crypt->priv) {
195 kfree(new_crypt);
196 new_crypt = NULL;
197
198 printk(KERN_WARNING "%s: could not initialize WEP: "
199			       "load module ieee80211_crypt_wep\n",
200 dev->name);
201 return -EOPNOTSUPP;
202 }
203 *crypt = new_crypt;
204 }
205
206 if (erq->length > 0) {
207 int len = erq->length <= 5 ? 5 : 13;
208 int first = 1, j;
209 if (len > erq->length)
210 memset(keybuf + erq->length, 0, len - erq->length);
211 (*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv);
212 for (j = 0; j < WEP_KEYS; j++) {
213 if (j != i && local->crypt[j]) {
214 first = 0;
215 break;
216 }
217 }
218 if (first)
219 local->tx_keyidx = i;
220 } else {
221 /* No key data - just set the default TX key index */
222 local->tx_keyidx = i;
223 }
224
225 done:
226 local->open_wep = erq->flags & IW_ENCODE_OPEN;
227
228 if (hostap_set_encryption(local)) {
229 printk(KERN_DEBUG "%s: set_encryption failed\n", dev->name);
230 return -EINVAL;
231 }
232
233	/* Do not reset port0 if the card is in Managed mode, since resetting
234	 * would trigger a new IEEE 802.11 authentication which may end up
235	 * looping with IEEE 802.1X. Prism2 documentation seems to require a
236	 * port reset after WEP configuration; however, the keys apparently
237	 * take effect without it, at least in Managed mode. */
238 if (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(dev)) {
239 printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
240 return -EINVAL;
241 }
242
243 return 0;
244}
245
246
247static int prism2_ioctl_giwencode(struct net_device *dev,
248 struct iw_request_info *info,
249 struct iw_point *erq, char *key)
250{
251 struct hostap_interface *iface;
252 local_info_t *local;
253 int i, len;
254 u16 val;
255 struct ieee80211_crypt_data *crypt;
256
257 iface = netdev_priv(dev);
258 local = iface->local;
259
260 i = erq->flags & IW_ENCODE_INDEX;
261 if (i < 1 || i > 4)
262 i = local->tx_keyidx;
263 else
264 i--;
265 if (i < 0 || i >= WEP_KEYS)
266 return -EINVAL;
267
268 crypt = local->crypt[i];
269 erq->flags = i + 1;
270
271 if (crypt == NULL || crypt->ops == NULL) {
272 erq->length = 0;
273 erq->flags |= IW_ENCODE_DISABLED;
274 return 0;
275 }
276
277 if (strcmp(crypt->ops->name, "WEP") != 0) {
278 /* only WEP is supported with wireless extensions, so just
279 * report that encryption is used */
280 erq->length = 0;
281 erq->flags |= IW_ENCODE_ENABLED;
282 return 0;
283 }
284
285 /* Reads from HFA384X_RID_CNFDEFAULTKEY* return bogus values, so show
286 * the keys from driver buffer */
287 len = crypt->ops->get_key(key, WEP_KEY_LEN, NULL, crypt->priv);
288 erq->length = (len >= 0 ? len : 0);
289
290 if (local->func->get_rid(dev, HFA384X_RID_CNFWEPFLAGS, &val, 2, 1) < 0)
291 {
292		printk(KERN_DEBUG "CNFWEPFLAGS reading failed\n");
293 return -EOPNOTSUPP;
294 }
295 le16_to_cpus(&val);
296 if (val & HFA384X_WEPFLAGS_PRIVACYINVOKED)
297 erq->flags |= IW_ENCODE_ENABLED;
298 else
299 erq->flags |= IW_ENCODE_DISABLED;
300 if (val & HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED)
301 erq->flags |= IW_ENCODE_RESTRICTED;
302 else
303 erq->flags |= IW_ENCODE_OPEN;
304
305 return 0;
306}
307
308
309static int hostap_set_rate(struct net_device *dev)
310{
311 struct hostap_interface *iface;
312 local_info_t *local;
313 int ret, basic_rates;
314
315 iface = netdev_priv(dev);
316 local = iface->local;
317
318 basic_rates = local->basic_rates & local->tx_rate_control;
319 if (!basic_rates || basic_rates != local->basic_rates) {
320 printk(KERN_INFO "%s: updating basic rate set automatically "
321 "to match with the new supported rate set\n",
322 dev->name);
323 if (!basic_rates)
324 basic_rates = local->tx_rate_control;
325
326 local->basic_rates = basic_rates;
327 if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
328 basic_rates))
329 printk(KERN_WARNING "%s: failed to set "
330 "cnfBasicRates\n", dev->name);
331 }
332
333 ret = (hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
334 local->tx_rate_control) ||
335 hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
336 local->tx_rate_control) ||
337 local->func->reset_port(dev));
338
339 if (ret) {
340 printk(KERN_WARNING "%s: TXRateControl/cnfSupportedRates "
341 "setting to 0x%x failed\n",
342 dev->name, local->tx_rate_control);
343 }
344
345 /* Update TX rate configuration for all STAs based on new operational
346 * rate set. */
347 hostap_update_rates(local);
348
349 return ret;
350}
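/*
 * Worked example of the logic above (illustrative only): the HFA384X_RATES_*
 * values form a bitmask, so if the user restricts tx_rate_control to
 * HFA384X_RATES_11MBPS while basic_rates still contains the 1 and 2 Mb/s
 * bits, the intersection is empty; basic_rates is then reset to the new
 * tx_rate_control (11 Mb/s only) and written to cnfBasicRates before the
 * supported-rate and TX-rate RIDs are updated and the port is reset.
 */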
351
352
353static int prism2_ioctl_siwrate(struct net_device *dev,
354 struct iw_request_info *info,
355 struct iw_param *rrq, char *extra)
356{
357 struct hostap_interface *iface;
358 local_info_t *local;
359
360 iface = netdev_priv(dev);
361 local = iface->local;
362
363 if (rrq->fixed) {
364 switch (rrq->value) {
365 case 11000000:
366 local->tx_rate_control = HFA384X_RATES_11MBPS;
367 break;
368 case 5500000:
369 local->tx_rate_control = HFA384X_RATES_5MBPS;
370 break;
371 case 2000000:
372 local->tx_rate_control = HFA384X_RATES_2MBPS;
373 break;
374 case 1000000:
375 local->tx_rate_control = HFA384X_RATES_1MBPS;
376 break;
377 default:
378 local->tx_rate_control = HFA384X_RATES_1MBPS |
379 HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
380 HFA384X_RATES_11MBPS;
381 break;
382 }
383 } else {
384 switch (rrq->value) {
385 case 11000000:
386 local->tx_rate_control = HFA384X_RATES_1MBPS |
387 HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
388 HFA384X_RATES_11MBPS;
389 break;
390 case 5500000:
391 local->tx_rate_control = HFA384X_RATES_1MBPS |
392 HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS;
393 break;
394 case 2000000:
395 local->tx_rate_control = HFA384X_RATES_1MBPS |
396 HFA384X_RATES_2MBPS;
397 break;
398 case 1000000:
399 local->tx_rate_control = HFA384X_RATES_1MBPS;
400 break;
401 default:
402 local->tx_rate_control = HFA384X_RATES_1MBPS |
403 HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
404 HFA384X_RATES_11MBPS;
405 break;
406 }
407 }
408
409 return hostap_set_rate(dev);
410}
411
412
413static int prism2_ioctl_giwrate(struct net_device *dev,
414 struct iw_request_info *info,
415 struct iw_param *rrq, char *extra)
416{
417 u16 val;
418 struct hostap_interface *iface;
419 local_info_t *local;
420 int ret = 0;
421
422 iface = netdev_priv(dev);
423 local = iface->local;
424
425 if (local->func->get_rid(dev, HFA384X_RID_TXRATECONTROL, &val, 2, 1) <
426 0)
427 return -EINVAL;
428
429 if ((val & 0x1) && (val > 1))
430 rrq->fixed = 0;
431 else
432 rrq->fixed = 1;
433
434 if (local->iw_mode == IW_MODE_MASTER && local->ap != NULL &&
435 !local->fw_tx_rate_control) {
436 /* HFA384X_RID_CURRENTTXRATE seems to always be 2 Mbps in
437 * Host AP mode, so use the recorded TX rate of the last sent
438 * frame */
439 rrq->value = local->ap->last_tx_rate > 0 ?
440 local->ap->last_tx_rate * 100000 : 11000000;
441 return 0;
442 }
443
444 if (local->func->get_rid(dev, HFA384X_RID_CURRENTTXRATE, &val, 2, 1) <
445 0)
446 return -EINVAL;
447
448 switch (val) {
449 case HFA384X_RATES_1MBPS:
450 rrq->value = 1000000;
451 break;
452 case HFA384X_RATES_2MBPS:
453 rrq->value = 2000000;
454 break;
455 case HFA384X_RATES_5MBPS:
456 rrq->value = 5500000;
457 break;
458 case HFA384X_RATES_11MBPS:
459 rrq->value = 11000000;
460 break;
461 default:
462 /* should not happen */
463 rrq->value = 11000000;
464 ret = -EINVAL;
465 break;
466 }
467
468 return ret;
469}
470
471
472static int prism2_ioctl_siwsens(struct net_device *dev,
473 struct iw_request_info *info,
474 struct iw_param *sens, char *extra)
475{
476 struct hostap_interface *iface;
477 local_info_t *local;
478
479 iface = netdev_priv(dev);
480 local = iface->local;
481
482 /* Set the desired AP density */
483 if (sens->value < 1 || sens->value > 3)
484 return -EINVAL;
485
486 if (hostap_set_word(dev, HFA384X_RID_CNFSYSTEMSCALE, sens->value) ||
487 local->func->reset_port(dev))
488 return -EINVAL;
489
490 return 0;
491}
492
493static int prism2_ioctl_giwsens(struct net_device *dev,
494 struct iw_request_info *info,
495 struct iw_param *sens, char *extra)
496{
497 struct hostap_interface *iface;
498 local_info_t *local;
499 u16 val;
500
501 iface = netdev_priv(dev);
502 local = iface->local;
503
504 /* Get the current AP density */
505 if (local->func->get_rid(dev, HFA384X_RID_CNFSYSTEMSCALE, &val, 2, 1) <
506 0)
507 return -EINVAL;
508
509 sens->value = __le16_to_cpu(val);
510 sens->fixed = 1;
511
512 return 0;
513}
514
515
516/* Deprecated in new wireless extension API */
517static int prism2_ioctl_giwaplist(struct net_device *dev,
518 struct iw_request_info *info,
519 struct iw_point *data, char *extra)
520{
521 struct hostap_interface *iface;
522 local_info_t *local;
523 struct sockaddr *addr;
524 struct iw_quality *qual;
525
526 iface = netdev_priv(dev);
527 local = iface->local;
528
529 if (local->iw_mode != IW_MODE_MASTER) {
530 printk(KERN_DEBUG "SIOCGIWAPLIST is currently only supported "
531 "in Host AP mode\n");
532 data->length = 0;
533 return -EOPNOTSUPP;
534 }
535
536 addr = kmalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
537 qual = kmalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
538 if (addr == NULL || qual == NULL) {
539 kfree(addr);
540 kfree(qual);
541 data->length = 0;
542 return -ENOMEM;
543 }
544
545 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
546
547	memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
548	data->flags = 1; /* has quality information */
549	memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
550 sizeof(struct iw_quality) * data->length);
551
552 kfree(addr);
553 kfree(qual);
554
555 return 0;
556}
557
558
559static int prism2_ioctl_siwrts(struct net_device *dev,
560 struct iw_request_info *info,
561 struct iw_param *rts, char *extra)
562{
563 struct hostap_interface *iface;
564 local_info_t *local;
565 u16 val;
566
567 iface = netdev_priv(dev);
568 local = iface->local;
569
570 if (rts->disabled)
571 val = __constant_cpu_to_le16(2347);
572 else if (rts->value < 0 || rts->value > 2347)
573 return -EINVAL;
574 else
575 val = __cpu_to_le16(rts->value);
576
577 if (local->func->set_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2) ||
578 local->func->reset_port(dev))
579 return -EINVAL;
580
581 local->rts_threshold = rts->value;
582
583 return 0;
584}
585
586static int prism2_ioctl_giwrts(struct net_device *dev,
587 struct iw_request_info *info,
588 struct iw_param *rts, char *extra)
589{
590 struct hostap_interface *iface;
591 local_info_t *local;
592 u16 val;
593
594 iface = netdev_priv(dev);
595 local = iface->local;
596
597 if (local->func->get_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2, 1) <
598 0)
599 return -EINVAL;
600
601 rts->value = __le16_to_cpu(val);
602 rts->disabled = (rts->value == 2347);
603 rts->fixed = 1;
604
605 return 0;
606}
607
608
609static int prism2_ioctl_siwfrag(struct net_device *dev,
610 struct iw_request_info *info,
611 struct iw_param *rts, char *extra)
612{
613 struct hostap_interface *iface;
614 local_info_t *local;
615 u16 val;
616
617 iface = netdev_priv(dev);
618 local = iface->local;
619
620 if (rts->disabled)
621 val = __constant_cpu_to_le16(2346);
622 else if (rts->value < 256 || rts->value > 2346)
623 return -EINVAL;
624 else
625 val = __cpu_to_le16(rts->value & ~0x1); /* even numbers only */
626
627 local->fragm_threshold = rts->value & ~0x1;
628 if (local->func->set_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, &val,
629 2)
630 || local->func->reset_port(dev))
631 return -EINVAL;
632
633 return 0;
634}
635
636static int prism2_ioctl_giwfrag(struct net_device *dev,
637 struct iw_request_info *info,
638 struct iw_param *rts, char *extra)
639{
640 struct hostap_interface *iface;
641 local_info_t *local;
642 u16 val;
643
644 iface = netdev_priv(dev);
645 local = iface->local;
646
647 if (local->func->get_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
648 &val, 2, 1) < 0)
649 return -EINVAL;
650
651 rts->value = __le16_to_cpu(val);
652 rts->disabled = (rts->value == 2346);
653 rts->fixed = 1;
654
655 return 0;
656}
657
658
659#ifndef PRISM2_NO_STATION_MODES
660static int hostap_join_ap(struct net_device *dev)
661{
662 struct hostap_interface *iface;
663 local_info_t *local;
664 struct hfa384x_join_request req;
665 unsigned long flags;
666 int i;
667 struct hfa384x_hostscan_result *entry;
668
669 iface = netdev_priv(dev);
670 local = iface->local;
671
672 memcpy(req.bssid, local->preferred_ap, ETH_ALEN);
673 req.channel = 0;
674
675 spin_lock_irqsave(&local->lock, flags);
676 for (i = 0; i < local->last_scan_results_count; i++) {
677 if (!local->last_scan_results)
678 break;
679 entry = &local->last_scan_results[i];
680 if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
681 req.channel = entry->chid;
682 break;
683 }
684 }
685 spin_unlock_irqrestore(&local->lock, flags);
686
687 if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
688 sizeof(req))) {
689 printk(KERN_DEBUG "%s: JoinRequest " MACSTR
690 " failed\n",
691 dev->name, MAC2STR(local->preferred_ap));
692 return -1;
693 }
694
695 printk(KERN_DEBUG "%s: Trying to join BSSID " MACSTR "\n",
696 dev->name, MAC2STR(local->preferred_ap));
697
698 return 0;
699}
700#endif /* PRISM2_NO_STATION_MODES */
701
702
703static int prism2_ioctl_siwap(struct net_device *dev,
704 struct iw_request_info *info,
705 struct sockaddr *ap_addr, char *extra)
706{
707#ifdef PRISM2_NO_STATION_MODES
708 return -EOPNOTSUPP;
709#else /* PRISM2_NO_STATION_MODES */
710 struct hostap_interface *iface;
711 local_info_t *local;
712
713 iface = netdev_priv(dev);
714 local = iface->local;
715
716 memcpy(local->preferred_ap, &ap_addr->sa_data, ETH_ALEN);
717
718 if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA) {
719 struct hfa384x_scan_request scan_req;
720 memset(&scan_req, 0, sizeof(scan_req));
721 scan_req.channel_list = __constant_cpu_to_le16(0x3fff);
722 scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS);
723 if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST,
724 &scan_req, sizeof(scan_req))) {
725 printk(KERN_DEBUG "%s: ScanResults request failed - "
726 "preferred AP delayed to next unsolicited "
727 "scan\n", dev->name);
728 }
729 } else if (local->host_roaming == 2 &&
730 local->iw_mode == IW_MODE_INFRA) {
731 if (hostap_join_ap(dev))
732 return -EINVAL;
733 } else {
734 printk(KERN_DEBUG "%s: Preferred AP (SIOCSIWAP) is used only "
735 "in Managed mode when host_roaming is enabled\n",
736 dev->name);
737 }
738
739 return 0;
740#endif /* PRISM2_NO_STATION_MODES */
741}
742
743static int prism2_ioctl_giwap(struct net_device *dev,
744 struct iw_request_info *info,
745 struct sockaddr *ap_addr, char *extra)
746{
747 struct hostap_interface *iface;
748 local_info_t *local;
749
750 iface = netdev_priv(dev);
751 local = iface->local;
752
753 ap_addr->sa_family = ARPHRD_ETHER;
754 switch (iface->type) {
755 case HOSTAP_INTERFACE_AP:
756 memcpy(&ap_addr->sa_data, dev->dev_addr, ETH_ALEN);
757 break;
758 case HOSTAP_INTERFACE_STA:
759 memcpy(&ap_addr->sa_data, local->assoc_ap_addr, ETH_ALEN);
760 break;
761 case HOSTAP_INTERFACE_WDS:
762 memcpy(&ap_addr->sa_data, iface->u.wds.remote_addr, ETH_ALEN);
763 break;
764 default:
765 if (local->func->get_rid(dev, HFA384X_RID_CURRENTBSSID,
766 &ap_addr->sa_data, ETH_ALEN, 1) < 0)
767 return -EOPNOTSUPP;
768
769 /* local->bssid is also updated in LinkStatus handler when in
770 * station mode */
771 memcpy(local->bssid, &ap_addr->sa_data, ETH_ALEN);
772 break;
773 }
774
775 return 0;
776}
777
778
779static int prism2_ioctl_siwnickn(struct net_device *dev,
780 struct iw_request_info *info,
781 struct iw_point *data, char *nickname)
782{
783 struct hostap_interface *iface;
784 local_info_t *local;
785
786 iface = netdev_priv(dev);
787 local = iface->local;
788
789 memset(local->name, 0, sizeof(local->name));
790 memcpy(local->name, nickname, data->length);
791 local->name_set = 1;
792
793 if (hostap_set_string(dev, HFA384X_RID_CNFOWNNAME, local->name) ||
794 local->func->reset_port(dev))
795 return -EINVAL;
796
797 return 0;
798}
799
800static int prism2_ioctl_giwnickn(struct net_device *dev,
801 struct iw_request_info *info,
802 struct iw_point *data, char *nickname)
803{
804 struct hostap_interface *iface;
805 local_info_t *local;
806 int len;
807 char name[MAX_NAME_LEN + 3];
808 u16 val;
809
810 iface = netdev_priv(dev);
811 local = iface->local;
812
813 len = local->func->get_rid(dev, HFA384X_RID_CNFOWNNAME,
814 &name, MAX_NAME_LEN + 2, 0);
815 val = __le16_to_cpu(*(u16 *) name);
816 if (len > MAX_NAME_LEN + 2 || len < 0 || val > MAX_NAME_LEN)
817 return -EOPNOTSUPP;
818
819 name[val + 2] = '\0';
820 data->length = val + 1;
821 memcpy(nickname, name + 2, val + 1);
822
823 return 0;
824}
825
826
827static int prism2_ioctl_siwfreq(struct net_device *dev,
828 struct iw_request_info *info,
829 struct iw_freq *freq, char *extra)
830{
831 struct hostap_interface *iface;
832 local_info_t *local;
833
834 iface = netdev_priv(dev);
835 local = iface->local;
836
837 /* freq => chan. */
838 if (freq->e == 1 &&
839 freq->m / 100000 >= freq_list[0] &&
840 freq->m / 100000 <= freq_list[FREQ_COUNT - 1]) {
841 int ch;
842 int fr = freq->m / 100000;
843 for (ch = 0; ch < FREQ_COUNT; ch++) {
844 if (fr == freq_list[ch]) {
845 freq->e = 0;
846 freq->m = ch + 1;
847 break;
848 }
849 }
850 }
851
852 if (freq->e != 0 || freq->m < 1 || freq->m > FREQ_COUNT ||
853 !(local->channel_mask & (1 << (freq->m - 1))))
854 return -EINVAL;
855
856 local->channel = freq->m; /* channel is used in prism2_setup_rids() */
857 if (hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel) ||
858 local->func->reset_port(dev))
859 return -EINVAL;
860
861 return 0;
862}
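/*
 * Worked example (illustrative, assuming freq_list[] holds the 2.4 GHz
 * channel frequencies 2412..2484 MHz): a SIOCSIWFREQ request for 2.437 GHz
 * arrives as e=1, m=243700000, so m / 100000 = 2437, which matches
 * freq_list[5]; the request is rewritten to e=0, m=6 and then validated
 * against channel_mask before cnfOwnChannel is set to channel 6.
 */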
863
864static int prism2_ioctl_giwfreq(struct net_device *dev,
865 struct iw_request_info *info,
866 struct iw_freq *freq, char *extra)
867{
868 struct hostap_interface *iface;
869 local_info_t *local;
870 u16 val;
871
872 iface = netdev_priv(dev);
873 local = iface->local;
874
875 if (local->func->get_rid(dev, HFA384X_RID_CURRENTCHANNEL, &val, 2, 1) <
876 0)
877 return -EINVAL;
878
879 le16_to_cpus(&val);
880 if (val < 1 || val > FREQ_COUNT)
881 return -EINVAL;
882
883 freq->m = freq_list[val - 1] * 100000;
884 freq->e = 1;
885
886 return 0;
887}
888
889
890static void hostap_monitor_set_type(local_info_t *local)
891{
892 struct net_device *dev = local->ddev;
893
894 if (dev == NULL)
895 return;
896
897 if (local->monitor_type == PRISM2_MONITOR_PRISM ||
898 local->monitor_type == PRISM2_MONITOR_CAPHDR) {
899 dev->type = ARPHRD_IEEE80211_PRISM;
900 dev->hard_header_parse =
901 hostap_80211_prism_header_parse;
902 } else {
903 dev->type = ARPHRD_IEEE80211;
904 dev->hard_header_parse = hostap_80211_header_parse;
905 }
906}
907
908
909static int prism2_ioctl_siwessid(struct net_device *dev,
910 struct iw_request_info *info,
911 struct iw_point *data, char *ssid)
912{
913 struct hostap_interface *iface;
914 local_info_t *local;
915
916 iface = netdev_priv(dev);
917 local = iface->local;
918
919 if (iface->type == HOSTAP_INTERFACE_WDS)
920 return -EOPNOTSUPP;
921
922 if (data->flags == 0)
923 ssid[0] = '\0'; /* ANY */
924
925 if (local->iw_mode == IW_MODE_MASTER && ssid[0] == '\0') {
926 /* Setting SSID to empty string seems to kill the card in
927 * Host AP mode */
928 printk(KERN_DEBUG "%s: Host AP mode does not support "
929 "'Any' essid\n", dev->name);
930 return -EINVAL;
931 }
932
933 memcpy(local->essid, ssid, data->length);
934 local->essid[data->length] = '\0';
935
936 if ((!local->fw_ap &&
937 hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID, local->essid))
938 || hostap_set_string(dev, HFA384X_RID_CNFOWNSSID, local->essid) ||
939 local->func->reset_port(dev))
940 return -EINVAL;
941
942 return 0;
943}
944
945static int prism2_ioctl_giwessid(struct net_device *dev,
946 struct iw_request_info *info,
947 struct iw_point *data, char *essid)
948{
949 struct hostap_interface *iface;
950 local_info_t *local;
951 u16 val;
952
953 iface = netdev_priv(dev);
954 local = iface->local;
955
956 if (iface->type == HOSTAP_INTERFACE_WDS)
957 return -EOPNOTSUPP;
958
959 data->flags = 1; /* active */
960 if (local->iw_mode == IW_MODE_MASTER) {
961 data->length = strlen(local->essid);
962 memcpy(essid, local->essid, IW_ESSID_MAX_SIZE);
963 } else {
964 int len;
965 char ssid[MAX_SSID_LEN + 2];
966 memset(ssid, 0, sizeof(ssid));
967 len = local->func->get_rid(dev, HFA384X_RID_CURRENTSSID,
968 &ssid, MAX_SSID_LEN + 2, 0);
969 val = __le16_to_cpu(*(u16 *) ssid);
970 if (len > MAX_SSID_LEN + 2 || len < 0 || val > MAX_SSID_LEN) {
971 return -EOPNOTSUPP;
972 }
973 data->length = val;
974 memcpy(essid, ssid + 2, IW_ESSID_MAX_SIZE);
975 }
976
977 return 0;
978}
979
980
981static int prism2_ioctl_giwrange(struct net_device *dev,
982 struct iw_request_info *info,
983 struct iw_point *data, char *extra)
984{
985 struct hostap_interface *iface;
986 local_info_t *local;
987 struct iw_range *range = (struct iw_range *) extra;
988 u8 rates[10];
989 u16 val;
990 int i, len, over2;
991
992 iface = netdev_priv(dev);
993 local = iface->local;
994
995 data->length = sizeof(struct iw_range);
996 memset(range, 0, sizeof(struct iw_range));
997
998 /* TODO: could fill num_txpower and txpower array with
999 * something; however, there are 128 different values.. */
1000
1001 range->txpower_capa = IW_TXPOW_DBM;
1002
1003 if (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC)
1004 {
1005 range->min_pmp = 1 * 1024;
1006 range->max_pmp = 65535 * 1024;
1007 range->min_pmt = 1 * 1024;
1008 range->max_pmt = 1000 * 1024;
1009 range->pmp_flags = IW_POWER_PERIOD;
1010 range->pmt_flags = IW_POWER_TIMEOUT;
1011 range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT |
1012 IW_POWER_UNICAST_R | IW_POWER_ALL_R;
1013 }
1014
1015 range->we_version_compiled = WIRELESS_EXT;
1016 range->we_version_source = 18;
1017
1018 range->retry_capa = IW_RETRY_LIMIT;
1019 range->retry_flags = IW_RETRY_LIMIT;
1020 range->min_retry = 0;
1021 range->max_retry = 255;
1022
1023 range->num_channels = FREQ_COUNT;
1024
1025 val = 0;
1026 for (i = 0; i < FREQ_COUNT; i++) {
1027 if (local->channel_mask & (1 << i)) {
1028 range->freq[val].i = i + 1;
1029 range->freq[val].m = freq_list[i] * 100000;
1030 range->freq[val].e = 1;
1031 val++;
1032 }
1033 if (val == IW_MAX_FREQUENCIES)
1034 break;
1035 }
1036 range->num_frequency = val;
1037
1038 if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
1039 range->max_qual.qual = 70; /* what is correct max? This was not
1040 * documented exactly. At least
1041 * 69 has been observed. */
1042 range->max_qual.level = 0; /* dB */
1043 range->max_qual.noise = 0; /* dB */
1044
1045 /* What would be suitable values for "average/typical" qual? */
1046 range->avg_qual.qual = 20;
1047 range->avg_qual.level = -60;
1048 range->avg_qual.noise = -95;
1049 } else {
1050 range->max_qual.qual = 92; /* 0 .. 92 */
1051 range->max_qual.level = 154; /* 27 .. 154 */
1052 range->max_qual.noise = 154; /* 27 .. 154 */
1053 }
1054 range->sensitivity = 3;
1055
1056 range->max_encoding_tokens = WEP_KEYS;
1057 range->num_encoding_sizes = 2;
1058 range->encoding_size[0] = 5;
1059 range->encoding_size[1] = 13;
1060
1061 over2 = 0;
1062 len = prism2_get_datarates(dev, rates);
1063 range->num_bitrates = 0;
1064 for (i = 0; i < len; i++) {
1065 if (range->num_bitrates < IW_MAX_BITRATES) {
1066 range->bitrate[range->num_bitrates] =
1067 rates[i] * 500000;
1068 range->num_bitrates++;
1069 }
1070 if (rates[i] == 0x0b || rates[i] == 0x16)
1071 over2 = 1;
1072 }
1073 /* estimated maximum TCP throughput values (bps) */
1074 range->throughput = over2 ? 5500000 : 1500000;
1075
1076 range->min_rts = 0;
1077 range->max_rts = 2347;
1078 range->min_frag = 256;
1079 range->max_frag = 2346;
1080
1081 /* Event capability (kernel + driver) */
1082 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
1083 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
1084 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
1085 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
1086 range->event_capa[1] = IW_EVENT_CAPA_K_1;
1087 range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVTXDROP) |
1088 IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
1089 IW_EVENT_CAPA_MASK(IWEVREGISTERED) |
1090 IW_EVENT_CAPA_MASK(IWEVEXPIRED));
1091
1092 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
1093 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
1094
1095 return 0;
1096}
1097
1098
1099static int hostap_monitor_mode_enable(local_info_t *local)
1100{
1101 struct net_device *dev = local->dev;
1102
1103 printk(KERN_DEBUG "Enabling monitor mode\n");
1104 hostap_monitor_set_type(local);
1105
1106 if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
1107 HFA384X_PORTTYPE_PSEUDO_IBSS)) {
1108 printk(KERN_DEBUG "Port type setting for monitor mode "
1109 "failed\n");
1110 return -EOPNOTSUPP;
1111 }
1112
1113 /* Host decrypt is needed to get the IV and ICV fields;
1114 * however, monitor mode seems to remove WEP flag from frame
1115 * control field */
1116 if (hostap_set_word(dev, HFA384X_RID_CNFWEPFLAGS,
1117 HFA384X_WEPFLAGS_HOSTENCRYPT |
1118 HFA384X_WEPFLAGS_HOSTDECRYPT)) {
1119 printk(KERN_DEBUG "WEP flags setting failed\n");
1120 return -EOPNOTSUPP;
1121 }
1122
1123 if (local->func->reset_port(dev) ||
1124 local->func->cmd(dev, HFA384X_CMDCODE_TEST |
1125 (HFA384X_TEST_MONITOR << 8),
1126 0, NULL, NULL)) {
1127 printk(KERN_DEBUG "Setting monitor mode failed\n");
1128 return -EOPNOTSUPP;
1129 }
1130
1131 return 0;
1132}
1133
1134
1135static int hostap_monitor_mode_disable(local_info_t *local)
1136{
1137 struct net_device *dev = local->ddev;
1138
1139 if (dev == NULL)
1140 return -1;
1141
1142 printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name);
1143 dev->type = ARPHRD_ETHER;
1144 dev->hard_header_parse = local->saved_eth_header_parse;
1145 if (local->func->cmd(dev, HFA384X_CMDCODE_TEST |
1146 (HFA384X_TEST_STOP << 8),
1147 0, NULL, NULL))
1148 return -1;
1149 return hostap_set_encryption(local);
1150}
1151
1152
1153static int prism2_ioctl_siwmode(struct net_device *dev,
1154 struct iw_request_info *info,
1155 __u32 *mode, char *extra)
1156{
1157 struct hostap_interface *iface;
1158 local_info_t *local;
1159 int double_reset = 0;
1160
1161 iface = netdev_priv(dev);
1162 local = iface->local;
1163
1164 if (*mode != IW_MODE_ADHOC && *mode != IW_MODE_INFRA &&
1165 *mode != IW_MODE_MASTER && *mode != IW_MODE_REPEAT &&
1166 *mode != IW_MODE_MONITOR)
1167 return -EOPNOTSUPP;
1168
1169#ifdef PRISM2_NO_STATION_MODES
1170 if (*mode == IW_MODE_ADHOC || *mode == IW_MODE_INFRA)
1171 return -EOPNOTSUPP;
1172#endif /* PRISM2_NO_STATION_MODES */
1173
1174 if (*mode == local->iw_mode)
1175 return 0;
1176
1177 if (*mode == IW_MODE_MASTER && local->essid[0] == '\0') {
1178 printk(KERN_WARNING "%s: empty SSID not allowed in Master "
1179 "mode\n", dev->name);
1180 return -EINVAL;
1181 }
1182
1183 if (local->iw_mode == IW_MODE_MONITOR)
1184 hostap_monitor_mode_disable(local);
1185
1186 if ((local->iw_mode == IW_MODE_ADHOC ||
1187 local->iw_mode == IW_MODE_MONITOR) && *mode == IW_MODE_MASTER) {
1188		/* There seems to be a firmware bug in at least STA f/w v1.5.6
1189		 * that leaves beacon frames using the IBSS type when moving
1190		 * from IBSS to Host AP mode. A double Port0 reset seems to be
1191		 * enough to work around this. */
1192 double_reset = 1;
1193 }
1194
1195 printk(KERN_DEBUG "prism2: %s: operating mode changed "
1196 "%d -> %d\n", dev->name, local->iw_mode, *mode);
1197 local->iw_mode = *mode;
1198
1199 if (local->iw_mode == IW_MODE_MONITOR)
1200 hostap_monitor_mode_enable(local);
1201 else if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
1202 !local->fw_encrypt_ok) {
1203 printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
1204 "a workaround for firmware bug in Host AP mode WEP\n",
1205 dev->name);
1206 local->host_encrypt = 1;
1207 }
1208
1209 if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
1210 hostap_get_porttype(local)))
1211 return -EOPNOTSUPP;
1212
1213 if (local->func->reset_port(dev))
1214 return -EINVAL;
1215 if (double_reset && local->func->reset_port(dev))
1216 return -EINVAL;
1217
1218 if (local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC)
1219 {
1220 /* netif_carrier is used only in client modes for now, so make
1221 * sure carrier is on when moving to non-client modes. */
1222 netif_carrier_on(local->dev);
1223 netif_carrier_on(local->ddev);
1224 }
1225 return 0;
1226}
1227
1228
1229static int prism2_ioctl_giwmode(struct net_device *dev,
1230 struct iw_request_info *info,
1231 __u32 *mode, char *extra)
1232{
1233 struct hostap_interface *iface;
1234 local_info_t *local;
1235
1236 iface = netdev_priv(dev);
1237 local = iface->local;
1238
1239 switch (iface->type) {
1240 case HOSTAP_INTERFACE_STA:
1241 *mode = IW_MODE_INFRA;
1242 break;
1243 case HOSTAP_INTERFACE_WDS:
1244 *mode = IW_MODE_REPEAT;
1245 break;
1246 default:
1247 *mode = local->iw_mode;
1248 break;
1249 }
1250 return 0;
1251}
1252
1253
1254static int prism2_ioctl_siwpower(struct net_device *dev,
1255 struct iw_request_info *info,
1256 struct iw_param *wrq, char *extra)
1257{
1258#ifdef PRISM2_NO_STATION_MODES
1259 return -EOPNOTSUPP;
1260#else /* PRISM2_NO_STATION_MODES */
1261 int ret = 0;
1262
1263 if (wrq->disabled)
1264 return hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 0);
1265
1266 switch (wrq->flags & IW_POWER_MODE) {
1267 case IW_POWER_UNICAST_R:
1268 ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 0);
1269 if (ret)
1270 return ret;
1271 ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
1272 if (ret)
1273 return ret;
1274 break;
1275 case IW_POWER_ALL_R:
1276 ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 1);
1277 if (ret)
1278 return ret;
1279 ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
1280 if (ret)
1281 return ret;
1282 break;
1283 case IW_POWER_ON:
1284 break;
1285 default:
1286 return -EINVAL;
1287 }
1288
1289 if (wrq->flags & IW_POWER_TIMEOUT) {
1290 ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
1291 if (ret)
1292 return ret;
1293 ret = hostap_set_word(dev, HFA384X_RID_CNFPMHOLDOVERDURATION,
1294 wrq->value / 1024);
1295 if (ret)
1296 return ret;
1297 }
1298 if (wrq->flags & IW_POWER_PERIOD) {
1299 ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
1300 if (ret)
1301 return ret;
1302 ret = hostap_set_word(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
1303 wrq->value / 1024);
1304 if (ret)
1305 return ret;
1306 }
1307
1308 return ret;
1309#endif /* PRISM2_NO_STATION_MODES */
1310}
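/*
 * Illustrative note (assumption about units): Wireless Extensions express
 * power management timeout/period values in microseconds, while the Prism2
 * cnfPMHoldoverDuration and cnfMaxSleepDuration RIDs appear to take
 * kilo-microsecond (1.024 ms) units, hence the "wrq->value / 1024" scaling
 * above; e.g. a requested period of 102400 us would be written to the
 * firmware as 100.
 */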
1311
1312
1313static int prism2_ioctl_giwpower(struct net_device *dev,
1314 struct iw_request_info *info,
1315 struct iw_param *rrq, char *extra)
1316{
1317#ifdef PRISM2_NO_STATION_MODES
1318 return -EOPNOTSUPP;
1319#else /* PRISM2_NO_STATION_MODES */
1320 struct hostap_interface *iface;
1321 local_info_t *local;
1322 u16 enable, mcast;
1323
1324 iface = netdev_priv(dev);
1325 local = iface->local;
1326
1327 if (local->func->get_rid(dev, HFA384X_RID_CNFPMENABLED, &enable, 2, 1)
1328 < 0)
1329 return -EINVAL;
1330
1331 if (!__le16_to_cpu(enable)) {
1332 rrq->disabled = 1;
1333 return 0;
1334 }
1335
1336 rrq->disabled = 0;
1337
1338 if ((rrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
1339 u16 timeout;
1340 if (local->func->get_rid(dev,
1341 HFA384X_RID_CNFPMHOLDOVERDURATION,
1342 &timeout, 2, 1) < 0)
1343 return -EINVAL;
1344
1345 rrq->flags = IW_POWER_TIMEOUT;
1346 rrq->value = __le16_to_cpu(timeout) * 1024;
1347 } else {
1348 u16 period;
1349 if (local->func->get_rid(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
1350 &period, 2, 1) < 0)
1351 return -EINVAL;
1352
1353 rrq->flags = IW_POWER_PERIOD;
1354 rrq->value = __le16_to_cpu(period) * 1024;
1355 }
1356
1357 if (local->func->get_rid(dev, HFA384X_RID_CNFMULTICASTRECEIVE, &mcast,
1358 2, 1) < 0)
1359 return -EINVAL;
1360
1361 if (__le16_to_cpu(mcast))
1362 rrq->flags |= IW_POWER_ALL_R;
1363 else
1364 rrq->flags |= IW_POWER_UNICAST_R;
1365
1366 return 0;
1367#endif /* PRISM2_NO_STATION_MODES */
1368}
1369
1370
1371static int prism2_ioctl_siwretry(struct net_device *dev,
1372 struct iw_request_info *info,
1373 struct iw_param *rrq, char *extra)
1374{
1375 struct hostap_interface *iface;
1376 local_info_t *local;
1377
1378 iface = netdev_priv(dev);
1379 local = iface->local;
1380
1381 if (rrq->disabled)
1382 return -EINVAL;
1383
1384 /* setting retry limits is not supported with the current station
1385 * firmware code; simulate this with alternative retry count for now */
1386 if (rrq->flags == IW_RETRY_LIMIT) {
1387 if (rrq->value < 0) {
1388 /* disable manual retry count setting and use firmware
1389 * defaults */
1390 local->manual_retry_count = -1;
1391 local->tx_control &= ~HFA384X_TX_CTRL_ALT_RTRY;
1392 } else {
1393 if (hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
1394 rrq->value)) {
1395 printk(KERN_DEBUG "%s: Alternate retry count "
1396 "setting to %d failed\n",
1397 dev->name, rrq->value);
1398 return -EOPNOTSUPP;
1399 }
1400
1401 local->manual_retry_count = rrq->value;
1402 local->tx_control |= HFA384X_TX_CTRL_ALT_RTRY;
1403 }
1404 return 0;
1405 }
1406
1407 return -EOPNOTSUPP;
1408
1409#if 0
1410	/* What could be done if the firmware supported this: */
1411
1412 if (rrq->flags & IW_RETRY_LIMIT) {
1413 if (rrq->flags & IW_RETRY_MAX)
1414 HFA384X_RID_LONGRETRYLIMIT = rrq->value;
1415 else if (rrq->flags & IW_RETRY_MIN)
1416 HFA384X_RID_SHORTRETRYLIMIT = rrq->value;
1417 else {
1418 HFA384X_RID_LONGRETRYLIMIT = rrq->value;
1419 HFA384X_RID_SHORTRETRYLIMIT = rrq->value;
1420 }
1421
1422 }
1423
1424 if (rrq->flags & IW_RETRY_LIFETIME) {
1425 HFA384X_RID_MAXTRANSMITLIFETIME = rrq->value / 1024;
1426 }
1427
1428 return 0;
1429#endif /* 0 */
1430}
1431
1432static int prism2_ioctl_giwretry(struct net_device *dev,
1433 struct iw_request_info *info,
1434 struct iw_param *rrq, char *extra)
1435{
1436 struct hostap_interface *iface;
1437 local_info_t *local;
1438 u16 shortretry, longretry, lifetime, altretry;
1439
1440 iface = netdev_priv(dev);
1441 local = iface->local;
1442
1443 if (local->func->get_rid(dev, HFA384X_RID_SHORTRETRYLIMIT, &shortretry,
1444 2, 1) < 0 ||
1445 local->func->get_rid(dev, HFA384X_RID_LONGRETRYLIMIT, &longretry,
1446 2, 1) < 0 ||
1447 local->func->get_rid(dev, HFA384X_RID_MAXTRANSMITLIFETIME,
1448 &lifetime, 2, 1) < 0)
1449 return -EINVAL;
1450
1451 le16_to_cpus(&shortretry);
1452 le16_to_cpus(&longretry);
1453 le16_to_cpus(&lifetime);
1454
1455 rrq->disabled = 0;
1456
1457 if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
1458 rrq->flags = IW_RETRY_LIFETIME;
1459 rrq->value = lifetime * 1024;
1460 } else {
1461 if (local->manual_retry_count >= 0) {
1462 rrq->flags = IW_RETRY_LIMIT;
1463 if (local->func->get_rid(dev,
1464 HFA384X_RID_CNFALTRETRYCOUNT,
1465 &altretry, 2, 1) >= 0)
1466 rrq->value = le16_to_cpu(altretry);
1467 else
1468 rrq->value = local->manual_retry_count;
1469 } else if ((rrq->flags & IW_RETRY_MAX)) {
1470 rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
1471 rrq->value = longretry;
1472 } else {
1473 rrq->flags = IW_RETRY_LIMIT;
1474 rrq->value = shortretry;
1475 if (shortretry != longretry)
1476 rrq->flags |= IW_RETRY_MIN;
1477 }
1478 }
1479 return 0;
1480}
1481
1482
1483/* Note! This TX power control is experimental and should not be used in
1484 * production. It just sets the raw power register and does not use any
1485 * feedback from the measured TX power (CR58). It is currently disabled
1486 * (behind RAW_TXPOWER_SETTING) to make sure it is not used by accident.
1487 * TX power configuration will be enabled again once a proper feedback-based
1488 * algorithm has been implemented. */
1489
1490#ifdef RAW_TXPOWER_SETTING
1491/* Map HFA386x's CR31 to and from dBm with some sort of ad hoc mapping.
1492 * This version assumes the following mapping:
1493 * CR31 is a 7-bit value with a -64 to +63 range.
1494 * -64 is mapped to +20 dBm and +63 to -43 dBm.
1495 * This is certainly not an exact mapping for every card, but at least an
1496 * increasing dBm value should correspond to increasing TX power.
1497 */
1498
1499static int prism2_txpower_hfa386x_to_dBm(u16 val)
1500{
1501 signed char tmp;
1502
1503 if (val > 255)
1504 val = 255;
1505
1506 tmp = val;
1507 tmp >>= 2;
1508
1509 return -12 - tmp;
1510}
1511
1512static u16 prism2_txpower_dBm_to_hfa386x(int val)
1513{
1514 signed char tmp;
1515
1516 if (val > 20)
1517 return 128;
1518 else if (val < -43)
1519 return 127;
1520
1521 tmp = val;
1522 tmp = -12 - tmp;
1523 tmp <<= 2;
1524
1525 return (unsigned char) tmp;
1526}
1527#endif /* RAW_TXPOWER_SETTING */
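/*
 * Worked example of the mapping above (illustrative only):
 * prism2_txpower_dBm_to_hfa386x(15) computes tmp = -12 - 15 = -27, shifts it
 * left by two to get -108 and returns it as the unsigned byte 148 (0x94).
 * Feeding that back through prism2_txpower_hfa386x_to_dBm(148) reinterprets
 * 148 as the signed char -108, arithmetic-shifts it right by two to -27 and
 * returns -12 - (-27) = 15 dBm, so the two helpers round-trip for in-range
 * values.
 */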
1528
1529
1530static int prism2_ioctl_siwtxpow(struct net_device *dev,
1531 struct iw_request_info *info,
1532 struct iw_param *rrq, char *extra)
1533{
1534 struct hostap_interface *iface;
1535 local_info_t *local;
1536#ifdef RAW_TXPOWER_SETTING
1537 char *tmp;
1538#endif
1539 u16 val;
1540 int ret = 0;
1541
1542 iface = netdev_priv(dev);
1543 local = iface->local;
1544
1545 if (rrq->disabled) {
1546 if (local->txpower_type != PRISM2_TXPOWER_OFF) {
1547 val = 0xff; /* use all standby and sleep modes */
1548 ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
1549 HFA386X_CR_A_D_TEST_MODES2,
1550 &val, NULL);
1551 printk(KERN_DEBUG "%s: Turning radio off: %s\n",
1552 dev->name, ret ? "failed" : "OK");
1553 local->txpower_type = PRISM2_TXPOWER_OFF;
1554 }
1555 return (ret ? -EOPNOTSUPP : 0);
1556 }
1557
1558 if (local->txpower_type == PRISM2_TXPOWER_OFF) {
1559 val = 0; /* disable all standby and sleep modes */
1560 ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
1561 HFA386X_CR_A_D_TEST_MODES2, &val, NULL);
1562 printk(KERN_DEBUG "%s: Turning radio on: %s\n",
1563 dev->name, ret ? "failed" : "OK");
1564 local->txpower_type = PRISM2_TXPOWER_UNKNOWN;
1565 }
1566
1567#ifdef RAW_TXPOWER_SETTING
1568 if (!rrq->fixed && local->txpower_type != PRISM2_TXPOWER_AUTO) {
1569 printk(KERN_DEBUG "Setting ALC on\n");
1570 val = HFA384X_TEST_CFG_BIT_ALC;
1571 local->func->cmd(dev, HFA384X_CMDCODE_TEST |
1572 (HFA384X_TEST_CFG_BITS << 8), 1, &val, NULL);
1573 local->txpower_type = PRISM2_TXPOWER_AUTO;
1574 return 0;
1575 }
1576
1577 if (local->txpower_type != PRISM2_TXPOWER_FIXED) {
1578 printk(KERN_DEBUG "Setting ALC off\n");
1579 val = HFA384X_TEST_CFG_BIT_ALC;
1580 local->func->cmd(dev, HFA384X_CMDCODE_TEST |
1581 (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
1582 local->txpower_type = PRISM2_TXPOWER_FIXED;
1583 }
1584
1585 if (rrq->flags == IW_TXPOW_DBM)
1586 tmp = "dBm";
1587 else if (rrq->flags == IW_TXPOW_MWATT)
1588 tmp = "mW";
1589 else
1590 tmp = "UNKNOWN";
1591 printk(KERN_DEBUG "Setting TX power to %d %s\n", rrq->value, tmp);
1592
1593 if (rrq->flags != IW_TXPOW_DBM) {
1594		printk(KERN_DEBUG "SIOCSIWTXPOW with mW is not supported; use dBm\n");
1595 return -EOPNOTSUPP;
1596 }
1597
1598 local->txpower = rrq->value;
1599 val = prism2_txpower_dBm_to_hfa386x(local->txpower);
1600 if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
1601 HFA386X_CR_MANUAL_TX_POWER, &val, NULL))
1602 ret = -EOPNOTSUPP;
1603#else /* RAW_TXPOWER_SETTING */
1604 if (rrq->fixed)
1605 ret = -EOPNOTSUPP;
1606#endif /* RAW_TXPOWER_SETTING */
1607
1608 return ret;
1609}
1610
1611static int prism2_ioctl_giwtxpow(struct net_device *dev,
1612 struct iw_request_info *info,
1613 struct iw_param *rrq, char *extra)
1614{
1615#ifdef RAW_TXPOWER_SETTING
1616 struct hostap_interface *iface;
1617 local_info_t *local;
1618 u16 resp0;
1619
1620 iface = netdev_priv(dev);
1621 local = iface->local;
1622
1623 rrq->flags = IW_TXPOW_DBM;
1624 rrq->disabled = 0;
1625 rrq->fixed = 0;
1626
1627 if (local->txpower_type == PRISM2_TXPOWER_AUTO) {
1628 if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF,
1629 HFA386X_CR_MANUAL_TX_POWER,
1630 NULL, &resp0) == 0) {
1631 rrq->value = prism2_txpower_hfa386x_to_dBm(resp0);
1632 } else {
1633 /* Could not get real txpower; guess 15 dBm */
1634 rrq->value = 15;
1635 }
1636 } else if (local->txpower_type == PRISM2_TXPOWER_OFF) {
1637 rrq->value = 0;
1638 rrq->disabled = 1;
1639 } else if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
1640 rrq->value = local->txpower;
1641 rrq->fixed = 1;
1642 } else {
1643		printk(KERN_DEBUG "SIOCGIWTXPOW - unknown txpower_type=%d\n",
1644 local->txpower_type);
1645 }
1646 return 0;
1647#else /* RAW_TXPOWER_SETTING */
1648 return -EOPNOTSUPP;
1649#endif /* RAW_TXPOWER_SETTING */
1650}
1651
1652
1653#ifndef PRISM2_NO_STATION_MODES
1654
1655/* A HostScan request works both with and without host_roaming mode and does
1656 * not break the current association. However, it requires a newer station
1657 * firmware version (>= 1.3.1) than the plain scan request. */
1658static int prism2_request_hostscan(struct net_device *dev,
1659 u8 *ssid, u8 ssid_len)
1660{
1661 struct hostap_interface *iface;
1662 local_info_t *local;
1663 struct hfa384x_hostscan_request scan_req;
1664
1665 iface = netdev_priv(dev);
1666 local = iface->local;
1667
1668 memset(&scan_req, 0, sizeof(scan_req));
1669 scan_req.channel_list = cpu_to_le16(local->channel_mask &
1670 local->scan_channel_mask);
1671 scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS);
1672 if (ssid) {
1673 if (ssid_len > 32)
1674 return -EINVAL;
1675 scan_req.target_ssid_len = cpu_to_le16(ssid_len);
1676 memcpy(scan_req.target_ssid, ssid, ssid_len);
1677 }
1678
1679 if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
1680 sizeof(scan_req))) {
1681 printk(KERN_DEBUG "%s: HOSTSCAN failed\n", dev->name);
1682 return -EINVAL;
1683 }
1684 return 0;
1685}
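/*
 * Illustrative note (assumed RID semantics): channel_list is a little-endian
 * bitmask in which bit (n - 1) enables scanning on channel n, so the 0x3fff
 * value used for the siwap-triggered scan earlier covers channels 1-14, and
 * masking with channel_mask & scan_channel_mask here restricts the HostScan
 * to the channels the user has allowed.
 */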
1686
1687
1688static int prism2_request_scan(struct net_device *dev)
1689{
1690 struct hostap_interface *iface;
1691 local_info_t *local;
1692 struct hfa384x_scan_request scan_req;
1693 int ret = 0;
1694
1695 iface = netdev_priv(dev);
1696 local = iface->local;
1697
1698 memset(&scan_req, 0, sizeof(scan_req));
1699 scan_req.channel_list = cpu_to_le16(local->channel_mask &
1700 local->scan_channel_mask);
1701 scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS);
1702
1703	/* FIX:
1704	 * It seems to be enough to switch roaming mode to host-based for a
1705	 * short moment, set up the ScanRequest data, and then return the mode
1706	 * to firmware-based.
1707	 *
1708	 * Master mode would need to drop to Managed mode for a short while to
1709	 * make scanning work, or to sweep through the different channels and
1710	 * use a passive scan based on beacons. */
1711
1712 if (!local->host_roaming)
1713 hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
1714 HFA384X_ROAMING_HOST);
1715
1716 if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST, &scan_req,
1717 sizeof(scan_req))) {
1718 printk(KERN_DEBUG "SCANREQUEST failed\n");
1719 ret = -EINVAL;
1720 }
1721
1722 if (!local->host_roaming)
1723 hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
1724 HFA384X_ROAMING_FIRMWARE);
1725
1726	return ret;
1727}
1728
1729#else /* !PRISM2_NO_STATION_MODES */
1730
1731static inline int prism2_request_hostscan(struct net_device *dev,
1732 u8 *ssid, u8 ssid_len)
1733{
1734 return -EOPNOTSUPP;
1735}
1736
1737
1738static inline int prism2_request_scan(struct net_device *dev)
1739{
1740 return -EOPNOTSUPP;
1741}
1742
1743#endif /* !PRISM2_NO_STATION_MODES */
1744
1745
1746static int prism2_ioctl_siwscan(struct net_device *dev,
1747 struct iw_request_info *info,
1748 struct iw_point *data, char *extra)
1749{
1750 struct hostap_interface *iface;
1751 local_info_t *local;
1752 int ret;
1753 u8 *ssid = NULL, ssid_len = 0;
1754 struct iw_scan_req *req = (struct iw_scan_req *) extra;
1755
1756 iface = netdev_priv(dev);
1757 local = iface->local;
1758
1759 if (data->length < sizeof(struct iw_scan_req))
1760 req = NULL;
1761
1762 if (local->iw_mode == IW_MODE_MASTER) {
1763 /* In master mode, we just return the results of our local
1764 * tables, so we don't need to start anything...
1765 * Jean II */
1766 data->length = 0;
1767 return 0;
1768 }
1769
1770 if (!local->dev_enabled)
1771 return -ENETDOWN;
1772
1773 if (req && data->flags & IW_SCAN_THIS_ESSID) {
1774 ssid = req->essid;
1775 ssid_len = req->essid_len;
1776
1777 if (ssid_len &&
1778 ((local->iw_mode != IW_MODE_INFRA &&
1779 local->iw_mode != IW_MODE_ADHOC) ||
1780 (local->sta_fw_ver < PRISM2_FW_VER(1,3,1))))
1781 return -EOPNOTSUPP;
1782 }
1783
1784 if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1))
1785 ret = prism2_request_hostscan(dev, ssid, ssid_len);
1786 else
1787 ret = prism2_request_scan(dev);
1788
1789 if (ret == 0)
1790 local->scan_timestamp = jiffies;
1791
1792 /* Could inquire F101, F103 or wait for SIOCGIWSCAN and read RID */
1793
1794 return ret;
1795}
1796
1797
1798#ifndef PRISM2_NO_STATION_MODES
1799static char * __prism2_translate_scan(local_info_t *local,
1800 struct hfa384x_hostscan_result *scan,
1801 struct hostap_bss_info *bss,
1802 char *current_ev, char *end_buf)
1803{
1804 int i, chan;
1805 struct iw_event iwe;
1806 char *current_val;
1807 u16 capabilities;
1808 u8 *pos;
1809 u8 *ssid, *bssid;
1810 size_t ssid_len;
1811 char *buf;
1812
1813 if (bss) {
1814 ssid = bss->ssid;
1815 ssid_len = bss->ssid_len;
1816 bssid = bss->bssid;
1817 } else {
1818 ssid = scan->ssid;
1819 ssid_len = le16_to_cpu(scan->ssid_len);
1820 bssid = scan->bssid;
1821 }
1822 if (ssid_len > 32)
1823 ssid_len = 32;
1824
1825 /* First entry *MUST* be the AP MAC address */
1826 memset(&iwe, 0, sizeof(iwe));
1827 iwe.cmd = SIOCGIWAP;
1828 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
1829 memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
1830	/* FIX:
1831	 * I do not know how this is possible, but iwe_stream_add_event
1832	 * seems to reorder the memcpy execution so that len is set only
1833	 * after copying. Pre-setting len here "fixes" this, but the real
1834	 * problem should be solved (after which these iwe.len
1835	 * assignments could be removed from this function). */
1836 iwe.len = IW_EV_ADDR_LEN;
1837 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1838 IW_EV_ADDR_LEN);
1839
1840 /* Other entries will be displayed in the order we give them */
1841
1842 memset(&iwe, 0, sizeof(iwe));
1843 iwe.cmd = SIOCGIWESSID;
1844 iwe.u.data.length = ssid_len;
1845 iwe.u.data.flags = 1;
1846 iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
1847 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid);
1848
1849 memset(&iwe, 0, sizeof(iwe));
1850 iwe.cmd = SIOCGIWMODE;
1851 if (bss) {
1852 capabilities = bss->capab_info;
1853 } else {
1854 capabilities = le16_to_cpu(scan->capability);
1855 }
1856 if (capabilities & (WLAN_CAPABILITY_ESS |
1857 WLAN_CAPABILITY_IBSS)) {
1858 if (capabilities & WLAN_CAPABILITY_ESS)
1859 iwe.u.mode = IW_MODE_MASTER;
1860 else
1861 iwe.u.mode = IW_MODE_ADHOC;
1862 iwe.len = IW_EV_UINT_LEN;
1863 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1864 IW_EV_UINT_LEN);
1865 }
1866
1867 memset(&iwe, 0, sizeof(iwe));
1868 iwe.cmd = SIOCGIWFREQ;
1869 if (scan) {
1870		chan = le16_to_cpu(scan->chid);
1871 } else if (bss) {
1872 chan = bss->chan;
1873 } else {
1874 chan = 0;
1875 }
1876
1877 if (chan > 0) {
1878		iwe.u.freq.m = freq_list[chan - 1] * 100000;
1879 iwe.u.freq.e = 1;
1880 iwe.len = IW_EV_FREQ_LEN;
1881 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1882 IW_EV_FREQ_LEN);
1883 }
1884
1885 if (scan) {
1886 memset(&iwe, 0, sizeof(iwe));
1887 iwe.cmd = IWEVQUAL;
1888 if (local->last_scan_type == PRISM2_HOSTSCAN) {
1889 iwe.u.qual.level = le16_to_cpu(scan->sl);
1890 iwe.u.qual.noise = le16_to_cpu(scan->anl);
1891 } else {
1892 iwe.u.qual.level =
1893 HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->sl));
1894 iwe.u.qual.noise =
1895 HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
1896 }
1897 iwe.len = IW_EV_QUAL_LEN;
1898 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1899 IW_EV_QUAL_LEN);
1900 }
1901
1902 memset(&iwe, 0, sizeof(iwe));
1903 iwe.cmd = SIOCGIWENCODE;
1904 if (capabilities & WLAN_CAPABILITY_PRIVACY)
1905 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
1906 else
1907 iwe.u.data.flags = IW_ENCODE_DISABLED;
1908 iwe.u.data.length = 0;
1909 iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
1910 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
1911
1912 /* TODO: add SuppRates into BSS table */
1913 if (scan) {
1914 memset(&iwe, 0, sizeof(iwe));
1915 iwe.cmd = SIOCGIWRATE;
1916 current_val = current_ev + IW_EV_LCP_LEN;
1917 pos = scan->sup_rates;
1918 for (i = 0; i < sizeof(scan->sup_rates); i++) {
1919 if (pos[i] == 0)
1920 break;
1921 /* Bit rate given in 500 kb/s units (+ 0x80) */
1922 iwe.u.bitrate.value = ((pos[i] & 0x7f) * 500000);
1923 current_val = iwe_stream_add_value(
1924 current_ev, current_val, end_buf, &iwe,
1925 IW_EV_PARAM_LEN);
1926 }
1927 /* Check if we added any event */
1928 if ((current_val - current_ev) > IW_EV_LCP_LEN)
1929 current_ev = current_val;
1930 }
1931
1932 /* TODO: add BeaconInt,resp_rate,atim into BSS table */
1933 buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_KERNEL);
1934 if (buf && scan) {
1935 memset(&iwe, 0, sizeof(iwe));
1936 iwe.cmd = IWEVCUSTOM;
1937 sprintf(buf, "bcn_int=%d", le16_to_cpu(scan->beacon_interval));
1938 iwe.u.data.length = strlen(buf);
1939 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
1940 buf);
1941
1942 memset(&iwe, 0, sizeof(iwe));
1943 iwe.cmd = IWEVCUSTOM;
1944 sprintf(buf, "resp_rate=%d", le16_to_cpu(scan->rate));
1945 iwe.u.data.length = strlen(buf);
1946 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
1947 buf);
1948
1949 if (local->last_scan_type == PRISM2_HOSTSCAN &&
1950 (capabilities & WLAN_CAPABILITY_IBSS)) {
1951 memset(&iwe, 0, sizeof(iwe));
1952 iwe.cmd = IWEVCUSTOM;
1953 sprintf(buf, "atim=%d", le16_to_cpu(scan->atim));
1954 iwe.u.data.length = strlen(buf);
1955 current_ev = iwe_stream_add_point(current_ev, end_buf,
1956 &iwe, buf);
1957 }
1958 }
1959 kfree(buf);
1960
1961 if (bss && bss->wpa_ie_len > 0 && bss->wpa_ie_len <= MAX_WPA_IE_LEN) {
1962 memset(&iwe, 0, sizeof(iwe));
1963 iwe.cmd = IWEVGENIE;
1964 iwe.u.data.length = bss->wpa_ie_len;
1965 current_ev = iwe_stream_add_point(
1966 current_ev, end_buf, &iwe, bss->wpa_ie);
1967 }
1968
1969 if (bss && bss->rsn_ie_len > 0 && bss->rsn_ie_len <= MAX_WPA_IE_LEN) {
1970 memset(&iwe, 0, sizeof(iwe));
1971 iwe.cmd = IWEVGENIE;
1972 iwe.u.data.length = bss->rsn_ie_len;
1973 current_ev = iwe_stream_add_point(
1974 current_ev, end_buf, &iwe, bss->rsn_ie);
1975 }
1976
1977 return current_ev;
1978}
1979
1980
1981/* Translate scan data returned from the card to a card-independent
1982 * format that the Wireless Tools will understand - Jean II */
1983static inline int prism2_translate_scan(local_info_t *local,
1984 char *buffer, int buflen)
1985{
1986 struct hfa384x_hostscan_result *scan;
1987 int entry, hostscan;
1988 char *current_ev = buffer;
1989 char *end_buf = buffer + buflen;
1990 struct list_head *ptr;
1991
1992 spin_lock_bh(&local->lock);
1993
1994 list_for_each(ptr, &local->bss_list) {
1995 struct hostap_bss_info *bss;
1996 bss = list_entry(ptr, struct hostap_bss_info, list);
1997 bss->included = 0;
1998 }
1999
2000 hostscan = local->last_scan_type == PRISM2_HOSTSCAN;
2001 for (entry = 0; entry < local->last_scan_results_count; entry++) {
2002 int found = 0;
2003 scan = &local->last_scan_results[entry];
2004
2005 /* Report every SSID if the AP is using multiple SSIDs. If no
2006 * BSS record is found (e.g., when WPA mode is disabled),
2007 * report the AP only once. */
2008 list_for_each(ptr, &local->bss_list) {
2009 struct hostap_bss_info *bss;
2010 bss = list_entry(ptr, struct hostap_bss_info, list);
2011 if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
2012 bss->included = 1;
2013 current_ev = __prism2_translate_scan(
2014 local, scan, bss, current_ev, end_buf);
2015 found++;
2016 }
2017 }
2018 if (!found) {
2019 current_ev = __prism2_translate_scan(
2020 local, scan, NULL, current_ev, end_buf);
2021 }
2022 /* Check if there is space for one more entry */
2023 if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
2024 /* Ask user space to try again with a bigger buffer */
2025 spin_unlock_bh(&local->lock);
2026 return -E2BIG;
2027 }
2028 }
2029
2030 /* Prism2 firmware has a limit (32 in at least some versions) on the number
2031 * of BSSes in scan results. Extend this limit by using the local BSS list.
2032 */
2033 list_for_each(ptr, &local->bss_list) {
2034 struct hostap_bss_info *bss;
2035 bss = list_entry(ptr, struct hostap_bss_info, list);
2036 if (bss->included)
2037 continue;
2038 current_ev = __prism2_translate_scan(local, NULL, bss,
2039 current_ev, end_buf);
2040 /* Check if there is space for one more entry */
2041 if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
2042 /* Ask user space to try again with a bigger buffer */
2043 spin_unlock_bh(&local->lock);
2044 return -E2BIG;
2045 }
2046 }
2047
2048 spin_unlock_bh(&local->lock);
2049
2050 return current_ev - buffer;
2051}
2052#endif /* PRISM2_NO_STATION_MODES */
2053
2054
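/* SIOCGIWSCAN handler for station modes: return -EAGAIN while a recently
 * triggered scan may still be running, otherwise translate the cached scan
 * results into the Wireless Extensions event stream in 'extra'. */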
2055static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
2056 struct iw_request_info *info,
2057 struct iw_point *data, char *extra)
2058{
2059#ifdef PRISM2_NO_STATION_MODES
2060 return -EOPNOTSUPP;
2061#else /* PRISM2_NO_STATION_MODES */
2062 struct hostap_interface *iface;
2063 local_info_t *local;
2064 int res;
2065
2066 iface = netdev_priv(dev);
2067 local = iface->local;
2068
2069 /* Wait until the scan is finished. We can probably do better
2070 * than that - Jean II */
2071 if (local->scan_timestamp &&
2072 time_before(jiffies, local->scan_timestamp + 3 * HZ)) {
2073 /* Important note: we don't want to block the caller
2074 * until results are ready for various reasons.
2075 * First, managing wait queues is complex and racy
2076 * (there may be multiple simultaneous callers).
2077 * Second, we grab some rtnetlink lock before coming
2078 * here (in dev_ioctl()).
2079 * Third, the caller can wait on the Wireless Event
2080 * - Jean II */
2081 return -EAGAIN;
2082 }
2083 local->scan_timestamp = 0;
2084
2085 res = prism2_translate_scan(local, extra, data->length);
2086
2087 if (res >= 0) {
2088 data->length = res;
2089 return 0;
2090 } else {
2091 data->length = 0;
2092 return res;
2093 }
2094#endif /* PRISM2_NO_STATION_MODES */
2095}
2096
2097
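/* SIOCGIWSCAN handler: in Master (Host AP) mode report the locally cached
 * AP/station information via prism2_ap_translate_scan(); in other modes
 * fall back to the station mode handler above. */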
2098static int prism2_ioctl_giwscan(struct net_device *dev,
2099 struct iw_request_info *info,
2100 struct iw_point *data, char *extra)
2101{
2102 struct hostap_interface *iface;
2103 local_info_t *local;
2104 int res;
2105
2106 iface = netdev_priv(dev);
2107 local = iface->local;
2108
2109 if (local->iw_mode == IW_MODE_MASTER) {
2110 /* In MASTER mode, it doesn't make sense to go around
2111 * scanning the frequencies and make the stations we serve
2112 * wait when what the user is really interested in is the
2113 * list of stations and access points we are talking to.
2114 * So, just extract results from our cache...
2115 * Jean II */
2116
2117 /* Translate to WE format */
2118 res = prism2_ap_translate_scan(dev, extra);
2119 if (res >= 0) {
2120 printk(KERN_DEBUG "Scan result translation succeeded "
2121 "(length=%d)\n", res);
2122 data->length = res;
2123 return 0;
2124 } else {
2125 printk(KERN_DEBUG
2126 "Scan result translation failed (res=%d)\n",
2127 res);
2128 data->length = 0;
2129 return res;
2130 }
2131 } else {
2132 /* Station mode */
2133 return prism2_ioctl_giwscan_sta(dev, info, data, extra);
2134 }
2135}
2136
2137
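/* iwpriv ioctl and sub-ioctl definitions exported to user space; several
 * get* names are truncated to fit within the Wireless Extensions private
 * ioctl name length limit. */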
2138static const struct iw_priv_args prism2_priv[] = {
2139 { PRISM2_IOCTL_MONITOR,
2140 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor" },
2141 { PRISM2_IOCTL_READMIF,
2142 IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
2143 IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "readmif" },
2144 { PRISM2_IOCTL_WRITEMIF,
2145 IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 2, 0, "writemif" },
2146 { PRISM2_IOCTL_RESET,
2147 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reset" },
2148 { PRISM2_IOCTL_INQUIRE,
2149 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inquire" },
2150 { PRISM2_IOCTL_SET_RID_WORD,
2151 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_rid_word" },
2152 { PRISM2_IOCTL_MACCMD,
2153 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "maccmd" },
2154 { PRISM2_IOCTL_WDS_ADD,
2155 IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_add" },
2156 { PRISM2_IOCTL_WDS_DEL,
2157 IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_del" },
2158 { PRISM2_IOCTL_ADDMAC,
2159 IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "addmac" },
2160 { PRISM2_IOCTL_DELMAC,
2161 IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "delmac" },
2162 { PRISM2_IOCTL_KICKMAC,
2163 IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac" },
2164 /* --- raw access to sub-ioctls --- */
2165 { PRISM2_IOCTL_PRISM2_PARAM,
2166 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "prism2_param" },
2167 { PRISM2_IOCTL_GET_PRISM2_PARAM,
2168 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
2169 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprism2_param" },
2170 /* --- sub-ioctls handlers --- */
2171 { PRISM2_IOCTL_PRISM2_PARAM,
2172 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" },
2173 { PRISM2_IOCTL_GET_PRISM2_PARAM,
2174 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" },
2175 /* --- sub-ioctls definitions --- */
2176 { PRISM2_PARAM_TXRATECTRL,
2177 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txratectrl" },
2178 { PRISM2_PARAM_TXRATECTRL,
2179 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettxratectrl" },
2180 { PRISM2_PARAM_BEACON_INT,
2181 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beacon_int" },
2182 { PRISM2_PARAM_BEACON_INT,
2183 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbeacon_int" },
2184#ifndef PRISM2_NO_STATION_MODES
2185 { PRISM2_PARAM_PSEUDO_IBSS,
2186 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pseudo_ibss" },
2187 { PRISM2_PARAM_PSEUDO_IBSS,
2188 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpseudo_ibss" },
2189#endif /* PRISM2_NO_STATION_MODES */
2190 { PRISM2_PARAM_ALC,
2191 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "alc" },
2192 { PRISM2_PARAM_ALC,
2193 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getalc" },
2194 { PRISM2_PARAM_DUMP,
2195 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dump" },
2196 { PRISM2_PARAM_DUMP,
2197 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdump" },
2198 { PRISM2_PARAM_OTHER_AP_POLICY,
2199 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "other_ap_policy" },
2200 { PRISM2_PARAM_OTHER_AP_POLICY,
2201 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getother_ap_pol" },
2202 { PRISM2_PARAM_AP_MAX_INACTIVITY,
2203 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_inactivity" },
2204 { PRISM2_PARAM_AP_MAX_INACTIVITY,
2205 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_inactivi" },
2206 { PRISM2_PARAM_AP_BRIDGE_PACKETS,
2207 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bridge_packets" },
2208 { PRISM2_PARAM_AP_BRIDGE_PACKETS,
2209 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbridge_packe" },
2210 { PRISM2_PARAM_DTIM_PERIOD,
2211 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" },
2212 { PRISM2_PARAM_DTIM_PERIOD,
2213 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdtim_period" },
2214 { PRISM2_PARAM_AP_NULLFUNC_ACK,
2215 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "nullfunc_ack" },
2216 { PRISM2_PARAM_AP_NULLFUNC_ACK,
2217 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getnullfunc_ack" },
2218 { PRISM2_PARAM_MAX_WDS,
2219 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_wds" },
2220 { PRISM2_PARAM_MAX_WDS,
2221 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_wds" },
2222 { PRISM2_PARAM_AP_AUTOM_AP_WDS,
2223 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autom_ap_wds" },
2224 { PRISM2_PARAM_AP_AUTOM_AP_WDS,
2225 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getautom_ap_wds" },
2226 { PRISM2_PARAM_AP_AUTH_ALGS,
2227 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_auth_algs" },
2228 { PRISM2_PARAM_AP_AUTH_ALGS,
2229 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_auth_algs" },
2230 { PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
2231 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "allow_fcserr" },
2232 { PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
2233 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getallow_fcserr" },
2234 { PRISM2_PARAM_HOST_ENCRYPT,
2235 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_encrypt" },
2236 { PRISM2_PARAM_HOST_ENCRYPT,
2237 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_encrypt" },
2238 { PRISM2_PARAM_HOST_DECRYPT,
2239 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_decrypt" },
2240 { PRISM2_PARAM_HOST_DECRYPT,
2241 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_decrypt" },
2242#ifndef PRISM2_NO_STATION_MODES
2243 { PRISM2_PARAM_HOST_ROAMING,
2244 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_roaming" },
2245 { PRISM2_PARAM_HOST_ROAMING,
2246 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_roaming" },
2247#endif /* PRISM2_NO_STATION_MODES */
2248 { PRISM2_PARAM_BCRX_STA_KEY,
2249 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcrx_sta_key" },
2250 { PRISM2_PARAM_BCRX_STA_KEY,
2251 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbcrx_sta_key" },
2252 { PRISM2_PARAM_IEEE_802_1X,
2253 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ieee_802_1x" },
2254 { PRISM2_PARAM_IEEE_802_1X,
2255 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getieee_802_1x" },
2256 { PRISM2_PARAM_ANTSEL_TX,
2257 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_tx" },
2258 { PRISM2_PARAM_ANTSEL_TX,
2259 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_tx" },
2260 { PRISM2_PARAM_ANTSEL_RX,
2261 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_rx" },
2262 { PRISM2_PARAM_ANTSEL_RX,
2263 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_rx" },
2264 { PRISM2_PARAM_MONITOR_TYPE,
2265 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor_type" },
2266 { PRISM2_PARAM_MONITOR_TYPE,
2267 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmonitor_type" },
2268 { PRISM2_PARAM_WDS_TYPE,
2269 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds_type" },
2270 { PRISM2_PARAM_WDS_TYPE,
2271 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwds_type" },
2272 { PRISM2_PARAM_HOSTSCAN,
2273 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostscan" },
2274 { PRISM2_PARAM_HOSTSCAN,
2275 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostscan" },
2276 { PRISM2_PARAM_AP_SCAN,
2277 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_scan" },
2278 { PRISM2_PARAM_AP_SCAN,
2279 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_scan" },
2280 { PRISM2_PARAM_ENH_SEC,
2281 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "enh_sec" },
2282 { PRISM2_PARAM_ENH_SEC,
2283 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getenh_sec" },
2284#ifdef PRISM2_IO_DEBUG
2285 { PRISM2_PARAM_IO_DEBUG,
2286 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "io_debug" },
2287 { PRISM2_PARAM_IO_DEBUG,
2288 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getio_debug" },
2289#endif /* PRISM2_IO_DEBUG */
2290 { PRISM2_PARAM_BASIC_RATES,
2291 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "basic_rates" },
2292 { PRISM2_PARAM_BASIC_RATES,
2293 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbasic_rates" },
2294 { PRISM2_PARAM_OPER_RATES,
2295 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "oper_rates" },
2296 { PRISM2_PARAM_OPER_RATES,
2297 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getoper_rates" },
2298 { PRISM2_PARAM_HOSTAPD,
2299 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd" },
2300 { PRISM2_PARAM_HOSTAPD,
2301 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd" },
2302 { PRISM2_PARAM_HOSTAPD_STA,
2303 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd_sta" },
2304 { PRISM2_PARAM_HOSTAPD_STA,
2305 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd_sta" },
2306 { PRISM2_PARAM_WPA,
2307 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" },
2308 { PRISM2_PARAM_WPA,
2309 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwpa" },
2310 { PRISM2_PARAM_PRIVACY_INVOKED,
2311 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy_invoked" },
2312 { PRISM2_PARAM_PRIVACY_INVOKED,
2313 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprivacy_invo" },
2314 { PRISM2_PARAM_TKIP_COUNTERMEASURES,
2315 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tkip_countermea" },
2316 { PRISM2_PARAM_TKIP_COUNTERMEASURES,
2317 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettkip_counter" },
2318 { PRISM2_PARAM_DROP_UNENCRYPTED,
2319 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "drop_unencrypte" },
2320 { PRISM2_PARAM_DROP_UNENCRYPTED,
2321 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdrop_unencry" },
2322 { PRISM2_PARAM_SCAN_CHANNEL_MASK,
2323 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_channels" },
2324 { PRISM2_PARAM_SCAN_CHANNEL_MASK,
2325 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getscan_channel" },
2326};
2327
2328
2329static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i)
2330{
2331 struct hostap_interface *iface;
2332 local_info_t *local;
2333
2334 iface = netdev_priv(dev);
2335 local = iface->local;
2336
2337 if (local->func->cmd(dev, HFA384X_CMDCODE_INQUIRE, *i, NULL, NULL))
2338 return -EOPNOTSUPP;
2339
2340 return 0;
2341}
2342
2343
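/* Handler for the 'prism2_param' private ioctl: extra[0] selects the
 * parameter to set and extra[1] carries the new value. */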
2344static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
2345 struct iw_request_info *info,
2346 void *wrqu, char *extra)
2347{
2348 struct hostap_interface *iface;
2349 local_info_t *local;
2350 int *i = (int *) extra;
2351 int param = *i;
2352 int value = *(i + 1);
2353 int ret = 0;
2354 u16 val;
2355
2356 iface = netdev_priv(dev);
2357 local = iface->local;
2358
2359 switch (param) {
2360 case PRISM2_PARAM_TXRATECTRL:
2361 local->fw_tx_rate_control = value;
2362 break;
2363
2364 case PRISM2_PARAM_BEACON_INT:
2365 if (hostap_set_word(dev, HFA384X_RID_CNFBEACONINT, value) ||
2366 local->func->reset_port(dev))
2367 ret = -EINVAL;
2368 else
2369 local->beacon_int = value;
2370 break;
2371
2372#ifndef PRISM2_NO_STATION_MODES
2373 case PRISM2_PARAM_PSEUDO_IBSS:
2374 if (value == local->pseudo_adhoc)
2375 break;
2376
2377 if (value != 0 && value != 1) {
2378 ret = -EINVAL;
2379 break;
2380 }
2381
2382 printk(KERN_DEBUG "prism2: %s: pseudo IBSS change %d -> %d\n",
2383 dev->name, local->pseudo_adhoc, value);
2384 local->pseudo_adhoc = value;
2385 if (local->iw_mode != IW_MODE_ADHOC)
2386 break;
2387
2388 if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
2389 hostap_get_porttype(local))) {
2390 ret = -EOPNOTSUPP;
2391 break;
2392 }
2393
2394 if (local->func->reset_port(dev))
2395 ret = -EINVAL;
2396 break;
2397#endif /* PRISM2_NO_STATION_MODES */
2398
2399 case PRISM2_PARAM_ALC:
2400 printk(KERN_DEBUG "%s: %s ALC\n", dev->name,
2401 value == 0 ? "Disabling" : "Enabling");
2402 val = HFA384X_TEST_CFG_BIT_ALC;
2403 local->func->cmd(dev, HFA384X_CMDCODE_TEST |
2404 (HFA384X_TEST_CFG_BITS << 8),
2405 value == 0 ? 0 : 1, &val, NULL);
2406 break;
2407
2408 case PRISM2_PARAM_DUMP:
2409 local->frame_dump = value;
2410 break;
2411
2412 case PRISM2_PARAM_OTHER_AP_POLICY:
2413 if (value < 0 || value > 3) {
2414 ret = -EINVAL;
2415 break;
2416 }
2417 if (local->ap != NULL)
2418 local->ap->ap_policy = value;
2419 break;
2420
2421 case PRISM2_PARAM_AP_MAX_INACTIVITY:
2422 if (value < 0 || value > 7 * 24 * 60 * 60) {
2423 ret = -EINVAL;
2424 break;
2425 }
2426 if (local->ap != NULL)
2427 local->ap->max_inactivity = value * HZ;
2428 break;
2429
2430 case PRISM2_PARAM_AP_BRIDGE_PACKETS:
2431 if (local->ap != NULL)
2432 local->ap->bridge_packets = value;
2433 break;
2434
2435 case PRISM2_PARAM_DTIM_PERIOD:
2436 if (value < 0 || value > 65535) {
2437 ret = -EINVAL;
2438 break;
2439 }
2440 if (hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD, value)
2441 || local->func->reset_port(dev))
2442 ret = -EINVAL;
2443 else
2444 local->dtim_period = value;
2445 break;
2446
2447 case PRISM2_PARAM_AP_NULLFUNC_ACK:
2448 if (local->ap != NULL)
2449 local->ap->nullfunc_ack = value;
2450 break;
2451
2452 case PRISM2_PARAM_MAX_WDS:
2453 local->wds_max_connections = value;
2454 break;
2455
2456 case PRISM2_PARAM_AP_AUTOM_AP_WDS:
2457 if (local->ap != NULL) {
2458 if (!local->ap->autom_ap_wds && value) {
2459 /* add WDS link to all APs in STA table */
2460 hostap_add_wds_links(local);
2461 }
2462 local->ap->autom_ap_wds = value;
2463 }
2464 break;
2465
2466 case PRISM2_PARAM_AP_AUTH_ALGS:
2467 local->auth_algs = value;
2468 if (hostap_set_auth_algs(local))
2469 ret = -EINVAL;
2470 break;
2471
2472 case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
2473 local->monitor_allow_fcserr = value;
2474 break;
2475
2476 case PRISM2_PARAM_HOST_ENCRYPT:
2477 local->host_encrypt = value;
2478 if (hostap_set_encryption(local) ||
2479 local->func->reset_port(dev))
2480 ret = -EINVAL;
2481 break;
2482
2483 case PRISM2_PARAM_HOST_DECRYPT:
2484 local->host_decrypt = value;
2485 if (hostap_set_encryption(local) ||
2486 local->func->reset_port(dev))
2487 ret = -EINVAL;
2488 break;
2489
2490#ifndef PRISM2_NO_STATION_MODES
2491 case PRISM2_PARAM_HOST_ROAMING:
2492 if (value < 0 || value > 2) {
2493 ret = -EINVAL;
2494 break;
2495 }
2496 local->host_roaming = value;
2497 if (hostap_set_roaming(local) || local->func->reset_port(dev))
2498 ret = -EINVAL;
2499 break;
2500#endif /* PRISM2_NO_STATION_MODES */
2501
2502 case PRISM2_PARAM_BCRX_STA_KEY:
2503 local->bcrx_sta_key = value;
2504 break;
2505
2506 case PRISM2_PARAM_IEEE_802_1X:
2507 local->ieee_802_1x = value;
2508 break;
2509
2510 case PRISM2_PARAM_ANTSEL_TX:
2511 if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
2512 ret = -EINVAL;
2513 break;
2514 }
2515 local->antsel_tx = value;
2516 hostap_set_antsel(local);
2517 break;
2518
2519 case PRISM2_PARAM_ANTSEL_RX:
2520 if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
2521 ret = -EINVAL;
2522 break;
2523 }
2524 local->antsel_rx = value;
2525 hostap_set_antsel(local);
2526 break;
2527
2528 case PRISM2_PARAM_MONITOR_TYPE:
2529 if (value != PRISM2_MONITOR_80211 &&
2530 value != PRISM2_MONITOR_CAPHDR &&
2531 value != PRISM2_MONITOR_PRISM) {
2532 ret = -EINVAL;
2533 break;
2534 }
2535 local->monitor_type = value;
2536 if (local->iw_mode == IW_MODE_MONITOR)
2537 hostap_monitor_set_type(local);
2538 break;
2539
2540 case PRISM2_PARAM_WDS_TYPE:
2541 local->wds_type = value;
2542 break;
2543
2544 case PRISM2_PARAM_HOSTSCAN:
2545 {
2546 struct hfa384x_hostscan_request scan_req;
2547 u16 rate;
2548
2549 memset(&scan_req, 0, sizeof(scan_req));
2550 scan_req.channel_list = __constant_cpu_to_le16(0x3fff);
2551 switch (value) {
2552 case 1: rate = HFA384X_RATES_1MBPS; break;
2553 case 2: rate = HFA384X_RATES_2MBPS; break;
2554 case 3: rate = HFA384X_RATES_5MBPS; break;
2555 case 4: rate = HFA384X_RATES_11MBPS; break;
2556 default: rate = HFA384X_RATES_1MBPS; break;
2557 }
2558 scan_req.txrate = cpu_to_le16(rate);
2559 /* leave SSID empty to accept all SSIDs */
2560
2561 if (local->iw_mode == IW_MODE_MASTER) {
2562 if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
2563 HFA384X_PORTTYPE_BSS) ||
2564 local->func->reset_port(dev))
2565 printk(KERN_DEBUG "Leaving Host AP mode "
2566 "for HostScan failed\n");
2567 }
2568
2569 if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
2570 sizeof(scan_req))) {
2571 printk(KERN_DEBUG "HOSTSCAN failed\n");
2572 ret = -EINVAL;
2573 }
2574 if (local->iw_mode == IW_MODE_MASTER) {
2575 wait_queue_t __wait;
2576 init_waitqueue_entry(&__wait, current);
2577 add_wait_queue(&local->hostscan_wq, &__wait);
2578 set_current_state(TASK_INTERRUPTIBLE);
2579 schedule_timeout(HZ);
2580 if (signal_pending(current))
2581 ret = -EINTR;
2582 set_current_state(TASK_RUNNING);
2583 remove_wait_queue(&local->hostscan_wq, &__wait);
2584
2585 if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
2586 HFA384X_PORTTYPE_HOSTAP) ||
2587 local->func->reset_port(dev))
2588 printk(KERN_DEBUG "Returning to Host AP mode "
2589 "after HostScan failed\n");
2590 }
2591 break;
2592 }
2593
2594 case PRISM2_PARAM_AP_SCAN:
2595 local->passive_scan_interval = value;
2596 if (timer_pending(&local->passive_scan_timer))
2597 del_timer(&local->passive_scan_timer);
2598 if (value > 0) {
2599 local->passive_scan_timer.expires = jiffies +
2600 local->passive_scan_interval * HZ;
2601 add_timer(&local->passive_scan_timer);
2602 }
2603 break;
2604
2605 case PRISM2_PARAM_ENH_SEC:
2606 if (value < 0 || value > 3) {
2607 ret = -EINVAL;
2608 break;
2609 }
2610 local->enh_sec = value;
2611 if (hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY,
2612 local->enh_sec) ||
2613 local->func->reset_port(dev)) {
2614 printk(KERN_INFO "%s: cnfEnhSecurity requires STA f/w "
2615 "1.6.3 or newer\n", dev->name);
2616 ret = -EOPNOTSUPP;
2617 }
2618 break;
2619
2620#ifdef PRISM2_IO_DEBUG
2621 case PRISM2_PARAM_IO_DEBUG:
2622 local->io_debug_enabled = value;
2623 break;
2624#endif /* PRISM2_IO_DEBUG */
2625
2626 case PRISM2_PARAM_BASIC_RATES:
2627 if ((value & local->tx_rate_control) != value || value == 0) {
2628 printk(KERN_INFO "%s: invalid basic rate set - basic "
2629 "rates must be in supported rate set\n",
2630 dev->name);
2631 ret = -EINVAL;
2632 break;
2633 }
2634 local->basic_rates = value;
2635 if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
2636 local->basic_rates) ||
2637 local->func->reset_port(dev))
2638 ret = -EINVAL;
2639 break;
2640
2641 case PRISM2_PARAM_OPER_RATES:
2642 local->tx_rate_control = value;
2643 if (hostap_set_rate(dev))
2644 ret = -EINVAL;
2645 break;
2646
2647 case PRISM2_PARAM_HOSTAPD:
2648 ret = hostap_set_hostapd(local, value, 1);
2649 break;
2650
2651 case PRISM2_PARAM_HOSTAPD_STA:
2652 ret = hostap_set_hostapd_sta(local, value, 1);
2653 break;
2654
2655 case PRISM2_PARAM_WPA:
2656 local->wpa = value;
2657 if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
2658 ret = -EOPNOTSUPP;
2659 else if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
2660 value ? 1 : 0))
2661 ret = -EINVAL;
2662 break;
2663
2664 case PRISM2_PARAM_PRIVACY_INVOKED:
2665 local->privacy_invoked = value;
2666 if (hostap_set_encryption(local) ||
2667 local->func->reset_port(dev))
2668 ret = -EINVAL;
2669 break;
2670
2671 case PRISM2_PARAM_TKIP_COUNTERMEASURES:
2672 local->tkip_countermeasures = value;
2673 break;
2674
2675 case PRISM2_PARAM_DROP_UNENCRYPTED:
2676 local->drop_unencrypted = value;
2677 break;
2678
2679 case PRISM2_PARAM_SCAN_CHANNEL_MASK:
2680 local->scan_channel_mask = value;
2681 break;
2682
2683 default:
2684 printk(KERN_DEBUG "%s: prism2_param: unknown param %d\n",
2685 dev->name, param);
2686 ret = -EOPNOTSUPP;
2687 break;
2688 }
2689
2690 return ret;
2691}
2692
2693
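/* Handler for the 'getprism2_param' private ioctl: the parameter number in
 * *param is replaced with its current value. */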
2694static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
2695 struct iw_request_info *info,
2696 void *wrqu, char *extra)
2697{
2698 struct hostap_interface *iface;
2699 local_info_t *local;
2700 int *param = (int *) extra;
2701 int ret = 0;
2702
2703 iface = netdev_priv(dev);
2704 local = iface->local;
2705
2706 switch (*param) {
2707 case PRISM2_PARAM_TXRATECTRL:
2708 *param = local->fw_tx_rate_control;
2709 break;
2710
2711 case PRISM2_PARAM_BEACON_INT:
2712 *param = local->beacon_int;
2713 break;
2714
2715 case PRISM2_PARAM_PSEUDO_IBSS:
2716 *param = local->pseudo_adhoc;
2717 break;
2718
2719 case PRISM2_PARAM_ALC:
2720 ret = -EOPNOTSUPP; /* FIX */
2721 break;
2722
2723 case PRISM2_PARAM_DUMP:
2724 *param = local->frame_dump;
2725 break;
2726
2727 case PRISM2_PARAM_OTHER_AP_POLICY:
2728 if (local->ap != NULL)
2729 *param = local->ap->ap_policy;
2730 else
2731 ret = -EOPNOTSUPP;
2732 break;
2733
2734 case PRISM2_PARAM_AP_MAX_INACTIVITY:
2735 if (local->ap != NULL)
2736 *param = local->ap->max_inactivity / HZ;
2737 else
2738 ret = -EOPNOTSUPP;
2739 break;
2740
2741 case PRISM2_PARAM_AP_BRIDGE_PACKETS:
2742 if (local->ap != NULL)
2743 *param = local->ap->bridge_packets;
2744 else
2745 ret = -EOPNOTSUPP;
2746 break;
2747
2748 case PRISM2_PARAM_DTIM_PERIOD:
2749 *param = local->dtim_period;
2750 break;
2751
2752 case PRISM2_PARAM_AP_NULLFUNC_ACK:
2753 if (local->ap != NULL)
2754 *param = local->ap->nullfunc_ack;
2755 else
2756 ret = -EOPNOTSUPP;
2757 break;
2758
2759 case PRISM2_PARAM_MAX_WDS:
2760 *param = local->wds_max_connections;
2761 break;
2762
2763 case PRISM2_PARAM_AP_AUTOM_AP_WDS:
2764 if (local->ap != NULL)
2765 *param = local->ap->autom_ap_wds;
2766 else
2767 ret = -EOPNOTSUPP;
2768 break;
2769
2770 case PRISM2_PARAM_AP_AUTH_ALGS:
2771 *param = local->auth_algs;
2772 break;
2773
2774 case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
2775 *param = local->monitor_allow_fcserr;
2776 break;
2777
2778 case PRISM2_PARAM_HOST_ENCRYPT:
2779 *param = local->host_encrypt;
2780 break;
2781
2782 case PRISM2_PARAM_HOST_DECRYPT:
2783 *param = local->host_decrypt;
2784 break;
2785
2786 case PRISM2_PARAM_HOST_ROAMING:
2787 *param = local->host_roaming;
2788 break;
2789
2790 case PRISM2_PARAM_BCRX_STA_KEY:
2791 *param = local->bcrx_sta_key;
2792 break;
2793
2794 case PRISM2_PARAM_IEEE_802_1X:
2795 *param = local->ieee_802_1x;
2796 break;
2797
2798 case PRISM2_PARAM_ANTSEL_TX:
2799 *param = local->antsel_tx;
2800 break;
2801
2802 case PRISM2_PARAM_ANTSEL_RX:
2803 *param = local->antsel_rx;
2804 break;
2805
2806 case PRISM2_PARAM_MONITOR_TYPE:
2807 *param = local->monitor_type;
2808 break;
2809
2810 case PRISM2_PARAM_WDS_TYPE:
2811 *param = local->wds_type;
2812 break;
2813
2814 case PRISM2_PARAM_HOSTSCAN:
2815 ret = -EOPNOTSUPP;
2816 break;
2817
2818 case PRISM2_PARAM_AP_SCAN:
2819 *param = local->passive_scan_interval;
2820 break;
2821
2822 case PRISM2_PARAM_ENH_SEC:
2823 *param = local->enh_sec;
2824 break;
2825
2826#ifdef PRISM2_IO_DEBUG
2827 case PRISM2_PARAM_IO_DEBUG:
2828 *param = local->io_debug_enabled;
2829 break;
2830#endif /* PRISM2_IO_DEBUG */
2831
2832 case PRISM2_PARAM_BASIC_RATES:
2833 *param = local->basic_rates;
2834 break;
2835
2836 case PRISM2_PARAM_OPER_RATES:
2837 *param = local->tx_rate_control;
2838 break;
2839
2840 case PRISM2_PARAM_HOSTAPD:
2841 *param = local->hostapd;
2842 break;
2843
2844 case PRISM2_PARAM_HOSTAPD_STA:
2845 *param = local->hostapd_sta;
2846 break;
2847
2848 case PRISM2_PARAM_WPA:
2849 if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
2850 ret = -EOPNOTSUPP;
2851 *param = local->wpa;
2852 break;
2853
2854 case PRISM2_PARAM_PRIVACY_INVOKED:
2855 *param = local->privacy_invoked;
2856 break;
2857
2858 case PRISM2_PARAM_TKIP_COUNTERMEASURES:
2859 *param = local->tkip_countermeasures;
2860 break;
2861
2862 case PRISM2_PARAM_DROP_UNENCRYPTED:
2863 *param = local->drop_unencrypted;
2864 break;
2865
2866 case PRISM2_PARAM_SCAN_CHANNEL_MASK:
2867 *param = local->scan_channel_mask;
2868 break;
2869
2870 default:
2871 printk(KERN_DEBUG "%s: get_prism2_param: unknown param %d\n",
2872 dev->name, *param);
2873 ret = -EOPNOTSUPP;
2874 break;
2875 }
2876
2877 return ret;
2878}
2879
2880
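/* 'readmif' private ioctl: read one register with the firmware READMIF
 * command; the register number is taken from *extra and the resulting value
 * is returned in place. */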
2881static int prism2_ioctl_priv_readmif(struct net_device *dev,
2882 struct iw_request_info *info,
2883 void *wrqu, char *extra)
2884{
2885 struct hostap_interface *iface;
2886 local_info_t *local;
2887 u16 resp0;
2888
2889 iface = netdev_priv(dev);
2890 local = iface->local;
2891
2892 if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF, *extra, NULL,
2893 &resp0))
2894 return -EOPNOTSUPP;
2895 else
2896 *extra = resp0;
2897
2898 return 0;
2899}
2900
2901
2902static int prism2_ioctl_priv_writemif(struct net_device *dev,
2903 struct iw_request_info *info,
2904 void *wrqu, char *extra)
2905{
2906 struct hostap_interface *iface;
2907 local_info_t *local;
2908 u16 cr, val;
2909
2910 iface = netdev_priv(dev);
2911 local = iface->local;
2912
2913 cr = *extra;
2914 val = *(extra + 1);
2915 if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, cr, &val, NULL))
2916 return -EOPNOTSUPP;
2917
2918 return 0;
2919}
2920
2921
2922static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
2923{
2924 struct hostap_interface *iface;
2925 local_info_t *local;
2926 int ret = 0;
2927 u32 mode;
2928
2929 iface = netdev_priv(dev);
2930 local = iface->local;
2931
2932 printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor "
2933 "- update software to use iwconfig mode monitor\n",
2934 dev->name, current->pid, current->comm);
2935
2936 /* Backward compatibility code - this can be removed at some point */
2937
2938 if (*i == 0) {
2939 /* Disable monitor mode - old mode was not saved, so go to
2940 * Master mode */
2941 mode = IW_MODE_MASTER;
2942 ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
2943 } else if (*i == 1) {
2944 /* netlink socket mode is not supported anymore since it did
2945 * not separate different devices from each other and was not
2946 * the best method for delivering large amounts of packets to
2947 * user space */
2948 ret = -EOPNOTSUPP;
2949 } else if (*i == 2 || *i == 3) {
2950 switch (*i) {
2951 case 2:
2952 local->monitor_type = PRISM2_MONITOR_80211;
2953 break;
2954 case 3:
2955 local->monitor_type = PRISM2_MONITOR_PRISM;
2956 break;
2957 }
2958 mode = IW_MODE_MONITOR;
2959 ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
2960 hostap_monitor_mode_enable(local);
2961 } else
2962 ret = -EINVAL;
2963
2964 return ret;
2965}
2966
2967
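/* 'reset' private ioctl: *i selects the reset level (0 = shut down and
 * re-initialize the card, 1 = COR soft reset, 2 = reset port 0, 3 = send
 * deauthentication and disable the port, 4 = enable the port). */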
2968static int prism2_ioctl_priv_reset(struct net_device *dev, int *i)
2969{
2970 struct hostap_interface *iface;
2971 local_info_t *local;
2972
2973 iface = netdev_priv(dev);
2974 local = iface->local;
2975
2976 printk(KERN_DEBUG "%s: manual reset request(%d)\n", dev->name, *i);
2977 switch (*i) {
2978 case 0:
2979 /* Disable and enable card */
2980 local->func->hw_shutdown(dev, 1);
2981 local->func->hw_config(dev, 0);
2982 break;
2983
2984 case 1:
2985 /* COR soft reset (sreset) */
2986 local->func->hw_reset(dev);
2987 break;
2988
2989 case 2:
2990 /* Disable and enable port 0 */
2991 local->func->reset_port(dev);
2992 break;
2993
2994 case 3:
2995 prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING);
2996 if (local->func->cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL,
2997 NULL))
2998 return -EINVAL;
2999 break;
3000
3001 case 4:
3002 if (local->func->cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL,
3003 NULL))
3004 return -EINVAL;
3005 break;
3006
3007 default:
3008 printk(KERN_DEBUG "Unknown reset request %d\n", *i);
3009 return -EOPNOTSUPP;
3010 }
3011
3012 return 0;
3013}
3014
3015
3016static int prism2_ioctl_priv_set_rid_word(struct net_device *dev, int *i)
3017{
3018 int rid = *i;
3019 int value = *(i + 1);
3020
3021 printk(KERN_DEBUG "%s: Set RID[0x%X] = %d\n", dev->name, rid, value);
3022
3023 if (hostap_set_word(dev, rid, value))
3024 return -EINVAL;
3025
3026 return 0;
3027}
3028
3029
3030#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
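/* Apply a MAC access control command from user space: set the
 * open/allow/deny policy, flush the address list, or kick out
 * (deauthenticate) all associated stations. */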
3031static int ap_mac_cmd_ioctl(local_info_t *local, int *cmd)
3032{
3033 int ret = 0;
3034
3035 switch (*cmd) {
3036 case AP_MAC_CMD_POLICY_OPEN:
3037 local->ap->mac_restrictions.policy = MAC_POLICY_OPEN;
3038 break;
3039 case AP_MAC_CMD_POLICY_ALLOW:
3040 local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW;
3041 break;
3042 case AP_MAC_CMD_POLICY_DENY:
3043 local->ap->mac_restrictions.policy = MAC_POLICY_DENY;
3044 break;
3045 case AP_MAC_CMD_FLUSH:
3046 ap_control_flush_macs(&local->ap->mac_restrictions);
3047 break;
3048 case AP_MAC_CMD_KICKALL:
3049 ap_control_kickall(local->ap);
3050 hostap_deauth_all_stas(local->dev, local->ap, 0);
3051 break;
3052 default:
3053 ret = -EOPNOTSUPP;
3054 break;
3055 }
3056
3057 return ret;
3058}
3059#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
3060
3061
3062#ifdef PRISM2_DOWNLOAD_SUPPORT
3063static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
3064{
3065 struct prism2_download_param *param;
3066 int ret = 0;
3067
3068 if (p->length < sizeof(struct prism2_download_param) ||
3069 p->length > 1024 || !p->pointer)
3070 return -EINVAL;
3071
3072 param = (struct prism2_download_param *)
3073 kmalloc(p->length, GFP_KERNEL);
3074 if (param == NULL)
3075 return -ENOMEM;
3076
3077 if (copy_from_user(param, p->pointer, p->length)) {
3078 ret = -EFAULT;
3079 goto out;
3080 }
3081
3082 if (p->length < sizeof(struct prism2_download_param) +
3083 param->num_areas * sizeof(struct prism2_download_area)) {
3084 ret = -EINVAL;
3085 goto out;
3086 }
3087
3088 ret = local->func->download(local, param);
3089
3090 out:
3091 if (param != NULL)
3092 kfree(param);
3093
3094 return ret;
3095}
3096#endif /* PRISM2_DOWNLOAD_SUPPORT */
3097
3098
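/* Store the generic information element (e.g., WPA IE) locally and program
 * it into the Prism2 generic element RID. */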
3099static int prism2_set_genericelement(struct net_device *dev, u8 *elem,
3100 size_t len)
3101{
3102 struct hostap_interface *iface = dev->priv;
3103 local_info_t *local = iface->local;
3104 u8 *buf;
3105
3106 /*
3107 * Prepend a 16-bit length field to the buffer because the Prism2 RID
3108 * format includes it.
3109 */
3110 buf = kmalloc(len + 2, GFP_KERNEL);
3111 if (buf == NULL)
3112 return -ENOMEM;
3113
3114 *((u16 *) buf) = cpu_to_le16(len);
3115 memcpy(buf + 2, elem, len);
3116
3117 kfree(local->generic_elem);
3118 local->generic_elem = buf;
3119 local->generic_elem_len = len + 2;
3120
3121 return local->func->set_rid(local->dev, HFA384X_RID_GENERICELEMENT,
3122 buf, len + 2);
3123}
3124
3125
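/* SIOCSIWAUTH handler: most WPA parameters are left for wpa_supplicant to
 * control internally; IW_AUTH_WPA_ENABLED switches the card between plain
 * and WPA operation (SSN handling mode, host roaming, privacy invoked) and
 * requires STA f/w 1.7.0 or newer. */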
3126static int prism2_ioctl_siwauth(struct net_device *dev,
3127 struct iw_request_info *info,
3128 struct iw_param *data, char *extra)
3129{
3130 struct hostap_interface *iface = dev->priv;
3131 local_info_t *local = iface->local;
3132
3133 switch (data->flags & IW_AUTH_INDEX) {
3134 case IW_AUTH_WPA_VERSION:
3135 case IW_AUTH_CIPHER_PAIRWISE:
3136 case IW_AUTH_CIPHER_GROUP:
3137 case IW_AUTH_KEY_MGMT:
3138 /*
3139 * Host AP driver does not use these parameters and allows
3140 * wpa_supplicant to control them internally.
3141 */
3142 break;
3143 case IW_AUTH_TKIP_COUNTERMEASURES:
3144 local->tkip_countermeasures = data->value;
3145 break;
3146 case IW_AUTH_DROP_UNENCRYPTED:
3147 local->drop_unencrypted = data->value;
3148 break;
3149 case IW_AUTH_80211_AUTH_ALG:
3150 local->auth_algs = data->value;
3151 break;
3152 case IW_AUTH_WPA_ENABLED:
3153 if (data->value == 0) {
3154 local->wpa = 0;
3155 if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
3156 break;
3157 prism2_set_genericelement(dev, "", 0);
3158 local->host_roaming = 0;
3159 local->privacy_invoked = 0;
3160 if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
3161 0) ||
3162 hostap_set_roaming(local) ||
3163 hostap_set_encryption(local) ||
3164 local->func->reset_port(dev))
3165 return -EINVAL;
3166 break;
3167 }
3168 if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
3169 return -EOPNOTSUPP;
3170 local->host_roaming = 2;
3171 local->privacy_invoked = 1;
3172 local->wpa = 1;
3173 if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1) ||
3174 hostap_set_roaming(local) ||
3175 hostap_set_encryption(local) ||
3176 local->func->reset_port(dev))
3177 return -EINVAL;
3178 break;
3179 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
3180 local->ieee_802_1x = data->value;
3181 break;
3182 case IW_AUTH_PRIVACY_INVOKED:
3183 local->privacy_invoked = data->value;
3184 break;
3185 default:
3186 return -EOPNOTSUPP;
3187 }
3188 return 0;
3189}
3190
3191
3192static int prism2_ioctl_giwauth(struct net_device *dev,
3193 struct iw_request_info *info,
3194 struct iw_param *data, char *extra)
3195{
3196 struct hostap_interface *iface = dev->priv;
3197 local_info_t *local = iface->local;
3198
3199 switch (data->flags & IW_AUTH_INDEX) {
3200 case IW_AUTH_WPA_VERSION:
3201 case IW_AUTH_CIPHER_PAIRWISE:
3202 case IW_AUTH_CIPHER_GROUP:
3203 case IW_AUTH_KEY_MGMT:
3204 /*
3205 * Host AP driver does not use these parameters and allows
3206 * wpa_supplicant to control them internally.
3207 */
3208 return -EOPNOTSUPP;
3209 case IW_AUTH_TKIP_COUNTERMEASURES:
3210 data->value = local->tkip_countermeasures;
3211 break;
3212 case IW_AUTH_DROP_UNENCRYPTED:
3213 data->value = local->drop_unencrypted;
3214 break;
3215 case IW_AUTH_80211_AUTH_ALG:
3216 data->value = local->auth_algs;
3217 break;
3218 case IW_AUTH_WPA_ENABLED:
3219 data->value = local->wpa;
3220 break;
3221 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
3222 data->value = local->ieee_802_1x;
3223 break;
3224 default:
3225 return -EOPNOTSUPP;
3226 }
3227 return 0;
3228}
3229
3230
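/* SIOCSIWENCODEEXT handler: select the key slot (or per-station entry),
 * load the matching ieee80211 crypto module (WEP/TKIP/CCMP) if needed,
 * configure the key material, and update the TX key index. */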
3231static int prism2_ioctl_siwencodeext(struct net_device *dev,
3232 struct iw_request_info *info,
3233 struct iw_point *erq, char *extra)
3234{
3235 struct hostap_interface *iface = dev->priv;
3236 local_info_t *local = iface->local;
3237 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
3238 int i, ret = 0;
3239 struct ieee80211_crypto_ops *ops;
3240 struct ieee80211_crypt_data **crypt;
3241 void *sta_ptr;
3242 u8 *addr;
3243 const char *alg, *module;
3244
3245 i = erq->flags & IW_ENCODE_INDEX;
3246 if (i > WEP_KEYS)
3247 return -EINVAL;
3248 if (i < 1 || i > WEP_KEYS)
3249 i = local->tx_keyidx;
3250 else
3251 i--;
3252 if (i < 0 || i >= WEP_KEYS)
3253 return -EINVAL;
3254
3255 addr = ext->addr.sa_data;
3256 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
3257 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3258 sta_ptr = NULL;
3259 crypt = &local->crypt[i];
3260 } else {
3261 if (i != 0)
3262 return -EINVAL;
3263 sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
3264 if (sta_ptr == NULL) {
3265 if (local->iw_mode == IW_MODE_INFRA) {
3266 /*
3267 * TODO: add STA entry for the current AP so
3268 * that unicast key can be used. For now, this
3269 * is emulated by using default key idx 0.
3270 */
3271 i = 0;
3272 crypt = &local->crypt[i];
3273 } else
3274 return -EINVAL;
3275 }
3276 }
3277
3278 if ((erq->flags & IW_ENCODE_DISABLED) ||
3279 ext->alg == IW_ENCODE_ALG_NONE) {
3280 if (*crypt)
3281 prism2_crypt_delayed_deinit(local, crypt);
3282 goto done;
3283 }
3284
3285 switch (ext->alg) {
3286 case IW_ENCODE_ALG_WEP:
3287 alg = "WEP";
3288 module = "ieee80211_crypt_wep";
3289 break;
3290 case IW_ENCODE_ALG_TKIP:
3291 alg = "TKIP";
3292 module = "ieee80211_crypt_tkip";
3293 break;
3294 case IW_ENCODE_ALG_CCMP:
3295 alg = "CCMP";
3296 module = "ieee80211_crypt_ccmp";
3297 break;
3298 default:
3299 printk(KERN_DEBUG "%s: unsupported algorithm %d\n",
3300 local->dev->name, ext->alg);
3301 ret = -EOPNOTSUPP;
3302 goto done;
3303 }
3304
3305 ops = ieee80211_get_crypto_ops(alg);
3306 if (ops == NULL) {
3307 request_module(module);
3308 ops = ieee80211_get_crypto_ops(alg);
3309 }
3310 if (ops == NULL) {
3311 printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
3312 local->dev->name, alg);
3313 ret = -EOPNOTSUPP;
3314 goto done;
3315 }
3316
3317 if (sta_ptr || ext->alg != IW_ENCODE_ALG_WEP) {
3318 /*
3319 * Per-station encryption and algorithms other than WEP
3320 * require host-based encryption, so force it on
3321 * automatically.
3322 */
3323 local->host_decrypt = local->host_encrypt = 1;
3324 }
3325
3326 if (*crypt == NULL || (*crypt)->ops != ops) {
3327 struct ieee80211_crypt_data *new_crypt;
3328
3329 prism2_crypt_delayed_deinit(local, crypt);
3330
3331 new_crypt = (struct ieee80211_crypt_data *)
3332 kmalloc(sizeof(struct ieee80211_crypt_data),
3333 GFP_KERNEL);
3334 if (new_crypt == NULL) {
3335 ret = -ENOMEM;
3336 goto done;
3337 }
3338 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
3339 new_crypt->ops = ops;
3340 new_crypt->priv = new_crypt->ops->init(i);
3341 if (new_crypt->priv == NULL) {
3342 kfree(new_crypt);
3343 ret = -EINVAL;
3344 goto done;
3345 }
3346
3347 *crypt = new_crypt;
3348 }
3349
3350 /*
3351 * TODO: if ext_flags does not have IW_ENCODE_EXT_RX_SEQ_VALID, the
3352 * existing seq# should not be changed.
3353 * TODO: if ext_flags has IW_ENCODE_EXT_TX_SEQ_VALID, next TX seq#
3354 * should be changed to something else than zero.
3355 */
3356 if ((!(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) || ext->key_len > 0)
3357 && (*crypt)->ops->set_key &&
3358 (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
3359 (*crypt)->priv) < 0) {
3360 printk(KERN_DEBUG "%s: key setting failed\n",
3361 local->dev->name);
3362 ret = -EINVAL;
3363 goto done;
3364 }
3365
3366 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
3367 if (!sta_ptr)
3368 local->tx_keyidx = i;
3369 else if (i) {
3370 ret = -EINVAL;
3371 goto done;
3372 }
3373 }
3374
3375
3376 if (sta_ptr == NULL && ext->key_len > 0) {
3377 int first = 1, j;
3378 for (j = 0; j < WEP_KEYS; j++) {
3379 if (j != i && local->crypt[j]) {
3380 first = 0;
3381 break;
3382 }
3383 }
3384 if (first)
3385 local->tx_keyidx = i;
3386 }
3387
3388 done:
3389 if (sta_ptr)
3390 hostap_handle_sta_release(sta_ptr);
3391
3392 local->open_wep = erq->flags & IW_ENCODE_OPEN;
3393
3394 /*
3395 * Do not reset port0 if card is in Managed mode since resetting will
3396 * generate a new IEEE 802.11 authentication, which may end up looping
3397 * with IEEE 802.1X. Prism2 documentation seems to require a port reset
3398 * after WEP configuration. However, keys are apparently changed at
3399 * least in Managed mode.
3400 */
3401 if (ret == 0 &&
3402 (hostap_set_encryption(local) ||
3403 (local->iw_mode != IW_MODE_INFRA &&
3404 local->func->reset_port(local->dev))))
3405 ret = -EINVAL;
3406
3407 return ret;
3408}
3409
3410
3411static int prism2_ioctl_giwencodeext(struct net_device *dev,
3412 struct iw_request_info *info,
3413 struct iw_point *erq, char *extra)
3414{
3415 struct hostap_interface *iface = dev->priv;
3416 local_info_t *local = iface->local;
3417 struct ieee80211_crypt_data **crypt;
3418 void *sta_ptr;
3419 int max_key_len, i;
3420 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
3421 u8 *addr;
3422
3423 max_key_len = erq->length - sizeof(*ext);
3424 if (max_key_len < 0)
3425 return -EINVAL;
3426
3427 i = erq->flags & IW_ENCODE_INDEX;
3428 if (i < 1 || i > WEP_KEYS)
3429 i = local->tx_keyidx;
3430 else
3431 i--;
3432
3433 addr = ext->addr.sa_data;
3434 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
3435 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3436 sta_ptr = NULL;
3437 crypt = &local->crypt[i];
3438 } else {
3439 i = 0;
3440 sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
3441 if (sta_ptr == NULL)
3442 return -EINVAL;
3443 }
3444 erq->flags = i + 1;
3445 memset(ext, 0, sizeof(*ext));
3446
3447 if (*crypt == NULL || (*crypt)->ops == NULL) {
3448 ext->alg = IW_ENCODE_ALG_NONE;
3449 ext->key_len = 0;
3450 erq->flags |= IW_ENCODE_DISABLED;
3451 } else {
3452 if (strcmp((*crypt)->ops->name, "WEP") == 0)
3453 ext->alg = IW_ENCODE_ALG_WEP;
3454 else if (strcmp((*crypt)->ops->name, "TKIP") == 0)
3455 ext->alg = IW_ENCODE_ALG_TKIP;
3456 else if (strcmp((*crypt)->ops->name, "CCMP") == 0)
3457 ext->alg = IW_ENCODE_ALG_CCMP;
3458 else
3459 return -EINVAL;
3460
3461 if ((*crypt)->ops->get_key) {
3462 ext->key_len =
3463 (*crypt)->ops->get_key(ext->key,
3464 max_key_len,
3465 ext->tx_seq,
3466 (*crypt)->priv);
3467 if (ext->key_len &&
3468 (ext->alg == IW_ENCODE_ALG_TKIP ||
3469 ext->alg == IW_ENCODE_ALG_CCMP))
3470 ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
3471 }
3472 }
3473
3474 if (sta_ptr)
3475 hostap_handle_sta_release(sta_ptr);
3476
3477 return 0;
3478}
3479
3480
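/* hostapd ioctl version of key configuration (PRISM2_SET_ENCRYPTION):
 * like SIOCSIWENCODEEXT above, but driven by struct prism2_hostapd_param. */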
3481static int prism2_ioctl_set_encryption(local_info_t *local,
3482 struct prism2_hostapd_param *param,
3483 int param_len)
3484{
3485 int ret = 0;
3486 struct ieee80211_crypto_ops *ops;
3487 struct ieee80211_crypt_data **crypt;
3488 void *sta_ptr;
3489
3490 param->u.crypt.err = 0;
3491 param->u.crypt.alg[HOSTAP_CRYPT_ALG_NAME_LEN - 1] = '\0';
3492
3493 if (param_len !=
3494 (int) ((char *) param->u.crypt.key - (char *) param) +
3495 param->u.crypt.key_len)
3496 return -EINVAL;
3497
3498 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
3499 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3500 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3501 if (param->u.crypt.idx >= WEP_KEYS)
3502 return -EINVAL;
3503 sta_ptr = NULL;
3504 crypt = &local->crypt[param->u.crypt.idx];
3505 } else {
3506 if (param->u.crypt.idx)
3507 return -EINVAL;
3508 sta_ptr = ap_crypt_get_ptrs(
3509 local->ap, param->sta_addr,
3510 (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_PERMANENT),
3511 &crypt);
3512
3513 if (sta_ptr == NULL) {
3514 param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
3515 return -EINVAL;
3516 }
3517 }
3518
3519 if (strcmp(param->u.crypt.alg, "none") == 0) {
3520 if (crypt)
3521 prism2_crypt_delayed_deinit(local, crypt);
3522 goto done;
3523 }
3524
3525 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3526 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
3527 request_module("ieee80211_crypt_wep");
3528 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3529 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
3530 request_module("ieee80211_crypt_tkip");
3531 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3532 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
3533 request_module("ieee80211_crypt_ccmp");
3534 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
3535 }
3536 if (ops == NULL) {
3537 printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
3538 local->dev->name, param->u.crypt.alg);
3539 param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ALG;
3540 ret = -EINVAL;
3541 goto done;
3542 }
3543
3544 /* Per-station encryption and algorithms other than WEP require
3545 * host-based encryption, so force it on automatically */
3546 local->host_decrypt = local->host_encrypt = 1;
3547
3548 if (*crypt == NULL || (*crypt)->ops != ops) {
3549 struct ieee80211_crypt_data *new_crypt;
3550
3551 prism2_crypt_delayed_deinit(local, crypt);
3552
3553 new_crypt = (struct ieee80211_crypt_data *)
3554 kmalloc(sizeof(struct ieee80211_crypt_data),
3555 GFP_KERNEL);
3556 if (new_crypt == NULL) {
3557 ret = -ENOMEM;
3558 goto done;
3559 }
3560 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
3561 new_crypt->ops = ops;
3562 new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
3563 if (new_crypt->priv == NULL) {
3564 kfree(new_crypt);
3565 param->u.crypt.err =
3566 HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED;
3567 ret = -EINVAL;
3568 goto done;
3569 }
3570
3571 *crypt = new_crypt;
3572 }
3573
3574 if ((!(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) ||
3575 param->u.crypt.key_len > 0) && (*crypt)->ops->set_key &&
3576 (*crypt)->ops->set_key(param->u.crypt.key,
3577 param->u.crypt.key_len, param->u.crypt.seq,
3578 (*crypt)->priv) < 0) {
3579 printk(KERN_DEBUG "%s: key setting failed\n",
3580 local->dev->name);
3581 param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
3582 ret = -EINVAL;
3583 goto done;
3584 }
3585
3586 if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
3587 if (!sta_ptr)
3588 local->tx_keyidx = param->u.crypt.idx;
3589 else if (param->u.crypt.idx) {
3590 printk(KERN_DEBUG "%s: TX key idx setting failed\n",
3591 local->dev->name);
3592 param->u.crypt.err =
3593 HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED;
3594 ret = -EINVAL;
3595 goto done;
3596 }
3597 }
3598
3599 done:
3600 if (sta_ptr)
3601 hostap_handle_sta_release(sta_ptr);
3602
3603 /* Do not reset port0 if card is in Managed mode since resetting will
3604 * generate a new IEEE 802.11 authentication, which may end up looping
3605 * with IEEE 802.1X. Prism2 documentation seems to require a port reset
3606 * after WEP configuration. However, keys are apparently changed at
3607 * least in Managed mode. */
3608 if (ret == 0 &&
3609 (hostap_set_encryption(local) ||
3610 (local->iw_mode != IW_MODE_INFRA &&
3611 local->func->reset_port(local->dev)))) {
3612 param->u.crypt.err = HOSTAP_CRYPT_ERR_CARD_CONF_FAILED;
3613 return -EINVAL;
3614 }
3615
3616 return ret;
3617}
3618
3619
3620static int prism2_ioctl_get_encryption(local_info_t *local,
3621 struct prism2_hostapd_param *param,
3622 int param_len)
3623{
3624 struct ieee80211_crypt_data **crypt;
3625 void *sta_ptr;
3626 int max_key_len;
3627
3628 param->u.crypt.err = 0;
3629
3630 max_key_len = param_len -
3631 (int) ((char *) param->u.crypt.key - (char *) param);
3632 if (max_key_len < 0)
3633 return -EINVAL;
3634
3635 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
3636 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3637 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3638 sta_ptr = NULL;
3639 if (param->u.crypt.idx >= WEP_KEYS)
3640 param->u.crypt.idx = local->tx_keyidx;
3641 crypt = &local->crypt[param->u.crypt.idx];
3642 } else {
3643 param->u.crypt.idx = 0;
3644 sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0,
3645 &crypt);
3646
3647 if (sta_ptr == NULL) {
3648 param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
3649 return -EINVAL;
3650 }
3651 }
3652
3653 if (*crypt == NULL || (*crypt)->ops == NULL) {
3654 memcpy(param->u.crypt.alg, "none", 5);
3655 param->u.crypt.key_len = 0;
3656 param->u.crypt.idx = 0xff;
3657 } else {
3658 strncpy(param->u.crypt.alg, (*crypt)->ops->name,
3659 HOSTAP_CRYPT_ALG_NAME_LEN);
3660 param->u.crypt.key_len = 0;
3661
3662 memset(param->u.crypt.seq, 0, 8);
3663 if ((*crypt)->ops->get_key) {
3664 param->u.crypt.key_len =
3665 (*crypt)->ops->get_key(param->u.crypt.key,
3666 max_key_len,
3667 param->u.crypt.seq,
3668 (*crypt)->priv);
3669 }
3670 }
3671
3672 if (sta_ptr)
3673 hostap_handle_sta_release(sta_ptr);
3674
3675 return 0;
3676}
3677
3678
3679static int prism2_ioctl_get_rid(local_info_t *local,
3680 struct prism2_hostapd_param *param,
3681 int param_len)
3682{
3683 int max_len, res;
3684
3685 max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
3686 if (max_len < 0)
3687 return -EINVAL;
3688
3689 res = local->func->get_rid(local->dev, param->u.rid.rid,
3690 param->u.rid.data, param->u.rid.len, 0);
3691 if (res >= 0) {
3692 param->u.rid.len = res;
3693 return 0;
3694 }
3695
3696 return res;
3697}
3698
3699
3700static int prism2_ioctl_set_rid(local_info_t *local,
3701 struct prism2_hostapd_param *param,
3702 int param_len)
3703{
3704 int max_len;
3705
3706 max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
3707 if (max_len < 0 || max_len < param->u.rid.len)
3708 return -EINVAL;
3709
3710 return local->func->set_rid(local->dev, param->u.rid.rid,
3711 param->u.rid.data, param->u.rid.len);
3712}
3713
3714
3715static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
3716 struct prism2_hostapd_param *param,
3717 int param_len)
3718{
3719 printk(KERN_DEBUG "%ssta: associated as client with AP " MACSTR "\n",
3720 local->dev->name, MAC2STR(param->sta_addr));
3721 memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN);
3722 return 0;
3723}
3724
3725
3726static int prism2_ioctl_siwgenie(struct net_device *dev,
3727 struct iw_request_info *info,
3728 struct iw_point *data, char *extra)
3729{
3730 return prism2_set_genericelement(dev, extra, data->length);
3731}
3732
3733
3734static int prism2_ioctl_giwgenie(struct net_device *dev,
3735 struct iw_request_info *info,
3736 struct iw_point *data, char *extra)
3737{
3738 struct hostap_interface *iface = dev->priv;
3739 local_info_t *local = iface->local;
3740 int len = local->generic_elem_len - 2;
3741
3742 if (len <= 0 || local->generic_elem == NULL) {
3743 data->length = 0;
3744 return 0;
3745 }
3746
3747 if (data->length < len)
3748 return -E2BIG;
3749
3750 data->length = len;
3751 memcpy(extra, local->generic_elem + 2, len);
3752
3753 return 0;
3754}
3755
3756
3757static int prism2_ioctl_set_generic_element(local_info_t *local,
3758 struct prism2_hostapd_param *param,
3759 int param_len)
3760{
3761 int max_len, len;
3762
3763 len = param->u.generic_elem.len;
3764 max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
3765 if (max_len < 0 || max_len < len)
3766 return -EINVAL;
3767
3768 return prism2_set_genericelement(local->dev,
3769 param->u.generic_elem.data, len);
3770}
3771
3772
3773static int prism2_ioctl_siwmlme(struct net_device *dev,
3774 struct iw_request_info *info,
3775 struct iw_point *data, char *extra)
3776{
3777 struct hostap_interface *iface = dev->priv;
3778 local_info_t *local = iface->local;
3779 struct iw_mlme *mlme = (struct iw_mlme *) extra;
3780 u16 reason;
3781
3782 reason = cpu_to_le16(mlme->reason_code);
3783
3784 switch (mlme->cmd) {
3785 case IW_MLME_DEAUTH:
3786 return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
3787 IEEE80211_STYPE_DEAUTH,
3788 (u8 *) &reason, 2);
3789 case IW_MLME_DISASSOC:
3790 return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
3791 IEEE80211_STYPE_DISASSOC,
3792 (u8 *) &reason, 2);
3793 default:
3794 return -EOPNOTSUPP;
3795 }
3796}
3797
3798
3799static int prism2_ioctl_mlme(local_info_t *local,
3800 struct prism2_hostapd_param *param)
3801{
3802 u16 reason;
3803
3804 reason = cpu_to_le16(param->u.mlme.reason_code);
3805 switch (param->u.mlme.cmd) {
3806 case MLME_STA_DEAUTH:
3807 return prism2_sta_send_mgmt(local, param->sta_addr,
3808 IEEE80211_STYPE_DEAUTH,
3809 (u8 *) &reason, 2);
3810 case MLME_STA_DISASSOC:
3811 return prism2_sta_send_mgmt(local, param->sta_addr,
3812 IEEE80211_STYPE_DISASSOC,
3813 (u8 *) &reason, 2);
3814 default:
3815 return -EOPNOTSUPP;
3816 }
3817}
3818
3819
3820static int prism2_ioctl_scan_req(local_info_t *local,
3821 struct prism2_hostapd_param *param)
3822{
3823#ifndef PRISM2_NO_STATION_MODES
3824 if ((local->iw_mode != IW_MODE_INFRA &&
3825 local->iw_mode != IW_MODE_ADHOC) ||
3826 (local->sta_fw_ver < PRISM2_FW_VER(1,3,1)))
3827 return -EOPNOTSUPP;
3828
3829 if (!local->dev_enabled)
3830 return -ENETDOWN;
3831
3832 return prism2_request_hostscan(local->dev, param->u.scan_req.ssid,
3833 param->u.scan_req.ssid_len);
3834#else /* PRISM2_NO_STATION_MODES */
3835 return -EOPNOTSUPP;
3836#endif /* PRISM2_NO_STATION_MODES */
3837}
3838
3839
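/* Dispatcher for the hostapd ioctl interface: copy struct
 * prism2_hostapd_param from user space, route known commands to the
 * handlers above, and pass the rest to prism2_hostapd() for AP management. */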
3840static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
3841{
3842 struct prism2_hostapd_param *param;
3843 int ret = 0;
3844 int ap_ioctl = 0;
3845
3846 if (p->length < sizeof(struct prism2_hostapd_param) ||
3847 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
3848 return -EINVAL;
3849
3850 param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL);
3851 if (param == NULL)
3852 return -ENOMEM;
3853
3854 if (copy_from_user(param, p->pointer, p->length)) {
3855 ret = -EFAULT;
3856 goto out;
3857 }
3858
3859 switch (param->cmd) {
3860 case PRISM2_SET_ENCRYPTION:
3861 ret = prism2_ioctl_set_encryption(local, param, p->length);
3862 break;
3863 case PRISM2_GET_ENCRYPTION:
3864 ret = prism2_ioctl_get_encryption(local, param, p->length);
3865 break;
3866 case PRISM2_HOSTAPD_GET_RID:
3867 ret = prism2_ioctl_get_rid(local, param, p->length);
3868 break;
3869 case PRISM2_HOSTAPD_SET_RID:
3870 ret = prism2_ioctl_set_rid(local, param, p->length);
3871 break;
3872 case PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR:
3873 ret = prism2_ioctl_set_assoc_ap_addr(local, param, p->length);
3874 break;
3875 case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
3876 ret = prism2_ioctl_set_generic_element(local, param,
3877 p->length);
3878 break;
3879 case PRISM2_HOSTAPD_MLME:
3880 ret = prism2_ioctl_mlme(local, param);
3881 break;
3882 case PRISM2_HOSTAPD_SCAN_REQ:
3883 ret = prism2_ioctl_scan_req(local, param);
3884 break;
3885 default:
3886 ret = prism2_hostapd(local->ap, param);
3887 ap_ioctl = 1;
3888 break;
3889 }
3890
3891 if (ret == 1 || !ap_ioctl) {
3892 if (copy_to_user(p->pointer, param, p->length)) {
3893 ret = -EFAULT;
3894 goto out;
3895 } else if (ap_ioctl)
3896 ret = 0;
3897 }
3898
3899 out:
3900 if (param != NULL)
3901 kfree(param);
3902
3903 return ret;
3904}
3905
3906
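/* ethtool get_drvinfo handler: report the driver name, driver version, and
 * station firmware version. */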
3907static void prism2_get_drvinfo(struct net_device *dev,
3908 struct ethtool_drvinfo *info)
3909{
3910 struct hostap_interface *iface;
3911 local_info_t *local;
3912
3913 iface = netdev_priv(dev);
3914 local = iface->local;
3915
3916 strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
3917 strncpy(info->version, PRISM2_VERSION,
3918 sizeof(info->version) - 1);
3919 snprintf(info->fw_version, sizeof(info->fw_version) - 1,
3920 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
3921 (local->sta_fw_ver >> 8) & 0xff,
3922 local->sta_fw_ver & 0xff);
3923}
3924
3925static struct ethtool_ops prism2_ethtool_ops = {
3926 .get_drvinfo = prism2_get_drvinfo
3927};
3928
3929
3930/* Structures to export the Wireless Handlers */
3931
3932static const iw_handler prism2_handler[] =
3933{
3934 (iw_handler) NULL, /* SIOCSIWCOMMIT */
3935 (iw_handler) prism2_get_name, /* SIOCGIWNAME */
3936 (iw_handler) NULL, /* SIOCSIWNWID */
3937 (iw_handler) NULL, /* SIOCGIWNWID */
3938 (iw_handler) prism2_ioctl_siwfreq, /* SIOCSIWFREQ */
3939 (iw_handler) prism2_ioctl_giwfreq, /* SIOCGIWFREQ */
3940 (iw_handler) prism2_ioctl_siwmode, /* SIOCSIWMODE */
3941 (iw_handler) prism2_ioctl_giwmode, /* SIOCGIWMODE */
3942 (iw_handler) prism2_ioctl_siwsens, /* SIOCSIWSENS */
3943 (iw_handler) prism2_ioctl_giwsens, /* SIOCGIWSENS */
3944 (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */
3945 (iw_handler) prism2_ioctl_giwrange, /* SIOCGIWRANGE */
3946 (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */
3947 (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
3948 (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */
3949 (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
3950 iw_handler_set_spy, /* SIOCSIWSPY */
3951 iw_handler_get_spy, /* SIOCGIWSPY */
3952 iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
3953 iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
3954 (iw_handler) prism2_ioctl_siwap, /* SIOCSIWAP */
3955 (iw_handler) prism2_ioctl_giwap, /* SIOCGIWAP */
3956 (iw_handler) prism2_ioctl_siwmlme, /* SIOCSIWMLME */
3957 (iw_handler) prism2_ioctl_giwaplist, /* SIOCGIWAPLIST */
3958 (iw_handler) prism2_ioctl_siwscan, /* SIOCSIWSCAN */
3959 (iw_handler) prism2_ioctl_giwscan, /* SIOCGIWSCAN */
3960 (iw_handler) prism2_ioctl_siwessid, /* SIOCSIWESSID */
3961 (iw_handler) prism2_ioctl_giwessid, /* SIOCGIWESSID */
3962 (iw_handler) prism2_ioctl_siwnickn, /* SIOCSIWNICKN */
3963 (iw_handler) prism2_ioctl_giwnickn, /* SIOCGIWNICKN */
3964 (iw_handler) NULL, /* -- hole -- */
3965 (iw_handler) NULL, /* -- hole -- */
3966 (iw_handler) prism2_ioctl_siwrate, /* SIOCSIWRATE */
3967 (iw_handler) prism2_ioctl_giwrate, /* SIOCGIWRATE */
3968 (iw_handler) prism2_ioctl_siwrts, /* SIOCSIWRTS */
3969 (iw_handler) prism2_ioctl_giwrts, /* SIOCGIWRTS */
3970 (iw_handler) prism2_ioctl_siwfrag, /* SIOCSIWFRAG */
3971 (iw_handler) prism2_ioctl_giwfrag, /* SIOCGIWFRAG */
3972 (iw_handler) prism2_ioctl_siwtxpow, /* SIOCSIWTXPOW */
3973 (iw_handler) prism2_ioctl_giwtxpow, /* SIOCGIWTXPOW */
3974 (iw_handler) prism2_ioctl_siwretry, /* SIOCSIWRETRY */
3975 (iw_handler) prism2_ioctl_giwretry, /* SIOCGIWRETRY */
3976 (iw_handler) prism2_ioctl_siwencode, /* SIOCSIWENCODE */
3977 (iw_handler) prism2_ioctl_giwencode, /* SIOCGIWENCODE */
3978 (iw_handler) prism2_ioctl_siwpower, /* SIOCSIWPOWER */
3979 (iw_handler) prism2_ioctl_giwpower, /* SIOCGIWPOWER */
3980 (iw_handler) NULL, /* -- hole -- */
3981 (iw_handler) NULL, /* -- hole -- */
3982 (iw_handler) prism2_ioctl_siwgenie, /* SIOCSIWGENIE */
3983 (iw_handler) prism2_ioctl_giwgenie, /* SIOCGIWGENIE */
3984 (iw_handler) prism2_ioctl_siwauth, /* SIOCSIWAUTH */
3985 (iw_handler) prism2_ioctl_giwauth, /* SIOCGIWAUTH */
3986 (iw_handler) prism2_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */
3987 (iw_handler) prism2_ioctl_giwencodeext, /* SIOCGIWENCODEEXT */
3988 (iw_handler) NULL, /* SIOCSIWPMKSA */
3989 (iw_handler) NULL, /* -- hole -- */
3990};
3991
3992static const iw_handler prism2_private_handler[] =
3993{ /* SIOCIWFIRSTPRIV + */
3994 (iw_handler) prism2_ioctl_priv_prism2_param, /* 0 */
3995 (iw_handler) prism2_ioctl_priv_get_prism2_param, /* 1 */
3996 (iw_handler) prism2_ioctl_priv_writemif, /* 2 */
3997 (iw_handler) prism2_ioctl_priv_readmif, /* 3 */
3998};
3999
4000static const struct iw_handler_def hostap_iw_handler_def =
4001{
4002 .num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
4003 .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),
4004 .num_private_args = sizeof(prism2_priv) / sizeof(struct iw_priv_args),
4005 .standard = (iw_handler *) prism2_handler,
4006 .private = (iw_handler *) prism2_private_handler,
4007 .private_args = (struct iw_priv_args *) prism2_priv,
4008 .get_wireless_stats = hostap_get_wireless_stats,
4009};
4010
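/*
 * A note on the two tables above: the wireless extensions core looks a
 * standard request up at index (cmd - SIOCIWFIRST) of prism2_handler and a
 * private request at index (cmd - SIOCIWFIRSTPRIV) of
 * prism2_private_handler, so every entry has to stay at the array slot that
 * matches its ioctl number (hence the explicit NULL "hole" entries). The
 * sizeof()/sizeof() expressions in hostap_iw_handler_def could equally be
 * spelled with the kernel's ARRAY_SIZE() helper, for example
 *
 *	.num_standard = ARRAY_SIZE(prism2_handler),
 *
 * which is only a stylistic alternative; the computed values are identical.
 */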
4011
4012int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4013{
4014 struct iwreq *wrq = (struct iwreq *) ifr;
4015 struct hostap_interface *iface;
4016 local_info_t *local;
4017 int ret = 0;
4018
4019 iface = netdev_priv(dev);
4020 local = iface->local;
4021
4022 switch (cmd) {
4023 /* Private ioctls (iwpriv) that have not yet been converted
4024 * into the new wireless extensions API */
4025
4026 case PRISM2_IOCTL_INQUIRE:
4027 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4028 else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name);
4029 break;
4030
4031 case PRISM2_IOCTL_MONITOR:
4032 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4033 else ret = prism2_ioctl_priv_monitor(dev, (int *) wrq->u.name);
4034 break;
4035
4036 case PRISM2_IOCTL_RESET:
4037 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4038 else ret = prism2_ioctl_priv_reset(dev, (int *) wrq->u.name);
4039 break;
4040
4041 case PRISM2_IOCTL_WDS_ADD:
4042 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4043 else ret = prism2_wds_add(local, wrq->u.ap_addr.sa_data, 1);
4044 break;
4045
4046 case PRISM2_IOCTL_WDS_DEL:
4047 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4048 else ret = prism2_wds_del(local, wrq->u.ap_addr.sa_data, 1, 0);
4049 break;
4050
4051 case PRISM2_IOCTL_SET_RID_WORD:
4052 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4053 else ret = prism2_ioctl_priv_set_rid_word(dev,
4054 (int *) wrq->u.name);
4055 break;
4056
4057#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
4058 case PRISM2_IOCTL_MACCMD:
4059 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4060 else ret = ap_mac_cmd_ioctl(local, (int *) wrq->u.name);
4061 break;
4062
4063 case PRISM2_IOCTL_ADDMAC:
4064 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4065 else ret = ap_control_add_mac(&local->ap->mac_restrictions,
4066 wrq->u.ap_addr.sa_data);
4067 break;
4068 case PRISM2_IOCTL_DELMAC:
4069 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4070 else ret = ap_control_del_mac(&local->ap->mac_restrictions,
4071 wrq->u.ap_addr.sa_data);
4072 break;
4073 case PRISM2_IOCTL_KICKMAC:
4074 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4075 else ret = ap_control_kick_mac(local->ap, local->dev,
4076 wrq->u.ap_addr.sa_data);
4077 break;
4078#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
4079
4080
4081 /* Private ioctls that are not used with iwpriv;
4082 * in SIOCDEVPRIVATE range */
4083
4084#ifdef PRISM2_DOWNLOAD_SUPPORT
4085 case PRISM2_IOCTL_DOWNLOAD:
4086 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4087 else ret = prism2_ioctl_priv_download(local, &wrq->u.data);
4088 break;
4089#endif /* PRISM2_DOWNLOAD_SUPPORT */
4090
4091 case PRISM2_IOCTL_HOSTAPD:
4092 if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
4093 else ret = prism2_ioctl_priv_hostapd(local, &wrq->u.data);
4094 break;
4095
4096 default:
4097 ret = -EOPNOTSUPP;
4098 break;
4099 }
4100
4101 return ret;
4102}
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
new file mode 100644
index 000000000000..025f8cdb5566
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -0,0 +1,473 @@
1#define PRISM2_PCI
2
3/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
4 * driver patches from Reyk Floeter <reyk@vantronix.net> and
5 * Andy Warner <andyw@pobox.com> */
6
7#include <linux/config.h>
8#include <linux/version.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/if.h>
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/workqueue.h>
15#include <linux/wireless.h>
16#include <net/iw_handler.h>
17
18#include <linux/ioport.h>
19#include <linux/pci.h>
20#include <asm/io.h>
21
22#include "hostap_wlan.h"
23
24
25static char *version = PRISM2_VERSION " (Jouni Malinen <jkmaline@cc.hut.fi>)";
26static char *dev_info = "hostap_pci";
27
28
29MODULE_AUTHOR("Jouni Malinen");
30MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
31 "PCI cards.");
32MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
33MODULE_LICENSE("GPL");
34MODULE_VERSION(PRISM2_VERSION);
35
36
37/* struct local_info::hw_priv */
38struct hostap_pci_priv {
39 void __iomem *mem_start;
40};
41
42
43/* FIX: do we need mb/wmb/rmb with memory operations? */
44
45
46static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
47 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
48 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
49 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
50 { 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
51 /* Samsung MagicLAN SWL-2210P */
52 { 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
53 { 0 }
54};
55
56
57#ifdef PRISM2_IO_DEBUG
58
59static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
60{
61 struct hostap_interface *iface;
62 local_info_t *local;
63 unsigned long flags;
64
65 iface = netdev_priv(dev);
66 local = iface->local;
67
68 spin_lock_irqsave(&local->lock, flags);
69 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
70 writeb(v, ((struct hostap_pci_priv *) local->hw_priv)->mem_start + a);
71 spin_unlock_irqrestore(&local->lock, flags);
72}
73
74static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
75{
76 struct hostap_interface *iface;
77 local_info_t *local;
78 unsigned long flags;
79 u8 v;
80
81 iface = netdev_priv(dev);
82 local = iface->local;
83
84 spin_lock_irqsave(&local->lock, flags);
85 v = readb(((struct hostap_pci_priv *) local->hw_priv)->mem_start + a);
86 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
87 spin_unlock_irqrestore(&local->lock, flags);
88 return v;
89}
90
91static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
92{
93 struct hostap_interface *iface;
94 local_info_t *local;
95 unsigned long flags;
96
97 iface = netdev_priv(dev);
98 local = iface->local;
99
100 spin_lock_irqsave(&local->lock, flags);
101 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
102 writew(v, ((struct hostap_pci_priv *) local->hw_priv)->mem_start + a);
103 spin_unlock_irqrestore(&local->lock, flags);
104}
105
106static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
107{
108 struct hostap_interface *iface;
109 local_info_t *local;
110 unsigned long flags;
111 u16 v;
112
113 iface = netdev_priv(dev);
114 local = iface->local;
115
116 spin_lock_irqsave(&local->lock, flags);
117 v = readw(((struct hostap_pci_priv *) local->hw_priv)->mem_start + a);
118 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
119 spin_unlock_irqrestore(&local->lock, flags);
120 return v;
121}
122
123#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
124#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
125#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
126#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
127#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), cpu_to_le16((v)))
128#define HFA384X_INW_DATA(a) (u16) le16_to_cpu(hfa384x_inw_debug(dev, (a)))
129
130#else /* PRISM2_IO_DEBUG */
131
132static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
133{
134 struct hostap_interface *iface;
135 struct hostap_pci_priv *hw_priv;
136 iface = netdev_priv(dev);
137 hw_priv = iface->local->hw_priv;
138 writeb(v, hw_priv->mem_start + a);
139}
140
141static inline u8 hfa384x_inb(struct net_device *dev, int a)
142{
143 struct hostap_interface *iface;
144 struct hostap_pci_priv *hw_priv;
145 iface = netdev_priv(dev);
146 hw_priv = iface->local->hw_priv;
147 return readb(hw_priv->mem_start + a);
148}
149
150static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
151{
152 struct hostap_interface *iface;
153 struct hostap_pci_priv *hw_priv;
154 iface = netdev_priv(dev);
155 hw_priv = iface->local->hw_priv;
156 writew(v, hw_priv->mem_start + a);
157}
158
159static inline u16 hfa384x_inw(struct net_device *dev, int a)
160{
161 struct hostap_interface *iface;
162 struct hostap_pci_priv *hw_priv;
163 iface = netdev_priv(dev);
164 hw_priv = iface->local->hw_priv;
165 return readw(hw_priv->mem_start + a);
166}
167
168#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
169#define HFA384X_INB(a) hfa384x_inb(dev, (a))
170#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
171#define HFA384X_INW(a) hfa384x_inw(dev, (a))
172#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), cpu_to_le16((v)))
173#define HFA384X_INW_DATA(a) (u16) le16_to_cpu(hfa384x_inw(dev, (a)))
174
175#endif /* PRISM2_IO_DEBUG */
176
177
178static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
179 int len)
180{
181 u16 d_off;
182 u16 *pos;
183
184 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
185 pos = (u16 *) buf;
186
187 for ( ; len > 1; len -= 2)
188 *pos++ = HFA384X_INW_DATA(d_off);
189
190 if (len & 1)
191 *((char *) pos) = HFA384X_INB(d_off);
192
193 return 0;
194}
195
196
197static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
198{
199 u16 d_off;
200 u16 *pos;
201
202 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
203 pos = (u16 *) buf;
204
205 for ( ; len > 1; len -= 2)
206 HFA384X_OUTW_DATA(*pos++, d_off);
207
208 if (len & 1)
209 HFA384X_OUTB(*((char *) pos), d_off);
210
211 return 0;
212}
213
214
215/* FIX: This might change at some point.. */
216#include "hostap_hw.c"
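/*
 * hostap_hw.c is pulled in textually rather than built as a separate object
 * so that the bus specific HFA384X_OUTB/INB/OUTW/INW(_DATA) register access
 * macros defined above are visible to the generic hardware code; the other
 * bus front ends (for example hostap_plx.c below) follow the same pattern
 * with their own I/O macros.
 */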
217
218static void prism2_pci_cor_sreset(local_info_t *local)
219{
220 struct net_device *dev = local->dev;
221 u16 reg;
222
223 reg = HFA384X_INB(HFA384X_PCICOR_OFF);
224 printk(KERN_DEBUG "%s: Original COR value: 0x%02x\n", dev->name, reg);
225
226 /* linux-wlan-ng uses extremely long hold and settle times for
227 * COR sreset. A comment in the driver code mentions that the long
228 * delays appear to be necessary. However, at least IBM 22P6901 seems
229 * to work fine with shorter delays.
230 *
231 * Longer delays can be configured by uncommenting the following line: */
232/* #define PRISM2_PCI_USE_LONG_DELAYS */
233
234#ifdef PRISM2_PCI_USE_LONG_DELAYS
235 int i;
236
237 HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
238 mdelay(250);
239
240 HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
241 mdelay(500);
242
243 /* Wait for f/w to complete initialization (CMD:BUSY == 0) */
244 i = 2000000 / 10;
245 while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
246 udelay(10);
247
248#else /* PRISM2_PCI_USE_LONG_DELAYS */
249
250 HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
251 mdelay(2);
252 HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
253 mdelay(2);
254
255#endif /* PRISM2_PCI_USE_LONG_DELAYS */
256
257 if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
258 printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
259 }
260}
261
262
263static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
264{
265 struct net_device *dev = local->dev;
266
267 HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
268 mdelay(10);
269 HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
270 mdelay(10);
271 HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
272 mdelay(10);
273}
274
275
276static struct prism2_helper_functions prism2_pci_funcs =
277{
278 .card_present = NULL,
279 .cor_sreset = prism2_pci_cor_sreset,
280 .dev_open = NULL,
281 .dev_close = NULL,
282 .genesis_reset = prism2_pci_genesis_reset,
283 .hw_type = HOSTAP_HW_PCI,
284};
285
286
287static int prism2_pci_probe(struct pci_dev *pdev,
288 const struct pci_device_id *id)
289{
290 unsigned long phymem;
291 void __iomem *mem = NULL;
292 local_info_t *local = NULL;
293 struct net_device *dev = NULL;
294 static int cards_found /* = 0 */;
295 int irq_registered = 0;
296 struct hostap_interface *iface;
297 struct hostap_pci_priv *hw_priv;
298
299 hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
300 if (hw_priv == NULL)
301 return -ENOMEM;
302 memset(hw_priv, 0, sizeof(*hw_priv));
303
304 if (pci_enable_device(pdev)) {
305 kfree(hw_priv); return -EIO; }
306
307 phymem = pci_resource_start(pdev, 0);
308
309 if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
310 printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
311 goto err_out_disable;
312 }
313
314 mem = ioremap(phymem, pci_resource_len(pdev, 0));
315 if (mem == NULL) {
316 printk(KERN_ERR "prism2: Cannot remap PCI memory region\n");
317 goto fail;
318 }
319
320 dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
321 &pdev->dev);
322 if (dev == NULL)
323 goto fail;
324 iface = netdev_priv(dev);
325 local = iface->local;
326 local->hw_priv = hw_priv;
327 cards_found++;
328
329 dev->irq = pdev->irq;
330 hw_priv->mem_start = mem;
331
332 prism2_pci_cor_sreset(local);
333
334 pci_set_drvdata(pdev, dev);
335
336 if (request_irq(dev->irq, prism2_interrupt, SA_SHIRQ, dev->name,
337 dev)) {
338 printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
339 goto fail;
340 } else
341 irq_registered = 1;
342
343 if (!local->pri_only && prism2_hw_config(dev, 1)) {
344 printk(KERN_DEBUG "%s: hardware initialization failed\n",
345 dev_info);
346 goto fail;
347 }
348
349 printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
350 "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
351
352 return hostap_hw_ready(dev);
353
354 fail:
356
357 if (irq_registered && dev)
358 free_irq(dev->irq, dev);
359
360 if (mem)
361 iounmap(mem);
362
363 release_mem_region(phymem, pci_resource_len(pdev, 0));
364
365 err_out_disable:
366 pci_disable_device(pdev);
367 kfree(hw_priv);
368 if (local)
369 local->hw_priv = NULL;
370 prism2_free_local_data(dev);
371
372 return -ENODEV;
373}
374
375
376static void prism2_pci_remove(struct pci_dev *pdev)
377{
378 struct net_device *dev;
379 struct hostap_interface *iface;
380 void __iomem *mem_start;
381 struct hostap_pci_priv *hw_priv;
382
383 dev = pci_get_drvdata(pdev);
384 iface = netdev_priv(dev);
385 hw_priv = iface->local->hw_priv;
386
387 /* Reset the hardware, and ensure interrupts are disabled. */
388 prism2_pci_cor_sreset(iface->local);
389 hfa384x_disable_interrupts(dev);
390
391 if (dev->irq)
392 free_irq(dev->irq, dev);
393
394 mem_start = hw_priv->mem_start;
395 kfree(hw_priv);
396 iface->local->hw_priv = NULL;
397 prism2_free_local_data(dev);
398
399 iounmap(mem_start);
400
401 release_mem_region(pci_resource_start(pdev, 0),
402 pci_resource_len(pdev, 0));
403 pci_disable_device(pdev);
404}
405
406
407#ifdef CONFIG_PM
408static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
409{
410 struct net_device *dev = pci_get_drvdata(pdev);
411
412 if (netif_running(dev)) {
413 netif_stop_queue(dev);
414 netif_device_detach(dev);
415 }
416 prism2_suspend(dev);
417 pci_save_state(pdev);
418 pci_disable_device(pdev);
419 pci_set_power_state(pdev, PCI_D3hot);
420
421 return 0;
422}
423
424static int prism2_pci_resume(struct pci_dev *pdev)
425{
426 struct net_device *dev = pci_get_drvdata(pdev);
427
428 pci_enable_device(pdev);
429 pci_restore_state(pdev);
430 prism2_hw_config(dev, 0);
431 if (netif_running(dev)) {
432 netif_device_attach(dev);
433 netif_start_queue(dev);
434 }
435
436 return 0;
437}
438#endif /* CONFIG_PM */
439
440
441MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
442
443static struct pci_driver prism2_pci_drv_id = {
444 .name = "prism2_pci",
445 .id_table = prism2_pci_id_table,
446 .probe = prism2_pci_probe,
447 .remove = prism2_pci_remove,
448#ifdef CONFIG_PM
449 .suspend = prism2_pci_suspend,
450 .resume = prism2_pci_resume,
451#endif /* CONFIG_PM */
452 /* Linux 2.4.6 added save_state and enable_wake that are not used here
453 */
454};
455
456
457static int __init init_prism2_pci(void)
458{
459 printk(KERN_INFO "%s: %s\n", dev_info, version);
460
461 return pci_register_driver(&prism2_pci_drv_id);
462}
463
464
465static void __exit exit_prism2_pci(void)
466{
467 pci_unregister_driver(&prism2_pci_drv_id);
468 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
469}
470
471
472module_init(init_prism2_pci);
473module_exit(exit_prism2_pci);
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
new file mode 100644
index 000000000000..474ef83d813e
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -0,0 +1,645 @@
1#define PRISM2_PLX
2
3/* Host AP driver's support for PC Cards on PCI adapters using PLX9052 is
4 * based on:
5 * - Host AP driver patch from james@madingley.org
6 * - linux-wlan-ng driver, Copyright (C) AbsoluteValue Systems, Inc.
7 */
8
9
10#include <linux/config.h>
11#include <linux/version.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/if.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/workqueue.h>
18#include <linux/wireless.h>
19#include <net/iw_handler.h>
20
21#include <linux/ioport.h>
22#include <linux/pci.h>
23#include <asm/io.h>
24
25#include "hostap_wlan.h"
26
27
28static char *version = PRISM2_VERSION " (Jouni Malinen <jkmaline@cc.hut.fi>)";
29static char *dev_info = "hostap_plx";
30
31
32MODULE_AUTHOR("Jouni Malinen");
33MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
34 "cards (PLX).");
35MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)");
36MODULE_LICENSE("GPL");
37MODULE_VERSION(PRISM2_VERSION);
38
39
40static int ignore_cis;
41module_param(ignore_cis, int, 0444);
42MODULE_PARM_DESC(ignore_cis, "Do not verify manfid information in CIS");
43
44
45/* struct local_info::hw_priv */
46struct hostap_plx_priv {
47 void __iomem *attr_mem;
48 unsigned int cor_offset;
49};
50
51
52#define PLX_MIN_ATTR_LEN 512 /* at least 2 x 256 is needed for CIS */
53#define COR_SRESET 0x80
54#define COR_LEVLREQ 0x40
55#define COR_ENABLE_FUNC 0x01
56/* PCI Configuration Registers */
57#define PLX_PCIIPR 0x3d /* PCI Interrupt Pin */
58/* Local Configuration Registers */
59#define PLX_INTCSR 0x4c /* Interrupt Control/Status Register */
60#define PLX_INTCSR_PCI_INTEN BIT(6) /* PCI Interrupt Enable */
61#define PLX_CNTRL 0x50
62#define PLX_CNTRL_SERIAL_EEPROM_PRESENT BIT(28)
63
64
65#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
66
67static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
68 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
69 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
70 PLXDEV(0x126c, 0x8030, "Nortel emobility"),
71 PLXDEV(0x1385, 0x4100, "Netgear MA301"),
72 PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"),
73 PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"),
74 PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"),
75 PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"),
76 PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"),
77 PLXDEV(0x16ab, 0x1103, "Longshine 8031"),
78 PLXDEV(0x16ec, 0x3685, "US Robotics USR2415"),
79 PLXDEV(0xec80, 0xec00, "Belkin F5D6000"),
80 { 0 }
81};
82
83
84/* Array of known Prism2/2.5 PC Card manufacturer ids. If your card's manfid
85 * is not listed here, you will need to add it to this table before the driver
86 * will initialize the card; see the example below the table. */
87static struct prism2_plx_manfid {
88 u16 manfid1, manfid2;
89} prism2_plx_known_manfids[] = {
90 { 0x000b, 0x7110 } /* D-Link DWL-650 Rev. P1 */,
91 { 0x000b, 0x7300 } /* Philips 802.11b WLAN PCMCIA */,
92 { 0x0101, 0x0777 } /* 3Com AirConnect PCI 777A */,
93 { 0x0126, 0x8000 } /* Proxim RangeLAN */,
94 { 0x0138, 0x0002 } /* Compaq WL100 */,
95 { 0x0156, 0x0002 } /* Intersil Prism II Ref. Design (and others) */,
96 { 0x026f, 0x030b } /* Buffalo WLI-CF-S11G */,
97 { 0x0274, 0x1612 } /* Linksys WPC11 Ver 2.5 */,
98 { 0x0274, 0x1613 } /* Linksys WPC11 Ver 3 */,
99 { 0x028a, 0x0002 } /* D-Link DRC-650 */,
100 { 0x0250, 0x0002 } /* Samsung SWL2000-N */,
101 { 0xc250, 0x0002 } /* EMTAC A2424i */,
102 { 0xd601, 0x0002 } /* Z-Com XI300 */,
103 { 0xd601, 0x0005 } /* Zcomax XI-325H 200mW */,
104 { 0, 0}
105};
106
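/*
 * Supporting a card whose manfid is missing from the table above is a matter
 * of inserting one more pair before the terminating { 0, 0 } entry, for
 * instance a hypothetical { 0x1234, 0x5678 } for an otherwise compatible
 * Prism2 card. Alternatively, CIS verification can be bypassed at load time
 * through the ignore_cis module parameter declared earlier, e.g.
 * "modprobe hostap_plx ignore_cis=1".
 */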
107
108#ifdef PRISM2_IO_DEBUG
109
110static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
111{
112 struct hostap_interface *iface;
113 local_info_t *local;
114 unsigned long flags;
115
116 iface = netdev_priv(dev);
117 local = iface->local;
118
119 spin_lock_irqsave(&local->lock, flags);
120 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
121 outb(v, dev->base_addr + a);
122 spin_unlock_irqrestore(&local->lock, flags);
123}
124
125static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
126{
127 struct hostap_interface *iface;
128 local_info_t *local;
129 unsigned long flags;
130 u8 v;
131
132 iface = netdev_priv(dev);
133 local = iface->local;
134
135 spin_lock_irqsave(&local->lock, flags);
136 v = inb(dev->base_addr + a);
137 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
138 spin_unlock_irqrestore(&local->lock, flags);
139 return v;
140}
141
142static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
143{
144 struct hostap_interface *iface;
145 local_info_t *local;
146 unsigned long flags;
147
148 iface = netdev_priv(dev);
149 local = iface->local;
150
151 spin_lock_irqsave(&local->lock, flags);
152 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
153 outw(v, dev->base_addr + a);
154 spin_unlock_irqrestore(&local->lock, flags);
155}
156
157static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
158{
159 struct hostap_interface *iface;
160 local_info_t *local;
161 unsigned long flags;
162 u16 v;
163
164 iface = netdev_priv(dev);
165 local = iface->local;
166
167 spin_lock_irqsave(&local->lock, flags);
168 v = inw(dev->base_addr + a);
169 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
170 spin_unlock_irqrestore(&local->lock, flags);
171 return v;
172}
173
174static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
175 u8 *buf, int wc)
176{
177 struct hostap_interface *iface;
178 local_info_t *local;
179 unsigned long flags;
180
181 iface = netdev_priv(dev);
182 local = iface->local;
183
184 spin_lock_irqsave(&local->lock, flags);
185 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
186 outsw(dev->base_addr + a, buf, wc);
187 spin_unlock_irqrestore(&local->lock, flags);
188}
189
190static inline void hfa384x_insw_debug(struct net_device *dev, int a,
191 u8 *buf, int wc)
192{
193 struct hostap_interface *iface;
194 local_info_t *local;
195 unsigned long flags;
196
197 iface = netdev_priv(dev);
198 local = iface->local;
199
200 spin_lock_irqsave(&local->lock, flags);
201 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
202 insw(dev->base_addr + a, buf, wc);
203 spin_unlock_irqrestore(&local->lock, flags);
204}
205
206#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
207#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
208#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
209#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
210#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
211#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))
212
213#else /* PRISM2_IO_DEBUG */
214
215#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
216#define HFA384X_INB(a) inb(dev->base_addr + (a))
217#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
218#define HFA384X_INW(a) inw(dev->base_addr + (a))
219#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
220#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)
221
222#endif /* PRISM2_IO_DEBUG */
223
224
225static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
226 int len)
227{
228 u16 d_off;
229 u16 *pos;
230
231 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
232 pos = (u16 *) buf;
233
234 if (len / 2)
235 HFA384X_INSW(d_off, buf, len / 2);
236 pos += len / 2;
237
238 if (len & 1)
239 *((char *) pos) = HFA384X_INB(d_off);
240
241 return 0;
242}
243
244
245static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
246{
247 u16 d_off;
248 u16 *pos;
249
250 d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
251 pos = (u16 *) buf;
252
253 if (len / 2)
254 HFA384X_OUTSW(d_off, buf, len / 2);
255 pos += len / 2;
256
257 if (len & 1)
258 HFA384X_OUTB(*((char *) pos), d_off);
259
260 return 0;
261}
262
263
264/* FIX: This might change at some point.. */
265#include "hostap_hw.c"
266
267
268static void prism2_plx_cor_sreset(local_info_t *local)
269{
270 unsigned char corsave;
271 struct hostap_plx_priv *hw_priv = local->hw_priv;
272
273 printk(KERN_DEBUG "%s: Doing reset via direct COR access.\n",
274 dev_info);
275
276 /* Set sreset bit of COR and clear it after hold time */
277
278 if (hw_priv->attr_mem == NULL) {
279 /* TMD7160 - COR at card's first I/O addr */
280 corsave = inb(hw_priv->cor_offset);
281 outb(corsave | COR_SRESET, hw_priv->cor_offset);
282 mdelay(2);
283 outb(corsave & ~COR_SRESET, hw_priv->cor_offset);
284 mdelay(2);
285 } else {
286 /* PLX9052 */
287 corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset);
288 writeb(corsave | COR_SRESET,
289 hw_priv->attr_mem + hw_priv->cor_offset);
290 mdelay(2);
291 writeb(corsave & ~COR_SRESET,
292 hw_priv->attr_mem + hw_priv->cor_offset);
293 mdelay(2);
294 }
295}
296
297
298static void prism2_plx_genesis_reset(local_info_t *local, int hcr)
299{
300 unsigned char corsave;
301 struct hostap_plx_priv *hw_priv = local->hw_priv;
302
303 if (hw_priv->attr_mem == NULL) {
304 /* TMD7160 - COR at card's first I/O addr */
305 corsave = inb(hw_priv->cor_offset);
306 outb(corsave | COR_SRESET, hw_priv->cor_offset);
307 mdelay(10);
308 outb(hcr, hw_priv->cor_offset + 2);
309 mdelay(10);
310 outb(corsave & ~COR_SRESET, hw_priv->cor_offset);
311 mdelay(10);
312 } else {
313 /* PLX9052 */
314 corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset);
315 writeb(corsave | COR_SRESET,
316 hw_priv->attr_mem + hw_priv->cor_offset);
317 mdelay(10);
318 writeb(hcr, hw_priv->attr_mem + hw_priv->cor_offset + 2);
319 mdelay(10);
320 writeb(corsave & ~COR_SRESET,
321 hw_priv->attr_mem + hw_priv->cor_offset);
322 mdelay(10);
323 }
324}
325
326
327static struct prism2_helper_functions prism2_plx_funcs =
328{
329 .card_present = NULL,
330 .cor_sreset = prism2_plx_cor_sreset,
331 .dev_open = NULL,
332 .dev_close = NULL,
333 .genesis_reset = prism2_plx_genesis_reset,
334 .hw_type = HOSTAP_HW_PLX,
335};
336
337
338static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
339 unsigned int *cor_offset,
340 unsigned int *cor_index)
341{
342#define CISTPL_CONFIG 0x1A
343#define CISTPL_MANFID 0x20
344#define CISTPL_END 0xFF
345#define CIS_MAX_LEN 256
346 u8 *cis;
347 int i, pos;
348 unsigned int rmsz, rasz, manfid1, manfid2;
349 struct prism2_plx_manfid *manfid;
350
351 cis = kmalloc(CIS_MAX_LEN, GFP_KERNEL);
352 if (cis == NULL)
353 return -ENOMEM;
354
355 /* read CIS; it is in even offsets in the beginning of attr_mem */
356 for (i = 0; i < CIS_MAX_LEN; i++)
357 cis[i] = readb(attr_mem + 2 * i);
358 printk(KERN_DEBUG "%s: CIS: %02x %02x %02x %02x %02x %02x ...\n",
359 dev_info, cis[0], cis[1], cis[2], cis[3], cis[4], cis[5]);
360
361 /* set reasonable defaults for Prism2 cards just in case CIS parsing
362 * fails */
363 *cor_offset = 0x3e0;
364 *cor_index = 0x01;
365 manfid1 = manfid2 = 0;
366
367 pos = 0;
368 while (pos < CIS_MAX_LEN - 1 && cis[pos] != CISTPL_END) {
369 if (pos + cis[pos + 1] >= CIS_MAX_LEN)
370 goto cis_error;
371
372 switch (cis[pos]) {
373 case CISTPL_CONFIG:
374 if (cis[pos + 1] < 1)
375 goto cis_error;
376 rmsz = (cis[pos + 2] & 0x3c) >> 2;
377 rasz = cis[pos + 2] & 0x03;
378 if (4 + rasz + rmsz > cis[pos + 1])
379 goto cis_error;
380 *cor_index = cis[pos + 3] & 0x3F;
381 *cor_offset = 0;
382 for (i = 0; i <= rasz; i++)
383 *cor_offset += cis[pos + 4 + i] << (8 * i);
384 printk(KERN_DEBUG "%s: cor_index=0x%x "
385 "cor_offset=0x%x\n", dev_info,
386 *cor_index, *cor_offset);
387 if (*cor_offset > attr_len) {
388 printk(KERN_ERR "%s: COR offset not within "
389 "attr_mem\n", dev_info);
390 kfree(cis);
391 return -1;
392 }
393 break;
394
395 case CISTPL_MANFID:
396 if (cis[pos + 1] < 4)
397 goto cis_error;
398 manfid1 = cis[pos + 2] + (cis[pos + 3] << 8);
399 manfid2 = cis[pos + 4] + (cis[pos + 5] << 8);
400 printk(KERN_DEBUG "%s: manfid=0x%04x, 0x%04x\n",
401 dev_info, manfid1, manfid2);
402 break;
403 }
404
405 pos += cis[pos + 1] + 2;
406 }
407
408 if (pos >= CIS_MAX_LEN || cis[pos] != CISTPL_END)
409 goto cis_error;
410
411 for (manfid = prism2_plx_known_manfids; manfid->manfid1 != 0; manfid++)
412 if (manfid1 == manfid->manfid1 && manfid2 == manfid->manfid2) {
413 kfree(cis);
414 return 0;
415 }
416
417 printk(KERN_INFO "%s: unknown manfid 0x%04x, 0x%04x - assuming this is"
418 " not a supported card\n", dev_info, manfid1, manfid2);
419 goto fail;
420
421 cis_error:
422 printk(KERN_WARNING "%s: invalid CIS data\n", dev_info);
423
424 fail:
425 kfree(cis);
426 if (ignore_cis) {
427 printk(KERN_INFO "%s: ignore_cis parameter set - ignoring "
428 "errors during CIS verification\n", dev_info);
429 return 0;
430 }
431 return -1;
432}
433
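/*
 * A worked example of the tuple walk above, using illustrative CIS bytes.
 * A CISTPL_CONFIG tuple of the form
 *
 *	1a 05 01 01 e0 03 ...
 *
 * has length 0x05 and size byte 0x01, giving rmsz=0 and rasz=1; the next
 * byte yields cor_index = 0x01 and the following rasz+1 bytes, taken
 * little-endian, yield cor_offset = 0xe0 + (0x03 << 8) = 0x3e0, i.e. the
 * same values used as defaults when parsing fails. A CISTPL_MANFID tuple
 * such as
 *
 *	20 04 6f 02 0b 03
 *
 * decodes to manfid1 = 0x026f and manfid2 = 0x030b, matching the Buffalo
 * WLI-CF-S11G entry in prism2_plx_known_manfids.
 */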
434
435static int prism2_plx_probe(struct pci_dev *pdev,
436 const struct pci_device_id *id)
437{
438 unsigned int pccard_ioaddr, plx_ioaddr;
439 unsigned long pccard_attr_mem;
440 unsigned int pccard_attr_len;
441 void __iomem *attr_mem = NULL;
442 unsigned int cor_offset, cor_index;
443 u32 reg;
444 local_info_t *local = NULL;
445 struct net_device *dev = NULL;
446 struct hostap_interface *iface;
447 static int cards_found /* = 0 */;
448 int irq_registered = 0;
449 int tmd7160;
450 struct hostap_plx_priv *hw_priv;
451
452 hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
453 if (hw_priv == NULL)
454 return -ENOMEM;
455 memset(hw_priv, 0, sizeof(*hw_priv));
456
457 if (pci_enable_device(pdev)) {
458 kfree(hw_priv); return -EIO; }
459
460 /* National Datacomm NCP130 based on TMD7160, not PLX9052. */
461 tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131);
462
463 plx_ioaddr = pci_resource_start(pdev, 1);
464 pccard_ioaddr = pci_resource_start(pdev, tmd7160 ? 2 : 3);
465
466 if (tmd7160) {
467 /* TMD7160 */
468 attr_mem = NULL; /* no access to PC Card attribute memory */
469
470 printk(KERN_INFO "TMD7160 PCI/PCMCIA adapter: io=0x%x, "
471 "irq=%d, pccard_io=0x%x\n",
472 plx_ioaddr, pdev->irq, pccard_ioaddr);
473
474 cor_offset = plx_ioaddr;
475 cor_index = 0x04;
476
477 outb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, plx_ioaddr);
478 mdelay(1);
479 reg = inb(plx_ioaddr);
480 if (reg != (cor_index | COR_LEVLREQ | COR_ENABLE_FUNC)) {
481 printk(KERN_ERR "%s: Error setting COR (expected="
482 "0x%02x, was=0x%02x)\n", dev_info,
483 cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, reg);
484 goto fail;
485 }
486 } else {
487 /* PLX9052 */
488 pccard_attr_mem = pci_resource_start(pdev, 2);
489 pccard_attr_len = pci_resource_len(pdev, 2);
490 if (pccard_attr_len < PLX_MIN_ATTR_LEN)
491 goto fail;
492
493
494 attr_mem = ioremap(pccard_attr_mem, pccard_attr_len);
495 if (attr_mem == NULL) {
496 printk(KERN_ERR "%s: cannot remap attr_mem\n",
497 dev_info);
498 goto fail;
499 }
500
501 printk(KERN_INFO "PLX9052 PCI/PCMCIA adapter: "
502 "mem=0x%lx, plx_io=0x%x, irq=%d, pccard_io=0x%x\n",
503 pccard_attr_mem, plx_ioaddr, pdev->irq, pccard_ioaddr);
504
505 if (prism2_plx_check_cis(attr_mem, pccard_attr_len,
506 &cor_offset, &cor_index)) {
507 printk(KERN_INFO "Unknown PC Card CIS - not a "
508 "Prism2/2.5 card?\n");
509 goto fail;
510 }
511
512 printk(KERN_DEBUG "Prism2/2.5 PC Card detected in PLX9052 "
513 "adapter\n");
514
515 /* Write COR to enable PC Card */
516 writeb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC,
517 attr_mem + cor_offset);
518
519 /* Enable PCI interrupts if they are not already enabled */
520 reg = inl(plx_ioaddr + PLX_INTCSR);
521 printk(KERN_DEBUG "PLX_INTCSR=0x%x\n", reg);
522 if (!(reg & PLX_INTCSR_PCI_INTEN)) {
523 outl(reg | PLX_INTCSR_PCI_INTEN,
524 plx_ioaddr + PLX_INTCSR);
525 if (!(inl(plx_ioaddr + PLX_INTCSR) &
526 PLX_INTCSR_PCI_INTEN)) {
527 printk(KERN_WARNING "%s: Could not enable "
528 "Local Interrupts\n", dev_info);
529 goto fail;
530 }
531 }
532
533 reg = inl(plx_ioaddr + PLX_CNTRL);
534 printk(KERN_DEBUG "PLX_CNTRL=0x%x (Serial EEPROM "
535 "present=%d)\n",
536 reg, (reg & PLX_CNTRL_SERIAL_EEPROM_PRESENT) != 0);
537 /* should set PLX_PCIIPR to 0x01 (INTA#) if Serial EEPROM is
538 * not present; but are there really such cards in use(?) */
539 }
540
541 dev = prism2_init_local_data(&prism2_plx_funcs, cards_found,
542 &pdev->dev);
543 if (dev == NULL)
544 goto fail;
545 iface = netdev_priv(dev);
546 local = iface->local;
547 local->hw_priv = hw_priv;
548 cards_found++;
549
550 dev->irq = pdev->irq;
551 dev->base_addr = pccard_ioaddr;
552 hw_priv->attr_mem = attr_mem;
553 hw_priv->cor_offset = cor_offset;
554
555 pci_set_drvdata(pdev, dev);
556
557 if (request_irq(dev->irq, prism2_interrupt, SA_SHIRQ, dev->name,
558 dev)) {
559 printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
560 goto fail;
561 } else
562 irq_registered = 1;
563
564 if (prism2_hw_config(dev, 1)) {
565 printk(KERN_DEBUG "%s: hardware initialization failed\n",
566 dev_info);
567 goto fail;
568 }
569
570 return hostap_hw_ready(dev);
571
572 fail:
573 kfree(hw_priv);
574 if (local)
575 local->hw_priv = NULL;
576 prism2_free_local_data(dev);
577
578 if (irq_registered && dev)
579 free_irq(dev->irq, dev);
580
581 if (attr_mem)
582 iounmap(attr_mem);
583
584 pci_disable_device(pdev);
585
586 return -ENODEV;
587}
588
589
590static void prism2_plx_remove(struct pci_dev *pdev)
591{
592 struct net_device *dev;
593 struct hostap_interface *iface;
594 struct hostap_plx_priv *hw_priv;
595
596 dev = pci_get_drvdata(pdev);
597 iface = netdev_priv(dev);
598 hw_priv = iface->local->hw_priv;
599
600 /* Reset the hardware, and ensure interrupts are disabled. */
601 prism2_plx_cor_sreset(iface->local);
602 hfa384x_disable_interrupts(dev);
603
604 if (hw_priv->attr_mem)
605 iounmap(hw_priv->attr_mem);
606 if (dev->irq)
607 free_irq(dev->irq, dev);
608
609 kfree(iface->local->hw_priv);
610 iface->local->hw_priv = NULL;
611 prism2_free_local_data(dev);
612 pci_disable_device(pdev);
613}
614
615
616MODULE_DEVICE_TABLE(pci, prism2_plx_id_table);
617
618static struct pci_driver prism2_plx_drv_id = {
619 .name = "prism2_plx",
620 .id_table = prism2_plx_id_table,
621 .probe = prism2_plx_probe,
622 .remove = prism2_plx_remove,
623 .suspend = NULL,
624 .resume = NULL,
625 .enable_wake = NULL
626};
627
628
629static int __init init_prism2_plx(void)
630{
631 printk(KERN_INFO "%s: %s\n", dev_info, version);
632
633 return pci_register_driver(&prism2_plx_drv_id);
634}
635
636
637static void __exit exit_prism2_plx(void)
638{
639 pci_unregister_driver(&prism2_plx_drv_id);
640 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
641}
642
643
644module_init(init_prism2_plx);
645module_exit(exit_prism2_plx);
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
new file mode 100644
index 000000000000..a0a4cbd4937a
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -0,0 +1,448 @@
1/* /proc routines for Host AP driver */
2
3#define PROC_LIMIT (PAGE_SIZE - 80)
4
5
6#ifndef PRISM2_NO_PROCFS_DEBUG
7static int prism2_debug_proc_read(char *page, char **start, off_t off,
8 int count, int *eof, void *data)
9{
10 char *p = page;
11 local_info_t *local = (local_info_t *) data;
12 int i;
13
14 if (off != 0) {
15 *eof = 1;
16 return 0;
17 }
18
19 p += sprintf(p, "next_txfid=%d next_alloc=%d\n",
20 local->next_txfid, local->next_alloc);
21 for (i = 0; i < PRISM2_TXFID_COUNT; i++)
22 p += sprintf(p, "FID: tx=%04X intransmit=%04X\n",
23 local->txfid[i], local->intransmitfid[i]);
24 p += sprintf(p, "FW TX rate control: %d\n", local->fw_tx_rate_control);
25 p += sprintf(p, "beacon_int=%d\n", local->beacon_int);
26 p += sprintf(p, "dtim_period=%d\n", local->dtim_period);
27 p += sprintf(p, "wds_max_connections=%d\n",
28 local->wds_max_connections);
29 p += sprintf(p, "dev_enabled=%d\n", local->dev_enabled);
30 p += sprintf(p, "sw_tick_stuck=%d\n", local->sw_tick_stuck);
31 for (i = 0; i < WEP_KEYS; i++) {
32 if (local->crypt[i] && local->crypt[i]->ops) {
33 p += sprintf(p, "crypt[%d]=%s\n",
34 i, local->crypt[i]->ops->name);
35 }
36 }
37 p += sprintf(p, "pri_only=%d\n", local->pri_only);
38 p += sprintf(p, "pci=%d\n", local->func->hw_type == HOSTAP_HW_PCI);
39 p += sprintf(p, "sram_type=%d\n", local->sram_type);
40 p += sprintf(p, "no_pri=%d\n", local->no_pri);
41
42 return (p - page);
43}
44#endif /* PRISM2_NO_PROCFS_DEBUG */
45
46
47static int prism2_stats_proc_read(char *page, char **start, off_t off,
48 int count, int *eof, void *data)
49{
50 char *p = page;
51 local_info_t *local = (local_info_t *) data;
52 struct comm_tallies_sums *sums = (struct comm_tallies_sums *)
53 &local->comm_tallies;
54
55 if (off != 0) {
56 *eof = 1;
57 return 0;
58 }
59
60 p += sprintf(p, "TxUnicastFrames=%u\n", sums->tx_unicast_frames);
61 p += sprintf(p, "TxMulticastframes=%u\n", sums->tx_multicast_frames);
62 p += sprintf(p, "TxFragments=%u\n", sums->tx_fragments);
63 p += sprintf(p, "TxUnicastOctets=%u\n", sums->tx_unicast_octets);
64 p += sprintf(p, "TxMulticastOctets=%u\n", sums->tx_multicast_octets);
65 p += sprintf(p, "TxDeferredTransmissions=%u\n",
66 sums->tx_deferred_transmissions);
67 p += sprintf(p, "TxSingleRetryFrames=%u\n",
68 sums->tx_single_retry_frames);
69 p += sprintf(p, "TxMultipleRetryFrames=%u\n",
70 sums->tx_multiple_retry_frames);
71 p += sprintf(p, "TxRetryLimitExceeded=%u\n",
72 sums->tx_retry_limit_exceeded);
73 p += sprintf(p, "TxDiscards=%u\n", sums->tx_discards);
74 p += sprintf(p, "RxUnicastFrames=%u\n", sums->rx_unicast_frames);
75 p += sprintf(p, "RxMulticastFrames=%u\n", sums->rx_multicast_frames);
76 p += sprintf(p, "RxFragments=%u\n", sums->rx_fragments);
77 p += sprintf(p, "RxUnicastOctets=%u\n", sums->rx_unicast_octets);
78 p += sprintf(p, "RxMulticastOctets=%u\n", sums->rx_multicast_octets);
79 p += sprintf(p, "RxFCSErrors=%u\n", sums->rx_fcs_errors);
80 p += sprintf(p, "RxDiscardsNoBuffer=%u\n",
81 sums->rx_discards_no_buffer);
82 p += sprintf(p, "TxDiscardsWrongSA=%u\n", sums->tx_discards_wrong_sa);
83 p += sprintf(p, "RxDiscardsWEPUndecryptable=%u\n",
84 sums->rx_discards_wep_undecryptable);
85 p += sprintf(p, "RxMessageInMsgFragments=%u\n",
86 sums->rx_message_in_msg_fragments);
87 p += sprintf(p, "RxMessageInBadMsgFragments=%u\n",
88 sums->rx_message_in_bad_msg_fragments);
89 /* FIX: this may grow too long for one page(?) */
90
91 return (p - page);
92}
93
94
95static int prism2_wds_proc_read(char *page, char **start, off_t off,
96 int count, int *eof, void *data)
97{
98 char *p = page;
99 local_info_t *local = (local_info_t *) data;
100 struct list_head *ptr;
101 struct hostap_interface *iface;
102
103 if (off > PROC_LIMIT) {
104 *eof = 1;
105 return 0;
106 }
107
108 read_lock_bh(&local->iface_lock);
109 list_for_each(ptr, &local->hostap_interfaces) {
110 iface = list_entry(ptr, struct hostap_interface, list);
111 if (iface->type != HOSTAP_INTERFACE_WDS)
112 continue;
113 p += sprintf(p, "%s\t" MACSTR "\n",
114 iface->dev->name,
115 MAC2STR(iface->u.wds.remote_addr));
116 if ((p - page) > PROC_LIMIT) {
117 printk(KERN_DEBUG "%s: wds proc did not fit\n",
118 local->dev->name);
119 break;
120 }
121 }
122 read_unlock_bh(&local->iface_lock);
123
124 if ((p - page) <= off) {
125 *eof = 1;
126 return 0;
127 }
128
129 *start = page + off;
130
131 return (p - page - off);
132}
133
134
135static int prism2_bss_list_proc_read(char *page, char **start, off_t off,
136 int count, int *eof, void *data)
137{
138 char *p = page;
139 local_info_t *local = (local_info_t *) data;
140 struct list_head *ptr;
141 struct hostap_bss_info *bss;
142 int i;
143
144 if (off > PROC_LIMIT) {
145 *eof = 1;
146 return 0;
147 }
148
149 p += sprintf(p, "#BSSID\tlast_update\tcount\tcapab_info\tSSID(txt)\t"
150 "SSID(hex)\tWPA IE\n");
151 spin_lock_bh(&local->lock);
152 list_for_each(ptr, &local->bss_list) {
153 bss = list_entry(ptr, struct hostap_bss_info, list);
154 p += sprintf(p, MACSTR "\t%lu\t%u\t0x%x\t",
155 MAC2STR(bss->bssid), bss->last_update,
156 bss->count, bss->capab_info);
157 for (i = 0; i < bss->ssid_len; i++) {
158 p += sprintf(p, "%c",
159 bss->ssid[i] >= 32 && bss->ssid[i] < 127 ?
160 bss->ssid[i] : '_');
161 }
162 p += sprintf(p, "\t");
163 for (i = 0; i < bss->ssid_len; i++) {
164 p += sprintf(p, "%02x", bss->ssid[i]);
165 }
166 p += sprintf(p, "\t");
167 for (i = 0; i < bss->wpa_ie_len; i++) {
168 p += sprintf(p, "%02x", bss->wpa_ie[i]);
169 }
170 p += sprintf(p, "\n");
171 if ((p - page) > PROC_LIMIT) {
172 printk(KERN_DEBUG "%s: BSS proc did not fit\n",
173 local->dev->name);
174 break;
175 }
176 }
177 spin_unlock_bh(&local->lock);
178
179 if ((p - page) <= off) {
180 *eof = 1;
181 return 0;
182 }
183
184 *start = page + off;
185
186 return (p - page - off);
187}
188
189
190static int prism2_crypt_proc_read(char *page, char **start, off_t off,
191 int count, int *eof, void *data)
192{
193 char *p = page;
194 local_info_t *local = (local_info_t *) data;
195 int i;
196
197 if (off > PROC_LIMIT) {
198 *eof = 1;
199 return 0;
200 }
201
202 p += sprintf(p, "tx_keyidx=%d\n", local->tx_keyidx);
203 for (i = 0; i < WEP_KEYS; i++) {
204 if (local->crypt[i] && local->crypt[i]->ops &&
205 local->crypt[i]->ops->print_stats) {
206 p = local->crypt[i]->ops->print_stats(
207 p, local->crypt[i]->priv);
208 }
209 }
210
211 if ((p - page) <= off) {
212 *eof = 1;
213 return 0;
214 }
215
216 *start = page + off;
217
218 return (p - page - off);
219}
220
221
222static int prism2_pda_proc_read(char *page, char **start, off_t off,
223 int count, int *eof, void *data)
224{
225 local_info_t *local = (local_info_t *) data;
226
227 if (local->pda == NULL || off >= PRISM2_PDA_SIZE) {
228 *eof = 1;
229 return 0;
230 }
231
232 if (off + count > PRISM2_PDA_SIZE)
233 count = PRISM2_PDA_SIZE - off;
234
235 memcpy(page, local->pda + off, count);
236 return count;
237}
238
239
240static int prism2_aux_dump_proc_read(char *page, char **start, off_t off,
241 int count, int *eof, void *data)
242{
243 local_info_t *local = (local_info_t *) data;
244
245 if (local->func->read_aux == NULL) {
246 *eof = 1;
247 return 0;
248 }
249
250 if (local->func->read_aux(local->dev, off, count, page)) {
251 *eof = 1;
252 return 0;
253 }
254 *start = page;
255
256 return count;
257}
258
259
260#ifdef PRISM2_IO_DEBUG
261static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
262 int count, int *eof, void *data)
263{
264 local_info_t *local = (local_info_t *) data;
265 int head = local->io_debug_head;
266 int start_bytes, left, copy, copied;
267
268 if (off + count > PRISM2_IO_DEBUG_SIZE * 4) {
269 *eof = 1;
270 if (off >= PRISM2_IO_DEBUG_SIZE * 4)
271 return 0;
272 count = PRISM2_IO_DEBUG_SIZE * 4 - off;
273 }
274
275 copied = 0;
276 start_bytes = (PRISM2_IO_DEBUG_SIZE - head) * 4;
277 left = count;
278
279 if (off < start_bytes) {
280 copy = start_bytes - off;
281 if (copy > count)
282 copy = count;
283 memcpy(page, ((u8 *) &local->io_debug[head]) + off, copy);
284 left -= copy;
285 if (left > 0)
286 memcpy(&page[copy], local->io_debug, left);
287 } else {
288 memcpy(page, ((u8 *) local->io_debug) + (off - start_bytes),
289 left);
290 }
291
292 *start = page;
293
294 return count;
295}
296#endif /* PRISM2_IO_DEBUG */
297
298
299#ifndef PRISM2_NO_STATION_MODES
300static int prism2_scan_results_proc_read(char *page, char **start, off_t off,
301 int count, int *eof, void *data)
302{
303 char *p = page;
304 local_info_t *local = (local_info_t *) data;
305 int entry, i, len, total = 0;
306 struct hfa384x_hostscan_result *scanres;
307 u8 *pos;
308
309 p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates "
310 "SSID\n");
311
312 spin_lock_bh(&local->lock);
313 for (entry = 0; entry < local->last_scan_results_count; entry++) {
314 scanres = &local->last_scan_results[entry];
315
316 if (total + (p - page) <= off) {
317 total += p - page;
318 p = page;
319 }
320 if (total + (p - page) > off + count)
321 break;
322 if ((p - page) > (PAGE_SIZE - 200))
323 break;
324
325 p += sprintf(p, "%d %d %d %d 0x%02x %d " MACSTR " %d ",
326 le16_to_cpu(scanres->chid),
327 (s16) le16_to_cpu(scanres->anl),
328 (s16) le16_to_cpu(scanres->sl),
329 le16_to_cpu(scanres->beacon_interval),
330 le16_to_cpu(scanres->capability),
331 le16_to_cpu(scanres->rate),
332 MAC2STR(scanres->bssid),
333 le16_to_cpu(scanres->atim));
334
335 pos = scanres->sup_rates;
336 for (i = 0; i < sizeof(scanres->sup_rates); i++) {
337 if (pos[i] == 0)
338 break;
339 p += sprintf(p, "<%02x>", pos[i]);
340 }
341 p += sprintf(p, " ");
342
343 pos = scanres->ssid;
344 len = le16_to_cpu(scanres->ssid_len);
345 if (len > 32)
346 len = 32;
347 for (i = 0; i < len; i++) {
348 unsigned char c = pos[i];
349 if (c >= 32 && c < 127)
350 p += sprintf(p, "%c", c);
351 else
352 p += sprintf(p, "<%02x>", c);
353 }
354 p += sprintf(p, "\n");
355 }
356 spin_unlock_bh(&local->lock);
357
358 total += (p - page);
359 if (total >= off + count)
360 *eof = 1;
361
362 if (total < off) {
363 *eof = 1;
364 return 0;
365 }
366
367 len = total - off;
368 if (len > (p - page))
369 len = p - page;
370 *start = p - len;
371 if (len > count)
372 len = count;
373
374 return len;
375}
376#endif /* PRISM2_NO_STATION_MODES */
377
378
379void hostap_init_proc(local_info_t *local)
380{
381 local->proc = NULL;
382
383 if (hostap_proc == NULL) {
384 printk(KERN_WARNING "%s: hostap proc directory not created\n",
385 local->dev->name);
386 return;
387 }
388
389 local->proc = proc_mkdir(local->ddev->name, hostap_proc);
390 if (local->proc == NULL) {
391 printk(KERN_INFO "/proc/net/hostap/%s creation failed\n",
392 local->ddev->name);
393 return;
394 }
395
396#ifndef PRISM2_NO_PROCFS_DEBUG
397 create_proc_read_entry("debug", 0, local->proc,
398 prism2_debug_proc_read, local);
399#endif /* PRISM2_NO_PROCFS_DEBUG */
400 create_proc_read_entry("stats", 0, local->proc,
401 prism2_stats_proc_read, local);
402 create_proc_read_entry("wds", 0, local->proc,
403 prism2_wds_proc_read, local);
404 create_proc_read_entry("pda", 0, local->proc,
405 prism2_pda_proc_read, local);
406 create_proc_read_entry("aux_dump", 0, local->proc,
407 prism2_aux_dump_proc_read, local);
408 create_proc_read_entry("bss_list", 0, local->proc,
409 prism2_bss_list_proc_read, local);
410 create_proc_read_entry("crypt", 0, local->proc,
411 prism2_crypt_proc_read, local);
412#ifdef PRISM2_IO_DEBUG
413 create_proc_read_entry("io_debug", 0, local->proc,
414 prism2_io_debug_proc_read, local);
415#endif /* PRISM2_IO_DEBUG */
416#ifndef PRISM2_NO_STATION_MODES
417 create_proc_read_entry("scan_results", 0, local->proc,
418 prism2_scan_results_proc_read, local);
419#endif /* PRISM2_NO_STATION_MODES */
420}
421
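/*
 * The entries registered above appear as read-only files under
 * /proc/net/hostap/<ifname>/ (stats, wds, pda, aux_dump, bss_list and crypt,
 * plus debug, io_debug and scan_results when the corresponding compile
 * options allow them). Most are plain text; pda, aux_dump and io_debug
 * return raw bytes. For example, assuming an interface named wlan0, the
 * communication tallies can be inspected with:
 *
 *	cat /proc/net/hostap/wlan0/stats
 */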
422
423void hostap_remove_proc(local_info_t *local)
424{
425 if (local->proc != NULL) {
426#ifndef PRISM2_NO_STATION_MODES
427 remove_proc_entry("scan_results", local->proc);
428#endif /* PRISM2_NO_STATION_MODES */
429#ifdef PRISM2_IO_DEBUG
430 remove_proc_entry("io_debug", local->proc);
431#endif /* PRISM2_IO_DEBUG */
432 remove_proc_entry("pda", local->proc);
433 remove_proc_entry("aux_dump", local->proc);
434 remove_proc_entry("wds", local->proc);
435 remove_proc_entry("stats", local->proc);
436 remove_proc_entry("bss_list", local->proc);
437 remove_proc_entry("crypt", local->proc);
438#ifndef PRISM2_NO_PROCFS_DEBUG
439 remove_proc_entry("debug", local->proc);
440#endif /* PRISM2_NO_PROCFS_DEBUG */
441 if (hostap_proc != NULL)
442 remove_proc_entry(local->proc->name, hostap_proc);
443 }
444}
445
446
447EXPORT_SYMBOL(hostap_init_proc);
448EXPORT_SYMBOL(hostap_remove_proc);
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
new file mode 100644
index 000000000000..cc061e1560d3
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -0,0 +1,1033 @@
1#ifndef HOSTAP_WLAN_H
2#define HOSTAP_WLAN_H
3
4#include "hostap_config.h"
5#include "hostap_common.h"
6
7#define MAX_PARM_DEVICES 8
8#define PARM_MIN_MAX "1-" __MODULE_STRING(MAX_PARM_DEVICES)
9#define DEF_INTS -1, -1, -1, -1, -1, -1, -1
10#define GET_INT_PARM(var,idx) var[var[idx] < 0 ? 0 : idx]
11
12
13/* Specific skb->protocol value that indicates that the packet already contains
14 * txdesc header.
15 * FIX: This might need its own value, allocated specifically for the Prism2
16 * txdesc; ETH_P_CONTROL is commented as "Card specific control frames".
17 * However, these skbs should take only a minimal path through the kernel since
18 * prism2_send_mgmt() sends them with dev_queue_xmit() to prism2_tx(). */
19#define ETH_P_HOSTAP ETH_P_CONTROL
20
21/* ARPHRD_IEEE80211_PRISM uses a bloated version of Prism2 RX frame header
22 * (from linux-wlan-ng) */
23struct linux_wlan_ng_val {
24 u32 did;
25 u16 status, len;
26 u32 data;
27} __attribute__ ((packed));
28
29struct linux_wlan_ng_prism_hdr {
30 u32 msgcode, msglen;
31 char devname[16];
32 struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
33 noise, rate, istx, frmlen;
34} __attribute__ ((packed));
35
36struct linux_wlan_ng_cap_hdr {
37 u32 version;
38 u32 length;
39 u64 mactime;
40 u64 hosttime;
41 u32 phytype;
42 u32 channel;
43 u32 datarate;
44 u32 antenna;
45 u32 priority;
46 u32 ssi_type;
47 s32 ssi_signal;
48 s32 ssi_noise;
49 u32 preamble;
50 u32 encoding;
51} __attribute__ ((packed));
52
53#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */
54#define LWNG_CAPHDR_VERSION 0x80211001
55
56struct hfa384x_rx_frame {
57 /* HFA384X RX frame descriptor */
58 u16 status; /* HFA384X_RX_STATUS_ flags */
59 u32 time; /* timestamp, 1 microsecond resolution */
60 u8 silence; /* 27 .. 154; seems to be 0 */
61 u8 signal; /* 27 .. 154 */
62 u8 rate; /* 10, 20, 55, or 110 */
63 u8 rxflow;
64 u32 reserved;
65
66 /* 802.11 */
67 u16 frame_control;
68 u16 duration_id;
69 u8 addr1[6];
70 u8 addr2[6];
71 u8 addr3[6];
72 u16 seq_ctrl;
73 u8 addr4[6];
74 u16 data_len;
75
76 /* 802.3 */
77 u8 dst_addr[6];
78 u8 src_addr[6];
79 u16 len;
80
81 /* followed by frame data; max 2304 bytes */
82} __attribute__ ((packed));
83
84
85struct hfa384x_tx_frame {
86 /* HFA384X TX frame descriptor */
87 u16 status; /* HFA384X_TX_STATUS_ flags */
88 u16 reserved1;
89 u16 reserved2;
90 u32 sw_support;
91 u8 retry_count; /* not yet implemented */
92 u8 tx_rate; /* Host AP only; 0 = firmware, or 10, 20, 55, 110 */
93 u16 tx_control; /* HFA384X_TX_CTRL_ flags */
94
95 /* 802.11 */
96 u16 frame_control; /* parts not used */
97 u16 duration_id;
98 u8 addr1[6];
99 u8 addr2[6]; /* filled by firmware */
100 u8 addr3[6];
101 u16 seq_ctrl; /* filled by firmware */
102 u8 addr4[6];
103 u16 data_len;
104
105 /* 802.3 */
106 u8 dst_addr[6];
107 u8 src_addr[6];
108 u16 len;
109
110 /* followed by frame data; max 2304 bytes */
111} __attribute__ ((packed));
112
113
114struct hfa384x_rid_hdr
115{
116 u16 len;
117 u16 rid;
118} __attribute__ ((packed));
119
120
121/* Macro for converting signal levels (range 27 .. 154) to wireless ext
122 * dBm value with some accuracy */
123#define HFA384X_LEVEL_TO_dBm(v) (0x100 + (v) * 100 / 255 - 100)
124
125#define HFA384X_LEVEL_TO_dBm_sign(v) ((v) * 100 / 255 - 100)
126
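/* Worked example: the maximum reported level v = 154 gives
 * 0x100 + 154 * 100 / 255 - 100 = 0x100 + 60 - 100 = 216 (0xd8), i.e. -40
 * when the low eight bits are read as a signed dBm value, while the minimum
 * v = 27 maps to roughly -90 dBm. The 0x100 offset appears to be there only
 * to keep the intermediate value positive before it is truncated into the
 * 8-bit wireless extensions quality fields; HFA384X_LEVEL_TO_dBm_sign()
 * yields the same quantity without that offset. */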
127struct hfa384x_scan_request {
128 u16 channel_list;
129 u16 txrate; /* HFA384X_RATES_* */
130} __attribute__ ((packed));
131
132struct hfa384x_hostscan_request {
133 u16 channel_list;
134 u16 txrate;
135 u16 target_ssid_len;
136 u8 target_ssid[32];
137} __attribute__ ((packed));
138
139struct hfa384x_join_request {
140 u8 bssid[6];
141 u16 channel;
142} __attribute__ ((packed));
143
144struct hfa384x_info_frame {
145 u16 len;
146 u16 type;
147} __attribute__ ((packed));
148
149struct hfa384x_comm_tallies {
150 u16 tx_unicast_frames;
151 u16 tx_multicast_frames;
152 u16 tx_fragments;
153 u16 tx_unicast_octets;
154 u16 tx_multicast_octets;
155 u16 tx_deferred_transmissions;
156 u16 tx_single_retry_frames;
157 u16 tx_multiple_retry_frames;
158 u16 tx_retry_limit_exceeded;
159 u16 tx_discards;
160 u16 rx_unicast_frames;
161 u16 rx_multicast_frames;
162 u16 rx_fragments;
163 u16 rx_unicast_octets;
164 u16 rx_multicast_octets;
165 u16 rx_fcs_errors;
166 u16 rx_discards_no_buffer;
167 u16 tx_discards_wrong_sa;
168 u16 rx_discards_wep_undecryptable;
169 u16 rx_message_in_msg_fragments;
170 u16 rx_message_in_bad_msg_fragments;
171} __attribute__ ((packed));
172
173struct hfa384x_comm_tallies32 {
174 u32 tx_unicast_frames;
175 u32 tx_multicast_frames;
176 u32 tx_fragments;
177 u32 tx_unicast_octets;
178 u32 tx_multicast_octets;
179 u32 tx_deferred_transmissions;
180 u32 tx_single_retry_frames;
181 u32 tx_multiple_retry_frames;
182 u32 tx_retry_limit_exceeded;
183 u32 tx_discards;
184 u32 rx_unicast_frames;
185 u32 rx_multicast_frames;
186 u32 rx_fragments;
187 u32 rx_unicast_octets;
188 u32 rx_multicast_octets;
189 u32 rx_fcs_errors;
190 u32 rx_discards_no_buffer;
191 u32 tx_discards_wrong_sa;
192 u32 rx_discards_wep_undecryptable;
193 u32 rx_message_in_msg_fragments;
194 u32 rx_message_in_bad_msg_fragments;
195} __attribute__ ((packed));
196
197struct hfa384x_scan_result_hdr {
198 u16 reserved;
199 u16 scan_reason;
200#define HFA384X_SCAN_IN_PROGRESS 0 /* no results available yet */
201#define HFA384X_SCAN_HOST_INITIATED 1
202#define HFA384X_SCAN_FIRMWARE_INITIATED 2
203#define HFA384X_SCAN_INQUIRY_FROM_HOST 3
204} __attribute__ ((packed));
205
206#define HFA384X_SCAN_MAX_RESULTS 32
207
208struct hfa384x_scan_result {
209 u16 chid;
210 u16 anl;
211 u16 sl;
212 u8 bssid[6];
213 u16 beacon_interval;
214 u16 capability;
215 u16 ssid_len;
216 u8 ssid[32];
217 u8 sup_rates[10];
218 u16 rate;
219} __attribute__ ((packed));
220
221struct hfa384x_hostscan_result {
222 u16 chid;
223 u16 anl;
224 u16 sl;
225 u8 bssid[6];
226 u16 beacon_interval;
227 u16 capability;
228 u16 ssid_len;
229 u8 ssid[32];
230 u8 sup_rates[10];
231 u16 rate;
232 u16 atim;
233} __attribute__ ((packed));
234
235struct comm_tallies_sums {
236 unsigned int tx_unicast_frames;
237 unsigned int tx_multicast_frames;
238 unsigned int tx_fragments;
239 unsigned int tx_unicast_octets;
240 unsigned int tx_multicast_octets;
241 unsigned int tx_deferred_transmissions;
242 unsigned int tx_single_retry_frames;
243 unsigned int tx_multiple_retry_frames;
244 unsigned int tx_retry_limit_exceeded;
245 unsigned int tx_discards;
246 unsigned int rx_unicast_frames;
247 unsigned int rx_multicast_frames;
248 unsigned int rx_fragments;
249 unsigned int rx_unicast_octets;
250 unsigned int rx_multicast_octets;
251 unsigned int rx_fcs_errors;
252 unsigned int rx_discards_no_buffer;
253 unsigned int tx_discards_wrong_sa;
254 unsigned int rx_discards_wep_undecryptable;
255 unsigned int rx_message_in_msg_fragments;
256 unsigned int rx_message_in_bad_msg_fragments;
257};
258
259
260struct hfa384x_regs {
261 u16 cmd;
262 u16 evstat;
263 u16 offset0;
264 u16 offset1;
265 u16 swsupport0;
266};
267
268
269#if defined(PRISM2_PCCARD) || defined(PRISM2_PLX)
270/* I/O ports for HFA384X Controller access */
271#define HFA384X_CMD_OFF 0x00
272#define HFA384X_PARAM0_OFF 0x02
273#define HFA384X_PARAM1_OFF 0x04
274#define HFA384X_PARAM2_OFF 0x06
275#define HFA384X_STATUS_OFF 0x08
276#define HFA384X_RESP0_OFF 0x0A
277#define HFA384X_RESP1_OFF 0x0C
278#define HFA384X_RESP2_OFF 0x0E
279#define HFA384X_INFOFID_OFF 0x10
280#define HFA384X_CONTROL_OFF 0x14
281#define HFA384X_SELECT0_OFF 0x18
282#define HFA384X_SELECT1_OFF 0x1A
283#define HFA384X_OFFSET0_OFF 0x1C
284#define HFA384X_OFFSET1_OFF 0x1E
285#define HFA384X_RXFID_OFF 0x20
286#define HFA384X_ALLOCFID_OFF 0x22
287#define HFA384X_TXCOMPLFID_OFF 0x24
288#define HFA384X_SWSUPPORT0_OFF 0x28
289#define HFA384X_SWSUPPORT1_OFF 0x2A
290#define HFA384X_SWSUPPORT2_OFF 0x2C
291#define HFA384X_EVSTAT_OFF 0x30
292#define HFA384X_INTEN_OFF 0x32
293#define HFA384X_EVACK_OFF 0x34
294#define HFA384X_DATA0_OFF 0x36
295#define HFA384X_DATA1_OFF 0x38
296#define HFA384X_AUXPAGE_OFF 0x3A
297#define HFA384X_AUXOFFSET_OFF 0x3C
298#define HFA384X_AUXDATA_OFF 0x3E
299#endif /* PRISM2_PCCARD || PRISM2_PLX */
300
301#ifdef PRISM2_PCI
302/* Memory addresses for ISL3874 controller access */
303#define HFA384X_CMD_OFF 0x00
304#define HFA384X_PARAM0_OFF 0x04
305#define HFA384X_PARAM1_OFF 0x08
306#define HFA384X_PARAM2_OFF 0x0C
307#define HFA384X_STATUS_OFF 0x10
308#define HFA384X_RESP0_OFF 0x14
309#define HFA384X_RESP1_OFF 0x18
310#define HFA384X_RESP2_OFF 0x1C
311#define HFA384X_INFOFID_OFF 0x20
312#define HFA384X_CONTROL_OFF 0x28
313#define HFA384X_SELECT0_OFF 0x30
314#define HFA384X_SELECT1_OFF 0x34
315#define HFA384X_OFFSET0_OFF 0x38
316#define HFA384X_OFFSET1_OFF 0x3C
317#define HFA384X_RXFID_OFF 0x40
318#define HFA384X_ALLOCFID_OFF 0x44
319#define HFA384X_TXCOMPLFID_OFF 0x48
320#define HFA384X_PCICOR_OFF 0x4C
321#define HFA384X_SWSUPPORT0_OFF 0x50
322#define HFA384X_SWSUPPORT1_OFF 0x54
323#define HFA384X_SWSUPPORT2_OFF 0x58
324#define HFA384X_PCIHCR_OFF 0x5C
325#define HFA384X_EVSTAT_OFF 0x60
326#define HFA384X_INTEN_OFF 0x64
327#define HFA384X_EVACK_OFF 0x68
328#define HFA384X_DATA0_OFF 0x6C
329#define HFA384X_DATA1_OFF 0x70
330#define HFA384X_AUXPAGE_OFF 0x74
331#define HFA384X_AUXOFFSET_OFF 0x78
332#define HFA384X_AUXDATA_OFF 0x7C
333#define HFA384X_PCI_M0_ADDRH_OFF 0x80
334#define HFA384X_PCI_M0_ADDRL_OFF 0x84
335#define HFA384X_PCI_M0_LEN_OFF 0x88
336#define HFA384X_PCI_M0_CTL_OFF 0x8C
337#define HFA384X_PCI_STATUS_OFF 0x98
338#define HFA384X_PCI_M1_ADDRH_OFF 0xA0
339#define HFA384X_PCI_M1_ADDRL_OFF 0xA4
340#define HFA384X_PCI_M1_LEN_OFF 0xA8
341#define HFA384X_PCI_M1_CTL_OFF 0xAC
342
343/* PCI bus master control bits (these are undocumented; based on guessing and
344 * experimenting..) */
345#define HFA384X_PCI_CTL_FROM_BAP (BIT(5) | BIT(1) | BIT(0))
346#define HFA384X_PCI_CTL_TO_BAP (BIT(5) | BIT(0))
347
348#endif /* PRISM2_PCI */
349
350
351/* Command codes for CMD reg. */
352#define HFA384X_CMDCODE_INIT 0x00
353#define HFA384X_CMDCODE_ENABLE 0x01
354#define HFA384X_CMDCODE_DISABLE 0x02
355#define HFA384X_CMDCODE_ALLOC 0x0A
356#define HFA384X_CMDCODE_TRANSMIT 0x0B
357#define HFA384X_CMDCODE_INQUIRE 0x11
358#define HFA384X_CMDCODE_ACCESS 0x21
359#define HFA384X_CMDCODE_ACCESS_WRITE (0x21 | BIT(8))
360#define HFA384X_CMDCODE_DOWNLOAD 0x22
361#define HFA384X_CMDCODE_READMIF 0x30
362#define HFA384X_CMDCODE_WRITEMIF 0x31
363#define HFA384X_CMDCODE_TEST 0x38
364
365#define HFA384X_CMDCODE_MASK 0x3F
366
367/* Test mode operations */
368#define HFA384X_TEST_CHANGE_CHANNEL 0x08
369#define HFA384X_TEST_MONITOR 0x0B
370#define HFA384X_TEST_STOP 0x0F
371#define HFA384X_TEST_CFG_BITS 0x15
372#define HFA384X_TEST_CFG_BIT_ALC BIT(3)
373
374#define HFA384X_CMD_BUSY BIT(15)
375
376#define HFA384X_CMD_TX_RECLAIM BIT(8)
377
378#define HFA384X_OFFSET_ERR BIT(14)
379#define HFA384X_OFFSET_BUSY BIT(15)
380
381
382/* ProgMode for download command */
383#define HFA384X_PROGMODE_DISABLE 0
384#define HFA384X_PROGMODE_ENABLE_VOLATILE 1
385#define HFA384X_PROGMODE_ENABLE_NON_VOLATILE 2
386#define HFA384X_PROGMODE_PROGRAM_NON_VOLATILE 3
387
388#define HFA384X_AUX_MAGIC0 0xfe01
389#define HFA384X_AUX_MAGIC1 0xdc23
390#define HFA384X_AUX_MAGIC2 0xba45
391
392#define HFA384X_AUX_PORT_DISABLED 0
393#define HFA384X_AUX_PORT_DISABLE BIT(14)
394#define HFA384X_AUX_PORT_ENABLE BIT(15)
395#define HFA384X_AUX_PORT_ENABLED (BIT(14) | BIT(15))
396#define HFA384X_AUX_PORT_MASK (BIT(14) | BIT(15))
397
398#define PRISM2_PDA_SIZE 1024
399
400
401/* Events; EvStat, Interrupt mask (IntEn), and acknowledge bits (EvAck) */
402#define HFA384X_EV_TICK BIT(15)
403#define HFA384X_EV_WTERR BIT(14)
404#define HFA384X_EV_INFDROP BIT(13)
405#ifdef PRISM2_PCI
406#define HFA384X_EV_PCI_M1 BIT(9)
407#define HFA384X_EV_PCI_M0 BIT(8)
408#endif /* PRISM2_PCI */
409#define HFA384X_EV_INFO BIT(7)
410#define HFA384X_EV_DTIM BIT(5)
411#define HFA384X_EV_CMD BIT(4)
412#define HFA384X_EV_ALLOC BIT(3)
413#define HFA384X_EV_TXEXC BIT(2)
414#define HFA384X_EV_TX BIT(1)
415#define HFA384X_EV_RX BIT(0)
416
417
418/* HFA384X Information frames */
419#define HFA384X_INFO_HANDOVERADDR 0xF000 /* AP f/w ? */
420#define HFA384X_INFO_HANDOVERDEAUTHADDR 0xF001 /* AP f/w 1.3.7 */
421#define HFA384X_INFO_COMMTALLIES 0xF100
422#define HFA384X_INFO_SCANRESULTS 0xF101
423#define HFA384X_INFO_CHANNELINFORESULTS 0xF102 /* AP f/w only */
424#define HFA384X_INFO_HOSTSCANRESULTS 0xF103
425#define HFA384X_INFO_LINKSTATUS 0xF200
426#define HFA384X_INFO_ASSOCSTATUS 0xF201 /* ? */
427#define HFA384X_INFO_AUTHREQ 0xF202 /* ? */
428#define HFA384X_INFO_PSUSERCNT 0xF203 /* ? */
429#define HFA384X_INFO_KEYIDCHANGED 0xF204 /* ? */
430
431enum { HFA384X_LINKSTATUS_CONNECTED = 1,
432 HFA384X_LINKSTATUS_DISCONNECTED = 2,
433 HFA384X_LINKSTATUS_AP_CHANGE = 3,
434 HFA384X_LINKSTATUS_AP_OUT_OF_RANGE = 4,
435 HFA384X_LINKSTATUS_AP_IN_RANGE = 5,
436 HFA384X_LINKSTATUS_ASSOC_FAILED = 6 };
437
438enum { HFA384X_PORTTYPE_BSS = 1, HFA384X_PORTTYPE_WDS = 2,
439 HFA384X_PORTTYPE_PSEUDO_IBSS = 3, HFA384X_PORTTYPE_IBSS = 0,
440 HFA384X_PORTTYPE_HOSTAP = 6 };
441
442#define HFA384X_RATES_1MBPS BIT(0)
443#define HFA384X_RATES_2MBPS BIT(1)
444#define HFA384X_RATES_5MBPS BIT(2)
445#define HFA384X_RATES_11MBPS BIT(3)
446
447#define HFA384X_ROAMING_FIRMWARE 1
448#define HFA384X_ROAMING_HOST 2
449#define HFA384X_ROAMING_DISABLED 3
450
451#define HFA384X_WEPFLAGS_PRIVACYINVOKED BIT(0)
452#define HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED BIT(1)
453#define HFA384X_WEPFLAGS_HOSTENCRYPT BIT(4)
454#define HFA384X_WEPFLAGS_HOSTDECRYPT BIT(7)
455
456#define HFA384X_RX_STATUS_MSGTYPE (BIT(15) | BIT(14) | BIT(13))
457#define HFA384X_RX_STATUS_PCF BIT(12)
458#define HFA384X_RX_STATUS_MACPORT (BIT(10) | BIT(9) | BIT(8))
459#define HFA384X_RX_STATUS_UNDECR BIT(1)
460#define HFA384X_RX_STATUS_FCSERR BIT(0)
461
462#define HFA384X_RX_STATUS_GET_MSGTYPE(s) \
463(((s) & HFA384X_RX_STATUS_MSGTYPE) >> 13)
464#define HFA384X_RX_STATUS_GET_MACPORT(s) \
465(((s) & HFA384X_RX_STATUS_MACPORT) >> 8)
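/* Worked example: an RX status word of 0x2100 gives
 * HFA384X_RX_STATUS_GET_MSGTYPE() == 1 (HFA384X_RX_MSGTYPE_RFC1042, defined
 * below) and HFA384X_RX_STATUS_GET_MACPORT() == 1, i.e. an RFC 1042
 * encapsulated frame received on MAC port 1. */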
466
467enum { HFA384X_RX_MSGTYPE_NORMAL = 0, HFA384X_RX_MSGTYPE_RFC1042 = 1,
468 HFA384X_RX_MSGTYPE_BRIDGETUNNEL = 2, HFA384X_RX_MSGTYPE_MGMT = 4 };
469
470
471#define HFA384X_TX_CTRL_ALT_RTRY BIT(5)
472#define HFA384X_TX_CTRL_802_11 BIT(3)
473#define HFA384X_TX_CTRL_802_3 0
474#define HFA384X_TX_CTRL_TX_EX BIT(2)
475#define HFA384X_TX_CTRL_TX_OK BIT(1)
476
477#define HFA384X_TX_STATUS_RETRYERR BIT(0)
478#define HFA384X_TX_STATUS_AGEDERR BIT(1)
479#define HFA384X_TX_STATUS_DISCON BIT(2)
480#define HFA384X_TX_STATUS_FORMERR BIT(3)
481
482/* HFA3861/3863 (BBP) Control Registers */
483#define HFA386X_CR_TX_CONFIGURE 0x12 /* CR9 */
484#define HFA386X_CR_RX_CONFIGURE 0x14 /* CR10 */
485#define HFA386X_CR_A_D_TEST_MODES2 0x1A /* CR13 */
486#define HFA386X_CR_MANUAL_TX_POWER 0x3E /* CR31 */
487#define HFA386X_CR_MEASURED_TX_POWER 0x74 /* CR58 */
488
489
490#ifdef __KERNEL__
491
492#define PRISM2_TXFID_COUNT 8
493#define PRISM2_DATA_MAXLEN 2304
494#define PRISM2_TXFID_LEN (PRISM2_DATA_MAXLEN + sizeof(struct hfa384x_tx_frame))
495#define PRISM2_TXFID_EMPTY 0xffff
496#define PRISM2_TXFID_RESERVED 0xfffe
497#define PRISM2_DUMMY_FID 0xffff
498#define MAX_SSID_LEN 32
499#define MAX_NAME_LEN 32 /* this is assumed to be equal to MAX_SSID_LEN */
500
501#define PRISM2_DUMP_RX_HDR BIT(0)
502#define PRISM2_DUMP_TX_HDR BIT(1)
503#define PRISM2_DUMP_TXEXC_HDR BIT(2)
504
505struct hostap_tx_callback_info {
506 u16 idx;
507 void (*func)(struct sk_buff *, int ok, void *);
508 void *data;
509 struct hostap_tx_callback_info *next;
510};
511
512
513/* IEEE 802.11 requires that STA supports concurrent reception of at least
514 * three fragmented frames. This define can be increased to support more
515 * concurrent frames, but it should be noted that each entry can consume about
516 * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
517#define PRISM2_FRAG_CACHE_LEN 4
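/* Rough sizing illustration based on the note above: with the default of 4
 * entries at about 2 kB each, the fragment cache can hold on the order of
 * 8 kB of buffered skb data in the worst case. */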
518
519struct prism2_frag_entry {
520 unsigned long first_frag_time;
521 unsigned int seq;
522 unsigned int last_frag;
523 struct sk_buff *skb;
524 u8 src_addr[ETH_ALEN];
525 u8 dst_addr[ETH_ALEN];
526};
527
528
529struct hostap_cmd_queue {
530 struct list_head list;
531 wait_queue_head_t compl;
532 volatile enum { CMD_SLEEP, CMD_CALLBACK, CMD_COMPLETED } type;
533 void (*callback)(struct net_device *dev, long context, u16 resp0,
534 u16 res);
535 long context;
536 u16 cmd, param0, param1;
537 u16 resp0, res;
538 volatile int issued, issuing;
539
540 atomic_t usecnt;
541 int del_req;
542};
543
544/* options for hw_shutdown */
545#define HOSTAP_HW_NO_DISABLE BIT(0)
546#define HOSTAP_HW_ENABLE_CMDCOMPL BIT(1)
547
548typedef struct local_info local_info_t;
549
550struct prism2_helper_functions {
551 /* these functions are defined in hardware model specific files
552	 * (hostap_{cs,plx,pci}.c) */
553 int (*card_present)(local_info_t *local);
554 void (*cor_sreset)(local_info_t *local);
555 int (*dev_open)(local_info_t *local);
556 int (*dev_close)(local_info_t *local);
557 void (*genesis_reset)(local_info_t *local, int hcr);
558
559 /* the following functions are from hostap_hw.c, but they may have some
560 * hardware model specific code */
561
562 /* FIX: low-level commands like cmd might disappear at some point to
563 * make it easier to change them if needed (e.g., cmd would be replaced
564 * with write_mif/read_mif/testcmd/inquire); at least get_rid and
565 * set_rid might move to hostap_{cs,plx,pci}.c */
566 int (*cmd)(struct net_device *dev, u16 cmd, u16 param0, u16 *param1,
567 u16 *resp0);
568 void (*read_regs)(struct net_device *dev, struct hfa384x_regs *regs);
569 int (*get_rid)(struct net_device *dev, u16 rid, void *buf, int len,
570 int exact_len);
571 int (*set_rid)(struct net_device *dev, u16 rid, void *buf, int len);
572 int (*hw_enable)(struct net_device *dev, int initial);
573 int (*hw_config)(struct net_device *dev, int initial);
574 void (*hw_reset)(struct net_device *dev);
575 void (*hw_shutdown)(struct net_device *dev, int no_disable);
576 int (*reset_port)(struct net_device *dev);
577 void (*schedule_reset)(local_info_t *local);
578 int (*download)(local_info_t *local,
579 struct prism2_download_param *param);
580 int (*tx)(struct sk_buff *skb, struct net_device *dev);
581 int (*set_tim)(struct net_device *dev, int aid, int set);
582 int (*read_aux)(struct net_device *dev, unsigned addr, int len,
583 u8 *buf);
584
585 int need_tx_headroom; /* number of bytes of headroom needed before
586 * IEEE 802.11 header */
587 enum { HOSTAP_HW_PCCARD, HOSTAP_HW_PLX, HOSTAP_HW_PCI } hw_type;
588};
589
590
591struct prism2_download_data {
592 u32 dl_cmd;
593 u32 start_addr;
594 u32 num_areas;
595 struct prism2_download_data_area {
596 u32 addr; /* wlan card address */
597 u32 len;
598 u8 *data; /* allocated data */
599 } data[0];
600};
601
602
603#define HOSTAP_MAX_BSS_COUNT 64
604#define MAX_WPA_IE_LEN 64
605
606struct hostap_bss_info {
607 struct list_head list;
608 unsigned long last_update;
609 unsigned int count;
610 u8 bssid[ETH_ALEN];
611 u16 capab_info;
612 u8 ssid[32];
613 size_t ssid_len;
614 u8 wpa_ie[MAX_WPA_IE_LEN];
615 size_t wpa_ie_len;
616 u8 rsn_ie[MAX_WPA_IE_LEN];
617 size_t rsn_ie_len;
618 int chan;
619 int included;
620};
621
622
623/* Per radio private Host AP data - shared by all net device interfaces used
624 * by each radio (wlan#, wlan#ap, wlan#sta, WDS).
625 * ((struct hostap_interface *) netdev_priv(dev))->local points to this
626 * structure. */
627struct local_info {
628 struct module *hw_module;
629 int card_idx;
630 int dev_enabled;
631 int master_dev_auto_open; /* was master device opened automatically */
632 int num_dev_open; /* number of open devices */
633 struct net_device *dev; /* master radio device */
634 struct net_device *ddev; /* main data device */
635 struct list_head hostap_interfaces; /* Host AP interface list (contains
636 * struct hostap_interface entries)
637 */
638 rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
639 * when removing entries from the list.
640 * TX and RX paths can use read lock. */
641 spinlock_t cmdlock, baplock, lock;
642 struct semaphore rid_bap_sem;
643 u16 infofid; /* MAC buffer id for info frame */
644 /* txfid, intransmitfid, next_txtid, and next_alloc are protected by
645 * txfidlock */
646 spinlock_t txfidlock;
647 int txfid_len; /* length of allocated TX buffers */
648 u16 txfid[PRISM2_TXFID_COUNT]; /* buffer IDs for TX frames */
649 /* buffer IDs for intransmit frames or PRISM2_TXFID_EMPTY if
650 * corresponding txfid is free for next TX frame */
651 u16 intransmitfid[PRISM2_TXFID_COUNT];
652 int next_txfid; /* index to the next txfid to be checked for
653 * availability */
654 int next_alloc; /* index to the next intransmitfid to be checked for
655 * allocation events */
656
657 /* bitfield for atomic bitops */
658#define HOSTAP_BITS_TRANSMIT 0
659#define HOSTAP_BITS_BAP_TASKLET 1
660#define HOSTAP_BITS_BAP_TASKLET2 2
661 long bits;
662
663 struct ap_data *ap;
664
665 char essid[MAX_SSID_LEN + 1];
666 char name[MAX_NAME_LEN + 1];
667 int name_set;
668 u16 channel_mask; /* mask of allowed channels */
669 u16 scan_channel_mask; /* mask of channels to be scanned */
670 struct comm_tallies_sums comm_tallies;
671 struct net_device_stats stats;
672 struct proc_dir_entry *proc;
673 int iw_mode; /* operating mode (IW_MODE_*) */
674 int pseudo_adhoc; /* 0: IW_MODE_ADHOC is real 802.11 compliant IBSS
675 * 1: IW_MODE_ADHOC is "pseudo IBSS" */
676 char bssid[ETH_ALEN];
677 int channel;
678 int beacon_int;
679 int dtim_period;
680 int mtu;
681 int frame_dump; /* dump RX/TX frame headers, PRISM2_DUMP_ flags */
682 int fw_tx_rate_control;
683 u16 tx_rate_control;
684 u16 basic_rates;
685 int hw_resetting;
686 int hw_ready;
687 int hw_reset_tries; /* how many times reset has been tried */
688 int hw_downloading;
689 int shutdown;
690 int pri_only;
691 int no_pri; /* no PRI f/w present */
692 int sram_type; /* 8 = x8 SRAM, 16 = x16 SRAM, -1 = unknown */
693
694 enum {
695 PRISM2_TXPOWER_AUTO = 0, PRISM2_TXPOWER_OFF,
696 PRISM2_TXPOWER_FIXED, PRISM2_TXPOWER_UNKNOWN
697 } txpower_type;
698 int txpower; /* if txpower_type == PRISM2_TXPOWER_FIXED */
699
700 /* command queue for hfa384x_cmd(); protected with cmdlock */
701 struct list_head cmd_queue;
702 /* max_len for cmd_queue; in addition, cmd_callback can use two
703 * additional entries to prevent sleeping commands from stopping
704 * transmits */
705#define HOSTAP_CMD_QUEUE_MAX_LEN 16
706 int cmd_queue_len; /* number of entries in cmd_queue */
707
708 /* if card timeout is detected in interrupt context, reset_queue is
709	 * used to schedule card resetting to be done in user context */
710 struct work_struct reset_queue;
711
712 /* For scheduling a change of the promiscuous mode RID */
713 int is_promisc;
714 struct work_struct set_multicast_list_queue;
715
716 struct work_struct set_tim_queue;
717 struct list_head set_tim_list;
718 spinlock_t set_tim_lock;
719
720 int wds_max_connections;
721 int wds_connections;
722#define HOSTAP_WDS_BROADCAST_RA BIT(0)
723#define HOSTAP_WDS_AP_CLIENT BIT(1)
724#define HOSTAP_WDS_STANDARD_FRAME BIT(2)
725 u32 wds_type;
726 u16 tx_control; /* flags to be used in TX description */
727 int manual_retry_count; /* -1 = use f/w default; otherwise retry count
728 * to be used with all frames */
729
730 struct iw_statistics wstats;
731 unsigned long scan_timestamp; /* Time started to scan */
732 enum {
733 PRISM2_MONITOR_80211 = 0, PRISM2_MONITOR_PRISM = 1,
734 PRISM2_MONITOR_CAPHDR = 2
735 } monitor_type;
736 int (*saved_eth_header_parse)(struct sk_buff *skb,
737 unsigned char *haddr);
738 int monitor_allow_fcserr;
739
740 int hostapd; /* whether user space daemon, hostapd, is used for AP
741 * management */
742 int hostapd_sta; /* whether hostapd is used with an extra STA interface
743 */
744 struct net_device *apdev;
745 struct net_device_stats apdevstats;
746
747 char assoc_ap_addr[ETH_ALEN];
748 struct net_device *stadev;
749 struct net_device_stats stadevstats;
750
751#define WEP_KEYS 4
752#define WEP_KEY_LEN 13
753 struct ieee80211_crypt_data *crypt[WEP_KEYS];
754 int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
755 struct timer_list crypt_deinit_timer;
756 struct list_head crypt_deinit_list;
757
758 int open_wep; /* allow unencrypted frames */
759 int host_encrypt;
760 int host_decrypt;
761 int privacy_invoked; /* force privacy invoked flag even if no keys are
762 * configured */
763 int fw_encrypt_ok; /* whether firmware-based WEP encrypt is working
764 * in Host AP mode (STA f/w 1.4.9 or newer) */
765 int bcrx_sta_key; /* use individual keys to override default keys even
766 * with RX of broad/multicast frames */
767
768 struct prism2_frag_entry frag_cache[PRISM2_FRAG_CACHE_LEN];
769 unsigned int frag_next_idx;
770
771 int ieee_802_1x; /* is IEEE 802.1X used */
772
773 int antsel_tx, antsel_rx;
774 int rts_threshold; /* dot11RTSThreshold */
775 int fragm_threshold; /* dot11FragmentationThreshold */
776 int auth_algs; /* PRISM2_AUTH_ flags */
777
778 int enh_sec; /* cnfEnhSecurity options (broadcast SSID hide/ignore) */
779 int tallies32; /* 32-bit tallies in use */
780
781 struct prism2_helper_functions *func;
782
783 u8 *pda;
784 int fw_ap;
785#define PRISM2_FW_VER(major, minor, variant) \
786(((major) << 16) | ((minor) << 8) | (variant))
787 u32 sta_fw_ver;
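	/* Worked example: STA firmware 1.4.9 is encoded as
	 * PRISM2_FW_VER(1, 4, 9) == 0x010409, so plain numeric comparisons
	 * such as sta_fw_ver >= PRISM2_FW_VER(1, 4, 9) order versions
	 * correctly. */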
788
789 /* Tasklets for handling hardware IRQ related operations outside hw IRQ
790 * handler */
791 struct tasklet_struct bap_tasklet;
792
793 struct tasklet_struct info_tasklet;
794 struct sk_buff_head info_list; /* info frames as skb's for
795 * info_tasklet */
796
797 struct hostap_tx_callback_info *tx_callback; /* registered TX callbacks
798 */
799
800 struct tasklet_struct rx_tasklet;
801 struct sk_buff_head rx_list;
802
803 struct tasklet_struct sta_tx_exc_tasklet;
804 struct sk_buff_head sta_tx_exc_list;
805
806 int host_roaming;
807 unsigned long last_join_time; /* time of last JoinRequest */
808 struct hfa384x_hostscan_result *last_scan_results;
809 int last_scan_results_count;
810 enum { PRISM2_SCAN, PRISM2_HOSTSCAN } last_scan_type;
811 struct work_struct info_queue;
812 long pending_info; /* bit field of pending info_queue items */
813#define PRISM2_INFO_PENDING_LINKSTATUS 0
814#define PRISM2_INFO_PENDING_SCANRESULTS 1
815 int prev_link_status; /* previous received LinkStatus info */
816 int prev_linkstatus_connected;
817 u8 preferred_ap[6]; /* use this AP if possible */
818
819#ifdef PRISM2_CALLBACK
820 void *callback_data; /* Can be used in callbacks; e.g., allocate
821 * on enable event and free on disable event.
822 * Host AP driver code does not touch this. */
823#endif /* PRISM2_CALLBACK */
824
825 wait_queue_head_t hostscan_wq;
826
827 /* Passive scan in Host AP mode */
828 struct timer_list passive_scan_timer;
829 int passive_scan_interval; /* in seconds, 0 = disabled */
830 int passive_scan_channel;
831 enum { PASSIVE_SCAN_WAIT, PASSIVE_SCAN_LISTEN } passive_scan_state;
832
833 struct timer_list tick_timer;
834 unsigned long last_tick_timer;
835 unsigned int sw_tick_stuck;
836
837 /* commsQuality / dBmCommsQuality data from periodic polling; only
838 * valid for Managed and Ad-hoc modes */
839 unsigned long last_comms_qual_update;
840 int comms_qual; /* in some odd unit.. */
841 int avg_signal; /* in dB (note: negative) */
842 int avg_noise; /* in dB (note: negative) */
843 struct work_struct comms_qual_update;
844
845 /* RSSI to dBm adjustment (for RX descriptor fields) */
846	int rssi_to_dBm; /* subtract from RSSI to get approximate dBm value */
847
848 /* BSS list / protected by local->lock */
849 struct list_head bss_list;
850 int num_bss_info;
851 int wpa; /* WPA support enabled */
852 int tkip_countermeasures;
853 int drop_unencrypted;
854 /* Generic IEEE 802.11 info element to be added to
855 * ProbeResp/Beacon/(Re)AssocReq */
856 u8 *generic_elem;
857 size_t generic_elem_len;
858
859#ifdef PRISM2_DOWNLOAD_SUPPORT
860 /* Persistent volatile download data */
861 struct prism2_download_data *dl_pri;
862 struct prism2_download_data *dl_sec;
863#endif /* PRISM2_DOWNLOAD_SUPPORT */
864
865#ifdef PRISM2_IO_DEBUG
866#define PRISM2_IO_DEBUG_SIZE 10000
867 u32 io_debug[PRISM2_IO_DEBUG_SIZE];
868 int io_debug_head;
869 int io_debug_enabled;
870#endif /* PRISM2_IO_DEBUG */
871
872 /* Pointer to hardware model specific (cs,pci,plx) private data. */
873 void *hw_priv;
874};
875
876
877/* Per interface private Host AP data
878 * Allocated for each net device that Host AP uses (wlan#, wlan#ap, wlan#sta,
879 * WDS) and netdev_priv(dev) points to this structure. */
880struct hostap_interface {
881 struct list_head list; /* list entry in Host AP interface list */
882 struct net_device *dev; /* pointer to this device */
883 struct local_info *local; /* pointer to shared private data */
884 struct net_device_stats stats;
885 struct iw_spy_data spy_data; /* iwspy support */
886 struct iw_public_data wireless_data;
887
888 enum {
889 HOSTAP_INTERFACE_MASTER,
890 HOSTAP_INTERFACE_MAIN,
891 HOSTAP_INTERFACE_AP,
892 HOSTAP_INTERFACE_STA,
893 HOSTAP_INTERFACE_WDS,
894 } type;
895
896 union {
897 struct hostap_interface_wds {
898 u8 remote_addr[ETH_ALEN];
899 } wds;
900 } u;
901};
902
903
904#define HOSTAP_SKB_TX_DATA_MAGIC 0xf08a36a2
905
906/*
907 * TX meta data - stored in skb->cb buffer, so this must not be increased over
908 * the 40-byte limit
909 */
910struct hostap_skb_tx_data {
911 u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */
912 u8 rate; /* transmit rate */
913#define HOSTAP_TX_FLAGS_WDS BIT(0)
914#define HOSTAP_TX_FLAGS_BUFFERED_FRAME BIT(1)
915#define HOSTAP_TX_FLAGS_ADD_MOREDATA BIT(2)
916 u8 flags; /* HOSTAP_TX_FLAGS_* */
917 u16 tx_cb_idx;
918 struct hostap_interface *iface;
919 unsigned long jiffies; /* queueing timestamp */
920 unsigned short ethertype;
921};
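/* Illustrative sketch, not part of the driver: the 40-byte constraint above
 * could be checked at build time, assuming BUILD_BUG_ON() (or an equivalent
 * negative-array-size trick) is available in the target kernel:
 *
 *	BUILD_BUG_ON(sizeof(struct hostap_skb_tx_data) >
 *		     sizeof(((struct sk_buff *) 0)->cb));
 */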
922
923
924#ifndef PRISM2_NO_DEBUG
925
926#define DEBUG_FID BIT(0)
927#define DEBUG_PS BIT(1)
928#define DEBUG_FLOW BIT(2)
929#define DEBUG_AP BIT(3)
930#define DEBUG_HW BIT(4)
931#define DEBUG_EXTRA BIT(5)
932#define DEBUG_EXTRA2 BIT(6)
933#define DEBUG_PS2 BIT(7)
934#define DEBUG_MASK (DEBUG_PS | DEBUG_AP | DEBUG_HW | DEBUG_EXTRA)
935#define PDEBUG(n, args...) \
936do { if ((n) & DEBUG_MASK) printk(KERN_DEBUG args); } while (0)
937#define PDEBUG2(n, args...) \
938do { if ((n) & DEBUG_MASK) printk(args); } while (0)
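/* Usage illustration: PDEBUG(DEBUG_HW, "%s: resetting\n", dev->name) only
 * produces output when DEBUG_HW is part of DEBUG_MASK, while PDEBUG2()
 * continues an already started line without the KERN_DEBUG prefix. */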
939
940#else /* PRISM2_NO_DEBUG */
941
942#define PDEBUG(n, args...)
943#define PDEBUG2(n, args...)
944
945#endif /* PRISM2_NO_DEBUG */
946
947enum { BAP0 = 0, BAP1 = 1 };
948
949#define PRISM2_IO_DEBUG_CMD_INB 0
950#define PRISM2_IO_DEBUG_CMD_INW 1
951#define PRISM2_IO_DEBUG_CMD_INSW 2
952#define PRISM2_IO_DEBUG_CMD_OUTB 3
953#define PRISM2_IO_DEBUG_CMD_OUTW 4
954#define PRISM2_IO_DEBUG_CMD_OUTSW 5
955#define PRISM2_IO_DEBUG_CMD_ERROR 6
956#define PRISM2_IO_DEBUG_CMD_INTERRUPT 7
957
958#ifdef PRISM2_IO_DEBUG
959
960#define PRISM2_IO_DEBUG_ENTRY(cmd, reg, value) \
961(((cmd) << 24) | ((reg) << 16) | (value))
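/* Decoding illustration (assuming register offsets fit in one byte, as they
 * do for the I/O maps above): cmd = e >> 24, reg = (e >> 16) & 0xff,
 * value = e & 0xffff.  Entries are stored in local->io_debug in pairs, a
 * jiffies timestamp followed by this packed word; see prism2_io_debug_add()
 * below. */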
962
963static inline void prism2_io_debug_add(struct net_device *dev, int cmd,
964 int reg, int value)
965{
966 struct hostap_interface *iface = netdev_priv(dev);
967 local_info_t *local = iface->local;
968
969 if (!local->io_debug_enabled)
970 return;
971
972 local->io_debug[local->io_debug_head] = jiffies & 0xffffffff;
973 if (++local->io_debug_head >= PRISM2_IO_DEBUG_SIZE)
974 local->io_debug_head = 0;
975 local->io_debug[local->io_debug_head] =
976 PRISM2_IO_DEBUG_ENTRY(cmd, reg, value);
977 if (++local->io_debug_head >= PRISM2_IO_DEBUG_SIZE)
978 local->io_debug_head = 0;
979}
980
981
982static inline void prism2_io_debug_error(struct net_device *dev, int err)
983{
984 struct hostap_interface *iface = netdev_priv(dev);
985 local_info_t *local = iface->local;
986 unsigned long flags;
987
988 if (!local->io_debug_enabled)
989 return;
990
991 spin_lock_irqsave(&local->lock, flags);
992 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_ERROR, 0, err);
993 if (local->io_debug_enabled == 1) {
994 local->io_debug_enabled = 0;
995 printk(KERN_DEBUG "%s: I/O debug stopped\n", dev->name);
996 }
997 spin_unlock_irqrestore(&local->lock, flags);
998}
999
1000#else /* PRISM2_IO_DEBUG */
1001
1002static inline void prism2_io_debug_add(struct net_device *dev, int cmd,
1003 int reg, int value)
1004{
1005}
1006
1007static inline void prism2_io_debug_error(struct net_device *dev, int err)
1008{
1009}
1010
1011#endif /* PRISM2_IO_DEBUG */
1012
1013
1014#ifdef PRISM2_CALLBACK
1015enum {
1016 /* Called when card is enabled */
1017 PRISM2_CALLBACK_ENABLE,
1018
1019 /* Called when card is disabled */
1020 PRISM2_CALLBACK_DISABLE,
1021
1022 /* Called when RX/TX starts/ends */
1023 PRISM2_CALLBACK_RX_START, PRISM2_CALLBACK_RX_END,
1024 PRISM2_CALLBACK_TX_START, PRISM2_CALLBACK_TX_END
1025};
1026void prism2_callback(local_info_t *local, int event);
1027#else /* PRISM2_CALLBACK */
1028#define prism2_callback(d, e) do { } while (0)
1029#endif /* PRISM2_CALLBACK */
1030
1031#endif /* __KERNEL__ */
1032
1033#endif /* HOSTAP_WLAN_H */
diff --git a/drivers/net/wireless/ieee802_11.h b/drivers/net/wireless/ieee802_11.h
deleted file mode 100644
index 53dd5248f9f1..000000000000
--- a/drivers/net/wireless/ieee802_11.h
+++ /dev/null
@@ -1,78 +0,0 @@
1#ifndef _IEEE802_11_H
2#define _IEEE802_11_H
3
4#define IEEE802_11_DATA_LEN 2304
5/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6 6.2.1.1.2.
7
8 The figure in section 7.1.2 suggests a body size of up to 2312
9 bytes is allowed, which is a bit confusing, I suspect this
10 represents the 2304 bytes of real data, plus a possible 8 bytes of
11 WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
12
13
14#define IEEE802_11_HLEN 30
15#define IEEE802_11_FRAME_LEN (IEEE802_11_DATA_LEN + IEEE802_11_HLEN)
16
17struct ieee802_11_hdr {
18 u16 frame_ctl;
19 u16 duration_id;
20 u8 addr1[ETH_ALEN];
21 u8 addr2[ETH_ALEN];
22 u8 addr3[ETH_ALEN];
23 u16 seq_ctl;
24 u8 addr4[ETH_ALEN];
25} __attribute__ ((packed));
26
27/* Frame control field constants */
28#define IEEE802_11_FCTL_VERS 0x0002
29#define IEEE802_11_FCTL_FTYPE 0x000c
30#define IEEE802_11_FCTL_STYPE 0x00f0
31#define IEEE802_11_FCTL_TODS 0x0100
32#define IEEE802_11_FCTL_FROMDS 0x0200
33#define IEEE802_11_FCTL_MOREFRAGS 0x0400
34#define IEEE802_11_FCTL_RETRY 0x0800
35#define IEEE802_11_FCTL_PM 0x1000
36#define IEEE802_11_FCTL_MOREDATA 0x2000
37#define IEEE802_11_FCTL_WEP 0x4000
38#define IEEE802_11_FCTL_ORDER 0x8000
39
40#define IEEE802_11_FTYPE_MGMT 0x0000
41#define IEEE802_11_FTYPE_CTL 0x0004
42#define IEEE802_11_FTYPE_DATA 0x0008
43
44/* management */
45#define IEEE802_11_STYPE_ASSOC_REQ 0x0000
46#define IEEE802_11_STYPE_ASSOC_RESP 0x0010
47#define IEEE802_11_STYPE_REASSOC_REQ 0x0020
48#define IEEE802_11_STYPE_REASSOC_RESP 0x0030
49#define IEEE802_11_STYPE_PROBE_REQ 0x0040
50#define IEEE802_11_STYPE_PROBE_RESP 0x0050
51#define IEEE802_11_STYPE_BEACON 0x0080
52#define IEEE802_11_STYPE_ATIM 0x0090
53#define IEEE802_11_STYPE_DISASSOC 0x00A0
54#define IEEE802_11_STYPE_AUTH 0x00B0
55#define IEEE802_11_STYPE_DEAUTH 0x00C0
56
57/* control */
58#define IEEE802_11_STYPE_PSPOLL 0x00A0
59#define IEEE802_11_STYPE_RTS 0x00B0
60#define IEEE802_11_STYPE_CTS 0x00C0
61#define IEEE802_11_STYPE_ACK 0x00D0
62#define IEEE802_11_STYPE_CFEND 0x00E0
63#define IEEE802_11_STYPE_CFENDACK 0x00F0
64
65/* data */
66#define IEEE802_11_STYPE_DATA 0x0000
67#define IEEE802_11_STYPE_DATA_CFACK 0x0010
68#define IEEE802_11_STYPE_DATA_CFPOLL 0x0020
69#define IEEE802_11_STYPE_DATA_CFACKPOLL 0x0030
70#define IEEE802_11_STYPE_NULLFUNC 0x0040
71#define IEEE802_11_STYPE_CFACK 0x0050
72#define IEEE802_11_STYPE_CFPOLL 0x0060
73#define IEEE802_11_STYPE_CFACKPOLL 0x0070
74
75#define IEEE802_11_SCTL_FRAG 0x000F
76#define IEEE802_11_SCTL_SEQ 0xFFF0
77
78#endif /* _IEEE802_11_H */
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
new file mode 100644
index 000000000000..2414e6493aa5
--- /dev/null
+++ b/drivers/net/wireless/ipw2100.c
@@ -0,0 +1,8680 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 Portions of this file are based on the sample_* files provided by Wireless
26 Extensions 0.26 package and copyright (c) 1997-2003 Jean Tourrilhes
27 <jt@hpl.hp.com>
28
29 Portions of this file are based on the Host AP project,
30 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
31 <jkmaline@cc.hut.fi>
32 Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
33
34 Portions of ipw2100_mod_firmware_load, ipw2100_do_mod_firmware_load, and
35 ipw2100_fw_load are loosely based on drivers/sound/sound_firmware.c
36 available in the 2.4.25 kernel sources, and are copyright (c) Alan Cox
37
38******************************************************************************/
39/*
40
41 Initial driver on which this is based was developed by Janusz Gorycki,
42 Maciej Urbaniak, and Maciej Sosnowski.
43
44 Promiscuous mode support added by Jacek Wysoczynski and Maciej Urbaniak.
45
46Theory of Operation
47
48Tx - Commands and Data
49
50Firmware and host share a circular queue of Transmit Buffer Descriptors (TBDs)
51Each TBD contains a pointer to the physical (dma_addr_t) address of data being
52sent to the firmware as well as the length of the data.
53
54The host writes to the TBD queue at the WRITE index. The WRITE index points
55to the _next_ packet to be written and is advanced when after the TBD has been
56filled.
57
58The firmware pulls from the TBD queue at the READ index. The READ index points
59to the entry currently being read, and is advanced once the firmware is
60done with a packet.
61
62When data is sent to the firmware, the first TBD is used to indicate to the
63firmware whether a Command or Data is being sent. If it is a Command, all of the
64command information is contained within the physical address referred to by the
65TBD. If it is Data, the first TBD indicates the type of data packet, number
66of fragments, etc. The next TBD then refers to the actual packet location.
67
68The Tx flow cycle is as follows:
69
701) ipw2100_tx() is called by kernel with SKB to transmit
712) Packet is moved from the tx_free_list and appended to the transmit pending
72 list (tx_pend_list)
733) work is scheduled to move pending packets into the shared circular queue.
744) when placing a packet in the circular queue, the incoming SKB is DMA mapped
75 to a physical address. That address is entered into a TBD. Two TBDs are
76 filled out. The first indicating a data packet, the second referring to the
77 actual payload data.
785) the packet is removed from tx_pend_list and placed on the end of the
79 firmware pending list (fw_pend_list)
806) firmware is notified that the WRITE index has been updated.
817) Once the firmware has processed the TBD, INTA is triggered.
828) For each Tx interrupt received from the firmware, the READ index is checked
83 to see which TBDs are done being processed.
849) For each TBD that has been processed, the ISR pulls the oldest packet
85 from the fw_pend_list.
8610)The packet structure contained in the fw_pend_list is then used
87 to unmap the DMA address and to free the SKB originally passed to the driver
88 from the kernel.
8911)The packet structure is placed onto the tx_free_list
90
91The above steps are the same for commands, only the msg_free_list/msg_pend_list
92are used instead of tx_free_list/tx_pend_list (see the illustrative
93index-advance sketch after this comment block).
93
94...
95
96Critical Sections / Locking :
97
98There are two locks utilized. The first is the low level lock (priv->low_lock)
99that protects the following:
100
101- Access to the Tx/Rx queue lists via priv->low_lock. The lists are as follows:
102
103 tx_free_list : Holds pre-allocated Tx buffers.
104 TAIL modified in __ipw2100_tx_process()
105 HEAD modified in ipw2100_tx()
106
107 tx_pend_list : Holds used Tx buffers waiting to go into the TBD ring
108 TAIL modified ipw2100_tx()
109 HEAD modified by ipw2100_tx_send_data()
110
111 msg_free_list : Holds pre-allocated Msg (Command) buffers
112 TAIL modified in __ipw2100_tx_process()
113 HEAD modified in ipw2100_hw_send_command()
114
115 msg_pend_list : Holds used Msg buffers waiting to go into the TBD ring
116 TAIL modified in ipw2100_hw_send_command()
117 HEAD modified in ipw2100_tx_send_commands()
118
119 The flow of data on the TX side is as follows:
120
121 MSG_FREE_LIST + COMMAND => MSG_PEND_LIST => TBD => MSG_FREE_LIST
122 TX_FREE_LIST + DATA => TX_PEND_LIST => TBD => TX_FREE_LIST
123
124 The methods that work on the TBD ring are protected via priv->low_lock.
125
126- The internal data state of the device itself
127- Access to the firmware read/write indexes for the BD queues
128 and associated logic
129
130All external entry functions are locked with the priv->action_lock to ensure
131that only one external action is invoked at a time.
132
133
134*/
135
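/*
 * Illustrative sketch only; the type and helper below use made-up names and
 * are not part of this driver.  The WRITE/READ index handling described in
 * the comment above reduces to modular arithmetic on the shared TBD ring:
 *
 *	struct tbd_ring_idx {
 *		unsigned int entries;	// number of TBDs in the ring
 *		unsigned int write;	// host WRITE index
 *		unsigned int read;	// firmware READ index, as last seen
 *	};
 *
 *	// host side: claim the TBD at WRITE, then advance with wrap-around
 *	static inline unsigned int tbd_claim(struct tbd_ring_idx *r)
 *	{
 *		unsigned int idx = r->write;
 *
 *		r->write = (r->write + 1) % r->entries;
 *		return idx;
 *	}
 *
 * The real driver additionally tells the firmware about the new WRITE index
 * through a register write and, on each Tx interrupt, reclaims processed
 * entries up to the firmware's READ index back onto the free lists.
 */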
136#include <linux/compiler.h>
137#include <linux/config.h>
138#include <linux/errno.h>
139#include <linux/if_arp.h>
140#include <linux/in6.h>
141#include <linux/in.h>
142#include <linux/ip.h>
143#include <linux/kernel.h>
144#include <linux/kmod.h>
145#include <linux/module.h>
146#include <linux/netdevice.h>
147#include <linux/ethtool.h>
148#include <linux/pci.h>
149#include <linux/dma-mapping.h>
150#include <linux/proc_fs.h>
151#include <linux/skbuff.h>
152#include <asm/uaccess.h>
153#include <asm/io.h>
154#define __KERNEL_SYSCALLS__
155#include <linux/fs.h>
156#include <linux/mm.h>
157#include <linux/slab.h>
158#include <linux/unistd.h>
159#include <linux/stringify.h>
160#include <linux/tcp.h>
161#include <linux/types.h>
162#include <linux/version.h>
163#include <linux/time.h>
164#include <linux/firmware.h>
165#include <linux/acpi.h>
166#include <linux/ctype.h>
167
168#include "ipw2100.h"
169
170#define IPW2100_VERSION "1.1.0"
171
172#define DRV_NAME "ipw2100"
173#define DRV_VERSION IPW2100_VERSION
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
176
177
178/* Debugging stuff */
179#ifdef CONFIG_IPW_DEBUG
180#define CONFIG_IPW2100_RX_DEBUG /* Reception debugging */
181#endif
182
183MODULE_DESCRIPTION(DRV_DESCRIPTION);
184MODULE_VERSION(DRV_VERSION);
185MODULE_AUTHOR(DRV_COPYRIGHT);
186MODULE_LICENSE("GPL");
187
188static int debug = 0;
189static int mode = 0;
190static int channel = 0;
191static int associate = 1;
192static int disable = 0;
193#ifdef CONFIG_PM
194static struct ipw2100_fw ipw2100_firmware;
195#endif
196
197#include <linux/moduleparam.h>
198module_param(debug, int, 0444);
199module_param(mode, int, 0444);
200module_param(channel, int, 0444);
201module_param(associate, int, 0444);
202module_param(disable, int, 0444);
203
204MODULE_PARM_DESC(debug, "debug level");
205MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
206MODULE_PARM_DESC(channel, "channel");
207MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
208MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
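/* Illustrative usage with made-up values: "modprobe ipw2100 mode=1 channel=6
 * associate=0" would request IBSS mode on channel 6 without automatic
 * association, per the parameter descriptions above. */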
209
210static u32 ipw2100_debug_level = IPW_DL_NONE;
211
212#ifdef CONFIG_IPW_DEBUG
213#define IPW_DEBUG(level, message...) \
214do { \
215 if (ipw2100_debug_level & (level)) { \
216 printk(KERN_DEBUG "ipw2100: %c %s ", \
217 in_interrupt() ? 'I' : 'U', __FUNCTION__); \
218 printk(message); \
219 } \
220} while (0)
221#else
222#define IPW_DEBUG(level, message...) do {} while (0)
223#endif /* CONFIG_IPW_DEBUG */
224
225#ifdef CONFIG_IPW_DEBUG
226static const char *command_types[] = {
227 "undefined",
228 "unused", /* HOST_ATTENTION */
229 "HOST_COMPLETE",
230 "unused", /* SLEEP */
231 "unused", /* HOST_POWER_DOWN */
232 "unused",
233 "SYSTEM_CONFIG",
234 "unused", /* SET_IMR */
235 "SSID",
236 "MANDATORY_BSSID",
237 "AUTHENTICATION_TYPE",
238 "ADAPTER_ADDRESS",
239 "PORT_TYPE",
240 "INTERNATIONAL_MODE",
241 "CHANNEL",
242 "RTS_THRESHOLD",
243 "FRAG_THRESHOLD",
244 "POWER_MODE",
245 "TX_RATES",
246 "BASIC_TX_RATES",
247 "WEP_KEY_INFO",
248 "unused",
249 "unused",
250 "unused",
251 "unused",
252 "WEP_KEY_INDEX",
253 "WEP_FLAGS",
254 "ADD_MULTICAST",
255 "CLEAR_ALL_MULTICAST",
256 "BEACON_INTERVAL",
257 "ATIM_WINDOW",
258 "CLEAR_STATISTICS",
259 "undefined",
260 "undefined",
261 "undefined",
262 "undefined",
263 "TX_POWER_INDEX",
264 "undefined",
265 "undefined",
266 "undefined",
267 "undefined",
268 "undefined",
269 "undefined",
270 "BROADCAST_SCAN",
271 "CARD_DISABLE",
272 "PREFERRED_BSSID",
273 "SET_SCAN_OPTIONS",
274 "SCAN_DWELL_TIME",
275 "SWEEP_TABLE",
276 "AP_OR_STATION_TABLE",
277 "GROUP_ORDINALS",
278 "SHORT_RETRY_LIMIT",
279 "LONG_RETRY_LIMIT",
280 "unused", /* SAVE_CALIBRATION */
281 "unused", /* RESTORE_CALIBRATION */
282 "undefined",
283 "undefined",
284 "undefined",
285 "HOST_PRE_POWER_DOWN",
286 "unused", /* HOST_INTERRUPT_COALESCING */
287 "undefined",
288 "CARD_DISABLE_PHY_OFF",
289	"MSDU_TX_RATES",
290 "undefined",
291 "undefined",
292 "SET_STATION_STAT_BITS",
293 "CLEAR_STATIONS_STAT_BITS",
294 "LEAP_ROGUE_MODE",
295 "SET_SECURITY_INFORMATION",
296 "DISASSOCIATION_BSSID",
297 "SET_WPA_ASS_IE"
298};
299#endif
300
301
302/* Pre-decl until we get the code solid and then we can clean it up */
303static void ipw2100_tx_send_commands(struct ipw2100_priv *priv);
304static void ipw2100_tx_send_data(struct ipw2100_priv *priv);
305static int ipw2100_adapter_setup(struct ipw2100_priv *priv);
306
307static void ipw2100_queues_initialize(struct ipw2100_priv *priv);
308static void ipw2100_queues_free(struct ipw2100_priv *priv);
309static int ipw2100_queues_allocate(struct ipw2100_priv *priv);
310
311static int ipw2100_fw_download(struct ipw2100_priv *priv,
312 struct ipw2100_fw *fw);
313static int ipw2100_get_firmware(struct ipw2100_priv *priv,
314 struct ipw2100_fw *fw);
315static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf,
316 size_t max);
317static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf,
318 size_t max);
319static void ipw2100_release_firmware(struct ipw2100_priv *priv,
320 struct ipw2100_fw *fw);
321static int ipw2100_ucode_download(struct ipw2100_priv *priv,
322 struct ipw2100_fw *fw);
323static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
324static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device * dev);
325static struct iw_handler_def ipw2100_wx_handler_def;
326
327
328static inline void read_register(struct net_device *dev, u32 reg, u32 *val)
329{
330 *val = readl((void __iomem *)(dev->base_addr + reg));
331 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
332}
333
334static inline void write_register(struct net_device *dev, u32 reg, u32 val)
335{
336 writel(val, (void __iomem *)(dev->base_addr + reg));
337 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
338}
339
340static inline void read_register_word(struct net_device *dev, u32 reg, u16 *val)
341{
342 *val = readw((void __iomem *)(dev->base_addr + reg));
343 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
344}
345
346static inline void read_register_byte(struct net_device *dev, u32 reg, u8 *val)
347{
348 *val = readb((void __iomem *)(dev->base_addr + reg));
349 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
350}
351
352static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
353{
354 writew(val, (void __iomem *)(dev->base_addr + reg));
355 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
356}
357
358
359static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
360{
361 writeb(val, (void __iomem *)(dev->base_addr + reg));
362 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
363}
364
365static inline void read_nic_dword(struct net_device *dev, u32 addr, u32 *val)
366{
367 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
368 addr & IPW_REG_INDIRECT_ADDR_MASK);
369 read_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
370}
371
372static inline void write_nic_dword(struct net_device *dev, u32 addr, u32 val)
373{
374 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
375 addr & IPW_REG_INDIRECT_ADDR_MASK);
376 write_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
377}
378
379static inline void read_nic_word(struct net_device *dev, u32 addr, u16 *val)
380{
381 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
382 addr & IPW_REG_INDIRECT_ADDR_MASK);
383 read_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
384}
385
386static inline void write_nic_word(struct net_device *dev, u32 addr, u16 val)
387{
388 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
389 addr & IPW_REG_INDIRECT_ADDR_MASK);
390 write_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
391}
392
393static inline void read_nic_byte(struct net_device *dev, u32 addr, u8 *val)
394{
395 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
396 addr & IPW_REG_INDIRECT_ADDR_MASK);
397 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
398}
399
400static inline void write_nic_byte(struct net_device *dev, u32 addr, u8 val)
401{
402 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
403 addr & IPW_REG_INDIRECT_ADDR_MASK);
404 write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
405}
406
407static inline void write_nic_auto_inc_address(struct net_device *dev, u32 addr)
408{
409 write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
410 addr & IPW_REG_INDIRECT_ADDR_MASK);
411}
412
413static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val)
414{
415 write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val);
416}
417
418static inline void write_nic_memory(struct net_device *dev, u32 addr, u32 len,
419 const u8 *buf)
420{
421 u32 aligned_addr;
422 u32 aligned_len;
423 u32 dif_len;
424 u32 i;
425
426	/* write the unaligned leading bytes one at a time */
427 aligned_addr = addr & (~0x3);
428 dif_len = addr - aligned_addr;
429 if (dif_len) {
430		/* Start writing at aligned_addr + dif_len */
431 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
432 aligned_addr);
433 for (i = dif_len; i < 4; i++, buf++)
434 write_register_byte(
435 dev, IPW_REG_INDIRECT_ACCESS_DATA + i,
436 *buf);
437
438 len -= dif_len;
439 aligned_addr += 4;
440 }
441
442	/* write full dwords through the autoincrement registers */
443 write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
444 aligned_addr);
445 aligned_len = len & (~0x3);
446 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
447 write_register(
448 dev, IPW_REG_AUTOINCREMENT_DATA, *(u32 *)buf);
449
450	/* write the remaining trailing bytes */
451 dif_len = len - aligned_len;
452 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr);
453 for (i = 0; i < dif_len; i++, buf++)
454 write_register_byte(
455 dev, IPW_REG_INDIRECT_ACCESS_DATA + i, *buf);
456}
457
458static inline void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
459 u8 *buf)
460{
461 u32 aligned_addr;
462 u32 aligned_len;
463 u32 dif_len;
464 u32 i;
465
466	/* read the unaligned leading bytes one at a time */
467 aligned_addr = addr & (~0x3);
468 dif_len = addr - aligned_addr;
469 if (dif_len) {
470 /* Start reading at aligned_addr + dif_len */
471 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
472 aligned_addr);
473 for (i = dif_len; i < 4; i++, buf++)
474 read_register_byte(
475 dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
476
477 len -= dif_len;
478 aligned_addr += 4;
479 }
480
481 /* read DWs through autoincrement registers */
482 write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
483 aligned_addr);
484 aligned_len = len & (~0x3);
485 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
486 read_register(dev, IPW_REG_AUTOINCREMENT_DATA,
487 (u32 *)buf);
488
489	/* read the remaining trailing bytes */
490 dif_len = len - aligned_len;
491 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
492 aligned_addr);
493 for (i = 0; i < dif_len; i++, buf++)
494 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA +
495 i, buf);
496}
497
498static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev)
499{
500 return (dev->base_addr &&
501 (readl((void __iomem *)(dev->base_addr + IPW_REG_DOA_DEBUG_AREA_START))
502 == IPW_DATA_DOA_DEBUG_VALUE));
503}
504
505static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
506 void *val, u32 *len)
507{
508 struct ipw2100_ordinals *ordinals = &priv->ordinals;
509 u32 addr;
510 u32 field_info;
511 u16 field_len;
512 u16 field_count;
513 u32 total_length;
514
515 if (ordinals->table1_addr == 0) {
516 printk(KERN_WARNING DRV_NAME ": attempt to use fw ordinals "
517 "before they have been loaded.\n");
518 return -EINVAL;
519 }
520
521 if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
522 if (*len < IPW_ORD_TAB_1_ENTRY_SIZE) {
523 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
524
525 printk(KERN_WARNING DRV_NAME
526 ": ordinal buffer length too small, need %zd\n",
527 IPW_ORD_TAB_1_ENTRY_SIZE);
528
529 return -EINVAL;
530 }
531
532 read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2),
533 &addr);
534 read_nic_dword(priv->net_dev, addr, val);
535
536 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
537
538 return 0;
539 }
540
541 if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) {
542
543 ord -= IPW_START_ORD_TAB_2;
544
545 /* get the address of statistic */
546 read_nic_dword(priv->net_dev, ordinals->table2_addr + (ord << 3),
547 &addr);
548
549 /* get the second DW of statistics ;
550 * two 16-bit words - first is length, second is count */
551 read_nic_dword(priv->net_dev,
552 ordinals->table2_addr + (ord << 3) + sizeof(u32),
553 &field_info);
554
555 /* get each entry length */
556 field_len = *((u16 *)&field_info);
557
558 /* get number of entries */
559 field_count = *(((u16 *)&field_info) + 1);
560
561		/* abort if the supplied buffer is too small */
562 total_length = field_len * field_count;
563 if (total_length > *len) {
564 *len = total_length;
565 return -EINVAL;
566 }
567
568 *len = total_length;
569 if (!total_length)
570 return 0;
571
572 /* read the ordinal data from the SRAM */
573 read_nic_memory(priv->net_dev, addr, total_length, val);
574
575 return 0;
576 }
577
578 printk(KERN_WARNING DRV_NAME ": ordinal %d neither in table 1 nor "
579 "in table 2\n", ord);
580
581 return -EINVAL;
582}
583
584static int ipw2100_set_ordinal(struct ipw2100_priv *priv, u32 ord, u32 *val,
585 u32 *len)
586{
587 struct ipw2100_ordinals *ordinals = &priv->ordinals;
588 u32 addr;
589
590 if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
591 if (*len != IPW_ORD_TAB_1_ENTRY_SIZE) {
592 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
593 IPW_DEBUG_INFO("wrong size\n");
594 return -EINVAL;
595 }
596
597 read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2),
598 &addr);
599
600 write_nic_dword(priv->net_dev, addr, *val);
601
602 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
603
604 return 0;
605 }
606
607 IPW_DEBUG_INFO("wrong table\n");
608 if (IS_ORDINAL_TABLE_TWO(ordinals, ord))
609 return -EINVAL;
610
611 return -EINVAL;
612}
613
614static char *snprint_line(char *buf, size_t count,
615 const u8 *data, u32 len, u32 ofs)
616{
617 int out, i, j, l;
618 char c;
619
620 out = snprintf(buf, count, "%08X", ofs);
621
622 for (l = 0, i = 0; i < 2; i++) {
623 out += snprintf(buf + out, count - out, " ");
624 for (j = 0; j < 8 && l < len; j++, l++)
625 out += snprintf(buf + out, count - out, "%02X ",
626 data[(i * 8 + j)]);
627 for (; j < 8; j++)
628 out += snprintf(buf + out, count - out, " ");
629 }
630
631 out += snprintf(buf + out, count - out, " ");
632 for (l = 0, i = 0; i < 2; i++) {
633 out += snprintf(buf + out, count - out, " ");
634 for (j = 0; j < 8 && l < len; j++, l++) {
635 c = data[(i * 8 + j)];
636 if (!isascii(c) || !isprint(c))
637 c = '.';
638
639 out += snprintf(buf + out, count - out, "%c", c);
640 }
641
642 for (; j < 8; j++)
643 out += snprintf(buf + out, count - out, " ");
644 }
645
646 return buf;
647}
648
649static void printk_buf(int level, const u8 *data, u32 len)
650{
651 char line[81];
652 u32 ofs = 0;
653 if (!(ipw2100_debug_level & level))
654 return;
655
656 while (len) {
657 printk(KERN_DEBUG "%s\n",
658 snprint_line(line, sizeof(line), &data[ofs],
659 min(len, 16U), ofs));
660 ofs += 16;
661 len -= min(len, 16U);
662 }
663}
664
665
666
667#define MAX_RESET_BACKOFF 10
668
669static inline void schedule_reset(struct ipw2100_priv *priv)
670{
671 unsigned long now = get_seconds();
672
673 /* If we haven't received a reset request within the backoff period,
674 * then we can reset the backoff interval so this reset occurs
675 * immediately */
676 if (priv->reset_backoff &&
677 (now - priv->last_reset > priv->reset_backoff))
678 priv->reset_backoff = 0;
679
680 priv->last_reset = get_seconds();
681
682 if (!(priv->status & STATUS_RESET_PENDING)) {
683 IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n",
684 priv->net_dev->name, priv->reset_backoff);
685 netif_carrier_off(priv->net_dev);
686 netif_stop_queue(priv->net_dev);
687 priv->status |= STATUS_RESET_PENDING;
688 if (priv->reset_backoff)
689 queue_delayed_work(priv->workqueue, &priv->reset_work,
690 priv->reset_backoff * HZ);
691 else
692 queue_work(priv->workqueue, &priv->reset_work);
693
694 if (priv->reset_backoff < MAX_RESET_BACKOFF)
695 priv->reset_backoff++;
696
697 wake_up_interruptible(&priv->wait_command_queue);
698 } else
699 IPW_DEBUG_INFO("%s: Firmware restart already in progress.\n",
700 priv->net_dev->name);
701
702}
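/* Behaviour illustration: reset_backoff grows by one (up to MAX_RESET_BACKOFF,
 * i.e. 10) each time a reset is scheduled, so back-to-back failures are
 * retried after 0 s, 1 s, 2 s, ... capped at 10 s, while a quiet period longer
 * than the current backoff resets it to zero so the next reset is queued
 * immediately. */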
703
704#define HOST_COMPLETE_TIMEOUT (2 * HZ)
705static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
706 struct host_command * cmd)
707{
708 struct list_head *element;
709 struct ipw2100_tx_packet *packet;
710 unsigned long flags;
711 int err = 0;
712
713 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
714 command_types[cmd->host_command], cmd->host_command,
715 cmd->host_command_length);
716 printk_buf(IPW_DL_HC, (u8*)cmd->host_command_parameters,
717 cmd->host_command_length);
718
719 spin_lock_irqsave(&priv->low_lock, flags);
720
721 if (priv->fatal_error) {
722		IPW_DEBUG_INFO("Attempt to send command while hardware is in fatal error condition.\n");
723 err = -EIO;
724 goto fail_unlock;
725 }
726
727 if (!(priv->status & STATUS_RUNNING)) {
728 IPW_DEBUG_INFO("Attempt to send command while hardware is not running.\n");
729 err = -EIO;
730 goto fail_unlock;
731 }
732
733 if (priv->status & STATUS_CMD_ACTIVE) {
734 IPW_DEBUG_INFO("Attempt to send command while another command is pending.\n");
735 err = -EBUSY;
736 goto fail_unlock;
737 }
738
739 if (list_empty(&priv->msg_free_list)) {
740 IPW_DEBUG_INFO("no available msg buffers\n");
741 goto fail_unlock;
742 }
743
744 priv->status |= STATUS_CMD_ACTIVE;
745 priv->messages_sent++;
746
747 element = priv->msg_free_list.next;
748
749 packet = list_entry(element, struct ipw2100_tx_packet, list);
750 packet->jiffy_start = jiffies;
751
752 /* initialize the firmware command packet */
753 packet->info.c_struct.cmd->host_command_reg = cmd->host_command;
754 packet->info.c_struct.cmd->host_command_reg1 = cmd->host_command1;
755 packet->info.c_struct.cmd->host_command_len_reg = cmd->host_command_length;
756 packet->info.c_struct.cmd->sequence = cmd->host_command_sequence;
757
758 memcpy(packet->info.c_struct.cmd->host_command_params_reg,
759 cmd->host_command_parameters,
760 sizeof(packet->info.c_struct.cmd->host_command_params_reg));
761
762 list_del(element);
763 DEC_STAT(&priv->msg_free_stat);
764
765 list_add_tail(element, &priv->msg_pend_list);
766 INC_STAT(&priv->msg_pend_stat);
767
768 ipw2100_tx_send_commands(priv);
769 ipw2100_tx_send_data(priv);
770
771 spin_unlock_irqrestore(&priv->low_lock, flags);
772
773 /*
774 * We must wait for this command to complete before another
775 * command can be sent... but if we wait more than 3 seconds
776 * then there is a problem.
777 */
778
779 err = wait_event_interruptible_timeout(
780 priv->wait_command_queue, !(priv->status & STATUS_CMD_ACTIVE),
781 HOST_COMPLETE_TIMEOUT);
782
783 if (err == 0) {
784		IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
785			       HOST_COMPLETE_TIMEOUT * 1000 / HZ);
786 priv->fatal_error = IPW2100_ERR_MSG_TIMEOUT;
787 priv->status &= ~STATUS_CMD_ACTIVE;
788 schedule_reset(priv);
789 return -EIO;
790 }
791
792 if (priv->fatal_error) {
793 printk(KERN_WARNING DRV_NAME ": %s: firmware fatal error\n",
794 priv->net_dev->name);
795 return -EIO;
796 }
797
798 /* !!!!! HACK TEST !!!!!
799 * When lots of debug trace statements are enabled, the driver
800 * doesn't seem to have as many firmware restart cycles...
801 *
802 * As a test, we're sticking in a 1/100s delay here */
803 set_current_state(TASK_UNINTERRUPTIBLE);
804 schedule_timeout(HZ / 100);
805
806 return 0;
807
808 fail_unlock:
809 spin_unlock_irqrestore(&priv->low_lock, flags);
810
811 return err;
812}
813
814
815/*
816 * Verify the values and data access of the hardware
817 * No locks needed or used. No functions called.
818 */
819static int ipw2100_verify(struct ipw2100_priv *priv)
820{
821 u32 data1, data2;
822 u32 address;
823
824 u32 val1 = 0x76543210;
825 u32 val2 = 0xFEDCBA98;
826
827 /* Domain 0 check - all values should be DOA_DEBUG */
828 for (address = IPW_REG_DOA_DEBUG_AREA_START;
829 address < IPW_REG_DOA_DEBUG_AREA_END;
830 address += sizeof(u32)) {
831 read_register(priv->net_dev, address, &data1);
832 if (data1 != IPW_DATA_DOA_DEBUG_VALUE)
833 return -EIO;
834 }
835
836 /* Domain 1 check - use arbitrary read/write compare */
837 for (address = 0; address < 5; address++) {
838 /* The memory area is not used now */
839 write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
840 val1);
841 write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
842 val2);
843 read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
844 &data1);
845 read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
846 &data2);
847 if (val1 == data1 && val2 == data2)
848 return 0;
849 }
850
851 return -EIO;
852}
853
854/*
855 *
856 * Loop until the CARD_DISABLED bit is the same value as the
857 * supplied parameter
858 *
859 * TODO: See if it would be more efficient to do a wait/wake
860 * cycle and have the completion event trigger the wakeup
861 *
862 */
863#define IPW_CARD_DISABLE_COMPLETE_WAIT 100 // 100 milliseconds
864static int ipw2100_wait_for_card_state(struct ipw2100_priv *priv, int state)
865{
866 int i;
867 u32 card_state;
868 u32 len = sizeof(card_state);
869 int err;
870
871 for (i = 0; i <= IPW_CARD_DISABLE_COMPLETE_WAIT * 1000; i += 50) {
872 err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED,
873 &card_state, &len);
874 if (err) {
875 IPW_DEBUG_INFO("Query of CARD_DISABLED ordinal "
876 "failed.\n");
877 return 0;
878 }
879
880 /* We'll break out if either the HW state says it is
881 * in the state we want, or if HOST_COMPLETE command
882 * finishes */
883 if ((card_state == state) ||
884 ((priv->status & STATUS_ENABLED) ?
885 IPW_HW_STATE_ENABLED : IPW_HW_STATE_DISABLED) == state) {
886 if (state == IPW_HW_STATE_ENABLED)
887 priv->status |= STATUS_ENABLED;
888 else
889 priv->status &= ~STATUS_ENABLED;
890
891 return 0;
892 }
893
894 udelay(50);
895 }
896
897 IPW_DEBUG_INFO("ipw2100_wait_for_card_state to %s state timed out\n",
898 state ? "DISABLED" : "ENABLED");
899 return -EIO;
900}
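
/* Note on the loop above: with udelay(50) per pass and a budget of
 * IPW_CARD_DISABLE_COMPLETE_WAIT * 1000 microseconds, the ordinal is
 * polled at most roughly 2000 times before giving up. */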
901
902
903/*********************************************************************
904 Procedure : sw_reset_and_clock
905 Purpose : Asserts s/w reset, asserts clock initialization
906 and waits for clock stabilization
907 ********************************************************************/
908static int sw_reset_and_clock(struct ipw2100_priv *priv)
909{
910 int i;
911 u32 r;
912
913	/* assert s/w reset */
914 write_register(priv->net_dev, IPW_REG_RESET_REG,
915 IPW_AUX_HOST_RESET_REG_SW_RESET);
916
917	/* wait for clock stabilization */
918 for (i = 0; i < 1000; i++) {
919 udelay(IPW_WAIT_RESET_ARC_COMPLETE_DELAY);
920
921		/* check clock ready bit */
922 read_register(priv->net_dev, IPW_REG_RESET_REG, &r);
923 if (r & IPW_AUX_HOST_RESET_REG_PRINCETON_RESET)
924 break;
925 }
926
927 if (i == 1000)
928		return -EIO;	/* TODO: better error value */
929
930 /* set "initialization complete" bit to move adapter to
931 * D0 state */
932 write_register(priv->net_dev, IPW_REG_GP_CNTRL,
933 IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE);
934
935 /* wait for clock stabilization */
936 for (i = 0; i < 10000; i++) {
937 udelay(IPW_WAIT_CLOCK_STABILIZATION_DELAY * 4);
938
939 /* check clock ready bit */
940 read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
941 if (r & IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY)
942 break;
943 }
944
945 if (i == 10000)
946 return -EIO; /* TODO: better error value */
947
948 /* set D0 standby bit */
949 read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
950 write_register(priv->net_dev, IPW_REG_GP_CNTRL,
951 r | IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
952
953 return 0;
954}
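
/*
 * Illustrative sketch, not part of the driver: both loops in
 * sw_reset_and_clock() follow the same "poll a register until a bit comes
 * up, give up after N tries" shape.  This hypothetical helper captures
 * that pattern; read_register() and udelay() are the calls used above.
 */
static int example_poll_register_bit(struct net_device *dev, u32 reg,
				     u32 bit, int tries, int delay_usec)
{
	u32 r;

	while (tries--) {
		udelay(delay_usec);
		read_register(dev, reg, &r);
		if (r & bit)
			return 0;
	}

	return -EIO;
}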
955
956/*********************************************************************
957 Procedure : ipw2100_download_firmware
958 Purpose : Initialize adapter after power on.
959 The sequence is:
960 1. assert s/w reset first!
961            2. wake clocks & wait for clock stabilization
962 3. hold ARC (don't ask me why...)
963 4. load Dino ucode and reset/clock init again
964 5. zero-out shared mem
965 6. download f/w
966 *******************************************************************/
967static int ipw2100_download_firmware(struct ipw2100_priv *priv)
968{
969 u32 address;
970 int err;
971
972#ifndef CONFIG_PM
973 /* Fetch the firmware and microcode */
974 struct ipw2100_fw ipw2100_firmware;
975#endif
976
977 if (priv->fatal_error) {
978 IPW_DEBUG_ERROR("%s: ipw2100_download_firmware called after "
979 "fatal error %d. Interface must be brought down.\n",
980 priv->net_dev->name, priv->fatal_error);
981 return -EINVAL;
982 }
983
984#ifdef CONFIG_PM
985 if (!ipw2100_firmware.version) {
986 err = ipw2100_get_firmware(priv, &ipw2100_firmware);
987 if (err) {
988 IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
989 priv->net_dev->name, err);
990 priv->fatal_error = IPW2100_ERR_FW_LOAD;
991 goto fail;
992 }
993 }
994#else
995 err = ipw2100_get_firmware(priv, &ipw2100_firmware);
996 if (err) {
997 IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
998 priv->net_dev->name, err);
999 priv->fatal_error = IPW2100_ERR_FW_LOAD;
1000 goto fail;
1001 }
1002#endif
1003 priv->firmware_version = ipw2100_firmware.version;
1004
1005 /* s/w reset and clock stabilization */
1006 err = sw_reset_and_clock(priv);
1007 if (err) {
1008 IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
1009 priv->net_dev->name, err);
1010 goto fail;
1011 }
1012
1013 err = ipw2100_verify(priv);
1014 if (err) {
1015 IPW_DEBUG_ERROR("%s: ipw2100_verify failed: %d\n",
1016 priv->net_dev->name, err);
1017 goto fail;
1018 }
1019
1020 /* Hold ARC */
1021 write_nic_dword(priv->net_dev,
1022 IPW_INTERNAL_REGISTER_HALT_AND_RESET,
1023 0x80000000);
1024
1025 /* allow ARC to run */
1026 write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
1027
1028 /* load microcode */
1029 err = ipw2100_ucode_download(priv, &ipw2100_firmware);
1030 if (err) {
1031 printk(KERN_ERR DRV_NAME ": %s: Error loading microcode: %d\n",
1032 priv->net_dev->name, err);
1033 goto fail;
1034 }
1035
1036 /* release ARC */
1037 write_nic_dword(priv->net_dev,
1038 IPW_INTERNAL_REGISTER_HALT_AND_RESET,
1039 0x00000000);
1040
1041 /* s/w reset and clock stabilization (again!!!) */
1042 err = sw_reset_and_clock(priv);
1043 if (err) {
1044 printk(KERN_ERR DRV_NAME ": %s: sw_reset_and_clock failed: %d\n",
1045 priv->net_dev->name, err);
1046 goto fail;
1047 }
1048
1049 /* load f/w */
1050 err = ipw2100_fw_download(priv, &ipw2100_firmware);
1051 if (err) {
1052 IPW_DEBUG_ERROR("%s: Error loading firmware: %d\n",
1053 priv->net_dev->name, err);
1054 goto fail;
1055 }
1056
1057#ifndef CONFIG_PM
1058 /*
1059 * When the .resume method of the driver is called, the other
1060 * part of the system, i.e. the ide driver could still stay in
1061 * the suspend stage. This prevents us from loading the firmware
1062 * from the disk. --YZ
1063 */
1064
1065 /* free any storage allocated for firmware image */
1066 ipw2100_release_firmware(priv, &ipw2100_firmware);
1067#endif
1068
1069 /* zero out Domain 1 area indirectly (Si requirement) */
1070 for (address = IPW_HOST_FW_SHARED_AREA0;
1071 address < IPW_HOST_FW_SHARED_AREA0_END; address += 4)
1072 write_nic_dword(priv->net_dev, address, 0);
1073 for (address = IPW_HOST_FW_SHARED_AREA1;
1074 address < IPW_HOST_FW_SHARED_AREA1_END; address += 4)
1075 write_nic_dword(priv->net_dev, address, 0);
1076 for (address = IPW_HOST_FW_SHARED_AREA2;
1077 address < IPW_HOST_FW_SHARED_AREA2_END; address += 4)
1078 write_nic_dword(priv->net_dev, address, 0);
1079 for (address = IPW_HOST_FW_SHARED_AREA3;
1080 address < IPW_HOST_FW_SHARED_AREA3_END; address += 4)
1081 write_nic_dword(priv->net_dev, address, 0);
1082 for (address = IPW_HOST_FW_INTERRUPT_AREA;
1083 address < IPW_HOST_FW_INTERRUPT_AREA_END; address += 4)
1084 write_nic_dword(priv->net_dev, address, 0);
1085
1086 return 0;
1087
1088 fail:
1089 ipw2100_release_firmware(priv, &ipw2100_firmware);
1090 return err;
1091}
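
/*
 * Illustrative sketch, not part of the driver: the five zeroing loops at
 * the end of ipw2100_download_firmware() could be expressed as a single
 * table walk.  The address constants and write_nic_dword() are the ones
 * already used above; the table and helper are hypothetical.
 */
static void example_zero_shared_areas(struct net_device *dev)
{
	static const struct { u32 start, end; } areas[] = {
		{ IPW_HOST_FW_SHARED_AREA0, IPW_HOST_FW_SHARED_AREA0_END },
		{ IPW_HOST_FW_SHARED_AREA1, IPW_HOST_FW_SHARED_AREA1_END },
		{ IPW_HOST_FW_SHARED_AREA2, IPW_HOST_FW_SHARED_AREA2_END },
		{ IPW_HOST_FW_SHARED_AREA3, IPW_HOST_FW_SHARED_AREA3_END },
		{ IPW_HOST_FW_INTERRUPT_AREA, IPW_HOST_FW_INTERRUPT_AREA_END },
	};
	u32 address;
	int i;

	for (i = 0; i < ARRAY_SIZE(areas); i++)
		for (address = areas[i].start; address < areas[i].end;
		     address += 4)
			write_nic_dword(dev, address, 0);
}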
1092
1093static inline void ipw2100_enable_interrupts(struct ipw2100_priv *priv)
1094{
1095 if (priv->status & STATUS_INT_ENABLED)
1096 return;
1097 priv->status |= STATUS_INT_ENABLED;
1098 write_register(priv->net_dev, IPW_REG_INTA_MASK, IPW_INTERRUPT_MASK);
1099}
1100
1101static inline void ipw2100_disable_interrupts(struct ipw2100_priv *priv)
1102{
1103 if (!(priv->status & STATUS_INT_ENABLED))
1104 return;
1105 priv->status &= ~STATUS_INT_ENABLED;
1106 write_register(priv->net_dev, IPW_REG_INTA_MASK, 0x0);
1107}
1108
1109
1110static void ipw2100_initialize_ordinals(struct ipw2100_priv *priv)
1111{
1112 struct ipw2100_ordinals *ord = &priv->ordinals;
1113
1114 IPW_DEBUG_INFO("enter\n");
1115
1116 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1,
1117 &ord->table1_addr);
1118
1119 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2,
1120 &ord->table2_addr);
1121
1122 read_nic_dword(priv->net_dev, ord->table1_addr, &ord->table1_size);
1123 read_nic_dword(priv->net_dev, ord->table2_addr, &ord->table2_size);
1124
1125 ord->table2_size &= 0x0000FFFF;
1126
1127 IPW_DEBUG_INFO("table 1 size: %d\n", ord->table1_size);
1128 IPW_DEBUG_INFO("table 2 size: %d\n", ord->table2_size);
1129 IPW_DEBUG_INFO("exit\n");
1130}
1131
1132static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
1133{
1134 u32 reg = 0;
1135 /*
1136 * Set GPIO 3 writable by FW; GPIO 1 writable
1137 * by driver and enable clock
1138 */
1139 reg = (IPW_BIT_GPIO_GPIO3_MASK | IPW_BIT_GPIO_GPIO1_ENABLE |
1140 IPW_BIT_GPIO_LED_OFF);
1141 write_register(priv->net_dev, IPW_REG_GPIO, reg);
1142}
1143
1144static inline int rf_kill_active(struct ipw2100_priv *priv)
1145{
1146#define MAX_RF_KILL_CHECKS 5
1147#define RF_KILL_CHECK_DELAY 40
1148
1149 unsigned short value = 0;
1150 u32 reg = 0;
1151 int i;
1152
1153 if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
1154 priv->status &= ~STATUS_RF_KILL_HW;
1155 return 0;
1156 }
1157
1158 for (i = 0; i < MAX_RF_KILL_CHECKS; i++) {
1159 udelay(RF_KILL_CHECK_DELAY);
1160 read_register(priv->net_dev, IPW_REG_GPIO, &reg);
1161 value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
1162 }
1163
1164 if (value == 0)
1165 priv->status |= STATUS_RF_KILL_HW;
1166 else
1167 priv->status &= ~STATUS_RF_KILL_HW;
1168
1169 return (value == 0);
1170}
1171
1172static int ipw2100_get_hw_features(struct ipw2100_priv *priv)
1173{
1174 u32 addr, len;
1175 u32 val;
1176
1177 /*
1178 * EEPROM_SRAM_DB_START_ADDRESS using ordinal in ordinal table 1
1179 */
1180 len = sizeof(addr);
1181 if (ipw2100_get_ordinal(
1182 priv, IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS,
1183 &addr, &len)) {
1184 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1185 __LINE__);
1186 return -EIO;
1187 }
1188
1189 IPW_DEBUG_INFO("EEPROM address: %08X\n", addr);
1190
1191 /*
1192 * EEPROM version is the byte at offset 0xfd in firmware
1193 * We read 4 bytes, then shift out the byte we actually want */
1194 read_nic_dword(priv->net_dev, addr + 0xFC, &val);
1195 priv->eeprom_version = (val >> 24) & 0xFF;
1196 IPW_DEBUG_INFO("EEPROM version: %d\n", priv->eeprom_version);
1197
1198 /*
1199 * HW RF Kill enable is bit 0 in byte at offset 0x21 in firmware
1200 *
1201 * notice that the EEPROM bit is reverse polarity, i.e.
1202 * bit = 0 signifies HW RF kill switch is supported
1203 * bit = 1 signifies HW RF kill switch is NOT supported
1204 */
1205 read_nic_dword(priv->net_dev, addr + 0x20, &val);
1206 if (!((val >> 24) & 0x01))
1207 priv->hw_features |= HW_FEATURE_RFKILL;
1208
1209 IPW_DEBUG_INFO("HW RF Kill: %ssupported.\n",
1210 (priv->hw_features & HW_FEATURE_RFKILL) ?
1211 "" : "not ");
1212
1213 return 0;
1214}
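
/*
 * Illustrative sketch, not part of the driver: the "read a dword, shift
 * out the byte we actually want" step described in
 * ipw2100_get_hw_features(), written as a hypothetical helper.  The
 * byte-lane math here assumes read_nic_dword() returns the four bytes in
 * little-endian order; the driver itself hard-codes its shifts.
 */
static u8 example_read_nic_byte(struct net_device *dev, u32 offset)
{
	u32 dword;

	read_nic_dword(dev, offset & ~3, &dword);

	return (dword >> (8 * (offset & 3))) & 0xFF;
}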
1215
1216/*
1217 * Start firmware execution after power on and initialization
1218 * The sequence is:
1219 *  1. Release ARC
1220 *  2. Wait for f/w initialization to complete
1221 */
1222static int ipw2100_start_adapter(struct ipw2100_priv *priv)
1223{
1224 int i;
1225 u32 inta, inta_mask, gpio;
1226
1227 IPW_DEBUG_INFO("enter\n");
1228
1229 if (priv->status & STATUS_RUNNING)
1230 return 0;
1231
1232 /*
1233	 * Initialize the hw - drive adapter to D0 state by setting
1234 * init_done bit. Wait for clk_ready bit and Download
1235 * fw & dino ucode
1236 */
1237 if (ipw2100_download_firmware(priv)) {
1238 printk(KERN_ERR DRV_NAME ": %s: Failed to power on the adapter.\n",
1239 priv->net_dev->name);
1240 return -EIO;
1241 }
1242
1243 /* Clear the Tx, Rx and Msg queues and the r/w indexes
1244 * in the firmware RBD and TBD ring queue */
1245 ipw2100_queues_initialize(priv);
1246
1247 ipw2100_hw_set_gpio(priv);
1248
1249 /* TODO -- Look at disabling interrupts here to make sure none
1250 * get fired during FW initialization */
1251
1252 /* Release ARC - clear reset bit */
1253 write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
1254
1255	/* wait for f/w initialization to complete */
1256 IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
1257 i = 5000;
1258 do {
1259 set_current_state(TASK_UNINTERRUPTIBLE);
1260 schedule_timeout(40 * HZ / 1000);
1261 /* Todo... wait for sync command ... */
1262
1263 read_register(priv->net_dev, IPW_REG_INTA, &inta);
1264
1265 /* check "init done" bit */
1266 if (inta & IPW2100_INTA_FW_INIT_DONE) {
1267 /* reset "init done" bit */
1268 write_register(priv->net_dev, IPW_REG_INTA,
1269 IPW2100_INTA_FW_INIT_DONE);
1270 break;
1271 }
1272
1273 /* check error conditions : we check these after the firmware
1274 * check so that if there is an error, the interrupt handler
1275 * will see it and the adapter will be reset */
1276 if (inta &
1277 (IPW2100_INTA_FATAL_ERROR | IPW2100_INTA_PARITY_ERROR)) {
1278 /* clear error conditions */
1279 write_register(priv->net_dev, IPW_REG_INTA,
1280 IPW2100_INTA_FATAL_ERROR |
1281 IPW2100_INTA_PARITY_ERROR);
1282 }
1283 } while (i--);
1284
1285 /* Clear out any pending INTAs since we aren't supposed to have
1286 * interrupts enabled at this point... */
1287 read_register(priv->net_dev, IPW_REG_INTA, &inta);
1288 read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
1289 inta &= IPW_INTERRUPT_MASK;
1290 /* Clear out any pending interrupts */
1291 if (inta & inta_mask)
1292 write_register(priv->net_dev, IPW_REG_INTA, inta);
1293
1294 IPW_DEBUG_FW("f/w initialization complete: %s\n",
1295 i ? "SUCCESS" : "FAILED");
1296
1297 if (!i) {
1298 printk(KERN_WARNING DRV_NAME ": %s: Firmware did not initialize.\n",
1299 priv->net_dev->name);
1300 return -EIO;
1301 }
1302
1303 /* allow firmware to write to GPIO1 & GPIO3 */
1304 read_register(priv->net_dev, IPW_REG_GPIO, &gpio);
1305
1306 gpio |= (IPW_BIT_GPIO_GPIO1_MASK | IPW_BIT_GPIO_GPIO3_MASK);
1307
1308 write_register(priv->net_dev, IPW_REG_GPIO, gpio);
1309
1310 /* Ready to receive commands */
1311 priv->status |= STATUS_RUNNING;
1312
1313 /* The adapter has been reset; we are not associated */
1314 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
1315
1316 IPW_DEBUG_INFO("exit\n");
1317
1318 return 0;
1319}
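
/*
 * Illustrative sketch, not part of the driver: the interrupt-acknowledge
 * idiom used at the end of ipw2100_start_adapter().  Pending INTA bits are
 * cleared by writing the same bits back to the INTA register, and only
 * bits we actually handle (IPW_INTERRUPT_MASK) are acked.  The helper
 * itself is hypothetical.
 */
static void example_ack_pending_inta(struct net_device *dev)
{
	u32 inta, inta_mask;

	read_register(dev, IPW_REG_INTA, &inta);
	read_register(dev, IPW_REG_INTA_MASK, &inta_mask);
	inta &= IPW_INTERRUPT_MASK;

	if (inta & inta_mask)
		write_register(dev, IPW_REG_INTA, inta);
}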
1320
1321static inline void ipw2100_reset_fatalerror(struct ipw2100_priv *priv)
1322{
1323 if (!priv->fatal_error)
1324 return;
1325
1326 priv->fatal_errors[priv->fatal_index++] = priv->fatal_error;
1327 priv->fatal_index %= IPW2100_ERROR_QUEUE;
1328 priv->fatal_error = 0;
1329}
1330
1331
1332/* NOTE: Our interrupt is disabled when this method is called */
1333static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
1334{
1335 u32 reg;
1336 int i;
1337
1338 IPW_DEBUG_INFO("Power cycling the hardware.\n");
1339
1340 ipw2100_hw_set_gpio(priv);
1341
1342 /* Step 1. Stop Master Assert */
1343 write_register(priv->net_dev, IPW_REG_RESET_REG,
1344 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
1345
1346 /* Step 2. Wait for stop Master Assert
1347	 *         (not more than 50 usec, otherwise return an error) */
1348 i = 5;
1349 do {
1350 udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
1351 read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
1352
1353 if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
1354 break;
1355 } while(i--);
1356
1357 priv->status &= ~STATUS_RESET_PENDING;
1358
1359 if (!i) {
1360 IPW_DEBUG_INFO("exit - waited too long for master assert stop\n");
1361 return -EIO;
1362 }
1363
1364 write_register(priv->net_dev, IPW_REG_RESET_REG,
1365 IPW_AUX_HOST_RESET_REG_SW_RESET);
1366
1367
1368 /* Reset any fatal_error conditions */
1369 ipw2100_reset_fatalerror(priv);
1370
1371 /* At this point, the adapter is now stopped and disabled */
1372 priv->status &= ~(STATUS_RUNNING | STATUS_ASSOCIATING |
1373 STATUS_ASSOCIATED | STATUS_ENABLED);
1374
1375 return 0;
1376}
1377
1378/*
1379 * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
1380 *
1381 * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
1382 *
1383 * STATUS_CARD_DISABLE_NOTIFICATION will be sent regardless of
1384 * whether STATUS_ASSN_LOST is sent.
1385 */
1386static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
1387{
1388
1389#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
1390
1391 struct host_command cmd = {
1392 .host_command = CARD_DISABLE_PHY_OFF,
1393 .host_command_sequence = 0,
1394 .host_command_length = 0,
1395 };
1396 int err, i;
1397 u32 val1, val2;
1398
1399 IPW_DEBUG_HC("CARD_DISABLE_PHY_OFF\n");
1400
1401 /* Turn off the radio */
1402 err = ipw2100_hw_send_command(priv, &cmd);
1403 if (err)
1404 return err;
1405
1406 for (i = 0; i < 2500; i++) {
1407 read_nic_dword(priv->net_dev, IPW2100_CONTROL_REG, &val1);
1408 read_nic_dword(priv->net_dev, IPW2100_COMMAND, &val2);
1409
1410 if ((val1 & IPW2100_CONTROL_PHY_OFF) &&
1411 (val2 & IPW2100_COMMAND_PHY_OFF))
1412 return 0;
1413
1414 set_current_state(TASK_UNINTERRUPTIBLE);
1415 schedule_timeout(HW_PHY_OFF_LOOP_DELAY);
1416 }
1417
1418 return -EIO;
1419}
1420
1421
1422static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
1423{
1424 struct host_command cmd = {
1425 .host_command = HOST_COMPLETE,
1426 .host_command_sequence = 0,
1427 .host_command_length = 0
1428 };
1429 int err = 0;
1430
1431 IPW_DEBUG_HC("HOST_COMPLETE\n");
1432
1433 if (priv->status & STATUS_ENABLED)
1434 return 0;
1435
1436 down(&priv->adapter_sem);
1437
1438 if (rf_kill_active(priv)) {
1439 IPW_DEBUG_HC("Command aborted due to RF kill active.\n");
1440 goto fail_up;
1441 }
1442
1443 err = ipw2100_hw_send_command(priv, &cmd);
1444 if (err) {
1445 IPW_DEBUG_INFO("Failed to send HOST_COMPLETE command\n");
1446 goto fail_up;
1447 }
1448
1449 err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_ENABLED);
1450 if (err) {
1451 IPW_DEBUG_INFO(
1452 "%s: card not responding to init command.\n",
1453 priv->net_dev->name);
1454 goto fail_up;
1455 }
1456
1457 if (priv->stop_hang_check) {
1458 priv->stop_hang_check = 0;
1459 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
1460 }
1461
1462fail_up:
1463 up(&priv->adapter_sem);
1464 return err;
1465}
1466
1467static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
1468{
1469#define HW_POWER_DOWN_DELAY (HZ / 10)
1470
1471 struct host_command cmd = {
1472 .host_command = HOST_PRE_POWER_DOWN,
1473 .host_command_sequence = 0,
1474 .host_command_length = 0,
1475 };
1476 int err, i;
1477 u32 reg;
1478
1479 if (!(priv->status & STATUS_RUNNING))
1480 return 0;
1481
1482 priv->status |= STATUS_STOPPING;
1483
1484 /* We can only shut down the card if the firmware is operational. So,
1485 * if we haven't reset since a fatal_error, then we can not send the
1486 * shutdown commands. */
1487 if (!priv->fatal_error) {
1488 /* First, make sure the adapter is enabled so that the PHY_OFF
1489 * command can shut it down */
1490 ipw2100_enable_adapter(priv);
1491
1492 err = ipw2100_hw_phy_off(priv);
1493 if (err)
1494 printk(KERN_WARNING DRV_NAME ": Error disabling radio %d\n", err);
1495
1496 /*
1497 * If in D0-standby mode going directly to D3 may cause a
1498 * PCI bus violation. Therefore we must change out of the D0
1499 * state.
1500 *
1501 * Sending the PREPARE_FOR_POWER_DOWN will restrict the
1502 * hardware from going into standby mode and will transition
1503		 * out of D0-standby if it is already in that state.
1504 *
1505 * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
1506 * driver upon completion. Once received, the driver can
1507 * proceed to the D3 state.
1508 *
1509 * Prepare for power down command to fw. This command would
1510 * take HW out of D0-standby and prepare it for D3 state.
1511 *
1512 * Currently FW does not support event notification for this
1513 * event. Therefore, skip waiting for it. Just wait a fixed
1514 * 100ms
1515 */
1516 IPW_DEBUG_HC("HOST_PRE_POWER_DOWN\n");
1517
1518 err = ipw2100_hw_send_command(priv, &cmd);
1519 if (err)
1520 printk(KERN_WARNING DRV_NAME ": "
1521 "%s: Power down command failed: Error %d\n",
1522 priv->net_dev->name, err);
1523 else {
1524 set_current_state(TASK_UNINTERRUPTIBLE);
1525 schedule_timeout(HW_POWER_DOWN_DELAY);
1526 }
1527 }
1528
1529 priv->status &= ~STATUS_ENABLED;
1530
1531 /*
1532 * Set GPIO 3 writable by FW; GPIO 1 writable
1533 * by driver and enable clock
1534 */
1535 ipw2100_hw_set_gpio(priv);
1536
1537 /*
1538 * Power down adapter. Sequence:
1539 * 1. Stop master assert (RESET_REG[9]=1)
1540 * 2. Wait for stop master (RESET_REG[8]==1)
1541 * 3. S/w reset assert (RESET_REG[7] = 1)
1542 */
1543
1544 /* Stop master assert */
1545 write_register(priv->net_dev, IPW_REG_RESET_REG,
1546 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
1547
1548	/* Wait for the master to stop, but not more than 50 usec;
1549	 * otherwise report an error. */
1550 for (i = 5; i > 0; i--) {
1551 udelay(10);
1552
1553 /* Check master stop bit */
1554 read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
1555
1556 if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
1557 break;
1558 }
1559
1560 if (i == 0)
1561 printk(KERN_WARNING DRV_NAME
1562		       ": %s: Could not power down adapter.\n",
1563 priv->net_dev->name);
1564
1565 /* assert s/w reset */
1566 write_register(priv->net_dev, IPW_REG_RESET_REG,
1567 IPW_AUX_HOST_RESET_REG_SW_RESET);
1568
1569 priv->status &= ~(STATUS_RUNNING | STATUS_STOPPING);
1570
1571 return 0;
1572}
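
/*
 * Illustrative sketch, not part of the driver: the "stop master assert,
 * wait for MASTER_DISABLED, then assert s/w reset" tail used both here and
 * in ipw2100_power_cycle_adapter(), written as a hypothetical shared
 * helper.  Register names and accessors are the ones used above.
 */
static int example_stop_master_and_reset(struct net_device *dev)
{
	u32 reg;
	int i;

	write_register(dev, IPW_REG_RESET_REG,
		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);

	/* wait up to ~50 usec for the master to report disabled */
	for (i = 5; i > 0; i--) {
		udelay(10);
		read_register(dev, IPW_REG_RESET_REG, &reg);
		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
			break;
	}

	write_register(dev, IPW_REG_RESET_REG,
		       IPW_AUX_HOST_RESET_REG_SW_RESET);

	return i ? 0 : -EIO;
}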
1573
1574
1575static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
1576{
1577 struct host_command cmd = {
1578 .host_command = CARD_DISABLE,
1579 .host_command_sequence = 0,
1580 .host_command_length = 0
1581 };
1582 int err = 0;
1583
1584 IPW_DEBUG_HC("CARD_DISABLE\n");
1585
1586 if (!(priv->status & STATUS_ENABLED))
1587 return 0;
1588
1589 /* Make sure we clear the associated state */
1590 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1591
1592 if (!priv->stop_hang_check) {
1593 priv->stop_hang_check = 1;
1594 cancel_delayed_work(&priv->hang_check);
1595 }
1596
1597 down(&priv->adapter_sem);
1598
1599 err = ipw2100_hw_send_command(priv, &cmd);
1600 if (err) {
1601 printk(KERN_WARNING DRV_NAME ": exit - failed to send CARD_DISABLE command\n");
1602 goto fail_up;
1603 }
1604
1605 err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_DISABLED);
1606 if (err) {
1607 printk(KERN_WARNING DRV_NAME ": exit - card failed to change to DISABLED\n");
1608 goto fail_up;
1609 }
1610
1611 IPW_DEBUG_INFO("TODO: implement scan state machine\n");
1612
1613fail_up:
1614 up(&priv->adapter_sem);
1615 return err;
1616}
1617
1618static int ipw2100_set_scan_options(struct ipw2100_priv *priv)
1619{
1620 struct host_command cmd = {
1621 .host_command = SET_SCAN_OPTIONS,
1622 .host_command_sequence = 0,
1623 .host_command_length = 8
1624 };
1625 int err;
1626
1627 IPW_DEBUG_INFO("enter\n");
1628
1629 IPW_DEBUG_SCAN("setting scan options\n");
1630
1631 cmd.host_command_parameters[0] = 0;
1632
1633 if (!(priv->config & CFG_ASSOCIATE))
1634 cmd.host_command_parameters[0] |= IPW_SCAN_NOASSOCIATE;
1635 if ((priv->sec.flags & SEC_ENABLED) && priv->sec.enabled)
1636 cmd.host_command_parameters[0] |= IPW_SCAN_MIXED_CELL;
1637 if (priv->config & CFG_PASSIVE_SCAN)
1638 cmd.host_command_parameters[0] |= IPW_SCAN_PASSIVE;
1639
1640 cmd.host_command_parameters[1] = priv->channel_mask;
1641
1642 err = ipw2100_hw_send_command(priv, &cmd);
1643
1644 IPW_DEBUG_HC("SET_SCAN_OPTIONS 0x%04X\n",
1645 cmd.host_command_parameters[0]);
1646
1647 return err;
1648}
1649
1650static int ipw2100_start_scan(struct ipw2100_priv *priv)
1651{
1652 struct host_command cmd = {
1653 .host_command = BROADCAST_SCAN,
1654 .host_command_sequence = 0,
1655 .host_command_length = 4
1656 };
1657 int err;
1658
1659 IPW_DEBUG_HC("START_SCAN\n");
1660
1661 cmd.host_command_parameters[0] = 0;
1662
1663 /* No scanning if in monitor mode */
1664 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
1665 return 1;
1666
1667 if (priv->status & STATUS_SCANNING) {
1668 IPW_DEBUG_SCAN("Scan requested while already in scan...\n");
1669 return 0;
1670 }
1671
1672 IPW_DEBUG_INFO("enter\n");
1673
1674 /* Not clearing here; doing so makes iwlist always return nothing...
1675 *
1676 * We should modify the table logic to use aging tables vs. clearing
1677 * the table on each scan start.
1678 */
1679 IPW_DEBUG_SCAN("starting scan\n");
1680
1681 priv->status |= STATUS_SCANNING;
1682 err = ipw2100_hw_send_command(priv, &cmd);
1683 if (err)
1684 priv->status &= ~STATUS_SCANNING;
1685
1686 IPW_DEBUG_INFO("exit\n");
1687
1688 return err;
1689}
1690
1691static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1692{
1693 unsigned long flags;
1694 int rc = 0;
1695 u32 lock;
1696 u32 ord_len = sizeof(lock);
1697
1698	/* Quit if manually disabled. */
1699 if (priv->status & STATUS_RF_KILL_SW) {
1700 IPW_DEBUG_INFO("%s: Radio is disabled by Manual Disable "
1701 "switch\n", priv->net_dev->name);
1702 return 0;
1703 }
1704
1705 /* If the interrupt is enabled, turn it off... */
1706 spin_lock_irqsave(&priv->low_lock, flags);
1707 ipw2100_disable_interrupts(priv);
1708
1709 /* Reset any fatal_error conditions */
1710 ipw2100_reset_fatalerror(priv);
1711 spin_unlock_irqrestore(&priv->low_lock, flags);
1712
1713 if (priv->status & STATUS_POWERED ||
1714 (priv->status & STATUS_RESET_PENDING)) {
1715 /* Power cycle the card ... */
1716 if (ipw2100_power_cycle_adapter(priv)) {
1717 printk(KERN_WARNING DRV_NAME ": %s: Could not cycle adapter.\n",
1718 priv->net_dev->name);
1719 rc = 1;
1720 goto exit;
1721 }
1722 } else
1723 priv->status |= STATUS_POWERED;
1724
1725 /* Load the firmware, start the clocks, etc. */
1726 if (ipw2100_start_adapter(priv)) {
1727 printk(KERN_ERR DRV_NAME ": %s: Failed to start the firmware.\n",
1728 priv->net_dev->name);
1729 rc = 1;
1730 goto exit;
1731 }
1732
1733 ipw2100_initialize_ordinals(priv);
1734
1735 /* Determine capabilities of this particular HW configuration */
1736 if (ipw2100_get_hw_features(priv)) {
1737 printk(KERN_ERR DRV_NAME ": %s: Failed to determine HW features.\n",
1738 priv->net_dev->name);
1739 rc = 1;
1740 goto exit;
1741 }
1742
1743 lock = LOCK_NONE;
1744 if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) {
1745 printk(KERN_ERR DRV_NAME ": %s: Failed to clear ordinal lock.\n",
1746 priv->net_dev->name);
1747 rc = 1;
1748 goto exit;
1749 }
1750
1751 priv->status &= ~STATUS_SCANNING;
1752
1753 if (rf_kill_active(priv)) {
1754 printk(KERN_INFO "%s: Radio is disabled by RF switch.\n",
1755 priv->net_dev->name);
1756
1757 if (priv->stop_rf_kill) {
1758 priv->stop_rf_kill = 0;
1759 queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
1760 }
1761
1762 deferred = 1;
1763 }
1764
1765 /* Turn on the interrupt so that commands can be processed */
1766 ipw2100_enable_interrupts(priv);
1767
1768 /* Send all of the commands that must be sent prior to
1769 * HOST_COMPLETE */
1770 if (ipw2100_adapter_setup(priv)) {
1771 printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n",
1772 priv->net_dev->name);
1773 rc = 1;
1774 goto exit;
1775 }
1776
1777 if (!deferred) {
1778 /* Enable the adapter - sends HOST_COMPLETE */
1779 if (ipw2100_enable_adapter(priv)) {
1780 printk(KERN_ERR DRV_NAME ": "
1781 "%s: failed in call to enable adapter.\n",
1782 priv->net_dev->name);
1783 ipw2100_hw_stop_adapter(priv);
1784 rc = 1;
1785 goto exit;
1786 }
1787
1788
1789 /* Start a scan . . . */
1790 ipw2100_set_scan_options(priv);
1791 ipw2100_start_scan(priv);
1792 }
1793
1794 exit:
1795 return rc;
1796}
1797
1798/* Called by register_netdev() */
1799static int ipw2100_net_init(struct net_device *dev)
1800{
1801 struct ipw2100_priv *priv = ieee80211_priv(dev);
1802 return ipw2100_up(priv, 1);
1803}
1804
1805static void ipw2100_down(struct ipw2100_priv *priv)
1806{
1807 unsigned long flags;
1808 union iwreq_data wrqu = {
1809 .ap_addr = {
1810 .sa_family = ARPHRD_ETHER
1811 }
1812 };
1813 int associated = priv->status & STATUS_ASSOCIATED;
1814
1815 /* Kill the RF switch timer */
1816 if (!priv->stop_rf_kill) {
1817 priv->stop_rf_kill = 1;
1818 cancel_delayed_work(&priv->rf_kill);
1819 }
1820
1821	/* Kill the firmware hang check timer */
1822 if (!priv->stop_hang_check) {
1823 priv->stop_hang_check = 1;
1824 cancel_delayed_work(&priv->hang_check);
1825 }
1826
1827 /* Kill any pending resets */
1828 if (priv->status & STATUS_RESET_PENDING)
1829 cancel_delayed_work(&priv->reset_work);
1830
1831 /* Make sure the interrupt is on so that FW commands will be
1832 * processed correctly */
1833 spin_lock_irqsave(&priv->low_lock, flags);
1834 ipw2100_enable_interrupts(priv);
1835 spin_unlock_irqrestore(&priv->low_lock, flags);
1836
1837 if (ipw2100_hw_stop_adapter(priv))
1838 printk(KERN_ERR DRV_NAME ": %s: Error stopping adapter.\n",
1839 priv->net_dev->name);
1840
1841 /* Do not disable the interrupt until _after_ we disable
1842	 * the adapter. Otherwise the CARD_DISABLE command will never
1843 * be ack'd by the firmware */
1844 spin_lock_irqsave(&priv->low_lock, flags);
1845 ipw2100_disable_interrupts(priv);
1846 spin_unlock_irqrestore(&priv->low_lock, flags);
1847
1848#ifdef ACPI_CSTATE_LIMIT_DEFINED
1849 if (priv->config & CFG_C3_DISABLED) {
1850 IPW_DEBUG_INFO(DRV_NAME ": Resetting C3 transitions.\n");
1851 acpi_set_cstate_limit(priv->cstate_limit);
1852 priv->config &= ~CFG_C3_DISABLED;
1853 }
1854#endif
1855
1856 /* We have to signal any supplicant if we are disassociating */
1857 if (associated)
1858 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1859
1860 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1861 netif_carrier_off(priv->net_dev);
1862 netif_stop_queue(priv->net_dev);
1863}
1864
1865static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
1866{
1867 unsigned long flags;
1868 union iwreq_data wrqu = {
1869 .ap_addr = {
1870 .sa_family = ARPHRD_ETHER
1871 }
1872 };
1873 int associated = priv->status & STATUS_ASSOCIATED;
1874
1875 spin_lock_irqsave(&priv->low_lock, flags);
1876 IPW_DEBUG_INFO(DRV_NAME ": %s: Restarting adapter.\n",
1877 priv->net_dev->name);
1878 priv->resets++;
1879 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1880 priv->status |= STATUS_SECURITY_UPDATED;
1881
1882 /* Force a power cycle even if interface hasn't been opened
1883 * yet */
1884 cancel_delayed_work(&priv->reset_work);
1885 priv->status |= STATUS_RESET_PENDING;
1886 spin_unlock_irqrestore(&priv->low_lock, flags);
1887
1888 down(&priv->action_sem);
1889 /* stop timed checks so that they don't interfere with reset */
1890 priv->stop_hang_check = 1;
1891 cancel_delayed_work(&priv->hang_check);
1892
1893 /* We have to signal any supplicant if we are disassociating */
1894 if (associated)
1895 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1896
1897 ipw2100_up(priv, 0);
1898 up(&priv->action_sem);
1899
1900}
1901
1902
1903static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
1904{
1905
1906#define MAC_ASSOCIATION_READ_DELAY (HZ)
1907 int ret, len, essid_len;
1908 char essid[IW_ESSID_MAX_SIZE];
1909 u32 txrate;
1910 u32 chan;
1911 char *txratename;
1912 u8 bssid[ETH_ALEN];
1913
1914 /*
1915 * TBD: BSSID is usually 00:00:00:00:00:00 here and not
1916 * an actual MAC of the AP. Seems like FW sets this
1917 * address too late. Read it later and expose through
1918 * /proc or schedule a later task to query and update
1919 */
1920
1921 essid_len = IW_ESSID_MAX_SIZE;
1922 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID,
1923 essid, &essid_len);
1924 if (ret) {
1925 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1926 __LINE__);
1927 return;
1928 }
1929
1930 len = sizeof(u32);
1931 ret = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE,
1932 &txrate, &len);
1933 if (ret) {
1934 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1935 __LINE__);
1936 return;
1937 }
1938
1939 len = sizeof(u32);
1940 ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &len);
1941 if (ret) {
1942 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1943 __LINE__);
1944 return;
1945 }
1946 len = ETH_ALEN;
1947	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
1948 if (ret) {
1949 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1950 __LINE__);
1951 return;
1952 }
1953 memcpy(priv->ieee->bssid, bssid, ETH_ALEN);
1954
1955
1956 switch (txrate) {
1957 case TX_RATE_1_MBIT:
1958 txratename = "1Mbps";
1959 break;
1960 case TX_RATE_2_MBIT:
1961		txratename = "2Mbps";
1962 break;
1963 case TX_RATE_5_5_MBIT:
1964 txratename = "5.5Mbps";
1965 break;
1966 case TX_RATE_11_MBIT:
1967 txratename = "11Mbps";
1968 break;
1969 default:
1970 IPW_DEBUG_INFO("Unknown rate: %d\n", txrate);
1971 txratename = "unknown rate";
1972 break;
1973 }
1974
1975 IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID="
1976 MAC_FMT ")\n",
1977 priv->net_dev->name, escape_essid(essid, essid_len),
1978 txratename, chan, MAC_ARG(bssid));
1979
1980 /* now we copy read ssid into dev */
1981 if (!(priv->config & CFG_STATIC_ESSID)) {
1982 priv->essid_len = min((u8)essid_len, (u8)IW_ESSID_MAX_SIZE);
1983 memcpy(priv->essid, essid, priv->essid_len);
1984 }
1985 priv->channel = chan;
1986 memcpy(priv->bssid, bssid, ETH_ALEN);
1987
1988 priv->status |= STATUS_ASSOCIATING;
1989 priv->connect_start = get_seconds();
1990
1991 queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
1992}
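
/*
 * Illustrative sketch, not part of the driver: isr_indicate_associated()
 * repeats the same "query a 32-bit ordinal, log and bail on failure" step
 * several times.  A hypothetical wrapper for that step could look like
 * this; ipw2100_get_ordinal() and IPW_DEBUG_INFO() are the calls used
 * above.
 */
static int example_get_u32_ordinal(struct ipw2100_priv *priv, u32 ord,
				   u32 *out)
{
	u32 len = sizeof(*out);
	int err = ipw2100_get_ordinal(priv, ord, out, &len);

	if (err)
		IPW_DEBUG_INFO("failed querying ordinal %u (err %d)\n",
			       ord, err);

	return err;
}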
1993
1994
1995static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
1996 int length, int batch_mode)
1997{
1998 int ssid_len = min(length, IW_ESSID_MAX_SIZE);
1999 struct host_command cmd = {
2000 .host_command = SSID,
2001 .host_command_sequence = 0,
2002 .host_command_length = ssid_len
2003 };
2004 int err;
2005
2006 IPW_DEBUG_HC("SSID: '%s'\n", escape_essid(essid, ssid_len));
2007
2008 if (ssid_len)
2009 memcpy((char*)cmd.host_command_parameters,
2010 essid, ssid_len);
2011
2012 if (!batch_mode) {
2013 err = ipw2100_disable_adapter(priv);
2014 if (err)
2015 return err;
2016 }
2017
2018	/* A bug in the current FW means bit 0 in SET_SCAN_OPTIONS (disable
2019	 * auto association) is not honored -- so we cheat by setting a bogus SSID */
2020 if (!ssid_len && !(priv->config & CFG_ASSOCIATE)) {
2021 int i;
2022 u8 *bogus = (u8*)cmd.host_command_parameters;
2023 for (i = 0; i < IW_ESSID_MAX_SIZE; i++)
2024 bogus[i] = 0x18 + i;
2025 cmd.host_command_length = IW_ESSID_MAX_SIZE;
2026 }
2027
2028 /* NOTE: We always send the SSID command even if the provided ESSID is
2029 * the same as what we currently think is set. */
2030
2031 err = ipw2100_hw_send_command(priv, &cmd);
2032 if (!err) {
2033 memset(priv->essid + ssid_len, 0,
2034 IW_ESSID_MAX_SIZE - ssid_len);
2035 memcpy(priv->essid, essid, ssid_len);
2036 priv->essid_len = ssid_len;
2037 }
2038
2039 if (!batch_mode) {
2040 if (ipw2100_enable_adapter(priv))
2041 err = -EIO;
2042 }
2043
2044 return err;
2045}
2046
2047static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2048{
2049 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
2050 "disassociated: '%s' " MAC_FMT " \n",
2051 escape_essid(priv->essid, priv->essid_len),
2052 MAC_ARG(priv->bssid));
2053
2054 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2055
2056 if (priv->status & STATUS_STOPPING) {
2057 IPW_DEBUG_INFO("Card is stopping itself, discard ASSN_LOST.\n");
2058 return;
2059 }
2060
2061 memset(priv->bssid, 0, ETH_ALEN);
2062 memset(priv->ieee->bssid, 0, ETH_ALEN);
2063
2064 netif_carrier_off(priv->net_dev);
2065 netif_stop_queue(priv->net_dev);
2066
2067 if (!(priv->status & STATUS_RUNNING))
2068 return;
2069
2070 if (priv->status & STATUS_SECURITY_UPDATED)
2071 queue_work(priv->workqueue, &priv->security_work);
2072
2073 queue_work(priv->workqueue, &priv->wx_event_work);
2074}
2075
2076static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
2077{
2078 IPW_DEBUG_INFO("%s: RF Kill state changed to radio OFF.\n",
2079 priv->net_dev->name);
2080
2081 /* RF_KILL is now enabled (else we wouldn't be here) */
2082 priv->status |= STATUS_RF_KILL_HW;
2083
2084#ifdef ACPI_CSTATE_LIMIT_DEFINED
2085 if (priv->config & CFG_C3_DISABLED) {
2086 IPW_DEBUG_INFO(DRV_NAME ": Resetting C3 transitions.\n");
2087 acpi_set_cstate_limit(priv->cstate_limit);
2088 priv->config &= ~CFG_C3_DISABLED;
2089 }
2090#endif
2091
2092 /* Make sure the RF Kill check timer is running */
2093 priv->stop_rf_kill = 0;
2094 cancel_delayed_work(&priv->rf_kill);
2095 queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
2096}
2097
2098static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2099{
2100 IPW_DEBUG_SCAN("scan complete\n");
2101 /* Age the scan results... */
2102 priv->ieee->scans++;
2103 priv->status &= ~STATUS_SCANNING;
2104}
2105
2106#ifdef CONFIG_IPW_DEBUG
2107#define IPW2100_HANDLER(v, f) { v, f, # v }
2108struct ipw2100_status_indicator {
2109 int status;
2110 void (*cb)(struct ipw2100_priv *priv, u32 status);
2111 char *name;
2112};
2113#else
2114#define IPW2100_HANDLER(v, f) { v, f }
2115struct ipw2100_status_indicator {
2116 int status;
2117 void (*cb)(struct ipw2100_priv *priv, u32 status);
2118};
2119#endif /* CONFIG_IPW_DEBUG */
2120
2121static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status)
2122{
2123 IPW_DEBUG_SCAN("Scanning...\n");
2124 priv->status |= STATUS_SCANNING;
2125}
2126
2127static const struct ipw2100_status_indicator status_handlers[] = {
2128 IPW2100_HANDLER(IPW_STATE_INITIALIZED, NULL),
2129 IPW2100_HANDLER(IPW_STATE_COUNTRY_FOUND, NULL),
2130 IPW2100_HANDLER(IPW_STATE_ASSOCIATED, isr_indicate_associated),
2131 IPW2100_HANDLER(IPW_STATE_ASSN_LOST, isr_indicate_association_lost),
2132 IPW2100_HANDLER(IPW_STATE_ASSN_CHANGED, NULL),
2133 IPW2100_HANDLER(IPW_STATE_SCAN_COMPLETE, isr_scan_complete),
2134 IPW2100_HANDLER(IPW_STATE_ENTERED_PSP, NULL),
2135 IPW2100_HANDLER(IPW_STATE_LEFT_PSP, NULL),
2136 IPW2100_HANDLER(IPW_STATE_RF_KILL, isr_indicate_rf_kill),
2137 IPW2100_HANDLER(IPW_STATE_DISABLED, NULL),
2138 IPW2100_HANDLER(IPW_STATE_POWER_DOWN, NULL),
2139 IPW2100_HANDLER(IPW_STATE_SCANNING, isr_indicate_scanning),
2140 IPW2100_HANDLER(-1, NULL)
2141};
2142
2143
2144static void isr_status_change(struct ipw2100_priv *priv, int status)
2145{
2146 int i;
2147
2148 if (status == IPW_STATE_SCANNING &&
2149 priv->status & STATUS_ASSOCIATED &&
2150 !(priv->status & STATUS_SCANNING)) {
2151 IPW_DEBUG_INFO("Scan detected while associated, with "
2152 "no scan request. Restarting firmware.\n");
2153
2154 /* Wake up any sleeping jobs */
2155 schedule_reset(priv);
2156 }
2157
2158 for (i = 0; status_handlers[i].status != -1; i++) {
2159 if (status == status_handlers[i].status) {
2160 IPW_DEBUG_NOTIF("Status change: %s\n",
2161 status_handlers[i].name);
2162 if (status_handlers[i].cb)
2163 status_handlers[i].cb(priv, status);
2164 priv->wstats.status = status;
2165 return;
2166 }
2167 }
2168
2169 IPW_DEBUG_NOTIF("unknown status received: %04x\n", status);
2170}
2171
2172static void isr_rx_complete_command(
2173 struct ipw2100_priv *priv,
2174 struct ipw2100_cmd_header *cmd)
2175{
2176#ifdef CONFIG_IPW_DEBUG
2177 if (cmd->host_command_reg < ARRAY_SIZE(command_types)) {
2178 IPW_DEBUG_HC("Command completed '%s (%d)'\n",
2179 command_types[cmd->host_command_reg],
2180 cmd->host_command_reg);
2181 }
2182#endif
2183 if (cmd->host_command_reg == HOST_COMPLETE)
2184 priv->status |= STATUS_ENABLED;
2185
2186 if (cmd->host_command_reg == CARD_DISABLE)
2187 priv->status &= ~STATUS_ENABLED;
2188
2189 priv->status &= ~STATUS_CMD_ACTIVE;
2190
2191 wake_up_interruptible(&priv->wait_command_queue);
2192}
2193
2194#ifdef CONFIG_IPW_DEBUG
2195static const char *frame_types[] = {
2196 "COMMAND_STATUS_VAL",
2197 "STATUS_CHANGE_VAL",
2198 "P80211_DATA_VAL",
2199 "P8023_DATA_VAL",
2200 "HOST_NOTIFICATION_VAL"
2201};
2202#endif
2203
2204
2205static inline int ipw2100_alloc_skb(
2206 struct ipw2100_priv *priv,
2207 struct ipw2100_rx_packet *packet)
2208{
2209 packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx));
2210 if (!packet->skb)
2211 return -ENOMEM;
2212
2213 packet->rxp = (struct ipw2100_rx *)packet->skb->data;
2214 packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
2215 sizeof(struct ipw2100_rx),
2216 PCI_DMA_FROMDEVICE);
2217 /* NOTE: pci_map_single does not return an error code, and 0 is a valid
2218 * dma_addr */
2219
2220 return 0;
2221}
2222
2223
2224#define SEARCH_ERROR 0xffffffff
2225#define SEARCH_FAIL 0xfffffffe
2226#define SEARCH_SUCCESS 0xfffffff0
2227#define SEARCH_DISCARD 0
2228#define SEARCH_SNAPSHOT 1
2229
2230#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
2231static inline int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
2232{
2233 int i;
2234 if (priv->snapshot[0])
2235 return 1;
2236 for (i = 0; i < 0x30; i++) {
2237 priv->snapshot[i] = (u8*)kmalloc(0x1000, GFP_ATOMIC);
2238 if (!priv->snapshot[i]) {
2239 IPW_DEBUG_INFO("%s: Error allocating snapshot "
2240 "buffer %d\n", priv->net_dev->name, i);
2241 while (i > 0)
2242 kfree(priv->snapshot[--i]);
2243 priv->snapshot[0] = NULL;
2244 return 0;
2245 }
2246 }
2247
2248 return 1;
2249}
2250
2251static inline void ipw2100_snapshot_free(struct ipw2100_priv *priv)
2252{
2253 int i;
2254 if (!priv->snapshot[0])
2255 return;
2256 for (i = 0; i < 0x30; i++)
2257 kfree(priv->snapshot[i]);
2258 priv->snapshot[0] = NULL;
2259}
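
/*
 * Illustrative sketch, not part of the driver: what the SNAPSHOT_ADDR()
 * macro above resolves to.  The snapshot is kept as 0x30 separate 0x1000
 * byte buffers, so a firmware offset is split into a buffer index
 * (bits 12 and up, masked as in the macro) and an offset within that
 * buffer (bits 0-11).  The helper is hypothetical.
 */
static u8 *example_snapshot_addr(struct ipw2100_priv *priv, u32 ofs)
{
	u32 buf = (ofs >> 12) & 0xff;	/* which 4 KiB snapshot buffer */
	u32 in_buf = ofs & 0xfff;	/* byte offset within that buffer */

	return priv->snapshot[buf] + in_buf;
}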
2260
2261static inline u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 *in_buf,
2262 size_t len, int mode)
2263{
2264 u32 i, j;
2265 u32 tmp;
2266 u8 *s, *d;
2267 u32 ret;
2268
2269 s = in_buf;
2270 if (mode == SEARCH_SNAPSHOT) {
2271 if (!ipw2100_snapshot_alloc(priv))
2272 mode = SEARCH_DISCARD;
2273 }
2274
2275 for (ret = SEARCH_FAIL, i = 0; i < 0x30000; i += 4) {
2276 read_nic_dword(priv->net_dev, i, &tmp);
2277 if (mode == SEARCH_SNAPSHOT)
2278 *(u32 *)SNAPSHOT_ADDR(i) = tmp;
2279 if (ret == SEARCH_FAIL) {
2280 d = (u8*)&tmp;
2281 for (j = 0; j < 4; j++) {
2282 if (*s != *d) {
2283 s = in_buf;
2284 continue;
2285 }
2286
2287 s++;
2288 d++;
2289
2290 if ((s - in_buf) == len)
2291 ret = (i + j) - len + 1;
2292 }
2293 } else if (mode == SEARCH_DISCARD)
2294 return ret;
2295 }
2296
2297 return ret;
2298}
2299
2300/*
2301 *
2302 * 0) Disconnect the SKB from the firmware (just unmap)
2303 * 1) Pack the ETH header into the SKB
2304 * 2) Pass the SKB to the network stack
2305 *
2306 * When a packet is provided by the firmware, it contains the following:
2307 *
2308 * . ieee80211_hdr
2309 * . ieee80211_snap_hdr
2310 *
2311 * The size of the constructed ethernet
2312 *
2313 */
2314#ifdef CONFIG_IPW2100_RX_DEBUG
2315static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
2316#endif
2317
2318static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv,
2319 int i)
2320{
2321#ifdef CONFIG_IPW_DEBUG_C3
2322 struct ipw2100_status *status = &priv->status_queue.drv[i];
2323 u32 match, reg;
2324 int j;
2325#endif
2326#ifdef ACPI_CSTATE_LIMIT_DEFINED
2327 int limit;
2328#endif
2329
2330 IPW_DEBUG_INFO(DRV_NAME ": PCI latency error detected at "
2331 "0x%04zX.\n", i * sizeof(struct ipw2100_status));
2332
2333#ifdef ACPI_CSTATE_LIMIT_DEFINED
2334 IPW_DEBUG_INFO(DRV_NAME ": Disabling C3 transitions.\n");
2335 limit = acpi_get_cstate_limit();
2336 if (limit > 2) {
2337 priv->cstate_limit = limit;
2338 acpi_set_cstate_limit(2);
2339 priv->config |= CFG_C3_DISABLED;
2340 }
2341#endif
2342
2343#ifdef CONFIG_IPW_DEBUG_C3
2344	/* Halt the firmware so we can get a good image */
2345 write_register(priv->net_dev, IPW_REG_RESET_REG,
2346 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
2347 j = 5;
2348 do {
2349 udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
2350 read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
2351
2352 if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
2353 break;
2354 } while (j--);
2355
2356 match = ipw2100_match_buf(priv, (u8*)status,
2357 sizeof(struct ipw2100_status),
2358 SEARCH_SNAPSHOT);
2359 if (match < SEARCH_SUCCESS)
2360 IPW_DEBUG_INFO("%s: DMA status match in Firmware at "
2361 "offset 0x%06X, length %d:\n",
2362 priv->net_dev->name, match,
2363 sizeof(struct ipw2100_status));
2364 else
2365 IPW_DEBUG_INFO("%s: No DMA status match in "
2366 "Firmware.\n", priv->net_dev->name);
2367
2368 printk_buf((u8*)priv->status_queue.drv,
2369 sizeof(struct ipw2100_status) * RX_QUEUE_LENGTH);
2370#endif
2371
2372 priv->fatal_error = IPW2100_ERR_C3_CORRUPTION;
2373 priv->ieee->stats.rx_errors++;
2374 schedule_reset(priv);
2375}
2376
2377static inline void isr_rx(struct ipw2100_priv *priv, int i,
2378 struct ieee80211_rx_stats *stats)
2379{
2380 struct ipw2100_status *status = &priv->status_queue.drv[i];
2381 struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
2382
2383 IPW_DEBUG_RX("Handler...\n");
2384
2385 if (unlikely(status->frame_size > skb_tailroom(packet->skb))) {
2386 IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
2387 " Dropping.\n",
2388 priv->net_dev->name,
2389 status->frame_size, skb_tailroom(packet->skb));
2390 priv->ieee->stats.rx_errors++;
2391 return;
2392 }
2393
2394 if (unlikely(!netif_running(priv->net_dev))) {
2395 priv->ieee->stats.rx_errors++;
2396 priv->wstats.discard.misc++;
2397 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
2398 return;
2399 }
2400
2401 if (unlikely(priv->ieee->iw_mode == IW_MODE_MONITOR &&
2402 status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
2403 IPW_DEBUG_RX("CRC error in packet. Dropping.\n");
2404 priv->ieee->stats.rx_errors++;
2405 return;
2406 }
2407
2408 if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR &&
2409 !(priv->status & STATUS_ASSOCIATED))) {
2410 IPW_DEBUG_DROP("Dropping packet while not associated.\n");
2411 priv->wstats.discard.misc++;
2412 return;
2413 }
2414
2415
2416 pci_unmap_single(priv->pci_dev,
2417 packet->dma_addr,
2418 sizeof(struct ipw2100_rx),
2419 PCI_DMA_FROMDEVICE);
2420
2421 skb_put(packet->skb, status->frame_size);
2422
2423#ifdef CONFIG_IPW2100_RX_DEBUG
2424 /* Make a copy of the frame so we can dump it to the logs if
2425 * ieee80211_rx fails */
2426 memcpy(packet_data, packet->skb->data,
2427 min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH));
2428#endif
2429
2430 if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
2431#ifdef CONFIG_IPW2100_RX_DEBUG
2432 IPW_DEBUG_DROP("%s: Non consumed packet:\n",
2433 priv->net_dev->name);
2434 printk_buf(IPW_DL_DROP, packet_data, status->frame_size);
2435#endif
2436 priv->ieee->stats.rx_errors++;
2437
2438 /* ieee80211_rx failed, so it didn't free the SKB */
2439 dev_kfree_skb_any(packet->skb);
2440 packet->skb = NULL;
2441 }
2442
2443 /* We need to allocate a new SKB and attach it to the RDB. */
2444 if (unlikely(ipw2100_alloc_skb(priv, packet))) {
2445 printk(KERN_WARNING DRV_NAME ": "
2446 "%s: Unable to allocate SKB onto RBD ring - disabling "
2447 "adapter.\n", priv->net_dev->name);
2448 /* TODO: schedule adapter shutdown */
2449 IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
2450 }
2451
2452 /* Update the RDB entry */
2453 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
2454}
2455
2456static inline int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
2457{
2458 struct ipw2100_status *status = &priv->status_queue.drv[i];
2459 struct ipw2100_rx *u = priv->rx_buffers[i].rxp;
2460 u16 frame_type = status->status_fields & STATUS_TYPE_MASK;
2461
2462 switch (frame_type) {
2463 case COMMAND_STATUS_VAL:
2464 return (status->frame_size != sizeof(u->rx_data.command));
2465 case STATUS_CHANGE_VAL:
2466 return (status->frame_size != sizeof(u->rx_data.status));
2467 case HOST_NOTIFICATION_VAL:
2468 return (status->frame_size < sizeof(u->rx_data.notification));
2469 case P80211_DATA_VAL:
2470 case P8023_DATA_VAL:
2471#ifdef CONFIG_IPW2100_MONITOR
2472 return 0;
2473#else
2474 switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
2475 case IEEE80211_FTYPE_MGMT:
2476 case IEEE80211_FTYPE_CTL:
2477 return 0;
2478 case IEEE80211_FTYPE_DATA:
2479 return (status->frame_size >
2480 IPW_MAX_802_11_PAYLOAD_LENGTH);
2481 }
2482#endif
2483 }
2484
2485 return 1;
2486}
2487
2488/*
2489 * ipw2100 interrupts are disabled at this point, and the ISR
2490 * is the only code that calls this method. So, we do not need
2491 * to play with any locks.
2492 *
2493 * RX Queue works as follows:
2494 *
2495 * Read index - firmware places packet in entry identified by the
2496 * Read index and advances Read index. In this manner,
2497 * Read index will always point to the next packet to
2498 * be filled--but not yet valid.
2499 *
2500 * Write index - driver fills this entry with an unused RBD entry.
2501 *                 This entry has not been filled by the firmware yet.
2502 *
2503 * In between the W and R indexes are the RBDs that have been received
2504 * but not yet processed.
2505 *
2506 * The process of handling packets will start at WRITE + 1 and advance
2507 * until it reaches the READ index.
2508 *
2509 * The WRITE index is cached in the variable 'priv->rx_queue.next'.
2510 *
2511 */
2512static inline void __ipw2100_rx_process(struct ipw2100_priv *priv)
2513{
2514 struct ipw2100_bd_queue *rxq = &priv->rx_queue;
2515 struct ipw2100_status_queue *sq = &priv->status_queue;
2516 struct ipw2100_rx_packet *packet;
2517 u16 frame_type;
2518 u32 r, w, i, s;
2519 struct ipw2100_rx *u;
2520 struct ieee80211_rx_stats stats = {
2521 .mac_time = jiffies,
2522 };
2523
2524 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_READ_INDEX, &r);
2525 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, &w);
2526
2527 if (r >= rxq->entries) {
2528 IPW_DEBUG_RX("exit - bad read index\n");
2529 return;
2530 }
2531
2532 i = (rxq->next + 1) % rxq->entries;
2533 s = i;
2534 while (i != r) {
2535 /* IPW_DEBUG_RX("r = %d : w = %d : processing = %d\n",
2536 r, rxq->next, i); */
2537
2538 packet = &priv->rx_buffers[i];
2539
2540 /* Sync the DMA for the STATUS buffer so CPU is sure to get
2541 * the correct values */
2542 pci_dma_sync_single_for_cpu(
2543 priv->pci_dev,
2544 sq->nic + sizeof(struct ipw2100_status) * i,
2545 sizeof(struct ipw2100_status),
2546 PCI_DMA_FROMDEVICE);
2547
2548 /* Sync the DMA for the RX buffer so CPU is sure to get
2549 * the correct values */
2550 pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
2551 sizeof(struct ipw2100_rx),
2552 PCI_DMA_FROMDEVICE);
2553
2554 if (unlikely(ipw2100_corruption_check(priv, i))) {
2555 ipw2100_corruption_detected(priv, i);
2556 goto increment;
2557 }
2558
2559 u = packet->rxp;
2560 frame_type = sq->drv[i].status_fields &
2561 STATUS_TYPE_MASK;
2562 stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM;
2563 stats.len = sq->drv[i].frame_size;
2564
2565 stats.mask = 0;
2566 if (stats.rssi != 0)
2567 stats.mask |= IEEE80211_STATMASK_RSSI;
2568 stats.freq = IEEE80211_24GHZ_BAND;
2569
2570 IPW_DEBUG_RX(
2571 "%s: '%s' frame type received (%d).\n",
2572 priv->net_dev->name, frame_types[frame_type],
2573 stats.len);
2574
2575 switch (frame_type) {
2576 case COMMAND_STATUS_VAL:
2577 /* Reset Rx watchdog */
2578 isr_rx_complete_command(
2579 priv, &u->rx_data.command);
2580 break;
2581
2582 case STATUS_CHANGE_VAL:
2583 isr_status_change(priv, u->rx_data.status);
2584 break;
2585
2586 case P80211_DATA_VAL:
2587 case P8023_DATA_VAL:
2588#ifdef CONFIG_IPW2100_MONITOR
2589 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
2590 isr_rx(priv, i, &stats);
2591 break;
2592 }
2593#endif
2594 if (stats.len < sizeof(u->rx_data.header))
2595 break;
2596 switch (WLAN_FC_GET_TYPE(u->rx_data.header.
2597 frame_ctl)) {
2598 case IEEE80211_FTYPE_MGMT:
2599 ieee80211_rx_mgt(priv->ieee,
2600 &u->rx_data.header,
2601 &stats);
2602 break;
2603
2604 case IEEE80211_FTYPE_CTL:
2605 break;
2606
2607 case IEEE80211_FTYPE_DATA:
2608 isr_rx(priv, i, &stats);
2609 break;
2610
2611 }
2612 break;
2613 }
2614
2615 increment:
2616 /* clear status field associated with this RBD */
2617 rxq->drv[i].status.info.field = 0;
2618
2619 i = (i + 1) % rxq->entries;
2620 }
2621
2622 if (i != s) {
2623 /* backtrack one entry, wrapping to end if at 0 */
2624 rxq->next = (i ? i : rxq->entries) - 1;
2625
2626 write_register(priv->net_dev,
2627 IPW_MEM_HOST_SHARED_RX_WRITE_INDEX,
2628 rxq->next);
2629 }
2630}
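
/*
 * Illustrative sketch, not part of the driver: the modular ring arithmetic
 * __ipw2100_rx_process() relies on.  With 'entries' slots, processing
 * starts at next + 1 and runs up to (but not including) the firmware's
 * read index r, so the number of RBDs waiting to be processed is the
 * modular distance below.  The helper is hypothetical.
 */
static inline u32 example_rx_pending(u32 r, u32 next, u32 entries)
{
	return (r + entries - (next + 1)) % entries;
}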
2631
2632
2633/*
2634 * __ipw2100_tx_process
2635 *
2636 * This routine will determine whether the next packet on
2637 * the fw_pend_list has been processed by the firmware yet.
2638 *
2639 * If not, then it does nothing and returns.
2640 *
2641 * If so, then it removes the item from the fw_pend_list, frees
2642 * any associated storage, and places the item back on the
2643 * free list of its source (either msg_free_list or tx_free_list)
2644 *
2645 * TX Queue works as follows:
2646 *
2647 * Read index - points to the next TBD that the firmware will
2648 * process. The firmware will read the data, and once
2649 * done processing, it will advance the Read index.
2650 *
2651 * Write index - driver fills this entry with a constructed TBD
2652 * entry. The Write index is not advanced until the
2653 * packet has been configured.
2654 *
2655 * In between the W and R indexes are the TBDs that have NOT been
2656 * processed. Lagging behind the R index are packets that have
2657 * been processed but have not been freed by the driver.
2658 *
2659 * In order to free old storage, an internal index will be maintained
2660 * that points to the next packet to be freed. When all used
2661 * packets have been freed, the oldest index will be the same as the
2662 * firmware's read index.
2663 *
2664 * The OLDEST index is cached in the variable 'priv->tx_queue.oldest'
2665 *
2666 * Because the TBD structure can not contain arbitrary data, the
2667 * driver must keep an internal queue of cached allocations such that
2668 * it can put that data back into the tx_free_list and msg_free_list
2669 * for use by future command and data packets.
2670 *
2671 */
2672static inline int __ipw2100_tx_process(struct ipw2100_priv *priv)
2673{
2674 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2675 struct ipw2100_bd *tbd;
2676 struct list_head *element;
2677 struct ipw2100_tx_packet *packet;
2678 int descriptors_used;
2679 int e, i;
2680 u32 r, w, frag_num = 0;
2681
2682 if (list_empty(&priv->fw_pend_list))
2683 return 0;
2684
2685 element = priv->fw_pend_list.next;
2686
2687 packet = list_entry(element, struct ipw2100_tx_packet, list);
2688 tbd = &txq->drv[packet->index];
2689
2690 /* Determine how many TBD entries must be finished... */
2691 switch (packet->type) {
2692 case COMMAND:
2693 /* COMMAND uses only one slot; don't advance */
2694 descriptors_used = 1;
2695 e = txq->oldest;
2696 break;
2697
2698 case DATA:
2699 /* DATA uses two slots; advance and loop position. */
2700 descriptors_used = tbd->num_fragments;
2701 frag_num = tbd->num_fragments - 1;
2702 e = txq->oldest + frag_num;
2703 e %= txq->entries;
2704 break;
2705
2706 default:
2707 printk(KERN_WARNING DRV_NAME ": %s: Bad fw_pend_list entry!\n",
2708 priv->net_dev->name);
2709 return 0;
2710 }
2711
2712 /* if the last TBD is not done by NIC yet, then packet is
2713 * not ready to be released.
2714 *
2715 */
2716 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
2717 &r);
2718 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
2719 &w);
2720 if (w != txq->next)
2721 printk(KERN_WARNING DRV_NAME ": %s: write index mismatch\n",
2722 priv->net_dev->name);
2723
2724 /*
2725	 * txq->next is the driver's cached write index (the next TBD slot to
2726	 * fill), txq->oldest is the oldest entry not yet reclaimed, and r is
2727	 * the firmware's read index (the next packet it will process).
2728 */
2729
2730
2731 /*
2732 * Quick graphic to help you visualize the following
2733 * if / else statement
2734 *
2735 * ===>| s---->|===============
2736 * e>|
2737 * | a | b | c | d | e | f | g | h | i | j | k | l
2738 * r---->|
2739 * w
2740 *
2741 * w - updated by driver
2742 * r - updated by firmware
2743 * s - start of oldest BD entry (txq->oldest)
2744 * e - end of oldest BD entry
2745 *
2746 */
2747 if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) {
2748 IPW_DEBUG_TX("exit - no processed packets ready to release.\n");
2749 return 0;
2750 }
2751
2752 list_del(element);
2753 DEC_STAT(&priv->fw_pend_stat);
2754
2755#ifdef CONFIG_IPW_DEBUG
2756 {
2757 int i = txq->oldest;
2758 IPW_DEBUG_TX(
2759 "TX%d V=%p P=%04X T=%04X L=%d\n", i,
2760 &txq->drv[i],
2761 (u32)(txq->nic + i * sizeof(struct ipw2100_bd)),
2762 txq->drv[i].host_addr,
2763 txq->drv[i].buf_length);
2764
2765 if (packet->type == DATA) {
2766 i = (i + 1) % txq->entries;
2767
2768 IPW_DEBUG_TX(
2769 "TX%d V=%p P=%04X T=%04X L=%d\n", i,
2770 &txq->drv[i],
2771 (u32)(txq->nic + i *
2772 sizeof(struct ipw2100_bd)),
2773 (u32)txq->drv[i].host_addr,
2774 txq->drv[i].buf_length);
2775 }
2776 }
2777#endif
2778
2779 switch (packet->type) {
2780 case DATA:
2781 if (txq->drv[txq->oldest].status.info.fields.txType != 0)
2782 printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch. "
2783 "Expecting DATA TBD but pulled "
2784 "something else: ids %d=%d.\n",
2785 priv->net_dev->name, txq->oldest, packet->index);
2786
2787 /* DATA packet; we have to unmap and free the SKB */
2788 priv->ieee->stats.tx_packets++;
2789 for (i = 0; i < frag_num; i++) {
2790 tbd = &txq->drv[(packet->index + 1 + i) %
2791 txq->entries];
2792
2793 IPW_DEBUG_TX(
2794 "TX%d P=%08x L=%d\n",
2795 (packet->index + 1 + i) % txq->entries,
2796 tbd->host_addr, tbd->buf_length);
2797
2798 pci_unmap_single(priv->pci_dev,
2799 tbd->host_addr,
2800 tbd->buf_length,
2801 PCI_DMA_TODEVICE);
2802 }
2803
2804 priv->ieee->stats.tx_bytes += packet->info.d_struct.txb->payload_size;
2805 ieee80211_txb_free(packet->info.d_struct.txb);
2806 packet->info.d_struct.txb = NULL;
2807
2808 list_add_tail(element, &priv->tx_free_list);
2809 INC_STAT(&priv->tx_free_stat);
2810
2811 /* We have a free slot in the Tx queue, so wake up the
2812 * transmit layer if it is stopped. */
2813 if (priv->status & STATUS_ASSOCIATED &&
2814 netif_queue_stopped(priv->net_dev)) {
2815 IPW_DEBUG_INFO(KERN_INFO
2816 "%s: Waking net queue.\n",
2817 priv->net_dev->name);
2818 netif_wake_queue(priv->net_dev);
2819 }
2820
2821 /* A packet was processed by the hardware, so update the
2822 * watchdog */
2823 priv->net_dev->trans_start = jiffies;
2824
2825 break;
2826
2827 case COMMAND:
2828 if (txq->drv[txq->oldest].status.info.fields.txType != 1)
2829 printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch. "
2830 "Expecting COMMAND TBD but pulled "
2831 "something else: ids %d=%d.\n",
2832 priv->net_dev->name, txq->oldest, packet->index);
2833
2834#ifdef CONFIG_IPW_DEBUG
2835 if (packet->info.c_struct.cmd->host_command_reg <
2836 sizeof(command_types) / sizeof(*command_types))
2837 IPW_DEBUG_TX(
2838 "Command '%s (%d)' processed: %d.\n",
2839 command_types[packet->info.c_struct.cmd->host_command_reg],
2840 packet->info.c_struct.cmd->host_command_reg,
2841 packet->info.c_struct.cmd->cmd_status_reg);
2842#endif
2843
2844 list_add_tail(element, &priv->msg_free_list);
2845 INC_STAT(&priv->msg_free_stat);
2846 break;
2847 }
2848
2849 /* advance oldest used TBD pointer to start of next entry */
2850 txq->oldest = (e + 1) % txq->entries;
2851 /* increase available TBDs number */
2852 txq->available += descriptors_used;
2853 SET_STAT(&priv->txq_stat, txq->available);
2854
2855 IPW_DEBUG_TX("packet latency (send to process) %ld jiffies\n",
2856 jiffies - packet->jiffy_start);
2857
2858 return (!list_empty(&priv->fw_pend_list));
2859}
2860
2861
2862static inline void __ipw2100_tx_complete(struct ipw2100_priv *priv)
2863{
2864 int i = 0;
2865
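	/* Reap completed TBDs until the fw_pend_list drains, capping the
	 * loop at 200 iterations so the tasklet cannot spin indefinitely. */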
2866 while (__ipw2100_tx_process(priv) && i < 200) i++;
2867
2868 if (i == 200) {
2869 printk(KERN_WARNING DRV_NAME ": "
2870 "%s: Driver is running slow (%d iters).\n",
2871 priv->net_dev->name, i);
2872 }
2873}
2874
2875
2876static void ipw2100_tx_send_commands(struct ipw2100_priv *priv)
2877{
2878 struct list_head *element;
2879 struct ipw2100_tx_packet *packet;
2880 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2881 struct ipw2100_bd *tbd;
2882 int next = txq->next;
2883
2884 while (!list_empty(&priv->msg_pend_list)) {
2885 /* if there isn't enough space in TBD queue, then
2886 * don't stuff a new one in.
2887 * NOTE: 3 are needed as a command will take one,
2888 * and there is a minimum of 2 that must be
2889 * maintained between the r and w indexes
2890 */
2891 if (txq->available <= 3) {
2892 IPW_DEBUG_TX("no room in tx_queue\n");
2893 break;
2894 }
2895
2896 element = priv->msg_pend_list.next;
2897 list_del(element);
2898 DEC_STAT(&priv->msg_pend_stat);
2899
2900 packet = list_entry(element,
2901 struct ipw2100_tx_packet, list);
2902
2903 IPW_DEBUG_TX("using TBD at virt=%p, phys=%p\n",
2904 &txq->drv[txq->next],
2905 (void*)(txq->nic + txq->next *
2906 sizeof(struct ipw2100_bd)));
2907
2908 packet->index = txq->next;
2909
2910 tbd = &txq->drv[txq->next];
2911
2912 /* initialize TBD */
2913 tbd->host_addr = packet->info.c_struct.cmd_phys;
2914 tbd->buf_length = sizeof(struct ipw2100_cmd_header);
2915 /* not marking number of fragments causes problems
2916 * with f/w debug version */
2917 tbd->num_fragments = 1;
2918 tbd->status.info.field =
2919 IPW_BD_STATUS_TX_FRAME_COMMAND |
2920 IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
2921
2922 /* update TBD queue counters */
2923 txq->next++;
2924 txq->next %= txq->entries;
2925 txq->available--;
2926 DEC_STAT(&priv->txq_stat);
2927
2928 list_add_tail(element, &priv->fw_pend_list);
2929 INC_STAT(&priv->fw_pend_stat);
2930 }
2931
2932 if (txq->next != next) {
2933 /* kick off the DMA by notifying firmware the
2934 * write index has moved; make sure TBD stores are sync'd */
2935 wmb();
2936 write_register(priv->net_dev,
2937 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
2938 txq->next);
2939 }
2940}
2941
2942
2943/*
2944 * ipw2100_tx_send_data
2945 *
2946 */
2947static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
2948{
2949 struct list_head *element;
2950 struct ipw2100_tx_packet *packet;
2951 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2952 struct ipw2100_bd *tbd;
2953 int next = txq->next;
2954 int i = 0;
2955 struct ipw2100_data_header *ipw_hdr;
2956 struct ieee80211_hdr *hdr;
2957
2958 while (!list_empty(&priv->tx_pend_list)) {
2959 /* if there isn't enough space in TBD queue, then
2960 * don't stuff a new one in.
2961		 * NOTE: 4 are needed as a data packet will take at least two,
2962 * and there is a minimum of 2 that must be
2963 * maintained between the r and w indexes
2964 */
2965 element = priv->tx_pend_list.next;
2966 packet = list_entry(element, struct ipw2100_tx_packet, list);
2967
2968 if (unlikely(1 + packet->info.d_struct.txb->nr_frags >
2969 IPW_MAX_BDS)) {
2970 /* TODO: Support merging buffers if more than
2971 * IPW_MAX_BDS are used */
2972 IPW_DEBUG_INFO(
2973				"%s: Maximum BD threshold exceeded. "
2974 "Increase fragmentation level.\n",
2975 priv->net_dev->name);
2976 }
2977
2978 if (txq->available <= 3 +
2979 packet->info.d_struct.txb->nr_frags) {
2980 IPW_DEBUG_TX("no room in tx_queue\n");
2981 break;
2982 }
2983
2984 list_del(element);
2985 DEC_STAT(&priv->tx_pend_stat);
2986
2987 tbd = &txq->drv[txq->next];
2988
2989 packet->index = txq->next;
2990
2991 ipw_hdr = packet->info.d_struct.data;
2992 hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb->
2993 fragments[0]->data;
2994
2995 if (priv->ieee->iw_mode == IW_MODE_INFRA) {
2996 /* To DS: Addr1 = BSSID, Addr2 = SA,
2997 Addr3 = DA */
2998 memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
2999 memcpy(ipw_hdr->dst_addr, hdr->addr3, ETH_ALEN);
3000 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
3001 /* not From/To DS: Addr1 = DA, Addr2 = SA,
3002 Addr3 = BSSID */
3003 memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
3004 memcpy(ipw_hdr->dst_addr, hdr->addr1, ETH_ALEN);
3005 }
3006
3007 ipw_hdr->host_command_reg = SEND;
3008 ipw_hdr->host_command_reg1 = 0;
3009
3010 /* For now we only support host based encryption */
3011 ipw_hdr->needs_encryption = 0;
3012 ipw_hdr->encrypted = packet->info.d_struct.txb->encrypted;
3013 if (packet->info.d_struct.txb->nr_frags > 1)
3014 ipw_hdr->fragment_size =
3015 packet->info.d_struct.txb->frag_size - IEEE80211_3ADDR_LEN;
3016 else
3017 ipw_hdr->fragment_size = 0;
3018
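		/* The first TBD carries the ipw2100_data_header; each 802.11
		 * payload fragment is then mapped into its own TBD below. */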
3019 tbd->host_addr = packet->info.d_struct.data_phys;
3020 tbd->buf_length = sizeof(struct ipw2100_data_header);
3021 tbd->num_fragments = 1 + packet->info.d_struct.txb->nr_frags;
3022 tbd->status.info.field =
3023 IPW_BD_STATUS_TX_FRAME_802_3 |
3024 IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
3025 txq->next++;
3026 txq->next %= txq->entries;
3027
3028 IPW_DEBUG_TX(
3029 "data header tbd TX%d P=%08x L=%d\n",
3030 packet->index, tbd->host_addr,
3031 tbd->buf_length);
3032#ifdef CONFIG_IPW_DEBUG
3033 if (packet->info.d_struct.txb->nr_frags > 1)
3034 IPW_DEBUG_FRAG("fragment Tx: %d frames\n",
3035 packet->info.d_struct.txb->nr_frags);
3036#endif
3037
3038 for (i = 0; i < packet->info.d_struct.txb->nr_frags; i++) {
3039 tbd = &txq->drv[txq->next];
3040 if (i == packet->info.d_struct.txb->nr_frags - 1)
3041 tbd->status.info.field =
3042 IPW_BD_STATUS_TX_FRAME_802_3 |
3043 IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
3044 else
3045 tbd->status.info.field =
3046 IPW_BD_STATUS_TX_FRAME_802_3 |
3047 IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
3048
3049 tbd->buf_length = packet->info.d_struct.txb->
3050 fragments[i]->len - IEEE80211_3ADDR_LEN;
3051
3052 tbd->host_addr = pci_map_single(
3053 priv->pci_dev,
3054 packet->info.d_struct.txb->fragments[i]->data +
3055 IEEE80211_3ADDR_LEN,
3056 tbd->buf_length,
3057 PCI_DMA_TODEVICE);
3058
3059 IPW_DEBUG_TX(
3060 "data frag tbd TX%d P=%08x L=%d\n",
3061 txq->next, tbd->host_addr, tbd->buf_length);
3062
3063 pci_dma_sync_single_for_device(
3064 priv->pci_dev, tbd->host_addr,
3065 tbd->buf_length,
3066 PCI_DMA_TODEVICE);
3067
3068 txq->next++;
3069 txq->next %= txq->entries;
3070 }
3071
3072 txq->available -= 1 + packet->info.d_struct.txb->nr_frags;
3073 SET_STAT(&priv->txq_stat, txq->available);
3074
3075 list_add_tail(element, &priv->fw_pend_list);
3076 INC_STAT(&priv->fw_pend_stat);
3077 }
3078
3079 if (txq->next != next) {
3080 /* kick off the DMA by notifying firmware the
3081 * write index has moved; make sure TBD stores are sync'd */
3082 write_register(priv->net_dev,
3083 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
3084 txq->next);
3085 }
3086 return;
3087}
3088
3089static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
3090{
3091 struct net_device *dev = priv->net_dev;
3092 unsigned long flags;
3093 u32 inta, tmp;
3094
3095 spin_lock_irqsave(&priv->low_lock, flags);
3096 ipw2100_disable_interrupts(priv);
3097
3098 read_register(dev, IPW_REG_INTA, &inta);
3099
3100 IPW_DEBUG_ISR("enter - INTA: 0x%08lX\n",
3101 (unsigned long)inta & IPW_INTERRUPT_MASK);
3102
3103 priv->in_isr++;
3104 priv->interrupts++;
3105
3106 /* We do not loop and keep polling for more interrupts as this
3107 * is frowned upon and doesn't play nicely with other potentially
3108 * chained IRQs */
3109 IPW_DEBUG_ISR("INTA: 0x%08lX\n",
3110 (unsigned long)inta & IPW_INTERRUPT_MASK);
3111
3112 if (inta & IPW2100_INTA_FATAL_ERROR) {
3113 printk(KERN_WARNING DRV_NAME
3114 ": Fatal interrupt. Scheduling firmware restart.\n");
3115 priv->inta_other++;
3116 write_register(
3117 dev, IPW_REG_INTA,
3118 IPW2100_INTA_FATAL_ERROR);
3119
3120 read_nic_dword(dev, IPW_NIC_FATAL_ERROR, &priv->fatal_error);
3121 IPW_DEBUG_INFO("%s: Fatal error value: 0x%08X\n",
3122 priv->net_dev->name, priv->fatal_error);
3123
3124 read_nic_dword(dev, IPW_ERROR_ADDR(priv->fatal_error), &tmp);
3125 IPW_DEBUG_INFO("%s: Fatal error address value: 0x%08X\n",
3126 priv->net_dev->name, tmp);
3127
3128 /* Wake up any sleeping jobs */
3129 schedule_reset(priv);
3130 }
3131
3132 if (inta & IPW2100_INTA_PARITY_ERROR) {
3133 printk(KERN_ERR DRV_NAME ": ***** PARITY ERROR INTERRUPT !!!! \n");
3134 priv->inta_other++;
3135 write_register(
3136 dev, IPW_REG_INTA,
3137 IPW2100_INTA_PARITY_ERROR);
3138 }
3139
3140 if (inta & IPW2100_INTA_RX_TRANSFER) {
3141 IPW_DEBUG_ISR("RX interrupt\n");
3142
3143 priv->rx_interrupts++;
3144
3145 write_register(
3146 dev, IPW_REG_INTA,
3147 IPW2100_INTA_RX_TRANSFER);
3148
3149 __ipw2100_rx_process(priv);
3150 __ipw2100_tx_complete(priv);
3151 }
3152
3153 if (inta & IPW2100_INTA_TX_TRANSFER) {
3154 IPW_DEBUG_ISR("TX interrupt\n");
3155
3156 priv->tx_interrupts++;
3157
3158 write_register(dev, IPW_REG_INTA,
3159 IPW2100_INTA_TX_TRANSFER);
3160
3161 __ipw2100_tx_complete(priv);
3162 ipw2100_tx_send_commands(priv);
3163 ipw2100_tx_send_data(priv);
3164 }
3165
3166 if (inta & IPW2100_INTA_TX_COMPLETE) {
3167 IPW_DEBUG_ISR("TX complete\n");
3168 priv->inta_other++;
3169 write_register(
3170 dev, IPW_REG_INTA,
3171 IPW2100_INTA_TX_COMPLETE);
3172
3173 __ipw2100_tx_complete(priv);
3174 }
3175
3176 if (inta & IPW2100_INTA_EVENT_INTERRUPT) {
3177 /* ipw2100_handle_event(dev); */
3178 priv->inta_other++;
3179 write_register(
3180 dev, IPW_REG_INTA,
3181 IPW2100_INTA_EVENT_INTERRUPT);
3182 }
3183
3184 if (inta & IPW2100_INTA_FW_INIT_DONE) {
3185 IPW_DEBUG_ISR("FW init done interrupt\n");
3186 priv->inta_other++;
3187
3188 read_register(dev, IPW_REG_INTA, &tmp);
3189 if (tmp & (IPW2100_INTA_FATAL_ERROR |
3190 IPW2100_INTA_PARITY_ERROR)) {
3191 write_register(
3192 dev, IPW_REG_INTA,
3193 IPW2100_INTA_FATAL_ERROR |
3194 IPW2100_INTA_PARITY_ERROR);
3195 }
3196
3197 write_register(dev, IPW_REG_INTA,
3198 IPW2100_INTA_FW_INIT_DONE);
3199 }
3200
3201 if (inta & IPW2100_INTA_STATUS_CHANGE) {
3202 IPW_DEBUG_ISR("Status change interrupt\n");
3203 priv->inta_other++;
3204 write_register(
3205 dev, IPW_REG_INTA,
3206 IPW2100_INTA_STATUS_CHANGE);
3207 }
3208
3209 if (inta & IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE) {
3210 IPW_DEBUG_ISR("slave host mode interrupt\n");
3211 priv->inta_other++;
3212 write_register(
3213 dev, IPW_REG_INTA,
3214 IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE);
3215 }
3216
3217 priv->in_isr--;
3218 ipw2100_enable_interrupts(priv);
3219
3220 spin_unlock_irqrestore(&priv->low_lock, flags);
3221
3222 IPW_DEBUG_ISR("exit\n");
3223}
3224
3225
3226static irqreturn_t ipw2100_interrupt(int irq, void *data,
3227 struct pt_regs *regs)
3228{
3229 struct ipw2100_priv *priv = data;
3230 u32 inta, inta_mask;
3231
3232 if (!data)
3233 return IRQ_NONE;
3234
3235 spin_lock(&priv->low_lock);
3236
3237 /* We check to see if we should be ignoring interrupts before
3238 * we touch the hardware. During ucode load if we try and handle
3239 * an interrupt we can cause keyboard problems as well as cause
3240 * the ucode to fail to initialize */
3241 if (!(priv->status & STATUS_INT_ENABLED)) {
3242 /* Shared IRQ */
3243 goto none;
3244 }
3245
3246 read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
3247 read_register(priv->net_dev, IPW_REG_INTA, &inta);
3248
3249 if (inta == 0xFFFFFFFF) {
3250 /* Hardware disappeared */
3251 printk(KERN_WARNING DRV_NAME ": IRQ INTA == 0xFFFFFFFF\n");
3252 goto none;
3253 }
3254
3255 inta &= IPW_INTERRUPT_MASK;
3256
3257 if (!(inta & inta_mask)) {
3258 /* Shared interrupt */
3259 goto none;
3260 }
3261
3262 /* We disable the hardware interrupt here just to prevent unneeded
3263 * calls to be made. We disable this again within the actual
3264 * work tasklet, so if another part of the code re-enables the
3265 * interrupt, that is fine */
3266 ipw2100_disable_interrupts(priv);
3267
3268 tasklet_schedule(&priv->irq_tasklet);
3269 spin_unlock(&priv->low_lock);
3270
3271 return IRQ_HANDLED;
3272 none:
3273 spin_unlock(&priv->low_lock);
3274 return IRQ_NONE;
3275}
3276
3277static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev)
3278{
3279 struct ipw2100_priv *priv = ieee80211_priv(dev);
3280 struct list_head *element;
3281 struct ipw2100_tx_packet *packet;
3282 unsigned long flags;
3283
3284 spin_lock_irqsave(&priv->low_lock, flags);
3285
3286 if (!(priv->status & STATUS_ASSOCIATED)) {
3287 IPW_DEBUG_INFO("Can not transmit when not connected.\n");
3288 priv->ieee->stats.tx_carrier_errors++;
3289 netif_stop_queue(dev);
3290 goto fail_unlock;
3291 }
3292
3293 if (list_empty(&priv->tx_free_list))
3294 goto fail_unlock;
3295
3296 element = priv->tx_free_list.next;
3297 packet = list_entry(element, struct ipw2100_tx_packet, list);
3298
3299 packet->info.d_struct.txb = txb;
3300
3301 IPW_DEBUG_TX("Sending fragment (%d bytes):\n",
3302 txb->fragments[0]->len);
3303 printk_buf(IPW_DL_TX, txb->fragments[0]->data,
3304 txb->fragments[0]->len);
3305
3306 packet->jiffy_start = jiffies;
3307
3308 list_del(element);
3309 DEC_STAT(&priv->tx_free_stat);
3310
3311 list_add_tail(element, &priv->tx_pend_list);
3312 INC_STAT(&priv->tx_pend_stat);
3313
3314 ipw2100_tx_send_data(priv);
3315
3316 spin_unlock_irqrestore(&priv->low_lock, flags);
3317 return 0;
3318
3319 fail_unlock:
3320 netif_stop_queue(dev);
3321 spin_unlock_irqrestore(&priv->low_lock, flags);
3322 return 1;
3323}
3324
3325
3326static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
3327{
3328 int i, j, err = -EINVAL;
3329 void *v;
3330 dma_addr_t p;
3331
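	/* Allocate the command packet table, plus one DMA-coherent command
	 * header per entry; on partial failure everything is unwound below. */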
3332 priv->msg_buffers = (struct ipw2100_tx_packet *)kmalloc(
3333 IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
3334 GFP_KERNEL);
3335 if (!priv->msg_buffers) {
3336 printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg "
3337 "buffers.\n", priv->net_dev->name);
3338 return -ENOMEM;
3339 }
3340
3341 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
3342 v = pci_alloc_consistent(
3343 priv->pci_dev,
3344 sizeof(struct ipw2100_cmd_header),
3345 &p);
3346 if (!v) {
3347 printk(KERN_ERR DRV_NAME ": "
3348 "%s: PCI alloc failed for msg "
3349 "buffers.\n",
3350 priv->net_dev->name);
3351 err = -ENOMEM;
3352 break;
3353 }
3354
3355 memset(v, 0, sizeof(struct ipw2100_cmd_header));
3356
3357 priv->msg_buffers[i].type = COMMAND;
3358 priv->msg_buffers[i].info.c_struct.cmd =
3359 (struct ipw2100_cmd_header*)v;
3360 priv->msg_buffers[i].info.c_struct.cmd_phys = p;
3361 }
3362
3363 if (i == IPW_COMMAND_POOL_SIZE)
3364 return 0;
3365
3366 for (j = 0; j < i; j++) {
3367 pci_free_consistent(
3368 priv->pci_dev,
3369 sizeof(struct ipw2100_cmd_header),
3370 priv->msg_buffers[j].info.c_struct.cmd,
3371 priv->msg_buffers[j].info.c_struct.cmd_phys);
3372 }
3373
3374 kfree(priv->msg_buffers);
3375 priv->msg_buffers = NULL;
3376
3377 return err;
3378}
3379
3380static int ipw2100_msg_initialize(struct ipw2100_priv *priv)
3381{
3382 int i;
3383
3384 INIT_LIST_HEAD(&priv->msg_free_list);
3385 INIT_LIST_HEAD(&priv->msg_pend_list);
3386
3387 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++)
3388 list_add_tail(&priv->msg_buffers[i].list, &priv->msg_free_list);
3389 SET_STAT(&priv->msg_free_stat, i);
3390
3391 return 0;
3392}
3393
3394static void ipw2100_msg_free(struct ipw2100_priv *priv)
3395{
3396 int i;
3397
3398 if (!priv->msg_buffers)
3399 return;
3400
3401 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
3402 pci_free_consistent(priv->pci_dev,
3403 sizeof(struct ipw2100_cmd_header),
3404 priv->msg_buffers[i].info.c_struct.cmd,
3405 priv->msg_buffers[i].info.c_struct.cmd_phys);
3406 }
3407
3408 kfree(priv->msg_buffers);
3409 priv->msg_buffers = NULL;
3410}
3411
3412static ssize_t show_pci(struct device *d, struct device_attribute *attr,
3413 char *buf)
3414{
3415 struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
3416 char *out = buf;
3417 int i, j;
3418 u32 val;
3419
3420 for (i = 0; i < 16; i++) {
3421 out += sprintf(out, "[%08X] ", i * 16);
3422 for (j = 0; j < 16; j += 4) {
3423 pci_read_config_dword(pci_dev, i * 16 + j, &val);
3424 out += sprintf(out, "%08X ", val);
3425 }
3426 out += sprintf(out, "\n");
3427 }
3428
3429 return out - buf;
3430}
3431static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL);
3432
3433static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
3434 char *buf)
3435{
3436 struct ipw2100_priv *p = d->driver_data;
3437 return sprintf(buf, "0x%08x\n", (int)p->config);
3438}
3439static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
3440
3441static ssize_t show_status(struct device *d, struct device_attribute *attr,
3442 char *buf)
3443{
3444 struct ipw2100_priv *p = d->driver_data;
3445 return sprintf(buf, "0x%08x\n", (int)p->status);
3446}
3447static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
3448
3449static ssize_t show_capability(struct device *d, struct device_attribute *attr,
3450 char *buf)
3451{
3452 struct ipw2100_priv *p = d->driver_data;
3453 return sprintf(buf, "0x%08x\n", (int)p->capability);
3454}
3455static DEVICE_ATTR(capability, S_IRUGO, show_capability, NULL);
3456
3457
3458#define IPW2100_REG(x) { IPW_ ##x, #x }
3459static const struct {
3460 u32 addr;
3461 const char *name;
3462} hw_data[] = {
3463 IPW2100_REG(REG_GP_CNTRL),
3464 IPW2100_REG(REG_GPIO),
3465 IPW2100_REG(REG_INTA),
3466 IPW2100_REG(REG_INTA_MASK),
3467 IPW2100_REG(REG_RESET_REG),
3468};
3469#define IPW2100_NIC(x, s) { x, #x, s }
3470static const struct {
3471 u32 addr;
3472 const char *name;
3473 size_t size;
3474} nic_data[] = {
3475 IPW2100_NIC(IPW2100_CONTROL_REG, 2),
3476 IPW2100_NIC(0x210014, 1),
3477 IPW2100_NIC(0x210000, 1),
3478};
3479#define IPW2100_ORD(x, d) { IPW_ORD_ ##x, #x, d }
3480static const struct {
3481 u8 index;
3482 const char *name;
3483 const char *desc;
3484} ord_data[] = {
3485 IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
3486 IPW2100_ORD(STAT_TX_HOST_COMPLETE, "successful Host Tx's (MSDU)"),
3487 IPW2100_ORD(STAT_TX_DIR_DATA, "successful Directed Tx's (MSDU)"),
3488 IPW2100_ORD(STAT_TX_DIR_DATA1, "successful Directed Tx's (MSDU) @ 1MB"),
3489 IPW2100_ORD(STAT_TX_DIR_DATA2, "successful Directed Tx's (MSDU) @ 2MB"),
3490 IPW2100_ORD(STAT_TX_DIR_DATA5_5, "successful Directed Tx's (MSDU) @ 5_5MB"),
3491 IPW2100_ORD(STAT_TX_DIR_DATA11, "successful Directed Tx's (MSDU) @ 11MB"),
3492 IPW2100_ORD(STAT_TX_NODIR_DATA1, "successful Non_Directed Tx's (MSDU) @ 1MB"),
3493 IPW2100_ORD(STAT_TX_NODIR_DATA2, "successful Non_Directed Tx's (MSDU) @ 2MB"),
3494 IPW2100_ORD(STAT_TX_NODIR_DATA5_5, "successful Non_Directed Tx's (MSDU) @ 5.5MB"),
3495 IPW2100_ORD(STAT_TX_NODIR_DATA11, "successful Non_Directed Tx's (MSDU) @ 11MB"),
3496 IPW2100_ORD(STAT_NULL_DATA, "successful NULL data Tx's"),
3497 IPW2100_ORD(STAT_TX_RTS, "successful Tx RTS"),
3498 IPW2100_ORD(STAT_TX_CTS, "successful Tx CTS"),
3499 IPW2100_ORD(STAT_TX_ACK, "successful Tx ACK"),
3500 IPW2100_ORD(STAT_TX_ASSN, "successful Association Tx's"),
3501 IPW2100_ORD(STAT_TX_ASSN_RESP, "successful Association response Tx's"),
3502 IPW2100_ORD(STAT_TX_REASSN, "successful Reassociation Tx's"),
3503 IPW2100_ORD(STAT_TX_REASSN_RESP, "successful Reassociation response Tx's"),
3504 IPW2100_ORD(STAT_TX_PROBE, "probes successfully transmitted"),
3505 IPW2100_ORD(STAT_TX_PROBE_RESP, "probe responses successfully transmitted"),
3506 IPW2100_ORD(STAT_TX_BEACON, "tx beacon"),
3507 IPW2100_ORD(STAT_TX_ATIM, "Tx ATIM"),
3508 IPW2100_ORD(STAT_TX_DISASSN, "successful Disassociation TX"),
3509 IPW2100_ORD(STAT_TX_AUTH, "successful Authentication Tx"),
3510 IPW2100_ORD(STAT_TX_DEAUTH, "successful Deauthentication TX"),
3511 IPW2100_ORD(STAT_TX_TOTAL_BYTES, "Total successful Tx data bytes"),
3512 IPW2100_ORD(STAT_TX_RETRIES, "Tx retries"),
3513 IPW2100_ORD(STAT_TX_RETRY1, "Tx retries at 1MBPS"),
3514 IPW2100_ORD(STAT_TX_RETRY2, "Tx retries at 2MBPS"),
3515 IPW2100_ORD(STAT_TX_RETRY5_5, "Tx retries at 5.5MBPS"),
3516 IPW2100_ORD(STAT_TX_RETRY11, "Tx retries at 11MBPS"),
3517 IPW2100_ORD(STAT_TX_FAILURES, "Tx Failures"),
3518 IPW2100_ORD(STAT_TX_MAX_TRIES_IN_HOP,"times max tries in a hop failed"),
3519 IPW2100_ORD(STAT_TX_DISASSN_FAIL, "times disassociation failed"),
3520 IPW2100_ORD(STAT_TX_ERR_CTS, "missed/bad CTS frames"),
3521 IPW2100_ORD(STAT_TX_ERR_ACK, "tx err due to acks"),
3522 IPW2100_ORD(STAT_RX_HOST, "packets passed to host"),
3523 IPW2100_ORD(STAT_RX_DIR_DATA, "directed packets"),
3524 IPW2100_ORD(STAT_RX_DIR_DATA1, "directed packets at 1MB"),
3525 IPW2100_ORD(STAT_RX_DIR_DATA2, "directed packets at 2MB"),
3526 IPW2100_ORD(STAT_RX_DIR_DATA5_5, "directed packets at 5.5MB"),
3527 IPW2100_ORD(STAT_RX_DIR_DATA11, "directed packets at 11MB"),
3528 IPW2100_ORD(STAT_RX_NODIR_DATA,"nondirected packets"),
3529 IPW2100_ORD(STAT_RX_NODIR_DATA1, "nondirected packets at 1MB"),
3530 IPW2100_ORD(STAT_RX_NODIR_DATA2, "nondirected packets at 2MB"),
3531 IPW2100_ORD(STAT_RX_NODIR_DATA5_5, "nondirected packets at 5.5MB"),
3532 IPW2100_ORD(STAT_RX_NODIR_DATA11, "nondirected packets at 11MB"),
3533 IPW2100_ORD(STAT_RX_NULL_DATA, "null data rx's"),
3534 IPW2100_ORD(STAT_RX_RTS, "Rx RTS"),
3535 IPW2100_ORD(STAT_RX_CTS, "Rx CTS"),
3536 IPW2100_ORD(STAT_RX_ACK, "Rx ACK"),
3537 IPW2100_ORD(STAT_RX_CFEND, "Rx CF End"),
3538 IPW2100_ORD(STAT_RX_CFEND_ACK, "Rx CF End + CF Ack"),
3539 IPW2100_ORD(STAT_RX_ASSN, "Association Rx's"),
3540 IPW2100_ORD(STAT_RX_ASSN_RESP, "Association response Rx's"),
3541 IPW2100_ORD(STAT_RX_REASSN, "Reassociation Rx's"),
3542 IPW2100_ORD(STAT_RX_REASSN_RESP, "Reassociation response Rx's"),
3543 IPW2100_ORD(STAT_RX_PROBE, "probe Rx's"),
3544 IPW2100_ORD(STAT_RX_PROBE_RESP, "probe response Rx's"),
3545 IPW2100_ORD(STAT_RX_BEACON, "Rx beacon"),
3546 IPW2100_ORD(STAT_RX_ATIM, "Rx ATIM"),
3547 IPW2100_ORD(STAT_RX_DISASSN, "disassociation Rx"),
3548 IPW2100_ORD(STAT_RX_AUTH, "authentication Rx"),
3549 IPW2100_ORD(STAT_RX_DEAUTH, "deauthentication Rx"),
3550 IPW2100_ORD(STAT_RX_TOTAL_BYTES,"Total rx data bytes received"),
3551 IPW2100_ORD(STAT_RX_ERR_CRC, "packets with Rx CRC error"),
3552 IPW2100_ORD(STAT_RX_ERR_CRC1, "Rx CRC errors at 1MB"),
3553 IPW2100_ORD(STAT_RX_ERR_CRC2, "Rx CRC errors at 2MB"),
3554 IPW2100_ORD(STAT_RX_ERR_CRC5_5, "Rx CRC errors at 5.5MB"),
3555 IPW2100_ORD(STAT_RX_ERR_CRC11, "Rx CRC errors at 11MB"),
3556 IPW2100_ORD(STAT_RX_DUPLICATE1, "duplicate rx packets at 1MB"),
3557 IPW2100_ORD(STAT_RX_DUPLICATE2, "duplicate rx packets at 2MB"),
3558 IPW2100_ORD(STAT_RX_DUPLICATE5_5, "duplicate rx packets at 5.5MB"),
3559 IPW2100_ORD(STAT_RX_DUPLICATE11, "duplicate rx packets at 11MB"),
3560 IPW2100_ORD(STAT_RX_DUPLICATE, "duplicate rx packets"),
3561 IPW2100_ORD(PERS_DB_LOCK, "locking fw permanent db"),
3562 IPW2100_ORD(PERS_DB_SIZE, "size of fw permanent db"),
3563 IPW2100_ORD(PERS_DB_ADDR, "address of fw permanent db"),
3564 IPW2100_ORD(STAT_RX_INVALID_PROTOCOL, "rx frames with invalid protocol"),
3565 IPW2100_ORD(SYS_BOOT_TIME, "Boot time"),
3566 IPW2100_ORD(STAT_RX_NO_BUFFER, "rx frames rejected due to no buffer"),
3567 IPW2100_ORD(STAT_RX_MISSING_FRAG, "rx frames dropped due to missing fragment"),
3568 IPW2100_ORD(STAT_RX_ORPHAN_FRAG, "rx frames dropped due to non-sequential fragment"),
3569 IPW2100_ORD(STAT_RX_ORPHAN_FRAME, "rx frames dropped due to unmatched 1st frame"),
3570 IPW2100_ORD(STAT_RX_FRAG_AGEOUT, "rx frames dropped due to uncompleted frame"),
3571 IPW2100_ORD(STAT_RX_ICV_ERRORS, "ICV errors during decryption"),
3572 IPW2100_ORD(STAT_PSP_SUSPENSION,"times adapter suspended"),
3573 IPW2100_ORD(STAT_PSP_BCN_TIMEOUT, "beacon timeout"),
3574 IPW2100_ORD(STAT_PSP_POLL_TIMEOUT, "poll response timeouts"),
3575 IPW2100_ORD(STAT_PSP_NONDIR_TIMEOUT, "timeouts waiting for last {broad,multi}cast pkt"),
3576 IPW2100_ORD(STAT_PSP_RX_DTIMS, "PSP DTIMs received"),
3577 IPW2100_ORD(STAT_PSP_RX_TIMS, "PSP TIMs received"),
3578 IPW2100_ORD(STAT_PSP_STATION_ID,"PSP Station ID"),
3579 IPW2100_ORD(LAST_ASSN_TIME, "RTC time of last association"),
3580 IPW2100_ORD(STAT_PERCENT_MISSED_BCNS,"current calculation of % missed beacons"),
3581 IPW2100_ORD(STAT_PERCENT_RETRIES,"current calculation of % missed tx retries"),
3582 IPW2100_ORD(ASSOCIATED_AP_PTR, "0 if not associated, else pointer to AP table entry"),
3583	IPW2100_ORD(AVAILABLE_AP_CNT, "APs described in the AP table"),
3584 IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"),
3585 IPW2100_ORD(STAT_AP_ASSNS, "associations"),
3586 IPW2100_ORD(STAT_ASSN_FAIL, "association failures"),
3587 IPW2100_ORD(STAT_ASSN_RESP_FAIL,"failures due to response fail"),
3588 IPW2100_ORD(STAT_FULL_SCANS, "full scans"),
3589 IPW2100_ORD(CARD_DISABLED, "Card Disabled"),
3590 IPW2100_ORD(STAT_ROAM_INHIBIT, "times roaming was inhibited due to activity"),
3591 IPW2100_ORD(RSSI_AT_ASSN, "RSSI of associated AP at time of association"),
3592 IPW2100_ORD(STAT_ASSN_CAUSE1, "reassociation: no probe response or TX on hop"),
3593 IPW2100_ORD(STAT_ASSN_CAUSE2, "reassociation: poor tx/rx quality"),
3594	IPW2100_ORD(STAT_ASSN_CAUSE3, "reassociation: tx/rx quality (excessive AP load)"),
3595 IPW2100_ORD(STAT_ASSN_CAUSE4, "reassociation: AP RSSI level"),
3596 IPW2100_ORD(STAT_ASSN_CAUSE5, "reassociations due to load leveling"),
3597 IPW2100_ORD(STAT_AUTH_FAIL, "times authentication failed"),
3598 IPW2100_ORD(STAT_AUTH_RESP_FAIL,"times authentication response failed"),
3599 IPW2100_ORD(STATION_TABLE_CNT, "entries in association table"),
3600 IPW2100_ORD(RSSI_AVG_CURR, "Current avg RSSI"),
3601 IPW2100_ORD(POWER_MGMT_MODE, "Power mode - 0=CAM, 1=PSP"),
3602 IPW2100_ORD(COUNTRY_CODE, "IEEE country code as recv'd from beacon"),
3603	IPW2100_ORD(COUNTRY_CHANNELS, "channels supported by country"),
3604 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
3605 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
3606 IPW2100_ORD(ANTENNA_DIVERSITY, "TRUE if antenna diversity is disabled"),
3607 IPW2100_ORD(DTIM_PERIOD, "beacon intervals between DTIMs"),
3608 IPW2100_ORD(OUR_FREQ, "current radio freq lower digits - channel ID"),
3609 IPW2100_ORD(RTC_TIME, "current RTC time"),
3610 IPW2100_ORD(PORT_TYPE, "operating mode"),
3611 IPW2100_ORD(CURRENT_TX_RATE, "current tx rate"),
3612 IPW2100_ORD(SUPPORTED_RATES, "supported tx rates"),
3613 IPW2100_ORD(ATIM_WINDOW, "current ATIM Window"),
3614 IPW2100_ORD(BASIC_RATES, "basic tx rates"),
3615 IPW2100_ORD(NIC_HIGHEST_RATE, "NIC highest tx rate"),
3616 IPW2100_ORD(AP_HIGHEST_RATE, "AP highest tx rate"),
3617 IPW2100_ORD(CAPABILITIES, "Management frame capability field"),
3618 IPW2100_ORD(AUTH_TYPE, "Type of authentication"),
3619 IPW2100_ORD(RADIO_TYPE, "Adapter card platform type"),
3620 IPW2100_ORD(RTS_THRESHOLD, "Min packet length for RTS handshaking"),
3621 IPW2100_ORD(INT_MODE, "International mode"),
3622 IPW2100_ORD(FRAGMENTATION_THRESHOLD, "protocol frag threshold"),
3623 IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_START_ADDRESS, "EEPROM offset in SRAM"),
3624 IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_SIZE, "EEPROM size in SRAM"),
3625 IPW2100_ORD(EEPROM_SKU_CAPABILITY, "EEPROM SKU Capability"),
3626 IPW2100_ORD(EEPROM_IBSS_11B_CHANNELS, "EEPROM IBSS 11b channel set"),
3627 IPW2100_ORD(MAC_VERSION, "MAC Version"),
3628 IPW2100_ORD(MAC_REVISION, "MAC Revision"),
3629 IPW2100_ORD(RADIO_VERSION, "Radio Version"),
3630 IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"),
3631 IPW2100_ORD(UCODE_VERSION, "Ucode Version"),
3632};
3633
3634
3635static ssize_t show_registers(struct device *d, struct device_attribute *attr,
3636 char *buf)
3637{
3638 int i;
3639 struct ipw2100_priv *priv = dev_get_drvdata(d);
3640 struct net_device *dev = priv->net_dev;
3641 char * out = buf;
3642 u32 val = 0;
3643
3644 out += sprintf(out, "%30s [Address ] : Hex\n", "Register");
3645
3646 for (i = 0; i < (sizeof(hw_data) / sizeof(*hw_data)); i++) {
3647 read_register(dev, hw_data[i].addr, &val);
3648 out += sprintf(out, "%30s [%08X] : %08X\n",
3649 hw_data[i].name, hw_data[i].addr, val);
3650 }
3651
3652 return out - buf;
3653}
3654static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
3655
3656
3657static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
3658 char *buf)
3659{
3660 struct ipw2100_priv *priv = dev_get_drvdata(d);
3661 struct net_device *dev = priv->net_dev;
3662 char * out = buf;
3663 int i;
3664
3665 out += sprintf(out, "%30s [Address ] : Hex\n", "NIC entry");
3666
3667 for (i = 0; i < (sizeof(nic_data) / sizeof(*nic_data)); i++) {
3668 u8 tmp8;
3669 u16 tmp16;
3670 u32 tmp32;
3671
3672 switch (nic_data[i].size) {
3673 case 1:
3674 read_nic_byte(dev, nic_data[i].addr, &tmp8);
3675 out += sprintf(out, "%30s [%08X] : %02X\n",
3676 nic_data[i].name, nic_data[i].addr,
3677 tmp8);
3678 break;
3679 case 2:
3680 read_nic_word(dev, nic_data[i].addr, &tmp16);
3681 out += sprintf(out, "%30s [%08X] : %04X\n",
3682 nic_data[i].name, nic_data[i].addr,
3683 tmp16);
3684 break;
3685 case 4:
3686 read_nic_dword(dev, nic_data[i].addr, &tmp32);
3687 out += sprintf(out, "%30s [%08X] : %08X\n",
3688 nic_data[i].name, nic_data[i].addr,
3689 tmp32);
3690 break;
3691 }
3692 }
3693 return out - buf;
3694}
3695static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
3696
3697
3698static ssize_t show_memory(struct device *d, struct device_attribute *attr,
3699 char *buf)
3700{
3701 struct ipw2100_priv *priv = dev_get_drvdata(d);
3702 struct net_device *dev = priv->net_dev;
3703 static unsigned long loop = 0;
3704 int len = 0;
3705 u32 buffer[4];
3706 int i;
3707 char line[81];
3708
3709 if (loop >= 0x30000)
3710 loop = 0;
3711
3712 /* sysfs provides us PAGE_SIZE buffer */
3713 while (len < PAGE_SIZE - 128 && loop < 0x30000) {
3714
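		/* Read 16 bytes per pass, either from the saved firmware
		 * snapshot or directly from NIC memory, and emit them raw or
		 * as a formatted hex dump depending on priv->dump_raw. */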
3715 if (priv->snapshot[0]) for (i = 0; i < 4; i++)
3716 buffer[i] = *(u32 *)SNAPSHOT_ADDR(loop + i * 4);
3717 else for (i = 0; i < 4; i++)
3718 read_nic_dword(dev, loop + i * 4, &buffer[i]);
3719
3720 if (priv->dump_raw)
3721 len += sprintf(buf + len,
3722 "%c%c%c%c"
3723 "%c%c%c%c"
3724 "%c%c%c%c"
3725 "%c%c%c%c",
3726 ((u8*)buffer)[0x0],
3727 ((u8*)buffer)[0x1],
3728 ((u8*)buffer)[0x2],
3729 ((u8*)buffer)[0x3],
3730 ((u8*)buffer)[0x4],
3731 ((u8*)buffer)[0x5],
3732 ((u8*)buffer)[0x6],
3733 ((u8*)buffer)[0x7],
3734 ((u8*)buffer)[0x8],
3735 ((u8*)buffer)[0x9],
3736 ((u8*)buffer)[0xa],
3737 ((u8*)buffer)[0xb],
3738 ((u8*)buffer)[0xc],
3739 ((u8*)buffer)[0xd],
3740 ((u8*)buffer)[0xe],
3741 ((u8*)buffer)[0xf]);
3742 else
3743 len += sprintf(buf + len, "%s\n",
3744 snprint_line(line, sizeof(line),
3745 (u8*)buffer, 16, loop));
3746 loop += 16;
3747 }
3748
3749 return len;
3750}
3751
3752static ssize_t store_memory(struct device *d, struct device_attribute *attr,
3753 const char *buf, size_t count)
3754{
3755 struct ipw2100_priv *priv = dev_get_drvdata(d);
3756 struct net_device *dev = priv->net_dev;
3757 const char *p = buf;
3758
3759 if (count < 1)
3760 return count;
3761
3762 if (p[0] == '1' ||
3763 (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) {
3764 IPW_DEBUG_INFO("%s: Setting memory dump to RAW mode.\n",
3765 dev->name);
3766 priv->dump_raw = 1;
3767
3768 } else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' &&
3769 tolower(p[1]) == 'f')) {
3770 IPW_DEBUG_INFO("%s: Setting memory dump to HEX mode.\n",
3771 dev->name);
3772 priv->dump_raw = 0;
3773
3774 } else if (tolower(p[0]) == 'r') {
3775 IPW_DEBUG_INFO("%s: Resetting firmware snapshot.\n",
3776 dev->name);
3777 ipw2100_snapshot_free(priv);
3778
3779 } else
3780		IPW_DEBUG_INFO("%s: Usage: 1|on = RAW, 0|off = HEX, "
3781 "reset = clear memory snapshot\n",
3782 dev->name);
3783
3784 return count;
3785}
3786static DEVICE_ATTR(memory, S_IWUSR|S_IRUGO, show_memory, store_memory);
3787
3788
3789static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
3790 char *buf)
3791{
3792 struct ipw2100_priv *priv = dev_get_drvdata(d);
3793 u32 val = 0;
3794 int len = 0;
3795 u32 val_len;
3796 static int loop = 0;
3797
3798 if (loop >= sizeof(ord_data) / sizeof(*ord_data))
3799 loop = 0;
3800
3801 /* sysfs provides us PAGE_SIZE buffer */
3802 while (len < PAGE_SIZE - 128 &&
3803 loop < (sizeof(ord_data) / sizeof(*ord_data))) {
3804
3805 val_len = sizeof(u32);
3806
3807 if (ipw2100_get_ordinal(priv, ord_data[loop].index, &val,
3808 &val_len))
3809 len += sprintf(buf + len, "[0x%02X] = ERROR %s\n",
3810 ord_data[loop].index,
3811 ord_data[loop].desc);
3812 else
3813 len += sprintf(buf + len, "[0x%02X] = 0x%08X %s\n",
3814 ord_data[loop].index, val,
3815 ord_data[loop].desc);
3816 loop++;
3817 }
3818
3819 return len;
3820}
3821static DEVICE_ATTR(ordinals, S_IRUGO, show_ordinals, NULL);
3822
3823
3824static ssize_t show_stats(struct device *d, struct device_attribute *attr,
3825 char *buf)
3826{
3827 struct ipw2100_priv *priv = dev_get_drvdata(d);
3828 char * out = buf;
3829
3830 out += sprintf(out, "interrupts: %d {tx: %d, rx: %d, other: %d}\n",
3831 priv->interrupts, priv->tx_interrupts,
3832 priv->rx_interrupts, priv->inta_other);
3833 out += sprintf(out, "firmware resets: %d\n", priv->resets);
3834 out += sprintf(out, "firmware hangs: %d\n", priv->hangs);
3835#ifdef CONFIG_IPW_DEBUG
3836 out += sprintf(out, "packet mismatch image: %s\n",
3837 priv->snapshot[0] ? "YES" : "NO");
3838#endif
3839
3840 return out - buf;
3841}
3842static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
3843
3844
3845static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
3846{
3847 int err;
3848
3849 if (mode == priv->ieee->iw_mode)
3850 return 0;
3851
3852 err = ipw2100_disable_adapter(priv);
3853 if (err) {
3854 printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n",
3855 priv->net_dev->name, err);
3856 return err;
3857 }
3858
3859 switch (mode) {
3860 case IW_MODE_INFRA:
3861 priv->net_dev->type = ARPHRD_ETHER;
3862 break;
3863 case IW_MODE_ADHOC:
3864 priv->net_dev->type = ARPHRD_ETHER;
3865 break;
3866#ifdef CONFIG_IPW2100_MONITOR
3867 case IW_MODE_MONITOR:
3868 priv->last_mode = priv->ieee->iw_mode;
3869 priv->net_dev->type = ARPHRD_IEEE80211;
3870 break;
3871#endif /* CONFIG_IPW2100_MONITOR */
3872 }
3873
3874 priv->ieee->iw_mode = mode;
3875
3876#ifdef CONFIG_PM
3877	/* Indicate to ipw2100_download_firmware that it should load the
3878	 * firmware from disk instead of from memory. */
3879 ipw2100_firmware.version = 0;
3880#endif
3881
3882	printk(KERN_INFO "%s: Resetting on mode change.\n",
3883 priv->net_dev->name);
3884 priv->reset_backoff = 0;
3885 schedule_reset(priv);
3886
3887 return 0;
3888}
3889
3890static ssize_t show_internals(struct device *d, struct device_attribute *attr,
3891 char *buf)
3892{
3893 struct ipw2100_priv *priv = dev_get_drvdata(d);
3894 int len = 0;
3895
3896#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" # y "\n", priv-> x)
3897
3898 if (priv->status & STATUS_ASSOCIATED)
3899 len += sprintf(buf + len, "connected: %lu\n",
3900 get_seconds() - priv->connect_start);
3901 else
3902 len += sprintf(buf + len, "not connected\n");
3903
3904 DUMP_VAR(ieee->crypt[priv->ieee->tx_keyidx], p);
3905 DUMP_VAR(status, 08lx);
3906 DUMP_VAR(config, 08lx);
3907 DUMP_VAR(capability, 08lx);
3908
3909 len += sprintf(buf + len, "last_rtc: %lu\n", (unsigned long)priv->last_rtc);
3910
3911 DUMP_VAR(fatal_error, d);
3912 DUMP_VAR(stop_hang_check, d);
3913 DUMP_VAR(stop_rf_kill, d);
3914 DUMP_VAR(messages_sent, d);
3915
3916 DUMP_VAR(tx_pend_stat.value, d);
3917 DUMP_VAR(tx_pend_stat.hi, d);
3918
3919 DUMP_VAR(tx_free_stat.value, d);
3920 DUMP_VAR(tx_free_stat.lo, d);
3921
3922 DUMP_VAR(msg_free_stat.value, d);
3923 DUMP_VAR(msg_free_stat.lo, d);
3924
3925 DUMP_VAR(msg_pend_stat.value, d);
3926 DUMP_VAR(msg_pend_stat.hi, d);
3927
3928 DUMP_VAR(fw_pend_stat.value, d);
3929 DUMP_VAR(fw_pend_stat.hi, d);
3930
3931 DUMP_VAR(txq_stat.value, d);
3932 DUMP_VAR(txq_stat.lo, d);
3933
3934 DUMP_VAR(ieee->scans, d);
3935 DUMP_VAR(reset_backoff, d);
3936
3937 return len;
3938}
3939static DEVICE_ATTR(internals, S_IRUGO, show_internals, NULL);
3940
3941
3942static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
3943 char *buf)
3944{
3945 struct ipw2100_priv *priv = dev_get_drvdata(d);
3946 char essid[IW_ESSID_MAX_SIZE + 1];
3947 u8 bssid[ETH_ALEN];
3948 u32 chan = 0;
3949 char * out = buf;
3950 int length;
3951 int ret;
3952
3953 memset(essid, 0, sizeof(essid));
3954 memset(bssid, 0, sizeof(bssid));
3955
3956 length = IW_ESSID_MAX_SIZE;
3957 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &length);
3958 if (ret)
3959 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
3960 __LINE__);
3961
3962 length = sizeof(bssid);
3963 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
3964 bssid, &length);
3965 if (ret)
3966 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
3967 __LINE__);
3968
3969 length = sizeof(u32);
3970 ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &length);
3971 if (ret)
3972 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
3973 __LINE__);
3974
3975 out += sprintf(out, "ESSID: %s\n", essid);
3976 out += sprintf(out, "BSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
3977 bssid[0], bssid[1], bssid[2],
3978 bssid[3], bssid[4], bssid[5]);
3979 out += sprintf(out, "Channel: %d\n", chan);
3980
3981 return out - buf;
3982}
3983static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL);
3984
3985
3986#ifdef CONFIG_IPW_DEBUG
3987static ssize_t show_debug_level(struct device_driver *d, char *buf)
3988{
3989 return sprintf(buf, "0x%08X\n", ipw2100_debug_level);
3990}
3991
3992static ssize_t store_debug_level(struct device_driver *d, const char *buf,
3993 size_t count)
3994{
3995 char *p = (char *)buf;
3996 u32 val;
3997
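	/* Accept the level either as hex (optional 0x/x prefix) or decimal. */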
3998 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
3999 p++;
4000 if (p[0] == 'x' || p[0] == 'X')
4001 p++;
4002 val = simple_strtoul(p, &p, 16);
4003 } else
4004 val = simple_strtoul(p, &p, 10);
4005 if (p == buf)
4006 IPW_DEBUG_INFO(DRV_NAME
4007 ": %s is not in hex or decimal form.\n", buf);
4008 else
4009 ipw2100_debug_level = val;
4010
4011 return strnlen(buf, count);
4012}
4013static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level,
4014 store_debug_level);
4015#endif /* CONFIG_IPW_DEBUG */
4016
4017
4018static ssize_t show_fatal_error(struct device *d,
4019 struct device_attribute *attr, char *buf)
4020{
4021 struct ipw2100_priv *priv = dev_get_drvdata(d);
4022 char *out = buf;
4023 int i;
4024
4025 if (priv->fatal_error)
4026 out += sprintf(out, "0x%08X\n",
4027 priv->fatal_error);
4028 else
4029 out += sprintf(out, "0\n");
4030
4031 for (i = 1; i <= IPW2100_ERROR_QUEUE; i++) {
4032 if (!priv->fatal_errors[(priv->fatal_index - i) %
4033 IPW2100_ERROR_QUEUE])
4034 continue;
4035
4036 out += sprintf(out, "%d. 0x%08X\n", i,
4037 priv->fatal_errors[(priv->fatal_index - i) %
4038 IPW2100_ERROR_QUEUE]);
4039 }
4040
4041 return out - buf;
4042}
4043
4044static ssize_t store_fatal_error(struct device *d,
4045 struct device_attribute *attr, const char *buf, size_t count)
4046{
4047 struct ipw2100_priv *priv = dev_get_drvdata(d);
4048 schedule_reset(priv);
4049 return count;
4050}
4051static DEVICE_ATTR(fatal_error, S_IWUSR|S_IRUGO, show_fatal_error, store_fatal_error);
4052
4053
4054static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
4055 char *buf)
4056{
4057 struct ipw2100_priv *priv = dev_get_drvdata(d);
4058 return sprintf(buf, "%d\n", priv->ieee->scan_age);
4059}
4060
4061static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
4062 const char *buf, size_t count)
4063{
4064 struct ipw2100_priv *priv = dev_get_drvdata(d);
4065 struct net_device *dev = priv->net_dev;
4066 char buffer[] = "00000000";
4067 unsigned long len =
4068 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
4069 unsigned long val;
4070 char *p = buffer;
4071
4072 IPW_DEBUG_INFO("enter\n");
4073
4074 strncpy(buffer, buf, len);
4075 buffer[len] = 0;
4076
4077 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
4078 p++;
4079 if (p[0] == 'x' || p[0] == 'X')
4080 p++;
4081 val = simple_strtoul(p, &p, 16);
4082 } else
4083 val = simple_strtoul(p, &p, 10);
4084 if (p == buffer) {
4085 IPW_DEBUG_INFO("%s: user supplied invalid value.\n",
4086 dev->name);
4087 } else {
4088 priv->ieee->scan_age = val;
4089 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
4090 }
4091
4092 IPW_DEBUG_INFO("exit\n");
4093 return len;
4094}
4095static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
4096
4097
4098static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
4099 char *buf)
4100{
4101 /* 0 - RF kill not enabled
4102 1 - SW based RF kill active (sysfs)
4103 2 - HW based RF kill active
4104	   3 - Both HW and SW based RF kill active */
4105 struct ipw2100_priv *priv = (struct ipw2100_priv *)d->driver_data;
4106 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
4107 (rf_kill_active(priv) ? 0x2 : 0x0);
4108 return sprintf(buf, "%i\n", val);
4109}
4110
4111static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
4112{
4113 if ((disable_radio ? 1 : 0) ==
4114 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
4115		return 0;
4116
4117 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
4118 disable_radio ? "OFF" : "ON");
4119
4120 down(&priv->action_sem);
4121
4122 if (disable_radio) {
4123 priv->status |= STATUS_RF_KILL_SW;
4124 ipw2100_down(priv);
4125 } else {
4126 priv->status &= ~STATUS_RF_KILL_SW;
4127 if (rf_kill_active(priv)) {
4128 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
4129 "disabled by HW switch\n");
4130 /* Make sure the RF_KILL check timer is running */
4131 priv->stop_rf_kill = 0;
4132 cancel_delayed_work(&priv->rf_kill);
4133 queue_delayed_work(priv->workqueue, &priv->rf_kill,
4134 HZ);
4135 } else
4136 schedule_reset(priv);
4137 }
4138
4139 up(&priv->action_sem);
4140 return 1;
4141}
4142
4143static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
4144 const char *buf, size_t count)
4145{
4146 struct ipw2100_priv *priv = dev_get_drvdata(d);
4147 ipw_radio_kill_sw(priv, buf[0] == '1');
4148 return count;
4149}
4150static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill);
4151
4152
4153static struct attribute *ipw2100_sysfs_entries[] = {
4154 &dev_attr_hardware.attr,
4155 &dev_attr_registers.attr,
4156 &dev_attr_ordinals.attr,
4157 &dev_attr_pci.attr,
4158 &dev_attr_stats.attr,
4159 &dev_attr_internals.attr,
4160 &dev_attr_bssinfo.attr,
4161 &dev_attr_memory.attr,
4162 &dev_attr_scan_age.attr,
4163 &dev_attr_fatal_error.attr,
4164 &dev_attr_rf_kill.attr,
4165 &dev_attr_cfg.attr,
4166 &dev_attr_status.attr,
4167 &dev_attr_capability.attr,
4168 NULL,
4169};
4170
4171static struct attribute_group ipw2100_attribute_group = {
4172 .attrs = ipw2100_sysfs_entries,
4173};
4174
4175
4176static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
4177{
4178 struct ipw2100_status_queue *q = &priv->status_queue;
4179
4180 IPW_DEBUG_INFO("enter\n");
4181
4182 q->size = entries * sizeof(struct ipw2100_status);
4183 q->drv = (struct ipw2100_status *)pci_alloc_consistent(
4184 priv->pci_dev, q->size, &q->nic);
4185 if (!q->drv) {
4186 IPW_DEBUG_WARNING(
4187 "Can not allocate status queue.\n");
4188 return -ENOMEM;
4189 }
4190
4191 memset(q->drv, 0, q->size);
4192
4193 IPW_DEBUG_INFO("exit\n");
4194
4195 return 0;
4196}
4197
4198static void status_queue_free(struct ipw2100_priv *priv)
4199{
4200 IPW_DEBUG_INFO("enter\n");
4201
4202 if (priv->status_queue.drv) {
4203 pci_free_consistent(
4204 priv->pci_dev, priv->status_queue.size,
4205 priv->status_queue.drv, priv->status_queue.nic);
4206 priv->status_queue.drv = NULL;
4207 }
4208
4209 IPW_DEBUG_INFO("exit\n");
4210}
4211
4212static int bd_queue_allocate(struct ipw2100_priv *priv,
4213 struct ipw2100_bd_queue *q, int entries)
4214{
4215 IPW_DEBUG_INFO("enter\n");
4216
4217 memset(q, 0, sizeof(struct ipw2100_bd_queue));
4218
4219 q->entries = entries;
4220 q->size = entries * sizeof(struct ipw2100_bd);
4221 q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic);
4222 if (!q->drv) {
4223 IPW_DEBUG_INFO("can't allocate shared memory for buffer descriptors\n");
4224 return -ENOMEM;
4225 }
4226 memset(q->drv, 0, q->size);
4227
4228 IPW_DEBUG_INFO("exit\n");
4229
4230 return 0;
4231}
4232
4233static void bd_queue_free(struct ipw2100_priv *priv,
4234 struct ipw2100_bd_queue *q)
4235{
4236 IPW_DEBUG_INFO("enter\n");
4237
4238 if (!q)
4239 return;
4240
4241 if (q->drv) {
4242 pci_free_consistent(priv->pci_dev,
4243 q->size, q->drv, q->nic);
4244 q->drv = NULL;
4245 }
4246
4247 IPW_DEBUG_INFO("exit\n");
4248}
4249
4250static void bd_queue_initialize(
4251 struct ipw2100_priv *priv, struct ipw2100_bd_queue * q,
4252 u32 base, u32 size, u32 r, u32 w)
4253{
4254 IPW_DEBUG_INFO("enter\n");
4255
4256 IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv, (u32)q->nic);
4257
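	/* Point the firmware at the BD ring: its base address and size, and
	 * the current read (oldest) and write (next) indexes. */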
4258 write_register(priv->net_dev, base, q->nic);
4259 write_register(priv->net_dev, size, q->entries);
4260 write_register(priv->net_dev, r, q->oldest);
4261 write_register(priv->net_dev, w, q->next);
4262
4263 IPW_DEBUG_INFO("exit\n");
4264}
4265
4266static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
4267{
4268 if (priv->workqueue) {
4269 priv->stop_rf_kill = 1;
4270 priv->stop_hang_check = 1;
4271 cancel_delayed_work(&priv->reset_work);
4272 cancel_delayed_work(&priv->security_work);
4273 cancel_delayed_work(&priv->wx_event_work);
4274 cancel_delayed_work(&priv->hang_check);
4275 cancel_delayed_work(&priv->rf_kill);
4276 destroy_workqueue(priv->workqueue);
4277 priv->workqueue = NULL;
4278 }
4279}
4280
4281static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
4282{
4283 int i, j, err = -EINVAL;
4284 void *v;
4285 dma_addr_t p;
4286
4287 IPW_DEBUG_INFO("enter\n");
4288
4289 err = bd_queue_allocate(priv, &priv->tx_queue, TX_QUEUE_LENGTH);
4290 if (err) {
4291 IPW_DEBUG_ERROR("%s: failed bd_queue_allocate\n",
4292 priv->net_dev->name);
4293 return err;
4294 }
4295
4296 priv->tx_buffers = (struct ipw2100_tx_packet *)kmalloc(
4297 TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
4298 GFP_ATOMIC);
4299 if (!priv->tx_buffers) {
4300		printk(KERN_ERR DRV_NAME ": %s: alloc failed for tx buffers.\n",
4301 priv->net_dev->name);
4302 bd_queue_free(priv, &priv->tx_queue);
4303 return -ENOMEM;
4304 }
4305
4306 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
4307 v = pci_alloc_consistent(
4308 priv->pci_dev, sizeof(struct ipw2100_data_header), &p);
4309 if (!v) {
4310 printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for tx "
4311 "buffers.\n", priv->net_dev->name);
4312 err = -ENOMEM;
4313 break;
4314 }
4315
4316 priv->tx_buffers[i].type = DATA;
4317 priv->tx_buffers[i].info.d_struct.data = (struct ipw2100_data_header*)v;
4318 priv->tx_buffers[i].info.d_struct.data_phys = p;
4319 priv->tx_buffers[i].info.d_struct.txb = NULL;
4320 }
4321
4322 if (i == TX_PENDED_QUEUE_LENGTH)
4323 return 0;
4324
4325 for (j = 0; j < i; j++) {
4326 pci_free_consistent(
4327 priv->pci_dev,
4328 sizeof(struct ipw2100_data_header),
4329 priv->tx_buffers[j].info.d_struct.data,
4330 priv->tx_buffers[j].info.d_struct.data_phys);
4331 }
4332
4333 kfree(priv->tx_buffers);
4334 priv->tx_buffers = NULL;
4335
4336 return err;
4337}
4338
4339static void ipw2100_tx_initialize(struct ipw2100_priv *priv)
4340{
4341 int i;
4342
4343 IPW_DEBUG_INFO("enter\n");
4344
4345 /*
4346 * reinitialize packet info lists
4347 */
4348 INIT_LIST_HEAD(&priv->fw_pend_list);
4349 INIT_STAT(&priv->fw_pend_stat);
4350
4351 /*
4352 * reinitialize lists
4353 */
4354 INIT_LIST_HEAD(&priv->tx_pend_list);
4355 INIT_LIST_HEAD(&priv->tx_free_list);
4356 INIT_STAT(&priv->tx_pend_stat);
4357 INIT_STAT(&priv->tx_free_stat);
4358
4359 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
4360 /* We simply drop any SKBs that have been queued for
4361 * transmit */
4362 if (priv->tx_buffers[i].info.d_struct.txb) {
4363 ieee80211_txb_free(priv->tx_buffers[i].info.d_struct.txb);
4364 priv->tx_buffers[i].info.d_struct.txb = NULL;
4365 }
4366
4367 list_add_tail(&priv->tx_buffers[i].list, &priv->tx_free_list);
4368 }
4369
4370 SET_STAT(&priv->tx_free_stat, i);
4371
4372 priv->tx_queue.oldest = 0;
4373 priv->tx_queue.available = priv->tx_queue.entries;
4374 priv->tx_queue.next = 0;
4375 INIT_STAT(&priv->txq_stat);
4376 SET_STAT(&priv->txq_stat, priv->tx_queue.available);
4377
4378 bd_queue_initialize(priv, &priv->tx_queue,
4379 IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE,
4380 IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE,
4381 IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
4382 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX);
4383
4384 IPW_DEBUG_INFO("exit\n");
4385
4386}
4387
4388static void ipw2100_tx_free(struct ipw2100_priv *priv)
4389{
4390 int i;
4391
4392 IPW_DEBUG_INFO("enter\n");
4393
4394 bd_queue_free(priv, &priv->tx_queue);
4395
4396 if (!priv->tx_buffers)
4397 return;
4398
4399 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
4400 if (priv->tx_buffers[i].info.d_struct.txb) {
4401 ieee80211_txb_free(priv->tx_buffers[i].info.d_struct.txb);
4402 priv->tx_buffers[i].info.d_struct.txb = NULL;
4403 }
4404 if (priv->tx_buffers[i].info.d_struct.data)
4405 pci_free_consistent(
4406 priv->pci_dev,
4407 sizeof(struct ipw2100_data_header),
4408 priv->tx_buffers[i].info.d_struct.data,
4409 priv->tx_buffers[i].info.d_struct.data_phys);
4410 }
4411
4412 kfree(priv->tx_buffers);
4413 priv->tx_buffers = NULL;
4414
4415 IPW_DEBUG_INFO("exit\n");
4416}
4417
4418
4419
4420static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
4421{
4422 int i, j, err = -EINVAL;
4423
4424 IPW_DEBUG_INFO("enter\n");
4425
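	/* Allocate the RX BD ring, the matching status ring, and one
	 * pre-mapped skb per RX entry. */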
4426 err = bd_queue_allocate(priv, &priv->rx_queue, RX_QUEUE_LENGTH);
4427 if (err) {
4428 IPW_DEBUG_INFO("failed bd_queue_allocate\n");
4429 return err;
4430 }
4431
4432 err = status_queue_allocate(priv, RX_QUEUE_LENGTH);
4433 if (err) {
4434 IPW_DEBUG_INFO("failed status_queue_allocate\n");
4435 bd_queue_free(priv, &priv->rx_queue);
4436 return err;
4437 }
4438
4439 /*
4440 * allocate packets
4441 */
4442 priv->rx_buffers = (struct ipw2100_rx_packet *)
4443 kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet),
4444 GFP_KERNEL);
4445 if (!priv->rx_buffers) {
4446 IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
4447
4448 bd_queue_free(priv, &priv->rx_queue);
4449
4450 status_queue_free(priv);
4451
4452 return -ENOMEM;
4453 }
4454
4455 for (i = 0; i < RX_QUEUE_LENGTH; i++) {
4456 struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
4457
4458 err = ipw2100_alloc_skb(priv, packet);
4459 if (unlikely(err)) {
4460 err = -ENOMEM;
4461 break;
4462 }
4463
4464 /* The BD holds the cache aligned address */
4465 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
4466 priv->rx_queue.drv[i].buf_length = IPW_RX_NIC_BUFFER_LENGTH;
4467 priv->status_queue.drv[i].status_fields = 0;
4468 }
4469
4470 if (i == RX_QUEUE_LENGTH)
4471 return 0;
4472
4473 for (j = 0; j < i; j++) {
4474 pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
4475 sizeof(struct ipw2100_rx_packet),
4476 PCI_DMA_FROMDEVICE);
4477 dev_kfree_skb(priv->rx_buffers[j].skb);
4478 }
4479
4480 kfree(priv->rx_buffers);
4481 priv->rx_buffers = NULL;
4482
4483 bd_queue_free(priv, &priv->rx_queue);
4484
4485 status_queue_free(priv);
4486
4487 return err;
4488}
4489
4490static void ipw2100_rx_initialize(struct ipw2100_priv *priv)
4491{
4492 IPW_DEBUG_INFO("enter\n");
4493
4494 priv->rx_queue.oldest = 0;
4495 priv->rx_queue.available = priv->rx_queue.entries - 1;
4496 priv->rx_queue.next = priv->rx_queue.entries - 1;
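	/* One slot is left unused (available = entries - 1), presumably so
	 * that a completely full ring can be distinguished from an empty
	 * one by the read/write indexes. */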
4497
4498 INIT_STAT(&priv->rxq_stat);
4499 SET_STAT(&priv->rxq_stat, priv->rx_queue.available);
4500
4501 bd_queue_initialize(priv, &priv->rx_queue,
4502 IPW_MEM_HOST_SHARED_RX_BD_BASE,
4503 IPW_MEM_HOST_SHARED_RX_BD_SIZE,
4504 IPW_MEM_HOST_SHARED_RX_READ_INDEX,
4505 IPW_MEM_HOST_SHARED_RX_WRITE_INDEX);
4506
4507 /* set up the status queue */
4508 write_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_STATUS_BASE,
4509 priv->status_queue.nic);
4510
4511 IPW_DEBUG_INFO("exit\n");
4512}
4513
4514static void ipw2100_rx_free(struct ipw2100_priv *priv)
4515{
4516 int i;
4517
4518 IPW_DEBUG_INFO("enter\n");
4519
4520 bd_queue_free(priv, &priv->rx_queue);
4521 status_queue_free(priv);
4522
4523 if (!priv->rx_buffers)
4524 return;
4525
4526 for (i = 0; i < RX_QUEUE_LENGTH; i++) {
4527 if (priv->rx_buffers[i].rxp) {
4528 pci_unmap_single(priv->pci_dev,
4529 priv->rx_buffers[i].dma_addr,
4530 sizeof(struct ipw2100_rx),
4531 PCI_DMA_FROMDEVICE);
4532 dev_kfree_skb(priv->rx_buffers[i].skb);
4533 }
4534 }
4535
4536 kfree(priv->rx_buffers);
4537 priv->rx_buffers = NULL;
4538
4539 IPW_DEBUG_INFO("exit\n");
4540}
4541
4542static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
4543{
4544 u32 length = ETH_ALEN;
4545 u8 mac[ETH_ALEN];
4546
4547 int err;
4548
4549 err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC,
4550 mac, &length);
4551 if (err) {
4552 IPW_DEBUG_INFO("MAC address read failed\n");
4553 return -EIO;
4554 }
4555 IPW_DEBUG_INFO("card MAC is %02X:%02X:%02X:%02X:%02X:%02X\n",
4556 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4557
4558 memcpy(priv->net_dev->dev_addr, mac, ETH_ALEN);
4559
4560 return 0;
4561}
4562
4563/********************************************************************
4564 *
4565 * Firmware Commands
4566 *
4567 ********************************************************************/
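/*
 * Most of the command helpers below follow the same pattern: unless called
 * with batch_mode set, they disable the adapter, send their host command to
 * the firmware, and then re-enable the adapter before returning.
 */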
4568
4569static int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode)
4570{
4571 struct host_command cmd = {
4572 .host_command = ADAPTER_ADDRESS,
4573 .host_command_sequence = 0,
4574 .host_command_length = ETH_ALEN
4575 };
4576 int err;
4577
4578 IPW_DEBUG_HC("SET_MAC_ADDRESS\n");
4579
4580 IPW_DEBUG_INFO("enter\n");
4581
4582 if (priv->config & CFG_CUSTOM_MAC) {
4583 memcpy(cmd.host_command_parameters, priv->mac_addr,
4584 ETH_ALEN);
4585 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
4586 } else
4587 memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr,
4588 ETH_ALEN);
4589
4590 err = ipw2100_hw_send_command(priv, &cmd);
4591
4592 IPW_DEBUG_INFO("exit\n");
4593 return err;
4594}
4595
4596static int ipw2100_set_port_type(struct ipw2100_priv *priv, u32 port_type,
4597 int batch_mode)
4598{
4599 struct host_command cmd = {
4600 .host_command = PORT_TYPE,
4601 .host_command_sequence = 0,
4602 .host_command_length = sizeof(u32)
4603 };
4604 int err;
4605
4606 switch (port_type) {
4607 case IW_MODE_INFRA:
4608 cmd.host_command_parameters[0] = IPW_BSS;
4609 break;
4610 case IW_MODE_ADHOC:
4611 cmd.host_command_parameters[0] = IPW_IBSS;
4612 break;
4613 }
4614
4615 IPW_DEBUG_HC("PORT_TYPE: %s\n",
4616 port_type == IPW_IBSS ? "Ad-Hoc" : "Managed");
4617
4618 if (!batch_mode) {
4619 err = ipw2100_disable_adapter(priv);
4620 if (err) {
4621 printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n",
4622 priv->net_dev->name, err);
4623 return err;
4624 }
4625 }
4626
4627 /* send cmd to firmware */
4628 err = ipw2100_hw_send_command(priv, &cmd);
4629
4630 if (!batch_mode)
4631 ipw2100_enable_adapter(priv);
4632
4633 return err;
4634}
4635
4636
4637static int ipw2100_set_channel(struct ipw2100_priv *priv, u32 channel,
4638 int batch_mode)
4639{
4640 struct host_command cmd = {
4641 .host_command = CHANNEL,
4642 .host_command_sequence = 0,
4643 .host_command_length = sizeof(u32)
4644 };
4645 int err;
4646
4647 cmd.host_command_parameters[0] = channel;
4648
4649 IPW_DEBUG_HC("CHANNEL: %d\n", channel);
4650
4651 /* If BSS then we don't support channel selection */
4652 if (priv->ieee->iw_mode == IW_MODE_INFRA)
4653 return 0;
4654
4655 if ((channel != 0) &&
4656 ((channel < REG_MIN_CHANNEL) || (channel > REG_MAX_CHANNEL)))
4657 return -EINVAL;
4658
4659 if (!batch_mode) {
4660 err = ipw2100_disable_adapter(priv);
4661 if (err)
4662 return err;
4663 }
4664
4665 err = ipw2100_hw_send_command(priv, &cmd);
4666 if (err) {
4667		IPW_DEBUG_INFO("Failed to set channel to %d\n",
4668			       channel);
4669 return err;
4670 }
4671
4672 if (channel)
4673 priv->config |= CFG_STATIC_CHANNEL;
4674 else
4675 priv->config &= ~CFG_STATIC_CHANNEL;
4676
4677 priv->channel = channel;
4678
4679 if (!batch_mode) {
4680 err = ipw2100_enable_adapter(priv);
4681 if (err)
4682 return err;
4683 }
4684
4685 return 0;
4686}
4687
4688static int ipw2100_system_config(struct ipw2100_priv *priv, int batch_mode)
4689{
4690 struct host_command cmd = {
4691 .host_command = SYSTEM_CONFIG,
4692 .host_command_sequence = 0,
4693 .host_command_length = 12,
4694 };
4695 u32 ibss_mask, len = sizeof(u32);
4696 int err;
4697
4698 /* Set system configuration */
4699
4700 if (!batch_mode) {
4701 err = ipw2100_disable_adapter(priv);
4702 if (err)
4703 return err;
4704 }
4705
4706 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
4707 cmd.host_command_parameters[0] |= IPW_CFG_IBSS_AUTO_START;
4708
4709 cmd.host_command_parameters[0] |= IPW_CFG_IBSS_MASK |
4710 IPW_CFG_BSS_MASK |
4711 IPW_CFG_802_1x_ENABLE;
4712
4713 if (!(priv->config & CFG_LONG_PREAMBLE))
4714 cmd.host_command_parameters[0] |= IPW_CFG_PREAMBLE_AUTO;
4715
4716 err = ipw2100_get_ordinal(priv,
4717 IPW_ORD_EEPROM_IBSS_11B_CHANNELS,
4718 &ibss_mask, &len);
4719 if (err)
4720 ibss_mask = IPW_IBSS_11B_DEFAULT_MASK;
4721
4722 cmd.host_command_parameters[1] = REG_CHANNEL_MASK;
4723 cmd.host_command_parameters[2] = REG_CHANNEL_MASK & ibss_mask;
4724
4725 /* 11b only */
4726 /*cmd.host_command_parameters[0] |= DIVERSITY_ANTENNA_A;*/
4727
4728 err = ipw2100_hw_send_command(priv, &cmd);
4729 if (err)
4730 return err;
4731
4732/* If IPv6 is configured in the kernel then we don't want to filter out all
4733 * of the multicast packets as IPv6 needs some. */
4734#if !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE)
4735 cmd.host_command = ADD_MULTICAST;
4736 cmd.host_command_sequence = 0;
4737 cmd.host_command_length = 0;
4738
4739 ipw2100_hw_send_command(priv, &cmd);
4740#endif
4741 if (!batch_mode) {
4742 err = ipw2100_enable_adapter(priv);
4743 if (err)
4744 return err;
4745 }
4746
4747 return 0;
4748}
4749
4750static int ipw2100_set_tx_rates(struct ipw2100_priv *priv, u32 rate,
4751 int batch_mode)
4752{
4753 struct host_command cmd = {
4754 .host_command = BASIC_TX_RATES,
4755 .host_command_sequence = 0,
4756 .host_command_length = 4
4757 };
4758 int err;
4759
4760 cmd.host_command_parameters[0] = rate & TX_RATE_MASK;
4761
4762 if (!batch_mode) {
4763 err = ipw2100_disable_adapter(priv);
4764 if (err)
4765 return err;
4766 }
4767
4768 /* Set BASIC TX Rate first */
4769 ipw2100_hw_send_command(priv, &cmd);
4770
4771 /* Set TX Rate */
4772 cmd.host_command = TX_RATES;
4773 ipw2100_hw_send_command(priv, &cmd);
4774
4775 /* Set MSDU TX Rate */
4776 cmd.host_command = MSDU_TX_RATES;
4777 ipw2100_hw_send_command(priv, &cmd);
4778
4779 if (!batch_mode) {
4780 err = ipw2100_enable_adapter(priv);
4781 if (err)
4782 return err;
4783 }
4784
4785 priv->tx_rates = rate;
4786
4787 return 0;
4788}
4789
4790static int ipw2100_set_power_mode(struct ipw2100_priv *priv,
4791 int power_level)
4792{
4793 struct host_command cmd = {
4794 .host_command = POWER_MODE,
4795 .host_command_sequence = 0,
4796 .host_command_length = 4
4797 };
4798 int err;
4799
4800 cmd.host_command_parameters[0] = power_level;
4801
4802 err = ipw2100_hw_send_command(priv, &cmd);
4803 if (err)
4804 return err;
4805
4806 if (power_level == IPW_POWER_MODE_CAM)
4807 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
4808 else
4809 priv->power_mode = IPW_POWER_ENABLED | power_level;
4810
4811#ifdef CONFIG_IPW2100_TX_POWER
4812 if (priv->port_type == IBSS &&
4813 priv->adhoc_power != DFTL_IBSS_TX_POWER) {
4814		/* Set the adhoc TX power index */
4815 cmd.host_command = TX_POWER_INDEX;
4816 cmd.host_command_parameters[0] = (u32)priv->adhoc_power;
4817
4818 err = ipw2100_hw_send_command(priv, &cmd);
4819 if (err)
4820 return err;
4821 }
4822#endif
4823
4824 return 0;
4825}
4826
4827
4828static int ipw2100_set_rts_threshold(struct ipw2100_priv *priv, u32 threshold)
4829{
4830 struct host_command cmd = {
4831 .host_command = RTS_THRESHOLD,
4832 .host_command_sequence = 0,
4833 .host_command_length = 4
4834 };
4835 int err;
4836
4837 if (threshold & RTS_DISABLED)
4838 cmd.host_command_parameters[0] = MAX_RTS_THRESHOLD;
4839 else
4840 cmd.host_command_parameters[0] = threshold & ~RTS_DISABLED;
4841
4842 err = ipw2100_hw_send_command(priv, &cmd);
4843 if (err)
4844 return err;
4845
4846 priv->rts_threshold = threshold;
4847
4848 return 0;
4849}
4850
4851#if 0
4852int ipw2100_set_fragmentation_threshold(struct ipw2100_priv *priv,
4853 u32 threshold, int batch_mode)
4854{
4855 struct host_command cmd = {
4856 .host_command = FRAG_THRESHOLD,
4857 .host_command_sequence = 0,
4858 .host_command_length = 4,
4859 .host_command_parameters[0] = 0,
4860 };
4861 int err;
4862
4863 if (!batch_mode) {
4864 err = ipw2100_disable_adapter(priv);
4865 if (err)
4866 return err;
4867 }
4868
4869 if (threshold == 0)
4870 threshold = DEFAULT_FRAG_THRESHOLD;
4871 else {
4872 threshold = max(threshold, MIN_FRAG_THRESHOLD);
4873 threshold = min(threshold, MAX_FRAG_THRESHOLD);
4874 }
4875
4876 cmd.host_command_parameters[0] = threshold;
4877
4878 IPW_DEBUG_HC("FRAG_THRESHOLD: %u\n", threshold);
4879
4880 err = ipw2100_hw_send_command(priv, &cmd);
4881
4882 if (!batch_mode)
4883 ipw2100_enable_adapter(priv);
4884
4885 if (!err)
4886 priv->frag_threshold = threshold;
4887
4888 return err;
4889}
4890#endif
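/* Note: the fragmentation threshold helper above is compiled out with
 * "#if 0", and its only would-be caller in ipw2100_adapter_setup() further
 * down is commented out to match. */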
4891
4892static int ipw2100_set_short_retry(struct ipw2100_priv *priv, u32 retry)
4893{
4894 struct host_command cmd = {
4895 .host_command = SHORT_RETRY_LIMIT,
4896 .host_command_sequence = 0,
4897 .host_command_length = 4
4898 };
4899 int err;
4900
4901 cmd.host_command_parameters[0] = retry;
4902
4903 err = ipw2100_hw_send_command(priv, &cmd);
4904 if (err)
4905 return err;
4906
4907 priv->short_retry_limit = retry;
4908
4909 return 0;
4910}
4911
4912static int ipw2100_set_long_retry(struct ipw2100_priv *priv, u32 retry)
4913{
4914 struct host_command cmd = {
4915 .host_command = LONG_RETRY_LIMIT,
4916 .host_command_sequence = 0,
4917 .host_command_length = 4
4918 };
4919 int err;
4920
4921 cmd.host_command_parameters[0] = retry;
4922
4923 err = ipw2100_hw_send_command(priv, &cmd);
4924 if (err)
4925 return err;
4926
4927 priv->long_retry_limit = retry;
4928
4929 return 0;
4930}
4931
4932
4933static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 *bssid,
4934 int batch_mode)
4935{
4936 struct host_command cmd = {
4937 .host_command = MANDATORY_BSSID,
4938 .host_command_sequence = 0,
4939 .host_command_length = (bssid == NULL) ? 0 : ETH_ALEN
4940 };
4941 int err;
4942
4943#ifdef CONFIG_IPW_DEBUG
4944 if (bssid != NULL)
4945 IPW_DEBUG_HC(
4946 "MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n",
4947 bssid[0], bssid[1], bssid[2], bssid[3], bssid[4],
4948 bssid[5]);
4949 else
4950 IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n");
4951#endif
4952 /* if BSSID is empty then we disable mandatory bssid mode */
4953 if (bssid != NULL)
4954 memcpy((u8 *)cmd.host_command_parameters, bssid, ETH_ALEN);
4955
4956 if (!batch_mode) {
4957 err = ipw2100_disable_adapter(priv);
4958 if (err)
4959 return err;
4960 }
4961
4962 err = ipw2100_hw_send_command(priv, &cmd);
4963
4964 if (!batch_mode)
4965 ipw2100_enable_adapter(priv);
4966
4967 return err;
4968}
4969
4970#ifdef CONFIG_IEEE80211_WPA
4971static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
4972{
4973 struct host_command cmd = {
4974 .host_command = DISASSOCIATION_BSSID,
4975 .host_command_sequence = 0,
4976 .host_command_length = ETH_ALEN
4977 };
4978 int err;
4979 int len;
4980
4981 IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
4982
4983 len = ETH_ALEN;
4984 /* The Firmware currently ignores the BSSID and just disassociates from
4985 * the currently associated AP -- but in the off chance that a future
4986	 * firmware does use the BSSID provided here, we go ahead and try to
4987 * set it to the currently associated AP's BSSID */
4988 memcpy(cmd.host_command_parameters, priv->bssid, ETH_ALEN);
4989
4990 err = ipw2100_hw_send_command(priv, &cmd);
4991
4992 return err;
4993}
4994#endif
4995
4996/*
4997 * Pseudo code for setting up wpa_frame:
4998 */
4999#if 0
5000void x(struct ieee80211_assoc_frame *wpa_assoc)
5001{
5002 struct ipw2100_wpa_assoc_frame frame;
5003 frame->fixed_ie_mask = IPW_WPA_CAPABILTIES |
5004 IPW_WPA_LISTENINTERVAL |
5005 IPW_WPA_AP_ADDRESS;
5006 frame->capab_info = wpa_assoc->capab_info;
5007 frame->lisen_interval = wpa_assoc->listent_interval;
5008 memcpy(frame->current_ap, wpa_assoc->current_ap, ETH_ALEN);
5009
5010	/* UNKNOWN -- I'm not positive about this part; don't have any WPA
5011 * setup here to test it with.
5012 *
5013 * Walk the IEs in the wpa_assoc and figure out the total size of all
5014 * that data. Stick that into frame->var_ie_len. Then memcpy() all of
5015 * the IEs from wpa_frame into frame.
5016 */
5017 frame->var_ie_len = calculate_ie_len(wpa_assoc);
5018 memcpy(frame->var_ie, wpa_assoc->variable, frame->var_ie_len);
5019
5020 ipw2100_set_wpa_ie(priv, &frame, 0);
5021}
5022#endif
5023
5024
5025
5026
5027static int ipw2100_set_wpa_ie(struct ipw2100_priv *,
5028 struct ipw2100_wpa_assoc_frame *, int)
5029__attribute__ ((unused));
5030
5031static int ipw2100_set_wpa_ie(struct ipw2100_priv *priv,
5032 struct ipw2100_wpa_assoc_frame *wpa_frame,
5033 int batch_mode)
5034{
5035 struct host_command cmd = {
5036 .host_command = SET_WPA_IE,
5037 .host_command_sequence = 0,
5038 .host_command_length = sizeof(struct ipw2100_wpa_assoc_frame),
5039 };
5040 int err;
5041
5042 IPW_DEBUG_HC("SET_WPA_IE\n");
5043
5044 if (!batch_mode) {
5045 err = ipw2100_disable_adapter(priv);
5046 if (err)
5047 return err;
5048 }
5049
5050 memcpy(cmd.host_command_parameters, wpa_frame,
5051 sizeof(struct ipw2100_wpa_assoc_frame));
5052
5053 err = ipw2100_hw_send_command(priv, &cmd);
5054
5055 if (!batch_mode) {
5056 if (ipw2100_enable_adapter(priv))
5057 err = -EIO;
5058 }
5059
5060 return err;
5061}
5062
5063struct security_info_params {
5064 u32 allowed_ciphers;
5065 u16 version;
5066 u8 auth_mode;
5067 u8 replay_counters_number;
5068 u8 unicast_using_group;
5069} __attribute__ ((packed));
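/* Parameter block for the SET_SECURITY_INFORMATION host command; it is built
 * in place inside cmd.host_command_parameters by the function below, hence
 * the packed layout. */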
5070
5071static int ipw2100_set_security_information(struct ipw2100_priv *priv,
5072 int auth_mode,
5073 int security_level,
5074 int unicast_using_group,
5075 int batch_mode)
5076{
5077 struct host_command cmd = {
5078 .host_command = SET_SECURITY_INFORMATION,
5079 .host_command_sequence = 0,
5080 .host_command_length = sizeof(struct security_info_params)
5081 };
5082 struct security_info_params *security =
5083 (struct security_info_params *)&cmd.host_command_parameters;
5084 int err;
5085 memset(security, 0, sizeof(*security));
5086
5087 /* If shared key AP authentication is turned on, then we need to
5088	 * configure the firmware to try to use it.
5089 *
5090 * Actual data encryption/decryption is handled by the host. */
5091 security->auth_mode = auth_mode;
5092 security->unicast_using_group = unicast_using_group;
5093
5094 switch (security_level) {
5095 default:
5096 case SEC_LEVEL_0:
5097 security->allowed_ciphers = IPW_NONE_CIPHER;
5098 break;
5099 case SEC_LEVEL_1:
5100 security->allowed_ciphers = IPW_WEP40_CIPHER |
5101 IPW_WEP104_CIPHER;
5102 break;
5103 case SEC_LEVEL_2:
5104 security->allowed_ciphers = IPW_WEP40_CIPHER |
5105 IPW_WEP104_CIPHER | IPW_TKIP_CIPHER;
5106 break;
5107 case SEC_LEVEL_2_CKIP:
5108 security->allowed_ciphers = IPW_WEP40_CIPHER |
5109 IPW_WEP104_CIPHER | IPW_CKIP_CIPHER;
5110 break;
5111 case SEC_LEVEL_3:
5112 security->allowed_ciphers = IPW_WEP40_CIPHER |
5113 IPW_WEP104_CIPHER | IPW_TKIP_CIPHER | IPW_CCMP_CIPHER;
5114 break;
5115 }
5116
5117 IPW_DEBUG_HC(
5118 "SET_SECURITY_INFORMATION: auth:%d cipher:0x%02X (level %d)\n",
5119 security->auth_mode, security->allowed_ciphers, security_level);
5120
5121 security->replay_counters_number = 0;
5122
5123 if (!batch_mode) {
5124 err = ipw2100_disable_adapter(priv);
5125 if (err)
5126 return err;
5127 }
5128
5129 err = ipw2100_hw_send_command(priv, &cmd);
5130
5131 if (!batch_mode)
5132 ipw2100_enable_adapter(priv);
5133
5134 return err;
5135}
5136
5137static int ipw2100_set_tx_power(struct ipw2100_priv *priv,
5138 u32 tx_power)
5139{
5140 struct host_command cmd = {
5141 .host_command = TX_POWER_INDEX,
5142 .host_command_sequence = 0,
5143 .host_command_length = 4
5144 };
5145 int err = 0;
5146
5147 cmd.host_command_parameters[0] = tx_power;
5148
5149 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
5150 err = ipw2100_hw_send_command(priv, &cmd);
5151 if (!err)
5152 priv->tx_power = tx_power;
5153
5154 return 0;
5155}
5156
5157static int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv,
5158 u32 interval, int batch_mode)
5159{
5160 struct host_command cmd = {
5161 .host_command = BEACON_INTERVAL,
5162 .host_command_sequence = 0,
5163 .host_command_length = 4
5164 };
5165 int err;
5166
5167 cmd.host_command_parameters[0] = interval;
5168
5169 IPW_DEBUG_INFO("enter\n");
5170
5171 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5172 if (!batch_mode) {
5173 err = ipw2100_disable_adapter(priv);
5174 if (err)
5175 return err;
5176 }
5177
5178 ipw2100_hw_send_command(priv, &cmd);
5179
5180 if (!batch_mode) {
5181 err = ipw2100_enable_adapter(priv);
5182 if (err)
5183 return err;
5184 }
5185 }
5186
5187 IPW_DEBUG_INFO("exit\n");
5188
5189 return 0;
5190}
5191
5192
5193void ipw2100_queues_initialize(struct ipw2100_priv *priv)
5194{
5195 ipw2100_tx_initialize(priv);
5196 ipw2100_rx_initialize(priv);
5197 ipw2100_msg_initialize(priv);
5198}
5199
5200void ipw2100_queues_free(struct ipw2100_priv *priv)
5201{
5202 ipw2100_tx_free(priv);
5203 ipw2100_rx_free(priv);
5204 ipw2100_msg_free(priv);
5205}
5206
5207int ipw2100_queues_allocate(struct ipw2100_priv *priv)
5208{
5209 if (ipw2100_tx_allocate(priv) ||
5210 ipw2100_rx_allocate(priv) ||
5211 ipw2100_msg_allocate(priv))
5212 goto fail;
5213
5214 return 0;
5215
5216 fail:
5217 ipw2100_tx_free(priv);
5218 ipw2100_rx_free(priv);
5219 ipw2100_msg_free(priv);
5220 return -ENOMEM;
5221}
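/* Queue lifecycle as used later in this file: ipw2100_pci_init_one() calls
 * ipw2100_queues_allocate() and then ipw2100_queues_initialize(), while
 * ipw2100_queues_free() runs on the probe failure path and from
 * ipw2100_pci_remove_one(). */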
5222
5223#define IPW_PRIVACY_CAPABLE 0x0008
5224
5225static int ipw2100_set_wep_flags(struct ipw2100_priv *priv, u32 flags,
5226 int batch_mode)
5227{
5228 struct host_command cmd = {
5229 .host_command = WEP_FLAGS,
5230 .host_command_sequence = 0,
5231 .host_command_length = 4
5232 };
5233 int err;
5234
5235 cmd.host_command_parameters[0] = flags;
5236
5237 IPW_DEBUG_HC("WEP_FLAGS: flags = 0x%08X\n", flags);
5238
5239 if (!batch_mode) {
5240 err = ipw2100_disable_adapter(priv);
5241 if (err) {
5242 printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n",
5243 priv->net_dev->name, err);
5244 return err;
5245 }
5246 }
5247
5248 /* send cmd to firmware */
5249 err = ipw2100_hw_send_command(priv, &cmd);
5250
5251 if (!batch_mode)
5252 ipw2100_enable_adapter(priv);
5253
5254 return err;
5255}
5256
5257struct ipw2100_wep_key {
5258 u8 idx;
5259 u8 len;
5260 u8 key[13];
5261};
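/* Payload of the WEP_KEY_INFO host command: idx selects one of the four key
 * slots and key[] carries either a 5 byte (WEP-40) or 13 byte (WEP-104) key,
 * matching the length rounding done in ipw2100_set_key() below. */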
5262
5263/* Macros to ease printing WEP keys */
5264#define WEP_FMT_64 "%02X%02X%02X%02X-%02X"
5265#define WEP_FMT_128 "%02X%02X%02X%02X-%02X%02X%02X%02X-%02X%02X%02X"
5266#define WEP_STR_64(x) x[0],x[1],x[2],x[3],x[4]
5267#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10]
5268
5269
5270/**
5271 * Set the WEP key
5272 *
5273 * @priv: struct to work on
5274 * @idx: index of the key we want to set
5275 * @key: ptr to the key data to set
5276 * @len: length of the buffer at @key
5277 * @batch_mode: when set, skip the adapter disable/enable around the
5278 *             command; the caller is expected to bracket the operation itself.
5279 *
5280 * @returns 0 if OK, < 0 errno code on error.
5281 *
5282 * Fill out a command structure with the new WEP key, length and
5283 * index, and send it down the wire.
5284 */
5285static int ipw2100_set_key(struct ipw2100_priv *priv,
5286 int idx, char *key, int len, int batch_mode)
5287{
5288 int keylen = len ? (len <= 5 ? 5 : 13) : 0;
5289 struct host_command cmd = {
5290 .host_command = WEP_KEY_INFO,
5291 .host_command_sequence = 0,
5292 .host_command_length = sizeof(struct ipw2100_wep_key),
5293 };
5294 struct ipw2100_wep_key *wep_key = (void*)cmd.host_command_parameters;
5295 int err;
5296
5297 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
5298 idx, keylen, len);
5299
5300 /* NOTE: We don't check cached values in case the firmware was reset
5301	 * or some other problem is occurring. If the user is setting the key,
5302 * then we push the change */
5303
5304 wep_key->idx = idx;
5305 wep_key->len = keylen;
5306
5307 if (keylen) {
5308 memcpy(wep_key->key, key, len);
5309 memset(wep_key->key + len, 0, keylen - len);
5310 }
5311
5312 /* Will be optimized out on debug not being configured in */
5313 if (keylen == 0)
5314 IPW_DEBUG_WEP("%s: Clearing key %d\n",
5315 priv->net_dev->name, wep_key->idx);
5316 else if (keylen == 5)
5317 IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_64 "\n",
5318 priv->net_dev->name, wep_key->idx, wep_key->len,
5319 WEP_STR_64(wep_key->key));
5320 else
5321 IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_128
5322 "\n",
5323 priv->net_dev->name, wep_key->idx, wep_key->len,
5324 WEP_STR_128(wep_key->key));
5325
5326 if (!batch_mode) {
5327 err = ipw2100_disable_adapter(priv);
5328		/* FIXME: IPG: shouldn't this printk be in _disable_adapter()? */
5329 if (err) {
5330 printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n",
5331 priv->net_dev->name, err);
5332 return err;
5333 }
5334 }
5335
5336 /* send cmd to firmware */
5337 err = ipw2100_hw_send_command(priv, &cmd);
5338
5339 if (!batch_mode) {
5340 int err2 = ipw2100_enable_adapter(priv);
5341 if (err == 0)
5342 err = err2;
5343 }
5344 return err;
5345}
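/* Illustrative call (the index and key bytes here are made-up values, not
 * taken from a caller in this file): load a 40-bit key into slot 0 with
 * batch_mode == 0 so the helper brackets the command with its own adapter
 * disable/enable:
 *
 *	err = ipw2100_set_key(priv, 0, "\x01\x02\x03\x04\x05", 5, 0);
 *
 * A zero len clears the slot, and batch_mode == 1 is what
 * ipw2100_configure_security() uses when it updates all four keys under a
 * single disable/enable. */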
5346
5347static int ipw2100_set_key_index(struct ipw2100_priv *priv,
5348 int idx, int batch_mode)
5349{
5350 struct host_command cmd = {
5351 .host_command = WEP_KEY_INDEX,
5352 .host_command_sequence = 0,
5353 .host_command_length = 4,
5354 .host_command_parameters = { idx },
5355 };
5356 int err;
5357
5358 IPW_DEBUG_HC("WEP_KEY_INDEX: index = %d\n", idx);
5359
5360 if (idx < 0 || idx > 3)
5361 return -EINVAL;
5362
5363 if (!batch_mode) {
5364 err = ipw2100_disable_adapter(priv);
5365 if (err) {
5366 printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n",
5367 priv->net_dev->name, err);
5368 return err;
5369 }
5370 }
5371
5372 /* send cmd to firmware */
5373 err = ipw2100_hw_send_command(priv, &cmd);
5374
5375 if (!batch_mode)
5376 ipw2100_enable_adapter(priv);
5377
5378 return err;
5379}
5380
5381
5382static int ipw2100_configure_security(struct ipw2100_priv *priv,
5383 int batch_mode)
5384{
5385 int i, err, auth_mode, sec_level, use_group;
5386
5387 if (!(priv->status & STATUS_RUNNING))
5388 return 0;
5389
5390 if (!batch_mode) {
5391 err = ipw2100_disable_adapter(priv);
5392 if (err)
5393 return err;
5394 }
5395
5396 if (!priv->sec.enabled) {
5397 err = ipw2100_set_security_information(
5398 priv, IPW_AUTH_OPEN, SEC_LEVEL_0, 0, 1);
5399 } else {
5400 auth_mode = IPW_AUTH_OPEN;
5401 if ((priv->sec.flags & SEC_AUTH_MODE) &&
5402 (priv->sec.auth_mode == WLAN_AUTH_SHARED_KEY))
5403 auth_mode = IPW_AUTH_SHARED;
5404
5405 sec_level = SEC_LEVEL_0;
5406 if (priv->sec.flags & SEC_LEVEL)
5407 sec_level = priv->sec.level;
5408
5409 use_group = 0;
5410 if (priv->sec.flags & SEC_UNICAST_GROUP)
5411 use_group = priv->sec.unicast_uses_group;
5412
5413 err = ipw2100_set_security_information(
5414 priv, auth_mode, sec_level, use_group, 1);
5415 }
5416
5417 if (err)
5418 goto exit;
5419
5420 if (priv->sec.enabled) {
5421 for (i = 0; i < 4; i++) {
5422 if (!(priv->sec.flags & (1 << i))) {
5423 memset(priv->sec.keys[i], 0, WEP_KEY_LEN);
5424 priv->sec.key_sizes[i] = 0;
5425 } else {
5426 err = ipw2100_set_key(priv, i,
5427 priv->sec.keys[i],
5428 priv->sec.key_sizes[i],
5429 1);
5430 if (err)
5431 goto exit;
5432 }
5433 }
5434
5435 ipw2100_set_key_index(priv, priv->ieee->tx_keyidx, 1);
5436 }
5437
5438 /* Always enable privacy so the Host can filter WEP packets if
5439 * encrypted data is sent up */
5440 err = ipw2100_set_wep_flags(
5441 priv, priv->sec.enabled ? IPW_PRIVACY_CAPABLE : 0, 1);
5442 if (err)
5443 goto exit;
5444
5445 priv->status &= ~STATUS_SECURITY_UPDATED;
5446
5447 exit:
5448 if (!batch_mode)
5449 ipw2100_enable_adapter(priv);
5450
5451 return err;
5452}
5453
5454static void ipw2100_security_work(struct ipw2100_priv *priv)
5455{
5456	/* If the security settings changed, push them to the firmware --
5457	 * but only if we are not currently associated, since reconfiguring
5458	 * security forces a disassociation */
5459 if (!(priv->status & STATUS_ASSOCIATED) &&
5460 priv->status & STATUS_SECURITY_UPDATED)
5461 ipw2100_configure_security(priv, 0);
5462}
5463
5464static void shim__set_security(struct net_device *dev,
5465 struct ieee80211_security *sec)
5466{
5467 struct ipw2100_priv *priv = ieee80211_priv(dev);
5468 int i, force_update = 0;
5469
5470 down(&priv->action_sem);
5471 if (!(priv->status & STATUS_INITIALIZED))
5472 goto done;
5473
5474 for (i = 0; i < 4; i++) {
5475 if (sec->flags & (1 << i)) {
5476 priv->sec.key_sizes[i] = sec->key_sizes[i];
5477 if (sec->key_sizes[i] == 0)
5478 priv->sec.flags &= ~(1 << i);
5479 else
5480 memcpy(priv->sec.keys[i], sec->keys[i],
5481 sec->key_sizes[i]);
5482 priv->sec.flags |= (1 << i);
5483 priv->status |= STATUS_SECURITY_UPDATED;
5484 }
5485 }
5486
5487 if ((sec->flags & SEC_ACTIVE_KEY) &&
5488 priv->sec.active_key != sec->active_key) {
5489 if (sec->active_key <= 3) {
5490 priv->sec.active_key = sec->active_key;
5491 priv->sec.flags |= SEC_ACTIVE_KEY;
5492 } else
5493 priv->sec.flags &= ~SEC_ACTIVE_KEY;
5494
5495 priv->status |= STATUS_SECURITY_UPDATED;
5496 }
5497
5498 if ((sec->flags & SEC_AUTH_MODE) &&
5499 (priv->sec.auth_mode != sec->auth_mode)) {
5500 priv->sec.auth_mode = sec->auth_mode;
5501 priv->sec.flags |= SEC_AUTH_MODE;
5502 priv->status |= STATUS_SECURITY_UPDATED;
5503 }
5504
5505 if (sec->flags & SEC_ENABLED &&
5506 priv->sec.enabled != sec->enabled) {
5507 priv->sec.flags |= SEC_ENABLED;
5508 priv->sec.enabled = sec->enabled;
5509 priv->status |= STATUS_SECURITY_UPDATED;
5510 force_update = 1;
5511 }
5512
5513 if (sec->flags & SEC_LEVEL &&
5514 priv->sec.level != sec->level) {
5515 priv->sec.level = sec->level;
5516 priv->sec.flags |= SEC_LEVEL;
5517 priv->status |= STATUS_SECURITY_UPDATED;
5518 }
5519
5520 IPW_DEBUG_WEP("Security flags: %c %c%c%c%c %c%c%c%c\n",
5521 priv->sec.flags & (1<<8) ? '1' : '0',
5522 priv->sec.flags & (1<<7) ? '1' : '0',
5523 priv->sec.flags & (1<<6) ? '1' : '0',
5524 priv->sec.flags & (1<<5) ? '1' : '0',
5525 priv->sec.flags & (1<<4) ? '1' : '0',
5526 priv->sec.flags & (1<<3) ? '1' : '0',
5527 priv->sec.flags & (1<<2) ? '1' : '0',
5528 priv->sec.flags & (1<<1) ? '1' : '0',
5529 priv->sec.flags & (1<<0) ? '1' : '0');
5530
5531/* As a temporary workaround to enable WPA until we figure out why
5532 * wpa_supplicant toggles the security capability of the driver, which
5533 * forces a disassociation with force_update...
5534 *
5535 * if (force_update || !(priv->status & STATUS_ASSOCIATED))*/
5536 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
5537 ipw2100_configure_security(priv, 0);
5538done:
5539 up(&priv->action_sem);
5540}
5541
5542static int ipw2100_adapter_setup(struct ipw2100_priv *priv)
5543{
5544 int err;
5545 int batch_mode = 1;
5546 u8 *bssid;
5547
5548 IPW_DEBUG_INFO("enter\n");
5549
5550 err = ipw2100_disable_adapter(priv);
5551 if (err)
5552 return err;
5553#ifdef CONFIG_IPW2100_MONITOR
5554 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
5555 err = ipw2100_set_channel(priv, priv->channel, batch_mode);
5556 if (err)
5557 return err;
5558
5559 IPW_DEBUG_INFO("exit\n");
5560
5561 return 0;
5562 }
5563#endif /* CONFIG_IPW2100_MONITOR */
5564
5565 err = ipw2100_read_mac_address(priv);
5566 if (err)
5567 return -EIO;
5568
5569 err = ipw2100_set_mac_address(priv, batch_mode);
5570 if (err)
5571 return err;
5572
5573 err = ipw2100_set_port_type(priv, priv->ieee->iw_mode, batch_mode);
5574 if (err)
5575 return err;
5576
5577 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5578 err = ipw2100_set_channel(priv, priv->channel, batch_mode);
5579 if (err)
5580 return err;
5581 }
5582
5583 err = ipw2100_system_config(priv, batch_mode);
5584 if (err)
5585 return err;
5586
5587 err = ipw2100_set_tx_rates(priv, priv->tx_rates, batch_mode);
5588 if (err)
5589 return err;
5590
5591 /* Default to power mode OFF */
5592 err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
5593 if (err)
5594 return err;
5595
5596 err = ipw2100_set_rts_threshold(priv, priv->rts_threshold);
5597 if (err)
5598 return err;
5599
5600 if (priv->config & CFG_STATIC_BSSID)
5601 bssid = priv->bssid;
5602 else
5603 bssid = NULL;
5604 err = ipw2100_set_mandatory_bssid(priv, bssid, batch_mode);
5605 if (err)
5606 return err;
5607
5608 if (priv->config & CFG_STATIC_ESSID)
5609 err = ipw2100_set_essid(priv, priv->essid, priv->essid_len,
5610 batch_mode);
5611 else
5612 err = ipw2100_set_essid(priv, NULL, 0, batch_mode);
5613 if (err)
5614 return err;
5615
5616 err = ipw2100_configure_security(priv, batch_mode);
5617 if (err)
5618 return err;
5619
5620 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5621 err = ipw2100_set_ibss_beacon_interval(
5622 priv, priv->beacon_interval, batch_mode);
5623 if (err)
5624 return err;
5625
5626 err = ipw2100_set_tx_power(priv, priv->tx_power);
5627 if (err)
5628 return err;
5629 }
5630
5631 /*
5632 err = ipw2100_set_fragmentation_threshold(
5633 priv, priv->frag_threshold, batch_mode);
5634 if (err)
5635 return err;
5636 */
5637
5638 IPW_DEBUG_INFO("exit\n");
5639
5640 return 0;
5641}
5642
5643
5644/*************************************************************************
5645 *
5646 * EXTERNALLY CALLED METHODS
5647 *
5648 *************************************************************************/
5649
5650/* This method is called by the network layer to change the interface's
5651 * MAC address -- not to be confused with ipw2100_set_mac_address() above,
5652 * which this driver (including this method) uses to talk to the firmware */
5653static int ipw2100_set_address(struct net_device *dev, void *p)
5654{
5655 struct ipw2100_priv *priv = ieee80211_priv(dev);
5656 struct sockaddr *addr = p;
5657 int err = 0;
5658
5659 if (!is_valid_ether_addr(addr->sa_data))
5660 return -EADDRNOTAVAIL;
5661
5662 down(&priv->action_sem);
5663
5664 priv->config |= CFG_CUSTOM_MAC;
5665 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
5666
5667 err = ipw2100_set_mac_address(priv, 0);
5668 if (err)
5669 goto done;
5670
5671 priv->reset_backoff = 0;
5672 up(&priv->action_sem);
5673 ipw2100_reset_adapter(priv);
5674 return 0;
5675
5676 done:
5677 up(&priv->action_sem);
5678 return err;
5679}
5680
5681static int ipw2100_open(struct net_device *dev)
5682{
5683 struct ipw2100_priv *priv = ieee80211_priv(dev);
5684 unsigned long flags;
5685 IPW_DEBUG_INFO("dev->open\n");
5686
5687 spin_lock_irqsave(&priv->low_lock, flags);
5688 if (priv->status & STATUS_ASSOCIATED) {
5689 netif_carrier_on(dev);
5690 netif_start_queue(dev);
5691 }
5692 spin_unlock_irqrestore(&priv->low_lock, flags);
5693
5694 return 0;
5695}
5696
5697static int ipw2100_close(struct net_device *dev)
5698{
5699 struct ipw2100_priv *priv = ieee80211_priv(dev);
5700 unsigned long flags;
5701 struct list_head *element;
5702 struct ipw2100_tx_packet *packet;
5703
5704 IPW_DEBUG_INFO("enter\n");
5705
5706 spin_lock_irqsave(&priv->low_lock, flags);
5707
5708 if (priv->status & STATUS_ASSOCIATED)
5709 netif_carrier_off(dev);
5710 netif_stop_queue(dev);
5711
5712 /* Flush the TX queue ... */
5713 while (!list_empty(&priv->tx_pend_list)) {
5714 element = priv->tx_pend_list.next;
5715 packet = list_entry(element, struct ipw2100_tx_packet, list);
5716
5717 list_del(element);
5718 DEC_STAT(&priv->tx_pend_stat);
5719
5720 ieee80211_txb_free(packet->info.d_struct.txb);
5721 packet->info.d_struct.txb = NULL;
5722
5723 list_add_tail(element, &priv->tx_free_list);
5724 INC_STAT(&priv->tx_free_stat);
5725 }
5726 spin_unlock_irqrestore(&priv->low_lock, flags);
5727
5728 IPW_DEBUG_INFO("exit\n");
5729
5730 return 0;
5731}
5732
5733
5734
5735/*
5736 * TODO: Fix this function... its just wrong
5737 */
5738static void ipw2100_tx_timeout(struct net_device *dev)
5739{
5740 struct ipw2100_priv *priv = ieee80211_priv(dev);
5741
5742 priv->ieee->stats.tx_errors++;
5743
5744#ifdef CONFIG_IPW2100_MONITOR
5745 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5746 return;
5747#endif
5748
5749 IPW_DEBUG_INFO("%s: TX timed out. Scheduling firmware restart.\n",
5750 dev->name);
5751 schedule_reset(priv);
5752}
5753
5754
5755/*
5756 * TODO: reimplement it so that it reads statistics
5757 * from the adapter using ordinal tables
5758 * instead of/in addition to collecting them
5759 * in the driver
5760 */
5761static struct net_device_stats *ipw2100_stats(struct net_device *dev)
5762{
5763 struct ipw2100_priv *priv = ieee80211_priv(dev);
5764
5765 return &priv->ieee->stats;
5766}
5767
5768/* Support for wpa_supplicant. Will be replaced with WEXT once
5769 * it gains WPA support. */
5770#ifdef CONFIG_IEEE80211_WPA
5771
5772/* following definitions must match definitions in driver_ipw2100.c */
5773
5774#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
5775
5776#define IPW2100_CMD_SET_WPA_PARAM 1
5777#define IPW2100_CMD_SET_WPA_IE 2
5778#define IPW2100_CMD_SET_ENCRYPTION 3
5779#define IPW2100_CMD_MLME 4
5780
5781#define IPW2100_PARAM_WPA_ENABLED 1
5782#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
5783#define IPW2100_PARAM_DROP_UNENCRYPTED 3
5784#define IPW2100_PARAM_PRIVACY_INVOKED 4
5785#define IPW2100_PARAM_AUTH_ALGS 5
5786#define IPW2100_PARAM_IEEE_802_1X 6
5787
5788#define IPW2100_MLME_STA_DEAUTH 1
5789#define IPW2100_MLME_STA_DISASSOC 2
5790
5791#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
5792#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
5793#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
5794#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
5795#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
5796#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7
5797
5798#define IPW2100_CRYPT_ALG_NAME_LEN 16
5799
5800struct ipw2100_param {
5801 u32 cmd;
5802 u8 sta_addr[ETH_ALEN];
5803 union {
5804 struct {
5805 u8 name;
5806 u32 value;
5807 } wpa_param;
5808 struct {
5809 u32 len;
5810 u8 *data;
5811 } wpa_ie;
5812 struct{
5813 int command;
5814 int reason_code;
5815 } mlme;
5816 struct {
5817 u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
5818 u8 set_tx;
5819 u32 err;
5820 u8 idx;
5821 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
5822 u16 key_len;
5823 u8 key[0];
5824 } crypt;
5825
5826 } u;
5827};
5828
5829/* end of driver_ipw2100.c code */
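/* Rough sketch of the userspace side (wpa_supplicant's driver_ipw2100.c) of
 * this interface -- the socket and interface name below are illustrative
 * placeholders, not values defined in this driver:
 *
 *	struct ipw2100_param param;
 *	struct iwreq wrq;
 *
 *	memset(&param, 0, sizeof(param));
 *	param.cmd = IPW2100_CMD_SET_WPA_PARAM;
 *	param.u.wpa_param.name = IPW2100_PARAM_WPA_ENABLED;
 *	param.u.wpa_param.value = 1;
 *
 *	memset(&wrq, 0, sizeof(wrq));
 *	strncpy(wrq.ifr_name, "eth1", IFNAMSIZ);
 *	wrq.u.data.pointer = (caddr_t) &param;
 *	wrq.u.data.length = sizeof(param);
 *	ioctl(sock, IPW2100_IOCTL_WPA_SUPPLICANT, &wrq);
 *
 * ipw2100_wpa_supplicant() below copies the buffer in with copy_from_user()
 * and dispatches on param->cmd. */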
5830
5831static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value){
5832
5833 struct ieee80211_device *ieee = priv->ieee;
5834 struct ieee80211_security sec = {
5835 .flags = SEC_LEVEL | SEC_ENABLED,
5836 };
5837 int ret = 0;
5838
5839 ieee->wpa_enabled = value;
5840
5841 if (value){
5842 sec.level = SEC_LEVEL_3;
5843 sec.enabled = 1;
5844 } else {
5845 sec.level = SEC_LEVEL_0;
5846 sec.enabled = 0;
5847 }
5848
5849 if (ieee->set_security)
5850 ieee->set_security(ieee->dev, &sec);
5851 else
5852 ret = -EOPNOTSUPP;
5853
5854 return ret;
5855}
5856
5857#define AUTH_ALG_OPEN_SYSTEM 0x1
5858#define AUTH_ALG_SHARED_KEY 0x2
5859
5860static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value){
5861
5862 struct ieee80211_device *ieee = priv->ieee;
5863 struct ieee80211_security sec = {
5864 .flags = SEC_AUTH_MODE,
5865 };
5866 int ret = 0;
5867
5868 if (value & AUTH_ALG_SHARED_KEY){
5869 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
5870 ieee->open_wep = 0;
5871 } else {
5872 sec.auth_mode = WLAN_AUTH_OPEN;
5873 ieee->open_wep = 1;
5874 }
5875
5876 if (ieee->set_security)
5877 ieee->set_security(ieee->dev, &sec);
5878 else
5879 ret = -EOPNOTSUPP;
5880
5881 return ret;
5882}
5883
5884
5885static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value){
5886
5887 struct ipw2100_priv *priv = ieee80211_priv(dev);
5888 int ret=0;
5889
5890 switch(name){
5891 case IPW2100_PARAM_WPA_ENABLED:
5892 ret = ipw2100_wpa_enable(priv, value);
5893 break;
5894
5895 case IPW2100_PARAM_TKIP_COUNTERMEASURES:
5896 priv->ieee->tkip_countermeasures=value;
5897 break;
5898
5899 case IPW2100_PARAM_DROP_UNENCRYPTED:
5900 priv->ieee->drop_unencrypted=value;
5901 break;
5902
5903 case IPW2100_PARAM_PRIVACY_INVOKED:
5904 priv->ieee->privacy_invoked=value;
5905 break;
5906
5907 case IPW2100_PARAM_AUTH_ALGS:
5908 ret = ipw2100_wpa_set_auth_algs(priv, value);
5909 break;
5910
5911 case IPW2100_PARAM_IEEE_802_1X:
5912 priv->ieee->ieee802_1x=value;
5913 break;
5914
5915 default:
5916 printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
5917 dev->name, name);
5918 ret = -EOPNOTSUPP;
5919 }
5920
5921 return ret;
5922}
5923
5924static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason){
5925
5926 struct ipw2100_priv *priv = ieee80211_priv(dev);
5927 int ret=0;
5928
5929 switch(command){
5930 case IPW2100_MLME_STA_DEAUTH:
5931		/* silently ignore */
5932 break;
5933
5934 case IPW2100_MLME_STA_DISASSOC:
5935 ipw2100_disassociate_bssid(priv);
5936 break;
5937
5938 default:
5939 printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
5940 dev->name, command);
5941 ret = -EOPNOTSUPP;
5942 }
5943
5944 return ret;
5945}
5946
5947
5948void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
5949 char *wpa_ie, int wpa_ie_len){
5950
5951 struct ipw2100_wpa_assoc_frame frame;
5952
5953 frame.fixed_ie_mask = 0;
5954
5955 /* copy WPA IE */
5956 memcpy(frame.var_ie, wpa_ie, wpa_ie_len);
5957 frame.var_ie_len = wpa_ie_len;
5958
5959 /* make sure WPA is enabled */
5960 ipw2100_wpa_enable(priv, 1);
5961 ipw2100_set_wpa_ie(priv, &frame, 0);
5962}
5963
5964
5965static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
5966 struct ipw2100_param *param, int plen){
5967
5968 struct ipw2100_priv *priv = ieee80211_priv(dev);
5969 struct ieee80211_device *ieee = priv->ieee;
5970 u8 *buf;
5971
5972 if (! ieee->wpa_enabled)
5973 return -EOPNOTSUPP;
5974
5975 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
5976 (param->u.wpa_ie.len &&
5977 param->u.wpa_ie.data==NULL))
5978 return -EINVAL;
5979
5980 if (param->u.wpa_ie.len){
5981 buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
5982 if (buf == NULL)
5983 return -ENOMEM;
5984
5985 memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
5986
5987 kfree(ieee->wpa_ie);
5988 ieee->wpa_ie = buf;
5989 ieee->wpa_ie_len = param->u.wpa_ie.len;
5990
5991 } else {
5992 kfree(ieee->wpa_ie);
5993 ieee->wpa_ie = NULL;
5994 ieee->wpa_ie_len = 0;
5995 }
5996
5997 ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
5998
5999 return 0;
6000}
6001
6002/* implementation borrowed from hostap driver */
6003
6004static int ipw2100_wpa_set_encryption(struct net_device *dev,
6005 struct ipw2100_param *param, int param_len){
6006
6007 int ret = 0;
6008 struct ipw2100_priv *priv = ieee80211_priv(dev);
6009 struct ieee80211_device *ieee = priv->ieee;
6010 struct ieee80211_crypto_ops *ops;
6011 struct ieee80211_crypt_data **crypt;
6012
6013 struct ieee80211_security sec = {
6014 .flags = 0,
6015 };
6016
6017 param->u.crypt.err = 0;
6018 param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
6019
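	/* Sanity-check the ioctl payload length: key[] is a zero-length
	 * array at the end of struct ipw2100_param, so the expected size
	 * is the offset of u.crypt.key within the struct plus the
	 * advertised key_len. */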
6020 if (param_len !=
6021 (int) ((char *) param->u.crypt.key - (char *) param) +
6022 param->u.crypt.key_len){
6023 IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len, param->u.crypt.key_len);
6024 return -EINVAL;
6025 }
6026 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
6027 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
6028 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
6029 if (param->u.crypt.idx >= WEP_KEYS)
6030 return -EINVAL;
6031 crypt = &ieee->crypt[param->u.crypt.idx];
6032 } else {
6033 return -EINVAL;
6034 }
6035
6036 if (strcmp(param->u.crypt.alg, "none") == 0) {
6037 if (crypt){
6038 sec.enabled = 0;
6039 sec.level = SEC_LEVEL_0;
6040 sec.flags |= SEC_ENABLED | SEC_LEVEL;
6041 ieee80211_crypt_delayed_deinit(ieee, crypt);
6042 }
6043 goto done;
6044 }
6045 sec.enabled = 1;
6046 sec.flags |= SEC_ENABLED;
6047
6048 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6049 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
6050 request_module("ieee80211_crypt_wep");
6051 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6052 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
6053 request_module("ieee80211_crypt_tkip");
6054 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6055 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
6056 request_module("ieee80211_crypt_ccmp");
6057 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6058 }
6059 if (ops == NULL) {
6060 IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
6061 dev->name, param->u.crypt.alg);
6062 param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
6063 ret = -EINVAL;
6064 goto done;
6065 }
6066
6067 if (*crypt == NULL || (*crypt)->ops != ops) {
6068 struct ieee80211_crypt_data *new_crypt;
6069
6070 ieee80211_crypt_delayed_deinit(ieee, crypt);
6071
6072 new_crypt = (struct ieee80211_crypt_data *)
6073 kmalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
6074 if (new_crypt == NULL) {
6075 ret = -ENOMEM;
6076 goto done;
6077 }
6078 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
6079 new_crypt->ops = ops;
6080 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
6081 new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
6082
6083 if (new_crypt->priv == NULL) {
6084 kfree(new_crypt);
6085 param->u.crypt.err =
6086 IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
6087 ret = -EINVAL;
6088 goto done;
6089 }
6090
6091 *crypt = new_crypt;
6092 }
6093
6094 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
6095 (*crypt)->ops->set_key(param->u.crypt.key,
6096 param->u.crypt.key_len, param->u.crypt.seq,
6097 (*crypt)->priv) < 0) {
6098 IPW_DEBUG_INFO("%s: key setting failed\n",
6099 dev->name);
6100 param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
6101 ret = -EINVAL;
6102 goto done;
6103 }
6104
6105 if (param->u.crypt.set_tx){
6106 ieee->tx_keyidx = param->u.crypt.idx;
6107 sec.active_key = param->u.crypt.idx;
6108 sec.flags |= SEC_ACTIVE_KEY;
6109 }
6110
6111 if (ops->name != NULL){
6112
6113 if (strcmp(ops->name, "WEP") == 0) {
6114 memcpy(sec.keys[param->u.crypt.idx], param->u.crypt.key, param->u.crypt.key_len);
6115 sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
6116 sec.flags |= (1 << param->u.crypt.idx);
6117 sec.flags |= SEC_LEVEL;
6118 sec.level = SEC_LEVEL_1;
6119 } else if (strcmp(ops->name, "TKIP") == 0) {
6120 sec.flags |= SEC_LEVEL;
6121 sec.level = SEC_LEVEL_2;
6122 } else if (strcmp(ops->name, "CCMP") == 0) {
6123 sec.flags |= SEC_LEVEL;
6124 sec.level = SEC_LEVEL_3;
6125 }
6126 }
6127 done:
6128 if (ieee->set_security)
6129 ieee->set_security(ieee->dev, &sec);
6130
6131 /* Do not reset port if card is in Managed mode since resetting will
6132 * generate new IEEE 802.11 authentication which may end up in looping
6133 * with IEEE 802.1X. If your hardware requires a reset after WEP
6134 * configuration (for example... Prism2), implement the reset_port in
6135 * the callbacks structures used to initialize the 802.11 stack. */
6136 if (ieee->reset_on_keychange &&
6137 ieee->iw_mode != IW_MODE_INFRA &&
6138 ieee->reset_port &&
6139 ieee->reset_port(dev)) {
6140 IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
6141 param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
6142 return -EINVAL;
6143 }
6144
6145 return ret;
6146}
6147
6148
6149static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p){
6150
6151 struct ipw2100_param *param;
6152 int ret=0;
6153
6154 IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
6155
6156 if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
6157 return -EINVAL;
6158
6159 param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
6160 if (param == NULL)
6161 return -ENOMEM;
6162
6163 if (copy_from_user(param, p->pointer, p->length)){
6164 kfree(param);
6165 return -EFAULT;
6166 }
6167
6168 switch (param->cmd){
6169
6170 case IPW2100_CMD_SET_WPA_PARAM:
6171 ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
6172 param->u.wpa_param.value);
6173 break;
6174
6175 case IPW2100_CMD_SET_WPA_IE:
6176 ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
6177 break;
6178
6179 case IPW2100_CMD_SET_ENCRYPTION:
6180 ret = ipw2100_wpa_set_encryption(dev, param, p->length);
6181 break;
6182
6183 case IPW2100_CMD_MLME:
6184 ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
6185 param->u.mlme.reason_code);
6186 break;
6187
6188 default:
6189 printk(KERN_ERR DRV_NAME ": %s: Unknown WPA supplicant request: %d\n",
6190 dev->name, param->cmd);
6191 ret = -EOPNOTSUPP;
6192
6193 }
6194
6195 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
6196 ret = -EFAULT;
6197
6198 kfree(param);
6199 return ret;
6200}
6201#endif /* CONFIG_IEEE80211_WPA */
6202
6203static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6204{
6205#ifdef CONFIG_IEEE80211_WPA
6206 struct iwreq *wrq = (struct iwreq *) rq;
6207 int ret=-1;
6208 switch (cmd){
6209 case IPW2100_IOCTL_WPA_SUPPLICANT:
6210 ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
6211 return ret;
6212
6213 default:
6214 return -EOPNOTSUPP;
6215 }
6216
6217#endif /* CONFIG_IEEE80211_WPA */
6218
6219 return -EOPNOTSUPP;
6220}
6221
6222
6223static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6224 struct ethtool_drvinfo *info)
6225{
6226 struct ipw2100_priv *priv = ieee80211_priv(dev);
6227 char fw_ver[64], ucode_ver[64];
6228
6229 strcpy(info->driver, DRV_NAME);
6230 strcpy(info->version, DRV_VERSION);
6231
6232 ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
6233 ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
6234
6235 snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
6236 fw_ver, priv->eeprom_version, ucode_ver);
6237
6238 strcpy(info->bus_info, pci_name(priv->pci_dev));
6239}
6240
6241static u32 ipw2100_ethtool_get_link(struct net_device *dev)
6242{
6243 struct ipw2100_priv *priv = ieee80211_priv(dev);
6244 return (priv->status & STATUS_ASSOCIATED) ? 1 : 0;
6245}
6246
6247
6248static struct ethtool_ops ipw2100_ethtool_ops = {
6249 .get_link = ipw2100_ethtool_get_link,
6250 .get_drvinfo = ipw_ethtool_get_drvinfo,
6251};
6252
6253static void ipw2100_hang_check(void *adapter)
6254{
6255 struct ipw2100_priv *priv = adapter;
6256 unsigned long flags;
6257 u32 rtc = 0xa5a5a5a5;
6258 u32 len = sizeof(rtc);
6259 int restart = 0;
6260
6261 spin_lock_irqsave(&priv->low_lock, flags);
6262
6263 if (priv->fatal_error != 0) {
6264 /* If fatal_error is set then we need to restart */
6265 IPW_DEBUG_INFO("%s: Hardware fatal error detected.\n",
6266 priv->net_dev->name);
6267
6268 restart = 1;
6269 } else if (ipw2100_get_ordinal(priv, IPW_ORD_RTC_TIME, &rtc, &len) ||
6270 (rtc == priv->last_rtc)) {
6271 /* Check if firmware is hung */
6272 IPW_DEBUG_INFO("%s: Firmware RTC stalled.\n",
6273 priv->net_dev->name);
6274
6275 restart = 1;
6276 }
6277
6278 if (restart) {
6279 /* Kill timer */
6280 priv->stop_hang_check = 1;
6281 priv->hangs++;
6282
6283 /* Restart the NIC */
6284 schedule_reset(priv);
6285 }
6286
6287 priv->last_rtc = rtc;
6288
6289 if (!priv->stop_hang_check)
6290 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
6291
6292 spin_unlock_irqrestore(&priv->low_lock, flags);
6293}
6294
6295
6296static void ipw2100_rf_kill(void *adapter)
6297{
6298 struct ipw2100_priv *priv = adapter;
6299 unsigned long flags;
6300
6301 spin_lock_irqsave(&priv->low_lock, flags);
6302
6303 if (rf_kill_active(priv)) {
6304 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6305 if (!priv->stop_rf_kill)
6306 queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
6307 goto exit_unlock;
6308 }
6309
6310 /* RF Kill is now disabled, so bring the device back up */
6311
6312 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6313 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6314 "device\n");
6315 schedule_reset(priv);
6316 } else
6317 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6318 "enabled\n");
6319
6320 exit_unlock:
6321 spin_unlock_irqrestore(&priv->low_lock, flags);
6322}
6323
6324static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
6325
6326/* Look into using netdev destructor to shut down ieee80211? */
6327
6328static struct net_device *ipw2100_alloc_device(
6329 struct pci_dev *pci_dev,
6330 void __iomem *base_addr,
6331 unsigned long mem_start,
6332 unsigned long mem_len)
6333{
6334 struct ipw2100_priv *priv;
6335 struct net_device *dev;
6336
6337 dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
6338 if (!dev)
6339 return NULL;
6340 priv = ieee80211_priv(dev);
6341 priv->ieee = netdev_priv(dev);
6342 priv->pci_dev = pci_dev;
6343 priv->net_dev = dev;
6344
6345 priv->ieee->hard_start_xmit = ipw2100_tx;
6346 priv->ieee->set_security = shim__set_security;
6347
6348 dev->open = ipw2100_open;
6349 dev->stop = ipw2100_close;
6350 dev->init = ipw2100_net_init;
6351 dev->do_ioctl = ipw2100_ioctl;
6352 dev->get_stats = ipw2100_stats;
6353 dev->ethtool_ops = &ipw2100_ethtool_ops;
6354 dev->tx_timeout = ipw2100_tx_timeout;
6355 dev->wireless_handlers = &ipw2100_wx_handler_def;
6356 dev->get_wireless_stats = ipw2100_wx_wireless_stats;
6357 dev->set_mac_address = ipw2100_set_address;
6358 dev->watchdog_timeo = 3*HZ;
6359 dev->irq = 0;
6360
6361 dev->base_addr = (unsigned long)base_addr;
6362 dev->mem_start = mem_start;
6363 dev->mem_end = dev->mem_start + mem_len - 1;
6364
6365 /* NOTE: We don't use the wireless_handlers hook
6366 * in dev as the system will start throwing WX requests
6367 * to us before we're actually initialized and it just
6368 * ends up causing problems. So, we just handle
6369 * the WX extensions through the ipw2100_ioctl interface */
6370
6371
6372	/* memset() puts everything to 0, so we only have to explicitly set
6373	 * those values that need to be something else */
6374
6375 /* If power management is turned on, default to AUTO mode */
6376 priv->power_mode = IPW_POWER_AUTO;
6377
6378
6379
6380#ifdef CONFIG_IEEE80211_WPA
6381 priv->ieee->wpa_enabled = 0;
6382 priv->ieee->tkip_countermeasures = 0;
6383 priv->ieee->drop_unencrypted = 0;
6384 priv->ieee->privacy_invoked = 0;
6385 priv->ieee->ieee802_1x = 1;
6386#endif /* CONFIG_IEEE80211_WPA */
6387
6388 /* Set module parameters */
6389 switch (mode) {
6390 case 1:
6391 priv->ieee->iw_mode = IW_MODE_ADHOC;
6392 break;
6393#ifdef CONFIG_IPW2100_MONITOR
6394 case 2:
6395 priv->ieee->iw_mode = IW_MODE_MONITOR;
6396 break;
6397#endif
6398 default:
6399 case 0:
6400 priv->ieee->iw_mode = IW_MODE_INFRA;
6401 break;
6402 }
6403
6404 if (disable == 1)
6405 priv->status |= STATUS_RF_KILL_SW;
6406
6407 if (channel != 0 &&
6408 ((channel >= REG_MIN_CHANNEL) &&
6409 (channel <= REG_MAX_CHANNEL))) {
6410 priv->config |= CFG_STATIC_CHANNEL;
6411 priv->channel = channel;
6412 }
6413
6414 if (associate)
6415 priv->config |= CFG_ASSOCIATE;
6416
6417 priv->beacon_interval = DEFAULT_BEACON_INTERVAL;
6418 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
6419 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
6420 priv->rts_threshold = DEFAULT_RTS_THRESHOLD | RTS_DISABLED;
6421 priv->frag_threshold = DEFAULT_FTS | FRAG_DISABLED;
6422 priv->tx_power = IPW_TX_POWER_DEFAULT;
6423 priv->tx_rates = DEFAULT_TX_RATES;
6424
6425 strcpy(priv->nick, "ipw2100");
6426
6427 spin_lock_init(&priv->low_lock);
6428 sema_init(&priv->action_sem, 1);
6429 sema_init(&priv->adapter_sem, 1);
6430
6431 init_waitqueue_head(&priv->wait_command_queue);
6432
6433 netif_carrier_off(dev);
6434
6435 INIT_LIST_HEAD(&priv->msg_free_list);
6436 INIT_LIST_HEAD(&priv->msg_pend_list);
6437 INIT_STAT(&priv->msg_free_stat);
6438 INIT_STAT(&priv->msg_pend_stat);
6439
6440 INIT_LIST_HEAD(&priv->tx_free_list);
6441 INIT_LIST_HEAD(&priv->tx_pend_list);
6442 INIT_STAT(&priv->tx_free_stat);
6443 INIT_STAT(&priv->tx_pend_stat);
6444
6445 INIT_LIST_HEAD(&priv->fw_pend_list);
6446 INIT_STAT(&priv->fw_pend_stat);
6447
6448
6449#ifdef CONFIG_SOFTWARE_SUSPEND2
6450 priv->workqueue = create_workqueue(DRV_NAME, 0);
6451#else
6452 priv->workqueue = create_workqueue(DRV_NAME);
6453#endif
6454 INIT_WORK(&priv->reset_work,
6455 (void (*)(void *))ipw2100_reset_adapter, priv);
6456 INIT_WORK(&priv->security_work,
6457 (void (*)(void *))ipw2100_security_work, priv);
6458 INIT_WORK(&priv->wx_event_work,
6459 (void (*)(void *))ipw2100_wx_event_work, priv);
6460 INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
6461 INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
6462
6463 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6464 ipw2100_irq_tasklet, (unsigned long)priv);
6465
6466 /* NOTE: We do not start the deferred work for status checks yet */
6467 priv->stop_rf_kill = 1;
6468 priv->stop_hang_check = 1;
6469
6470 return dev;
6471}
6472
6473static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6474 const struct pci_device_id *ent)
6475{
6476 unsigned long mem_start, mem_len, mem_flags;
6477 void __iomem *base_addr = NULL;
6478 struct net_device *dev = NULL;
6479 struct ipw2100_priv *priv = NULL;
6480 int err = 0;
6481 int registered = 0;
6482 u32 val;
6483
6484 IPW_DEBUG_INFO("enter\n");
6485
6486 mem_start = pci_resource_start(pci_dev, 0);
6487 mem_len = pci_resource_len(pci_dev, 0);
6488 mem_flags = pci_resource_flags(pci_dev, 0);
6489
6490 if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
6491 IPW_DEBUG_INFO("weird - resource type is not memory\n");
6492 err = -ENODEV;
6493 goto fail;
6494 }
6495
6496 base_addr = ioremap_nocache(mem_start, mem_len);
6497 if (!base_addr) {
6498 printk(KERN_WARNING DRV_NAME
6499 "Error calling ioremap_nocache.\n");
6500 err = -EIO;
6501 goto fail;
6502 }
6503
6504 /* allocate and initialize our net_device */
6505 dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len);
6506 if (!dev) {
6507 printk(KERN_WARNING DRV_NAME
6508 "Error calling ipw2100_alloc_device.\n");
6509 err = -ENOMEM;
6510 goto fail;
6511 }
6512
6513 /* set up PCI mappings for device */
6514 err = pci_enable_device(pci_dev);
6515 if (err) {
6516 printk(KERN_WARNING DRV_NAME
6517 "Error calling pci_enable_device.\n");
6518 return err;
6519 }
6520
6521 priv = ieee80211_priv(dev);
6522
6523 pci_set_master(pci_dev);
6524 pci_set_drvdata(pci_dev, priv);
6525
6526 err = pci_set_dma_mask(pci_dev, DMA_32BIT_MASK);
6527 if (err) {
6528 printk(KERN_WARNING DRV_NAME
6529 "Error calling pci_set_dma_mask.\n");
6530 pci_disable_device(pci_dev);
6531 return err;
6532 }
6533
6534 err = pci_request_regions(pci_dev, DRV_NAME);
6535 if (err) {
6536 printk(KERN_WARNING DRV_NAME
6537 "Error calling pci_request_regions.\n");
6538 pci_disable_device(pci_dev);
6539 return err;
6540 }
6541
6542 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6543 * PCI Tx retries from interfering with C3 CPU state */
6544 pci_read_config_dword(pci_dev, 0x40, &val);
6545 if ((val & 0x0000ff00) != 0)
6546 pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
6547
6548 pci_set_power_state(pci_dev, PCI_D0);
6549
6550 if (!ipw2100_hw_is_adapter_in_system(dev)) {
6551 printk(KERN_WARNING DRV_NAME
6552 "Device not found via register read.\n");
6553 err = -ENODEV;
6554 goto fail;
6555 }
6556
6557 SET_NETDEV_DEV(dev, &pci_dev->dev);
6558
6559 /* Force interrupts to be shut off on the device */
6560 priv->status |= STATUS_INT_ENABLED;
6561 ipw2100_disable_interrupts(priv);
6562
6563 /* Allocate and initialize the Tx/Rx queues and lists */
6564 if (ipw2100_queues_allocate(priv)) {
6565 printk(KERN_WARNING DRV_NAME
6566		       "Error calling ipw2100_queues_allocate.\n");
6567 err = -ENOMEM;
6568 goto fail;
6569 }
6570 ipw2100_queues_initialize(priv);
6571
6572 err = request_irq(pci_dev->irq,
6573 ipw2100_interrupt, SA_SHIRQ,
6574 dev->name, priv);
6575 if (err) {
6576 printk(KERN_WARNING DRV_NAME
6577 "Error calling request_irq: %d.\n",
6578 pci_dev->irq);
6579 goto fail;
6580 }
6581 dev->irq = pci_dev->irq;
6582
6583 IPW_DEBUG_INFO("Attempting to register device...\n");
6584
6585 SET_MODULE_OWNER(dev);
6586
6587 printk(KERN_INFO DRV_NAME
6588 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6589
6590 /* Bring up the interface. Pre 0.46, after we registered the
6591 * network device we would call ipw2100_up. This introduced a race
6592 * condition with newer hotplug configurations (network was coming
6593 * up and making calls before the device was initialized).
6594 *
6595 * If we called ipw2100_up before we registered the device, then the
6596 * device name wasn't registered. So, we instead use the net_dev->init
6597 * member to call a function that then just turns and calls ipw2100_up.
6598 * net_dev->init is called after name allocation but before the
6599 * notifier chain is called */
6600 down(&priv->action_sem);
6601 err = register_netdev(dev);
6602 if (err) {
6603 printk(KERN_WARNING DRV_NAME
6604 "Error calling register_netdev.\n");
6605 goto fail_unlock;
6606 }
6607 registered = 1;
6608
6609 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
6610
6611 /* perform this after register_netdev so that dev->name is set */
6612 sysfs_create_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6613 netif_carrier_off(dev);
6614
6615 /* If the RF Kill switch is disabled, go ahead and complete the
6616 * startup sequence */
6617 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6618 /* Enable the adapter - sends HOST_COMPLETE */
6619 if (ipw2100_enable_adapter(priv)) {
6620 printk(KERN_WARNING DRV_NAME
6621 ": %s: failed in call to enable adapter.\n",
6622 priv->net_dev->name);
6623 ipw2100_hw_stop_adapter(priv);
6624 err = -EIO;
6625 goto fail_unlock;
6626 }
6627
6628 /* Start a scan . . . */
6629 ipw2100_set_scan_options(priv);
6630 ipw2100_start_scan(priv);
6631 }
6632
6633 IPW_DEBUG_INFO("exit\n");
6634
6635 priv->status |= STATUS_INITIALIZED;
6636
6637 up(&priv->action_sem);
6638
6639 return 0;
6640
6641 fail_unlock:
6642 up(&priv->action_sem);
6643
6644 fail:
6645 if (dev) {
6646 if (registered)
6647 unregister_netdev(dev);
6648
6649 ipw2100_hw_stop_adapter(priv);
6650
6651 ipw2100_disable_interrupts(priv);
6652
6653 if (dev->irq)
6654 free_irq(dev->irq, priv);
6655
6656 ipw2100_kill_workqueue(priv);
6657
6658 /* These are safe to call even if they weren't allocated */
6659 ipw2100_queues_free(priv);
6660 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6661
6662 free_ieee80211(dev);
6663 pci_set_drvdata(pci_dev, NULL);
6664 }
6665
6666 if (base_addr)
6667 iounmap(base_addr);
6668
6669 pci_release_regions(pci_dev);
6670 pci_disable_device(pci_dev);
6671
6672 return err;
6673}
6674
6675static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6676{
6677 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6678 struct net_device *dev;
6679
6680 if (priv) {
6681 down(&priv->action_sem);
6682
6683 priv->status &= ~STATUS_INITIALIZED;
6684
6685 dev = priv->net_dev;
6686 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6687
6688#ifdef CONFIG_PM
6689 if (ipw2100_firmware.version)
6690 ipw2100_release_firmware(priv, &ipw2100_firmware);
6691#endif
6692 /* Take down the hardware */
6693 ipw2100_down(priv);
6694
6695 /* Release the semaphore so that the network subsystem can
6696 * complete any needed calls into the driver... */
6697 up(&priv->action_sem);
6698
6699 /* Unregister the device first - this results in close()
6700 * being called if the device is open. If we free storage
6701 * first, then close() will crash. */
6702 unregister_netdev(dev);
6703
6704 /* ipw2100_down will ensure that there is no more pending work
6705		 * in the workqueues, so we can safely remove them now. */
6706 ipw2100_kill_workqueue(priv);
6707
6708 ipw2100_queues_free(priv);
6709
6710 /* Free potential debugging firmware snapshot */
6711 ipw2100_snapshot_free(priv);
6712
6713 if (dev->irq)
6714 free_irq(dev->irq, priv);
6715
6716 if (dev->base_addr)
6717 iounmap((void __iomem *)dev->base_addr);
6718
6719 free_ieee80211(dev);
6720 }
6721
6722 pci_release_regions(pci_dev);
6723 pci_disable_device(pci_dev);
6724
6725 IPW_DEBUG_INFO("exit\n");
6726}
6727
6728
6729#ifdef CONFIG_PM
6730#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
6731static int ipw2100_suspend(struct pci_dev *pci_dev, u32 state)
6732#else
6733static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
6734#endif
6735{
6736 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6737 struct net_device *dev = priv->net_dev;
6738
6739 IPW_DEBUG_INFO("%s: Going into suspend...\n",
6740 dev->name);
6741
6742 down(&priv->action_sem);
6743 if (priv->status & STATUS_INITIALIZED) {
6744 /* Take down the device; powers it off, etc. */
6745 ipw2100_down(priv);
6746 }
6747
6748 /* Remove the PRESENT state of the device */
6749 netif_device_detach(dev);
6750
6751 pci_save_state(pci_dev);
6752 pci_disable_device (pci_dev);
6753 pci_set_power_state(pci_dev, PCI_D3hot);
6754
6755 up(&priv->action_sem);
6756
6757 return 0;
6758}
6759
6760static int ipw2100_resume(struct pci_dev *pci_dev)
6761{
6762 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6763 struct net_device *dev = priv->net_dev;
6764 u32 val;
6765
6766 if (IPW2100_PM_DISABLED)
6767 return 0;
6768
6769 down(&priv->action_sem);
6770
6771 IPW_DEBUG_INFO("%s: Coming out of suspend...\n",
6772 dev->name);
6773
6774 pci_set_power_state(pci_dev, PCI_D0);
6775 pci_enable_device(pci_dev);
6776 pci_restore_state(pci_dev);
6777
6778 /*
6779 * Suspend/Resume resets the PCI configuration space, so we have to
6780 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
6781 * from interfering with C3 CPU state. pci_restore_state won't help
 6782	 * here since it only restores the first 64 bytes of the PCI config header.
6783 */
6784 pci_read_config_dword(pci_dev, 0x40, &val);
6785 if ((val & 0x0000ff00) != 0)
6786 pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
6787
6788 /* Set the device back into the PRESENT state; this will also wake
 6789	 * the queue if needed */
6790 netif_device_attach(dev);
6791
6792 /* Bring the device back up */
6793 if (!(priv->status & STATUS_RF_KILL_SW))
6794 ipw2100_up(priv, 0);
6795
6796 up(&priv->action_sem);
6797
6798 return 0;
6799}
6800#endif
6801
6802
6803#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
6804
6805static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
6806 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
6807 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
6808 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
6809 IPW2100_DEV_ID(0x2525), /* IN 2100A mPCI 3B */
6810 IPW2100_DEV_ID(0x2526), /* IN 2100A mPCI Gen A3 */
6811 IPW2100_DEV_ID(0x2522), /* IN 2100 mPCI 3B */
6812 IPW2100_DEV_ID(0x2523), /* IN 2100 mPCI 3A */
6813 IPW2100_DEV_ID(0x2527), /* IN 2100 mPCI 3B */
6814 IPW2100_DEV_ID(0x2528), /* IN 2100 mPCI 3B */
6815 IPW2100_DEV_ID(0x2529), /* IN 2100 mPCI 3B */
6816 IPW2100_DEV_ID(0x252B), /* IN 2100 mPCI 3A */
6817 IPW2100_DEV_ID(0x252C), /* IN 2100 mPCI 3A */
6818 IPW2100_DEV_ID(0x252D), /* IN 2100 mPCI 3A */
6819
6820 IPW2100_DEV_ID(0x2550), /* IB 2100A mPCI 3B */
6821 IPW2100_DEV_ID(0x2551), /* IB 2100 mPCI 3B */
6822 IPW2100_DEV_ID(0x2553), /* IB 2100 mPCI 3B */
6823 IPW2100_DEV_ID(0x2554), /* IB 2100 mPCI 3B */
6824 IPW2100_DEV_ID(0x2555), /* IB 2100 mPCI 3B */
6825
6826 IPW2100_DEV_ID(0x2560), /* DE 2100A mPCI 3A */
6827 IPW2100_DEV_ID(0x2562), /* DE 2100A mPCI 3A */
6828 IPW2100_DEV_ID(0x2563), /* DE 2100A mPCI 3A */
6829 IPW2100_DEV_ID(0x2561), /* DE 2100 mPCI 3A */
6830 IPW2100_DEV_ID(0x2565), /* DE 2100 mPCI 3A */
6831 IPW2100_DEV_ID(0x2566), /* DE 2100 mPCI 3A */
6832 IPW2100_DEV_ID(0x2567), /* DE 2100 mPCI 3A */
6833
6834 IPW2100_DEV_ID(0x2570), /* GA 2100 mPCI 3B */
6835
6836 IPW2100_DEV_ID(0x2580), /* TO 2100A mPCI 3B */
6837 IPW2100_DEV_ID(0x2582), /* TO 2100A mPCI 3B */
6838 IPW2100_DEV_ID(0x2583), /* TO 2100A mPCI 3B */
6839 IPW2100_DEV_ID(0x2581), /* TO 2100 mPCI 3B */
6840 IPW2100_DEV_ID(0x2585), /* TO 2100 mPCI 3B */
6841 IPW2100_DEV_ID(0x2586), /* TO 2100 mPCI 3B */
6842 IPW2100_DEV_ID(0x2587), /* TO 2100 mPCI 3B */
6843
6844 IPW2100_DEV_ID(0x2590), /* SO 2100A mPCI 3B */
6845 IPW2100_DEV_ID(0x2592), /* SO 2100A mPCI 3B */
6846 IPW2100_DEV_ID(0x2591), /* SO 2100 mPCI 3B */
6847 IPW2100_DEV_ID(0x2593), /* SO 2100 mPCI 3B */
6848 IPW2100_DEV_ID(0x2596), /* SO 2100 mPCI 3B */
6849 IPW2100_DEV_ID(0x2598), /* SO 2100 mPCI 3B */
6850
6851 IPW2100_DEV_ID(0x25A0), /* HP 2100 mPCI 3B */
6852 {0,},
6853};
6854
6855MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table);
6856
6857static struct pci_driver ipw2100_pci_driver = {
6858 .name = DRV_NAME,
6859 .id_table = ipw2100_pci_id_table,
6860 .probe = ipw2100_pci_init_one,
6861 .remove = __devexit_p(ipw2100_pci_remove_one),
6862#ifdef CONFIG_PM
6863 .suspend = ipw2100_suspend,
6864 .resume = ipw2100_resume,
6865#endif
6866};
6867
6868
6869/**
6870 * Initialize the ipw2100 driver/module
6871 *
 6872 * @returns 0 if ok, < 0 errno code on error.
6873 *
6874 * Note: we cannot init the /proc stuff until the PCI driver is there,
6875 * or we risk an unlikely race condition on someone accessing
6876 * uninitialized data in the PCI dev struct through /proc.
6877 */
6878static int __init ipw2100_init(void)
6879{
6880 int ret;
6881
6882 printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
6883 printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
6884
6885#ifdef CONFIG_IEEE80211_NOWEP
6886 IPW_DEBUG_INFO(DRV_NAME ": Compiled with WEP disabled.\n");
6887#endif
6888
6889 ret = pci_module_init(&ipw2100_pci_driver);
6890
6891#ifdef CONFIG_IPW_DEBUG
6892 ipw2100_debug_level = debug;
6893 driver_create_file(&ipw2100_pci_driver.driver,
6894 &driver_attr_debug_level);
6895#endif
6896
6897 return ret;
6898}
6899
6900
6901/**
6902 * Cleanup ipw2100 driver registration
6903 */
6904static void __exit ipw2100_exit(void)
6905{
6906 /* FIXME: IPG: check that we have no instances of the devices open */
6907#ifdef CONFIG_IPW_DEBUG
6908 driver_remove_file(&ipw2100_pci_driver.driver,
6909 &driver_attr_debug_level);
6910#endif
6911 pci_unregister_driver(&ipw2100_pci_driver);
6912}
6913
6914module_init(ipw2100_init);
6915module_exit(ipw2100_exit);
6916
6917#define WEXT_USECHANNELS 1
6918
6919static const long ipw2100_frequencies[] = {
6920 2412, 2417, 2422, 2427,
6921 2432, 2437, 2442, 2447,
6922 2452, 2457, 2462, 2467,
6923 2472, 2484
6924};
6925
6926#define FREQ_COUNT (sizeof(ipw2100_frequencies) / \
6927 sizeof(ipw2100_frequencies[0]))
6928
6929static const long ipw2100_rates_11b[] = {
6930 1000000,
6931 2000000,
6932 5500000,
6933 11000000
6934};
6935
6936#define RATE_COUNT (sizeof(ipw2100_rates_11b) / sizeof(ipw2100_rates_11b[0]))
6937
6938static int ipw2100_wx_get_name(struct net_device *dev,
6939 struct iw_request_info *info,
6940 union iwreq_data *wrqu, char *extra)
6941{
6942 /*
6943 * This can be called at any time. No action lock required
6944 */
6945
6946 struct ipw2100_priv *priv = ieee80211_priv(dev);
6947 if (!(priv->status & STATUS_ASSOCIATED))
6948 strcpy(wrqu->name, "unassociated");
6949 else
6950 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
6951
6952 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
6953 return 0;
6954}
6955
6956
6957static int ipw2100_wx_set_freq(struct net_device *dev,
6958 struct iw_request_info *info,
6959 union iwreq_data *wrqu, char *extra)
6960{
6961 struct ipw2100_priv *priv = ieee80211_priv(dev);
6962 struct iw_freq *fwrq = &wrqu->freq;
6963 int err = 0;
6964
6965 if (priv->ieee->iw_mode == IW_MODE_INFRA)
6966 return -EOPNOTSUPP;
6967
6968 down(&priv->action_sem);
6969 if (!(priv->status & STATUS_INITIALIZED)) {
6970 err = -EIO;
6971 goto done;
6972 }
6973
6974 /* if setting by freq convert to channel */
6975 if (fwrq->e == 1) {
6976 if ((fwrq->m >= (int) 2.412e8 &&
6977 fwrq->m <= (int) 2.487e8)) {
6978 int f = fwrq->m / 100000;
6979 int c = 0;
6980
6981 while ((c < REG_MAX_CHANNEL) &&
6982 (f != ipw2100_frequencies[c]))
6983 c++;
6984
6985 /* hack to fall through */
6986 fwrq->e = 0;
6987 fwrq->m = c + 1;
6988 }
6989 }
6990
 6991	if (fwrq->e > 0 || fwrq->m > 1000) {
 6992		err = -EOPNOTSUPP;	/* don't return with action_sem held */
 6993	} else {	/* Set the channel */
 6994		IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
 6995		err = ipw2100_set_channel(priv, fwrq->m, 0);
 6996	}
6997
6998 done:
6999 up(&priv->action_sem);
7000 return err;
7001}
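/*
 * Worked example (illustrative sketch, not part of the driver): Wireless
 * Extensions encode a frequency as freq = m * 10^e Hz.  Channel 6
 * (2437 MHz) therefore arrives as fwrq->m == 243700000, fwrq->e == 1,
 * so m / 100000 == 2437 and the table walk above yields index 5,
 * i.e. channel 6.  A stand-alone version of that conversion, using the
 * ipw2100_frequencies[] table defined earlier, could look like this
 * (the helper name is hypothetical):
 */
static inline int ipw2100_example_freq_to_channel(int m)
{
	int f = m / 100000;	/* 10 Hz units -> MHz */
	int c;

	for (c = 0; c < FREQ_COUNT; c++)
		if (ipw2100_frequencies[c] == f)
			return c + 1;	/* channels are 1-based */
	return -1;		/* not a known 802.11b frequency */
}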
7002
7003
7004static int ipw2100_wx_get_freq(struct net_device *dev,
7005 struct iw_request_info *info,
7006 union iwreq_data *wrqu, char *extra)
7007{
7008 /*
7009 * This can be called at any time. No action lock required
7010 */
7011
7012 struct ipw2100_priv *priv = ieee80211_priv(dev);
7013
7014 wrqu->freq.e = 0;
7015
7016 /* If we are associated, trying to associate, or have a statically
7017 * configured CHANNEL then return that; otherwise return ANY */
7018 if (priv->config & CFG_STATIC_CHANNEL ||
7019 priv->status & STATUS_ASSOCIATED)
7020 wrqu->freq.m = priv->channel;
7021 else
7022 wrqu->freq.m = 0;
7023
7024 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
7025 return 0;
7026
7027}
7028
7029static int ipw2100_wx_set_mode(struct net_device *dev,
7030 struct iw_request_info *info,
7031 union iwreq_data *wrqu, char *extra)
7032{
7033 struct ipw2100_priv *priv = ieee80211_priv(dev);
7034 int err = 0;
7035
7036 IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
7037
7038 if (wrqu->mode == priv->ieee->iw_mode)
7039 return 0;
7040
7041 down(&priv->action_sem);
7042 if (!(priv->status & STATUS_INITIALIZED)) {
7043 err = -EIO;
7044 goto done;
7045 }
7046
7047 switch (wrqu->mode) {
7048#ifdef CONFIG_IPW2100_MONITOR
7049 case IW_MODE_MONITOR:
7050 err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
7051 break;
7052#endif /* CONFIG_IPW2100_MONITOR */
7053 case IW_MODE_ADHOC:
7054 err = ipw2100_switch_mode(priv, IW_MODE_ADHOC);
7055 break;
7056 case IW_MODE_INFRA:
7057 case IW_MODE_AUTO:
7058 default:
7059 err = ipw2100_switch_mode(priv, IW_MODE_INFRA);
7060 break;
7061 }
7062
7063done:
7064 up(&priv->action_sem);
7065 return err;
7066}
7067
7068static int ipw2100_wx_get_mode(struct net_device *dev,
7069 struct iw_request_info *info,
7070 union iwreq_data *wrqu, char *extra)
7071{
7072 /*
7073 * This can be called at any time. No action lock required
7074 */
7075
7076 struct ipw2100_priv *priv = ieee80211_priv(dev);
7077
7078 wrqu->mode = priv->ieee->iw_mode;
7079 IPW_DEBUG_WX("GET Mode -> %d\n", wrqu->mode);
7080
7081 return 0;
7082}
7083
7084
7085#define POWER_MODES 5
7086
 7087/* Values are in microseconds */
7088static const s32 timeout_duration[POWER_MODES] = {
7089 350000,
7090 250000,
7091 75000,
7092 37000,
7093 25000,
7094};
7095
7096static const s32 period_duration[POWER_MODES] = {
7097 400000,
7098 700000,
7099 1000000,
7100 1000000,
7101 1000000
7102};
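/*
 * Power level N (1..POWER_MODES) indexes both tables at N - 1; for example
 * level 3 corresponds to a 75 ms timeout and a 1000 ms period, which is
 * what ipw2100_wx_get_powermode() below reports after dividing by 1000.
 */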
7103
7104static int ipw2100_wx_get_range(struct net_device *dev,
7105 struct iw_request_info *info,
7106 union iwreq_data *wrqu, char *extra)
7107{
7108 /*
7109 * This can be called at any time. No action lock required
7110 */
7111
7112 struct ipw2100_priv *priv = ieee80211_priv(dev);
7113 struct iw_range *range = (struct iw_range *)extra;
7114 u16 val;
7115 int i, level;
7116
7117 wrqu->data.length = sizeof(*range);
7118 memset(range, 0, sizeof(*range));
7119
7120 /* Let's try to keep this struct in the same order as in
7121 * linux/include/wireless.h
7122 */
7123
7124 /* TODO: See what values we can set, and remove the ones we can't
7125 * set, or fill them with some default data.
7126 */
7127
7128 /* ~5 Mb/s real (802.11b) */
7129 range->throughput = 5 * 1000 * 1000;
7130
7131// range->sensitivity; /* signal level threshold range */
7132
7133 range->max_qual.qual = 100;
7134 /* TODO: Find real max RSSI and stick here */
7135 range->max_qual.level = 0;
7136 range->max_qual.noise = 0;
7137 range->max_qual.updated = 7; /* Updated all three */
7138
7139 range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
 7140	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
7141 range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM;
7142 range->avg_qual.noise = 0;
7143 range->avg_qual.updated = 7; /* Updated all three */
7144
7145 range->num_bitrates = RATE_COUNT;
7146
7147 for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
7148 range->bitrate[i] = ipw2100_rates_11b[i];
7149 }
7150
7151 range->min_rts = MIN_RTS_THRESHOLD;
7152 range->max_rts = MAX_RTS_THRESHOLD;
7153 range->min_frag = MIN_FRAG_THRESHOLD;
7154 range->max_frag = MAX_FRAG_THRESHOLD;
7155
7156 range->min_pmp = period_duration[0]; /* Minimal PM period */
7157 range->max_pmp = period_duration[POWER_MODES-1];/* Maximal PM period */
7158 range->min_pmt = timeout_duration[POWER_MODES-1]; /* Minimal PM timeout */
7159 range->max_pmt = timeout_duration[0];/* Maximal PM timeout */
7160
7161 /* How to decode max/min PM period */
7162 range->pmp_flags = IW_POWER_PERIOD;
 7163	/* How to decode max/min PM timeout */
7164 range->pmt_flags = IW_POWER_TIMEOUT;
7165 /* What PM options are supported */
7166 range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD;
7167
7168 range->encoding_size[0] = 5;
7169 range->encoding_size[1] = 13; /* Different token sizes */
 7170	range->num_encoding_sizes = 2; /* Number of entries in the list */
7171 range->max_encoding_tokens = WEP_KEYS; /* Max number of tokens */
7172// range->encoding_login_index; /* token index for login token */
7173
7174 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7175 range->txpower_capa = IW_TXPOW_DBM;
7176 range->num_txpower = IW_MAX_TXPOWER;
7177 for (i = 0, level = (IPW_TX_POWER_MAX_DBM * 16); i < IW_MAX_TXPOWER;
7178 i++, level -= ((IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM) * 16) /
7179 (IW_MAX_TXPOWER - 1))
7180 range->txpower[i] = level / 16;
7181 } else {
7182 range->txpower_capa = 0;
7183 range->num_txpower = 0;
7184 }
7185
7186
7187 /* Set the Wireless Extension versions */
7188 range->we_version_compiled = WIRELESS_EXT;
7189 range->we_version_source = 16;
7190
7191// range->retry_capa; /* What retry options are supported */
7192// range->retry_flags; /* How to decode max/min retry limit */
7193// range->r_time_flags; /* How to decode max/min retry life */
7194// range->min_retry; /* Minimal number of retries */
7195// range->max_retry; /* Maximal number of retries */
7196// range->min_r_time; /* Minimal retry lifetime */
7197// range->max_r_time; /* Maximal retry lifetime */
7198
7199 range->num_channels = FREQ_COUNT;
7200
7201 val = 0;
7202 for (i = 0; i < FREQ_COUNT; i++) {
7203 // TODO: Include only legal frequencies for some countries
7204// if (local->channel_mask & (1 << i)) {
7205 range->freq[val].i = i + 1;
7206 range->freq[val].m = ipw2100_frequencies[i] * 100000;
7207 range->freq[val].e = 1;
7208 val++;
7209// }
7210 if (val == IW_MAX_FREQUENCIES)
7211 break;
7212 }
7213 range->num_frequency = val;
7214
7215 IPW_DEBUG_WX("GET Range\n");
7216
7217 return 0;
7218}
7219
7220static int ipw2100_wx_set_wap(struct net_device *dev,
7221 struct iw_request_info *info,
7222 union iwreq_data *wrqu, char *extra)
7223{
7224 struct ipw2100_priv *priv = ieee80211_priv(dev);
7225 int err = 0;
7226
7227 static const unsigned char any[] = {
7228 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
7229 };
7230 static const unsigned char off[] = {
7231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
7232 };
7233
7234 // sanity checks
7235 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
7236 return -EINVAL;
7237
7238 down(&priv->action_sem);
7239 if (!(priv->status & STATUS_INITIALIZED)) {
7240 err = -EIO;
7241 goto done;
7242 }
7243
7244 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
7245 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
7246 /* we disable mandatory BSSID association */
7247 IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
7248 priv->config &= ~CFG_STATIC_BSSID;
7249 err = ipw2100_set_mandatory_bssid(priv, NULL, 0);
7250 goto done;
7251 }
7252
7253 priv->config |= CFG_STATIC_BSSID;
7254 memcpy(priv->mandatory_bssid_mac, wrqu->ap_addr.sa_data, ETH_ALEN);
7255
7256 err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0);
7257
7258 IPW_DEBUG_WX("SET BSSID -> %02X:%02X:%02X:%02X:%02X:%02X\n",
7259 wrqu->ap_addr.sa_data[0] & 0xff,
7260 wrqu->ap_addr.sa_data[1] & 0xff,
7261 wrqu->ap_addr.sa_data[2] & 0xff,
7262 wrqu->ap_addr.sa_data[3] & 0xff,
7263 wrqu->ap_addr.sa_data[4] & 0xff,
7264 wrqu->ap_addr.sa_data[5] & 0xff);
7265
7266 done:
7267 up(&priv->action_sem);
7268 return err;
7269}
7270
7271static int ipw2100_wx_get_wap(struct net_device *dev,
7272 struct iw_request_info *info,
7273 union iwreq_data *wrqu, char *extra)
7274{
7275 /*
7276 * This can be called at any time. No action lock required
7277 */
7278
7279 struct ipw2100_priv *priv = ieee80211_priv(dev);
7280
7281 /* If we are associated, trying to associate, or have a statically
7282 * configured BSSID then return that; otherwise return ANY */
7283 if (priv->config & CFG_STATIC_BSSID ||
7284 priv->status & STATUS_ASSOCIATED) {
7285 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
7286 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
7287 } else
7288 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
7289
7290 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
7291 MAC_ARG(wrqu->ap_addr.sa_data));
7292 return 0;
7293}
7294
7295static int ipw2100_wx_set_essid(struct net_device *dev,
7296 struct iw_request_info *info,
7297 union iwreq_data *wrqu, char *extra)
7298{
7299 struct ipw2100_priv *priv = ieee80211_priv(dev);
7300 char *essid = ""; /* ANY */
7301 int length = 0;
7302 int err = 0;
7303
7304 down(&priv->action_sem);
7305 if (!(priv->status & STATUS_INITIALIZED)) {
7306 err = -EIO;
7307 goto done;
7308 }
7309
7310 if (wrqu->essid.flags && wrqu->essid.length) {
7311 length = wrqu->essid.length - 1;
7312 essid = extra;
7313 }
7314
7315 if (length == 0) {
7316 IPW_DEBUG_WX("Setting ESSID to ANY\n");
7317 priv->config &= ~CFG_STATIC_ESSID;
7318 err = ipw2100_set_essid(priv, NULL, 0, 0);
7319 goto done;
7320 }
7321
7322 length = min(length, IW_ESSID_MAX_SIZE);
7323
7324 priv->config |= CFG_STATIC_ESSID;
7325
7326 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
7327 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
7328 err = 0;
7329 goto done;
7330 }
7331
7332 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
7333 length);
7334
7335 priv->essid_len = length;
7336 memcpy(priv->essid, essid, priv->essid_len);
7337
7338 err = ipw2100_set_essid(priv, essid, length, 0);
7339
7340 done:
7341 up(&priv->action_sem);
7342 return err;
7343}
7344
7345static int ipw2100_wx_get_essid(struct net_device *dev,
7346 struct iw_request_info *info,
7347 union iwreq_data *wrqu, char *extra)
7348{
7349 /*
7350 * This can be called at any time. No action lock required
7351 */
7352
7353 struct ipw2100_priv *priv = ieee80211_priv(dev);
7354
7355 /* If we are associated, trying to associate, or have a statically
7356 * configured ESSID then return that; otherwise return ANY */
7357 if (priv->config & CFG_STATIC_ESSID ||
7358 priv->status & STATUS_ASSOCIATED) {
7359 IPW_DEBUG_WX("Getting essid: '%s'\n",
7360 escape_essid(priv->essid, priv->essid_len));
7361 memcpy(extra, priv->essid, priv->essid_len);
7362 wrqu->essid.length = priv->essid_len;
7363 wrqu->essid.flags = 1; /* active */
7364 } else {
7365 IPW_DEBUG_WX("Getting essid: ANY\n");
7366 wrqu->essid.length = 0;
7367 wrqu->essid.flags = 0; /* active */
7368 }
7369
7370 return 0;
7371}
7372
7373static int ipw2100_wx_set_nick(struct net_device *dev,
7374 struct iw_request_info *info,
7375 union iwreq_data *wrqu, char *extra)
7376{
7377 /*
7378 * This can be called at any time. No action lock required
7379 */
7380
7381 struct ipw2100_priv *priv = ieee80211_priv(dev);
7382
7383 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
7384 return -E2BIG;
7385
7386 wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick));
7387 memset(priv->nick, 0, sizeof(priv->nick));
7388 memcpy(priv->nick, extra, wrqu->data.length);
7389
7390 IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
7391
7392 return 0;
7393}
7394
7395static int ipw2100_wx_get_nick(struct net_device *dev,
7396 struct iw_request_info *info,
7397 union iwreq_data *wrqu, char *extra)
7398{
7399 /*
7400 * This can be called at any time. No action lock required
7401 */
7402
7403 struct ipw2100_priv *priv = ieee80211_priv(dev);
7404
7405 wrqu->data.length = strlen(priv->nick) + 1;
7406 memcpy(extra, priv->nick, wrqu->data.length);
7407 wrqu->data.flags = 1; /* active */
7408
7409 IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
7410
7411 return 0;
7412}
7413
7414static int ipw2100_wx_set_rate(struct net_device *dev,
7415 struct iw_request_info *info,
7416 union iwreq_data *wrqu, char *extra)
7417{
7418 struct ipw2100_priv *priv = ieee80211_priv(dev);
7419 u32 target_rate = wrqu->bitrate.value;
7420 u32 rate;
7421 int err = 0;
7422
7423 down(&priv->action_sem);
7424 if (!(priv->status & STATUS_INITIALIZED)) {
7425 err = -EIO;
7426 goto done;
7427 }
7428
7429 rate = 0;
7430
7431 if (target_rate == 1000000 ||
7432 (!wrqu->bitrate.fixed && target_rate > 1000000))
7433 rate |= TX_RATE_1_MBIT;
7434 if (target_rate == 2000000 ||
7435 (!wrqu->bitrate.fixed && target_rate > 2000000))
7436 rate |= TX_RATE_2_MBIT;
7437 if (target_rate == 5500000 ||
7438 (!wrqu->bitrate.fixed && target_rate > 5500000))
7439 rate |= TX_RATE_5_5_MBIT;
7440 if (target_rate == 11000000 ||
7441 (!wrqu->bitrate.fixed && target_rate > 11000000))
7442 rate |= TX_RATE_11_MBIT;
7443 if (rate == 0)
7444 rate = DEFAULT_TX_RATES;
7445
7446 err = ipw2100_set_tx_rates(priv, rate, 0);
7447
7448 IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
7449 done:
7450 up(&priv->action_sem);
7451 return err;
7452}
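/*
 * Example of the bitmask built above (values are illustrative): a fixed
 * request for 5.5 Mb/s (bitrate.fixed == 1, value == 5500000) sets only
 * TX_RATE_5_5_MBIT, while an auto request capped at 5.5 Mb/s
 * (bitrate.fixed == 0, value == 5500000) sets TX_RATE_1_MBIT |
 * TX_RATE_2_MBIT | TX_RATE_5_5_MBIT.  A request that matches no rate at
 * all falls back to DEFAULT_TX_RATES.
 */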
7453
7454
7455static int ipw2100_wx_get_rate(struct net_device *dev,
7456 struct iw_request_info *info,
7457 union iwreq_data *wrqu, char *extra)
7458{
7459 struct ipw2100_priv *priv = ieee80211_priv(dev);
7460 int val;
7461 int len = sizeof(val);
7462 int err = 0;
7463
7464 if (!(priv->status & STATUS_ENABLED) ||
7465 priv->status & STATUS_RF_KILL_MASK ||
7466 !(priv->status & STATUS_ASSOCIATED)) {
7467 wrqu->bitrate.value = 0;
7468 return 0;
7469 }
7470
7471 down(&priv->action_sem);
7472 if (!(priv->status & STATUS_INITIALIZED)) {
7473 err = -EIO;
7474 goto done;
7475 }
7476
7477 err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len);
7478 if (err) {
7479 IPW_DEBUG_WX("failed querying ordinals.\n");
 7480		goto done;	/* don't return with action_sem held */
7481 }
7482
7483 switch (val & TX_RATE_MASK) {
7484 case TX_RATE_1_MBIT:
7485 wrqu->bitrate.value = 1000000;
7486 break;
7487 case TX_RATE_2_MBIT:
7488 wrqu->bitrate.value = 2000000;
7489 break;
7490 case TX_RATE_5_5_MBIT:
7491 wrqu->bitrate.value = 5500000;
7492 break;
7493 case TX_RATE_11_MBIT:
7494 wrqu->bitrate.value = 11000000;
7495 break;
7496 default:
7497 wrqu->bitrate.value = 0;
7498 }
7499
7500 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
7501
7502 done:
7503 up(&priv->action_sem);
7504 return err;
7505}
7506
7507static int ipw2100_wx_set_rts(struct net_device *dev,
7508 struct iw_request_info *info,
7509 union iwreq_data *wrqu, char *extra)
7510{
7511 struct ipw2100_priv *priv = ieee80211_priv(dev);
7512 int value, err;
7513
7514 /* Auto RTS not yet supported */
7515 if (wrqu->rts.fixed == 0)
7516 return -EINVAL;
7517
7518 down(&priv->action_sem);
7519 if (!(priv->status & STATUS_INITIALIZED)) {
7520 err = -EIO;
7521 goto done;
7522 }
7523
7524 if (wrqu->rts.disabled)
7525 value = priv->rts_threshold | RTS_DISABLED;
7526 else {
7527 if (wrqu->rts.value < 1 ||
7528 wrqu->rts.value > 2304) {
7529 err = -EINVAL;
7530 goto done;
7531 }
7532 value = wrqu->rts.value;
7533 }
7534
7535 err = ipw2100_set_rts_threshold(priv, value);
7536
7537 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
7538 done:
7539 up(&priv->action_sem);
7540 return err;
7541}
7542
7543static int ipw2100_wx_get_rts(struct net_device *dev,
7544 struct iw_request_info *info,
7545 union iwreq_data *wrqu, char *extra)
7546{
7547 /*
7548 * This can be called at any time. No action lock required
7549 */
7550
7551 struct ipw2100_priv *priv = ieee80211_priv(dev);
7552
7553 wrqu->rts.value = priv->rts_threshold & ~RTS_DISABLED;
7554 wrqu->rts.fixed = 1; /* no auto select */
7555
7556 /* If RTS is set to the default value, then it is disabled */
7557 wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
7558
7559 IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
7560
7561 return 0;
7562}
7563
7564static int ipw2100_wx_set_txpow(struct net_device *dev,
7565 struct iw_request_info *info,
7566 union iwreq_data *wrqu, char *extra)
7567{
7568 struct ipw2100_priv *priv = ieee80211_priv(dev);
7569 int err = 0, value;
7570
7571 if (priv->ieee->iw_mode != IW_MODE_ADHOC)
7572 return -EINVAL;
7573
7574 if (wrqu->txpower.disabled == 1 || wrqu->txpower.fixed == 0)
7575 value = IPW_TX_POWER_DEFAULT;
7576 else {
7577 if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM ||
7578 wrqu->txpower.value > IPW_TX_POWER_MAX_DBM)
7579 return -EINVAL;
7580
7581 value = (wrqu->txpower.value - IPW_TX_POWER_MIN_DBM) * 16 /
7582 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM);
7583 }
7584
7585 down(&priv->action_sem);
7586 if (!(priv->status & STATUS_INITIALIZED)) {
7587 err = -EIO;
7588 goto done;
7589 }
7590
7591 err = ipw2100_set_tx_power(priv, value);
7592
7593 IPW_DEBUG_WX("SET TX Power -> %d \n", value);
7594
7595 done:
7596 up(&priv->action_sem);
7597 return err;
7598}
7599
7600static int ipw2100_wx_get_txpow(struct net_device *dev,
7601 struct iw_request_info *info,
7602 union iwreq_data *wrqu, char *extra)
7603{
7604 /*
7605 * This can be called at any time. No action lock required
7606 */
7607
7608 struct ipw2100_priv *priv = ieee80211_priv(dev);
7609
7610 if (priv->ieee->iw_mode != IW_MODE_ADHOC) {
7611 wrqu->power.disabled = 1;
7612 return 0;
7613 }
7614
7615 if (priv->tx_power == IPW_TX_POWER_DEFAULT) {
7616 wrqu->power.fixed = 0;
7617 wrqu->power.value = IPW_TX_POWER_MAX_DBM;
7618 wrqu->power.disabled = 1;
7619 } else {
7620 wrqu->power.disabled = 0;
7621 wrqu->power.fixed = 1;
7622 wrqu->power.value =
7623 (priv->tx_power *
7624 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM)) /
7625 (IPW_TX_POWER_MAX - IPW_TX_POWER_MIN) +
7626 IPW_TX_POWER_MIN_DBM;
7627 }
7628
7629 wrqu->power.flags = IW_TXPOW_DBM;
7630
7631 IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->power.value);
7632
7633 return 0;
7634}
7635
7636static int ipw2100_wx_set_frag(struct net_device *dev,
7637 struct iw_request_info *info,
7638 union iwreq_data *wrqu, char *extra)
7639{
7640 /*
7641 * This can be called at any time. No action lock required
7642 */
7643
7644 struct ipw2100_priv *priv = ieee80211_priv(dev);
7645
7646 if (!wrqu->frag.fixed)
7647 return -EINVAL;
7648
7649 if (wrqu->frag.disabled) {
7650 priv->frag_threshold |= FRAG_DISABLED;
7651 priv->ieee->fts = DEFAULT_FTS;
7652 } else {
7653 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
7654 wrqu->frag.value > MAX_FRAG_THRESHOLD)
7655 return -EINVAL;
7656
7657 priv->ieee->fts = wrqu->frag.value & ~0x1;
7658 priv->frag_threshold = priv->ieee->fts;
7659 }
7660
7661 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
7662
7663 return 0;
7664}
7665
7666static int ipw2100_wx_get_frag(struct net_device *dev,
7667 struct iw_request_info *info,
7668 union iwreq_data *wrqu, char *extra)
7669{
7670 /*
7671 * This can be called at any time. No action lock required
7672 */
7673
7674 struct ipw2100_priv *priv = ieee80211_priv(dev);
7675 wrqu->frag.value = priv->frag_threshold & ~FRAG_DISABLED;
7676 wrqu->frag.fixed = 0; /* no auto select */
7677 wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
7678
7679 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
7680
7681 return 0;
7682}
7683
7684static int ipw2100_wx_set_retry(struct net_device *dev,
7685 struct iw_request_info *info,
7686 union iwreq_data *wrqu, char *extra)
7687{
7688 struct ipw2100_priv *priv = ieee80211_priv(dev);
7689 int err = 0;
7690
7691 if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
7692 wrqu->retry.disabled)
7693 return -EINVAL;
7694
7695 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
7696 return 0;
7697
7698 down(&priv->action_sem);
7699 if (!(priv->status & STATUS_INITIALIZED)) {
7700 err = -EIO;
7701 goto done;
7702 }
7703
7704 if (wrqu->retry.flags & IW_RETRY_MIN) {
7705 err = ipw2100_set_short_retry(priv, wrqu->retry.value);
7706 IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
7707 wrqu->retry.value);
7708 goto done;
7709 }
7710
7711 if (wrqu->retry.flags & IW_RETRY_MAX) {
7712 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7713 IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
7714 wrqu->retry.value);
7715 goto done;
7716 }
7717
7718 err = ipw2100_set_short_retry(priv, wrqu->retry.value);
7719 if (!err)
7720 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7721
7722 IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
7723
7724 done:
7725 up(&priv->action_sem);
7726 return err;
7727}
7728
7729static int ipw2100_wx_get_retry(struct net_device *dev,
7730 struct iw_request_info *info,
7731 union iwreq_data *wrqu, char *extra)
7732{
7733 /*
7734 * This can be called at any time. No action lock required
7735 */
7736
7737 struct ipw2100_priv *priv = ieee80211_priv(dev);
7738
7739 wrqu->retry.disabled = 0; /* can't be disabled */
7740
7741 if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
7742 IW_RETRY_LIFETIME)
7743 return -EINVAL;
7744
7745 if (wrqu->retry.flags & IW_RETRY_MAX) {
 7746		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
7747 wrqu->retry.value = priv->long_retry_limit;
7748 } else {
7749 wrqu->retry.flags =
7750 (priv->short_retry_limit !=
7751 priv->long_retry_limit) ?
 7752		    IW_RETRY_LIMIT | IW_RETRY_MIN : IW_RETRY_LIMIT;
7753
7754 wrqu->retry.value = priv->short_retry_limit;
7755 }
7756
7757 IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
7758
7759 return 0;
7760}
7761
7762static int ipw2100_wx_set_scan(struct net_device *dev,
7763 struct iw_request_info *info,
7764 union iwreq_data *wrqu, char *extra)
7765{
7766 struct ipw2100_priv *priv = ieee80211_priv(dev);
7767 int err = 0;
7768
7769 down(&priv->action_sem);
7770 if (!(priv->status & STATUS_INITIALIZED)) {
7771 err = -EIO;
7772 goto done;
7773 }
7774
7775 IPW_DEBUG_WX("Initiating scan...\n");
7776 if (ipw2100_set_scan_options(priv) ||
7777 ipw2100_start_scan(priv)) {
7778 IPW_DEBUG_WX("Start scan failed.\n");
7779
7780 /* TODO: Mark a scan as pending so when hardware initialized
7781 * a scan starts */
7782 }
7783
7784 done:
7785 up(&priv->action_sem);
7786 return err;
7787}
7788
7789static int ipw2100_wx_get_scan(struct net_device *dev,
7790 struct iw_request_info *info,
7791 union iwreq_data *wrqu, char *extra)
7792{
7793 /*
7794 * This can be called at any time. No action lock required
7795 */
7796
7797 struct ipw2100_priv *priv = ieee80211_priv(dev);
7798 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
7799}
7800
7801
7802/*
7803 * Implementation based on code in hostap-driver v0.1.3 hostap_ioctl.c
7804 */
7805static int ipw2100_wx_set_encode(struct net_device *dev,
7806 struct iw_request_info *info,
7807 union iwreq_data *wrqu, char *key)
7808{
7809 /*
7810 * No check of STATUS_INITIALIZED required
7811 */
7812
7813 struct ipw2100_priv *priv = ieee80211_priv(dev);
7814 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
7815}
7816
7817static int ipw2100_wx_get_encode(struct net_device *dev,
7818 struct iw_request_info *info,
7819 union iwreq_data *wrqu, char *key)
7820{
7821 /*
7822 * This can be called at any time. No action lock required
7823 */
7824
7825 struct ipw2100_priv *priv = ieee80211_priv(dev);
7826 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
7827}
7828
7829static int ipw2100_wx_set_power(struct net_device *dev,
7830 struct iw_request_info *info,
7831 union iwreq_data *wrqu, char *extra)
7832{
7833 struct ipw2100_priv *priv = ieee80211_priv(dev);
7834 int err = 0;
7835
7836 down(&priv->action_sem);
7837 if (!(priv->status & STATUS_INITIALIZED)) {
7838 err = -EIO;
7839 goto done;
7840 }
7841
7842 if (wrqu->power.disabled) {
7843 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
7844 err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
7845 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
7846 goto done;
7847 }
7848
7849 switch (wrqu->power.flags & IW_POWER_MODE) {
7850 case IW_POWER_ON: /* If not specified */
7851 case IW_POWER_MODE: /* If set all mask */
 7852	case IW_POWER_ALL_R:	/* If explicitly requesting all */
7853 break;
7854 default: /* Otherwise we don't support it */
7855 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
7856 wrqu->power.flags);
7857 err = -EOPNOTSUPP;
7858 goto done;
7859 }
7860
7861 /* If the user hasn't specified a power management mode yet, default
7862 * to BATTERY */
7863 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
7864 err = ipw2100_set_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
7865
7866 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n",
7867 priv->power_mode);
7868
7869 done:
7870 up(&priv->action_sem);
7871 return err;
7872
7873}
7874
7875static int ipw2100_wx_get_power(struct net_device *dev,
7876 struct iw_request_info *info,
7877 union iwreq_data *wrqu, char *extra)
7878{
7879 /*
7880 * This can be called at any time. No action lock required
7881 */
7882
7883 struct ipw2100_priv *priv = ieee80211_priv(dev);
7884
7885 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
7886 wrqu->power.disabled = 1;
7887 } else {
7888 wrqu->power.disabled = 0;
7889 wrqu->power.flags = 0;
7890 }
7891
7892 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
7893
7894 return 0;
7895}
7896
7897
7898/*
7899 *
7900 * IWPRIV handlers
7901 *
7902 */
7903#ifdef CONFIG_IPW2100_MONITOR
7904static int ipw2100_wx_set_promisc(struct net_device *dev,
7905 struct iw_request_info *info,
7906 union iwreq_data *wrqu, char *extra)
7907{
7908 struct ipw2100_priv *priv = ieee80211_priv(dev);
7909 int *parms = (int *)extra;
7910 int enable = (parms[0] > 0);
7911 int err = 0;
7912
7913 down(&priv->action_sem);
7914 if (!(priv->status & STATUS_INITIALIZED)) {
7915 err = -EIO;
7916 goto done;
7917 }
7918
7919 if (enable) {
7920 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7921 err = ipw2100_set_channel(priv, parms[1], 0);
7922 goto done;
7923 }
7924 priv->channel = parms[1];
7925 err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
7926 } else {
7927 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
7928 err = ipw2100_switch_mode(priv, priv->last_mode);
7929 }
7930 done:
7931 up(&priv->action_sem);
7932 return err;
7933}
7934
7935static int ipw2100_wx_reset(struct net_device *dev,
7936 struct iw_request_info *info,
7937 union iwreq_data *wrqu, char *extra)
7938{
7939 struct ipw2100_priv *priv = ieee80211_priv(dev);
7940 if (priv->status & STATUS_INITIALIZED)
7941 schedule_reset(priv);
7942 return 0;
7943}
7944
7945#endif
7946
7947static int ipw2100_wx_set_powermode(struct net_device *dev,
7948 struct iw_request_info *info,
7949 union iwreq_data *wrqu, char *extra)
7950{
7951 struct ipw2100_priv *priv = ieee80211_priv(dev);
7952 int err = 0, mode = *(int *)extra;
7953
7954 down(&priv->action_sem);
7955 if (!(priv->status & STATUS_INITIALIZED)) {
7956 err = -EIO;
7957 goto done;
7958 }
7959
7960 if ((mode < 1) || (mode > POWER_MODES))
7961 mode = IPW_POWER_AUTO;
7962
7963 if (priv->power_mode != mode)
7964 err = ipw2100_set_power_mode(priv, mode);
7965 done:
7966 up(&priv->action_sem);
7967 return err;
7968}
7969
7970#define MAX_POWER_STRING 80
7971static int ipw2100_wx_get_powermode(struct net_device *dev,
7972 struct iw_request_info *info,
7973 union iwreq_data *wrqu, char *extra)
7974{
7975 /*
7976 * This can be called at any time. No action lock required
7977 */
7978
7979 struct ipw2100_priv *priv = ieee80211_priv(dev);
7980 int level = IPW_POWER_LEVEL(priv->power_mode);
7981 s32 timeout, period;
7982
7983 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
7984 snprintf(extra, MAX_POWER_STRING,
7985 "Power save level: %d (Off)", level);
7986 } else {
7987 switch (level) {
7988 case IPW_POWER_MODE_CAM:
7989 snprintf(extra, MAX_POWER_STRING,
7990 "Power save level: %d (None)", level);
7991 break;
7992 case IPW_POWER_AUTO:
7993 snprintf(extra, MAX_POWER_STRING,
 7994				 "Power save level: %d (Auto)", level);
7995 break;
7996 default:
7997 timeout = timeout_duration[level - 1] / 1000;
7998 period = period_duration[level - 1] / 1000;
7999 snprintf(extra, MAX_POWER_STRING,
8000 "Power save level: %d "
8001 "(Timeout %dms, Period %dms)",
8002 level, timeout, period);
8003 }
8004 }
8005
8006 wrqu->data.length = strlen(extra) + 1;
8007
8008 return 0;
8009}
8010
8011
8012static int ipw2100_wx_set_preamble(struct net_device *dev,
8013 struct iw_request_info *info,
8014 union iwreq_data *wrqu, char *extra)
8015{
8016 struct ipw2100_priv *priv = ieee80211_priv(dev);
8017 int err, mode = *(int *)extra;
8018
8019 down(&priv->action_sem);
8020 if (!(priv->status & STATUS_INITIALIZED)) {
8021 err = -EIO;
8022 goto done;
8023 }
8024
8025 if (mode == 1)
8026 priv->config |= CFG_LONG_PREAMBLE;
8027 else if (mode == 0)
8028 priv->config &= ~CFG_LONG_PREAMBLE;
8029 else {
8030 err = -EINVAL;
8031 goto done;
8032 }
8033
8034 err = ipw2100_system_config(priv, 0);
8035
8036done:
8037 up(&priv->action_sem);
8038 return err;
8039}
8040
8041static int ipw2100_wx_get_preamble(struct net_device *dev,
8042 struct iw_request_info *info,
8043 union iwreq_data *wrqu, char *extra)
8044{
8045 /*
8046 * This can be called at any time. No action lock required
8047 */
8048
8049 struct ipw2100_priv *priv = ieee80211_priv(dev);
8050
8051 if (priv->config & CFG_LONG_PREAMBLE)
8052 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
8053 else
8054 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
8055
8056 return 0;
8057}
8058
8059static iw_handler ipw2100_wx_handlers[] =
8060{
8061 NULL, /* SIOCSIWCOMMIT */
8062 ipw2100_wx_get_name, /* SIOCGIWNAME */
8063 NULL, /* SIOCSIWNWID */
8064 NULL, /* SIOCGIWNWID */
8065 ipw2100_wx_set_freq, /* SIOCSIWFREQ */
8066 ipw2100_wx_get_freq, /* SIOCGIWFREQ */
8067 ipw2100_wx_set_mode, /* SIOCSIWMODE */
8068 ipw2100_wx_get_mode, /* SIOCGIWMODE */
8069 NULL, /* SIOCSIWSENS */
8070 NULL, /* SIOCGIWSENS */
8071 NULL, /* SIOCSIWRANGE */
8072 ipw2100_wx_get_range, /* SIOCGIWRANGE */
8073 NULL, /* SIOCSIWPRIV */
8074 NULL, /* SIOCGIWPRIV */
8075 NULL, /* SIOCSIWSTATS */
8076 NULL, /* SIOCGIWSTATS */
8077 NULL, /* SIOCSIWSPY */
8078 NULL, /* SIOCGIWSPY */
 8079	NULL,			/* SIOCSIWTHRSPY */
 8080	NULL,			/* SIOCGIWTHRSPY */
8081 ipw2100_wx_set_wap, /* SIOCSIWAP */
8082 ipw2100_wx_get_wap, /* SIOCGIWAP */
8083 NULL, /* -- hole -- */
8084 NULL, /* SIOCGIWAPLIST -- deprecated */
8085 ipw2100_wx_set_scan, /* SIOCSIWSCAN */
8086 ipw2100_wx_get_scan, /* SIOCGIWSCAN */
8087 ipw2100_wx_set_essid, /* SIOCSIWESSID */
8088 ipw2100_wx_get_essid, /* SIOCGIWESSID */
8089 ipw2100_wx_set_nick, /* SIOCSIWNICKN */
8090 ipw2100_wx_get_nick, /* SIOCGIWNICKN */
8091 NULL, /* -- hole -- */
8092 NULL, /* -- hole -- */
8093 ipw2100_wx_set_rate, /* SIOCSIWRATE */
8094 ipw2100_wx_get_rate, /* SIOCGIWRATE */
8095 ipw2100_wx_set_rts, /* SIOCSIWRTS */
8096 ipw2100_wx_get_rts, /* SIOCGIWRTS */
8097 ipw2100_wx_set_frag, /* SIOCSIWFRAG */
8098 ipw2100_wx_get_frag, /* SIOCGIWFRAG */
8099 ipw2100_wx_set_txpow, /* SIOCSIWTXPOW */
8100 ipw2100_wx_get_txpow, /* SIOCGIWTXPOW */
8101 ipw2100_wx_set_retry, /* SIOCSIWRETRY */
8102 ipw2100_wx_get_retry, /* SIOCGIWRETRY */
8103 ipw2100_wx_set_encode, /* SIOCSIWENCODE */
8104 ipw2100_wx_get_encode, /* SIOCGIWENCODE */
8105 ipw2100_wx_set_power, /* SIOCSIWPOWER */
8106 ipw2100_wx_get_power, /* SIOCGIWPOWER */
8107};
8108
8109#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
8110#define IPW2100_PRIV_RESET SIOCIWFIRSTPRIV+1
8111#define IPW2100_PRIV_SET_POWER SIOCIWFIRSTPRIV+2
8112#define IPW2100_PRIV_GET_POWER SIOCIWFIRSTPRIV+3
8113#define IPW2100_PRIV_SET_LONGPREAMBLE SIOCIWFIRSTPRIV+4
8114#define IPW2100_PRIV_GET_LONGPREAMBLE SIOCIWFIRSTPRIV+5
8115
8116static const struct iw_priv_args ipw2100_private_args[] = {
8117
8118#ifdef CONFIG_IPW2100_MONITOR
8119 {
8120 IPW2100_PRIV_SET_MONITOR,
8121 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"
8122 },
8123 {
8124 IPW2100_PRIV_RESET,
8125 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"
8126 },
8127#endif /* CONFIG_IPW2100_MONITOR */
8128
8129 {
8130 IPW2100_PRIV_SET_POWER,
8131 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_power"
8132 },
8133 {
8134 IPW2100_PRIV_GET_POWER,
8135 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_POWER_STRING, "get_power"
8136 },
8137 {
8138 IPW2100_PRIV_SET_LONGPREAMBLE,
8139 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"
8140 },
8141 {
8142 IPW2100_PRIV_GET_LONGPREAMBLE,
8143 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_preamble"
8144 },
8145};
8146
8147static iw_handler ipw2100_private_handler[] = {
8148#ifdef CONFIG_IPW2100_MONITOR
8149 ipw2100_wx_set_promisc,
8150 ipw2100_wx_reset,
8151#else /* CONFIG_IPW2100_MONITOR */
8152 NULL,
8153 NULL,
8154#endif /* CONFIG_IPW2100_MONITOR */
8155 ipw2100_wx_set_powermode,
8156 ipw2100_wx_get_powermode,
8157 ipw2100_wx_set_preamble,
8158 ipw2100_wx_get_preamble,
8159};
8160
8161static struct iw_handler_def ipw2100_wx_handler_def =
8162{
8163 .standard = ipw2100_wx_handlers,
8164 .num_standard = sizeof(ipw2100_wx_handlers) / sizeof(iw_handler),
8165 .num_private = sizeof(ipw2100_private_handler) / sizeof(iw_handler),
8166 .num_private_args = sizeof(ipw2100_private_args) /
8167 sizeof(struct iw_priv_args),
8168 .private = (iw_handler *)ipw2100_private_handler,
8169 .private_args = (struct iw_priv_args *)ipw2100_private_args,
8170};
8171
8172/*
8173 * Get wireless statistics.
8174 * Called by /proc/net/wireless
8175 * Also called by SIOCGIWSTATS
8176 */
8177static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device * dev)
8178{
8179 enum {
8180 POOR = 30,
8181 FAIR = 60,
8182 GOOD = 80,
8183 VERY_GOOD = 90,
8184 EXCELLENT = 95,
8185 PERFECT = 100
8186 };
8187 int rssi_qual;
8188 int tx_qual;
8189 int beacon_qual;
8190
8191 struct ipw2100_priv *priv = ieee80211_priv(dev);
8192 struct iw_statistics *wstats;
8193 u32 rssi, quality, tx_retries, missed_beacons, tx_failures;
8194 u32 ord_len = sizeof(u32);
8195
8196 if (!priv)
8197 return (struct iw_statistics *) NULL;
8198
8199 wstats = &priv->wstats;
8200
8201 /* if hw is disabled, then ipw2100_get_ordinal() can't be called.
8202 * ipw2100_wx_wireless_stats seems to be called before fw is
8203 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
 8204	 * and associated; if not associated, the values are all meaningless
 8205	 * anyway, so set them all to 0 and INVALID */
8206 if (!(priv->status & STATUS_ASSOCIATED)) {
8207 wstats->miss.beacon = 0;
8208 wstats->discard.retries = 0;
8209 wstats->qual.qual = 0;
8210 wstats->qual.level = 0;
8211 wstats->qual.noise = 0;
8212 wstats->qual.updated = 7;
8213 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
8214 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
8215 return wstats;
8216 }
8217
8218 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_MISSED_BCNS,
8219 &missed_beacons, &ord_len))
8220 goto fail_get_ordinal;
8221
 8222	/* If we don't have a connection, the quality and level are 0 */
8223 if (!(priv->status & STATUS_ASSOCIATED)) {
8224 wstats->qual.qual = 0;
8225 wstats->qual.level = 0;
8226 } else {
8227 if (ipw2100_get_ordinal(priv, IPW_ORD_RSSI_AVG_CURR,
8228 &rssi, &ord_len))
8229 goto fail_get_ordinal;
8230 wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
8231 if (rssi < 10)
8232 rssi_qual = rssi * POOR / 10;
8233 else if (rssi < 15)
8234 rssi_qual = (rssi - 10) * (FAIR - POOR) / 5 + POOR;
8235 else if (rssi < 20)
8236 rssi_qual = (rssi - 15) * (GOOD - FAIR) / 5 + FAIR;
8237 else if (rssi < 30)
8238 rssi_qual = (rssi - 20) * (VERY_GOOD - GOOD) /
8239 10 + GOOD;
8240 else
8241 rssi_qual = (rssi - 30) * (PERFECT - VERY_GOOD) /
8242 10 + VERY_GOOD;
8243
8244 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_RETRIES,
8245 &tx_retries, &ord_len))
8246 goto fail_get_ordinal;
8247
8248 if (tx_retries > 75)
8249 tx_qual = (90 - tx_retries) * POOR / 15;
8250 else if (tx_retries > 70)
8251 tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
8252 else if (tx_retries > 65)
8253 tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
8254 else if (tx_retries > 50)
8255 tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
8256 15 + GOOD;
8257 else
8258 tx_qual = (50 - tx_retries) *
8259 (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
8260
8261 if (missed_beacons > 50)
8262 beacon_qual = (60 - missed_beacons) * POOR / 10;
8263 else if (missed_beacons > 40)
8264 beacon_qual = (50 - missed_beacons) * (FAIR - POOR) /
8265 10 + POOR;
8266 else if (missed_beacons > 32)
8267 beacon_qual = (40 - missed_beacons) * (GOOD - FAIR) /
8268 18 + FAIR;
8269 else if (missed_beacons > 20)
8270 beacon_qual = (32 - missed_beacons) *
8271 (VERY_GOOD - GOOD) / 20 + GOOD;
8272 else
8273 beacon_qual = (20 - missed_beacons) *
8274 (PERFECT - VERY_GOOD) / 20 + VERY_GOOD;
8275
8276 quality = min(beacon_qual, min(tx_qual, rssi_qual));
8277
8278#ifdef CONFIG_IPW_DEBUG
8279 if (beacon_qual == quality)
8280 IPW_DEBUG_WX("Quality clamped by Missed Beacons\n");
8281 else if (tx_qual == quality)
8282 IPW_DEBUG_WX("Quality clamped by Tx Retries\n");
8283 else if (quality != 100)
8284 IPW_DEBUG_WX("Quality clamped by Signal Strength\n");
8285 else
8286 IPW_DEBUG_WX("Quality not clamped.\n");
8287#endif
8288
8289 wstats->qual.qual = quality;
8290 wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
8291 }
8292
8293 wstats->qual.noise = 0;
8294 wstats->qual.updated = 7;
8295 wstats->qual.updated |= IW_QUAL_NOISE_INVALID;
8296
8297 /* FIXME: this is percent and not a # */
8298 wstats->miss.beacon = missed_beacons;
8299
8300 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES,
8301 &tx_failures, &ord_len))
8302 goto fail_get_ordinal;
8303 wstats->discard.retries = tx_failures;
8304
8305 return wstats;
8306
8307 fail_get_ordinal:
8308 IPW_DEBUG_WX("failed querying ordinals.\n");
8309
8310 return (struct iw_statistics *) NULL;
8311}
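/*
 * Worked example of the clamping above (hypothetical readings): with
 * rssi == 17, tx_retries == 10 and missed_beacons == 5, the three
 * piecewise-linear mappings give
 *
 *   rssi_qual   = (17 - 15) * (GOOD - FAIR) / 5 + FAIR               = 68
 *   tx_qual     = (50 - 10) * (PERFECT - VERY_GOOD) / 50 + VERY_GOOD = 98
 *   beacon_qual = (20 - 5) * (PERFECT - VERY_GOOD) / 20 + VERY_GOOD  = 97
 *
 * so quality = min(97, min(98, 68)) = 68 and the debug output reports
 * "Quality clamped by Signal Strength".
 */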
8312
8313static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
8314{
8315 union iwreq_data wrqu;
8316 int len = ETH_ALEN;
8317
8318 if (priv->status & STATUS_STOPPING)
8319 return;
8320
8321 down(&priv->action_sem);
8322
8323 IPW_DEBUG_WX("enter\n");
8324
8325 up(&priv->action_sem);
8326
8327 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
8328
8329 /* Fetch BSSID from the hardware */
8330 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) ||
8331 priv->status & STATUS_RF_KILL_MASK ||
8332 ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
8333 &priv->bssid, &len)) {
8334 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
8335 } else {
8336 /* We now have the BSSID, so can finish setting to the full
8337 * associated state */
8338 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
8339 memcpy(&priv->ieee->bssid, priv->bssid, ETH_ALEN);
8340 priv->status &= ~STATUS_ASSOCIATING;
8341 priv->status |= STATUS_ASSOCIATED;
8342 netif_carrier_on(priv->net_dev);
8343 if (netif_queue_stopped(priv->net_dev)) {
8344 IPW_DEBUG_INFO("Waking net queue.\n");
8345 netif_wake_queue(priv->net_dev);
8346 } else {
8347 IPW_DEBUG_INFO("Starting net queue.\n");
8348 netif_start_queue(priv->net_dev);
8349 }
8350 }
8351
8352 if (!(priv->status & STATUS_ASSOCIATED)) {
8353 IPW_DEBUG_WX("Configuring ESSID\n");
8354 down(&priv->action_sem);
8355 /* This is a disassociation event, so kick the firmware to
8356 * look for another AP */
8357 if (priv->config & CFG_STATIC_ESSID)
8358 ipw2100_set_essid(priv, priv->essid, priv->essid_len, 0);
8359 else
8360 ipw2100_set_essid(priv, NULL, 0, 0);
8361 up(&priv->action_sem);
8362 }
8363
8364 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
8365}
8366
8367#define IPW2100_FW_MAJOR_VERSION 1
8368#define IPW2100_FW_MINOR_VERSION 3
8369
 8370#define IPW2100_FW_MINOR(x) ((x & 0xff00) >> 8)
8371#define IPW2100_FW_MAJOR(x) (x & 0xff)
8372
8373#define IPW2100_FW_VERSION ((IPW2100_FW_MINOR_VERSION << 8) | \
8374 IPW2100_FW_MAJOR_VERSION)
8375
8376#define IPW2100_FW_PREFIX "ipw2100-" __stringify(IPW2100_FW_MAJOR_VERSION) \
8377"." __stringify(IPW2100_FW_MINOR_VERSION)
8378
8379#define IPW2100_FW_NAME(x) IPW2100_FW_PREFIX "" x ".fw"
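/*
 * With the version macros above this expands to firmware file names of the
 * form "ipw2100-1.3.fw" (BSS), "ipw2100-1.3-i.fw" (IBSS) and
 * "ipw2100-1.3-p.fw" (monitor), which is what ipw2100_get_firmware()
 * requests below via request_firmware().
 */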
8380
8381
8382/*
8383
8384BINARY FIRMWARE HEADER FORMAT
8385
8386offset length desc
83870 2 version
83882 2 mode == 0:BSS,1:IBSS,2:MONITOR
83894 4 fw_len
83908 4 uc_len
 839112       fw_len   firmware data
839212 + fw_len uc_len microcode data
8393
8394*/
8395
8396struct ipw2100_fw_header {
8397 short version;
8398 short mode;
8399 unsigned int fw_size;
8400 unsigned int uc_size;
8401} __attribute__ ((packed));
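/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * sizes embedded in the header can be checked against the blob length
 * reported by request_firmware() before the image is trusted, something
 * the loader below does not currently do.
 */
static inline int ipw2100_example_check_fw_sizes(const struct firmware *fw_entry)
{
	const struct ipw2100_fw_header *h =
		(const struct ipw2100_fw_header *)fw_entry->data;

	if (fw_entry->size < sizeof(*h))
		return -EINVAL;		/* too short to hold the header */

	/* 12-byte header + firmware + microcode must cover the whole blob */
	if (sizeof(*h) + h->fw_size + h->uc_size != fw_entry->size)
		return -EINVAL;

	return 0;
}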
8402
8403
8404
8405static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
8406{
8407 struct ipw2100_fw_header *h =
8408 (struct ipw2100_fw_header *)fw->fw_entry->data;
8409
8410 if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
8411 printk(KERN_WARNING DRV_NAME ": Firmware image not compatible "
8412 "(detected version id of %u). "
8413 "See Documentation/networking/README.ipw2100\n",
8414 h->version);
8415 return 1;
8416 }
8417
8418 fw->version = h->version;
8419 fw->fw.data = fw->fw_entry->data + sizeof(struct ipw2100_fw_header);
8420 fw->fw.size = h->fw_size;
8421 fw->uc.data = fw->fw.data + h->fw_size;
8422 fw->uc.size = h->uc_size;
8423
8424 return 0;
8425}
8426
8427
8428static int ipw2100_get_firmware(struct ipw2100_priv *priv,
8429 struct ipw2100_fw *fw)
8430{
8431 char *fw_name;
8432 int rc;
8433
8434 IPW_DEBUG_INFO("%s: Using hotplug firmware load.\n",
8435 priv->net_dev->name);
8436
8437 switch (priv->ieee->iw_mode) {
8438 case IW_MODE_ADHOC:
8439 fw_name = IPW2100_FW_NAME("-i");
8440 break;
8441#ifdef CONFIG_IPW2100_MONITOR
8442 case IW_MODE_MONITOR:
8443 fw_name = IPW2100_FW_NAME("-p");
8444 break;
8445#endif
8446 case IW_MODE_INFRA:
8447 default:
8448 fw_name = IPW2100_FW_NAME("");
8449 break;
8450 }
8451
8452 rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev);
8453
8454 if (rc < 0) {
8455 printk(KERN_ERR DRV_NAME ": "
8456 "%s: Firmware '%s' not available or load failed.\n",
8457 priv->net_dev->name, fw_name);
8458 return rc;
8459 }
8460 IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data,
8461 fw->fw_entry->size);
8462
8463 ipw2100_mod_firmware_load(fw);
8464
8465 return 0;
8466}
8467
8468static void ipw2100_release_firmware(struct ipw2100_priv *priv,
8469 struct ipw2100_fw *fw)
8470{
8471 fw->version = 0;
8472 if (fw->fw_entry)
8473 release_firmware(fw->fw_entry);
8474 fw->fw_entry = NULL;
8475}
8476
8477
8478static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf,
8479 size_t max)
8480{
8481 char ver[MAX_FW_VERSION_LEN];
8482 u32 len = MAX_FW_VERSION_LEN;
8483 u32 tmp;
8484 int i;
8485 /* firmware version is an ascii string (max len of 14) */
8486 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_FW_VER_NUM,
8487 ver, &len))
8488 return -EIO;
8489 tmp = max;
8490 if (len >= max)
8491 len = max - 1;
8492 for (i = 0; i < len; i++)
8493 buf[i] = ver[i];
8494 buf[i] = '\0';
8495 return tmp;
8496}
8497
8498static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf,
8499 size_t max)
8500{
8501 u32 ver;
8502 u32 len = sizeof(ver);
8503 /* microcode version is a 32 bit integer */
8504 if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION,
8505 &ver, &len))
8506 return -EIO;
8507 return snprintf(buf, max, "%08X", ver);
8508}
8509
8510/*
8511 * On exit, the firmware will have been freed from the fw list
8512 */
8513static int ipw2100_fw_download(struct ipw2100_priv *priv,
8514 struct ipw2100_fw *fw)
8515{
8516 /* firmware is constructed of N contiguous entries, each entry is
8517 * structured as:
8518 *
 8519	 * offset  size  desc
8520 * 0 4 address to write to
8521 * 4 2 length of data run
8522 * 6 length data
8523 */
8524 unsigned int addr;
8525 unsigned short len;
8526
8527 const unsigned char *firmware_data = fw->fw.data;
8528 unsigned int firmware_data_left = fw->fw.size;
8529
8530 while (firmware_data_left > 0) {
8531 addr = *(u32 *)(firmware_data);
8532 firmware_data += 4;
8533 firmware_data_left -= 4;
8534
8535 len = *(u16 *)(firmware_data);
8536 firmware_data += 2;
8537 firmware_data_left -= 2;
8538
8539 if (len > 32) {
8540 printk(KERN_ERR DRV_NAME ": "
8541 "Invalid firmware run-length of %d bytes\n",
8542 len);
8543 return -EINVAL;
8544 }
8545
8546 write_nic_memory(priv->net_dev, addr, len, firmware_data);
8547 firmware_data += len;
8548 firmware_data_left -= len;
8549 }
8550
8551 return 0;
8552}
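/*
 * Example of a single run in the image consumed by the loop above (byte
 * values are hypothetical; multi-byte fields are read in host byte order,
 * i.e. little-endian on the platforms this driver targets):
 *
 *   04 00 30 00  04 00  de ad be ef
 *   \-- addr --/ \len-/ \-- data --/
 *
 * which writes the 4 payload bytes to NIC address 0x00300004 via
 * write_nic_memory() and advances firmware_data by 10 bytes.
 */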
8553
8554struct symbol_alive_response {
8555 u8 cmd_id;
8556 u8 seq_num;
8557 u8 ucode_rev;
8558 u8 eeprom_valid;
8559 u16 valid_flags;
8560 u8 IEEE_addr[6];
8561 u16 flags;
8562 u16 pcb_rev;
8563 u16 clock_settle_time; // 1us LSB
8564 u16 powerup_settle_time; // 1us LSB
8565 u16 hop_settle_time; // 1us LSB
8566 u8 date[3]; // month, day, year
8567 u8 time[2]; // hours, minutes
8568 u8 ucode_valid;
8569};
8570
8571static int ipw2100_ucode_download(struct ipw2100_priv *priv,
8572 struct ipw2100_fw *fw)
8573{
8574 struct net_device *dev = priv->net_dev;
8575 const unsigned char *microcode_data = fw->uc.data;
8576 unsigned int microcode_data_left = fw->uc.size;
8577 void __iomem *reg = (void __iomem *)dev->base_addr;
8578
8579 struct symbol_alive_response response;
8580 int i, j;
8581 u8 data;
8582
8583 /* Symbol control */
8584 write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
8585 readl(reg);
8586 write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
8587 readl(reg);
8588
8589 /* HW config */
8590 write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */
8591 readl(reg);
8592 write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */
8593 readl(reg);
8594
8595 /* EN_CS_ACCESS bit to reset control store pointer */
8596 write_nic_byte(dev, 0x210000, 0x40);
8597 readl(reg);
8598 write_nic_byte(dev, 0x210000, 0x0);
8599 readl(reg);
8600 write_nic_byte(dev, 0x210000, 0x40);
8601 readl(reg);
8602
8603 /* copy microcode from buffer into Symbol */
8604
8605 while (microcode_data_left > 0) {
8606 write_nic_byte(dev, 0x210010, *microcode_data++);
8607 write_nic_byte(dev, 0x210010, *microcode_data++);
8608 microcode_data_left -= 2;
8609 }
8610
8611 /* EN_CS_ACCESS bit to reset the control store pointer */
8612 write_nic_byte(dev, 0x210000, 0x0);
8613 readl(reg);
8614
8615 /* Enable System (Reg 0)
8616 * first enable causes garbage in RX FIFO */
8617 write_nic_byte(dev, 0x210000, 0x0);
8618 readl(reg);
8619 write_nic_byte(dev, 0x210000, 0x80);
8620 readl(reg);
8621
8622 /* Reset External Baseband Reg */
8623 write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
8624 readl(reg);
8625 write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
8626 readl(reg);
8627
8628 /* HW Config (Reg 5) */
8629 write_nic_byte(dev, 0x210014, 0x72); // fifo width =16
8630 readl(reg);
8631 write_nic_byte(dev, 0x210014, 0x72); // fifo width =16
8632 readl(reg);
8633
8634 /* Enable System (Reg 0)
8635 * second enable should be OK */
8636 write_nic_byte(dev, 0x210000, 0x00); // clear enable system
8637 readl(reg);
8638 write_nic_byte(dev, 0x210000, 0x80); // set enable system
8639
8640 /* check Symbol is enabled - upped this from 5 as it wasn't always
8641 * catching the update */
8642 for (i = 0; i < 10; i++) {
8643 udelay(10);
8644
8645 /* check Dino is enabled bit */
8646 read_nic_byte(dev, 0x210000, &data);
8647 if (data & 0x1)
8648 break;
8649 }
8650
8651 if (i == 10) {
8652 printk(KERN_ERR DRV_NAME ": %s: Error initializing Symbol\n",
8653 dev->name);
8654 return -EIO;
8655 }
8656
8657 /* Get Symbol alive response */
8658 for (i = 0; i < 30; i++) {
8659 /* Read alive response structure */
8660 for (j = 0;
8661 j < (sizeof(struct symbol_alive_response) >> 1);
8662 j++)
8663 read_nic_word(dev, 0x210004,
8664 ((u16 *)&response) + j);
8665
8666 if ((response.cmd_id == 1) &&
8667 (response.ucode_valid == 0x1))
8668 break;
8669 udelay(10);
8670 }
8671
8672 if (i == 30) {
8673 printk(KERN_ERR DRV_NAME ": %s: No response from Symbol - hw not alive\n",
8674 dev->name);
8675 printk_buf(IPW_DL_ERROR, (u8*)&response, sizeof(response));
8676 return -EIO;
8677 }
8678
8679 return 0;
8680}
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
new file mode 100644
index 000000000000..2a3cdbd50168
--- /dev/null
+++ b/drivers/net/wireless/ipw2100.h
@@ -0,0 +1,1167 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26#ifndef _IPW2100_H
27#define _IPW2100_H
28
29#include <linux/sched.h>
30#include <linux/interrupt.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/list.h>
34#include <linux/delay.h>
35#include <linux/skbuff.h>
36#include <asm/io.h>
37#include <linux/socket.h>
38#include <linux/if_arp.h>
39#include <linux/wireless.h>
40#include <linux/version.h>
41#include <net/iw_handler.h> // new driver API
42
43#include <net/ieee80211.h>
44
45#include <linux/workqueue.h>
46
47struct ipw2100_priv;
48struct ipw2100_tx_packet;
49struct ipw2100_rx_packet;
50
51#define IPW_DL_UNINIT 0x80000000
52#define IPW_DL_NONE 0x00000000
53#define IPW_DL_ALL 0x7FFFFFFF
54
55/*
56 * To use the debug system:
57 *
58 * If you are defining a new debug classification, simply add it to the #define
59 * list here in the form of:
60 *
61 * #define IPW_DL_xxxx VALUE
62 *
63 * shifting value to the left one bit from the previous entry. xxxx should be
64 * the name of the classification (for example, WEP)
65 *
66 * You then need to either add an IPW_DEBUG_xxxx() macro definition for your
67 * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
68 * to send output to that classification.
69 *
70 * To add your debug level to the list of levels seen when you perform
71 *
72 * % cat /proc/net/ipw2100/debug_level
73 *
74 * you simply need to add your entry to the ipw2100_debug_levels array.
75 *
76 * If you do not see debug_level in /proc/net/ipw2100 then you do not have
77 * CONFIG_IPW_DEBUG defined in your kernel configuration
78 *
79 */
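As a concrete sketch of the recipe described in the comment above (illustration only, not part of the original header): the classification name, the macro name, and the assumption that bit 29 is still unused are all hypothetical.

/* Hypothetical new classification, added per the steps above (illustration only) */
#define IPW_DL_EXAMPLE         (1<<29)
#define IPW_DEBUG_EXAMPLE(f...) IPW_DEBUG(IPW_DL_EXAMPLE, ## f)

As the comment notes, such a level would also need an entry in the ipw2100_debug_levels array to show up under /proc/net/ipw2100/debug_level.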
80
81#define IPW_DL_ERROR (1<<0)
82#define IPW_DL_WARNING (1<<1)
83#define IPW_DL_INFO (1<<2)
84#define IPW_DL_WX (1<<3)
85#define IPW_DL_HC (1<<5)
86#define IPW_DL_STATE (1<<6)
87
88#define IPW_DL_NOTIF (1<<10)
89#define IPW_DL_SCAN (1<<11)
90#define IPW_DL_ASSOC (1<<12)
91#define IPW_DL_DROP (1<<13)
92
93#define IPW_DL_IOCTL (1<<14)
94#define IPW_DL_RF_KILL (1<<17)
95
96
97#define IPW_DL_MANAGE (1<<15)
98#define IPW_DL_FW (1<<16)
99
100#define IPW_DL_FRAG (1<<21)
101#define IPW_DL_WEP (1<<22)
102#define IPW_DL_TX (1<<23)
103#define IPW_DL_RX (1<<24)
104#define IPW_DL_ISR (1<<25)
105#define IPW_DL_IO (1<<26)
106#define IPW_DL_TRACE (1<<28)
107
108#define IPW_DEBUG_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
109#define IPW_DEBUG_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
110#define IPW_DEBUG_INFO(f...) IPW_DEBUG(IPW_DL_INFO, ## f)
111#define IPW_DEBUG_WX(f...) IPW_DEBUG(IPW_DL_WX, ## f)
112#define IPW_DEBUG_SCAN(f...) IPW_DEBUG(IPW_DL_SCAN, ## f)
113#define IPW_DEBUG_NOTIF(f...) IPW_DEBUG(IPW_DL_NOTIF, ## f)
114#define IPW_DEBUG_TRACE(f...) IPW_DEBUG(IPW_DL_TRACE, ## f)
115#define IPW_DEBUG_RX(f...) IPW_DEBUG(IPW_DL_RX, ## f)
116#define IPW_DEBUG_TX(f...) IPW_DEBUG(IPW_DL_TX, ## f)
117#define IPW_DEBUG_ISR(f...) IPW_DEBUG(IPW_DL_ISR, ## f)
118#define IPW_DEBUG_MANAGEMENT(f...) IPW_DEBUG(IPW_DL_MANAGE, ## f)
119#define IPW_DEBUG_WEP(f...) IPW_DEBUG(IPW_DL_WEP, ## f)
120#define IPW_DEBUG_HC(f...) IPW_DEBUG(IPW_DL_HC, ## f)
121#define IPW_DEBUG_FRAG(f...) IPW_DEBUG(IPW_DL_FRAG, ## f)
122#define IPW_DEBUG_FW(f...) IPW_DEBUG(IPW_DL_FW, ## f)
123#define IPW_DEBUG_RF_KILL(f...) IPW_DEBUG(IPW_DL_RF_KILL, ## f)
124#define IPW_DEBUG_DROP(f...) IPW_DEBUG(IPW_DL_DROP, ## f)
125#define IPW_DEBUG_IO(f...) IPW_DEBUG(IPW_DL_IO, ## f)
126#define IPW_DEBUG_IOCTL(f...) IPW_DEBUG(IPW_DL_IOCTL, ## f)
127#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
128#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
129
130enum {
131 IPW_HW_STATE_DISABLED = 1,
132 IPW_HW_STATE_ENABLED = 0
133};
134
135struct ssid_context {
136 char ssid[IW_ESSID_MAX_SIZE + 1];
137 int ssid_len;
138 unsigned char bssid[ETH_ALEN];
139 int port_type;
140 int channel;
141
142};
143
144extern const char *port_type_str[];
145extern const char *band_str[];
146
147#define NUMBER_OF_BD_PER_COMMAND_PACKET 1
148#define NUMBER_OF_BD_PER_DATA_PACKET 2
149
150#define IPW_MAX_BDS 6
151#define NUMBER_OF_OVERHEAD_BDS_PER_PACKETR 2
152#define NUMBER_OF_BDS_TO_LEAVE_FOR_COMMANDS 1
153
154#define REQUIRED_SPACE_IN_RING_FOR_COMMAND_PACKET \
155 (IPW_BD_QUEUE_W_R_MIN_SPARE + NUMBER_OF_BD_PER_COMMAND_PACKET)
156
157struct bd_status {
158 union {
159 struct { u8 nlf:1, txType:2, intEnabled:1, reserved:4;} fields;
160 u8 field;
161 } info;
162} __attribute__ ((packed));
163
164struct ipw2100_bd {
165 u32 host_addr;
166 u32 buf_length;
167 struct bd_status status;
168 /* number of fragments for frame (should be set only for
169 * 1st TBD) */
170 u8 num_fragments;
171 u8 reserved[6];
172} __attribute__ ((packed));
173
174#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
175#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd))
176
177#define IPW_BD_STATUS_TX_FRAME_802_3 0x00
178#define IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT 0x01
179#define IPW_BD_STATUS_TX_FRAME_COMMAND 0x02
180#define IPW_BD_STATUS_TX_FRAME_802_11 0x04
181#define IPW_BD_STATUS_TX_INTERRUPT_ENABLE 0x08
182
183struct ipw2100_bd_queue {
184 /* driver (virtual) pointer to queue */
185 struct ipw2100_bd *drv;
186
187 /* firmware (physical) pointer to queue */
188 dma_addr_t nic;
189
190 /* Length of phy memory allocated for BDs */
191 u32 size;
192
193 /* Number of BDs in queue (and in array) */
194 u32 entries;
195
196 /* Number of available BDs (invalid for NIC BDs) */
197 u32 available;
198
199 /* Offset of oldest used BD in array (next one to
200 * check for completion) */
201 u32 oldest;
202
203 /* Offset of next available (unused) BD */
204 u32 next;
205};
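The oldest/next/available comments above describe a simple circular ring of buffer descriptors. The helper below is only a sketch of the index arithmetic those comments imply; the function name and wrap-around logic are illustrative assumptions, not the driver's actual queue handling.

/* Illustration only: claim the next free BD slot and advance the ring.
 * Wraps 'next' modulo 'entries' and shrinks 'available'; assumes the
 * caller has already verified that q->available > 0. */
static inline u32 example_bd_queue_claim(struct ipw2100_bd_queue *q)
{
	u32 slot = q->next;

	q->next = (q->next + 1) % q->entries;
	q->available--;

	return slot;
}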
206
207#define RX_QUEUE_LENGTH 256
208#define TX_QUEUE_LENGTH 256
209#define HW_QUEUE_LENGTH 256
210
211#define TX_PENDED_QUEUE_LENGTH (TX_QUEUE_LENGTH / NUMBER_OF_BD_PER_DATA_PACKET)
212
213#define STATUS_TYPE_MASK 0x0000000f
214#define COMMAND_STATUS_VAL 0
215#define STATUS_CHANGE_VAL 1
216#define P80211_DATA_VAL 2
217#define P8023_DATA_VAL 3
218#define HOST_NOTIFICATION_VAL 4
219
220#define IPW2100_RSSI_TO_DBM (-98)
221
222struct ipw2100_status {
223 u32 frame_size;
224 u16 status_fields;
225 u8 flags;
226#define IPW_STATUS_FLAG_DECRYPTED (1<<0)
227#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1)
228#define IPW_STATUS_FLAG_CRC_ERROR (1<<2)
229 u8 rssi;
230} __attribute__ ((packed));
231
232struct ipw2100_status_queue {
233 /* driver (virtual) pointer to queue */
234 struct ipw2100_status *drv;
235
236 /* firmware (physical) pointer to queue */
237 dma_addr_t nic;
238
239 /* Length of phy memory allocated for BDs */
240 u32 size;
241};
242
243#define HOST_COMMAND_PARAMS_REG_LEN 100
244#define CMD_STATUS_PARAMS_REG_LEN 3
245
246#define IPW_WPA_CAPABILITIES 0x1
247#define IPW_WPA_LISTENINTERVAL 0x2
248#define IPW_WPA_AP_ADDRESS 0x4
249
250#define IPW_MAX_VAR_IE_LEN ((HOST_COMMAND_PARAMS_REG_LEN - 4) * sizeof(u32))
251
252struct ipw2100_wpa_assoc_frame {
253 u16 fixed_ie_mask;
254 struct {
255 u16 capab_info;
256 u16 listen_interval;
257 u8 current_ap[ETH_ALEN];
258 } fixed_ies;
259 u32 var_ie_len;
260 u8 var_ie[IPW_MAX_VAR_IE_LEN];
261};
262
263#define IPW_BSS 1
264#define IPW_MONITOR 2
265#define IPW_IBSS 3
266
267/**
268 * @struct _tx_cmd - HWCommand
269 * @brief H/W command structure.
270 */
271struct ipw2100_cmd_header {
272 u32 host_command_reg;
273 u32 host_command_reg1;
274 u32 sequence;
275 u32 host_command_len_reg;
276 u32 host_command_params_reg[HOST_COMMAND_PARAMS_REG_LEN];
277 u32 cmd_status_reg;
278 u32 cmd_status_params_reg[CMD_STATUS_PARAMS_REG_LEN];
279 u32 rxq_base_ptr;
280 u32 rxq_next_ptr;
281 u32 rxq_host_ptr;
282 u32 txq_base_ptr;
283 u32 txq_next_ptr;
284 u32 txq_host_ptr;
285 u32 tx_status_reg;
286 u32 reserved;
287 u32 status_change_reg;
288 u32 reserved1[3];
289 u32 *ordinal1_ptr;
290 u32 *ordinal2_ptr;
291} __attribute__ ((packed));
292
293struct ipw2100_data_header {
294 u32 host_command_reg;
295 u32 host_command_reg1;
296	u8 encrypted;		// BOOLEAN in win! TRUE if frame is encrypted by the driver
297	u8 needs_encryption;	// BOOLEAN in win! TRUE if frame needs to be encrypted by the NIC
298 u8 wep_index; // 0 no key, 1-4 key index, 0xff immediate key
299 u8 key_size; // 0 no imm key, 0x5 64bit encr, 0xd 128bit encr, 0x10 128bit encr and 128bit IV
300 u8 key[16];
301 u8 reserved[10]; // f/w reserved
302 u8 src_addr[ETH_ALEN];
303 u8 dst_addr[ETH_ALEN];
304 u16 fragment_size;
305} __attribute__ ((packed));
306
307/* Host command data structure */
308struct host_command {
309 u32 host_command; // COMMAND ID
310 u32 host_command1; // COMMAND ID
311 u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID)
312 u32 host_command_length; // LENGTH
313 u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS
314} __attribute__ ((packed));
315
316
317typedef enum {
318 POWER_ON_RESET,
319 EXIT_POWER_DOWN_RESET,
320 SW_RESET,
321 EEPROM_RW,
322 SW_RE_INIT
323} ipw2100_reset_event;
324
325enum {
326 COMMAND = 0xCAFE,
327 DATA,
328 RX
329};
330
331
332struct ipw2100_tx_packet {
333 int type;
334 int index;
335 union {
336 struct { /* COMMAND */
337 struct ipw2100_cmd_header* cmd;
338 dma_addr_t cmd_phys;
339 } c_struct;
340 struct { /* DATA */
341 struct ipw2100_data_header* data;
342 dma_addr_t data_phys;
343 struct ieee80211_txb *txb;
344 } d_struct;
345 } info;
346 int jiffy_start;
347
348 struct list_head list;
349};
350
351
352struct ipw2100_rx_packet {
353 struct ipw2100_rx *rxp;
354 dma_addr_t dma_addr;
355 int jiffy_start;
356 struct sk_buff *skb;
357 struct list_head list;
358};
359
360#define FRAG_DISABLED (1<<31)
361#define RTS_DISABLED (1<<31)
362#define MAX_RTS_THRESHOLD 2304U
363#define MIN_RTS_THRESHOLD 1U
364#define DEFAULT_RTS_THRESHOLD 1000U
365
366#define DEFAULT_BEACON_INTERVAL 100U
367#define DEFAULT_SHORT_RETRY_LIMIT 7U
368#define DEFAULT_LONG_RETRY_LIMIT 4U
369
370struct ipw2100_ordinals {
371 u32 table1_addr;
372 u32 table2_addr;
373 u32 table1_size;
374 u32 table2_size;
375};
376
377/* Host Notification header */
378struct ipw2100_notification {
379 u32 hnhdr_subtype; /* type of host notification */
380 u32 hnhdr_size; /* size in bytes of data
381 or number of entries, if table.
382 Does NOT include header */
383} __attribute__ ((packed));
384
385#define MAX_KEY_SIZE 16
386#define MAX_KEYS 8
387
388#define IPW2100_WEP_ENABLE (1<<1)
389#define IPW2100_WEP_DROP_CLEAR (1<<2)
390
391#define IPW_NONE_CIPHER (1<<0)
392#define IPW_WEP40_CIPHER (1<<1)
393#define IPW_TKIP_CIPHER (1<<2)
394#define IPW_CCMP_CIPHER (1<<4)
395#define IPW_WEP104_CIPHER (1<<5)
396#define IPW_CKIP_CIPHER (1<<6)
397
398#define IPW_AUTH_OPEN 0
399#define IPW_AUTH_SHARED 1
400
401struct statistic {
402 int value;
403 int hi;
404 int lo;
405};
406
407#define INIT_STAT(x) do { \
408 (x)->value = (x)->hi = 0; \
409 (x)->lo = 0x7fffffff; \
410} while (0)
411#define SET_STAT(x,y) do { \
412 (x)->value = y; \
413 if ((x)->value > (x)->hi) (x)->hi = (x)->value; \
414 if ((x)->value < (x)->lo) (x)->lo = (x)->value; \
415} while (0)
416#define INC_STAT(x) do { if (++(x)->value > (x)->hi) (x)->hi = (x)->value; } \
417while (0)
418#define DEC_STAT(x) do { if (--(x)->value < (x)->lo) (x)->lo = (x)->value; } \
419while (0)
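A brief, hypothetical usage sketch of the statistic helpers above, showing how value, hi and lo evolve; the function and its local variable exist only for illustration.

/* Illustration only: how the macros track the current value and its
 * high/low water marks for a hypothetical queue-depth statistic. */
static inline void example_stat_usage(void)
{
	struct statistic depth;

	INIT_STAT(&depth);	/* value = hi = 0, lo = 0x7fffffff */
	SET_STAT(&depth, 5);	/* value = 5, hi = 5, lo = 5 */
	INC_STAT(&depth);	/* value = 6, hi = 6 */
	DEC_STAT(&depth);	/* value = 5, lo stays 5 */
}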
420
421#define IPW2100_ERROR_QUEUE 5
422
423/* Power management code: enable or disable? */
424enum {
425#ifdef CONFIG_PM
426 IPW2100_PM_DISABLED = 0,
427 PM_STATE_SIZE = 16,
428#else
429 IPW2100_PM_DISABLED = 1,
430 PM_STATE_SIZE = 0,
431#endif
432};
433
434#define STATUS_POWERED (1<<0)
435#define STATUS_CMD_ACTIVE (1<<1) /**< host command in progress */
436#define STATUS_RUNNING (1<<2) /* Card initialized, but not enabled */
437#define STATUS_ENABLED (1<<3) /* Card enabled -- can scan,Tx,Rx */
438#define STATUS_STOPPING (1<<4) /* Card is in shutdown phase */
439#define STATUS_INITIALIZED (1<<5) /* Card is ready for external calls */
440#define STATUS_ASSOCIATING (1<<9) /* Associated, but no BSSID yet */
441#define STATUS_ASSOCIATED (1<<10) /* Associated and BSSID valid */
442#define STATUS_INT_ENABLED (1<<11)
443#define STATUS_RF_KILL_HW (1<<12)
444#define STATUS_RF_KILL_SW (1<<13)
445#define STATUS_RF_KILL_MASK (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
446#define STATUS_EXIT_PENDING (1<<14)
447
448#define STATUS_SCAN_PENDING (1<<23)
449#define STATUS_SCANNING (1<<24)
450#define STATUS_SCAN_ABORTING (1<<25)
451#define STATUS_SCAN_COMPLETE (1<<26)
452#define STATUS_WX_EVENT_PENDING (1<<27)
453#define STATUS_RESET_PENDING (1<<29)
454#define STATUS_SECURITY_UPDATED (1<<30) /* Security sync needed */
455
456
457
458/* Internal NIC states */
459#define IPW_STATE_INITIALIZED (1<<0)
460#define IPW_STATE_COUNTRY_FOUND (1<<1)
461#define IPW_STATE_ASSOCIATED (1<<2)
462#define IPW_STATE_ASSN_LOST (1<<3)
463#define IPW_STATE_ASSN_CHANGED (1<<4)
464#define IPW_STATE_SCAN_COMPLETE (1<<5)
465#define IPW_STATE_ENTERED_PSP (1<<6)
466#define IPW_STATE_LEFT_PSP (1<<7)
467#define IPW_STATE_RF_KILL (1<<8)
468#define IPW_STATE_DISABLED (1<<9)
469#define IPW_STATE_POWER_DOWN (1<<10)
470#define IPW_STATE_SCANNING (1<<11)
471
472
473
474#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */
475#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */
476#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. to single BSSID */
477#define CFG_CUSTOM_MAC (1<<3)
478#define CFG_LONG_PREAMBLE (1<<4)
479#define CFG_ASSOCIATE (1<<6)
480#define CFG_FIXED_RATE (1<<7)
481#define CFG_ADHOC_CREATE (1<<8)
482#define CFG_C3_DISABLED (1<<9)
483#define CFG_PASSIVE_SCAN (1<<10)
484
485#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */
486#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
487
488struct ipw2100_priv {
489
490 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */
491 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */
492
493 struct ieee80211_device *ieee;
494 unsigned long status;
495 unsigned long config;
496 unsigned long capability;
497
498 /* Statistics */
499 int resets;
500 int reset_backoff;
501
502 /* Context */
503 u8 essid[IW_ESSID_MAX_SIZE];
504 u8 essid_len;
505 u8 bssid[ETH_ALEN];
506 u8 channel;
507 int last_mode;
508 int cstate_limit;
509
510 unsigned long connect_start;
511 unsigned long last_reset;
512
513 u32 channel_mask;
514 u32 fatal_error;
515 u32 fatal_errors[IPW2100_ERROR_QUEUE];
516 u32 fatal_index;
517 int eeprom_version;
518 int firmware_version;
519 unsigned long hw_features;
520 int hangs;
521 u32 last_rtc;
522 int dump_raw; /* 1 to dump raw bytes in /sys/.../memory */
523 u8* snapshot[0x30];
524
525 u8 mandatory_bssid_mac[ETH_ALEN];
526 u8 mac_addr[ETH_ALEN];
527
528 int power_mode;
529
530 /* WEP data */
531 struct ieee80211_security sec;
532 int messages_sent;
533
534
535 int short_retry_limit;
536 int long_retry_limit;
537
538 u32 rts_threshold;
539 u32 frag_threshold;
540
541 int in_isr;
542
543 u32 tx_rates;
544 int tx_power;
545 u32 beacon_interval;
546
547 char nick[IW_ESSID_MAX_SIZE + 1];
548
549 struct ipw2100_status_queue status_queue;
550
551 struct statistic txq_stat;
552 struct statistic rxq_stat;
553 struct ipw2100_bd_queue rx_queue;
554 struct ipw2100_bd_queue tx_queue;
555 struct ipw2100_rx_packet *rx_buffers;
556
557 struct statistic fw_pend_stat;
558 struct list_head fw_pend_list;
559
560 struct statistic msg_free_stat;
561 struct statistic msg_pend_stat;
562 struct list_head msg_free_list;
563 struct list_head msg_pend_list;
564 struct ipw2100_tx_packet *msg_buffers;
565
566 struct statistic tx_free_stat;
567 struct statistic tx_pend_stat;
568 struct list_head tx_free_list;
569 struct list_head tx_pend_list;
570 struct ipw2100_tx_packet *tx_buffers;
571
572 struct ipw2100_ordinals ordinals;
573
574 struct pci_dev *pci_dev;
575
576 struct proc_dir_entry *dir_dev;
577
578 struct net_device *net_dev;
579 struct iw_statistics wstats;
580
581 struct tasklet_struct irq_tasklet;
582
583 struct workqueue_struct *workqueue;
584 struct work_struct reset_work;
585 struct work_struct security_work;
586 struct work_struct wx_event_work;
587 struct work_struct hang_check;
588 struct work_struct rf_kill;
589
590 u32 interrupts;
591 int tx_interrupts;
592 int rx_interrupts;
593 int inta_other;
594
595 spinlock_t low_lock;
596 struct semaphore action_sem;
597 struct semaphore adapter_sem;
598
599 wait_queue_head_t wait_command_queue;
600};
601
602
603/*********************************************************
604 * Host Command -> From Driver to FW
605 *********************************************************/
606
607/**
608 * Host command identifiers
609 */
610#define HOST_COMPLETE 2
611#define SYSTEM_CONFIG 6
612#define SSID 8
613#define MANDATORY_BSSID 9
614#define AUTHENTICATION_TYPE 10
615#define ADAPTER_ADDRESS 11
616#define PORT_TYPE 12
617#define INTERNATIONAL_MODE 13
618#define CHANNEL 14
619#define RTS_THRESHOLD 15
620#define FRAG_THRESHOLD 16
621#define POWER_MODE 17
622#define TX_RATES 18
623#define BASIC_TX_RATES 19
624#define WEP_KEY_INFO 20
625#define WEP_KEY_INDEX 25
626#define WEP_FLAGS 26
627#define ADD_MULTICAST 27
628#define CLEAR_ALL_MULTICAST 28
629#define BEACON_INTERVAL 29
630#define ATIM_WINDOW 30
631#define CLEAR_STATISTICS 31
632#define SEND 33
633#define TX_POWER_INDEX 36
634#define BROADCAST_SCAN 43
635#define CARD_DISABLE 44
636#define PREFERRED_BSSID 45
637#define SET_SCAN_OPTIONS 46
638#define SCAN_DWELL_TIME 47
639#define SWEEP_TABLE 48
640#define AP_OR_STATION_TABLE 49
641#define GROUP_ORDINALS 50
642#define SHORT_RETRY_LIMIT 51
643#define LONG_RETRY_LIMIT 52
644
645#define HOST_PRE_POWER_DOWN 58
646#define CARD_DISABLE_PHY_OFF 61
647#define MSDU_TX_RATES 62
648
649
650/* Rogue AP Detection */
651#define SET_STATION_STAT_BITS 64
652#define CLEAR_STATIONS_STAT_BITS 65
653#define LEAP_ROGUE_MODE 66 //TODO tbw replaced by CFG_LEAP_ROGUE_AP
654#define SET_SECURITY_INFORMATION 67
655#define DISASSOCIATION_BSSID 68
656#define SET_WPA_IE 69
657
658
659
660/* system configuration bit mask: */
661#define IPW_CFG_MONITOR 0x00004
662#define IPW_CFG_PREAMBLE_AUTO 0x00010
663#define IPW_CFG_IBSS_AUTO_START 0x00020
664#define IPW_CFG_LOOPBACK 0x00100
665#define IPW_CFG_ANSWER_BCSSID_PROBE 0x00800
666#define IPW_CFG_BT_SIDEBAND_SIGNAL 0x02000
667#define IPW_CFG_802_1x_ENABLE 0x04000
668#define IPW_CFG_BSS_MASK 0x08000
669#define IPW_CFG_IBSS_MASK 0x10000
670
671#define IPW_SCAN_NOASSOCIATE (1<<0)
672#define IPW_SCAN_MIXED_CELL (1<<1)
673/* RESERVED (1<<2) */
674#define IPW_SCAN_PASSIVE (1<<3)
675
676#define IPW_NIC_FATAL_ERROR 0x2A7F0
677#define IPW_ERROR_ADDR(x) (x & 0x3FFFF)
678#define IPW_ERROR_CODE(x) ((x & 0xFF000000) >> 24)
679#define IPW2100_ERR_C3_CORRUPTION (0x10 << 24)
680#define IPW2100_ERR_MSG_TIMEOUT (0x11 << 24)
681#define IPW2100_ERR_FW_LOAD (0x12 << 24)
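A hedged sketch of how the decode macros above could be applied to a fatal-error word read from IPW_NIC_FATAL_ERROR; the helper and the example value in its comment are hypothetical.

/* Illustration only: split a fatal-error word into its code and address.
 * For a hypothetical value 0x11012345, IPW_ERROR_CODE() yields 0x11
 * (IPW2100_ERR_MSG_TIMEOUT >> 24) and IPW_ERROR_ADDR() yields 0x12345. */
static inline void example_decode_fatal_error(u32 fatal)
{
	u32 code = IPW_ERROR_CODE(fatal);	/* upper 8 bits */
	u32 addr = IPW_ERROR_ADDR(fatal);	/* lower 18 bits */

	(void)code;
	(void)addr;
}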
682
683#define IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND 0x200
684#define IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x0D80
685
686#define IPW_MEM_HOST_SHARED_RX_BD_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x40)
687#define IPW_MEM_HOST_SHARED_RX_STATUS_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x44)
688#define IPW_MEM_HOST_SHARED_RX_BD_SIZE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x48)
689#define IPW_MEM_HOST_SHARED_RX_READ_INDEX (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0xa0)
690
691#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x00)
692#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x04)
693#define IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x80)
694
695#define IPW_MEM_HOST_SHARED_RX_WRITE_INDEX \
696 (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND + 0x20)
697
698#define IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX \
699 (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND)
700
701#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1 (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x180)
702#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2 (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x184)
703
704#define IPW2100_INTA_TX_TRANSFER (0x00000001) // Bit 0 (LSB)
705#define IPW2100_INTA_RX_TRANSFER (0x00000002) // Bit 1
706#define IPW2100_INTA_TX_COMPLETE (0x00000004) // Bit 2
707#define IPW2100_INTA_EVENT_INTERRUPT (0x00000008) // Bit 3
708#define IPW2100_INTA_STATUS_CHANGE (0x00000010) // Bit 4
709#define IPW2100_INTA_BEACON_PERIOD_EXPIRED (0x00000020) // Bit 5
710#define IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE (0x00010000) // Bit 16
711#define IPW2100_INTA_FW_INIT_DONE (0x01000000) // Bit 24
712#define IPW2100_INTA_FW_CALIBRATION_CALC (0x02000000) // Bit 25
713#define IPW2100_INTA_FATAL_ERROR (0x40000000) // Bit 30
714#define IPW2100_INTA_PARITY_ERROR (0x80000000) // Bit 31 (MSB)
715
716#define IPW_AUX_HOST_RESET_REG_PRINCETON_RESET (0x00000001)
717#define IPW_AUX_HOST_RESET_REG_FORCE_NMI (0x00000002)
718#define IPW_AUX_HOST_RESET_REG_PCI_HOST_CLUSTER_FATAL_NMI (0x00000004)
719#define IPW_AUX_HOST_RESET_REG_CORE_FATAL_NMI (0x00000008)
720#define IPW_AUX_HOST_RESET_REG_SW_RESET (0x00000080)
721#define IPW_AUX_HOST_RESET_REG_MASTER_DISABLED (0x00000100)
722#define IPW_AUX_HOST_RESET_REG_STOP_MASTER (0x00000200)
723
724#define IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY (0x00000001) // Bit 0 (LSB)
725#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY (0x00000002) // Bit 1
726#define IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE (0x00000004) // Bit 2
727#define IPW_AUX_HOST_GP_CNTRL_BITS_SYS_CONFIG (0x000007c0) // Bits 6-10
728#define IPW_AUX_HOST_GP_CNTRL_BIT_BUS_TYPE (0x00000200) // Bit 9
729#define IPW_AUX_HOST_GP_CNTRL_BIT_BAR0_BLOCK_SIZE (0x00000400) // Bit 10
730#define IPW_AUX_HOST_GP_CNTRL_BIT_USB_MODE (0x20000000) // Bit 29
731#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_FORCES_SYS_CLK (0x40000000) // Bit 30
732#define IPW_AUX_HOST_GP_CNTRL_BIT_FW_FORCES_SYS_CLK (0x80000000) // Bit 31 (MSB)
733
734#define IPW_BIT_GPIO_GPIO1_MASK 0x0000000C
735#define IPW_BIT_GPIO_GPIO3_MASK 0x000000C0
736#define IPW_BIT_GPIO_GPIO1_ENABLE 0x00000008
737#define IPW_BIT_GPIO_RF_KILL 0x00010000
738
739#define IPW_BIT_GPIO_LED_OFF 0x00002000 // Bit 13 = 1
740
741#define IPW_REG_DOMAIN_0_OFFSET 0x0000
742#define IPW_REG_DOMAIN_1_OFFSET IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND
743
744#define IPW_REG_INTA IPW_REG_DOMAIN_0_OFFSET + 0x0008
745#define IPW_REG_INTA_MASK IPW_REG_DOMAIN_0_OFFSET + 0x000C
746#define IPW_REG_INDIRECT_ACCESS_ADDRESS IPW_REG_DOMAIN_0_OFFSET + 0x0010
747#define IPW_REG_INDIRECT_ACCESS_DATA IPW_REG_DOMAIN_0_OFFSET + 0x0014
748#define IPW_REG_AUTOINCREMENT_ADDRESS IPW_REG_DOMAIN_0_OFFSET + 0x0018
749#define IPW_REG_AUTOINCREMENT_DATA IPW_REG_DOMAIN_0_OFFSET + 0x001C
750#define IPW_REG_RESET_REG IPW_REG_DOMAIN_0_OFFSET + 0x0020
751#define IPW_REG_GP_CNTRL IPW_REG_DOMAIN_0_OFFSET + 0x0024
752#define IPW_REG_GPIO IPW_REG_DOMAIN_0_OFFSET + 0x0030
753#define IPW_REG_FW_TYPE IPW_REG_DOMAIN_1_OFFSET + 0x0188
754#define IPW_REG_FW_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x018C
755#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
756
757#define IPW_REG_INDIRECT_ADDR_MASK 0x00FFFFFC
758
759#define IPW_INTERRUPT_MASK 0xC1010013
760
761#define IPW2100_CONTROL_REG 0x220000
762#define IPW2100_CONTROL_PHY_OFF 0x8
763
764#define IPW2100_COMMAND 0x00300004
765#define IPW2100_COMMAND_PHY_ON 0x0
766#define IPW2100_COMMAND_PHY_OFF 0x1
767
768/* in DEBUG_AREA, memory values always read back as 0xd55555d5 */
769#define IPW_REG_DOA_DEBUG_AREA_START IPW_REG_DOMAIN_0_OFFSET + 0x0090
770#define IPW_REG_DOA_DEBUG_AREA_END IPW_REG_DOMAIN_0_OFFSET + 0x00FF
771#define IPW_DATA_DOA_DEBUG_VALUE 0xd55555d5
772
773#define IPW_INTERNAL_REGISTER_HALT_AND_RESET 0x003000e0
774
775#define IPW_WAIT_CLOCK_STABILIZATION_DELAY 50 // micro seconds
776#define IPW_WAIT_RESET_ARC_COMPLETE_DELAY 10 // micro seconds
777#define IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY 10 // micro seconds
778
779// BD ring queue read/write difference
780#define IPW_BD_QUEUE_W_R_MIN_SPARE 2
781
782#define IPW_CACHE_LINE_LENGTH_DEFAULT 0x80
783
784#define IPW_CARD_DISABLE_PHY_OFF_COMPLETE_WAIT	    100	// 100 milliseconds
785#define IPW_PREPARE_POWER_DOWN_COMPLETE_WAIT	    100	// 100 milliseconds
786
787
788
789
790#define IPW_HEADER_802_11_SIZE sizeof(struct ieee80211_hdr_3addr)
791#define IPW_MAX_80211_PAYLOAD_SIZE 2304U
792#define IPW_MAX_802_11_PAYLOAD_LENGTH 2312
793#define IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH 1536
794#define IPW_MIN_ACCEPTABLE_RX_FRAME_LENGTH 60
795#define IPW_MAX_ACCEPTABLE_RX_FRAME_LENGTH \
796 (IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH + IPW_HEADER_802_11_SIZE - \
797 sizeof(struct ethhdr))
798
799#define IPW_802_11_FCS_LENGTH 4
800#define IPW_RX_NIC_BUFFER_LENGTH \
801 (IPW_MAX_802_11_PAYLOAD_LENGTH + IPW_HEADER_802_11_SIZE + \
802 IPW_802_11_FCS_LENGTH)
803
804#define IPW_802_11_PAYLOAD_OFFSET \
805 (sizeof(struct ieee80211_hdr_3addr) + \
806 sizeof(struct ieee80211_snap_hdr))
807
808struct ipw2100_rx {
809 union {
810 unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
811 struct ieee80211_hdr header;
812 u32 status;
813 struct ipw2100_notification notification;
814 struct ipw2100_cmd_header command;
815 } rx_data;
816} __attribute__ ((packed));
817
818/* Bits 0-7 are for 802.11b tx rates. Bits 5-7 are reserved */
819#define TX_RATE_1_MBIT 0x0001
820#define TX_RATE_2_MBIT 0x0002
821#define TX_RATE_5_5_MBIT 0x0004
822#define TX_RATE_11_MBIT 0x0008
823#define TX_RATE_MASK 0x000F
824#define DEFAULT_TX_RATES 0x000F
825
826#define IPW_POWER_MODE_CAM 0x00 //(always on)
827#define IPW_POWER_INDEX_1 0x01
828#define IPW_POWER_INDEX_2 0x02
829#define IPW_POWER_INDEX_3 0x03
830#define IPW_POWER_INDEX_4 0x04
831#define IPW_POWER_INDEX_5 0x05
832#define IPW_POWER_AUTO 0x06
833#define IPW_POWER_MASK 0x0F
834#define IPW_POWER_ENABLED 0x10
835#define IPW_POWER_LEVEL(x) ((x) & IPW_POWER_MASK)
836
837#define IPW_TX_POWER_AUTO 0
838#define IPW_TX_POWER_ENHANCED 1
839
840#define IPW_TX_POWER_DEFAULT 32
841#define IPW_TX_POWER_MIN 0
842#define IPW_TX_POWER_MAX 16
843#define IPW_TX_POWER_MIN_DBM (-12)
844#define IPW_TX_POWER_MAX_DBM 16
845
846#define FW_SCAN_DONOT_ASSOCIATE     0x0001 // Don't attempt to associate after scan
847#define FW_SCAN_PASSIVE             0x0008 // Force PASSIVE scan
848
849#define REG_MIN_CHANNEL 0
850#define REG_MAX_CHANNEL 14
851
852#define REG_CHANNEL_MASK 0x00003FFF
853#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff
854
855#define DIVERSITY_EITHER 0 // Use both antennas
856#define DIVERSITY_ANTENNA_A 1 // Use antenna A
857#define DIVERSITY_ANTENNA_B 2 // Use antenna B
858
859
860#define HOST_COMMAND_WAIT 0
861#define HOST_COMMAND_NO_WAIT 1
862
863#define LOCK_NONE 0
864#define LOCK_DRIVER 1
865#define LOCK_FW 2
866
867#define TYPE_SWEEP_ORD 0x000D
868#define TYPE_IBSS_STTN_ORD 0x000E
869#define TYPE_BSS_AP_ORD 0x000F
870#define TYPE_RAW_BEACON_ENTRY 0x0010
871#define TYPE_CALIBRATION_DATA 0x0011
872#define TYPE_ROGUE_AP_DATA 0x0012
873#define TYPE_ASSOCIATION_REQUEST 0x0013
874#define TYPE_REASSOCIATION_REQUEST 0x0014
875
876
877#define HW_FEATURE_RFKILL (0x0001)
878#define RF_KILLSWITCH_OFF (1)
879#define RF_KILLSWITCH_ON (0)
880
881#define IPW_COMMAND_POOL_SIZE 40
882
883#define IPW_START_ORD_TAB_1 1
884#define IPW_START_ORD_TAB_2 1000
885
886#define IPW_ORD_TAB_1_ENTRY_SIZE sizeof(u32)
887
888#define IS_ORDINAL_TABLE_ONE(mgr,id) \
889 ((id >= IPW_START_ORD_TAB_1) && (id < mgr->table1_size))
890#define IS_ORDINAL_TABLE_TWO(mgr,id) \
891 ((id >= IPW_START_ORD_TAB_2) && (id < (mgr->table2_size + IPW_START_ORD_TAB_2)))
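To make the two range checks above concrete, a small hypothetical helper follows; it assumes the struct ipw2100_ordinals defined earlier is what the macros' mgr argument refers to.

/* Illustration only: classify an ordinal id by table (0 = neither). */
static inline int example_ordinal_table(const struct ipw2100_ordinals *ord, u32 id)
{
	if (IS_ORDINAL_TABLE_ONE(ord, id))
		return 1;
	if (IS_ORDINAL_TABLE_TWO(ord, id))
		return 2;
	return 0;
}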
892
893#define BSS_ID_LENGTH 6
894
895// Fixed size data: Ordinal Table 1
896typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW
897// Transmit statistics
898 IPW_ORD_STAT_TX_HOST_REQUESTS = 1,// # of requested Host Tx's (MSDU)
899 IPW_ORD_STAT_TX_HOST_COMPLETE, // # of successful Host Tx's (MSDU)
900 IPW_ORD_STAT_TX_DIR_DATA, // # of successful Directed Tx's (MSDU)
901
902 IPW_ORD_STAT_TX_DIR_DATA1 = 4, // # of successful Directed Tx's (MSDU) @ 1MB
903 IPW_ORD_STAT_TX_DIR_DATA2, // # of successful Directed Tx's (MSDU) @ 2MB
904 IPW_ORD_STAT_TX_DIR_DATA5_5, // # of successful Directed Tx's (MSDU) @ 5_5MB
905 IPW_ORD_STAT_TX_DIR_DATA11, // # of successful Directed Tx's (MSDU) @ 11MB
906 IPW_ORD_STAT_TX_DIR_DATA22, // # of successful Directed Tx's (MSDU) @ 22MB
907
908 IPW_ORD_STAT_TX_NODIR_DATA1 = 13,// # of successful Non_Directed Tx's (MSDU) @ 1MB
909 IPW_ORD_STAT_TX_NODIR_DATA2, // # of successful Non_Directed Tx's (MSDU) @ 2MB
910 IPW_ORD_STAT_TX_NODIR_DATA5_5, // # of successful Non_Directed Tx's (MSDU) @ 5.5MB
911 IPW_ORD_STAT_TX_NODIR_DATA11, // # of successful Non_Directed Tx's (MSDU) @ 11MB
912
913 IPW_ORD_STAT_NULL_DATA = 21, // # of successful NULL data Tx's
914 IPW_ORD_STAT_TX_RTS, // # of successful Tx RTS
915 IPW_ORD_STAT_TX_CTS, // # of successful Tx CTS
916 IPW_ORD_STAT_TX_ACK, // # of successful Tx ACK
917 IPW_ORD_STAT_TX_ASSN, // # of successful Association Tx's
918 IPW_ORD_STAT_TX_ASSN_RESP, // # of successful Association response Tx's
919 IPW_ORD_STAT_TX_REASSN, // # of successful Reassociation Tx's
920 IPW_ORD_STAT_TX_REASSN_RESP, // # of successful Reassociation response Tx's
921 IPW_ORD_STAT_TX_PROBE, // # of probes successfully transmitted
922 IPW_ORD_STAT_TX_PROBE_RESP, // # of probe responses successfully transmitted
923 IPW_ORD_STAT_TX_BEACON, // # of tx beacon
924 IPW_ORD_STAT_TX_ATIM, // # of Tx ATIM
925 IPW_ORD_STAT_TX_DISASSN, // # of successful Disassociation TX
926 IPW_ORD_STAT_TX_AUTH, // # of successful Authentication Tx
927 IPW_ORD_STAT_TX_DEAUTH, // # of successful Deauthentication TX
928
929 IPW_ORD_STAT_TX_TOTAL_BYTES = 41,// Total successful Tx data bytes
930 IPW_ORD_STAT_TX_RETRIES, // # of Tx retries
931 IPW_ORD_STAT_TX_RETRY1, // # of Tx retries at 1MBPS
932 IPW_ORD_STAT_TX_RETRY2, // # of Tx retries at 2MBPS
933 IPW_ORD_STAT_TX_RETRY5_5, // # of Tx retries at 5.5MBPS
934 IPW_ORD_STAT_TX_RETRY11, // # of Tx retries at 11MBPS
935
936 IPW_ORD_STAT_TX_FAILURES = 51, // # of Tx Failures
937 IPW_ORD_STAT_TX_ABORT_AT_HOP, //NS // # of Tx's aborted at hop time
938 IPW_ORD_STAT_TX_MAX_TRIES_IN_HOP,// # of times max tries in a hop failed
939 IPW_ORD_STAT_TX_ABORT_LATE_DMA, //NS // # of times tx aborted due to late dma setup
940 IPW_ORD_STAT_TX_ABORT_STX, //NS // # of times backoff aborted
941 IPW_ORD_STAT_TX_DISASSN_FAIL, // # of times disassociation failed
942 IPW_ORD_STAT_TX_ERR_CTS, // # of missed/bad CTS frames
943 IPW_ORD_STAT_TX_BPDU, //NS // # of spanning tree BPDUs sent
944 IPW_ORD_STAT_TX_ERR_ACK, // # of tx err due to acks
945
946 // Receive statistics
947 IPW_ORD_STAT_RX_HOST = 61, // # of packets passed to host
948 IPW_ORD_STAT_RX_DIR_DATA, // # of directed packets
949 IPW_ORD_STAT_RX_DIR_DATA1, // # of directed packets at 1MB
950 IPW_ORD_STAT_RX_DIR_DATA2, // # of directed packets at 2MB
951 IPW_ORD_STAT_RX_DIR_DATA5_5, // # of directed packets at 5.5MB
952 IPW_ORD_STAT_RX_DIR_DATA11, // # of directed packets at 11MB
953 IPW_ORD_STAT_RX_DIR_DATA22, // # of directed packets at 22MB
954
955 IPW_ORD_STAT_RX_NODIR_DATA = 71,// # of nondirected packets
956 IPW_ORD_STAT_RX_NODIR_DATA1, // # of nondirected packets at 1MB
957 IPW_ORD_STAT_RX_NODIR_DATA2, // # of nondirected packets at 2MB
958 IPW_ORD_STAT_RX_NODIR_DATA5_5, // # of nondirected packets at 5.5MB
959 IPW_ORD_STAT_RX_NODIR_DATA11, // # of nondirected packets at 11MB
960
961 IPW_ORD_STAT_RX_NULL_DATA = 80, // # of null data rx's
962 IPW_ORD_STAT_RX_POLL, //NS // # of poll rx
963 IPW_ORD_STAT_RX_RTS, // # of Rx RTS
964 IPW_ORD_STAT_RX_CTS, // # of Rx CTS
965 IPW_ORD_STAT_RX_ACK, // # of Rx ACK
966 IPW_ORD_STAT_RX_CFEND, // # of Rx CF End
967 IPW_ORD_STAT_RX_CFEND_ACK, // # of Rx CF End + CF Ack
968 IPW_ORD_STAT_RX_ASSN, // # of Association Rx's
969 IPW_ORD_STAT_RX_ASSN_RESP, // # of Association response Rx's
970 IPW_ORD_STAT_RX_REASSN, // # of Reassociation Rx's
971 IPW_ORD_STAT_RX_REASSN_RESP, // # of Reassociation response Rx's
972 IPW_ORD_STAT_RX_PROBE, // # of probe Rx's
973 IPW_ORD_STAT_RX_PROBE_RESP, // # of probe response Rx's
974 IPW_ORD_STAT_RX_BEACON, // # of Rx beacon
975 IPW_ORD_STAT_RX_ATIM, // # of Rx ATIM
976 IPW_ORD_STAT_RX_DISASSN, // # of disassociation Rx
977 IPW_ORD_STAT_RX_AUTH, // # of authentication Rx
978 IPW_ORD_STAT_RX_DEAUTH, // # of deauthentication Rx
979
980 IPW_ORD_STAT_RX_TOTAL_BYTES = 101,// Total rx data bytes received
981 IPW_ORD_STAT_RX_ERR_CRC, // # of packets with Rx CRC error
982 IPW_ORD_STAT_RX_ERR_CRC1, // # of Rx CRC errors at 1MB
983 IPW_ORD_STAT_RX_ERR_CRC2, // # of Rx CRC errors at 2MB
984 IPW_ORD_STAT_RX_ERR_CRC5_5, // # of Rx CRC errors at 5.5MB
985 IPW_ORD_STAT_RX_ERR_CRC11, // # of Rx CRC errors at 11MB
986
987 IPW_ORD_STAT_RX_DUPLICATE1 = 112, // # of duplicate rx packets at 1MB
988 IPW_ORD_STAT_RX_DUPLICATE2, // # of duplicate rx packets at 2MB
989 IPW_ORD_STAT_RX_DUPLICATE5_5, // # of duplicate rx packets at 5.5MB
990 IPW_ORD_STAT_RX_DUPLICATE11, // # of duplicate rx packets at 11MB
991 IPW_ORD_STAT_RX_DUPLICATE = 119, // # of duplicate rx packets
992
993 IPW_ORD_PERS_DB_LOCK = 120, // # locking fw permanent db
994 IPW_ORD_PERS_DB_SIZE, // # size of fw permanent db
995 IPW_ORD_PERS_DB_ADDR, // # address of fw permanent db
996 IPW_ORD_STAT_RX_INVALID_PROTOCOL, // # of rx frames with invalid protocol
997 IPW_ORD_SYS_BOOT_TIME, // # Boot time
998 IPW_ORD_STAT_RX_NO_BUFFER, // # of rx frames rejected due to no buffer
999 IPW_ORD_STAT_RX_ABORT_LATE_DMA, //NS // # of rx frames rejected due to dma setup too late
1000 IPW_ORD_STAT_RX_ABORT_AT_HOP, //NS // # of rx frames aborted due to hop
1001 IPW_ORD_STAT_RX_MISSING_FRAG, // # of rx frames dropped due to missing fragment
1002 IPW_ORD_STAT_RX_ORPHAN_FRAG, // # of rx frames dropped due to non-sequential fragment
1003 IPW_ORD_STAT_RX_ORPHAN_FRAME, // # of rx frames dropped due to unmatched 1st frame
1004 IPW_ORD_STAT_RX_FRAG_AGEOUT, // # of rx frames dropped due to uncompleted frame
1005 IPW_ORD_STAT_RX_BAD_SSID, //NS // Bad SSID (unused)
1006 IPW_ORD_STAT_RX_ICV_ERRORS, // # of ICV errors during decryption
1007
1008// PSP Statistics
1009 IPW_ORD_STAT_PSP_SUSPENSION = 137,// # of times adapter suspended
1010 IPW_ORD_STAT_PSP_BCN_TIMEOUT, // # of beacon timeout
1011 IPW_ORD_STAT_PSP_POLL_TIMEOUT, // # of poll response timeouts
1012	IPW_ORD_STAT_PSP_NONDIR_TIMEOUT,// # of timeouts waiting for last broadcast/multicast pkt
1013 IPW_ORD_STAT_PSP_RX_DTIMS, // # of PSP DTIMs received
1014 IPW_ORD_STAT_PSP_RX_TIMS, // # of PSP TIMs received
1015 IPW_ORD_STAT_PSP_STATION_ID, // PSP Station ID
1016
1017// Association and roaming
1018 IPW_ORD_LAST_ASSN_TIME = 147, // RTC time of last association
1019 IPW_ORD_STAT_PERCENT_MISSED_BCNS,// current calculation of % missed beacons
1020 IPW_ORD_STAT_PERCENT_RETRIES, // current calculation of % missed tx retries
1021 IPW_ORD_ASSOCIATED_AP_PTR, // If associated, this is ptr to the associated
1022 // AP table entry. set to 0 if not associated
1023	IPW_ORD_AVAILABLE_AP_CNT,	// # of APs described in the AP table
1024 IPW_ORD_AP_LIST_PTR, // Ptr to list of available APs
1025 IPW_ORD_STAT_AP_ASSNS, // # of associations
1026 IPW_ORD_STAT_ASSN_FAIL, // # of association failures
1027	IPW_ORD_STAT_ASSN_RESP_FAIL,	// # of failures due to response failure
1028 IPW_ORD_STAT_FULL_SCANS, // # of full scans
1029
1030 IPW_ORD_CARD_DISABLED, // # Card Disabled
1031 IPW_ORD_STAT_ROAM_INHIBIT, // # of times roaming was inhibited due to ongoing activity
1032 IPW_FILLER_40,
1033 IPW_ORD_RSSI_AT_ASSN = 160, // RSSI of associated AP at time of association
1034 IPW_ORD_STAT_ASSN_CAUSE1, // # of reassociations due to no tx from AP in last N
1035					// hops or no probe responses in last 3 minutes
1036 IPW_ORD_STAT_ASSN_CAUSE2, // # of reassociations due to poor tx/rx quality
1037 IPW_ORD_STAT_ASSN_CAUSE3, // # of reassociations due to tx/rx quality with excessive
1038 // load at the AP
1039	IPW_ORD_STAT_ASSN_CAUSE4,	// # of reassociations because the AP RSSI level fell below
1040					//    the eligible group
1041 IPW_ORD_STAT_ASSN_CAUSE5, // # of reassociations due to load leveling
1042 IPW_ORD_STAT_ASSN_CAUSE6, //NS // # of reassociations due to dropped by Ap
1043 IPW_FILLER_41,
1044 IPW_FILLER_42,
1045 IPW_FILLER_43,
1046 IPW_ORD_STAT_AUTH_FAIL, // # of times authentication failed
1047 IPW_ORD_STAT_AUTH_RESP_FAIL, // # of times authentication response failed
1048 IPW_ORD_STATION_TABLE_CNT, // # of entries in association table
1049
1050// Other statistics
1051 IPW_ORD_RSSI_AVG_CURR = 173, // Current avg RSSI
1052 IPW_ORD_STEST_RESULTS_CURR, //NS // Current self test results word
1053	IPW_ORD_STEST_RESULTS_CUM,	//NS // Cumulative self test results word
1054 IPW_ORD_SELF_TEST_STATUS, //NS //
1055 IPW_ORD_POWER_MGMT_MODE, // Power mode - 0=CAM, 1=PSP
1056 IPW_ORD_POWER_MGMT_INDEX, //NS //
1057 IPW_ORD_COUNTRY_CODE, // IEEE country code as recv'd from beacon
1058	IPW_ORD_COUNTRY_CHANNELS,	// channels supported by country
1059// IPW_ORD_COUNTRY_CHANNELS:
1060// For 11b the lower 2 bytes are used for channels from 1-14
1061// and the upper 2 bytes are not used.
1062 IPW_ORD_RESET_CNT, // # of adapter resets (warm)
1063 IPW_ORD_BEACON_INTERVAL, // Beacon interval
1064
1065 IPW_ORD_PRINCETON_VERSION = 184, //NS // Princeton Version
1066 IPW_ORD_ANTENNA_DIVERSITY, // TRUE if antenna diversity is disabled
1067 IPW_ORD_CCA_RSSI, //NS // CCA RSSI value (factory programmed)
1068 IPW_ORD_STAT_EEPROM_UPDATE, //NS // # of times config EEPROM updated
1069 IPW_ORD_DTIM_PERIOD, // # of beacon intervals between DTIMs
1070 IPW_ORD_OUR_FREQ, // current radio freq lower digits - channel ID
1071
1072 IPW_ORD_RTC_TIME = 190, // current RTC time
1073 IPW_ORD_PORT_TYPE, // operating mode
1074 IPW_ORD_CURRENT_TX_RATE, // current tx rate
1075 IPW_ORD_SUPPORTED_RATES, // Bitmap of supported tx rates
1076 IPW_ORD_ATIM_WINDOW, // current ATIM Window
1077 IPW_ORD_BASIC_RATES, // bitmap of basic tx rates
1078 IPW_ORD_NIC_HIGHEST_RATE, // bitmap of basic tx rates
1079 IPW_ORD_AP_HIGHEST_RATE, // bitmap of basic tx rates
1080 IPW_ORD_CAPABILITIES, // Management frame capability field
1081 IPW_ORD_AUTH_TYPE, // Type of authentication
1082 IPW_ORD_RADIO_TYPE, // Adapter card platform type
1083 IPW_ORD_RTS_THRESHOLD = 201, // Min length of packet after which RTS handshaking is used
1084 IPW_ORD_INT_MODE, // International mode
1085 IPW_ORD_FRAGMENTATION_THRESHOLD, // protocol frag threshold
1086 IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS, // EEPROM offset in SRAM
1087 IPW_ORD_EEPROM_SRAM_DB_BLOCK_SIZE, // EEPROM size in SRAM
1088	IPW_ORD_EEPROM_SKU_CAPABILITY,	// EEPROM SKU Capability (ordinal 206)
1089 IPW_ORD_EEPROM_IBSS_11B_CHANNELS, // EEPROM IBSS 11b channel set
1090
1091 IPW_ORD_MAC_VERSION = 209, // MAC Version
1092 IPW_ORD_MAC_REVISION, // MAC Revision
1093 IPW_ORD_RADIO_VERSION, // Radio Version
1094 IPW_ORD_NIC_MANF_DATE_TIME, // MANF Date/Time STAMP
1095 IPW_ORD_UCODE_VERSION, // Ucode Version
1096 IPW_ORD_HW_RF_SWITCH_STATE = 214, // HW RF Kill Switch State
1097} ORDINALTABLE1;
1098
1099// ordinal table 2
1100// Variable length data:
1101#define IPW_FIRST_VARIABLE_LENGTH_ORDINAL 1001
1102
1103typedef enum _ORDINAL_TABLE_2 { // NS - means Not Supported by FW
1104 IPW_ORD_STAT_BASE = 1000, // contains number of variable ORDs
1105 IPW_ORD_STAT_ADAPTER_MAC = 1001, // 6 bytes: our adapter MAC address
1106 IPW_ORD_STAT_PREFERRED_BSSID = 1002, // 6 bytes: BSSID of the preferred AP
1107 IPW_ORD_STAT_MANDATORY_BSSID = 1003, // 6 bytes: BSSID of the mandatory AP
1108 IPW_FILL_1, //NS //
1109	IPW_ORD_STAT_COUNTRY_TEXT = 1005,	// 36 bytes: Country name text; first two bytes are the country code
1110 IPW_ORD_STAT_ASSN_SSID = 1006, // 32 bytes: ESSID String
1111 IPW_ORD_STATION_TABLE = 1007, // ? bytes: Station/AP table (via Direct SSID Scans)
1112 IPW_ORD_STAT_SWEEP_TABLE = 1008, // ? bytes: Sweep/Host Table table (via Broadcast Scans)
1113 IPW_ORD_STAT_ROAM_LOG = 1009, // ? bytes: Roaming log
1114 IPW_ORD_STAT_RATE_LOG = 1010, //NS // 0 bytes: Rate log
1115 IPW_ORD_STAT_FIFO = 1011, //NS // 0 bytes: Fifo buffer data structures
1116 IPW_ORD_STAT_FW_VER_NUM = 1012, // 14 bytes: fw version ID string as in (a.bb.ccc; "0.08.011")
1117 IPW_ORD_STAT_FW_DATE = 1013, // 14 bytes: fw date string (mmm dd yyyy; "Mar 13 2002")
1118 IPW_ORD_STAT_ASSN_AP_BSSID = 1014, // 6 bytes: MAC address of associated AP
1119 IPW_ORD_STAT_DEBUG = 1015, //NS // ? bytes:
1120 IPW_ORD_STAT_NIC_BPA_NUM = 1016, // 11 bytes: NIC BPA number in ASCII
1121 IPW_ORD_STAT_UCODE_DATE = 1017, // 5 bytes: uCode date
1122 IPW_ORD_SECURITY_NGOTIATION_RESULT = 1018,
1123} ORDINALTABLE2; // NS - means Not Supported by FW
1124
1125#define IPW_LAST_VARIABLE_LENGTH_ORDINAL 1018
1126
1127#ifndef WIRELESS_SPY
1128#define WIRELESS_SPY // enable iwspy support
1129#endif
1130
1131#define IPW_HOST_FW_SHARED_AREA0 0x0002f200
1132#define IPW_HOST_FW_SHARED_AREA0_END 0x0002f510 // 0x310 bytes
1133
1134#define IPW_HOST_FW_SHARED_AREA1 0x0002f610
1135#define IPW_HOST_FW_SHARED_AREA1_END 0x0002f630 // 0x20 bytes
1136
1137#define IPW_HOST_FW_SHARED_AREA2 0x0002fa00
1138#define IPW_HOST_FW_SHARED_AREA2_END 0x0002fa20 // 0x20 bytes
1139
1140#define IPW_HOST_FW_SHARED_AREA3 0x0002fc00
1141#define IPW_HOST_FW_SHARED_AREA3_END 0x0002fc10 // 0x10 bytes
1142
1143#define IPW_HOST_FW_INTERRUPT_AREA 0x0002ff80
1144#define IPW_HOST_FW_INTERRUPT_AREA_END 0x00030000 // 0x80 bytes
1145
1146struct ipw2100_fw_chunk {
1147 unsigned char *buf;
1148 long len;
1149 long pos;
1150 struct list_head list;
1151};
1152
1153struct ipw2100_fw_chunk_set {
1154 const void *data;
1155 unsigned long size;
1156};
1157
1158struct ipw2100_fw {
1159 int version;
1160 struct ipw2100_fw_chunk_set fw;
1161 struct ipw2100_fw_chunk_set uc;
1162 const struct firmware *fw_entry;
1163};
1164
1165#define MAX_FW_VERSION_LEN 14
1166
1167#endif /* _IPW2100_H */
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
new file mode 100644
index 000000000000..b7f275c00de3
--- /dev/null
+++ b/drivers/net/wireless/ipw2200.c
@@ -0,0 +1,7383 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include "ipw2200.h"
34
35#define IPW2200_VERSION "1.0.0"
36#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
37#define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
38#define DRV_VERSION IPW2200_VERSION
39
40MODULE_DESCRIPTION(DRV_DESCRIPTION);
41MODULE_VERSION(DRV_VERSION);
42MODULE_AUTHOR(DRV_COPYRIGHT);
43MODULE_LICENSE("GPL");
44
45static int debug = 0;
46static int channel = 0;
47static char *ifname;
48static int mode = 0;
49
50static u32 ipw_debug_level;
51static int associate = 1;
52static int auto_create = 1;
53static int disable = 0;
54static const char ipw_modes[] = {
55 'a', 'b', 'g', '?'
56};
57
58static void ipw_rx(struct ipw_priv *priv);
59static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
60 struct clx2_tx_queue *txq, int qindex);
61static int ipw_queue_reset(struct ipw_priv *priv);
62
63static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
64 int len, int sync);
65
66static void ipw_tx_queue_free(struct ipw_priv *);
67
68static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
69static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
70static void ipw_rx_queue_replenish(void *);
71
72static int ipw_up(struct ipw_priv *);
73static void ipw_down(struct ipw_priv *);
74static int ipw_config(struct ipw_priv *);
75static int init_supported_rates(struct ipw_priv *priv,
76 struct ipw_supported_rates *prates);
77
78static u8 band_b_active_channel[MAX_B_CHANNELS] = {
79 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
80};
81static u8 band_a_active_channel[MAX_A_CHANNELS] = {
82 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
83};
84
85static int is_valid_channel(int mode_mask, int channel)
86{
87 int i;
88
89 if (!channel)
90 return 0;
91
92 if (mode_mask & IEEE_A)
93 for (i = 0; i < MAX_A_CHANNELS; i++)
94 if (band_a_active_channel[i] == channel)
95 return IEEE_A;
96
97 if (mode_mask & (IEEE_B | IEEE_G))
98 for (i = 0; i < MAX_B_CHANNELS; i++)
99 if (band_b_active_channel[i] == channel)
100 return mode_mask & (IEEE_B | IEEE_G);
101
102 return 0;
103}
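A short illustrative sketch (not part of the driver) of how is_valid_channel() might gate a requested channel: with the tables above, channel 6 is valid for a B/G mode mask but not for IEEE_A.

/* Illustration only (hypothetical caller): reject a requested channel
 * that is not valid for the configured mode mask. */
static int example_check_channel(int mode_mask, int requested)
{
	if (!is_valid_channel(mode_mask, requested))
		return -EINVAL;	/* e.g. channel 36 with a B/G-only mask */
	return 0;
}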
104
105static char *snprint_line(char *buf, size_t count,
106 const u8 * data, u32 len, u32 ofs)
107{
108 int out, i, j, l;
109 char c;
110
111 out = snprintf(buf, count, "%08X", ofs);
112
113 for (l = 0, i = 0; i < 2; i++) {
114 out += snprintf(buf + out, count - out, " ");
115 for (j = 0; j < 8 && l < len; j++, l++)
116 out += snprintf(buf + out, count - out, "%02X ",
117 data[(i * 8 + j)]);
118 for (; j < 8; j++)
119 out += snprintf(buf + out, count - out, " ");
120 }
121
122 out += snprintf(buf + out, count - out, " ");
123 for (l = 0, i = 0; i < 2; i++) {
124 out += snprintf(buf + out, count - out, " ");
125 for (j = 0; j < 8 && l < len; j++, l++) {
126 c = data[(i * 8 + j)];
127 if (!isascii(c) || !isprint(c))
128 c = '.';
129
130 out += snprintf(buf + out, count - out, "%c", c);
131 }
132
133 for (; j < 8; j++)
134 out += snprintf(buf + out, count - out, " ");
135 }
136
137 return buf;
138}
139
140static void printk_buf(int level, const u8 * data, u32 len)
141{
142 char line[81];
143 u32 ofs = 0;
144 if (!(ipw_debug_level & level))
145 return;
146
147 while (len) {
148 printk(KERN_DEBUG "%s\n",
149 snprint_line(line, sizeof(line), &data[ofs],
150 min(len, 16U), ofs));
151 ofs += 16;
152 len -= min(len, 16U);
153 }
154}
155
156static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
157#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
158
159static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
160#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
161
162static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
163static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
164{
165 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
166 __LINE__, (u32) (b), (u32) (c));
167 _ipw_write_reg8(a, b, c);
168}
169
170static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
171static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
172{
173 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
174 __LINE__, (u32) (b), (u32) (c));
175 _ipw_write_reg16(a, b, c);
176}
177
178static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
179static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
180{
181 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
182 __LINE__, (u32) (b), (u32) (c));
183 _ipw_write_reg32(a, b, c);
184}
185
186#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
187#define ipw_write8(ipw, ofs, val) \
188 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
189 _ipw_write8(ipw, ofs, val)
190
191#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
192#define ipw_write16(ipw, ofs, val) \
193 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
194 _ipw_write16(ipw, ofs, val)
195
196#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
197#define ipw_write32(ipw, ofs, val) \
198 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
199 _ipw_write32(ipw, ofs, val)
200
201#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
202static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
203{
204 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
205 return _ipw_read8(ipw, ofs);
206}
207
208#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
209
210#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
211static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
212{
213 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
214 return _ipw_read16(ipw, ofs);
215}
216
217#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
218
219#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
220static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
221{
222 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
223 return _ipw_read32(ipw, ofs);
224}
225
226#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
227
228static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
229#define ipw_read_indirect(a, b, c, d) \
230	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
231 _ipw_read_indirect(a, b, c, d)
232
233static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
234 int num);
235#define ipw_write_indirect(a, b, c, d) \
236 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
237 _ipw_write_indirect(a, b, c, d)
238
239/* indirect writes */
240static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
241{
242 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
243 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
244 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
245}
246
247static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
248{
249 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
250 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
251 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
252 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
253 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);
254}
255
256static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
257{
258 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
259 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
260 _ipw_write16(priv, CX2_INDIRECT_DATA, value);
261}
262
263/* indirect reads */
264
265static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
266{
267 u32 word;
268 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
269 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
270 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
271 return (word >> ((reg & 0x3) * 8)) & 0xff;
272}
273
274static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
275{
276 u32 value;
277
278 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
279
280 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
281 value = _ipw_read32(priv, CX2_INDIRECT_DATA);
282 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
283 return value;
284}
285
286/* iterative/auto-increment 32 bit reads and writes */
287static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
288 int num)
289{
290 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
291 u32 dif_len = addr - aligned_addr;
292 u32 aligned_len;
293 u32 i;
294
295 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
296
297	/* Read the unaligned leading bytes one at a time */
298 if (unlikely(dif_len)) {
299 /* Start reading at aligned_addr + dif_len */
300 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
301 for (i = dif_len; i < 4; i++, buf++)
302 *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
303 num -= dif_len;
304 aligned_addr += 4;
305 }
306
307 /* Read DWs through autoinc register */
308 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
309 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
310 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
311 *(u32 *) buf = ipw_read32(priv, CX2_AUTOINC_DATA);
312
313	/* Read the trailing unaligned bytes one at a time */
314 dif_len = num - aligned_len;
315 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
316 for (i = 0; i < dif_len; i++, buf++)
317 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
318}
319
320static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
321 int num)
322{
323 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
324 u32 dif_len = addr - aligned_addr;
325 u32 aligned_len;
326 u32 i;
327
328 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
329
330	/* Write the unaligned leading bytes one at a time */
331 if (unlikely(dif_len)) {
332 /* Start writing at aligned_addr + dif_len */
333 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
334 for (i = dif_len; i < 4; i++, buf++)
335 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
336 num -= dif_len;
337 aligned_addr += 4;
338 }
339
340 /* Write DWs through autoinc register */
341 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
342 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
343 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
344 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
345
346	/* Write the trailing unaligned bytes one at a time */
347 dif_len = num - aligned_len;
348 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
349 for (i = 0; i < dif_len; i++, buf++)
350 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
351}
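A hedged usage sketch of the indirect access wrappers defined above; the SRAM address, the buffer contents and the function itself are hypothetical and only illustrate the byte-granular round trip these routines perform.

/* Illustration only: round-trip a small buffer through NIC memory using
 * the ipw_write_indirect()/ipw_read_indirect() wrappers defined earlier. */
static void example_indirect_roundtrip(struct ipw_priv *priv)
{
	u8 out[16] = { 0xde, 0xad, 0xbe, 0xef };
	u8 in[16];

	ipw_write_indirect(priv, 0x3000, out, sizeof(out));
	ipw_read_indirect(priv, 0x3000, in, sizeof(in));
}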
352
353static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
354 int num)
355{
356 memcpy_toio((priv->hw_base + addr), buf, num);
357}
358
359static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
360{
361 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
362}
363
364static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
365{
366 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
367}
368
369static inline void ipw_enable_interrupts(struct ipw_priv *priv)
370{
371 if (priv->status & STATUS_INT_ENABLED)
372 return;
373 priv->status |= STATUS_INT_ENABLED;
374 ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
375}
376
377static inline void ipw_disable_interrupts(struct ipw_priv *priv)
378{
379 if (!(priv->status & STATUS_INT_ENABLED))
380 return;
381 priv->status &= ~STATUS_INT_ENABLED;
382 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
383}
384
385static char *ipw_error_desc(u32 val)
386{
387 switch (val) {
388 case IPW_FW_ERROR_OK:
389 return "ERROR_OK";
390 case IPW_FW_ERROR_FAIL:
391 return "ERROR_FAIL";
392 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
393 return "MEMORY_UNDERFLOW";
394 case IPW_FW_ERROR_MEMORY_OVERFLOW:
395 return "MEMORY_OVERFLOW";
396 case IPW_FW_ERROR_BAD_PARAM:
397 return "ERROR_BAD_PARAM";
398 case IPW_FW_ERROR_BAD_CHECKSUM:
399 return "ERROR_BAD_CHECKSUM";
400 case IPW_FW_ERROR_NMI_INTERRUPT:
401 return "ERROR_NMI_INTERRUPT";
402 case IPW_FW_ERROR_BAD_DATABASE:
403 return "ERROR_BAD_DATABASE";
404 case IPW_FW_ERROR_ALLOC_FAIL:
405 return "ERROR_ALLOC_FAIL";
406 case IPW_FW_ERROR_DMA_UNDERRUN:
407 return "ERROR_DMA_UNDERRUN";
408 case IPW_FW_ERROR_DMA_STATUS:
409 return "ERROR_DMA_STATUS";
410 case IPW_FW_ERROR_DINOSTATUS_ERROR:
411 return "ERROR_DINOSTATUS_ERROR";
412 case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
413 return "ERROR_EEPROMSTATUS_ERROR";
414 case IPW_FW_ERROR_SYSASSERT:
415 return "ERROR_SYSASSERT";
416 case IPW_FW_ERROR_FATAL_ERROR:
417 return "ERROR_FATALSTATUS_ERROR";
418 default:
419 return "UNKNOWNSTATUS_ERROR";
420 }
421}
422
423static void ipw_dump_nic_error_log(struct ipw_priv *priv)
424{
425 u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
426
427 base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
428 count = ipw_read_reg32(priv, base);
429
430 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
431 IPW_ERROR("Start IPW Error Log Dump:\n");
432 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
433 priv->status, priv->config);
434 }
435
436 for (i = ERROR_START_OFFSET;
437 i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
438 desc = ipw_read_reg32(priv, base + i);
439 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
440 blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
441 blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
442 ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
443 ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
444 idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
445
446 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
447 ipw_error_desc(desc), time, blink1, blink2,
448 ilink1, ilink2, idata);
449 }
450}
451
452static void ipw_dump_nic_event_log(struct ipw_priv *priv)
453{
454 u32 ev, time, data, i, count, base;
455
456 base = ipw_read32(priv, IPW_EVENT_LOG);
457 count = ipw_read_reg32(priv, base);
458
459 if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
460 IPW_ERROR("Start IPW Event Log Dump:\n");
461
462 for (i = EVENT_START_OFFSET;
463 i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
464 ev = ipw_read_reg32(priv, base + i);
465 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
466 data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
467
468#ifdef CONFIG_IPW_DEBUG
469 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
470#endif
471 }
472}
473
474static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
475{
476 u32 addr, field_info, field_len, field_count, total_len;
477
478 IPW_DEBUG_ORD("ordinal = %i\n", ord);
479
480 if (!priv || !val || !len) {
481 IPW_DEBUG_ORD("Invalid argument\n");
482 return -EINVAL;
483 }
484
485 /* verify device ordinal tables have been initialized */
486 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
 487 IPW_DEBUG_ORD("Attempt to access ordinals before initialization\n");
488 return -EINVAL;
489 }
490
491 switch (IPW_ORD_TABLE_ID_MASK & ord) {
492 case IPW_ORD_TABLE_0_MASK:
493 /*
494 * TABLE 0: Direct access to a table of 32 bit values
495 *
496 * This is a very simple table with the data directly
497 * read from the table
498 */
499
500 /* remove the table id from the ordinal */
501 ord &= IPW_ORD_TABLE_VALUE_MASK;
502
503 /* boundary check */
504 if (ord > priv->table0_len) {
 505 IPW_DEBUG_ORD("ordinal value (%i) greater than "
506 "max (%i)\n", ord, priv->table0_len);
507 return -EINVAL;
508 }
509
510 /* verify we have enough room to store the value */
511 if (*len < sizeof(u32)) {
512 IPW_DEBUG_ORD("ordinal buffer length too small, "
513 "need %zd\n", sizeof(u32));
514 return -EINVAL;
515 }
516
517 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
518 ord, priv->table0_addr + (ord << 2));
519
520 *len = sizeof(u32);
521 ord <<= 2;
522 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
523 break;
524
525 case IPW_ORD_TABLE_1_MASK:
526 /*
527 * TABLE 1: Indirect access to a table of 32 bit values
528 *
529 * This is a fairly large table of u32 values each
530 * representing starting addr for the data (which is
531 * also a u32)
532 */
533
534 /* remove the table id from the ordinal */
535 ord &= IPW_ORD_TABLE_VALUE_MASK;
536
537 /* boundary check */
538 if (ord > priv->table1_len) {
539 IPW_DEBUG_ORD("ordinal value too long\n");
540 return -EINVAL;
541 }
542
543 /* verify we have enough room to store the value */
544 if (*len < sizeof(u32)) {
545 IPW_DEBUG_ORD("ordinal buffer length too small, "
546 "need %zd\n", sizeof(u32));
547 return -EINVAL;
548 }
549
550 *((u32 *) val) =
551 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
552 *len = sizeof(u32);
553 break;
554
555 case IPW_ORD_TABLE_2_MASK:
556 /*
557 * TABLE 2: Indirect access to a table of variable sized values
558 *
 559 * This table consists of six values, each containing
 560 * - dword containing the starting offset of the data
 561 * - dword containing the length in the first 16 bits
 562 * and the count in the second 16 bits
563 */
564
565 /* remove the table id from the ordinal */
566 ord &= IPW_ORD_TABLE_VALUE_MASK;
567
568 /* boundary check */
569 if (ord > priv->table2_len) {
570 IPW_DEBUG_ORD("ordinal value too long\n");
571 return -EINVAL;
572 }
573
574 /* get the address of statistic */
575 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
576
577 /* get the second DW of statistics ;
578 * two 16-bit words - first is length, second is count */
579 field_info =
580 ipw_read_reg32(priv,
581 priv->table2_addr + (ord << 3) +
582 sizeof(u32));
583
584 /* get each entry length */
585 field_len = *((u16 *) & field_info);
586
587 /* get number of entries */
588 field_count = *(((u16 *) & field_info) + 1);
589
 590 /* abort if there is not enough memory */
591 total_len = field_len * field_count;
592 if (total_len > *len) {
593 *len = total_len;
594 return -EINVAL;
595 }
596
597 *len = total_len;
598 if (!total_len)
599 return 0;
600
601 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
602 "field_info = 0x%08x\n",
603 addr, total_len, field_info);
604 ipw_read_indirect(priv, addr, val, total_len);
605 break;
606
607 default:
608 IPW_DEBUG_ORD("Invalid ordinal!\n");
609 return -EINVAL;
610
611 }
612
613 return 0;
614}
615
616static void ipw_init_ordinals(struct ipw_priv *priv)
617{
618 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
619 priv->table0_len = ipw_read32(priv, priv->table0_addr);
620
621 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
622 priv->table0_addr, priv->table0_len);
623
624 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
625 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
626
627 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
628 priv->table1_addr, priv->table1_len);
629
630 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
631 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
632 priv->table2_len &= 0x0000ffff; /* use first two bytes */
633
634 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
635 priv->table2_addr, priv->table2_len);
636
637}
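
#if 0
/*
 * Illustrative sketch only, not part of the driver: typical use of the
 * ordinal API above.  The tables must already have been set up by
 * ipw_init_ordinals(); IPW_ORD_STAT_UCODE_VERSION is one of the ordinals
 * queried through this API elsewhere in this file.
 */
static int example_get_ucode_version(struct ipw_priv *priv, u32 *version)
{
	u32 len = sizeof(u32);

	/* on success *version holds the value and len is set to sizeof(u32) */
	return ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, version, &len);
}
#endif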
638
639/*
640 * The following adds a new attribute to the sysfs representation
641 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 642 * used for controlling the debug level.
643 *
644 * See the level definitions in ipw for details.
645 */
646static ssize_t show_debug_level(struct device_driver *d, char *buf)
647{
648 return sprintf(buf, "0x%08X\n", ipw_debug_level);
649}
650static ssize_t store_debug_level(struct device_driver *d,
651 const char *buf, size_t count)
652{
653 char *p = (char *)buf;
654 u32 val;
655
656 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
657 p++;
658 if (p[0] == 'x' || p[0] == 'X')
659 p++;
660 val = simple_strtoul(p, &p, 16);
661 } else
662 val = simple_strtoul(p, &p, 10);
663 if (p == buf)
664 printk(KERN_INFO DRV_NAME
665 ": %s is not in hex or decimal form.\n", buf);
666 else
667 ipw_debug_level = val;
668
669 return strnlen(buf, count);
670}
671
672static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
673 show_debug_level, store_debug_level);
674
675static ssize_t show_status(struct device *d,
676 struct device_attribute *attr, char *buf)
677{
678 struct ipw_priv *p = d->driver_data;
679 return sprintf(buf, "0x%08x\n", (int)p->status);
680}
681
682static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
683
684static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
685 char *buf)
686{
687 struct ipw_priv *p = d->driver_data;
688 return sprintf(buf, "0x%08x\n", (int)p->config);
689}
690
691static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
692
693static ssize_t show_nic_type(struct device *d,
694 struct device_attribute *attr, char *buf)
695{
696 struct ipw_priv *p = d->driver_data;
697 u8 type = p->eeprom[EEPROM_NIC_TYPE];
698
699 switch (type) {
700 case EEPROM_NIC_TYPE_STANDARD:
701 return sprintf(buf, "STANDARD\n");
702 case EEPROM_NIC_TYPE_DELL:
703 return sprintf(buf, "DELL\n");
704 case EEPROM_NIC_TYPE_FUJITSU:
705 return sprintf(buf, "FUJITSU\n");
706 case EEPROM_NIC_TYPE_IBM:
707 return sprintf(buf, "IBM\n");
708 case EEPROM_NIC_TYPE_HP:
709 return sprintf(buf, "HP\n");
710 }
711
712 return sprintf(buf, "UNKNOWN\n");
713}
714
715static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
716
717static ssize_t dump_error_log(struct device *d,
718 struct device_attribute *attr, const char *buf,
719 size_t count)
720{
721 char *p = (char *)buf;
722
723 if (p[0] == '1')
724 ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
725
726 return strnlen(buf, count);
727}
728
729static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
730
731static ssize_t dump_event_log(struct device *d,
732 struct device_attribute *attr, const char *buf,
733 size_t count)
734{
735 char *p = (char *)buf;
736
737 if (p[0] == '1')
738 ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
739
740 return strnlen(buf, count);
741}
742
743static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
744
745static ssize_t show_ucode_version(struct device *d,
746 struct device_attribute *attr, char *buf)
747{
748 u32 len = sizeof(u32), tmp = 0;
749 struct ipw_priv *p = d->driver_data;
750
751 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
752 return 0;
753
754 return sprintf(buf, "0x%08x\n", tmp);
755}
756
757static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
758
759static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
760 char *buf)
761{
762 u32 len = sizeof(u32), tmp = 0;
763 struct ipw_priv *p = d->driver_data;
764
765 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
766 return 0;
767
768 return sprintf(buf, "0x%08x\n", tmp);
769}
770
771static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
772
773/*
774 * Add a device attribute to view/control the delay between eeprom
775 * operations.
776 */
777static ssize_t show_eeprom_delay(struct device *d,
778 struct device_attribute *attr, char *buf)
779{
780 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
781 return sprintf(buf, "%i\n", n);
782}
783static ssize_t store_eeprom_delay(struct device *d,
784 struct device_attribute *attr,
785 const char *buf, size_t count)
786{
787 struct ipw_priv *p = d->driver_data;
788 sscanf(buf, "%i", &p->eeprom_delay);
789 return strnlen(buf, count);
790}
791
792static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
793 show_eeprom_delay, store_eeprom_delay);
794
795static ssize_t show_command_event_reg(struct device *d,
796 struct device_attribute *attr, char *buf)
797{
798 u32 reg = 0;
799 struct ipw_priv *p = d->driver_data;
800
801 reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
802 return sprintf(buf, "0x%08x\n", reg);
803}
804static ssize_t store_command_event_reg(struct device *d,
805 struct device_attribute *attr,
806 const char *buf, size_t count)
807{
808 u32 reg;
809 struct ipw_priv *p = d->driver_data;
810
811 sscanf(buf, "%x", &reg);
812 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
813 return strnlen(buf, count);
814}
815
816static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
817 show_command_event_reg, store_command_event_reg);
818
819static ssize_t show_mem_gpio_reg(struct device *d,
820 struct device_attribute *attr, char *buf)
821{
822 u32 reg = 0;
823 struct ipw_priv *p = d->driver_data;
824
825 reg = ipw_read_reg32(p, 0x301100);
826 return sprintf(buf, "0x%08x\n", reg);
827}
828static ssize_t store_mem_gpio_reg(struct device *d,
829 struct device_attribute *attr,
830 const char *buf, size_t count)
831{
832 u32 reg;
833 struct ipw_priv *p = d->driver_data;
834
835 sscanf(buf, "%x", &reg);
836 ipw_write_reg32(p, 0x301100, reg);
837 return strnlen(buf, count);
838}
839
840static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
841 show_mem_gpio_reg, store_mem_gpio_reg);
842
843static ssize_t show_indirect_dword(struct device *d,
844 struct device_attribute *attr, char *buf)
845{
846 u32 reg = 0;
847 struct ipw_priv *priv = d->driver_data;
848 if (priv->status & STATUS_INDIRECT_DWORD)
849 reg = ipw_read_reg32(priv, priv->indirect_dword);
850 else
851 reg = 0;
852
853 return sprintf(buf, "0x%08x\n", reg);
854}
855static ssize_t store_indirect_dword(struct device *d,
856 struct device_attribute *attr,
857 const char *buf, size_t count)
858{
859 struct ipw_priv *priv = d->driver_data;
860
861 sscanf(buf, "%x", &priv->indirect_dword);
862 priv->status |= STATUS_INDIRECT_DWORD;
863 return strnlen(buf, count);
864}
865
866static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
867 show_indirect_dword, store_indirect_dword);
868
869static ssize_t show_indirect_byte(struct device *d,
870 struct device_attribute *attr, char *buf)
871{
872 u8 reg = 0;
873 struct ipw_priv *priv = d->driver_data;
874 if (priv->status & STATUS_INDIRECT_BYTE)
875 reg = ipw_read_reg8(priv, priv->indirect_byte);
876 else
877 reg = 0;
878
879 return sprintf(buf, "0x%02x\n", reg);
880}
881static ssize_t store_indirect_byte(struct device *d,
882 struct device_attribute *attr,
883 const char *buf, size_t count)
884{
885 struct ipw_priv *priv = d->driver_data;
886
887 sscanf(buf, "%x", &priv->indirect_byte);
888 priv->status |= STATUS_INDIRECT_BYTE;
889 return strnlen(buf, count);
890}
891
892static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
893 show_indirect_byte, store_indirect_byte);
894
895static ssize_t show_direct_dword(struct device *d,
896 struct device_attribute *attr, char *buf)
897{
898 u32 reg = 0;
899 struct ipw_priv *priv = d->driver_data;
900
901 if (priv->status & STATUS_DIRECT_DWORD)
902 reg = ipw_read32(priv, priv->direct_dword);
903 else
904 reg = 0;
905
906 return sprintf(buf, "0x%08x\n", reg);
907}
908static ssize_t store_direct_dword(struct device *d,
909 struct device_attribute *attr,
910 const char *buf, size_t count)
911{
912 struct ipw_priv *priv = d->driver_data;
913
914 sscanf(buf, "%x", &priv->direct_dword);
915 priv->status |= STATUS_DIRECT_DWORD;
916 return strnlen(buf, count);
917}
918
919static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
920 show_direct_dword, store_direct_dword);
921
922static inline int rf_kill_active(struct ipw_priv *priv)
923{
924 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
925 priv->status |= STATUS_RF_KILL_HW;
926 else
927 priv->status &= ~STATUS_RF_KILL_HW;
928
929 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
930}
931
932static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
933 char *buf)
934{
935 /* 0 - RF kill not enabled
936 1 - SW based RF kill active (sysfs)
937 2 - HW based RF kill active
 938 3 - Both HW and SW based RF kill active */
939 struct ipw_priv *priv = d->driver_data;
940 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
941 (rf_kill_active(priv) ? 0x2 : 0x0);
942 return sprintf(buf, "%i\n", val);
943}
944
945static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
946{
947 if ((disable_radio ? 1 : 0) ==
948 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
949 return 0;
950
951 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
952 disable_radio ? "OFF" : "ON");
953
954 if (disable_radio) {
955 priv->status |= STATUS_RF_KILL_SW;
956
957 if (priv->workqueue) {
958 cancel_delayed_work(&priv->request_scan);
959 }
960 wake_up_interruptible(&priv->wait_command_queue);
961 queue_work(priv->workqueue, &priv->down);
962 } else {
963 priv->status &= ~STATUS_RF_KILL_SW;
964 if (rf_kill_active(priv)) {
965 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
966 "disabled by HW switch\n");
967 /* Make sure the RF_KILL check timer is running */
968 cancel_delayed_work(&priv->rf_kill);
969 queue_delayed_work(priv->workqueue, &priv->rf_kill,
970 2 * HZ);
971 } else
972 queue_work(priv->workqueue, &priv->up);
973 }
974
975 return 1;
976}
977
978static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
979 const char *buf, size_t count)
980{
981 struct ipw_priv *priv = d->driver_data;
982
983 ipw_radio_kill_sw(priv, buf[0] == '1');
984
985 return count;
986}
987
988static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
989
990static void ipw_irq_tasklet(struct ipw_priv *priv)
991{
992 u32 inta, inta_mask, handled = 0;
993 unsigned long flags;
994 int rc = 0;
995
996 spin_lock_irqsave(&priv->lock, flags);
997
998 inta = ipw_read32(priv, CX2_INTA_RW);
999 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
1000 inta &= (CX2_INTA_MASK_ALL & inta_mask);
1001
1002 /* Add any cached INTA values that need to be handled */
1003 inta |= priv->isr_inta;
1004
 1005 /* handle all of the sources of the interrupt */
1006 if (inta & CX2_INTA_BIT_RX_TRANSFER) {
1007 ipw_rx(priv);
1008 handled |= CX2_INTA_BIT_RX_TRANSFER;
1009 }
1010
1011 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
1012 IPW_DEBUG_HC("Command completed.\n");
1013 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1014 priv->status &= ~STATUS_HCMD_ACTIVE;
1015 wake_up_interruptible(&priv->wait_command_queue);
1016 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
1017 }
1018
1019 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
1020 IPW_DEBUG_TX("TX_QUEUE_1\n");
1021 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1022 handled |= CX2_INTA_BIT_TX_QUEUE_1;
1023 }
1024
1025 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
1026 IPW_DEBUG_TX("TX_QUEUE_2\n");
1027 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1028 handled |= CX2_INTA_BIT_TX_QUEUE_2;
1029 }
1030
1031 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
1032 IPW_DEBUG_TX("TX_QUEUE_3\n");
1033 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1034 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1035 }
1036
1037 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1038 IPW_DEBUG_TX("TX_QUEUE_4\n");
1039 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1040 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1041 }
1042
1043 if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
1044 IPW_WARNING("STATUS_CHANGE\n");
1045 handled |= CX2_INTA_BIT_STATUS_CHANGE;
1046 }
1047
1048 if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
 1049 IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
1050 handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
1051 }
1052
1053 if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1054 IPW_WARNING("HOST_CMD_DONE\n");
1055 handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1056 }
1057
1058 if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
1059 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1060 handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
1061 }
1062
1063 if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1064 IPW_WARNING("PHY_OFF_DONE\n");
1065 handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1066 }
1067
1068 if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
1069 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1070 priv->status |= STATUS_RF_KILL_HW;
1071 wake_up_interruptible(&priv->wait_command_queue);
1072 netif_carrier_off(priv->net_dev);
1073 netif_stop_queue(priv->net_dev);
1074 cancel_delayed_work(&priv->request_scan);
1075 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1076 handled |= CX2_INTA_BIT_RF_KILL_DONE;
1077 }
1078
1079 if (inta & CX2_INTA_BIT_FATAL_ERROR) {
1080 IPW_ERROR("Firmware error detected. Restarting.\n");
1081#ifdef CONFIG_IPW_DEBUG
1082 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1083 ipw_dump_nic_error_log(priv);
1084 ipw_dump_nic_event_log(priv);
1085 }
1086#endif
1087 queue_work(priv->workqueue, &priv->adapter_restart);
1088 handled |= CX2_INTA_BIT_FATAL_ERROR;
1089 }
1090
1091 if (inta & CX2_INTA_BIT_PARITY_ERROR) {
1092 IPW_ERROR("Parity error\n");
1093 handled |= CX2_INTA_BIT_PARITY_ERROR;
1094 }
1095
1096 if (handled != inta) {
1097 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1098 }
1099
1100 /* enable all interrupts */
1101 ipw_enable_interrupts(priv);
1102
1103 spin_unlock_irqrestore(&priv->lock, flags);
1104}
1105
1106#ifdef CONFIG_IPW_DEBUG
1107#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1108static char *get_cmd_string(u8 cmd)
1109{
1110 switch (cmd) {
1111 IPW_CMD(HOST_COMPLETE);
1112 IPW_CMD(POWER_DOWN);
1113 IPW_CMD(SYSTEM_CONFIG);
1114 IPW_CMD(MULTICAST_ADDRESS);
1115 IPW_CMD(SSID);
1116 IPW_CMD(ADAPTER_ADDRESS);
1117 IPW_CMD(PORT_TYPE);
1118 IPW_CMD(RTS_THRESHOLD);
1119 IPW_CMD(FRAG_THRESHOLD);
1120 IPW_CMD(POWER_MODE);
1121 IPW_CMD(WEP_KEY);
1122 IPW_CMD(TGI_TX_KEY);
1123 IPW_CMD(SCAN_REQUEST);
1124 IPW_CMD(SCAN_REQUEST_EXT);
1125 IPW_CMD(ASSOCIATE);
1126 IPW_CMD(SUPPORTED_RATES);
1127 IPW_CMD(SCAN_ABORT);
1128 IPW_CMD(TX_FLUSH);
1129 IPW_CMD(QOS_PARAMETERS);
1130 IPW_CMD(DINO_CONFIG);
1131 IPW_CMD(RSN_CAPABILITIES);
1132 IPW_CMD(RX_KEY);
1133 IPW_CMD(CARD_DISABLE);
1134 IPW_CMD(SEED_NUMBER);
1135 IPW_CMD(TX_POWER);
1136 IPW_CMD(COUNTRY_INFO);
1137 IPW_CMD(AIRONET_INFO);
1138 IPW_CMD(AP_TX_POWER);
1139 IPW_CMD(CCKM_INFO);
1140 IPW_CMD(CCX_VER_INFO);
1141 IPW_CMD(SET_CALIBRATION);
1142 IPW_CMD(SENSITIVITY_CALIB);
1143 IPW_CMD(RETRY_LIMIT);
1144 IPW_CMD(IPW_PRE_POWER_DOWN);
1145 IPW_CMD(VAP_BEACON_TEMPLATE);
1146 IPW_CMD(VAP_DTIM_PERIOD);
1147 IPW_CMD(EXT_SUPPORTED_RATES);
1148 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1149 IPW_CMD(VAP_QUIET_INTERVALS);
1150 IPW_CMD(VAP_CHANNEL_SWITCH);
1151 IPW_CMD(VAP_MANDATORY_CHANNELS);
1152 IPW_CMD(VAP_CELL_PWR_LIMIT);
1153 IPW_CMD(VAP_CF_PARAM_SET);
1154 IPW_CMD(VAP_SET_BEACONING_STATE);
1155 IPW_CMD(MEASUREMENT);
1156 IPW_CMD(POWER_CAPABILITY);
1157 IPW_CMD(SUPPORTED_CHANNELS);
1158 IPW_CMD(TPC_REPORT);
1159 IPW_CMD(WME_INFO);
1160 IPW_CMD(PRODUCTION_COMMAND);
1161 default:
1162 return "UNKNOWN";
1163 }
1164}
1165#endif /* CONFIG_IPW_DEBUG */
1166
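/* how long ipw_send_cmd() waits for command completion: HZ jiffies, i.e. one second */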
1167#define HOST_COMPLETE_TIMEOUT HZ
1168static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1169{
1170 int rc = 0;
1171
1172 if (priv->status & STATUS_HCMD_ACTIVE) {
1173 IPW_ERROR("Already sending a command\n");
1174 return -1;
1175 }
1176
1177 priv->status |= STATUS_HCMD_ACTIVE;
1178
1179 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1180 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1181 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1182
1183 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1184 if (rc)
1185 return rc;
1186
1187 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1188 !(priv->
1189 status & STATUS_HCMD_ACTIVE),
1190 HOST_COMPLETE_TIMEOUT);
1191 if (rc == 0) {
 1192 IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
1193 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1194 priv->status &= ~STATUS_HCMD_ACTIVE;
1195 return -EIO;
1196 }
1197 if (priv->status & STATUS_RF_KILL_MASK) {
1198 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
1199 return -EIO;
1200 }
1201
1202 return 0;
1203}
1204
1205static int ipw_send_host_complete(struct ipw_priv *priv)
1206{
1207 struct host_cmd cmd = {
1208 .cmd = IPW_CMD_HOST_COMPLETE,
1209 .len = 0
1210 };
1211
1212 if (!priv) {
1213 IPW_ERROR("Invalid args\n");
1214 return -1;
1215 }
1216
1217 if (ipw_send_cmd(priv, &cmd)) {
1218 IPW_ERROR("failed to send HOST_COMPLETE command\n");
1219 return -1;
1220 }
1221
1222 return 0;
1223}
1224
1225static int ipw_send_system_config(struct ipw_priv *priv,
1226 struct ipw_sys_config *config)
1227{
1228 struct host_cmd cmd = {
1229 .cmd = IPW_CMD_SYSTEM_CONFIG,
1230 .len = sizeof(*config)
1231 };
1232
1233 if (!priv || !config) {
1234 IPW_ERROR("Invalid args\n");
1235 return -1;
1236 }
1237
1238 memcpy(&cmd.param, config, sizeof(*config));
1239 if (ipw_send_cmd(priv, &cmd)) {
1240 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1241 return -1;
1242 }
1243
1244 return 0;
1245}
1246
1247static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1248{
1249 struct host_cmd cmd = {
1250 .cmd = IPW_CMD_SSID,
1251 .len = min(len, IW_ESSID_MAX_SIZE)
1252 };
1253
1254 if (!priv || !ssid) {
1255 IPW_ERROR("Invalid args\n");
1256 return -1;
1257 }
1258
1259 memcpy(&cmd.param, ssid, cmd.len);
1260 if (ipw_send_cmd(priv, &cmd)) {
1261 IPW_ERROR("failed to send SSID command\n");
1262 return -1;
1263 }
1264
1265 return 0;
1266}
1267
1268static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1269{
1270 struct host_cmd cmd = {
1271 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1272 .len = ETH_ALEN
1273 };
1274
1275 if (!priv || !mac) {
1276 IPW_ERROR("Invalid args\n");
1277 return -1;
1278 }
1279
1280 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1281 priv->net_dev->name, MAC_ARG(mac));
1282
1283 memcpy(&cmd.param, mac, ETH_ALEN);
1284
1285 if (ipw_send_cmd(priv, &cmd)) {
1286 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
1287 return -1;
1288 }
1289
1290 return 0;
1291}
1292
1293static void ipw_adapter_restart(void *adapter)
1294{
1295 struct ipw_priv *priv = adapter;
1296
1297 if (priv->status & STATUS_RF_KILL_MASK)
1298 return;
1299
1300 ipw_down(priv);
1301 if (ipw_up(priv)) {
1302 IPW_ERROR("Failed to up device\n");
1303 return;
1304 }
1305}
1306
1307#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1308
1309static void ipw_scan_check(void *data)
1310{
1311 struct ipw_priv *priv = data;
1312 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1313 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
1314 "adapter (%dms).\n",
 1315 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
1316 ipw_adapter_restart(priv);
1317 }
1318}
1319
1320static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1321 struct ipw_scan_request_ext *request)
1322{
1323 struct host_cmd cmd = {
1324 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1325 .len = sizeof(*request)
1326 };
1327
1328 if (!priv || !request) {
1329 IPW_ERROR("Invalid args\n");
1330 return -1;
1331 }
1332
1333 memcpy(&cmd.param, request, sizeof(*request));
1334 if (ipw_send_cmd(priv, &cmd)) {
1335 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1336 return -1;
1337 }
1338
1339 queue_delayed_work(priv->workqueue, &priv->scan_check,
1340 IPW_SCAN_CHECK_WATCHDOG);
1341 return 0;
1342}
1343
1344static int ipw_send_scan_abort(struct ipw_priv *priv)
1345{
1346 struct host_cmd cmd = {
1347 .cmd = IPW_CMD_SCAN_ABORT,
1348 .len = 0
1349 };
1350
1351 if (!priv) {
1352 IPW_ERROR("Invalid args\n");
1353 return -1;
1354 }
1355
1356 if (ipw_send_cmd(priv, &cmd)) {
1357 IPW_ERROR("failed to send SCAN_ABORT command\n");
1358 return -1;
1359 }
1360
1361 return 0;
1362}
1363
1364static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1365{
1366 struct host_cmd cmd = {
1367 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1368 .len = sizeof(struct ipw_sensitivity_calib)
1369 };
1370 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1371 &cmd.param;
1372 calib->beacon_rssi_raw = sens;
1373 if (ipw_send_cmd(priv, &cmd)) {
1374 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
1375 return -1;
1376 }
1377
1378 return 0;
1379}
1380
1381static int ipw_send_associate(struct ipw_priv *priv,
1382 struct ipw_associate *associate)
1383{
1384 struct host_cmd cmd = {
1385 .cmd = IPW_CMD_ASSOCIATE,
1386 .len = sizeof(*associate)
1387 };
1388
1389 if (!priv || !associate) {
1390 IPW_ERROR("Invalid args\n");
1391 return -1;
1392 }
1393
1394 memcpy(&cmd.param, associate, sizeof(*associate));
1395 if (ipw_send_cmd(priv, &cmd)) {
1396 IPW_ERROR("failed to send ASSOCIATE command\n");
1397 return -1;
1398 }
1399
1400 return 0;
1401}
1402
1403static int ipw_send_supported_rates(struct ipw_priv *priv,
1404 struct ipw_supported_rates *rates)
1405{
1406 struct host_cmd cmd = {
1407 .cmd = IPW_CMD_SUPPORTED_RATES,
1408 .len = sizeof(*rates)
1409 };
1410
1411 if (!priv || !rates) {
1412 IPW_ERROR("Invalid args\n");
1413 return -1;
1414 }
1415
1416 memcpy(&cmd.param, rates, sizeof(*rates));
1417 if (ipw_send_cmd(priv, &cmd)) {
1418 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1419 return -1;
1420 }
1421
1422 return 0;
1423}
1424
1425static int ipw_set_random_seed(struct ipw_priv *priv)
1426{
1427 struct host_cmd cmd = {
1428 .cmd = IPW_CMD_SEED_NUMBER,
1429 .len = sizeof(u32)
1430 };
1431
1432 if (!priv) {
1433 IPW_ERROR("Invalid args\n");
1434 return -1;
1435 }
1436
1437 get_random_bytes(&cmd.param, sizeof(u32));
1438
1439 if (ipw_send_cmd(priv, &cmd)) {
1440 IPW_ERROR("failed to send SEED_NUMBER command\n");
1441 return -1;
1442 }
1443
1444 return 0;
1445}
1446
1447#if 0
1448static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1449{
1450 struct host_cmd cmd = {
1451 .cmd = IPW_CMD_CARD_DISABLE,
1452 .len = sizeof(u32)
1453 };
1454
1455 if (!priv) {
1456 IPW_ERROR("Invalid args\n");
1457 return -1;
1458 }
1459
1460 *((u32 *) & cmd.param) = phy_off;
1461
1462 if (ipw_send_cmd(priv, &cmd)) {
1463 IPW_ERROR("failed to send CARD_DISABLE command\n");
1464 return -1;
1465 }
1466
1467 return 0;
1468}
1469#endif
1470
1471static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
1472{
1473 struct host_cmd cmd = {
1474 .cmd = IPW_CMD_TX_POWER,
1475 .len = sizeof(*power)
1476 };
1477
1478 if (!priv || !power) {
1479 IPW_ERROR("Invalid args\n");
1480 return -1;
1481 }
1482
1483 memcpy(&cmd.param, power, sizeof(*power));
1484 if (ipw_send_cmd(priv, &cmd)) {
1485 IPW_ERROR("failed to send TX_POWER command\n");
1486 return -1;
1487 }
1488
1489 return 0;
1490}
1491
1492static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1493{
1494 struct ipw_rts_threshold rts_threshold = {
1495 .rts_threshold = rts,
1496 };
1497 struct host_cmd cmd = {
1498 .cmd = IPW_CMD_RTS_THRESHOLD,
1499 .len = sizeof(rts_threshold)
1500 };
1501
1502 if (!priv) {
1503 IPW_ERROR("Invalid args\n");
1504 return -1;
1505 }
1506
1507 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1508 if (ipw_send_cmd(priv, &cmd)) {
1509 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
1510 return -1;
1511 }
1512
1513 return 0;
1514}
1515
1516static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1517{
1518 struct ipw_frag_threshold frag_threshold = {
1519 .frag_threshold = frag,
1520 };
1521 struct host_cmd cmd = {
1522 .cmd = IPW_CMD_FRAG_THRESHOLD,
1523 .len = sizeof(frag_threshold)
1524 };
1525
1526 if (!priv) {
1527 IPW_ERROR("Invalid args\n");
1528 return -1;
1529 }
1530
1531 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1532 if (ipw_send_cmd(priv, &cmd)) {
1533 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
1534 return -1;
1535 }
1536
1537 return 0;
1538}
1539
1540static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1541{
1542 struct host_cmd cmd = {
1543 .cmd = IPW_CMD_POWER_MODE,
1544 .len = sizeof(u32)
1545 };
1546 u32 *param = (u32 *) (&cmd.param);
1547
1548 if (!priv) {
1549 IPW_ERROR("Invalid args\n");
1550 return -1;
1551 }
1552
 1553 /* If on battery, set to power index 3; if on AC, set to CAM;
 1554 * otherwise use the user-supplied level */
1555 switch (mode) {
1556 case IPW_POWER_BATTERY:
1557 *param = IPW_POWER_INDEX_3;
1558 break;
1559 case IPW_POWER_AC:
1560 *param = IPW_POWER_MODE_CAM;
1561 break;
1562 default:
1563 *param = mode;
1564 break;
1565 }
1566
1567 if (ipw_send_cmd(priv, &cmd)) {
1568 IPW_ERROR("failed to send POWER_MODE command\n");
1569 return -1;
1570 }
1571
1572 return 0;
1573}
1574
1575/*
1576 * The IPW device contains a Microwire compatible EEPROM that stores
1577 * various data like the MAC address. Usually the firmware has exclusive
1578 * access to the eeprom, but during device initialization (before the
1579 * device driver has sent the HostComplete command to the firmware) the
1580 * device driver has read access to the EEPROM by way of indirect addressing
1581 * through a couple of memory mapped registers.
1582 *
 1583 * The following is a simplified implementation for pulling data out of
 1584 * the eeprom, along with some helper functions to find information in
1585 * the per device private data's copy of the eeprom.
1586 *
 1587 * NOTE: To better understand how these functions work (i.e. what is a chip
 1588 * select and why do we have to keep driving the eeprom clock?), read
1589 * just about any data sheet for a Microwire compatible EEPROM.
1590 */
1591
1592/* write a 32 bit value into the indirect accessor register */
1593static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1594{
1595 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
1596
1597 /* the eeprom requires some time to complete the operation */
1598 udelay(p->eeprom_delay);
1599
1600 return;
1601}
1602
1603/* perform a chip select operation */
1604static inline void eeprom_cs(struct ipw_priv *priv)
1605{
1606 eeprom_write_reg(priv, 0);
1607 eeprom_write_reg(priv, EEPROM_BIT_CS);
1608 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1609 eeprom_write_reg(priv, EEPROM_BIT_CS);
1610}
1611
 1612/* release the chip select */
1613static inline void eeprom_disable_cs(struct ipw_priv *priv)
1614{
1615 eeprom_write_reg(priv, EEPROM_BIT_CS);
1616 eeprom_write_reg(priv, 0);
1617 eeprom_write_reg(priv, EEPROM_BIT_SK);
1618}
1619
1620/* push a single bit down to the eeprom */
1621static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
1622{
1623 int d = (bit ? EEPROM_BIT_DI : 0);
1624 eeprom_write_reg(p, EEPROM_BIT_CS | d);
1625 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
1626}
1627
1628/* push an opcode followed by an address down to the eeprom */
1629static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
1630{
1631 int i;
1632
1633 eeprom_cs(priv);
1634 eeprom_write_bit(priv, 1);
1635 eeprom_write_bit(priv, op & 2);
1636 eeprom_write_bit(priv, op & 1);
1637 for (i = 7; i >= 0; i--) {
1638 eeprom_write_bit(priv, addr & (1 << i));
1639 }
1640}
1641
1642/* pull 16 bits off the eeprom, one bit at a time */
1643static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
1644{
1645 int i;
1646 u16 r = 0;
1647
1648 /* Send READ Opcode */
1649 eeprom_op(priv, EEPROM_CMD_READ, addr);
1650
1651 /* Send dummy bit */
1652 eeprom_write_reg(priv, EEPROM_BIT_CS);
1653
1654 /* Read the byte off the eeprom one bit at a time */
1655 for (i = 0; i < 16; i++) {
1656 u32 data = 0;
1657 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1658 eeprom_write_reg(priv, EEPROM_BIT_CS);
1659 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
1660 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
1661 }
1662
1663 /* Send another dummy bit */
1664 eeprom_write_reg(priv, 0);
1665 eeprom_disable_cs(priv);
1666
1667 return r;
1668}
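
#if 0
/*
 * Illustrative sketch only, not part of the driver: dump a few EEPROM
 * words through the Microwire helpers above.  As noted earlier, this kind
 * of host access is only available before the HostComplete command has
 * been sent to the firmware.
 */
static void example_dump_eeprom_words(struct ipw_priv *priv)
{
	u8 addr;

	for (addr = 0; addr < 8; addr++)
		IPW_DEBUG_INFO("eeprom[0x%02x] = 0x%04x\n", addr,
			       eeprom_read_u16(priv, addr));
}
#endif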
1669
1670/* helper function for pulling the mac address out of the private */
1671/* data's copy of the eeprom data */
1672static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
1673{
1674 u8 *ee = (u8 *) priv->eeprom;
1675 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
1676}
1677
1678/*
1679 * Either the device driver (i.e. the host) or the firmware can
1680 * load eeprom data into the designated region in SRAM. If neither
 1681 * happens then the FW will shut down with a fatal error.
1682 *
 1683 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
 1684 * word in shared SRAM needs to be set to a non-zero value.
1685 */
1686static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1687{
1688 int i;
1689 u16 *eeprom = (u16 *) priv->eeprom;
1690
1691 IPW_DEBUG_TRACE(">>\n");
1692
1693 /* read entire contents of eeprom into private buffer */
1694 for (i = 0; i < 128; i++)
1695 eeprom[i] = eeprom_read_u16(priv, (u8) i);
1696
1697 /*
1698 If the data looks correct, then copy it to our private
1699 copy. Otherwise let the firmware know to perform the operation
 1700 on its own
1701 */
 1702 if (priv->eeprom[EEPROM_VERSION] != 0) {
1703 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1704
1705 /* write the eeprom data to sram */
1706 for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
1707 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
1708
1709 /* Do not load eeprom data on fatal error or suspend */
1710 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
1711 } else {
 1712 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
1713
1714 /* Load eeprom data on fatal error or suspend */
1715 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
1716 }
1717
1718 IPW_DEBUG_TRACE("<<\n");
1719}
1720
1721static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1722{
1723 count >>= 2;
1724 if (!count)
1725 return;
1726 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1727 while (count--)
1728 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
1729}
1730
1731static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1732{
1733 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
1734 CB_NUMBER_OF_ELEMENTS_SMALL *
1735 sizeof(struct command_block));
1736}
1737
1738static int ipw_fw_dma_enable(struct ipw_priv *priv)
1739{ /* start dma engine but no transfers yet */
1740
1741 IPW_DEBUG_FW(">> : \n");
1742
1743 /* Start the dma */
1744 ipw_fw_dma_reset_command_blocks(priv);
1745
1746 /* Write CB base address */
1747 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
1748
1749 IPW_DEBUG_FW("<< : \n");
1750 return 0;
1751}
1752
1753static void ipw_fw_dma_abort(struct ipw_priv *priv)
1754{
1755 u32 control = 0;
1756
1757 IPW_DEBUG_FW(">> :\n");
1758
1759 //set the Stop and Abort bit
1760 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
1761 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1762 priv->sram_desc.last_cb_index = 0;
1763
1764 IPW_DEBUG_FW("<< \n");
1765}
1766
1767static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
1768 struct command_block *cb)
1769{
1770 u32 address =
1771 CX2_SHARED_SRAM_DMA_CONTROL +
1772 (sizeof(struct command_block) * index);
1773 IPW_DEBUG_FW(">> :\n");
1774
1775 ipw_write_indirect(priv, address, (u8 *) cb,
1776 (int)sizeof(struct command_block));
1777
1778 IPW_DEBUG_FW("<< :\n");
1779 return 0;
1780
1781}
1782
1783static int ipw_fw_dma_kick(struct ipw_priv *priv)
1784{
1785 u32 control = 0;
1786 u32 index = 0;
1787
1788 IPW_DEBUG_FW(">> :\n");
1789
1790 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1791 ipw_fw_dma_write_command_block(priv, index,
1792 &priv->sram_desc.cb_list[index]);
1793
1794 /* Enable the DMA in the CSR register */
1795 ipw_clear_bit(priv, CX2_RESET_REG,
1796 CX2_RESET_REG_MASTER_DISABLED |
1797 CX2_RESET_REG_STOP_MASTER);
1798
1799 /* Set the Start bit. */
1800 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1801 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1802
1803 IPW_DEBUG_FW("<< :\n");
1804 return 0;
1805}
1806
1807static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1808{
1809 u32 address;
1810 u32 register_value = 0;
1811 u32 cb_fields_address = 0;
1812
1813 IPW_DEBUG_FW(">> :\n");
1814 address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1815 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
1816
 1817 /* Read the DMA Control register */
1818 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1819 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
1820
1821 /* Print the CB values */
1822 cb_fields_address = address;
1823 register_value = ipw_read_reg32(priv, cb_fields_address);
1824 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
1825
1826 cb_fields_address += sizeof(u32);
1827 register_value = ipw_read_reg32(priv, cb_fields_address);
1828 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
1829
1830 cb_fields_address += sizeof(u32);
1831 register_value = ipw_read_reg32(priv, cb_fields_address);
1832 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
1833 register_value);
1834
1835 cb_fields_address += sizeof(u32);
1836 register_value = ipw_read_reg32(priv, cb_fields_address);
1837 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
1838
 1839 IPW_DEBUG_FW("<< :\n");
1840}
1841
1842static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1843{
1844 u32 current_cb_address = 0;
1845 u32 current_cb_index = 0;
1846
 1847 IPW_DEBUG_FW(">> :\n");
1848 current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1849
1850 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
1851 sizeof(struct command_block);
1852
1853 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1854 current_cb_index, current_cb_address);
1855
 1856 IPW_DEBUG_FW("<< :\n");
1857 return current_cb_index;
1858
1859}
1860
1861static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1862 u32 src_address,
1863 u32 dest_address,
1864 u32 length,
1865 int interrupt_enabled, int is_last)
1866{
1867
1868 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1869 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1870 CB_DEST_SIZE_LONG;
1871 struct command_block *cb;
1872 u32 last_cb_element = 0;
1873
1874 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1875 src_address, dest_address, length);
1876
1877 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
1878 return -1;
1879
1880 last_cb_element = priv->sram_desc.last_cb_index;
1881 cb = &priv->sram_desc.cb_list[last_cb_element];
1882 priv->sram_desc.last_cb_index++;
1883
1884 /* Calculate the new CB control word */
1885 if (interrupt_enabled)
1886 control |= CB_INT_ENABLED;
1887
1888 if (is_last)
1889 control |= CB_LAST_VALID;
1890
1891 control |= length;
1892
1893 /* Calculate the CB Element's checksum value */
1894 cb->status = control ^ src_address ^ dest_address;
1895
1896 /* Copy the Source and Destination addresses */
1897 cb->dest_addr = dest_address;
1898 cb->source_addr = src_address;
1899
1900 /* Copy the Control Word last */
1901 cb->control = control;
1902
1903 return 0;
1904}
1905
1906static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1907 u32 src_phys, u32 dest_address, u32 length)
1908{
1909 u32 bytes_left = length;
1910 u32 src_offset = 0;
1911 u32 dest_offset = 0;
1912 int status = 0;
1913 IPW_DEBUG_FW(">> \n");
1914 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1915 src_phys, dest_address, length);
1916 while (bytes_left > CB_MAX_LENGTH) {
1917 status = ipw_fw_dma_add_command_block(priv,
1918 src_phys + src_offset,
1919 dest_address +
1920 dest_offset,
1921 CB_MAX_LENGTH, 0, 0);
1922 if (status) {
1923 IPW_DEBUG_FW_INFO(": Failed\n");
1924 return -1;
1925 } else
1926 IPW_DEBUG_FW_INFO(": Added new cb\n");
1927
1928 src_offset += CB_MAX_LENGTH;
1929 dest_offset += CB_MAX_LENGTH;
1930 bytes_left -= CB_MAX_LENGTH;
1931 }
1932
1933 /* add the buffer tail */
1934 if (bytes_left > 0) {
1935 status =
1936 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
1937 dest_address + dest_offset,
1938 bytes_left, 0, 0);
1939 if (status) {
1940 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
1941 return -1;
1942 } else
1943 IPW_DEBUG_FW_INFO
1944 (": Adding new cb - the buffer tail\n");
1945 }
1946
1947 IPW_DEBUG_FW("<< \n");
1948 return 0;
1949}
1950
1951static int ipw_fw_dma_wait(struct ipw_priv *priv)
1952{
1953 u32 current_index = 0;
1954 u32 watchdog = 0;
1955
1956 IPW_DEBUG_FW(">> : \n");
1957
1958 current_index = ipw_fw_dma_command_block_index(priv);
1959 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1960 (int)priv->sram_desc.last_cb_index);
1961
1962 while (current_index < priv->sram_desc.last_cb_index) {
1963 udelay(50);
1964 current_index = ipw_fw_dma_command_block_index(priv);
1965
1966 watchdog++;
1967
1968 if (watchdog > 400) {
1969 IPW_DEBUG_FW_INFO("Timeout\n");
1970 ipw_fw_dma_dump_command_block(priv);
1971 ipw_fw_dma_abort(priv);
1972 return -1;
1973 }
1974 }
1975
1976 ipw_fw_dma_abort(priv);
1977
1978 /*Disable the DMA in the CSR register */
1979 ipw_set_bit(priv, CX2_RESET_REG,
1980 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1981
1982 IPW_DEBUG_FW("<< dmaWaitSync \n");
1983 return 0;
1984}
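
/*
 * Typical sequence for the DMA helpers above (see ipw_load_firmware()
 * below): ipw_fw_dma_enable() resets the command block list,
 * ipw_fw_dma_add_buffer() queues one or more transfers, ipw_fw_dma_kick()
 * starts the engine, and ipw_fw_dma_wait() polls until the last command
 * block has been consumed, aborting the DMA on timeout.
 */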
1985
1986static void ipw_remove_current_network(struct ipw_priv *priv)
1987{
1988 struct list_head *element, *safe;
1989 struct ieee80211_network *network = NULL;
1990 list_for_each_safe(element, safe, &priv->ieee->network_list) {
1991 network = list_entry(element, struct ieee80211_network, list);
1992 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
1993 list_del(element);
1994 list_add_tail(&network->list,
1995 &priv->ieee->network_free_list);
1996 }
1997 }
1998}
1999
2000/**
2001 * Check that card is still alive.
2002 * Reads debug register from domain0.
2003 * If card is present, pre-defined value should
2004 * be found there.
2005 *
2006 * @param priv
2007 * @return 1 if card is present, 0 otherwise
2008 */
2009static inline int ipw_alive(struct ipw_priv *priv)
2010{
2011 return ipw_read32(priv, 0x90) == 0xd55555d5;
2012}
2013
2014static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2015 int timeout)
2016{
2017 int i = 0;
2018
2019 do {
2020 if ((ipw_read32(priv, addr) & mask) == mask)
2021 return i;
2022 mdelay(10);
2023 i += 10;
2024 } while (i < timeout);
2025
2026 return -ETIME;
2027}
2028
2029/* These functions load the firmware and microcode needed for operation of
 2030 * the ipw hardware.  They assume the buffer has all the bits for the
 2031 * image and that the caller handles the memory allocation and cleanup.
2032 */
2033
2034static int ipw_stop_master(struct ipw_priv *priv)
2035{
2036 int rc;
2037
2038 IPW_DEBUG_TRACE(">> \n");
2039 /* stop master. typical delay - 0 */
2040 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2041
2042 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2043 CX2_RESET_REG_MASTER_DISABLED, 100);
2044 if (rc < 0) {
 2045 IPW_ERROR("stop master failed in 100ms\n");
2046 return -1;
2047 }
2048
2049 IPW_DEBUG_INFO("stop master %dms\n", rc);
2050
2051 return rc;
2052}
2053
2054static void ipw_arc_release(struct ipw_priv *priv)
2055{
2056 IPW_DEBUG_TRACE(">> \n");
2057 mdelay(5);
2058
2059 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2060
2061 /* no one knows timing, for safety add some delay */
2062 mdelay(5);
2063}
2064
2065struct fw_header {
2066 u32 version;
2067 u32 mode;
2068};
2069
2070struct fw_chunk {
2071 u32 address;
2072 u32 length;
2073};
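
/*
 * Layout of the firmware images, as consumed by ipw_get_fw() and
 * ipw_load_firmware() below: a struct fw_header, followed by a sequence of
 * chunks, each consisting of a struct fw_chunk immediately followed by
 * chunk->length bytes of payload destined for chunk->address.
 */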
2074
2075#define IPW_FW_MAJOR_VERSION 2
2076#define IPW_FW_MINOR_VERSION 2
2077
2078#define IPW_FW_MINOR(x) (((x) >> 8) & 0xff)
2079#define IPW_FW_MAJOR(x) ((x) & 0xff)
2080
2081#define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2082 IPW_FW_MAJOR_VERSION)
2083
2084#define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2085"." __stringify(IPW_FW_MINOR_VERSION) "-"
2086
2087#if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2088#define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2089#else
2090#define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2091#endif
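
/* with the version macros above, IPW_FW_NAME("bss") expands to "ipw-2.2-bss.fw" */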
2092
2093static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2094{
2095 int rc = 0, i, addr;
2096 u8 cr = 0;
2097 u16 *image;
2098
2099 image = (u16 *) data;
2100
2101 IPW_DEBUG_TRACE(">> \n");
2102
2103 rc = ipw_stop_master(priv);
2104
2105 if (rc < 0)
2106 return rc;
2107
2108// spin_lock_irqsave(&priv->lock, flags);
2109
2110 for (addr = CX2_SHARED_LOWER_BOUND;
2111 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2112 ipw_write32(priv, addr, 0);
2113 }
2114
2115 /* no ucode (yet) */
2116 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2117 /* destroy DMA queues */
2118 /* reset sequence */
2119
2120 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
2121 ipw_arc_release(priv);
2122 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2123 mdelay(1);
2124
2125 /* reset PHY */
2126 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2127 mdelay(1);
2128
2129 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2130 mdelay(1);
2131
2132 /* enable ucode store */
2133 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2134 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2135 mdelay(1);
2136
2137 /* write ucode */
2138 /**
2139 * @bug
2140 * Do NOT set indirect address register once and then
2141 * store data to indirect data register in the loop.
2142 * It seems very reasonable, but in this case DINO do not
2143 * accept ucode. It is essential to set address each time.
2144 */
2145 /* load new ipw uCode */
2146 for (i = 0; i < len / 2; i++)
2147 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2148
2149 /* enable DINO */
2150 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2151 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2152
 2153 /* this is where the igx / win driver deviates from the VAP driver. */
2154
2155 /* wait for alive response */
2156 for (i = 0; i < 100; i++) {
2157 /* poll for incoming data */
2158 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2159 if (cr & DINO_RXFIFO_DATA)
2160 break;
2161 mdelay(1);
2162 }
2163
2164 if (cr & DINO_RXFIFO_DATA) {
 2165 /* the alive command response size is NOT a multiple of 4 */
2166 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2167
2168 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2169 response_buffer[i] =
2170 ipw_read_reg32(priv, CX2_BASEBAND_RX_FIFO_READ);
2171 memcpy(&priv->dino_alive, response_buffer,
2172 sizeof(priv->dino_alive));
2173 if (priv->dino_alive.alive_command == 1
2174 && priv->dino_alive.ucode_valid == 1) {
2175 rc = 0;
2176 IPW_DEBUG_INFO
2177 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2178 "of %02d/%02d/%02d %02d:%02d\n",
2179 priv->dino_alive.software_revision,
2180 priv->dino_alive.software_revision,
2181 priv->dino_alive.device_identifier,
2182 priv->dino_alive.device_identifier,
2183 priv->dino_alive.time_stamp[0],
2184 priv->dino_alive.time_stamp[1],
2185 priv->dino_alive.time_stamp[2],
2186 priv->dino_alive.time_stamp[3],
2187 priv->dino_alive.time_stamp[4]);
2188 } else {
2189 IPW_DEBUG_INFO("Microcode is not alive\n");
2190 rc = -EINVAL;
2191 }
2192 } else {
2193 IPW_DEBUG_INFO("No alive response from DINO\n");
2194 rc = -ETIME;
2195 }
2196
 2197 /* disable DINO, otherwise for some reason the
 2198 firmware has problems getting the alive response */
2199 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2200
2201// spin_unlock_irqrestore(&priv->lock, flags);
2202
2203 return rc;
2204}
2205
2206static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2207{
2208 int rc = -1;
2209 int offset = 0;
2210 struct fw_chunk *chunk;
2211 dma_addr_t shared_phys;
2212 u8 *shared_virt;
2213
2214 IPW_DEBUG_TRACE("<< : \n");
2215 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2216
2217 if (!shared_virt)
2218 return -ENOMEM;
2219
2220 memmove(shared_virt, data, len);
2221
2222 /* Start the Dma */
2223 rc = ipw_fw_dma_enable(priv);
2224
2225 if (priv->sram_desc.last_cb_index > 0) {
 2226 /* the DMA engine is already in use; this would be a bug. */
2227 BUG();
2228 goto out;
2229 }
2230
2231 do {
2232 chunk = (struct fw_chunk *)(data + offset);
2233 offset += sizeof(struct fw_chunk);
2234 /* build DMA packet and queue up for sending */
 2235 /* dma chunk->length bytes from data + offset
 2236 * to chunk->address */
2237 /* Dma loading */
2238 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2239 chunk->address, chunk->length);
2240 if (rc) {
2241 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2242 goto out;
2243 }
2244
2245 offset += chunk->length;
2246 } while (offset < len);
2247
2248 /* Run the DMA and wait for the answer */
2249 rc = ipw_fw_dma_kick(priv);
2250 if (rc) {
2251 IPW_ERROR("dmaKick Failed\n");
2252 goto out;
2253 }
2254
2255 rc = ipw_fw_dma_wait(priv);
2256 if (rc) {
2257 IPW_ERROR("dmaWaitSync Failed\n");
2258 goto out;
2259 }
2260 out:
2261 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
2262 return rc;
2263}
2264
2265/* stop nic */
2266static int ipw_stop_nic(struct ipw_priv *priv)
2267{
2268 int rc = 0;
2269
2270 /* stop */
2271 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2272
2273 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2274 CX2_RESET_REG_MASTER_DISABLED, 500);
2275 if (rc < 0) {
2276 IPW_ERROR("wait for reg master disabled failed\n");
2277 return rc;
2278 }
2279
2280 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2281
2282 return rc;
2283}
2284
2285static void ipw_start_nic(struct ipw_priv *priv)
2286{
2287 IPW_DEBUG_TRACE(">>\n");
2288
2289 /* prvHwStartNic release ARC */
2290 ipw_clear_bit(priv, CX2_RESET_REG,
2291 CX2_RESET_REG_MASTER_DISABLED |
2292 CX2_RESET_REG_STOP_MASTER |
2293 CBD_RESET_REG_PRINCETON_RESET);
2294
2295 /* enable power management */
2296 ipw_set_bit(priv, CX2_GP_CNTRL_RW,
2297 CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2298
2299 IPW_DEBUG_TRACE("<<\n");
2300}
2301
2302static int ipw_init_nic(struct ipw_priv *priv)
2303{
2304 int rc;
2305
2306 IPW_DEBUG_TRACE(">>\n");
2307 /* reset */
2308 /*prvHwInitNic */
2309 /* set "initialization complete" bit to move adapter to D0 state */
2310 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2311
2312 /* low-level PLL activation */
2313 ipw_write32(priv, CX2_READ_INT_REGISTER,
2314 CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2315
2316 /* wait for clock stabilization */
2317 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2318 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2319 if (rc < 0)
 2320 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
2321
2322 /* assert SW reset */
2323 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2324
2325 udelay(10);
2326
2327 /* set "initialization complete" bit to move adapter to D0 state */
2328 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2329
 2330 IPW_DEBUG_TRACE("<<\n");
2331 return 0;
2332}
2333
2334/* Call this function from process context, it will sleep in request_firmware.
2335 * Probe is an ok place to call this from.
2336 */
2337static int ipw_reset_nic(struct ipw_priv *priv)
2338{
2339 int rc = 0;
2340
2341 IPW_DEBUG_TRACE(">>\n");
2342
2343 rc = ipw_init_nic(priv);
2344
2345 /* Clear the 'host command active' bit... */
2346 priv->status &= ~STATUS_HCMD_ACTIVE;
2347 wake_up_interruptible(&priv->wait_command_queue);
2348
2349 IPW_DEBUG_TRACE("<<\n");
2350 return rc;
2351}
2352
2353static int ipw_get_fw(struct ipw_priv *priv,
2354 const struct firmware **fw, const char *name)
2355{
2356 struct fw_header *header;
2357 int rc;
2358
2359 /* ask firmware_class module to get the boot firmware off disk */
2360 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2361 if (rc < 0) {
2362 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2363 return rc;
2364 }
2365
2366 header = (struct fw_header *)(*fw)->data;
2367 if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
2368 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2369 name,
2370 IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
2371 return -EINVAL;
2372 }
2373
2374 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
2375 name,
2376 IPW_FW_MAJOR(header->version),
2377 IPW_FW_MINOR(header->version),
2378 (*fw)->size - sizeof(struct fw_header));
2379 return 0;
2380}
2381
2382#define CX2_RX_BUF_SIZE (3000)
2383
2384static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2385 struct ipw_rx_queue *rxq)
2386{
2387 unsigned long flags;
2388 int i;
2389
2390 spin_lock_irqsave(&rxq->lock, flags);
2391
2392 INIT_LIST_HEAD(&rxq->rx_free);
2393 INIT_LIST_HEAD(&rxq->rx_used);
2394
2395 /* Fill the rx_used queue with _all_ of the Rx buffers */
2396 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2397 /* In the reset function, these buffers may have been allocated
2398 * to an SKB, so we need to unmap and free potential storage */
2399 if (rxq->pool[i].skb != NULL) {
2400 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2401 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
2402 dev_kfree_skb(rxq->pool[i].skb);
2403 }
2404 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2405 }
2406
2407 /* Set us so that we have processed and used all buffers, but have
2408 * not restocked the Rx queue with fresh buffers */
2409 rxq->read = rxq->write = 0;
2410 rxq->processed = RX_QUEUE_SIZE - 1;
2411 rxq->free_count = 0;
2412 spin_unlock_irqrestore(&rxq->lock, flags);
2413}
2414
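/*
 * With CONFIG_PM the firmware images are kept cached in the statics below
 * (presumably so they remain available across suspend/resume without
 * another request_firmware() call); ipw_load() clears fw_loaded in its
 * error path so that a failed load is retried from disk.
 */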
2415#ifdef CONFIG_PM
2416static int fw_loaded = 0;
2417static const struct firmware *bootfw = NULL;
2418static const struct firmware *firmware = NULL;
2419static const struct firmware *ucode = NULL;
2420#endif
2421
2422static int ipw_load(struct ipw_priv *priv)
2423{
2424#ifndef CONFIG_PM
2425 const struct firmware *bootfw = NULL;
2426 const struct firmware *firmware = NULL;
2427 const struct firmware *ucode = NULL;
2428#endif
2429 int rc = 0, retries = 3;
2430
2431#ifdef CONFIG_PM
2432 if (!fw_loaded) {
2433#endif
2434 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
2435 if (rc)
2436 goto error;
2437
2438 switch (priv->ieee->iw_mode) {
2439 case IW_MODE_ADHOC:
2440 rc = ipw_get_fw(priv, &ucode,
2441 IPW_FW_NAME("ibss_ucode"));
2442 if (rc)
2443 goto error;
2444
2445 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2446 break;
2447
2448#ifdef CONFIG_IPW_PROMISC
2449 case IW_MODE_MONITOR:
2450 rc = ipw_get_fw(priv, &ucode,
2451 IPW_FW_NAME("ibss_ucode"));
2452 if (rc)
2453 goto error;
2454
2455 rc = ipw_get_fw(priv, &firmware,
2456 IPW_FW_NAME("sniffer"));
2457 break;
2458#endif
2459 case IW_MODE_INFRA:
2460 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
2461 if (rc)
2462 goto error;
2463
2464 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2465 break;
2466
2467 default:
2468 rc = -EINVAL;
2469 }
2470
2471 if (rc)
2472 goto error;
2473
2474#ifdef CONFIG_PM
2475 fw_loaded = 1;
2476 }
2477#endif
2478
2479 if (!priv->rxq)
2480 priv->rxq = ipw_rx_queue_alloc(priv);
2481 else
2482 ipw_rx_queue_reset(priv, priv->rxq);
2483 if (!priv->rxq) {
2484 IPW_ERROR("Unable to initialize Rx queue\n");
2485 goto error;
2486 }
2487
2488 retry:
2489 /* Ensure interrupts are disabled */
2490 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2491 priv->status &= ~STATUS_INT_ENABLED;
2492
2493 /* ack pending interrupts */
2494 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2495
2496 ipw_stop_nic(priv);
2497
2498 rc = ipw_reset_nic(priv);
2499 if (rc) {
2500 IPW_ERROR("Unable to reset NIC\n");
2501 goto error;
2502 }
2503
2504 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
2505 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2506
2507 /* DMA the initial boot firmware into the device */
2508 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
2509 bootfw->size - sizeof(struct fw_header));
2510 if (rc < 0) {
2511 IPW_ERROR("Unable to load boot firmware\n");
2512 goto error;
2513 }
2514
2515 /* kick start the device */
2516 ipw_start_nic(priv);
2517
2518 /* wait for the device to finish its initial startup sequence */
2519 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2520 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2521 if (rc < 0) {
2522 IPW_ERROR("device failed to boot initial fw image\n");
2523 goto error;
2524 }
2525 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2526
2527 /* ack fw init done interrupt */
2528 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2529
2530 /* DMA the ucode into the device */
2531 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
2532 ucode->size - sizeof(struct fw_header));
2533 if (rc < 0) {
2534 IPW_ERROR("Unable to load ucode\n");
2535 goto error;
2536 }
2537
2538 /* stop nic */
2539 ipw_stop_nic(priv);
2540
2541 /* DMA bss firmware into the device */
2542 rc = ipw_load_firmware(priv, firmware->data +
2543 sizeof(struct fw_header),
2544 firmware->size - sizeof(struct fw_header));
2545 if (rc < 0) {
2546 IPW_ERROR("Unable to load firmware\n");
2547 goto error;
2548 }
2549
2550 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2551
2552 rc = ipw_queue_reset(priv);
2553 if (rc) {
2554 IPW_ERROR("Unable to initialize queues\n");
2555 goto error;
2556 }
2557
2558 /* Ensure interrupts are disabled */
2559 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2560
2561 /* kick start the device */
2562 ipw_start_nic(priv);
2563
2564 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2565 if (retries > 0) {
2566 IPW_WARNING("Parity error. Retrying init.\n");
2567 retries--;
2568 goto retry;
2569 }
2570
2571 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2572 rc = -EIO;
2573 goto error;
2574 }
2575
2576 /* wait for the device */
2577 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2578 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2579 if (rc < 0) {
2580 IPW_ERROR("device failed to start after 500ms\n");
2581 goto error;
2582 }
2583 IPW_DEBUG_INFO("device response after %dms\n", rc);
2584
2585 /* ack fw init done interrupt */
2586 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2587
2588 /* read eeprom data and initialize the eeprom region of sram */
2589 priv->eeprom_delay = 1;
2590 ipw_eeprom_init_sram(priv);
2591
2592 /* enable interrupts */
2593 ipw_enable_interrupts(priv);
2594
2595 /* Ensure our queue has valid packets */
2596 ipw_rx_queue_replenish(priv);
2597
2598 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
2599
2600 /* ack pending interrupts */
2601 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2602
2603#ifndef CONFIG_PM
2604 release_firmware(bootfw);
2605 release_firmware(ucode);
2606 release_firmware(firmware);
2607#endif
2608 return 0;
2609
2610 error:
2611 if (priv->rxq) {
2612 ipw_rx_queue_free(priv, priv->rxq);
2613 priv->rxq = NULL;
2614 }
2615 ipw_tx_queue_free(priv);
2616 if (bootfw)
2617 release_firmware(bootfw);
2618 if (ucode)
2619 release_firmware(ucode);
2620 if (firmware)
2621 release_firmware(firmware);
2622#ifdef CONFIG_PM
2623 fw_loaded = 0;
2624 bootfw = ucode = firmware = NULL;
2625#endif
2626
2627 return rc;
2628}
2629
2630/**
2631 * DMA services
2632 *
2633 * Theory of operation
2634 *
2635 * A queue is a circular buffer with 'Read' and 'Write' pointers.
2636 * Two empty entries are always kept in the buffer to protect against overflow.
2637 *
2638 * For the Tx queues there are low mark and high mark limits. If, after queuing
2639 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
2640 * When reclaiming packets (on the 'tx done' IRQ), the Tx queue is resumed once
2641 * the free space becomes > high mark.
2642 *
2643 * The IPW operates with six queues, one receive queue in the device's
2644 * sram, one transmit queue for sending commands to the device firmware,
2645 * and four transmit queues for data.
2646 *
2647 * The four transmit queues allow for performing quality of service (qos)
2648 * transmissions as per the 802.11 protocol. Currently Linux does not
2649 * provide a mechanism to the user for utilizing prioritized queues, so
2650 * we only utilize the first data transmit queue (queue1).
2651 */
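/*
 * Illustrative sketch (editor's addition, not from the original source): the
 * free space of such a circular queue is the distance from 'Write'
 * (first_empty) back to 'Read' (last_used), minus the two reserved slots:
 *
 *	space = last_used - first_empty;
 *	if (space <= 0)
 *		space += n_bd;
 *	space -= 2;
 *
 * For a hypothetical queue of n_bd = 64 entries with first_empty = 60 and
 * last_used = 10, space = 10 - 60 + 64 - 2 = 12.  ipw_queue_space() below
 * implements this calculation (additionally clamping negative results to
 * zero); the result is compared against the queue's low/high marks to stop
 * and wake the Tx queue.
 */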
2652
2653/**
2654 * Driver allocates buffers of this size for Rx
2655 */
2656
2657static inline int ipw_queue_space(const struct clx2_queue *q)
2658{
2659 int s = q->last_used - q->first_empty;
2660 if (s <= 0)
2661 s += q->n_bd;
2662 s -= 2; /* keep some reserve to not confuse empty and full situations */
2663 if (s < 0)
2664 s = 0;
2665 return s;
2666}
2667
2668static inline int ipw_queue_inc_wrap(int index, int n_bd)
2669{
2670 return (++index == n_bd) ? 0 : index;
2671}
2672
2673/**
2674 * Initialize common DMA queue structure
2675 *
2676 * @param q queue to init
2677 * @param count Number of BD's to allocate. Should be power of 2
2678 * @param read_register Address for 'read' register
2679 * (not offset within BAR, full address)
2680 * @param write_register Address for 'write' register
2681 * (not offset within BAR, full address)
2682 * @param base_register Address for 'base' register
2683 * (not offset within BAR, full address)
2684 * @param size Address for 'size' register
2685 * (not offset within BAR, full address)
2686 */
2687static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2688 int count, u32 read, u32 write, u32 base, u32 size)
2689{
2690 q->n_bd = count;
2691
2692 q->low_mark = q->n_bd / 4;
2693 if (q->low_mark < 4)
2694 q->low_mark = 4;
2695
2696 q->high_mark = q->n_bd / 8;
2697 if (q->high_mark < 2)
2698 q->high_mark = 2;
2699
2700 q->first_empty = q->last_used = 0;
2701 q->reg_r = read;
2702 q->reg_w = write;
2703
2704 ipw_write32(priv, base, q->dma_addr);
2705 ipw_write32(priv, size, count);
2706 ipw_write32(priv, read, 0);
2707 ipw_write32(priv, write, 0);
2708
2709 _ipw_read32(priv, 0x90);
2710}
2711
2712static int ipw_queue_tx_init(struct ipw_priv *priv,
2713 struct clx2_tx_queue *q,
2714 int count, u32 read, u32 write, u32 base, u32 size)
2715{
2716 struct pci_dev *dev = priv->pci_dev;
2717
2718 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
2719 if (!q->txb) {
2720 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
2721 return -ENOMEM;
2722 }
2723
2724 q->bd =
2725 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
2726 if (!q->bd) {
2727 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
2728 sizeof(q->bd[0]) * count);
2729 kfree(q->txb);
2730 q->txb = NULL;
2731 return -ENOMEM;
2732 }
2733
2734 ipw_queue_init(priv, &q->q, count, read, write, base, size);
2735 return 0;
2736}
2737
2738/**
2739 * Free one TFD, the one at index [txq->q.last_used].
2740 * Do NOT advance any indexes
2741 *
2742 * @param dev
2743 * @param txq
2744 */
2745static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2746 struct clx2_tx_queue *txq)
2747{
2748 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
2749 struct pci_dev *dev = priv->pci_dev;
2750 int i;
2751
2752 /* classify bd */
2753 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
2754 /* nothing to cleanup after for host commands */
2755 return;
2756
2757 /* sanity check */
2758 if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
2759 IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
2760 /** @todo issue fatal error, it is quite serious situation */
2761 return;
2762 }
2763
2764 /* unmap chunks if any */
2765 for (i = 0; i < bd->u.data.num_chunks; i++) {
2766 pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
2767 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
2768 if (txq->txb[txq->q.last_used]) {
2769 ieee80211_txb_free(txq->txb[txq->q.last_used]);
2770 txq->txb[txq->q.last_used] = NULL;
2771 }
2772 }
2773}
2774
2775/**
2776 * Deallocate DMA queue.
2777 *
2778 * Empty queue by removing and destroying all BD's.
2779 * Free all buffers.
2780 *
2781 * @param dev
2782 * @param q
2783 */
2784static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
2785{
2786 struct clx2_queue *q = &txq->q;
2787 struct pci_dev *dev = priv->pci_dev;
2788
2789 if (q->n_bd == 0)
2790 return;
2791
2792 /* first, empty all BD's */
2793 for (; q->first_empty != q->last_used;
2794 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
2795 ipw_queue_tx_free_tfd(priv, txq);
2796 }
2797
2798 /* free buffers belonging to queue itself */
2799 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
2800 q->dma_addr);
2801 kfree(txq->txb);
2802
2803 /* 0 fill whole structure */
2804 memset(txq, 0, sizeof(*txq));
2805}
2806
2807/**
2808 * Destroy all DMA queues and structures
2809 *
2810 * @param priv
2811 */
2812static void ipw_tx_queue_free(struct ipw_priv *priv)
2813{
2814 /* Tx CMD queue */
2815 ipw_queue_tx_free(priv, &priv->txq_cmd);
2816
2817 /* Tx queues */
2818 ipw_queue_tx_free(priv, &priv->txq[0]);
2819 ipw_queue_tx_free(priv, &priv->txq[1]);
2820 ipw_queue_tx_free(priv, &priv->txq[2]);
2821 ipw_queue_tx_free(priv, &priv->txq[3]);
2822}
2823
2824static inline void __maybe_wake_tx(struct ipw_priv *priv)
2825{
2826 if (netif_running(priv->net_dev)) {
2827 switch (priv->port_type) {
2828 case DCR_TYPE_MU_BSS:
2829 case DCR_TYPE_MU_IBSS:
2830 if (!(priv->status & STATUS_ASSOCIATED)) {
2831 return;
2832 }
2833 }
2834 netif_wake_queue(priv->net_dev);
2835 }
2836
2837}
2838
2839static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
2840{
2841 /* First 3 bytes are manufacturer */
2842 bssid[0] = priv->mac_addr[0];
2843 bssid[1] = priv->mac_addr[1];
2844 bssid[2] = priv->mac_addr[2];
2845
2846 /* Last bytes are random */
2847 get_random_bytes(&bssid[3], ETH_ALEN - 3);
2848
2849 bssid[0] &= 0xfe; /* clear multicast bit */
2850 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
2851}
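/*
 * Worked example (illustration only; values hypothetical): for an adapter
 * whose MAC address begins 00:0e:35, ipw_create_bssid() above yields a BSSID
 * such as 02:0e:35:xx:yy:zz -- bssid[0] = (0x00 & 0xfe) | 0x02 = 0x02, i.e.
 * the I/G (multicast) bit is cleared and the U/L (locally administered) bit
 * is set, as expected for a self-generated IBSS BSSID.
 */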
2852
2853static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
2854{
2855 struct ipw_station_entry entry;
2856 int i;
2857
2858 for (i = 0; i < priv->num_stations; i++) {
2859 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
2860 /* Another node is active in network */
2861 priv->missed_adhoc_beacons = 0;
2862 if (!(priv->config & CFG_STATIC_CHANNEL))
2863 /* when other nodes drop out, we drop out */
2864 priv->config &= ~CFG_ADHOC_PERSIST;
2865
2866 return i;
2867 }
2868 }
2869
2870 if (i == MAX_STATIONS)
2871 return IPW_INVALID_STATION;
2872
2873 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
2874
2875 entry.reserved = 0;
2876 entry.support_mode = 0;
2877 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2878 memcpy(priv->stations[i], bssid, ETH_ALEN);
2879 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2880 &entry, sizeof(entry));
2881 priv->num_stations++;
2882
2883 return i;
2884}
2885
2886static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
2887{
2888 int i;
2889
2890 for (i = 0; i < priv->num_stations; i++)
2891 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
2892 return i;
2893
2894 return IPW_INVALID_STATION;
2895}
2896
2897static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
2898{
2899 int err;
2900
2901 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
2902 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
2903 return;
2904 }
2905
2906 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
2907 "on channel %d.\n",
2908 MAC_ARG(priv->assoc_request.bssid),
2909 priv->assoc_request.channel);
2910
2911 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
2912 priv->status |= STATUS_DISASSOCIATING;
2913
2914 if (quiet)
2915 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
2916 else
2917 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
2918 err = ipw_send_associate(priv, &priv->assoc_request);
2919 if (err) {
2920 IPW_DEBUG_HC("Attempt to send [dis]associate command "
2921 "failed.\n");
2922 return;
2923 }
2924
2925}
2926
2927static void ipw_disassociate(void *data)
2928{
2929 ipw_send_disassociate(data, 0);
2930}
2931
2932static void notify_wx_assoc_event(struct ipw_priv *priv)
2933{
2934 union iwreq_data wrqu;
2935 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
2936 if (priv->status & STATUS_ASSOCIATED)
2937 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
2938 else
2939 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
2940 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
2941}
2942
2943struct ipw_status_code {
2944 u16 status;
2945 const char *reason;
2946};
2947
2948static const struct ipw_status_code ipw_status_codes[] = {
2949 {0x00, "Successful"},
2950 {0x01, "Unspecified failure"},
2951 {0x0A, "Cannot support all requested capabilities in the "
2952 "Capability information field"},
2953 {0x0B, "Reassociation denied due to inability to confirm that "
2954 "association exists"},
2955 {0x0C, "Association denied due to reason outside the scope of this "
2956 "standard"},
2957 {0x0D,
2958 "Responding station does not support the specified authentication "
2959 "algorithm"},
2960 {0x0E,
2961 "Received an Authentication frame with authentication sequence "
2962 "transaction sequence number out of expected sequence"},
2963 {0x0F, "Authentication rejected because of challenge failure"},
2964 {0x10, "Authentication rejected due to timeout waiting for next "
2965 "frame in sequence"},
2966 {0x11, "Association denied because AP is unable to handle additional "
2967 "associated stations"},
2968 {0x12,
2969 "Association denied due to requesting station not supporting all "
2970 "of the datarates in the BSSBasicServiceSet Parameter"},
2971 {0x13,
2972 "Association denied due to requesting station not supporting "
2973 "short preamble operation"},
2974 {0x14,
2975 "Association denied due to requesting station not supporting "
2976 "PBCC encoding"},
2977 {0x15,
2978 "Association denied due to requesting station not supporting "
2979 "channel agility"},
2980 {0x19,
2981 "Association denied due to requesting station not supporting "
2982 "short slot operation"},
2983 {0x1A,
2984 "Association denied due to requesting station not supporting "
2985 "DSSS-OFDM operation"},
2986 {0x28, "Invalid Information Element"},
2987 {0x29, "Group Cipher is not valid"},
2988 {0x2A, "Pairwise Cipher is not valid"},
2989 {0x2B, "AKMP is not valid"},
2990 {0x2C, "Unsupported RSN IE version"},
2991 {0x2D, "Invalid RSN IE Capabilities"},
2992 {0x2E, "Cipher suite is rejected per security policy"},
2993};
2994
2995#ifdef CONFIG_IPW_DEBUG
2996static const char *ipw_get_status_code(u16 status)
2997{
2998 int i;
2999 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3000 if (ipw_status_codes[i].status == status)
3001 return ipw_status_codes[i].reason;
3002 return "Unknown status value.";
3003}
3004#endif
3005
3006static inline void average_init(struct average *avg)
3007{
3008 memset(avg, 0, sizeof(*avg));
3009}
3010
3011static inline void average_add(struct average *avg, s16 val)
3012{
3013 avg->sum -= avg->entries[avg->pos];
3014 avg->sum += val;
3015 avg->entries[avg->pos++] = val;
3016 if (unlikely(avg->pos == AVG_ENTRIES)) {
3017 avg->init = 1;
3018 avg->pos = 0;
3019 }
3020}
3021
3022static inline s16 average_value(struct average *avg)
3023{
3024 if (unlikely(!avg->init)) {
3025 if (avg->pos)
3026 return avg->sum / avg->pos;
3027 return 0;
3028 }
3029
3030 return avg->sum / AVG_ENTRIES;
3031}
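/*
 * Editorial usage sketch (not part of the original source): the helpers above
 * keep a running sum over a sliding window of AVG_ENTRIES samples, making
 * average_value() O(1).  Assuming AVG_ENTRIES were 8:
 *
 *	struct average avg;
 *	average_init(&avg);
 *	average_add(&avg, -60);
 *	average_add(&avg, -70);
 *	value = average_value(&avg);	(-130 / 2 == -65, window not yet full)
 *
 * Once 8 samples have been added, avg->init is set, the divisor becomes
 * AVG_ENTRIES, and each new sample displaces the oldest one from the sum.
 */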
3032
3033static void ipw_reset_stats(struct ipw_priv *priv)
3034{
3035 u32 len = sizeof(u32);
3036
3037 priv->quality = 0;
3038
3039 average_init(&priv->average_missed_beacons);
3040 average_init(&priv->average_rssi);
3041 average_init(&priv->average_noise);
3042
3043 priv->last_rate = 0;
3044 priv->last_missed_beacons = 0;
3045 priv->last_rx_packets = 0;
3046 priv->last_tx_packets = 0;
3047 priv->last_tx_failures = 0;
3048
3049 /* Firmware managed, reset only when NIC is restarted, so we have to
3050 * normalize on the current value */
3051 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3052 &priv->last_rx_err, &len);
3053 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3054 &priv->last_tx_failures, &len);
3055
3056 /* Driver managed, reset with each association */
3057 priv->missed_adhoc_beacons = 0;
3058 priv->missed_beacons = 0;
3059 priv->tx_packets = 0;
3060 priv->rx_packets = 0;
3061
3062}
3063
3064static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3065{
3066 u32 i = 0x80000000;
3067 u32 mask = priv->rates_mask;
3068 /* If currently associated in B mode, restrict the maximum
3069 * rate match to B rates */
3070 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3071 mask &= IEEE80211_CCK_RATES_MASK;
3072
3073 /* TODO: Verify that the rate is supported by the current rates
3074 * list. */
3075
3076 while (i && !(mask & i))
3077 i >>= 1;
3078 switch (i) {
3079 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
3080 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
3081 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
3082 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
3083 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
3084 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
3085 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
3086 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
3087 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
3088 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
3089 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
3090 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
3091 }
3092
3093 if (priv->ieee->mode == IEEE_B)
3094 return 11000000;
3095 else
3096 return 54000000;
3097}
3098
3099static u32 ipw_get_current_rate(struct ipw_priv *priv)
3100{
3101 u32 rate, len = sizeof(rate);
3102 int err;
3103
3104 if (!(priv->status & STATUS_ASSOCIATED))
3105 return 0;
3106
3107 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3108 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3109 &len);
3110 if (err) {
3111 IPW_DEBUG_INFO("failed querying ordinals.\n");
3112 return 0;
3113 }
3114 } else
3115 return ipw_get_max_rate(priv);
3116
3117 switch (rate) {
3118 case IPW_TX_RATE_1MB: return 1000000;
3119 case IPW_TX_RATE_2MB: return 2000000;
3120 case IPW_TX_RATE_5MB: return 5500000;
3121 case IPW_TX_RATE_6MB: return 6000000;
3122 case IPW_TX_RATE_9MB: return 9000000;
3123 case IPW_TX_RATE_11MB: return 11000000;
3124 case IPW_TX_RATE_12MB: return 12000000;
3125 case IPW_TX_RATE_18MB: return 18000000;
3126 case IPW_TX_RATE_24MB: return 24000000;
3127 case IPW_TX_RATE_36MB: return 36000000;
3128 case IPW_TX_RATE_48MB: return 48000000;
3129 case IPW_TX_RATE_54MB: return 54000000;
3130 }
3131
3132 return 0;
3133}
3134
3135#define PERFECT_RSSI (-50)
3136#define WORST_RSSI (-85)
3137#define IPW_STATS_INTERVAL (2 * HZ)
3138static void ipw_gather_stats(struct ipw_priv *priv)
3139{
3140 u32 rx_err, rx_err_delta, rx_packets_delta;
3141 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3142 u32 missed_beacons_percent, missed_beacons_delta;
3143 u32 quality = 0;
3144 u32 len = sizeof(u32);
3145 s16 rssi;
3146 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3147 rate_quality;
3148
3149 if (!(priv->status & STATUS_ASSOCIATED)) {
3150 priv->quality = 0;
3151 return;
3152 }
3153
3154 /* Update the statistics */
3155 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3156 &priv->missed_beacons, &len);
3157 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3158 priv->last_missed_beacons = priv->missed_beacons;
3159 if (priv->assoc_request.beacon_interval) {
3160 missed_beacons_percent = missed_beacons_delta *
3161 (HZ * priv->assoc_request.beacon_interval) /
3162 (IPW_STATS_INTERVAL * 10);
3163 } else {
3164 missed_beacons_percent = 0;
3165 }
3166 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3167
3168 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3169 rx_err_delta = rx_err - priv->last_rx_err;
3170 priv->last_rx_err = rx_err;
3171
3172 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3173 tx_failures_delta = tx_failures - priv->last_tx_failures;
3174 priv->last_tx_failures = tx_failures;
3175
3176 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3177 priv->last_rx_packets = priv->rx_packets;
3178
3179 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3180 priv->last_tx_packets = priv->tx_packets;
3181
3182 /* Calculate quality based on the following:
3183 *
3184 * Missed beacon: 100% = 0, 0% = 70% missed
3185 * Rate: 60% = 1Mbs, 100% = Max
3186 * Rx and Tx errors represent a straight % of total Rx/Tx
3187 * RSSI: 100% = > -50, 0% = < -85
3188 * Rx errors: 100% = 0, 0% = 50% missed
3189 *
3190 * The lowest computed quality is used.
3191 *
3192 */
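/*
 * Editorial worked example (not from the original source): with 10% missed
 * beacons, beacon_quality = 100 - 10 = 90, rescaled below to
 * (90 - 5) * 100 / (100 - 5) = 89%.  With an average rssi of -60 dBm,
 * signal_quality = (-60 - (-85)) * 100 / (-50 - (-85)) = 2500 / 35 = 71%.
 * Note that rate_quality, as written, reduces to
 * (last_rate * 40 / last_rate) + 60 = 100 for any non-zero rate because of
 * operator precedence, so it never ends up being the clamping term.
 */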
3193#define BEACON_THRESHOLD 5
3194 beacon_quality = 100 - missed_beacons_percent;
3195 if (beacon_quality < BEACON_THRESHOLD)
3196 beacon_quality = 0;
3197 else
3198 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3199 (100 - BEACON_THRESHOLD);
3200 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3201 beacon_quality, missed_beacons_percent);
3202
3203 priv->last_rate = ipw_get_current_rate(priv);
3204 rate_quality = priv->last_rate * 40 / priv->last_rate + 60;
3205 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3206 rate_quality, priv->last_rate / 1000000);
3207
3208 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
3209 rx_quality = 100 - (rx_err_delta * 100) /
3210 (rx_packets_delta + rx_err_delta);
3211 else
3212 rx_quality = 100;
3213 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3214 rx_quality, rx_err_delta, rx_packets_delta);
3215
3216 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
3217 tx_quality = 100 - (tx_failures_delta * 100) /
3218 (tx_packets_delta + tx_failures_delta);
3219 else
3220 tx_quality = 100;
3221 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3222 tx_quality, tx_failures_delta, tx_packets_delta);
3223
3224 rssi = average_value(&priv->average_rssi);
3225 if (rssi > PERFECT_RSSI)
3226 signal_quality = 100;
3227 else if (rssi < WORST_RSSI)
3228 signal_quality = 0;
3229 else
3230 signal_quality = (rssi - WORST_RSSI) * 100 /
3231 (PERFECT_RSSI - WORST_RSSI);
3232 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3233 signal_quality, rssi);
3234
3235 quality = min(beacon_quality,
3236 min(rate_quality,
3237 min(tx_quality, min(rx_quality, signal_quality))));
3238 if (quality == beacon_quality)
3239 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
3240 quality);
3241 if (quality == rate_quality)
3242 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
3243 quality);
3244 if (quality == tx_quality)
3245 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
3246 quality);
3247 if (quality == rx_quality)
3248 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
3249 quality);
3250 if (quality == signal_quality)
3251 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
3252 quality);
3253
3254 priv->quality = quality;
3255
3256 queue_delayed_work(priv->workqueue, &priv->gather_stats,
3257 IPW_STATS_INTERVAL);
3258}
3259
3260/**
3261 * Handle host notification packet.
3262 * Called from interrupt routine
3263 */
3264static inline void ipw_rx_notification(struct ipw_priv *priv,
3265 struct ipw_rx_notification *notif)
3266{
3267 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
3268
3269 switch (notif->subtype) {
3270 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
3271 struct notif_association *assoc = &notif->u.assoc;
3272
3273 switch (assoc->state) {
3274 case CMAS_ASSOCIATED:{
3275 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3276 IPW_DL_ASSOC,
3277 "associated: '%s' " MAC_FMT
3278 " \n",
3279 escape_essid(priv->essid,
3280 priv->essid_len),
3281 MAC_ARG(priv->bssid));
3282
3283 switch (priv->ieee->iw_mode) {
3284 case IW_MODE_INFRA:
3285 memcpy(priv->ieee->bssid,
3286 priv->bssid, ETH_ALEN);
3287 break;
3288
3289 case IW_MODE_ADHOC:
3290 memcpy(priv->ieee->bssid,
3291 priv->bssid, ETH_ALEN);
3292
3293 /* clear out the station table */
3294 priv->num_stations = 0;
3295
3296 IPW_DEBUG_ASSOC
3297 ("queueing adhoc check\n");
3298 queue_delayed_work(priv->
3299 workqueue,
3300 &priv->
3301 adhoc_check,
3302 priv->
3303 assoc_request.
3304 beacon_interval);
3305 break;
3306 }
3307
3308 priv->status &= ~STATUS_ASSOCIATING;
3309 priv->status |= STATUS_ASSOCIATED;
3310
3311 netif_carrier_on(priv->net_dev);
3312 if (netif_queue_stopped(priv->net_dev)) {
3313 IPW_DEBUG_NOTIF
3314 ("waking queue\n");
3315 netif_wake_queue(priv->net_dev);
3316 } else {
3317 IPW_DEBUG_NOTIF
3318 ("starting queue\n");
3319 netif_start_queue(priv->
3320 net_dev);
3321 }
3322
3323 ipw_reset_stats(priv);
3324 /* Ensure the rate is updated immediately */
3325 priv->last_rate =
3326 ipw_get_current_rate(priv);
3327 schedule_work(&priv->gather_stats);
3328 notify_wx_assoc_event(priv);
3329
3330/* queue_delayed_work(priv->workqueue,
3331 &priv->request_scan,
3332 SCAN_ASSOCIATED_INTERVAL);
3333*/
3334 break;
3335 }
3336
3337 case CMAS_AUTHENTICATED:{
3338 if (priv->
3339 status & (STATUS_ASSOCIATED |
3340 STATUS_AUTH)) {
3341#ifdef CONFIG_IPW_DEBUG
3342 struct notif_authenticate *auth
3343 = &notif->u.auth;
3344 IPW_DEBUG(IPW_DL_NOTIF |
3345 IPW_DL_STATE |
3346 IPW_DL_ASSOC,
3347 "deauthenticated: '%s' "
3348 MAC_FMT
3349 ": (0x%04X) - %s \n",
3350 escape_essid(priv->
3351 essid,
3352 priv->
3353 essid_len),
3354 MAC_ARG(priv->bssid),
3355 ntohs(auth->status),
3356 ipw_get_status_code
3357 (ntohs
3358 (auth->status)));
3359#endif
3360
3361 priv->status &=
3362 ~(STATUS_ASSOCIATING |
3363 STATUS_AUTH |
3364 STATUS_ASSOCIATED);
3365
3366 netif_carrier_off(priv->
3367 net_dev);
3368 netif_stop_queue(priv->net_dev);
3369 queue_work(priv->workqueue,
3370 &priv->request_scan);
3371 notify_wx_assoc_event(priv);
3372 break;
3373 }
3374
3375 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3376 IPW_DL_ASSOC,
3377 "authenticated: '%s' " MAC_FMT
3378 "\n",
3379 escape_essid(priv->essid,
3380 priv->essid_len),
3381 MAC_ARG(priv->bssid));
3382 break;
3383 }
3384
3385 case CMAS_INIT:{
3386 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3387 IPW_DL_ASSOC,
3388 "disassociated: '%s' " MAC_FMT
3389 " \n",
3390 escape_essid(priv->essid,
3391 priv->essid_len),
3392 MAC_ARG(priv->bssid));
3393
3394 priv->status &=
3395 ~(STATUS_DISASSOCIATING |
3396 STATUS_ASSOCIATING |
3397 STATUS_ASSOCIATED | STATUS_AUTH);
3398
3399 netif_stop_queue(priv->net_dev);
3400 if (!(priv->status & STATUS_ROAMING)) {
3401 netif_carrier_off(priv->
3402 net_dev);
3403 notify_wx_assoc_event(priv);
3404
3405 /* Cancel any queued work ... */
3406 cancel_delayed_work(&priv->
3407 request_scan);
3408 cancel_delayed_work(&priv->
3409 adhoc_check);
3410
3411 /* Queue up another scan... */
3412 queue_work(priv->workqueue,
3413 &priv->request_scan);
3414
3415 cancel_delayed_work(&priv->
3416 gather_stats);
3417 } else {
3418 priv->status |= STATUS_ROAMING;
3419 queue_work(priv->workqueue,
3420 &priv->request_scan);
3421 }
3422
3423 ipw_reset_stats(priv);
3424 break;
3425 }
3426
3427 default:
3428 IPW_ERROR("assoc: unknown (%d)\n",
3429 assoc->state);
3430 break;
3431 }
3432
3433 break;
3434 }
3435
3436 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
3437 struct notif_authenticate *auth = &notif->u.auth;
3438 switch (auth->state) {
3439 case CMAS_AUTHENTICATED:
3440 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3441 "authenticated: '%s' " MAC_FMT " \n",
3442 escape_essid(priv->essid,
3443 priv->essid_len),
3444 MAC_ARG(priv->bssid));
3445 priv->status |= STATUS_AUTH;
3446 break;
3447
3448 case CMAS_INIT:
3449 if (priv->status & STATUS_AUTH) {
3450 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3451 IPW_DL_ASSOC,
3452 "authentication failed (0x%04X): %s\n",
3453 ntohs(auth->status),
3454 ipw_get_status_code(ntohs
3455 (auth->
3456 status)));
3457 }
3458 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3459 IPW_DL_ASSOC,
3460 "deauthenticated: '%s' " MAC_FMT "\n",
3461 escape_essid(priv->essid,
3462 priv->essid_len),
3463 MAC_ARG(priv->bssid));
3464
3465 priv->status &= ~(STATUS_ASSOCIATING |
3466 STATUS_AUTH |
3467 STATUS_ASSOCIATED);
3468
3469 netif_carrier_off(priv->net_dev);
3470 netif_stop_queue(priv->net_dev);
3471 queue_work(priv->workqueue,
3472 &priv->request_scan);
3473 notify_wx_assoc_event(priv);
3474 break;
3475
3476 case CMAS_TX_AUTH_SEQ_1:
3477 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3478 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
3479 break;
3480 case CMAS_RX_AUTH_SEQ_2:
3481 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3482 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
3483 break;
3484 case CMAS_AUTH_SEQ_1_PASS:
3485 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3486 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
3487 break;
3488 case CMAS_AUTH_SEQ_1_FAIL:
3489 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3490 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
3491 break;
3492 case CMAS_TX_AUTH_SEQ_3:
3493 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3494 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
3495 break;
3496 case CMAS_RX_AUTH_SEQ_4:
3497 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3498 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
3499 break;
3500 case CMAS_AUTH_SEQ_2_PASS:
3501 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3502 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
3503 break;
3504 case CMAS_AUTH_SEQ_2_FAIL:
3505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3506 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
3507 break;
3508 case CMAS_TX_ASSOC:
3509 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3510 IPW_DL_ASSOC, "TX_ASSOC\n");
3511 break;
3512 case CMAS_RX_ASSOC_RESP:
3513 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3514 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
3515 break;
3516 case CMAS_ASSOCIATED:
3517 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3518 IPW_DL_ASSOC, "ASSOCIATED\n");
3519 break;
3520 default:
3521 IPW_DEBUG_NOTIF("auth: failure - %d\n",
3522 auth->state);
3523 break;
3524 }
3525 break;
3526 }
3527
3528 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
3529 struct notif_channel_result *x =
3530 &notif->u.channel_result;
3531
3532 if (notif->size == sizeof(*x)) {
3533 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3534 x->channel_num);
3535 } else {
3536 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3537 "(should be %zd)\n",
3538 notif->size, sizeof(*x));
3539 }
3540 break;
3541 }
3542
3543 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
3544 struct notif_scan_complete *x = &notif->u.scan_complete;
3545 if (notif->size == sizeof(*x)) {
3546 IPW_DEBUG_SCAN
3547 ("Scan completed: type %d, %d channels, "
3548 "%d status\n", x->scan_type,
3549 x->num_channels, x->status);
3550 } else {
3551 IPW_ERROR("Scan completed of wrong size %d "
3552 "(should be %zd)\n",
3553 notif->size, sizeof(*x));
3554 }
3555
3556 priv->status &=
3557 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3558
3559 cancel_delayed_work(&priv->scan_check);
3560
3561 if (!(priv->status & (STATUS_ASSOCIATED |
3562 STATUS_ASSOCIATING |
3563 STATUS_ROAMING |
3564 STATUS_DISASSOCIATING)))
3565 queue_work(priv->workqueue, &priv->associate);
3566 else if (priv->status & STATUS_ROAMING) {
3567 /* If a scan completed and we are in roam mode, then
3568 * the scan that completed was the one requested as a
3569 * result of entering roam... so, schedule the
3570 * roam work */
3571 queue_work(priv->workqueue, &priv->roam);
3572 } else if (priv->status & STATUS_SCAN_PENDING)
3573 queue_work(priv->workqueue,
3574 &priv->request_scan);
3575
3576 priv->ieee->scans++;
3577 break;
3578 }
3579
3580 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
3581 struct notif_frag_length *x = &notif->u.frag_len;
3582
3583 if (notif->size == sizeof(*x)) {
3584 IPW_ERROR("Frag length: %d\n", x->frag_length);
3585 } else {
3586 IPW_ERROR("Frag length of wrong size %d "
3587 "(should be %zd)\n",
3588 notif->size, sizeof(*x));
3589 }
3590 break;
3591 }
3592
3593 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
3594 struct notif_link_deterioration *x =
3595 &notif->u.link_deterioration;
3596 if (notif->size == sizeof(*x)) {
3597 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3598 "link deterioration: '%s' " MAC_FMT
3599 " \n", escape_essid(priv->essid,
3600 priv->essid_len),
3601 MAC_ARG(priv->bssid));
3602 memcpy(&priv->last_link_deterioration, x,
3603 sizeof(*x));
3604 } else {
3605 IPW_ERROR("Link Deterioration of wrong size %d "
3606 "(should be %zd)\n",
3607 notif->size, sizeof(*x));
3608 }
3609 break;
3610 }
3611
3612 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
3613 IPW_ERROR("Dino config\n");
3614 if (priv->hcmd
3615 && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3616 /* TODO: Do anything special? */
3617 } else {
3618 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3619 }
3620 break;
3621 }
3622
3623 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
3624 struct notif_beacon_state *x = &notif->u.beacon_state;
3625 if (notif->size != sizeof(*x)) {
3626 IPW_ERROR
3627 ("Beacon state of wrong size %d (should "
3628 "be %zd)\n", notif->size, sizeof(*x));
3629 break;
3630 }
3631
3632 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
3633 if (priv->status & STATUS_SCANNING) {
3634 /* Stop scan to keep fw from getting
3635 * stuck... */
3636 queue_work(priv->workqueue,
3637 &priv->abort_scan);
3638 }
3639
3640 if (x->number > priv->missed_beacon_threshold &&
3641 priv->status & STATUS_ASSOCIATED) {
3642 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3643 IPW_DL_STATE,
3644 "Missed beacon: %d - disassociate\n",
3645 x->number);
3646 queue_work(priv->workqueue,
3647 &priv->disassociate);
3648 } else if (x->number > priv->roaming_threshold) {
3649 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3650 "Missed beacon: %d - initiate "
3651 "roaming\n", x->number);
3652 queue_work(priv->workqueue,
3653 &priv->roam);
3654 } else {
3655 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3656 x->number);
3657 }
3658
3659 priv->notif_missed_beacons = x->number;
3660
3661 }
3662
3663 break;
3664 }
3665
3666 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
3667 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
3668 if (notif->size == sizeof(*x)) {
3669 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3670 "0x%02x station %d\n",
3671 x->key_state, x->security_type,
3672 x->station_index);
3673 break;
3674 }
3675
3676 IPW_ERROR
3677 ("TGi Tx Key of wrong size %d (should be %zd)\n",
3678 notif->size, sizeof(*x));
3679 break;
3680 }
3681
3682 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
3683 struct notif_calibration *x = &notif->u.calibration;
3684
3685 if (notif->size == sizeof(*x)) {
3686 memcpy(&priv->calib, x, sizeof(*x));
3687 IPW_DEBUG_INFO("TODO: Calibration\n");
3688 break;
3689 }
3690
3691 IPW_ERROR
3692 ("Calibration of wrong size %d (should be %zd)\n",
3693 notif->size, sizeof(*x));
3694 break;
3695 }
3696
3697 case HOST_NOTIFICATION_NOISE_STATS:{
3698 if (notif->size == sizeof(u32)) {
3699 priv->last_noise =
3700 (u8) (notif->u.noise.value & 0xff);
3701 average_add(&priv->average_noise,
3702 priv->last_noise);
3703 break;
3704 }
3705
3706 IPW_ERROR
3707 ("Noise stat is wrong size %d (should be %zd)\n",
3708 notif->size, sizeof(u32));
3709 break;
3710 }
3711
3712 default:
3713 IPW_ERROR("Unknown notification: "
3714 "subtype=%d,flags=0x%2x,size=%d\n",
3715 notif->subtype, notif->flags, notif->size);
3716 }
3717}
3718
3719/**
3720 * Destroys all DMA structures and initialises them again
3721 *
3722 * @param priv
3723 * @return error code
3724 */
3725static int ipw_queue_reset(struct ipw_priv *priv)
3726{
3727 int rc = 0;
3728 /** @todo customize queue sizes */
3729 int nTx = 64, nTxCmd = 8;
3730 ipw_tx_queue_free(priv);
3731 /* Tx CMD queue */
3732 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
3733 CX2_TX_CMD_QUEUE_READ_INDEX,
3734 CX2_TX_CMD_QUEUE_WRITE_INDEX,
3735 CX2_TX_CMD_QUEUE_BD_BASE,
3736 CX2_TX_CMD_QUEUE_BD_SIZE);
3737 if (rc) {
3738 IPW_ERROR("Tx Cmd queue init failed\n");
3739 goto error;
3740 }
3741 /* Tx queue(s) */
3742 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3743 CX2_TX_QUEUE_0_READ_INDEX,
3744 CX2_TX_QUEUE_0_WRITE_INDEX,
3745 CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
3746 if (rc) {
3747 IPW_ERROR("Tx 0 queue init failed\n");
3748 goto error;
3749 }
3750 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3751 CX2_TX_QUEUE_1_READ_INDEX,
3752 CX2_TX_QUEUE_1_WRITE_INDEX,
3753 CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
3754 if (rc) {
3755 IPW_ERROR("Tx 1 queue init failed\n");
3756 goto error;
3757 }
3758 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3759 CX2_TX_QUEUE_2_READ_INDEX,
3760 CX2_TX_QUEUE_2_WRITE_INDEX,
3761 CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
3762 if (rc) {
3763 IPW_ERROR("Tx 2 queue init failed\n");
3764 goto error;
3765 }
3766 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3767 CX2_TX_QUEUE_3_READ_INDEX,
3768 CX2_TX_QUEUE_3_WRITE_INDEX,
3769 CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
3770 if (rc) {
3771 IPW_ERROR("Tx 3 queue init failed\n");
3772 goto error;
3773 }
3774 /* statistics */
3775 priv->rx_bufs_min = 0;
3776 priv->rx_pend_max = 0;
3777 return rc;
3778
3779 error:
3780 ipw_tx_queue_free(priv);
3781 return rc;
3782}
3783
3784/**
3785 * Reclaim Tx queue entries no longer used by the NIC.
3786 *
3787 * When the FW advances the 'R' index, all entries between the old and
3788 * new 'R' index need to be reclaimed. As a result, some free space
3789 * forms. If there is enough free space (> low mark), wake the Tx queue.
3790 *
3791 * @note Need to protect against garbage in the 'R' index
3792 * @param priv
3793 * @param txq
3794 * @param qindex
3795 * @return Number of used entries remaining in the queue
3796 */
3797static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3798 struct clx2_tx_queue *txq, int qindex)
3799{
3800 u32 hw_tail;
3801 int used;
3802 struct clx2_queue *q = &txq->q;
3803
3804 hw_tail = ipw_read32(priv, q->reg_r);
3805 if (hw_tail >= q->n_bd) {
3806 IPW_ERROR
3807 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
3808 hw_tail, q->n_bd);
3809 goto done;
3810 }
3811 for (; q->last_used != hw_tail;
3812 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3813 ipw_queue_tx_free_tfd(priv, txq);
3814 priv->tx_packets++;
3815 }
3816 done:
3817 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3818 __maybe_wake_tx(priv);
3819 }
3820 used = q->first_empty - q->last_used;
3821 if (used < 0)
3822 used += q->n_bd;
3823
3824 return used;
3825}
3826
3827static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3828 int len, int sync)
3829{
3830 struct clx2_tx_queue *txq = &priv->txq_cmd;
3831 struct clx2_queue *q = &txq->q;
3832 struct tfd_frame *tfd;
3833
3834 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
3835 IPW_ERROR("No space for Tx\n");
3836 return -EBUSY;
3837 }
3838
3839 tfd = &txq->bd[q->first_empty];
3840 txq->txb[q->first_empty] = NULL;
3841
3842 memset(tfd, 0, sizeof(*tfd));
3843 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
3844 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
3845 priv->hcmd_seq++;
3846 tfd->u.cmd.index = hcmd;
3847 tfd->u.cmd.length = len;
3848 memcpy(tfd->u.cmd.payload, buf, len);
3849 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
3850 ipw_write32(priv, q->reg_w, q->first_empty);
3851 _ipw_read32(priv, 0x90);
3852
3853 return 0;
3854}
3855
3856/*
3857 * Rx theory of operation
3858 *
3859 * The host allocates 32 DMA target addresses and passes the host address
3860 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3861 * 0 to 31
3862 *
3863 * Rx Queue Indexes
3864 * The host/firmware share two index registers for managing the Rx buffers.
3865 *
3866 * The READ index maps to the first position that the firmware may be writing
3867 * to -- the driver can read up to (but not including) this position and get
3868 * good data.
3869 * The READ index is managed by the firmware once the card is enabled.
3870 *
3871 * The WRITE index maps to the last position the driver has read from -- the
3872 * position preceding WRITE is the last slot the firmware can place a packet.
3873 *
3874 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3875 * WRITE = READ.
3876 *
3877 * During initialization the host sets up the READ queue position to the first
3878 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3879 *
3880 * When the firmware places a packet in a buffer it will advance the READ index
3881 * and fire the RX interrupt. The driver can then query the READ index and
3882 * process as many packets as possible, moving the WRITE index forward as it
3883 * resets the Rx queue buffers with new memory.
3884 *
3885 * The management in the driver is as follows:
3886 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
3887 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3888 * to replenish the ipw->rxq->rx_free.
3889 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
3890 * ipw->rxq is replenished and the READ INDEX is updated (updating the
3891 * 'processed' and 'read' driver indexes as well)
3892 * + A received packet is processed and handed to the kernel network stack,
3893 * detached from the ipw->rxq. The driver 'processed' index is updated.
3894 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
3895 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
3896 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
3897 * were enough free buffers and RX_STALLED is set it is cleared.
3898 *
3899 *
3900 * Driver sequence:
3901 *
3902 * ipw_rx_queue_alloc() Allocates rx_free
3903 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
3904 * ipw_rx_queue_restock
3905 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
3906 * queue, updates firmware pointers, and updates
3907 * the WRITE index. If insufficient rx_free buffers
3908 * are available, schedules ipw_rx_queue_replenish
3909 *
3910 * -- enable interrupts --
3911 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
3912 * READ INDEX, detaching the SKB from the pool.
3913 * Moves the packet buffer from queue to rx_used.
3914 * Calls ipw_rx_queue_restock to refill any empty
3915 * slots.
3916 * ...
3917 *
3918 */
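/*
 * Illustrative example (editor's addition): following the conventions above,
 * the number of received packets the driver can process after an RX interrupt
 * is the distance from the slot after 'processed' up to, but not including,
 * the firmware's READ index, modulo the ring size:
 *
 *	ready = (read - processed - 1 + RX_QUEUE_SIZE) % RX_QUEUE_SIZE;
 *
 * e.g. processed = 5, read = 9  =>  slots 6, 7 and 8 hold good data
 * (ready = 3), while the initial state set up by ipw_rx_queue_alloc()
 * (processed = RX_QUEUE_SIZE - 1, read = write = 0) yields ready = 0.
 */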
3919
3920/*
3921 * If there are slots in the RX queue that need to be restocked,
3922 * and we have free pre-allocated buffers, fill the ranks as much
3923 * as we can pulling from rx_free.
3924 *
3925 * This moves the 'write' index forward to catch up with 'processed', and
3926 * also updates the memory address in the firmware to reference the new
3927 * target buffer.
3928 */
3929static void ipw_rx_queue_restock(struct ipw_priv *priv)
3930{
3931 struct ipw_rx_queue *rxq = priv->rxq;
3932 struct list_head *element;
3933 struct ipw_rx_mem_buffer *rxb;
3934 unsigned long flags;
3935 int write;
3936
3937 spin_lock_irqsave(&rxq->lock, flags);
3938 write = rxq->write;
3939 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
3940 element = rxq->rx_free.next;
3941 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3942 list_del(element);
3943
3944 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
3945 rxb->dma_addr);
3946 rxq->queue[rxq->write] = rxb;
3947 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
3948 rxq->free_count--;
3949 }
3950 spin_unlock_irqrestore(&rxq->lock, flags);
3951
3952 /* If the pre-allocated buffer pool is dropping low, schedule to
3953 * refill it */
3954 if (rxq->free_count <= RX_LOW_WATERMARK)
3955 queue_work(priv->workqueue, &priv->rx_replenish);
3956
3957 /* If we've added more space for the firmware to place data, tell it */
3958 if (write != rxq->write)
3959 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
3960}
3961
3962/*
3963 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
3964 * Also restock the Rx queue via ipw_rx_queue_restock.
3965 *
3966 * This is called as a scheduled work item (except during initialization)
3967 */
3968static void ipw_rx_queue_replenish(void *data)
3969{
3970 struct ipw_priv *priv = data;
3971 struct ipw_rx_queue *rxq = priv->rxq;
3972 struct list_head *element;
3973 struct ipw_rx_mem_buffer *rxb;
3974 unsigned long flags;
3975
3976 spin_lock_irqsave(&rxq->lock, flags);
3977 while (!list_empty(&rxq->rx_used)) {
3978 element = rxq->rx_used.next;
3979 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3980 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
3981 if (!rxb->skb) {
3982 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
3983 priv->net_dev->name);
3984 /* We don't reschedule replenish work here -- we will
3985 * call the restock method and if it still needs
3986 * more buffers it will schedule replenish */
3987 break;
3988 }
3989 list_del(element);
3990
3991 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
3992 rxb->dma_addr =
3993 pci_map_single(priv->pci_dev, rxb->skb->data,
3994 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3995
3996 list_add_tail(&rxb->list, &rxq->rx_free);
3997 rxq->free_count++;
3998 }
3999 spin_unlock_irqrestore(&rxq->lock, flags);
4000
4001 ipw_rx_queue_restock(priv);
4002}
4003
4004/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4005 * If an SKB has been detached, the pool entry needs to have its SKB set to
4006 * NULL.  This free routine walks the list of pool entries and, if the SKB
4007 * is non-NULL, unmaps and frees it.
4008 */
4009static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4010{
4011 int i;
4012
4013 if (!rxq)
4014 return;
4015
4016 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4017 if (rxq->pool[i].skb != NULL) {
4018 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4019 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4020 dev_kfree_skb(rxq->pool[i].skb);
4021 }
4022 }
4023
4024 kfree(rxq);
4025}
4026
4027static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4028{
4029 struct ipw_rx_queue *rxq;
4030 int i;
4031
4032 rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
	if (!rxq)
		return NULL;
4033 memset(rxq, 0, sizeof(*rxq));
4034 spin_lock_init(&rxq->lock);
4035 INIT_LIST_HEAD(&rxq->rx_free);
4036 INIT_LIST_HEAD(&rxq->rx_used);
4037
4038 /* Fill the rx_used queue with _all_ of the Rx buffers */
4039 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4040 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4041
4042 /* Set us so that we have processed and used all buffers, but have
4043 * not restocked the Rx queue with fresh buffers */
4044 rxq->read = rxq->write = 0;
4045 rxq->processed = RX_QUEUE_SIZE - 1;
4046 rxq->free_count = 0;
4047
4048 return rxq;
4049}
4050
4051static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4052{
4053 rate &= ~IEEE80211_BASIC_RATE_MASK;
4054 if (ieee_mode == IEEE_A) {
4055 switch (rate) {
4056 case IEEE80211_OFDM_RATE_6MB:
4057 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4058 1 : 0;
4059 case IEEE80211_OFDM_RATE_9MB:
4060 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4061 1 : 0;
4062 case IEEE80211_OFDM_RATE_12MB:
4063 return priv->
4064 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4065 case IEEE80211_OFDM_RATE_18MB:
4066 return priv->
4067 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4068 case IEEE80211_OFDM_RATE_24MB:
4069 return priv->
4070 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4071 case IEEE80211_OFDM_RATE_36MB:
4072 return priv->
4073 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4074 case IEEE80211_OFDM_RATE_48MB:
4075 return priv->
4076 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4077 case IEEE80211_OFDM_RATE_54MB:
4078 return priv->
4079 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4080 default:
4081 return 0;
4082 }
4083 }
4084
4085 /* B and G mixed */
4086 switch (rate) {
4087 case IEEE80211_CCK_RATE_1MB:
4088 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4089 case IEEE80211_CCK_RATE_2MB:
4090 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4091 case IEEE80211_CCK_RATE_5MB:
4092 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4093 case IEEE80211_CCK_RATE_11MB:
4094 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4095 }
4096
4097 /* If we are limited to B modulations, bail at this point */
4098 if (ieee_mode == IEEE_B)
4099 return 0;
4100
4101 /* G */
4102 switch (rate) {
4103 case IEEE80211_OFDM_RATE_6MB:
4104 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4105 case IEEE80211_OFDM_RATE_9MB:
4106 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
4107 case IEEE80211_OFDM_RATE_12MB:
4108 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4109 case IEEE80211_OFDM_RATE_18MB:
4110 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4111 case IEEE80211_OFDM_RATE_24MB:
4112 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4113 case IEEE80211_OFDM_RATE_36MB:
4114 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4115 case IEEE80211_OFDM_RATE_48MB:
4116 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4117 case IEEE80211_OFDM_RATE_54MB:
4118 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4119 }
4120
4121 return 0;
4122}
4123
4124static int ipw_compatible_rates(struct ipw_priv *priv,
4125 const struct ieee80211_network *network,
4126 struct ipw_supported_rates *rates)
4127{
4128 int num_rates, i;
4129
4130 memset(rates, 0, sizeof(*rates));
4131 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
4132 rates->num_rates = 0;
4133 for (i = 0; i < num_rates; i++) {
4134 if (!ipw_is_rate_in_mask
4135 (priv, network->mode, network->rates[i])) {
4136 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4137 network->rates[i], priv->rates_mask);
4138 continue;
4139 }
4140
4141 rates->supported_rates[rates->num_rates++] = network->rates[i];
4142 }
4143
4144 num_rates =
4145 min(network->rates_ex_len, (u8) (IPW_MAX_RATES - num_rates));
4146 for (i = 0; i < num_rates; i++) {
4147 if (!ipw_is_rate_in_mask
4148 (priv, network->mode, network->rates_ex[i])) {
4149 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4150 network->rates_ex[i], priv->rates_mask);
4151 continue;
4152 }
4153
4154 rates->supported_rates[rates->num_rates++] =
4155 network->rates_ex[i];
4156 }
4157
4158 return rates->num_rates;
4159}
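/*
 * Editorial example (not from the original source): a beacon advertising
 * 1, 2, 5.5 and 11 Mb as basic rates carries them as 0x82, 0x84, 0x8b and
 * 0x96 (the high bit being IEEE80211_BASIC_RATE_MASK).  ipw_is_rate_in_mask()
 * strips that bit before testing priv->rates_mask, so with a mask limited to
 * the CCK rates, ipw_compatible_rates() above keeps all four of them and
 * drops any advertised OFDM rates.
 */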
4160
4161static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4162 const struct ipw_supported_rates *src)
4163{
4164 u8 i;
4165 for (i = 0; i < src->num_rates; i++)
4166 dest->supported_rates[i] = src->supported_rates[i];
4167 dest->num_rates = src->num_rates;
4168}
4169
4170/* TODO: Look at sniffed packets in the air to determine if the basic rate
4171 * mask should ever be used -- right now all callers that add the scan rates
4172 * pass modulation = CCK, so BASIC_RATE_MASK is never set... */
4173static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4174 u8 modulation, u32 rate_mask)
4175{
4176 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4177 IEEE80211_BASIC_RATE_MASK : 0;
4178
4179 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4180 rates->supported_rates[rates->num_rates++] =
4181 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4182
4183 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4184 rates->supported_rates[rates->num_rates++] =
4185 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4186
4187 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4188 rates->supported_rates[rates->num_rates++] = basic_mask |
4189 IEEE80211_CCK_RATE_5MB;
4190
4191 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4192 rates->supported_rates[rates->num_rates++] = basic_mask |
4193 IEEE80211_CCK_RATE_11MB;
4194}
4195
4196static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4197 u8 modulation, u32 rate_mask)
4198{
4199 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4200 IEEE80211_BASIC_RATE_MASK : 0;
4201
4202 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4203 rates->supported_rates[rates->num_rates++] = basic_mask |
4204 IEEE80211_OFDM_RATE_6MB;
4205
4206 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4207 rates->supported_rates[rates->num_rates++] =
4208 IEEE80211_OFDM_RATE_9MB;
4209
4210 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4211 rates->supported_rates[rates->num_rates++] = basic_mask |
4212 IEEE80211_OFDM_RATE_12MB;
4213
4214 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4215 rates->supported_rates[rates->num_rates++] =
4216 IEEE80211_OFDM_RATE_18MB;
4217
4218 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4219 rates->supported_rates[rates->num_rates++] = basic_mask |
4220 IEEE80211_OFDM_RATE_24MB;
4221
4222 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4223 rates->supported_rates[rates->num_rates++] =
4224 IEEE80211_OFDM_RATE_36MB;
4225
4226 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4227 rates->supported_rates[rates->num_rates++] =
4228 IEEE80211_OFDM_RATE_48MB;
4229
4230 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4231 rates->supported_rates[rates->num_rates++] =
4232 IEEE80211_OFDM_RATE_54MB;
4233}
4234
4235struct ipw_network_match {
4236 struct ieee80211_network *network;
4237 struct ipw_supported_rates rates;
4238};
4239
4240static int ipw_best_network(struct ipw_priv *priv,
4241 struct ipw_network_match *match,
4242 struct ieee80211_network *network, int roaming)
4243{
4244 struct ipw_supported_rates rates;
4245
4246 /* Verify that this network's capability is compatible with the
4247 * current mode (AdHoc or Infrastructure) */
4248 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
4249 !(network->capability & WLAN_CAPABILITY_ESS)) ||
4250 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4251 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4252 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
4253 "capability mismatch.\n",
4254 escape_essid(network->ssid, network->ssid_len),
4255 MAC_ARG(network->bssid));
4256 return 0;
4257 }
4258
4259 /* If we do not have an ESSID for this AP, we can not associate with
4260 * it */
4261 if (network->flags & NETWORK_EMPTY_ESSID) {
4262 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4263 "because of hidden ESSID.\n",
4264 escape_essid(network->ssid, network->ssid_len),
4265 MAC_ARG(network->bssid));
4266 return 0;
4267 }
4268
4269 if (unlikely(roaming)) {
4270 /* If we are roaming, then check whether this is a valid
4271 * network to try to roam to */
4272 if ((network->ssid_len != match->network->ssid_len) ||
4273 memcmp(network->ssid, match->network->ssid,
4274 network->ssid_len)) {
4275 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
4276 "because of non-network ESSID.\n",
4277 escape_essid(network->ssid,
4278 network->ssid_len),
4279 MAC_ARG(network->bssid));
4280 return 0;
4281 }
4282 } else {
4283 /* If an ESSID has been configured then compare the broadcast
4284 * ESSID to ours */
4285 if ((priv->config & CFG_STATIC_ESSID) &&
4286 ((network->ssid_len != priv->essid_len) ||
4287 memcmp(network->ssid, priv->essid,
4288 min(network->ssid_len, priv->essid_len)))) {
4289 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4290 strncpy(escaped,
4291 escape_essid(network->ssid, network->ssid_len),
4292 sizeof(escaped));
4293 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4294 "because of ESSID mismatch: '%s'.\n",
4295 escaped, MAC_ARG(network->bssid),
4296 escape_essid(priv->essid,
4297 priv->essid_len));
4298 return 0;
4299 }
4300 }
4301
4302 /* If the old network's signal is stronger than this one's, don't
4303 * bother testing everything else. */
4304 if (match->network && match->network->stats.rssi > network->stats.rssi) {
4305 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4306 strncpy(escaped,
4307 escape_essid(network->ssid, network->ssid_len),
4308 sizeof(escaped));
4309 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4310 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4311 escaped, MAC_ARG(network->bssid),
4312 escape_essid(match->network->ssid,
4313 match->network->ssid_len),
4314 MAC_ARG(match->network->bssid));
4315 return 0;
4316 }
4317
4318 /* If this network has already had an association attempt within the
4319 * last 5 seconds, do not try to associate again... */
4320 if (network->last_associate &&
4321 time_after(network->last_associate + (HZ * 5UL), jiffies)) {
4322 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4323 "because of storming (%lu since last "
4324 "assoc attempt).\n",
4325 escape_essid(network->ssid, network->ssid_len),
4326 MAC_ARG(network->bssid),
4327 (jiffies - network->last_associate) / HZ);
4328 return 0;
4329 }
4330
4331 /* Now go through and see if the requested network is valid... */
4332 if (priv->ieee->scan_age != 0 &&
4333 jiffies - network->last_scanned > priv->ieee->scan_age) {
4334 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4335 "because of age: %lums.\n",
4336 escape_essid(network->ssid, network->ssid_len),
4337 MAC_ARG(network->bssid),
4338 (jiffies - network->last_scanned) / (HZ / 100));
4339 return 0;
4340 }
4341
4342 if ((priv->config & CFG_STATIC_CHANNEL) &&
4343 (network->channel != priv->channel)) {
4344 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4345 "because of channel mismatch: %d != %d.\n",
4346 escape_essid(network->ssid, network->ssid_len),
4347 MAC_ARG(network->bssid),
4348 network->channel, priv->channel);
4349 return 0;
4350 }
4351
4352	/* Verify privacy compatibility */
4353 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
4354 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4355 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4356 "because of privacy mismatch: %s != %s.\n",
4357 escape_essid(network->ssid, network->ssid_len),
4358 MAC_ARG(network->bssid),
4359 priv->capability & CAP_PRIVACY_ON ? "on" :
4360 "off",
4361 network->capability &
4362 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
4363 return 0;
4364 }
4365
4366 if ((priv->config & CFG_STATIC_BSSID) &&
4367 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4368 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4369 "because of BSSID mismatch: " MAC_FMT ".\n",
4370 escape_essid(network->ssid, network->ssid_len),
4371 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
4372 return 0;
4373 }
4374
4375 /* Filter out any incompatible freq / mode combinations */
4376 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4377 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4378 "because of invalid frequency/mode "
4379 "combination.\n",
4380 escape_essid(network->ssid, network->ssid_len),
4381 MAC_ARG(network->bssid));
4382 return 0;
4383 }
4384
4385 ipw_compatible_rates(priv, network, &rates);
4386 if (rates.num_rates == 0) {
4387 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4388 "because of no compatible rates.\n",
4389 escape_essid(network->ssid, network->ssid_len),
4390 MAC_ARG(network->bssid));
4391 return 0;
4392 }
4393
4394	/* TODO: Perform any further minimal comparative tests. We do not
4395 * want to put too much policy logic here; intelligent scan selection
4396 * should occur within a generic IEEE 802.11 user space tool. */
4397
4398 /* Set up 'new' AP to this network */
4399 ipw_copy_rates(&match->rates, &rates);
4400 match->network = network;
4401
4402 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4403 escape_essid(network->ssid, network->ssid_len),
4404 MAC_ARG(network->bssid));
4405
4406 return 1;
4407}
4408
4409static void ipw_adhoc_create(struct ipw_priv *priv,
4410 struct ieee80211_network *network)
4411{
4412 /*
4413 * For the purposes of scanning, we can set our wireless mode
4414 * to trigger scans across combinations of bands, but when it
4415	 * comes to creating a new ad-hoc network, we have to tell the FW
4416 * exactly which band to use.
4417 *
4418 * We also have the possibility of an invalid channel for the
4419	 * chosen band. Attempting to create a new ad-hoc network
4420 * with an invalid channel for wireless mode will trigger a
4421 * FW fatal error.
4422 */
4423 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
4424 if (network->mode) {
4425 network->channel = priv->channel;
4426 } else {
4427 IPW_WARNING("Overriding invalid channel\n");
4428 if (priv->ieee->mode & IEEE_A) {
4429 network->mode = IEEE_A;
4430 priv->channel = band_a_active_channel[0];
4431 } else if (priv->ieee->mode & IEEE_G) {
4432 network->mode = IEEE_G;
4433 priv->channel = band_b_active_channel[0];
4434 } else {
4435 network->mode = IEEE_B;
4436 priv->channel = band_b_active_channel[0];
4437 }
4438 }
4439
4440 network->channel = priv->channel;
4441 priv->config |= CFG_ADHOC_PERSIST;
4442 ipw_create_bssid(priv, network->bssid);
4443 network->ssid_len = priv->essid_len;
4444 memcpy(network->ssid, priv->essid, priv->essid_len);
4445 memset(&network->stats, 0, sizeof(network->stats));
4446 network->capability = WLAN_CAPABILITY_IBSS;
4447 if (priv->capability & CAP_PRIVACY_ON)
4448 network->capability |= WLAN_CAPABILITY_PRIVACY;
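	/* Split our supported rates between the basic rates array and the
	 * extended rates array, mirroring the Supported Rates / Extended
	 * Supported Rates split used in 802.11 beacon frames. */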
4449 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4450 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
4451 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4452 memcpy(network->rates_ex,
4453 &priv->rates.supported_rates[network->rates_len],
4454 network->rates_ex_len);
4455 network->last_scanned = 0;
4456 network->flags = 0;
4457 network->last_associate = 0;
4458 network->time_stamp[0] = 0;
4459 network->time_stamp[1] = 0;
4460 network->beacon_interval = 100; /* Default */
4461 network->listen_interval = 10; /* Default */
4462 network->atim_window = 0; /* Default */
4463#ifdef CONFIG_IEEE80211_WPA
4464 network->wpa_ie_len = 0;
4465 network->rsn_ie_len = 0;
4466#endif /* CONFIG_IEEE80211_WPA */
4467}
4468
4469static void ipw_send_wep_keys(struct ipw_priv *priv)
4470{
4471 struct ipw_wep_key *key;
4472 int i;
4473 struct host_cmd cmd = {
4474 .cmd = IPW_CMD_WEP_KEY,
4475 .len = sizeof(*key)
4476 };
4477
4478 key = (struct ipw_wep_key *)&cmd.param;
4479 key->cmd_id = DINO_CMD_WEP_KEY;
4480 key->seq_num = 0;
4481
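	/* Push all four WEP key slots down to the firmware; slots whose bit
	 * is not set in priv->sec.flags are sent with a zero key_size,
	 * presumably clearing them in the firmware. */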
4482 for (i = 0; i < 4; i++) {
4483 key->key_index = i;
4484 if (!(priv->sec.flags & (1 << i))) {
4485 key->key_size = 0;
4486 } else {
4487 key->key_size = priv->sec.key_sizes[i];
4488 memcpy(key->key, priv->sec.keys[i], key->key_size);
4489 }
4490
4491 if (ipw_send_cmd(priv, &cmd)) {
4492 IPW_ERROR("failed to send WEP_KEY command\n");
4493 return;
4494 }
4495 }
4496}
4497
4498static void ipw_adhoc_check(void *data)
4499{
4500 struct ipw_priv *priv = data;
4501
4502 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4503 !(priv->config & CFG_ADHOC_PERSIST)) {
4504 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4505 ipw_remove_current_network(priv);
4506 ipw_disassociate(priv);
4507 return;
4508 }
4509
4510 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4511 priv->assoc_request.beacon_interval);
4512}
4513
4514#ifdef CONFIG_IPW_DEBUG
4515static void ipw_debug_config(struct ipw_priv *priv)
4516{
4517 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4518 "[CFG 0x%08X]\n", priv->config);
4519 if (priv->config & CFG_STATIC_CHANNEL)
4520 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
4521 else
4522 IPW_DEBUG_INFO("Channel unlocked.\n");
4523 if (priv->config & CFG_STATIC_ESSID)
4524 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4525 escape_essid(priv->essid, priv->essid_len));
4526 else
4527 IPW_DEBUG_INFO("ESSID unlocked.\n");
4528 if (priv->config & CFG_STATIC_BSSID)
4529		IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n", MAC_ARG(priv->bssid));
4530 else
4531 IPW_DEBUG_INFO("BSSID unlocked.\n");
4532 if (priv->capability & CAP_PRIVACY_ON)
4533 IPW_DEBUG_INFO("PRIVACY on\n");
4534 else
4535 IPW_DEBUG_INFO("PRIVACY off\n");
4536 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
4537}
4538#else
4539#define ipw_debug_config(x) do {} while (0)
4540#endif
4541
4542static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4543 struct ieee80211_network *network)
4544{
4545 /* TODO: Verify that this works... */
4546 struct ipw_fixed_rate fr = {
4547 .tx_rates = priv->rates_mask
4548 };
4549 u32 reg;
4550 u16 mask = 0;
4551
4552 /* Identify 'current FW band' and match it with the fixed
4553 * Tx rates */
4554
4555 switch (priv->ieee->freq_band) {
4556 case IEEE80211_52GHZ_BAND: /* A only */
4557 /* IEEE_A */
4558 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4559 /* Invalid fixed rate mask */
4560 fr.tx_rates = 0;
4561 break;
4562 }
4563
4564 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4565 break;
4566
4567 default: /* 2.4Ghz or Mixed */
4568 /* IEEE_B */
4569 if (network->mode == IEEE_B) {
4570 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
4571 /* Invalid fixed rate mask */
4572 fr.tx_rates = 0;
4573 }
4574 break;
4575 }
4576
4577 /* IEEE_G */
4578 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
4579 IEEE80211_OFDM_RATES_MASK)) {
4580 /* Invalid fixed rate mask */
4581 fr.tx_rates = 0;
4582 break;
4583 }
4584
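		/* Repack the 6, 9 and 12 Mb OFDM bits one position lower
		 * before writing the mask to the firmware; presumably the
		 * fixed-rate override register uses a denser bit layout for
		 * these rates in mixed b/g operation. */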
4585 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
4586 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
4587 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
4588 }
4589
4590 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
4591 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
4592 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
4593 }
4594
4595 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
4596 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
4597 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
4598 }
4599
4600 fr.tx_rates |= mask;
4601 break;
4602 }
4603
4604 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4605 ipw_write_reg32(priv, reg, *(u32 *) & fr);
4606}
4607
4608static int ipw_associate_network(struct ipw_priv *priv,
4609 struct ieee80211_network *network,
4610 struct ipw_supported_rates *rates, int roaming)
4611{
4612 int err;
4613
4614 if (priv->config & CFG_FIXED_RATE)
4615 ipw_set_fixed_rate(priv, network);
4616
4617 if (!(priv->config & CFG_STATIC_ESSID)) {
4618 priv->essid_len = min(network->ssid_len,
4619 (u8) IW_ESSID_MAX_SIZE);
4620 memcpy(priv->essid, network->ssid, priv->essid_len);
4621 }
4622
4623 network->last_associate = jiffies;
4624
4625 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
4626 priv->assoc_request.channel = network->channel;
4627 if ((priv->capability & CAP_PRIVACY_ON) &&
4628 (priv->capability & CAP_SHARED_KEY)) {
4629 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
4630 priv->assoc_request.auth_key = priv->sec.active_key;
4631 } else {
4632 priv->assoc_request.auth_type = AUTH_OPEN;
4633 priv->assoc_request.auth_key = 0;
4634 }
4635
4636 if (priv->capability & CAP_PRIVACY_ON)
4637 ipw_send_wep_keys(priv);
4638
4639 /*
4640 * It is valid for our ieee device to support multiple modes, but
4641 * when it comes to associating to a given network we have to choose
4642 * just one mode.
4643 */
4644 if (network->mode & priv->ieee->mode & IEEE_A)
4645 priv->assoc_request.ieee_mode = IPW_A_MODE;
4646 else if (network->mode & priv->ieee->mode & IEEE_G)
4647 priv->assoc_request.ieee_mode = IPW_G_MODE;
4648 else if (network->mode & priv->ieee->mode & IEEE_B)
4649 priv->assoc_request.ieee_mode = IPW_B_MODE;
4650
4651	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
4652 "802.11%c [%d], enc=%s%s%s%c%c\n",
4653 roaming ? "Rea" : "A",
4654 escape_essid(priv->essid, priv->essid_len),
4655 network->channel,
4656 ipw_modes[priv->assoc_request.ieee_mode],
4657 rates->num_rates,
4658 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
4659 priv->capability & CAP_PRIVACY_ON ?
4660 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
4661 "(open)") : "",
4662 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
4663 priv->capability & CAP_PRIVACY_ON ?
4664 '1' + priv->sec.active_key : '.',
4665 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
4666
4667 priv->assoc_request.beacon_interval = network->beacon_interval;
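	/* In ad-hoc mode a network whose TSF timestamp is still zero is one
	 * we have to start ourselves, so request an IBSS start; otherwise
	 * (re)associate and seed the request with the network's TSF. */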
4668 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
4669 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
4670 priv->assoc_request.assoc_type = HC_IBSS_START;
4671 priv->assoc_request.assoc_tsf_msw = 0;
4672 priv->assoc_request.assoc_tsf_lsw = 0;
4673 } else {
4674 if (unlikely(roaming))
4675 priv->assoc_request.assoc_type = HC_REASSOCIATE;
4676 else
4677 priv->assoc_request.assoc_type = HC_ASSOCIATE;
4678 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
4679 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
4680 }
4681
4682 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
4683
4684 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
4685 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
4686 priv->assoc_request.atim_window = network->atim_window;
4687 } else {
4688 memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
4689 priv->assoc_request.atim_window = 0;
4690 }
4691
4692 priv->assoc_request.capability = network->capability;
4693 priv->assoc_request.listen_interval = network->listen_interval;
4694
4695 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
4696 if (err) {
4697 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
4698 return err;
4699 }
4700
4701 rates->ieee_mode = priv->assoc_request.ieee_mode;
4702 rates->purpose = IPW_RATE_CONNECT;
4703 ipw_send_supported_rates(priv, rates);
4704
4705 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
4706 priv->sys_config.dot11g_auto_detection = 1;
4707 else
4708 priv->sys_config.dot11g_auto_detection = 0;
4709 err = ipw_send_system_config(priv, &priv->sys_config);
4710 if (err) {
4711 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
4712 return err;
4713 }
4714
4715 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
4716 err = ipw_set_sensitivity(priv, network->stats.rssi);
4717 if (err) {
4718 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4719 return err;
4720 }
4721
4722 /*
4723 * If preemption is enabled, it is possible for the association
4724	 * to complete before we return from ipw_send_associate. Therefore
4725	 * we have to be sure to update our private data first.
4726 */
4727 priv->channel = network->channel;
4728 memcpy(priv->bssid, network->bssid, ETH_ALEN);
4729 priv->status |= STATUS_ASSOCIATING;
4730 priv->status &= ~STATUS_SECURITY_UPDATED;
4731
4732 priv->assoc_network = network;
4733
4734 err = ipw_send_associate(priv, &priv->assoc_request);
4735 if (err) {
4736 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4737 return err;
4738 }
4739
4740 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
4741 escape_essid(priv->essid, priv->essid_len),
4742 MAC_ARG(priv->bssid));
4743
4744 return 0;
4745}
4746
4747static void ipw_roam(void *data)
4748{
4749 struct ipw_priv *priv = data;
4750 struct ieee80211_network *network = NULL;
4751 struct ipw_network_match match = {
4752 .network = priv->assoc_network
4753 };
4754
4755 /* The roaming process is as follows:
4756 *
4757 * 1. Missed beacon threshold triggers the roaming process by
4758 * setting the status ROAM bit and requesting a scan.
4759 * 2. When the scan completes, it schedules the ROAM work
4760 * 3. The ROAM work looks at all of the known networks for one that
4761	 *    is a better network than the one we are currently associated
4762	 *    with. If none is found, the ROAM process is over (ROAM bit cleared).
4763 * 4. If a better network is found, a disassociation request is
4764 * sent.
4765 * 5. When the disassociation completes, the roam work is again
4766 * scheduled. The second time through, the driver is no longer
4767 * associated, and the newly selected network is sent an
4768 * association request.
4769	 * 6. At this point, the roaming process is complete and the ROAM
4770 * status bit is cleared.
4771 */
4772
4773 /* If we are no longer associated, and the roaming bit is no longer
4774 * set, then we are not actively roaming, so just return */
4775 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
4776 return;
4777
4778 if (priv->status & STATUS_ASSOCIATED) {
4779 /* First pass through ROAM process -- look for a better
4780 * network */
4781 u8 rssi = priv->assoc_network->stats.rssi;
4782 priv->assoc_network->stats.rssi = -128;
4783 list_for_each_entry(network, &priv->ieee->network_list, list) {
4784 if (network != priv->assoc_network)
4785 ipw_best_network(priv, &match, network, 1);
4786 }
4787 priv->assoc_network->stats.rssi = rssi;
4788
4789 if (match.network == priv->assoc_network) {
4790 IPW_DEBUG_ASSOC("No better APs in this network to "
4791 "roam to.\n");
4792 priv->status &= ~STATUS_ROAMING;
4793 ipw_debug_config(priv);
4794 return;
4795 }
4796
4797 ipw_send_disassociate(priv, 1);
4798 priv->assoc_network = match.network;
4799
4800 return;
4801 }
4802
4803 /* Second pass through ROAM process -- request association */
4804 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
4805 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
4806 priv->status &= ~STATUS_ROAMING;
4807}
4808
4809static void ipw_associate(void *data)
4810{
4811 struct ipw_priv *priv = data;
4812
4813 struct ieee80211_network *network = NULL;
4814 struct ipw_network_match match = {
4815 .network = NULL
4816 };
4817 struct ipw_supported_rates *rates;
4818 struct list_head *element;
4819
4820 if (!(priv->config & CFG_ASSOCIATE) &&
4821 !(priv->config & (CFG_STATIC_ESSID |
4822 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
4823 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
4824 return;
4825 }
4826
4827 list_for_each_entry(network, &priv->ieee->network_list, list)
4828 ipw_best_network(priv, &match, network, 0);
4829
4830 network = match.network;
4831 rates = &match.rates;
4832
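	/* If no scanned network matched but we are configured for ad-hoc
	 * operation with a static ESSID and ad-hoc creation enabled, pull a
	 * free network entry off the free list and create our own IBSS. */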
4833 if (network == NULL &&
4834 priv->ieee->iw_mode == IW_MODE_ADHOC &&
4835 priv->config & CFG_ADHOC_CREATE &&
4836 priv->config & CFG_STATIC_ESSID &&
4837 !list_empty(&priv->ieee->network_free_list)) {
4838 element = priv->ieee->network_free_list.next;
4839 network = list_entry(element, struct ieee80211_network, list);
4840 ipw_adhoc_create(priv, network);
4841 rates = &priv->rates;
4842 list_del(element);
4843 list_add_tail(&network->list, &priv->ieee->network_list);
4844 }
4845
4846 /* If we reached the end of the list, then we don't have any valid
4847 * matching APs */
4848 if (!network) {
4849 ipw_debug_config(priv);
4850
4851 queue_delayed_work(priv->workqueue, &priv->request_scan,
4852 SCAN_INTERVAL);
4853
4854 return;
4855 }
4856
4857 ipw_associate_network(priv, network, rates, 0);
4858}
4859
4860static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4861 struct ipw_rx_mem_buffer *rxb,
4862 struct ieee80211_rx_stats *stats)
4863{
4864 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
4865
4866 /* We received data from the HW, so stop the watchdog */
4867 priv->net_dev->trans_start = jiffies;
4868
4869 /* We only process data packets if the
4870 * interface is open */
4871 if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
4872 skb_tailroom(rxb->skb))) {
4873 priv->ieee->stats.rx_errors++;
4874 priv->wstats.discard.misc++;
4875 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
4876 return;
4877 } else if (unlikely(!netif_running(priv->net_dev))) {
4878 priv->ieee->stats.rx_dropped++;
4879 priv->wstats.discard.misc++;
4880 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
4881 return;
4882 }
4883
4884 /* Advance skb->data to the start of the actual payload */
4885 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
4886
4887 /* Set the size of the skb to the size of the frame */
4888 skb_put(rxb->skb, pkt->u.frame.length);
4889
4890 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
4891
4892 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
4893 priv->ieee->stats.rx_errors++;
4894 else /* ieee80211_rx succeeded, so it now owns the SKB */
4895 rxb->skb = NULL;
4896}
4897
4898/*
4899 * Main entry function for receiving a packet with 802.11 headers. This
4900 * should be called whenever the FW has notified us that there is a new
4901 * skb in the receive queue.
4902 */
4903static void ipw_rx(struct ipw_priv *priv)
4904{
4905 struct ipw_rx_mem_buffer *rxb;
4906 struct ipw_rx_packet *pkt;
4907 struct ieee80211_hdr *header;
4908 u32 r, w, i;
4909 u8 network_packet;
4910
4911 r = ipw_read32(priv, CX2_RX_READ_INDEX);
4912 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
4913 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
4914
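	/* Walk the rx ring from the slot after the last one we processed up
	 * to (but not including) the index 'r' reported by the hardware;
	 * each slot in between holds a DMA'd receive buffer to hand up. */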
4915 while (i != r) {
4916 rxb = priv->rxq->queue[i];
4917#ifdef CONFIG_IPW_DEBUG
4918 if (unlikely(rxb == NULL)) {
4919 printk(KERN_CRIT "Queue not allocated!\n");
4920 break;
4921 }
4922#endif
4923 priv->rxq->queue[i] = NULL;
4924
4925 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4926 CX2_RX_BUF_SIZE,
4927 PCI_DMA_FROMDEVICE);
4928
4929 pkt = (struct ipw_rx_packet *)rxb->skb->data;
4930 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
4931 pkt->header.message_type,
4932 pkt->header.rx_seq_num, pkt->header.control_bits);
4933
4934 switch (pkt->header.message_type) {
4935 case RX_FRAME_TYPE: /* 802.11 frame */ {
4936 struct ieee80211_rx_stats stats = {
4937 .rssi = pkt->u.frame.rssi_dbm -
4938 IPW_RSSI_TO_DBM,
4939 .signal = pkt->u.frame.signal,
4940 .rate = pkt->u.frame.rate,
4941 .mac_time = jiffies,
4942 .received_channel =
4943 pkt->u.frame.received_channel,
4944 .freq =
4945 (pkt->u.frame.
4946 control & (1 << 0)) ?
4947 IEEE80211_24GHZ_BAND :
4948 IEEE80211_52GHZ_BAND,
4949 .len = pkt->u.frame.length,
4950 };
4951
4952 if (stats.rssi != 0)
4953 stats.mask |= IEEE80211_STATMASK_RSSI;
4954 if (stats.signal != 0)
4955 stats.mask |= IEEE80211_STATMASK_SIGNAL;
4956 if (stats.rate != 0)
4957 stats.mask |= IEEE80211_STATMASK_RATE;
4958
4959 priv->rx_packets++;
4960
4961#ifdef CONFIG_IPW_PROMISC
4962 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4963 ipw_handle_data_packet(priv, rxb,
4964 &stats);
4965 break;
4966 }
4967#endif
4968
4969 header =
4970 (struct ieee80211_hdr *)(rxb->skb->data +
4971 IPW_RX_FRAME_SIZE);
4972 /* TODO: Check Ad-Hoc dest/source and make sure
4973 * that we are actually parsing these packets
4974 * correctly -- we should probably use the
4975 * frame control of the packet and disregard
4976 * the current iw_mode */
4977 switch (priv->ieee->iw_mode) {
4978 case IW_MODE_ADHOC:
4979 network_packet =
4980 !memcmp(header->addr1,
4981 priv->net_dev->dev_addr,
4982 ETH_ALEN) ||
4983 !memcmp(header->addr3,
4984 priv->bssid, ETH_ALEN) ||
4985 is_broadcast_ether_addr(header->
4986 addr1)
4987 || is_multicast_ether_addr(header->
4988 addr1);
4989 break;
4990
4991 case IW_MODE_INFRA:
4992 default:
4993 network_packet =
4994 !memcmp(header->addr3,
4995 priv->bssid, ETH_ALEN) ||
4996 !memcmp(header->addr1,
4997 priv->net_dev->dev_addr,
4998 ETH_ALEN) ||
4999 is_broadcast_ether_addr(header->
5000 addr1)
5001 || is_multicast_ether_addr(header->
5002 addr1);
5003 break;
5004 }
5005
5006 if (network_packet && priv->assoc_network) {
5007 priv->assoc_network->stats.rssi =
5008 stats.rssi;
5009 average_add(&priv->average_rssi,
5010 stats.rssi);
5011 priv->last_rx_rssi = stats.rssi;
5012 }
5013
5014 IPW_DEBUG_RX("Frame: len=%u\n",
5015 pkt->u.frame.length);
5016
5017 if (pkt->u.frame.length < frame_hdr_len(header)) {
5018 IPW_DEBUG_DROP
5019 ("Received packet is too small. "
5020 "Dropping.\n");
5021 priv->ieee->stats.rx_errors++;
5022 priv->wstats.discard.misc++;
5023 break;
5024 }
5025
5026 switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
5027 case IEEE80211_FTYPE_MGMT:
5028 ieee80211_rx_mgt(priv->ieee, header,
5029 &stats);
5030 if (priv->ieee->iw_mode == IW_MODE_ADHOC
5031 &&
5032 ((WLAN_FC_GET_STYPE
5033 (header->frame_ctl) ==
5034 IEEE80211_STYPE_PROBE_RESP)
5035 ||
5036 (WLAN_FC_GET_STYPE
5037 (header->frame_ctl) ==
5038 IEEE80211_STYPE_BEACON))
5039 && !memcmp(header->addr3,
5040 priv->bssid, ETH_ALEN))
5041 ipw_add_station(priv,
5042 header->addr2);
5043 break;
5044
5045 case IEEE80211_FTYPE_CTL:
5046 break;
5047
5048 case IEEE80211_FTYPE_DATA:
5049 if (network_packet)
5050 ipw_handle_data_packet(priv,
5051 rxb,
5052 &stats);
5053 else
5054 IPW_DEBUG_DROP("Dropping: "
5055 MAC_FMT ", "
5056 MAC_FMT ", "
5057 MAC_FMT "\n",
5058 MAC_ARG(header->
5059 addr1),
5060 MAC_ARG(header->
5061 addr2),
5062 MAC_ARG(header->
5063 addr3));
5064 break;
5065 }
5066 break;
5067 }
5068
5069 case RX_HOST_NOTIFICATION_TYPE:{
5070 IPW_DEBUG_RX
5071 ("Notification: subtype=%02X flags=%02X size=%d\n",
5072 pkt->u.notification.subtype,
5073 pkt->u.notification.flags,
5074 pkt->u.notification.size);
5075 ipw_rx_notification(priv, &pkt->u.notification);
5076 break;
5077 }
5078
5079 default:
5080 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
5081 pkt->header.message_type);
5082 break;
5083 }
5084
5085 /* For now we just don't re-use anything. We can tweak this
5086 * later to try and re-use notification packets and SKBs that
5087 * fail to Rx correctly */
5088 if (rxb->skb != NULL) {
5089 dev_kfree_skb_any(rxb->skb);
5090 rxb->skb = NULL;
5091 }
5092
5093 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
5094 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5095 list_add_tail(&rxb->list, &priv->rxq->rx_used);
5096
5097 i = (i + 1) % RX_QUEUE_SIZE;
5098 }
5099
5100 /* Backtrack one entry */
5101 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
5102
5103 ipw_rx_queue_restock(priv);
5104}
5105
5106static void ipw_abort_scan(struct ipw_priv *priv)
5107{
5108 int err;
5109
5110 if (priv->status & STATUS_SCAN_ABORTING) {
5111 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5112 return;
5113 }
5114 priv->status |= STATUS_SCAN_ABORTING;
5115
5116 err = ipw_send_scan_abort(priv);
5117 if (err)
5118 IPW_DEBUG_HC("Request to abort scan failed.\n");
5119}
5120
5121static int ipw_request_scan(struct ipw_priv *priv)
5122{
5123 struct ipw_scan_request_ext scan;
5124 int channel_index = 0;
5125 int i, err, scan_type;
5126
5127 if (priv->status & STATUS_EXIT_PENDING) {
5128 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5129 priv->status |= STATUS_SCAN_PENDING;
5130 return 0;
5131 }
5132
5133 if (priv->status & STATUS_SCANNING) {
5134 IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
5135 priv->status |= STATUS_SCAN_PENDING;
5136 ipw_abort_scan(priv);
5137 return 0;
5138 }
5139
5140 if (priv->status & STATUS_SCAN_ABORTING) {
5141 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5142 priv->status |= STATUS_SCAN_PENDING;
5143 return 0;
5144 }
5145
5146 if (priv->status & STATUS_RF_KILL_MASK) {
5147 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5148 priv->status |= STATUS_SCAN_PENDING;
5149 return 0;
5150 }
5151
5152 memset(&scan, 0, sizeof(scan));
5153
5154 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
5155 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
5156 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
5157
5158 scan.full_scan_index = ieee80211_get_scans(priv->ieee);
5159 /* If we are roaming, then make this a directed scan for the current
5160 * network. Otherwise, ensure that every other scan is a fast
5161 * channel hop scan */
5162 if ((priv->status & STATUS_ROAMING)
5163 || (!(priv->status & STATUS_ASSOCIATED)
5164 && (priv->config & CFG_STATIC_ESSID)
5165 && (scan.full_scan_index % 2))) {
5166 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5167 if (err) {
5168 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5169 return err;
5170 }
5171
5172 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
5173 } else {
5174 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
5175 }
5176
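	/* Build the channel list one band at a time. For each band the
	 * entry at 'start' is skipped and later filled in with a header
	 * byte encoding the band (mode << 6) plus the number of channels
	 * that follow it. */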
5177 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5178 int start = channel_index;
5179 for (i = 0; i < MAX_A_CHANNELS; i++) {
5180 if (band_a_active_channel[i] == 0)
5181 break;
5182 if ((priv->status & STATUS_ASSOCIATED) &&
5183 band_a_active_channel[i] == priv->channel)
5184 continue;
5185 channel_index++;
5186 scan.channels_list[channel_index] =
5187 band_a_active_channel[i];
5188 ipw_set_scan_type(&scan, channel_index, scan_type);
5189 }
5190
5191 if (start != channel_index) {
5192 scan.channels_list[start] = (u8) (IPW_A_MODE << 6) |
5193 (channel_index - start);
5194 channel_index++;
5195 }
5196 }
5197
5198 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5199 int start = channel_index;
5200 for (i = 0; i < MAX_B_CHANNELS; i++) {
5201 if (band_b_active_channel[i] == 0)
5202 break;
5203 if ((priv->status & STATUS_ASSOCIATED) &&
5204 band_b_active_channel[i] == priv->channel)
5205 continue;
5206 channel_index++;
5207 scan.channels_list[channel_index] =
5208 band_b_active_channel[i];
5209 ipw_set_scan_type(&scan, channel_index, scan_type);
5210 }
5211
5212 if (start != channel_index) {
5213 scan.channels_list[start] = (u8) (IPW_B_MODE << 6) |
5214 (channel_index - start);
5215 }
5216 }
5217
5218 err = ipw_send_scan_request_ext(priv, &scan);
5219 if (err) {
5220 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
5221 return -EIO;
5222 }
5223
5224 priv->status |= STATUS_SCANNING;
5225 priv->status &= ~STATUS_SCAN_PENDING;
5226
5227 return 0;
5228}
5229
5230/*
5231 * This file defines the Wireless Extension handlers. It does not
5232 * define any methods of hardware manipulation and relies on the
5233 * functions defined in ipw_main to provide the HW interaction.
5234 *
5235 * The exception to this is the use of the ipw_get_ordinal()
5236 * function used to poll the hardware vs. making unnecessary calls.
5237 *
5238 */
5239
5240static int ipw_wx_get_name(struct net_device *dev,
5241 struct iw_request_info *info,
5242 union iwreq_data *wrqu, char *extra)
5243{
5244 struct ipw_priv *priv = ieee80211_priv(dev);
5245 if (!(priv->status & STATUS_ASSOCIATED))
5246 strcpy(wrqu->name, "unassociated");
5247 else
5248 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
5249 ipw_modes[priv->assoc_request.ieee_mode]);
5250 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
5251 return 0;
5252}
5253
5254static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5255{
5256 if (channel == 0) {
5257 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
5258 priv->config &= ~CFG_STATIC_CHANNEL;
5259 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5260 STATUS_ASSOCIATING))) {
5261 IPW_DEBUG_ASSOC("Attempting to associate with new "
5262 "parameters.\n");
5263 ipw_associate(priv);
5264 }
5265
5266 return 0;
5267 }
5268
5269 priv->config |= CFG_STATIC_CHANNEL;
5270
5271 if (priv->channel == channel) {
5272 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
5273 channel);
5274 return 0;
5275 }
5276
5277 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
5278 priv->channel = channel;
5279
5280 /* If we are currently associated, or trying to associate
5281 * then see if this is a new channel (causing us to disassociate) */
5282 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5283 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
5284 ipw_disassociate(priv);
5285 } else {
5286 ipw_associate(priv);
5287 }
5288
5289 return 0;
5290}
5291
5292static int ipw_wx_set_freq(struct net_device *dev,
5293 struct iw_request_info *info,
5294 union iwreq_data *wrqu, char *extra)
5295{
5296 struct ipw_priv *priv = ieee80211_priv(dev);
5297 struct iw_freq *fwrq = &wrqu->freq;
5298
5299 /* if setting by freq convert to channel */
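	/* (Wireless Extensions pass either a plain channel number, with
	 * e == 0 and m <= 1000, or a frequency of m * 10^e Hz.) */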
5300 if (fwrq->e == 1) {
5301 if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
5302 int f = fwrq->m / 100000;
5303 int c = 0;
5304
5305 while ((c < REG_MAX_CHANNEL) &&
5306 (f != ipw_frequencies[c]))
5307 c++;
5308
5309 /* hack to fall through */
5310 fwrq->e = 0;
5311 fwrq->m = c + 1;
5312 }
5313 }
5314
5315 if (fwrq->e > 0 || fwrq->m > 1000)
5316 return -EOPNOTSUPP;
5317
5318 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5319 return ipw_set_channel(priv, (u8) fwrq->m);
5320
5321 return 0;
5322}
5323
5324static int ipw_wx_get_freq(struct net_device *dev,
5325 struct iw_request_info *info,
5326 union iwreq_data *wrqu, char *extra)
5327{
5328 struct ipw_priv *priv = ieee80211_priv(dev);
5329
5330 wrqu->freq.e = 0;
5331
5332 /* If we are associated, trying to associate, or have a statically
5333 * configured CHANNEL then return that; otherwise return ANY */
5334 if (priv->config & CFG_STATIC_CHANNEL ||
5335 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
5336 wrqu->freq.m = priv->channel;
5337 else
5338 wrqu->freq.m = 0;
5339
5340 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
5341 return 0;
5342}
5343
5344static int ipw_wx_set_mode(struct net_device *dev,
5345 struct iw_request_info *info,
5346 union iwreq_data *wrqu, char *extra)
5347{
5348 struct ipw_priv *priv = ieee80211_priv(dev);
5349 int err = 0;
5350
5351 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
5352
5353 if (wrqu->mode == priv->ieee->iw_mode)
5354 return 0;
5355
5356 switch (wrqu->mode) {
5357#ifdef CONFIG_IPW_PROMISC
5358 case IW_MODE_MONITOR:
5359#endif
5360 case IW_MODE_ADHOC:
5361 case IW_MODE_INFRA:
5362 break;
5363 case IW_MODE_AUTO:
5364 wrqu->mode = IW_MODE_INFRA;
5365 break;
5366 default:
5367 return -EINVAL;
5368 }
5369
5370#ifdef CONFIG_IPW_PROMISC
5371 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5372 priv->net_dev->type = ARPHRD_ETHER;
5373
5374 if (wrqu->mode == IW_MODE_MONITOR)
5375 priv->net_dev->type = ARPHRD_IEEE80211;
5376#endif /* CONFIG_IPW_PROMISC */
5377
5378#ifdef CONFIG_PM
5379 /* Free the existing firmware and reset the fw_loaded
5380	 * flag so ipw_load() will bring in the new firmware */
5381 if (fw_loaded) {
5382 fw_loaded = 0;
5383 }
5384
5385 release_firmware(bootfw);
5386 release_firmware(ucode);
5387 release_firmware(firmware);
5388 bootfw = ucode = firmware = NULL;
5389#endif
5390
5391 priv->ieee->iw_mode = wrqu->mode;
5392 ipw_adapter_restart(priv);
5393
5394 return err;
5395}
5396
5397static int ipw_wx_get_mode(struct net_device *dev,
5398 struct iw_request_info *info,
5399 union iwreq_data *wrqu, char *extra)
5400{
5401 struct ipw_priv *priv = ieee80211_priv(dev);
5402
5403 wrqu->mode = priv->ieee->iw_mode;
5404 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
5405
5406 return 0;
5407}
5408
5409#define DEFAULT_RTS_THRESHOLD 2304U
5410#define MIN_RTS_THRESHOLD 1U
5411#define MAX_RTS_THRESHOLD 2304U
5412#define DEFAULT_BEACON_INTERVAL 100U
5413#define DEFAULT_SHORT_RETRY_LIMIT 7U
5414#define DEFAULT_LONG_RETRY_LIMIT 4U
5415
5416/* Values are in microseconds */
5417static const s32 timeout_duration[] = {
5418 350000,
5419 250000,
5420 75000,
5421 37000,
5422 25000,
5423};
5424
5425static const s32 period_duration[] = {
5426 400000,
5427 700000,
5428 1000000,
5429 1000000,
5430 1000000
5431};
5432
5433static int ipw_wx_get_range(struct net_device *dev,
5434 struct iw_request_info *info,
5435 union iwreq_data *wrqu, char *extra)
5436{
5437 struct ipw_priv *priv = ieee80211_priv(dev);
5438 struct iw_range *range = (struct iw_range *)extra;
5439 u16 val;
5440 int i;
5441
5442 wrqu->data.length = sizeof(*range);
5443 memset(range, 0, sizeof(*range));
5444
5445	/* 54 Mb/s raw == ~27 Mb/s real throughput (802.11g) */
5446 range->throughput = 27 * 1000 * 1000;
5447
5448 range->max_qual.qual = 100;
5449 /* TODO: Find real max RSSI and stick here */
5450 range->max_qual.level = 0;
5451 range->max_qual.noise = 0;
5452 range->max_qual.updated = 7; /* Updated all three */
5453
5454 range->avg_qual.qual = 70;
5455	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
5456 range->avg_qual.level = 0; /* FIXME to real average level */
5457 range->avg_qual.noise = 0;
5458 range->avg_qual.updated = 7; /* Updated all three */
5459
5460 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
5461
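	/* Rates in the 802.11 supported-rates format are in 500 kb/s units
	 * with the high bit flagging a basic rate, so mask that bit off and
	 * convert to b/s. */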
5462 for (i = 0; i < range->num_bitrates; i++)
5463 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
5464 500000;
5465
5466 range->max_rts = DEFAULT_RTS_THRESHOLD;
5467 range->min_frag = MIN_FRAG_THRESHOLD;
5468 range->max_frag = MAX_FRAG_THRESHOLD;
5469
5470 range->encoding_size[0] = 5;
5471 range->encoding_size[1] = 13;
5472 range->num_encoding_sizes = 2;
5473 range->max_encoding_tokens = WEP_KEYS;
5474
5475 /* Set the Wireless Extension versions */
5476 range->we_version_compiled = WIRELESS_EXT;
5477 range->we_version_source = 16;
5478
5479 range->num_channels = FREQ_COUNT;
5480
5481 val = 0;
5482 for (i = 0; i < FREQ_COUNT; i++) {
5483 range->freq[val].i = i + 1;
5484 range->freq[val].m = ipw_frequencies[i] * 100000;
5485 range->freq[val].e = 1;
5486 val++;
5487
5488 if (val == IW_MAX_FREQUENCIES)
5489 break;
5490 }
5491 range->num_frequency = val;
5492
5493 IPW_DEBUG_WX("GET Range\n");
5494 return 0;
5495}
5496
5497static int ipw_wx_set_wap(struct net_device *dev,
5498 struct iw_request_info *info,
5499 union iwreq_data *wrqu, char *extra)
5500{
5501 struct ipw_priv *priv = ieee80211_priv(dev);
5502
5503 static const unsigned char any[] = {
5504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5505 };
5506 static const unsigned char off[] = {
5507 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
5508 };
5509
5510 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
5511 return -EINVAL;
5512
5513 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
5514 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5515 /* we disable mandatory BSSID association */
5516 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
5517 priv->config &= ~CFG_STATIC_BSSID;
5518 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5519 STATUS_ASSOCIATING))) {
5520 IPW_DEBUG_ASSOC("Attempting to associate with new "
5521 "parameters.\n");
5522 ipw_associate(priv);
5523 }
5524
5525 return 0;
5526 }
5527
5528 priv->config |= CFG_STATIC_BSSID;
5529 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5530 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
5531 return 0;
5532 }
5533
5534 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
5535 MAC_ARG(wrqu->ap_addr.sa_data));
5536
5537 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
5538
5539 /* If we are currently associated, or trying to associate
5540 * then see if this is a new BSSID (causing us to disassociate) */
5541 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5542 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
5543 ipw_disassociate(priv);
5544 } else {
5545 ipw_associate(priv);
5546 }
5547
5548 return 0;
5549}
5550
5551static int ipw_wx_get_wap(struct net_device *dev,
5552 struct iw_request_info *info,
5553 union iwreq_data *wrqu, char *extra)
5554{
5555 struct ipw_priv *priv = ieee80211_priv(dev);
5556 /* If we are associated, trying to associate, or have a statically
5557 * configured BSSID then return that; otherwise return ANY */
5558 if (priv->config & CFG_STATIC_BSSID ||
5559 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5560 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
5561 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
5562 } else
5563 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
5564
5565 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
5566 MAC_ARG(wrqu->ap_addr.sa_data));
5567 return 0;
5568}
5569
5570static int ipw_wx_set_essid(struct net_device *dev,
5571 struct iw_request_info *info,
5572 union iwreq_data *wrqu, char *extra)
5573{
5574 struct ipw_priv *priv = ieee80211_priv(dev);
5575 char *essid = ""; /* ANY */
5576 int length = 0;
5577
5578 if (wrqu->essid.flags && wrqu->essid.length) {
5579 length = wrqu->essid.length - 1;
5580 essid = extra;
5581 }
5582 if (length == 0) {
5583 IPW_DEBUG_WX("Setting ESSID to ANY\n");
5584 priv->config &= ~CFG_STATIC_ESSID;
5585 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5586 STATUS_ASSOCIATING))) {
5587 IPW_DEBUG_ASSOC("Attempting to associate with new "
5588 "parameters.\n");
5589 ipw_associate(priv);
5590 }
5591
5592 return 0;
5593 }
5594
5595 length = min(length, IW_ESSID_MAX_SIZE);
5596
5597 priv->config |= CFG_STATIC_ESSID;
5598
5599 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
5600 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
5601 return 0;
5602 }
5603
5604 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
5605 length);
5606
5607 priv->essid_len = length;
5608 memcpy(priv->essid, essid, priv->essid_len);
5609
5610 /* If we are currently associated, or trying to associate
5611 * then see if this is a new ESSID (causing us to disassociate) */
5612 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5613 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
5614 ipw_disassociate(priv);
5615 } else {
5616 ipw_associate(priv);
5617 }
5618
5619 return 0;
5620}
5621
5622static int ipw_wx_get_essid(struct net_device *dev,
5623 struct iw_request_info *info,
5624 union iwreq_data *wrqu, char *extra)
5625{
5626 struct ipw_priv *priv = ieee80211_priv(dev);
5627
5628 /* If we are associated, trying to associate, or have a statically
5629 * configured ESSID then return that; otherwise return ANY */
5630 if (priv->config & CFG_STATIC_ESSID ||
5631 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5632 IPW_DEBUG_WX("Getting essid: '%s'\n",
5633 escape_essid(priv->essid, priv->essid_len));
5634 memcpy(extra, priv->essid, priv->essid_len);
5635 wrqu->essid.length = priv->essid_len;
5636 wrqu->essid.flags = 1; /* active */
5637 } else {
5638 IPW_DEBUG_WX("Getting essid: ANY\n");
5639 wrqu->essid.length = 0;
5640 wrqu->essid.flags = 0; /* active */
5641 }
5642
5643 return 0;
5644}
5645
5646static int ipw_wx_set_nick(struct net_device *dev,
5647 struct iw_request_info *info,
5648 union iwreq_data *wrqu, char *extra)
5649{
5650 struct ipw_priv *priv = ieee80211_priv(dev);
5651
5652 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
5653 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
5654 return -E2BIG;
5655
5656 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
5657 memset(priv->nick, 0, sizeof(priv->nick));
5658 memcpy(priv->nick, extra, wrqu->data.length);
5659 IPW_DEBUG_TRACE("<<\n");
5660 return 0;
5661
5662}
5663
5664static int ipw_wx_get_nick(struct net_device *dev,
5665 struct iw_request_info *info,
5666 union iwreq_data *wrqu, char *extra)
5667{
5668 struct ipw_priv *priv = ieee80211_priv(dev);
5669 IPW_DEBUG_WX("Getting nick\n");
5670 wrqu->data.length = strlen(priv->nick) + 1;
5671 memcpy(extra, priv->nick, wrqu->data.length);
5672 wrqu->data.flags = 1; /* active */
5673 return 0;
5674}
5675
5676static int ipw_wx_set_rate(struct net_device *dev,
5677 struct iw_request_info *info,
5678 union iwreq_data *wrqu, char *extra)
5679{
5680 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5681 return -EOPNOTSUPP;
5682}
5683
5684static int ipw_wx_get_rate(struct net_device *dev,
5685 struct iw_request_info *info,
5686 union iwreq_data *wrqu, char *extra)
5687{
5688 struct ipw_priv *priv = ieee80211_priv(dev);
5689 wrqu->bitrate.value = priv->last_rate;
5690
5691 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
5692 return 0;
5693}
5694
5695static int ipw_wx_set_rts(struct net_device *dev,
5696 struct iw_request_info *info,
5697 union iwreq_data *wrqu, char *extra)
5698{
5699 struct ipw_priv *priv = ieee80211_priv(dev);
5700
5701 if (wrqu->rts.disabled)
5702 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
5703 else {
5704 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
5705 wrqu->rts.value > MAX_RTS_THRESHOLD)
5706 return -EINVAL;
5707
5708 priv->rts_threshold = wrqu->rts.value;
5709 }
5710
5711 ipw_send_rts_threshold(priv, priv->rts_threshold);
5712 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
5713 return 0;
5714}
5715
5716static int ipw_wx_get_rts(struct net_device *dev,
5717 struct iw_request_info *info,
5718 union iwreq_data *wrqu, char *extra)
5719{
5720 struct ipw_priv *priv = ieee80211_priv(dev);
5721 wrqu->rts.value = priv->rts_threshold;
5722 wrqu->rts.fixed = 0; /* no auto select */
5723 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5724
5725 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
5726 return 0;
5727}
5728
5729static int ipw_wx_set_txpow(struct net_device *dev,
5730 struct iw_request_info *info,
5731 union iwreq_data *wrqu, char *extra)
5732{
5733 struct ipw_priv *priv = ieee80211_priv(dev);
5734 struct ipw_tx_power tx_power;
5735 int i;
5736
5737 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
5738 return -EINPROGRESS;
5739
5740 if (wrqu->power.flags != IW_TXPOW_DBM)
5741 return -EINVAL;
5742
5743 if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
5744 return -EINVAL;
5745
5746 priv->tx_power = wrqu->power.value;
5747
5748 memset(&tx_power, 0, sizeof(tx_power));
5749
5750 /* configure device for 'G' band */
5751 tx_power.ieee_mode = IPW_G_MODE;
5752 tx_power.num_channels = 11;
5753 for (i = 0; i < 11; i++) {
5754 tx_power.channels_tx_power[i].channel_number = i + 1;
5755 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
5756 }
5757 if (ipw_send_tx_power(priv, &tx_power))
5758 goto error;
5759
5760 /* configure device to also handle 'B' band */
5761 tx_power.ieee_mode = IPW_B_MODE;
5762 if (ipw_send_tx_power(priv, &tx_power))
5763 goto error;
5764
5765 return 0;
5766
5767 error:
5768 return -EIO;
5769}
5770
5771static int ipw_wx_get_txpow(struct net_device *dev,
5772 struct iw_request_info *info,
5773 union iwreq_data *wrqu, char *extra)
5774{
5775 struct ipw_priv *priv = ieee80211_priv(dev);
5776
5777 wrqu->power.value = priv->tx_power;
5778 wrqu->power.fixed = 1;
5779 wrqu->power.flags = IW_TXPOW_DBM;
5780 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
5781
5782 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
5783 wrqu->power.disabled ? "ON" : "OFF", wrqu->power.value);
5784
5785 return 0;
5786}
5787
5788static int ipw_wx_set_frag(struct net_device *dev,
5789 struct iw_request_info *info,
5790 union iwreq_data *wrqu, char *extra)
5791{
5792 struct ipw_priv *priv = ieee80211_priv(dev);
5793
5794 if (wrqu->frag.disabled)
5795 priv->ieee->fts = DEFAULT_FTS;
5796 else {
5797 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
5798 wrqu->frag.value > MAX_FRAG_THRESHOLD)
5799 return -EINVAL;
5800
5801 priv->ieee->fts = wrqu->frag.value & ~0x1;
5802 }
5803
5804 ipw_send_frag_threshold(priv, wrqu->frag.value);
5805 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
5806 return 0;
5807}
5808
5809static int ipw_wx_get_frag(struct net_device *dev,
5810 struct iw_request_info *info,
5811 union iwreq_data *wrqu, char *extra)
5812{
5813 struct ipw_priv *priv = ieee80211_priv(dev);
5814 wrqu->frag.value = priv->ieee->fts;
5815 wrqu->frag.fixed = 0; /* no auto select */
5816 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
5817
5818 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
5819
5820 return 0;
5821}
5822
5823static int ipw_wx_set_retry(struct net_device *dev,
5824 struct iw_request_info *info,
5825 union iwreq_data *wrqu, char *extra)
5826{
5827 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5828 return -EOPNOTSUPP;
5829}
5830
5831static int ipw_wx_get_retry(struct net_device *dev,
5832 struct iw_request_info *info,
5833 union iwreq_data *wrqu, char *extra)
5834{
5835 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5836 return -EOPNOTSUPP;
5837}
5838
5839static int ipw_wx_set_scan(struct net_device *dev,
5840 struct iw_request_info *info,
5841 union iwreq_data *wrqu, char *extra)
5842{
5843 struct ipw_priv *priv = ieee80211_priv(dev);
5844 IPW_DEBUG_WX("Start scan\n");
5845 if (ipw_request_scan(priv))
5846 return -EIO;
5847 return 0;
5848}
5849
5850static int ipw_wx_get_scan(struct net_device *dev,
5851 struct iw_request_info *info,
5852 union iwreq_data *wrqu, char *extra)
5853{
5854 struct ipw_priv *priv = ieee80211_priv(dev);
5855 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
5856}
5857
5858static int ipw_wx_set_encode(struct net_device *dev,
5859 struct iw_request_info *info,
5860 union iwreq_data *wrqu, char *key)
5861{
5862 struct ipw_priv *priv = ieee80211_priv(dev);
5863 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
5864}
5865
5866static int ipw_wx_get_encode(struct net_device *dev,
5867 struct iw_request_info *info,
5868 union iwreq_data *wrqu, char *key)
5869{
5870 struct ipw_priv *priv = ieee80211_priv(dev);
5871 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
5872}
5873
5874static int ipw_wx_set_power(struct net_device *dev,
5875 struct iw_request_info *info,
5876 union iwreq_data *wrqu, char *extra)
5877{
5878 struct ipw_priv *priv = ieee80211_priv(dev);
5879 int err;
5880
5881 if (wrqu->power.disabled) {
5882 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
5883 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
5884 if (err) {
5885 IPW_DEBUG_WX("failed setting power mode.\n");
5886 return err;
5887 }
5888
5889 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
5890
5891 return 0;
5892 }
5893
5894 switch (wrqu->power.flags & IW_POWER_MODE) {
5895 case IW_POWER_ON: /* If not specified */
5896 case IW_POWER_MODE: /* If set all mask */
5897	case IW_POWER_ALL_R:	/* If explicitly set to all */
5898 break;
5899 default: /* Otherwise we don't support it */
5900 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
5901 wrqu->power.flags);
5902 return -EOPNOTSUPP;
5903 }
5904
5905 /* If the user hasn't specified a power management mode yet, default
5906 * to BATTERY */
5907 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
5908 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
5909 else
5910 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
5911 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
5912 if (err) {
5913 IPW_DEBUG_WX("failed setting power mode.\n");
5914 return err;
5915 }
5916
5917 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
5918
5919 return 0;
5920}
5921
5922static int ipw_wx_get_power(struct net_device *dev,
5923 struct iw_request_info *info,
5924 union iwreq_data *wrqu, char *extra)
5925{
5926 struct ipw_priv *priv = ieee80211_priv(dev);
5927
5928 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
5929 wrqu->power.disabled = 1;
5930 } else {
5931 wrqu->power.disabled = 0;
5932 }
5933
5934 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
5935
5936 return 0;
5937}
5938
5939static int ipw_wx_set_powermode(struct net_device *dev,
5940 struct iw_request_info *info,
5941 union iwreq_data *wrqu, char *extra)
5942{
5943 struct ipw_priv *priv = ieee80211_priv(dev);
5944 int mode = *(int *)extra;
5945 int err;
5946
5947 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
5948 mode = IPW_POWER_AC;
5949 priv->power_mode = mode;
5950 } else {
5951 priv->power_mode = IPW_POWER_ENABLED | mode;
5952 }
5953
5954 if (priv->power_mode != mode) {
5955 err = ipw_send_power_mode(priv, mode);
5956
5957 if (err) {
5958 IPW_DEBUG_WX("failed setting power mode.\n");
5959 return err;
5960 }
5961 }
5962
5963 return 0;
5964}
5965
5966#define MAX_WX_STRING 80
5967static int ipw_wx_get_powermode(struct net_device *dev,
5968 struct iw_request_info *info,
5969 union iwreq_data *wrqu, char *extra)
5970{
5971 struct ipw_priv *priv = ieee80211_priv(dev);
5972 int level = IPW_POWER_LEVEL(priv->power_mode);
5973 char *p = extra;
5974
5975 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
5976
5977 switch (level) {
5978 case IPW_POWER_AC:
5979 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
5980 break;
5981 case IPW_POWER_BATTERY:
5982 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
5983 break;
5984 default:
5985 p += snprintf(p, MAX_WX_STRING - (p - extra),
5986 "(Timeout %dms, Period %dms)",
5987 timeout_duration[level - 1] / 1000,
5988 period_duration[level - 1] / 1000);
5989 }
5990
5991 if (!(priv->power_mode & IPW_POWER_ENABLED))
5992 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
5993
5994 wrqu->data.length = p - extra + 1;
5995
5996 return 0;
5997}
5998
5999static int ipw_wx_set_wireless_mode(struct net_device *dev,
6000 struct iw_request_info *info,
6001 union iwreq_data *wrqu, char *extra)
6002{
6003 struct ipw_priv *priv = ieee80211_priv(dev);
6004 int mode = *(int *)extra;
6005 u8 band = 0, modulation = 0;
6006
6007 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
6008 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
6009 return -EINVAL;
6010 }
6011
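	/* Translate the requested a/b/g mode bits into the band and
	 * modulation flags the ieee80211 layer uses; only the 2915ABG
	 * hardware supports 802.11a. */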
6012 if (priv->adapter == IPW_2915ABG) {
6013 priv->ieee->abg_ture = 1;
6014 if (mode & IEEE_A) {
6015 band |= IEEE80211_52GHZ_BAND;
6016 modulation |= IEEE80211_OFDM_MODULATION;
6017 } else
6018 priv->ieee->abg_ture = 0;
6019 } else {
6020 if (mode & IEEE_A) {
6021 IPW_WARNING("Attempt to set 2200BG into "
6022 "802.11a mode\n");
6023 return -EINVAL;
6024 }
6025
6026 priv->ieee->abg_ture = 0;
6027 }
6028
6029 if (mode & IEEE_B) {
6030 band |= IEEE80211_24GHZ_BAND;
6031 modulation |= IEEE80211_CCK_MODULATION;
6032 } else
6033 priv->ieee->abg_ture = 0;
6034
6035 if (mode & IEEE_G) {
6036 band |= IEEE80211_24GHZ_BAND;
6037 modulation |= IEEE80211_OFDM_MODULATION;
6038 } else
6039 priv->ieee->abg_ture = 0;
6040
6041 priv->ieee->mode = mode;
6042 priv->ieee->freq_band = band;
6043 priv->ieee->modulation = modulation;
6044 init_supported_rates(priv, &priv->rates);
6045
6046 /* If we are currently associated, or trying to associate
6047 * then see if this is a new configuration (causing us to
6048 * disassociate) */
6049 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6050 /* The resulting association will trigger
6051 * the new rates to be sent to the device */
6052 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
6053 ipw_disassociate(priv);
6054 } else
6055 ipw_send_supported_rates(priv, &priv->rates);
6056
6057 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
6058 mode & IEEE_A ? 'a' : '.',
6059 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
6060 return 0;
6061}
6062
6063static int ipw_wx_get_wireless_mode(struct net_device *dev,
6064 struct iw_request_info *info,
6065 union iwreq_data *wrqu, char *extra)
6066{
6067 struct ipw_priv *priv = ieee80211_priv(dev);
6068
6069 switch (priv->ieee->freq_band) {
6070 case IEEE80211_24GHZ_BAND:
6071 switch (priv->ieee->modulation) {
6072 case IEEE80211_CCK_MODULATION:
6073 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
6074 break;
6075 case IEEE80211_OFDM_MODULATION:
6076 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
6077 break;
6078 default:
6079 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
6080 break;
6081 }
6082 break;
6083
6084 case IEEE80211_52GHZ_BAND:
6085 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6086 break;
6087
6088 default: /* Mixed Band */
6089 switch (priv->ieee->modulation) {
6090 case IEEE80211_CCK_MODULATION:
6091 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
6092 break;
6093 case IEEE80211_OFDM_MODULATION:
6094 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
6095 break;
6096 default:
6097 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
6098 break;
6099 }
6100 break;
6101 }
6102
6103 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6104
6105 wrqu->data.length = strlen(extra) + 1;
6106
6107 return 0;
6108}
6109
6110#ifdef CONFIG_IPW_PROMISC
6111static int ipw_wx_set_promisc(struct net_device *dev,
6112 struct iw_request_info *info,
6113 union iwreq_data *wrqu, char *extra)
6114{
6115 struct ipw_priv *priv = ieee80211_priv(dev);
6116 int *parms = (int *)extra;
6117 int enable = (parms[0] > 0);
6118
6119 IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
6120 if (enable) {
6121 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
6122 priv->net_dev->type = ARPHRD_IEEE80211;
6123 ipw_adapter_restart(priv);
6124 }
6125
6126 ipw_set_channel(priv, parms[1]);
6127 } else {
6128 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
6129 return 0;
6130 priv->net_dev->type = ARPHRD_ETHER;
6131 ipw_adapter_restart(priv);
6132 }
6133 return 0;
6134}
6135
6136static int ipw_wx_reset(struct net_device *dev,
6137 struct iw_request_info *info,
6138 union iwreq_data *wrqu, char *extra)
6139{
6140 struct ipw_priv *priv = ieee80211_priv(dev);
6141 IPW_DEBUG_WX("RESET\n");
6142 ipw_adapter_restart(priv);
6143 return 0;
6144}
6145#endif				/* CONFIG_IPW_PROMISC */
6146
6147/* Rebase the WE IOCTLs to zero for the handler array */
6148#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
6149static iw_handler ipw_wx_handlers[] = {
6150 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6151 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6152 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6153 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6154 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6155 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6156 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6157 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6158 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6159 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6160 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6161 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6162 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6163 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6164 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6165 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6166 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6167 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6168 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6169 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6170 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6171 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6172 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6173 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6174 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6175 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6176 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6177 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6178};
6179
6180#define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
6181#define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
6182#define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
6183#define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
6184#define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
6185#define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
6186
6187static struct iw_priv_args ipw_priv_args[] = {
6188 {
6189 .cmd = IPW_PRIV_SET_POWER,
6190 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6191 .name = "set_power"},
6192 {
6193 .cmd = IPW_PRIV_GET_POWER,
6194 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6195 .name = "get_power"},
6196 {
6197 .cmd = IPW_PRIV_SET_MODE,
6198 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6199 .name = "set_mode"},
6200 {
6201 .cmd = IPW_PRIV_GET_MODE,
6202 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6203 .name = "get_mode"},
6204#ifdef CONFIG_IPW_PROMISC
6205 {
6206 IPW_PRIV_SET_PROMISC,
6207 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
6208 {
6209 IPW_PRIV_RESET,
6210 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
6211#endif /* CONFIG_IPW_PROMISC */
6212};
6213
6214static iw_handler ipw_priv_handler[] = {
6215 ipw_wx_set_powermode,
6216 ipw_wx_get_powermode,
6217 ipw_wx_set_wireless_mode,
6218 ipw_wx_get_wireless_mode,
6219#ifdef CONFIG_IPW_PROMISC
6220 ipw_wx_set_promisc,
6221 ipw_wx_reset,
6222#endif
6223};
6224
6225static struct iw_handler_def ipw_wx_handler_def = {
6226 .standard = ipw_wx_handlers,
6227 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6228 .num_private = ARRAY_SIZE(ipw_priv_handler),
6229 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6230 .private = ipw_priv_handler,
6231 .private_args = ipw_priv_args,
6232};
6233
6234/*
6235 * Get wireless statistics.
6236 * Called by /proc/net/wireless
6237 * Also called by SIOCGIWSTATS
6238 */
6239static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
6240{
6241 struct ipw_priv *priv = ieee80211_priv(dev);
6242 struct iw_statistics *wstats;
6243
6244 wstats = &priv->wstats;
6245
6246	/* if hw is disabled, then ipw_get_ordinal() can't be called.
6247	 * ipw_get_wireless_stats seems to be called before the fw is
6248	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
6249	 * and associated; if not associated, the values are all meaningless
6250	 * anyway, so set them all to zero and INVALID */
6251 if (!(priv->status & STATUS_ASSOCIATED)) {
6252 wstats->miss.beacon = 0;
6253 wstats->discard.retries = 0;
6254 wstats->qual.qual = 0;
6255 wstats->qual.level = 0;
6256 wstats->qual.noise = 0;
6257 wstats->qual.updated = 7;
6258 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
6259 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
6260 return wstats;
6261 }
6262
6263 wstats->qual.qual = priv->quality;
6264 wstats->qual.level = average_value(&priv->average_rssi);
6265 wstats->qual.noise = average_value(&priv->average_noise);
6266 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
6267 IW_QUAL_NOISE_UPDATED;
6268
6269 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
6270 wstats->discard.retries = priv->last_tx_failures;
6271 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
6272
6273/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
6274 goto fail_get_ordinal;
6275 wstats->discard.retries += tx_retry; */
6276
6277 return wstats;
6278}
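For reference, the statistics filled in above are what userspace reads back through SIOCGIWSTATS. A rough, hedged sketch of that query (the interface name is arbitrary, and the flags = 1 "clear updated" convention follows wireless-tools usage rather than anything this driver defines):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

int main(void)
{
	struct iw_statistics stats;
	struct iwreq wrq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&wrq, 0, sizeof(wrq));
	memset(&stats, 0, sizeof(stats));
	strncpy(wrq.ifr_name, "eth1", IFNAMSIZ);   /* arbitrary example name */
	wrq.u.data.pointer = &stats;
	wrq.u.data.length = sizeof(stats);
	wrq.u.data.flags = 1;                      /* ask to clear the updated flags */

	if (fd >= 0 && ioctl(fd, SIOCGIWSTATS, &wrq) == 0)
		printf("qual %u level %u noise %u\n",
		       stats.qual.qual, stats.qual.level, stats.qual.noise);
	if (fd >= 0)
		close(fd);
	return 0;
}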
6279
6280/* net device stuff */
6281
6282static inline void init_sys_config(struct ipw_sys_config *sys_config)
6283{
6284 memset(sys_config, 0, sizeof(struct ipw_sys_config));
6285 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
6286 sys_config->answer_broadcast_ssid_probe = 0;
6287 sys_config->accept_all_data_frames = 0;
6288 sys_config->accept_non_directed_frames = 1;
6289 sys_config->exclude_unicast_unencrypted = 0;
6290 sys_config->disable_unicast_decryption = 1;
6291 sys_config->exclude_multicast_unencrypted = 0;
6292 sys_config->disable_multicast_decryption = 1;
6293 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
6294 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
6295 sys_config->dot11g_auto_detection = 0;
6296 sys_config->enable_cts_to_self = 0;
6297 sys_config->bt_coexist_collision_thr = 0;
6298 sys_config->pass_noise_stats_to_host = 1;
6299}
6300
6301static int ipw_net_open(struct net_device *dev)
6302{
6303 struct ipw_priv *priv = ieee80211_priv(dev);
6304 IPW_DEBUG_INFO("dev->open\n");
6305 /* we should be verifying the device is ready to be opened */
6306 if (!(priv->status & STATUS_RF_KILL_MASK) &&
6307 (priv->status & STATUS_ASSOCIATED))
6308 netif_start_queue(dev);
6309 return 0;
6310}
6311
6312static int ipw_net_stop(struct net_device *dev)
6313{
6314 IPW_DEBUG_INFO("dev->close\n");
6315 netif_stop_queue(dev);
6316 return 0;
6317}
6318
6319/*
6320todo:
6321
6322modify to send one TFD per fragment instead of using chunking; otherwise
6323we need to heavily modify ieee80211_skb_to_txb().
6324*/
6325
6326static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6327{
6328 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
6329 txb->fragments[0]->data;
6330 int i = 0;
6331 struct tfd_frame *tfd;
6332 struct clx2_tx_queue *txq = &priv->txq[0];
6333 struct clx2_queue *q = &txq->q;
6334 u8 id, hdr_len, unicast;
6335 u16 remaining_bytes;
6336
6337 switch (priv->ieee->iw_mode) {
6338 case IW_MODE_ADHOC:
6339 hdr_len = IEEE80211_3ADDR_LEN;
6340 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
6341 !is_multicast_ether_addr(hdr->addr1);
6342 id = ipw_find_station(priv, hdr->addr1);
6343 if (id == IPW_INVALID_STATION) {
6344 id = ipw_add_station(priv, hdr->addr1);
6345 if (id == IPW_INVALID_STATION) {
6346 IPW_WARNING("Attempt to send data to "
6347 "invalid cell: " MAC_FMT "\n",
6348 MAC_ARG(hdr->addr1));
6349 goto drop;
6350 }
6351 }
6352 break;
6353
6354 case IW_MODE_INFRA:
6355 default:
6356 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
6357 !is_multicast_ether_addr(hdr->addr3);
6358 hdr_len = IEEE80211_3ADDR_LEN;
6359 id = 0;
6360 break;
6361 }
6362
6363 tfd = &txq->bd[q->first_empty];
6364 txq->txb[q->first_empty] = txb;
6365 memset(tfd, 0, sizeof(*tfd));
6366 tfd->u.data.station_number = id;
6367
6368 tfd->control_flags.message_type = TX_FRAME_TYPE;
6369 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
6370
6371 tfd->u.data.cmd_id = DINO_CMD_TX;
6372 tfd->u.data.len = txb->payload_size;
6373 remaining_bytes = txb->payload_size;
6374 if (unlikely(!unicast))
6375 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
6376 else
6377 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
6378
6379 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
6380 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
6381 else
6382 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
6383
6384 if (priv->config & CFG_PREAMBLE)
6385 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;
6386
6387 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
6388
6389 /* payload */
6390 tfd->u.data.num_chunks = min((u8) (NUM_TFD_CHUNKS - 2), txb->nr_frags);
6391 for (i = 0; i < tfd->u.data.num_chunks; i++) {
6392 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
6393 i, tfd->u.data.num_chunks,
6394 txb->fragments[i]->len - hdr_len);
6395 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
6396 txb->fragments[i]->len - hdr_len);
6397
6398 tfd->u.data.chunk_ptr[i] =
6399 pci_map_single(priv->pci_dev,
6400 txb->fragments[i]->data + hdr_len,
6401 txb->fragments[i]->len - hdr_len,
6402 PCI_DMA_TODEVICE);
6403 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
6404 }
6405
6406 if (i != txb->nr_frags) {
6407 struct sk_buff *skb;
6408 u16 remaining_bytes = 0;
6409 int j;
6410
6411 for (j = i; j < txb->nr_frags; j++)
6412 remaining_bytes += txb->fragments[j]->len - hdr_len;
6413
6414 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
6415 remaining_bytes);
6416 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
6417 if (skb != NULL) {
6418 tfd->u.data.chunk_len[i] = remaining_bytes;
6419 for (j = i; j < txb->nr_frags; j++) {
6420 int size = txb->fragments[j]->len - hdr_len;
6421 printk(KERN_INFO "Adding frag %d %d...\n",
6422 j, size);
6423 memcpy(skb_put(skb, size),
6424 txb->fragments[j]->data + hdr_len, size);
6425 }
6426 dev_kfree_skb_any(txb->fragments[i]);
6427 txb->fragments[i] = skb;
6428 tfd->u.data.chunk_ptr[i] =
6429 pci_map_single(priv->pci_dev, skb->data,
6430 tfd->u.data.chunk_len[i],
6431 PCI_DMA_TODEVICE);
6432 tfd->u.data.num_chunks++;
6433 }
6434 }
6435
6436 /* kick DMA */
6437 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
6438 ipw_write32(priv, q->reg_w, q->first_empty);
6439
6440 if (ipw_queue_space(q) < q->high_mark)
6441 netif_stop_queue(priv->net_dev);
6442
6443 return;
6444
6445 drop:
6446 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
6447 ieee80211_txb_free(txb);
6448}
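ipw_tx_skb() maps at most NUM_TFD_CHUNKS - 2 fragments into their own chunk slots; whatever is left over is copied into one freshly allocated skb that then occupies a single extra slot. A standalone sketch of that coalescing step, with strings standing in for fragment payloads (the slot count and names are invented for illustration):

#include <stdio.h>
#include <string.h>

#define DIRECT_SLOTS 4          /* stand-in for NUM_TFD_CHUNKS - 2 */

int main(void)
{
	const char *frags[] = { "aa", "bbb", "c", "dddd", "ee", "fff" };
	int nr_frags = 6, i;
	char coalesced[64] = "";

	/* map as many fragments directly as there are slots */
	for (i = 0; i < DIRECT_SLOTS && i < nr_frags; i++)
		printf("chunk %d -> \"%s\"\n", i, frags[i]);

	if (i != nr_frags) {
		/* the tail did not fit: copy the rest into one buffer and
		 * give that buffer the next (and last used) chunk slot */
		int j;

		for (j = i; j < nr_frags; j++)
			strcat(coalesced, frags[j]);
		printf("chunk %d -> \"%s\" (coalesced tail)\n", i, coalesced);
	}
	return 0;
}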
6449
6450static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6451 struct net_device *dev)
6452{
6453 struct ipw_priv *priv = ieee80211_priv(dev);
6454 unsigned long flags;
6455
6456 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
6457
6458 spin_lock_irqsave(&priv->lock, flags);
6459
6460 if (!(priv->status & STATUS_ASSOCIATED)) {
6461 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
6462 priv->ieee->stats.tx_carrier_errors++;
6463 netif_stop_queue(dev);
6464 goto fail_unlock;
6465 }
6466
6467 ipw_tx_skb(priv, txb);
6468
6469 spin_unlock_irqrestore(&priv->lock, flags);
6470 return 0;
6471
6472 fail_unlock:
6473 spin_unlock_irqrestore(&priv->lock, flags);
6474 return 1;
6475}
6476
6477static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
6478{
6479 struct ipw_priv *priv = ieee80211_priv(dev);
6480
6481 priv->ieee->stats.tx_packets = priv->tx_packets;
6482 priv->ieee->stats.rx_packets = priv->rx_packets;
6483 return &priv->ieee->stats;
6484}
6485
6486static void ipw_net_set_multicast_list(struct net_device *dev)
6487{
6488
6489}
6490
6491static int ipw_net_set_mac_address(struct net_device *dev, void *p)
6492{
6493 struct ipw_priv *priv = ieee80211_priv(dev);
6494 struct sockaddr *addr = p;
6495 if (!is_valid_ether_addr(addr->sa_data))
6496 return -EADDRNOTAVAIL;
6497 priv->config |= CFG_CUSTOM_MAC;
6498 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
6499 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
6500 priv->net_dev->name, MAC_ARG(priv->mac_addr));
6501 ipw_adapter_restart(priv);
6502 return 0;
6503}
6504
6505static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6506 struct ethtool_drvinfo *info)
6507{
6508 struct ipw_priv *p = ieee80211_priv(dev);
6509 char vers[64];
6510 char date[32];
6511 u32 len;
6512
6513 strcpy(info->driver, DRV_NAME);
6514 strcpy(info->version, DRV_VERSION);
6515
6516 len = sizeof(vers);
6517 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
6518 len = sizeof(date);
6519 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
6520
6521 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
6522 vers, date);
6523 strcpy(info->bus_info, pci_name(p->pci_dev));
6524 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
6525}
6526
6527static u32 ipw_ethtool_get_link(struct net_device *dev)
6528{
6529 struct ipw_priv *priv = ieee80211_priv(dev);
6530 return (priv->status & STATUS_ASSOCIATED) != 0;
6531}
6532
6533static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
6534{
6535 return CX2_EEPROM_IMAGE_SIZE;
6536}
6537
6538static int ipw_ethtool_get_eeprom(struct net_device *dev,
6539 struct ethtool_eeprom *eeprom, u8 * bytes)
6540{
6541 struct ipw_priv *p = ieee80211_priv(dev);
6542
6543 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6544 return -EINVAL;
6545
6546 memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
6547 return 0;
6548}
6549
6550static int ipw_ethtool_set_eeprom(struct net_device *dev,
6551 struct ethtool_eeprom *eeprom, u8 * bytes)
6552{
6553 struct ipw_priv *p = ieee80211_priv(dev);
6554 int i;
6555
6556 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6557 return -EINVAL;
6558
6559 memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
6560 for (i = IPW_EEPROM_DATA;
6561 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
6562 ipw_write8(p, i, p->eeprom[i]);
6563
6564 return 0;
6565}
6566
6567static struct ethtool_ops ipw_ethtool_ops = {
6568 .get_link = ipw_ethtool_get_link,
6569 .get_drvinfo = ipw_ethtool_get_drvinfo,
6570 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
6571 .get_eeprom = ipw_ethtool_get_eeprom,
6572 .set_eeprom = ipw_ethtool_set_eeprom,
6573};
6574
6575static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
6576{
6577 struct ipw_priv *priv = data;
6578 u32 inta, inta_mask;
6579
6580 if (!priv)
6581 return IRQ_NONE;
6582
6583 spin_lock(&priv->lock);
6584
6585 if (!(priv->status & STATUS_INT_ENABLED)) {
6586 /* Shared IRQ */
6587 goto none;
6588 }
6589
6590 inta = ipw_read32(priv, CX2_INTA_RW);
6591 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
6592
6593 if (inta == 0xFFFFFFFF) {
6594 /* Hardware disappeared */
6595 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
6596 goto none;
6597 }
6598
6599 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
6600 /* Shared interrupt */
6601 goto none;
6602 }
6603
6604 /* tell the device to stop sending interrupts */
6605 ipw_disable_interrupts(priv);
6606
6607 /* ack current interrupts */
6608 inta &= (CX2_INTA_MASK_ALL & inta_mask);
6609 ipw_write32(priv, CX2_INTA_RW, inta);
6610
6611 /* Cache INTA value for our tasklet */
6612 priv->isr_inta = inta;
6613
6614 tasklet_schedule(&priv->irq_tasklet);
6615
6616 spin_unlock(&priv->lock);
6617
6618 return IRQ_HANDLED;
6619 none:
6620 spin_unlock(&priv->lock);
6621 return IRQ_NONE;
6622}
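/* Note on ipw_isr(): the hard-IRQ path only decides whether the interrupt is
 * really ours (the shared-IRQ and hardware-gone cases return IRQ_NONE),
 * disables further interrupts, acks the asserted bits, caches them in
 * priv->isr_inta, and defers all real processing to ipw_irq_tasklet. */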
6623
6624static void ipw_rf_kill(void *adapter)
6625{
6626 struct ipw_priv *priv = adapter;
6627 unsigned long flags;
6628
6629 spin_lock_irqsave(&priv->lock, flags);
6630
6631 if (rf_kill_active(priv)) {
6632 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6633 if (priv->workqueue)
6634 queue_delayed_work(priv->workqueue,
6635 &priv->rf_kill, 2 * HZ);
6636 goto exit_unlock;
6637 }
6638
6639 /* RF Kill is now disabled, so bring the device back up */
6640
6641 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6642 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6643 "device\n");
6644
6645 /* we can not do an adapter restart while inside an irq lock */
6646 queue_work(priv->workqueue, &priv->adapter_restart);
6647 } else
6648 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6649 "enabled\n");
6650
6651 exit_unlock:
6652 spin_unlock_irqrestore(&priv->lock, flags);
6653}
6654
6655static int ipw_setup_deferred_work(struct ipw_priv *priv)
6656{
6657 int ret = 0;
6658
6659 priv->workqueue = create_workqueue(DRV_NAME);
6660 init_waitqueue_head(&priv->wait_command_queue);
6661
6662 INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
6663 INIT_WORK(&priv->associate, ipw_associate, priv);
6664 INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
6665 INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
6666 INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
6667 INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
6668 INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
6669 INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
6670 INIT_WORK(&priv->request_scan,
6671 (void (*)(void *))ipw_request_scan, priv);
6672 INIT_WORK(&priv->gather_stats,
6673 (void (*)(void *))ipw_gather_stats, priv);
6674 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
6675 INIT_WORK(&priv->roam, ipw_roam, priv);
6676 INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
6677
6678 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6679 ipw_irq_tasklet, (unsigned long)priv);
6680
6681 return ret;
6682}
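/* The three-argument INIT_WORK(work, func, data) form used above is the
 * workqueue API of this kernel generation: the third argument is handed to
 * the handler when the work runs, and the handler prototype is
 * void (*)(void *), which is why ipw_up() and friends need the casts. */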
6683
6684static void shim__set_security(struct net_device *dev,
6685 struct ieee80211_security *sec)
6686{
6687 struct ipw_priv *priv = ieee80211_priv(dev);
6688 int i;
6689
6690 for (i = 0; i < 4; i++) {
6691 if (sec->flags & (1 << i)) {
6692 priv->sec.key_sizes[i] = sec->key_sizes[i];
6693 if (sec->key_sizes[i] == 0)
6694 priv->sec.flags &= ~(1 << i);
6695 else
6696 memcpy(priv->sec.keys[i], sec->keys[i],
6697 sec->key_sizes[i]);
6698 priv->sec.flags |= (1 << i);
6699 priv->status |= STATUS_SECURITY_UPDATED;
6700 }
6701 }
6702
6703 if ((sec->flags & SEC_ACTIVE_KEY) &&
6704 priv->sec.active_key != sec->active_key) {
6705 if (sec->active_key <= 3) {
6706 priv->sec.active_key = sec->active_key;
6707 priv->sec.flags |= SEC_ACTIVE_KEY;
6708 } else
6709 priv->sec.flags &= ~SEC_ACTIVE_KEY;
6710 priv->status |= STATUS_SECURITY_UPDATED;
6711 }
6712
6713 if ((sec->flags & SEC_AUTH_MODE) &&
6714 (priv->sec.auth_mode != sec->auth_mode)) {
6715 priv->sec.auth_mode = sec->auth_mode;
6716 priv->sec.flags |= SEC_AUTH_MODE;
6717 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
6718 priv->capability |= CAP_SHARED_KEY;
6719 else
6720 priv->capability &= ~CAP_SHARED_KEY;
6721 priv->status |= STATUS_SECURITY_UPDATED;
6722 }
6723
6724 if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
6725 priv->sec.flags |= SEC_ENABLED;
6726 priv->sec.enabled = sec->enabled;
6727 priv->status |= STATUS_SECURITY_UPDATED;
6728 if (sec->enabled)
6729 priv->capability |= CAP_PRIVACY_ON;
6730 else
6731 priv->capability &= ~CAP_PRIVACY_ON;
6732 }
6733
6734 if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
6735 priv->sec.level = sec->level;
6736 priv->sec.flags |= SEC_LEVEL;
6737 priv->status |= STATUS_SECURITY_UPDATED;
6738 }
6739
6740 /* To match current functionality of ipw2100 (which works well w/
6741	 * various supplicants), we don't force a disassociate if the
6742 * privacy capability changes ... */
6743#if 0
6744 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
6745 (((priv->assoc_request.capability &
6746 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
6747 (!(priv->assoc_request.capability &
6748 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
6749 IPW_DEBUG_ASSOC("Disassociating due to capability "
6750 "change.\n");
6751 ipw_disassociate(priv);
6752 }
6753#endif
6754}
6755
6756static int init_supported_rates(struct ipw_priv *priv,
6757 struct ipw_supported_rates *rates)
6758{
6759 /* TODO: Mask out rates based on priv->rates_mask */
6760
6761 memset(rates, 0, sizeof(*rates));
6762 /* configure supported rates */
6763 switch (priv->ieee->freq_band) {
6764 case IEEE80211_52GHZ_BAND:
6765 rates->ieee_mode = IPW_A_MODE;
6766 rates->purpose = IPW_RATE_CAPABILITIES;
6767 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
6768 IEEE80211_OFDM_DEFAULT_RATES_MASK);
6769 break;
6770
6771 default: /* Mixed or 2.4Ghz */
6772 rates->ieee_mode = IPW_G_MODE;
6773 rates->purpose = IPW_RATE_CAPABILITIES;
6774 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
6775 IEEE80211_CCK_DEFAULT_RATES_MASK);
6776 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
6777 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
6778 IEEE80211_OFDM_DEFAULT_RATES_MASK);
6779 }
6780 break;
6781 }
6782
6783 return 0;
6784}
6785
6786static int ipw_config(struct ipw_priv *priv)
6787{
6788 int i;
6789 struct ipw_tx_power tx_power;
6790
6791 memset(&priv->sys_config, 0, sizeof(priv->sys_config));
6792 memset(&tx_power, 0, sizeof(tx_power));
6793
6794 /* This is only called from ipw_up, which resets/reloads the firmware
6795 so, we don't need to first disable the card before we configure
6796 it */
6797
6798 /* configure device for 'G' band */
6799 tx_power.ieee_mode = IPW_G_MODE;
6800 tx_power.num_channels = 11;
6801 for (i = 0; i < 11; i++) {
6802 tx_power.channels_tx_power[i].channel_number = i + 1;
6803 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
6804 }
6805 if (ipw_send_tx_power(priv, &tx_power))
6806 goto error;
6807
6808 /* configure device to also handle 'B' band */
6809 tx_power.ieee_mode = IPW_B_MODE;
6810 if (ipw_send_tx_power(priv, &tx_power))
6811 goto error;
6812
6813 /* initialize adapter address */
6814 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
6815 goto error;
6816
6817 /* set basic system config settings */
6818 init_sys_config(&priv->sys_config);
6819 if (ipw_send_system_config(priv, &priv->sys_config))
6820 goto error;
6821
6822 init_supported_rates(priv, &priv->rates);
6823 if (ipw_send_supported_rates(priv, &priv->rates))
6824 goto error;
6825
6826 /* Set request-to-send threshold */
6827 if (priv->rts_threshold) {
6828 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
6829 goto error;
6830 }
6831
6832 if (ipw_set_random_seed(priv))
6833 goto error;
6834
6835 /* final state transition to the RUN state */
6836 if (ipw_send_host_complete(priv))
6837 goto error;
6838
6839 /* If configured to try and auto-associate, kick off a scan */
6840 if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
6841 goto error;
6842
6843 return 0;
6844
6845 error:
6846 return -EIO;
6847}
6848
6849#define MAX_HW_RESTARTS 5
6850static int ipw_up(struct ipw_priv *priv)
6851{
6852 int rc, i;
6853
6854 if (priv->status & STATUS_EXIT_PENDING)
6855 return -EIO;
6856
6857 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6858 /* Load the microcode, firmware, and eeprom.
6859 * Also start the clocks. */
6860 rc = ipw_load(priv);
6861 if (rc) {
6862 IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);
6863 return rc;
6864 }
6865
6866 ipw_init_ordinals(priv);
6867 if (!(priv->config & CFG_CUSTOM_MAC))
6868 eeprom_parse_mac(priv, priv->mac_addr);
6869 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
6870
6871 if (priv->status & STATUS_RF_KILL_MASK)
6872 return 0;
6873
6874 rc = ipw_config(priv);
6875 if (!rc) {
6876 IPW_DEBUG_INFO("Configured device on count %i\n", i);
6877 priv->notif_missed_beacons = 0;
6878 netif_start_queue(priv->net_dev);
6879 return 0;
6880 } else {
6881 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
6882 rc);
6883 }
6884
6885 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
6886 i, MAX_HW_RESTARTS);
6887
6888 /* We had an error bringing up the hardware, so take it
6889 * all the way back down so we can try again */
6890 ipw_down(priv);
6891 }
6892
6893 /* tried to restart and config the device for as long as our
6894 * patience could withstand */
6895 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
6896 return -EIO;
6897}
6898
6899static void ipw_down(struct ipw_priv *priv)
6900{
6901 /* Attempt to disable the card */
6902#if 0
6903 ipw_send_card_disable(priv, 0);
6904#endif
6905
6906 /* tell the device to stop sending interrupts */
6907 ipw_disable_interrupts(priv);
6908
6909 /* Clear all bits but the RF Kill */
6910 priv->status &= STATUS_RF_KILL_MASK;
6911
6912 netif_carrier_off(priv->net_dev);
6913 netif_stop_queue(priv->net_dev);
6914
6915 ipw_stop_nic(priv);
6916}
6917
6918/* Called by register_netdev() */
6919static int ipw_net_init(struct net_device *dev)
6920{
6921 struct ipw_priv *priv = ieee80211_priv(dev);
6922
6923 if (priv->status & STATUS_RF_KILL_SW) {
6924 IPW_WARNING("Radio disabled by module parameter.\n");
6925 return 0;
6926 } else if (rf_kill_active(priv)) {
6927 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
6928 "Kill switch must be turned off for "
6929 "wireless networking to work.\n");
6930 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
6931 return 0;
6932 }
6933
6934 if (ipw_up(priv))
6935 return -EIO;
6936
6937 return 0;
6938}
6939
6940/* PCI driver stuff */
6941static struct pci_device_id card_ids[] = {
6942 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
6943 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
6944 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
6945 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
6946 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
6947 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
6948 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
6949 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
6950 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
6951 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
6952 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
6953 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
6954 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
6955 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
6956 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
6957 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
6958 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
6959 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
6960 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
6961 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */
6962 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6963 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6964
6965 /* required last entry */
6966 {0,}
6967};
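/* Each entry above is a positional struct pci_device_id initializer:
 * {vendor, device, subvendor, subdevice, class, class_mask, driver_data}.
 * The 0x1043 device is matched per subsystem ID, while the later entries
 * wildcard the subsystem with PCI_ANY_ID. */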
6968
6969MODULE_DEVICE_TABLE(pci, card_ids);
6970
6971static struct attribute *ipw_sysfs_entries[] = {
6972 &dev_attr_rf_kill.attr,
6973 &dev_attr_direct_dword.attr,
6974 &dev_attr_indirect_byte.attr,
6975 &dev_attr_indirect_dword.attr,
6976 &dev_attr_mem_gpio_reg.attr,
6977 &dev_attr_command_event_reg.attr,
6978 &dev_attr_nic_type.attr,
6979 &dev_attr_status.attr,
6980 &dev_attr_cfg.attr,
6981 &dev_attr_dump_errors.attr,
6982 &dev_attr_dump_events.attr,
6983 &dev_attr_eeprom_delay.attr,
6984 &dev_attr_ucode_version.attr,
6985 &dev_attr_rtc.attr,
6986 NULL
6987};
6988
6989static struct attribute_group ipw_attribute_group = {
6990 .name = NULL, /* put in device directory */
6991 .attrs = ipw_sysfs_entries,
6992};
6993
6994static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6995{
6996 int err = 0;
6997 struct net_device *net_dev;
6998 void __iomem *base;
6999 u32 length, val;
7000 struct ipw_priv *priv;
7001 int band, modulation;
7002
7003 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
7004 if (net_dev == NULL) {
7005 err = -ENOMEM;
7006 goto out;
7007 }
7008
7009 priv = ieee80211_priv(net_dev);
7010 priv->ieee = netdev_priv(net_dev);
7011 priv->net_dev = net_dev;
7012 priv->pci_dev = pdev;
7013#ifdef CONFIG_IPW_DEBUG
7014 ipw_debug_level = debug;
7015#endif
7016 spin_lock_init(&priv->lock);
7017
7018 if (pci_enable_device(pdev)) {
7019 err = -ENODEV;
7020 goto out_free_ieee80211;
7021 }
7022
7023 pci_set_master(pdev);
7024
7025 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7026 if (!err)
7027 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
7028 if (err) {
7029 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
7030 goto out_pci_disable_device;
7031 }
7032
7033 pci_set_drvdata(pdev, priv);
7034
7035 err = pci_request_regions(pdev, DRV_NAME);
7036 if (err)
7037 goto out_pci_disable_device;
7038
7039 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7040 * PCI Tx retries from interfering with C3 CPU state */
7041 pci_read_config_dword(pdev, 0x40, &val);
7042 if ((val & 0x0000ff00) != 0)
7043 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
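	/* config offset 0x41 is the second byte of the dword read at 0x40,
	 * i.e. bits 8..15, hence the 0x0000ff00 test and the 0xffff00ff
	 * clearing mask above */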
7044
7045 length = pci_resource_len(pdev, 0);
7046 priv->hw_len = length;
7047
7048 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
7049 if (!base) {
7050 err = -ENODEV;
7051 goto out_pci_release_regions;
7052 }
7053
7054 priv->hw_base = base;
7055 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
7056 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
7057
7058 err = ipw_setup_deferred_work(priv);
7059 if (err) {
7060 IPW_ERROR("Unable to setup deferred work\n");
7061 goto out_iounmap;
7062 }
7063
7064 /* Initialize module parameter values here */
7065 if (ifname)
7066 strncpy(net_dev->name, ifname, IFNAMSIZ);
7067
7068 if (associate)
7069 priv->config |= CFG_ASSOCIATE;
7070 else
7071 IPW_DEBUG_INFO("Auto associate disabled.\n");
7072
7073 if (auto_create)
7074 priv->config |= CFG_ADHOC_CREATE;
7075 else
7076 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
7077
7078 if (disable) {
7079 priv->status |= STATUS_RF_KILL_SW;
7080 IPW_DEBUG_INFO("Radio disabled.\n");
7081 }
7082
7083 if (channel != 0) {
7084 priv->config |= CFG_STATIC_CHANNEL;
7085 priv->channel = channel;
7086 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7088 /* TODO: Validate that provided channel is in range */
7089 }
7090
7091 switch (mode) {
7092 case 1:
7093 priv->ieee->iw_mode = IW_MODE_ADHOC;
7094 break;
7095#ifdef CONFIG_IPW_PROMISC
7096 case 2:
7097 priv->ieee->iw_mode = IW_MODE_MONITOR;
7098 break;
7099#endif
7100 default:
7101 case 0:
7102 priv->ieee->iw_mode = IW_MODE_INFRA;
7103 break;
7104 }
7105
7106 if ((priv->pci_dev->device == 0x4223) ||
7107 (priv->pci_dev->device == 0x4224)) {
7108 printk(KERN_INFO DRV_NAME
7109 ": Detected Intel PRO/Wireless 2915ABG Network "
7110 "Connection\n");
7111 priv->ieee->abg_ture = 1;
7112 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
7113 modulation = IEEE80211_OFDM_MODULATION |
7114 IEEE80211_CCK_MODULATION;
7115 priv->adapter = IPW_2915ABG;
7116 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
7117 } else {
7118 if (priv->pci_dev->device == 0x4221)
7119 printk(KERN_INFO DRV_NAME
7120 ": Detected Intel PRO/Wireless 2225BG Network "
7121 "Connection\n");
7122 else
7123 printk(KERN_INFO DRV_NAME
7124 ": Detected Intel PRO/Wireless 2200BG Network "
7125 "Connection\n");
7126
7127 priv->ieee->abg_ture = 0;
7128 band = IEEE80211_24GHZ_BAND;
7129 modulation = IEEE80211_OFDM_MODULATION |
7130 IEEE80211_CCK_MODULATION;
7131 priv->adapter = IPW_2200BG;
7132 priv->ieee->mode = IEEE_G | IEEE_B;
7133 }
7134
7135 priv->ieee->freq_band = band;
7136 priv->ieee->modulation = modulation;
7137
7138 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
7139
7140 priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
7141 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
7142
7143 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
7144
7145 /* If power management is turned on, default to AC mode */
7146 priv->power_mode = IPW_POWER_AC;
7147 priv->tx_power = IPW_DEFAULT_TX_POWER;
7148
7149 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
7150 if (err) {
7151 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
7152 goto out_destroy_workqueue;
7153 }
7154
7155 SET_MODULE_OWNER(net_dev);
7156 SET_NETDEV_DEV(net_dev, &pdev->dev);
7157
7158 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
7159 priv->ieee->set_security = shim__set_security;
7160
7161 net_dev->open = ipw_net_open;
7162 net_dev->stop = ipw_net_stop;
7163 net_dev->init = ipw_net_init;
7164 net_dev->get_stats = ipw_net_get_stats;
7165 net_dev->set_multicast_list = ipw_net_set_multicast_list;
7166 net_dev->set_mac_address = ipw_net_set_mac_address;
7167 net_dev->get_wireless_stats = ipw_get_wireless_stats;
7168 net_dev->wireless_handlers = &ipw_wx_handler_def;
7169 net_dev->ethtool_ops = &ipw_ethtool_ops;
7170 net_dev->irq = pdev->irq;
7171 net_dev->base_addr = (unsigned long)priv->hw_base;
7172 net_dev->mem_start = pci_resource_start(pdev, 0);
7173 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
7174
7175 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
7176 if (err) {
7177 IPW_ERROR("failed to create sysfs device attributes\n");
7178 goto out_release_irq;
7179 }
7180
7181 err = register_netdev(net_dev);
7182 if (err) {
7183 IPW_ERROR("failed to register network device\n");
7184 goto out_remove_group;
7185 }
7186
7187 return 0;
7188
7189 out_remove_group:
7190 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7191 out_release_irq:
7192 free_irq(pdev->irq, priv);
7193 out_destroy_workqueue:
7194 destroy_workqueue(priv->workqueue);
7195 priv->workqueue = NULL;
7196 out_iounmap:
7197 iounmap(priv->hw_base);
7198 out_pci_release_regions:
7199 pci_release_regions(pdev);
7200 out_pci_disable_device:
7201 pci_disable_device(pdev);
7202 pci_set_drvdata(pdev, NULL);
7203 out_free_ieee80211:
7204 free_ieee80211(priv->net_dev);
7205 out:
7206 return err;
7207}
7208
7209static void ipw_pci_remove(struct pci_dev *pdev)
7210{
7211 struct ipw_priv *priv = pci_get_drvdata(pdev);
7212 if (!priv)
7213 return;
7214
7215 priv->status |= STATUS_EXIT_PENDING;
7216
7217 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7218
7219 ipw_down(priv);
7220
7221 unregister_netdev(priv->net_dev);
7222
7223 if (priv->rxq) {
7224 ipw_rx_queue_free(priv, priv->rxq);
7225 priv->rxq = NULL;
7226 }
7227 ipw_tx_queue_free(priv);
7228
7229 /* ipw_down will ensure that there is no more pending work
7230	 * in the workqueue, so we can safely remove them now. */
7231 if (priv->workqueue) {
7232 cancel_delayed_work(&priv->adhoc_check);
7233 cancel_delayed_work(&priv->gather_stats);
7234 cancel_delayed_work(&priv->request_scan);
7235 cancel_delayed_work(&priv->rf_kill);
7236 cancel_delayed_work(&priv->scan_check);
7237 destroy_workqueue(priv->workqueue);
7238 priv->workqueue = NULL;
7239 }
7240
7241 free_irq(pdev->irq, priv);
7242 iounmap(priv->hw_base);
7243 pci_release_regions(pdev);
7244 pci_disable_device(pdev);
7245 pci_set_drvdata(pdev, NULL);
7246 free_ieee80211(priv->net_dev);
7247
7248#ifdef CONFIG_PM
7249 if (fw_loaded) {
7250 release_firmware(bootfw);
7251 release_firmware(ucode);
7252 release_firmware(firmware);
7253 fw_loaded = 0;
7254 }
7255#endif
7256}
7257
7258#ifdef CONFIG_PM
7259static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
7260{
7261 struct ipw_priv *priv = pci_get_drvdata(pdev);
7262 struct net_device *dev = priv->net_dev;
7263
7264 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
7265
7266 /* Take down the device; powers it off, etc. */
7267 ipw_down(priv);
7268
7269 /* Remove the PRESENT state of the device */
7270 netif_device_detach(dev);
7271
7272 pci_save_state(pdev);
7273 pci_disable_device(pdev);
7274 pci_set_power_state(pdev, pci_choose_state(pdev, state));
7275
7276 return 0;
7277}
7278
7279static int ipw_pci_resume(struct pci_dev *pdev)
7280{
7281 struct ipw_priv *priv = pci_get_drvdata(pdev);
7282 struct net_device *dev = priv->net_dev;
7283 u32 val;
7284
7285 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
7286
7287 pci_set_power_state(pdev, 0);
7288 pci_enable_device(pdev);
7289#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
7290 pci_restore_state(pdev, priv->pm_state);
7291#else
7292 pci_restore_state(pdev);
7293#endif
7294 /*
7295 * Suspend/Resume resets the PCI configuration space, so we have to
7296 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
7297 * from interfering with C3 CPU state. pci_restore_state won't help
7298 * here since it only restores the first 64 bytes pci config header.
7299 */
7300 pci_read_config_dword(pdev, 0x40, &val);
7301 if ((val & 0x0000ff00) != 0)
7302 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
7303
7304 /* Set the device back into the PRESENT state; this will also wake
7305	 * the queue if needed */
7306 netif_device_attach(dev);
7307
7308 /* Bring the device back up */
7309 queue_work(priv->workqueue, &priv->up);
7310
7311 return 0;
7312}
7313#endif
7314
7315/* driver initialization stuff */
7316static struct pci_driver ipw_driver = {
7317 .name = DRV_NAME,
7318 .id_table = card_ids,
7319 .probe = ipw_pci_probe,
7320 .remove = __devexit_p(ipw_pci_remove),
7321#ifdef CONFIG_PM
7322 .suspend = ipw_pci_suspend,
7323 .resume = ipw_pci_resume,
7324#endif
7325};
7326
7327static int __init ipw_init(void)
7328{
7329 int ret;
7330
7331 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7332 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
7333
7334 ret = pci_module_init(&ipw_driver);
7335 if (ret) {
7336 IPW_ERROR("Unable to initialize PCI module\n");
7337 return ret;
7338 }
7339
7340 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
7341 if (ret) {
7342 IPW_ERROR("Unable to create driver sysfs file\n");
7343 pci_unregister_driver(&ipw_driver);
7344 return ret;
7345 }
7346
7347 return ret;
7348}
7349
7350static void __exit ipw_exit(void)
7351{
7352 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
7353 pci_unregister_driver(&ipw_driver);
7354}
7355
7356module_param(disable, int, 0444);
7357MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
7358
7359module_param(associate, int, 0444);
7360MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
7361
7362module_param(auto_create, int, 0444);
7363MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
7364
7365module_param(debug, int, 0444);
7366MODULE_PARM_DESC(debug, "debug output mask");
7367
7368module_param(channel, int, 0444);
7369MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
7370
7371module_param(ifname, charp, 0444);
7372MODULE_PARM_DESC(ifname, "network device name (default eth%d)");
7373
7374#ifdef CONFIG_IPW_PROMISC
7375module_param(mode, int, 0444);
7376MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
7377#else
7378module_param(mode, int, 0444);
7379MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
7380#endif
7381
7382module_exit(ipw_exit);
7383module_init(ipw_init);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
new file mode 100644
index 000000000000..5b00882133f9
--- /dev/null
+++ b/drivers/net/wireless/ipw2200.h
@@ -0,0 +1,1680 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26
27#ifndef __ipw2200_h__
28#define __ipw2200_h__
29
30#define WEXT_USECHANNELS 1
31
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/config.h>
35#include <linux/init.h>
36
37#include <linux/version.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/ethtool.h>
41#include <linux/skbuff.h>
42#include <linux/etherdevice.h>
43#include <linux/delay.h>
44#include <linux/random.h>
45#include <linux/dma-mapping.h>
46
47#include <linux/firmware.h>
48#include <linux/wireless.h>
50#include <asm/io.h>
51
52#include <net/ieee80211.h>
53
54#define DRV_NAME "ipw2200"
55
56#include <linux/workqueue.h>
57
58/* Authentication and Association States */
59enum connection_manager_assoc_states {
60 CMAS_INIT = 0,
61 CMAS_TX_AUTH_SEQ_1,
62 CMAS_RX_AUTH_SEQ_2,
63 CMAS_AUTH_SEQ_1_PASS,
64 CMAS_AUTH_SEQ_1_FAIL,
65 CMAS_TX_AUTH_SEQ_3,
66 CMAS_RX_AUTH_SEQ_4,
67 CMAS_AUTH_SEQ_2_PASS,
68 CMAS_AUTH_SEQ_2_FAIL,
69 CMAS_AUTHENTICATED,
70 CMAS_TX_ASSOC,
71 CMAS_RX_ASSOC_RESP,
72 CMAS_ASSOCIATED,
73 CMAS_LAST
74};
75
76#define IPW_WAIT (1<<0)
77#define IPW_QUIET (1<<1)
78#define IPW_ROAMING (1<<2)
79
80#define IPW_POWER_MODE_CAM 0x00 //(always on)
81#define IPW_POWER_INDEX_1 0x01
82#define IPW_POWER_INDEX_2 0x02
83#define IPW_POWER_INDEX_3 0x03
84#define IPW_POWER_INDEX_4 0x04
85#define IPW_POWER_INDEX_5 0x05
86#define IPW_POWER_AC 0x06
87#define IPW_POWER_BATTERY 0x07
88#define IPW_POWER_LIMIT 0x07
89#define IPW_POWER_MASK 0x0F
90#define IPW_POWER_ENABLED 0x10
91#define IPW_POWER_LEVEL(x) ((x) & IPW_POWER_MASK)
92
93#define IPW_CMD_HOST_COMPLETE 2
94#define IPW_CMD_POWER_DOWN 4
95#define IPW_CMD_SYSTEM_CONFIG 6
96#define IPW_CMD_MULTICAST_ADDRESS 7
97#define IPW_CMD_SSID 8
98#define IPW_CMD_ADAPTER_ADDRESS 11
99#define IPW_CMD_PORT_TYPE 12
100#define IPW_CMD_RTS_THRESHOLD 15
101#define IPW_CMD_FRAG_THRESHOLD 16
102#define IPW_CMD_POWER_MODE 17
103#define IPW_CMD_WEP_KEY 18
104#define IPW_CMD_TGI_TX_KEY 19
105#define IPW_CMD_SCAN_REQUEST 20
106#define IPW_CMD_ASSOCIATE 21
107#define IPW_CMD_SUPPORTED_RATES 22
108#define IPW_CMD_SCAN_ABORT 23
109#define IPW_CMD_TX_FLUSH 24
110#define IPW_CMD_QOS_PARAMETERS 25
111#define IPW_CMD_SCAN_REQUEST_EXT 26
112#define IPW_CMD_DINO_CONFIG 30
113#define IPW_CMD_RSN_CAPABILITIES 31
114#define IPW_CMD_RX_KEY 32
115#define IPW_CMD_CARD_DISABLE 33
116#define IPW_CMD_SEED_NUMBER 34
117#define IPW_CMD_TX_POWER 35
118#define IPW_CMD_COUNTRY_INFO 36
119#define IPW_CMD_AIRONET_INFO 37
120#define IPW_CMD_AP_TX_POWER 38
121#define IPW_CMD_CCKM_INFO 39
122#define IPW_CMD_CCX_VER_INFO 40
123#define IPW_CMD_SET_CALIBRATION 41
124#define IPW_CMD_SENSITIVITY_CALIB 42
125#define IPW_CMD_RETRY_LIMIT 51
126#define IPW_CMD_IPW_PRE_POWER_DOWN 58
127#define IPW_CMD_VAP_BEACON_TEMPLATE 60
128#define IPW_CMD_VAP_DTIM_PERIOD 61
129#define IPW_CMD_EXT_SUPPORTED_RATES 62
130#define IPW_CMD_VAP_LOCAL_TX_PWR_CONSTRAINT 63
131#define IPW_CMD_VAP_QUIET_INTERVALS 64
132#define IPW_CMD_VAP_CHANNEL_SWITCH 65
133#define IPW_CMD_VAP_MANDATORY_CHANNELS 66
134#define IPW_CMD_VAP_CELL_PWR_LIMIT 67
135#define IPW_CMD_VAP_CF_PARAM_SET 68
136#define IPW_CMD_VAP_SET_BEACONING_STATE 69
137#define IPW_CMD_MEASUREMENT 80
138#define IPW_CMD_POWER_CAPABILITY 81
139#define IPW_CMD_SUPPORTED_CHANNELS 82
140#define IPW_CMD_TPC_REPORT 83
141#define IPW_CMD_WME_INFO 84
142#define IPW_CMD_PRODUCTION_COMMAND 85
143#define IPW_CMD_LINKSYS_EOU_INFO 90
144
145#define RFD_SIZE 4
146#define NUM_TFD_CHUNKS 6
147
148#define TX_QUEUE_SIZE 32
149#define RX_QUEUE_SIZE 32
150
151#define DINO_CMD_WEP_KEY 0x08
152#define DINO_CMD_TX 0x0B
153#define DCT_ANTENNA_A 0x01
154#define DCT_ANTENNA_B 0x02
155
156#define IPW_A_MODE 0
157#define IPW_B_MODE 1
158#define IPW_G_MODE 2
159
160/*
161 * TX Queue Flag Definitions
162 */
163
164/* abort attempt if mgmt frame is rx'd */
165#define DCT_FLAG_ABORT_MGMT 0x01
166
167/* require CTS */
168#define DCT_FLAG_CTS_REQUIRED 0x02
169
170/* use short preamble */
171#define DCT_FLAG_SHORT_PREMBL 0x04
172
173/* RTS/CTS first */
174#define DCT_FLAG_RTS_REQD 0x08
175
176/* don't calculate duration field */
177#define DCT_FLAG_DUR_SET 0x10
178
179/* even if MAC WEP set (allows pre-encrypt) */
180#define DCT_FLAG_NO_WEP 0x20
181
182/* overwrite TSF field */
183#define DCT_FLAG_TSF_REQD 0x40
184
185/* ACK rx is expected to follow */
186#define DCT_FLAG_ACK_REQD 0x80
187
188#define DCT_FLAG_EXT_MODE_CCK 0x01
189#define DCT_FLAG_EXT_MODE_OFDM 0x00
190
191#define TX_RX_TYPE_MASK 0xFF
192#define TX_FRAME_TYPE 0x00
193#define TX_HOST_COMMAND_TYPE 0x01
194#define RX_FRAME_TYPE 0x09
195#define RX_HOST_NOTIFICATION_TYPE 0x03
196#define RX_HOST_CMD_RESPONSE_TYPE 0x04
197#define RX_TX_FRAME_RESPONSE_TYPE 0x05
198#define TFD_NEED_IRQ_MASK 0x04
199
200#define HOST_CMD_DINO_CONFIG 30
201
202#define HOST_NOTIFICATION_STATUS_ASSOCIATED 10
203#define HOST_NOTIFICATION_STATUS_AUTHENTICATE 11
204#define HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT 12
205#define HOST_NOTIFICATION_STATUS_SCAN_COMPLETED 13
206#define HOST_NOTIFICATION_STATUS_FRAG_LENGTH 14
207#define HOST_NOTIFICATION_STATUS_LINK_DETERIORATION 15
208#define HOST_NOTIFICATION_DINO_CONFIG_RESPONSE 16
209#define HOST_NOTIFICATION_STATUS_BEACON_STATE 17
210#define HOST_NOTIFICATION_STATUS_TGI_TX_KEY 18
211#define HOST_NOTIFICATION_TX_STATUS 19
212#define HOST_NOTIFICATION_CALIB_KEEP_RESULTS 20
213#define HOST_NOTIFICATION_MEASUREMENT_STARTED 21
214#define HOST_NOTIFICATION_MEASUREMENT_ENDED 22
215#define HOST_NOTIFICATION_CHANNEL_SWITCHED 23
216#define HOST_NOTIFICATION_RX_DURING_QUIET_PERIOD 24
217#define HOST_NOTIFICATION_NOISE_STATS 25
218#define HOST_NOTIFICATION_S36_MEASUREMENT_ACCEPTED 30
219#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31
220
221#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1
222#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 24
223#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8
224#define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300
225
226#define MACADRR_BYTE_LEN 6
227
228#define DCR_TYPE_AP 0x01
229#define DCR_TYPE_WLAP 0x02
230#define DCR_TYPE_MU_ESS 0x03
231#define DCR_TYPE_MU_IBSS 0x04
232#define DCR_TYPE_MU_PIBSS 0x05
233#define DCR_TYPE_SNIFFER 0x06
234#define DCR_TYPE_MU_BSS DCR_TYPE_MU_ESS
235
236/**
237 * Generic queue structure
238 *
239 * Contains common data for Rx and Tx queues
240 */
241struct clx2_queue {
242 int n_bd; /**< number of BDs in this queue */
243	int first_empty;	/**< index of the first empty entry */
244 int last_used; /**< last used entry (index) */
245 u32 reg_w; /**< 'write' reg (queue head), addr in domain 1 */
246 u32 reg_r; /**< 'read' reg (queue tail), addr in domain 1 */
247 dma_addr_t dma_addr; /**< physical addr for BD's */
248	int low_mark;		/**< low watermark: resume the queue if free space is more than this */
249	int high_mark;		/**< high watermark: stop the queue if free space is less than this */
250} __attribute__ ((packed));
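The clx2_queue above is a producer/consumer ring: first_empty advances as TFDs are queued, last_used advances as the firmware completes them, and the low/high watermarks throttle the net queue. A generic, self-contained sketch of how such a ring wraps its index and estimates free space; this illustrates the idea only and does not reproduce the driver's ipw_queue_* helpers, which are not shown here:

#include <stdio.h>

/* toy ring mirroring the clx2_queue fields */
struct ring {
	int n_bd, first_empty, last_used;
};

static int inc_wrap(int index, int n_bd)
{
	return (index + 1 == n_bd) ? 0 : index + 1;
}

static int ring_space(const struct ring *q)
{
	int s = q->last_used - q->first_empty;

	if (s <= 0)
		s += q->n_bd;
	s -= 2;			/* keep a gap so "full" never looks like "empty" */
	return s < 0 ? 0 : s;
}

int main(void)
{
	struct ring q = { .n_bd = 8, .first_empty = 6, .last_used = 2 };

	printf("free=%d, next write slot=%d\n",
	       ring_space(&q), inc_wrap(q.first_empty, q.n_bd));
	return 0;
}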
251
252struct machdr32 {
253 u16 frame_ctl;
254 u16 duration; // watch out for endians!
255 u8 addr1[MACADRR_BYTE_LEN];
256 u8 addr2[MACADRR_BYTE_LEN];
257 u8 addr3[MACADRR_BYTE_LEN];
258 u16 seq_ctrl; // more endians!
259 u8 addr4[MACADRR_BYTE_LEN];
260 u16 qos_ctrl;
261} __attribute__ ((packed));
262
263struct machdr30 {
264 u16 frame_ctl;
265 u16 duration; // watch out for endians!
266 u8 addr1[MACADRR_BYTE_LEN];
267 u8 addr2[MACADRR_BYTE_LEN];
268 u8 addr3[MACADRR_BYTE_LEN];
269 u16 seq_ctrl; // more endians!
270 u8 addr4[MACADRR_BYTE_LEN];
271} __attribute__ ((packed));
272
273struct machdr26 {
274 u16 frame_ctl;
275 u16 duration; // watch out for endians!
276 u8 addr1[MACADRR_BYTE_LEN];
277 u8 addr2[MACADRR_BYTE_LEN];
278 u8 addr3[MACADRR_BYTE_LEN];
279 u16 seq_ctrl; // more endians!
280 u16 qos_ctrl;
281} __attribute__ ((packed));
282
283struct machdr24 {
284 u16 frame_ctl;
285 u16 duration; // watch out for endians!
286 u8 addr1[MACADRR_BYTE_LEN];
287 u8 addr2[MACADRR_BYTE_LEN];
288 u8 addr3[MACADRR_BYTE_LEN];
289 u16 seq_ctrl; // more endians!
290} __attribute__ ((packed));
291
292// TX TFD with 32 byte MAC Header
293struct tx_tfd_32 {
294 struct machdr32 mchdr; // 32
295 u32 uivplaceholder[2]; // 8
296} __attribute__ ((packed));
297
298// TX TFD with 30 byte MAC Header
299struct tx_tfd_30 {
300 struct machdr30 mchdr; // 30
301 u8 reserved[2]; // 2
302 u32 uivplaceholder[2]; // 8
303} __attribute__ ((packed));
304
305// tx tfd with 26 byte mac header
306struct tx_tfd_26 {
307 struct machdr26 mchdr; // 26
308 u8 reserved1[2]; // 2
309 u32 uivplaceholder[2]; // 8
310 u8 reserved2[4]; // 4
311} __attribute__ ((packed));
312
313// tx tfd with 24 byte mac header
314struct tx_tfd_24 {
315 struct machdr24 mchdr; // 24
316 u32 uivplaceholder[2]; // 8
317 u8 reserved[8]; // 8
318} __attribute__ ((packed));
319
320#define DCT_WEP_KEY_FIELD_LENGTH 16
321
322struct tfd_command {
323 u8 index;
324 u8 length;
325 u16 reserved;
326 u8 payload[0];
327} __attribute__ ((packed));
328
329struct tfd_data {
330 /* Header */
331 u32 work_area_ptr;
332 u8 station_number; /* 0 for BSS */
333 u8 reserved1;
334 u16 reserved2;
335
336 /* Tx Parameters */
337 u8 cmd_id;
338 u8 seq_num;
339 u16 len;
340 u8 priority;
341 u8 tx_flags;
342 u8 tx_flags_ext;
343 u8 key_index;
344 u8 wepkey[DCT_WEP_KEY_FIELD_LENGTH];
345 u8 rate;
346 u8 antenna;
347 u16 next_packet_duration;
348 u16 next_frag_len;
349	u16 back_off_counter;	// txop
350 u8 retrylimit;
351 u16 cwcurrent;
352 u8 reserved3;
353
354 /* 802.11 MAC Header */
355 union {
356 struct tx_tfd_24 tfd_24;
357 struct tx_tfd_26 tfd_26;
358 struct tx_tfd_30 tfd_30;
359 struct tx_tfd_32 tfd_32;
360 } tfd;
361
362 /* Payload DMA info */
363 u32 num_chunks;
364 u32 chunk_ptr[NUM_TFD_CHUNKS];
365 u16 chunk_len[NUM_TFD_CHUNKS];
366} __attribute__ ((packed));
367
368struct txrx_control_flags {
369 u8 message_type;
370 u8 rx_seq_num;
371 u8 control_bits;
372 u8 reserved;
373} __attribute__ ((packed));
374
375#define TFD_SIZE 128
376#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags))
377
378struct tfd_frame {
379 struct txrx_control_flags control_flags;
380 union {
381 struct tfd_data data;
382 struct tfd_command cmd;
383 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
384 } u;
385} __attribute__ ((packed));
386
387typedef void destructor_func(const void *);
388
389/**
390 * Tx Queue for DMA. Queue consists of circular buffer of
391 * BD's and required locking structures.
392 */
393struct clx2_tx_queue {
394 struct clx2_queue q;
395 struct tfd_frame *bd;
396 struct ieee80211_txb **txb;
397};
398
399/*
400 * RX related structures and functions
401 */
402#define RX_FREE_BUFFERS 32
403#define RX_LOW_WATERMARK 8
404
405#define SUP_RATE_11A_MAX_NUM_CHANNELS (8)
406#define SUP_RATE_11B_MAX_NUM_CHANNELS (4)
407#define SUP_RATE_11G_MAX_NUM_CHANNELS (12)
408
409// Used for passing to the driver the number of successes and failures per rate
410struct rate_histogram {
411 union {
412 u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
413 u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
414 u32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
415 } success;
416 union {
417 u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
418 u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
419 u32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
420 } failed;
421} __attribute__ ((packed));
422
423/* statistics command response */
424struct ipw_cmd_stats {
425 u8 cmd_id;
426 u8 seq_num;
427 u16 good_sfd;
428 u16 bad_plcp;
429 u16 wrong_bssid;
430 u16 valid_mpdu;
431 u16 bad_mac_header;
432 u16 reserved_frame_types;
433 u16 rx_ina;
434 u16 bad_crc32;
435 u16 invalid_cts;
436 u16 invalid_acks;
437 u16 long_distance_ina_fina;
438 u16 dsp_silence_unreachable;
439 u16 accumulated_rssi;
440 u16 rx_ovfl_frame_tossed;
441 u16 rssi_silence_threshold;
442 u16 rx_ovfl_frame_supplied;
443 u16 last_rx_frame_signal;
444 u16 last_rx_frame_noise;
445 u16 rx_autodetec_no_ofdm;
446 u16 rx_autodetec_no_barker;
447 u16 reserved;
448} __attribute__ ((packed));
449
450struct notif_channel_result {
451 u8 channel_num;
452 struct ipw_cmd_stats stats;
453 u8 uReserved;
454} __attribute__ ((packed));
455
456struct notif_scan_complete {
457 u8 scan_type;
458 u8 num_channels;
459 u8 status;
460 u8 reserved;
461} __attribute__ ((packed));
462
463struct notif_frag_length {
464 u16 frag_length;
465 u16 reserved;
466} __attribute__ ((packed));
467
468struct notif_beacon_state {
469 u32 state;
470 u32 number;
471} __attribute__ ((packed));
472
473struct notif_tgi_tx_key {
474 u8 key_state;
475 u8 security_type;
476 u8 station_index;
477 u8 reserved;
478} __attribute__ ((packed));
479
480struct notif_link_deterioration {
481 struct ipw_cmd_stats stats;
482 u8 rate;
483 u8 modulation;
484 struct rate_histogram histogram;
485 u8 reserved1;
486 u16 reserved2;
487} __attribute__ ((packed));
488
489struct notif_association {
490 u8 state;
491} __attribute__ ((packed));
492
493struct notif_authenticate {
494 u8 state;
495 struct machdr24 addr;
496 u16 status;
497} __attribute__ ((packed));
498
499struct notif_calibration {
500 u8 data[104];
501} __attribute__ ((packed));
502
503struct notif_noise {
504 u32 value;
505} __attribute__ ((packed));
506
507struct ipw_rx_notification {
508 u8 reserved[8];
509 u8 subtype;
510 u8 flags;
511 u16 size;
512 union {
513 struct notif_association assoc;
514 struct notif_authenticate auth;
515 struct notif_channel_result channel_result;
516 struct notif_scan_complete scan_complete;
517 struct notif_frag_length frag_len;
518 struct notif_beacon_state beacon_state;
519 struct notif_tgi_tx_key tgi_tx_key;
520 struct notif_link_deterioration link_deterioration;
521 struct notif_calibration calibration;
522 struct notif_noise noise;
523 u8 raw[0];
524 } u;
525} __attribute__ ((packed));
526
527struct ipw_rx_frame {
528 u32 reserved1;
529 u8 parent_tsf[4]; // fw_use[0] is boolean for OUR_TSF_IS_GREATER
530 u8 received_channel; // The channel that this frame was received on.
531 // Note that for .11b this does not have to be
532				// the same as the channel on which it was sent.
533 // Filled by LMAC
534 u8 frameStatus;
535 u8 rate;
536 u8 rssi;
537 u8 agc;
538 u8 rssi_dbm;
539 u16 signal;
540 u16 noise;
541 u8 antennaAndPhy;
542 u8 control; // control bit should be on in bg
543 u8 rtscts_rate; // rate of rts or cts (in rts cts sequence rate
544 // is identical)
545 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
546 u16 length;
547 u8 data[0];
548} __attribute__ ((packed));
549
550struct ipw_rx_header {
551 u8 message_type;
552 u8 rx_seq_num;
553 u8 control_bits;
554 u8 reserved;
555} __attribute__ ((packed));
556
557struct ipw_rx_packet {
558 struct ipw_rx_header header;
559 union {
560 struct ipw_rx_frame frame;
561 struct ipw_rx_notification notification;
562 } u;
563} __attribute__ ((packed));
564
565#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12
566#define IPW_RX_FRAME_SIZE sizeof(struct ipw_rx_header) + \
567 sizeof(struct ipw_rx_frame)
568
569struct ipw_rx_mem_buffer {
570 dma_addr_t dma_addr;
571 struct ipw_rx_buffer *rxb;
572 struct sk_buff *skb;
573 struct list_head list;
574}; /* Not transferred over network, so not __attribute__ ((packed)) */
575
576struct ipw_rx_queue {
577 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
578 struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE];
579 u32 processed; /* Internal index to last handled Rx packet */
580 u32 read; /* Shared index to newest available Rx buffer */
581 u32 write; /* Shared index to oldest written Rx packet */
582 u32 free_count; /* Number of pre-allocated buffers in rx_free */
583 /* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */
584	struct list_head rx_free;	/* Each entry owns an SKB */
585 struct list_head rx_used; /* No SKB allocated */
586 spinlock_t lock;
587}; /* Not transferred over network, so not __attribute__ ((packed)) */
588
589struct alive_command_responce {
590 u8 alive_command;
591 u8 sequence_number;
592 u16 software_revision;
593 u8 device_identifier;
594 u8 reserved1[5];
595 u16 reserved2;
596 u16 reserved3;
597 u16 clock_settle_time;
598 u16 powerup_settle_time;
599 u16 reserved4;
600 u8 time_stamp[5]; /* month, day, year, hours, minutes */
601 u8 ucode_valid;
602} __attribute__ ((packed));
603
604#define IPW_MAX_RATES 12
605
606struct ipw_rates {
607 u8 num_rates;
608 u8 rates[IPW_MAX_RATES];
609} __attribute__ ((packed));
610
611struct command_block {
612 unsigned int control;
613 u32 source_addr;
614 u32 dest_addr;
615 unsigned int status;
616} __attribute__ ((packed));
617
618#define CB_NUMBER_OF_ELEMENTS_SMALL 64
619struct fw_image_desc {
620 unsigned long last_cb_index;
621 unsigned long current_cb_index;
622 struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL];
623 void *v_addr;
624 unsigned long p_addr;
625 unsigned long len;
626};
627
628struct ipw_sys_config {
629 u8 bt_coexistence;
630 u8 reserved1;
631 u8 answer_broadcast_ssid_probe;
632 u8 accept_all_data_frames;
633 u8 accept_non_directed_frames;
634 u8 exclude_unicast_unencrypted;
635 u8 disable_unicast_decryption;
636 u8 exclude_multicast_unencrypted;
637 u8 disable_multicast_decryption;
638 u8 antenna_diversity;
639 u8 pass_crc_to_host;
640 u8 dot11g_auto_detection;
641 u8 enable_cts_to_self;
642 u8 enable_multicast_filtering;
643 u8 bt_coexist_collision_thr;
644 u8 reserved2;
645 u8 accept_all_mgmt_bcpr;
646 u8 accept_all_mgtm_frames;
647 u8 pass_noise_stats_to_host;
648 u8 reserved3;
649} __attribute__ ((packed));
650
651struct ipw_multicast_addr {
652 u8 num_of_multicast_addresses;
653 u8 reserved[3];
654 u8 mac1[6];
655 u8 mac2[6];
656 u8 mac3[6];
657 u8 mac4[6];
658} __attribute__ ((packed));
659
660struct ipw_wep_key {
661 u8 cmd_id;
662 u8 seq_num;
663 u8 key_index;
664 u8 key_size;
665 u8 key[16];
666} __attribute__ ((packed));
667
668struct ipw_tgi_tx_key {
669 u8 key_id;
670 u8 security_type;
671 u8 station_index;
672 u8 flags;
673 u8 key[16];
674 u32 tx_counter[2];
675} __attribute__ ((packed));
676
677#define IPW_SCAN_CHANNELS 54
678
679struct ipw_scan_request {
680 u8 scan_type;
681 u16 dwell_time;
682 u8 channels_list[IPW_SCAN_CHANNELS];
683 u8 channels_reserved[3];
684} __attribute__ ((packed));
685
686enum {
687 IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
688 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN,
689 IPW_SCAN_ACTIVE_DIRECT_SCAN,
690 IPW_SCAN_ACTIVE_BROADCAST_SCAN,
691 IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN,
692 IPW_SCAN_TYPES
693};
694
695struct ipw_scan_request_ext {
696 u32 full_scan_index;
697 u8 channels_list[IPW_SCAN_CHANNELS];
698 u8 scan_type[IPW_SCAN_CHANNELS / 2];
699 u8 reserved;
700 u16 dwell_time[IPW_SCAN_TYPES];
701} __attribute__ ((packed));
702
703extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
704{
705 if (index % 2)
706 return scan->scan_type[index / 2] & 0x0F;
707 else
708 return (scan->scan_type[index / 2] & 0xF0) >> 4;
709}
710
711extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
712 u8 index, u8 scan_type)
713{
714 if (index % 2)
715 scan->scan_type[index / 2] =
716 (scan->scan_type[index / 2] & 0xF0) | (scan_type & 0x0F);
717 else
718 scan->scan_type[index / 2] =
719 (scan->scan_type[index / 2] & 0x0F) |
720 ((scan_type & 0x0F) << 4);
721}
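ipw_get_scan_type()/ipw_set_scan_type() pack two 4-bit scan types per byte of scan_type[]: even channel indexes use the high nibble, odd indexes the low nibble. A standalone check of that packing (a plain unsigned char array stands in for the field):

#include <stdio.h>

static unsigned char types[54 / 2];	/* 54 mirrors IPW_SCAN_CHANNELS */

static void set_type(int index, unsigned char t)
{
	if (index % 2)
		types[index / 2] = (types[index / 2] & 0xF0) | (t & 0x0F);
	else
		types[index / 2] = (types[index / 2] & 0x0F) | ((t & 0x0F) << 4);
}

static unsigned char get_type(int index)
{
	if (index % 2)
		return types[index / 2] & 0x0F;
	return (types[index / 2] & 0xF0) >> 4;
}

int main(void)
{
	set_type(4, 2);		/* IPW_SCAN_ACTIVE_DIRECT_SCAN in slot 4 */
	set_type(5, 3);		/* IPW_SCAN_ACTIVE_BROADCAST_SCAN in slot 5 */
	printf("slot4=%u slot5=%u byte=0x%02x\n",
	       get_type(4), get_type(5), types[2]);	/* expect 2 3 0x23 */
	return 0;
}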
722
723struct ipw_associate {
724 u8 channel;
725 u8 auth_type:4, auth_key:4;
726 u8 assoc_type;
727 u8 reserved;
728 u16 policy_support;
729 u8 preamble_length;
730 u8 ieee_mode;
731 u8 bssid[ETH_ALEN];
732 u32 assoc_tsf_msw;
733 u32 assoc_tsf_lsw;
734 u16 capability;
735 u16 listen_interval;
736 u16 beacon_interval;
737 u8 dest[ETH_ALEN];
738 u16 atim_window;
739 u8 smr;
740 u8 reserved1;
741 u16 reserved2;
742} __attribute__ ((packed));
743
744struct ipw_supported_rates {
745 u8 ieee_mode;
746 u8 num_rates;
747 u8 purpose;
748 u8 reserved;
749 u8 supported_rates[IPW_MAX_RATES];
750} __attribute__ ((packed));
751
752struct ipw_rts_threshold {
753 u16 rts_threshold;
754 u16 reserved;
755} __attribute__ ((packed));
756
757struct ipw_frag_threshold {
758 u16 frag_threshold;
759 u16 reserved;
760} __attribute__ ((packed));
761
762struct ipw_retry_limit {
763 u8 short_retry_limit;
764 u8 long_retry_limit;
765 u16 reserved;
766} __attribute__ ((packed));
767
768struct ipw_dino_config {
769 u32 dino_config_addr;
770 u16 dino_config_size;
771 u8 dino_response;
772 u8 reserved;
773} __attribute__ ((packed));
774
775struct ipw_aironet_info {
776 u8 id;
777 u8 length;
778 u16 reserved;
779} __attribute__ ((packed));
780
781struct ipw_rx_key {
782 u8 station_index;
783 u8 key_type;
784 u8 key_id;
785 u8 key_flag;
786 u8 key[16];
787 u8 station_address[6];
788 u8 key_index;
789 u8 reserved;
790} __attribute__ ((packed));
791
792struct ipw_country_channel_info {
793 u8 first_channel;
794 u8 no_channels;
795 s8 max_tx_power;
796} __attribute__ ((packed));
797
798struct ipw_country_info {
799 u8 id;
800 u8 length;
801 u8 country_str[3];
802 struct ipw_country_channel_info groups[7];
803} __attribute__ ((packed));
804
805struct ipw_channel_tx_power {
806 u8 channel_number;
807 s8 tx_power;
808} __attribute__ ((packed));
809
810#define SCAN_ASSOCIATED_INTERVAL (HZ)
811#define SCAN_INTERVAL (HZ / 10)
812#define MAX_A_CHANNELS 37
813#define MAX_B_CHANNELS 14
814
815struct ipw_tx_power {
816 u8 num_channels;
817 u8 ieee_mode;
818 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
819} __attribute__ ((packed));
820
821struct ipw_qos_parameters {
822 u16 cw_min[4];
823 u16 cw_max[4];
824 u8 aifs[4];
825 u8 flag[4];
826 u16 tx_op_limit[4];
827} __attribute__ ((packed));
828
829struct ipw_rsn_capabilities {
830 u8 id;
831 u8 length;
832 u16 version;
833} __attribute__ ((packed));
834
835struct ipw_sensitivity_calib {
836 u16 beacon_rssi_raw;
837 u16 reserved;
838} __attribute__ ((packed));
839
840/**
841 * Host command structure.
842 *
843 * On input, the following fields should be filled:
844 * - cmd
845 * - len
846 * - status_len
847 * - param (if needed)
848 *
849 * On output,
850 * - \a status contains status;
851 * - \a param is filled with status parameters.
852 */
853struct ipw_cmd {
854 u32 cmd; /**< Host command */
855 u32 status;/**< Status */
856 u32 status_len;
857	/**< How many 32-bit parameters are in the status */
858 u32 len; /**< incoming parameters length, bytes */
859 /**
860 * command parameters.
861 * There should be enough space for incoming and
862 * outgoing parameters.
863 * Incoming parameters are listed first, followed by outgoing params.
864 * nParams=(len+3)/4+status_len
865 */
866 u32 param[0];
867} __attribute__ ((packed));
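/*
 * Illustrative sketch of how a caller might size and fill this structure
 * according to the doc comment above.  The names in_len, n_status, in_buf
 * and HOST_CMD_FOO are hypothetical and exist only for this example:
 *
 *	u32 n_params = (in_len + 3) / 4 + n_status;
 *	struct ipw_cmd *cmd = kmalloc(sizeof(*cmd) +
 *				      n_params * sizeof(u32), GFP_KERNEL);
 *
 *	cmd->cmd = HOST_CMD_FOO;		(hypothetical command id)
 *	cmd->len = in_len;			(bytes of incoming parameters)
 *	cmd->status_len = n_status;		(32-bit words of status output)
 *	memcpy(cmd->param, in_buf, in_len);
 *
 * On completion the firmware fills cmd->status and places status_len
 * 32-bit status words after the incoming parameters in param[].
 */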
868
869#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
870
871#define STATUS_INT_ENABLED (1<<1)
872#define STATUS_RF_KILL_HW (1<<2)
873#define STATUS_RF_KILL_SW (1<<3)
874#define STATUS_RF_KILL_MASK (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
875
876#define STATUS_INIT (1<<5)
877#define STATUS_AUTH (1<<6)
878#define STATUS_ASSOCIATED (1<<7)
879#define STATUS_STATE_MASK (STATUS_INIT | STATUS_AUTH | STATUS_ASSOCIATED)
880
881#define STATUS_ASSOCIATING (1<<8)
882#define STATUS_DISASSOCIATING (1<<9)
883#define STATUS_ROAMING (1<<10)
884#define STATUS_EXIT_PENDING (1<<11)
885#define STATUS_DISASSOC_PENDING (1<<12)
886#define STATUS_STATE_PENDING (1<<13)
887
888#define STATUS_SCAN_PENDING (1<<20)
889#define STATUS_SCANNING (1<<21)
890#define STATUS_SCAN_ABORTING (1<<22)
891
892#define STATUS_INDIRECT_BYTE (1<<28) /* sysfs entry configured for access */
893#define STATUS_INDIRECT_DWORD (1<<29) /* sysfs entry configured for access */
894#define STATUS_DIRECT_DWORD (1<<30) /* sysfs entry configured for access */
895
896#define STATUS_SECURITY_UPDATED (1<<31) /* Security sync needed */
897
898#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */
899#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */
900#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. to single BSSID */
901#define CFG_CUSTOM_MAC (1<<3)
902#define CFG_PREAMBLE (1<<4)
903#define CFG_ADHOC_PERSIST (1<<5)
904#define CFG_ASSOCIATE (1<<6)
905#define CFG_FIXED_RATE (1<<7)
906#define CFG_ADHOC_CREATE (1<<8)
907
908#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */
909#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
910
911#define MAX_STATIONS 32
912#define IPW_INVALID_STATION (0xff)
913
914struct ipw_station_entry {
915 u8 mac_addr[ETH_ALEN];
916 u8 reserved;
917 u8 support_mode;
918};
919
920#define AVG_ENTRIES 8
921struct average {
922 s16 entries[AVG_ENTRIES];
923 u8 pos;
924 u8 init;
925 s32 sum;
926};
927
928struct ipw_priv {
929 /* ieee device used by generic ieee processing code */
930 struct ieee80211_device *ieee;
931 struct ieee80211_security sec;
932
933 /* spinlock */
934 spinlock_t lock;
935
936 /* basic pci-network driver stuff */
937 struct pci_dev *pci_dev;
938 struct net_device *net_dev;
939
940 /* pci hardware address support */
941 void __iomem *hw_base;
942 unsigned long hw_len;
943
944 struct fw_image_desc sram_desc;
945
946 /* result of ucode download */
947 struct alive_command_responce dino_alive;
948
949 wait_queue_head_t wait_command_queue;
950 wait_queue_head_t wait_state;
951
952 /* Rx and Tx DMA processing queues */
953 struct ipw_rx_queue *rxq;
954 struct clx2_tx_queue txq_cmd;
955 struct clx2_tx_queue txq[4];
956 u32 status;
957 u32 config;
958 u32 capability;
959
960 u8 last_rx_rssi;
961 u8 last_noise;
962 struct average average_missed_beacons;
963 struct average average_rssi;
964 struct average average_noise;
965 u32 port_type;
966 int rx_bufs_min; /**< minimum number of bufs in Rx queue */
967 int rx_pend_max; /**< maximum pending buffers for one IRQ */
968 u32 hcmd_seq; /**< sequence number for hcmd */
969 u32 missed_beacon_threshold;
970 u32 roaming_threshold;
971
972 struct ipw_associate assoc_request;
973 struct ieee80211_network *assoc_network;
974
975 unsigned long ts_scan_abort;
976 struct ipw_supported_rates rates;
977 struct ipw_rates phy[3]; /**< PHY restrictions, per band */
978 struct ipw_rates supp; /**< software defined */
979 struct ipw_rates extended; /**< use for corresp. IE, AP only */
980
981	struct notif_link_deterioration last_link_deterioration; /**< for statistics */
982 struct ipw_cmd *hcmd; /**< host command currently executed */
983
984 wait_queue_head_t hcmd_wq; /**< host command waits for execution */
985 u32 tsf_bcn[2]; /**< TSF from latest beacon */
986
987 struct notif_calibration calib; /**< last calibration */
988
989 /* ordinal interface with firmware */
990 u32 table0_addr;
991 u32 table0_len;
992 u32 table1_addr;
993 u32 table1_len;
994 u32 table2_addr;
995 u32 table2_len;
996
997 /* context information */
998 u8 essid[IW_ESSID_MAX_SIZE];
999 u8 essid_len;
1000 u8 nick[IW_ESSID_MAX_SIZE];
1001 u16 rates_mask;
1002 u8 channel;
1003 struct ipw_sys_config sys_config;
1004 u32 power_mode;
1005 u8 bssid[ETH_ALEN];
1006 u16 rts_threshold;
1007 u8 mac_addr[ETH_ALEN];
1008 u8 num_stations;
1009 u8 stations[MAX_STATIONS][ETH_ALEN];
1010
1011 u32 notif_missed_beacons;
1012
1013 /* Statistics and counters normalized with each association */
1014 u32 last_missed_beacons;
1015 u32 last_tx_packets;
1016 u32 last_rx_packets;
1017 u32 last_tx_failures;
1018 u32 last_rx_err;
1019 u32 last_rate;
1020
1021 u32 missed_adhoc_beacons;
1022 u32 missed_beacons;
1023 u32 rx_packets;
1024 u32 tx_packets;
1025 u32 quality;
1026
1027 /* eeprom */
1028 u8 eeprom[0x100]; /* 256 bytes of eeprom */
1029 int eeprom_delay;
1030
1031 struct iw_statistics wstats;
1032
1033 struct workqueue_struct *workqueue;
1034
1035 struct work_struct adhoc_check;
1036 struct work_struct associate;
1037 struct work_struct disassociate;
1038 struct work_struct rx_replenish;
1039 struct work_struct request_scan;
1040 struct work_struct adapter_restart;
1041 struct work_struct rf_kill;
1042 struct work_struct up;
1043 struct work_struct down;
1044 struct work_struct gather_stats;
1045 struct work_struct abort_scan;
1046 struct work_struct roam;
1047 struct work_struct scan_check;
1048
1049 struct tasklet_struct irq_tasklet;
1050
1051#define IPW_2200BG 1
1052#define IPW_2915ABG 2
1053 u8 adapter;
1054
1055#define IPW_DEFAULT_TX_POWER 0x14
1056 u8 tx_power;
1057
1058#ifdef CONFIG_PM
1059 u32 pm_state[16];
1060#endif
1061
1062 /* network state */
1063
1064 /* Used to pass the current INTA value from ISR to Tasklet */
1065 u32 isr_inta;
1066
1067 /* debugging info */
1068 u32 indirect_dword;
1069 u32 direct_dword;
1070 u32 indirect_byte;
1071}; /*ipw_priv */
1072
1073/* debug macros */
1074
1075#ifdef CONFIG_IPW_DEBUG
1076#define IPW_DEBUG(level, fmt, args...) \
1077do { if (ipw_debug_level & (level)) \
1078 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1079 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
1080#else
1081#define IPW_DEBUG(level, fmt, args...) do {} while (0)
1082#endif /* CONFIG_IPW_DEBUG */
1083
1084/*
1085 * To use the debug system:
1086 *
1087 * If you are defining a new debug classification, simply add it to the #define
1088 * list here in the form of:
1089 *
1090 * #define IPW_DL_xxxx VALUE
1091 *
1092 * shifting the value left by one bit from the previous entry. xxxx should be
1093 * the name of the classification (for example, WEP)
1094 *
1095 * You then need to either add an IPW_xxxx_DEBUG() macro definition for your
1096 * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
1097 * to send output to that classification.
1098 *
1099 * To add your debug level to the list of levels seen when you perform
1100 *
1101 * % cat /proc/net/ipw/debug_level
1102 *
1103 * you simply need to add your entry to the ipw_debug_levels array.
1104 *
1105 * If you do not see debug_level in /proc/net/ipw, then you do not have
1106 * CONFIG_IPW_DEBUG defined in your kernel configuration.
1107 *
1108 */
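/*
 * Illustrative sketch of the steps described above; IPW_DL_FOO and
 * IPW_DEBUG_FOO are hypothetical names used only for this example:
 *
 *	#define IPW_DL_FOO (1<<30)
 *	#define IPW_DEBUG_FOO(f, a...) IPW_DEBUG(IPW_DL_FOO, f, ## a)
 *
 *	IPW_DEBUG_FOO("rx queue replenished with %d buffers\n", count);
 *
 * plus an entry for IPW_DL_FOO in the ipw_debug_levels array so that it
 * shows up in /proc/net/ipw/debug_level.
 */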
1109
1110#define IPW_DL_ERROR (1<<0)
1111#define IPW_DL_WARNING (1<<1)
1112#define IPW_DL_INFO (1<<2)
1113#define IPW_DL_WX (1<<3)
1114#define IPW_DL_HOST_COMMAND (1<<5)
1115#define IPW_DL_STATE (1<<6)
1116
1117#define IPW_DL_NOTIF (1<<10)
1118#define IPW_DL_SCAN (1<<11)
1119#define IPW_DL_ASSOC (1<<12)
1120#define IPW_DL_DROP (1<<13)
1121#define IPW_DL_IOCTL (1<<14)
1122
1123#define IPW_DL_MANAGE (1<<15)
1124#define IPW_DL_FW (1<<16)
1125#define IPW_DL_RF_KILL (1<<17)
1126#define IPW_DL_FW_ERRORS (1<<18)
1127
1128#define IPW_DL_ORD (1<<20)
1129
1130#define IPW_DL_FRAG (1<<21)
1131#define IPW_DL_WEP (1<<22)
1132#define IPW_DL_TX (1<<23)
1133#define IPW_DL_RX (1<<24)
1134#define IPW_DL_ISR (1<<25)
1135#define IPW_DL_FW_INFO (1<<26)
1136#define IPW_DL_IO (1<<27)
1137#define IPW_DL_TRACE (1<<28)
1138
1139#define IPW_DL_STATS (1<<29)
1140
1141#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
1142#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
1143#define IPW_DEBUG_INFO(f, a...) IPW_DEBUG(IPW_DL_INFO, f, ## a)
1144
1145#define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a)
1146#define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a)
1147#define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a)
1148#define IPW_DEBUG_TRACE(f, a...) IPW_DEBUG(IPW_DL_TRACE, f, ## a)
1149#define IPW_DEBUG_RX(f, a...) IPW_DEBUG(IPW_DL_RX, f, ## a)
1150#define IPW_DEBUG_TX(f, a...) IPW_DEBUG(IPW_DL_TX, f, ## a)
1151#define IPW_DEBUG_ISR(f, a...) IPW_DEBUG(IPW_DL_ISR, f, ## a)
1152#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
1153#define IPW_DEBUG_WEP(f, a...) IPW_DEBUG(IPW_DL_WEP, f, ## a)
1154#define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
1155#define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a)
1156#define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a)
1157#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
1158#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
1159#define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a)
1160#define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a)
1161#define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a)
1162#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
1163#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
1164#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
1165#define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a)
1166
1167#include <linux/ctype.h>
1168
1169/*
1170 * Register bit definitions
1171 */
1172
1173/* Dino control registers bits */
1174
1175#define DINO_ENABLE_SYSTEM 0x80
1176#define DINO_ENABLE_CS 0x40
1177#define DINO_RXFIFO_DATA 0x01
1178#define DINO_CONTROL_REG 0x00200000
1179
1180#define CX2_INTA_RW 0x00000008
1181#define CX2_INTA_MASK_R 0x0000000C
1182#define CX2_INDIRECT_ADDR 0x00000010
1183#define CX2_INDIRECT_DATA 0x00000014
1184#define CX2_AUTOINC_ADDR 0x00000018
1185#define CX2_AUTOINC_DATA 0x0000001C
1186#define CX2_RESET_REG 0x00000020
1187#define CX2_GP_CNTRL_RW 0x00000024
1188
1189#define CX2_READ_INT_REGISTER 0xFF4
1190
1191#define CX2_GP_CNTRL_BIT_INIT_DONE 0x00000004
1192
1193#define CX2_REGISTER_DOMAIN1_END 0x00001000
1194#define CX2_SRAM_READ_INT_REGISTER 0x00000ff4
1195
1196#define CX2_SHARED_LOWER_BOUND 0x00000200
1197#define CX2_INTERRUPT_AREA_LOWER_BOUND 0x00000f80
1198
1199#define CX2_NIC_SRAM_LOWER_BOUND 0x00000000
1200#define CX2_NIC_SRAM_UPPER_BOUND 0x00030000
1201
1202#define CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER (1 << 29)
1203#define CX2_GP_CNTRL_BIT_CLOCK_READY 0x00000001
1204#define CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY 0x00000002
1205
1206/*
1207 * RESET Register Bit Indexes
1208 */
1209#define CBD_RESET_REG_PRINCETON_RESET 0x00000001 /* Bit 0 (LSB) */
1210#define CX2_RESET_REG_SW_RESET 0x00000080 /* Bit 7 */
1211#define CX2_RESET_REG_MASTER_DISABLED 0x00000100 /* Bit 8 */
1212#define CX2_RESET_REG_STOP_MASTER 0x00000200 /* Bit 9 */
1213#define CX2_ARC_KESHET_CONFIG 0x08000000 /* Bit 27 */
1214#define CX2_START_STANDBY 0x00000004 /* Bit 2 */
1215
1216#define CX2_CSR_CIS_UPPER_BOUND 0x00000200
1217#define CX2_DOMAIN_0_END 0x1000
1218#define CLX_MEM_BAR_SIZE 0x1000
1219
1220#define CX2_BASEBAND_CONTROL_STATUS 0X00200000
1221#define CX2_BASEBAND_TX_FIFO_WRITE 0X00200004
1222#define CX2_BASEBAND_RX_FIFO_READ 0X00200004
1223#define CX2_BASEBAND_CONTROL_STORE 0X00200010
1224
1225#define CX2_INTERNAL_CMD_EVENT 0X00300004
1226#define CX2_BASEBAND_POWER_DOWN 0x00000001
1227
1228#define CX2_MEM_HALT_AND_RESET 0x003000e0
1229
1230/* defgroup bits_halt_reset MEM_HALT_AND_RESET register bits */
1231#define CX2_BIT_HALT_RESET_ON 0x80000000
1232#define CX2_BIT_HALT_RESET_OFF 0x00000000
1233
1234#define CB_LAST_VALID 0x20000000
1235#define CB_INT_ENABLED 0x40000000
1236#define CB_VALID 0x80000000
1237#define CB_SRC_LE 0x08000000
1238#define CB_DEST_LE 0x04000000
1239#define CB_SRC_AUTOINC 0x00800000
1240#define CB_SRC_IO_GATED 0x00400000
1241#define CB_DEST_AUTOINC 0x00080000
1242#define CB_SRC_SIZE_LONG 0x00200000
1243#define CB_DEST_SIZE_LONG 0x00020000
1244
1245/* DMA DEFINES */
1246
1247#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000
1248#define DMA_CB_STOP_AND_ABORT 0x00000C00
1249#define DMA_CB_START 0x00000100
1250
1251#define CX2_SHARED_SRAM_SIZE 0x00030000
1252#define CX2_SHARED_SRAM_DMA_CONTROL 0x00027000
1253#define CB_MAX_LENGTH 0x1FFF
1254
1255#define CX2_HOST_EEPROM_DATA_SRAM_SIZE 0xA18
1256#define CX2_EEPROM_IMAGE_SIZE 0x100
1257
1258/* DMA defs */
1259#define CX2_DMA_I_CURRENT_CB 0x003000D0
1260#define CX2_DMA_O_CURRENT_CB 0x003000D4
1261#define CX2_DMA_I_DMA_CONTROL 0x003000A4
1262#define CX2_DMA_I_CB_BASE 0x003000A0
1263
1264#define CX2_TX_CMD_QUEUE_BD_BASE (0x00000200)
1265#define CX2_TX_CMD_QUEUE_BD_SIZE (0x00000204)
1266#define CX2_TX_QUEUE_0_BD_BASE (0x00000208)
1267#define CX2_TX_QUEUE_0_BD_SIZE (0x0000020C)
1268#define CX2_TX_QUEUE_1_BD_BASE (0x00000210)
1269#define CX2_TX_QUEUE_1_BD_SIZE (0x00000214)
1270#define CX2_TX_QUEUE_2_BD_BASE (0x00000218)
1271#define CX2_TX_QUEUE_2_BD_SIZE (0x0000021C)
1272#define CX2_TX_QUEUE_3_BD_BASE (0x00000220)
1273#define CX2_TX_QUEUE_3_BD_SIZE (0x00000224)
1274#define CX2_RX_BD_BASE (0x00000240)
1275#define CX2_RX_BD_SIZE (0x00000244)
1276#define CX2_RFDS_TABLE_LOWER (0x00000500)
1277
1278#define CX2_TX_CMD_QUEUE_READ_INDEX (0x00000280)
1279#define CX2_TX_QUEUE_0_READ_INDEX (0x00000284)
1280#define CX2_TX_QUEUE_1_READ_INDEX (0x00000288)
1281#define CX2_TX_QUEUE_2_READ_INDEX (0x0000028C)
1282#define CX2_TX_QUEUE_3_READ_INDEX (0x00000290)
1283#define CX2_RX_READ_INDEX (0x000002A0)
1284
1285#define CX2_TX_CMD_QUEUE_WRITE_INDEX (0x00000F80)
1286#define CX2_TX_QUEUE_0_WRITE_INDEX (0x00000F84)
1287#define CX2_TX_QUEUE_1_WRITE_INDEX (0x00000F88)
1288#define CX2_TX_QUEUE_2_WRITE_INDEX (0x00000F8C)
1289#define CX2_TX_QUEUE_3_WRITE_INDEX (0x00000F90)
1290#define CX2_RX_WRITE_INDEX (0x00000FA0)
1291
1292/*
1293 * EEPROM Related Definitions
1294 */
1295
1296#define IPW_EEPROM_DATA_SRAM_ADDRESS (CX2_SHARED_LOWER_BOUND + 0x814)
1297#define IPW_EEPROM_DATA_SRAM_SIZE (CX2_SHARED_LOWER_BOUND + 0x818)
1298#define IPW_EEPROM_LOAD_DISABLE (CX2_SHARED_LOWER_BOUND + 0x81C)
1299#define IPW_EEPROM_DATA (CX2_SHARED_LOWER_BOUND + 0x820)
1300#define IPW_EEPROM_UPPER_ADDRESS (CX2_SHARED_LOWER_BOUND + 0x9E0)
1301
1302#define IPW_STATION_TABLE_LOWER (CX2_SHARED_LOWER_BOUND + 0xA0C)
1303#define IPW_STATION_TABLE_UPPER (CX2_SHARED_LOWER_BOUND + 0xB0C)
1304#define IPW_REQUEST_ATIM (CX2_SHARED_LOWER_BOUND + 0xB0C)
1305#define IPW_ATIM_SENT (CX2_SHARED_LOWER_BOUND + 0xB10)
1306#define IPW_WHO_IS_AWAKE (CX2_SHARED_LOWER_BOUND + 0xB14)
1307#define IPW_DURING_ATIM_WINDOW (CX2_SHARED_LOWER_BOUND + 0xB18)
1308
1309#define MSB 1
1310#define LSB 0
1311#define WORD_TO_BYTE(_word) ((_word) * sizeof(u16))
1312
1313#define GET_EEPROM_ADDR(_wordoffset,_byteoffset) \
1314 ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) )
1315
1316/* EEPROM access by BYTE */
1317#define EEPROM_PME_CAPABILITY (GET_EEPROM_ADDR(0x09,MSB)) /* 1 byte */
1318#define EEPROM_MAC_ADDRESS (GET_EEPROM_ADDR(0x21,LSB)) /* 6 byte */
1319#define EEPROM_VERSION (GET_EEPROM_ADDR(0x24,MSB)) /* 1 byte */
1320#define EEPROM_NIC_TYPE (GET_EEPROM_ADDR(0x25,LSB)) /* 1 byte */
1321#define EEPROM_SKU_CAPABILITY (GET_EEPROM_ADDR(0x25,MSB)) /* 1 byte */
1322#define EEPROM_COUNTRY_CODE (GET_EEPROM_ADDR(0x26,LSB)) /* 3 bytes */
1323#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB)) /* 2 bytes */
1324#define EEPROM_IBSS_CHANNELS_A (GET_EEPROM_ADDR(0x29,MSB)) /* 5 bytes */
1325#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */
1326#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */
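/*
 * Worked example of the addressing above: EEPROM_MAC_ADDRESS expands to
 * GET_EEPROM_ADDR(0x21, LSB) = WORD_TO_BYTE(0x21) + 0
 *                            = 0x21 * sizeof(u16) = byte offset 0x42,
 * i.e. the MAC address starts at the low byte of EEPROM word 0x21 and
 * occupies the next 6 bytes of the EEPROM image.
 */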
1327
1328/* NIC type as found in the one byte EEPROM_NIC_TYPE offset */
1329#define EEPROM_NIC_TYPE_STANDARD 0
1330#define EEPROM_NIC_TYPE_DELL 1
1331#define EEPROM_NIC_TYPE_FUJITSU 2
1332#define EEPROM_NIC_TYPE_IBM 3
1333#define EEPROM_NIC_TYPE_HP 4
1334
1335#define FW_MEM_REG_LOWER_BOUND 0x00300000
1336#define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40)
1337
1338#define EEPROM_BIT_SK (1<<0)
1339#define EEPROM_BIT_CS (1<<1)
1340#define EEPROM_BIT_DI (1<<2)
1341#define EEPROM_BIT_DO (1<<4)
1342
1343#define EEPROM_CMD_READ 0x2
1344
1345/* Interrupts masks */
1346#define CX2_INTA_NONE 0x00000000
1347
1348#define CX2_INTA_BIT_RX_TRANSFER 0x00000002
1349#define CX2_INTA_BIT_STATUS_CHANGE 0x00000010
1350#define CX2_INTA_BIT_BEACON_PERIOD_EXPIRED 0x00000020
1351
1352/* INTA bits for CF */
1353#define CX2_INTA_BIT_TX_CMD_QUEUE 0x00000800
1354#define CX2_INTA_BIT_TX_QUEUE_1 0x00001000
1355#define CX2_INTA_BIT_TX_QUEUE_2 0x00002000
1356#define CX2_INTA_BIT_TX_QUEUE_3 0x00004000
1357#define CX2_INTA_BIT_TX_QUEUE_4 0x00008000
1358
1359#define CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE 0x00010000
1360
1361#define CX2_INTA_BIT_PREPARE_FOR_POWER_DOWN 0x00100000
1362#define CX2_INTA_BIT_POWER_DOWN 0x00200000
1363
1364#define CX2_INTA_BIT_FW_INITIALIZATION_DONE 0x01000000
1365#define CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE 0x02000000
1366#define CX2_INTA_BIT_RF_KILL_DONE 0x04000000
1367#define CX2_INTA_BIT_FATAL_ERROR 0x40000000
1368#define CX2_INTA_BIT_PARITY_ERROR 0x80000000
1369
1370/* Interrupts enabled at init time. */
1371#define CX2_INTA_MASK_ALL \
1372 (CX2_INTA_BIT_TX_QUEUE_1 | \
1373 CX2_INTA_BIT_TX_QUEUE_2 | \
1374 CX2_INTA_BIT_TX_QUEUE_3 | \
1375 CX2_INTA_BIT_TX_QUEUE_4 | \
1376 CX2_INTA_BIT_TX_CMD_QUEUE | \
1377 CX2_INTA_BIT_RX_TRANSFER | \
1378 CX2_INTA_BIT_FATAL_ERROR | \
1379 CX2_INTA_BIT_PARITY_ERROR | \
1380 CX2_INTA_BIT_STATUS_CHANGE | \
1381 CX2_INTA_BIT_FW_INITIALIZATION_DONE | \
1382 CX2_INTA_BIT_BEACON_PERIOD_EXPIRED | \
1383 CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE | \
1384 CX2_INTA_BIT_PREPARE_FOR_POWER_DOWN | \
1385 CX2_INTA_BIT_POWER_DOWN | \
1386 CX2_INTA_BIT_RF_KILL_DONE )
1387
1388#define IPWSTATUS_ERROR_LOG (CX2_SHARED_LOWER_BOUND + 0x410)
1389#define IPW_EVENT_LOG (CX2_SHARED_LOWER_BOUND + 0x414)
1390
1391/* FW event log definitions */
1392#define EVENT_ELEM_SIZE (3 * sizeof(u32))
1393#define EVENT_START_OFFSET (1 * sizeof(u32) + 2 * sizeof(u16))
1394
1395/* FW error log definitions */
1396#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1397#define ERROR_START_OFFSET (1 * sizeof(u32))
1398
1399enum {
1400 IPW_FW_ERROR_OK = 0,
1401 IPW_FW_ERROR_FAIL,
1402 IPW_FW_ERROR_MEMORY_UNDERFLOW,
1403 IPW_FW_ERROR_MEMORY_OVERFLOW,
1404 IPW_FW_ERROR_BAD_PARAM,
1405 IPW_FW_ERROR_BAD_CHECKSUM,
1406 IPW_FW_ERROR_NMI_INTERRUPT,
1407 IPW_FW_ERROR_BAD_DATABASE,
1408 IPW_FW_ERROR_ALLOC_FAIL,
1409 IPW_FW_ERROR_DMA_UNDERRUN,
1410 IPW_FW_ERROR_DMA_STATUS,
1411 IPW_FW_ERROR_DINOSTATUS_ERROR,
1412 IPW_FW_ERROR_EEPROMSTATUS_ERROR,
1413 IPW_FW_ERROR_SYSASSERT,
1414 IPW_FW_ERROR_FATAL_ERROR
1415};
1416
1417#define AUTH_OPEN 0
1418#define AUTH_SHARED_KEY 1
1419#define AUTH_IGNORE 3
1420
1421#define HC_ASSOCIATE 0
1422#define HC_REASSOCIATE 1
1423#define HC_DISASSOCIATE 2
1424#define HC_IBSS_START 3
1425#define HC_IBSS_RECONF 4
1426#define HC_DISASSOC_QUIET 5
1427
1428#define IPW_RATE_CAPABILITIES 1
1429#define IPW_RATE_CONNECT 0
1430
1431/*
1432 * Rate values and masks
1433 */
1434#define IPW_TX_RATE_1MB 0x0A
1435#define IPW_TX_RATE_2MB 0x14
1436#define IPW_TX_RATE_5MB 0x37
1437#define IPW_TX_RATE_6MB 0x0D
1438#define IPW_TX_RATE_9MB 0x0F
1439#define IPW_TX_RATE_11MB 0x6E
1440#define IPW_TX_RATE_12MB 0x05
1441#define IPW_TX_RATE_18MB 0x07
1442#define IPW_TX_RATE_24MB 0x09
1443#define IPW_TX_RATE_36MB 0x0B
1444#define IPW_TX_RATE_48MB 0x01
1445#define IPW_TX_RATE_54MB 0x03
1446
1447#define IPW_ORD_TABLE_ID_MASK 0x0000FF00
1448#define IPW_ORD_TABLE_VALUE_MASK 0x000000FF
1449
1450#define IPW_ORD_TABLE_0_MASK 0x0000F000
1451#define IPW_ORD_TABLE_1_MASK 0x0000F100
1452#define IPW_ORD_TABLE_2_MASK 0x0000F200
1453#define IPW_ORD_TABLE_3_MASK 0x0000F300
1454#define IPW_ORD_TABLE_4_MASK 0x0000F400
1455#define IPW_ORD_TABLE_5_MASK 0x0000F500
1456#define IPW_ORD_TABLE_6_MASK 0x0000F600
1457#define IPW_ORD_TABLE_7_MASK 0x0000F700
1458
1459/*
1460 * Table 0 Entries (all entries are 32 bits)
1461 */
1462enum {
1463 IPW_ORD_STAT_TX_CURR_RATE = IPW_ORD_TABLE_0_MASK + 1,
1464 IPW_ORD_STAT_FRAG_TRESHOLD,
1465 IPW_ORD_STAT_RTS_THRESHOLD,
1466 IPW_ORD_STAT_TX_HOST_REQUESTS,
1467 IPW_ORD_STAT_TX_HOST_COMPLETE,
1468 IPW_ORD_STAT_TX_DIR_DATA,
1469 IPW_ORD_STAT_TX_DIR_DATA_B_1,
1470 IPW_ORD_STAT_TX_DIR_DATA_B_2,
1471 IPW_ORD_STAT_TX_DIR_DATA_B_5_5,
1472 IPW_ORD_STAT_TX_DIR_DATA_B_11,
1473 /* Hole */
1474
1475 IPW_ORD_STAT_TX_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 19,
1476 IPW_ORD_STAT_TX_DIR_DATA_G_2,
1477 IPW_ORD_STAT_TX_DIR_DATA_G_5_5,
1478 IPW_ORD_STAT_TX_DIR_DATA_G_6,
1479 IPW_ORD_STAT_TX_DIR_DATA_G_9,
1480 IPW_ORD_STAT_TX_DIR_DATA_G_11,
1481 IPW_ORD_STAT_TX_DIR_DATA_G_12,
1482 IPW_ORD_STAT_TX_DIR_DATA_G_18,
1483 IPW_ORD_STAT_TX_DIR_DATA_G_24,
1484 IPW_ORD_STAT_TX_DIR_DATA_G_36,
1485 IPW_ORD_STAT_TX_DIR_DATA_G_48,
1486 IPW_ORD_STAT_TX_DIR_DATA_G_54,
1487 IPW_ORD_STAT_TX_NON_DIR_DATA,
1488 IPW_ORD_STAT_TX_NON_DIR_DATA_B_1,
1489 IPW_ORD_STAT_TX_NON_DIR_DATA_B_2,
1490 IPW_ORD_STAT_TX_NON_DIR_DATA_B_5_5,
1491 IPW_ORD_STAT_TX_NON_DIR_DATA_B_11,
1492 /* Hole */
1493
1494 IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44,
1495 IPW_ORD_STAT_TX_NON_DIR_DATA_G_2,
1496 IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5,
1497 IPW_ORD_STAT_TX_NON_DIR_DATA_G_6,
1498 IPW_ORD_STAT_TX_NON_DIR_DATA_G_9,
1499 IPW_ORD_STAT_TX_NON_DIR_DATA_G_11,
1500 IPW_ORD_STAT_TX_NON_DIR_DATA_G_12,
1501 IPW_ORD_STAT_TX_NON_DIR_DATA_G_18,
1502 IPW_ORD_STAT_TX_NON_DIR_DATA_G_24,
1503 IPW_ORD_STAT_TX_NON_DIR_DATA_G_36,
1504 IPW_ORD_STAT_TX_NON_DIR_DATA_G_48,
1505 IPW_ORD_STAT_TX_NON_DIR_DATA_G_54,
1506 IPW_ORD_STAT_TX_RETRY,
1507 IPW_ORD_STAT_TX_FAILURE,
1508 IPW_ORD_STAT_RX_ERR_CRC,
1509 IPW_ORD_STAT_RX_ERR_ICV,
1510 IPW_ORD_STAT_RX_NO_BUFFER,
1511 IPW_ORD_STAT_FULL_SCANS,
1512 IPW_ORD_STAT_PARTIAL_SCANS,
1513 IPW_ORD_STAT_TGH_ABORTED_SCANS,
1514 IPW_ORD_STAT_TX_TOTAL_BYTES,
1515 IPW_ORD_STAT_CURR_RSSI_RAW,
1516 IPW_ORD_STAT_RX_BEACON,
1517 IPW_ORD_STAT_MISSED_BEACONS,
1518 IPW_ORD_TABLE_0_LAST
1519};
1520
1521#define IPW_RSSI_TO_DBM 112
1522
1523/* Table 1 Entries
1524 */
1525enum {
1526 IPW_ORD_TABLE_1_LAST = IPW_ORD_TABLE_1_MASK | 1,
1527};
1528
1529/*
1530 * Table 2 Entries
1531 *
1532 * FW_VERSION: 16 byte string
1533 * FW_DATE: 16 byte string (only 14 bytes used)
1534 * UCODE_VERSION: 4 byte version code
1535 * UCODE_DATE: 5 byte date code
1536 * ADAPTER_MAC: 6 byte MAC address
1537 * RTC: 4 byte clock
1538 */
1539enum {
1540 IPW_ORD_STAT_FW_VERSION = IPW_ORD_TABLE_2_MASK | 1,
1541 IPW_ORD_STAT_FW_DATE,
1542 IPW_ORD_STAT_UCODE_VERSION,
1543 IPW_ORD_STAT_UCODE_DATE,
1544 IPW_ORD_STAT_ADAPTER_MAC,
1545 IPW_ORD_STAT_RTC,
1546 IPW_ORD_TABLE_2_LAST
1547};
1548
1549/* Table 3 */
1550enum {
1551 IPW_ORD_STAT_TX_PACKET = IPW_ORD_TABLE_3_MASK | 0,
1552 IPW_ORD_STAT_TX_PACKET_FAILURE,
1553 IPW_ORD_STAT_TX_PACKET_SUCCESS,
1554 IPW_ORD_STAT_TX_PACKET_ABORTED,
1555 IPW_ORD_TABLE_3_LAST
1556};
1557
1558/* Table 4 */
1559enum {
1560 IPW_ORD_TABLE_4_LAST = IPW_ORD_TABLE_4_MASK
1561};
1562
1563/* Table 5 */
1564enum {
1565 IPW_ORD_STAT_AVAILABLE_AP_COUNT = IPW_ORD_TABLE_5_MASK,
1566 IPW_ORD_STAT_AP_ASSNS,
1567 IPW_ORD_STAT_ROAM,
1568 IPW_ORD_STAT_ROAM_CAUSE_MISSED_BEACONS,
1569 IPW_ORD_STAT_ROAM_CAUSE_UNASSOC,
1570 IPW_ORD_STAT_ROAM_CAUSE_RSSI,
1571 IPW_ORD_STAT_ROAM_CAUSE_LINK_QUALITY,
1572 IPW_ORD_STAT_ROAM_CAUSE_AP_LOAD_BALANCE,
1573 IPW_ORD_STAT_ROAM_CAUSE_AP_NO_TX,
1574 IPW_ORD_STAT_LINK_UP,
1575 IPW_ORD_STAT_LINK_DOWN,
1576 IPW_ORD_ANTENNA_DIVERSITY,
1577 IPW_ORD_CURR_FREQ,
1578 IPW_ORD_TABLE_5_LAST
1579};
1580
1581/* Table 6 */
1582enum {
1583 IPW_ORD_COUNTRY_CODE = IPW_ORD_TABLE_6_MASK,
1584 IPW_ORD_CURR_BSSID,
1585 IPW_ORD_CURR_SSID,
1586 IPW_ORD_TABLE_6_LAST
1587};
1588
1589/* Table 7 */
1590enum {
1591 IPW_ORD_STAT_PERCENT_MISSED_BEACONS = IPW_ORD_TABLE_7_MASK,
1592 IPW_ORD_STAT_PERCENT_TX_RETRIES,
1593 IPW_ORD_STAT_PERCENT_LINK_QUALITY,
1594 IPW_ORD_STAT_CURR_RSSI_DBM,
1595 IPW_ORD_TABLE_7_LAST
1596};
1597
1598#define IPW_ORDINALS_TABLE_LOWER (CX2_SHARED_LOWER_BOUND + 0x500)
1599#define IPW_ORDINALS_TABLE_0 (CX2_SHARED_LOWER_BOUND + 0x180)
1600#define IPW_ORDINALS_TABLE_1 (CX2_SHARED_LOWER_BOUND + 0x184)
1601#define IPW_ORDINALS_TABLE_2 (CX2_SHARED_LOWER_BOUND + 0x188)
1602#define IPW_MEM_FIXED_OVERRIDE (CX2_SHARED_LOWER_BOUND + 0x41C)
1603
1604struct ipw_fixed_rate {
1605 u16 tx_rates;
1606 u16 reserved;
1607} __attribute__ ((packed));
1608
1609#define CX2_INDIRECT_ADDR_MASK (~0x3ul)
1610
1611struct host_cmd {
1612 u8 cmd;
1613 u8 len;
1614 u16 reserved;
1615 u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
1616} __attribute__ ((packed));
1617
1618#define CFG_BT_COEXISTENCE_MIN 0x00
1619#define CFG_BT_COEXISTENCE_DEFER 0x02
1620#define CFG_BT_COEXISTENCE_KILL 0x04
1621#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08
1622#define CFG_BT_COEXISTENCE_OOB 0x10
1623#define CFG_BT_COEXISTENCE_MAX 0xFF
1624#define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM */
1625
1626#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0
1627#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1
1628#define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN
1629
1630#define CFG_SYS_ANTENNA_BOTH 0x000
1631#define CFG_SYS_ANTENNA_A 0x001
1632#define CFG_SYS_ANTENNA_B 0x003
1633
1634/*
1635 * The definitions below were lifted off the ipw2100 driver, which only
1636 * supports 'b' mode, so I'm sure these are not exactly correct.
1637 *
1638 * Somebody fix these!!
1639 */
1640#define REG_MIN_CHANNEL 0
1641#define REG_MAX_CHANNEL 14
1642
1643#define REG_CHANNEL_MASK 0x00003FFF
1644#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff
1645
1646static const long ipw_frequencies[] = {
1647 2412, 2417, 2422, 2427,
1648 2432, 2437, 2442, 2447,
1649 2452, 2457, 2462, 2467,
1650 2472, 2484
1651};
1652
1653#define FREQ_COUNT ARRAY_SIZE(ipw_frequencies)
1654
1655#define IPW_MAX_CONFIG_RETRIES 10
1656
1657static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
1658{
1659 u32 retval;
1660 u16 fc;
1661
1662 retval = sizeof(struct ieee80211_hdr);
1663 fc = le16_to_cpu(hdr->frame_ctl);
1664
1665 /*
1666 * Function ToDS FromDS
1667 * IBSS 0 0
1668 * To AP 1 0
1669 * From AP 0 1
1670 * WDS (bridge) 1 1
1671 *
1672 * Only WDS frames use Address4 among them. --YZ
1673 */
1674 if (!(fc & IEEE80211_FCTL_TODS) || !(fc & IEEE80211_FCTL_FROMDS))
1675 retval -= ETH_ALEN;
1676
1677 return retval;
1678}
1679
1680#endif /* __ipw2200_h__ */
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 5f507c49907b..ca6c03c89926 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -471,12 +471,12 @@ static dev_link_t *netwave_attach(void)
471 dev->get_stats = &netwave_get_stats; 471 dev->get_stats = &netwave_get_stats;
472 dev->set_multicast_list = &set_multicast_list; 472 dev->set_multicast_list = &set_multicast_list;
473 /* wireless extensions */ 473 /* wireless extensions */
474#ifdef WIRELESS_EXT 474#if WIRELESS_EXT <= 16
475 dev->get_wireless_stats = &netwave_get_wireless_stats; 475 dev->get_wireless_stats = &netwave_get_wireless_stats;
476#endif /* WIRELESS_EXT <= 16 */
476#if WIRELESS_EXT > 12 477#if WIRELESS_EXT > 12
477 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def; 478 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
478#endif /* WIRELESS_EXT > 12 */ 479#endif /* WIRELESS_EXT > 12 */
479#endif /* WIRELESS_EXT */
480 dev->do_ioctl = &netwave_ioctl; 480 dev->do_ioctl = &netwave_ioctl;
481 481
482 dev->tx_timeout = &netwave_watchdog; 482 dev->tx_timeout = &netwave_watchdog;
@@ -839,6 +839,9 @@ static const struct iw_handler_def netwave_handler_def =
839 .standard = (iw_handler *) netwave_handler, 839 .standard = (iw_handler *) netwave_handler,
840 .private = (iw_handler *) netwave_private_handler, 840 .private = (iw_handler *) netwave_private_handler,
841 .private_args = (struct iw_priv_args *) netwave_private_args, 841 .private_args = (struct iw_priv_args *) netwave_private_args,
842#if WIRELESS_EXT > 16
843 .get_wireless_stats = netwave_get_wireless_stats,
844#endif /* WIRELESS_EXT > 16 */
842}; 845};
843#endif /* WIRELESS_EXT > 12 */ 846#endif /* WIRELESS_EXT > 12 */
844 847
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 9c2d07cde010..8de49fe57233 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -94,6 +94,8 @@
94#include <net/iw_handler.h> 94#include <net/iw_handler.h>
95#include <net/ieee80211.h> 95#include <net/ieee80211.h>
96 96
97#include <net/ieee80211.h>
98
97#include <asm/uaccess.h> 99#include <asm/uaccess.h>
98#include <asm/io.h> 100#include <asm/io.h>
99#include <asm/system.h> 101#include <asm/system.h>
@@ -101,7 +103,6 @@
101#include "hermes.h" 103#include "hermes.h"
102#include "hermes_rid.h" 104#include "hermes_rid.h"
103#include "orinoco.h" 105#include "orinoco.h"
104#include "ieee802_11.h"
105 106
106/********************************************************************/ 107/********************************************************************/
107/* Module information */ 108/* Module information */
@@ -150,7 +151,7 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
150#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2) 151#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
151 152
152#define ORINOCO_MIN_MTU 256 153#define ORINOCO_MIN_MTU 256
153#define ORINOCO_MAX_MTU (IEEE802_11_DATA_LEN - ENCAPS_OVERHEAD) 154#define ORINOCO_MAX_MTU (IEEE80211_DATA_LEN - ENCAPS_OVERHEAD)
154 155
155#define SYMBOL_MAX_VER_LEN (14) 156#define SYMBOL_MAX_VER_LEN (14)
156#define USER_BAP 0 157#define USER_BAP 0
@@ -442,7 +443,7 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
442 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) ) 443 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
443 return -EINVAL; 444 return -EINVAL;
444 445
445 if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) > 446 if ( (new_mtu + ENCAPS_OVERHEAD + IEEE80211_HLEN) >
446 (priv->nicbuf_size - ETH_HLEN) ) 447 (priv->nicbuf_size - ETH_HLEN) )
447 return -EINVAL; 448 return -EINVAL;
448 449
@@ -918,7 +919,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
918 data. */ 919 data. */
919 return; 920 return;
920 } 921 }
921 if (length > IEEE802_11_DATA_LEN) { 922 if (length > IEEE80211_DATA_LEN) {
922 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n", 923 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
923 dev->name, length); 924 dev->name, length);
924 stats->rx_length_errors++; 925 stats->rx_length_errors++;
@@ -1052,8 +1053,9 @@ static void orinoco_join_ap(struct net_device *dev)
1052 u16 channel; 1053 u16 channel;
1053 } __attribute__ ((packed)) req; 1054 } __attribute__ ((packed)) req;
1054 const int atom_len = offsetof(struct prism2_scan_apinfo, atim); 1055 const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
1055 struct prism2_scan_apinfo *atom; 1056 struct prism2_scan_apinfo *atom = NULL;
1056 int offset = 4; 1057 int offset = 4;
1058 int found = 0;
1057 u8 *buf; 1059 u8 *buf;
1058 u16 len; 1060 u16 len;
1059 1061
@@ -1088,15 +1090,18 @@ static void orinoco_join_ap(struct net_device *dev)
1088 * we were requested to join */ 1090 * we were requested to join */
1089 for (; offset + atom_len <= len; offset += atom_len) { 1091 for (; offset + atom_len <= len; offset += atom_len) {
1090 atom = (struct prism2_scan_apinfo *) (buf + offset); 1092 atom = (struct prism2_scan_apinfo *) (buf + offset);
1091 if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) 1093 if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) {
1092 goto found; 1094 found = 1;
1095 break;
1096 }
1093 } 1097 }
1094 1098
1095 DEBUG(1, "%s: Requested AP not found in scan results\n", 1099 if (! found) {
1096 dev->name); 1100 DEBUG(1, "%s: Requested AP not found in scan results\n",
1097 goto out; 1101 dev->name);
1102 goto out;
1103 }
1098 1104
1099 found:
1100 memcpy(req.bssid, priv->desired_bssid, ETH_ALEN); 1105 memcpy(req.bssid, priv->desired_bssid, ETH_ALEN);
1101 req.channel = atom->channel; /* both are little-endian */ 1106 req.channel = atom->channel; /* both are little-endian */
1102 err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST, 1107 err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST,
@@ -1283,8 +1288,10 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1283 /* Read scan data */ 1288 /* Read scan data */
1284 err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len, 1289 err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len,
1285 infofid, sizeof(info)); 1290 infofid, sizeof(info));
1286 if (err) 1291 if (err) {
1292 kfree(buf);
1287 break; 1293 break;
1294 }
1288 1295
1289#ifdef ORINOCO_DEBUG 1296#ifdef ORINOCO_DEBUG
1290 { 1297 {
@@ -2272,7 +2279,7 @@ static int orinoco_init(struct net_device *dev)
2272 2279
2273 /* No need to lock, the hw_unavailable flag is already set in 2280 /* No need to lock, the hw_unavailable flag is already set in
2274 * alloc_orinocodev() */ 2281 * alloc_orinocodev() */
2275 priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN; 2282 priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
2276 2283
2277 /* Initialize the firmware */ 2284 /* Initialize the firmware */
2278 err = orinoco_reinit_firmware(dev); 2285 err = orinoco_reinit_firmware(dev);
@@ -4020,7 +4027,8 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
4020} 4027}
4021 4028
4022/* Translate scan data returned from the card to a card independant 4029/* Translate scan data returned from the card to a card independant
4023 * format that the Wireless Tools will understand - Jean II */ 4030 * format that the Wireless Tools will understand - Jean II
4031 * Return message length or -errno for fatal errors */
4024static inline int orinoco_translate_scan(struct net_device *dev, 4032static inline int orinoco_translate_scan(struct net_device *dev,
4025 char *buffer, 4033 char *buffer,
4026 char *scan, 4034 char *scan,
@@ -4060,13 +4068,19 @@ static inline int orinoco_translate_scan(struct net_device *dev,
4060 break; 4068 break;
4061 case FIRMWARE_TYPE_INTERSIL: 4069 case FIRMWARE_TYPE_INTERSIL:
4062 offset = 4; 4070 offset = 4;
4063 if (priv->has_hostscan) 4071 if (priv->has_hostscan) {
4064 atom_len = scan[0] + (scan[1] << 8); 4072 atom_len = le16_to_cpup((u16 *)scan);
4065 else 4073 /* Sanity check for atom_len */
4074 if (atom_len < sizeof(struct prism2_scan_apinfo)) {
4075 printk(KERN_ERR "%s: Invalid atom_len in scan data: %d\n",
4076 dev->name, atom_len);
4077 return -EIO;
4078 }
4079 } else
4066 atom_len = offsetof(struct prism2_scan_apinfo, atim); 4080 atom_len = offsetof(struct prism2_scan_apinfo, atim);
4067 break; 4081 break;
4068 default: 4082 default:
4069 return 0; 4083 return -EOPNOTSUPP;
4070 } 4084 }
4071 4085
4072 /* Check that we got an whole number of atoms */ 4086 /* Check that we got an whole number of atoms */
@@ -4074,7 +4088,7 @@ static inline int orinoco_translate_scan(struct net_device *dev,
4074 printk(KERN_ERR "%s: Unexpected scan data length %d, " 4088 printk(KERN_ERR "%s: Unexpected scan data length %d, "
4075 "atom_len %d, offset %d\n", dev->name, scan_len, 4089 "atom_len %d, offset %d\n", dev->name, scan_len,
4076 atom_len, offset); 4090 atom_len, offset);
4077 return 0; 4091 return -EIO;
4078 } 4092 }
4079 4093
4080 /* Read the entries one by one */ 4094 /* Read the entries one by one */
@@ -4209,33 +4223,41 @@ static int orinoco_ioctl_getscan(struct net_device *dev,
4209 /* We have some results to push back to user space */ 4223 /* We have some results to push back to user space */
4210 4224
4211 /* Translate to WE format */ 4225 /* Translate to WE format */
4212 srq->length = orinoco_translate_scan(dev, extra, 4226 int ret = orinoco_translate_scan(dev, extra,
4213 priv->scan_result, 4227 priv->scan_result,
4214 priv->scan_len); 4228 priv->scan_len);
4215 4229
4216 /* Return flags */ 4230 if (ret < 0) {
4217 srq->flags = (__u16) priv->scan_mode; 4231 err = ret;
4232 kfree(priv->scan_result);
4233 priv->scan_result = NULL;
4234 } else {
4235 srq->length = ret;
4218 4236
4219 /* Results are here, so scan no longer in progress */ 4237 /* Return flags */
4220 priv->scan_inprogress = 0; 4238 srq->flags = (__u16) priv->scan_mode;
4221 4239
4222 /* In any case, Scan results will be cleaned up in the 4240 /* In any case, Scan results will be cleaned up in the
4223 * reset function and when exiting the driver. 4241 * reset function and when exiting the driver.
4224 * The person triggering the scanning may never come to 4242 * The person triggering the scanning may never come to
4225 * pick the results, so we need to do it in those places. 4243 * pick the results, so we need to do it in those places.
4226 * Jean II */ 4244 * Jean II */
4227 4245
4228#ifdef SCAN_SINGLE_READ 4246#ifdef SCAN_SINGLE_READ
4229 /* If you enable this option, only one client (the first 4247 /* If you enable this option, only one client (the first
4230 * one) will be able to read the result (and only one 4248 * one) will be able to read the result (and only one
4231 * time). If there is multiple concurent clients that 4249 * time). If there is multiple concurent clients that
4232 * want to read scan results, this behavior is not 4250 * want to read scan results, this behavior is not
4233 * advisable - Jean II */ 4251 * advisable - Jean II */
4234 kfree(priv->scan_result); 4252 kfree(priv->scan_result);
4235 priv->scan_result = NULL; 4253 priv->scan_result = NULL;
4236#endif /* SCAN_SINGLE_READ */ 4254#endif /* SCAN_SINGLE_READ */
4237 /* Here, if too much time has elapsed since last scan, 4255 /* Here, if too much time has elapsed since last scan,
4238 * we may want to clean up scan results... - Jean II */ 4256 * we may want to clean up scan results... - Jean II */
4257 }
4258
4259 /* Scan is no longer in progress */
4260 priv->scan_inprogress = 0;
4239 } 4261 }
4240 4262
4241 orinoco_unlock(priv, &flags); 4263 orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 1cc1492083c9..d1fb1bab8aa8 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -604,7 +604,6 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
604 604
605static struct pcmcia_device_id orinoco_cs_ids[] = { 605static struct pcmcia_device_id orinoco_cs_ids[] = {
606 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), 606 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
607 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0001),
608 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), 607 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
609 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), 608 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
610 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), 609 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a),
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
new file mode 100644
index 000000000000..86fa58e5cfac
--- /dev/null
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -0,0 +1,324 @@
1/* orinoco_nortel.c
2 *
3 * Driver for Prism II devices which would usually be driven by orinoco_cs,
4 * but are connected to the PCI bus by a Nortel PCI-PCMCIA-Adapter.
5 *
6 * Copyright (C) 2002 Tobias Hoffmann
7 * (C) 2003 Christoph Jungegger <disdos@traum404.de>
8 *
9 * Some of this code is borrowed from orinoco_plx.c
10 * Copyright (C) 2001 Daniel Barlow
11 * Some of this code is borrowed from orinoco_pci.c
12 * Copyright (C) 2001 Jean Tourrilhes
13 * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
14 * has been copied from it. linux-wlan-ng-0.1.10 is originally :
15 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
16 *
17 * The contents of this file are subject to the Mozilla Public License
18 * Version 1.1 (the "License"); you may not use this file except in
19 * compliance with the License. You may obtain a copy of the License
20 * at http://www.mozilla.org/MPL/
21 *
22 * Software distributed under the License is distributed on an "AS IS"
23 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
24 * the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * Alternatively, the contents of this file may be used under the
28 * terms of the GNU General Public License version 2 (the "GPL"), in
29 * which case the provisions of the GPL are applicable instead of the
30 * above. If you wish to allow the use of your version of this file
31 * only under the terms of the GPL and not to allow others to use your
32 * version of this file under the MPL, indicate your decision by
33 * deleting the provisions above and replace them with the notice and
34 * other provisions required by the GPL. If you do not delete the
35 * provisions above, a recipient may use your version of this file
36 * under either the MPL or the GPL.
37 */
38
39#define DRIVER_NAME "orinoco_nortel"
40#define PFX DRIVER_NAME ": "
41
42#include <linux/config.h>
43
44#include <linux/module.h>
45#include <linux/kernel.h>
46#include <linux/init.h>
47#include <linux/sched.h>
48#include <linux/ptrace.h>
49#include <linux/slab.h>
50#include <linux/string.h>
51#include <linux/timer.h>
52#include <linux/ioport.h>
53#include <asm/uaccess.h>
54#include <asm/io.h>
55#include <asm/system.h>
56#include <linux/netdevice.h>
57#include <linux/if_arp.h>
58#include <linux/etherdevice.h>
59#include <linux/list.h>
60#include <linux/pci.h>
61#include <linux/fcntl.h>
62
63#include <pcmcia/cisreg.h>
64
65#include "hermes.h"
66#include "orinoco.h"
67
68#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
69#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
70
71
72/* Nortel specific data */
73struct nortel_pci_card {
74 unsigned long iobase1;
75 unsigned long iobase2;
76};
77
78/*
79 * Do a soft reset of the PCI card using the Configuration Option Register
80 * We need this to get going...
81 * This is the part of the code that is strongly inspired from wlan-ng
82 *
83 * Note bis : Don't try to access HERMES_CMD during the reset phase.
84 * It just won't work !
85 */
86static int nortel_pci_cor_reset(struct orinoco_private *priv)
87{
88 struct nortel_pci_card *card = priv->card;
89
90	/* Assert the reset until the card notices */
91 outw_p(8, card->iobase1 + 2);
92 inw(card->iobase2 + COR_OFFSET);
93 outw_p(0x80, card->iobase2 + COR_OFFSET);
94 mdelay(1);
95
96 /* Give time for the card to recover from this hard effort */
97 outw_p(0, card->iobase2 + COR_OFFSET);
98 outw_p(0, card->iobase2 + COR_OFFSET);
99 mdelay(1);
100
101 /* set COR as usual */
102 outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
103 outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
104 mdelay(1);
105
106 outw_p(0x228, card->iobase1 + 2);
107
108 return 0;
109}
110
111int nortel_pci_hw_init(struct nortel_pci_card *card)
112{
113 int i;
114 u32 reg;
115
116 /* setup bridge */
117 if (inw(card->iobase1) & 1) {
118 printk(KERN_ERR PFX "brg1 answer1 wrong\n");
119 return -EBUSY;
120 }
121 outw_p(0x118, card->iobase1 + 2);
122 outw_p(0x108, card->iobase1 + 2);
123 mdelay(30);
124 outw_p(0x8, card->iobase1 + 2);
125 for (i = 0; i < 30; i++) {
126 mdelay(30);
127 if (inw(card->iobase1) & 0x10) {
128 break;
129 }
130 }
131 if (i == 30) {
132 printk(KERN_ERR PFX "brg1 timed out\n");
133 return -EBUSY;
134 }
135 if (inw(card->iobase2 + 0xe0) & 1) {
136 printk(KERN_ERR PFX "brg2 answer1 wrong\n");
137 return -EBUSY;
138 }
139 if (inw(card->iobase2 + 0xe2) & 1) {
140 printk(KERN_ERR PFX "brg2 answer2 wrong\n");
141 return -EBUSY;
142 }
143 if (inw(card->iobase2 + 0xe4) & 1) {
144 printk(KERN_ERR PFX "brg2 answer3 wrong\n");
145 return -EBUSY;
146 }
147
148 /* set the PCMCIA COR-Register */
149 outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
150 mdelay(1);
151 reg = inw(card->iobase2 + COR_OFFSET);
152 if (reg != COR_VALUE) {
153 printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n",
154 reg);
155 return -EBUSY;
156 }
157
158 /* set leds */
159 outw_p(1, card->iobase1 + 10);
160 return 0;
161}
162
163static int nortel_pci_init_one(struct pci_dev *pdev,
164 const struct pci_device_id *ent)
165{
166 int err;
167 struct orinoco_private *priv;
168 struct nortel_pci_card *card;
169 struct net_device *dev;
170 void __iomem *iomem;
171
172 err = pci_enable_device(pdev);
173 if (err) {
174 printk(KERN_ERR PFX "Cannot enable PCI device\n");
175 return err;
176 }
177
178 err = pci_request_regions(pdev, DRIVER_NAME);
179 if (err != 0) {
180 printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
181 goto fail_resources;
182 }
183
184 iomem = pci_iomap(pdev, 3, 0);
185 if (!iomem) {
186 err = -ENOMEM;
187 goto fail_map_io;
188 }
189
190 /* Allocate network device */
191 dev = alloc_orinocodev(sizeof(*card), nortel_pci_cor_reset);
192 if (!dev) {
193 printk(KERN_ERR PFX "Cannot allocate network device\n");
194 err = -ENOMEM;
195 goto fail_alloc;
196 }
197
198 priv = netdev_priv(dev);
199 card = priv->card;
200 card->iobase1 = pci_resource_start(pdev, 0);
201 card->iobase2 = pci_resource_start(pdev, 1);
202 dev->base_addr = pci_resource_start(pdev, 2);
203 SET_MODULE_OWNER(dev);
204 SET_NETDEV_DEV(dev, &pdev->dev);
205
206 hermes_struct_init(&priv->hw, iomem, HERMES_16BIT_REGSPACING);
207
208 printk(KERN_DEBUG PFX "Detected Nortel PCI device at %s irq:%d, "
209 "io addr:0x%lx\n", pci_name(pdev), pdev->irq, dev->base_addr);
210
211 err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
212 dev->name, dev);
213 if (err) {
214 printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
215 err = -EBUSY;
216 goto fail_irq;
217 }
218 dev->irq = pdev->irq;
219
220 err = nortel_pci_hw_init(card);
221 if (err) {
222 printk(KERN_ERR PFX "Hardware initialization failed\n");
223 goto fail;
224 }
225
226 err = nortel_pci_cor_reset(priv);
227 if (err) {
228 printk(KERN_ERR PFX "Initial reset failed\n");
229 goto fail;
230 }
231
232
233 err = register_netdev(dev);
234 if (err) {
235 printk(KERN_ERR PFX "Cannot register network device\n");
236 goto fail;
237 }
238
239 pci_set_drvdata(pdev, dev);
240
241 return 0;
242
243 fail:
244 free_irq(pdev->irq, dev);
245
246 fail_irq:
247 pci_set_drvdata(pdev, NULL);
248 free_orinocodev(dev);
249
250 fail_alloc:
251 pci_iounmap(pdev, iomem);
252
253 fail_map_io:
254 pci_release_regions(pdev);
255
256 fail_resources:
257 pci_disable_device(pdev);
258
259 return err;
260}
261
262static void __devexit nortel_pci_remove_one(struct pci_dev *pdev)
263{
264 struct net_device *dev = pci_get_drvdata(pdev);
265 struct orinoco_private *priv = netdev_priv(dev);
266 struct nortel_pci_card *card = priv->card;
267
268 /* clear leds */
269 outw_p(0, card->iobase1 + 10);
270
271 unregister_netdev(dev);
272 free_irq(dev->irq, dev);
273 pci_set_drvdata(pdev, NULL);
274 free_orinocodev(dev);
275 pci_iounmap(pdev, priv->hw.iobase);
276 pci_release_regions(pdev);
277 pci_disable_device(pdev);
278}
279
280
281static struct pci_device_id nortel_pci_id_table[] = {
282 /* Nortel emobility PCI */
283 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
284 {0,},
285};
286
287MODULE_DEVICE_TABLE(pci, nortel_pci_id_table);
288
289static struct pci_driver nortel_pci_driver = {
290 .name = DRIVER_NAME,
291 .id_table = nortel_pci_id_table,
292 .probe = nortel_pci_init_one,
293 .remove = __devexit_p(nortel_pci_remove_one),
294};
295
296static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
297 " (Tobias Hoffmann & Christoph Jungegger <disdos@traum404.de>)";
298MODULE_AUTHOR("Christoph Jungegger <disdos@traum404.de>");
299MODULE_DESCRIPTION
300 ("Driver for wireless LAN cards using the Nortel PCI bridge");
301MODULE_LICENSE("Dual MPL/GPL");
302
303static int __init nortel_pci_init(void)
304{
305 printk(KERN_DEBUG "%s\n", version);
306 return pci_module_init(&nortel_pci_driver);
307}
308
309static void __exit nortel_pci_exit(void)
310{
311 pci_unregister_driver(&nortel_pci_driver);
312 ssleep(1);
313}
314
315module_init(nortel_pci_init);
316module_exit(nortel_pci_exit);
317
318/*
319 * Local variables:
320 * c-indent-level: 8
321 * c-basic-offset: 8
322 * tab-width: 8
323 * End:
324 */
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 7a6f52ea7faa..42e03438291b 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -301,8 +301,6 @@ static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
301 unsigned long flags; 301 unsigned long flags;
302 int err; 302 int err;
303 303
304 printk(KERN_DEBUG "%s: Orinoco-PCI entering sleep mode (state=%d)\n",
305 dev->name, state);
306 304
307 err = orinoco_lock(priv, &flags); 305 err = orinoco_lock(priv, &flags);
308 if (err) { 306 if (err) {
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 0f29a9c7bc2c..9a8790e3580c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2727,6 +2727,9 @@ const struct iw_handler_def prism54_handler_def = {
2727 .standard = (iw_handler *) prism54_handler, 2727 .standard = (iw_handler *) prism54_handler,
2728 .private = (iw_handler *) prism54_private_handler, 2728 .private = (iw_handler *) prism54_private_handler,
2729 .private_args = (struct iw_priv_args *) prism54_private_args, 2729 .private_args = (struct iw_priv_args *) prism54_private_args,
2730#if WIRELESS_EXT > 16
2731 .get_wireless_stats = prism54_get_wireless_stats,
2732#endif /* WIRELESS_EXT > 16 */
2730#if WIRELESS_EXT == 16 2733#if WIRELESS_EXT == 16
2731 .spy_offset = offsetof(islpci_private, spy_data), 2734 .spy_offset = offsetof(islpci_private, spy_data),
2732#endif /* WIRELESS_EXT == 16 */ 2735#endif /* WIRELESS_EXT == 16 */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index efab07e9e24e..6f13d4a8e2d3 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -815,7 +815,6 @@ islpci_setup(struct pci_dev *pdev)
815 ndev->open = &islpci_open; 815 ndev->open = &islpci_open;
816 ndev->stop = &islpci_close; 816 ndev->stop = &islpci_close;
817 ndev->get_stats = &islpci_statistics; 817 ndev->get_stats = &islpci_statistics;
818 ndev->get_wireless_stats = &prism54_get_wireless_stats;
819 ndev->do_ioctl = &prism54_ioctl; 818 ndev->do_ioctl = &prism54_ioctl;
820 ndev->wireless_handlers = 819 ndev->wireless_handlers =
821 (struct iw_handler_def *) &prism54_handler_def; 820 (struct iw_handler_def *) &prism54_handler_def;
@@ -844,6 +843,8 @@ islpci_setup(struct pci_dev *pdev)
844 /* Add pointers to enable iwspy support. */ 843 /* Add pointers to enable iwspy support. */
845 priv->wireless_data.spy_data = &priv->spy_data; 844 priv->wireless_data.spy_data = &priv->spy_data;
846 ndev->wireless_data = &priv->wireless_data; 845 ndev->wireless_data = &priv->wireless_data;
846#else /* WIRELESS_EXT > 16 */
847 ndev->get_wireless_stats = &prism54_get_wireless_stats;
847#endif /* WIRELESS_EXT > 16 */ 848#endif /* WIRELESS_EXT > 16 */
848 849
849 /* save the start and end address of the PCI memory area */ 850 /* save the start and end address of the PCI memory area */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index c17391d947f3..dc040caab7d7 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -267,8 +267,6 @@ prism54_suspend(struct pci_dev *pdev, pm_message_t state)
267 islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; 267 islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
268 BUG_ON(!priv); 268 BUG_ON(!priv);
269 269
270 printk(KERN_NOTICE "%s: got suspend request (state %d)\n",
271 ndev->name, state);
272 270
273 pci_save_state(pdev); 271 pci_save_state(pdev);
274 272
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0e0ba614259a..e9c5ea0f5535 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -53,6 +53,7 @@
53#include <pcmcia/ds.h> 53#include <pcmcia/ds.h>
54#include <pcmcia/mem_op.h> 54#include <pcmcia/mem_op.h>
55 55
56#include <net/ieee80211.h>
56#include <linux/wireless.h> 57#include <linux/wireless.h>
57 58
58#include <asm/io.h> 59#include <asm/io.h>
@@ -64,7 +65,6 @@
64#define WIRELESS_SPY /* Enable spying addresses */ 65#define WIRELESS_SPY /* Enable spying addresses */
65/* Definitions we need for spy */ 66/* Definitions we need for spy */
66typedef struct iw_statistics iw_stats; 67typedef struct iw_statistics iw_stats;
67typedef struct iw_quality iw_qual;
68typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */ 68typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
69 69
70#include "rayctl.h" 70#include "rayctl.h"
@@ -101,7 +101,6 @@ static int ray_dev_close(struct net_device *dev);
101static int ray_dev_config(struct net_device *dev, struct ifmap *map); 101static int ray_dev_config(struct net_device *dev, struct ifmap *map);
102static struct net_device_stats *ray_get_stats(struct net_device *dev); 102static struct net_device_stats *ray_get_stats(struct net_device *dev);
103static int ray_dev_init(struct net_device *dev); 103static int ray_dev_init(struct net_device *dev);
104static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
105 104
106static struct ethtool_ops netdev_ethtool_ops; 105static struct ethtool_ops netdev_ethtool_ops;
107 106
@@ -114,9 +113,8 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
114static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, UCHAR msg_type, 113static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, UCHAR msg_type,
115 unsigned char *data); 114 unsigned char *data);
116static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len); 115static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len);
117#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */
118static iw_stats * ray_get_wireless_stats(struct net_device * dev); 116static iw_stats * ray_get_wireless_stats(struct net_device * dev);
119#endif /* WIRELESS_EXT > 7 */ 117static const struct iw_handler_def ray_handler_def;
120 118
121/***** Prototypes for raylink functions **************************************/ 119/***** Prototypes for raylink functions **************************************/
122static int asc_to_int(char a); 120static int asc_to_int(char a);
@@ -373,11 +371,12 @@ static dev_link_t *ray_attach(void)
373 dev->hard_start_xmit = &ray_dev_start_xmit; 371 dev->hard_start_xmit = &ray_dev_start_xmit;
374 dev->set_config = &ray_dev_config; 372 dev->set_config = &ray_dev_config;
375 dev->get_stats = &ray_get_stats; 373 dev->get_stats = &ray_get_stats;
376 dev->do_ioctl = &ray_dev_ioctl;
377 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 374 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
378#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */ 375 dev->wireless_handlers = &ray_handler_def;
379 dev->get_wireless_stats = ray_get_wireless_stats; 376#ifdef WIRELESS_SPY
380#endif 377 local->wireless_data.spy_data = &local->spy_data;
378 dev->wireless_data = &local->wireless_data;
379#endif /* WIRELESS_SPY */
381 380
382 dev->set_multicast_list = &set_multicast_list; 381 dev->set_multicast_list = &set_multicast_list;
383 382
@@ -1201,436 +1200,420 @@ static struct ethtool_ops netdev_ethtool_ops = {
1201 1200
1202/*====================================================================*/ 1201/*====================================================================*/
1203 1202
1204static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1203/*------------------------------------------------------------------*/
1204/*
1205 * Wireless Handler : get protocol name
1206 */
1207static int ray_get_name(struct net_device *dev,
1208 struct iw_request_info *info,
1209 char *cwrq,
1210 char *extra)
1205{ 1211{
1206 ray_dev_t *local = (ray_dev_t *)dev->priv; 1212 strcpy(cwrq, "IEEE 802.11-FH");
1207 dev_link_t *link = local->finder; 1213 return 0;
1208 int err = 0; 1214}
1209#if WIRELESS_EXT > 7
1210 struct iwreq *wrq = (struct iwreq *) ifr;
1211#endif /* WIRELESS_EXT > 7 */
1212#ifdef WIRELESS_SPY
1213 struct sockaddr address[IW_MAX_SPY];
1214#endif /* WIRELESS_SPY */
1215 1215
1216 if (!(link->state & DEV_PRESENT)) { 1216/*------------------------------------------------------------------*/
1217 DEBUG(2,"ray_dev_ioctl - device not present\n"); 1217/*
1218 return -1; 1218 * Wireless Handler : set frequency
1219 } 1219 */
1220 DEBUG(2,"ray_cs IOCTL dev=%p, ifr=%p, cmd = 0x%x\n",dev,ifr,cmd); 1220static int ray_set_freq(struct net_device *dev,
1221 /* Validate the command */ 1221 struct iw_request_info *info,
1222 switch (cmd) 1222 struct iw_freq *fwrq,
1223 { 1223 char *extra)
1224#if WIRELESS_EXT > 7 1224{
1225 /* --------------- WIRELESS EXTENSIONS --------------- */ 1225 ray_dev_t *local = (ray_dev_t *)dev->priv;
1226 /* Get name */ 1226 int err = -EINPROGRESS; /* Call commit handler */
1227 case SIOCGIWNAME:
1228 strcpy(wrq->u.name, "IEEE 802.11-FH");
1229 break;
1230
1231 /* Get frequency/channel */
1232 case SIOCGIWFREQ:
1233 wrq->u.freq.m = local->sparm.b5.a_hop_pattern;
1234 wrq->u.freq.e = 0;
1235 break;
1236
1237 /* Set frequency/channel */
1238 case SIOCSIWFREQ:
1239 /* Reject if card is already initialised */
1240 if(local->card_status != CARD_AWAITING_PARAM)
1241 {
1242 err = -EBUSY;
1243 break;
1244 }
1245 1227
1246 /* Setting by channel number */ 1228 /* Reject if card is already initialised */
1247 if ((wrq->u.freq.m > USA_HOP_MOD) || (wrq->u.freq.e > 0)) 1229 if(local->card_status != CARD_AWAITING_PARAM)
1248 err = -EOPNOTSUPP; 1230 return -EBUSY;
1249 else
1250 local->sparm.b5.a_hop_pattern = wrq->u.freq.m;
1251 break;
1252 1231
1253 /* Get current network name (ESSID) */ 1232 /* Setting by channel number */
1254 case SIOCGIWESSID: 1233 if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0))
1255 if (wrq->u.data.pointer) 1234 err = -EOPNOTSUPP;
1256 { 1235 else
1257 char essid[IW_ESSID_MAX_SIZE + 1]; 1236 local->sparm.b5.a_hop_pattern = fwrq->m;
1258 /* Get the essid that was set */
1259 memcpy(essid, local->sparm.b5.a_current_ess_id,
1260 IW_ESSID_MAX_SIZE);
1261 essid[IW_ESSID_MAX_SIZE] = '\0';
1262
1263 /* Push it out ! */
1264 wrq->u.data.length = strlen(essid) + 1;
1265 wrq->u.data.flags = 1; /* active */
1266 if (copy_to_user(wrq->u.data.pointer, essid, sizeof(essid)))
1267 err = -EFAULT;
1268 }
1269 break;
1270 1237
1271 /* Set desired network name (ESSID) */ 1238 return err;
1272 case SIOCSIWESSID: 1239}
1273 /* Reject if card is already initialised */ 1240
1274 if(local->card_status != CARD_AWAITING_PARAM) 1241/*------------------------------------------------------------------*/
1275 { 1242/*
1276 err = -EBUSY; 1243 * Wireless Handler : get frequency
1277 break; 1244 */
1278 } 1245static int ray_get_freq(struct net_device *dev,
1246 struct iw_request_info *info,
1247 struct iw_freq *fwrq,
1248 char *extra)
1249{
1250 ray_dev_t *local = (ray_dev_t *)dev->priv;
1279 1251
1280 if (wrq->u.data.pointer) 1252 fwrq->m = local->sparm.b5.a_hop_pattern;
1281 { 1253 fwrq->e = 0;
1282 char card_essid[IW_ESSID_MAX_SIZE + 1]; 1254 return 0;
1283 1255}
1284 /* Check if we asked for `any' */ 1256
1285 if(wrq->u.data.flags == 0) 1257/*------------------------------------------------------------------*/
1286 { 1258/*
1259 * Wireless Handler : set ESSID
1260 */
1261static int ray_set_essid(struct net_device *dev,
1262 struct iw_request_info *info,
1263 struct iw_point *dwrq,
1264 char *extra)
1265{
1266 ray_dev_t *local = (ray_dev_t *)dev->priv;
1267
1268 /* Reject if card is already initialised */
1269 if(local->card_status != CARD_AWAITING_PARAM)
1270 return -EBUSY;
1271
1272 /* Check if we asked for `any' */
1273 if(dwrq->flags == 0) {
1287 /* Corey : can you do that ? */ 1274 /* Corey : can you do that ? */
1288 err = -EOPNOTSUPP; 1275 return -EOPNOTSUPP;
1289 } 1276 } else {
1290 else
1291 {
1292 /* Check the size of the string */ 1277 /* Check the size of the string */
1293 if(wrq->u.data.length > 1278 if(dwrq->length > IW_ESSID_MAX_SIZE + 1) {
1294 IW_ESSID_MAX_SIZE + 1) 1279 return -E2BIG;
1295 {
1296 err = -E2BIG;
1297 break;
1298 }
1299 if (copy_from_user(card_essid,
1300 wrq->u.data.pointer,
1301 wrq->u.data.length)) {
1302 err = -EFAULT;
1303 break;
1304 } 1280 }
1305 card_essid[IW_ESSID_MAX_SIZE] = '\0';
1306 1281
1307 /* Set the ESSID in the card */ 1282 /* Set the ESSID in the card */
1308 memcpy(local->sparm.b5.a_current_ess_id, card_essid, 1283 memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
1309 IW_ESSID_MAX_SIZE); 1284 memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length);
1310 }
1311 } 1285 }
1312 break;
1313
1314 /* Get current Access Point (BSSID in our case) */
1315 case SIOCGIWAP:
1316 memcpy(wrq->u.ap_addr.sa_data, local->bss_id, ETH_ALEN);
1317 wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
1318 break;
1319
1320 /* Get the current bit-rate */
1321 case SIOCGIWRATE:
1322 if(local->net_default_tx_rate == 3)
1323 wrq->u.bitrate.value = 2000000; /* Hum... */
1324 else
1325 wrq->u.bitrate.value = local->net_default_tx_rate * 500000;
1326 wrq->u.bitrate.fixed = 0; /* We are in auto mode */
1327 break;
1328
1329 /* Set the desired bit-rate */
1330 case SIOCSIWRATE:
1331 /* Check if rate is in range */
1332 if((wrq->u.bitrate.value != 1000000) &&
1333 (wrq->u.bitrate.value != 2000000))
1334 {
1335 err = -EINVAL;
1336 break;
1337 }
1338 /* Hack for 1.5 Mb/s instead of 2 Mb/s */
1339 if((local->fw_ver == 0x55) && /* Please check */
1340 (wrq->u.bitrate.value == 2000000))
1341 local->net_default_tx_rate = 3;
1342 else
1343 local->net_default_tx_rate = wrq->u.bitrate.value/500000;
1344 break;
1345
1346 /* Get the current RTS threshold */
1347 case SIOCGIWRTS:
1348 wrq->u.rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
1349 + local->sparm.b5.a_rts_threshold[1];
1350#if WIRELESS_EXT > 8
1351 wrq->u.rts.disabled = (wrq->u.rts.value == 32767);
1352#endif /* WIRELESS_EXT > 8 */
1353 wrq->u.rts.fixed = 1;
1354 break;
1355
1356 /* Set the desired RTS threshold */
1357 case SIOCSIWRTS:
1358 {
1359 int rthr = wrq->u.rts.value;
1360 1286
1361 /* Reject if card is already initialised */ 1287 return -EINPROGRESS; /* Call commit handler */
1362 if(local->card_status != CARD_AWAITING_PARAM) 1288}
1363 {
1364 err = -EBUSY;
1365 break;
1366 }
1367 1289
1368 /* if(wrq->u.rts.fixed == 0) we should complain */ 1290/*------------------------------------------------------------------*/
1369#if WIRELESS_EXT > 8 1291/*
1370 if(wrq->u.rts.disabled) 1292 * Wireless Handler : get ESSID
1371 rthr = 32767; 1293 */
1294static int ray_get_essid(struct net_device *dev,
1295 struct iw_request_info *info,
1296 struct iw_point *dwrq,
1297 char *extra)
1298{
1299 ray_dev_t *local = (ray_dev_t *)dev->priv;
1300
1301 /* Get the essid that was set */
1302 memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
1303 extra[IW_ESSID_MAX_SIZE] = '\0';
1304
1305 /* Push it out ! */
1306 dwrq->length = strlen(extra) + 1;
1307 dwrq->flags = 1; /* active */
1308
1309 return 0;
1310}
1311
1312/*------------------------------------------------------------------*/
1313/*
1314 * Wireless Handler : get AP address
1315 */
1316static int ray_get_wap(struct net_device *dev,
1317 struct iw_request_info *info,
1318 struct sockaddr *awrq,
1319 char *extra)
1320{
1321 ray_dev_t *local = (ray_dev_t *)dev->priv;
1322
1323 memcpy(awrq->sa_data, local->bss_id, ETH_ALEN);
1324 awrq->sa_family = ARPHRD_ETHER;
1325
1326 return 0;
1327}
1328
1329/*------------------------------------------------------------------*/
1330/*
1331 * Wireless Handler : set Bit-Rate
1332 */
1333static int ray_set_rate(struct net_device *dev,
1334 struct iw_request_info *info,
1335 struct iw_param *vwrq,
1336 char *extra)
1337{
1338 ray_dev_t *local = (ray_dev_t *)dev->priv;
1339
1340 /* Reject if card is already initialised */
1341 if(local->card_status != CARD_AWAITING_PARAM)
1342 return -EBUSY;
1343
1344 /* Check if rate is in range */
1345 if((vwrq->value != 1000000) && (vwrq->value != 2000000))
1346 return -EINVAL;
1347
1348 /* Hack for 1.5 Mb/s instead of 2 Mb/s */
1349 if((local->fw_ver == 0x55) && /* Please check */
1350 (vwrq->value == 2000000))
1351 local->net_default_tx_rate = 3;
1372 else 1352 else
1373#endif /* WIRELESS_EXT > 8 */ 1353 local->net_default_tx_rate = vwrq->value/500000;
1374 if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */ 1354
1375 { 1355 return 0;
1376 err = -EINVAL; 1356}
1377 break; 1357
1378 } 1358/*------------------------------------------------------------------*/
1359/*
1360 * Wireless Handler : get Bit-Rate
1361 */
1362static int ray_get_rate(struct net_device *dev,
1363 struct iw_request_info *info,
1364 struct iw_param *vwrq,
1365 char *extra)
1366{
1367 ray_dev_t *local = (ray_dev_t *)dev->priv;
1368
1369 if(local->net_default_tx_rate == 3)
1370 vwrq->value = 2000000; /* Hum... */
1371 else
1372 vwrq->value = local->net_default_tx_rate * 500000;
1373 vwrq->fixed = 0; /* We are in auto mode */
1374
1375 return 0;
1376}
1377
1378/*------------------------------------------------------------------*/
1379/*
1380 * Wireless Handler : set RTS threshold
1381 */
1382static int ray_set_rts(struct net_device *dev,
1383 struct iw_request_info *info,
1384 struct iw_param *vwrq,
1385 char *extra)
1386{
1387 ray_dev_t *local = (ray_dev_t *)dev->priv;
1388 int rthr = vwrq->value;
1389
1390 /* Reject if card is already initialised */
1391 if(local->card_status != CARD_AWAITING_PARAM)
1392 return -EBUSY;
1393
1394 /* if(wrq->u.rts.fixed == 0) we should complain */
1395 if(vwrq->disabled)
1396 rthr = 32767;
1397 else {
1398 if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
1399 return -EINVAL;
1400 }
1379 local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF; 1401 local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;
1380 local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF; 1402 local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;
1381 }
1382 break;
1383 1403
1384 /* Get the current fragmentation threshold */ 1404 return -EINPROGRESS; /* Call commit handler */
1385 case SIOCGIWFRAG: 1405}
1386 wrq->u.frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
1387 + local->sparm.b5.a_frag_threshold[1];
1388#if WIRELESS_EXT > 8
1389 wrq->u.frag.disabled = (wrq->u.frag.value == 32767);
1390#endif /* WIRELESS_EXT > 8 */
1391 wrq->u.frag.fixed = 1;
1392 break;
1393 1406
1394 /* Set the desired fragmentation threshold */
1395 case SIOCSIWFRAG:
1396 {
1397 int fthr = wrq->u.frag.value;
1398 1407
1399 /* Reject if card is already initialised */ 1408/*------------------------------------------------------------------*/
1400 if(local->card_status != CARD_AWAITING_PARAM) 1409/*
1401 { 1410 * Wireless Handler : get RTS threshold
1402 err = -EBUSY; 1411 */
1403 break; 1412static int ray_get_rts(struct net_device *dev,
1404 } 1413 struct iw_request_info *info,
1414 struct iw_param *vwrq,
1415 char *extra)
1416{
1417 ray_dev_t *local = (ray_dev_t *)dev->priv;
1418
1419 vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8)
1420 + local->sparm.b5.a_rts_threshold[1];
1421 vwrq->disabled = (vwrq->value == 32767);
1422 vwrq->fixed = 1;
1423
1424 return 0;
1425}
1426
1427/*------------------------------------------------------------------*/
1428/*
1429 * Wireless Handler : set Fragmentation threshold
1430 */
1431static int ray_set_frag(struct net_device *dev,
1432 struct iw_request_info *info,
1433 struct iw_param *vwrq,
1434 char *extra)
1435{
1436 ray_dev_t *local = (ray_dev_t *)dev->priv;
1437 int fthr = vwrq->value;
1438
1439 /* Reject if card is already initialised */
1440 if(local->card_status != CARD_AWAITING_PARAM)
1441 return -EBUSY;
1405 1442
1406 /* if(wrq->u.frag.fixed == 0) should complain */ 1443 /* if(wrq->u.frag.fixed == 0) should complain */
1407#if WIRELESS_EXT > 8 1444 if(vwrq->disabled)
1408 if(wrq->u.frag.disabled) 1445 fthr = 32767;
1409 fthr = 32767; 1446 else {
1410 else 1447 if((fthr < 256) || (fthr > 2347)) /* To check out ! */
1411#endif /* WIRELESS_EXT > 8 */ 1448 return -EINVAL;
1412 if((fthr < 256) || (fthr > 2347)) /* To check out ! */ 1449 }
1413 {
1414 err = -EINVAL;
1415 break;
1416 }
1417 local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF; 1450 local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF;
1418 local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF; 1451 local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF;
1419 }
1420 break;
1421 1452
1422#endif /* WIRELESS_EXT > 7 */ 1453 return -EINPROGRESS; /* Call commit handler */
1423#if WIRELESS_EXT > 8 1454}
1424 1455
1425 /* Get the current mode of operation */ 1456/*------------------------------------------------------------------*/
1426 case SIOCGIWMODE: 1457/*
1427 if(local->sparm.b5.a_network_type) 1458 * Wireless Handler : get Fragmentation threshold
1428 wrq->u.mode = IW_MODE_INFRA; 1459 */
1429 else 1460static int ray_get_frag(struct net_device *dev,
1430 wrq->u.mode = IW_MODE_ADHOC; 1461 struct iw_request_info *info,
1431 break; 1462 struct iw_param *vwrq,
1463 char *extra)
1464{
1465 ray_dev_t *local = (ray_dev_t *)dev->priv;
1432 1466
1433 /* Set the current mode of operation */ 1467 vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8)
1434 case SIOCSIWMODE: 1468 + local->sparm.b5.a_frag_threshold[1];
1435 { 1469 vwrq->disabled = (vwrq->value == 32767);
1470 vwrq->fixed = 1;
1471
1472 return 0;
1473}
1474
1475/*------------------------------------------------------------------*/
1476/*
1477 * Wireless Handler : set Mode of Operation
1478 */
1479static int ray_set_mode(struct net_device *dev,
1480 struct iw_request_info *info,
1481 __u32 *uwrq,
1482 char *extra)
1483{
1484 ray_dev_t *local = (ray_dev_t *)dev->priv;
1485 int err = -EINPROGRESS; /* Call commit handler */
1436 char card_mode = 1; 1486 char card_mode = 1;
1437
1438 /* Reject if card is already initialised */
1439 if(local->card_status != CARD_AWAITING_PARAM)
1440 {
1441 err = -EBUSY;
1442 break;
1443 }
1444 1487
1445 switch (wrq->u.mode) 1488 /* Reject if card is already initialised */
1489 if(local->card_status != CARD_AWAITING_PARAM)
1490 return -EBUSY;
1491
1492 switch (*uwrq)
1446 { 1493 {
1447 case IW_MODE_ADHOC: 1494 case IW_MODE_ADHOC:
1448 card_mode = 0; 1495 card_mode = 0;
1449 // Fall through 1496 // Fall through
1450 case IW_MODE_INFRA: 1497 case IW_MODE_INFRA:
1451 local->sparm.b5.a_network_type = card_mode; 1498 local->sparm.b5.a_network_type = card_mode;
1452 break; 1499 break;
1453 default: 1500 default:
1454 err = -EINVAL; 1501 err = -EINVAL;
1455 } 1502 }
1456 }
1457 break;
1458 1503
1459#endif /* WIRELESS_EXT > 8 */ 1504 return err;
1460#if WIRELESS_EXT > 7 1505}
1461 /* ------------------ IWSPY SUPPORT ------------------ */
1462 /* Define the range (variations) of above parameters */
1463 case SIOCGIWRANGE:
1464 /* Basic checking... */
1465 if(wrq->u.data.pointer != (caddr_t) 0)
1466 {
1467 struct iw_range range;
1468 memset((char *) &range, 0, sizeof(struct iw_range));
1469
1470 /* Set the length (very important for backward compatibility) */
1471 wrq->u.data.length = sizeof(struct iw_range);
1472
1473#if WIRELESS_EXT > 10
1474 /* Set the Wireless Extension versions */
1475 range.we_version_compiled = WIRELESS_EXT;
1476 range.we_version_source = 9;
1477#endif /* WIRELESS_EXT > 10 */
1478
1479 /* Set information in the range struct */
1480 range.throughput = 1.1 * 1000 * 1000; /* Put the right number here */
1481 range.num_channels = hop_pattern_length[(int)country];
1482 range.num_frequency = 0;
1483 range.max_qual.qual = 0;
1484 range.max_qual.level = 255; /* What's the correct value ? */
1485 range.max_qual.noise = 255; /* Idem */
1486 range.num_bitrates = 2;
1487 range.bitrate[0] = 1000000; /* 1 Mb/s */
1488 range.bitrate[1] = 2000000; /* 2 Mb/s */
1489
1490 /* Copy structure to the user buffer */
1491 if(copy_to_user(wrq->u.data.pointer, &range,
1492 sizeof(struct iw_range)))
1493 err = -EFAULT;
1494 }
1495 break;
1496 1506
1497#ifdef WIRELESS_SPY 1507/*------------------------------------------------------------------*/
1498 /* Set addresses to spy */ 1508/*
1499 case SIOCSIWSPY: 1509 * Wireless Handler : get Mode of Operation
1500 /* Check the number of addresses */ 1510 */
1501 if(wrq->u.data.length > IW_MAX_SPY) 1511static int ray_get_mode(struct net_device *dev,
1502 { 1512 struct iw_request_info *info,
1503 err = -E2BIG; 1513 __u32 *uwrq,
1504 break; 1514 char *extra)
1505 } 1515{
1506 local->spy_number = wrq->u.data.length; 1516 ray_dev_t *local = (ray_dev_t *)dev->priv;
1507 1517
1508 /* If there is some addresses to copy */ 1518 if(local->sparm.b5.a_network_type)
1509 if(local->spy_number > 0) 1519 *uwrq = IW_MODE_INFRA;
1510 { 1520 else
1511 int i; 1521 *uwrq = IW_MODE_ADHOC;
1512
1513 /* Copy addresses to the driver */
1514 if(copy_from_user(address, wrq->u.data.pointer,
1515 sizeof(struct sockaddr) * local->spy_number))
1516 {
1517 err = -EFAULT;
1518 break;
1519 }
1520
1521 /* Copy addresses to the lp structure */
1522 for(i = 0; i < local->spy_number; i++)
1523 memcpy(local->spy_address[i], address[i].sa_data, ETH_ALEN);
1524
1525 /* Reset structure... */
1526 memset(local->spy_stat, 0x00, sizeof(iw_qual) * IW_MAX_SPY);
1527
1528#ifdef DEBUG_IOCTL_INFO
1529 printk(KERN_DEBUG "SetSpy - Set of new addresses is :\n");
1530 for(i = 0; i < local->spy_number; i++)
1531 printk(KERN_DEBUG "%02X:%02X:%02X:%02X:%02X:%02X\n",
1532 local->spy_address[i][0],
1533 local->spy_address[i][1],
1534 local->spy_address[i][2],
1535 local->spy_address[i][3],
1536 local->spy_address[i][4],
1537 local->spy_address[i][5]);
1538#endif /* DEBUG_IOCTL_INFO */
1539 }
1540 break;
1541 1522
1542 /* Get the spy list and spy stats */ 1523 return 0;
1543 case SIOCGIWSPY: 1524}
1544 /* Set the number of addresses */
1545 wrq->u.data.length = local->spy_number;
1546 1525
1547 /* If the user want to have the addresses back... */ 1526/*------------------------------------------------------------------*/
1548 if((local->spy_number > 0) && (wrq->u.data.pointer != (caddr_t) 0)) 1527/*
1549 { 1528 * Wireless Handler : get range info
1550 int i; 1529 */
1551 1530static int ray_get_range(struct net_device *dev,
1552 /* Copy addresses from the lp structure */ 1531 struct iw_request_info *info,
1553 for(i = 0; i < local->spy_number; i++) 1532 struct iw_point *dwrq,
1554 { 1533 char *extra)
1555 memcpy(address[i].sa_data, local->spy_address[i], ETH_ALEN); 1534{
1556 address[i].sa_family = ARPHRD_ETHER; 1535 struct iw_range *range = (struct iw_range *) extra;
1557 } 1536
1558 1537 memset((char *) range, 0, sizeof(struct iw_range));
1559 /* Copy addresses to the user buffer */ 1538
1560 if(copy_to_user(wrq->u.data.pointer, address, 1539 /* Set the length (very important for backward compatibility) */
1561 sizeof(struct sockaddr) * local->spy_number)) 1540 dwrq->length = sizeof(struct iw_range);
1562 { 1541
1563 err = -EFAULT; 1542 /* Set the Wireless Extension versions */
1564 break; 1543 range->we_version_compiled = WIRELESS_EXT;
1565 } 1544 range->we_version_source = 9;
1566 1545
1567 /* Copy stats to the user buffer (just after) */ 1546 /* Set information in the range struct */
1568 if(copy_to_user(wrq->u.data.pointer + 1547 range->throughput = 1.1 * 1000 * 1000; /* Put the right number here */
1569 (sizeof(struct sockaddr) * local->spy_number), 1548 range->num_channels = hop_pattern_length[(int)country];
1570 local->spy_stat, sizeof(iw_qual) * local->spy_number)) 1549 range->num_frequency = 0;
1571 { 1550 range->max_qual.qual = 0;
1572 err = -EFAULT; 1551 range->max_qual.level = 255; /* What's the correct value ? */
1573 break; 1552 range->max_qual.noise = 255; /* Idem */
1574 } 1553 range->num_bitrates = 2;
1575 1554 range->bitrate[0] = 1000000; /* 1 Mb/s */
1576 /* Reset updated flags */ 1555 range->bitrate[1] = 2000000; /* 2 Mb/s */
1577 for(i = 0; i < local->spy_number; i++) 1556 return 0;
1578 local->spy_stat[i].updated = 0x0; 1557}
1579 } /* if(pointer != NULL) */
1580
1581 break;
1582#endif /* WIRELESS_SPY */
1583 1558
1584 /* ------------------ PRIVATE IOCTL ------------------ */ 1559/*------------------------------------------------------------------*/
1585#ifndef SIOCIWFIRSTPRIV 1560/*
1586#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE 1561 * Wireless Private Handler : set framing mode
1587#endif /* SIOCIWFIRSTPRIV */ 1562 */
1588#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */ 1563static int ray_set_framing(struct net_device *dev,
1589#define SIOCGIPFRAMING SIOCIWFIRSTPRIV + 1 /* Get framing mode */ 1564 struct iw_request_info *info,
1590#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */ 1565 union iwreq_data *wrqu,
1591 case SIOCSIPFRAMING: 1566 char *extra)
1592 if(!capable(CAP_NET_ADMIN)) /* For private IOCTLs, we need to check permissions */ 1567{
1593 { 1568 translate = *(extra); /* Set framing mode */
1594 err = -EPERM;
1595 break;
1596 }
1597 translate = *(wrq->u.name); /* Set framing mode */
1598 break;
1599 case SIOCGIPFRAMING:
1600 *(wrq->u.name) = translate;
1601 break;
1602 case SIOCGIPCOUNTRY:
1603 *(wrq->u.name) = country;
1604 break;
1605 case SIOCGIWPRIV:
1606 /* Export our "private" intercace */
1607 if(wrq->u.data.pointer != (caddr_t) 0)
1608 {
1609 struct iw_priv_args priv[] =
1610 { /* cmd, set_args, get_args, name */
1611 { SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "set_framing" },
1612 { SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_framing" },
1613 { SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_country" },
1614 };
1615 /* Set the number of ioctl available */
1616 wrq->u.data.length = 3;
1617 /* Copy structure to the user buffer */
1618 if(copy_to_user(wrq->u.data.pointer, (u_char *) priv,
1619 sizeof(priv)))
1620 err = -EFAULT;
1621 }
1622 break;
1623#endif /* WIRELESS_EXT > 7 */
1624 1569
1570 return 0;
1571}
1625 1572
1626 default: 1573/*------------------------------------------------------------------*/
1627 DEBUG(0,"ray_dev_ioctl cmd = 0x%x\n", cmd); 1574/*
1628 err = -EOPNOTSUPP; 1575 * Wireless Private Handler : get framing mode
1629 } 1576 */
1630 return err; 1577static int ray_get_framing(struct net_device *dev,
1631} /* end ray_dev_ioctl */ 1578 struct iw_request_info *info,
1632/*===========================================================================*/ 1579 union iwreq_data *wrqu,
1633#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */ 1580 char *extra)
1581{
1582 *(extra) = translate;
1583
1584 return 0;
1585}
1586
1587/*------------------------------------------------------------------*/
1588/*
1589 * Wireless Private Handler : get country
1590 */
1591static int ray_get_country(struct net_device *dev,
1592 struct iw_request_info *info,
1593 union iwreq_data *wrqu,
1594 char *extra)
1595{
1596 *(extra) = country;
1597
1598 return 0;
1599}
1600
1601/*------------------------------------------------------------------*/
1602/*
1603 * Commit handler : called after a bunch of SET operations
1604 */
1605static int ray_commit(struct net_device *dev,
1606 struct iw_request_info *info, /* NULL */
1607 void *zwrq, /* NULL */
1608 char *extra) /* NULL */
1609{
1610 return 0;
1611}
1612
1613/*------------------------------------------------------------------*/
1614/*
1615 * Stats handler : return Wireless Stats
1616 */
1634static iw_stats * ray_get_wireless_stats(struct net_device * dev) 1617static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1635{ 1618{
1636 ray_dev_t * local = (ray_dev_t *) dev->priv; 1619 ray_dev_t * local = (ray_dev_t *) dev->priv;
@@ -1642,13 +1625,13 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1642 1625
1643 local->wstats.status = local->card_status; 1626 local->wstats.status = local->card_status;
1644#ifdef WIRELESS_SPY 1627#ifdef WIRELESS_SPY
1645 if((local->spy_number > 0) && (local->sparm.b5.a_network_type == 0)) 1628 if((local->spy_data.spy_number > 0) && (local->sparm.b5.a_network_type == 0))
1646 { 1629 {
1647 /* Get it from the first node in spy list */ 1630 /* Get it from the first node in spy list */
1648 local->wstats.qual.qual = local->spy_stat[0].qual; 1631 local->wstats.qual.qual = local->spy_data.spy_stat[0].qual;
1649 local->wstats.qual.level = local->spy_stat[0].level; 1632 local->wstats.qual.level = local->spy_data.spy_stat[0].level;
1650 local->wstats.qual.noise = local->spy_stat[0].noise; 1633 local->wstats.qual.noise = local->spy_data.spy_stat[0].noise;
1651 local->wstats.qual.updated = local->spy_stat[0].updated; 1634 local->wstats.qual.updated = local->spy_data.spy_stat[0].updated;
1652 } 1635 }
1653#endif /* WIRELESS_SPY */ 1636#endif /* WIRELESS_SPY */
1654 1637
@@ -1659,7 +1642,65 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1659 1642
1660 return &local->wstats; 1643 return &local->wstats;
1661} /* end ray_get_wireless_stats */ 1644} /* end ray_get_wireless_stats */
1662#endif /* WIRELESS_EXT > 7 */ 1645
1646/*------------------------------------------------------------------*/
1647/*
1648 * Structures to export the Wireless Handlers
1649 */
1650
1651static const iw_handler ray_handler[] = {
1652 [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit,
1653 [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) ray_get_name,
1654 [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) ray_set_freq,
1655 [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) ray_get_freq,
1656 [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) ray_set_mode,
1657 [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) ray_get_mode,
1658 [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range,
1659#ifdef WIRELESS_SPY
1660 [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_set_spy,
1661 [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_get_spy,
1662 [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy,
1663 [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy,
1664#endif /* WIRELESS_SPY */
1665 [SIOCGIWAP -SIOCIWFIRST] (iw_handler) ray_get_wap,
1666 [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid,
1667 [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid,
1668 [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) ray_set_rate,
1669 [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) ray_get_rate,
1670 [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) ray_set_rts,
1671 [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) ray_get_rts,
1672 [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) ray_set_frag,
1673 [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) ray_get_frag,
1674};
1675
1676#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
1677#define SIOCGIPFRAMING SIOCIWFIRSTPRIV + 1 /* Get framing mode */
1678#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
1679
1680static const iw_handler ray_private_handler[] = {
1681 [0] (iw_handler) ray_set_framing,
1682 [1] (iw_handler) ray_get_framing,
1683 [3] (iw_handler) ray_get_country,
1684};
1685
1686static const struct iw_priv_args ray_private_args[] = {
1687/* cmd, set_args, get_args, name */
1688{ SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "set_framing" },
1689{ SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_framing" },
1690{ SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_country" },
1691};
1692
1693static const struct iw_handler_def ray_handler_def =
1694{
1695 .num_standard = sizeof(ray_handler)/sizeof(iw_handler),
1696 .num_private = sizeof(ray_private_handler)/sizeof(iw_handler),
1697 .num_private_args = sizeof(ray_private_args)/sizeof(struct iw_priv_args),
1698 .standard = ray_handler,
1699 .private = ray_private_handler,
1700 .private_args = ray_private_args,
1701 .get_wireless_stats = ray_get_wireless_stats,
1702};
1703
1663/*===========================================================================*/ 1704/*===========================================================================*/
1664static int ray_open(struct net_device *dev) 1705static int ray_open(struct net_device *dev)
1665{ 1706{
@@ -2392,20 +2433,15 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
2392 /*local->wstats.qual.noise = none ? */ 2433 /*local->wstats.qual.noise = none ? */
2393 local->wstats.qual.updated = 0x2; 2434 local->wstats.qual.updated = 0x2;
2394 } 2435 }
2395 /* Now, for the addresses in the spy list */ 2436 /* Now, update the spy stuff */
2396 { 2437 {
2397 int i; 2438 struct iw_quality wstats;
2398 /* Look all addresses */ 2439 wstats.level = siglev;
2399 for(i = 0; i < local->spy_number; i++) 2440 /* wstats.noise = none ? */
2400 /* If match */ 2441 /* wstats.qual = none ? */
2401 if(!memcmp(linksrcaddr, local->spy_address[i], ETH_ALEN)) 2442 wstats.updated = 0x2;
2402 { 2443 /* Update spy records */
2403 /* Update statistics */ 2444 wireless_spy_update(dev, linksrcaddr, &wstats);
2404 /*local->spy_stat[i].qual = none ? */
2405 local->spy_stat[i].level = siglev;
2406 /*local->spy_stat[i].noise = none ? */
2407 local->spy_stat[i].updated = 0x2;
2408 }
2409 } 2445 }
2410#endif /* WIRELESS_SPY */ 2446#endif /* WIRELESS_SPY */
2411} /* end rx_data */ 2447} /* end rx_data */
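
The rx_data() hunk above drops the hand-rolled scan of spy_address[]/spy_stat[] in favour of a single wireless_spy_update() call, letting the WE core do the per-address matching. A minimal sketch of that receive-path usage follows; the mydrv_* name is hypothetical, and WIRELESS_SPY is the driver-local knob defined near the top of ray_cs.c.

#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/iw_handler.h>

/* Hypothetical RX hook: report the signal level seen for one source MAC. */
static void mydrv_report_rx_level(struct net_device *dev,
				  unsigned char *src_mac, u8 siglev)
{
#ifdef WIRELESS_SPY
	struct iw_quality wstats;

	wstats.level   = siglev;
	wstats.qual    = 0;	/* not known on this hardware */
	wstats.noise   = 0;	/* not known on this hardware */
	wstats.updated = 0x2;	/* only the level field is valid */

	/* The WE core matches src_mac against the configured spy list. */
	wireless_spy_update(dev, src_mac, &wstats);
#endif /* WIRELESS_SPY */
}
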
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index c77afa14fa86..42660fe64bfd 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -63,13 +63,10 @@ typedef struct ray_dev_t {
63 UCHAR last_rsl; 63 UCHAR last_rsl;
64 int beacon_rxed; 64 int beacon_rxed;
65 struct beacon_rx last_bcn; 65 struct beacon_rx last_bcn;
66#ifdef WIRELESS_EXT
67 iw_stats wstats; /* Wireless specific stats */ 66 iw_stats wstats; /* Wireless specific stats */
68#endif
69#ifdef WIRELESS_SPY 67#ifdef WIRELESS_SPY
70 int spy_number; /* Number of addresses to spy */ 68 struct iw_spy_data spy_data;
71 mac_addr spy_address[IW_MAX_SPY + 1]; /* The addresses to spy */ 69 struct iw_public_data wireless_data;
72 iw_qual spy_stat[IW_MAX_SPY + 1]; /* Statistics gathered */
73#endif /* WIRELESS_SPY */ 70#endif /* WIRELESS_SPY */
74 71
75} ray_dev_t; 72} ray_dev_t;
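
The new spectrum_cs.c below fetches the Symbol images through the hotplug firmware loader when SPECTRUM_FW_INCLUDED is not defined. A minimal sketch of that request_firmware() pattern is shown here with a hypothetical helper name; the driver itself keeps the downloaded images around rather than releasing them right after use.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>

/* Hypothetical helper: load one named image via the hotplug firmware loader. */
static int mydrv_load_image(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, dev);
	if (err) {
		printk(KERN_ERR "mydrv: cannot find firmware %s\n", name);
		return err;
	}

	/* ... program fw->data (fw->size bytes) into the adapter here ... */

	release_firmware(fw);
	return 0;
}
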
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
new file mode 100644
index 000000000000..39c6cdf7f3f7
--- /dev/null
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -0,0 +1,1120 @@
1/*
2 * Driver for 802.11b cards using RAM-loadable Symbol firmware, such as
3 * Symbol Wireless Networker LA4100, CompactFlash cards by Socket
4 * Communications and Intel PRO/Wireless 2011B.
5 *
6 * The driver implements Symbol firmware download. The rest is handled
7 * in hermes.c and orinoco.c.
8 *
9 * Utilities for downloading the Symbol firmware are available at
10 * http://sourceforge.net/projects/orinoco/
11 *
12 * Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
13 * Portions based on orinoco_cs.c:
14 * Copyright (C) David Gibson, Linuxcare Australia
15 * Portions based on Spectrum24tDnld.c from original spectrum24 driver:
16 * Copyright (C) Symbol Technologies.
17 *
18 * See copyright notice in file orinoco.c.
19 */
20
21#define DRIVER_NAME "spectrum_cs"
22#define PFX DRIVER_NAME ": "
23
24#include <linux/config.h>
25#ifdef __IN_PCMCIA_PACKAGE__
26#include <pcmcia/k_compat.h>
27#endif /* __IN_PCMCIA_PACKAGE__ */
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/init.h>
32#include <linux/sched.h>
33#include <linux/ptrace.h>
34#include <linux/slab.h>
35#include <linux/string.h>
36#include <linux/ioport.h>
37#include <linux/netdevice.h>
38#include <linux/if_arp.h>
39#include <linux/etherdevice.h>
40#include <linux/wireless.h>
41
42#include <pcmcia/cs_types.h>
43#include <pcmcia/cs.h>
44#include <pcmcia/cistpl.h>
45#include <pcmcia/cisreg.h>
46#include <pcmcia/ds.h>
47
48#include <asm/uaccess.h>
49#include <asm/io.h>
50#include <asm/system.h>
51
52#include "orinoco.h"
53
54/*
55 * If SPECTRUM_FW_INCLUDED is defined, the firmware is hardcoded into
56 * the driver. Use get_symbol_fw script to generate spectrum_fw.h and
57 * copy it to the same directory as spectrum_cs.c.
58 *
 59 * If SPECTRUM_FW_INCLUDED is not defined, the firmware is loaded at
 60 * runtime using hotplug. Use the same get_symbol_fw script to generate
 61 * the files symbol_sp24t_prim_fw and symbol_sp24t_sec_fw, copy them to the
62 * hotplug firmware directory (typically /usr/lib/hotplug/firmware) and
63 * make sure that you have hotplug installed and enabled in the kernel.
64 */
65/* #define SPECTRUM_FW_INCLUDED 1 */
66
67#ifdef SPECTRUM_FW_INCLUDED
68/* Header with the firmware */
69#include "spectrum_fw.h"
70#else /* !SPECTRUM_FW_INCLUDED */
71#include <linux/firmware.h>
72static unsigned char *primsym;
73static unsigned char *secsym;
74static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
75static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
76#endif /* !SPECTRUM_FW_INCLUDED */
77
78/********************************************************************/
79/* Module stuff */
80/********************************************************************/
81
82MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
83MODULE_DESCRIPTION("Driver for Symbol Spectrum24 Trilogy cards with firmware downloader");
84MODULE_LICENSE("Dual MPL/GPL");
85
86/* Module parameters */
87
 88/* Some D-Link cards have buggy CIS. They do work properly at 5v, but
 89 * don't have any CIS entry for it. This works around it... */
90static int ignore_cis_vcc; /* = 0 */
91module_param(ignore_cis_vcc, int, 0);
92MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket");
93
94/********************************************************************/
95/* Magic constants */
96/********************************************************************/
97
98/*
99 * The dev_info variable is the "key" that is used to match up this
100 * device driver with appropriate cards, through the card
101 * configuration database.
102 */
103static dev_info_t dev_info = DRIVER_NAME;
104
105/********************************************************************/
106/* Data structures */
107/********************************************************************/
108
109/* PCMCIA specific device information (goes in the card field of
110 * struct orinoco_private) */
111struct orinoco_pccard {
112 dev_link_t link;
113 dev_node_t node;
114};
115
116/*
117 * A linked list of "instances" of the device. Each actual PCMCIA
118 * card corresponds to one device instance, and is described by one
119 * dev_link_t structure (defined in ds.h).
120 */
121static dev_link_t *dev_list; /* = NULL */
122
123/********************************************************************/
124/* Function prototypes */
125/********************************************************************/
126
127/* device methods */
128static int spectrum_cs_hard_reset(struct orinoco_private *priv);
129
130/* PCMCIA gumpf */
131static void spectrum_cs_config(dev_link_t * link);
132static void spectrum_cs_release(dev_link_t * link);
133static int spectrum_cs_event(event_t event, int priority,
134 event_callback_args_t * args);
135
136static dev_link_t *spectrum_cs_attach(void);
137static void spectrum_cs_detach(dev_link_t *);
138
139/********************************************************************/
140/* Firmware downloader */
141/********************************************************************/
142
143/* Position of PDA in the adapter memory */
144#define EEPROM_ADDR 0x3000
145#define EEPROM_LEN 0x200
146#define PDA_OFFSET 0x100
147
148#define PDA_ADDR (EEPROM_ADDR + PDA_OFFSET)
149#define PDA_WORDS ((EEPROM_LEN - PDA_OFFSET) / 2)
150
151/* Constants for the CISREG_CCSR register */
152#define HCR_RUN 0x07 /* run firmware after reset */
153#define HCR_IDLE 0x0E /* don't run firmware after reset */
154#define HCR_MEM16 0x10 /* memory width bit, should be preserved */
155
156/*
157 * AUX port access. To unlock the AUX port write the access keys to the
158 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
159 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
160 */
161#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
162#define HERMES_AUX_DISABLE 0x4000 /* Disable auxiliary port access */
163#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
164
165#define HERMES_AUX_PW0 0xFE01
166#define HERMES_AUX_PW1 0xDC23
167#define HERMES_AUX_PW2 0xBA45
168
169/* End markers */
170#define PDI_END 0x00000000 /* End of PDA */
171#define BLOCK_END 0xFFFFFFFF /* Last image block */
172#define TEXT_END 0x1A /* End of text header */
173
174/*
175 * The following structures have little-endian fields denoted by
176 * the leading underscore. Don't access them directly - use inline
177 * functions defined below.
178 */
179
180/*
181 * The binary image to be downloaded consists of a series of data blocks.
182 * Each block has the following structure.
183 */
184struct dblock {
185 u32 _addr; /* adapter address where to write the block */
186 u16 _len; /* length of the data only, in bytes */
187 char data[0]; /* data to be written */
188} __attribute__ ((packed));
189
190/*
191 * Plug Data References are located in the image after the last data
192 * block. They refer to areas in the adapter memory where the plug data
193 * items with matching ID should be written.
194 */
195struct pdr {
196 u32 _id; /* record ID */
197 u32 _addr; /* adapter address where to write the data */
198 u32 _len; /* expected length of the data, in bytes */
199 char next[0]; /* next PDR starts here */
200} __attribute__ ((packed));
201
202
203/*
204 * Plug Data Items are located in the EEPROM read from the adapter by
205 * primary firmware. They refer to the device-specific data that should
206 * be plugged into the secondary firmware.
207 */
208struct pdi {
209 u16 _len; /* length of ID and data, in words */
210 u16 _id; /* record ID */
211 char data[0]; /* plug data */
212} __attribute__ ((packed));
213
214
215/* Functions for access to little-endian data */
216static inline u32
217dblock_addr(const struct dblock *blk)
218{
219 return le32_to_cpu(blk->_addr);
220}
221
222static inline u32
223dblock_len(const struct dblock *blk)
224{
225 return le16_to_cpu(blk->_len);
226}
227
228static inline u32
229pdr_id(const struct pdr *pdr)
230{
231 return le32_to_cpu(pdr->_id);
232}
233
234static inline u32
235pdr_addr(const struct pdr *pdr)
236{
237 return le32_to_cpu(pdr->_addr);
238}
239
240static inline u32
241pdr_len(const struct pdr *pdr)
242{
243 return le32_to_cpu(pdr->_len);
244}
245
246static inline u32
247pdi_id(const struct pdi *pdi)
248{
249 return le16_to_cpu(pdi->_id);
250}
251
252/* Return length of the data only, in bytes */
253static inline u32
254pdi_len(const struct pdi *pdi)
255{
256 return 2 * (le16_to_cpu(pdi->_len) - 1);
257}
258
259
260/* Set address of the auxiliary port */
261static inline void
262spectrum_aux_setaddr(hermes_t *hw, u32 addr)
263{
264 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
265 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
266}
267
268
269/* Open access to the auxiliary port */
270static int
271spectrum_aux_open(hermes_t *hw)
272{
273 int i;
274
275 /* Already open? */
276 if (hermes_read_reg(hw, HERMES_CONTROL) == HERMES_AUX_ENABLED)
277 return 0;
278
279 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
280 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
281 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
282 hermes_write_reg(hw, HERMES_CONTROL, HERMES_AUX_ENABLE);
283
284 for (i = 0; i < 20; i++) {
285 udelay(10);
286 if (hermes_read_reg(hw, HERMES_CONTROL) ==
287 HERMES_AUX_ENABLED)
288 return 0;
289 }
290
291 return -EBUSY;
292}
293
294
295#define CS_CHECK(fn, ret) \
296 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
297
298/*
299 * Reset the card using configuration registers COR and CCSR.
300 * If IDLE is 1, stop the firmware, so that it can be safely rewritten.
301 */
302static int
303spectrum_reset(dev_link_t *link, int idle)
304{
305 int last_ret, last_fn;
306 conf_reg_t reg;
307 u_int save_cor;
308
309	/* Doing this when the hardware is gone is a guaranteed crash */
310 if (!(link->state & DEV_CONFIG))
311 return -ENODEV;
312
313 /* Save original COR value */
314 reg.Function = 0;
315 reg.Action = CS_READ;
316 reg.Offset = CISREG_COR;
317 CS_CHECK(AccessConfigurationRegister,
318 pcmcia_access_configuration_register(link->handle, &reg));
319 save_cor = reg.Value;
320
321 /* Soft-Reset card */
322 reg.Action = CS_WRITE;
323 reg.Offset = CISREG_COR;
324 reg.Value = (save_cor | COR_SOFT_RESET);
325 CS_CHECK(AccessConfigurationRegister,
326 pcmcia_access_configuration_register(link->handle, &reg));
327 udelay(1000);
328
329 /* Read CCSR */
330 reg.Action = CS_READ;
331 reg.Offset = CISREG_CCSR;
332 CS_CHECK(AccessConfigurationRegister,
333 pcmcia_access_configuration_register(link->handle, &reg));
334
335 /*
336 * Start or stop the firmware. Memory width bit should be
337 * preserved from the value we've just read.
338 */
339 reg.Action = CS_WRITE;
340 reg.Offset = CISREG_CCSR;
341 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
342 CS_CHECK(AccessConfigurationRegister,
343 pcmcia_access_configuration_register(link->handle, &reg));
344 udelay(1000);
345
346 /* Restore original COR configuration index */
347 reg.Action = CS_WRITE;
348 reg.Offset = CISREG_COR;
349 reg.Value = (save_cor & ~COR_SOFT_RESET);
350 CS_CHECK(AccessConfigurationRegister,
351 pcmcia_access_configuration_register(link->handle, &reg));
352 udelay(1000);
353 return 0;
354
355 cs_failed:
356 cs_error(link->handle, last_fn, last_ret);
357 return -ENODEV;
358}
359
360
361/*
362 * Scan PDR for the record with the specified RECORD_ID.
363 * If it's not found, return NULL.
364 */
365static struct pdr *
366spectrum_find_pdr(struct pdr *first_pdr, u32 record_id)
367{
368 struct pdr *pdr = first_pdr;
369
370 while (pdr_id(pdr) != PDI_END) {
371 /*
372 * PDR area is currently not terminated by PDI_END.
373 * It's followed by CRC records, which have the type
374 * field where PDR has length. The type can be 0 or 1.
375 */
376 if (pdr_len(pdr) < 2)
377 return NULL;
378
379 /* If the record ID matches, we are done */
380 if (pdr_id(pdr) == record_id)
381 return pdr;
382
383 pdr = (struct pdr *) pdr->next;
384 }
385 return NULL;
386}
387
388
389/* Process one Plug Data Item - find corresponding PDR and plug it */
390static int
391spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
392{
393 struct pdr *pdr;
394
395	/* Find the PDR corresponding to this PDI */
396	pdr = spectrum_find_pdr(first_pdr, pdi_id(pdi));
397
398	/* If no match is found, it is safe to ignore */
399 if (!pdr)
400 return 0;
401
402 /* Lengths of the data in PDI and PDR must match */
403 if (pdi_len(pdi) != pdr_len(pdr))
404 return -EINVAL;
405
406 /* do the actual plugging */
407 spectrum_aux_setaddr(hw, pdr_addr(pdr));
408 hermes_write_words(hw, HERMES_AUXDATA, pdi->data,
409 pdi_len(pdi) / 2);
410
411 return 0;
412}
413
414
415/* Read PDA from the adapter */
416static int
417spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len)
418{
419 int ret;
420 int pda_size;
421
422 /* Issue command to read EEPROM */
423 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
424 if (ret)
425 return ret;
426
427 /* Open auxiliary port */
428 ret = spectrum_aux_open(hw);
429 if (ret)
430 return ret;
431
432 /* read PDA from EEPROM */
433 spectrum_aux_setaddr(hw, PDA_ADDR);
434 hermes_read_words(hw, HERMES_AUXDATA, pda, pda_len / 2);
435
436 /* Check PDA length */
437 pda_size = le16_to_cpu(pda[0]);
438 if (pda_size > pda_len)
439 return -EINVAL;
440
441 return 0;
442}
443
444
445/* Parse PDA and write the records into the adapter */
446static int
447spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
448 u16 *pda)
449{
450 int ret;
451 struct pdi *pdi;
452 struct pdr *first_pdr;
453 const struct dblock *blk = first_block;
454
455 /* Skip all blocks to locate Plug Data References */
456 while (dblock_addr(blk) != BLOCK_END)
457 blk = (struct dblock *) &blk->data[dblock_len(blk)];
458
459 first_pdr = (struct pdr *) blk;
460
461 /* Go through every PDI and plug them into the adapter */
462 pdi = (struct pdi *) (pda + 2);
463 while (pdi_id(pdi) != PDI_END) {
464 ret = spectrum_plug_pdi(hw, first_pdr, pdi);
465 if (ret)
466 return ret;
467
468 /* Increment to the next PDI */
469 pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
470 }
471 return 0;
472}
473
474
475/* Load firmware blocks into the adapter */
476static int
477spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
478{
479 const struct dblock *blk;
480 u32 blkaddr;
481 u32 blklen;
482
483 blk = first_block;
484 blkaddr = dblock_addr(blk);
485 blklen = dblock_len(blk);
486
487 while (dblock_addr(blk) != BLOCK_END) {
488 spectrum_aux_setaddr(hw, blkaddr);
489 hermes_write_words(hw, HERMES_AUXDATA, blk->data,
490 blklen / 2);
491
492 blk = (struct dblock *) &blk->data[blklen];
493 blkaddr = dblock_addr(blk);
494 blklen = dblock_len(blk);
495 }
496 return 0;
497}
498
499
500/*
501 * Process a firmware image - stop the card, load the firmware, reset
502 * the card and make sure it responds. For the secondary firmware take
503 * care of the PDA - read it and then write it on top of the firmware.
504 */
505static int
506spectrum_dl_image(hermes_t *hw, dev_link_t *link,
507 const unsigned char *image)
508{
509 int ret;
510 const unsigned char *ptr;
511 const struct dblock *first_block;
512
513 /* Plug Data Area (PDA) */
514 u16 pda[PDA_WORDS];
515
516 /* Binary block begins after the 0x1A marker */
517 ptr = image;
518 while (*ptr++ != TEXT_END);
519 first_block = (const struct dblock *) ptr;
520
521 /* Read the PDA */
522 if (image != primsym) {
523 ret = spectrum_read_pda(hw, pda, sizeof(pda));
524 if (ret)
525 return ret;
526 }
527
528 /* Stop the firmware, so that it can be safely rewritten */
529 ret = spectrum_reset(link, 1);
530 if (ret)
531 return ret;
532
533 /* Program the adapter with new firmware */
534 ret = spectrum_load_blocks(hw, first_block);
535 if (ret)
536 return ret;
537
538 /* Write the PDA to the adapter */
539 if (image != primsym) {
540 ret = spectrum_apply_pda(hw, first_block, pda);
541 if (ret)
542 return ret;
543 }
544
545 /* Run the firmware */
546 ret = spectrum_reset(link, 0);
547 if (ret)
548 return ret;
549
550 /* Reset hermes chip and make sure it responds */
551 ret = hermes_init(hw);
552
553 /* hermes_reset() should return 0 with the secondary firmware */
554 if (image != primsym && ret != 0)
555 return -ENODEV;
556
557 /* And this should work with any firmware */
558 if (!hermes_present(hw))
559 return -ENODEV;
560
561 return 0;
562}
563
564
565/*
566 * Download the firmware into the card; this also does a PCMCIA soft
567 * reset on the card to make sure it's in a sane state.
568 */
569static int
570spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
571{
572 int ret;
573 client_handle_t handle = link->handle;
574
575#ifndef SPECTRUM_FW_INCLUDED
576 const struct firmware *fw_entry;
577
578 if (request_firmware(&fw_entry, primary_fw_name,
579 &handle_to_dev(handle)) == 0) {
580 primsym = fw_entry->data;
581 } else {
582 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
583 primary_fw_name);
584 return -ENOENT;
585 }
586
587 if (request_firmware(&fw_entry, secondary_fw_name,
588 &handle_to_dev(handle)) == 0) {
589 secsym = fw_entry->data;
590 } else {
591 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
592 secondary_fw_name);
593 return -ENOENT;
594 }
595#endif
596
597 /* Load primary firmware */
598 ret = spectrum_dl_image(hw, link, primsym);
599 if (ret) {
600 printk(KERN_ERR PFX "Primary firmware download failed\n");
601 return ret;
602 }
603
604 /* Load secondary firmware */
605 ret = spectrum_dl_image(hw, link, secsym);
606
607 if (ret) {
608 printk(KERN_ERR PFX "Secondary firmware download failed\n");
609 }
610
611 return ret;
612}
613
614/********************************************************************/
615/* Device methods */
616/********************************************************************/
617
618static int
619spectrum_cs_hard_reset(struct orinoco_private *priv)
620{
621 struct orinoco_pccard *card = priv->card;
622 dev_link_t *link = &card->link;
623 int err;
624
625 if (!hermes_present(&priv->hw)) {
626 /* The firmware needs to be reloaded */
627 if (spectrum_dl_firmware(&priv->hw, &card->link) != 0) {
628 printk(KERN_ERR PFX "Firmware download failed\n");
629 err = -ENODEV;
630 }
631 } else {
632 /* Soft reset using COR and HCR */
633 spectrum_reset(link, 0);
634 }
635
636 return 0;
637}
638
639/********************************************************************/
640/* PCMCIA stuff */
641/********************************************************************/
642
643/*
644 * This creates an "instance" of the driver, allocating local data
645 * structures for one device. The device is registered with Card
646 * Services.
647 *
648 * The dev_link structure is initialized, but we don't actually
649 * configure the card at this point -- we wait until we receive a card
650 * insertion event. */
651static dev_link_t *
652spectrum_cs_attach(void)
653{
654 struct net_device *dev;
655 struct orinoco_private *priv;
656 struct orinoco_pccard *card;
657 dev_link_t *link;
658 client_reg_t client_reg;
659 int ret;
660
661 dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset);
662 if (! dev)
663 return NULL;
664 priv = netdev_priv(dev);
665 card = priv->card;
666
667 /* Link both structures together */
668 link = &card->link;
669 link->priv = dev;
670
671 /* Interrupt setup */
672 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
673 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
674 link->irq.Handler = orinoco_interrupt;
675 link->irq.Instance = dev;
676
677 /* General socket configuration defaults can go here. In this
678 * client, we assume very little, and rely on the CIS for
679 * almost everything. In most clients, many details (i.e.,
680 * number, sizes, and attributes of IO windows) are fixed by
681 * the nature of the device, and can be hard-wired here. */
682 link->conf.Attributes = 0;
683 link->conf.IntType = INT_MEMORY_AND_IO;
684
685 /* Register with Card Services */
686 /* FIXME: need a lock? */
687 link->next = dev_list;
688 dev_list = link;
689
690 client_reg.dev_info = &dev_info;
691 client_reg.Version = 0x0210; /* FIXME: what does this mean? */
692 client_reg.event_callback_args.client_data = link;
693
694 ret = pcmcia_register_client(&link->handle, &client_reg);
695 if (ret != CS_SUCCESS) {
696 cs_error(link->handle, RegisterClient, ret);
697 spectrum_cs_detach(link);
698 return NULL;
699 }
700
701 return link;
702} /* spectrum_cs_attach */
703
704/*
705 * This deletes a driver "instance". The device is de-registered with
706 * Card Services. If it has been released, all local data structures
707 * are freed. Otherwise, the structures will be freed when the device
708 * is released.
709 */
710static void spectrum_cs_detach(dev_link_t *link)
711{
712 dev_link_t **linkp;
713 struct net_device *dev = link->priv;
714
715 /* Locate device structure */
716 for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
717 if (*linkp == link)
718 break;
719
720 BUG_ON(*linkp == NULL);
721
722 if (link->state & DEV_CONFIG)
723 spectrum_cs_release(link);
724
725 /* Break the link with Card Services */
726 if (link->handle)
727 pcmcia_deregister_client(link->handle);
728
729 /* Unlink device structure, and free it */
730 *linkp = link->next;
731 DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev);
732 if (link->dev) {
733 DEBUG(0, PFX "About to unregister net device %p\n",
734 dev);
735 unregister_netdev(dev);
736 }
737 free_orinocodev(dev);
738} /* spectrum_cs_detach */
739
740/*
741 * spectrum_cs_config() is scheduled to run after a CARD_INSERTION
742 * event is received, to configure the PCMCIA socket, and to make the
743 * device available to the system.
744 */
745
746static void
747spectrum_cs_config(dev_link_t *link)
748{
749 struct net_device *dev = link->priv;
750 client_handle_t handle = link->handle;
751 struct orinoco_private *priv = netdev_priv(dev);
752 struct orinoco_pccard *card = priv->card;
753 hermes_t *hw = &priv->hw;
754 int last_fn, last_ret;
755 u_char buf[64];
756 config_info_t conf;
757 cisinfo_t info;
758 tuple_t tuple;
759 cisparse_t parse;
760 void __iomem *mem;
761
762 CS_CHECK(ValidateCIS, pcmcia_validate_cis(handle, &info));
763
764 /*
765 * This reads the card's CONFIG tuple to find its
766 * configuration registers.
767 */
768 tuple.DesiredTuple = CISTPL_CONFIG;
769 tuple.Attributes = 0;
770 tuple.TupleData = buf;
771 tuple.TupleDataMax = sizeof(buf);
772 tuple.TupleOffset = 0;
773 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
774 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
775 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
776 link->conf.ConfigBase = parse.config.base;
777 link->conf.Present = parse.config.rmask[0];
778
779 /* Configure card */
780 link->state |= DEV_CONFIG;
781
782 /* Look up the current Vcc */
783 CS_CHECK(GetConfigurationInfo,
784 pcmcia_get_configuration_info(handle, &conf));
785 link->conf.Vcc = conf.Vcc;
786
787 /*
788 * In this loop, we scan the CIS for configuration table
789 * entries, each of which describes a valid card
790 * configuration, including voltage, IO window, memory window,
791 * and interrupt settings.
792 *
793 * We make no assumptions about the card to be configured: we
794 * use just the information available in the CIS. In an ideal
795 * world, this would work for any PCMCIA card, but it requires
796 * a complete and accurate CIS. In practice, a driver usually
797 * "knows" most of these things without consulting the CIS,
798 * and most client drivers will only use the CIS to fill in
799 * implementation-defined details.
800 */
801 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
802 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
803 while (1) {
804 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
805 cistpl_cftable_entry_t dflt = { .index = 0 };
806
807 if ( (pcmcia_get_tuple_data(handle, &tuple) != 0)
808 || (pcmcia_parse_tuple(handle, &tuple, &parse) != 0))
809 goto next_entry;
810
811 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
812 dflt = *cfg;
813 if (cfg->index == 0)
814 goto next_entry;
815 link->conf.ConfigIndex = cfg->index;
816
817 /* Does this card need audio output? */
818 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
819 link->conf.Attributes |= CONF_ENABLE_SPKR;
820 link->conf.Status = CCSR_AUDIO_ENA;
821 }
822
823 /* Use power settings for Vcc and Vpp if present */
824 /* Note that the CIS values need to be rescaled */
825 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
826 if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
827 DEBUG(2, "spectrum_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
828 if (!ignore_cis_vcc)
829 goto next_entry;
830 }
831 } else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
832 if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
833 DEBUG(2, "spectrum_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
834				if (!ignore_cis_vcc)
835 goto next_entry;
836 }
837 }
838
839 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
840 link->conf.Vpp1 = link->conf.Vpp2 =
841 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
842 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
843 link->conf.Vpp1 = link->conf.Vpp2 =
844 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
845
846 /* Do we need to allocate an interrupt? */
847 link->conf.Attributes |= CONF_ENABLE_IRQ;
848
849 /* IO window settings */
850 link->io.NumPorts1 = link->io.NumPorts2 = 0;
851 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
852 cistpl_io_t *io =
853 (cfg->io.nwin) ? &cfg->io : &dflt.io;
854 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
855 if (!(io->flags & CISTPL_IO_8BIT))
856 link->io.Attributes1 =
857 IO_DATA_PATH_WIDTH_16;
858 if (!(io->flags & CISTPL_IO_16BIT))
859 link->io.Attributes1 =
860 IO_DATA_PATH_WIDTH_8;
861 link->io.IOAddrLines =
862 io->flags & CISTPL_IO_LINES_MASK;
863 link->io.BasePort1 = io->win[0].base;
864 link->io.NumPorts1 = io->win[0].len;
865 if (io->nwin > 1) {
866 link->io.Attributes2 =
867 link->io.Attributes1;
868 link->io.BasePort2 = io->win[1].base;
869 link->io.NumPorts2 = io->win[1].len;
870 }
871
872 /* This reserves IO space but doesn't actually enable it */
873 if (pcmcia_request_io(link->handle, &link->io) != 0)
874 goto next_entry;
875 }
876
877
878 /* If we got this far, we're cool! */
879
880 break;
881
882 next_entry:
883 if (link->io.NumPorts1)
884 pcmcia_release_io(link->handle, &link->io);
885 last_ret = pcmcia_get_next_tuple(handle, &tuple);
886 if (last_ret == CS_NO_MORE_ITEMS) {
887 printk(KERN_ERR PFX "GetNextTuple(): No matching "
888 "CIS configuration. Maybe you need the "
889 "ignore_cis_vcc=1 parameter.\n");
890 goto cs_failed;
891 }
892 }
893
894 /*
895 * Allocate an interrupt line. Note that this does not assign
896 * a handler to the interrupt, unless the 'Handler' member of
897 * the irq structure is initialized.
898 */
899 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
900
901 /* We initialize the hermes structure before completing PCMCIA
902 * configuration just in case the interrupt handler gets
903 * called. */
904 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
905 if (!mem)
906 goto cs_failed;
907
908 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
909
910 /*
911 * This actually configures the PCMCIA socket -- setting up
912 * the I/O windows and the interrupt mapping, and putting the
913 * card and host interface into "Memory and IO" mode.
914 */
915 CS_CHECK(RequestConfiguration,
916 pcmcia_request_configuration(link->handle, &link->conf));
917
918 /* Ok, we have the configuration, prepare to register the netdev */
919 dev->base_addr = link->io.BasePort1;
920 dev->irq = link->irq.AssignedIRQ;
921 SET_MODULE_OWNER(dev);
922 card->node.major = card->node.minor = 0;
923
924 /* Reset card and download firmware */
925 if (spectrum_cs_hard_reset(priv) != 0) {
926 goto failed;
927 }
928
929 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
930 /* Tell the stack we exist */
931 if (register_netdev(dev) != 0) {
932 printk(KERN_ERR PFX "register_netdev() failed\n");
933 goto failed;
934 }
935
936	/* At this point, the dev_node_t structure(s) need to be
937 * initialized and arranged in a linked list at link->dev. */
938 strcpy(card->node.dev_name, dev->name);
939 link->dev = &card->node; /* link->dev being non-NULL is also
940 used to indicate that the
941 net_device has been registered */
942 link->state &= ~DEV_CONFIG_PENDING;
943
944 /* Finally, report what we've done */
945 printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d",
946 dev->name, link->conf.ConfigIndex,
947 link->conf.Vcc / 10, link->conf.Vcc % 10);
948 if (link->conf.Vpp1)
949 printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
950 link->conf.Vpp1 % 10);
951 printk(", irq %d", link->irq.AssignedIRQ);
952 if (link->io.NumPorts1)
953 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
954 link->io.BasePort1 + link->io.NumPorts1 - 1);
955 if (link->io.NumPorts2)
956 printk(" & 0x%04x-0x%04x", link->io.BasePort2,
957 link->io.BasePort2 + link->io.NumPorts2 - 1);
958 printk("\n");
959
960 return;
961
962 cs_failed:
963 cs_error(link->handle, last_fn, last_ret);
964
965 failed:
966 spectrum_cs_release(link);
967} /* spectrum_cs_config */
968
969/*
970 * After a card is removed, spectrum_cs_release() will unregister the
971 * device, and release the PCMCIA configuration. If the device is
972 * still open, this will be postponed until it is closed.
973 */
974static void
975spectrum_cs_release(dev_link_t *link)
976{
977 struct net_device *dev = link->priv;
978 struct orinoco_private *priv = netdev_priv(dev);
979 unsigned long flags;
980
981 /* We're committed to taking the device away now, so mark the
982 * hardware as unavailable */
983 spin_lock_irqsave(&priv->lock, flags);
984 priv->hw_unavailable++;
985 spin_unlock_irqrestore(&priv->lock, flags);
986
987 /* Don't bother checking to see if these succeed or not */
988 pcmcia_release_configuration(link->handle);
989 if (link->io.NumPorts1)
990 pcmcia_release_io(link->handle, &link->io);
991 if (link->irq.AssignedIRQ)
992 pcmcia_release_irq(link->handle, &link->irq);
993 link->state &= ~DEV_CONFIG;
994 if (priv->hw.iobase)
995 ioport_unmap(priv->hw.iobase);
996} /* spectrum_cs_release */
997
998/*
999 * The card status event handler. Mostly, this schedules other stuff
1000 * to run after an event is received.
1001 */
1002static int
1003spectrum_cs_event(event_t event, int priority,
1004 event_callback_args_t * args)
1005{
1006 dev_link_t *link = args->client_data;
1007 struct net_device *dev = link->priv;
1008 struct orinoco_private *priv = netdev_priv(dev);
1009 int err = 0;
1010 unsigned long flags;
1011
1012 switch (event) {
1013 case CS_EVENT_CARD_REMOVAL:
1014 link->state &= ~DEV_PRESENT;
1015 if (link->state & DEV_CONFIG) {
1016 unsigned long flags;
1017
1018 spin_lock_irqsave(&priv->lock, flags);
1019 netif_device_detach(dev);
1020 priv->hw_unavailable++;
1021 spin_unlock_irqrestore(&priv->lock, flags);
1022 }
1023 break;
1024
1025 case CS_EVENT_CARD_INSERTION:
1026 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
1027 spectrum_cs_config(link);
1028 break;
1029
1030 case CS_EVENT_PM_SUSPEND:
1031 link->state |= DEV_SUSPEND;
1032 /* Fall through... */
1033 case CS_EVENT_RESET_PHYSICAL:
1034 /* Mark the device as stopped, to block IO until later */
1035 if (link->state & DEV_CONFIG) {
1036 /* This is probably racy, but I can't think of
1037 a better way, short of rewriting the PCMCIA
1038 layer to not suck :-( */
1039 spin_lock_irqsave(&priv->lock, flags);
1040
1041 err = __orinoco_down(dev);
1042 if (err)
1043 printk(KERN_WARNING "%s: %s: Error %d downing interface\n",
1044 dev->name,
1045 event == CS_EVENT_PM_SUSPEND ? "SUSPEND" : "RESET_PHYSICAL",
1046 err);
1047
1048 netif_device_detach(dev);
1049 priv->hw_unavailable++;
1050
1051 spin_unlock_irqrestore(&priv->lock, flags);
1052
1053 pcmcia_release_configuration(link->handle);
1054 }
1055 break;
1056
1057 case CS_EVENT_PM_RESUME:
1058 link->state &= ~DEV_SUSPEND;
1059 /* Fall through... */
1060 case CS_EVENT_CARD_RESET:
1061 if (link->state & DEV_CONFIG) {
1062 /* FIXME: should we double check that this is
1063			 * the same card as we had before? */
1064 pcmcia_request_configuration(link->handle, &link->conf);
1065 netif_device_attach(dev);
1066 priv->hw_unavailable--;
1067 schedule_work(&priv->reset_work);
1068 }
1069 break;
1070 }
1071
1072 return err;
1073} /* spectrum_cs_event */
1074
1075/********************************************************************/
1076/* Module initialization */
1077/********************************************************************/
1078
1079/* Can't be declared "const" or the whole __initdata section will
1080 * become const */
1081static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
1082 " (Pavel Roskin <proski@gnu.org>,"
1083 " David Gibson <hermes@gibson.dropbear.id.au>, et al)";
1084
1085static struct pcmcia_device_id spectrum_cs_ids[] = {
1086 PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */
1087 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
1088 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0001), /* Intel PRO/Wireless 2011B */
1089 PCMCIA_DEVICE_NULL,
1090};
1091MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids);
1092
1093static struct pcmcia_driver orinoco_driver = {
1094 .owner = THIS_MODULE,
1095 .drv = {
1096 .name = DRIVER_NAME,
1097 },
1098 .attach = spectrum_cs_attach,
1099 .event = spectrum_cs_event,
1100 .detach = spectrum_cs_detach,
1101 .id_table = spectrum_cs_ids,
1102};
1103
1104static int __init
1105init_spectrum_cs(void)
1106{
1107 printk(KERN_DEBUG "%s\n", version);
1108
1109 return pcmcia_register_driver(&orinoco_driver);
1110}
1111
1112static void __exit
1113exit_spectrum_cs(void)
1114{
1115 pcmcia_unregister_driver(&orinoco_driver);
1116 BUG_ON(dev_list != NULL);
1117}
1118
1119module_init(init_spectrum_cs);
1120module_exit(exit_spectrum_cs);
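
The configuration loop in spectrum_cs_config() above follows the usual Card Services pattern: walk the CISTPL_CFTABLE_ENTRY tuples, remember the most recent default entry, and fall back to the default's values wherever the current entry leaves a field unset. As a rough standalone sketch of that fallback logic only (cfg_entry and pick_io_window are made-up stand-ins, and all Card Services calls are omitted), something like the following captures the idea:

#include <stdio.h>

/* Simplified stand-ins for the CIS fields the driver cares about. */
struct cfg_entry {
	int index;           /* 0 marks a default-only entry */
	int is_default;      /* CISTPL_CFTABLE_DEFAULT equivalent */
	int vcc;             /* nominal Vcc in tenths of a volt, 0 = not present */
	int io_base, io_len; /* first I/O window, 0 = not present */
};

/* Prefer the entry's own I/O window, fall back to the last default entry. */
static void pick_io_window(const struct cfg_entry *cfg,
			   const struct cfg_entry *dflt,
			   int *base, int *len)
{
	const struct cfg_entry *src = cfg->io_len ? cfg : dflt;
	*base = src->io_base;
	*len = src->io_len;
}

int main(void)
{
	struct cfg_entry entries[] = {
		{ .index = 1, .is_default = 1, .vcc = 50, .io_base = 0x300, .io_len = 64 },
		{ .index = 2, .vcc = 33 },	/* inherits the I/O window from the default */
	};
	struct cfg_entry dflt = { 0 };
	int base, len;

	for (int i = 0; i < 2; i++) {
		if (entries[i].is_default)
			dflt = entries[i];
		pick_io_window(&entries[i], &dflt, &base, &len);
		printf("entry %d: Vcc %d.%d, io 0x%x len %d\n",
		       entries[i].index, entries[i].vcc / 10,
		       entries[i].vcc % 10, base, len);
	}
	return 0;
}
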
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 6c42b573a95a..4b0acae22b0d 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -209,7 +209,7 @@ enum {
209 NoStructure = 0, /* Really old firmware */ 209 NoStructure = 0, /* Really old firmware */
210 StructuredMessages = 1, /* Parsable AT response msgs */ 210 StructuredMessages = 1, /* Parsable AT response msgs */
211 ChecksummedMessages = 2 /* Parsable AT response msgs with checksums */ 211 ChecksummedMessages = 2 /* Parsable AT response msgs with checksums */
212} FirmwareLevel; 212};
213 213
214struct strip { 214struct strip {
215 int magic; 215 int magic;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index f6130a53b796..183c4732ef65 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -59,6 +59,12 @@
59/* Do *NOT* add other headers here, you are guaranteed to be wrong - Jean II */ 59/* Do *NOT* add other headers here, you are guaranteed to be wrong - Jean II */
60#include "wavelan_cs.p.h" /* Private header */ 60#include "wavelan_cs.p.h" /* Private header */
61 61
62#ifdef WAVELAN_ROAMING
63static void wl_cell_expiry(unsigned long data);
64static void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp);
65static void wv_nwid_filter(unsigned char mode, net_local *lp);
66#endif /* WAVELAN_ROAMING */
67
62/************************* MISC SUBROUTINES **************************/ 68/************************* MISC SUBROUTINES **************************/
63/* 69/*
64 * Subroutines which won't fit in one of the following category 70 * Subroutines which won't fit in one of the following category
@@ -500,9 +506,9 @@ fee_write(u_long base, /* i/o port of the card */
500 506
501#ifdef WAVELAN_ROAMING /* Conditional compile, see wavelan_cs.h */ 507#ifdef WAVELAN_ROAMING /* Conditional compile, see wavelan_cs.h */
502 508
503unsigned char WAVELAN_BEACON_ADDRESS[]= {0x09,0x00,0x0e,0x20,0x03,0x00}; 509static unsigned char WAVELAN_BEACON_ADDRESS[] = {0x09,0x00,0x0e,0x20,0x03,0x00};
504 510
505void wv_roam_init(struct net_device *dev) 511static void wv_roam_init(struct net_device *dev)
506{ 512{
507 net_local *lp= netdev_priv(dev); 513 net_local *lp= netdev_priv(dev);
508 514
@@ -531,7 +537,7 @@ void wv_roam_init(struct net_device *dev)
531 printk(KERN_DEBUG "WaveLAN: Roaming enabled on device %s\n",dev->name); 537 printk(KERN_DEBUG "WaveLAN: Roaming enabled on device %s\n",dev->name);
532} 538}
533 539
534void wv_roam_cleanup(struct net_device *dev) 540static void wv_roam_cleanup(struct net_device *dev)
535{ 541{
536 wavepoint_history *ptr,*old_ptr; 542 wavepoint_history *ptr,*old_ptr;
537 net_local *lp= netdev_priv(dev); 543 net_local *lp= netdev_priv(dev);
@@ -550,7 +556,7 @@ void wv_roam_cleanup(struct net_device *dev)
550} 556}
551 557
552/* Enable/Disable NWID promiscuous mode on a given device */ 558/* Enable/Disable NWID promiscuous mode on a given device */
553void wv_nwid_filter(unsigned char mode, net_local *lp) 559static void wv_nwid_filter(unsigned char mode, net_local *lp)
554{ 560{
555 mm_t m; 561 mm_t m;
556 unsigned long flags; 562 unsigned long flags;
@@ -575,7 +581,7 @@ void wv_nwid_filter(unsigned char mode, net_local *lp)
575} 581}
576 582
577/* Find a record in the WavePoint table matching a given NWID */ 583/* Find a record in the WavePoint table matching a given NWID */
578wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp) 584static wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp)
579{ 585{
580 wavepoint_history *ptr=lp->wavepoint_table.head; 586 wavepoint_history *ptr=lp->wavepoint_table.head;
581 587
@@ -588,7 +594,7 @@ wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp)
588} 594}
589 595
590/* Create a new wavepoint table entry */ 596/* Create a new wavepoint table entry */
591wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local* lp) 597static wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local* lp)
592{ 598{
593 wavepoint_history *new_wavepoint; 599 wavepoint_history *new_wavepoint;
594 600
@@ -624,7 +630,7 @@ wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_
624} 630}
625 631
626/* Remove a wavepoint entry from WavePoint table */ 632/* Remove a wavepoint entry from WavePoint table */
627void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp) 633static void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp)
628{ 634{
629 if(wavepoint==NULL) 635 if(wavepoint==NULL)
630 return; 636 return;
@@ -646,7 +652,7 @@ void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp)
646} 652}
647 653
648/* Timer callback function - checks WavePoint table for stale entries */ 654/* Timer callback function - checks WavePoint table for stale entries */
649void wl_cell_expiry(unsigned long data) 655static void wl_cell_expiry(unsigned long data)
650{ 656{
651 net_local *lp=(net_local *)data; 657 net_local *lp=(net_local *)data;
652 wavepoint_history *wavepoint=lp->wavepoint_table.head,*old_point; 658 wavepoint_history *wavepoint=lp->wavepoint_table.head,*old_point;
@@ -686,7 +692,7 @@ void wl_cell_expiry(unsigned long data)
686} 692}
687 693
688/* Update SNR history of a wavepoint */ 694/* Update SNR history of a wavepoint */
689void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq) 695static void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq)
690{ 696{
691 int i=0,num_missed=0,ptr=0; 697 int i=0,num_missed=0,ptr=0;
692 int average_fast=0,average_slow=0; 698 int average_fast=0,average_slow=0;
@@ -723,7 +729,7 @@ void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsi
723} 729}
724 730
725/* Perform a handover to a new WavePoint */ 731/* Perform a handover to a new WavePoint */
726void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp) 732static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
727{ 733{
728 kio_addr_t base = lp->dev->base_addr; 734 kio_addr_t base = lp->dev->base_addr;
729 mm_t m; 735 mm_t m;
diff --git a/drivers/net/wireless/wavelan_cs.h b/drivers/net/wireless/wavelan_cs.h
index 29cff6daf860..fabc63ee153c 100644
--- a/drivers/net/wireless/wavelan_cs.h
+++ b/drivers/net/wireless/wavelan_cs.h
@@ -62,7 +62,7 @@
62 * like DEC RoamAbout, or Digital Ocean, Epson, ...), you must modify this 62 * like DEC RoamAbout, or Digital Ocean, Epson, ...), you must modify this
63 * part to accommodate your hardware... 63 * part to accommodate your hardware...
64 */ 64 */
65const unsigned char MAC_ADDRESSES[][3] = 65static const unsigned char MAC_ADDRESSES[][3] =
66{ 66{
67 { 0x08, 0x00, 0x0E }, /* AT&T Wavelan (standard) & DEC RoamAbout */ 67 { 0x08, 0x00, 0x0E }, /* AT&T Wavelan (standard) & DEC RoamAbout */
68 { 0x08, 0x00, 0x6A }, /* AT&T Wavelan (alternate) */ 68 { 0x08, 0x00, 0x6A }, /* AT&T Wavelan (alternate) */
@@ -79,14 +79,14 @@ const unsigned char MAC_ADDRESSES[][3] =
79 * (as read in the offset register of the dac area). 79 * (as read in the offset register of the dac area).
80 * Used to map channel numbers used by `wfreqsel' to frequencies 80 * Used to map channel numbers used by `wfreqsel' to frequencies
81 */ 81 */
82const short channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8, 82static const short channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
83 0xD0, 0xF0, 0xF8, 0x150 }; 83 0xD0, 0xF0, 0xF8, 0x150 };
84 84
85/* Frequencies of the 1.0 modem (fixed frequencies). 85/* Frequencies of the 1.0 modem (fixed frequencies).
86 * Use to map the PSA `subband' to a frequency 86 * Use to map the PSA `subband' to a frequency
87 * Note : all frequencies apart from the first one need to be multiplied by 10 87 * Note : all frequencies apart from the first one need to be multiplied by 10
88 */ 88 */
89const int fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 }; 89static const int fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
90 90
91 91
92/*************************** PC INTERFACE ****************************/ 92/*************************** PC INTERFACE ****************************/
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 677ff71883cb..01d882be8790 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -647,23 +647,6 @@ struct net_local
647 void __iomem *mem; 647 void __iomem *mem;
648}; 648};
649 649
650/**************************** PROTOTYPES ****************************/
651
652#ifdef WAVELAN_ROAMING
653/* ---------------------- ROAMING SUBROUTINES -----------------------*/
654
655wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp);
656wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local *lp);
657void wl_del_wavepoint(wavepoint_history *wavepoint, net_local *lp);
658void wl_cell_expiry(unsigned long data);
659wavepoint_history *wl_best_sigqual(int fast_search, net_local *lp);
660void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq);
661void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp);
662void wv_nwid_filter(unsigned char mode, net_local *lp);
663void wv_roam_init(struct net_device *dev);
664void wv_roam_cleanup(struct net_device *dev);
665#endif /* WAVELAN_ROAMING */
666
667/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */ 650/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
668static inline u_char /* data */ 651static inline u_char /* data */
669 hasr_read(u_long); /* Read the host interface : base address */ 652 hasr_read(u_long); /* Read the host interface : base address */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8636d9306785..7fcbe589c3f2 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -2,7 +2,7 @@
2#define __WL3501_H__ 2#define __WL3501_H__
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include "ieee802_11.h" 5#include <net/ieee80211.h>
6 6
7/* define for WLA 2.0 */ 7/* define for WLA 2.0 */
8#define WL3501_BLKSZ 256 8#define WL3501_BLKSZ 256
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
548 548
549struct wl3501_80211_tx_hdr { 549struct wl3501_80211_tx_hdr {
550 struct wl3501_80211_tx_plcp_hdr pclp_hdr; 550 struct wl3501_80211_tx_plcp_hdr pclp_hdr;
551 struct ieee802_11_hdr mac_hdr; 551 struct ieee80211_hdr mac_hdr;
552} __attribute__ ((packed)); 552} __attribute__ ((packed));
553 553
554/* 554/*
@@ -609,6 +609,7 @@ struct wl3501_card {
609 struct net_device_stats stats; 609 struct net_device_stats stats;
610 struct iw_statistics wstats; 610 struct iw_statistics wstats;
611 struct iw_spy_data spy_data; 611 struct iw_spy_data spy_data;
612 struct iw_public_data wireless_data;
612 struct dev_node_t node; 613 struct dev_node_t node;
613}; 614};
614#endif 615#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index dd902126d018..3f8c27f0871b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -296,7 +296,8 @@ static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
296 * 296 *
297 * Move 'size' bytes from PC to card. (Shouldn't be interrupted) 297 * Move 'size' bytes from PC to card. (Shouldn't be interrupted)
298 */ 298 */
299void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src, int size) 299static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
300 int size)
300{ 301{
301 /* switch to SRAM Page 0 */ 302 /* switch to SRAM Page 0 */
302 wl3501_switch_page(this, (dest & 0x8000) ? WL3501_BSS_SPAGE1 : 303 wl3501_switch_page(this, (dest & 0x8000) ? WL3501_BSS_SPAGE1 :
@@ -317,8 +318,8 @@ void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src, int size)
317 * 318 *
318 * Move 'size' bytes from card to PC. (Shouldn't be interrupted) 319 * Move 'size' bytes from card to PC. (Shouldn't be interrupted)
319 */ 320 */
320void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest, 321static void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
321 int size) 322 int size)
322{ 323{
323 /* switch to SRAM Page 0 */ 324 /* switch to SRAM Page 0 */
324 wl3501_switch_page(this, (src & 0x8000) ? WL3501_BSS_SPAGE1 : 325 wl3501_switch_page(this, (src & 0x8000) ? WL3501_BSS_SPAGE1 :
@@ -1438,14 +1439,14 @@ fail:
1438 goto out; 1439 goto out;
1439} 1440}
1440 1441
1441struct net_device_stats *wl3501_get_stats(struct net_device *dev) 1442static struct net_device_stats *wl3501_get_stats(struct net_device *dev)
1442{ 1443{
1443 struct wl3501_card *this = dev->priv; 1444 struct wl3501_card *this = dev->priv;
1444 1445
1445 return &this->stats; 1446 return &this->stats;
1446} 1447}
1447 1448
1448struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev) 1449static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
1449{ 1450{
1450 struct wl3501_card *this = dev->priv; 1451 struct wl3501_card *this = dev->priv;
1451 struct iw_statistics *wstats = &this->wstats; 1452 struct iw_statistics *wstats = &this->wstats;
@@ -1943,7 +1944,7 @@ static const iw_handler wl3501_handler[] = {
1943static const struct iw_handler_def wl3501_handler_def = { 1944static const struct iw_handler_def wl3501_handler_def = {
1944 .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler), 1945 .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler),
1945 .standard = (iw_handler *)wl3501_handler, 1946 .standard = (iw_handler *)wl3501_handler,
1946 .spy_offset = offsetof(struct wl3501_card, spy_data), 1947 .get_wireless_stats = wl3501_get_wireless_stats,
1947}; 1948};
1948 1949
1949/** 1950/**
@@ -1960,6 +1961,7 @@ static dev_link_t *wl3501_attach(void)
1960 client_reg_t client_reg; 1961 client_reg_t client_reg;
1961 dev_link_t *link; 1962 dev_link_t *link;
1962 struct net_device *dev; 1963 struct net_device *dev;
1964 struct wl3501_card *this;
1963 int ret; 1965 int ret;
1964 1966
1965 /* Initialize the dev_link_t structure */ 1967 /* Initialize the dev_link_t structure */
@@ -1994,7 +1996,9 @@ static dev_link_t *wl3501_attach(void)
1994 dev->tx_timeout = wl3501_tx_timeout; 1996 dev->tx_timeout = wl3501_tx_timeout;
1995 dev->watchdog_timeo = 5 * HZ; 1997 dev->watchdog_timeo = 5 * HZ;
1996 dev->get_stats = wl3501_get_stats; 1998 dev->get_stats = wl3501_get_stats;
1997 dev->get_wireless_stats = wl3501_get_wireless_stats; 1999 this = dev->priv;
2000 this->wireless_data.spy_data = &this->spy_data;
2001 dev->wireless_data = &this->wireless_data;
1998 dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def; 2002 dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def;
1999 SET_ETHTOOL_OPS(dev, &ops); 2003 SET_ETHTOOL_OPS(dev, &ops);
2000 netif_stop_queue(dev); 2004 netif_stop_queue(dev);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 4598c6a9212d..97f723179f62 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2739,6 +2739,7 @@ enum parport_pc_pci_cards {
2739 syba_2p_epp, 2739 syba_2p_epp,
2740 syba_1p_ecp, 2740 syba_1p_ecp,
2741 titan_010l, 2741 titan_010l,
2742 titan_1284p1,
2742 titan_1284p2, 2743 titan_1284p2,
2743 avlab_1p, 2744 avlab_1p,
2744 avlab_2p, 2745 avlab_2p,
@@ -2811,6 +2812,7 @@ static struct parport_pc_pci {
2811 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } }, 2812 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
2812 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } }, 2813 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
2813 /* titan_010l */ { 1, { { 3, -1 }, } }, 2814 /* titan_010l */ { 1, { { 3, -1 }, } },
2815 /* titan_1284p1 */ { 1, { { 0, 1 }, } },
2814 /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } }, 2816 /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
2815 /* avlab_1p */ { 1, { { 0, 1}, } }, 2817 /* avlab_1p */ { 1, { { 0, 1}, } },
2816 /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} }, 2818 /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
@@ -2884,6 +2886,7 @@ static struct pci_device_id parport_pc_pci_tbl[] = {
2884 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp }, 2886 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
2885 { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L, 2887 { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
2886 PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l }, 2888 PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
2889 { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
2887 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 }, 2890 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
2888 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ 2891 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
2889 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */ 2892 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2b85aa39f954..532f73bb2224 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -91,6 +91,7 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
91{ 91{
92 struct msi_desc *entry; 92 struct msi_desc *entry;
93 struct msg_address address; 93 struct msg_address address;
94 unsigned int irq = vector;
94 95
95 entry = (struct msi_desc *)msi_desc[vector]; 96 entry = (struct msi_desc *)msi_desc[vector];
96 if (!entry || !entry->dev) 97 if (!entry || !entry->dev)
@@ -112,6 +113,7 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
112 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask); 113 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
113 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), 114 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
114 address.lo_address.value); 115 address.lo_address.value);
116 set_native_irq_info(irq, cpu_mask);
115 break; 117 break;
116 } 118 }
117 case PCI_CAP_ID_MSIX: 119 case PCI_CAP_ID_MSIX:
@@ -125,22 +127,13 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
125 MSI_TARGET_CPU_SHIFT); 127 MSI_TARGET_CPU_SHIFT);
126 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask); 128 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
127 writel(address.lo_address.value, entry->mask_base + offset); 129 writel(address.lo_address.value, entry->mask_base + offset);
130 set_native_irq_info(irq, cpu_mask);
128 break; 131 break;
129 } 132 }
130 default: 133 default:
131 break; 134 break;
132 } 135 }
133} 136}
134
135#ifdef CONFIG_IRQBALANCE
136static inline void move_msi(int vector)
137{
138 if (!cpus_empty(pending_irq_balance_cpumask[vector])) {
139 set_msi_affinity(vector, pending_irq_balance_cpumask[vector]);
140 cpus_clear(pending_irq_balance_cpumask[vector]);
141 }
142}
143#endif /* CONFIG_IRQBALANCE */
144#endif /* CONFIG_SMP */ 137#endif /* CONFIG_SMP */
145 138
146static void mask_MSI_irq(unsigned int vector) 139static void mask_MSI_irq(unsigned int vector)
@@ -191,13 +184,13 @@ static void shutdown_msi_irq(unsigned int vector)
191 184
192static void end_msi_irq_wo_maskbit(unsigned int vector) 185static void end_msi_irq_wo_maskbit(unsigned int vector)
193{ 186{
194 move_msi(vector); 187 move_native_irq(vector);
195 ack_APIC_irq(); 188 ack_APIC_irq();
196} 189}
197 190
198static void end_msi_irq_w_maskbit(unsigned int vector) 191static void end_msi_irq_w_maskbit(unsigned int vector)
199{ 192{
200 move_msi(vector); 193 move_native_irq(vector);
201 unmask_MSI_irq(vector); 194 unmask_MSI_irq(vector);
202 ack_APIC_irq(); 195 ack_APIC_irq();
203} 196}
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 390f1851c0f1..402136a5c9e4 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -19,7 +19,6 @@
19#define NR_HP_RESERVED_VECTORS 20 19#define NR_HP_RESERVED_VECTORS 20
20 20
21extern int vector_irq[NR_VECTORS]; 21extern int vector_irq[NR_VECTORS];
22extern cpumask_t pending_irq_balance_cpumask[NR_IRQS];
23extern void (*interrupt[NR_IRQS])(void); 22extern void (*interrupt[NR_IRQS])(void);
24extern int pci_vector_resources(int last, int nr_released); 23extern int pci_vector_resources(int last, int nr_released);
25 24
@@ -29,10 +28,6 @@ extern int pci_vector_resources(int last, int nr_released);
29#define set_msi_irq_affinity NULL 28#define set_msi_irq_affinity NULL
30#endif 29#endif
31 30
32#ifndef CONFIG_IRQBALANCE
33static inline void move_msi(int vector) {}
34#endif
35
36/* 31/*
37 * MSI-X Address Register 32 * MSI-X Address Register
38 */ 33 */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1b34fc56067e..c62d2f043397 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -333,13 +333,17 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
333 if (platform_pci_choose_state) { 333 if (platform_pci_choose_state) {
334 ret = platform_pci_choose_state(dev, state); 334 ret = platform_pci_choose_state(dev, state);
335 if (ret >= 0) 335 if (ret >= 0)
336 state = ret; 336 state.event = ret;
337 } 337 }
338 switch (state) { 338
339 case 0: return PCI_D0; 339 switch (state.event) {
340 case 3: return PCI_D3hot; 340 case PM_EVENT_ON:
341 return PCI_D0;
342 case PM_EVENT_FREEZE:
343 case PM_EVENT_SUSPEND:
344 return PCI_D3hot;
341 default: 345 default:
342 printk("They asked me for state %d\n", state); 346 printk("They asked me for state %d\n", state.event);
343 BUG(); 347 BUG();
344 } 348 }
345 return PCI_D0; 349 return PCI_D0;
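
The pci_choose_state() hunk above exists because pm_message_t is now a struct, so the target D-state has to be picked from state.event instead of from a bare integer. A minimal userspace sketch of that mapping, with choose_state as a hypothetical stand-in and the PM_EVENT_* and PCI_D* values redefined locally rather than taken from kernel headers, might look like this:

#include <stdio.h>

/* Local stand-ins for the kernel's PM event and PCI power-state values. */
enum pm_event { PM_EVENT_ON, PM_EVENT_FREEZE, PM_EVENT_SUSPEND };
enum pci_power { PCI_D0 = 0, PCI_D3hot = 3 };

struct pm_message { enum pm_event event; };

static enum pci_power choose_state(struct pm_message state)
{
	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		/* the kernel BUG()s here; the sketch just falls back to D0 */
		return PCI_D0;
	}
}

int main(void)
{
	struct pm_message suspend = { .event = PM_EVENT_SUSPEND };
	printf("suspend -> D%d\n", choose_state(suspend));
	return 0;
}
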
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bb36bb69803f..140354a2aa72 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -422,6 +422,25 @@ static void __devinit quirk_via_ioapic(struct pci_dev *dev)
422DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic ); 422DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic );
423 423
424/* 424/*
 425 * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' bit,
 426 * which doubles the rate of level-triggered interrupts.
 427 * Set this bit to avoid the wasted cycles.
 428 * Otherwise harmless.
429 */
430static void __devinit quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
431{
432 u8 misc_control2;
433#define BYPASS_APIC_DEASSERT 8
434
435 pci_read_config_byte(dev, 0x5B, &misc_control2);
436 if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
437 printk(KERN_INFO "PCI: Bypassing VIA 8237 APIC De-Assert Message\n");
438 pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
439 }
440}
441DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
442
443/*
425 * The AMD io apic can hang the box when an apic irq is masked. 444 * The AMD io apic can hang the box when an apic irq is masked.
426 * We check all revs >= B0 (yet not in the pre production!) as the bug 445 * We check all revs >= B0 (yet not in the pre production!) as the bug
427 * is currently marked NoFix 446 * is currently marked NoFix
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 713c78f3a65d..49bd21702314 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -21,13 +21,21 @@
21 * between the ROM and other resources, so enabling it may disable access 21 * between the ROM and other resources, so enabling it may disable access
22 * to MMIO registers or other card memory. 22 * to MMIO registers or other card memory.
23 */ 23 */
24static void pci_enable_rom(struct pci_dev *pdev) 24static int pci_enable_rom(struct pci_dev *pdev)
25{ 25{
26 struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
27 struct pci_bus_region region;
26 u32 rom_addr; 28 u32 rom_addr;
27 29
30 if (!res->flags)
31 return -1;
32
33 pcibios_resource_to_bus(pdev, &region, res);
28 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); 34 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
29 rom_addr |= PCI_ROM_ADDRESS_ENABLE; 35 rom_addr &= ~PCI_ROM_ADDRESS_MASK;
36 rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
30 pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); 37 pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
38 return 0;
31} 39}
32 40
33/** 41/**
@@ -71,19 +79,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
71 } else { 79 } else {
72 if (res->flags & IORESOURCE_ROM_COPY) { 80 if (res->flags & IORESOURCE_ROM_COPY) {
73 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); 81 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
74 return (void __iomem *)pci_resource_start(pdev, PCI_ROM_RESOURCE); 82 return (void __iomem *)pci_resource_start(pdev,
83 PCI_ROM_RESOURCE);
75 } else { 84 } else {
76 /* assign the ROM an address if it doesn't have one */ 85 /* assign the ROM an address if it doesn't have one */
77 if (res->parent == NULL) 86 if (res->parent == NULL &&
78 pci_assign_resource(pdev, PCI_ROM_RESOURCE); 87 pci_assign_resource(pdev,PCI_ROM_RESOURCE))
79 88 return NULL;
80 start = pci_resource_start(pdev, PCI_ROM_RESOURCE); 89 start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
81 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); 90 *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
82 if (*size == 0) 91 if (*size == 0)
83 return NULL; 92 return NULL;
84 93
85 /* Enable ROM space decodes */ 94 /* Enable ROM space decodes */
86 pci_enable_rom(pdev); 95 if (pci_enable_rom(pdev))
96 return NULL;
87 } 97 }
88 } 98 }
89 99
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6d864c502a1f..6b0e6464eb39 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -40,7 +40,7 @@
40 * FIXME: IO should be max 256 bytes. However, since we may 40 * FIXME: IO should be max 256 bytes. However, since we may
41 * have a P2P bridge below a cardbus bridge, we need 4K. 41 * have a P2P bridge below a cardbus bridge, we need 4K.
42 */ 42 */
43#define CARDBUS_IO_SIZE (256) 43#define CARDBUS_IO_SIZE (4*1024)
44#define CARDBUS_MEM_SIZE (32*1024*1024) 44#define CARDBUS_MEM_SIZE (32*1024*1024)
45 45
46static void __devinit 46static void __devinit
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3e23cd461fb1..325c992f7d8f 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -246,7 +246,7 @@ static void __exit pxa2xx_pcmcia_exit(void)
246 driver_unregister(&pxa2xx_pcmcia_driver); 246 driver_unregister(&pxa2xx_pcmcia_driver);
247} 247}
248 248
249module_init(pxa2xx_pcmcia_init); 249fs_initcall(pxa2xx_pcmcia_init);
250module_exit(pxa2xx_pcmcia_exit); 250module_exit(pxa2xx_pcmcia_exit);
251 251
252MODULE_AUTHOR("Stefan Eletzhofer <stefan.eletzhofer@inquant.de> and Ian Molton <spyro@f2s.com>"); 252MODULE_AUTHOR("Stefan Eletzhofer <stefan.eletzhofer@inquant.de> and Ian Molton <spyro@f2s.com>");
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 5309734e1687..bbe69b07ce50 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -196,7 +196,7 @@ static void __exit mst_pcmcia_exit(void)
196 platform_device_unregister(mst_pcmcia_device); 196 platform_device_unregister(mst_pcmcia_device);
197} 197}
198 198
199module_init(mst_pcmcia_init); 199fs_initcall(mst_pcmcia_init);
200module_exit(mst_pcmcia_exit); 200module_exit(mst_pcmcia_exit);
201 201
202MODULE_LICENSE("GPL"); 202MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 42efe218867a..a1178a600e3c 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -20,27 +20,18 @@
20 20
21#include <asm/hardware.h> 21#include <asm/hardware.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23
24#include <asm/hardware/scoop.h> 23#include <asm/hardware/scoop.h>
25#include <asm/arch/corgi.h>
26#include <asm/arch/pxa-regs.h> 24#include <asm/arch/pxa-regs.h>
27 25
28#include "soc_common.h" 26#include "soc_common.h"
29 27
30#define NO_KEEP_VS 0x0001 28#define NO_KEEP_VS 0x0001
31 29
32static unsigned char keep_vs; 30static void sharpsl_pcmcia_init_reset(struct scoop_pcmcia_dev *scoopdev)
33static unsigned char keep_rd;
34
35static struct pcmcia_irqs irqs[] = {
36 { 0, CORGI_IRQ_GPIO_CF_CD, "PCMCIA0 CD"},
37};
38
39static void sharpsl_pcmcia_init_reset(void)
40{ 31{
41 reset_scoop(&corgiscoop_device.dev); 32 reset_scoop(scoopdev->dev);
42 keep_vs = NO_KEEP_VS; 33 scoopdev->keep_vs = NO_KEEP_VS;
43 keep_rd = 0; 34 scoopdev->keep_rd = 0;
44} 35}
45 36
46static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 37static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -71,29 +62,35 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
71 pxa_gpio_mode(GPIO57_nIOIS16_MD); 62 pxa_gpio_mode(GPIO57_nIOIS16_MD);
72 63
73 /* Register interrupts */ 64 /* Register interrupts */
74 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 65 if (scoop_devs[skt->nr].cd_irq >= 0) {
75 66 struct pcmcia_irqs cd_irq;
76 if (ret) { 67
77 printk(KERN_ERR "Request for Compact Flash IRQ failed\n"); 68 cd_irq.sock = skt->nr;
78 return ret; 69 cd_irq.irq = scoop_devs[skt->nr].cd_irq;
70 cd_irq.str = scoop_devs[skt->nr].cd_irq_str;
71 ret = soc_pcmcia_request_irqs(skt, &cd_irq, 1);
72
73 if (ret) {
74 printk(KERN_ERR "Request for Compact Flash IRQ failed\n");
75 return ret;
76 }
79 } 77 }
80 78
81 /* Enable interrupt */ 79 skt->irq = scoop_devs[skt->nr].irq;
82 write_scoop_reg(&corgiscoop_device.dev, SCOOP_IMR, 0x00C0);
83 write_scoop_reg(&corgiscoop_device.dev, SCOOP_MCR, 0x0101);
84 keep_vs = NO_KEEP_VS;
85
86 skt->irq = CORGI_IRQ_GPIO_CF_IRQ;
87 80
88 return 0; 81 return 0;
89} 82}
90 83
91static void sharpsl_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 84static void sharpsl_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
92{ 85{
93 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); 86 if (scoop_devs[skt->nr].cd_irq >= 0) {
87 struct pcmcia_irqs cd_irq;
94 88
95 /* CF_BUS_OFF */ 89 cd_irq.sock = skt->nr;
96 sharpsl_pcmcia_init_reset(); 90 cd_irq.irq = scoop_devs[skt->nr].cd_irq;
91 cd_irq.str = scoop_devs[skt->nr].cd_irq_str;
92 soc_pcmcia_free_irqs(skt, &cd_irq, 1);
93 }
97} 94}
98 95
99 96
@@ -101,31 +98,32 @@ static void sharpsl_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
101 struct pcmcia_state *state) 98 struct pcmcia_state *state)
102{ 99{
103 unsigned short cpr, csr; 100 unsigned short cpr, csr;
101 struct device *scoop = scoop_devs[skt->nr].dev;
104 102
105 cpr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_CPR); 103 cpr = read_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_CPR);
106 104
107 write_scoop_reg(&corgiscoop_device.dev, SCOOP_IRM, 0x00FF); 105 write_scoop_reg(scoop, SCOOP_IRM, 0x00FF);
108 write_scoop_reg(&corgiscoop_device.dev, SCOOP_ISR, 0x0000); 106 write_scoop_reg(scoop, SCOOP_ISR, 0x0000);
109 write_scoop_reg(&corgiscoop_device.dev, SCOOP_IRM, 0x0000); 107 write_scoop_reg(scoop, SCOOP_IRM, 0x0000);
110 csr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_CSR); 108 csr = read_scoop_reg(scoop, SCOOP_CSR);
111 if (csr & 0x0004) { 109 if (csr & 0x0004) {
112 /* card eject */ 110 /* card eject */
113 write_scoop_reg(&corgiscoop_device.dev, SCOOP_CDR, 0x0000); 111 write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
114 keep_vs = NO_KEEP_VS; 112 scoop_devs[skt->nr].keep_vs = NO_KEEP_VS;
115 } 113 }
116 else if (!(keep_vs & NO_KEEP_VS)) { 114 else if (!(scoop_devs[skt->nr].keep_vs & NO_KEEP_VS)) {
117 /* keep vs1,vs2 */ 115 /* keep vs1,vs2 */
118 write_scoop_reg(&corgiscoop_device.dev, SCOOP_CDR, 0x0000); 116 write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
119 csr |= keep_vs; 117 csr |= scoop_devs[skt->nr].keep_vs;
120 } 118 }
121 else if (cpr & 0x0003) { 119 else if (cpr & 0x0003) {
122 /* power on */ 120 /* power on */
123 write_scoop_reg(&corgiscoop_device.dev, SCOOP_CDR, 0x0000); 121 write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
124 keep_vs = (csr & 0x00C0); 122 scoop_devs[skt->nr].keep_vs = (csr & 0x00C0);
125 } 123 }
126 else { 124 else {
127 /* card detect */ 125 /* card detect */
128 write_scoop_reg(&corgiscoop_device.dev, SCOOP_CDR, 0x0002); 126 write_scoop_reg(scoop, SCOOP_CDR, 0x0002);
129 } 127 }
130 128
131 state->detect = (csr & 0x0004) ? 0 : 1; 129 state->detect = (csr & 0x0004) ? 0 : 1;
@@ -147,6 +145,7 @@ static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
147 const socket_state_t *state) 145 const socket_state_t *state)
148{ 146{
149 unsigned long flags; 147 unsigned long flags;
148 struct device *scoop = scoop_devs[skt->nr].dev;
150 149
151 unsigned short cpr, ncpr, ccr, nccr, mcr, nmcr, imr, nimr; 150 unsigned short cpr, ncpr, ccr, nccr, mcr, nmcr, imr, nimr;
152 151
@@ -166,10 +165,10 @@ static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
166 165
167 local_irq_save(flags); 166 local_irq_save(flags);
168 167
169 nmcr = (mcr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_MCR)) & ~0x0010; 168 nmcr = (mcr = read_scoop_reg(scoop, SCOOP_MCR)) & ~0x0010;
170 ncpr = (cpr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_CPR)) & ~0x0083; 169 ncpr = (cpr = read_scoop_reg(scoop, SCOOP_CPR)) & ~0x0083;
171 nccr = (ccr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_CCR)) & ~0x0080; 170 nccr = (ccr = read_scoop_reg(scoop, SCOOP_CCR)) & ~0x0080;
172 nimr = (imr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_IMR)) & ~0x003E; 171 nimr = (imr = read_scoop_reg(scoop, SCOOP_IMR)) & ~0x003E;
173 172
174 ncpr |= (state->Vcc == 33) ? 0x0001 : 173 ncpr |= (state->Vcc == 33) ? 0x0001 :
175 (state->Vcc == 50) ? 0x0002 : 0; 174 (state->Vcc == 50) ? 0x0002 : 0;
@@ -184,22 +183,22 @@ static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
184 ((skt->status&SS_WRPROT) ? 0x0008 : 0); 183 ((skt->status&SS_WRPROT) ? 0x0008 : 0);
185 184
186 if (!(ncpr & 0x0003)) { 185 if (!(ncpr & 0x0003)) {
187 keep_rd = 0; 186 scoop_devs[skt->nr].keep_rd = 0;
188 } else if (!keep_rd) { 187 } else if (!scoop_devs[skt->nr].keep_rd) {
189 if (nccr & 0x0080) 188 if (nccr & 0x0080)
190 keep_rd = 1; 189 scoop_devs[skt->nr].keep_rd = 1;
191 else 190 else
192 nccr |= 0x0080; 191 nccr |= 0x0080;
193 } 192 }
194 193
195 if (mcr != nmcr) 194 if (mcr != nmcr)
196 write_scoop_reg(&corgiscoop_device.dev, SCOOP_MCR, nmcr); 195 write_scoop_reg(scoop, SCOOP_MCR, nmcr);
197 if (cpr != ncpr) 196 if (cpr != ncpr)
198 write_scoop_reg(&corgiscoop_device.dev, SCOOP_CPR, ncpr); 197 write_scoop_reg(scoop, SCOOP_CPR, ncpr);
199 if (ccr != nccr) 198 if (ccr != nccr)
200 write_scoop_reg(&corgiscoop_device.dev, SCOOP_CCR, nccr); 199 write_scoop_reg(scoop, SCOOP_CCR, nccr);
201 if (imr != nimr) 200 if (imr != nimr)
202 write_scoop_reg(&corgiscoop_device.dev, SCOOP_IMR, nimr); 201 write_scoop_reg(scoop, SCOOP_IMR, nimr);
203 202
204 local_irq_restore(flags); 203 local_irq_restore(flags);
205 204
@@ -208,10 +207,18 @@ static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
208 207
209static void sharpsl_pcmcia_socket_init(struct soc_pcmcia_socket *skt) 208static void sharpsl_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
210{ 209{
210 sharpsl_pcmcia_init_reset(&scoop_devs[skt->nr]);
211
212 /* Enable interrupt */
213 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_IMR, 0x00C0);
214 write_scoop_reg(scoop_devs[skt->nr].dev, SCOOP_MCR, 0x0101);
215 scoop_devs[skt->nr].keep_vs = NO_KEEP_VS;
211} 216}
212 217
213static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) 218static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
214{ 219{
220 /* CF_BUS_OFF */
221 sharpsl_pcmcia_init_reset(&scoop_devs[skt->nr]);
215} 222}
216 223
217static struct pcmcia_low_level sharpsl_pcmcia_ops = { 224static struct pcmcia_low_level sharpsl_pcmcia_ops = {
@@ -223,7 +230,7 @@ static struct pcmcia_low_level sharpsl_pcmcia_ops = {
223 .socket_init = sharpsl_pcmcia_socket_init, 230 .socket_init = sharpsl_pcmcia_socket_init,
224 .socket_suspend = sharpsl_pcmcia_socket_suspend, 231 .socket_suspend = sharpsl_pcmcia_socket_suspend,
225 .first = 0, 232 .first = 0,
226 .nr = 1, 233 .nr = 0,
227}; 234};
228 235
229static struct platform_device *sharpsl_pcmcia_device; 236static struct platform_device *sharpsl_pcmcia_device;
@@ -232,12 +239,15 @@ static int __init sharpsl_pcmcia_init(void)
232{ 239{
233 int ret; 240 int ret;
234 241
242 sharpsl_pcmcia_ops.nr=scoop_num;
235 sharpsl_pcmcia_device = kmalloc(sizeof(*sharpsl_pcmcia_device), GFP_KERNEL); 243 sharpsl_pcmcia_device = kmalloc(sizeof(*sharpsl_pcmcia_device), GFP_KERNEL);
236 if (!sharpsl_pcmcia_device) 244 if (!sharpsl_pcmcia_device)
237 return -ENOMEM; 245 return -ENOMEM;
246
238 memset(sharpsl_pcmcia_device, 0, sizeof(*sharpsl_pcmcia_device)); 247 memset(sharpsl_pcmcia_device, 0, sizeof(*sharpsl_pcmcia_device));
239 sharpsl_pcmcia_device->name = "pxa2xx-pcmcia"; 248 sharpsl_pcmcia_device->name = "pxa2xx-pcmcia";
240 sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops; 249 sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops;
250 sharpsl_pcmcia_device->dev.parent=scoop_devs[0].dev;
241 251
242 ret = platform_device_register(sharpsl_pcmcia_device); 252 ret = platform_device_register(sharpsl_pcmcia_device);
243 if (ret) 253 if (ret)
@@ -257,7 +267,7 @@ static void __exit sharpsl_pcmcia_exit(void)
257 platform_device_unregister(sharpsl_pcmcia_device); 267 platform_device_unregister(sharpsl_pcmcia_device);
258} 268}
259 269
260module_init(sharpsl_pcmcia_init); 270fs_initcall(sharpsl_pcmcia_init);
261module_exit(sharpsl_pcmcia_exit); 271module_exit(sharpsl_pcmcia_exit);
262 272
263MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support"); 273MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support");
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index e98bb3d80e7c..d4ed508b38be 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -126,5 +126,5 @@ MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
126MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11x0 Socket Controller"); 126MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11x0 Socket Controller");
127MODULE_LICENSE("Dual MPL/GPL"); 127MODULE_LICENSE("Dual MPL/GPL");
128 128
129module_init(sa11x0_pcmcia_init); 129fs_initcall(sa11x0_pcmcia_init);
130module_exit(sa11x0_pcmcia_exit); 130module_exit(sa11x0_pcmcia_exit);
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index b441f43a6a55..bb90a1448a53 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -189,7 +189,7 @@ static void __exit sa1111_drv_pcmcia_exit(void)
189 sa1111_driver_unregister(&pcmcia_driver); 189 sa1111_driver_unregister(&pcmcia_driver);
190} 190}
191 191
192module_init(sa1111_drv_pcmcia_init); 192fs_initcall(sa1111_drv_pcmcia_init);
193module_exit(sa1111_drv_pcmcia_exit); 193module_exit(sa1111_drv_pcmcia_exit);
194 194
195MODULE_DESCRIPTION("SA1111 PCMCIA card socket driver"); 195MODULE_DESCRIPTION("SA1111 PCMCIA card socket driver");
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index db04ffb6f68c..59c5d968e9f6 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -189,7 +189,7 @@ static int __init sa11xx_pcmcia_init(void)
189{ 189{
190 return 0; 190 return 0;
191} 191}
192module_init(sa11xx_pcmcia_init); 192fs_initcall(sa11xx_pcmcia_init);
193 193
194static void __exit sa11xx_pcmcia_exit(void) {} 194static void __exit sa11xx_pcmcia_exit(void) {}
195 195
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
index be420bb29113..edccfa5bb400 100644
--- a/drivers/pcmcia/topic.h
+++ b/drivers/pcmcia/topic.h
@@ -101,6 +101,8 @@
101#define TOPIC97_AVS_AUDIO_CONTROL 0x02 101#define TOPIC97_AVS_AUDIO_CONTROL 0x02
102#define TOPIC97_AVS_VIDEO_CONTROL 0x01 102#define TOPIC97_AVS_VIDEO_CONTROL 0x01
103 103
104#define TOPIC_EXCA_IF_CONTROL 0x3e /* 8 bit */
105#define TOPIC_EXCA_IFC_33V_ENA 0x01
104 106
105static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff) 107static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
106{ 108{
@@ -137,4 +139,19 @@ static int topic97_override(struct yenta_socket *socket)
137 return 0; 139 return 0;
138} 140}
139 141
142
143static int topic95_override(struct yenta_socket *socket)
144{
145 u8 fctrl;
146
147 /* enable 3.3V support for 16bit cards */
148 fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
149 exca_writeb(socket, TOPIC_EXCA_IF_CONTROL, fctrl | TOPIC_EXCA_IFC_33V_ENA);
150
151 /* tell yenta to use exca registers to power 16bit cards */
152 socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
153
154 return 0;
155}
156
140#endif /* _LINUX_TOPIC_H */ 157#endif /* _LINUX_TOPIC_H */
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 62fd705203fb..0347a29f297b 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -184,22 +184,52 @@ static int yenta_get_status(struct pcmcia_socket *sock, unsigned int *value)
184 return 0; 184 return 0;
185} 185}
186 186
187static int yenta_Vcc_power(u32 control) 187static void yenta_get_power(struct yenta_socket *socket, socket_state_t *state)
188{ 188{
189 switch (control & CB_SC_VCC_MASK) { 189 if (!(cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) &&
190 case CB_SC_VCC_5V: return 50; 190 (socket->flags & YENTA_16BIT_POWER_EXCA)) {
191 case CB_SC_VCC_3V: return 33; 191 u8 reg, vcc, vpp;
192 default: return 0; 192
193 } 193 reg = exca_readb(socket, I365_POWER);
194} 194 vcc = reg & I365_VCC_MASK;
195 vpp = reg & I365_VPP1_MASK;
196 state->Vcc = state->Vpp = 0;
197
198 if (socket->flags & YENTA_16BIT_POWER_DF) {
199 if (vcc == I365_VCC_3V)
200 state->Vcc = 33;
201 if (vcc == I365_VCC_5V)
202 state->Vcc = 50;
203 if (vpp == I365_VPP1_5V)
204 state->Vpp = state->Vcc;
205 if (vpp == I365_VPP1_12V)
206 state->Vpp = 120;
207 } else {
208 if (reg & I365_VCC_5V) {
209 state->Vcc = 50;
210 if (vpp == I365_VPP1_5V)
211 state->Vpp = 50;
212 if (vpp == I365_VPP1_12V)
213 state->Vpp = 120;
214 }
215 }
216 } else {
217 u32 control;
195 218
196static int yenta_Vpp_power(u32 control) 219 control = cb_readl(socket, CB_SOCKET_CONTROL);
197{ 220
198 switch (control & CB_SC_VPP_MASK) { 221 switch (control & CB_SC_VCC_MASK) {
199 case CB_SC_VPP_12V: return 120; 222 case CB_SC_VCC_5V: state->Vcc = 50; break;
200 case CB_SC_VPP_5V: return 50; 223 case CB_SC_VCC_3V: state->Vcc = 33; break;
201 case CB_SC_VPP_3V: return 33; 224 default: state->Vcc = 0;
202 default: return 0; 225 }
226
227 switch (control & CB_SC_VPP_MASK) {
228 case CB_SC_VPP_12V: state->Vpp = 120; break;
229 case CB_SC_VPP_5V: state->Vpp = 50; break;
230 case CB_SC_VPP_3V: state->Vpp = 33; break;
231 default: state->Vpp = 0;
232 }
203 } 233 }
204} 234}
205 235
@@ -211,8 +241,7 @@ static int yenta_get_socket(struct pcmcia_socket *sock, socket_state_t *state)
211 241
212 control = cb_readl(socket, CB_SOCKET_CONTROL); 242 control = cb_readl(socket, CB_SOCKET_CONTROL);
213 243
214 state->Vcc = yenta_Vcc_power(control); 244 yenta_get_power(socket, state);
215 state->Vpp = yenta_Vpp_power(control);
216 state->io_irq = socket->io_irq; 245 state->io_irq = socket->io_irq;
217 246
218 if (cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) { 247 if (cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) {
@@ -246,19 +275,54 @@ static int yenta_get_socket(struct pcmcia_socket *sock, socket_state_t *state)
246 275
247static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state) 276static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
248{ 277{
 249	u32 reg = 0;	/* CB_SC_STPCLK? */	 278	/* some bridges require using the ExCA registers to power 16bit cards */
250 switch (state->Vcc) { 279 if (!(cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) &&
251 case 33: reg = CB_SC_VCC_3V; break; 280 (socket->flags & YENTA_16BIT_POWER_EXCA)) {
252 case 50: reg = CB_SC_VCC_5V; break; 281 u8 reg, old;
253 default: reg = 0; break; 282 reg = old = exca_readb(socket, I365_POWER);
254 } 283 reg &= ~(I365_VCC_MASK | I365_VPP1_MASK | I365_VPP2_MASK);
255 switch (state->Vpp) { 284
256 case 33: reg |= CB_SC_VPP_3V; break; 285 /* i82365SL-DF style */
257 case 50: reg |= CB_SC_VPP_5V; break; 286 if (socket->flags & YENTA_16BIT_POWER_DF) {
258 case 120: reg |= CB_SC_VPP_12V; break; 287 switch (state->Vcc) {
288 case 33: reg |= I365_VCC_3V; break;
289 case 50: reg |= I365_VCC_5V; break;
290 default: reg = 0; break;
291 }
292 switch (state->Vpp) {
293 case 33:
294 case 50: reg |= I365_VPP1_5V; break;
295 case 120: reg |= I365_VPP1_12V; break;
296 }
297 } else {
298 /* i82365SL-B style */
299 switch (state->Vcc) {
300 case 50: reg |= I365_VCC_5V; break;
301 default: reg = 0; break;
302 }
303 switch (state->Vpp) {
304 case 50: reg |= I365_VPP1_5V | I365_VPP2_5V; break;
305 case 120: reg |= I365_VPP1_12V | I365_VPP2_12V; break;
306 }
307 }
308
309 if (reg != old)
310 exca_writeb(socket, I365_POWER, reg);
311 } else {
312 u32 reg = 0; /* CB_SC_STPCLK? */
313 switch (state->Vcc) {
314 case 33: reg = CB_SC_VCC_3V; break;
315 case 50: reg = CB_SC_VCC_5V; break;
316 default: reg = 0; break;
317 }
318 switch (state->Vpp) {
319 case 33: reg |= CB_SC_VPP_3V; break;
320 case 50: reg |= CB_SC_VPP_5V; break;
321 case 120: reg |= CB_SC_VPP_12V; break;
322 }
323 if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
324 cb_writel(socket, CB_SOCKET_CONTROL, reg);
259 } 325 }
260 if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
261 cb_writel(socket, CB_SOCKET_CONTROL, reg);
262} 326}
263 327
264static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state) 328static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
@@ -751,6 +815,7 @@ enum {
751 CARDBUS_TYPE_TI12XX, 815 CARDBUS_TYPE_TI12XX,
752 CARDBUS_TYPE_TI1250, 816 CARDBUS_TYPE_TI1250,
753 CARDBUS_TYPE_RICOH, 817 CARDBUS_TYPE_RICOH,
818 CARDBUS_TYPE_TOPIC95,
754 CARDBUS_TYPE_TOPIC97, 819 CARDBUS_TYPE_TOPIC97,
755 CARDBUS_TYPE_O2MICRO, 820 CARDBUS_TYPE_O2MICRO,
756}; 821};
@@ -789,6 +854,9 @@ static struct cardbus_type cardbus_type[] = {
789 .save_state = ricoh_save_state, 854 .save_state = ricoh_save_state,
790 .restore_state = ricoh_restore_state, 855 .restore_state = ricoh_restore_state,
791 }, 856 },
857 [CARDBUS_TYPE_TOPIC95] = {
858 .override = topic95_override,
859 },
792 [CARDBUS_TYPE_TOPIC97] = { 860 [CARDBUS_TYPE_TOPIC97] = {
793 .override = topic97_override, 861 .override = topic97_override,
794 }, 862 },
@@ -1196,6 +1264,7 @@ static struct pci_device_id yenta_table [] = {
1196 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, RICOH), 1264 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, RICOH),
1197 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C478, RICOH), 1265 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C478, RICOH),
1198 1266
1267 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC95, TOPIC95),
1199 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC97, TOPIC97), 1268 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC97, TOPIC97),
1200 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC100, TOPIC97), 1269 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC100, TOPIC97),
1201 1270
diff --git a/drivers/pcmcia/yenta_socket.h b/drivers/pcmcia/yenta_socket.h
index 4e637eef2076..4e75e9e258cd 100644
--- a/drivers/pcmcia/yenta_socket.h
+++ b/drivers/pcmcia/yenta_socket.h
@@ -95,6 +95,12 @@
95 */ 95 */
96#define CB_MEM_PAGE(map) (0x40 + (map)) 96#define CB_MEM_PAGE(map) (0x40 + (map))
97 97
98
99/* control how 16bit cards are powered */
100#define YENTA_16BIT_POWER_EXCA 0x00000001
101#define YENTA_16BIT_POWER_DF 0x00000002
102
103
98struct yenta_socket; 104struct yenta_socket;
99 105
100struct cardbus_type { 106struct cardbus_type {
@@ -113,6 +119,8 @@ struct yenta_socket {
113 struct pcmcia_socket socket; 119 struct pcmcia_socket socket;
114 struct cardbus_type *type; 120 struct cardbus_type *type;
115 121
122 u32 flags;
123
116 /* for PCI interrupt probing */ 124 /* for PCI interrupt probing */
117 unsigned int probe_status; 125 unsigned int probe_status;
118 126
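
Note on the yenta changes above: the rewritten yenta_set_power() lets bridges marked with the new YENTA_16BIT_POWER_EXCA flag power 16-bit cards through the ExCA I365_POWER register, with two encodings selected by YENTA_16BIT_POWER_DF, while CardBus cards keep using CB_SOCKET_CONTROL. As a reading aid, here is a minimal standalone sketch of that Vcc/Vpp mapping distilled from the hunk; the helper name and calling convention are invented for illustration, and the EXCA/IO-card gating is inferred from the header flags rather than shown verbatim in this excerpt.

/*
 * Illustrative sketch only, not the driver's literal code: map the requested
 * Vcc/Vpp (in tenths of a volt, as socket_state_t stores them) onto the ExCA
 * I365_POWER bits, in the two styles the patch distinguishes.
 */
static u8 yenta_16bit_power_bits(u32 flags, int vcc, int vpp, u8 old)
{
	u8 reg = old & ~(I365_VCC_MASK | I365_VPP1_MASK | I365_VPP2_MASK);

	if (flags & YENTA_16BIT_POWER_DF) {	/* i82365SL-DF style */
		switch (vcc) {
		case 33: reg |= I365_VCC_3V; break;
		case 50: reg |= I365_VCC_5V; break;
		default: reg = 0; break;
		}
		switch (vpp) {
		case 33:
		case 50: reg |= I365_VPP1_5V; break;
		case 120: reg |= I365_VPP1_12V; break;
		}
	} else {				/* i82365SL-B style */
		switch (vcc) {
		case 50: reg |= I365_VCC_5V; break;
		default: reg = 0; break;
		}
		switch (vpp) {
		case 50: reg |= I365_VPP1_5V | I365_VPP2_5V; break;
		case 120: reg |= I365_VPP1_12V | I365_VPP2_12V; break;
		}
	}
	return reg;
}

In the driver itself the computed value is written back with exca_writeb(socket, I365_POWER, reg) only when it differs from the previously read register contents.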
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 6e5229e92fbc..e95ed67d4f05 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -8,13 +8,6 @@
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11
12#ifdef CONFIG_PNP_DEBUG
13 #define DEBUG
14#else
15 #undef DEBUG
16#endif
17
18#include <linux/pnp.h> 11#include <linux/pnp.h>
19#include "base.h" 12#include "base.h"
20 13
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 1d037c2a82ac..33da25f3213f 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -11,13 +11,6 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/ctype.h> 12#include <linux/ctype.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14
15#ifdef CONFIG_PNP_DEBUG
16 #define DEBUG
17#else
18 #undef DEBUG
19#endif
20
21#include <linux/pnp.h> 14#include <linux/pnp.h>
22#include "base.h" 15#include "base.h"
23 16
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 82c5edd5b9ee..beedd86800f4 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -142,17 +142,6 @@ static void isapnp_write_word(unsigned char idx, unsigned short val)
142 isapnp_write_byte(idx+1, val); 142 isapnp_write_byte(idx+1, val);
143} 143}
144 144
145static void *isapnp_alloc(long size)
146{
147 void *result;
148
149 result = kmalloc(size, GFP_KERNEL);
150 if (!result)
151 return NULL;
152 memset(result, 0, size);
153 return result;
154}
155
156static void isapnp_key(void) 145static void isapnp_key(void)
157{ 146{
158 unsigned char code = 0x6a, msb; 147 unsigned char code = 0x6a, msb;
@@ -406,7 +395,7 @@ static void isapnp_parse_id(struct pnp_dev * dev, unsigned short vendor, unsigne
406 struct pnp_id * id; 395 struct pnp_id * id;
407 if (!dev) 396 if (!dev)
408 return; 397 return;
409 id = isapnp_alloc(sizeof(struct pnp_id)); 398 id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
410 if (!id) 399 if (!id)
411 return; 400 return;
412 sprintf(id->id, "%c%c%c%x%x%x%x", 401 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -430,7 +419,7 @@ static struct pnp_dev * __init isapnp_parse_device(struct pnp_card *card, int si
430 struct pnp_dev *dev; 419 struct pnp_dev *dev;
431 420
432 isapnp_peek(tmp, size); 421 isapnp_peek(tmp, size);
433 dev = isapnp_alloc(sizeof(struct pnp_dev)); 422 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL);
434 if (!dev) 423 if (!dev)
435 return NULL; 424 return NULL;
436 dev->number = number; 425 dev->number = number;
@@ -461,7 +450,7 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
461 unsigned long bits; 450 unsigned long bits;
462 451
463 isapnp_peek(tmp, size); 452 isapnp_peek(tmp, size);
464 irq = isapnp_alloc(sizeof(struct pnp_irq)); 453 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
465 if (!irq) 454 if (!irq)
466 return; 455 return;
467 bits = (tmp[1] << 8) | tmp[0]; 456 bits = (tmp[1] << 8) | tmp[0];
@@ -485,7 +474,7 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
485 struct pnp_dma *dma; 474 struct pnp_dma *dma;
486 475
487 isapnp_peek(tmp, size); 476 isapnp_peek(tmp, size);
488 dma = isapnp_alloc(sizeof(struct pnp_dma)); 477 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
489 if (!dma) 478 if (!dma)
490 return; 479 return;
491 dma->map = tmp[0]; 480 dma->map = tmp[0];
@@ -505,7 +494,7 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
505 struct pnp_port *port; 494 struct pnp_port *port;
506 495
507 isapnp_peek(tmp, size); 496 isapnp_peek(tmp, size);
508 port = isapnp_alloc(sizeof(struct pnp_port)); 497 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
509 if (!port) 498 if (!port)
510 return; 499 return;
511 port->min = (tmp[2] << 8) | tmp[1]; 500 port->min = (tmp[2] << 8) | tmp[1];
@@ -528,7 +517,7 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
528 struct pnp_port *port; 517 struct pnp_port *port;
529 518
530 isapnp_peek(tmp, size); 519 isapnp_peek(tmp, size);
531 port = isapnp_alloc(sizeof(struct pnp_port)); 520 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
532 if (!port) 521 if (!port)
533 return; 522 return;
534 port->min = port->max = (tmp[1] << 8) | tmp[0]; 523 port->min = port->max = (tmp[1] << 8) | tmp[0];
@@ -550,7 +539,7 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
550 struct pnp_mem *mem; 539 struct pnp_mem *mem;
551 540
552 isapnp_peek(tmp, size); 541 isapnp_peek(tmp, size);
553 mem = isapnp_alloc(sizeof(struct pnp_mem)); 542 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
554 if (!mem) 543 if (!mem)
555 return; 544 return;
556 mem->min = ((tmp[2] << 8) | tmp[1]) << 8; 545 mem->min = ((tmp[2] << 8) | tmp[1]) << 8;
@@ -573,7 +562,7 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
573 struct pnp_mem *mem; 562 struct pnp_mem *mem;
574 563
575 isapnp_peek(tmp, size); 564 isapnp_peek(tmp, size);
576 mem = isapnp_alloc(sizeof(struct pnp_mem)); 565 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
577 if (!mem) 566 if (!mem)
578 return; 567 return;
579 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 568 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -595,7 +584,7 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
595 struct pnp_mem *mem; 584 struct pnp_mem *mem;
596 585
597 isapnp_peek(tmp, size); 586 isapnp_peek(tmp, size);
598 mem = isapnp_alloc(sizeof(struct pnp_mem)); 587 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
599 if (!mem) 588 if (!mem)
600 return; 589 return;
601 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 590 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -838,7 +827,7 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
838 827
839static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device) 828static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device)
840{ 829{
841 struct pnp_id * id = isapnp_alloc(sizeof(struct pnp_id)); 830 struct pnp_id * id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
842 if (!id) 831 if (!id)
843 return; 832 return;
844 sprintf(id->id, "%c%c%c%x%x%x%x", 833 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -874,7 +863,7 @@ static int __init isapnp_build_device_list(void)
874 header[4], header[5], header[6], header[7], header[8]); 863 header[4], header[5], header[6], header[7], header[8]);
875 printk(KERN_DEBUG "checksum = 0x%x\n", checksum); 864 printk(KERN_DEBUG "checksum = 0x%x\n", checksum);
876#endif 865#endif
877 if ((card = isapnp_alloc(sizeof(struct pnp_card))) == NULL) 866 if ((card = kcalloc(1, sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
878 continue; 867 continue;
879 868
880 card->number = csn; 869 card->number = csn;
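
Across the PnP code this patch retires the per-subsystem zeroing wrappers (isapnp_alloc() here, pnpacpi_kmalloc() and pnpbios_kmalloc() in the files that follow) in favour of kcalloc(1, size, GFP_KERNEL), which hands back already-zeroed memory. A minimal sketch of the before/after pattern; the helper names below are illustrative, not taken from the tree.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pnp.h>

/* Before: each backend open-coded kmalloc() plus memset(), as removed above. */
static void *pnp_zalloc_old(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (p)
		memset(p, 0, size);
	return p;
}

/* After: request zeroed memory from the allocator in a single call. */
static struct pnp_id *pnp_id_alloc_new(void)
{
	return kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
}

kzalloc() later became the conventional spelling for the single-object case, but kcalloc(1, ...) was the idiom available to this merge.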
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 6c510c19ad7d..94442ffd4aed 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -11,13 +11,6 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14
15#ifdef CONFIG_PNP_DEBUG
16 #define DEBUG
17#else
18 #undef DEBUG
19#endif
20
21#include <linux/pnp.h> 14#include <linux/pnp.h>
22#include "base.h" 15#include "base.h"
23 16
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 8655dd2e5b83..1a8915e74160 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -19,6 +19,7 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22#include <linux/config.h>
22#include <linux/acpi.h> 23#include <linux/acpi.h>
23#include <linux/pnp.h> 24#include <linux/pnp.h>
24#include <acpi/acpi_bus.h> 25#include <acpi/acpi_bus.h>
@@ -41,14 +42,6 @@ static inline int is_exclusive_device(struct acpi_device *dev)
41 return (!acpi_match_ids(dev, excluded_id_list)); 42 return (!acpi_match_ids(dev, excluded_id_list));
42} 43}
43 44
44void *pnpacpi_kmalloc(size_t size, int f)
45{
46 void *p = kmalloc(size, f);
47 if (p)
48 memset(p, 0, size);
49 return p;
50}
51
52/* 45/*
53 * Compatible Device IDs 46 * Compatible Device IDs
54 */ 47 */
@@ -143,7 +136,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
143 return 0; 136 return 0;
144 137
145 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device)); 138 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device));
146 dev = pnpacpi_kmalloc(sizeof(struct pnp_dev), GFP_KERNEL); 139 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL);
147 if (!dev) { 140 if (!dev) {
148 pnp_err("Out of memory"); 141 pnp_err("Out of memory");
149 return -ENOMEM; 142 return -ENOMEM;
@@ -173,7 +166,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
173 dev->number = num; 166 dev->number = num;
174 167
175 /* set the initial values for the PnP device */ 168 /* set the initial values for the PnP device */
176 dev_id = pnpacpi_kmalloc(sizeof(struct pnp_id), GFP_KERNEL); 169 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
177 if (!dev_id) 170 if (!dev_id)
178 goto err; 171 goto err;
179 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id); 172 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
@@ -205,8 +198,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
205 for (i = 0; i < cid_list->count; i++) { 198 for (i = 0; i < cid_list->count; i++) {
206 if (!ispnpidacpi(cid_list->id[i].value)) 199 if (!ispnpidacpi(cid_list->id[i].value))
207 continue; 200 continue;
208 dev_id = pnpacpi_kmalloc(sizeof(struct pnp_id), 201 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
209 GFP_KERNEL);
210 if (!dev_id) 202 if (!dev_id)
211 continue; 203 continue;
212 204
diff --git a/drivers/pnp/pnpacpi/pnpacpi.h b/drivers/pnp/pnpacpi/pnpacpi.h
index 76f907e09ee6..f28e2ed66fa3 100644
--- a/drivers/pnp/pnpacpi/pnpacpi.h
+++ b/drivers/pnp/pnpacpi/pnpacpi.h
@@ -5,7 +5,6 @@
5#include <linux/acpi.h> 5#include <linux/acpi.h>
6#include <linux/pnp.h> 6#include <linux/pnp.h>
7 7
8void *pnpacpi_kmalloc(size_t size, int f);
9acpi_status pnpacpi_parse_allocated_resource(acpi_handle, struct pnp_resource_table*); 8acpi_status pnpacpi_parse_allocated_resource(acpi_handle, struct pnp_resource_table*);
10acpi_status pnpacpi_parse_resource_option_data(acpi_handle, struct pnp_dev*); 9acpi_status pnpacpi_parse_resource_option_data(acpi_handle, struct pnp_dev*);
11int pnpacpi_encode_resources(struct pnp_resource_table *, struct acpi_buffer *); 10int pnpacpi_encode_resources(struct pnp_resource_table *, struct acpi_buffer *);
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 75575f6c349c..675b76a42403 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -244,7 +244,7 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
244 244
245 if (p->number_of_channels == 0) 245 if (p->number_of_channels == 0)
246 return; 246 return;
247 dma = pnpacpi_kmalloc(sizeof(struct pnp_dma), GFP_KERNEL); 247 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
248 if (!dma) 248 if (!dma)
249 return; 249 return;
250 250
@@ -300,7 +300,7 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
300 300
301 if (p->number_of_interrupts == 0) 301 if (p->number_of_interrupts == 0)
302 return; 302 return;
303 irq = pnpacpi_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL); 303 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
304 if (!irq) 304 if (!irq)
305 return; 305 return;
306 306
@@ -321,7 +321,7 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
321 321
322 if (p->number_of_interrupts == 0) 322 if (p->number_of_interrupts == 0)
323 return; 323 return;
324 irq = pnpacpi_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL); 324 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
325 if (!irq) 325 if (!irq)
326 return; 326 return;
327 327
@@ -342,7 +342,7 @@ pnpacpi_parse_port_option(struct pnp_option *option,
342 342
343 if (io->range_length == 0) 343 if (io->range_length == 0)
344 return; 344 return;
345 port = pnpacpi_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 345 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
346 if (!port) 346 if (!port)
347 return; 347 return;
348 port->min = io->min_base_address; 348 port->min = io->min_base_address;
@@ -363,7 +363,7 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
363 363
364 if (io->range_length == 0) 364 if (io->range_length == 0)
365 return; 365 return;
366 port = pnpacpi_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 366 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
367 if (!port) 367 if (!port)
368 return; 368 return;
369 port->min = port->max = io->base_address; 369 port->min = port->max = io->base_address;
@@ -382,7 +382,7 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
382 382
383 if (p->range_length == 0) 383 if (p->range_length == 0)
384 return; 384 return;
385 mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 385 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
386 if (!mem) 386 if (!mem)
387 return; 387 return;
388 mem->min = p->min_base_address; 388 mem->min = p->min_base_address;
@@ -405,7 +405,7 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
405 405
406 if (p->range_length == 0) 406 if (p->range_length == 0)
407 return; 407 return;
408 mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 408 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
409 if (!mem) 409 if (!mem)
410 return; 410 return;
411 mem->min = p->min_base_address; 411 mem->min = p->min_base_address;
@@ -428,7 +428,7 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
428 428
429 if (p->range_length == 0) 429 if (p->range_length == 0)
430 return; 430 return;
431 mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 431 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
432 if (!mem) 432 if (!mem)
433 return; 433 return;
434 mem->min = mem->max = p->range_base_address; 434 mem->min = mem->max = p->range_base_address;
@@ -612,7 +612,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
612 if (!res_cnt) 612 if (!res_cnt)
613 return -EINVAL; 613 return -EINVAL;
614 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1; 614 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
615 buffer->pointer = pnpacpi_kmalloc(buffer->length - 1, GFP_KERNEL); 615 buffer->pointer = kcalloc(1, buffer->length - 1, GFP_KERNEL);
616 if (!buffer->pointer) 616 if (!buffer->pointer)
617 return -ENOMEM; 617 return -ENOMEM;
618 pnp_dbg("Res cnt %d", res_cnt); 618 pnp_dbg("Res cnt %d", res_cnt);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 778a324028f4..f49674f07949 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -86,16 +86,6 @@ int pnp_bios_present(void)
86 86
87struct pnp_dev_node_info node_info; 87struct pnp_dev_node_info node_info;
88 88
89void *pnpbios_kmalloc(size_t size, int f)
90{
91 void *p = kmalloc( size, f );
92 if ( p == NULL )
93 printk(KERN_ERR "PnPBIOS: kmalloc() failed\n");
94 else
95 memset(p, 0, size);
96 return p;
97}
98
99/* 89/*
100 * 90 *
101 * DOCKING FUNCTIONS 91 * DOCKING FUNCTIONS
@@ -121,10 +111,10 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
121 if (!current->fs->root) { 111 if (!current->fs->root) {
122 return -EAGAIN; 112 return -EAGAIN;
123 } 113 }
124 if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) { 114 if (!(envp = (char **) kcalloc (20, sizeof (char *), GFP_KERNEL))) {
125 return -ENOMEM; 115 return -ENOMEM;
126 } 116 }
127 if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) { 117 if (!(buf = kcalloc (1, 256, GFP_KERNEL))) {
128 kfree (envp); 118 kfree (envp);
129 return -ENOMEM; 119 return -ENOMEM;
130 } 120 }
@@ -231,7 +221,7 @@ static int pnpbios_get_resources(struct pnp_dev * dev, struct pnp_resource_table
231 if(!pnpbios_is_dynamic(dev)) 221 if(!pnpbios_is_dynamic(dev))
232 return -EPERM; 222 return -EPERM;
233 223
234 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 224 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
235 if (!node) 225 if (!node)
236 return -1; 226 return -1;
237 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 227 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -254,7 +244,7 @@ static int pnpbios_set_resources(struct pnp_dev * dev, struct pnp_resource_table
254 if (!pnpbios_is_dynamic(dev)) 244 if (!pnpbios_is_dynamic(dev))
255 return -EPERM; 245 return -EPERM;
256 246
257 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 247 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
258 if (!node) 248 if (!node)
259 return -1; 249 return -1;
260 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 250 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -305,7 +295,7 @@ static int pnpbios_disable_resources(struct pnp_dev *dev)
305 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev)) 295 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
306 return -EPERM; 296 return -EPERM;
307 297
308 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 298 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
309 if (!node) 299 if (!node)
310 return -ENOMEM; 300 return -ENOMEM;
311 301
@@ -347,7 +337,7 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
347 } 337 }
348 338
349 /* set the initial values for the PnP device */ 339 /* set the initial values for the PnP device */
350 dev_id = pnpbios_kmalloc(sizeof(struct pnp_id), GFP_KERNEL); 340 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
351 if (!dev_id) 341 if (!dev_id)
352 return -1; 342 return -1;
353 pnpid32_to_pnpid(node->eisa_id,id); 343 pnpid32_to_pnpid(node->eisa_id,id);
@@ -385,7 +375,7 @@ static void __init build_devlist(void)
385 struct pnp_bios_node *node; 375 struct pnp_bios_node *node;
386 struct pnp_dev *dev; 376 struct pnp_dev *dev;
387 377
388 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 378 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
389 if (!node) 379 if (!node)
390 return; 380 return;
391 381
@@ -402,7 +392,7 @@ static void __init build_devlist(void)
402 break; 392 break;
403 } 393 }
404 nodes_got++; 394 nodes_got++;
405 dev = pnpbios_kmalloc(sizeof (struct pnp_dev), GFP_KERNEL); 395 dev = kcalloc(1, sizeof (struct pnp_dev), GFP_KERNEL);
406 if (!dev) 396 if (!dev)
407 break; 397 break;
408 if(insert_device(dev,node)<0) 398 if(insert_device(dev,node)<0)
diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h
index 01896e705ed4..d8cb2fd1f127 100644
--- a/drivers/pnp/pnpbios/pnpbios.h
+++ b/drivers/pnp/pnpbios/pnpbios.h
@@ -26,7 +26,6 @@ union pnp_bios_install_struct {
26 26
27extern int pnp_bios_present(void); 27extern int pnp_bios_present(void);
28extern int pnpbios_dont_use_current_config; 28extern int pnpbios_dont_use_current_config;
29extern void *pnpbios_kmalloc(size_t size, int f);
30 29
31extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node); 30extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
32extern int pnpbios_read_resources_from_node(struct pnp_resource_table *res, struct pnp_bios_node * node); 31extern int pnpbios_read_resources_from_node(struct pnp_resource_table *res, struct pnp_bios_node * node);
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 6bb8e1973fd4..5a3dfc97f5e9 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -87,7 +87,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
87 return -EFBIG; 87 return -EFBIG;
88 } 88 }
89 89
90 tmpbuf = pnpbios_kmalloc(escd.escd_size, GFP_KERNEL); 90 tmpbuf = kcalloc(1, escd.escd_size, GFP_KERNEL);
91 if (!tmpbuf) return -ENOMEM; 91 if (!tmpbuf) return -ENOMEM;
92 92
93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) { 93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) {
@@ -133,7 +133,7 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
133 if (pos >= 0xff) 133 if (pos >= 0xff)
134 return 0; 134 return 0;
135 135
136 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 136 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
137 if (!node) return -ENOMEM; 137 if (!node) return -ENOMEM;
138 138
139 for (nodenum=pos; nodenum<0xff; ) { 139 for (nodenum=pos; nodenum<0xff; ) {
@@ -168,7 +168,7 @@ static int proc_read_node(char *buf, char **start, off_t pos,
168 u8 nodenum = (long)data; 168 u8 nodenum = (long)data;
169 int len; 169 int len;
170 170
171 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 171 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
172 if (!node) return -ENOMEM; 172 if (!node) return -ENOMEM;
173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
174 kfree(node); 174 kfree(node);
@@ -188,7 +188,7 @@ static int proc_write_node(struct file *file, const char __user *buf,
188 u8 nodenum = (long)data; 188 u8 nodenum = (long)data;
189 int ret = count; 189 int ret = count;
190 190
191 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 191 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
192 if (!node) 192 if (!node)
193 return -ENOMEM; 193 return -ENOMEM;
194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index e305bb132c24..b0ca65b68645 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -247,7 +247,7 @@ static void
247pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option) 247pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option)
248{ 248{
249 struct pnp_mem * mem; 249 struct pnp_mem * mem;
250 mem = pnpbios_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 250 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
251 if (!mem) 251 if (!mem)
252 return; 252 return;
253 mem->min = ((p[5] << 8) | p[4]) << 8; 253 mem->min = ((p[5] << 8) | p[4]) << 8;
@@ -263,7 +263,7 @@ static void
263pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option) 263pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option)
264{ 264{
265 struct pnp_mem * mem; 265 struct pnp_mem * mem;
266 mem = pnpbios_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 266 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
267 if (!mem) 267 if (!mem)
268 return; 268 return;
269 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 269 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -279,7 +279,7 @@ static void
279pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option) 279pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option)
280{ 280{
281 struct pnp_mem * mem; 281 struct pnp_mem * mem;
282 mem = pnpbios_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 282 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
283 if (!mem) 283 if (!mem)
284 return; 284 return;
285 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 285 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -296,7 +296,7 @@ pnpbios_parse_irq_option(unsigned char *p, int size, struct pnp_option *option)
296 struct pnp_irq * irq; 296 struct pnp_irq * irq;
297 unsigned long bits; 297 unsigned long bits;
298 298
299 irq = pnpbios_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL); 299 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
300 if (!irq) 300 if (!irq)
301 return; 301 return;
302 bits = (p[2] << 8) | p[1]; 302 bits = (p[2] << 8) | p[1];
@@ -313,7 +313,7 @@ static void
313pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option) 313pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option)
314{ 314{
315 struct pnp_dma * dma; 315 struct pnp_dma * dma;
316 dma = pnpbios_kmalloc(sizeof(struct pnp_dma), GFP_KERNEL); 316 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
317 if (!dma) 317 if (!dma)
318 return; 318 return;
319 dma->map = p[1]; 319 dma->map = p[1];
@@ -326,7 +326,7 @@ static void
326pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option) 326pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option)
327{ 327{
328 struct pnp_port * port; 328 struct pnp_port * port;
329 port = pnpbios_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 329 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
330 if (!port) 330 if (!port)
331 return; 331 return;
332 port->min = (p[3] << 8) | p[2]; 332 port->min = (p[3] << 8) | p[2];
@@ -342,7 +342,7 @@ static void
342pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option) 342pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option)
343{ 343{
344 struct pnp_port * port; 344 struct pnp_port * port;
345 port = pnpbios_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 345 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
346 if (!port) 346 if (!port)
347 return; 347 return;
348 port->min = port->max = (p[2] << 8) | p[1]; 348 port->min = port->max = (p[2] << 8) | p[1];
@@ -530,7 +530,7 @@ pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_de
530 case SMALL_TAG_COMPATDEVID: /* compatible ID */ 530 case SMALL_TAG_COMPATDEVID: /* compatible ID */
531 if (len != 4) 531 if (len != 4)
532 goto len_err; 532 goto len_err;
533 dev_id = pnpbios_kmalloc(sizeof (struct pnp_id), GFP_KERNEL); 533 dev_id = kcalloc(1, sizeof (struct pnp_id), GFP_KERNEL);
534 if (!dev_id) 534 if (!dev_id)
535 return NULL; 535 return NULL;
536 memset(dev_id, 0, sizeof(struct pnp_id)); 536 memset(dev_id, 0, sizeof(struct pnp_id));
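
One detail worth noting in the compatible-ID branch above: the memset() on context line 536 predates the conversion and is now redundant, since kcalloc() already returns zeroed memory. After this patch the branch reads as sketched below (a follow-up cleanup could drop the memset):

	dev_id = kcalloc(1, sizeof (struct pnp_id), GFP_KERNEL);
	if (!dev_id)
		return NULL;
	memset(dev_id, 0, sizeof(struct pnp_id));	/* redundant: kcalloc() zeroes */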
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 596a02d7e03d..8936b0cb2ec3 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -16,13 +16,6 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19
20#ifdef CONFIG_PNP_DEBUG
21 #define DEBUG
22#else
23 #undef DEBUG
24#endif
25
26#include <linux/pnp.h> 19#include <linux/pnp.h>
27#include "base.h" 20#include "base.h"
28 21
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index b952aec49189..61fe998944bd 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -8,13 +8,6 @@
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/ctype.h> 10#include <linux/ctype.h>
11
12#ifdef CONFIG_PNP_DEBUG
13 #define DEBUG
14#else
15 #undef DEBUG
16#endif
17
18#include <linux/pnp.h> 11#include <linux/pnp.h>
19#include "base.h" 12#include "base.h"
20 13
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index dc1c89dbdb8f..6e7d7b06421d 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -49,7 +49,7 @@ config DASD_FBA
49 49
50config DASD_DIAG 50config DASD_DIAG
51 tristate "Support for DIAG access to Disks" 51 tristate "Support for DIAG access to Disks"
52 depends on DASD && ARCH_S390X = 'n' 52 depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL)
53 help 53 help
54 Select this option if you want to use Diagnose250 command to access 54 Select this option if you want to use Diagnose250 command to access
55 Disks under VM. If you are not running under VM or unsure what it is, 55 Disks under VM. If you are not running under VM or unsure what it is,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d5f53980749b..8fc891a9d47f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.165 $ 10 * $Revision: 1.167 $
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
@@ -1131,17 +1131,13 @@ __dasd_process_blk_queue(struct dasd_device * device)
1131 request_queue_t *queue; 1131 request_queue_t *queue;
1132 struct request *req; 1132 struct request *req;
1133 struct dasd_ccw_req *cqr; 1133 struct dasd_ccw_req *cqr;
1134 int nr_queued, feature_ro; 1134 int nr_queued;
1135 1135
1136 queue = device->request_queue; 1136 queue = device->request_queue;
1137 /* No queue ? Then there is nothing to do. */ 1137 /* No queue ? Then there is nothing to do. */
1138 if (queue == NULL) 1138 if (queue == NULL)
1139 return; 1139 return;
1140 1140
1141 feature_ro = dasd_get_feature(device->cdev, DASD_FEATURE_READONLY);
1142 if (feature_ro < 0) /* no devmap */
1143 return;
1144
1145 /* 1141 /*
1146 * We requeue request from the block device queue to the ccw 1142 * We requeue request from the block device queue to the ccw
1147 * queue only in two states. In state DASD_STATE_READY the 1143 * queue only in two states. In state DASD_STATE_READY the
@@ -1162,7 +1158,8 @@ __dasd_process_blk_queue(struct dasd_device * device)
1162 nr_queued < DASD_CHANQ_MAX_SIZE) { 1158 nr_queued < DASD_CHANQ_MAX_SIZE) {
1163 req = elv_next_request(queue); 1159 req = elv_next_request(queue);
1164 1160
1165 if (feature_ro && rq_data_dir(req) == WRITE) { 1161 if (device->features & DASD_FEATURE_READONLY &&
1162 rq_data_dir(req) == WRITE) {
1166 DBF_DEV_EVENT(DBF_ERR, device, 1163 DBF_DEV_EVENT(DBF_ERR, device,
1167 "Rejecting write request %p", 1164 "Rejecting write request %p",
1168 req); 1165 req);
@@ -1814,17 +1811,13 @@ dasd_generic_set_online (struct ccw_device *cdev,
1814 1811
1815{ 1812{
1816 struct dasd_device *device; 1813 struct dasd_device *device;
1817 int feature_diag, rc; 1814 int rc;
1818 1815
1819 device = dasd_create_device(cdev); 1816 device = dasd_create_device(cdev);
1820 if (IS_ERR(device)) 1817 if (IS_ERR(device))
1821 return PTR_ERR(device); 1818 return PTR_ERR(device);
1822 1819
1823 feature_diag = dasd_get_feature(cdev, DASD_FEATURE_USEDIAG); 1820 if (device->features & DASD_FEATURE_USEDIAG) {
1824 if (feature_diag < 0)
1825 return feature_diag;
1826
1827 if (feature_diag) {
1828 if (!dasd_diag_discipline_pointer) { 1821 if (!dasd_diag_discipline_pointer) {
1829 printk (KERN_WARNING 1822 printk (KERN_WARNING
1830 "dasd_generic couldn't online device %s " 1823 "dasd_generic couldn't online device %s "
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index d948566bb24a..bda896d9d788 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -11,7 +11,7 @@
11 * functions may not be called from interrupt context. In particular 11 * functions may not be called from interrupt context. In particular
12 * dasd_get_device is a no-no from interrupt context. 12 * dasd_get_device is a no-no from interrupt context.
13 * 13 *
14 * $Revision: 1.40 $ 14 * $Revision: 1.43 $
15 */ 15 */
16 16
17#include <linux/config.h> 17#include <linux/config.h>
@@ -513,6 +513,7 @@ dasd_create_device(struct ccw_device *cdev)
513 if (!devmap->device) { 513 if (!devmap->device) {
514 devmap->device = device; 514 devmap->device = device;
515 device->devindex = devmap->devindex; 515 device->devindex = devmap->devindex;
516 device->features = devmap->features;
516 get_device(&cdev->dev); 517 get_device(&cdev->dev);
517 device->cdev = cdev; 518 device->cdev = cdev;
518 rc = 0; 519 rc = 0;
@@ -643,6 +644,8 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr, const char *buf
643 devmap->features |= DASD_FEATURE_READONLY; 644 devmap->features |= DASD_FEATURE_READONLY;
644 else 645 else
645 devmap->features &= ~DASD_FEATURE_READONLY; 646 devmap->features &= ~DASD_FEATURE_READONLY;
647 if (devmap->device)
648 devmap->device->features = devmap->features;
646 if (devmap->device && devmap->device->gdp) 649 if (devmap->device && devmap->device->gdp)
647 set_disk_ro(devmap->device->gdp, ro_flag); 650 set_disk_ro(devmap->device->gdp, ro_flag);
648 spin_unlock(&dasd_devmap_lock); 651 spin_unlock(&dasd_devmap_lock);
@@ -758,7 +761,8 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
758 devmap->features |= feature; 761 devmap->features |= feature;
759 else 762 else
760 devmap->features &= ~feature; 763 devmap->features &= ~feature;
761 764 if (devmap->device)
765 devmap->device->features = devmap->features;
762 spin_unlock(&dasd_devmap_lock); 766 spin_unlock(&dasd_devmap_lock);
763 return 0; 767 return 0;
764} 768}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 127699830fa1..7478423b53bb 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -6,17 +6,18 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.42 $ 9 * $Revision: 1.49 $
10 */ 10 */
11 11
12#include <linux/config.h> 12#include <linux/config.h>
13#include <linux/stddef.h> 13#include <linux/stddef.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/hdreg.h> /* HDIO_GETGEO */ 16#include <linux/hdreg.h>
17#include <linux/bio.h> 17#include <linux/bio.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/jiffies.h>
20 21
21#include <asm/dasd.h> 22#include <asm/dasd.h>
22#include <asm/debug.h> 23#include <asm/debug.h>
@@ -28,58 +29,89 @@
28#include "dasd_int.h" 29#include "dasd_int.h"
29#include "dasd_diag.h" 30#include "dasd_diag.h"
30 31
31#ifdef PRINTK_HEADER
32#undef PRINTK_HEADER
33#endif /* PRINTK_HEADER */
34#define PRINTK_HEADER "dasd(diag):" 32#define PRINTK_HEADER "dasd(diag):"
35 33
36MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
37 35
36/* The maximum number of blocks per request (max_blocks) is dependent on the
37 * amount of storage that is available in the static I/O buffer for each
38 * device. Currently each device gets 2 pages. We want to fit two requests
39 * into the available memory so that we can immediately start the next if one
40 * finishes. */
41#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
42 sizeof(struct dasd_diag_req)) / \
43 sizeof(struct dasd_diag_bio)) / 2)
44#define DIAG_MAX_RETRIES 32
45#define DIAG_TIMEOUT 50 * HZ
46
38struct dasd_discipline dasd_diag_discipline; 47struct dasd_discipline dasd_diag_discipline;
39 48
40struct dasd_diag_private { 49struct dasd_diag_private {
41 struct dasd_diag_characteristics rdc_data; 50 struct dasd_diag_characteristics rdc_data;
42 struct dasd_diag_rw_io iob; 51 struct dasd_diag_rw_io iob;
43 struct dasd_diag_init_io iib; 52 struct dasd_diag_init_io iib;
44 unsigned int pt_block; 53 blocknum_t pt_block;
45}; 54};
46 55
47struct dasd_diag_req { 56struct dasd_diag_req {
48 int block_count; 57 unsigned int block_count;
49 struct dasd_diag_bio bio[0]; 58 struct dasd_diag_bio bio[0];
50}; 59};
51 60
61static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
62
63/* Perform DIAG250 call with block I/O parameter list iob (input and output)
64 * and function code cmd.
65 * In case of an exception return 3. Otherwise return result of bitwise OR of
66 * resulting condition code and DIAG return code. */
52static __inline__ int 67static __inline__ int
53dia250(void *iob, int cmd) 68dia250(void *iob, int cmd)
54{ 69{
70 typedef struct {
71 char _[max(sizeof (struct dasd_diag_init_io),
72 sizeof (struct dasd_diag_rw_io))];
73 } addr_type;
55 int rc; 74 int rc;
56 75
57 __asm__ __volatile__(" lhi %0,3\n" 76 __asm__ __volatile__(
58 " lr 0,%2\n" 77#ifdef CONFIG_ARCH_S390X
59 " diag 0,%1,0x250\n" 78 " lghi %0,3\n"
60 "0: ipm %0\n" 79 " lgr 0,%3\n"
61 " srl %0,28\n" 80 " diag 0,%2,0x250\n"
62 " or %0,1\n" 81 "0: ipm %0\n"
63 "1:\n" 82 " srl %0,28\n"
64#ifndef CONFIG_ARCH_S390X 83 " or %0,1\n"
65 ".section __ex_table,\"a\"\n" 84 "1:\n"
66 " .align 4\n" 85 ".section __ex_table,\"a\"\n"
67 " .long 0b,1b\n" 86 " .align 8\n"
68 ".previous\n" 87 " .quad 0b,1b\n"
88 ".previous\n"
69#else 89#else
70 ".section __ex_table,\"a\"\n" 90 " lhi %0,3\n"
71 " .align 8\n" 91 " lr 0,%3\n"
72 " .quad 0b,1b\n" 92 " diag 0,%2,0x250\n"
73 ".previous\n" 93 "0: ipm %0\n"
94 " srl %0,28\n"
95 " or %0,1\n"
96 "1:\n"
97 ".section __ex_table,\"a\"\n"
98 " .align 4\n"
99 " .long 0b,1b\n"
100 ".previous\n"
74#endif 101#endif
75 : "=&d" (rc) 102 : "=&d" (rc), "=m" (*(addr_type *) iob)
76 : "d" (cmd), "d" ((void *) __pa(iob)) 103 : "d" (cmd), "d" (iob), "m" (*(addr_type *) iob)
77 : "0", "1", "cc"); 104 : "0", "1", "cc");
78 return rc; 105 return rc;
79} 106}
80 107
108/* Initialize block I/O to DIAG device using the specified blocksize and
109 * block offset. On success, return zero and set end_block to contain the
110 * number of blocks on the device minus the specified offset. Return non-zero
111 * otherwise. */
81static __inline__ int 112static __inline__ int
82mdsk_init_io(struct dasd_device * device, int blocksize, int offset, int size) 113mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
114 blocknum_t offset, blocknum_t *end_block)
83{ 115{
84 struct dasd_diag_private *private; 116 struct dasd_diag_private *private;
85 struct dasd_diag_init_io *iib; 117 struct dasd_diag_init_io *iib;
@@ -92,14 +124,18 @@ mdsk_init_io(struct dasd_device * device, int blocksize, int offset, int size)
92 iib->dev_nr = _ccw_device_get_device_number(device->cdev); 124 iib->dev_nr = _ccw_device_get_device_number(device->cdev);
93 iib->block_size = blocksize; 125 iib->block_size = blocksize;
94 iib->offset = offset; 126 iib->offset = offset;
95 iib->start_block = 0; 127 iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
96 iib->end_block = size;
97 128
98 rc = dia250(iib, INIT_BIO); 129 rc = dia250(iib, INIT_BIO);
99 130
100 return rc & 3; 131 if ((rc & 3) == 0 && end_block)
132 *end_block = iib->end_block;
133
134 return rc;
101} 135}
102 136
137/* Remove block I/O environment for device. Return zero on success, non-zero
138 * otherwise. */
103static __inline__ int 139static __inline__ int
104mdsk_term_io(struct dasd_device * device) 140mdsk_term_io(struct dasd_device * device)
105{ 141{
@@ -112,9 +148,25 @@ mdsk_term_io(struct dasd_device * device)
112 memset(iib, 0, sizeof (struct dasd_diag_init_io)); 148 memset(iib, 0, sizeof (struct dasd_diag_init_io));
113 iib->dev_nr = _ccw_device_get_device_number(device->cdev); 149 iib->dev_nr = _ccw_device_get_device_number(device->cdev);
114 rc = dia250(iib, TERM_BIO); 150 rc = dia250(iib, TERM_BIO);
115 return rc & 3; 151 return rc;
152}
153
154/* Error recovery for failed DIAG requests - try to reestablish the DIAG
155 * environment. */
156static void
157dasd_diag_erp(struct dasd_device *device)
158{
159 int rc;
160
161 mdsk_term_io(device);
162 rc = mdsk_init_io(device, device->bp_block, 0, NULL);
163 if (rc)
164 DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, "
165 "rc=%d", rc);
116} 166}
117 167
168/* Start a given request at the device. Return zero on success, non-zero
169 * otherwise. */
118static int 170static int
119dasd_start_diag(struct dasd_ccw_req * cqr) 171dasd_start_diag(struct dasd_ccw_req * cqr)
120{ 172{
@@ -124,32 +176,66 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
124 int rc; 176 int rc;
125 177
126 device = cqr->device; 178 device = cqr->device;
179 if (cqr->retries < 0) {
180 DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p "
181 "- no retry left)", cqr);
182 cqr->status = DASD_CQR_FAILED;
183 return -EIO;
184 }
127 private = (struct dasd_diag_private *) device->private; 185 private = (struct dasd_diag_private *) device->private;
128 dreq = (struct dasd_diag_req *) cqr->data; 186 dreq = (struct dasd_diag_req *) cqr->data;
129 187
130 private->iob.dev_nr = _ccw_device_get_device_number(device->cdev); 188 private->iob.dev_nr = _ccw_device_get_device_number(device->cdev);
131 private->iob.key = 0; 189 private->iob.key = 0;
132 private->iob.flags = 2; /* do asynchronous io */ 190 private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
133 private->iob.block_count = dreq->block_count; 191 private->iob.block_count = dreq->block_count;
134 private->iob.interrupt_params = (u32)(addr_t) cqr; 192 private->iob.interrupt_params = (addr_t) cqr;
135 private->iob.bio_list = __pa(dreq->bio); 193 private->iob.bio_list = __pa(dreq->bio);
194 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
136 195
137 cqr->startclk = get_clock(); 196 cqr->startclk = get_clock();
197 cqr->starttime = jiffies;
198 cqr->retries--;
138 199
139 rc = dia250(&private->iob, RW_BIO); 200 rc = dia250(&private->iob, RW_BIO);
140 if (rc > 8) { 201 switch (rc) {
141 DEV_MESSAGE(KERN_WARNING, device, "dia250 returned CC %d", rc); 202 case 0: /* Synchronous I/O finished successfully */
142 cqr->status = DASD_CQR_ERROR; 203 cqr->stopclk = get_clock();
143 } else if (rc == 0) {
144 cqr->status = DASD_CQR_DONE; 204 cqr->status = DASD_CQR_DONE;
145 dasd_schedule_bh(device); 205 /* Indicate to calling function that only a dasd_schedule_bh()
146 } else { 206 and no timer is needed */
207 rc = -EACCES;
208 break;
209 case 8: /* Asynchronous I/O was started */
147 cqr->status = DASD_CQR_IN_IO; 210 cqr->status = DASD_CQR_IN_IO;
148 rc = 0; 211 rc = 0;
212 break;
213 default: /* Error condition */
214 cqr->status = DASD_CQR_QUEUED;
215 DEV_MESSAGE(KERN_WARNING, device, "dia250 returned rc=%d", rc);
216 dasd_diag_erp(device);
217 rc = -EIO;
218 break;
149 } 219 }
150 return rc; 220 return rc;
151} 221}
152 222
223/* Terminate given request at the device. */
224static int
225dasd_diag_term_IO(struct dasd_ccw_req * cqr)
226{
227 struct dasd_device *device;
228
229 device = cqr->device;
230 mdsk_term_io(device);
231 mdsk_init_io(device, device->bp_block, 0, NULL);
232 cqr->status = DASD_CQR_CLEAR;
233 cqr->stopclk = get_clock();
234 dasd_schedule_bh(device);
235 return 0;
236}
237
238/* Handle external interruption. */
153static void 239static void
154dasd_ext_handler(struct pt_regs *regs, __u16 code) 240dasd_ext_handler(struct pt_regs *regs, __u16 code)
155{ 241{
@@ -157,25 +243,27 @@ dasd_ext_handler(struct pt_regs *regs, __u16 code)
157 struct dasd_device *device; 243 struct dasd_device *device;
158 unsigned long long expires; 244 unsigned long long expires;
159 unsigned long flags; 245 unsigned long flags;
160 char status; 246 u8 int_code, status;
161 int ip; 247 addr_t ip;
162 248 int rc;
163 /*
164 * Get the external interruption subcode. VM stores
165 * this in the 'cpu address' field associated with
166 * the external interrupt. For diag 250 the subcode
167 * needs to be 3.
168 */
169 if ((S390_lowcore.cpu_addr & 0xff00) != 0x0300)
170 return;
171 status = *((char *) &S390_lowcore.ext_params + 5);
172 ip = S390_lowcore.ext_params;
173 249
250 int_code = *((u8 *) DASD_DIAG_LC_INT_CODE);
251 status = *((u8 *) DASD_DIAG_LC_INT_STATUS);
252 switch (int_code) {
253 case DASD_DIAG_CODE_31BIT:
254 ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT);
255 break;
256 case DASD_DIAG_CODE_64BIT:
257 ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT);
258 break;
259 default:
260 return;
261 }
174 if (!ip) { /* no intparm: unsolicited interrupt */ 262 if (!ip) { /* no intparm: unsolicited interrupt */
175 MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt"); 263 MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt");
176 return; 264 return;
177 } 265 }
178 cqr = (struct dasd_ccw_req *)(addr_t) ip; 266 cqr = (struct dasd_ccw_req *) ip;
179 device = (struct dasd_device *) cqr->device; 267 device = (struct dasd_device *) cqr->device;
180 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 268 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
181 DEV_MESSAGE(KERN_WARNING, device, 269 DEV_MESSAGE(KERN_WARNING, device,
@@ -188,6 +276,15 @@ dasd_ext_handler(struct pt_regs *regs, __u16 code)
188 /* get irq lock to modify request queue */ 276 /* get irq lock to modify request queue */
189 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 277 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
190 278
279 /* Check for a pending clear operation */
280 if (cqr->status == DASD_CQR_CLEAR) {
281 cqr->status = DASD_CQR_QUEUED;
282 dasd_clear_timer(device);
283 dasd_schedule_bh(device);
284 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
285 return;
286 }
287
191 cqr->stopclk = get_clock(); 288 cqr->stopclk = get_clock();
192 289
193 expires = 0; 290 expires = 0;
@@ -198,16 +295,22 @@ dasd_ext_handler(struct pt_regs *regs, __u16 code)
198 next = list_entry(device->ccw_queue.next, 295 next = list_entry(device->ccw_queue.next,
199 struct dasd_ccw_req, list); 296 struct dasd_ccw_req, list);
200 if (next->status == DASD_CQR_QUEUED) { 297 if (next->status == DASD_CQR_QUEUED) {
201 if (dasd_start_diag(next) == 0) 298 rc = dasd_start_diag(next);
299 if (rc == 0)
202 expires = next->expires; 300 expires = next->expires;
203 else 301 else if (rc != -EACCES)
204 DEV_MESSAGE(KERN_WARNING, device, "%s", 302 DEV_MESSAGE(KERN_WARNING, device, "%s",
205 "Interrupt fastpath " 303 "Interrupt fastpath "
206 "failed!"); 304 "failed!");
207 } 305 }
208 } 306 }
209 } else 307 } else {
210 cqr->status = DASD_CQR_FAILED; 308 cqr->status = DASD_CQR_QUEUED;
309 DEV_MESSAGE(KERN_WARNING, device, "interrupt status for "
310 "request %p was %d (%d retries left)", cqr, status,
311 cqr->retries);
312 dasd_diag_erp(device);
313 }
211 314
212 if (expires != 0) 315 if (expires != 0)
213 dasd_set_timer(device, expires); 316 dasd_set_timer(device, expires);
@@ -218,14 +321,17 @@ dasd_ext_handler(struct pt_regs *regs, __u16 code)
218 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 321 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
219} 322}
220 323
324/* Check whether device can be controlled by DIAG discipline. Return zero on
325 * success, non-zero otherwise. */
221static int 326static int
222dasd_diag_check_device(struct dasd_device *device) 327dasd_diag_check_device(struct dasd_device *device)
223{ 328{
224 struct dasd_diag_private *private; 329 struct dasd_diag_private *private;
225 struct dasd_diag_characteristics *rdc_data; 330 struct dasd_diag_characteristics *rdc_data;
226 struct dasd_diag_bio bio; 331 struct dasd_diag_bio bio;
227 long *label; 332 struct dasd_diag_cms_label *label;
228 int sb, bsize; 333 blocknum_t end_block;
334 unsigned int sb, bsize;
229 int rc; 335 int rc;
230 336
231 private = (struct dasd_diag_private *) device->private; 337 private = (struct dasd_diag_private *) device->private;
@@ -244,8 +350,11 @@ dasd_diag_check_device(struct dasd_device *device)
244 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics); 350 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
245 351
246 rc = diag210((struct diag210 *) rdc_data); 352 rc = diag210((struct diag210 *) rdc_data);
247 if (rc) 353 if (rc) {
354 DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device "
355 "information (rc=%d)", rc);
248 return -ENOTSUPP; 356 return -ENOTSUPP;
357 }
249 358
250 /* Figure out position of label block */ 359 /* Figure out position of label block */
251 switch (private->rdc_data.vdev_class) { 360 switch (private->rdc_data.vdev_class) {
@@ -256,6 +365,8 @@ dasd_diag_check_device(struct dasd_device *device)
256 private->pt_block = 2; 365 private->pt_block = 2;
257 break; 366 break;
258 default: 367 default:
368 DEV_MESSAGE(KERN_WARNING, device, "unsupported device class "
369 "(class=%d)", private->rdc_data.vdev_class);
259 return -ENOTSUPP; 370 return -ENOTSUPP;
260 } 371 }
261 372
@@ -269,15 +380,17 @@ dasd_diag_check_device(struct dasd_device *device)
269 mdsk_term_io(device); 380 mdsk_term_io(device);
270 381
271 /* figure out blocksize of device */ 382 /* figure out blocksize of device */
272 label = (long *) get_zeroed_page(GFP_KERNEL); 383 label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL);
273 if (label == NULL) { 384 if (label == NULL) {
274 DEV_MESSAGE(KERN_WARNING, device, "%s", 385 DEV_MESSAGE(KERN_WARNING, device, "%s",
275 "No memory to allocate initialization request"); 386 "No memory to allocate initialization request");
276 return -ENOMEM; 387 return -ENOMEM;
277 } 388 }
389 rc = 0;
390 end_block = 0;
278 /* try all sizes - needed for ECKD devices */ 391 /* try all sizes - needed for ECKD devices */
279 for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) { 392 for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
280 mdsk_init_io(device, bsize, 0, 64); 393 mdsk_init_io(device, bsize, 0, &end_block);
281 memset(&bio, 0, sizeof (struct dasd_diag_bio)); 394 memset(&bio, 0, sizeof (struct dasd_diag_bio));
282 bio.type = MDSK_READ_REQ; 395 bio.type = MDSK_READ_REQ;
283 bio.block_number = private->pt_block + 1; 396 bio.block_number = private->pt_block + 1;
@@ -289,37 +402,45 @@ dasd_diag_check_device(struct dasd_device *device)
289 private->iob.block_count = 1; 402 private->iob.block_count = 1;
290 private->iob.interrupt_params = 0; 403 private->iob.interrupt_params = 0;
291 private->iob.bio_list = __pa(&bio); 404 private->iob.bio_list = __pa(&bio);
292 if (dia250(&private->iob, RW_BIO) == 0) 405 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
406 rc = dia250(&private->iob, RW_BIO);
407 if (rc == 0 || rc == 3)
293 break; 408 break;
294 mdsk_term_io(device); 409 mdsk_term_io(device);
295 } 410 }
296 if (bsize <= PAGE_SIZE && label[0] == 0xc3d4e2f1) { 411 if (rc == 3) {
297 /* get formatted blocksize from label block */ 412 DEV_MESSAGE(KERN_WARNING, device, "%s", "DIAG call failed");
298 bsize = (int) label[3]; 413 rc = -EOPNOTSUPP;
299 device->blocks = label[7]; 414 } else if (rc != 0) {
415 DEV_MESSAGE(KERN_WARNING, device, "device access failed "
416 "(rc=%d)", rc);
417 rc = -EIO;
418 } else {
419 if (memcmp(label->label_id, DASD_DIAG_CMS1,
420 sizeof(DASD_DIAG_CMS1)) == 0) {
421 /* get formatted blocksize from label block */
422 bsize = (unsigned int) label->block_size;
423 device->blocks = (unsigned long) label->block_count;
424 } else
425 device->blocks = end_block;
300 device->bp_block = bsize; 426 device->bp_block = bsize;
301 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 427 device->s2b_shift = 0; /* bits to shift 512 to get a block */
302 for (sb = 512; sb < bsize; sb = sb << 1) 428 for (sb = 512; sb < bsize; sb = sb << 1)
303 device->s2b_shift++; 429 device->s2b_shift++;
304 430
305 DEV_MESSAGE(KERN_INFO, device, 431 DEV_MESSAGE(KERN_INFO, device,
306 "capacity (%dkB blks): %ldkB", 432 "(%ld B/blk): %ldkB",
307 (device->bp_block >> 10), 433 (unsigned long) device->bp_block,
308 (device->blocks << device->s2b_shift) >> 1); 434 (unsigned long) (device->blocks <<
435 device->s2b_shift) >> 1);
309 rc = 0; 436 rc = 0;
310 } else {
311 if (bsize > PAGE_SIZE)
312 DEV_MESSAGE(KERN_WARNING, device, "%s",
313 "DIAG access failed");
314 else
315 DEV_MESSAGE(KERN_WARNING, device, "%s",
316 "volume is not CMS formatted");
317 rc = -EMEDIUMTYPE;
318 } 437 }
319 free_page((long) label); 438 free_page((long) label);
320 return rc; 439 return rc;
321} 440}
322 441
442/* Fill in virtual disk geometry for device. Return zero on success, non-zero
443 * otherwise. */
323static int 444static int
324dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 445dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
325{ 446{
@@ -349,6 +470,8 @@ dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
349 return dasd_default_erp_postaction; 470 return dasd_default_erp_postaction;
350} 471}
351 472
473/* Create DASD request from block device request. Return pointer to new
474 * request on success, ERR_PTR otherwise. */
352static struct dasd_ccw_req * 475static struct dasd_ccw_req *
353dasd_diag_build_cp(struct dasd_device * device, struct request *req) 476dasd_diag_build_cp(struct dasd_device * device, struct request *req)
354{ 477{
@@ -358,9 +481,9 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
358 struct bio *bio; 481 struct bio *bio;
359 struct bio_vec *bv; 482 struct bio_vec *bv;
360 char *dst; 483 char *dst;
361 int count, datasize; 484 unsigned int count, datasize;
362 sector_t recid, first_rec, last_rec; 485 sector_t recid, first_rec, last_rec;
363 unsigned blksize, off; 486 unsigned int blksize, off;
364 unsigned char rw_cmd; 487 unsigned char rw_cmd;
365 int i; 488 int i;
366 489
@@ -413,13 +536,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
413 } 536 }
414 } 537 }
415 } 538 }
539 cqr->retries = DIAG_MAX_RETRIES;
416 cqr->buildclk = get_clock(); 540 cqr->buildclk = get_clock();
417 cqr->device = device; 541 cqr->device = device;
418 cqr->expires = 50 * HZ; /* 50 seconds */ 542 cqr->expires = DIAG_TIMEOUT;
419 cqr->status = DASD_CQR_FILLED; 543 cqr->status = DASD_CQR_FILLED;
420 return cqr; 544 return cqr;
421} 545}
422 546
547/* Release DASD request. Return non-zero if request was successful, zero
548 * otherwise. */
423static int 549static int
424dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req) 550dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
425{ 551{
@@ -430,6 +556,7 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
430 return status; 556 return status;
431} 557}
432 558
559/* Fill in IOCTL data for device. */
433static int 560static int
434dasd_diag_fill_info(struct dasd_device * device, 561dasd_diag_fill_info(struct dasd_device * device,
435 struct dasd_information2_t * info) 562 struct dasd_information2_t * info)
@@ -437,7 +564,7 @@ dasd_diag_fill_info(struct dasd_device * device,
437 struct dasd_diag_private *private; 564 struct dasd_diag_private *private;
438 565
439 private = (struct dasd_diag_private *) device->private; 566 private = (struct dasd_diag_private *) device->private;
440 info->label_block = private->pt_block; 567 info->label_block = (unsigned int) private->pt_block;
441 info->FBA_layout = 1; 568 info->FBA_layout = 1;
442 info->format = DASD_FORMAT_LDL; 569 info->format = DASD_FORMAT_LDL;
443 info->characteristics_size = sizeof (struct dasd_diag_characteristics); 570 info->characteristics_size = sizeof (struct dasd_diag_characteristics);
@@ -456,26 +583,15 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
456 "dump sense not available for DIAG data"); 583 "dump sense not available for DIAG data");
457} 584}
458 585
459/*
460 * max_blocks is dependent on the amount of storage that is available
461 * in the static io buffer for each device. Currently each device has
462 * 8192 bytes (=2 pages). dasd diag is only relevant for 31 bit.
463 * The struct dasd_ccw_req has 96 bytes, the struct dasd_diag_req has
464 * 8 bytes and the struct dasd_diag_bio for each block has 16 bytes.
465 * That makes:
466 * (8192 - 96 - 8) / 16 = 505.5 blocks at maximum.
467 * We want to fit two into the available memory so that we can immediately
468 * start the next request if one finishes off. That makes 252.75 blocks
469 * for one request. Give a little safety and the result is 240.
470 */
471struct dasd_discipline dasd_diag_discipline = { 586struct dasd_discipline dasd_diag_discipline = {
472 .owner = THIS_MODULE, 587 .owner = THIS_MODULE,
473 .name = "DIAG", 588 .name = "DIAG",
474 .ebcname = "DIAG", 589 .ebcname = "DIAG",
475 .max_blocks = 240, 590 .max_blocks = DIAG_MAX_BLOCKS,
476 .check_device = dasd_diag_check_device, 591 .check_device = dasd_diag_check_device,
477 .fill_geometry = dasd_diag_fill_geometry, 592 .fill_geometry = dasd_diag_fill_geometry,
478 .start_IO = dasd_start_diag, 593 .start_IO = dasd_start_diag,
594 .term_IO = dasd_diag_term_IO,
479 .examine_error = dasd_diag_examine_error, 595 .examine_error = dasd_diag_examine_error,
480 .erp_action = dasd_diag_erp_action, 596 .erp_action = dasd_diag_erp_action,
481 .erp_postaction = dasd_diag_erp_postaction, 597 .erp_postaction = dasd_diag_erp_postaction,
@@ -493,7 +609,7 @@ dasd_diag_init(void)
493 "Machine is not VM: %s " 609 "Machine is not VM: %s "
494 "discipline not initializing", 610 "discipline not initializing",
495 dasd_diag_discipline.name); 611 dasd_diag_discipline.name);
496 return -EINVAL; 612 return -ENODEV;
497 } 613 }
498 ASCEBC(dasd_diag_discipline.ebcname, 4); 614 ASCEBC(dasd_diag_discipline.ebcname, 4);
499 615
@@ -506,13 +622,6 @@ dasd_diag_init(void)
506static void __exit 622static void __exit
507dasd_diag_cleanup(void) 623dasd_diag_cleanup(void)
508{ 624{
509 if (!MACHINE_IS_VM) {
510 MESSAGE_LOG(KERN_INFO,
511 "Machine is not VM: %s "
512 "discipline not cleaned",
513 dasd_diag_discipline.name);
514 return;
515 }
516 unregister_external_interrupt(0x2603, dasd_ext_handler); 625 unregister_external_interrupt(0x2603, dasd_ext_handler);
517 ctl_clear_bit(0, 9); 626 ctl_clear_bit(0, 9);
518 dasd_diag_discipline_pointer = NULL; 627 dasd_diag_discipline_pointer = NULL;
@@ -520,22 +629,3 @@ dasd_diag_cleanup(void)
520 629
521module_init(dasd_diag_init); 630module_init(dasd_diag_init);
522module_exit(dasd_diag_cleanup); 631module_exit(dasd_diag_cleanup);
523
524/*
525 * Overrides for Emacs so that we follow Linus's tabbing style.
526 * Emacs will notice this stuff at the end of the file and automatically
527 * adjust the settings for this buffer only. This must remain at the end
528 * of the file.
529 * ---------------------------------------------------------------------------
530 * Local variables:
531 * c-indent-level: 4
532 * c-brace-imaginary-offset: 0
533 * c-brace-offset: -4
534 * c-argdecl-indent: 4
535 * c-label-offset: -4
536 * c-continued-statement-offset: 4
537 * c-continued-brace-offset: 0
538 * indent-tabs-mode: 1
539 * tab-width: 8
540 * End:
541 */
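
Note on the dasd_diag.c hunk above: the sizing comment removed there carried the arithmetic behind the old max_blocks value of 240, now hidden behind DIAG_MAX_BLOCKS. A quick userspace check of those figures (taken from the removed comment, not re-measured against the current structures) shows where the number comes from; illustrative sketch only, not kernel code.

#include <stdio.h>

int main(void)
{
	/* Figures from the removed comment: 8192-byte static I/O buffer,
	 * 96-byte struct dasd_ccw_req, 8-byte struct dasd_diag_req and
	 * 16 bytes of struct dasd_diag_bio per block. */
	const double buffer = 8192, ccw_req = 96, diag_req = 8, per_block = 16;
	double max_blocks = (buffer - ccw_req - diag_req) / per_block;

	printf("absolute maximum: %.2f blocks\n", max_blocks);        /* 505.50 */
	printf("two in-flight requests: %.2f blocks each\n", max_blocks / 2.0);
	/* 240 blocks per request keeps a safety margin below 252.75. */
	return 0;
}
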
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index a0c38e303979..b26eb28df4bf 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.6 $ 9 * $Revision: 1.7 $
10 */ 10 */
11 11
12#define MDSK_WRITE_REQ 0x01 12#define MDSK_WRITE_REQ 0x01
@@ -19,6 +19,18 @@
19#define DEV_CLASS_FBA 0x01 19#define DEV_CLASS_FBA 0x01
20#define DEV_CLASS_ECKD 0x04 20#define DEV_CLASS_ECKD 0x04
21 21
22#define DASD_DIAG_LC_INT_CODE 132
23#define DASD_DIAG_LC_INT_STATUS 133
24#define DASD_DIAG_LC_INT_PARM_31BIT 128
25#define DASD_DIAG_LC_INT_PARM_64BIT 4536
26#define DASD_DIAG_CODE_31BIT 0x03
27#define DASD_DIAG_CODE_64BIT 0x07
28
29#define DASD_DIAG_RWFLAG_ASYNC 0x02
30#define DASD_DIAG_RWFLAG_NOCACHE 0x01
31
32#define DASD_DIAG_FLAGA_FORMAT_64BIT 0x80
33
22struct dasd_diag_characteristics { 34struct dasd_diag_characteristics {
23 u16 dev_nr; 35 u16 dev_nr;
24 u16 rdc_len; 36 u16 rdc_len;
@@ -32,35 +44,106 @@ struct dasd_diag_characteristics {
32 u8 rdev_features; 44 u8 rdev_features;
33} __attribute__ ((packed, aligned(4))); 45} __attribute__ ((packed, aligned(4)));
34 46
47struct dasd_diag_cms_label {
48 u8 label_id[4];
49 u8 vol_id[6];
50 u16 version_id;
51 u32 block_size;
52 u32 origin_ptr;
53 u32 usable_count;
54 u32 formatted_count;
55 u32 block_count;
56 u32 used_count;
57 u32 fst_size;
58 u32 fst_count;
59 u8 format_date[6];
60 u8 reserved1[2];
61 u32 disk_offset;
62 u32 map_block;
63 u32 hblk_disp;
64 u32 user_disp;
65 u8 reserved2[4];
66 u8 segment_name[8];
67} __attribute__ ((packed));
68
69#ifdef CONFIG_ARCH_S390X
70#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
71
72typedef u64 blocknum_t;
73typedef s64 sblocknum_t;
74
75struct dasd_diag_bio {
76 u8 type;
77 u8 status;
78 u8 spare1[2];
79 u32 alet;
80 blocknum_t block_number;
81 u64 buffer;
82} __attribute__ ((packed, aligned(8)));
83
84struct dasd_diag_init_io {
85 u16 dev_nr;
86 u8 flaga;
87 u8 spare1[21];
88 u32 block_size;
89 u8 spare2[4];
90 blocknum_t offset;
91 sblocknum_t start_block;
92 blocknum_t end_block;
93 u8 spare3[8];
94} __attribute__ ((packed, aligned(8)));
95
96struct dasd_diag_rw_io {
97 u16 dev_nr;
98 u8 flaga;
99 u8 spare1[21];
100 u8 key;
101 u8 flags;
102 u8 spare2[2];
103 u32 block_count;
104 u32 alet;
105 u8 spare3[4];
106 u64 interrupt_params;
107 u64 bio_list;
108 u8 spare4[8];
109} __attribute__ ((packed, aligned(8)));
110#else /* CONFIG_ARCH_S390X */
111#define DASD_DIAG_FLAGA_DEFAULT 0x0
112
113typedef u32 blocknum_t;
114typedef s32 sblocknum_t;
115
35struct dasd_diag_bio { 116struct dasd_diag_bio {
36 u8 type; 117 u8 type;
37 u8 status; 118 u8 status;
38 u16 spare1; 119 u16 spare1;
39 u32 block_number; 120 blocknum_t block_number;
40 u32 alet; 121 u32 alet;
41 u32 buffer; 122 u32 buffer;
42} __attribute__ ((packed, aligned(8))); 123} __attribute__ ((packed, aligned(8)));
43 124
44struct dasd_diag_init_io { 125struct dasd_diag_init_io {
45 u16 dev_nr; 126 u16 dev_nr;
46 u16 spare1[11]; 127 u8 flaga;
128 u8 spare1[21];
47 u32 block_size; 129 u32 block_size;
48 u32 offset; 130 blocknum_t offset;
49 u32 start_block; 131 sblocknum_t start_block;
50 u32 end_block; 132 blocknum_t end_block;
51 u32 spare2[6]; 133 u8 spare2[24];
52} __attribute__ ((packed, aligned(8))); 134} __attribute__ ((packed, aligned(8)));
53 135
54struct dasd_diag_rw_io { 136struct dasd_diag_rw_io {
55 u16 dev_nr; 137 u16 dev_nr;
56 u16 spare1[11]; 138 u8 flaga;
139 u8 spare1[21];
57 u8 key; 140 u8 key;
58 u8 flags; 141 u8 flags;
59 u16 spare2; 142 u8 spare2[2];
60 u32 block_count; 143 u32 block_count;
61 u32 alet; 144 u32 alet;
62 u32 bio_list; 145 u32 bio_list;
63 u32 interrupt_params; 146 u32 interrupt_params;
64 u32 spare3[5]; 147 u8 spare3[20];
65} __attribute__ ((packed, aligned(8))); 148} __attribute__ ((packed, aligned(8)));
66 149#endif /* CONFIG_ARCH_S390X */
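
The reworked header selects the 31-bit or 64-bit DIAG parameter layout with a single pair of typedefs, so one set of structure definitions serves both formats. A minimal standalone sketch of the pattern follows; the struct and field names are stand-ins, and CONFIG_ARCH_S390X is assumed to come from the build system as it does in the kernel.

#include <stdint.h>
#include <stdio.h>

#ifdef CONFIG_ARCH_S390X
typedef uint64_t blocknum_t;	/* 64-bit DIAG format */
#else
typedef uint32_t blocknum_t;	/* 31-bit DIAG format */
#endif

/* One struct definition; the field width follows the selected format. */
struct diag_bio_sketch {
	uint8_t type;
	uint8_t status;
	blocknum_t block_number;
} __attribute__((packed, aligned(8)));

int main(void)
{
	printf("sizeof(struct diag_bio_sketch) = %zu\n",
	       sizeof(struct diag_bio_sketch));
	return 0;
}
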
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 96c49349701f..a601c9a33541 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -9,7 +9,7 @@
9 * 9 *
10 * gendisk related functions for the dasd driver. 10 * gendisk related functions for the dasd driver.
11 * 11 *
12 * $Revision: 1.50 $ 12 * $Revision: 1.51 $
13 */ 13 */
14 14
15#include <linux/config.h> 15#include <linux/config.h>
@@ -31,16 +31,12 @@ int
31dasd_gendisk_alloc(struct dasd_device *device) 31dasd_gendisk_alloc(struct dasd_device *device)
32{ 32{
33 struct gendisk *gdp; 33 struct gendisk *gdp;
34 int len, feature_ro; 34 int len;
35 35
36 /* Make sure the minor for this device exists. */ 36 /* Make sure the minor for this device exists. */
37 if (device->devindex >= DASD_PER_MAJOR) 37 if (device->devindex >= DASD_PER_MAJOR)
38 return -EBUSY; 38 return -EBUSY;
39 39
40 feature_ro = dasd_get_feature(device->cdev, DASD_FEATURE_READONLY);
41 if (feature_ro < 0)
42 return feature_ro;
43
44 gdp = alloc_disk(1 << DASD_PARTN_BITS); 40 gdp = alloc_disk(1 << DASD_PARTN_BITS);
45 if (!gdp) 41 if (!gdp)
46 return -ENOMEM; 42 return -ENOMEM;
@@ -75,7 +71,7 @@ dasd_gendisk_alloc(struct dasd_device *device)
75 71
76 sprintf(gdp->devfs_name, "dasd/%s", device->cdev->dev.bus_id); 72 sprintf(gdp->devfs_name, "dasd/%s", device->cdev->dev.bus_id);
77 73
78 if (feature_ro) 74 if (device->features & DASD_FEATURE_READONLY)
79 set_disk_ro(gdp, 1); 75 set_disk_ro(gdp, 1);
80 gdp->private_data = device; 76 gdp->private_data = device;
81 gdp->queue = device->request_queue; 77 gdp->queue = device->request_queue;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a9f38b235981..9fab04f3056d 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.64 $ 9 * $Revision: 1.65 $
10 */ 10 */
11 11
12#ifndef DASD_INT_H 12#ifndef DASD_INT_H
@@ -286,6 +286,7 @@ struct dasd_device {
286 unsigned int bp_block; /* bytes per block */ 286 unsigned int bp_block; /* bytes per block */
287 unsigned int s2b_shift; /* log2 (bp_block/512) */ 287 unsigned int s2b_shift; /* log2 (bp_block/512) */
288 unsigned long flags; /* per device flags */ 288 unsigned long flags; /* per device flags */
289 unsigned short features; /* copy of devmap-features (read-only!) */
289 290
290 /* Device discipline stuff. */ 291 /* Device discipline stuff. */
291 struct dasd_discipline *discipline; 292 struct dasd_discipline *discipline;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 980c555aa538..789595b3fa09 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.45 $ 10 * $Revision: 1.47 $
11 * 11 *
12 * i/o controls for the dasd driver. 12 * i/o controls for the dasd driver.
13 */ 13 */
@@ -296,7 +296,6 @@ dasd_ioctl_format(struct block_device *bdev, int no, long args)
296{ 296{
297 struct dasd_device *device; 297 struct dasd_device *device;
298 struct format_data_t fdata; 298 struct format_data_t fdata;
299 int feature_ro;
300 299
301 if (!capable(CAP_SYS_ADMIN)) 300 if (!capable(CAP_SYS_ADMIN))
302 return -EACCES; 301 return -EACCES;
@@ -308,10 +307,7 @@ dasd_ioctl_format(struct block_device *bdev, int no, long args)
308 if (device == NULL) 307 if (device == NULL)
309 return -ENODEV; 308 return -ENODEV;
310 309
311 feature_ro = dasd_get_feature(device->cdev, DASD_FEATURE_READONLY); 310 if (device->features & DASD_FEATURE_READONLY)
312 if (feature_ro < 0)
313 return feature_ro;
314 if (feature_ro)
315 return -EROFS; 311 return -EROFS;
316 if (copy_from_user(&fdata, (void __user *) args, 312 if (copy_from_user(&fdata, (void __user *) args,
317 sizeof (struct format_data_t))) 313 sizeof (struct format_data_t)))
@@ -384,7 +380,7 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args)
384 struct dasd_device *device; 380 struct dasd_device *device;
385 struct dasd_information2_t *dasd_info; 381 struct dasd_information2_t *dasd_info;
386 unsigned long flags; 382 unsigned long flags;
387 int rc, feature_ro; 383 int rc;
388 struct ccw_device *cdev; 384 struct ccw_device *cdev;
389 385
390 device = bdev->bd_disk->private_data; 386 device = bdev->bd_disk->private_data;
@@ -394,10 +390,6 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args)
394 if (!device->discipline->fill_info) 390 if (!device->discipline->fill_info)
395 return -EINVAL; 391 return -EINVAL;
396 392
397 feature_ro = dasd_get_feature(device->cdev, DASD_FEATURE_READONLY);
398 if (feature_ro < 0)
399 return feature_ro;
400
401 dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); 393 dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
402 if (dasd_info == NULL) 394 if (dasd_info == NULL)
403 return -ENOMEM; 395 return -ENOMEM;
@@ -427,7 +419,8 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args)
427 (dasd_check_blocksize(device->bp_block))) 419 (dasd_check_blocksize(device->bp_block)))
428 dasd_info->format = DASD_FORMAT_NONE; 420 dasd_info->format = DASD_FORMAT_NONE;
429 421
430 dasd_info->features |= feature_ro; 422 dasd_info->features |=
423 ((device->features & DASD_FEATURE_READONLY) != 0);
431 424
432 if (device->discipline) 425 if (device->discipline)
433 memcpy(dasd_info->type, device->discipline->name, 4); 426 memcpy(dasd_info->type, device->discipline->name, 4);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 43c34f8c5e68..fff9020d4886 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -9,7 +9,7 @@
9 * 9 *
10 * /proc interface for the dasd driver. 10 * /proc interface for the dasd driver.
11 * 11 *
12 * $Revision: 1.32 $ 12 * $Revision: 1.33 $
13 */ 13 */
14 14
15#include <linux/config.h> 15#include <linux/config.h>
@@ -55,7 +55,6 @@ dasd_devices_show(struct seq_file *m, void *v)
55{ 55{
56 struct dasd_device *device; 56 struct dasd_device *device;
57 char *substr; 57 char *substr;
58 int feature;
59 58
60 device = dasd_device_from_devindex((unsigned long) v - 1); 59 device = dasd_device_from_devindex((unsigned long) v - 1);
61 if (IS_ERR(device)) 60 if (IS_ERR(device))
@@ -79,10 +78,7 @@ dasd_devices_show(struct seq_file *m, void *v)
79 else 78 else
80 seq_printf(m, " is ????????"); 79 seq_printf(m, " is ????????");
81 /* Print devices features. */ 80 /* Print devices features. */
82 feature = dasd_get_feature(device->cdev, DASD_FEATURE_READONLY); 81 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
83 if (feature < 0)
84 return 0;
85 substr = feature ? "(ro)" : " ";
86 seq_printf(m, "%4s: ", substr); 82 seq_printf(m, "%4s: ", substr);
87 /* Print device status information. */ 83 /* Print device status information. */
88 switch ((device != NULL) ? device->state : -1) { 84 switch ((device != NULL) ? device->state : -1) {
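
The dasd_genhd, dasd_ioctl and dasd_proc hunks above all follow the same pattern: a dasd_get_feature() call, which could fail and therefore needed an error check at every call site, is replaced by a test on the features bitmask now cached in struct dasd_device (see the dasd_int.h hunk). A hedged userspace sketch of that pattern, with stand-in names and an illustrative flag value:

#include <stdio.h>

#define DASD_FEATURE_READONLY 0x01	/* value is illustrative */

struct dasd_device_sketch {
	unsigned short features;	/* copy of the devmap features */
};

/* No error path needed: the cached copy exists whenever the device does. */
static int device_is_readonly(const struct dasd_device_sketch *dev)
{
	return (dev->features & DASD_FEATURE_READONLY) != 0;
}

int main(void)
{
	struct dasd_device_sketch dev = { .features = DASD_FEATURE_READONLY };

	printf("%s\n", device_is_readonly(&dev) ? "(ro)" : "(rw)");
	return 0;
}
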
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index d5eefeaba50c..328d9cbc56a3 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -632,12 +632,9 @@ __raw3270_size_device(struct raw3270 *rp)
632 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data); 632 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
633 633
634 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request); 634 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
635 if (rc) { 635 if (rc)
636 /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */ 636 /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
637 if (rc == -EOPNOTSUPP && MACHINE_IS_VM)
638 return __raw3270_size_device_vm(rp);
639 return rc; 637 return rc;
640 }
641 638
642 /* Wait for attention interrupt. */ 639 /* Wait for attention interrupt. */
643#ifdef CONFIG_TN3270_CONSOLE 640#ifdef CONFIG_TN3270_CONSOLE
@@ -695,7 +692,10 @@ raw3270_size_device(struct raw3270 *rp)
695 down(&raw3270_init_sem); 692 down(&raw3270_init_sem);
696 rp->view = &raw3270_init_view; 693 rp->view = &raw3270_init_view;
697 raw3270_init_view.dev = rp; 694 raw3270_init_view.dev = rp;
698 rc = __raw3270_size_device(rp); 695 if (MACHINE_IS_VM)
696 rc = __raw3270_size_device_vm(rp);
697 else
698 rc = __raw3270_size_device(rp);
699 raw3270_init_view.dev = 0; 699 raw3270_init_view.dev = 0;
700 rp->view = 0; 700 rp->view = 0;
701 up(&raw3270_init_sem); 701 up(&raw3270_init_sem);
@@ -710,6 +710,12 @@ raw3270_size_device(struct raw3270 *rp)
710 rp->model = 4; 710 rp->model = 4;
711 if (rp->rows == 27 && rp->cols == 132) 711 if (rp->rows == 27 && rp->cols == 132)
712 rp->model = 5; 712 rp->model = 5;
713 } else {
714 /* Couldn't detect size. Use default model 2. */
715 rp->model = 2;
716 rp->rows = 24;
717 rp->cols = 80;
718 return 0;
713 } 719 }
714 return rc; 720 return rc;
715} 721}
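
The raw3270 change sends VM guests straight to __raw3270_size_device_vm() and, when size detection fails on the general path, falls back to a model 2 screen (24x80) rather than propagating the error. A small sketch of that fallback shape; the detection logic and model mapping below are invented for illustration.

#include <stdio.h>

struct screen {
	int model, rows, cols;
};

static int size_screen(struct screen *s, int detected_rows, int detected_cols)
{
	if (detected_rows && detected_cols) {
		s->rows = detected_rows;
		s->cols = detected_cols;
		s->model = (detected_rows == 43) ? 4 : 3;	/* simplified mapping */
		return 0;
	}
	/* Could not detect the size: use the model 2 default. */
	s->model = 2;
	s->rows = 24;
	s->cols = 80;
	return 0;
}

int main(void)
{
	struct screen s;

	size_screen(&s, 0, 0);
	printf("model %d, %dx%d\n", s.model, s.rows, s.cols);
	return 0;
}
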
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ea813bdce1d6..185bc73c3ecd 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * $Revision: 1.134 $ 4 * $Revision: 1.135 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -815,8 +815,9 @@ __clear_subchannel_easy(unsigned int schid)
815 struct tpi_info ti; 815 struct tpi_info ti;
816 816
817 if (tpi(&ti)) { 817 if (tpi(&ti)) {
818 tsch(schid, (struct irb *)__LC_IRB); 818 tsch(ti.irq, (struct irb *)__LC_IRB);
819 return 0; 819 if (ti.irq == schid)
820 return 0;
820 } 821 }
821 udelay(100); 822 udelay(100);
822 } 823 }
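
The __clear_subchannel_easy() fix above is subtle: the pending interrupt reported by tpi() may belong to a different subchannel, so the loop must consume it with tsch(ti.irq) in every case, but only stop once the interrupt is for the subchannel being cleared. The userspace sketch below stubs tpi()/tsch() purely to show that control flow; it is not the cio code.

#include <stdio.h>

struct tpi_info { unsigned int irq; };

/* Stubs: pretend interrupts for subchannels 7 and 3 are pending, in that order. */
static unsigned int pending[] = { 3, 7 };
static int npending = 2;

static int tpi(struct tpi_info *ti)
{
	if (npending == 0)
		return 0;
	ti->irq = pending[--npending];
	return 1;
}

static void tsch(unsigned int irq)
{
	printf("drained interrupt for subchannel %u\n", irq);
}

static int clear_subchannel(unsigned int schid)
{
	struct tpi_info ti;
	int retry;

	for (retry = 0; retry < 20; retry++) {
		if (tpi(&ti)) {
			tsch(ti.irq);		/* always consume what is pending */
			if (ti.irq == schid)	/* ...but only finish on our own IRQ */
				return 0;
		}
	}
	return -1;
}

int main(void)
{
	return clear_subchannel(3) ? 1 : 0;
}
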
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ee7a05e0c3ba..fbe4202a3f6f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -13,7 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14 14
15#include <asm/ccwdev.h> 15#include <asm/ccwdev.h>
16#include <asm/qdio.h> 16#include <asm/cio.h>
17 17
18#include "cio.h" 18#include "cio.h"
19#include "cio_debug.h" 19#include "cio_debug.h"
@@ -21,7 +21,6 @@
21#include "device.h" 21#include "device.h"
22#include "chsc.h" 22#include "chsc.h"
23#include "ioasm.h" 23#include "ioasm.h"
24#include "qdio.h"
25 24
26int 25int
27device_is_online(struct subchannel *sch) 26device_is_online(struct subchannel *sch)
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 02d01a0de16c..ad3fe5aeb663 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/device_ops.c 2 * drivers/s390/cio/device_ops.c
3 * 3 *
4 * $Revision: 1.56 $ 4 * $Revision: 1.57 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -19,14 +19,12 @@
19 19
20#include <asm/ccwdev.h> 20#include <asm/ccwdev.h>
21#include <asm/idals.h> 21#include <asm/idals.h>
22#include <asm/qdio.h>
23 22
24#include "cio.h" 23#include "cio.h"
25#include "cio_debug.h" 24#include "cio_debug.h"
26#include "css.h" 25#include "css.h"
27#include "chsc.h" 26#include "chsc.h"
28#include "device.h" 27#include "device.h"
29#include "qdio.h"
30 28
31int 29int
32ccw_device_set_options(struct ccw_device *cdev, unsigned long flags) 30ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index c874607d9a80..45480a2bc4c0 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -21,7 +21,7 @@ struct tpi_info {
21 * Some S390 specific IO instructions as inline 21 * Some S390 specific IO instructions as inline
22 */ 22 */
23 23
24extern __inline__ int stsch(int irq, volatile struct schib *addr) 24static inline int stsch(int irq, volatile struct schib *addr)
25{ 25{
26 int ccode; 26 int ccode;
27 27
@@ -36,7 +36,7 @@ extern __inline__ int stsch(int irq, volatile struct schib *addr)
36 return ccode; 36 return ccode;
37} 37}
38 38
39extern __inline__ int msch(int irq, volatile struct schib *addr) 39static inline int msch(int irq, volatile struct schib *addr)
40{ 40{
41 int ccode; 41 int ccode;
42 42
@@ -51,7 +51,7 @@ extern __inline__ int msch(int irq, volatile struct schib *addr)
51 return ccode; 51 return ccode;
52} 52}
53 53
54extern __inline__ int msch_err(int irq, volatile struct schib *addr) 54static inline int msch_err(int irq, volatile struct schib *addr)
55{ 55{
56 int ccode; 56 int ccode;
57 57
@@ -79,7 +79,7 @@ extern __inline__ int msch_err(int irq, volatile struct schib *addr)
79 return ccode; 79 return ccode;
80} 80}
81 81
82extern __inline__ int tsch(int irq, volatile struct irb *addr) 82static inline int tsch(int irq, volatile struct irb *addr)
83{ 83{
84 int ccode; 84 int ccode;
85 85
@@ -94,7 +94,7 @@ extern __inline__ int tsch(int irq, volatile struct irb *addr)
94 return ccode; 94 return ccode;
95} 95}
96 96
97extern __inline__ int tpi( volatile struct tpi_info *addr) 97static inline int tpi( volatile struct tpi_info *addr)
98{ 98{
99 int ccode; 99 int ccode;
100 100
@@ -108,7 +108,7 @@ extern __inline__ int tpi( volatile struct tpi_info *addr)
108 return ccode; 108 return ccode;
109} 109}
110 110
111extern __inline__ int ssch(int irq, volatile struct orb *addr) 111static inline int ssch(int irq, volatile struct orb *addr)
112{ 112{
113 int ccode; 113 int ccode;
114 114
@@ -123,7 +123,7 @@ extern __inline__ int ssch(int irq, volatile struct orb *addr)
123 return ccode; 123 return ccode;
124} 124}
125 125
126extern __inline__ int rsch(int irq) 126static inline int rsch(int irq)
127{ 127{
128 int ccode; 128 int ccode;
129 129
@@ -138,7 +138,7 @@ extern __inline__ int rsch(int irq)
138 return ccode; 138 return ccode;
139} 139}
140 140
141extern __inline__ int csch(int irq) 141static inline int csch(int irq)
142{ 142{
143 int ccode; 143 int ccode;
144 144
@@ -153,7 +153,7 @@ extern __inline__ int csch(int irq)
153 return ccode; 153 return ccode;
154} 154}
155 155
156extern __inline__ int hsch(int irq) 156static inline int hsch(int irq)
157{ 157{
158 int ccode; 158 int ccode;
159 159
@@ -168,7 +168,7 @@ extern __inline__ int hsch(int irq)
168 return ccode; 168 return ccode;
169} 169}
170 170
171extern __inline__ int xsch(int irq) 171static inline int xsch(int irq)
172{ 172{
173 int ccode; 173 int ccode;
174 174
@@ -183,7 +183,7 @@ extern __inline__ int xsch(int irq)
183 return ccode; 183 return ccode;
184} 184}
185 185
186extern __inline__ int chsc(void *chsc_area) 186static inline int chsc(void *chsc_area)
187{ 187{
188 int cc; 188 int cc;
189 189
@@ -198,7 +198,7 @@ extern __inline__ int chsc(void *chsc_area)
198 return cc; 198 return cc;
199} 199}
200 200
201extern __inline__ int iac( void) 201static inline int iac( void)
202{ 202{
203 int ccode; 203 int ccode;
204 204
@@ -210,7 +210,7 @@ extern __inline__ int iac( void)
210 return ccode; 210 return ccode;
211} 211}
212 212
213extern __inline__ int rchp(int chpid) 213static inline int rchp(int chpid)
214{ 214{
215 int ccode; 215 int ccode;
216 216
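
The ioasm.h conversion from extern __inline__ to static inline is the standard cleanup: static inline never emits an external symbol, so a header included from many translation units cannot produce duplicate definitions, and the behaviour no longer depends on gcc's legacy inline semantics. A trivial standalone illustration of the declaration style (not the actual I/O wrappers):

#include <stdio.h>

/* In a real header this would sit next to the asm wrappers; it lives in
 * the same file here purely so the example compiles on its own. */
static inline int condition_code_ok(int ccode)
{
	return ccode == 0;
}

int main(void)
{
	printf("%d\n", condition_code_ok(0));
	return 0;
}
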
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
index bcabac7a7c46..e319e78b5ea2 100644
--- a/drivers/s390/crypto/z90common.h
+++ b/drivers/s390/crypto/z90common.h
@@ -27,7 +27,7 @@
27#ifndef _Z90COMMON_H_ 27#ifndef _Z90COMMON_H_
28#define _Z90COMMON_H_ 28#define _Z90COMMON_H_
29 29
30#define VERSION_Z90COMMON_H "$Revision: 1.16 $" 30#define VERSION_Z90COMMON_H "$Revision: 1.17 $"
31 31
32 32
33#define RESPBUFFSIZE 256 33#define RESPBUFFSIZE 256
@@ -164,5 +164,4 @@ struct CPRBX {
164#define UMIN(a,b) ((a) < (b) ? (a) : (b)) 164#define UMIN(a,b) ((a) < (b) ? (a) : (b))
165#define IS_EVEN(x) ((x) == (2 * ((x) / 2))) 165#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
166 166
167
168#endif 167#endif
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
index beb6a5e0da22..c215e0889736 100644
--- a/drivers/s390/crypto/z90hardware.c
+++ b/drivers/s390/crypto/z90hardware.c
@@ -32,7 +32,7 @@
32#include "z90crypt.h" 32#include "z90crypt.h"
33#include "z90common.h" 33#include "z90common.h"
34 34
35#define VERSION_Z90HARDWARE_C "$Revision: 1.33 $" 35#define VERSION_Z90HARDWARE_C "$Revision: 1.34 $"
36 36
37char z90hardware_version[] __initdata = 37char z90hardware_version[] __initdata =
38 "z90hardware.o (" VERSION_Z90HARDWARE_C "/" 38 "z90hardware.o (" VERSION_Z90HARDWARE_C "/"
@@ -283,48 +283,6 @@ struct type6_msg {
283 struct CPRB CPRB; 283 struct CPRB CPRB;
284}; 284};
285 285
286union request_msg {
287 union type4_msg t4msg;
288 struct type6_msg t6msg;
289};
290
291struct request_msg_ext {
292 int q_nr;
293 unsigned char *psmid;
294 union request_msg reqMsg;
295};
296
297struct type82_hdr {
298 unsigned char reserved1;
299 unsigned char type;
300 unsigned char reserved2[2];
301 unsigned char reply_code;
302 unsigned char reserved3[3];
303};
304
305#define TYPE82_RSP_CODE 0x82
306
307#define REPLY_ERROR_MACHINE_FAILURE 0x10
308#define REPLY_ERROR_PREEMPT_FAILURE 0x12
309#define REPLY_ERROR_CHECKPT_FAILURE 0x14
310#define REPLY_ERROR_MESSAGE_TYPE 0x20
311#define REPLY_ERROR_INVALID_COMM_CD 0x21
312#define REPLY_ERROR_INVALID_MSG_LEN 0x23
313#define REPLY_ERROR_RESERVD_FIELD 0x24
314#define REPLY_ERROR_FORMAT_FIELD 0x29
315#define REPLY_ERROR_INVALID_COMMAND 0x30
316#define REPLY_ERROR_MALFORMED_MSG 0x40
317#define REPLY_ERROR_RESERVED_FIELDO 0x50
318#define REPLY_ERROR_WORD_ALIGNMENT 0x60
319#define REPLY_ERROR_MESSAGE_LENGTH 0x80
320#define REPLY_ERROR_OPERAND_INVALID 0x82
321#define REPLY_ERROR_OPERAND_SIZE 0x84
322#define REPLY_ERROR_EVEN_MOD_IN_OPND 0x85
323#define REPLY_ERROR_RESERVED_FIELD 0x88
324#define REPLY_ERROR_TRANSPORT_FAIL 0x90
325#define REPLY_ERROR_PACKET_TRUNCATED 0xA0
326#define REPLY_ERROR_ZERO_BUFFER_LEN 0xB0
327
328struct type86_hdr { 286struct type86_hdr {
329 unsigned char reserved1; 287 unsigned char reserved1;
330 unsigned char type; 288 unsigned char type;
@@ -338,7 +296,7 @@ struct type86_hdr {
338#define TYPE86_FMT2 0x02 296#define TYPE86_FMT2 0x02
339 297
340struct type86_fmt2_msg { 298struct type86_fmt2_msg {
341 struct type86_hdr hdr; 299 struct type86_hdr header;
342 unsigned char reserved[4]; 300 unsigned char reserved[4];
343 unsigned char apfs[4]; 301 unsigned char apfs[4];
344 unsigned int count1; 302 unsigned int count1;
@@ -538,6 +496,8 @@ static struct function_and_rules_block static_pke_function_and_rulesX = {
538 {'M','R','P',' ',' ',' ',' ',' '} 496 {'M','R','P',' ',' ',' ',' ',' '}
539}; 497};
540 498
499static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
500
541struct T6_keyBlock_hdrX { 501struct T6_keyBlock_hdrX {
542 unsigned short blen; 502 unsigned short blen;
543 unsigned short ulen; 503 unsigned short ulen;
@@ -688,9 +648,38 @@ static struct cca_public_sec static_cca_pub_sec = {
688#define RESPONSE_CPRB_SIZE 0x000006B8 648#define RESPONSE_CPRB_SIZE 0x000006B8
689#define RESPONSE_CPRBX_SIZE 0x00000724 649#define RESPONSE_CPRBX_SIZE 0x00000724
690 650
691#define CALLER_HEADER 12 651struct error_hdr {
652 unsigned char reserved1;
653 unsigned char type;
654 unsigned char reserved2[2];
655 unsigned char reply_code;
656 unsigned char reserved3[3];
657};
692 658
693static unsigned char static_PKE_function_code[2] = {0x50, 0x4B}; 659#define TYPE82_RSP_CODE 0x82
660
661#define REP82_ERROR_MACHINE_FAILURE 0x10
662#define REP82_ERROR_PREEMPT_FAILURE 0x12
663#define REP82_ERROR_CHECKPT_FAILURE 0x14
664#define REP82_ERROR_MESSAGE_TYPE 0x20
665#define REP82_ERROR_INVALID_COMM_CD 0x21
666#define REP82_ERROR_INVALID_MSG_LEN 0x23
667#define REP82_ERROR_RESERVD_FIELD 0x24
668#define REP82_ERROR_FORMAT_FIELD 0x29
669#define REP82_ERROR_INVALID_COMMAND 0x30
670#define REP82_ERROR_MALFORMED_MSG 0x40
671#define REP82_ERROR_RESERVED_FIELDO 0x50
672#define REP82_ERROR_WORD_ALIGNMENT 0x60
673#define REP82_ERROR_MESSAGE_LENGTH 0x80
674#define REP82_ERROR_OPERAND_INVALID 0x82
675#define REP82_ERROR_OPERAND_SIZE 0x84
676#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
677#define REP82_ERROR_RESERVED_FIELD 0x88
678#define REP82_ERROR_TRANSPORT_FAIL 0x90
679#define REP82_ERROR_PACKET_TRUNCATED 0xA0
680#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
681
682#define CALLER_HEADER 12
694 683
695static inline int 684static inline int
696testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat) 685testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
@@ -1212,9 +1201,9 @@ send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
1212 struct ap_status_word stat_word; 1201 struct ap_status_word stat_word;
1213 enum devstat stat; 1202 enum devstat stat;
1214 int ccode; 1203 int ccode;
1204 u32 *q_nr_p = (u32 *)msg_ext;
1215 1205
1216 ((struct request_msg_ext *) msg_ext)->q_nr = 1206 *q_nr_p = (dev_nr << SKIP_BITL) + cdx;
1217 (dev_nr << SKIP_BITL) + cdx;
1218 PDEBUG("msg_len passed to sen: %d\n", msg_len); 1207 PDEBUG("msg_len passed to sen: %d\n", msg_len);
1219 PDEBUG("q number passed to sen: %02x%02x%02x%02x\n", 1208 PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
1220 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]); 1209 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
@@ -2104,7 +2093,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2104 int *respbufflen_p, unsigned char *resp_buff) 2093 int *respbufflen_p, unsigned char *resp_buff)
2105{ 2094{
2106 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; 2095 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2107 struct type82_hdr *t82h_p = (struct type82_hdr *) response; 2096 struct error_hdr *errh_p = (struct error_hdr *) response;
2108 struct type84_hdr *t84h_p = (struct type84_hdr *) response; 2097 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2109 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; 2098 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2110 int reply_code, service_rc, service_rs, src_l; 2099 int reply_code, service_rc, service_rs, src_l;
@@ -2117,12 +2106,13 @@ convert_response(unsigned char *response, unsigned char *buffer,
2117 service_rc = 0; 2106 service_rc = 0;
2118 service_rs = 0; 2107 service_rs = 0;
2119 src_l = 0; 2108 src_l = 0;
2120 switch (t82h_p->type) { 2109 switch (errh_p->type) {
2121 case TYPE82_RSP_CODE: 2110 case TYPE82_RSP_CODE:
2122 reply_code = t82h_p->reply_code; 2111 reply_code = errh_p->reply_code;
2123 src_p = (unsigned char *)t82h_p; 2112 src_p = (unsigned char *)errh_p;
2124 PRINTK("Hardware error: Type 82 Message Header: " 2113 PRINTK("Hardware error: Type %02X Message Header: "
2125 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 2114 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2115 errh_p->type,
2126 src_p[0], src_p[1], src_p[2], src_p[3], 2116 src_p[0], src_p[1], src_p[2], src_p[3],
2127 src_p[4], src_p[5], src_p[6], src_p[7]); 2117 src_p[4], src_p[5], src_p[6], src_p[7]);
2128 break; 2118 break;
@@ -2131,7 +2121,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2131 src_p = response + (int)t84h_p->len - src_l; 2121 src_p = response + (int)t84h_p->len - src_l;
2132 break; 2122 break;
2133 case TYPE86_RSP_CODE: 2123 case TYPE86_RSP_CODE:
2134 reply_code = t86m_p->hdr.reply_code; 2124 reply_code = t86m_p->header.reply_code;
2135 if (reply_code != 0) 2125 if (reply_code != 0)
2136 break; 2126 break;
2137 cprb_p = (struct CPRB *) 2127 cprb_p = (struct CPRB *)
@@ -2143,6 +2133,9 @@ convert_response(unsigned char *response, unsigned char *buffer,
2143 le2toI(cprb_p->ccp_rscode, &service_rs); 2133 le2toI(cprb_p->ccp_rscode, &service_rs);
2144 if ((service_rc == 8) && (service_rs == 66)) 2134 if ((service_rc == 8) && (service_rs == 66))
2145 PDEBUG("Bad block format on PCICC\n"); 2135 PDEBUG("Bad block format on PCICC\n");
2136 else if ((service_rc == 8) && (service_rs == 65))
2137 PDEBUG("Probably an even modulus on "
2138 "PCICC\n");
2146 else if ((service_rc == 8) && (service_rs == 770)) { 2139 else if ((service_rc == 8) && (service_rs == 770)) {
2147 PDEBUG("Invalid key length on PCICC\n"); 2140 PDEBUG("Invalid key length on PCICC\n");
2148 unset_ext_bitlens(); 2141 unset_ext_bitlens();
@@ -2155,7 +2148,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2155 return REC_USE_PCICA; 2148 return REC_USE_PCICA;
2156 } 2149 }
2157 else 2150 else
2158 PRINTK("service rc/rs: %d/%d\n", 2151 PRINTK("service rc/rs (PCICC): %d/%d\n",
2159 service_rc, service_rs); 2152 service_rc, service_rs);
2160 return REC_OPERAND_INV; 2153 return REC_OPERAND_INV;
2161 } 2154 }
@@ -2169,7 +2162,10 @@ convert_response(unsigned char *response, unsigned char *buffer,
2169 if (service_rc != 0) { 2162 if (service_rc != 0) {
2170 service_rs = (int) cprbx_p->ccp_rscode; 2163 service_rs = (int) cprbx_p->ccp_rscode;
2171 if ((service_rc == 8) && (service_rs == 66)) 2164 if ((service_rc == 8) && (service_rs == 66))
2172 PDEBUG("Bad block format on PCXICC\n"); 2165 PDEBUG("Bad block format on PCIXCC\n");
2166 else if ((service_rc == 8) && (service_rs == 65))
2167 PDEBUG("Probably an even modulus on "
2168 "PCIXCC\n");
2173 else if ((service_rc == 8) && (service_rs == 770)) { 2169 else if ((service_rc == 8) && (service_rs == 770)) {
2174 PDEBUG("Invalid key length on PCIXCC\n"); 2170 PDEBUG("Invalid key length on PCIXCC\n");
2175 unset_ext_bitlens(); 2171 unset_ext_bitlens();
@@ -2182,7 +2178,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2182 return REC_USE_PCICA; 2178 return REC_USE_PCICA;
2183 } 2179 }
2184 else 2180 else
2185 PRINTK("service rc/rs: %d/%d\n", 2181 PRINTK("service rc/rs (PCIXCC): %d/%d\n",
2186 service_rc, service_rs); 2182 service_rc, service_rs);
2187 return REC_OPERAND_INV; 2183 return REC_OPERAND_INV;
2188 } 2184 }
@@ -2195,20 +2191,25 @@ convert_response(unsigned char *response, unsigned char *buffer,
2195 } 2191 }
2196 break; 2192 break;
2197 default: 2193 default:
2194 src_p = (unsigned char *)errh_p;
2195 PRINTK("Unrecognized Message Header: "
2196 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2197 src_p[0], src_p[1], src_p[2], src_p[3],
2198 src_p[4], src_p[5], src_p[6], src_p[7]);
2198 return REC_BAD_MESSAGE; 2199 return REC_BAD_MESSAGE;
2199 } 2200 }
2200 2201
2201 if (reply_code) 2202 if (reply_code)
2202 switch (reply_code) { 2203 switch (reply_code) {
2203 case REPLY_ERROR_OPERAND_INVALID: 2204 case REP82_ERROR_OPERAND_INVALID:
2204 return REC_OPERAND_INV; 2205 return REC_OPERAND_INV;
2205 case REPLY_ERROR_OPERAND_SIZE: 2206 case REP82_ERROR_OPERAND_SIZE:
2206 return REC_OPERAND_SIZE; 2207 return REC_OPERAND_SIZE;
2207 case REPLY_ERROR_EVEN_MOD_IN_OPND: 2208 case REP82_ERROR_EVEN_MOD_IN_OPND:
2208 return REC_EVEN_MOD; 2209 return REC_EVEN_MOD;
2209 case REPLY_ERROR_MESSAGE_TYPE: 2210 case REP82_ERROR_MESSAGE_TYPE:
2210 return WRONG_DEVICE_TYPE; 2211 return WRONG_DEVICE_TYPE;
2211 case REPLY_ERROR_TRANSPORT_FAIL: 2212 case REP82_ERROR_TRANSPORT_FAIL:
2212 PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n", 2213 PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
2213 t86m_p->apfs[0], t86m_p->apfs[1], 2214 t86m_p->apfs[0], t86m_p->apfs[1],
2214 t86m_p->apfs[2], t86m_p->apfs[3]); 2215 t86m_p->apfs[2], t86m_p->apfs[3]);
@@ -2229,7 +2230,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2229 PDEBUG("Length returned = %d\n", src_l); 2230 PDEBUG("Length returned = %d\n", src_l);
2230 tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l; 2231 tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
2231 memcpy(tgt_p, src_p, src_l); 2232 memcpy(tgt_p, src_p, src_l);
2232 if ((t82h_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) { 2233 if ((errh_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
2233 memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l); 2234 memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
2234 if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l)) 2235 if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
2235 return REC_INVALID_PAD; 2236 return REC_INVALID_PAD;
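
The z90hardware.c rework relies on every reply type starting with the same eight-byte prefix, so a single generic error_hdr read from the front of the response buffer is enough to pick the message type and reply code before any type-specific handling. A hedged userspace sketch of that dispatch; the buffer contents below are invented, while the field layout follows the header added in the diff.

#include <stdio.h>
#include <string.h>

struct error_hdr {
	unsigned char reserved1;
	unsigned char type;
	unsigned char reserved2[2];
	unsigned char reply_code;
	unsigned char reserved3[3];
};

#define TYPE82_RSP_CODE			0x82
#define REP82_ERROR_MACHINE_FAILURE	0x10

int main(void)
{
	/* Fabricated response: type 0x82, reply code 0x10. */
	unsigned char response[8] = { 0x00, TYPE82_RSP_CODE, 0x00, 0x00,
				      REP82_ERROR_MACHINE_FAILURE, 0x00, 0x00, 0x00 };
	struct error_hdr hdr;

	memcpy(&hdr, response, sizeof(hdr));	/* copy instead of casting the buffer */
	switch (hdr.type) {
	case TYPE82_RSP_CODE:
		printf("hardware error, reply code 0x%02x\n", hdr.reply_code);
		break;
	default:
		printf("unrecognized message type 0x%02x\n", hdr.type);
		break;
	}
	return 0;
}
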
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 9ec29bb41b28..6aeef3bacc33 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets 32#include <linux/interrupt.h> // for tasklets
33#include <linux/ioctl32.h> 33#include <linux/ioctl32.h>
34#include <linux/miscdevice.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
36#include <linux/kobject_uevent.h> 37#include <linux/kobject_uevent.h>
@@ -39,19 +40,8 @@
39#include <linux/version.h> 40#include <linux/version.h>
40#include "z90crypt.h" 41#include "z90crypt.h"
41#include "z90common.h" 42#include "z90common.h"
42#ifndef Z90CRYPT_USE_HOTPLUG
43#include <linux/miscdevice.h>
44#endif
45
46#define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
47#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
48# error "This kernel is too old: not supported"
49#endif
50#if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
51# error "This kernel is too recent: not supported by this file"
52#endif
53 43
54#define VERSION_Z90MAIN_C "$Revision: 1.57 $" 44#define VERSION_Z90MAIN_C "$Revision: 1.62 $"
55 45
56static char z90main_version[] __initdata = 46static char z90main_version[] __initdata =
57 "z90main.o (" VERSION_Z90MAIN_C "/" 47 "z90main.o (" VERSION_Z90MAIN_C "/"
@@ -63,21 +53,12 @@ extern char z90hardware_version[];
63 * Defaults that may be modified. 53 * Defaults that may be modified.
64 */ 54 */
65 55
66#ifndef Z90CRYPT_USE_HOTPLUG
67/** 56/**
68 * You can specify a different minor at compile time. 57 * You can specify a different minor at compile time.
69 */ 58 */
70#ifndef Z90CRYPT_MINOR 59#ifndef Z90CRYPT_MINOR
71#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR 60#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
72#endif 61#endif
73#else
74/**
75 * You can specify a different major at compile time.
76 */
77#ifndef Z90CRYPT_MAJOR
78#define Z90CRYPT_MAJOR 0
79#endif
80#endif
81 62
82/** 63/**
83 * You can specify a different domain at compile time or on the insmod 64 * You can specify a different domain at compile time or on the insmod
@@ -97,7 +78,7 @@ extern char z90hardware_version[];
97 * older than CLEANUPTIME seconds in the past. 78 * older than CLEANUPTIME seconds in the past.
98 */ 79 */
99#ifndef CLEANUPTIME 80#ifndef CLEANUPTIME
100#define CLEANUPTIME 20 81#define CLEANUPTIME 15
101#endif 82#endif
102 83
103/** 84/**
@@ -298,6 +279,10 @@ struct z90crypt {
298 * it contains the request; at READ, the response. The function 279 * it contains the request; at READ, the response. The function
299 * send_to_crypto_device converts the request to device-dependent 280 * send_to_crypto_device converts the request to device-dependent
300 * form and use the caller's OPEN-allocated buffer for the response. 281 * form and use the caller's OPEN-allocated buffer for the response.
282 *
283 * For the contents of caller_dev_dep_req and caller_dev_dep_req_p
284 * because that points to it, see the discussion in z90hardware.c.
285 * Search for "extended request message block".
301 */ 286 */
302struct caller { 287struct caller {
303 int caller_buf_l; // length of original request 288 int caller_buf_l; // length of original request
@@ -398,24 +383,9 @@ static int z90crypt_status_write(struct file *, const char __user *,
398 unsigned long, void *); 383 unsigned long, void *);
399 384
400/** 385/**
401 * Hotplug support
402 */
403
404#ifdef Z90CRYPT_USE_HOTPLUG
405#define Z90CRYPT_HOTPLUG_ADD 1
406#define Z90CRYPT_HOTPLUG_REMOVE 2
407
408static void z90crypt_hotplug_event(int, int, int);
409#endif
410
411/**
412 * Storage allocated at initialization and used throughout the life of 386 * Storage allocated at initialization and used throughout the life of
413 * this insmod 387 * this insmod
414 */ 388 */
415#ifdef Z90CRYPT_USE_HOTPLUG
416static int z90crypt_major = Z90CRYPT_MAJOR;
417#endif
418
419static int domain = DOMAIN_INDEX; 389static int domain = DOMAIN_INDEX;
420static struct z90crypt z90crypt; 390static struct z90crypt z90crypt;
421static int quiesce_z90crypt; 391static int quiesce_z90crypt;
@@ -444,14 +414,12 @@ static struct file_operations z90crypt_fops = {
444 .release = z90crypt_release 414 .release = z90crypt_release
445}; 415};
446 416
447#ifndef Z90CRYPT_USE_HOTPLUG
448static struct miscdevice z90crypt_misc_device = { 417static struct miscdevice z90crypt_misc_device = {
449 .minor = Z90CRYPT_MINOR, 418 .minor = Z90CRYPT_MINOR,
450 .name = DEV_NAME, 419 .name = DEV_NAME,
451 .fops = &z90crypt_fops, 420 .fops = &z90crypt_fops,
452 .devfs_name = DEV_NAME 421 .devfs_name = DEV_NAME
453}; 422};
454#endif
455 423
456/** 424/**
457 * Documentation values. 425 * Documentation values.
@@ -603,7 +571,6 @@ z90crypt_init_module(void)
603 return -EINVAL; 571 return -EINVAL;
604 } 572 }
605 573
606#ifndef Z90CRYPT_USE_HOTPLUG
607 /* Register as misc device with given minor (or get a dynamic one). */ 574 /* Register as misc device with given minor (or get a dynamic one). */
608 result = misc_register(&z90crypt_misc_device); 575 result = misc_register(&z90crypt_misc_device);
609 if (result < 0) { 576 if (result < 0) {
@@ -611,18 +578,6 @@ z90crypt_init_module(void)
611 z90crypt_misc_device.minor, result); 578 z90crypt_misc_device.minor, result);
612 return result; 579 return result;
613 } 580 }
614#else
615 /* Register the major (or get a dynamic one). */
616 result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
617 if (result < 0) {
618 PRINTKW("register_chrdev (major %d) failed with %d.\n",
619 z90crypt_major, result);
620 return result;
621 }
622
623 if (z90crypt_major == 0)
624 z90crypt_major = result;
625#endif
626 581
627 PDEBUG("Registered " DEV_NAME " with result %d\n", result); 582 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
628 583
@@ -645,11 +600,6 @@ z90crypt_init_module(void)
645 } else 600 } else
646 PRINTK("No devices at startup\n"); 601 PRINTK("No devices at startup\n");
647 602
648#ifdef Z90CRYPT_USE_HOTPLUG
649 /* generate hotplug event for device node generation */
650 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
651#endif
652
653 /* Initialize globals. */ 603 /* Initialize globals. */
654 spin_lock_init(&queuespinlock); 604 spin_lock_init(&queuespinlock);
655 605
@@ -701,17 +651,10 @@ z90crypt_init_module(void)
701 return 0; // success 651 return 0; // success
702 652
703init_module_cleanup: 653init_module_cleanup:
704#ifndef Z90CRYPT_USE_HOTPLUG
705 if ((nresult = misc_deregister(&z90crypt_misc_device))) 654 if ((nresult = misc_deregister(&z90crypt_misc_device)))
706 PRINTK("misc_deregister failed with %d.\n", nresult); 655 PRINTK("misc_deregister failed with %d.\n", nresult);
707 else 656 else
708 PDEBUG("misc_deregister successful.\n"); 657 PDEBUG("misc_deregister successful.\n");
709#else
710 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
711 PRINTK("unregister_chrdev failed with %d.\n", nresult);
712 else
713 PDEBUG("unregister_chrdev successful.\n");
714#endif
715 658
716 return result; // failure 659 return result; // failure
717} 660}
@@ -728,19 +671,10 @@ z90crypt_cleanup_module(void)
728 671
729 remove_proc_entry("driver/z90crypt", 0); 672 remove_proc_entry("driver/z90crypt", 0);
730 673
731#ifndef Z90CRYPT_USE_HOTPLUG
732 if ((nresult = misc_deregister(&z90crypt_misc_device))) 674 if ((nresult = misc_deregister(&z90crypt_misc_device)))
733 PRINTK("misc_deregister failed with %d.\n", nresult); 675 PRINTK("misc_deregister failed with %d.\n", nresult);
734 else 676 else
735 PDEBUG("misc_deregister successful.\n"); 677 PDEBUG("misc_deregister successful.\n");
736#else
737 z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
738
739 if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
740 PRINTK("unregister_chrdev failed with %d.\n", nresult);
741 else
742 PDEBUG("unregister_chrdev successful.\n");
743#endif
744 678
745 /* Remove the tasks */ 679 /* Remove the tasks */
746 tasklet_kill(&reader_tasklet); 680 tasklet_kill(&reader_tasklet);
@@ -748,6 +682,9 @@ z90crypt_cleanup_module(void)
748 del_timer(&config_timer); 682 del_timer(&config_timer);
749 del_timer(&cleanup_timer); 683 del_timer(&cleanup_timer);
750 684
685 if (z90_device_work)
686 destroy_workqueue(z90_device_work);
687
751 destroy_z90crypt(); 688 destroy_z90crypt();
752 689
753 PRINTKN("Unloaded.\n"); 690 PRINTKN("Unloaded.\n");
@@ -766,8 +703,6 @@ z90crypt_cleanup_module(void)
766 * z90crypt_status_write 703 * z90crypt_status_write
767 * disable_card 704 * disable_card
768 * enable_card 705 * enable_card
769 * scan_char
770 * scan_string
771 * 706 *
772 * Helper functions: 707 * Helper functions:
773 * z90crypt_rsa 708 * z90crypt_rsa
@@ -1057,9 +992,10 @@ remove_device(struct device *device_p)
1057 * The MCL must be applied and the newer bitlengths enabled for these to work. 992 * The MCL must be applied and the newer bitlengths enabled for these to work.
1058 * 993 *
1059 * Card Type Old limit New limit 994 * Card Type Old limit New limit
995 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
1060 * PCICC 512-1024 512-2048 996 * PCICC 512-1024 512-2048
1061 * PCIXCC_MCL2 512-2048 no change (applying this MCL == card is MCL3+) 997 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
1062 * PCIXCC_MCL3 512-2048 128-2048 998 * PCIXCC_MCL3 ----- 128-2048
1063 * CEX2C 512-2048 128-2048 999 * CEX2C 512-2048 128-2048
1064 * 1000 *
1065 * ext_bitlens (extended bitlengths) is a global, since you should not apply an 1001 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
@@ -1104,7 +1040,7 @@ select_device_type(int *dev_type_p, int bytelength)
1104 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) { 1040 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
1105 /** 1041 /**
1106 * bitlength is a factor, PCICA is the most capable, even with 1042 * bitlength is a factor, PCICA is the most capable, even with
1107 * the new MCL. 1043 * the new MCL for PCIXCC.
1108 */ 1044 */
1109 if ((bytelength < PCIXCC_MIN_MOD_SIZE) || 1045 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1110 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { 1046 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
@@ -2144,73 +2080,15 @@ enable_card(int card_index)
2144 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--; 2080 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2145} 2081}
2146 2082
2147static inline int
2148scan_char(unsigned char *bf, unsigned int len,
2149 unsigned int *offs, unsigned int *p_eof, unsigned char c)
2150{
2151 unsigned int i, found;
2152
2153 found = 0;
2154 for (i = 0; i < len; i++) {
2155 if (bf[i] == c) {
2156 found = 1;
2157 break;
2158 }
2159 if (bf[i] == '\0') {
2160 *p_eof = 1;
2161 break;
2162 }
2163 if (bf[i] == '\n') {
2164 break;
2165 }
2166 }
2167 *offs = i+1;
2168 return found;
2169}
2170
2171static inline int
2172scan_string(unsigned char *bf, unsigned int len,
2173 unsigned int *offs, unsigned int *p_eof, unsigned char *s)
2174{
2175 unsigned int temp_len, temp_offs, found, eof;
2176
2177 temp_len = temp_offs = found = eof = 0;
2178 while (!eof && !found) {
2179 found = scan_char(bf+temp_len, len-temp_len,
2180 &temp_offs, &eof, *s);
2181
2182 temp_len += temp_offs;
2183 if (eof) {
2184 found = 0;
2185 break;
2186 }
2187
2188 if (found) {
2189 if (len >= temp_offs+strlen(s)) {
2190 found = !strncmp(bf+temp_len-1, s, strlen(s));
2191 if (found) {
2192 *offs = temp_len+strlen(s)-1;
2193 break;
2194 }
2195 } else {
2196 found = 0;
2197 *p_eof = 1;
2198 break;
2199 }
2200 }
2201 }
2202 return found;
2203}
2204
2205static int 2083static int
2206z90crypt_status_write(struct file *file, const char __user *buffer, 2084z90crypt_status_write(struct file *file, const char __user *buffer,
2207 unsigned long count, void *data) 2085 unsigned long count, void *data)
2208{ 2086{
2209 int i, j, len, offs, found, eof; 2087 int j, eol;
2210 unsigned char *lbuf; 2088 unsigned char *lbuf, *ptr;
2211 unsigned int local_count; 2089 unsigned int local_count;
2212 2090
2213#define LBUFSIZE 600 2091#define LBUFSIZE 1200
2214 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 2092 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2215 if (!lbuf) { 2093 if (!lbuf) {
2216 PRINTK("kmalloc failed!\n"); 2094 PRINTK("kmalloc failed!\n");
@@ -2227,49 +2105,46 @@ z90crypt_status_write(struct file *file, const char __user *buffer,
2227 return -EFAULT; 2105 return -EFAULT;
2228 } 2106 }
2229 2107
2230 lbuf[local_count-1] = '\0'; 2108 lbuf[local_count] = '\0';
2231 2109
2232 len = 0; 2110 ptr = strstr(lbuf, "Online devices");
2233 eof = 0; 2111 if (ptr == 0) {
2234 found = 0; 2112 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2235 while (!eof) { 2113 kfree(lbuf);
2236 found = scan_string(lbuf+len, local_count-len, &offs, &eof, 2114 return count;
2237 "Online devices");
2238 len += offs;
2239 if (found == 1)
2240 break;
2241 } 2115 }
2242 2116
2243 if (eof) { 2117 ptr = strstr(ptr, "\n");
2118 if (ptr == 0) {
2119 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2244 kfree(lbuf); 2120 kfree(lbuf);
2245 return count; 2121 return count;
2246 } 2122 }
2123 ptr++;
2247 2124
2248 if (found) 2125 if (strstr(ptr, "Waiting work element counts") == NULL) {
2249 found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n'); 2126 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2250
2251 if (!found || eof) {
2252 kfree(lbuf); 2127 kfree(lbuf);
2253 return count; 2128 return count;
2254 } 2129 }
2255 2130
2256 len += offs;
2257 j = 0; 2131 j = 0;
2258 for (i = 0; i < 80; i++) { 2132 eol = 0;
2259 switch (*(lbuf+len+i)) { 2133 while ((j < 64) && (*ptr != '\0')) {
2134 switch (*ptr) {
2260 case '\t': 2135 case '\t':
2261 case ' ': 2136 case ' ':
2262 break; 2137 break;
2263 case '\n': 2138 case '\n':
2264 default: 2139 default:
2265 eof = 1; 2140 eol = 1;
2266 break; 2141 break;
2267 case '0': 2142 case '0': // no device
2268 case '1': 2143 case '1': // PCICA
2269 case '2': 2144 case '2': // PCICC
2270 case '3': 2145 case '3': // PCIXCC_MCL2
2271 case '4': 2146 case '4': // PCIXCC_MCL3
2272 case '5': 2147 case '5': // CEX2C
2273 j++; 2148 j++;
2274 break; 2149 break;
2275 case 'd': 2150 case 'd':
@@ -2283,8 +2158,9 @@ z90crypt_status_write(struct file *file, const char __user *buffer,
2283 j++; 2158 j++;
2284 break; 2159 break;
2285 } 2160 }
2286 if (eof) 2161 if (eol)
2287 break; 2162 break;
2163 ptr++;
2288 } 2164 }
2289 2165
2290 kfree(lbuf); 2166 kfree(lbuf);
@@ -3479,45 +3355,5 @@ probe_PCIXCC_type(struct device *devPtr)
3479 return rv; 3355 return rv;
3480} 3356}
3481 3357
3482#ifdef Z90CRYPT_USE_HOTPLUG
3483static void
3484z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
3485{
3486#ifdef CONFIG_HOTPLUG
3487 char *argv[3];
3488 char *envp[6];
3489 char major[20];
3490 char minor[20];
3491
3492 sprintf(major, "MAJOR=%d", dev_major);
3493 sprintf(minor, "MINOR=%d", dev_minor);
3494
3495 argv[0] = hotplug_path;
3496 argv[1] = "z90crypt";
3497 argv[2] = 0;
3498
3499 envp[0] = "HOME=/";
3500 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3501
3502 switch (action) {
3503 case Z90CRYPT_HOTPLUG_ADD:
3504 envp[2] = "ACTION=add";
3505 break;
3506 case Z90CRYPT_HOTPLUG_REMOVE:
3507 envp[2] = "ACTION=remove";
3508 break;
3509 default:
3510 BUG();
3511 break;
3512 }
3513 envp[3] = major;
3514 envp[4] = minor;
3515 envp[5] = 0;
3516
3517 call_usermodehelper(argv[0], argv, envp, 0);
3518#endif
3519}
3520#endif
3521
3522module_init(z90crypt_init_module); 3358module_init(z90crypt_init_module);
3523module_exit(z90crypt_cleanup_module); 3359module_exit(z90crypt_cleanup_module);
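
The rewritten z90crypt_status_write() above drops the hand-rolled scan_char()/scan_string() helpers in favour of strstr(): locate the "Online devices" banner, skip to the end of that line, then walk the per-device status characters. The sketch below mirrors that structure in plain userspace C; the input string and the 64-slot limit are illustrative rather than the driver's exact behaviour.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf =
		"z90crypt version ...\nOnline devices\n 1 2 3 d e\n"
		"Waiting work element counts\n";
	const char *ptr = strstr(buf, "Online devices");
	int slots = 0;

	if (ptr == NULL)
		return 1;		/* banner missing: nothing to parse */
	ptr = strchr(ptr, '\n');
	if (ptr == NULL)
		return 1;		/* banner line not terminated */
	ptr++;
	while (slots < 64 && *ptr != '\0' && *ptr != '\n') {
		if ((*ptr >= '0' && *ptr <= '5') || *ptr == 'd' || *ptr == 'e')
			slots++;	/* one status character per device slot */
		ptr++;
	}
	printf("parsed %d device slots\n", slots);
	return 0;
}
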
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 24c0af49c25c..3092473991a7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -2,9 +2,9 @@
2 * drivers/s390/net/claw.c 2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver 3 * ESCON CLAW network driver
4 * 4 *
5 * $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ 5 * $Revision: 1.38 $ $Date: 2005/08/29 09:47:04 $
6 * 6 *
7 * Linux fo zSeries version 7 * Linux for zSeries version
8 * Copyright (C) 2002,2005 IBM Corporation 8 * Copyright (C) 2002,2005 IBM Corporation
9 * Author(s) Original code written by: 9 * Author(s) Original code written by:
10 * Kazuo Iimura (iimura@jp.ibm.com) 10 * Kazuo Iimura (iimura@jp.ibm.com)
@@ -431,12 +431,12 @@ claw_pack_skb(struct claw_privbk *privptr)
431 if (!skb_queue_empty(&p_ch->collect_queue)) { 431 if (!skb_queue_empty(&p_ch->collect_queue)) {
432 /* some data */ 432 /* some data */
433 held_skb = skb_dequeue(&p_ch->collect_queue); 433 held_skb = skb_dequeue(&p_ch->collect_queue);
434 if (p_env->packing != DO_PACKED)
435 return held_skb;
436 if (held_skb) 434 if (held_skb)
437 atomic_dec(&held_skb->users); 435 dev_kfree_skb_any(held_skb);
438 else 436 else
439 return NULL; 437 return NULL;
438 if (p_env->packing != DO_PACKED)
439 return held_skb;
440 /* get a new SKB we will pack at least one */ 440 /* get a new SKB we will pack at least one */
441 new_skb = dev_alloc_skb(p_env->write_size); 441 new_skb = dev_alloc_skb(p_env->write_size);
442 if (new_skb == NULL) { 442 if (new_skb == NULL) {
@@ -455,7 +455,7 @@ claw_pack_skb(struct claw_privbk *privptr)
455 privptr->stats.tx_packets++; 455 privptr->stats.tx_packets++;
456 so_far += held_skb->len; 456 so_far += held_skb->len;
457 pkt_cnt++; 457 pkt_cnt++;
458 dev_kfree_skb_irq(held_skb); 458 dev_kfree_skb_any(held_skb);
459 held_skb = skb_dequeue(&p_ch->collect_queue); 459 held_skb = skb_dequeue(&p_ch->collect_queue);
460 if (held_skb) 460 if (held_skb)
461 atomic_dec(&held_skb->users); 461 atomic_dec(&held_skb->users);
@@ -1092,7 +1092,7 @@ claw_release(struct net_device *dev)
1092 } 1092 }
1093 } 1093 }
1094 if (privptr->pk_skb != NULL) { 1094 if (privptr->pk_skb != NULL) {
1095 dev_kfree_skb(privptr->pk_skb); 1095 dev_kfree_skb_any(privptr->pk_skb);
1096 privptr->pk_skb = NULL; 1096 privptr->pk_skb = NULL;
1097 } 1097 }
1098 if(privptr->buffs_alloc != 1) { 1098 if(privptr->buffs_alloc != 1) {
@@ -2016,7 +2016,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
2016 p_buf=(struct ccwbk*)privptr->p_end_ccw; 2016 p_buf=(struct ccwbk*)privptr->p_end_ccw;
2017 dumpit((char *)p_buf, sizeof(struct endccw)); 2017 dumpit((char *)p_buf, sizeof(struct endccw));
2018#endif 2018#endif
2019 dev_kfree_skb(skb); 2019 dev_kfree_skb_any(skb);
2020 if (linkid==0) { 2020 if (linkid==0) {
2021 lock=LOCK_NO; 2021 lock=LOCK_NO;
2022 } 2022 }
@@ -4061,7 +4061,7 @@ claw_purge_skb_queue(struct sk_buff_head *q)
4061 4061
4062 while ((skb = skb_dequeue(q))) { 4062 while ((skb = skb_dequeue(q))) {
4063 atomic_dec(&skb->users); 4063 atomic_dec(&skb->users);
4064 dev_kfree_skb_irq(skb); 4064 dev_kfree_skb_any(skb);
4065 } 4065 }
4066} 4066}
4067 4067
@@ -4410,7 +4410,7 @@ claw_init(void)
4410#else 4410#else
4411 "compiled into kernel " 4411 "compiled into kernel "
4412#endif 4412#endif
4413 " $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ \n"); 4413 " $Revision: 1.38 $ $Date: 2005/08/29 09:47:04 $ \n");
4414 4414
4415 4415
4416#ifdef FUNCTRACE 4416#ifdef FUNCTRACE
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5bb255e02acc..4191fd9d4d11 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -240,7 +240,7 @@ s390_revalidate_registers(struct mci *mci)
240 * Floating point control register can't be restored. 240 * Floating point control register can't be restored.
241 * Task will be terminated. 241 * Task will be terminated.
242 */ 242 */
243 asm volatile ("lfpc 0(%0)" : : "a" (&zero)); 243 asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
244 kill_task = 1; 244 kill_task = 1;
245 245
246 } 246 }
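
The one-line s390mach.c fix adds an "m" (zero) operand so the compiler knows the lfpc instruction actually reads the zero variable from memory; with only the address operand, the initializing store could in principle be optimized away or reordered. The generic, empty-body illustration below only shows the operand shape; the real instruction is omitted so the sketch compiles on any target.

#include <stdio.h>

int main(void)
{
	unsigned int zero = 0;

	/* "r"(&zero) passes the address; "m"(zero) declares the memory read,
	 * so the store initializing 'zero' must stay before the asm. */
	asm volatile("" : : "r"(&zero), "m"(zero));

	printf("done\n");
	return 0;
}
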
diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
index a41778a490d6..3a8152906bf6 100644
--- a/drivers/sbus/char/Kconfig
+++ b/drivers/sbus/char/Kconfig
@@ -69,11 +69,40 @@ config SUN_JSFLASH
69 If you say Y here, you will be able to boot from your JavaStation's 69 If you say Y here, you will be able to boot from your JavaStation's
70 Flash memory. 70 Flash memory.
71 71
72# XXX Why don't we do "source drivers/char/Config.in" somewhere? 72config BBC_I2C
73# no shit 73 tristate "UltraSPARC-III bootbus i2c controller driver"
74config RTC 74 depends on PCI && SPARC64
75 tristate "PC-style Real Time Clock Support" 75 help
76 depends on PCI && EXPERIMENTAL && SPARC32 76 The BBC devices on the UltraSPARC III have two I2C controllers. The
77 first I2C controller connects mainly to configuration PROMs (NVRAM,
78 CPU configuration, DIMM types, etc.). The second I2C controller
79 connects to environmental control devices such as fans and
80 temperature sensors. The second controller also connects to the
81 smartcard reader, if present. Say Y to enable support for these.
82
83config ENVCTRL
84 tristate "SUNW, envctrl support"
85 depends on PCI && SPARC64
86 help
87 Kernel support for temperature and fan monitoring on Sun SME
88 machines.
89
90 To compile this driver as a module, choose M here: the
91 module will be called envctrl.
92
93config DISPLAY7SEG
94 tristate "7-Segment Display support"
95 depends on PCI && SPARC64
96 ---help---
97 This is the driver for the 7-segment display and LED present on
98 Sun Microsystems CompactPCI models CP1400 and CP1500.
99
100 To compile this driver as a module, choose M here: the
101 module will be called display7seg.
102
103 If you do not have a CompactPCI model CP1400 or CP1500, or
104 another UltraSPARC-IIi-cEngine boardset with a 7-segment display,
105 you should say N to this option.
77 106
78endmenu 107endmenu
79 108
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 973c51fb0fe2..ae9e0203e9de 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1499,22 +1499,43 @@ static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
1499 return 0; 1499 return 0;
1500} /* End tw_scsiop_inquiry() */ 1500} /* End tw_scsiop_inquiry() */
1501 1501
1502static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
1503 void *data, unsigned int len)
1504{
1505 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1506 void *buf;
1507 unsigned int transfer_len;
1508
1509 if (cmd->use_sg) {
1510 struct scatterlist *sg =
1511 (struct scatterlist *)cmd->request_buffer;
1512 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1513 transfer_len = min(sg->length, len);
1514 } else {
1515 buf = cmd->request_buffer;
1516 transfer_len = min(cmd->request_bufflen, len);
1517 }
1518
1519 memcpy(buf, data, transfer_len);
1520
1521 if (cmd->use_sg) {
1522 struct scatterlist *sg;
1523
1524 sg = (struct scatterlist *)cmd->request_buffer;
1525 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1526 }
1527}
1528
1502/* This function is called by the isr to complete an inquiry command */ 1529/* This function is called by the isr to complete an inquiry command */
1503static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id) 1530static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id)
1504{ 1531{
1505 unsigned char *is_unit_present; 1532 unsigned char *is_unit_present;
1506 unsigned char *request_buffer; 1533 unsigned char request_buffer[36];
1507 TW_Param *param; 1534 TW_Param *param;
1508 1535
1509 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry_complete()\n"); 1536 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry_complete()\n");
1510 1537
1511 /* Fill request buffer */ 1538 memset(request_buffer, 0, sizeof(request_buffer));
1512 if (tw_dev->srb[request_id]->request_buffer == NULL) {
1513 printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry_complete(): Request buffer NULL.\n");
1514 return 1;
1515 }
1516 request_buffer = tw_dev->srb[request_id]->request_buffer;
1517 memset(request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
1518 request_buffer[0] = TYPE_DISK; /* Peripheral device type */ 1539 request_buffer[0] = TYPE_DISK; /* Peripheral device type */
1519 request_buffer[1] = 0; /* Device type modifier */ 1540 request_buffer[1] = 0; /* Device type modifier */
1520 request_buffer[2] = 0; /* No ansi/iso compliance */ 1541 request_buffer[2] = 0; /* No ansi/iso compliance */
@@ -1522,6 +1543,8 @@ static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_i
1522 memcpy(&request_buffer[8], "3ware ", 8); /* Vendor ID */ 1543 memcpy(&request_buffer[8], "3ware ", 8); /* Vendor ID */
1523 sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id); 1544 sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id);
1524 memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3); 1545 memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3);
1546 tw_transfer_internal(tw_dev, request_id, request_buffer,
1547 sizeof(request_buffer));
1525 1548
1526 param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; 1549 param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
1527 if (param == NULL) { 1550 if (param == NULL) {
@@ -1612,7 +1635,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
1612{ 1635{
1613 TW_Param *param; 1636 TW_Param *param;
1614 unsigned char *flags; 1637 unsigned char *flags;
1615 unsigned char *request_buffer; 1638 unsigned char request_buffer[8];
1616 1639
1617 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n"); 1640 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n");
1618 1641
@@ -1622,8 +1645,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
1622 return 1; 1645 return 1;
1623 } 1646 }
1624 flags = (char *)&(param->data[0]); 1647 flags = (char *)&(param->data[0]);
1625 request_buffer = tw_dev->srb[request_id]->buffer; 1648 memset(request_buffer, 0, sizeof(request_buffer));
1626 memset(request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
1627 1649
1628 request_buffer[0] = 0xf; /* mode data length */ 1650 request_buffer[0] = 0xf; /* mode data length */
1629 request_buffer[1] = 0; /* default medium type */ 1651 request_buffer[1] = 0; /* default medium type */
@@ -1635,6 +1657,8 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
1635 request_buffer[6] = 0x4; /* WCE on */ 1657 request_buffer[6] = 0x4; /* WCE on */
1636 else 1658 else
1637 request_buffer[6] = 0x0; /* WCE off */ 1659 request_buffer[6] = 0x0; /* WCE off */
1660 tw_transfer_internal(tw_dev, request_id, request_buffer,
1661 sizeof(request_buffer));
1638 1662
1639 return 0; 1663 return 0;
1640} /* End tw_scsiop_mode_sense_complete() */ 1664} /* End tw_scsiop_mode_sense_complete() */
@@ -1701,17 +1725,12 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
1701{ 1725{
1702 unsigned char *param_data; 1726 unsigned char *param_data;
1703 u32 capacity; 1727 u32 capacity;
1704 char *buff; 1728 char buff[8];
1705 TW_Param *param; 1729 TW_Param *param;
1706 1730
1707 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n"); 1731 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n");
1708 1732
1709 buff = tw_dev->srb[request_id]->request_buffer; 1733 memset(buff, 0, sizeof(buff));
1710 if (buff == NULL) {
1711 printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Request buffer NULL.\n");
1712 return 1;
1713 }
1714 memset(buff, 0, tw_dev->srb[request_id]->request_bufflen);
1715 param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; 1734 param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
1716 if (param == NULL) { 1735 if (param == NULL) {
1717 printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n"); 1736 printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n");
@@ -1739,6 +1758,8 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
1739 buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff; 1758 buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff;
1740 buff[7] = TW_BLOCK_SIZE & 0xff; 1759 buff[7] = TW_BLOCK_SIZE & 0xff;
1741 1760
1761 tw_transfer_internal(tw_dev, request_id, buff, sizeof(buff));
1762
1742 return 0; 1763 return 0;
1743} /* End tw_scsiop_read_capacity_complete() */ 1764} /* End tw_scsiop_read_capacity_complete() */
1744 1765
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 79ba45c1e9c0..10f3f4da68c5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,5 +1,11 @@
1menu "SCSI device support" 1menu "SCSI device support"
2 2
3config RAID_ATTRS
4 tristate "RAID Transport Class"
5 default n
6 ---help---
7 Provides RAID
8
3config SCSI 9config SCSI
4 tristate "SCSI device support" 10 tristate "SCSI device support"
5 ---help--- 11 ---help---
@@ -250,7 +256,7 @@ config SCSI_DECNCR
250 256
251config SCSI_DECSII 257config SCSI_DECSII
252 tristate "DEC SII Scsi Driver" 258 tristate "DEC SII Scsi Driver"
253 depends on MACH_DECSTATION && SCSI && MIPS32 259 depends on MACH_DECSTATION && SCSI && 32BIT
254 260
255config BLK_DEV_3W_XXXX_RAID 261config BLK_DEV_3W_XXXX_RAID
256 tristate "3ware 5/6/7/8xxx ATA-RAID support" 262 tristate "3ware 5/6/7/8xxx ATA-RAID support"
@@ -459,6 +465,15 @@ config SCSI_ATA_PIIX
459 465
460 If unsure, say N. 466 If unsure, say N.
461 467
468config SCSI_SATA_MV
469 tristate "Marvell SATA support"
470 depends on SCSI_SATA && PCI && EXPERIMENTAL
471 help
472 This option enables support for the Marvell Serial ATA family.
473 Currently supports 88SX[56]0[48][01] chips.
474
475 If unsure, say N.
476
462config SCSI_SATA_NV 477config SCSI_SATA_NV
463 tristate "NVIDIA SATA support" 478 tristate "NVIDIA SATA support"
464 depends on SCSI_SATA && PCI && EXPERIMENTAL 479 depends on SCSI_SATA && PCI && EXPERIMENTAL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 64aaab64aeb6..370a560c6aef 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -22,6 +22,8 @@ subdir-$(CONFIG_PCMCIA) += pcmcia
22 22
23obj-$(CONFIG_SCSI) += scsi_mod.o 23obj-$(CONFIG_SCSI) += scsi_mod.o
24 24
25obj-$(CONFIG_RAID_ATTRS) += raid_class.o
26
25# --- NOTE ORDERING HERE --- 27# --- NOTE ORDERING HERE ---
26# For kernel non-modular link, transport attributes need to 28# For kernel non-modular link, transport attributes need to
27# be initialised before drivers 29# be initialised before drivers
@@ -133,6 +135,7 @@ obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o
133obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o 135obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
134obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o 136obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o
135obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o 137obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
138obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
136 139
137obj-$(CONFIG_ARM) += arm/ 140obj-$(CONFIG_ARM) += arm/
138 141
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index f8ec6fe7d858..d40ba0bd68a3 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -88,6 +88,13 @@
88 */ 88 */
89#include <scsi/scsi_dbg.h> 89#include <scsi/scsi_dbg.h>
90 90
91#ifndef NDEBUG
92#define NDEBUG 0
93#endif
94#ifndef NDEBUG
95#define NDEBUG_ABORT 0
96#endif
97
91#if (NDEBUG & NDEBUG_LISTS) 98#if (NDEBUG & NDEBUG_LISTS)
92#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } 99#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
93#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } 100#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@@ -359,7 +366,7 @@ static struct {
359 {PHASE_UNKNOWN, "UNKNOWN"} 366 {PHASE_UNKNOWN, "UNKNOWN"}
360}; 367};
361 368
362#ifdef NDEBUG 369#if NDEBUG
363static struct { 370static struct {
364 unsigned char mask; 371 unsigned char mask;
365 const char *name; 372 const char *name;
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 79ae73b23680..e1f2246ee7cd 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -62,7 +62,7 @@
62 62
63#define SYNC_MODE 0 /* Synchronous transfer mode */ 63#define SYNC_MODE 0 /* Synchronous transfer mode */
64 64
65#if DEBUG 65#ifdef DEBUG
66#undef NCR53C406A_DEBUG 66#undef NCR53C406A_DEBUG
67#define NCR53C406A_DEBUG 1 67#define NCR53C406A_DEBUG 1
68#endif 68#endif
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ccdf440021fb..a8e3dfcd0dc7 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -133,6 +133,7 @@ struct inquiry_data {
133 133
134static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap); 134static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
135static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg); 135static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
136static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
136static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); 137static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
137#ifdef AAC_DETAILED_STATUS_INFO 138#ifdef AAC_DETAILED_STATUS_INFO
138static char *aac_get_status_string(u32 status); 139static char *aac_get_status_string(u32 status);
@@ -348,6 +349,27 @@ static void aac_io_done(struct scsi_cmnd * scsicmd)
348 spin_unlock_irqrestore(host->host_lock, cpu_flags); 349 spin_unlock_irqrestore(host->host_lock, cpu_flags);
349} 350}
350 351
352static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
353{
354 void *buf;
355 unsigned int transfer_len;
356 struct scatterlist *sg = scsicmd->request_buffer;
357
358 if (scsicmd->use_sg) {
359 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
360 transfer_len = min(sg->length, len + offset);
361 } else {
362 buf = scsicmd->request_buffer;
363 transfer_len = min(scsicmd->request_bufflen, len + offset);
364 }
365
366 memcpy(buf + offset, data, transfer_len - offset);
367
368 if (scsicmd->use_sg)
369 kunmap_atomic(buf - sg->offset, KM_IRQ0);
370
371}
372
351static void get_container_name_callback(void *context, struct fib * fibptr) 373static void get_container_name_callback(void *context, struct fib * fibptr)
352{ 374{
353 struct aac_get_name_resp * get_name_reply; 375 struct aac_get_name_resp * get_name_reply;
@@ -363,18 +385,22 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
363 /* Failure is irrelevant, using default value instead */ 385 /* Failure is irrelevant, using default value instead */
364 if ((le32_to_cpu(get_name_reply->status) == CT_OK) 386 if ((le32_to_cpu(get_name_reply->status) == CT_OK)
365 && (get_name_reply->data[0] != '\0')) { 387 && (get_name_reply->data[0] != '\0')) {
366 int count; 388 char *sp = get_name_reply->data;
367 char * dp;
368 char * sp = get_name_reply->data;
369 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0'; 389 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
370 while (*sp == ' ') 390 while (*sp == ' ')
371 ++sp; 391 ++sp;
372 count = sizeof(((struct inquiry_data *)NULL)->inqd_pid); 392 if (*sp) {
373 dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid; 393 char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
374 if (*sp) do { 394 int count = sizeof(d);
375 *dp++ = (*sp) ? *sp++ : ' '; 395 char *dp = d;
376 } while (--count > 0); 396 do {
397 *dp++ = (*sp) ? *sp++ : ' ';
398 } while (--count > 0);
399 aac_internal_transfer(scsicmd, d,
400 offsetof(struct inquiry_data, inqd_pid), sizeof(d));
401 }
377 } 402 }
403
378 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 404 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
379 405
380 fib_complete(fibptr); 406 fib_complete(fibptr);
@@ -777,34 +803,36 @@ int aac_get_adapter_info(struct aac_dev* dev)
777 /* 803 /*
778 * 57 scatter gather elements 804 * 57 scatter gather elements
779 */ 805 */
780 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 806 if (!(dev->raw_io_interface)) {
781 sizeof(struct aac_fibhdr) - 807 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
782 sizeof(struct aac_write) + sizeof(struct sgmap)) /
783 sizeof(struct sgmap);
784 if (dev->dac_support) {
785 /*
786 * 38 scatter gather elements
787 */
788 dev->scsi_host_ptr->sg_tablesize =
789 (dev->max_fib_size -
790 sizeof(struct aac_fibhdr) - 808 sizeof(struct aac_fibhdr) -
791 sizeof(struct aac_write64) + 809 sizeof(struct aac_write) + sizeof(struct sgmap)) /
792 sizeof(struct sgmap64)) / 810 sizeof(struct sgmap);
793 sizeof(struct sgmap64); 811 if (dev->dac_support) {
794 } 812 /*
795 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 813 * 38 scatter gather elements
796 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 814 */
797 /* 815 dev->scsi_host_ptr->sg_tablesize =
798 * Worst case size that could cause sg overflow when 816 (dev->max_fib_size -
799 * we break up SG elements that are larger than 64KB. 817 sizeof(struct aac_fibhdr) -
800 * Would be nice if we could tell the SCSI layer what 818 sizeof(struct aac_write64) +
801 * the maximum SG element size can be. Worst case is 819 sizeof(struct sgmap64)) /
802 * (sg_tablesize-1) 4KB elements with one 64KB 820 sizeof(struct sgmap64);
803 * element. 821 }
804 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB 822 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
805 */ 823 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
806 dev->scsi_host_ptr->max_sectors = 824 /*
807 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 825 * Worst case size that could cause sg overflow when
826 * we break up SG elements that are larger than 64KB.
827 * Would be nice if we could tell the SCSI layer what
828 * the maximum SG element size can be. Worst case is
829 * (sg_tablesize-1) 4KB elements with one 64KB
830 * element.
831 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
832 */
833 dev->scsi_host_ptr->max_sectors =
834 (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
835 }
808 } 836 }
809 837
810 fib_complete(fibptr); 838 fib_complete(fibptr);
@@ -814,12 +842,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
814} 842}
815 843
816 844
817static void read_callback(void *context, struct fib * fibptr) 845static void io_callback(void *context, struct fib * fibptr)
818{ 846{
819 struct aac_dev *dev; 847 struct aac_dev *dev;
820 struct aac_read_reply *readreply; 848 struct aac_read_reply *readreply;
821 struct scsi_cmnd *scsicmd; 849 struct scsi_cmnd *scsicmd;
822 u32 lba;
823 u32 cid; 850 u32 cid;
824 851
825 scsicmd = (struct scsi_cmnd *) context; 852 scsicmd = (struct scsi_cmnd *) context;
@@ -827,8 +854,7 @@ static void read_callback(void *context, struct fib * fibptr)
827 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 854 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
828 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); 855 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
829 856
830 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 857 dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
831 dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
832 858
833 if (fibptr == NULL) 859 if (fibptr == NULL)
834 BUG(); 860 BUG();
@@ -847,7 +873,7 @@ static void read_callback(void *context, struct fib * fibptr)
847 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 873 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
848 else { 874 else {
849#ifdef AAC_DETAILED_STATUS_INFO 875#ifdef AAC_DETAILED_STATUS_INFO
850 printk(KERN_WARNING "read_callback: io failed, status = %d\n", 876 printk(KERN_WARNING "io_callback: io failed, status = %d\n",
851 le32_to_cpu(readreply->status)); 877 le32_to_cpu(readreply->status));
852#endif 878#endif
853 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 879 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
@@ -867,53 +893,6 @@ static void read_callback(void *context, struct fib * fibptr)
867 aac_io_done(scsicmd); 893 aac_io_done(scsicmd);
868} 894}
869 895
870static void write_callback(void *context, struct fib * fibptr)
871{
872 struct aac_dev *dev;
873 struct aac_write_reply *writereply;
874 struct scsi_cmnd *scsicmd;
875 u32 lba;
876 u32 cid;
877
878 scsicmd = (struct scsi_cmnd *) context;
879 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
880 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
881
882 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
883 dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
884 if (fibptr == NULL)
885 BUG();
886
887 if(scsicmd->use_sg)
888 pci_unmap_sg(dev->pdev,
889 (struct scatterlist *)scsicmd->buffer,
890 scsicmd->use_sg,
891 scsicmd->sc_data_direction);
892 else if(scsicmd->request_bufflen)
893 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
894 scsicmd->request_bufflen,
895 scsicmd->sc_data_direction);
896
897 writereply = (struct aac_write_reply *) fib_data(fibptr);
898 if (le32_to_cpu(writereply->status) == ST_OK)
899 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
900 else {
901 printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
902 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
903 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
904 HARDWARE_ERROR,
905 SENCODE_INTERNAL_TARGET_FAILURE,
906 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
907 0, 0);
908 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
909 sizeof(struct sense_data));
910 }
911
912 fib_complete(fibptr);
913 fib_free(fibptr);
914 aac_io_done(scsicmd);
915}
916
917static int aac_read(struct scsi_cmnd * scsicmd, int cid) 896static int aac_read(struct scsi_cmnd * scsicmd, int cid)
918{ 897{
919 u32 lba; 898 u32 lba;
@@ -954,7 +933,32 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
954 933
955 fib_init(cmd_fibcontext); 934 fib_init(cmd_fibcontext);
956 935
957 if (dev->dac_support == 1) { 936 if (dev->raw_io_interface) {
937 struct aac_raw_io *readcmd;
938 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
939 readcmd->block[0] = cpu_to_le32(lba);
940 readcmd->block[1] = 0;
941 readcmd->count = cpu_to_le32(count<<9);
942 readcmd->cid = cpu_to_le16(cid);
943 readcmd->flags = cpu_to_le16(1);
944 readcmd->bpTotal = 0;
945 readcmd->bpComplete = 0;
946
947 aac_build_sgraw(scsicmd, &readcmd->sg);
948 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
949 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
950 BUG();
951 /*
952 * Now send the Fib to the adapter
953 */
954 status = fib_send(ContainerRawIo,
955 cmd_fibcontext,
956 fibsize,
957 FsaNormal,
958 0, 1,
959 (fib_callback) io_callback,
960 (void *) scsicmd);
961 } else if (dev->dac_support == 1) {
958 struct aac_read64 *readcmd; 962 struct aac_read64 *readcmd;
959 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext); 963 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
960 readcmd->command = cpu_to_le32(VM_CtHostRead64); 964 readcmd->command = cpu_to_le32(VM_CtHostRead64);
@@ -968,7 +972,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
968 fibsize = sizeof(struct aac_read64) + 972 fibsize = sizeof(struct aac_read64) +
969 ((le32_to_cpu(readcmd->sg.count) - 1) * 973 ((le32_to_cpu(readcmd->sg.count) - 1) *
970 sizeof (struct sgentry64)); 974 sizeof (struct sgentry64));
971 BUG_ON (fibsize > (sizeof(struct hw_fib) - 975 BUG_ON (fibsize > (dev->max_fib_size -
972 sizeof(struct aac_fibhdr))); 976 sizeof(struct aac_fibhdr)));
973 /* 977 /*
974 * Now send the Fib to the adapter 978 * Now send the Fib to the adapter
@@ -978,7 +982,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
978 fibsize, 982 fibsize,
979 FsaNormal, 983 FsaNormal,
980 0, 1, 984 0, 1,
981 (fib_callback) read_callback, 985 (fib_callback) io_callback,
982 (void *) scsicmd); 986 (void *) scsicmd);
983 } else { 987 } else {
984 struct aac_read *readcmd; 988 struct aac_read *readcmd;
@@ -1002,7 +1006,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1002 fibsize, 1006 fibsize,
1003 FsaNormal, 1007 FsaNormal,
1004 0, 1, 1008 0, 1,
1005 (fib_callback) read_callback, 1009 (fib_callback) io_callback,
1006 (void *) scsicmd); 1010 (void *) scsicmd);
1007 } 1011 }
1008 1012
@@ -1061,7 +1065,32 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1061 } 1065 }
1062 fib_init(cmd_fibcontext); 1066 fib_init(cmd_fibcontext);
1063 1067
1064 if(dev->dac_support == 1) { 1068 if (dev->raw_io_interface) {
1069 struct aac_raw_io *writecmd;
1070 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1071 writecmd->block[0] = cpu_to_le32(lba);
1072 writecmd->block[1] = 0;
1073 writecmd->count = cpu_to_le32(count<<9);
1074 writecmd->cid = cpu_to_le16(cid);
1075 writecmd->flags = 0;
1076 writecmd->bpTotal = 0;
1077 writecmd->bpComplete = 0;
1078
1079 aac_build_sgraw(scsicmd, &writecmd->sg);
1080 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
1081 if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
1082 BUG();
1083 /*
1084 * Now send the Fib to the adapter
1085 */
1086 status = fib_send(ContainerRawIo,
1087 cmd_fibcontext,
1088 fibsize,
1089 FsaNormal,
1090 0, 1,
1091 (fib_callback) io_callback,
1092 (void *) scsicmd);
1093 } else if (dev->dac_support == 1) {
1065 struct aac_write64 *writecmd; 1094 struct aac_write64 *writecmd;
1066 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext); 1095 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
1067 writecmd->command = cpu_to_le32(VM_CtHostWrite64); 1096 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
@@ -1085,7 +1114,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1085 fibsize, 1114 fibsize,
1086 FsaNormal, 1115 FsaNormal,
1087 0, 1, 1116 0, 1,
1088 (fib_callback) write_callback, 1117 (fib_callback) io_callback,
1089 (void *) scsicmd); 1118 (void *) scsicmd);
1090 } else { 1119 } else {
1091 struct aac_write *writecmd; 1120 struct aac_write *writecmd;
@@ -1111,7 +1140,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1111 fibsize, 1140 fibsize,
1112 FsaNormal, 1141 FsaNormal,
1113 0, 1, 1142 0, 1,
1114 (fib_callback) write_callback, 1143 (fib_callback) io_callback,
1115 (void *) scsicmd); 1144 (void *) scsicmd);
1116 } 1145 }
1117 1146
@@ -1340,44 +1369,45 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1340 switch (scsicmd->cmnd[0]) { 1369 switch (scsicmd->cmnd[0]) {
1341 case INQUIRY: 1370 case INQUIRY:
1342 { 1371 {
1343 struct inquiry_data *inq_data_ptr; 1372 struct inquiry_data inq_data;
1344 1373
1345 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id)); 1374 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
1346 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer; 1375 memset(&inq_data, 0, sizeof (struct inquiry_data));
1347 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
1348 1376
1349 inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */ 1377 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
1350 inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */ 1378 inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1351 inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 1379 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1352 inq_data_ptr->inqd_len = 31; 1380 inq_data.inqd_len = 31;
1353 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ 1381 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
1354 inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */ 1382 inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
1355 /* 1383 /*
1356 * Set the Vendor, Product, and Revision Level 1384 * Set the Vendor, Product, and Revision Level
1357 * see: <vendor>.c i.e. aac.c 1385 * see: <vendor>.c i.e. aac.c
1358 */ 1386 */
1359 if (scsicmd->device->id == host->this_id) { 1387 if (scsicmd->device->id == host->this_id) {
1360 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *))); 1388 setinqstr(cardtype, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *)));
1361 inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */ 1389 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
1390 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1362 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1391 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1363 scsicmd->scsi_done(scsicmd); 1392 scsicmd->scsi_done(scsicmd);
1364 return 0; 1393 return 0;
1365 } 1394 }
1366 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type); 1395 setinqstr(cardtype, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1367 inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 1396 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1397 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1368 return aac_get_container_name(scsicmd, cid); 1398 return aac_get_container_name(scsicmd, cid);
1369 } 1399 }
1370 case READ_CAPACITY: 1400 case READ_CAPACITY:
1371 { 1401 {
1372 u32 capacity; 1402 u32 capacity;
1373 char *cp; 1403 char cp[8];
1374 1404
1375 dprintk((KERN_DEBUG "READ CAPACITY command.\n")); 1405 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1376 if (fsa_dev_ptr[cid].size <= 0x100000000LL) 1406 if (fsa_dev_ptr[cid].size <= 0x100000000LL)
1377 capacity = fsa_dev_ptr[cid].size - 1; 1407 capacity = fsa_dev_ptr[cid].size - 1;
1378 else 1408 else
1379 capacity = (u32)-1; 1409 capacity = (u32)-1;
1380 cp = scsicmd->request_buffer; 1410
1381 cp[0] = (capacity >> 24) & 0xff; 1411 cp[0] = (capacity >> 24) & 0xff;
1382 cp[1] = (capacity >> 16) & 0xff; 1412 cp[1] = (capacity >> 16) & 0xff;
1383 cp[2] = (capacity >> 8) & 0xff; 1413 cp[2] = (capacity >> 8) & 0xff;
@@ -1386,6 +1416,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1386 cp[5] = 0; 1416 cp[5] = 0;
1387 cp[6] = 2; 1417 cp[6] = 2;
1388 cp[7] = 0; 1418 cp[7] = 0;
1419 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
1389 1420
1390 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1421 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1391 scsicmd->scsi_done(scsicmd); 1422 scsicmd->scsi_done(scsicmd);
@@ -1395,15 +1426,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1395 1426
1396 case MODE_SENSE: 1427 case MODE_SENSE:
1397 { 1428 {
1398 char *mode_buf; 1429 char mode_buf[4];
1399 1430
1400 dprintk((KERN_DEBUG "MODE SENSE command.\n")); 1431 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1401 mode_buf = scsicmd->request_buffer;
1402 mode_buf[0] = 3; /* Mode data length */ 1432 mode_buf[0] = 3; /* Mode data length */
1403 mode_buf[1] = 0; /* Medium type - default */ 1433 mode_buf[1] = 0; /* Medium type - default */
1404 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ 1434 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1405 mode_buf[3] = 0; /* Block descriptor length */ 1435 mode_buf[3] = 0; /* Block descriptor length */
1406 1436
1437 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
1407 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1438 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1408 scsicmd->scsi_done(scsicmd); 1439 scsicmd->scsi_done(scsicmd);
1409 1440
@@ -1411,10 +1442,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1411 } 1442 }
1412 case MODE_SENSE_10: 1443 case MODE_SENSE_10:
1413 { 1444 {
1414 char *mode_buf; 1445 char mode_buf[8];
1415 1446
1416 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); 1447 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1417 mode_buf = scsicmd->request_buffer;
1418 mode_buf[0] = 0; /* Mode data length (MSB) */ 1448 mode_buf[0] = 0; /* Mode data length (MSB) */
1419 mode_buf[1] = 6; /* Mode data length (LSB) */ 1449 mode_buf[1] = 6; /* Mode data length (LSB) */
1420 mode_buf[2] = 0; /* Medium type - default */ 1450 mode_buf[2] = 0; /* Medium type - default */
@@ -1423,6 +1453,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1423 mode_buf[5] = 0; /* reserved */ 1453 mode_buf[5] = 0; /* reserved */
1424 mode_buf[6] = 0; /* Block descriptor length (MSB) */ 1454 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1425 mode_buf[7] = 0; /* Block descriptor length (LSB) */ 1455 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1456 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
1426 1457
1427 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1458 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1428 scsicmd->scsi_done(scsicmd); 1459 scsicmd->scsi_done(scsicmd);
@@ -1894,7 +1925,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
1894 srbcmd->id = cpu_to_le32(scsicmd->device->id); 1925 srbcmd->id = cpu_to_le32(scsicmd->device->id);
1895 srbcmd->lun = cpu_to_le32(scsicmd->device->lun); 1926 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
1896 srbcmd->flags = cpu_to_le32(flag); 1927 srbcmd->flags = cpu_to_le32(flag);
1897 timeout = (scsicmd->timeout-jiffies)/HZ; 1928 timeout = scsicmd->timeout_per_command/HZ;
1898 if(timeout == 0){ 1929 if(timeout == 0){
1899 timeout = 1; 1930 timeout = 1;
1900 } 1931 }
@@ -2077,6 +2108,76 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
2077 return byte_count; 2108 return byte_count;
2078} 2109}
2079 2110
2111static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
2112{
2113 struct Scsi_Host *host = scsicmd->device->host;
2114 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2115 unsigned long byte_count = 0;
2116
2117 // Get rid of old data
2118 psg->count = 0;
2119 psg->sg[0].next = 0;
2120 psg->sg[0].prev = 0;
2121 psg->sg[0].addr[0] = 0;
2122 psg->sg[0].addr[1] = 0;
2123 psg->sg[0].count = 0;
2124 psg->sg[0].flags = 0;
2125 if (scsicmd->use_sg) {
2126 struct scatterlist *sg;
2127 int i;
2128 int sg_count;
2129 sg = (struct scatterlist *) scsicmd->request_buffer;
2130
2131 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
2132 scsicmd->sc_data_direction);
2133
2134 for (i = 0; i < sg_count; i++) {
2135 int count = sg_dma_len(sg);
2136 u64 addr = sg_dma_address(sg);
2137 psg->sg[i].next = 0;
2138 psg->sg[i].prev = 0;
2139 psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
2140 psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2141 psg->sg[i].count = cpu_to_le32(count);
2142 psg->sg[i].flags = 0;
2143 byte_count += count;
2144 sg++;
2145 }
2146 psg->count = cpu_to_le32(sg_count);
2147 /* hba wants the size to be exact */
2148 if(byte_count > scsicmd->request_bufflen){
2149 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
2150 (byte_count - scsicmd->request_bufflen);
2151 psg->sg[i-1].count = cpu_to_le32(temp);
2152 byte_count = scsicmd->request_bufflen;
2153 }
2154 /* Check for command underflow */
2155 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
2156 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
2157 byte_count, scsicmd->underflow);
2158 }
2159 }
2160 else if(scsicmd->request_bufflen) {
2161 int count;
2162 u64 addr;
2163 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2164 scsicmd->request_buffer,
2165 scsicmd->request_bufflen,
2166 scsicmd->sc_data_direction);
2167 addr = scsicmd->SCp.dma_handle;
2168 count = scsicmd->request_bufflen;
2169 psg->count = cpu_to_le32(1);
2170 psg->sg[0].next = 0;
2171 psg->sg[0].prev = 0;
2172 psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
2173 psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2174 psg->sg[0].count = cpu_to_le32(count);
2175 psg->sg[0].flags = 0;
2176 byte_count = scsicmd->request_bufflen;
2177 }
2178 return byte_count;
2179}
2180
2080#ifdef AAC_DETAILED_STATUS_INFO 2181#ifdef AAC_DETAILED_STATUS_INFO
2081 2182
2082struct aac_srb_status_info { 2183struct aac_srb_status_info {
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4ab07861b457..e40528185d48 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -110,6 +110,22 @@ struct user_sgentry64 {
110 u32 count; /* Length. */ 110 u32 count; /* Length. */
111}; 111};
112 112
113struct sgentryraw {
114 __le32 next; /* reserved for F/W use */
115 __le32 prev; /* reserved for F/W use */
116 __le32 addr[2];
117 __le32 count;
118 __le32 flags; /* reserved for F/W use */
119};
120
121struct user_sgentryraw {
122 u32 next; /* reserved for F/W use */
123 u32 prev; /* reserved for F/W use */
124 u32 addr[2];
125 u32 count;
126 u32 flags; /* reserved for F/W use */
127};
128
113/* 129/*
114 * SGMAP 130 * SGMAP
115 * 131 *
@@ -137,6 +153,16 @@ struct user_sgmap64 {
137 struct user_sgentry64 sg[1]; 153 struct user_sgentry64 sg[1];
138}; 154};
139 155
156struct sgmapraw {
157 __le32 count;
158 struct sgentryraw sg[1];
159};
160
161struct user_sgmapraw {
162 u32 count;
163 struct user_sgentryraw sg[1];
164};
165
140struct creation_info 166struct creation_info
141{ 167{
142 u8 buildnum; /* e.g., 588 */ 168 u8 buildnum; /* e.g., 588 */
@@ -351,6 +377,7 @@ struct hw_fib {
351 */ 377 */
352#define ContainerCommand 500 378#define ContainerCommand 500
353#define ContainerCommand64 501 379#define ContainerCommand64 501
380#define ContainerRawIo 502
354/* 381/*
355 * Cluster Commands 382 * Cluster Commands
356 */ 383 */
@@ -456,6 +483,7 @@ struct adapter_ops
456{ 483{
457 void (*adapter_interrupt)(struct aac_dev *dev); 484 void (*adapter_interrupt)(struct aac_dev *dev);
458 void (*adapter_notify)(struct aac_dev *dev, u32 event); 485 void (*adapter_notify)(struct aac_dev *dev, u32 event);
486 void (*adapter_disable_int)(struct aac_dev *dev);
459 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 487 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
460 int (*adapter_check_health)(struct aac_dev *dev); 488 int (*adapter_check_health)(struct aac_dev *dev);
461}; 489};
@@ -981,6 +1009,9 @@ struct aac_dev
981 u8 nondasd_support; 1009 u8 nondasd_support;
982 u8 dac_support; 1010 u8 dac_support;
983 u8 raid_scsi_mode; 1011 u8 raid_scsi_mode;
1012 /* macro side-effects BEWARE */
1013# define raw_io_interface \
1014 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
984 u8 printf_enabled; 1015 u8 printf_enabled;
985}; 1016};
986 1017
@@ -990,6 +1021,9 @@ struct aac_dev
990#define aac_adapter_notify(dev, event) \ 1021#define aac_adapter_notify(dev, event) \
991 (dev)->a_ops.adapter_notify(dev, event) 1022 (dev)->a_ops.adapter_notify(dev, event)
992 1023
1024#define aac_adapter_disable_int(dev) \
1025 (dev)->a_ops.adapter_disable_int(dev)
1026
993#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1027#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
994 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1028 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
995 1029
@@ -1156,6 +1190,17 @@ struct aac_write_reply
1156 __le32 committed; 1190 __le32 committed;
1157}; 1191};
1158 1192
1193struct aac_raw_io
1194{
1195 __le32 block[2];
1196 __le32 count;
1197 __le16 cid;
1198 __le16 flags; /* 00 W, 01 R */
1199 __le16 bpTotal; /* reserved for F/W use */
1200 __le16 bpComplete; /* reserved for F/W use */
1201 struct sgmapraw sg;
1202};
1203
1159#define CT_FLUSH_CACHE 129 1204#define CT_FLUSH_CACHE 129
1160struct aac_synchronize { 1205struct aac_synchronize {
1161 __le32 command; /* VM_ContainerConfig */ 1206 __le32 command; /* VM_ContainerConfig */
@@ -1196,7 +1241,7 @@ struct aac_srb
1196}; 1241};
1197 1242
1198/* 1243/*
1199 * This and assocated data structs are used by the 1244 * This and associated data structs are used by the
1200 * ioctl caller and are in cpu order. 1245 * ioctl caller and are in cpu order.
1201 */ 1246 */
1202struct user_aac_srb 1247struct user_aac_srb
@@ -1508,11 +1553,12 @@ struct fib_ioctl
1508 1553
1509struct revision 1554struct revision
1510{ 1555{
1511 u32 compat; 1556 __le32 compat;
1512 u32 version; 1557 __le32 version;
1513 u32 build; 1558 __le32 build;
1514}; 1559};
1515 1560
1561
1516/* 1562/*
1517 * Ugly - non Linux like ioctl coding for back compat. 1563 * Ugly - non Linux like ioctl coding for back compat.
1518 */ 1564 */
@@ -1733,3 +1779,4 @@ int aac_get_adapter_info(struct aac_dev* dev);
1733int aac_send_shutdown(struct aac_dev *dev); 1779int aac_send_shutdown(struct aac_dev *dev);
1734extern int numacb; 1780extern int numacb;
1735extern int acbsize; 1781extern int acbsize;
1782extern char aac_driver_version[];
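
The raw_io_interface definition above is a macro masquerading as a struct member, which is what the "macro side-effects BEWARE" comment warns about: every use of dev->raw_io_interface in the driver silently expands into a comparison against the init structure's revision field. A toy, self-contained illustration of that masquerade (the names below are invented, not aacraid code):

	#include <stdio.h>

	struct init_block { unsigned int revision; };
	struct adapter    { struct init_block *init; };

	/* Masquerades as a member: "a.is_rev4" becomes "a.init->revision == 4". */
	#define is_rev4 init->revision == 4

	int main(void)
	{
		struct init_block ib = { .revision = 4 };
		struct adapter a = { .init = &ib };

		if (a.is_rev4)	/* really: a.init->revision == 4 */
			printf("reads like a flag, evaluates an expression\n");
		return 0;
	}
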
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 85387099aab2..71f1cad9b5f0 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -287,7 +287,6 @@ return_fib:
287 kfree(fib->hw_fib); 287 kfree(fib->hw_fib);
288 kfree(fib); 288 kfree(fib);
289 status = 0; 289 status = 0;
290 fibctx->jiffies = jiffies/HZ;
291 } else { 290 } else {
292 spin_unlock_irqrestore(&dev->fib_lock, flags); 291 spin_unlock_irqrestore(&dev->fib_lock, flags);
293 if (f.wait) { 292 if (f.wait) {
@@ -302,6 +301,7 @@ return_fib:
302 status = -EAGAIN; 301 status = -EAGAIN;
303 } 302 }
304 } 303 }
304 fibctx->jiffies = jiffies/HZ;
305 return status; 305 return status;
306} 306}
307 307
@@ -405,10 +405,20 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
405static int check_revision(struct aac_dev *dev, void __user *arg) 405static int check_revision(struct aac_dev *dev, void __user *arg)
406{ 406{
407 struct revision response; 407 struct revision response;
408 408 char *driver_version = aac_driver_version;
409 response.compat = 1; 409 u32 version;
410 response.version = le32_to_cpu(dev->adapter_info.kernelrev); 410
411 response.build = le32_to_cpu(dev->adapter_info.kernelbuild); 411 response.compat = cpu_to_le32(1);
412 version = (simple_strtol(driver_version,
413 &driver_version, 10) << 24) | 0x00000400;
414 version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
415 version += simple_strtol(driver_version + 1, NULL, 10);
416 response.version = cpu_to_le32(version);
417# if (defined(AAC_DRIVER_BUILD))
418 response.build = cpu_to_le32(AAC_DRIVER_BUILD);
419# else
420 response.build = cpu_to_le32(9999);
421# endif
412 422
413 if (copy_to_user(arg, &response, sizeof(response))) 423 if (copy_to_user(arg, &response, sizeof(response)))
414 return -EFAULT; 424 return -EFAULT;
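
check_revision now derives the version it reports from the driver version string instead of the adapter's kernelrev/kernelbuild: the major number is packed into bits 31..24 (with a constant 0x00000400 OR-ed in), the minor into bits 23..16, and the patch level added into the low bits. A stand-alone worked example of that packing for the new "1.1-4" string, using strtol in place of the kernel's simple_strtol:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char *p = "1.1-4";	/* AAC_DRIVER_VERSION from linit.c */
		unsigned int version;

		/* major -> bits 31..24, with 0x00000400 OR-ed in */
		version  = (strtol(p, &p, 10) << 24) | 0x00000400;
		/* skip the '.', minor -> bits 23..16 */
		version += strtol(p + 1, &p, 10) << 16;
		/* skip the '-', patch level added into the low bits */
		version += strtol(p + 1, NULL, 10);

		printf("packed version = 0x%08x\n", version);	/* 0x01010404 */
		return 0;
	}
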
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 43557bf661f6..75abd0453289 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -44,7 +44,9 @@
44 44
45#include "aacraid.h" 45#include "aacraid.h"
46 46
47struct aac_common aac_config; 47struct aac_common aac_config = {
48 .irq_mod = 1
49};
48 50
49static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) 51static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
50{ 52{
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5322865942e2..a1d303f03480 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -254,6 +254,7 @@ static void fib_dealloc(struct fib * fibptr)
254static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify) 254static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
255{ 255{
256 struct aac_queue * q; 256 struct aac_queue * q;
257 unsigned long idx;
257 258
258 /* 259 /*
259 * All of the queues wrap when they reach the end, so we check 260 * All of the queues wrap when they reach the end, so we check
@@ -263,10 +264,23 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
263 */ 264 */
264 265
265 q = &dev->queues->queue[qid]; 266 q = &dev->queues->queue[qid];
266 267
267 *index = le32_to_cpu(*(q->headers.producer)); 268 idx = *index = le32_to_cpu(*(q->headers.producer));
268 if ((*index - 2) == le32_to_cpu(*(q->headers.consumer))) 269 /* Interrupt Moderation, only interrupt for first two entries */
270 if (idx != le32_to_cpu(*(q->headers.consumer))) {
271 if (--idx == 0) {
272 if (qid == AdapHighCmdQueue)
273 idx = ADAP_HIGH_CMD_ENTRIES;
274 else if (qid == AdapNormCmdQueue)
275 idx = ADAP_NORM_CMD_ENTRIES;
276 else if (qid == AdapHighRespQueue)
277 idx = ADAP_HIGH_RESP_ENTRIES;
278 else if (qid == AdapNormRespQueue)
279 idx = ADAP_NORM_RESP_ENTRIES;
280 }
281 if (idx != le32_to_cpu(*(q->headers.consumer)))
269 *nonotify = 1; 282 *nonotify = 1;
283 }
270 284
271 if (qid == AdapHighCmdQueue) { 285 if (qid == AdapHighCmdQueue) {
272 if (*index >= ADAP_HIGH_CMD_ENTRIES) 286 if (*index >= ADAP_HIGH_CMD_ENTRIES)
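
The aac_get_entry change above adds simple interrupt moderation on the adapter queues: the adapter is notified only when the queue was empty, or when the slot just before the new producer index is still the consumer index (one entry outstanding); otherwise nonotify is set and the notification is skipped. A stripped-down sketch of that decision, with hypothetical names (the wrap convention, index 0 mapping back to the entry count, is taken from the hunk above):

	/* Returns 1 when the adapter need not be notified for this entry. */
	static int skip_notify(unsigned long producer, unsigned long consumer,
			       unsigned long entries)
	{
		unsigned long idx = producer;

		if (idx == consumer)	/* queue was empty: notify */
			return 0;
		if (--idx == 0)		/* wrap the "previous slot" index */
			idx = entries;
		return idx != consumer;	/* exactly one pending: notify; more: skip */
	}
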
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 562da90480a1..4ff29d7f5825 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -27,8 +27,11 @@
27 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller 27 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
28 */ 28 */
29 29
30#define AAC_DRIVER_VERSION "1.1.2-lk2" 30#define AAC_DRIVER_VERSION "1.1-4"
31#define AAC_DRIVER_BUILD_DATE __DATE__ 31#ifndef AAC_DRIVER_BRANCH
32#define AAC_DRIVER_BRANCH ""
33#endif
34#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
32#define AAC_DRIVERNAME "aacraid" 35#define AAC_DRIVERNAME "aacraid"
33 36
34#include <linux/compat.h> 37#include <linux/compat.h>
@@ -58,16 +61,24 @@
58 61
59#include "aacraid.h" 62#include "aacraid.h"
60 63
64#ifdef AAC_DRIVER_BUILD
65#define _str(x) #x
66#define str(x) _str(x)
67#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
68#else
69#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE
70#endif
61 71
62MODULE_AUTHOR("Red Hat Inc and Adaptec"); 72MODULE_AUTHOR("Red Hat Inc and Adaptec");
63MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " 73MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
64 "Adaptec Advanced Raid Products, " 74 "Adaptec Advanced Raid Products, "
65 "and HP NetRAID-4M SCSI driver"); 75 "and HP NetRAID-4M SCSI driver");
66MODULE_LICENSE("GPL"); 76MODULE_LICENSE("GPL");
67MODULE_VERSION(AAC_DRIVER_VERSION); 77MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
68 78
69static LIST_HEAD(aac_devices); 79static LIST_HEAD(aac_devices);
70static int aac_cfg_major = -1; 80static int aac_cfg_major = -1;
81char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
71 82
72/* 83/*
73 * Because of the way Linux names scsi devices, the order in this table has 84 * Because of the way Linux names scsi devices, the order in this table has
@@ -109,36 +120,39 @@ static struct pci_device_id aac_pci_tbl[] = {
109 { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */ 120 { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */
110 { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */ 121 { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
111 { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */ 122 { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
112 { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 32 }, /* Themisto Jupiter Platform */ 123 { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
113 { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 32 }, /* Themisto Jupiter Platform */ 124 { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
114 { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 33 }, /* Callisto Jupiter Platform */ 125 { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
115 { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 34 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ 126 { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
116 { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 35 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ 127 { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
117 { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 36 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ 128 { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
118 { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 37 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ 129 { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
119 { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 38 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ 130 { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
120 { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 39 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ 131 { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
121 { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 40 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ 132 { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
122 { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 41 }, /* AAR-2610SA PCI SATA 6ch */ 133 { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
123 { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 42 }, /* ASR-2240S (SabreExpress) */ 134 { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
124 { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 43 }, /* ASR-4005SAS */ 135 { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
125 { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 44 }, /* IBM 8i (AvonPark) */ 136 { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005SAS */
126 { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 44 }, /* IBM 8i (AvonPark Lite) */ 137 { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
127 { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 45 }, /* ASR-4000SAS (BlackBird) */ 138 { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
128 { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 46 }, /* ASR-4800SAS (Marauder-X) */ 139 { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
129 { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 47 }, /* ASR-4805SAS (Marauder-E) */ 140 { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
130 { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 48 }, /* ASR-4810SAS (Hurricane */ 141 { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000SAS (BlackBird) */
131 142 { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
132 { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 49 }, /* Perc 320/DC*/ 143 { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
133 { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 50 }, /* Adaptec 5400S (Mustang)*/ 144 { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-4810SAS (Hurricane */
134 { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 51 }, /* Adaptec 5400S (Mustang)*/ 145
135 { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 52 }, /* Dell PERC2/QC */ 146 { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
136 { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 53 }, /* HP NetRAID-4M */ 147 { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
137 148 { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
138 { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 54 }, /* Dell Catchall */ 149 { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
139 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 55 }, /* Legend Catchall */ 150 { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
140 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 56 }, /* Adaptec Catch All */ 151
141 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 57 }, /* Adaptec Rocket Catch All */ 152 { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
153 { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
154 { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
155 { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
142 { 0,} 156 { 0,}
143}; 157};
144MODULE_DEVICE_TABLE(pci, aac_pci_tbl); 158MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -180,8 +194,9 @@ static struct aac_driver_ident aac_drivers[] = {
180 { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */ 194 { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
181 { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */ 195 { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
182 { aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */ 196 { aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */
183 { aac_rkt_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */ 197 { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
184 { aac_rkt_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */ 198 { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
199 { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
185 { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */ 200 { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
186 { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */ 201 { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
187 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ 202 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
@@ -195,10 +210,12 @@ static struct aac_driver_ident aac_drivers[] = {
195 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */ 210 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
196 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */ 211 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */
197 { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */ 212 { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
213 { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
214 { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
198 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */ 215 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
199 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */ 216 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
200 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */ 217 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
201 { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */ 218 { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */
202 219
203 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/ 220 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
204 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ 221 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
@@ -839,11 +856,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
839 856
840 return 0; 857 return 0;
841 858
842out_deinit: 859 out_deinit:
843 kill_proc(aac->thread_pid, SIGKILL, 0); 860 kill_proc(aac->thread_pid, SIGKILL, 0);
844 wait_for_completion(&aac->aif_completion); 861 wait_for_completion(&aac->aif_completion);
845 862
846 aac_send_shutdown(aac); 863 aac_send_shutdown(aac);
864 aac_adapter_disable_int(aac);
847 fib_map_free(aac); 865 fib_map_free(aac);
848 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 866 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
849 kfree(aac->queues); 867 kfree(aac->queues);
@@ -860,6 +878,13 @@ out_deinit:
860 return error; 878 return error;
861} 879}
862 880
881static void aac_shutdown(struct pci_dev *dev)
882{
883 struct Scsi_Host *shost = pci_get_drvdata(dev);
884 struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
885 aac_send_shutdown(aac);
886}
887
863static void __devexit aac_remove_one(struct pci_dev *pdev) 888static void __devexit aac_remove_one(struct pci_dev *pdev)
864{ 889{
865 struct Scsi_Host *shost = pci_get_drvdata(pdev); 890 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -871,6 +896,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
871 wait_for_completion(&aac->aif_completion); 896 wait_for_completion(&aac->aif_completion);
872 897
873 aac_send_shutdown(aac); 898 aac_send_shutdown(aac);
899 aac_adapter_disable_int(aac);
874 fib_map_free(aac); 900 fib_map_free(aac);
875 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 901 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
876 aac->comm_phys); 902 aac->comm_phys);
@@ -891,14 +917,15 @@ static struct pci_driver aac_pci_driver = {
891 .id_table = aac_pci_tbl, 917 .id_table = aac_pci_tbl,
892 .probe = aac_probe_one, 918 .probe = aac_probe_one,
893 .remove = __devexit_p(aac_remove_one), 919 .remove = __devexit_p(aac_remove_one),
920 .shutdown = aac_shutdown,
894}; 921};
895 922
896static int __init aac_init(void) 923static int __init aac_init(void)
897{ 924{
898 int error; 925 int error;
899 926
900 printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", 927 printk(KERN_INFO "Adaptec %s driver (%s)\n",
901 AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE); 928 AAC_DRIVERNAME, aac_driver_version);
902 929
903 error = pci_module_init(&aac_pci_driver); 930 error = pci_module_init(&aac_pci_driver);
904 if (error) 931 if (error)
@@ -909,6 +936,7 @@ static int __init aac_init(void)
909 printk(KERN_WARNING 936 printk(KERN_WARNING
910 "aacraid: unable to register \"aac\" device.\n"); 937 "aacraid: unable to register \"aac\" device.\n");
911 } 938 }
939
912 return 0; 940 return 0;
913} 941}
914 942
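The linit.c hunks above add an aac_shutdown() callback and register it as the pci_driver's .shutdown hook, so the adapter firmware is told to flush and quiesce on reboot or poweroff; the probe error path and aac_remove_one() now also mask adapter interrupts first. A rough sketch of the general .shutdown pattern (illustrative only, not the driver's exact code; every example_* name is hypothetical):

#include <linux/pci.h>

struct example_dev {
	int quiesced;				/* driver-private state */
};

static void example_quiesce_hw(struct example_dev *ed)
{
	/* tell the firmware to flush caches and stop issuing DMA */
	ed->quiesced = 1;
}

static void example_shutdown(struct pci_dev *pdev)
{
	struct example_dev *ed = pci_get_drvdata(pdev);

	example_quiesce_hw(ed);			/* mirrors aac_send_shutdown() */
}

static struct pci_driver example_pci_driver = {
	.name     = "example",
	.shutdown = example_shutdown,		/* invoked at reboot/poweroff */
};

The point of the hook is simply that the PCI core calls it for each bound device during system shutdown, giving the driver one last chance to quiesce DMA.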
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 7d68b7825137..557287a0b80b 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -88,6 +88,16 @@ static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
88} 88}
89 89
90/** 90/**
91 * aac_rkt_disable_interrupt - Disable interrupts
92 * @dev: Adapter
93 */
94
95static void aac_rkt_disable_interrupt(struct aac_dev *dev)
96{
97 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
98}
99
100/**
91 * rkt_sync_cmd - send a command and wait 101 * rkt_sync_cmd - send a command and wait
92 * @dev: Adapter 102 * @dev: Adapter
93 * @command: Command to execute 103 * @command: Command to execute
@@ -412,10 +422,19 @@ int aac_rkt_init(struct aac_dev *dev)
412 * Fill in the function dispatch table. 422 * Fill in the function dispatch table.
413 */ 423 */
414 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter; 424 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
425 dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
415 dev->a_ops.adapter_notify = aac_rkt_notify_adapter; 426 dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
416 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd; 427 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
417 dev->a_ops.adapter_check_health = aac_rkt_check_health; 428 dev->a_ops.adapter_check_health = aac_rkt_check_health;
418 429
430 /*
    431 * First clear out all interrupts. Then enable the ones that we
432 * can handle.
433 */
434 rkt_writeb(dev, MUnit.OIMR, 0xff);
435 rkt_writel(dev, MUnit.ODR, 0xffffffff);
436 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
437
419 if (aac_init_adapter(dev) == NULL) 438 if (aac_init_adapter(dev) == NULL)
420 goto error_irq; 439 goto error_irq;
421 /* 440 /*
@@ -438,6 +457,7 @@ error_kfree:
438 kfree(dev->queues); 457 kfree(dev->queues);
439 458
440error_irq: 459error_irq:
460 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
441 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 461 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
442 462
443error_iounmap: 463error_iounmap:
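The rkt.c hunks above (mirrored for rx.c and sa.c below) follow one pattern: publish an adapter_disable_int hook, and during init mask every interrupt source, acknowledge anything already pending, then unmask only the sources the handler services; the error_irq path re-masks everything before free_irq(). A minimal sketch of that bring-up sequence, with hypothetical register offsets and example_* names (the real driver uses its MUnit.OIMR/ODR registers):

#include <asm/io.h>

#define EX_IMASK	0x34		/* hypothetical interrupt mask register */
#define EX_DOORBELL	0x9c		/* hypothetical doorbell/status register */

static void example_init_interrupts(void __iomem *base)
{
	writeb(0xff, base + EX_IMASK);		/* mask all sources */
	writel(0xffffffff, base + EX_DOORBELL);	/* ack anything stale */
	writeb(0xfb, base + EX_IMASK);		/* unmask only what we handle */
}

static void example_disable_interrupts(void __iomem *base)
{
	writeb(0xff, base + EX_IMASK);		/* used by shutdown/error paths */
}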
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 1ff25f49fada..a8459faf87ca 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -88,6 +88,16 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
88} 88}
89 89
90/** 90/**
91 * aac_rx_disable_interrupt - Disable interrupts
92 * @dev: Adapter
93 */
94
95static void aac_rx_disable_interrupt(struct aac_dev *dev)
96{
97 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
98}
99
100/**
91 * rx_sync_cmd - send a command and wait 101 * rx_sync_cmd - send a command and wait
92 * @dev: Adapter 102 * @dev: Adapter
93 * @command: Command to execute 103 * @command: Command to execute
@@ -412,10 +422,19 @@ int aac_rx_init(struct aac_dev *dev)
412 * Fill in the function dispatch table. 422 * Fill in the function dispatch table.
413 */ 423 */
414 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; 424 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
425 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
415 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 426 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
416 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 427 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
417 dev->a_ops.adapter_check_health = aac_rx_check_health; 428 dev->a_ops.adapter_check_health = aac_rx_check_health;
418 429
430 /*
    431 * First clear out all interrupts. Then enable the ones that we
432 * can handle.
433 */
434 rx_writeb(dev, MUnit.OIMR, 0xff);
435 rx_writel(dev, MUnit.ODR, 0xffffffff);
436 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
437
419 if (aac_init_adapter(dev) == NULL) 438 if (aac_init_adapter(dev) == NULL)
420 goto error_irq; 439 goto error_irq;
421 /* 440 /*
@@ -438,6 +457,7 @@ error_kfree:
438 kfree(dev->queues); 457 kfree(dev->queues);
439 458
440error_irq: 459error_irq:
460 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
441 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 461 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
442 462
443error_iounmap: 463error_iounmap:
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 0680249ab861..3900abc5850d 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -82,6 +82,16 @@ static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
82} 82}
83 83
84/** 84/**
85 * aac_sa_disable_interrupt - disable interrupt
    86 * @dev: Which adapter to disable.
87 */
88
89static void aac_sa_disable_interrupt (struct aac_dev *dev)
90{
91 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
92}
93
94/**
85 * aac_sa_notify_adapter - handle adapter notification 95 * aac_sa_notify_adapter - handle adapter notification
86 * @dev: Adapter that notification is for 96 * @dev: Adapter that notification is for
87 * @event: Event to notify 97
@@ -214,9 +224,8 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
214 224
215static void aac_sa_interrupt_adapter (struct aac_dev *dev) 225static void aac_sa_interrupt_adapter (struct aac_dev *dev)
216{ 226{
217 u32 ret;
218 sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, 227 sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
219 &ret, NULL, NULL, NULL, NULL); 228 NULL, NULL, NULL, NULL, NULL);
220} 229}
221 230
222/** 231/**
@@ -352,10 +361,18 @@ int aac_sa_init(struct aac_dev *dev)
352 */ 361 */
353 362
354 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; 363 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
364 dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
355 dev->a_ops.adapter_notify = aac_sa_notify_adapter; 365 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
356 dev->a_ops.adapter_sync_cmd = sa_sync_cmd; 366 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
357 dev->a_ops.adapter_check_health = aac_sa_check_health; 367 dev->a_ops.adapter_check_health = aac_sa_check_health;
358 368
369 /*
    370 * First clear out all interrupts. Then enable the ones that
371 * we can handle.
372 */
373 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
374 sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
375 DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
359 376
360 if(aac_init_adapter(dev) == NULL) 377 if(aac_init_adapter(dev) == NULL)
361 goto error_irq; 378 goto error_irq;
@@ -381,6 +398,7 @@ error_kfree:
381 kfree(dev->queues); 398 kfree(dev->queues);
382 399
383error_irq: 400error_irq:
401 sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
384 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 402 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
385 403
386error_iounmap: 404error_iounmap:
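All three board families end up registering their own disable routine in the a_ops dispatch table, which is what lets the generic probe/remove/shutdown code call aac_adapter_disable_int() without knowing whether it is driving rkt, rx or sa hardware. A hedged sketch of that dispatch-table shape (example_* names are hypothetical, not the aacraid structures):

#include <linux/types.h>

struct example_adapter;

struct example_adapter_ops {
	void (*adapter_interrupt)(struct example_adapter *dev);
	void (*adapter_disable_int)(struct example_adapter *dev);	/* new hook */
	void (*adapter_notify)(struct example_adapter *dev, u32 event);
};

struct example_adapter {
	struct example_adapter_ops a_ops;
};

/* generic code calls through the table and stays hardware-agnostic */
static inline void example_disable_int(struct example_adapter *dev)
{
	dev->a_ops.adapter_disable_int(dev);
}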
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 0fb93363eb22..37ec5411e325 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -9200,8 +9200,8 @@ asc_prt_scsi_cmnd(struct scsi_cmnd *s)
9200 (unsigned) s->serial_number, s->retries, s->allowed); 9200 (unsigned) s->serial_number, s->retries, s->allowed);
9201 9201
9202 printk( 9202 printk(
9203" timeout_per_command %d, timeout_total %d, timeout %d\n", 9203" timeout_per_command %d\n",
9204 s->timeout_per_command, s->timeout_total, s->timeout); 9204 s->timeout_per_command);
9205 9205
9206 printk( 9206 printk(
9207" scsi_done 0x%lx, done 0x%lx, host_scribble 0x%lx, result 0x%x\n", 9207" scsi_done 0x%lx, done 0x%lx, host_scribble 0x%lx, result 0x%x\n",
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 179c95c878ac..320df6cd3def 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -189,7 +189,6 @@ static void ahci_irq_clear(struct ata_port *ap);
189static void ahci_eng_timeout(struct ata_port *ap); 189static void ahci_eng_timeout(struct ata_port *ap);
190static int ahci_port_start(struct ata_port *ap); 190static int ahci_port_start(struct ata_port *ap);
191static void ahci_port_stop(struct ata_port *ap); 191static void ahci_port_stop(struct ata_port *ap);
192static void ahci_host_stop(struct ata_host_set *host_set);
193static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 192static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
194static void ahci_qc_prep(struct ata_queued_cmd *qc); 193static void ahci_qc_prep(struct ata_queued_cmd *qc);
195static u8 ahci_check_status(struct ata_port *ap); 194static u8 ahci_check_status(struct ata_port *ap);
@@ -242,7 +241,6 @@ static struct ata_port_operations ahci_ops = {
242 241
243 .port_start = ahci_port_start, 242 .port_start = ahci_port_start,
244 .port_stop = ahci_port_stop, 243 .port_stop = ahci_port_stop,
245 .host_stop = ahci_host_stop,
246}; 244};
247 245
248static struct ata_port_info ahci_port_info[] = { 246static struct ata_port_info ahci_port_info[] = {
@@ -252,7 +250,7 @@ static struct ata_port_info ahci_port_info[] = {
252 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 250 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
253 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 251 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
254 ATA_FLAG_PIO_DMA, 252 ATA_FLAG_PIO_DMA,
255 .pio_mask = 0x03, /* pio3-4 */ 253 .pio_mask = 0x1f, /* pio0-4 */
256 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 254 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
257 .port_ops = &ahci_ops, 255 .port_ops = &ahci_ops,
258 }, 256 },
@@ -296,17 +294,9 @@ static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int
296 return base + 0x100 + (port * 0x80); 294 return base + 0x100 + (port * 0x80);
297} 295}
298 296
299static inline void *ahci_port_base (void *base, unsigned int port) 297static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
300{ 298{
301 return (void *) ahci_port_base_ul((unsigned long)base, port); 299 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
302}
303
304static void ahci_host_stop(struct ata_host_set *host_set)
305{
306 struct ahci_host_priv *hpriv = host_set->private_data;
307 kfree(hpriv);
308
309 ata_host_stop(host_set);
310} 300}
311 301
312static int ahci_port_start(struct ata_port *ap) 302static int ahci_port_start(struct ata_port *ap)
@@ -314,8 +304,9 @@ static int ahci_port_start(struct ata_port *ap)
314 struct device *dev = ap->host_set->dev; 304 struct device *dev = ap->host_set->dev;
315 struct ahci_host_priv *hpriv = ap->host_set->private_data; 305 struct ahci_host_priv *hpriv = ap->host_set->private_data;
316 struct ahci_port_priv *pp; 306 struct ahci_port_priv *pp;
317 void *mem, *mmio = ap->host_set->mmio_base; 307 void __iomem *mmio = ap->host_set->mmio_base;
318 void *port_mmio = ahci_port_base(mmio, ap->port_no); 308 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
309 void *mem;
319 dma_addr_t mem_dma; 310 dma_addr_t mem_dma;
320 311
321 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 312 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
@@ -383,8 +374,8 @@ static void ahci_port_stop(struct ata_port *ap)
383{ 374{
384 struct device *dev = ap->host_set->dev; 375 struct device *dev = ap->host_set->dev;
385 struct ahci_port_priv *pp = ap->private_data; 376 struct ahci_port_priv *pp = ap->private_data;
386 void *mmio = ap->host_set->mmio_base; 377 void __iomem *mmio = ap->host_set->mmio_base;
387 void *port_mmio = ahci_port_base(mmio, ap->port_no); 378 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
388 u32 tmp; 379 u32 tmp;
389 380
390 tmp = readl(port_mmio + PORT_CMD); 381 tmp = readl(port_mmio + PORT_CMD);
@@ -546,8 +537,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
546 537
547static void ahci_intr_error(struct ata_port *ap, u32 irq_stat) 538static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
548{ 539{
549 void *mmio = ap->host_set->mmio_base; 540 void __iomem *mmio = ap->host_set->mmio_base;
550 void *port_mmio = ahci_port_base(mmio, ap->port_no); 541 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
551 u32 tmp; 542 u32 tmp;
552 int work; 543 int work;
553 544
@@ -595,8 +586,8 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
595static void ahci_eng_timeout(struct ata_port *ap) 586static void ahci_eng_timeout(struct ata_port *ap)
596{ 587{
597 struct ata_host_set *host_set = ap->host_set; 588 struct ata_host_set *host_set = ap->host_set;
598 void *mmio = host_set->mmio_base; 589 void __iomem *mmio = host_set->mmio_base;
599 void *port_mmio = ahci_port_base(mmio, ap->port_no); 590 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
600 struct ata_queued_cmd *qc; 591 struct ata_queued_cmd *qc;
601 unsigned long flags; 592 unsigned long flags;
602 593
@@ -626,8 +617,8 @@ static void ahci_eng_timeout(struct ata_port *ap)
626 617
627static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 618static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
628{ 619{
629 void *mmio = ap->host_set->mmio_base; 620 void __iomem *mmio = ap->host_set->mmio_base;
630 void *port_mmio = ahci_port_base(mmio, ap->port_no); 621 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
631 u32 status, serr, ci; 622 u32 status, serr, ci;
632 623
633 serr = readl(port_mmio + PORT_SCR_ERR); 624 serr = readl(port_mmio + PORT_SCR_ERR);
@@ -663,7 +654,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
663 struct ata_host_set *host_set = dev_instance; 654 struct ata_host_set *host_set = dev_instance;
664 struct ahci_host_priv *hpriv; 655 struct ahci_host_priv *hpriv;
665 unsigned int i, handled = 0; 656 unsigned int i, handled = 0;
666 void *mmio; 657 void __iomem *mmio;
667 u32 irq_stat, irq_ack = 0; 658 u32 irq_stat, irq_ack = 0;
668 659
669 VPRINTK("ENTER\n"); 660 VPRINTK("ENTER\n");
@@ -709,7 +700,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
709static int ahci_qc_issue(struct ata_queued_cmd *qc) 700static int ahci_qc_issue(struct ata_queued_cmd *qc)
710{ 701{
711 struct ata_port *ap = qc->ap; 702 struct ata_port *ap = qc->ap;
712 void *port_mmio = (void *) ap->ioaddr.cmd_addr; 703 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
713 704
714 writel(1, port_mmio + PORT_CMD_ISSUE); 705 writel(1, port_mmio + PORT_CMD_ISSUE);
715 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 706 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
@@ -894,7 +885,7 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
894{ 885{
895 struct ahci_host_priv *hpriv = probe_ent->private_data; 886 struct ahci_host_priv *hpriv = probe_ent->private_data;
896 struct pci_dev *pdev = to_pci_dev(probe_ent->dev); 887 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
897 void *mmio = probe_ent->mmio_base; 888 void __iomem *mmio = probe_ent->mmio_base;
898 u32 vers, cap, impl, speed; 889 u32 vers, cap, impl, speed;
899 const char *speed_s; 890 const char *speed_s;
900 u16 cc; 891 u16 cc;
@@ -967,7 +958,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
967 struct ata_probe_ent *probe_ent = NULL; 958 struct ata_probe_ent *probe_ent = NULL;
968 struct ahci_host_priv *hpriv; 959 struct ahci_host_priv *hpriv;
969 unsigned long base; 960 unsigned long base;
970 void *mmio_base; 961 void __iomem *mmio_base;
971 unsigned int board_idx = (unsigned int) ent->driver_data; 962 unsigned int board_idx = (unsigned int) ent->driver_data;
972 int have_msi, pci_dev_busy = 0; 963 int have_msi, pci_dev_busy = 0;
973 int rc; 964 int rc;
@@ -1004,8 +995,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1004 probe_ent->dev = pci_dev_to_dev(pdev); 995 probe_ent->dev = pci_dev_to_dev(pdev);
1005 INIT_LIST_HEAD(&probe_ent->node); 996 INIT_LIST_HEAD(&probe_ent->node);
1006 997
1007 mmio_base = ioremap(pci_resource_start(pdev, AHCI_PCI_BAR), 998 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1008 pci_resource_len(pdev, AHCI_PCI_BAR));
1009 if (mmio_base == NULL) { 999 if (mmio_base == NULL) {
1010 rc = -ENOMEM; 1000 rc = -ENOMEM;
1011 goto err_out_free_ent; 1001 goto err_out_free_ent;
@@ -1049,7 +1039,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1049err_out_hpriv: 1039err_out_hpriv:
1050 kfree(hpriv); 1040 kfree(hpriv);
1051err_out_iounmap: 1041err_out_iounmap:
1052 iounmap(mmio_base); 1042 pci_iounmap(pdev, mmio_base);
1053err_out_free_ent: 1043err_out_free_ent:
1054 kfree(probe_ent); 1044 kfree(probe_ent);
1055err_out_msi: 1045err_out_msi:
@@ -1089,7 +1079,8 @@ static void ahci_remove_one (struct pci_dev *pdev)
1089 scsi_host_put(ap->host); 1079 scsi_host_put(ap->host);
1090 } 1080 }
1091 1081
1092 host_set->ops->host_stop(host_set); 1082 kfree(hpriv);
1083 pci_iounmap(pdev, host_set->mmio_base);
1093 kfree(host_set); 1084 kfree(host_set);
1094 1085
1095 if (have_msi) 1086 if (have_msi)
@@ -1106,7 +1097,6 @@ static int __init ahci_init(void)
1106 return pci_module_init(&ahci_pci_driver); 1097 return pci_module_init(&ahci_pci_driver);
1107} 1098}
1108 1099
1109
1110static void __exit ahci_exit(void) 1100static void __exit ahci_exit(void)
1111{ 1101{
1112 pci_unregister_driver(&ahci_pci_driver); 1102 pci_unregister_driver(&ahci_pci_driver);
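The ahci.c changes drop the private host_stop hook (the remove path now frees hpriv and unmaps directly), annotate every MMIO pointer as void __iomem *, and replace the ioremap()/iounmap() pair with pci_iomap()/pci_iounmap(). A minimal sketch of that mapping style, assuming a hypothetical EXAMPLE_BAR and example_* helpers:

#include <linux/pci.h>
#include <linux/errno.h>

#define EXAMPLE_BAR	5	/* AHCI registers live in BAR 5; value is illustrative */

static int example_map_regs(struct pci_dev *pdev, void __iomem **mmio)
{
	*mmio = pci_iomap(pdev, EXAMPLE_BAR, 0);	/* 0 = map the whole BAR */
	if (*mmio == NULL)
		return -ENOMEM;
	return 0;
}

static void example_unmap_regs(struct pci_dev *pdev, void __iomem *mmio)
{
	pci_iounmap(pdev, mmio);	/* matches the err_out_iounmap/remove paths */
}

The __iomem qualifier is purely a sparse annotation, so readl()/writel() keep working unchanged; what changes is that mixing the cookie with ordinary pointers now produces a warning.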
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index c2523a30a7f5..69ed77fcb71f 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -5,6 +5,7 @@
5config SCSI_AIC79XX 5config SCSI_AIC79XX
6 tristate "Adaptec AIC79xx U320 support" 6 tristate "Adaptec AIC79xx U320 support"
7 depends on PCI && SCSI 7 depends on PCI && SCSI
8 select SCSI_SPI_ATTRS
8 help 9 help
9 This driver supports all of Adaptec's Ultra 320 PCI-X 10 This driver supports all of Adaptec's Ultra 320 PCI-X
10 based SCSI controllers. 11 based SCSI controllers.
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 00f3bd1e181e..527efd36f5c1 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -126,7 +126,6 @@ aic7770_find_device(uint32_t id)
126int 126int
127aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) 127aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
128{ 128{
129 u_long l;
130 int error; 129 int error;
131 int have_seeprom; 130 int have_seeprom;
132 u_int hostconf; 131 u_int hostconf;
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index fd4b2f3eb0c2..653fb0b42aea 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -1247,9 +1247,6 @@ struct ahd_softc {
1247 uint16_t user_tagenable;/* Tagged Queuing allowed */ 1247 uint16_t user_tagenable;/* Tagged Queuing allowed */
1248}; 1248};
1249 1249
1250TAILQ_HEAD(ahd_softc_tailq, ahd_softc);
1251extern struct ahd_softc_tailq ahd_tailq;
1252
1253/*************************** IO Cell Configuration ****************************/ 1250/*************************** IO Cell Configuration ****************************/
1254#define AHD_PRECOMP_SLEW_INDEX \ 1251#define AHD_PRECOMP_SLEW_INDEX \
1255 (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0) 1252 (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0)
@@ -1374,8 +1371,6 @@ void ahd_enable_coalescing(struct ahd_softc *ahd,
1374void ahd_pause_and_flushwork(struct ahd_softc *ahd); 1371void ahd_pause_and_flushwork(struct ahd_softc *ahd);
1375int ahd_suspend(struct ahd_softc *ahd); 1372int ahd_suspend(struct ahd_softc *ahd);
1376int ahd_resume(struct ahd_softc *ahd); 1373int ahd_resume(struct ahd_softc *ahd);
1377void ahd_softc_insert(struct ahd_softc *);
1378struct ahd_softc *ahd_find_softc(struct ahd_softc *ahd);
1379void ahd_set_unit(struct ahd_softc *, int); 1374void ahd_set_unit(struct ahd_softc *, int);
1380void ahd_set_name(struct ahd_softc *, char *); 1375void ahd_set_name(struct ahd_softc *, char *);
1381struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx); 1376struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);
@@ -1524,7 +1519,6 @@ void ahd_print_scb(struct scb *scb);
1524void ahd_print_devinfo(struct ahd_softc *ahd, 1519void ahd_print_devinfo(struct ahd_softc *ahd,
1525 struct ahd_devinfo *devinfo); 1520 struct ahd_devinfo *devinfo);
1526void ahd_dump_sglist(struct scb *scb); 1521void ahd_dump_sglist(struct scb *scb);
1527void ahd_dump_all_cards_state(void);
1528void ahd_dump_card_state(struct ahd_softc *ahd); 1522void ahd_dump_card_state(struct ahd_softc *ahd);
1529int ahd_print_register(ahd_reg_parse_entry_t *table, 1523int ahd_print_register(ahd_reg_parse_entry_t *table,
1530 u_int num_entries, 1524 u_int num_entries,
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 137fb1a37dd1..4e8f00df978d 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -52,8 +52,6 @@
52#include <dev/aic7xxx/aicasm/aicasm_insformat.h> 52#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
53#endif 53#endif
54 54
55/******************************** Globals *************************************/
56struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
57 55
58/***************************** Lookup Tables **********************************/ 56/***************************** Lookup Tables **********************************/
59char *ahd_chip_names[] = 57char *ahd_chip_names[] =
@@ -5180,74 +5178,6 @@ ahd_softc_init(struct ahd_softc *ahd)
5180} 5178}
5181 5179
5182void 5180void
5183ahd_softc_insert(struct ahd_softc *ahd)
5184{
5185 struct ahd_softc *list_ahd;
5186
5187#if AHD_PCI_CONFIG > 0
5188 /*
5189 * Second Function PCI devices need to inherit some
5190 * settings from function 0.
5191 */
5192 if ((ahd->features & AHD_MULTI_FUNC) != 0) {
5193 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5194 ahd_dev_softc_t list_pci;
5195 ahd_dev_softc_t pci;
5196
5197 list_pci = list_ahd->dev_softc;
5198 pci = ahd->dev_softc;
5199 if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci)
5200 && ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) {
5201 struct ahd_softc *master;
5202 struct ahd_softc *slave;
5203
5204 if (ahd_get_pci_function(list_pci) == 0) {
5205 master = list_ahd;
5206 slave = ahd;
5207 } else {
5208 master = ahd;
5209 slave = list_ahd;
5210 }
5211 slave->flags &= ~AHD_BIOS_ENABLED;
5212 slave->flags |=
5213 master->flags & AHD_BIOS_ENABLED;
5214 break;
5215 }
5216 }
5217 }
5218#endif
5219
5220 /*
5221 * Insertion sort into our list of softcs.
5222 */
5223 list_ahd = TAILQ_FIRST(&ahd_tailq);
5224 while (list_ahd != NULL
5225 && ahd_softc_comp(ahd, list_ahd) <= 0)
5226 list_ahd = TAILQ_NEXT(list_ahd, links);
5227 if (list_ahd != NULL)
5228 TAILQ_INSERT_BEFORE(list_ahd, ahd, links);
5229 else
5230 TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links);
5231 ahd->init_level++;
5232}
5233
5234/*
5235 * Verify that the passed in softc pointer is for a
5236 * controller that is still configured.
5237 */
5238struct ahd_softc *
5239ahd_find_softc(struct ahd_softc *ahd)
5240{
5241 struct ahd_softc *list_ahd;
5242
5243 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5244 if (list_ahd == ahd)
5245 return (ahd);
5246 }
5247 return (NULL);
5248}
5249
5250void
5251ahd_set_unit(struct ahd_softc *ahd, int unit) 5181ahd_set_unit(struct ahd_softc *ahd, int unit)
5252{ 5182{
5253 ahd->unit = unit; 5183 ahd->unit = unit;
@@ -7902,18 +7832,10 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
7902static void 7832static void
7903ahd_reset_poll(void *arg) 7833ahd_reset_poll(void *arg)
7904{ 7834{
7905 struct ahd_softc *ahd; 7835 struct ahd_softc *ahd = arg;
7906 u_int scsiseq1; 7836 u_int scsiseq1;
7907 u_long l;
7908 u_long s; 7837 u_long s;
7909 7838
7910 ahd_list_lock(&l);
7911 ahd = ahd_find_softc((struct ahd_softc *)arg);
7912 if (ahd == NULL) {
7913 printf("ahd_reset_poll: Instance %p no longer exists\n", arg);
7914 ahd_list_unlock(&l);
7915 return;
7916 }
7917 ahd_lock(ahd, &s); 7839 ahd_lock(ahd, &s);
7918 ahd_pause(ahd); 7840 ahd_pause(ahd);
7919 ahd_update_modes(ahd); 7841 ahd_update_modes(ahd);
@@ -7924,7 +7846,6 @@ ahd_reset_poll(void *arg)
7924 ahd_reset_poll, ahd); 7846 ahd_reset_poll, ahd);
7925 ahd_unpause(ahd); 7847 ahd_unpause(ahd);
7926 ahd_unlock(ahd, &s); 7848 ahd_unlock(ahd, &s);
7927 ahd_list_unlock(&l);
7928 return; 7849 return;
7929 } 7850 }
7930 7851
@@ -7936,25 +7857,16 @@ ahd_reset_poll(void *arg)
7936 ahd->flags &= ~AHD_RESET_POLL_ACTIVE; 7857 ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
7937 ahd_unlock(ahd, &s); 7858 ahd_unlock(ahd, &s);
7938 ahd_release_simq(ahd); 7859 ahd_release_simq(ahd);
7939 ahd_list_unlock(&l);
7940} 7860}
7941 7861
7942/**************************** Statistics Processing ***************************/ 7862/**************************** Statistics Processing ***************************/
7943static void 7863static void
7944ahd_stat_timer(void *arg) 7864ahd_stat_timer(void *arg)
7945{ 7865{
7946 struct ahd_softc *ahd; 7866 struct ahd_softc *ahd = arg;
7947 u_long l;
7948 u_long s; 7867 u_long s;
7949 int enint_coal; 7868 int enint_coal;
7950 7869
7951 ahd_list_lock(&l);
7952 ahd = ahd_find_softc((struct ahd_softc *)arg);
7953 if (ahd == NULL) {
7954 printf("ahd_stat_timer: Instance %p no longer exists\n", arg);
7955 ahd_list_unlock(&l);
7956 return;
7957 }
7958 ahd_lock(ahd, &s); 7870 ahd_lock(ahd, &s);
7959 7871
7960 enint_coal = ahd->hs_mailbox & ENINT_COALESCE; 7872 enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
@@ -7981,7 +7893,6 @@ ahd_stat_timer(void *arg)
7981 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, 7893 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
7982 ahd_stat_timer, ahd); 7894 ahd_stat_timer, ahd);
7983 ahd_unlock(ahd, &s); 7895 ahd_unlock(ahd, &s);
7984 ahd_list_unlock(&l);
7985} 7896}
7986 7897
7987/****************************** Status Processing *****************************/ 7898/****************************** Status Processing *****************************/
@@ -8745,16 +8656,6 @@ sized:
8745 return (last_probe); 8656 return (last_probe);
8746} 8657}
8747 8658
8748void
8749ahd_dump_all_cards_state(void)
8750{
8751 struct ahd_softc *list_ahd;
8752
8753 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
8754 ahd_dump_card_state(list_ahd);
8755 }
8756}
8757
8758int 8659int
8759ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, 8660ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
8760 const char *name, u_int address, u_int value, 8661 const char *name, u_int address, u_int value,
@@ -9039,7 +8940,6 @@ ahd_dump_card_state(struct ahd_softc *ahd)
9039 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); 8940 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
9040 } 8941 }
9041 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 8942 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
9042 ahd_platform_dump_card_state(ahd);
9043 ahd_restore_modes(ahd, saved_modes); 8943 ahd_restore_modes(ahd, saved_modes);
9044 if (paused == 0) 8944 if (paused == 0)
9045 ahd_unpause(ahd); 8945 ahd_unpause(ahd);
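The aic79xx_core.c hunks delete the global ahd_tailq list, ahd_softc_insert()/ahd_find_softc() and the ahd_list_lock dance; ahd_reset_poll() and ahd_stat_timer() now simply trust the softc pointer passed as their argument. A hedged sketch of that callback style, written against the 2.6-era timer API this tree already uses (example_* names are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_softc {
	struct timer_list poll_timer;
};

static void example_poll(unsigned long arg)
{
	struct example_softc *sc = (struct example_softc *)arg;

	/* operate on sc directly; no global list lookup, no list lock */
	mod_timer(&sc->poll_timer, jiffies + HZ / 10);	/* re-arm */
}

static void example_start_poll(struct example_softc *sc)
{
	init_timer(&sc->poll_timer);
	sc->poll_timer.function = example_poll;
	sc->poll_timer.data     = (unsigned long)sc;
	sc->poll_timer.expires  = jiffies + HZ / 10;
	add_timer(&sc->poll_timer);
}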
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 329cb2331339..6b6d4e287793 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -46,32 +46,14 @@
46#include "aic79xx_inline.h" 46#include "aic79xx_inline.h"
47#include <scsi/scsicam.h> 47#include <scsi/scsicam.h>
48 48
49/* 49static struct scsi_transport_template *ahd_linux_transport_template = NULL;
50 * Include aiclib.c as part of our
51 * "module dependencies are hard" work around.
52 */
53#include "aiclib.c"
54 50
55#include <linux/init.h> /* __setup */ 51#include <linux/init.h> /* __setup */
56
57#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
58#include "sd.h" /* For geometry detection */
59#endif
60
61#include <linux/mm.h> /* For fetching system memory size */ 52#include <linux/mm.h> /* For fetching system memory size */
53#include <linux/blkdev.h> /* For block_size() */
62#include <linux/delay.h> /* For ssleep/msleep */ 54#include <linux/delay.h> /* For ssleep/msleep */
63 55
64/* 56/*
65 * Lock protecting manipulation of the ahd softc list.
66 */
67spinlock_t ahd_list_spinlock;
68
69#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
70/* For dynamic sglist size calculation. */
71u_int ahd_linux_nseg;
72#endif
73
74/*
75 * Bucket size for counting good commands in between bad ones. 57 * Bucket size for counting good commands in between bad ones.
76 */ 58 */
77#define AHD_LINUX_ERR_THRESH 1000 59#define AHD_LINUX_ERR_THRESH 1000
@@ -188,71 +170,6 @@ static adapter_tag_info_t aic79xx_tag_info[] =
188}; 170};
189 171
190/* 172/*
191 * By default, read streaming is disabled. In theory,
192 * read streaming should enhance performance, but early
193 * U320 drive firmware actually performs slower with
194 * read streaming enabled.
195 */
196#ifdef CONFIG_AIC79XX_ENABLE_RD_STRM
197#define AIC79XX_CONFIGED_RD_STRM 0xFFFF
198#else
199#define AIC79XX_CONFIGED_RD_STRM 0
200#endif
201
202static uint16_t aic79xx_rd_strm_info[] =
203{
204 AIC79XX_CONFIGED_RD_STRM,
205 AIC79XX_CONFIGED_RD_STRM,
206 AIC79XX_CONFIGED_RD_STRM,
207 AIC79XX_CONFIGED_RD_STRM,
208 AIC79XX_CONFIGED_RD_STRM,
209 AIC79XX_CONFIGED_RD_STRM,
210 AIC79XX_CONFIGED_RD_STRM,
211 AIC79XX_CONFIGED_RD_STRM,
212 AIC79XX_CONFIGED_RD_STRM,
213 AIC79XX_CONFIGED_RD_STRM,
214 AIC79XX_CONFIGED_RD_STRM,
215 AIC79XX_CONFIGED_RD_STRM,
216 AIC79XX_CONFIGED_RD_STRM,
217 AIC79XX_CONFIGED_RD_STRM,
218 AIC79XX_CONFIGED_RD_STRM,
219 AIC79XX_CONFIGED_RD_STRM
220};
221
222/*
223 * DV option:
224 *
225 * positive value = DV Enabled
226 * zero = DV Disabled
227 * negative value = DV Default for adapter type/seeprom
228 */
229#ifdef CONFIG_AIC79XX_DV_SETTING
230#define AIC79XX_CONFIGED_DV CONFIG_AIC79XX_DV_SETTING
231#else
232#define AIC79XX_CONFIGED_DV -1
233#endif
234
235static int8_t aic79xx_dv_settings[] =
236{
237 AIC79XX_CONFIGED_DV,
238 AIC79XX_CONFIGED_DV,
239 AIC79XX_CONFIGED_DV,
240 AIC79XX_CONFIGED_DV,
241 AIC79XX_CONFIGED_DV,
242 AIC79XX_CONFIGED_DV,
243 AIC79XX_CONFIGED_DV,
244 AIC79XX_CONFIGED_DV,
245 AIC79XX_CONFIGED_DV,
246 AIC79XX_CONFIGED_DV,
247 AIC79XX_CONFIGED_DV,
248 AIC79XX_CONFIGED_DV,
249 AIC79XX_CONFIGED_DV,
250 AIC79XX_CONFIGED_DV,
251 AIC79XX_CONFIGED_DV,
252 AIC79XX_CONFIGED_DV
253};
254
255/*
256 * The I/O cell on the chip is very configurable in respect to its analog 173 * The I/O cell on the chip is very configurable in respect to its analog
257 * characteristics. Set the defaults here; they can be overridden with 174
258 * the proper insmod parameters. 175 * the proper insmod parameters.
@@ -375,13 +292,6 @@ static uint32_t aic79xx_pci_parity = ~0;
375uint32_t aic79xx_allow_memio = ~0; 292uint32_t aic79xx_allow_memio = ~0;
376 293
377/* 294/*
378 * aic79xx_detect() has been run, so register all device arrivals
379 * immediately with the system rather than deferring to the sorted
380 * attachment performed by aic79xx_detect().
381 */
382int aic79xx_detect_complete;
383
384/*
385 * So that we can set how long each device is given as a selection timeout. 295 * So that we can set how long each device is given as a selection timeout.
386 * The table of values goes like this: 296 * The table of values goes like this:
387 * 0 - 256ms 297 * 0 - 256ms
@@ -412,7 +322,7 @@ MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
412MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver"); 322MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver");
413MODULE_LICENSE("Dual BSD/GPL"); 323MODULE_LICENSE("Dual BSD/GPL");
414MODULE_VERSION(AIC79XX_DRIVER_VERSION); 324MODULE_VERSION(AIC79XX_DRIVER_VERSION);
415module_param(aic79xx, charp, 0); 325module_param(aic79xx, charp, 0444);
416MODULE_PARM_DESC(aic79xx, 326MODULE_PARM_DESC(aic79xx,
417"period delimited, options string.\n" 327"period delimited, options string.\n"
418" verbose Enable verbose/diagnostic logging\n" 328" verbose Enable verbose/diagnostic logging\n"
@@ -427,8 +337,6 @@ MODULE_PARM_DESC(aic79xx,
427" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" 337" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
428" tag_info:<tag_str> Set per-target tag depth\n" 338" tag_info:<tag_str> Set per-target tag depth\n"
429" global_tag_depth:<int> Global tag depth for all targets on all buses\n" 339" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
430" rd_strm:<rd_strm_masks> Set per-target read streaming setting.\n"
431" dv:<dv_settings> Set per-controller Domain Validation Setting.\n"
432" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" 340" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
433" precomp:<pcomp_list> Set the signal precompensation (0-7).\n" 341" precomp:<pcomp_list> Set the signal precompensation (0-7).\n"
434" amplitude:<int> Set the signal amplitude (0-7).\n" 342" amplitude:<int> Set the signal amplitude (0-7).\n"
@@ -441,249 +349,35 @@ MODULE_PARM_DESC(aic79xx,
441" Shorten the selection timeout to 128ms\n" 349" Shorten the selection timeout to 128ms\n"
442"\n" 350"\n"
443" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" 351" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
444"\n" 352"\n");
445" Sample /etc/modprobe.conf line:\n"
446" Change Read Streaming for Controller's 2 and 3\n"
447"\n"
448" options aic79xx 'aic79xx=rd_strm:{..0xFFF0.0xC0F0}'");
449 353
450static void ahd_linux_handle_scsi_status(struct ahd_softc *, 354static void ahd_linux_handle_scsi_status(struct ahd_softc *,
451 struct ahd_linux_device *, 355 struct scsi_device *,
452 struct scb *); 356 struct scb *);
453static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, 357static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
454 Scsi_Cmnd *cmd); 358 struct scsi_cmnd *cmd);
455static void ahd_linux_filter_inquiry(struct ahd_softc *ahd,
456 struct ahd_devinfo *devinfo);
457static void ahd_linux_dev_timed_unfreeze(u_long arg);
458static void ahd_linux_sem_timeout(u_long arg); 359static void ahd_linux_sem_timeout(u_long arg);
360static int ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
459static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd); 361static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
460static void ahd_linux_size_nseg(void);
461static void ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd);
462static void ahd_linux_start_dv(struct ahd_softc *ahd);
463static void ahd_linux_dv_timeout(struct scsi_cmnd *cmd);
464static int ahd_linux_dv_thread(void *data);
465static void ahd_linux_kill_dv_thread(struct ahd_softc *ahd);
466static void ahd_linux_dv_target(struct ahd_softc *ahd, u_int target);
467static void ahd_linux_dv_transition(struct ahd_softc *ahd,
468 struct scsi_cmnd *cmd,
469 struct ahd_devinfo *devinfo,
470 struct ahd_linux_target *targ);
471static void ahd_linux_dv_fill_cmd(struct ahd_softc *ahd,
472 struct scsi_cmnd *cmd,
473 struct ahd_devinfo *devinfo);
474static void ahd_linux_dv_inq(struct ahd_softc *ahd,
475 struct scsi_cmnd *cmd,
476 struct ahd_devinfo *devinfo,
477 struct ahd_linux_target *targ,
478 u_int request_length);
479static void ahd_linux_dv_tur(struct ahd_softc *ahd,
480 struct scsi_cmnd *cmd,
481 struct ahd_devinfo *devinfo);
482static void ahd_linux_dv_rebd(struct ahd_softc *ahd,
483 struct scsi_cmnd *cmd,
484 struct ahd_devinfo *devinfo,
485 struct ahd_linux_target *targ);
486static void ahd_linux_dv_web(struct ahd_softc *ahd,
487 struct scsi_cmnd *cmd,
488 struct ahd_devinfo *devinfo,
489 struct ahd_linux_target *targ);
490static void ahd_linux_dv_reb(struct ahd_softc *ahd,
491 struct scsi_cmnd *cmd,
492 struct ahd_devinfo *devinfo,
493 struct ahd_linux_target *targ);
494static void ahd_linux_dv_su(struct ahd_softc *ahd,
495 struct scsi_cmnd *cmd,
496 struct ahd_devinfo *devinfo,
497 struct ahd_linux_target *targ);
498static int ahd_linux_fallback(struct ahd_softc *ahd,
499 struct ahd_devinfo *devinfo);
500static __inline int ahd_linux_dv_fallback(struct ahd_softc *ahd,
501 struct ahd_devinfo *devinfo);
502static void ahd_linux_dv_complete(Scsi_Cmnd *cmd);
503static void ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ);
504static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd, 362static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
505 struct ahd_devinfo *devinfo); 363 struct ahd_devinfo *devinfo);
506static u_int ahd_linux_user_dv_setting(struct ahd_softc *ahd); 364static void ahd_linux_device_queue_depth(struct scsi_device *);
507static void ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd); 365static int ahd_linux_run_command(struct ahd_softc*,
508static void ahd_linux_device_queue_depth(struct ahd_softc *ahd, 366 struct ahd_linux_device *,
509 struct ahd_linux_device *dev); 367 struct scsi_cmnd *);
510static struct ahd_linux_target* ahd_linux_alloc_target(struct ahd_softc*,
511 u_int, u_int);
512static void ahd_linux_free_target(struct ahd_softc*,
513 struct ahd_linux_target*);
514static struct ahd_linux_device* ahd_linux_alloc_device(struct ahd_softc*,
515 struct ahd_linux_target*,
516 u_int);
517static void ahd_linux_free_device(struct ahd_softc*,
518 struct ahd_linux_device*);
519static void ahd_linux_run_device_queue(struct ahd_softc*,
520 struct ahd_linux_device*);
521static void ahd_linux_setup_tag_info_global(char *p); 368static void ahd_linux_setup_tag_info_global(char *p);
522static aic_option_callback_t ahd_linux_setup_tag_info; 369static int aic79xx_setup(char *c);
523static aic_option_callback_t ahd_linux_setup_rd_strm_info;
524static aic_option_callback_t ahd_linux_setup_dv;
525static aic_option_callback_t ahd_linux_setup_iocell_info;
526static int ahd_linux_next_unit(void);
527static void ahd_runq_tasklet(unsigned long data);
528static int aic79xx_setup(char *c);
529
530/****************************** Inlines ***************************************/
531static __inline void ahd_schedule_completeq(struct ahd_softc *ahd);
532static __inline void ahd_schedule_runq(struct ahd_softc *ahd);
533static __inline void ahd_setup_runq_tasklet(struct ahd_softc *ahd);
534static __inline void ahd_teardown_runq_tasklet(struct ahd_softc *ahd);
535static __inline struct ahd_linux_device*
536 ahd_linux_get_device(struct ahd_softc *ahd, u_int channel,
537 u_int target, u_int lun, int alloc);
538static struct ahd_cmd *ahd_linux_run_complete_queue(struct ahd_softc *ahd);
539static __inline void ahd_linux_check_device_queue(struct ahd_softc *ahd,
540 struct ahd_linux_device *dev);
541static __inline struct ahd_linux_device *
542 ahd_linux_next_device_to_run(struct ahd_softc *ahd);
543static __inline void ahd_linux_run_device_queues(struct ahd_softc *ahd);
544static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
545
546static __inline void
547ahd_schedule_completeq(struct ahd_softc *ahd)
548{
549 if ((ahd->platform_data->flags & AHD_RUN_CMPLT_Q_TIMER) == 0) {
550 ahd->platform_data->flags |= AHD_RUN_CMPLT_Q_TIMER;
551 ahd->platform_data->completeq_timer.expires = jiffies;
552 add_timer(&ahd->platform_data->completeq_timer);
553 }
554}
555
556/*
557 * Must be called with our lock held.
558 */
559static __inline void
560ahd_schedule_runq(struct ahd_softc *ahd)
561{
562 tasklet_schedule(&ahd->platform_data->runq_tasklet);
563}
564
565static __inline
566void ahd_setup_runq_tasklet(struct ahd_softc *ahd)
567{
568 tasklet_init(&ahd->platform_data->runq_tasklet, ahd_runq_tasklet,
569 (unsigned long)ahd);
570}
571 370
572static __inline void 371static int ahd_linux_unit;
573ahd_teardown_runq_tasklet(struct ahd_softc *ahd)
574{
575 tasklet_kill(&ahd->platform_data->runq_tasklet);
576}
577 372
578static __inline struct ahd_linux_device*
579ahd_linux_get_device(struct ahd_softc *ahd, u_int channel, u_int target,
580 u_int lun, int alloc)
581{
582 struct ahd_linux_target *targ;
583 struct ahd_linux_device *dev;
584 u_int target_offset;
585 373
586 target_offset = target; 374/****************************** Inlines ***************************************/
587 if (channel != 0) 375static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
588 target_offset += 8;
589 targ = ahd->platform_data->targets[target_offset];
590 if (targ == NULL) {
591 if (alloc != 0) {
592 targ = ahd_linux_alloc_target(ahd, channel, target);
593 if (targ == NULL)
594 return (NULL);
595 } else
596 return (NULL);
597 }
598 dev = targ->devices[lun];
599 if (dev == NULL && alloc != 0)
600 dev = ahd_linux_alloc_device(ahd, targ, lun);
601 return (dev);
602}
603
604#define AHD_LINUX_MAX_RETURNED_ERRORS 4
605static struct ahd_cmd *
606ahd_linux_run_complete_queue(struct ahd_softc *ahd)
607{
608 struct ahd_cmd *acmd;
609 u_long done_flags;
610 int with_errors;
611
612 with_errors = 0;
613 ahd_done_lock(ahd, &done_flags);
614 while ((acmd = TAILQ_FIRST(&ahd->platform_data->completeq)) != NULL) {
615 Scsi_Cmnd *cmd;
616
617 if (with_errors > AHD_LINUX_MAX_RETURNED_ERRORS) {
618 /*
619 * Linux uses stack recursion to requeue
620 * commands that need to be retried. Avoid
621 * blowing out the stack by "spoon feeding"
622 * commands that completed with error back
623 * the operating system in case they are going
624 * to be retried. "ick"
625 */
626 ahd_schedule_completeq(ahd);
627 break;
628 }
629 TAILQ_REMOVE(&ahd->platform_data->completeq,
630 acmd, acmd_links.tqe);
631 cmd = &acmd_scsi_cmd(acmd);
632 cmd->host_scribble = NULL;
633 if (ahd_cmd_get_transaction_status(cmd) != DID_OK
634 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
635 with_errors++;
636
637 cmd->scsi_done(cmd);
638 }
639 ahd_done_unlock(ahd, &done_flags);
640 return (acmd);
641}
642
643static __inline void
644ahd_linux_check_device_queue(struct ahd_softc *ahd,
645 struct ahd_linux_device *dev)
646{
647 if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) != 0
648 && dev->active == 0) {
649 dev->flags &= ~AHD_DEV_FREEZE_TIL_EMPTY;
650 dev->qfrozen--;
651 }
652
653 if (TAILQ_FIRST(&dev->busyq) == NULL
654 || dev->openings == 0 || dev->qfrozen != 0)
655 return;
656
657 ahd_linux_run_device_queue(ahd, dev);
658}
659
660static __inline struct ahd_linux_device *
661ahd_linux_next_device_to_run(struct ahd_softc *ahd)
662{
663
664 if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0
665 || (ahd->platform_data->qfrozen != 0
666 && AHD_DV_SIMQ_FROZEN(ahd) == 0))
667 return (NULL);
668 return (TAILQ_FIRST(&ahd->platform_data->device_runq));
669}
670
671static __inline void
672ahd_linux_run_device_queues(struct ahd_softc *ahd)
673{
674 struct ahd_linux_device *dev;
675
676 while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
677 TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
678 dev->flags &= ~AHD_DEV_ON_RUN_LIST;
679 ahd_linux_check_device_queue(ahd, dev);
680 }
681}
682 376
683static __inline void 377static __inline void
684ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 378ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
685{ 379{
686 Scsi_Cmnd *cmd; 380 struct scsi_cmnd *cmd;
687 int direction; 381 int direction;
688 382
689 cmd = scb->io_ctx; 383 cmd = scb->io_ctx;
@@ -705,197 +399,6 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
705#define BUILD_SCSIID(ahd, cmd) \ 399#define BUILD_SCSIID(ahd, cmd) \
706 ((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id) 400 ((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)
707 401
708/************************ Host template entry points *************************/
709static int ahd_linux_detect(Scsi_Host_Template *);
710static const char *ahd_linux_info(struct Scsi_Host *);
711static int ahd_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
712#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
713static int ahd_linux_slave_alloc(Scsi_Device *);
714static int ahd_linux_slave_configure(Scsi_Device *);
715static void ahd_linux_slave_destroy(Scsi_Device *);
716#if defined(__i386__)
717static int ahd_linux_biosparam(struct scsi_device*,
718 struct block_device*, sector_t, int[]);
719#endif
720#else
721static int ahd_linux_release(struct Scsi_Host *);
722static void ahd_linux_select_queue_depth(struct Scsi_Host *host,
723 Scsi_Device *scsi_devs);
724#if defined(__i386__)
725static int ahd_linux_biosparam(Disk *, kdev_t, int[]);
726#endif
727#endif
728static int ahd_linux_bus_reset(Scsi_Cmnd *);
729static int ahd_linux_dev_reset(Scsi_Cmnd *);
730static int ahd_linux_abort(Scsi_Cmnd *);
731
732/*
733 * Calculate a safe value for AHD_NSEG (as expressed through ahd_linux_nseg).
734 *
735 * In pre-2.5.X...
736 * The midlayer allocates an S/G array dynamically when a command is issued
737 * using SCSI malloc. This array, which is in an OS dependent format that
738 * must later be copied to our private S/G list, is sized to house just the
739 * number of segments needed for the current transfer. Since the code that
740 * sizes the SCSI malloc pool does not take into consideration fragmentation
741 * of the pool, executing transactions numbering just a fraction of our
742 * concurrent transaction limit with SG list lengths aproaching AHC_NSEG will
743 * quickly depleat the SCSI malloc pool of usable space. Unfortunately, the
744 * mid-layer does not properly handle this scsi malloc failures for the S/G
745 * array and the result can be a lockup of the I/O subsystem. We try to size
746 * our S/G list so that it satisfies our drivers allocation requirements in
747 * addition to avoiding fragmentation of the SCSI malloc pool.
748 */
749static void
750ahd_linux_size_nseg(void)
751{
752#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
753 u_int cur_size;
754 u_int best_size;
755
756 /*
757 * The SCSI allocator rounds to the nearest 512 bytes
758 * an cannot allocate across a page boundary. Our algorithm
759 * is to start at 1K of scsi malloc space per-command and
760 * loop through all factors of the PAGE_SIZE and pick the best.
761 */
762 best_size = 0;
763 for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
764 u_int nseg;
765
766 nseg = cur_size / sizeof(struct scatterlist);
767 if (nseg < AHD_LINUX_MIN_NSEG)
768 continue;
769
770 if (best_size == 0) {
771 best_size = cur_size;
772 ahd_linux_nseg = nseg;
773 } else {
774 u_int best_rem;
775 u_int cur_rem;
776
777 /*
778 * Compare the traits of the current "best_size"
779 * with the current size to determine if the
780 * current size is a better size.
781 */
782 best_rem = best_size % sizeof(struct scatterlist);
783 cur_rem = cur_size % sizeof(struct scatterlist);
784 if (cur_rem < best_rem) {
785 best_size = cur_size;
786 ahd_linux_nseg = nseg;
787 }
788 }
789 }
790#endif
791}
792
793/*
794 * Try to detect an Adaptec 79XX controller.
795 */
796static int
797ahd_linux_detect(Scsi_Host_Template *template)
798{
799 struct ahd_softc *ahd;
800 int found;
801 int error = 0;
802
803#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
804 /*
805 * It is a bug that the upper layer takes
806 * this lock just prior to calling us.
807 */
808 spin_unlock_irq(&io_request_lock);
809#endif
810
811 /*
812 * Sanity checking of Linux SCSI data structures so
813 * that some of our hacks^H^H^H^H^Hassumptions aren't
814 * violated.
815 */
816 if (offsetof(struct ahd_cmd_internal, end)
817 > offsetof(struct scsi_cmnd, host_scribble)) {
818 printf("ahd_linux_detect: SCSI data structures changed.\n");
819 printf("ahd_linux_detect: Unable to attach\n");
820 return (0);
821 }
822 /*
823 * Determine an appropriate size for our Scatter Gatther lists.
824 */
825 ahd_linux_size_nseg();
826#ifdef MODULE
827 /*
828 * If we've been passed any parameters, process them now.
829 */
830 if (aic79xx)
831 aic79xx_setup(aic79xx);
832#endif
833
834 template->proc_name = "aic79xx";
835
836 /*
837 * Initialize our softc list lock prior to
838 * probing for any adapters.
839 */
840 ahd_list_lockinit();
841
842#ifdef CONFIG_PCI
843 error = ahd_linux_pci_init();
844 if (error)
845 return error;
846#endif
847
848 /*
849 * Register with the SCSI layer all
850 * controllers we've found.
851 */
852 found = 0;
853 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
854
855 if (ahd_linux_register_host(ahd, template) == 0)
856 found++;
857 }
858#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
859 spin_lock_irq(&io_request_lock);
860#endif
861 aic79xx_detect_complete++;
862 return 0;
863}
864
865#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
866/*
867 * Free the passed in Scsi_Host memory structures prior to unloading the
868 * module.
869 */
870static int
871ahd_linux_release(struct Scsi_Host * host)
872{
873 struct ahd_softc *ahd;
874 u_long l;
875
876 ahd_list_lock(&l);
877 if (host != NULL) {
878
879 /*
880 * We should be able to just perform
881 * the free directly, but check our
882 * list for extra sanity.
883 */
884 ahd = ahd_find_softc(*(struct ahd_softc **)host->hostdata);
885 if (ahd != NULL) {
886 u_long s;
887
888 ahd_lock(ahd, &s);
889 ahd_intr_enable(ahd, FALSE);
890 ahd_unlock(ahd, &s);
891 ahd_free(ahd);
892 }
893 }
894 ahd_list_unlock(&l);
895 return (0);
896}
897#endif
898
899/* 402/*
900 * Return a string describing the driver. 403 * Return a string describing the driver.
901 */ 404 */
@@ -928,220 +431,177 @@ ahd_linux_info(struct Scsi_Host *host)
928 * Queue an SCB to the controller. 431 * Queue an SCB to the controller.
929 */ 432 */
930static int 433static int
931ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *)) 434ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
932{ 435{
933 struct ahd_softc *ahd; 436 struct ahd_softc *ahd;
934 struct ahd_linux_device *dev; 437 struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
935 u_long flags;
936 438
937 ahd = *(struct ahd_softc **)cmd->device->host->hostdata; 439 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
938 440
939 /* 441 /*
940 * Save the callback on completion function.
941 */
942 cmd->scsi_done = scsi_done;
943
944 ahd_midlayer_entrypoint_lock(ahd, &flags);
945
946 /*
947 * Close the race of a command that was in the process of 442 * Close the race of a command that was in the process of
948 * being queued to us just as our simq was frozen. Let 443 * being queued to us just as our simq was frozen. Let
949 * DV commands through so long as we are only frozen to 444 * DV commands through so long as we are only frozen to
950 * perform DV. 445 * perform DV.
951 */ 446 */
952 if (ahd->platform_data->qfrozen != 0 447 if (ahd->platform_data->qfrozen != 0) {
953 && AHD_DV_CMD(cmd) == 0) { 448 printf("%s: queue frozen\n", ahd_name(ahd));
954 449
955 ahd_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ); 450 return SCSI_MLQUEUE_HOST_BUSY;
956 ahd_linux_queue_cmd_complete(ahd, cmd);
957 ahd_schedule_completeq(ahd);
958 ahd_midlayer_entrypoint_unlock(ahd, &flags);
959 return (0);
960 }
961 dev = ahd_linux_get_device(ahd, cmd->device->channel,
962 cmd->device->id, cmd->device->lun,
963 /*alloc*/TRUE);
964 if (dev == NULL) {
965 ahd_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
966 ahd_linux_queue_cmd_complete(ahd, cmd);
967 ahd_schedule_completeq(ahd);
968 ahd_midlayer_entrypoint_unlock(ahd, &flags);
969 printf("%s: aic79xx_linux_queue - Unable to allocate device!\n",
970 ahd_name(ahd));
971 return (0);
972 } 451 }
973 if (cmd->cmd_len > MAX_CDB_LEN) 452
974 return (-EINVAL); 453 /*
454 * Save the callback on completion function.
455 */
456 cmd->scsi_done = scsi_done;
457
975 cmd->result = CAM_REQ_INPROG << 16; 458 cmd->result = CAM_REQ_INPROG << 16;
976 TAILQ_INSERT_TAIL(&dev->busyq, (struct ahd_cmd *)cmd, acmd_links.tqe); 459
977 if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) { 460 return ahd_linux_run_command(ahd, dev, cmd);
978 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links); 461}
979 dev->flags |= AHD_DEV_ON_RUN_LIST; 462
980 ahd_linux_run_device_queues(ahd); 463static inline struct scsi_target **
981 } 464ahd_linux_target_in_softc(struct scsi_target *starget)
982 ahd_midlayer_entrypoint_unlock(ahd, &flags); 465{
983 return (0); 466 struct ahd_softc *ahd =
467 *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
468 unsigned int target_offset;
469
470 target_offset = starget->id;
471 if (starget->channel != 0)
472 target_offset += 8;
473
474 return &ahd->platform_data->starget[target_offset];
984} 475}
985 476
986#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
987static int 477static int
988ahd_linux_slave_alloc(Scsi_Device *device) 478ahd_linux_target_alloc(struct scsi_target *starget)
989{ 479{
990 struct ahd_softc *ahd; 480 struct ahd_softc *ahd =
481 *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
482 unsigned long flags;
483 struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
484 struct ahd_linux_target *targ = scsi_transport_target_data(starget);
485 struct ahd_devinfo devinfo;
486 struct ahd_initiator_tinfo *tinfo;
487 struct ahd_tmode_tstate *tstate;
488 char channel = starget->channel + 'A';
991 489
992 ahd = *((struct ahd_softc **)device->host->hostdata); 490 ahd_lock(ahd, &flags);
993 if (bootverbose) 491
994 printf("%s: Slave Alloc %d\n", ahd_name(ahd), device->id); 492 BUG_ON(*ahd_targp != NULL);
995 return (0); 493
494 *ahd_targp = starget;
495 memset(targ, 0, sizeof(*targ));
496
497 tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
498 starget->id, &tstate);
499 ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id,
500 CAM_LUN_WILDCARD, channel,
501 ROLE_INITIATOR);
502 spi_min_period(starget) = AHD_SYNCRATE_MAX; /* We can do U320 */
503 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
504 spi_max_offset(starget) = MAX_OFFSET_PACED_BUG;
505 else
506 spi_max_offset(starget) = MAX_OFFSET_PACED;
507 spi_max_width(starget) = ahd->features & AHD_WIDE;
508
509 ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
510 AHD_TRANS_GOAL, /*paused*/FALSE);
511 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
512 AHD_TRANS_GOAL, /*paused*/FALSE);
513 ahd_unlock(ahd, &flags);
514
515 return 0;
516}
517
518static void
519ahd_linux_target_destroy(struct scsi_target *starget)
520{
521 struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
522
523 *ahd_targp = NULL;
996} 524}
997 525
998static int 526static int
999ahd_linux_slave_configure(Scsi_Device *device) 527ahd_linux_slave_alloc(struct scsi_device *sdev)
1000{ 528{
1001 struct ahd_softc *ahd; 529 struct ahd_softc *ahd =
1002 struct ahd_linux_device *dev; 530 *((struct ahd_softc **)sdev->host->hostdata);
1003 u_long flags; 531 struct scsi_target *starget = sdev->sdev_target;
532 struct ahd_linux_target *targ = scsi_transport_target_data(starget);
533 struct ahd_linux_device *dev;
1004 534
1005 ahd = *((struct ahd_softc **)device->host->hostdata);
1006 if (bootverbose) 535 if (bootverbose)
1007 printf("%s: Slave Configure %d\n", ahd_name(ahd), device->id); 536 printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
1008 ahd_midlayer_entrypoint_lock(ahd, &flags); 537
538 BUG_ON(targ->sdev[sdev->lun] != NULL);
539
540 dev = scsi_transport_device_data(sdev);
541 memset(dev, 0, sizeof(*dev));
542
1009 /* 543 /*
1010 * Since Linux has attached to the device, configure 544 * We start out life using untagged
1011 * it so we don't free and allocate the device 545 * transactions of which we allow one.
1012 * structure on every command.
1013 */ 546 */
1014 dev = ahd_linux_get_device(ahd, device->channel, 547 dev->openings = 1;
1015 device->id, device->lun, 548
1016 /*alloc*/TRUE); 549 /*
1017 if (dev != NULL) { 550 * Set maxtags to 0. This will be changed if we
1018 dev->flags &= ~AHD_DEV_UNCONFIGURED; 551 * later determine that we are dealing with
1019 dev->flags |= AHD_DEV_SLAVE_CONFIGURED; 552 * a tagged queuing capable device.
1020 dev->scsi_device = device; 553 */
1021 ahd_linux_device_queue_depth(ahd, dev); 554 dev->maxtags = 0;
1022 } 555
1023 ahd_midlayer_entrypoint_unlock(ahd, &flags); 556 targ->sdev[sdev->lun] = sdev;
557
1024 return (0); 558 return (0);
1025} 559}
1026 560
1027static void 561static int
1028ahd_linux_slave_destroy(Scsi_Device *device) 562ahd_linux_slave_configure(struct scsi_device *sdev)
1029{ 563{
1030 struct ahd_softc *ahd; 564 struct ahd_softc *ahd;
1031 struct ahd_linux_device *dev;
1032 u_long flags;
1033 565
1034 ahd = *((struct ahd_softc **)device->host->hostdata); 566 ahd = *((struct ahd_softc **)sdev->host->hostdata);
1035 if (bootverbose) 567 if (bootverbose)
1036 printf("%s: Slave Destroy %d\n", ahd_name(ahd), device->id); 568 printf("%s: Slave Configure %d\n", ahd_name(ahd), sdev->id);
1037 ahd_midlayer_entrypoint_lock(ahd, &flags);
1038 dev = ahd_linux_get_device(ahd, device->channel,
1039 device->id, device->lun,
1040 /*alloc*/FALSE);
1041 569
1042 /* 570 ahd_linux_device_queue_depth(sdev);
1043 * Filter out "silly" deletions of real devices by only 571
1044 * deleting devices that have had slave_configure() 572 /* Initial Domain Validation */
1045 * called on them. All other devices that have not 573 if (!spi_initial_dv(sdev->sdev_target))
1046 * been configured will automatically be deleted by 574 spi_dv_device(sdev);
1047 * the refcounting process. 575
1048 */ 576 return 0;
1049 if (dev != NULL
1050 && (dev->flags & AHD_DEV_SLAVE_CONFIGURED) != 0) {
1051 dev->flags |= AHD_DEV_UNCONFIGURED;
1052 if (TAILQ_EMPTY(&dev->busyq)
1053 && dev->active == 0
1054 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
1055 ahd_linux_free_device(ahd, dev);
1056 }
1057 ahd_midlayer_entrypoint_unlock(ahd, &flags);
1058} 577}
1059#else 578
1060/*
1061 * Sets the queue depth for each SCSI device hanging
1062 * off the input host adapter.
1063 */
1064static void 579static void
1065ahd_linux_select_queue_depth(struct Scsi_Host * host, 580ahd_linux_slave_destroy(struct scsi_device *sdev)
1066 Scsi_Device * scsi_devs)
1067{ 581{
1068 Scsi_Device *device;
1069 Scsi_Device *ldev;
1070 struct ahd_softc *ahd; 582 struct ahd_softc *ahd;
1071 u_long flags; 583 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
584 struct ahd_linux_target *targ = scsi_transport_target_data(sdev->sdev_target);
1072 585
1073 ahd = *((struct ahd_softc **)host->hostdata); 586 ahd = *((struct ahd_softc **)sdev->host->hostdata);
1074 ahd_lock(ahd, &flags); 587 if (bootverbose)
1075 for (device = scsi_devs; device != NULL; device = device->next) { 588 printf("%s: Slave Destroy %d\n", ahd_name(ahd), sdev->id);
1076 589
1077 /* 590 BUG_ON(dev->active);
1078 * Watch out for duplicate devices. This works around
1079 * some quirks in how the SCSI scanning code does its
1080 * device management.
1081 */
1082 for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
1083 if (ldev->host == device->host
1084 && ldev->channel == device->channel
1085 && ldev->id == device->id
1086 && ldev->lun == device->lun)
1087 break;
1088 }
1089 /* Skip duplicate. */
1090 if (ldev != device)
1091 continue;
1092 591
1093 if (device->host == host) { 592 targ->sdev[sdev->lun] = NULL;
1094 struct ahd_linux_device *dev;
1095 593
1096 /*
1097 * Since Linux has attached to the device, configure
1098 * it so we don't free and allocate the device
1099 * structure on every command.
1100 */
1101 dev = ahd_linux_get_device(ahd, device->channel,
1102 device->id, device->lun,
1103 /*alloc*/TRUE);
1104 if (dev != NULL) {
1105 dev->flags &= ~AHD_DEV_UNCONFIGURED;
1106 dev->scsi_device = device;
1107 ahd_linux_device_queue_depth(ahd, dev);
1108 device->queue_depth = dev->openings
1109 + dev->active;
1110 if ((dev->flags & (AHD_DEV_Q_BASIC
1111 | AHD_DEV_Q_TAGGED)) == 0) {
1112 /*
1113 * We allow the OS to queue 2 untagged
1114 * transactions to us at any time even
1115 * though we can only execute them
1116 * serially on the controller/device.
1117 * This should remove some latency.
1118 */
1119 device->queue_depth = 2;
1120 }
1121 }
1122 }
1123 }
1124 ahd_unlock(ahd, &flags);
1125} 594}
1126#endif
1127 595
1128#if defined(__i386__) 596#if defined(__i386__)
1129/* 597/*
1130 * Return the disk geometry for the given SCSI device. 598 * Return the disk geometry for the given SCSI device.
1131 */ 599 */
1132static int 600static int
1133#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1134ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, 601ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1135 sector_t capacity, int geom[]) 602 sector_t capacity, int geom[])
1136{ 603{
1137 uint8_t *bh; 604 uint8_t *bh;
1138#else
1139ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1140{
1141 struct scsi_device *sdev = disk->device;
1142 u_long capacity = disk->capacity;
1143 struct buffer_head *bh;
1144#endif
1145 int heads; 605 int heads;
1146 int sectors; 606 int sectors;
1147 int cylinders; 607 int cylinders;
@@ -1151,22 +611,11 @@ ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1151 611
1152 ahd = *((struct ahd_softc **)sdev->host->hostdata); 612 ahd = *((struct ahd_softc **)sdev->host->hostdata);
1153 613
1154#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1155 bh = scsi_bios_ptable(bdev); 614 bh = scsi_bios_ptable(bdev);
1156#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
1157 bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
1158#else
1159 bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
1160#endif
1161
1162 if (bh) { 615 if (bh) {
1163 ret = scsi_partsize(bh, capacity, 616 ret = scsi_partsize(bh, capacity,
1164 &geom[2], &geom[0], &geom[1]); 617 &geom[2], &geom[0], &geom[1]);
1165#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1166 kfree(bh); 618 kfree(bh);
1167#else
1168 brelse(bh);
1169#endif
1170 if (ret != -1) 619 if (ret != -1)
1171 return (ret); 620 return (ret);
1172 } 621 }
@@ -1194,392 +643,35 @@ ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1194 * Abort the current SCSI command(s). 643 * Abort the current SCSI command(s).
1195 */ 644 */
1196static int 645static int
1197ahd_linux_abort(Scsi_Cmnd *cmd) 646ahd_linux_abort(struct scsi_cmnd *cmd)
1198{ 647{
1199 struct ahd_softc *ahd; 648 int error;
1200 struct ahd_cmd *acmd;
1201 struct ahd_cmd *list_acmd;
1202 struct ahd_linux_device *dev;
1203 struct scb *pending_scb;
1204 u_long s;
1205 u_int saved_scbptr;
1206 u_int active_scbptr;
1207 u_int last_phase;
1208 u_int cdb_byte;
1209 int retval;
1210 int was_paused;
1211 int paused;
1212 int wait;
1213 int disconnected;
1214 ahd_mode_state saved_modes;
1215
1216 pending_scb = NULL;
1217 paused = FALSE;
1218 wait = FALSE;
1219 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1220 acmd = (struct ahd_cmd *)cmd;
1221
1222 printf("%s:%d:%d:%d: Attempting to abort cmd %p:",
1223 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1224 cmd->device->lun, cmd);
1225 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
1226 printf(" 0x%x", cmd->cmnd[cdb_byte]);
1227 printf("\n");
1228
1229 /*
1230 * In all versions of Linux, we have to work around
1231 * a major flaw in how the mid-layer is locked down
1232 * if we are to sleep successfully in our error handler
1233 * while allowing our interrupt handler to run. Since
1234 * the midlayer acquires either the io_request_lock or
1235 * our lock prior to calling us, we must use the
1236 * spin_unlock_irq() method for unlocking our lock.
1237 * This will force interrupts to be enabled on the
1238 * current CPU. Since the EH thread should not have
1239 * been running with CPU interrupts disabled other than
1240 * by acquiring either the io_request_lock or our own
1241 * lock, this *should* be safe.
1242 */
1243 ahd_midlayer_entrypoint_lock(ahd, &s);
1244
1245 /*
1246 * First determine if we currently own this command.
1247 * Start by searching the device queue. If not found
1248 * there, check the pending_scb list. If not found
1249 * at all, and the system wanted us to just abort the
1250 * command, return success.
1251 */
1252 dev = ahd_linux_get_device(ahd, cmd->device->channel,
1253 cmd->device->id, cmd->device->lun,
1254 /*alloc*/FALSE);
1255
1256 if (dev == NULL) {
1257 /*
1258 * No target device for this command exists,
1259 * so we must not still own the command.
1260 */
1261 printf("%s:%d:%d:%d: Is not an active device\n",
1262 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1263 cmd->device->lun);
1264 retval = SUCCESS;
1265 goto no_cmd;
1266 }
1267
1268 TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
1269 if (list_acmd == acmd)
1270 break;
1271 }
1272
1273 if (list_acmd != NULL) {
1274 printf("%s:%d:%d:%d: Command found on device queue\n",
1275 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1276 cmd->device->lun);
1277 TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
1278 cmd->result = DID_ABORT << 16;
1279 ahd_linux_queue_cmd_complete(ahd, cmd);
1280 retval = SUCCESS;
1281 goto done;
1282 }
1283 649
1284 /* 650 error = ahd_linux_queue_recovery_cmd(cmd, SCB_ABORT);
1285 * See if we can find a matching cmd in the pending list. 651 if (error != 0)
1286 */ 652 printf("aic79xx_abort returns 0x%x\n", error);
1287 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 653 return error;
1288 if (pending_scb->io_ctx == cmd)
1289 break;
1290 }
1291
1292 if (pending_scb == NULL) {
1293 printf("%s:%d:%d:%d: Command not found\n",
1294 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1295 cmd->device->lun);
1296 goto no_cmd;
1297 }
1298
1299 if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
1300 /*
1301 * We can't queue two recovery actions using the same SCB
1302 */
1303 retval = FAILED;
1304 goto done;
1305 }
1306
1307 /*
1308 * Ensure that the card doesn't do anything
1309 * behind our back. Also make sure that we
1310 * didn't "just" miss an interrupt that would
1311 * affect this cmd.
1312 */
1313 was_paused = ahd_is_paused(ahd);
1314 ahd_pause_and_flushwork(ahd);
1315 paused = TRUE;
1316
1317 if ((pending_scb->flags & SCB_ACTIVE) == 0) {
1318 printf("%s:%d:%d:%d: Command already completed\n",
1319 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1320 cmd->device->lun);
1321 goto no_cmd;
1322 }
1323
1324 printf("%s: At time of recovery, card was %spaused\n",
1325 ahd_name(ahd), was_paused ? "" : "not ");
1326 ahd_dump_card_state(ahd);
1327
1328 disconnected = TRUE;
1329 if (ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A',
1330 cmd->device->lun, SCB_GET_TAG(pending_scb),
1331 ROLE_INITIATOR, CAM_REQ_ABORTED,
1332 SEARCH_COMPLETE) > 0) {
1333 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
1334 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1335 cmd->device->lun);
1336 retval = SUCCESS;
1337 goto done;
1338 }
1339
1340 saved_modes = ahd_save_modes(ahd);
1341 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1342 last_phase = ahd_inb(ahd, LASTPHASE);
1343 saved_scbptr = ahd_get_scbptr(ahd);
1344 active_scbptr = saved_scbptr;
1345 if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
1346 struct scb *bus_scb;
1347
1348 bus_scb = ahd_lookup_scb(ahd, active_scbptr);
1349 if (bus_scb == pending_scb)
1350 disconnected = FALSE;
1351 }
1352
1353 /*
1354 * At this point, pending_scb is the scb associated with the
1355 * passed in command. That command is currently active on the
1356 * bus or is in the disconnected state.
1357 */
1358 if (last_phase != P_BUSFREE
1359 && SCB_GET_TAG(pending_scb) == active_scbptr) {
1360
1361 /*
1362 * We're active on the bus, so assert ATN
1363 * and hope that the target responds.
1364 */
1365 pending_scb = ahd_lookup_scb(ahd, active_scbptr);
1366 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
1367 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1368 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1369 printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
1370 ahd_name(ahd), cmd->device->channel,
1371 cmd->device->id, cmd->device->lun);
1372 wait = TRUE;
1373 } else if (disconnected) {
1374
1375 /*
1376 * Actually re-queue this SCB in an attempt
1377 * to select the device before it reconnects.
1378 */
1379 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
1380 ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
1381 pending_scb->hscb->cdb_len = 0;
1382 pending_scb->hscb->task_attribute = 0;
1383 pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
1384
1385 if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
1386 /*
1387 * Mark the SCB has having an outstanding
1388 * task management function. Should the command
1389 * complete normally before the task management
1390 * function can be sent, the host will be notified
1391 * to abort our requeued SCB.
1392 */
1393 ahd_outb(ahd, SCB_TASK_MANAGEMENT,
1394 pending_scb->hscb->task_management);
1395 } else {
1396 /*
1397 * If non-packetized, set the MK_MESSAGE control
1398 * bit indicating that we desire to send a message.
1399 * We also set the disconnected flag since there is
1400 * no guarantee that our SCB control byte matches
1401 * the version on the card. We don't want the
1402 * sequencer to abort the command thinking an
1403 * unsolicited reselection occurred.
1404 */
1405 pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1406
1407 /*
1408 * The sequencer will never re-reference the
1409 * in-core SCB. To make sure we are notified
1410 * during reselection, set the MK_MESSAGE flag in
1411 * the card's copy of the SCB.
1412 */
1413 ahd_outb(ahd, SCB_CONTROL,
1414 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1415 }
1416
1417 /*
1418 * Clear out any entries in the QINFIFO first
1419 * so we are the next SCB for this target
1420 * to run.
1421 */
1422 ahd_search_qinfifo(ahd, cmd->device->id,
1423 cmd->device->channel + 'A', cmd->device->lun,
1424 SCB_LIST_NULL, ROLE_INITIATOR,
1425 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
1426 ahd_qinfifo_requeue_tail(ahd, pending_scb);
1427 ahd_set_scbptr(ahd, saved_scbptr);
1428 ahd_print_path(ahd, pending_scb);
1429 printf("Device is disconnected, re-queuing SCB\n");
1430 wait = TRUE;
1431 } else {
1432 printf("%s:%d:%d:%d: Unable to deliver message\n",
1433 ahd_name(ahd), cmd->device->channel,
1434 cmd->device->id, cmd->device->lun);
1435 retval = FAILED;
1436 goto done;
1437 }
1438
1439no_cmd:
1440 /*
1441 * Our assumption is that if we don't have the command, no
1442 * recovery action was required, so we return success. Again,
1443 * the semantics of the mid-layer recovery engine are not
1444 * well defined, so this may change in time.
1445 */
1446 retval = SUCCESS;
1447done:
1448 if (paused)
1449 ahd_unpause(ahd);
1450 if (wait) {
1451 struct timer_list timer;
1452 int ret;
1453
1454 pending_scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
1455 spin_unlock_irq(&ahd->platform_data->spin_lock);
1456 init_timer(&timer);
1457 timer.data = (u_long)pending_scb;
1458 timer.expires = jiffies + (5 * HZ);
1459 timer.function = ahd_linux_sem_timeout;
1460 add_timer(&timer);
1461 printf("Recovery code sleeping\n");
1462 down(&ahd->platform_data->eh_sem);
1463 printf("Recovery code awake\n");
1464 ret = del_timer_sync(&timer);
1465 if (ret == 0) {
1466 printf("Timer Expired\n");
1467 retval = FAILED;
1468 }
1469 spin_lock_irq(&ahd->platform_data->spin_lock);
1470 }
1471 ahd_schedule_runq(ahd);
1472 ahd_linux_run_complete_queue(ahd);
1473 ahd_midlayer_entrypoint_unlock(ahd, &s);
1474 return (retval);
1475}
1476
1477
1478static void
1479ahd_linux_dev_reset_complete(Scsi_Cmnd *cmd)
1480{
1481 free(cmd, M_DEVBUF);
1482} 654}
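The abort and device-reset handlers now funnel into ahd_linux_queue_recovery_cmd(), whose body lies outside this hunk; the deleted code above shows the pattern it replaces: queue a recovery SCB, then sleep on eh_sem with a hand-rolled 5-second timer. A minimal sketch of that wait, assuming a completion-based variant (the eh_done field and the helper name are assumptions for illustration, not the driver's actual code, and <linux/completion.h> is taken as already pulled in):

static int
ahd_linux_wait_for_recovery(struct ahd_softc *ahd, struct scb *scb)
{
	struct completion done;
	u_long flags;

	init_completion(&done);
	scb->platform_data->eh_done = &done;	/* assumed field */

	ahd_lock(ahd, &flags);
	ahd_queue_scb(ahd, scb);		/* issue the recovery SCB */
	ahd_unlock(ahd, &flags);

	/* Replaces the add_timer()/down()/del_timer_sync() dance above. */
	if (wait_for_completion_timeout(&done, 5 * HZ) == 0) {
		printf("%s: recovery SCB timed out\n", ahd_name(ahd));
		return (FAILED);
	}
	return (SUCCESS);
}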
1483 655
1484/* 656/*
1485 * Attempt to send a target reset message to the device that timed out. 657 * Attempt to send a target reset message to the device that timed out.
1486 */ 658 */
1487static int 659static int
1488ahd_linux_dev_reset(Scsi_Cmnd *cmd) 660ahd_linux_dev_reset(struct scsi_cmnd *cmd)
1489{ 661{
1490 struct ahd_softc *ahd; 662 int error;
1491 struct scsi_cmnd *recovery_cmd;
1492 struct ahd_linux_device *dev;
1493 struct ahd_initiator_tinfo *tinfo;
1494 struct ahd_tmode_tstate *tstate;
1495 struct scb *scb;
1496 struct hardware_scb *hscb;
1497 u_long s;
1498 struct timer_list timer;
1499 int retval;
1500
1501 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1502 recovery_cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
1503 if (!recovery_cmd)
1504 return (FAILED);
1505 memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
1506 recovery_cmd->device = cmd->device;
1507 recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
1508#ifdef AHD_DEBUG
1509 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1510 printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
1511 ahd_name(ahd), cmd->device->channel, cmd->device->id,
1512 cmd->device->lun, cmd);
1513#endif
1514 ahd_lock(ahd, &s);
1515
1516 dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
1517 cmd->device->lun, /*alloc*/FALSE);
1518 if (dev == NULL) {
1519 ahd_unlock(ahd, &s);
1520 kfree(recovery_cmd);
1521 return (FAILED);
1522 }
1523 if ((scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX)) == NULL) {
1524 ahd_unlock(ahd, &s);
1525 kfree(recovery_cmd);
1526 return (FAILED);
1527 }
1528 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
1529 cmd->device->id, &tstate);
1530 recovery_cmd->result = CAM_REQ_INPROG << 16;
1531 recovery_cmd->host_scribble = (char *)scb;
1532 scb->io_ctx = recovery_cmd;
1533 scb->platform_data->dev = dev;
1534 scb->sg_count = 0;
1535 ahd_set_residual(scb, 0);
1536 ahd_set_sense_residual(scb, 0);
1537 hscb = scb->hscb;
1538 hscb->control = 0;
1539 hscb->scsiid = BUILD_SCSIID(ahd, cmd);
1540 hscb->lun = cmd->device->lun;
1541 hscb->cdb_len = 0;
1542 hscb->task_management = SIU_TASKMGMT_LUN_RESET;
1543 scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
1544 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1545 scb->flags |= SCB_PACKETIZED;
1546 } else {
1547 hscb->control |= MK_MESSAGE;
1548 }
1549 dev->openings--;
1550 dev->active++;
1551 dev->commands_issued++;
1552 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1553 ahd_queue_scb(ahd, scb);
1554 663
1555 scb->platform_data->flags |= AHD_SCB_UP_EH_SEM; 664 error = ahd_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
1556 ahd_unlock(ahd, &s); 665 if (error != 0)
1557 init_timer(&timer); 666 printf("aic79xx_dev_reset returns 0x%x\n", error);
1558 timer.data = (u_long)scb; 667 return error;
1559 timer.expires = jiffies + (5 * HZ);
1560 timer.function = ahd_linux_sem_timeout;
1561 add_timer(&timer);
1562 printf("Recovery code sleeping\n");
1563 down(&ahd->platform_data->eh_sem);
1564 printf("Recovery code awake\n");
1565 retval = SUCCESS;
1566 if (del_timer_sync(&timer) == 0) {
1567 printf("Timer Expired\n");
1568 retval = FAILED;
1569 }
1570 ahd_lock(ahd, &s);
1571 ahd_schedule_runq(ahd);
1572 ahd_linux_run_complete_queue(ahd);
1573 ahd_unlock(ahd, &s);
1574 printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
1575 return (retval);
1576} 668}
1577 669
1578/* 670/*
1579 * Reset the SCSI bus. 671 * Reset the SCSI bus.
1580 */ 672 */
1581static int 673static int
1582ahd_linux_bus_reset(Scsi_Cmnd *cmd) 674ahd_linux_bus_reset(struct scsi_cmnd *cmd)
1583{ 675{
1584 struct ahd_softc *ahd; 676 struct ahd_softc *ahd;
1585 u_long s; 677 u_long s;
@@ -1594,7 +686,6 @@ ahd_linux_bus_reset(Scsi_Cmnd *cmd)
1594 ahd_lock(ahd, &s); 686 ahd_lock(ahd, &s);
1595 found = ahd_reset_channel(ahd, cmd->device->channel + 'A', 687 found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
1596 /*initiate reset*/TRUE); 688 /*initiate reset*/TRUE);
1597 ahd_linux_run_complete_queue(ahd);
1598 ahd_unlock(ahd, &s); 689 ahd_unlock(ahd, &s);
1599 690
1600 if (bootverbose) 691 if (bootverbose)
@@ -1604,9 +695,10 @@ ahd_linux_bus_reset(Scsi_Cmnd *cmd)
1604 return (SUCCESS); 695 return (SUCCESS);
1605} 696}
1606 697
1607Scsi_Host_Template aic79xx_driver_template = { 698struct scsi_host_template aic79xx_driver_template = {
1608 .module = THIS_MODULE, 699 .module = THIS_MODULE,
1609 .name = "aic79xx", 700 .name = "aic79xx",
701 .proc_name = "aic79xx",
1610 .proc_info = ahd_linux_proc_info, 702 .proc_info = ahd_linux_proc_info,
1611 .info = ahd_linux_info, 703 .info = ahd_linux_info,
1612 .queuecommand = ahd_linux_queue, 704 .queuecommand = ahd_linux_queue,
@@ -1623,37 +715,10 @@ Scsi_Host_Template aic79xx_driver_template = {
1623 .slave_alloc = ahd_linux_slave_alloc, 715 .slave_alloc = ahd_linux_slave_alloc,
1624 .slave_configure = ahd_linux_slave_configure, 716 .slave_configure = ahd_linux_slave_configure,
1625 .slave_destroy = ahd_linux_slave_destroy, 717 .slave_destroy = ahd_linux_slave_destroy,
718 .target_alloc = ahd_linux_target_alloc,
719 .target_destroy = ahd_linux_target_destroy,
1626}; 720};
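The template above gains target_alloc/target_destroy hooks, and ahd_linux_register_host() below sets host->transportt to ahd_linux_transport_template. How that transport template is obtained is not shown in this hunk; a minimal sketch, assuming the usual scsi_transport_spi wiring (the function-template contents and the init/exit bodies are illustrative, not taken from the patch):

static struct spi_function_template ahd_linux_transport_functions = {
	/* .set_period, .set_offset, .set_width, ... filled in by the driver */
};

static int __init
ahd_linux_init(void)
{
	ahd_linux_transport_template =
		spi_attach_transport(&ahd_linux_transport_functions);
	if (ahd_linux_transport_template == NULL)
		return (-ENODEV);
	/* ... register the PCI driver here ... */
	return (0);
}

static void __exit
ahd_linux_exit(void)
{
	/* ... unregister the PCI driver here ... */
	spi_release_transport(ahd_linux_transport_template);
}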
1627 721
1628/**************************** Tasklet Handler *********************************/
1629
1630/*
1631 * In 2.4.X and above, this routine is called from a tasklet,
1632 * so we must re-acquire our lock prior to executing this code.
1633 * In all prior kernels, ahd_schedule_runq() calls this routine
1634 * directly and ahd_schedule_runq() is called with our lock held.
1635 */
1636static void
1637ahd_runq_tasklet(unsigned long data)
1638{
1639 struct ahd_softc* ahd;
1640 struct ahd_linux_device *dev;
1641 u_long flags;
1642
1643 ahd = (struct ahd_softc *)data;
1644 ahd_lock(ahd, &flags);
1645 while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
1646
1647 TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
1648 dev->flags &= ~AHD_DEV_ON_RUN_LIST;
1649 ahd_linux_check_device_queue(ahd, dev);
1650 /* Yield to our interrupt handler */
1651 ahd_unlock(ahd, &flags);
1652 ahd_lock(ahd, &flags);
1653 }
1654 ahd_unlock(ahd, &flags);
1655}
1656
1657/******************************** Bus DMA *************************************/ 722/******************************** Bus DMA *************************************/
1658int 723int
1659ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent, 724ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
@@ -1693,36 +758,10 @@ int
1693ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr, 758ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
1694 int flags, bus_dmamap_t *mapp) 759 int flags, bus_dmamap_t *mapp)
1695{ 760{
1696 bus_dmamap_t map;
1697
1698 map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
1699 if (map == NULL)
1700 return (ENOMEM);
1701 /*
1702 * Although we can dma data above 4GB, our
1703 * "consistent" memory is below 4GB for
1704 * space efficiency reasons (only need a 4-byte
1705 * address). For this reason, we have to reset
1706 * our dma mask when doing allocations.
1707 */
1708 if (ahd->dev_softc != NULL)
1709 if (pci_set_dma_mask(ahd->dev_softc, 0xFFFFFFFF)) {
1710 printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
1711 kfree(map);
1712 return (ENODEV);
1713 }
1714 *vaddr = pci_alloc_consistent(ahd->dev_softc, 761 *vaddr = pci_alloc_consistent(ahd->dev_softc,
1715 dmat->maxsize, &map->bus_addr); 762 dmat->maxsize, mapp);
1716 if (ahd->dev_softc != NULL)
1717 if (pci_set_dma_mask(ahd->dev_softc,
1718 ahd->platform_data->hw_dma_mask)) {
1719 printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
1720 kfree(map);
1721 return (ENODEV);
1722 }
1723 if (*vaddr == NULL) 763 if (*vaddr == NULL)
1724 return (ENOMEM); 764 return (ENOMEM);
1725 *mapp = map;
1726 return(0); 765 return(0);
1727} 766}
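The comment deleted above explains why the old allocator shrank the DMA mask around each coherent allocation: descriptor memory must sit below 4GB even though data buffers may use the full mask. A minimal sketch of that pattern, mirroring the removed pci_set_dma_mask()/pci_alloc_consistent() calls (the helper name and the data_mask parameter are illustrative; the new code simply relies on pci_alloc_consistent() returning 32-bit addressable memory):

static void *
ahd_alloc_coherent_below_4gb(struct pci_dev *pdev, size_t size,
			     dma_addr_t *busaddr, uint64_t data_mask)
{
	void *vaddr;

	/* Narrow the mask so the coherent buffer lands below 4GB. */
	if (pci_set_dma_mask(pdev, 0xFFFFFFFF) != 0)
		return (NULL);
	vaddr = pci_alloc_consistent(pdev, size, busaddr);
	/* Restore the wider mask used for streaming data mappings. */
	pci_set_dma_mask(pdev, data_mask);
	return (vaddr);
}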
1728 767
@@ -1731,7 +770,7 @@ ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
1731 void* vaddr, bus_dmamap_t map) 770 void* vaddr, bus_dmamap_t map)
1732{ 771{
1733 pci_free_consistent(ahd->dev_softc, dmat->maxsize, 772 pci_free_consistent(ahd->dev_softc, dmat->maxsize,
1734 vaddr, map->bus_addr); 773 vaddr, map);
1735} 774}
1736 775
1737int 776int
@@ -1745,7 +784,7 @@ ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
1745 */ 784 */
1746 bus_dma_segment_t stack_sg; 785 bus_dma_segment_t stack_sg;
1747 786
1748 stack_sg.ds_addr = map->bus_addr; 787 stack_sg.ds_addr = map;
1749 stack_sg.ds_len = dmat->maxsize; 788 stack_sg.ds_len = dmat->maxsize;
1750 cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); 789 cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
1751 return (0); 790 return (0);
@@ -1754,11 +793,6 @@ ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
1754void 793void
1755ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) 794ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
1756{ 795{
1757 /*
1758 * The map may be NULL in our < 2.3.X implementation.
1759 */
1760 if (map != NULL)
1761 free(map, M_DEVBUF);
1762} 796}
1763 797
1764int 798int
@@ -1823,41 +857,6 @@ ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1823} 857}
1824 858
1825static void 859static void
1826ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
1827{
1828
1829 if ((instance >= 0) && (targ >= 0)
1830 && (instance < NUM_ELEMENTS(aic79xx_tag_info))
1831 && (targ < AHD_NUM_TARGETS)) {
1832 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
1833 if (bootverbose)
1834 printf("tag_info[%d:%d] = %d\n", instance, targ, value);
1835 }
1836}
1837
1838static void
1839ahd_linux_setup_rd_strm_info(u_long arg, int instance, int targ, int32_t value)
1840{
1841 if ((instance >= 0)
1842 && (instance < NUM_ELEMENTS(aic79xx_rd_strm_info))) {
1843 aic79xx_rd_strm_info[instance] = value & 0xFFFF;
1844 if (bootverbose)
1845 printf("rd_strm[%d] = 0x%x\n", instance, value);
1846 }
1847}
1848
1849static void
1850ahd_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
1851{
1852 if ((instance >= 0)
1853 && (instance < NUM_ELEMENTS(aic79xx_dv_settings))) {
1854 aic79xx_dv_settings[instance] = value;
1855 if (bootverbose)
1856 printf("dv[%d] = %d\n", instance, value);
1857 }
1858}
1859
1860static void
1861ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value) 860ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
1862{ 861{
1863 862
@@ -1887,6 +886,99 @@ ahd_linux_setup_tag_info_global(char *p)
1887 } 886 }
1888} 887}
1889 888
889static void
890ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
891{
892
893 if ((instance >= 0) && (targ >= 0)
894 && (instance < NUM_ELEMENTS(aic79xx_tag_info))
895 && (targ < AHD_NUM_TARGETS)) {
896 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
897 if (bootverbose)
898 printf("tag_info[%d:%d] = %d\n", instance, targ, value);
899 }
900}
901
902static char *
903ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
904 void (*callback)(u_long, int, int, int32_t),
905 u_long callback_arg)
906{
907 char *tok_end;
908 char *tok_end2;
909 int i;
910 int instance;
911 int targ;
912 int done;
913 char tok_list[] = {'.', ',', '{', '}', '\0'};
914
915 /* All options use a ':' name/arg separator */
916 if (*opt_arg != ':')
917 return (opt_arg);
918 opt_arg++;
919 instance = -1;
920 targ = -1;
921 done = FALSE;
922 /*
923 * Restore separator that may be in
924 * the middle of our option argument.
925 */
926 tok_end = strchr(opt_arg, '\0');
927 if (tok_end < end)
928 *tok_end = ',';
929 while (!done) {
930 switch (*opt_arg) {
931 case '{':
932 if (instance == -1) {
933 instance = 0;
934 } else {
935 if (depth > 1) {
936 if (targ == -1)
937 targ = 0;
938 } else {
939 printf("Malformed Option %s\n",
940 opt_name);
941 done = TRUE;
942 }
943 }
944 opt_arg++;
945 break;
946 case '}':
947 if (targ != -1)
948 targ = -1;
949 else if (instance != -1)
950 instance = -1;
951 opt_arg++;
952 break;
953 case ',':
954 case '.':
955 if (instance == -1)
956 done = TRUE;
957 else if (targ >= 0)
958 targ++;
959 else if (instance >= 0)
960 instance++;
961 opt_arg++;
962 break;
963 case '\0':
964 done = TRUE;
965 break;
966 default:
967 tok_end = end;
968 for (i = 0; tok_list[i]; i++) {
969 tok_end2 = strchr(opt_arg, tok_list[i]);
970 if ((tok_end2) && (tok_end2 < tok_end))
971 tok_end = tok_end2;
972 }
973 callback(callback_arg, instance, targ,
974 simple_strtol(opt_arg, NULL, 0));
975 opt_arg = tok_end;
976 break;
977 }
978 }
979 return (opt_arg);
980}
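ahd_parse_brace_option() walks the option string one token at a time: '{' descends a level (controller instance, then target at depth 2), '}' pops back out, ',' or '.' advances within the current level, and every bare number fires the callback. A hypothetical self-test, not part of the patch, showing the callbacks produced by one depth-2 string (the guard macro, helper names and sample string are illustrative only):

#ifdef AHD_PARSE_SELFTEST
static void
ahd_dump_tag_opt(u_long arg, int instance, int targ, int32_t value)
{
	printf("tag_info callback: instance %d targ %d value %d\n",
	       instance, targ, value);
}

static void
ahd_parse_selftest(void)
{
	static char opt[] = ":{{16,16,16},{32}}";

	/*
	 * Expected callbacks:
	 *   instance 0 targ 0 value 16
	 *   instance 0 targ 1 value 16
	 *   instance 0 targ 2 value 16
	 *   instance 1 targ 0 value 32
	 */
	ahd_parse_brace_option("tag_info", opt, opt + strlen(opt),
			       2, ahd_dump_tag_opt, 0);
}
#endif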
981
1890/* 982/*
1891 * Handle Linux boot parameters. This routine allows for assigning a value 983 * Handle Linux boot parameters. This routine allows for assigning a value
1892 * to a parameter with a ':' between the parameter and the value. 984 * to a parameter with a ':' between the parameter and the value.
@@ -1916,8 +1008,6 @@ aic79xx_setup(char *s)
1916 { "seltime", &aic79xx_seltime }, 1008 { "seltime", &aic79xx_seltime },
1917 { "tag_info", NULL }, 1009 { "tag_info", NULL },
1918 { "global_tag_depth", NULL}, 1010 { "global_tag_depth", NULL},
1919 { "rd_strm", NULL },
1920 { "dv", NULL },
1921 { "slewrate", NULL }, 1011 { "slewrate", NULL },
1922 { "precomp", NULL }, 1012 { "precomp", NULL },
1923 { "amplitude", NULL }, 1013 { "amplitude", NULL },
@@ -1946,24 +1036,18 @@ aic79xx_setup(char *s)
1946 if (strncmp(p, "global_tag_depth", n) == 0) { 1036 if (strncmp(p, "global_tag_depth", n) == 0) {
1947 ahd_linux_setup_tag_info_global(p + n); 1037 ahd_linux_setup_tag_info_global(p + n);
1948 } else if (strncmp(p, "tag_info", n) == 0) { 1038 } else if (strncmp(p, "tag_info", n) == 0) {
1949 s = aic_parse_brace_option("tag_info", p + n, end, 1039 s = ahd_parse_brace_option("tag_info", p + n, end,
1950 2, ahd_linux_setup_tag_info, 0); 1040 2, ahd_linux_setup_tag_info, 0);
1951 } else if (strncmp(p, "rd_strm", n) == 0) {
1952 s = aic_parse_brace_option("rd_strm", p + n, end,
1953 1, ahd_linux_setup_rd_strm_info, 0);
1954 } else if (strncmp(p, "dv", n) == 0) {
1955 s = aic_parse_brace_option("dv", p + n, end, 1,
1956 ahd_linux_setup_dv, 0);
1957 } else if (strncmp(p, "slewrate", n) == 0) { 1041 } else if (strncmp(p, "slewrate", n) == 0) {
1958 s = aic_parse_brace_option("slewrate", 1042 s = ahd_parse_brace_option("slewrate",
1959 p + n, end, 1, ahd_linux_setup_iocell_info, 1043 p + n, end, 1, ahd_linux_setup_iocell_info,
1960 AIC79XX_SLEWRATE_INDEX); 1044 AIC79XX_SLEWRATE_INDEX);
1961 } else if (strncmp(p, "precomp", n) == 0) { 1045 } else if (strncmp(p, "precomp", n) == 0) {
1962 s = aic_parse_brace_option("precomp", 1046 s = ahd_parse_brace_option("precomp",
1963 p + n, end, 1, ahd_linux_setup_iocell_info, 1047 p + n, end, 1, ahd_linux_setup_iocell_info,
1964 AIC79XX_PRECOMP_INDEX); 1048 AIC79XX_PRECOMP_INDEX);
1965 } else if (strncmp(p, "amplitude", n) == 0) { 1049 } else if (strncmp(p, "amplitude", n) == 0) {
1966 s = aic_parse_brace_option("amplitude", 1050 s = ahd_parse_brace_option("amplitude",
1967 p + n, end, 1, ahd_linux_setup_iocell_info, 1051 p + n, end, 1, ahd_linux_setup_iocell_info,
1968 AIC79XX_AMPLITUDE_INDEX); 1052 AIC79XX_AMPLITUDE_INDEX);
1969 } else if (p[n] == ':') { 1053 } else if (p[n] == ':') {
@@ -1982,13 +1066,12 @@ __setup("aic79xx=", aic79xx_setup);
1982uint32_t aic79xx_verbose; 1066uint32_t aic79xx_verbose;
1983 1067
1984int 1068int
1985ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template) 1069ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template)
1986{ 1070{
1987 char buf[80]; 1071 char buf[80];
1988 struct Scsi_Host *host; 1072 struct Scsi_Host *host;
1989 char *new_name; 1073 char *new_name;
1990 u_long s; 1074 u_long s;
1991 u_long target;
1992 1075
1993 template->name = ahd->description; 1076 template->name = ahd->description;
1994 host = scsi_host_alloc(template, sizeof(struct ahd_softc *)); 1077 host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
@@ -1997,11 +1080,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
1997 1080
1998 *((struct ahd_softc **)host->hostdata) = ahd; 1081 *((struct ahd_softc **)host->hostdata) = ahd;
1999 ahd_lock(ahd, &s); 1082 ahd_lock(ahd, &s);
2000#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2001 scsi_assign_lock(host, &ahd->platform_data->spin_lock); 1083 scsi_assign_lock(host, &ahd->platform_data->spin_lock);
2002#elif AHD_SCSI_HAS_HOST_LOCK != 0
2003 host->lock = &ahd->platform_data->spin_lock;
2004#endif
2005 ahd->platform_data->host = host; 1084 ahd->platform_data->host = host;
2006 host->can_queue = AHD_MAX_QUEUE; 1085 host->can_queue = AHD_MAX_QUEUE;
2007 host->cmd_per_lun = 2; 1086 host->cmd_per_lun = 2;
@@ -2012,7 +1091,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
2012 host->max_lun = AHD_NUM_LUNS; 1091 host->max_lun = AHD_NUM_LUNS;
2013 host->max_channel = 0; 1092 host->max_channel = 0;
2014 host->sg_tablesize = AHD_NSEG; 1093 host->sg_tablesize = AHD_NSEG;
2015 ahd_set_unit(ahd, ahd_linux_next_unit()); 1094 ahd_set_unit(ahd, ahd_linux_unit++);
2016 sprintf(buf, "scsi%d", host->host_no); 1095 sprintf(buf, "scsi%d", host->host_no);
2017 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT); 1096 new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
2018 if (new_name != NULL) { 1097 if (new_name != NULL) {
@@ -2020,54 +1099,14 @@ ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
2020 ahd_set_name(ahd, new_name); 1099 ahd_set_name(ahd, new_name);
2021 } 1100 }
2022 host->unique_id = ahd->unit; 1101 host->unique_id = ahd->unit;
2023#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2024 scsi_set_pci_device(host, ahd->dev_softc);
2025#endif
2026 ahd_linux_setup_user_rd_strm_settings(ahd);
2027 ahd_linux_initialize_scsi_bus(ahd); 1102 ahd_linux_initialize_scsi_bus(ahd);
2028 ahd_unlock(ahd, &s);
2029 ahd->platform_data->dv_pid = kernel_thread(ahd_linux_dv_thread, ahd, 0);
2030 ahd_lock(ahd, &s);
2031 if (ahd->platform_data->dv_pid < 0) {
2032 printf("%s: Failed to create DV thread, error= %d\n",
2033 ahd_name(ahd), ahd->platform_data->dv_pid);
2034 return (-ahd->platform_data->dv_pid);
2035 }
2036 /*
2037 * Initially allocate *all* of our linux target objects
2038 * so that the DV thread will scan them all in parallel
2039 * just after driver initialization. Any device that
2040 * does not exist will have its target object destroyed
2041 * by the selection timeout handler. In the case of a
2042 * device that appears after the initial DV scan, async
2043 * negotiation will occur for the first command, and DV
2044 * will commence should that first command be successful.
2045 */
2046 for (target = 0; target < host->max_id; target++) {
2047
2048 /*
2049 * Skip our own ID. Some Compaq/HP storage devices
2050 * have enclosure management devices that respond to
2051 * single bit selection (i.e. selecting ourselves).
2052 * It is expected that either an external application
2053 * or a modified kernel will be used to probe this
2054 * ID if it is appropriate. To accommodate these
2055 * installations, ahd_linux_alloc_target() will allocate
2056 * for our ID if asked to do so.
2057 */
2058 if (target == ahd->our_id)
2059 continue;
2060
2061 ahd_linux_alloc_target(ahd, 0, target);
2062 }
2063 ahd_intr_enable(ahd, TRUE); 1103 ahd_intr_enable(ahd, TRUE);
2064 ahd_linux_start_dv(ahd);
2065 ahd_unlock(ahd, &s); 1104 ahd_unlock(ahd, &s);
2066 1105
2067#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1106 host->transportt = ahd_linux_transport_template;
1107
2068 scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */ 1108 scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */
2069 scsi_scan_host(host); 1109 scsi_scan_host(host);
2070#endif
2071 return (0); 1110 return (0);
2072} 1111}
2073 1112
@@ -2081,29 +1120,6 @@ ahd_linux_get_memsize(void)
2081} 1120}
2082 1121
2083/* 1122/*
2084 * Find the smallest available unit number to use
2085 * for a new device. We don't just use a static
2086 * count to handle the "repeated hot-(un)plug"
2087 * scenario.
2088 */
2089static int
2090ahd_linux_next_unit(void)
2091{
2092 struct ahd_softc *ahd;
2093 int unit;
2094
2095 unit = 0;
2096retry:
2097 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
2098 if (ahd->unit == unit) {
2099 unit++;
2100 goto retry;
2101 }
2102 }
2103 return (unit);
2104}
2105
2106/*
2107 * Place the SCSI bus into a known state by either resetting it, 1123 * Place the SCSI bus into a known state by either resetting it,
2108 * or forcing transfer negotiations on the next command to any 1124 * or forcing transfer negotiations on the next command to any
2109 * target. 1125 * target.
@@ -2162,20 +1178,9 @@ ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
2162 if (ahd->platform_data == NULL) 1178 if (ahd->platform_data == NULL)
2163 return (ENOMEM); 1179 return (ENOMEM);
2164 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data)); 1180 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
2165 TAILQ_INIT(&ahd->platform_data->completeq);
2166 TAILQ_INIT(&ahd->platform_data->device_runq);
2167 ahd->platform_data->irq = AHD_LINUX_NOIRQ; 1181 ahd->platform_data->irq = AHD_LINUX_NOIRQ;
2168 ahd->platform_data->hw_dma_mask = 0xFFFFFFFF;
2169 ahd_lockinit(ahd); 1182 ahd_lockinit(ahd);
2170 ahd_done_lockinit(ahd);
2171 init_timer(&ahd->platform_data->completeq_timer);
2172 ahd->platform_data->completeq_timer.data = (u_long)ahd;
2173 ahd->platform_data->completeq_timer.function =
2174 (ahd_linux_callback_t *)ahd_linux_thread_run_complete_queue;
2175 init_MUTEX_LOCKED(&ahd->platform_data->eh_sem); 1183 init_MUTEX_LOCKED(&ahd->platform_data->eh_sem);
2176 init_MUTEX_LOCKED(&ahd->platform_data->dv_sem);
2177 init_MUTEX_LOCKED(&ahd->platform_data->dv_cmd_sem);
2178 ahd_setup_runq_tasklet(ahd);
2179 ahd->seltime = (aic79xx_seltime & 0x3) << 4; 1184 ahd->seltime = (aic79xx_seltime & 0x3) << 4;
2180 return (0); 1185 return (0);
2181} 1186}
@@ -2183,39 +1188,27 @@ ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
2183void 1188void
2184ahd_platform_free(struct ahd_softc *ahd) 1189ahd_platform_free(struct ahd_softc *ahd)
2185{ 1190{
2186 struct ahd_linux_target *targ; 1191 struct scsi_target *starget;
2187 struct ahd_linux_device *dev;
2188 int i, j; 1192 int i, j;
2189 1193
2190 if (ahd->platform_data != NULL) { 1194 if (ahd->platform_data != NULL) {
2191 del_timer_sync(&ahd->platform_data->completeq_timer);
2192 ahd_linux_kill_dv_thread(ahd);
2193 ahd_teardown_runq_tasklet(ahd);
2194 if (ahd->platform_data->host != NULL) { 1195 if (ahd->platform_data->host != NULL) {
2195#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2196 scsi_remove_host(ahd->platform_data->host); 1196 scsi_remove_host(ahd->platform_data->host);
2197#endif
2198 scsi_host_put(ahd->platform_data->host); 1197 scsi_host_put(ahd->platform_data->host);
2199 } 1198 }
2200 1199
2201 /* destroy all of the device and target objects */ 1200 /* destroy all of the device and target objects */
2202 for (i = 0; i < AHD_NUM_TARGETS; i++) { 1201 for (i = 0; i < AHD_NUM_TARGETS; i++) {
2203 targ = ahd->platform_data->targets[i]; 1202 starget = ahd->platform_data->starget[i];
2204 if (targ != NULL) { 1203 if (starget != NULL) {
2205 /* Keep target around through the loop. */
2206 targ->refcount++;
2207 for (j = 0; j < AHD_NUM_LUNS; j++) { 1204 for (j = 0; j < AHD_NUM_LUNS; j++) {
2208 1205 struct ahd_linux_target *targ =
2209 if (targ->devices[j] == NULL) 1206 scsi_transport_target_data(starget);
1207 if (targ->sdev[j] == NULL)
2210 continue; 1208 continue;
2211 dev = targ->devices[j]; 1209 targ->sdev[j] = NULL;
2212 ahd_linux_free_device(ahd, dev);
2213 } 1210 }
2214 /* 1211 ahd->platform_data->starget[i] = NULL;
2215 * Forcibly free the target now that
2216 * all devices are gone.
2217 */
2218 ahd_linux_free_target(ahd, targ);
2219 } 1212 }
2220 } 1213 }
2221 1214
@@ -2233,16 +1226,6 @@ ahd_platform_free(struct ahd_softc *ahd)
2233 release_mem_region(ahd->platform_data->mem_busaddr, 1226 release_mem_region(ahd->platform_data->mem_busaddr,
2234 0x1000); 1227 0x1000);
2235 } 1228 }
2236#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
2237 /*
2238 * In 2.4 we detach from the scsi midlayer before the PCI
2239 * layer invokes our remove callback. No per-instance
2240 * detach is provided, so we must reach inside the PCI
2241 * subsystem's internals and detach our driver manually.
2242 */
2243 if (ahd->dev_softc != NULL)
2244 ahd->dev_softc->driver = NULL;
2245#endif
2246 free(ahd->platform_data, M_DEVBUF); 1229 free(ahd->platform_data, M_DEVBUF);
2247 } 1230 }
2248} 1231}
@@ -2280,13 +1263,22 @@ void
2280ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 1263ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2281 ahd_queue_alg alg) 1264 ahd_queue_alg alg)
2282{ 1265{
1266 struct scsi_target *starget;
1267 struct ahd_linux_target *targ;
2283 struct ahd_linux_device *dev; 1268 struct ahd_linux_device *dev;
1269 struct scsi_device *sdev;
2284 int was_queuing; 1270 int was_queuing;
2285 int now_queuing; 1271 int now_queuing;
2286 1272
2287 dev = ahd_linux_get_device(ahd, devinfo->channel - 'A', 1273 starget = ahd->platform_data->starget[devinfo->target];
2288 devinfo->target, 1274 targ = scsi_transport_target_data(starget);
2289 devinfo->lun, /*alloc*/FALSE); 1275 BUG_ON(targ == NULL);
1276 sdev = targ->sdev[devinfo->lun];
1277 if (sdev == NULL)
1278 return;
1279
1280 dev = scsi_transport_device_data(sdev);
1281
2290 if (dev == NULL) 1282 if (dev == NULL)
2291 return; 1283 return;
2292 was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED); 1284 was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
@@ -2339,1434 +1331,37 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2339 dev->maxtags = 0; 1331 dev->maxtags = 0;
2340 dev->openings = 1 - dev->active; 1332 dev->openings = 1 - dev->active;
2341 } 1333 }
2342#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2343 if (dev->scsi_device != NULL) {
2344 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
2345 case AHD_DEV_Q_BASIC:
2346 scsi_adjust_queue_depth(dev->scsi_device,
2347 MSG_SIMPLE_TASK,
2348 dev->openings + dev->active);
2349 break;
2350 case AHD_DEV_Q_TAGGED:
2351 scsi_adjust_queue_depth(dev->scsi_device,
2352 MSG_ORDERED_TASK,
2353 dev->openings + dev->active);
2354 break;
2355 default:
2356 /*
2357 * We allow the OS to queue 2 untagged transactions to
2358 * us at any time even though we can only execute them
2359 * serially on the controller/device. This should
2360 * remove some latency.
2361 */
2362 scsi_adjust_queue_depth(dev->scsi_device,
2363 /*NON-TAGGED*/0,
2364 /*queue depth*/2);
2365 break;
2366 }
2367 }
2368#endif
2369}
2370
2371int
2372ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
2373 int lun, u_int tag, role_t role, uint32_t status)
2374{
2375 int targ;
2376 int maxtarg;
2377 int maxlun;
2378 int clun;
2379 int count;
2380
2381 if (tag != SCB_LIST_NULL)
2382 return (0);
2383
2384 targ = 0;
2385 if (target != CAM_TARGET_WILDCARD) {
2386 targ = target;
2387 maxtarg = targ + 1;
2388 } else {
2389 maxtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
2390 }
2391 clun = 0;
2392 if (lun != CAM_LUN_WILDCARD) {
2393 clun = lun;
2394 maxlun = clun + 1;
2395 } else {
2396 maxlun = AHD_NUM_LUNS;
2397 }
2398
2399 count = 0;
2400 for (; targ < maxtarg; targ++) {
2401
2402 for (; clun < maxlun; clun++) {
2403 struct ahd_linux_device *dev;
2404 struct ahd_busyq *busyq;
2405 struct ahd_cmd *acmd;
2406
2407 dev = ahd_linux_get_device(ahd, /*chan*/0, targ,
2408 clun, /*alloc*/FALSE);
2409 if (dev == NULL)
2410 continue;
2411
2412 busyq = &dev->busyq;
2413 while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
2414 Scsi_Cmnd *cmd;
2415
2416 cmd = &acmd_scsi_cmd(acmd);
2417 TAILQ_REMOVE(busyq, acmd,
2418 acmd_links.tqe);
2419 count++;
2420 cmd->result = status << 16;
2421 ahd_linux_queue_cmd_complete(ahd, cmd);
2422 }
2423 }
2424 }
2425
2426 return (count);
2427}
2428
2429static void
2430ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd)
2431{
2432 u_long flags;
2433
2434 ahd_lock(ahd, &flags);
2435 del_timer(&ahd->platform_data->completeq_timer);
2436 ahd->platform_data->flags &= ~AHD_RUN_CMPLT_Q_TIMER;
2437 ahd_linux_run_complete_queue(ahd);
2438 ahd_unlock(ahd, &flags);
2439}
2440
2441static void
2442ahd_linux_start_dv(struct ahd_softc *ahd)
2443{
2444
2445 /*
2446 * Freeze the simq and signal ahd_linux_queue to not let any
2447 * more commands through
2448 */
2449 if ((ahd->platform_data->flags & AHD_DV_ACTIVE) == 0) {
2450#ifdef AHD_DEBUG
2451 if (ahd_debug & AHD_SHOW_DV)
2452 printf("%s: Starting DV\n", ahd_name(ahd));
2453#endif
2454
2455 ahd->platform_data->flags |= AHD_DV_ACTIVE;
2456 ahd_freeze_simq(ahd);
2457
2458 /* Wake up the DV kthread */
2459 up(&ahd->platform_data->dv_sem);
2460 }
2461}
2462
2463static int
2464ahd_linux_dv_thread(void *data)
2465{
2466 struct ahd_softc *ahd;
2467 int target;
2468 u_long s;
2469
2470 ahd = (struct ahd_softc *)data;
2471
2472#ifdef AHD_DEBUG
2473 if (ahd_debug & AHD_SHOW_DV)
2474 printf("In DV Thread\n");
2475#endif
2476
2477 /*
2478 * Complete thread creation.
2479 */
2480 lock_kernel();
2481#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
2482 /*
2483 * Don't care about any signals.
2484 */
2485 siginitsetinv(&current->blocked, 0);
2486
2487 daemonize();
2488 sprintf(current->comm, "ahd_dv_%d", ahd->unit);
2489#else
2490 daemonize("ahd_dv_%d", ahd->unit);
2491 current->flags |= PF_NOFREEZE;
2492#endif
2493 unlock_kernel();
2494
2495 while (1) {
2496 /*
2497 * Use down_interruptible() rather than down() to
2498 * avoid inclusion in the load average.
2499 */
2500 down_interruptible(&ahd->platform_data->dv_sem);
2501
2502 /* Check to see if we've been signaled to exit */
2503 ahd_lock(ahd, &s);
2504 if ((ahd->platform_data->flags & AHD_DV_SHUTDOWN) != 0) {
2505 ahd_unlock(ahd, &s);
2506 break;
2507 }
2508 ahd_unlock(ahd, &s);
2509
2510#ifdef AHD_DEBUG
2511 if (ahd_debug & AHD_SHOW_DV)
2512 printf("%s: Beginning Domain Validation\n",
2513 ahd_name(ahd));
2514#endif
2515
2516 /*
2517 * Wait for any pending commands to drain before proceeding.
2518 */
2519 ahd_lock(ahd, &s);
2520 while (LIST_FIRST(&ahd->pending_scbs) != NULL) {
2521 ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_EMPTY;
2522 ahd_unlock(ahd, &s);
2523 down_interruptible(&ahd->platform_data->dv_sem);
2524 ahd_lock(ahd, &s);
2525 }
2526
2527 /*
2528 * Wait for the SIMQ to be released so that DV is the
2529 * only reason the queue is frozen.
2530 */
2531 while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
2532 ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
2533 ahd_unlock(ahd, &s);
2534 down_interruptible(&ahd->platform_data->dv_sem);
2535 ahd_lock(ahd, &s);
2536 }
2537 ahd_unlock(ahd, &s);
2538
2539 for (target = 0; target < AHD_NUM_TARGETS; target++)
2540 ahd_linux_dv_target(ahd, target);
2541
2542 ahd_lock(ahd, &s);
2543 ahd->platform_data->flags &= ~AHD_DV_ACTIVE;
2544 ahd_unlock(ahd, &s);
2545
2546 /*
2547 * Release the SIMQ so that normal commands are
2548 * allowed to continue on the bus.
2549 */
2550 ahd_release_simq(ahd);
2551 }
2552 up(&ahd->platform_data->eh_sem);
2553 return (0);
2554}
2555
2556static void
2557ahd_linux_kill_dv_thread(struct ahd_softc *ahd)
2558{
2559 u_long s;
2560
2561 ahd_lock(ahd, &s);
2562 if (ahd->platform_data->dv_pid != 0) {
2563 ahd->platform_data->flags |= AHD_DV_SHUTDOWN;
2564 ahd_unlock(ahd, &s);
2565 up(&ahd->platform_data->dv_sem);
2566
2567 /*
2568 * Use the eh_sem as an indicator that the
2569 * dv thread is exiting. Note that the dv
2570 * thread must still return after performing
2571 * the up on our semaphore before it has
2572 * completely exited this module. Unfortunately,
2573 * there seems to be no easy way to wait for the
2574 * exit of a thread for which you are not the
2575 * parent (dv threads are parented by init).
2576 * Cross your fingers...
2577 */
2578 down(&ahd->platform_data->eh_sem);
2579
2580 /*
2581 * Mark the dv thread as already dead. This
2582 * avoids attempting to kill it a second time.
2583 * This is necessary because we must kill the
2584 * DV thread before calling ahd_free() in the
2585 * module shutdown case to avoid bogus locking
2586 * in the SCSI mid-layer, but ahd_free() is
2587 * called without killing the DV thread in the
2588 * instance detach case, so ahd_platform_free()
2589 * calls us again to verify that the DV thread
2590 * is dead.
2591 */
2592 ahd->platform_data->dv_pid = 0;
2593 } else {
2594 ahd_unlock(ahd, &s);
2595 }
2596}
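The comment above notes there is no easy way to wait for the exit of a thread parented by init, hence the eh_sem handshake. For reference only, and not part of this patch, the kthread API already present in the tree makes that wait explicit; the names below are illustrative:

#include <linux/kthread.h>

static int
ahd_dv_threadfn(void *data)
{
	/* data would be the struct ahd_softc for this controller. */
	while (!kthread_should_stop()) {
		/* ... run one DV pass, then sleep ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return (0);
}

static void
ahd_dv_kthread_example(struct ahd_softc *ahd)
{
	struct task_struct *t;

	t = kthread_run(ahd_dv_threadfn, ahd, "ahd_dv_%d", ahd->unit);
	if (!IS_ERR(t))
		kthread_stop(t);	/* returns only after ahd_dv_threadfn exits */
}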
2597
2598#define AHD_LINUX_DV_INQ_SHORT_LEN 36
2599#define AHD_LINUX_DV_INQ_LEN 256
2600#define AHD_LINUX_DV_TIMEOUT (HZ / 4)
2601
2602#define AHD_SET_DV_STATE(ahd, targ, newstate) \
2603 ahd_set_dv_state(ahd, targ, newstate, __LINE__)
2604
2605static __inline void
2606ahd_set_dv_state(struct ahd_softc *ahd, struct ahd_linux_target *targ,
2607 ahd_dv_state newstate, u_int line)
2608{
2609 ahd_dv_state oldstate;
2610
2611 oldstate = targ->dv_state;
2612#ifdef AHD_DEBUG
2613 if (ahd_debug & AHD_SHOW_DV)
2614 printf("%s:%d: Going from state %d to state %d\n",
2615 ahd_name(ahd), line, oldstate, newstate);
2616#endif
2617
2618 if (oldstate == newstate)
2619 targ->dv_state_retry++;
2620 else
2621 targ->dv_state_retry = 0;
2622 targ->dv_state = newstate;
2623}
2624
2625static void
2626ahd_linux_dv_target(struct ahd_softc *ahd, u_int target_offset)
2627{
2628 struct ahd_devinfo devinfo;
2629 struct ahd_linux_target *targ;
2630 struct scsi_cmnd *cmd;
2631 struct scsi_device *scsi_dev;
2632 struct scsi_sense_data *sense;
2633 uint8_t *buffer;
2634 u_long s;
2635 u_int timeout;
2636 int echo_size;
2637
2638 sense = NULL;
2639 buffer = NULL;
2640 echo_size = 0;
2641 ahd_lock(ahd, &s);
2642 targ = ahd->platform_data->targets[target_offset];
2643 if (targ == NULL || (targ->flags & AHD_DV_REQUIRED) == 0) {
2644 ahd_unlock(ahd, &s);
2645 return;
2646 }
2647 ahd_compile_devinfo(&devinfo, ahd->our_id, targ->target, /*lun*/0,
2648 targ->channel + 'A', ROLE_INITIATOR);
2649#ifdef AHD_DEBUG
2650 if (ahd_debug & AHD_SHOW_DV) {
2651 ahd_print_devinfo(ahd, &devinfo);
2652 printf("Performing DV\n");
2653 }
2654#endif
2655
2656 ahd_unlock(ahd, &s);
2657
2658 cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
2659 scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
2660 scsi_dev->host = ahd->platform_data->host;
2661 scsi_dev->id = devinfo.target;
2662 scsi_dev->lun = devinfo.lun;
2663 scsi_dev->channel = devinfo.channel - 'A';
2664 ahd->platform_data->dv_scsi_dev = scsi_dev;
2665
2666 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_SHORT_ASYNC);
2667
2668 while (targ->dv_state != AHD_DV_STATE_EXIT) {
2669 timeout = AHD_LINUX_DV_TIMEOUT;
2670 switch (targ->dv_state) {
2671 case AHD_DV_STATE_INQ_SHORT_ASYNC:
2672 case AHD_DV_STATE_INQ_ASYNC:
2673 case AHD_DV_STATE_INQ_ASYNC_VERIFY:
2674 /*
2675 * Set things to async narrow to reduce the
2676 * chance that the INQ will fail.
2677 */
2678 ahd_lock(ahd, &s);
2679 ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
2680 AHD_TRANS_GOAL, /*paused*/FALSE);
2681 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2682 AHD_TRANS_GOAL, /*paused*/FALSE);
2683 ahd_unlock(ahd, &s);
2684 timeout = 10 * HZ;
2685 targ->flags &= ~AHD_INQ_VALID;
2686 /* FALLTHROUGH */
2687 case AHD_DV_STATE_INQ_VERIFY:
2688 {
2689 u_int inq_len;
2690
2691 if (targ->dv_state == AHD_DV_STATE_INQ_SHORT_ASYNC)
2692 inq_len = AHD_LINUX_DV_INQ_SHORT_LEN;
2693 else
2694 inq_len = targ->inq_data->additional_length + 5;
2695 ahd_linux_dv_inq(ahd, cmd, &devinfo, targ, inq_len);
2696 break;
2697 }
2698 case AHD_DV_STATE_TUR:
2699 case AHD_DV_STATE_BUSY:
2700 timeout = 5 * HZ;
2701 ahd_linux_dv_tur(ahd, cmd, &devinfo);
2702 break;
2703 case AHD_DV_STATE_REBD:
2704 ahd_linux_dv_rebd(ahd, cmd, &devinfo, targ);
2705 break;
2706 case AHD_DV_STATE_WEB:
2707 ahd_linux_dv_web(ahd, cmd, &devinfo, targ);
2708 break;
2709
2710 case AHD_DV_STATE_REB:
2711 ahd_linux_dv_reb(ahd, cmd, &devinfo, targ);
2712 break;
2713
2714 case AHD_DV_STATE_SU:
2715 ahd_linux_dv_su(ahd, cmd, &devinfo, targ);
2716 timeout = 50 * HZ;
2717 break;
2718
2719 default:
2720 ahd_print_devinfo(ahd, &devinfo);
2721 printf("Unknown DV state %d\n", targ->dv_state);
2722 goto out;
2723 }
2724
2725 /* Queue the command and wait for it to complete */
2726 /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
2727 init_timer(&cmd->eh_timeout);
2728#ifdef AHD_DEBUG
2729 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2730 /*
2731 * All of the printfs during negotiation
2732 * really slow down the negotiation.
2733 * Add a bit of time just to be safe.
2734 */
2735 timeout += HZ;
2736#endif
2737 scsi_add_timer(cmd, timeout, ahd_linux_dv_timeout);
2738 /*
2739 * In 2.5.X, it is assumed that all calls from the
2740 * "midlayer" (which we are emulating) will have the
2741 * ahd host lock held. For other kernels, the
2742 * io_request_lock must be held.
2743 */
2744#if AHD_SCSI_HAS_HOST_LOCK != 0
2745 ahd_lock(ahd, &s);
2746#else
2747 spin_lock_irqsave(&io_request_lock, s);
2748#endif
2749 ahd_linux_queue(cmd, ahd_linux_dv_complete);
2750#if AHD_SCSI_HAS_HOST_LOCK != 0
2751 ahd_unlock(ahd, &s);
2752#else
2753 spin_unlock_irqrestore(&io_request_lock, s);
2754#endif
2755 down_interruptible(&ahd->platform_data->dv_cmd_sem);
2756 /*
2757 * Wait for the SIMQ to be released so that DV is the
2758 * only reason the queue is frozen.
2759 */
2760 ahd_lock(ahd, &s);
2761 while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
2762 ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
2763 ahd_unlock(ahd, &s);
2764 down_interruptible(&ahd->platform_data->dv_sem);
2765 ahd_lock(ahd, &s);
2766 }
2767 ahd_unlock(ahd, &s);
2768
2769 ahd_linux_dv_transition(ahd, cmd, &devinfo, targ);
2770 }
2771
2772out:
2773 if ((targ->flags & AHD_INQ_VALID) != 0
2774 && ahd_linux_get_device(ahd, devinfo.channel - 'A',
2775 devinfo.target, devinfo.lun,
2776 /*alloc*/FALSE) == NULL) {
2777 /*
2778 * The DV state machine failed to configure this device.
2779 * This is normal if DV is disabled. Since we have inquiry
2780 * data, filter it and use the "optimistic" negotiation
2781 * parameters found in the inquiry string.
2782 */
2783 ahd_linux_filter_inquiry(ahd, &devinfo);
2784 if ((targ->flags & (AHD_BASIC_DV|AHD_ENHANCED_DV)) != 0) {
2785 ahd_print_devinfo(ahd, &devinfo);
2786 printf("DV failed to configure device. "
2787 "Please file a bug report against "
2788 "this driver.\n");
2789 }
2790 }
2791
2792 if (cmd != NULL)
2793 free(cmd, M_DEVBUF);
2794
2795 if (ahd->platform_data->dv_scsi_dev != NULL) {
2796 free(ahd->platform_data->dv_scsi_dev, M_DEVBUF);
2797 ahd->platform_data->dv_scsi_dev = NULL;
2798 }
2799
2800 ahd_lock(ahd, &s);
2801 if (targ->dv_buffer != NULL) {
2802 free(targ->dv_buffer, M_DEVBUF);
2803 targ->dv_buffer = NULL;
2804 }
2805 if (targ->dv_buffer1 != NULL) {
2806 free(targ->dv_buffer1, M_DEVBUF);
2807 targ->dv_buffer1 = NULL;
2808 }
2809 targ->flags &= ~AHD_DV_REQUIRED;
2810 if (targ->refcount == 0)
2811 ahd_linux_free_target(ahd, targ);
2812 ahd_unlock(ahd, &s);
2813}
2814
2815static __inline int
2816ahd_linux_dv_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2817{
2818 u_long s;
2819 int retval;
2820
2821 ahd_lock(ahd, &s);
2822 retval = ahd_linux_fallback(ahd, devinfo);
2823 ahd_unlock(ahd, &s);
2824
2825 return (retval);
2826}
2827
2828static void
2829ahd_linux_dv_transition(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
2830 struct ahd_devinfo *devinfo,
2831 struct ahd_linux_target *targ)
2832{
2833 u_int32_t status;
2834
2835 status = aic_error_action(cmd, targ->inq_data,
2836 ahd_cmd_get_transaction_status(cmd),
2837 ahd_cmd_get_scsi_status(cmd));
2838
2839
2840#ifdef AHD_DEBUG
2841 if (ahd_debug & AHD_SHOW_DV) {
2842 ahd_print_devinfo(ahd, devinfo);
2843 printf("Entering ahd_linux_dv_transition, state= %d, "
2844 "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
2845 status, cmd->result);
2846 }
2847#endif
2848 1334
2849 switch (targ->dv_state) { 1335 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
2850 case AHD_DV_STATE_INQ_SHORT_ASYNC: 1336 case AHD_DEV_Q_BASIC:
2851 case AHD_DV_STATE_INQ_ASYNC: 1337 scsi_adjust_queue_depth(sdev,
2852 switch (status & SS_MASK) { 1338 MSG_SIMPLE_TASK,
2853 case SS_NOP: 1339 dev->openings + dev->active);
2854 {
2855 AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
2856 break;
2857 }
2858 case SS_INQ_REFRESH:
2859 AHD_SET_DV_STATE(ahd, targ,
2860 AHD_DV_STATE_INQ_SHORT_ASYNC);
2861 break;
2862 case SS_TUR:
2863 case SS_RETRY:
2864 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2865 if (ahd_cmd_get_transaction_status(cmd)
2866 == CAM_REQUEUE_REQ)
2867 targ->dv_state_retry--;
2868 if ((status & SS_ERRMASK) == EBUSY)
2869 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
2870 if (targ->dv_state_retry < 10)
2871 break;
2872 /* FALLTHROUGH */
2873 default:
2874 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2875#ifdef AHD_DEBUG
2876 if (ahd_debug & AHD_SHOW_DV) {
2877 ahd_print_devinfo(ahd, devinfo);
2878 printf("Failed DV inquiry, skipping\n");
2879 }
2880#endif
2881 break;
2882 }
2883 break; 1340 break;
2884 case AHD_DV_STATE_INQ_ASYNC_VERIFY: 1341 case AHD_DEV_Q_TAGGED:
2885 switch (status & SS_MASK) { 1342 scsi_adjust_queue_depth(sdev,
2886 case SS_NOP: 1343 MSG_ORDERED_TASK,
2887 { 1344 dev->openings + dev->active);
2888 u_int xportflags;
2889 u_int spi3data;
2890
2891 if (memcmp(targ->inq_data, targ->dv_buffer,
2892 AHD_LINUX_DV_INQ_LEN) != 0) {
2893 /*
2894 * Inquiry data must have changed.
2895 * Try from the top again.
2896 */
2897 AHD_SET_DV_STATE(ahd, targ,
2898 AHD_DV_STATE_INQ_SHORT_ASYNC);
2899 break;
2900 }
2901
2902 AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
2903 targ->flags |= AHD_INQ_VALID;
2904 if (ahd_linux_user_dv_setting(ahd) == 0)
2905 break;
2906
2907 xportflags = targ->inq_data->flags;
2908 if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
2909 break;
2910
2911 spi3data = targ->inq_data->spi3data;
2912 switch (spi3data & SID_SPI_CLOCK_DT_ST) {
2913 default:
2914 case SID_SPI_CLOCK_ST:
2915 /* Assume only basic DV is supported. */
2916 targ->flags |= AHD_BASIC_DV;
2917 break;
2918 case SID_SPI_CLOCK_DT:
2919 case SID_SPI_CLOCK_DT_ST:
2920 targ->flags |= AHD_ENHANCED_DV;
2921 break;
2922 }
2923 break;
2924 }
2925 case SS_INQ_REFRESH:
2926 AHD_SET_DV_STATE(ahd, targ,
2927 AHD_DV_STATE_INQ_SHORT_ASYNC);
2928 break;
2929 case SS_TUR:
2930 case SS_RETRY:
2931 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2932 if (ahd_cmd_get_transaction_status(cmd)
2933 == CAM_REQUEUE_REQ)
2934 targ->dv_state_retry--;
2935
2936 if ((status & SS_ERRMASK) == EBUSY)
2937 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
2938 if (targ->dv_state_retry < 10)
2939 break;
2940 /* FALLTHROUGH */
2941 default:
2942 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2943#ifdef AHD_DEBUG
2944 if (ahd_debug & AHD_SHOW_DV) {
2945 ahd_print_devinfo(ahd, devinfo);
2946 printf("Failed DV inquiry, skipping\n");
2947 }
2948#endif
2949 break;
2950 }
2951 break; 1345 break;
2952 case AHD_DV_STATE_INQ_VERIFY:
2953 switch (status & SS_MASK) {
2954 case SS_NOP:
2955 {
2956
2957 if (memcmp(targ->inq_data, targ->dv_buffer,
2958 AHD_LINUX_DV_INQ_LEN) == 0) {
2959 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2960 break;
2961 }
2962
2963#ifdef AHD_DEBUG
2964 if (ahd_debug & AHD_SHOW_DV) {
2965 int i;
2966
2967 ahd_print_devinfo(ahd, devinfo);
2968 printf("Inquiry buffer mismatch:");
2969 for (i = 0; i < AHD_LINUX_DV_INQ_LEN; i++) {
2970 if ((i & 0xF) == 0)
2971 printf("\n ");
2972 printf("0x%x:0x0%x ",
2973 ((uint8_t *)targ->inq_data)[i],
2974 targ->dv_buffer[i]);
2975 }
2976 printf("\n");
2977 }
2978#endif
2979
2980 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
2981 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2982 break;
2983 }
2984 /*
2985 * Do not count "falling back"
2986 * against our retries.
2987 */
2988 targ->dv_state_retry = 0;
2989 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2990 break;
2991 }
2992 case SS_INQ_REFRESH:
2993 AHD_SET_DV_STATE(ahd, targ,
2994 AHD_DV_STATE_INQ_SHORT_ASYNC);
2995 break;
2996 case SS_TUR:
2997 case SS_RETRY:
2998 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2999 if (ahd_cmd_get_transaction_status(cmd)
3000 == CAM_REQUEUE_REQ) {
3001 targ->dv_state_retry--;
3002 } else if ((status & SSQ_FALLBACK) != 0) {
3003 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3004 AHD_SET_DV_STATE(ahd, targ,
3005 AHD_DV_STATE_EXIT);
3006 break;
3007 }
3008 /*
3009 * Do not count "falling back"
3010 * against our retries.
3011 */
3012 targ->dv_state_retry = 0;
3013 } else if ((status & SS_ERRMASK) == EBUSY)
3014 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3015 if (targ->dv_state_retry < 10)
3016 break;
3017 /* FALLTHROUGH */
3018 default:
3019 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3020#ifdef AHD_DEBUG
3021 if (ahd_debug & AHD_SHOW_DV) {
3022 ahd_print_devinfo(ahd, devinfo);
3023 printf("Failed DV inquiry, skipping\n");
3024 }
3025#endif
3026 break;
3027 }
3028 break;
3029
3030 case AHD_DV_STATE_TUR:
3031 switch (status & SS_MASK) {
3032 case SS_NOP:
3033 if ((targ->flags & AHD_BASIC_DV) != 0) {
3034 ahd_linux_filter_inquiry(ahd, devinfo);
3035 AHD_SET_DV_STATE(ahd, targ,
3036 AHD_DV_STATE_INQ_VERIFY);
3037 } else if ((targ->flags & AHD_ENHANCED_DV) != 0) {
3038 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REBD);
3039 } else {
3040 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3041 }
3042 break;
3043 case SS_RETRY:
3044 case SS_TUR:
3045 if ((status & SS_ERRMASK) == EBUSY) {
3046 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3047 break;
3048 }
3049 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3050 if (ahd_cmd_get_transaction_status(cmd)
3051 == CAM_REQUEUE_REQ) {
3052 targ->dv_state_retry--;
3053 } else if ((status & SSQ_FALLBACK) != 0) {
3054 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3055 AHD_SET_DV_STATE(ahd, targ,
3056 AHD_DV_STATE_EXIT);
3057 break;
3058 }
3059 /*
3060 * Do not count "falling back"
3061 * against our retries.
3062 */
3063 targ->dv_state_retry = 0;
3064 }
3065 if (targ->dv_state_retry >= 10) {
3066#ifdef AHD_DEBUG
3067 if (ahd_debug & AHD_SHOW_DV) {
3068 ahd_print_devinfo(ahd, devinfo);
3069 				printf("DV TUR retries exhausted\n");
3070 }
3071#endif
3072 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3073 break;
3074 }
3075 if (status & SSQ_DELAY)
3076 ssleep(1);
3077
3078 break;
3079 case SS_START:
3080 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_SU);
3081 break;
3082 case SS_INQ_REFRESH:
3083 AHD_SET_DV_STATE(ahd, targ,
3084 AHD_DV_STATE_INQ_SHORT_ASYNC);
3085 break;
3086 default:
3087 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3088 break;
3089 }
3090 break;
3091
3092 case AHD_DV_STATE_REBD:
3093 switch (status & SS_MASK) {
3094 case SS_NOP:
3095 {
3096 uint32_t echo_size;
3097
3098 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
3099 echo_size = scsi_3btoul(&targ->dv_buffer[1]);
3100 echo_size &= 0x1FFF;
3101#ifdef AHD_DEBUG
3102 if (ahd_debug & AHD_SHOW_DV) {
3103 ahd_print_devinfo(ahd, devinfo);
3104 printf("Echo buffer size= %d\n", echo_size);
3105 }
3106#endif
3107 if (echo_size == 0) {
3108 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3109 break;
3110 }
3111
3112 /* Generate the buffer pattern */
3113 targ->dv_echo_size = echo_size;
3114 ahd_linux_generate_dv_pattern(targ);
3115 /*
3116 * Setup initial negotiation values.
3117 */
3118 ahd_linux_filter_inquiry(ahd, devinfo);
3119 break;
3120 }
3121 case SS_INQ_REFRESH:
3122 AHD_SET_DV_STATE(ahd, targ,
3123 AHD_DV_STATE_INQ_SHORT_ASYNC);
3124 break;
3125 case SS_RETRY:
3126 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3127 if (ahd_cmd_get_transaction_status(cmd)
3128 == CAM_REQUEUE_REQ)
3129 targ->dv_state_retry--;
3130 if (targ->dv_state_retry <= 10)
3131 break;
3132#ifdef AHD_DEBUG
3133 if (ahd_debug & AHD_SHOW_DV) {
3134 ahd_print_devinfo(ahd, devinfo);
3135 				printf("DV REBD retries exhausted\n");
3136 }
3137#endif
3138 /* FALLTHROUGH */
3139 case SS_FATAL:
3140 default:
3141 /*
3142 * Setup initial negotiation values
3143 * and try level 1 DV.
3144 */
3145 ahd_linux_filter_inquiry(ahd, devinfo);
3146 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_VERIFY);
3147 targ->dv_echo_size = 0;
3148 break;
3149 }
3150 break;
3151
3152 case AHD_DV_STATE_WEB:
3153 switch (status & SS_MASK) {
3154 case SS_NOP:
3155 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REB);
3156 break;
3157 case SS_INQ_REFRESH:
3158 AHD_SET_DV_STATE(ahd, targ,
3159 AHD_DV_STATE_INQ_SHORT_ASYNC);
3160 break;
3161 case SS_RETRY:
3162 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3163 if (ahd_cmd_get_transaction_status(cmd)
3164 == CAM_REQUEUE_REQ) {
3165 targ->dv_state_retry--;
3166 } else if ((status & SSQ_FALLBACK) != 0) {
3167 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3168 AHD_SET_DV_STATE(ahd, targ,
3169 AHD_DV_STATE_EXIT);
3170 break;
3171 }
3172 /*
3173 * Do not count "falling back"
3174 * against our retries.
3175 */
3176 targ->dv_state_retry = 0;
3177 }
3178 if (targ->dv_state_retry <= 10)
3179 break;
3180 /* FALLTHROUGH */
3181#ifdef AHD_DEBUG
3182 if (ahd_debug & AHD_SHOW_DV) {
3183 ahd_print_devinfo(ahd, devinfo);
3184 				printf("DV WEB retries exhausted\n");
3185 }
3186#endif
3187 default:
3188 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3189 break;
3190 }
3191 break;
3192
3193 case AHD_DV_STATE_REB:
3194 switch (status & SS_MASK) {
3195 case SS_NOP:
3196 if (memcmp(targ->dv_buffer, targ->dv_buffer1,
3197 targ->dv_echo_size) != 0) {
3198 if (ahd_linux_dv_fallback(ahd, devinfo) != 0)
3199 AHD_SET_DV_STATE(ahd, targ,
3200 AHD_DV_STATE_EXIT);
3201 else
3202 AHD_SET_DV_STATE(ahd, targ,
3203 AHD_DV_STATE_WEB);
3204 break;
3205 }
3206
3207 if (targ->dv_buffer != NULL) {
3208 free(targ->dv_buffer, M_DEVBUF);
3209 targ->dv_buffer = NULL;
3210 }
3211 if (targ->dv_buffer1 != NULL) {
3212 free(targ->dv_buffer1, M_DEVBUF);
3213 targ->dv_buffer1 = NULL;
3214 }
3215 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3216 break;
3217 case SS_INQ_REFRESH:
3218 AHD_SET_DV_STATE(ahd, targ,
3219 AHD_DV_STATE_INQ_SHORT_ASYNC);
3220 break;
3221 case SS_RETRY:
3222 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3223 if (ahd_cmd_get_transaction_status(cmd)
3224 == CAM_REQUEUE_REQ) {
3225 targ->dv_state_retry--;
3226 } else if ((status & SSQ_FALLBACK) != 0) {
3227 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3228 AHD_SET_DV_STATE(ahd, targ,
3229 AHD_DV_STATE_EXIT);
3230 break;
3231 }
3232 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
3233 }
3234 if (targ->dv_state_retry <= 10) {
3235 if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
3236 msleep(ahd->our_id*1000/10);
3237 break;
3238 }
3239#ifdef AHD_DEBUG
3240 if (ahd_debug & AHD_SHOW_DV) {
3241 ahd_print_devinfo(ahd, devinfo);
3242 				printf("DV REB retries exhausted\n");
3243 }
3244#endif
3245 /* FALLTHROUGH */
3246 default:
3247 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3248 break;
3249 }
3250 break;
3251
3252 case AHD_DV_STATE_SU:
3253 switch (status & SS_MASK) {
3254 case SS_NOP:
3255 case SS_INQ_REFRESH:
3256 AHD_SET_DV_STATE(ahd, targ,
3257 AHD_DV_STATE_INQ_SHORT_ASYNC);
3258 break;
3259 default:
3260 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3261 break;
3262 }
3263 break;
3264
3265 case AHD_DV_STATE_BUSY:
3266 switch (status & SS_MASK) {
3267 case SS_NOP:
3268 case SS_INQ_REFRESH:
3269 AHD_SET_DV_STATE(ahd, targ,
3270 AHD_DV_STATE_INQ_SHORT_ASYNC);
3271 break;
3272 case SS_TUR:
3273 case SS_RETRY:
3274 AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3275 if (ahd_cmd_get_transaction_status(cmd)
3276 == CAM_REQUEUE_REQ) {
3277 targ->dv_state_retry--;
3278 } else if (targ->dv_state_retry < 60) {
3279 if ((status & SSQ_DELAY) != 0)
3280 ssleep(1);
3281 } else {
3282#ifdef AHD_DEBUG
3283 if (ahd_debug & AHD_SHOW_DV) {
3284 ahd_print_devinfo(ahd, devinfo);
3285 					printf("DV BUSY retries exhausted\n");
3286 }
3287#endif
3288 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3289 }
3290 break;
3291 default:
3292 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3293 break;
3294 }
3295 break;
3296
3297 default: 1346 default:
3298 printf("%s: Invalid DV completion state %d\n", ahd_name(ahd),
3299 targ->dv_state);
3300 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3301 break;
3302 }
3303}
3304
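The switch above is a per-target domain-validation state machine: each command completion either advances to the next DV state, re-enters the current state under a bounded retry budget, or gives up and moves to AHD_DV_STATE_EXIT. Below is a minimal, self-contained sketch of that retry-bounded pattern; the enum names and the dv_set_state() helper are illustrative stand-ins, not the driver's AHD_SET_DV_STATE macro.

#include <stdio.h>

/* Illustrative DV states; the real driver has more (SU, BUSY, verify states). */
enum dv_state { DV_INQ_SHORT_ASYNC, DV_TUR, DV_REBD, DV_WEB, DV_REB, DV_EXIT };

struct dv_ctx {
	enum dv_state state;
	int retries;		/* attempts spent in the current state */
};

/*
 * Move to a new state (resetting the retry budget) or re-enter the current
 * one; after ten attempts in the same state, give up and exit, mirroring
 * the "dv_state_retry < 10" checks in ahd_linux_dv_transition().
 */
static void dv_set_state(struct dv_ctx *ctx, enum dv_state next)
{
	if (next != ctx->state) {
		ctx->state = next;
		ctx->retries = 0;
	} else if (++ctx->retries >= 10) {
		ctx->state = DV_EXIT;
	}
}

int main(void)
{
	struct dv_ctx ctx = { DV_TUR, 0 };
	int i;

	for (i = 0; i < 12; i++)	/* simulate retryable TUR failures */
		dv_set_state(&ctx, DV_TUR);
	printf("final state %d after %d attempts\n", ctx.state, ctx.retries);
	return 0;
}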
3305static void
3306ahd_linux_dv_fill_cmd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3307 struct ahd_devinfo *devinfo)
3308{
3309 memset(cmd, 0, sizeof(struct scsi_cmnd));
3310 cmd->device = ahd->platform_data->dv_scsi_dev;
3311 cmd->scsi_done = ahd_linux_dv_complete;
3312}
3313
3314/*
3315 * Synthesize an inquiry command. On the return trip, it'll be
3316 * sniffed and the device transfer settings set for us.
3317 */
3318static void
3319ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3320 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ,
3321 u_int request_length)
3322{
3323
3324#ifdef AHD_DEBUG
3325 if (ahd_debug & AHD_SHOW_DV) {
3326 ahd_print_devinfo(ahd, devinfo);
3327 printf("Sending INQ\n");
3328 }
3329#endif
3330 if (targ->inq_data == NULL)
3331 targ->inq_data = malloc(AHD_LINUX_DV_INQ_LEN,
3332 M_DEVBUF, M_WAITOK);
3333 if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC) {
3334 if (targ->dv_buffer != NULL)
3335 free(targ->dv_buffer, M_DEVBUF);
3336 targ->dv_buffer = malloc(AHD_LINUX_DV_INQ_LEN,
3337 M_DEVBUF, M_WAITOK);
3338 }
3339
3340 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3341 cmd->sc_data_direction = DMA_FROM_DEVICE;
3342 cmd->cmd_len = 6;
3343 cmd->cmnd[0] = INQUIRY;
3344 cmd->cmnd[4] = request_length;
3345 cmd->request_bufflen = request_length;
3346 if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC)
3347 cmd->request_buffer = targ->dv_buffer;
3348 else
3349 cmd->request_buffer = targ->inq_data;
3350 memset(cmd->request_buffer, 0, AHD_LINUX_DV_INQ_LEN);
3351}
3352
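ahd_linux_dv_inq() above only needs to populate a classic 6-byte INQUIRY CDB: opcode 0x12 in byte 0 and the allocation length in byte 4, with the data returned DMA_FROM_DEVICE into inq_data or dv_buffer. The sketch below builds the same CDB into a plain byte array so it can be compiled outside the kernel; build_inquiry_cdb() is a hypothetical helper, not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_inquiry_cdb(uint8_t cdb[6], uint8_t alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x12;		/* INQUIRY */
	cdb[4] = alloc_len;	/* bytes of inquiry data requested (<= 255) */
}

int main(void)
{
	uint8_t cdb[6];
	int i;

	build_inquiry_cdb(cdb, 36);	/* 36 bytes covers standard inquiry data */
	for (i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}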
3353static void
3354ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3355 struct ahd_devinfo *devinfo)
3356{
3357
3358#ifdef AHD_DEBUG
3359 if (ahd_debug & AHD_SHOW_DV) {
3360 ahd_print_devinfo(ahd, devinfo);
3361 printf("Sending TUR\n");
3362 }
3363#endif
3364 /* Do a TUR to clear out any non-fatal transitional state */
3365 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3366 cmd->sc_data_direction = DMA_NONE;
3367 cmd->cmd_len = 6;
3368 cmd->cmnd[0] = TEST_UNIT_READY;
3369}
3370
3371#define AHD_REBD_LEN 4
3372
3373static void
3374ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3375 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3376{
3377
3378#ifdef AHD_DEBUG
3379 if (ahd_debug & AHD_SHOW_DV) {
3380 ahd_print_devinfo(ahd, devinfo);
3381 printf("Sending REBD\n");
3382 }
3383#endif
3384 if (targ->dv_buffer != NULL)
3385 free(targ->dv_buffer, M_DEVBUF);
3386 targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
3387 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3388 cmd->sc_data_direction = DMA_FROM_DEVICE;
3389 cmd->cmd_len = 10;
3390 cmd->cmnd[0] = READ_BUFFER;
3391 cmd->cmnd[1] = 0x0b;
3392 scsi_ulto3b(AHD_REBD_LEN, &cmd->cmnd[6]);
3393 cmd->request_bufflen = AHD_REBD_LEN;
3394 cmd->underflow = cmd->request_bufflen;
3395 cmd->request_buffer = targ->dv_buffer;
3396}
3397
3398static void
3399ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3400 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3401{
3402
3403#ifdef AHD_DEBUG
3404 if (ahd_debug & AHD_SHOW_DV) {
3405 ahd_print_devinfo(ahd, devinfo);
3406 printf("Sending WEB\n");
3407 }
3408#endif
3409 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3410 cmd->sc_data_direction = DMA_TO_DEVICE;
3411 cmd->cmd_len = 10;
3412 cmd->cmnd[0] = WRITE_BUFFER;
3413 cmd->cmnd[1] = 0x0a;
3414 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3415 cmd->request_bufflen = targ->dv_echo_size;
3416 cmd->underflow = cmd->request_bufflen;
3417 cmd->request_buffer = targ->dv_buffer;
3418}
3419
3420static void
3421ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3422 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3423{
3424
3425#ifdef AHD_DEBUG
3426 if (ahd_debug & AHD_SHOW_DV) {
3427 ahd_print_devinfo(ahd, devinfo);
3428 printf("Sending REB\n");
3429 }
3430#endif
3431 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3432 cmd->sc_data_direction = DMA_FROM_DEVICE;
3433 cmd->cmd_len = 10;
3434 cmd->cmnd[0] = READ_BUFFER;
3435 cmd->cmnd[1] = 0x0a;
3436 scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3437 cmd->request_bufflen = targ->dv_echo_size;
3438 cmd->underflow = cmd->request_bufflen;
3439 cmd->request_buffer = targ->dv_buffer1;
3440}
3441
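Taken together, ahd_linux_dv_rebd/web/reb() implement the enhanced-DV echo-buffer handshake: READ BUFFER with cmnd[1] = 0x0b fetches the echo buffer descriptor (the state machine masks the reported size with 0x1FFF), WRITE BUFFER with cmnd[1] = 0x0a sends the generated pattern, and a final READ BUFFER in echo mode reads it back for the memcmp() in AHD_DV_STATE_REB. The standalone sketch below only builds the three CDBs and parses a sample descriptor; build_cdb10() is a hypothetical helper, 0x3c/0x3b are the standard READ BUFFER/WRITE BUFFER opcodes, and the actual I/O is left out.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store a 24-bit big-endian length at p, like the driver's scsi_ulto3b(). */
static void put3b(uint32_t v, uint8_t *p)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

/* Build a 10-byte READ BUFFER/WRITE BUFFER CDB for the given mode and length. */
static void build_cdb10(uint8_t cdb[10], uint8_t opcode, uint8_t mode,
			uint32_t len)
{
	memset(cdb, 0, 10);
	cdb[0] = opcode;	/* 0x3c = READ BUFFER, 0x3b = WRITE BUFFER */
	cdb[1] = mode;		/* 0x0b = echo descriptor, 0x0a = echo data */
	put3b(len, &cdb[6]);
}

int main(void)
{
	uint8_t rebd[10], web[10], reb[10];
	/* A sample 4-byte echo buffer descriptor as a device might return it. */
	uint8_t desc[4] = { 0x00, 0x00, 0x10, 0x00 };
	uint32_t echo_size;

	build_cdb10(rebd, 0x3c, 0x0b, sizeof(desc));	/* REBD: read descriptor */
	echo_size = ((desc[1] << 16) | (desc[2] << 8) | desc[3]) & 0x1FFF;
	build_cdb10(web, 0x3b, 0x0a, echo_size);	/* WEB: write pattern */
	build_cdb10(reb, 0x3c, 0x0a, echo_size);	/* REB: read it back */

	printf("echo buffer size %u bytes\n", echo_size);
	/* The driver then compares dv_buffer and dv_buffer1 with memcmp(). */
	return 0;
}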
3442static void
3443ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3444 struct ahd_devinfo *devinfo,
3445 struct ahd_linux_target *targ)
3446{
3447 u_int le;
3448
3449 le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
3450
3451#ifdef AHD_DEBUG
3452 if (ahd_debug & AHD_SHOW_DV) {
3453 ahd_print_devinfo(ahd, devinfo);
3454 printf("Sending SU\n");
3455 }
3456#endif
3457 ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3458 cmd->sc_data_direction = DMA_NONE;
3459 cmd->cmd_len = 6;
3460 cmd->cmnd[0] = START_STOP_UNIT;
3461 cmd->cmnd[4] = le | SSS_START;
3462}
3463
3464static int
3465ahd_linux_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3466{
3467 struct ahd_linux_target *targ;
3468 struct ahd_initiator_tinfo *tinfo;
3469 struct ahd_transinfo *goal;
3470 struct ahd_tmode_tstate *tstate;
3471 u_int width;
3472 u_int period;
3473 u_int offset;
3474 u_int ppr_options;
3475 u_int cur_speed;
3476 u_int wide_speed;
3477 u_int narrow_speed;
3478 u_int fallback_speed;
3479
3480#ifdef AHD_DEBUG
3481 if (ahd_debug & AHD_SHOW_DV) {
3482 ahd_print_devinfo(ahd, devinfo);
3483 printf("Trying to fallback\n");
3484 }
3485#endif
3486 targ = ahd->platform_data->targets[devinfo->target_offset];
3487 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
3488 devinfo->our_scsiid,
3489 devinfo->target, &tstate);
3490 goal = &tinfo->goal;
3491 width = goal->width;
3492 period = goal->period;
3493 offset = goal->offset;
3494 ppr_options = goal->ppr_options;
3495 if (offset == 0)
3496 period = AHD_ASYNC_XFER_PERIOD;
3497 if (targ->dv_next_narrow_period == 0)
3498 targ->dv_next_narrow_period = MAX(period, AHD_SYNCRATE_ULTRA2);
3499 if (targ->dv_next_wide_period == 0)
3500 targ->dv_next_wide_period = period;
3501 if (targ->dv_max_width == 0)
3502 targ->dv_max_width = width;
3503 if (targ->dv_max_ppr_options == 0)
3504 targ->dv_max_ppr_options = ppr_options;
3505 if (targ->dv_last_ppr_options == 0)
3506 targ->dv_last_ppr_options = ppr_options;
3507
3508 cur_speed = aic_calc_speed(width, period, offset, AHD_SYNCRATE_MIN);
3509 wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
3510 targ->dv_next_wide_period,
3511 MAX_OFFSET, AHD_SYNCRATE_MIN);
3512 narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
3513 targ->dv_next_narrow_period,
3514 MAX_OFFSET, AHD_SYNCRATE_MIN);
3515 fallback_speed = aic_calc_speed(width, period+1, offset,
3516 AHD_SYNCRATE_MIN);
3517#ifdef AHD_DEBUG
3518 if (ahd_debug & AHD_SHOW_DV) {
3519 printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
3520 "fallback_speed= %d\n", cur_speed, wide_speed,
3521 narrow_speed, fallback_speed);
3522 }
3523#endif
3524
3525 if (cur_speed > 160000) {
3526 /*
3527 * Paced/DT/IU_REQ only transfer speeds. All we
3528 * can do is fallback in terms of syncrate.
3529 */
3530 period++;
3531 } else if (cur_speed > 80000) {
3532 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3533 /*
3534 * Try without IU_REQ as it may be confusing
3535 * an expander.
3536 */
3537 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3538 } else {
3539 /*
3540 * Paced/DT only transfer speeds. All we
3541 * can do is fallback in terms of syncrate.
3542 */
3543 period++;
3544 ppr_options = targ->dv_max_ppr_options;
3545 }
3546 } else if (cur_speed > 3300) {
3547
3548 /* 1347 /*
3549 		 * In this range we have the following 1348 	 * We allow the OS to queue 2 untagged transactions to
3550 		 * options ordered from highest to 1349 	 * us at any time even though we can only execute them
3551 		 * lowest desirability: 1350 	 * serially on the controller/device. This should
3552 * 1351 * remove some latency.
3553 * o Wide/DT
3554 * o Wide/non-DT
3555 		 * o Narrow at a potentially higher sync rate.
3556 *
3557 * All modes are tested with and without IU_REQ
3558 * set since using IUs may confuse an expander.
3559 */ 1352 */
3560 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { 1353 scsi_adjust_queue_depth(sdev,
3561 1354 /*NON-TAGGED*/0,
3562 ppr_options &= ~MSG_EXT_PPR_IU_REQ; 1355 /*queue depth*/2);
3563 } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { 1356 break;
3564 /*
3565 * Try going non-DT.
3566 */
3567 ppr_options = targ->dv_max_ppr_options;
3568 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
3569 } else if (targ->dv_last_ppr_options != 0) {
3570 /*
3571 * Try without QAS or any other PPR options.
3572 * We may need a non-PPR message to work with
3573 * an expander. We look at the "last PPR options"
3574 * so we will perform this fallback even if the
3575 * target responded to our PPR negotiation with
3576 * no option bits set.
3577 */
3578 ppr_options = 0;
3579 } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
3580 /*
3581 * If the next narrow speed is greater than
3582 * the next wide speed, fallback to narrow.
3583 * Otherwise fallback to the next DT/Wide setting.
3584 * The narrow async speed will always be smaller
3585 * than the wide async speed, so handle this case
3586 * specifically.
3587 */
3588 ppr_options = targ->dv_max_ppr_options;
3589 if (narrow_speed > fallback_speed
3590 || period >= AHD_ASYNC_XFER_PERIOD) {
3591 targ->dv_next_wide_period = period+1;
3592 width = MSG_EXT_WDTR_BUS_8_BIT;
3593 period = targ->dv_next_narrow_period;
3594 } else {
3595 period++;
3596 }
3597 } else if ((ahd->features & AHD_WIDE) != 0
3598 && targ->dv_max_width != 0
3599 && wide_speed >= fallback_speed
3600 && (targ->dv_next_wide_period <= AHD_ASYNC_XFER_PERIOD
3601 || period >= AHD_ASYNC_XFER_PERIOD)) {
3602
3603 /*
3604 * We are narrow. Try falling back
3605 * to the next wide speed with
3606 * all supported ppr options set.
3607 */
3608 targ->dv_next_narrow_period = period+1;
3609 width = MSG_EXT_WDTR_BUS_16_BIT;
3610 period = targ->dv_next_wide_period;
3611 ppr_options = targ->dv_max_ppr_options;
3612 } else {
3613 /* Only narrow fallback is allowed. */
3614 period++;
3615 ppr_options = targ->dv_max_ppr_options;
3616 }
3617 } else {
3618 return (-1);
3619 }
3620 offset = MAX_OFFSET;
3621 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_PACED);
3622 ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, FALSE);
3623 if (period == 0) {
3624 period = 0;
3625 offset = 0;
3626 ppr_options = 0;
3627 if (width == MSG_EXT_WDTR_BUS_8_BIT)
3628 targ->dv_next_narrow_period = AHD_ASYNC_XFER_PERIOD;
3629 else
3630 targ->dv_next_wide_period = AHD_ASYNC_XFER_PERIOD;
3631 }
3632 ahd_set_syncrate(ahd, devinfo, period, offset,
3633 ppr_options, AHD_TRANS_GOAL, FALSE);
3634 targ->dv_last_ppr_options = ppr_options;
3635 return (0);
3636}
3637
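The long comment inside ahd_linux_fallback() describes a ladder of progressively less aggressive transfer agreements for the mid-speed range: drop IU_REQ first, then DT, then the remaining PPR options, then trade bus width for a higher narrow rate, and finally just lengthen the period. The sketch below captures only that ordering; the flag values, the fallback_step() helper, and the period cut-off of 12 are placeholders, and it deliberately omits the wide_speed/narrow_speed/fallback_speed comparisons the real routine performs.

#include <stdio.h>

#define PPR_IU_REQ	0x01	/* placeholder bits, not the MSG_EXT_PPR_* values */
#define PPR_DT_REQ	0x02
#define PPR_QAS_REQ	0x04
#define WIDTH_16	1
#define WIDTH_8		0

struct goal {
	unsigned ppr_options;
	int width;		/* WIDTH_16 or WIDTH_8 */
	unsigned period;	/* larger period == slower sync rate */
};

/* Apply one fallback step; return -1 once there is nothing left to try. */
static int fallback_step(struct goal *g, unsigned max_ppr_options)
{
	if (g->ppr_options & PPR_IU_REQ) {
		g->ppr_options &= ~PPR_IU_REQ;	/* 1. stop using IUs */
	} else if (g->ppr_options & PPR_DT_REQ) {
		g->ppr_options &= ~PPR_DT_REQ;	/* 2. go non-DT */
	} else if (g->ppr_options != 0) {
		g->ppr_options = 0;		/* 3. plain WDTR/SDTR only */
	} else if (g->width == WIDTH_16) {
		g->width = WIDTH_8;		/* 4. trade width for rate */
		g->ppr_options = max_ppr_options;
	} else if (g->period < 12) {
		g->period++;			/* 5. finally, slow the clock */
	} else {
		return -1;			/* out of options */
	}
	return 0;
}

int main(void)
{
	unsigned max = PPR_IU_REQ | PPR_DT_REQ | PPR_QAS_REQ;
	struct goal g = { max, WIDTH_16, 9 };

	while (fallback_step(&g, max) == 0)
		printf("ppr=0x%x width=%d period=%u\n",
		       g.ppr_options, g.width, g.period);
	return 0;
}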
3638static void
3639ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
3640{
3641 struct ahd_softc *ahd;
3642 struct scb *scb;
3643 u_long flags;
3644
3645 ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
3646 ahd_lock(ahd, &flags);
3647
3648#ifdef AHD_DEBUG
3649 if (ahd_debug & AHD_SHOW_DV) {
3650 printf("%s: Timeout while doing DV command %x.\n",
3651 ahd_name(ahd), cmd->cmnd[0]);
3652 ahd_dump_card_state(ahd);
3653 }
3654#endif
3655
3656 /*
3657 * Guard against "done race". No action is
3658 * required if we just completed.
3659 */
3660 if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
3661 ahd_unlock(ahd, &flags);
3662 return;
3663 } 1357 }
3664
3665 /*
3666 * Command has not completed. Mark this
3667 * SCB as having failing status prior to
3668 * resetting the bus, so we get the correct
3669 * error code.
3670 */
3671 if ((scb->flags & SCB_SENSE) != 0)
3672 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
3673 else
3674 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
3675 ahd_reset_channel(ahd, cmd->device->channel + 'A', /*initiate*/TRUE);
3676
3677 /*
3678 * Add a minimal bus settle delay for devices that are slow to
3679 * respond after bus resets.
3680 */
3681 ahd_freeze_simq(ahd);
3682 init_timer(&ahd->platform_data->reset_timer);
3683 ahd->platform_data->reset_timer.data = (u_long)ahd;
3684 ahd->platform_data->reset_timer.expires = jiffies + HZ / 2;
3685 ahd->platform_data->reset_timer.function =
3686 (ahd_linux_callback_t *)ahd_release_simq;
3687 add_timer(&ahd->platform_data->reset_timer);
3688 if (ahd_linux_next_device_to_run(ahd) != NULL)
3689 ahd_schedule_runq(ahd);
3690 ahd_linux_run_complete_queue(ahd);
3691 ahd_unlock(ahd, &flags);
3692}
3693
3694static void
3695ahd_linux_dv_complete(struct scsi_cmnd *cmd)
3696{
3697 struct ahd_softc *ahd;
3698
3699 ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
3700
3701 /* Delete the DV timer before it goes off! */
3702 scsi_delete_timer(cmd);
3703
3704#ifdef AHD_DEBUG
3705 if (ahd_debug & AHD_SHOW_DV)
3706 printf("%s:%c:%d: Command completed, status= 0x%x\n",
3707 ahd_name(ahd), cmd->device->channel, cmd->device->id,
3708 cmd->result);
3709#endif
3710
3711 /* Wake up the state machine */
3712 up(&ahd->platform_data->dv_cmd_sem);
3713} 1358}
3714 1359
3715static void 1360int
3716ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ) 1361ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
1362 int lun, u_int tag, role_t role, uint32_t status)
3717{ 1363{
3718 uint16_t b; 1364 return 0;
3719 u_int i;
3720 u_int j;
3721
3722 if (targ->dv_buffer != NULL)
3723 free(targ->dv_buffer, M_DEVBUF);
3724 targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3725 if (targ->dv_buffer1 != NULL)
3726 free(targ->dv_buffer1, M_DEVBUF);
3727 targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3728
3729 i = 0;
3730
3731 b = 0x0001;
3732 for (j = 0 ; i < targ->dv_echo_size; j++) {
3733 if (j < 32) {
3734 /*
3735 			 * 32 bytes of sequential numbers.
3736 */
3737 targ->dv_buffer[i++] = j & 0xff;
3738 } else if (j < 48) {
3739 /*
3740 			 * 16 bytes of repeating 0x0000, 0xffff.
3741 */
3742 targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
3743 } else if (j < 64) {
3744 /*
3745 			 * 16 bytes of repeating 0x5555, 0xaaaa.
3746 */
3747 targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
3748 } else {
3749 /*
3750 * Remaining buffer is filled with a repeating
3751 			 * pattern of:
3752 *
3753 * 0xffff
3754 * ~0x0001 << shifted once in each loop.
3755 */
3756 if (j & 0x02) {
3757 if (j & 0x01) {
3758 targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
3759 b <<= 1;
3760 if (b == 0x0000)
3761 b = 0x0001;
3762 } else {
3763 targ->dv_buffer[i++] = (~b & 0xff);
3764 }
3765 } else {
3766 targ->dv_buffer[i++] = 0xff;
3767 }
3768 }
3769 }
3770} 1365}
3771 1366
3772static u_int 1367static u_int
@@ -3800,100 +1395,23 @@ ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3800 return (tags); 1395 return (tags);
3801} 1396}
3802 1397
3803static u_int
3804ahd_linux_user_dv_setting(struct ahd_softc *ahd)
3805{
3806 static int warned_user;
3807 int dv;
3808
3809 if (ahd->unit >= NUM_ELEMENTS(aic79xx_dv_settings)) {
3810
3811 if (warned_user == 0) {
3812 printf(KERN_WARNING
3813"aic79xx: WARNING: Insufficient dv settings instances\n"
3814"aic79xx: for installed controllers. Using defaults\n"
3815"aic79xx: Please update the aic79xx_dv_settings array in"
3816"aic79xx: the aic79xx_osm.c source file.\n");
3817 warned_user++;
3818 }
3819 dv = -1;
3820 } else {
3821
3822 dv = aic79xx_dv_settings[ahd->unit];
3823 }
3824
3825 if (dv < 0) {
3826 /*
3827 * Apply the default.
3828 */
3829 dv = 1;
3830 if (ahd->seep_config != 0)
3831 dv = (ahd->seep_config->bios_control & CFENABLEDV);
3832 }
3833 return (dv);
3834}
3835
3836static void
3837ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd)
3838{
3839 static int warned_user;
3840 u_int rd_strm_mask;
3841 u_int target_id;
3842
3843 /*
3844 * If we have specific read streaming info for this controller,
3845 * apply it. Otherwise use the defaults.
3846 */
3847 if (ahd->unit >= NUM_ELEMENTS(aic79xx_rd_strm_info)) {
3848
3849 if (warned_user == 0) {
3850
3851 printf(KERN_WARNING
3852"aic79xx: WARNING: Insufficient rd_strm instances\n"
3853"aic79xx: for installed controllers. Using defaults\n"
3854"aic79xx: Please update the aic79xx_rd_strm_info array\n"
3855"aic79xx: in the aic79xx_osm.c source file.\n");
3856 warned_user++;
3857 }
3858 rd_strm_mask = AIC79XX_CONFIGED_RD_STRM;
3859 } else {
3860
3861 rd_strm_mask = aic79xx_rd_strm_info[ahd->unit];
3862 }
3863 for (target_id = 0; target_id < 16; target_id++) {
3864 struct ahd_devinfo devinfo;
3865 struct ahd_initiator_tinfo *tinfo;
3866 struct ahd_tmode_tstate *tstate;
3867
3868 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
3869 target_id, &tstate);
3870 ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
3871 CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
3872 tinfo->user.ppr_options &= ~MSG_EXT_PPR_RD_STRM;
3873 if ((rd_strm_mask & devinfo.target_mask) != 0)
3874 tinfo->user.ppr_options |= MSG_EXT_PPR_RD_STRM;
3875 }
3876}
3877
3878/* 1398/*
3879 * Determines the queue depth for a given device. 1399 * Determines the queue depth for a given device.
3880 */ 1400 */
3881static void 1401static void
3882ahd_linux_device_queue_depth(struct ahd_softc *ahd, 1402ahd_linux_device_queue_depth(struct scsi_device *sdev)
3883 struct ahd_linux_device *dev)
3884{ 1403{
3885 struct ahd_devinfo devinfo; 1404 struct ahd_devinfo devinfo;
3886 u_int tags; 1405 u_int tags;
1406 struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata);
3887 1407
3888 ahd_compile_devinfo(&devinfo, 1408 ahd_compile_devinfo(&devinfo,
3889 ahd->our_id, 1409 ahd->our_id,
3890 dev->target->target, dev->lun, 1410 sdev->sdev_target->id, sdev->lun,
3891 dev->target->channel == 0 ? 'A' : 'B', 1411 sdev->sdev_target->channel == 0 ? 'A' : 'B',
3892 ROLE_INITIATOR); 1412 ROLE_INITIATOR);
3893 tags = ahd_linux_user_tagdepth(ahd, &devinfo); 1413 tags = ahd_linux_user_tagdepth(ahd, &devinfo);
3894 if (tags != 0 1414 if (tags != 0 && sdev->tagged_supported != 0) {
3895 && dev->scsi_device != NULL
3896 && dev->scsi_device->tagged_supported != 0) {
3897 1415
3898 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED); 1416 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
3899 ahd_print_devinfo(ahd, &devinfo); 1417 ahd_print_devinfo(ahd, &devinfo);
@@ -3903,11 +1421,10 @@ ahd_linux_device_queue_depth(struct ahd_softc *ahd,
3903 } 1421 }
3904} 1422}
3905 1423
3906static void 1424static int
3907ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev) 1425ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1426 struct scsi_cmnd *cmd)
3908{ 1427{
3909 struct ahd_cmd *acmd;
3910 struct scsi_cmnd *cmd;
3911 struct scb *scb; 1428 struct scb *scb;
3912 struct hardware_scb *hscb; 1429 struct hardware_scb *hscb;
3913 struct ahd_initiator_tinfo *tinfo; 1430 struct ahd_initiator_tinfo *tinfo;
@@ -3915,157 +1432,122 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
3915 u_int col_idx; 1432 u_int col_idx;
3916 uint16_t mask; 1433 uint16_t mask;
3917 1434
3918 if ((dev->flags & AHD_DEV_ON_RUN_LIST) != 0) 1435 /*
3919 panic("running device on run list"); 1436 * Get an scb to use.
3920 1437 */
3921 while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL 1438 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
3922 && dev->openings > 0 && dev->qfrozen == 0) { 1439 cmd->device->id, &tstate);
3923 1440 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
3924 /* 1441 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3925 * Schedule us to run later. The only reason we are not 1442 col_idx = AHD_NEVER_COL_IDX;
3926 * running is because the whole controller Q is frozen. 1443 } else {
3927 */ 1444 col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
3928 if (ahd->platform_data->qfrozen != 0 1445 cmd->device->lun);
3929 && AHD_DV_SIMQ_FROZEN(ahd) == 0) { 1446 }
3930 1447 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
3931 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, 1448 ahd->flags |= AHD_RESOURCE_SHORTAGE;
3932 dev, links); 1449 return SCSI_MLQUEUE_HOST_BUSY;
3933 dev->flags |= AHD_DEV_ON_RUN_LIST; 1450 }
3934 return;
3935 }
3936
3937 cmd = &acmd_scsi_cmd(acmd);
3938 1451
3939 /* 1452 scb->io_ctx = cmd;
3940 * Get an scb to use. 1453 scb->platform_data->dev = dev;
3941 */ 1454 hscb = scb->hscb;
3942 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 1455 cmd->host_scribble = (char *)scb;
3943 cmd->device->id, &tstate);
3944 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
3945 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3946 col_idx = AHD_NEVER_COL_IDX;
3947 } else {
3948 col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
3949 cmd->device->lun);
3950 }
3951 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
3952 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
3953 dev, links);
3954 dev->flags |= AHD_DEV_ON_RUN_LIST;
3955 ahd->flags |= AHD_RESOURCE_SHORTAGE;
3956 return;
3957 }
3958 TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
3959 scb->io_ctx = cmd;
3960 scb->platform_data->dev = dev;
3961 hscb = scb->hscb;
3962 cmd->host_scribble = (char *)scb;
3963 1456
3964 /* 1457 /*
3965 * Fill out basics of the HSCB. 1458 * Fill out basics of the HSCB.
3966 */ 1459 */
3967 hscb->control = 0; 1460 hscb->control = 0;
3968 hscb->scsiid = BUILD_SCSIID(ahd, cmd); 1461 hscb->scsiid = BUILD_SCSIID(ahd, cmd);
3969 hscb->lun = cmd->device->lun; 1462 hscb->lun = cmd->device->lun;
3970 scb->hscb->task_management = 0; 1463 scb->hscb->task_management = 0;
3971 mask = SCB_GET_TARGET_MASK(ahd, scb); 1464 mask = SCB_GET_TARGET_MASK(ahd, scb);
3972 1465
3973 if ((ahd->user_discenable & mask) != 0) 1466 if ((ahd->user_discenable & mask) != 0)
3974 hscb->control |= DISCENB; 1467 hscb->control |= DISCENB;
3975 1468
3976 if (AHD_DV_CMD(cmd) != 0) 1469 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
3977 scb->flags |= SCB_SILENT; 1470 scb->flags |= SCB_PACKETIZED;
3978 1471
3979 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) 1472 if ((tstate->auto_negotiate & mask) != 0) {
3980 scb->flags |= SCB_PACKETIZED; 1473 scb->flags |= SCB_AUTO_NEGOTIATE;
1474 scb->hscb->control |= MK_MESSAGE;
1475 }
3981 1476
3982 if ((tstate->auto_negotiate & mask) != 0) { 1477 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
3983 scb->flags |= SCB_AUTO_NEGOTIATE; 1478 int msg_bytes;
3984 scb->hscb->control |= MK_MESSAGE; 1479 uint8_t tag_msgs[2];
3985 }
3986 1480
3987 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) { 1481 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
3988#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1482 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
3989 int msg_bytes; 1483 hscb->control |= tag_msgs[0];
3990 uint8_t tag_msgs[2]; 1484 if (tag_msgs[0] == MSG_ORDERED_TASK)
3991
3992 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
3993 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
3994 hscb->control |= tag_msgs[0];
3995 if (tag_msgs[0] == MSG_ORDERED_TASK)
3996 dev->commands_since_idle_or_otag = 0;
3997 } else
3998#endif
3999 if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
4000 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
4001 hscb->control |= MSG_ORDERED_TASK;
4002 dev->commands_since_idle_or_otag = 0; 1485 dev->commands_since_idle_or_otag = 0;
4003 } else { 1486 } else
4004 hscb->control |= MSG_SIMPLE_TASK; 1487 if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
4005 } 1488 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
1489 hscb->control |= MSG_ORDERED_TASK;
1490 dev->commands_since_idle_or_otag = 0;
1491 } else {
1492 hscb->control |= MSG_SIMPLE_TASK;
4006 } 1493 }
1494 }
4007 1495
4008 hscb->cdb_len = cmd->cmd_len; 1496 hscb->cdb_len = cmd->cmd_len;
4009 memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len); 1497 memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);
4010 1498
4011 scb->sg_count = 0; 1499 scb->platform_data->xfer_len = 0;
4012 ahd_set_residual(scb, 0); 1500 ahd_set_residual(scb, 0);
4013 ahd_set_sense_residual(scb, 0); 1501 ahd_set_sense_residual(scb, 0);
4014 if (cmd->use_sg != 0) { 1502 scb->sg_count = 0;
4015 void *sg; 1503 if (cmd->use_sg != 0) {
4016 struct scatterlist *cur_seg; 1504 void *sg;
4017 u_int nseg; 1505 struct scatterlist *cur_seg;
4018 int dir; 1506 u_int nseg;
4019 1507 int dir;
4020 cur_seg = (struct scatterlist *)cmd->request_buffer; 1508
4021 dir = cmd->sc_data_direction; 1509 cur_seg = (struct scatterlist *)cmd->request_buffer;
4022 nseg = pci_map_sg(ahd->dev_softc, cur_seg, 1510 dir = cmd->sc_data_direction;
4023 cmd->use_sg, dir); 1511 nseg = pci_map_sg(ahd->dev_softc, cur_seg,
4024 scb->platform_data->xfer_len = 0; 1512 cmd->use_sg, dir);
4025 for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) { 1513 scb->platform_data->xfer_len = 0;
4026 dma_addr_t addr; 1514 for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
4027 bus_size_t len;
4028
4029 addr = sg_dma_address(cur_seg);
4030 len = sg_dma_len(cur_seg);
4031 scb->platform_data->xfer_len += len;
4032 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
4033 /*last*/nseg == 1);
4034 }
4035 } else if (cmd->request_bufflen != 0) {
4036 void *sg;
4037 dma_addr_t addr; 1515 dma_addr_t addr;
4038 int dir; 1516 bus_size_t len;
4039
4040 sg = scb->sg_list;
4041 dir = cmd->sc_data_direction;
4042 addr = pci_map_single(ahd->dev_softc,
4043 cmd->request_buffer,
4044 cmd->request_bufflen, dir);
4045 scb->platform_data->xfer_len = cmd->request_bufflen;
4046 scb->platform_data->buf_busaddr = addr;
4047 sg = ahd_sg_setup(ahd, scb, sg, addr,
4048 cmd->request_bufflen, /*last*/TRUE);
4049 }
4050 1517
4051 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); 1518 addr = sg_dma_address(cur_seg);
4052 dev->openings--; 1519 len = sg_dma_len(cur_seg);
4053 dev->active++; 1520 scb->platform_data->xfer_len += len;
4054 dev->commands_issued++; 1521 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
4055 1522 /*last*/nseg == 1);
4056 /* Update the error counting bucket and dump if needed */
4057 if (dev->target->cmds_since_error) {
4058 dev->target->cmds_since_error++;
4059 if (dev->target->cmds_since_error >
4060 AHD_LINUX_ERR_THRESH)
4061 dev->target->cmds_since_error = 0;
4062 } 1523 }
1524 } else if (cmd->request_bufflen != 0) {
1525 void *sg;
1526 dma_addr_t addr;
1527 int dir;
4063 1528
4064 if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0) 1529 sg = scb->sg_list;
4065 dev->commands_since_idle_or_otag++; 1530 dir = cmd->sc_data_direction;
4066 scb->flags |= SCB_ACTIVE; 1531 addr = pci_map_single(ahd->dev_softc,
4067 ahd_queue_scb(ahd, scb); 1532 cmd->request_buffer,
1533 cmd->request_bufflen, dir);
1534 scb->platform_data->xfer_len = cmd->request_bufflen;
1535 scb->platform_data->buf_busaddr = addr;
1536 sg = ahd_sg_setup(ahd, scb, sg, addr,
1537 cmd->request_bufflen, /*last*/TRUE);
4068 } 1538 }
1539
1540 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1541 dev->openings--;
1542 dev->active++;
1543 dev->commands_issued++;
1544
1545 if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
1546 dev->commands_since_idle_or_otag++;
1547 scb->flags |= SCB_ACTIVE;
1548 ahd_queue_scb(ahd, scb);
1549
1550 return 0;
4069} 1551}
4070 1552
4071/* 1553/*
@@ -4081,9 +1563,6 @@ ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
4081 ahd = (struct ahd_softc *) dev_id; 1563 ahd = (struct ahd_softc *) dev_id;
4082 ahd_lock(ahd, &flags); 1564 ahd_lock(ahd, &flags);
4083 ours = ahd_intr(ahd); 1565 ours = ahd_intr(ahd);
4084 if (ahd_linux_next_device_to_run(ahd) != NULL)
4085 ahd_schedule_runq(ahd);
4086 ahd_linux_run_complete_queue(ahd);
4087 ahd_unlock(ahd, &flags); 1566 ahd_unlock(ahd, &flags);
4088 return IRQ_RETVAL(ours); 1567 return IRQ_RETVAL(ours);
4089} 1568}
@@ -4092,111 +1571,6 @@ void
4092ahd_platform_flushwork(struct ahd_softc *ahd) 1571ahd_platform_flushwork(struct ahd_softc *ahd)
4093{ 1572{
4094 1573
4095 while (ahd_linux_run_complete_queue(ahd) != NULL)
4096 ;
4097}
4098
4099static struct ahd_linux_target*
4100ahd_linux_alloc_target(struct ahd_softc *ahd, u_int channel, u_int target)
4101{
4102 struct ahd_linux_target *targ;
4103
4104 targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
4105 if (targ == NULL)
4106 return (NULL);
4107 memset(targ, 0, sizeof(*targ));
4108 targ->channel = channel;
4109 targ->target = target;
4110 targ->ahd = ahd;
4111 targ->flags = AHD_DV_REQUIRED;
4112 ahd->platform_data->targets[target] = targ;
4113 return (targ);
4114}
4115
4116static void
4117ahd_linux_free_target(struct ahd_softc *ahd, struct ahd_linux_target *targ)
4118{
4119 struct ahd_devinfo devinfo;
4120 struct ahd_initiator_tinfo *tinfo;
4121 struct ahd_tmode_tstate *tstate;
4122 u_int our_id;
4123 u_int target_offset;
4124 char channel;
4125
4126 /*
4127 * Force a negotiation to async/narrow on any
4128 * future command to this device unless a bus
4129 * reset occurs between now and that command.
4130 */
4131 channel = 'A' + targ->channel;
4132 our_id = ahd->our_id;
4133 target_offset = targ->target;
4134 tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
4135 targ->target, &tstate);
4136 ahd_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
4137 channel, ROLE_INITIATOR);
4138 ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
4139 AHD_TRANS_GOAL, /*paused*/FALSE);
4140 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
4141 AHD_TRANS_GOAL, /*paused*/FALSE);
4142 ahd_update_neg_request(ahd, &devinfo, tstate, tinfo, AHD_NEG_ALWAYS);
4143 ahd->platform_data->targets[target_offset] = NULL;
4144 if (targ->inq_data != NULL)
4145 free(targ->inq_data, M_DEVBUF);
4146 if (targ->dv_buffer != NULL)
4147 free(targ->dv_buffer, M_DEVBUF);
4148 if (targ->dv_buffer1 != NULL)
4149 free(targ->dv_buffer1, M_DEVBUF);
4150 free(targ, M_DEVBUF);
4151}
4152
4153static struct ahd_linux_device*
4154ahd_linux_alloc_device(struct ahd_softc *ahd,
4155 struct ahd_linux_target *targ, u_int lun)
4156{
4157 struct ahd_linux_device *dev;
4158
4159 	dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT);
4160 if (dev == NULL)
4161 return (NULL);
4162 memset(dev, 0, sizeof(*dev));
4163 init_timer(&dev->timer);
4164 TAILQ_INIT(&dev->busyq);
4165 dev->flags = AHD_DEV_UNCONFIGURED;
4166 dev->lun = lun;
4167 dev->target = targ;
4168
4169 /*
4170 * We start out life using untagged
4171 * transactions of which we allow one.
4172 */
4173 dev->openings = 1;
4174
4175 /*
4176 * Set maxtags to 0. This will be changed if we
4177 * later determine that we are dealing with
4178 * a tagged queuing capable device.
4179 */
4180 dev->maxtags = 0;
4181
4182 targ->refcount++;
4183 targ->devices[lun] = dev;
4184 return (dev);
4185}
4186
4187static void
4188ahd_linux_free_device(struct ahd_softc *ahd, struct ahd_linux_device *dev)
4189{
4190 struct ahd_linux_target *targ;
4191
4192 del_timer(&dev->timer);
4193 targ = dev->target;
4194 targ->devices[dev->lun] = NULL;
4195 free(dev, M_DEVBUF);
4196 targ->refcount--;
4197 if (targ->refcount == 0
4198 && (targ->flags & AHD_DV_REQUIRED) == 0)
4199 ahd_linux_free_target(ahd, targ);
4200} 1574}
4201 1575
4202void 1576void
@@ -4207,10 +1581,14 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
4207 case AC_TRANSFER_NEG: 1581 case AC_TRANSFER_NEG:
4208 { 1582 {
4209 char buf[80]; 1583 char buf[80];
1584 struct scsi_target *starget;
4210 struct ahd_linux_target *targ; 1585 struct ahd_linux_target *targ;
4211 struct info_str info; 1586 struct info_str info;
4212 struct ahd_initiator_tinfo *tinfo; 1587 struct ahd_initiator_tinfo *tinfo;
4213 struct ahd_tmode_tstate *tstate; 1588 struct ahd_tmode_tstate *tstate;
1589 unsigned int target_ppr_options;
1590
1591 BUG_ON(target == CAM_TARGET_WILDCARD);
4214 1592
4215 info.buffer = buf; 1593 info.buffer = buf;
4216 info.length = sizeof(buf); 1594 info.length = sizeof(buf);
@@ -4234,58 +1612,47 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
4234 * Don't bother reporting results that 1612 * Don't bother reporting results that
4235 * are identical to those last reported. 1613 * are identical to those last reported.
4236 */ 1614 */
4237 targ = ahd->platform_data->targets[target]; 1615 starget = ahd->platform_data->starget[target];
4238 if (targ == NULL) 1616 if (starget == NULL)
4239 break; 1617 break;
4240 if (tinfo->curr.period == targ->last_tinfo.period 1618 targ = scsi_transport_target_data(starget);
4241 && tinfo->curr.width == targ->last_tinfo.width 1619
4242 && tinfo->curr.offset == targ->last_tinfo.offset 1620 target_ppr_options =
4243 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options) 1621 (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
1622 + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
1623 + (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0)
1624 + (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0)
1625 + (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0)
1626 + (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0)
1627 + (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0)
1628 + (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0);
1629
1630 if (tinfo->curr.period == spi_period(starget)
1631 && tinfo->curr.width == spi_width(starget)
1632 && tinfo->curr.offset == spi_offset(starget)
1633 && tinfo->curr.ppr_options == target_ppr_options)
4244 if (bootverbose == 0) 1634 if (bootverbose == 0)
4245 break; 1635 break;
4246 1636
4247 targ->last_tinfo.period = tinfo->curr.period; 1637 spi_period(starget) = tinfo->curr.period;
4248 targ->last_tinfo.width = tinfo->curr.width; 1638 spi_width(starget) = tinfo->curr.width;
4249 targ->last_tinfo.offset = tinfo->curr.offset; 1639 spi_offset(starget) = tinfo->curr.offset;
4250 targ->last_tinfo.ppr_options = tinfo->curr.ppr_options; 1640 spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
4251 1641 spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
4252 printf("(%s:%c:", ahd_name(ahd), channel); 1642 spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
4253 if (target == CAM_TARGET_WILDCARD) 1643 spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
4254 printf("*): "); 1644 spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
4255 else 1645 spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0;
4256 printf("%d): ", target); 1646 spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
4257 ahd_format_transinfo(&info, &tinfo->curr); 1647 spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
4258 if (info.pos < info.length) 1648 spi_display_xfer_agreement(starget);
4259 *info.buffer = '\0';
4260 else
4261 buf[info.length - 1] = '\0';
4262 printf("%s", buf);
4263 break; 1649 break;
4264 } 1650 }
4265 case AC_SENT_BDR: 1651 case AC_SENT_BDR:
4266 { 1652 {
4267#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
4268 WARN_ON(lun != CAM_LUN_WILDCARD); 1653 WARN_ON(lun != CAM_LUN_WILDCARD);
4269 scsi_report_device_reset(ahd->platform_data->host, 1654 scsi_report_device_reset(ahd->platform_data->host,
4270 channel - 'A', target); 1655 channel - 'A', target);
4271#else
4272 Scsi_Device *scsi_dev;
4273
4274 /*
4275 * Find the SCSI device associated with this
4276 * request and indicate that a UA is expected.
4277 */
4278 for (scsi_dev = ahd->platform_data->host->host_queue;
4279 scsi_dev != NULL; scsi_dev = scsi_dev->next) {
4280 if (channel - 'A' == scsi_dev->channel
4281 && target == scsi_dev->id
4282 && (lun == CAM_LUN_WILDCARD
4283 || lun == scsi_dev->lun)) {
4284 scsi_dev->was_reset = 1;
4285 scsi_dev->expecting_cc_ua = 1;
4286 }
4287 }
4288#endif
4289 break; 1656 break;
4290 } 1657 }
4291 case AC_BUS_RESET: 1658 case AC_BUS_RESET:
@@ -4305,7 +1672,7 @@ ahd_send_async(struct ahd_softc *ahd, char channel,
4305void 1672void
4306ahd_done(struct ahd_softc *ahd, struct scb *scb) 1673ahd_done(struct ahd_softc *ahd, struct scb *scb)
4307{ 1674{
4308 Scsi_Cmnd *cmd; 1675 struct scsi_cmnd *cmd;
4309 struct ahd_linux_device *dev; 1676 struct ahd_linux_device *dev;
4310 1677
4311 if ((scb->flags & SCB_ACTIVE) == 0) { 1678 if ((scb->flags & SCB_ACTIVE) == 0) {
@@ -4373,19 +1740,8 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
4373 ahd_set_transaction_status(scb, CAM_REQ_CMP); 1740 ahd_set_transaction_status(scb, CAM_REQ_CMP);
4374 } 1741 }
4375 } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) { 1742 } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
4376 ahd_linux_handle_scsi_status(ahd, dev, scb); 1743 ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
4377 } else if (ahd_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
4378 dev->flags |= AHD_DEV_UNCONFIGURED;
4379 if (AHD_DV_CMD(cmd) == FALSE)
4380 dev->target->flags &= ~AHD_DV_REQUIRED;
4381 } 1744 }
4382 /*
4383 * Start DV for devices that require it assuming the first command
4384 * sent does not result in a selection timeout.
4385 */
4386 if (ahd_get_transaction_status(scb) != CAM_SEL_TIMEOUT
4387 && (dev->target->flags & AHD_DV_REQUIRED) != 0)
4388 ahd_linux_start_dv(ahd);
4389 1745
4390 if (dev->openings == 1 1746 if (dev->openings == 1
4391 && ahd_get_transaction_status(scb) == CAM_REQ_CMP 1747 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
@@ -4406,47 +1762,32 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
4406 if (dev->active == 0) 1762 if (dev->active == 0)
4407 dev->commands_since_idle_or_otag = 0; 1763 dev->commands_since_idle_or_otag = 0;
4408 1764
4409 if (TAILQ_EMPTY(&dev->busyq)) {
4410 if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
4411 && dev->active == 0
4412 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
4413 ahd_linux_free_device(ahd, dev);
4414 } else if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
4415 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
4416 dev->flags |= AHD_DEV_ON_RUN_LIST;
4417 }
4418
4419 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 1765 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
4420 printf("Recovery SCB completes\n"); 1766 printf("Recovery SCB completes\n");
4421 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT 1767 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
4422 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) 1768 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
4423 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); 1769 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
4424 if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) { 1770 if ((ahd->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
4425 scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM; 1771 ahd->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
4426 up(&ahd->platform_data->eh_sem); 1772 up(&ahd->platform_data->eh_sem);
4427 } 1773 }
4428 } 1774 }
4429 1775
4430 ahd_free_scb(ahd, scb); 1776 ahd_free_scb(ahd, scb);
4431 ahd_linux_queue_cmd_complete(ahd, cmd); 1777 ahd_linux_queue_cmd_complete(ahd, cmd);
4432
4433 if ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_EMPTY) != 0
4434 && LIST_FIRST(&ahd->pending_scbs) == NULL) {
4435 ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_EMPTY;
4436 up(&ahd->platform_data->dv_sem);
4437 }
4438} 1778}
4439 1779
4440static void 1780static void
4441ahd_linux_handle_scsi_status(struct ahd_softc *ahd, 1781ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4442 struct ahd_linux_device *dev, struct scb *scb) 1782 struct scsi_device *sdev, struct scb *scb)
4443{ 1783{
4444 struct ahd_devinfo devinfo; 1784 struct ahd_devinfo devinfo;
1785 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
4445 1786
4446 ahd_compile_devinfo(&devinfo, 1787 ahd_compile_devinfo(&devinfo,
4447 ahd->our_id, 1788 ahd->our_id,
4448 dev->target->target, dev->lun, 1789 sdev->sdev_target->id, sdev->lun,
4449 dev->target->channel == 0 ? 'A' : 'B', 1790 sdev->sdev_target->channel == 0 ? 'A' : 'B',
4450 ROLE_INITIATOR); 1791 ROLE_INITIATOR);
4451 1792
4452 /* 1793 /*
@@ -4465,7 +1806,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4465 case SCSI_STATUS_CHECK_COND: 1806 case SCSI_STATUS_CHECK_COND:
4466 case SCSI_STATUS_CMD_TERMINATED: 1807 case SCSI_STATUS_CMD_TERMINATED:
4467 { 1808 {
4468 Scsi_Cmnd *cmd; 1809 struct scsi_cmnd *cmd;
4469 1810
4470 /* 1811 /*
4471 * Copy sense information to the OS's cmd 1812 * Copy sense information to the OS's cmd
@@ -4518,7 +1859,6 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4518 break; 1859 break;
4519 } 1860 }
4520 case SCSI_STATUS_QUEUE_FULL: 1861 case SCSI_STATUS_QUEUE_FULL:
4521 {
4522 /* 1862 /*
4523 * By the time the core driver has returned this 1863 * By the time the core driver has returned this
4524 * command, all other commands that were queued 1864 * command, all other commands that were queued
@@ -4579,98 +1919,23 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4579 (dev->flags & AHD_DEV_Q_BASIC) 1919 (dev->flags & AHD_DEV_Q_BASIC)
4580 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED); 1920 ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
4581 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY); 1921 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
4582 /* FALLTHROUGH */
4583 }
4584 case SCSI_STATUS_BUSY:
4585 /*
4586 * Set a short timer to defer sending commands for
4587 * a bit since Linux will not delay in this case.
4588 */
4589 if ((dev->flags & AHD_DEV_TIMER_ACTIVE) != 0) {
4590 printf("%s:%c:%d: Device Timer still active during "
4591 "busy processing\n", ahd_name(ahd),
4592 dev->target->channel, dev->target->target);
4593 break;
4594 }
4595 dev->flags |= AHD_DEV_TIMER_ACTIVE;
4596 dev->qfrozen++;
4597 init_timer(&dev->timer);
4598 dev->timer.data = (u_long)dev;
4599 dev->timer.expires = jiffies + (HZ/2);
4600 dev->timer.function = ahd_linux_dev_timed_unfreeze;
4601 add_timer(&dev->timer);
4602 break;
4603 } 1922 }
4604} 1923}
4605 1924
4606static void 1925static void
4607ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, Scsi_Cmnd *cmd) 1926ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
4608{ 1927{
4609 /* 1928 /*
4610 * Typically, the complete queue has very few entries
4611 * queued to it before the queue is emptied by
4612 * ahd_linux_run_complete_queue, so sorting the entries
4613 * by generation number should be inexpensive.
4614 * We perform the sort so that commands that complete
4615 	 * with an error are returned in the order originally
4616 * queued to the controller so that any subsequent retries
4617 * are performed in order. The underlying ahd routines do
4618 * not guarantee the order that aborted commands will be
4619 * returned to us.
4620 */
4621 struct ahd_completeq *completeq;
4622 struct ahd_cmd *list_cmd;
4623 struct ahd_cmd *acmd;
4624
4625 /*
4626 * Map CAM error codes into Linux Error codes. We 1929 * Map CAM error codes into Linux Error codes. We
4627 * avoid the conversion so that the DV code has the 1930 * avoid the conversion so that the DV code has the
4628 * full error information available when making 1931 * full error information available when making
4629 * state change decisions. 1932 * state change decisions.
4630 */ 1933 */
4631 if (AHD_DV_CMD(cmd) == FALSE) { 1934 {
4632 uint32_t status; 1935 uint32_t status;
4633 u_int new_status; 1936 u_int new_status;
4634 1937
4635 status = ahd_cmd_get_transaction_status(cmd); 1938 status = ahd_cmd_get_transaction_status(cmd);
4636 if (status != CAM_REQ_CMP) {
4637 struct ahd_linux_device *dev;
4638 struct ahd_devinfo devinfo;
4639 cam_status cam_status;
4640 uint32_t action;
4641 u_int scsi_status;
4642
4643 dev = ahd_linux_get_device(ahd, cmd->device->channel,
4644 cmd->device->id,
4645 cmd->device->lun,
4646 /*alloc*/FALSE);
4647
4648 if (dev == NULL)
4649 goto no_fallback;
4650
4651 ahd_compile_devinfo(&devinfo,
4652 ahd->our_id,
4653 dev->target->target, dev->lun,
4654 dev->target->channel == 0 ? 'A':'B',
4655 ROLE_INITIATOR);
4656
4657 scsi_status = ahd_cmd_get_scsi_status(cmd);
4658 cam_status = ahd_cmd_get_transaction_status(cmd);
4659 action = aic_error_action(cmd, dev->target->inq_data,
4660 cam_status, scsi_status);
4661 if ((action & SSQ_FALLBACK) != 0) {
4662
4663 /* Update stats */
4664 dev->target->errors_detected++;
4665 if (dev->target->cmds_since_error == 0)
4666 dev->target->cmds_since_error++;
4667 else {
4668 dev->target->cmds_since_error = 0;
4669 ahd_linux_fallback(ahd, &devinfo);
4670 }
4671 }
4672 }
4673no_fallback:
4674 switch (status) { 1939 switch (status) {
4675 case CAM_REQ_INPROG: 1940 case CAM_REQ_INPROG:
4676 case CAM_REQ_CMP: 1941 case CAM_REQ_CMP:
@@ -4715,26 +1980,7 @@ no_fallback:
4715 new_status = DID_ERROR; 1980 new_status = DID_ERROR;
4716 break; 1981 break;
4717 case CAM_REQUEUE_REQ: 1982 case CAM_REQUEUE_REQ:
4718 /* 1983 new_status = DID_REQUEUE;
4719 * If we want the request requeued, make sure there
4720 			 * are sufficient retries. In the old scsi error code,
4721 * we used to be able to specify a result code that
4722 * bypassed the retry count. Now we must use this
4723 * hack. We also "fake" a check condition with
4724 * a sense code of ABORTED COMMAND. This seems to
4725 * evoke a retry even if this command is being sent
4726 * via the eh thread. Ick! Ick! Ick!
4727 */
4728 if (cmd->retries > 0)
4729 cmd->retries--;
4730 new_status = DID_OK;
4731 ahd_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
4732 cmd->result |= (DRIVER_SENSE << 24);
4733 memset(cmd->sense_buffer, 0,
4734 sizeof(cmd->sense_buffer));
4735 cmd->sense_buffer[0] = SSD_ERRCODE_VALID
4736 | SSD_CURRENT_ERROR;
4737 cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
4738 break; 1984 break;
4739 default: 1985 default:
4740 /* We should never get here */ 1986 /* We should never get here */
@@ -4745,116 +1991,23 @@ no_fallback:
4745 ahd_cmd_set_transaction_status(cmd, new_status); 1991 ahd_cmd_set_transaction_status(cmd, new_status);
4746 } 1992 }
4747 1993
4748 completeq = &ahd->platform_data->completeq; 1994 cmd->scsi_done(cmd);
4749 list_cmd = TAILQ_FIRST(completeq);
4750 acmd = (struct ahd_cmd *)cmd;
4751 while (list_cmd != NULL
4752 && acmd_scsi_cmd(list_cmd).serial_number
4753 < acmd_scsi_cmd(acmd).serial_number)
4754 list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
4755 if (list_cmd != NULL)
4756 TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
4757 else
4758 TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
4759} 1995}
4760 1996
4761static void 1997static void
4762ahd_linux_filter_inquiry(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 1998ahd_linux_sem_timeout(u_long arg)
4763{ 1999{
4764 struct scsi_inquiry_data *sid; 2000 struct ahd_softc *ahd;
4765 struct ahd_initiator_tinfo *tinfo; 2001 u_long s;
4766 struct ahd_transinfo *user;
4767 struct ahd_transinfo *goal;
4768 struct ahd_transinfo *curr;
4769 struct ahd_tmode_tstate *tstate;
4770 struct ahd_linux_device *dev;
4771 u_int width;
4772 u_int period;
4773 u_int offset;
4774 u_int ppr_options;
4775 u_int trans_version;
4776 u_int prot_version;
4777
4778 /*
4779 * Determine if this lun actually exists. If so,
4780 * hold on to its corresponding device structure.
4781 * If not, make sure we release the device and
4782 * don't bother processing the rest of this inquiry
4783 * command.
4784 */
4785 dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
4786 devinfo->target, devinfo->lun,
4787 /*alloc*/TRUE);
4788
4789 sid = (struct scsi_inquiry_data *)dev->target->inq_data;
4790 if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
4791
4792 dev->flags &= ~AHD_DEV_UNCONFIGURED;
4793 } else {
4794 dev->flags |= AHD_DEV_UNCONFIGURED;
4795 return;
4796 }
4797 2002
4798 /* 2003 ahd = (struct ahd_softc *)arg;
4799 * Update our notion of this device's transfer
4800 * negotiation capabilities.
4801 */
4802 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
4803 devinfo->our_scsiid,
4804 devinfo->target, &tstate);
4805 user = &tinfo->user;
4806 goal = &tinfo->goal;
4807 curr = &tinfo->curr;
4808 width = user->width;
4809 period = user->period;
4810 offset = user->offset;
4811 ppr_options = user->ppr_options;
4812 trans_version = user->transport_version;
4813 prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
4814 2004
4815 /* 2005 ahd_lock(ahd, &s);
4816 * Only attempt SPI3/4 once we've verified that 2006 if ((ahd->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
4817 * the device claims to support SPI3/4 features. 2007 ahd->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
4818 */ 2008 up(&ahd->platform_data->eh_sem);
4819 if (prot_version < SCSI_REV_2)
4820 trans_version = SID_ANSI_REV(sid);
4821 else
4822 trans_version = SCSI_REV_2;
4823
4824 if ((sid->flags & SID_WBus16) == 0)
4825 width = MSG_EXT_WDTR_BUS_8_BIT;
4826 if ((sid->flags & SID_Sync) == 0) {
4827 period = 0;
4828 offset = 0;
4829 ppr_options = 0;
4830 }
4831 if ((sid->spi3data & SID_SPI_QAS) == 0)
4832 ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
4833 if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
4834 ppr_options &= MSG_EXT_PPR_QAS_REQ;
4835 if ((sid->spi3data & SID_SPI_IUS) == 0)
4836 ppr_options &= (MSG_EXT_PPR_DT_REQ
4837 | MSG_EXT_PPR_QAS_REQ);
4838
4839 if (prot_version > SCSI_REV_2
4840 && ppr_options != 0)
4841 trans_version = user->transport_version;
4842
4843 ahd_validate_width(ahd, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
4844 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
4845 ahd_validate_offset(ahd, /*tinfo limit*/NULL, period,
4846 &offset, width, ROLE_UNKNOWN);
4847 if (offset == 0 || period == 0) {
4848 period = 0;
4849 offset = 0;
4850 ppr_options = 0;
4851 } 2009 }
4852 /* Apply our filtered user settings. */ 2010 ahd_unlock(ahd, &s);
4853 curr->transport_version = trans_version;
4854 curr->protocol_version = prot_version;
4855 ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, /*paused*/FALSE);
4856 ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options,
4857 AHD_TRANS_GOAL, /*paused*/FALSE);
4858} 2011}
4859 2012
4860void 2013void
@@ -4882,12 +2035,6 @@ ahd_release_simq(struct ahd_softc *ahd)
4882 if (ahd->platform_data->qfrozen == 0) { 2035 if (ahd->platform_data->qfrozen == 0) {
4883 unblock_reqs = 1; 2036 unblock_reqs = 1;
4884 } 2037 }
4885 if (AHD_DV_SIMQ_FROZEN(ahd)
4886 && ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_RELEASE) != 0)) {
4887 ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_RELEASE;
4888 up(&ahd->platform_data->dv_sem);
4889 }
4890 ahd_schedule_runq(ahd);
4891 ahd_unlock(ahd, &s); 2038 ahd_unlock(ahd, &s);
4892 /* 2039 /*
4893 * There is still a race here. The mid-layer 2040 * There is still a race here. The mid-layer
@@ -4899,118 +2046,743 @@ ahd_release_simq(struct ahd_softc *ahd)
4899 scsi_unblock_requests(ahd->platform_data->host); 2046 scsi_unblock_requests(ahd->platform_data->host);
4900} 2047}
4901 2048
4902static void 2049static int
4903ahd_linux_sem_timeout(u_long arg) 2050ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
4904{ 2051{
4905 struct scb *scb; 2052 struct ahd_softc *ahd;
4906 struct ahd_softc *ahd; 2053 struct ahd_linux_device *dev;
4907 u_long s; 2054 struct scb *pending_scb;
2055 u_int saved_scbptr;
2056 u_int active_scbptr;
2057 u_int last_phase;
2058 u_int saved_scsiid;
2059 u_int cdb_byte;
2060 int retval;
2061 int was_paused;
2062 int paused;
2063 int wait;
2064 int disconnected;
2065 ahd_mode_state saved_modes;
4908 2066
4909 scb = (struct scb *)arg; 2067 pending_scb = NULL;
4910 ahd = scb->ahd_softc; 2068 paused = FALSE;
4911 ahd_lock(ahd, &s); 2069 wait = FALSE;
4912 if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) { 2070 ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
4913 scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM; 2071
4914 up(&ahd->platform_data->eh_sem); 2072 printf("%s:%d:%d:%d: Attempting to queue a%s message:",
2073 ahd_name(ahd), cmd->device->channel,
2074 cmd->device->id, cmd->device->lun,
2075 flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
2076
2077 printf("CDB:");
2078 for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
2079 printf(" 0x%x", cmd->cmnd[cdb_byte]);
2080 printf("\n");
2081
2082 spin_lock_irq(&ahd->platform_data->spin_lock);
2083
2084 /*
2085 * First determine if we currently own this command.
2086 * Start by searching the device queue. If not found
2087 * there, check the pending_scb list. If not found
2088 * at all, and the system wanted us to just abort the
2089 * command, return success.
2090 */
2091 dev = scsi_transport_device_data(cmd->device);
2092
2093 if (dev == NULL) {
2094 /*
2095 * No target device for this command exists,
2096 * so we must not still own the command.
2097 */
2098 printf("%s:%d:%d:%d: Is not an active device\n",
2099 ahd_name(ahd), cmd->device->channel, cmd->device->id,
2100 cmd->device->lun);
2101 retval = SUCCESS;
2102 goto no_cmd;
4915 } 2103 }
4916 ahd_unlock(ahd, &s); 2104
2105 /*
2106 * See if we can find a matching cmd in the pending list.
2107 */
2108 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
2109 if (pending_scb->io_ctx == cmd)
2110 break;
2111 }
2112
2113 if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
2114
2115 /* Any SCB for this device will do for a target reset */
2116 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
2117 if (ahd_match_scb(ahd, pending_scb, cmd->device->id,
2118 cmd->device->channel + 'A',
2119 CAM_LUN_WILDCARD,
2120 SCB_LIST_NULL, ROLE_INITIATOR) == 0)
2121 break;
2122 }
2123 }
2124
2125 if (pending_scb == NULL) {
2126 printf("%s:%d:%d:%d: Command not found\n",
2127 ahd_name(ahd), cmd->device->channel, cmd->device->id,
2128 cmd->device->lun);
2129 goto no_cmd;
2130 }
2131
2132 if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
2133 /*
2134 * We can't queue two recovery actions using the same SCB
2135 */
2136 retval = FAILED;
2137 goto done;
2138 }
2139
2140 /*
2141 * Ensure that the card doesn't do anything
2142 * behind our back. Also make sure that we
2143 * didn't "just" miss an interrupt that would
2144 * affect this cmd.
2145 */
2146 was_paused = ahd_is_paused(ahd);
2147 ahd_pause_and_flushwork(ahd);
2148 paused = TRUE;
2149
2150 if ((pending_scb->flags & SCB_ACTIVE) == 0) {
2151 printf("%s:%d:%d:%d: Command already completed\n",
2152 ahd_name(ahd), cmd->device->channel, cmd->device->id,
2153 cmd->device->lun);
2154 goto no_cmd;
2155 }
2156
2157 printf("%s: At time of recovery, card was %spaused\n",
2158 ahd_name(ahd), was_paused ? "" : "not ");
2159 ahd_dump_card_state(ahd);
2160
2161 disconnected = TRUE;
2162 if (flag == SCB_ABORT) {
2163 if (ahd_search_qinfifo(ahd, cmd->device->id,
2164 cmd->device->channel + 'A',
2165 cmd->device->lun,
2166 pending_scb->hscb->tag,
2167 ROLE_INITIATOR, CAM_REQ_ABORTED,
2168 SEARCH_COMPLETE) > 0) {
2169 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
2170 ahd_name(ahd), cmd->device->channel,
2171 cmd->device->id, cmd->device->lun);
2172 retval = SUCCESS;
2173 goto done;
2174 }
2175 } else if (ahd_search_qinfifo(ahd, cmd->device->id,
2176 cmd->device->channel + 'A',
2177 cmd->device->lun, pending_scb->hscb->tag,
2178 ROLE_INITIATOR, /*status*/0,
2179 SEARCH_COUNT) > 0) {
2180 disconnected = FALSE;
2181 }
2182
2183 saved_modes = ahd_save_modes(ahd);
2184 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2185 last_phase = ahd_inb(ahd, LASTPHASE);
2186 saved_scbptr = ahd_get_scbptr(ahd);
2187 active_scbptr = saved_scbptr;
2188 if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
2189 struct scb *bus_scb;
2190
2191 bus_scb = ahd_lookup_scb(ahd, active_scbptr);
2192 if (bus_scb == pending_scb)
2193 disconnected = FALSE;
2194 else if (flag != SCB_ABORT
2195 && ahd_inb(ahd, SAVED_SCSIID) == pending_scb->hscb->scsiid
2196 && ahd_inb(ahd, SAVED_LUN) == SCB_GET_LUN(pending_scb))
2197 disconnected = FALSE;
2198 }
2199
2200 /*
2201 * At this point, pending_scb is the scb associated with the
2202 * passed in command. That command is currently active on the
2203 * bus or is in the disconnected state.
2204 */
2205 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2206 if (last_phase != P_BUSFREE
2207 && (SCB_GET_TAG(pending_scb) == active_scbptr
2208 || (flag == SCB_DEVICE_RESET
2209 && SCSIID_TARGET(ahd, saved_scsiid) == cmd->device->id))) {
2210
2211 /*
2212 * We're active on the bus, so assert ATN
2213 * and hope that the target responds.
2214 */
2215 pending_scb = ahd_lookup_scb(ahd, active_scbptr);
2216 pending_scb->flags |= SCB_RECOVERY_SCB|flag;
2217 ahd_outb(ahd, MSG_OUT, HOST_MSG);
2218 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
2219 printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
2220 ahd_name(ahd), cmd->device->channel,
2221 cmd->device->id, cmd->device->lun);
2222 wait = TRUE;
2223 } else if (disconnected) {
2224
2225 /*
2226 * Actually re-queue this SCB in an attempt
2227 * to select the device before it reconnects.
2228 */
2229 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
2230 ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
2231 pending_scb->hscb->cdb_len = 0;
2232 pending_scb->hscb->task_attribute = 0;
2233 pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
2234
2235 if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
2236 /*
2237 * Mark the SCB as having an outstanding
2238 * task management function. Should the command
2239 * complete normally before the task management
2240 * function can be sent, the host will be notified
2241 * to abort our requeued SCB.
2242 */
2243 ahd_outb(ahd, SCB_TASK_MANAGEMENT,
2244 pending_scb->hscb->task_management);
2245 } else {
2246 /*
2247 * If non-packetized, set the MK_MESSAGE control
2248 * bit indicating that we desire to send a message.
2249 * We also set the disconnected flag since there is
2250 * no guarantee that our SCB control byte matches
2251 * the version on the card. We don't want the
2252 * sequencer to abort the command thinking an
2253 * unsolicited reselection occurred.
2254 */
2255 pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
2256
2257 /*
2258 * The sequencer will never re-reference the
2259 * in-core SCB. To make sure we are notified
2260 * during reselection, set the MK_MESSAGE flag in
2261 * the card's copy of the SCB.
2262 */
2263 ahd_outb(ahd, SCB_CONTROL,
2264 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
2265 }
2266
2267 /*
2268 * Clear out any entries in the QINFIFO first
2269 * so we are the next SCB for this target
2270 * to run.
2271 */
2272 ahd_search_qinfifo(ahd, cmd->device->id,
2273 cmd->device->channel + 'A', cmd->device->lun,
2274 SCB_LIST_NULL, ROLE_INITIATOR,
2275 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
2276 ahd_qinfifo_requeue_tail(ahd, pending_scb);
2277 ahd_set_scbptr(ahd, saved_scbptr);
2278 ahd_print_path(ahd, pending_scb);
2279 printf("Device is disconnected, re-queuing SCB\n");
2280 wait = TRUE;
2281 } else {
2282 printf("%s:%d:%d:%d: Unable to deliver message\n",
2283 ahd_name(ahd), cmd->device->channel,
2284 cmd->device->id, cmd->device->lun);
2285 retval = FAILED;
2286 goto done;
2287 }
2288
2289no_cmd:
2290 /*
2291 * Our assumption is that if we don't have the command, no
2292 * recovery action was required, so we return success. Again,
2293 * the semantics of the mid-layer recovery engine are not
2294 * well defined, so this may change in time.
2295 */
2296 retval = SUCCESS;
2297done:
2298 if (paused)
2299 ahd_unpause(ahd);
2300 if (wait) {
2301 struct timer_list timer;
2302 int ret;
2303
2304 ahd->platform_data->flags |= AHD_SCB_UP_EH_SEM;
2305 spin_unlock_irq(&ahd->platform_data->spin_lock);
2306 init_timer(&timer);
2307 timer.data = (u_long)ahd;
2308 timer.expires = jiffies + (5 * HZ);
2309 timer.function = ahd_linux_sem_timeout;
2310 add_timer(&timer);
2311 printf("Recovery code sleeping\n");
2312 down(&ahd->platform_data->eh_sem);
2313 printf("Recovery code awake\n");
2314 ret = del_timer_sync(&timer);
2315 if (ret == 0) {
2316 printf("Timer Expired\n");
2317 retval = FAILED;
2318 }
2319 spin_lock_irq(&ahd->platform_data->spin_lock);
2320 }
2321 spin_unlock_irq(&ahd->platform_data->spin_lock);
2322 return (retval);
4917} 2323}
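
The wait at the end of ahd_linux_queue_recovery_cmd() above bounds the down() on eh_sem with a watchdog timer and then reads del_timer_sync()'s return value: non-zero means the completion path did the up() while the timer was still pending, zero means the watchdog fired and the recovery attempt timed out. A stripped-down sketch of that pattern against the pre-2.6.15 timer API this driver uses; the demo_* names are hypothetical, and the real driver additionally guards the up() with the AHD_SCB_UP_EH_SEM flag so only one side wakes the sleeper:

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <asm/semaphore.h>

/* Watchdog handler: wake the sleeper if the completion never arrives. */
static void demo_sem_timeout(u_long arg)
{
	up((struct semaphore *)arg);
}

/* Sleep on 'sem' for at most 'timeout' jiffies; 0 means the watchdog fired. */
static int demo_down_timeout(struct semaphore *sem, unsigned long timeout)
{
	struct timer_list timer;

	init_timer(&timer);
	timer.data = (u_long)sem;
	timer.expires = jiffies + timeout;
	timer.function = demo_sem_timeout;
	add_timer(&timer);

	down(sem);			/* up()'d by completion or watchdog */
	return del_timer_sync(&timer);	/* 1: timer was pending, 0: it fired */
}
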
4918 2324
4919static void 2325static void ahd_linux_set_width(struct scsi_target *starget, int width)
4920ahd_linux_dev_timed_unfreeze(u_long arg)
4921{ 2326{
4922 struct ahd_linux_device *dev; 2327 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4923 struct ahd_softc *ahd; 2328 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
4924 u_long s; 2329 struct ahd_devinfo devinfo;
2330 unsigned long flags;
4925 2331
4926 dev = (struct ahd_linux_device *)arg; 2332 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
4927 ahd = dev->target->ahd; 2333 starget->channel + 'A', ROLE_INITIATOR);
4928 ahd_lock(ahd, &s); 2334 ahd_lock(ahd, &flags);
4929 dev->flags &= ~AHD_DEV_TIMER_ACTIVE; 2335 ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
4930 if (dev->qfrozen > 0) 2336 ahd_unlock(ahd, &flags);
4931 dev->qfrozen--;
4932 if (dev->qfrozen == 0
4933 && (dev->flags & AHD_DEV_ON_RUN_LIST) == 0)
4934 ahd_linux_run_device_queue(ahd, dev);
4935 if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
4936 && dev->active == 0)
4937 ahd_linux_free_device(ahd, dev);
4938 ahd_unlock(ahd, &s);
4939} 2337}
4940 2338
4941void 2339static void ahd_linux_set_period(struct scsi_target *starget, int period)
4942ahd_platform_dump_card_state(struct ahd_softc *ahd)
4943{ 2340{
4944 struct ahd_linux_device *dev; 2341 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4945 int target; 2342 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
4946 int maxtarget; 2343 struct ahd_tmode_tstate *tstate;
4947 int lun; 2344 struct ahd_initiator_tinfo *tinfo
4948 int i; 2345 = ahd_fetch_transinfo(ahd,
4949 2346 starget->channel + 'A',
4950 maxtarget = (ahd->features & AHD_WIDE) ? 15 : 7; 2347 shost->this_id, starget->id, &tstate);
4951 for (target = 0; target <=maxtarget; target++) { 2348 struct ahd_devinfo devinfo;
4952 2349 unsigned int ppr_options = tinfo->goal.ppr_options;
4953 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 2350 unsigned int dt;
4954 struct ahd_cmd *acmd; 2351 unsigned long flags;
4955 2352 unsigned long offset = tinfo->goal.offset;
4956 dev = ahd_linux_get_device(ahd, 0, target, 2353
4957 lun, /*alloc*/FALSE); 2354#ifdef AHD_DEBUG
4958 if (dev == NULL) 2355 if ((ahd_debug & AHD_SHOW_DV) != 0)
4959 continue; 2356 printf("%s: set period to %d\n", ahd_name(ahd), period);
4960 2357#endif
4961 printf("DevQ(%d:%d:%d): ", 0, target, lun); 2358 if (offset == 0)
4962 i = 0; 2359 offset = MAX_OFFSET;
4963 TAILQ_FOREACH(acmd, &dev->busyq, acmd_links.tqe) { 2360
4964 if (i++ > AHD_SCB_MAX) 2361 if (period < 8)
4965 break; 2362 period = 8;
4966 } 2363 if (period < 10) {
4967 printf("%d waiting\n", i); 2364 ppr_options |= MSG_EXT_PPR_DT_REQ;
4968 } 2365 if (period == 8)
2366 ppr_options |= MSG_EXT_PPR_IU_REQ;
4969 } 2367 }
2368
2369 dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2370
2371 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2372 starget->channel + 'A', ROLE_INITIATOR);
2373
2374 /* all PPR requests apart from QAS require wide transfers */
2375 if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
2376 if (spi_width(starget) == 0)
2377 ppr_options &= MSG_EXT_PPR_QAS_REQ;
2378 }
2379
2380 ahd_find_syncrate(ahd, &period, &ppr_options,
2381 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2382
2383 ahd_lock(ahd, &flags);
2384 ahd_set_syncrate(ahd, &devinfo, period, offset,
2385 ppr_options, AHD_TRANS_GOAL, FALSE);
2386 ahd_unlock(ahd, &flags);
4970} 2387}
4971 2388
4972static int __init 2389static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
4973ahd_linux_init(void)
4974{ 2390{
4975#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 2391 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
4976 return ahd_linux_detect(&aic79xx_driver_template); 2392 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
4977#else 2393 struct ahd_tmode_tstate *tstate;
4978 scsi_register_module(MODULE_SCSI_HA, &aic79xx_driver_template); 2394 struct ahd_initiator_tinfo *tinfo
4979 if (aic79xx_driver_template.present == 0) { 2395 = ahd_fetch_transinfo(ahd,
4980 scsi_unregister_module(MODULE_SCSI_HA, 2396 starget->channel + 'A',
4981 &aic79xx_driver_template); 2397 shost->this_id, starget->id, &tstate);
4982 return (-ENODEV); 2398 struct ahd_devinfo devinfo;
2399 unsigned int ppr_options = 0;
2400 unsigned int period = 0;
2401 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2402 unsigned long flags;
2403
2404#ifdef AHD_DEBUG
2405 if ((ahd_debug & AHD_SHOW_DV) != 0)
2406 printf("%s: set offset to %d\n", ahd_name(ahd), offset);
2407#endif
2408
2409 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2410 starget->channel + 'A', ROLE_INITIATOR);
2411 if (offset != 0) {
2412 period = tinfo->goal.period;
2413 ppr_options = tinfo->goal.ppr_options;
2414 ahd_find_syncrate(ahd, &period, &ppr_options,
2415 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
4983 } 2416 }
4984 2417
4985 return (0); 2418 ahd_lock(ahd, &flags);
2419 ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options,
2420 AHD_TRANS_GOAL, FALSE);
2421 ahd_unlock(ahd, &flags);
2422}
2423
2424static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
2425{
2426 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2427 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2428 struct ahd_tmode_tstate *tstate;
2429 struct ahd_initiator_tinfo *tinfo
2430 = ahd_fetch_transinfo(ahd,
2431 starget->channel + 'A',
2432 shost->this_id, starget->id, &tstate);
2433 struct ahd_devinfo devinfo;
2434 unsigned int ppr_options = tinfo->goal.ppr_options
2435 & ~MSG_EXT_PPR_DT_REQ;
2436 unsigned int period = tinfo->goal.period;
2437 unsigned int width = tinfo->goal.width;
2438 unsigned long flags;
2439
2440#ifdef AHD_DEBUG
2441 if ((ahd_debug & AHD_SHOW_DV) != 0)
2442 printf("%s: %s DT\n", ahd_name(ahd),
2443 dt ? "enabling" : "disabling");
2444#endif
2445 if (dt) {
2446 ppr_options |= MSG_EXT_PPR_DT_REQ;
2447 if (!width)
2448 ahd_linux_set_width(starget, 1);
2449 } else {
2450 if (period <= 9)
2451 period = 10; /* If resetting DT, period must be >= 25ns */
2452 /* IU is invalid without DT set */
2453 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
2454 }
2455 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2456 starget->channel + 'A', ROLE_INITIATOR);
2457 ahd_find_syncrate(ahd, &period, &ppr_options,
2458 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2459
2460 ahd_lock(ahd, &flags);
2461 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2462 ppr_options, AHD_TRANS_GOAL, FALSE);
2463 ahd_unlock(ahd, &flags);
2464}
2465
2466static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
2467{
2468 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2469 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2470 struct ahd_tmode_tstate *tstate;
2471 struct ahd_initiator_tinfo *tinfo
2472 = ahd_fetch_transinfo(ahd,
2473 starget->channel + 'A',
2474 shost->this_id, starget->id, &tstate);
2475 struct ahd_devinfo devinfo;
2476 unsigned int ppr_options = tinfo->goal.ppr_options
2477 & ~MSG_EXT_PPR_QAS_REQ;
2478 unsigned int period = tinfo->goal.period;
2479 unsigned int dt;
2480 unsigned long flags;
2481
2482#ifdef AHD_DEBUG
2483 if ((ahd_debug & AHD_SHOW_DV) != 0)
2484 printf("%s: %s QAS\n", ahd_name(ahd),
2485 qas ? "enabling" : "disabling");
2486#endif
2487
2488 if (qas) {
2489 ppr_options |= MSG_EXT_PPR_QAS_REQ;
2490 }
2491
2492 dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2493
2494 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2495 starget->channel + 'A', ROLE_INITIATOR);
2496 ahd_find_syncrate(ahd, &period, &ppr_options,
2497 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2498
2499 ahd_lock(ahd, &flags);
2500 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2501 ppr_options, AHD_TRANS_GOAL, FALSE);
2502 ahd_unlock(ahd, &flags);
2503}
2504
2505static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
2506{
2507 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2508 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2509 struct ahd_tmode_tstate *tstate;
2510 struct ahd_initiator_tinfo *tinfo
2511 = ahd_fetch_transinfo(ahd,
2512 starget->channel + 'A',
2513 shost->this_id, starget->id, &tstate);
2514 struct ahd_devinfo devinfo;
2515 unsigned int ppr_options = tinfo->goal.ppr_options
2516 & ~MSG_EXT_PPR_IU_REQ;
2517 unsigned int period = tinfo->goal.period;
2518 unsigned int dt;
2519 unsigned long flags;
2520
2521#ifdef AHD_DEBUG
2522 if ((ahd_debug & AHD_SHOW_DV) != 0)
2523 printf("%s: %s IU\n", ahd_name(ahd),
2524 iu ? "enabling" : "disabling");
4986#endif 2525#endif
2526
2527 if (iu) {
2528 ppr_options |= MSG_EXT_PPR_IU_REQ;
2529 ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */
2530 }
2531
2532 dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2533
2534 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2535 starget->channel + 'A', ROLE_INITIATOR);
2536 ahd_find_syncrate(ahd, &period, &ppr_options,
2537 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2538
2539 ahd_lock(ahd, &flags);
2540 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2541 ppr_options, AHD_TRANS_GOAL, FALSE);
2542 ahd_unlock(ahd, &flags);
4987} 2543}
4988 2544
4989static void __exit 2545static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
4990ahd_linux_exit(void)
4991{ 2546{
4992 struct ahd_softc *ahd; 2547 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2548 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2549 struct ahd_tmode_tstate *tstate;
2550 struct ahd_initiator_tinfo *tinfo
2551 = ahd_fetch_transinfo(ahd,
2552 starget->channel + 'A',
2553 shost->this_id, starget->id, &tstate);
2554 struct ahd_devinfo devinfo;
2555 unsigned int ppr_options = tinfo->goal.ppr_options
2556 & ~MSG_EXT_PPR_RD_STRM;
2557 unsigned int period = tinfo->goal.period;
2558 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2559 unsigned long flags;
4993 2560
4994 /* 2561#ifdef AHD_DEBUG
4995 * Shutdown DV threads before going into the SCSI mid-layer. 2562 if ((ahd_debug & AHD_SHOW_DV) != 0)
4996 * This avoids situations where the mid-layer locks the entire 2563 printf("%s: %s Read Streaming\n", ahd_name(ahd),
4997 * kernel so that waiting for our DV threads to exit leads 2564 rdstrm ? "enabling" : "disabling");
4998 * to deadlock. 2565#endif
4999 */ 2566
5000 TAILQ_FOREACH(ahd, &ahd_tailq, links) { 2567 if (rdstrm)
2568 ppr_options |= MSG_EXT_PPR_RD_STRM;
2569
2570 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2571 starget->channel + 'A', ROLE_INITIATOR);
2572 ahd_find_syncrate(ahd, &period, &ppr_options,
2573 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
5001 2574
5002 ahd_linux_kill_dv_thread(ahd); 2575 ahd_lock(ahd, &flags);
2576 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2577 ppr_options, AHD_TRANS_GOAL, FALSE);
2578 ahd_unlock(ahd, &flags);
2579}
2580
2581static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
2582{
2583 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2584 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2585 struct ahd_tmode_tstate *tstate;
2586 struct ahd_initiator_tinfo *tinfo
2587 = ahd_fetch_transinfo(ahd,
2588 starget->channel + 'A',
2589 shost->this_id, starget->id, &tstate);
2590 struct ahd_devinfo devinfo;
2591 unsigned int ppr_options = tinfo->goal.ppr_options
2592 & ~MSG_EXT_PPR_WR_FLOW;
2593 unsigned int period = tinfo->goal.period;
2594 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2595 unsigned long flags;
2596
2597#ifdef AHD_DEBUG
2598 if ((ahd_debug & AHD_SHOW_DV) != 0)
2599 printf("%s: %s Write Flow Control\n", ahd_name(ahd),
2600 wrflow ? "enabling" : "disabling");
2601#endif
2602
2603 if (wrflow)
2604 ppr_options |= MSG_EXT_PPR_WR_FLOW;
2605
2606 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2607 starget->channel + 'A', ROLE_INITIATOR);
2608 ahd_find_syncrate(ahd, &period, &ppr_options,
2609 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2610
2611 ahd_lock(ahd, &flags);
2612 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2613 ppr_options, AHD_TRANS_GOAL, FALSE);
2614 ahd_unlock(ahd, &flags);
2615}
2616
2617static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
2618{
2619 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2620 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2621 struct ahd_tmode_tstate *tstate;
2622 struct ahd_initiator_tinfo *tinfo
2623 = ahd_fetch_transinfo(ahd,
2624 starget->channel + 'A',
2625 shost->this_id, starget->id, &tstate);
2626 struct ahd_devinfo devinfo;
2627 unsigned int ppr_options = tinfo->goal.ppr_options
2628 & ~MSG_EXT_PPR_RTI;
2629 unsigned int period = tinfo->goal.period;
2630 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2631 unsigned long flags;
2632
2633 if ((ahd->features & AHD_RTI) == 0) {
2634#ifdef AHD_DEBUG
2635 if ((ahd_debug & AHD_SHOW_DV) != 0)
2636 printf("%s: RTI not available\n", ahd_name(ahd));
2637#endif
2638 return;
5003 } 2639 }
5004 2640
5005#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) 2641#ifdef AHD_DEBUG
2642 if ((ahd_debug & AHD_SHOW_DV) != 0)
2643 printf("%s: %s RTI\n", ahd_name(ahd),
2644 rti ? "enabling" : "disabling");
2645#endif
2646
2647 if (rti)
2648 ppr_options |= MSG_EXT_PPR_RTI;
2649
2650 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2651 starget->channel + 'A', ROLE_INITIATOR);
2652 ahd_find_syncrate(ahd, &period, &ppr_options,
2653 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2654
2655 ahd_lock(ahd, &flags);
2656 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2657 ppr_options, AHD_TRANS_GOAL, FALSE);
2658 ahd_unlock(ahd, &flags);
2659}
2660
2661static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
2662{
2663 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2664 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2665 struct ahd_tmode_tstate *tstate;
2666 struct ahd_initiator_tinfo *tinfo
2667 = ahd_fetch_transinfo(ahd,
2668 starget->channel + 'A',
2669 shost->this_id, starget->id, &tstate);
2670 struct ahd_devinfo devinfo;
2671 unsigned int ppr_options = tinfo->goal.ppr_options
2672 & ~MSG_EXT_PPR_PCOMP_EN;
2673 unsigned int period = tinfo->goal.period;
2674 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2675 unsigned long flags;
2676
2677#ifdef AHD_DEBUG
2678 if ((ahd_debug & AHD_SHOW_DV) != 0)
2679 printf("%s: %s Precompensation\n", ahd_name(ahd),
2680 pcomp ? "Enable" : "Disable");
2681#endif
2682
2683 if (pcomp)
2684 ppr_options |= MSG_EXT_PPR_PCOMP_EN;
2685
2686 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2687 starget->channel + 'A', ROLE_INITIATOR);
2688 ahd_find_syncrate(ahd, &period, &ppr_options,
2689 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2690
2691 ahd_lock(ahd, &flags);
2692 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2693 ppr_options, AHD_TRANS_GOAL, FALSE);
2694 ahd_unlock(ahd, &flags);
2695}
2696
2697static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold)
2698{
2699 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2700 struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2701 struct ahd_tmode_tstate *tstate;
2702 struct ahd_initiator_tinfo *tinfo
2703 = ahd_fetch_transinfo(ahd,
2704 starget->channel + 'A',
2705 shost->this_id, starget->id, &tstate);
2706 struct ahd_devinfo devinfo;
2707 unsigned int ppr_options = tinfo->goal.ppr_options
2708 & ~MSG_EXT_PPR_HOLD_MCS;
2709 unsigned int period = tinfo->goal.period;
2710 unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2711 unsigned long flags;
2712
2713 if (hold)
2714 ppr_options |= MSG_EXT_PPR_HOLD_MCS;
2715
2716 ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2717 starget->channel + 'A', ROLE_INITIATOR);
2718 ahd_find_syncrate(ahd, &period, &ppr_options,
2719 dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2720
2721 ahd_lock(ahd, &flags);
2722 ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2723 ppr_options, AHD_TRANS_GOAL, FALSE);
2724 ahd_unlock(ahd, &flags);
2725}
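
Every spi_transport setter added above, from ahd_linux_set_width() down to ahd_linux_set_hold_mcs(), repeats the same shape: fetch the target's goal transfer settings, flip a single PPR option bit, re-validate the period through ahd_find_syncrate() (the full Ultra320 table only when DT is requested), and commit the result as a negotiation goal under the softc lock. A condensed sketch of that shared body, folded into one hypothetical helper that is not part of the driver; it assumes the driver's own aic79xx headers and uses only calls that appear in the hunks above:

static void
ahd_demo_set_ppr_bit(struct scsi_target *starget, u_int bit, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo =
	    ahd_fetch_transinfo(ahd, starget->channel + 'A',
				shost->this_id, starget->id, &tstate);
	struct ahd_devinfo devinfo;
	unsigned int ppr_options = tinfo->goal.ppr_options & ~bit;
	unsigned int period = tinfo->goal.period;
	unsigned int dt;
	unsigned long flags;

	if (enable)
		ppr_options |= bit;

	/* DT clocking unlocks the faster half of the syncrate table. */
	dt = ppr_options & MSG_EXT_PPR_DT_REQ;

	ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	ahd_find_syncrate(ahd, &period, &ppr_options,
			  dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);

	ahd_lock(ahd, &flags);
	ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
			 ppr_options, AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_unlock(ahd, &flags);
}
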
2726
2727
2728
2729static struct spi_function_template ahd_linux_transport_functions = {
2730 .set_offset = ahd_linux_set_offset,
2731 .show_offset = 1,
2732 .set_period = ahd_linux_set_period,
2733 .show_period = 1,
2734 .set_width = ahd_linux_set_width,
2735 .show_width = 1,
2736 .set_dt = ahd_linux_set_dt,
2737 .show_dt = 1,
2738 .set_iu = ahd_linux_set_iu,
2739 .show_iu = 1,
2740 .set_qas = ahd_linux_set_qas,
2741 .show_qas = 1,
2742 .set_rd_strm = ahd_linux_set_rd_strm,
2743 .show_rd_strm = 1,
2744 .set_wr_flow = ahd_linux_set_wr_flow,
2745 .show_wr_flow = 1,
2746 .set_rti = ahd_linux_set_rti,
2747 .show_rti = 1,
2748 .set_pcomp_en = ahd_linux_set_pcomp_en,
2749 .show_pcomp_en = 1,
2750 .set_hold_mcs = ahd_linux_set_hold_mcs,
2751 .show_hold_mcs = 1,
2752};
2753
2754static int __init
2755ahd_linux_init(void)
2756{
2757 int error = 0;
2758
5006 /* 2759 /*
5007 * In 2.4 we have to unregister from the PCI core _after_ 2760 * If we've been passed any parameters, process them now.
5008 * unregistering from the scsi midlayer to avoid dangling
5009 * references.
5010 */ 2761 */
5011 scsi_unregister_module(MODULE_SCSI_HA, &aic79xx_driver_template); 2762 if (aic79xx)
5012#endif 2763 aic79xx_setup(aic79xx);
2764
2765 ahd_linux_transport_template =
2766 spi_attach_transport(&ahd_linux_transport_functions);
2767 if (!ahd_linux_transport_template)
2768 return -ENODEV;
2769
2770 scsi_transport_reserve_target(ahd_linux_transport_template,
2771 sizeof(struct ahd_linux_target));
2772 scsi_transport_reserve_device(ahd_linux_transport_template,
2773 sizeof(struct ahd_linux_device));
2774
2775 error = ahd_linux_pci_init();
2776 if (error)
2777 spi_release_transport(ahd_linux_transport_template);
2778 return error;
2779}
2780
2781static void __exit
2782ahd_linux_exit(void)
2783{
5013 ahd_linux_pci_exit(); 2784 ahd_linux_pci_exit();
2785 spi_release_transport(ahd_linux_transport_template);
5014} 2786}
5015 2787
5016module_init(ahd_linux_init); 2788module_init(ahd_linux_init);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 7823e52e99ab..052c6619accc 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -42,6 +42,7 @@
42#ifndef _AIC79XX_LINUX_H_ 42#ifndef _AIC79XX_LINUX_H_
43#define _AIC79XX_LINUX_H_ 43#define _AIC79XX_LINUX_H_
44 44
45#include <linux/config.h>
45#include <linux/types.h> 46#include <linux/types.h>
46#include <linux/blkdev.h> 47#include <linux/blkdev.h>
47#include <linux/delay.h> 48#include <linux/delay.h>
@@ -49,18 +50,23 @@
49#include <linux/pci.h> 50#include <linux/pci.h>
50#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
51#include <linux/version.h> 52#include <linux/version.h>
53#include <linux/interrupt.h>
52#include <linux/module.h> 54#include <linux/module.h>
55#include <linux/slab.h>
53#include <asm/byteorder.h> 56#include <asm/byteorder.h>
54#include <asm/io.h> 57#include <asm/io.h>
55 58
56#include <linux/interrupt.h> /* For tasklet support. */ 59#include <scsi/scsi.h>
57#include <linux/config.h> 60#include <scsi/scsi_cmnd.h>
58#include <linux/slab.h> 61#include <scsi/scsi_eh.h>
62#include <scsi/scsi_device.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h>
65#include <scsi/scsi_transport.h>
66#include <scsi/scsi_transport_spi.h>
59 67
60/* Core SCSI definitions */ 68/* Core SCSI definitions */
61#define AIC_LIB_PREFIX ahd 69#define AIC_LIB_PREFIX ahd
62#include "scsi.h"
63#include <scsi/scsi_host.h>
64 70
65/* Name space conflict with BSD queue macros */ 71/* Name space conflict with BSD queue macros */
66#ifdef LIST_HEAD 72#ifdef LIST_HEAD
@@ -95,7 +101,7 @@
95/************************* Forward Declarations *******************************/ 101/************************* Forward Declarations *******************************/
96struct ahd_softc; 102struct ahd_softc;
97typedef struct pci_dev *ahd_dev_softc_t; 103typedef struct pci_dev *ahd_dev_softc_t;
98typedef Scsi_Cmnd *ahd_io_ctx_t; 104typedef struct scsi_cmnd *ahd_io_ctx_t;
99 105
100/******************************* Byte Order ***********************************/ 106/******************************* Byte Order ***********************************/
101#define ahd_htobe16(x) cpu_to_be16(x) 107#define ahd_htobe16(x) cpu_to_be16(x)
@@ -114,8 +120,7 @@ typedef Scsi_Cmnd *ahd_io_ctx_t;
114 120
115/************************* Configuration Data *********************************/ 121/************************* Configuration Data *********************************/
116extern uint32_t aic79xx_allow_memio; 122extern uint32_t aic79xx_allow_memio;
117extern int aic79xx_detect_complete; 123extern struct scsi_host_template aic79xx_driver_template;
118extern Scsi_Host_Template aic79xx_driver_template;
119 124
120/***************************** Bus Space/DMA **********************************/ 125/***************************** Bus Space/DMA **********************************/
121 126
@@ -145,11 +150,7 @@ struct ahd_linux_dma_tag
145}; 150};
146typedef struct ahd_linux_dma_tag* bus_dma_tag_t; 151typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
147 152
148struct ahd_linux_dmamap 153typedef dma_addr_t bus_dmamap_t;
149{
150 dma_addr_t bus_addr;
151};
152typedef struct ahd_linux_dmamap* bus_dmamap_t;
153 154
154typedef int bus_dma_filter_t(void*, dma_addr_t); 155typedef int bus_dma_filter_t(void*, dma_addr_t);
155typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); 156typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
@@ -226,12 +227,12 @@ typedef struct timer_list ahd_timer_t;
226#define ahd_timer_init init_timer 227#define ahd_timer_init init_timer
227#define ahd_timer_stop del_timer_sync 228#define ahd_timer_stop del_timer_sync
228typedef void ahd_linux_callback_t (u_long); 229typedef void ahd_linux_callback_t (u_long);
229static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec, 230static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
230 ahd_callback_t *func, void *arg); 231 ahd_callback_t *func, void *arg);
231static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec); 232static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
232 233
233static __inline void 234static __inline void
234ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg) 235ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
235{ 236{
236 struct ahd_softc *ahd; 237 struct ahd_softc *ahd;
237 238
@@ -252,43 +253,8 @@ ahd_scb_timer_reset(struct scb *scb, u_int usec)
252/***************************** SMP support ************************************/ 253/***************************** SMP support ************************************/
253#include <linux/spinlock.h> 254#include <linux/spinlock.h>
254 255
255#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
256#define AHD_SCSI_HAS_HOST_LOCK 1
257#else
258#define AHD_SCSI_HAS_HOST_LOCK 0
259#endif
260
261#define AIC79XX_DRIVER_VERSION "1.3.11" 256#define AIC79XX_DRIVER_VERSION "1.3.11"
262 257
263/**************************** Front End Queues ********************************/
264/*
265 * Data structure used to cast the Linux struct scsi_cmnd to something
266 * that allows us to use the queue macros. The linux structure has
267 * plenty of space to hold the links fields as required by the queue
268 * macros, but the queue macros require them to have the correct type.
269 */
270struct ahd_cmd_internal {
271 /* Area owned by the Linux scsi layer. */
272 uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
273 union {
274 STAILQ_ENTRY(ahd_cmd) ste;
275 LIST_ENTRY(ahd_cmd) le;
276 TAILQ_ENTRY(ahd_cmd) tqe;
277 } links;
278 uint32_t end;
279};
280
281struct ahd_cmd {
282 union {
283 struct ahd_cmd_internal icmd;
284 struct scsi_cmnd scsi_cmd;
285 } un;
286};
287
288#define acmd_icmd(cmd) ((cmd)->un.icmd)
289#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
290#define acmd_links un.icmd.links
291
292/*************************** Device Data Structures ***************************/ 258/*************************** Device Data Structures ***************************/
293/* 259/*
294 * A per probed device structure used to deal with some error recovery 260 * A per probed device structure used to deal with some error recovery
@@ -297,22 +263,17 @@ struct ahd_cmd {
297 * after a successfully completed inquiry command to the target when 263 * after a successfully completed inquiry command to the target when
298 * that inquiry data indicates a lun is present. 264 * that inquiry data indicates a lun is present.
299 */ 265 */
300TAILQ_HEAD(ahd_busyq, ahd_cmd); 266
301typedef enum { 267typedef enum {
302 AHD_DEV_UNCONFIGURED = 0x01,
303 AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ 268 AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
304 AHD_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
305 AHD_DEV_ON_RUN_LIST = 0x08, /* Queued to be run later */
306 AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ 269 AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
307 AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ 270 AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
308 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ 271 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
309 AHD_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
310} ahd_linux_dev_flags; 272} ahd_linux_dev_flags;
311 273
312struct ahd_linux_target; 274struct ahd_linux_target;
313struct ahd_linux_device { 275struct ahd_linux_device {
314 TAILQ_ENTRY(ahd_linux_device) links; 276 TAILQ_ENTRY(ahd_linux_device) links;
315 struct ahd_busyq busyq;
316 277
317 /* 278 /*
318 * The number of transactions currently 279 * The number of transactions currently
@@ -388,62 +349,12 @@ struct ahd_linux_device {
388 */ 349 */
389 u_int commands_since_idle_or_otag; 350 u_int commands_since_idle_or_otag;
390#define AHD_OTAG_THRESH 500 351#define AHD_OTAG_THRESH 500
391
392 int lun;
393 Scsi_Device *scsi_device;
394 struct ahd_linux_target *target;
395}; 352};
396 353
397typedef enum {
398 AHD_DV_REQUIRED = 0x01,
399 AHD_INQ_VALID = 0x02,
400 AHD_BASIC_DV = 0x04,
401 AHD_ENHANCED_DV = 0x08
402} ahd_linux_targ_flags;
403
404/* DV States */
405typedef enum {
406 AHD_DV_STATE_EXIT = 0,
407 AHD_DV_STATE_INQ_SHORT_ASYNC,
408 AHD_DV_STATE_INQ_ASYNC,
409 AHD_DV_STATE_INQ_ASYNC_VERIFY,
410 AHD_DV_STATE_TUR,
411 AHD_DV_STATE_REBD,
412 AHD_DV_STATE_INQ_VERIFY,
413 AHD_DV_STATE_WEB,
414 AHD_DV_STATE_REB,
415 AHD_DV_STATE_SU,
416 AHD_DV_STATE_BUSY
417} ahd_dv_state;
418
419struct ahd_linux_target { 354struct ahd_linux_target {
420 struct ahd_linux_device *devices[AHD_NUM_LUNS]; 355 struct scsi_device *sdev[AHD_NUM_LUNS];
421 int channel;
422 int target;
423 int refcount;
424 struct ahd_transinfo last_tinfo; 356 struct ahd_transinfo last_tinfo;
425 struct ahd_softc *ahd; 357 struct ahd_softc *ahd;
426 ahd_linux_targ_flags flags;
427 struct scsi_inquiry_data *inq_data;
428 /*
429 * The next "fallback" period to use for narrow/wide transfers.
430 */
431 uint8_t dv_next_narrow_period;
432 uint8_t dv_next_wide_period;
433 uint8_t dv_max_width;
434 uint8_t dv_max_ppr_options;
435 uint8_t dv_last_ppr_options;
436 u_int dv_echo_size;
437 ahd_dv_state dv_state;
438 u_int dv_state_retry;
439 uint8_t *dv_buffer;
440 uint8_t *dv_buffer1;
441
442 /*
443 * Cumulative counter of errors.
444 */
445 u_long errors_detected;
446 u_long cmds_since_error;
447}; 358};
448 359
449/********************* Definitions Required by the Core ***********************/ 360/********************* Definitions Required by the Core ***********************/
@@ -453,32 +364,16 @@ struct ahd_linux_target {
453 * manner and are allocated below 4GB, the number of S/G segments is 364 * manner and are allocated below 4GB, the number of S/G segments is
454 * unrestricted. 365 * unrestricted.
455 */ 366 */
456#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
457/*
458 * We dynamically adjust the number of segments in pre-2.5 kernels to
459 * avoid fragmentation issues in the SCSI mid-layer's private memory
460 * allocator. See aic79xx_osm.c ahd_linux_size_nseg() for details.
461 */
462extern u_int ahd_linux_nseg;
463#define AHD_NSEG ahd_linux_nseg
464#define AHD_LINUX_MIN_NSEG 64
465#else
466#define AHD_NSEG 128 367#define AHD_NSEG 128
467#endif
468 368
469/* 369/*
470 * Per-SCB OSM storage. 370 * Per-SCB OSM storage.
471 */ 371 */
472typedef enum {
473 AHD_SCB_UP_EH_SEM = 0x1
474} ahd_linux_scb_flags;
475
476struct scb_platform_data { 372struct scb_platform_data {
477 struct ahd_linux_device *dev; 373 struct ahd_linux_device *dev;
478 dma_addr_t buf_busaddr; 374 dma_addr_t buf_busaddr;
479 uint32_t xfer_len; 375 uint32_t xfer_len;
480 uint32_t sense_resid; /* Auto-Sense residual */ 376 uint32_t sense_resid; /* Auto-Sense residual */
481 ahd_linux_scb_flags flags;
482}; 377};
483 378
484/* 379/*
@@ -487,44 +382,23 @@ struct scb_platform_data {
487 * alignment restrictions of the various platforms supported by 382 * alignment restrictions of the various platforms supported by
488 * this driver. 383 * this driver.
489 */ 384 */
490typedef enum {
491 AHD_DV_WAIT_SIMQ_EMPTY = 0x01,
492 AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
493 AHD_DV_ACTIVE = 0x04,
494 AHD_DV_SHUTDOWN = 0x08,
495 AHD_RUN_CMPLT_Q_TIMER = 0x10
496} ahd_linux_softc_flags;
497
498TAILQ_HEAD(ahd_completeq, ahd_cmd);
499
500struct ahd_platform_data { 385struct ahd_platform_data {
501 /* 386 /*
502 * Fields accessed from interrupt context. 387 * Fields accessed from interrupt context.
503 */ 388 */
504 struct ahd_linux_target *targets[AHD_NUM_TARGETS]; 389 struct scsi_target *starget[AHD_NUM_TARGETS];
505 TAILQ_HEAD(, ahd_linux_device) device_runq;
506 struct ahd_completeq completeq;
507 390
508 spinlock_t spin_lock; 391 spinlock_t spin_lock;
509 struct tasklet_struct runq_tasklet;
510 u_int qfrozen; 392 u_int qfrozen;
511 pid_t dv_pid;
512 struct timer_list completeq_timer;
513 struct timer_list reset_timer; 393 struct timer_list reset_timer;
514 struct timer_list stats_timer;
515 struct semaphore eh_sem; 394 struct semaphore eh_sem;
516 struct semaphore dv_sem;
517 struct semaphore dv_cmd_sem; /* XXX This needs to be in
518 * the target struct
519 */
520 struct scsi_device *dv_scsi_dev;
521 struct Scsi_Host *host; /* pointer to scsi host */ 395 struct Scsi_Host *host; /* pointer to scsi host */
522#define AHD_LINUX_NOIRQ ((uint32_t)~0) 396#define AHD_LINUX_NOIRQ ((uint32_t)~0)
523 uint32_t irq; /* IRQ for this adapter */ 397 uint32_t irq; /* IRQ for this adapter */
524 uint32_t bios_address; 398 uint32_t bios_address;
525 uint32_t mem_busaddr; /* Mem Base Addr */ 399 uint32_t mem_busaddr; /* Mem Base Addr */
526 uint64_t hw_dma_mask; 400#define AHD_SCB_UP_EH_SEM 0x1
527 ahd_linux_softc_flags flags; 401 uint32_t flags;
528}; 402};
529 403
530/************************** OS Utility Wrappers *******************************/ 404/************************** OS Utility Wrappers *******************************/
@@ -641,7 +515,7 @@ ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
641 515
642/**************************** Initialization **********************************/ 516/**************************** Initialization **********************************/
643int ahd_linux_register_host(struct ahd_softc *, 517int ahd_linux_register_host(struct ahd_softc *,
644 Scsi_Host_Template *); 518 struct scsi_host_template *);
645 519
646uint64_t ahd_linux_get_memsize(void); 520uint64_t ahd_linux_get_memsize(void);
647 521
@@ -657,28 +531,6 @@ void ahd_format_transinfo(struct info_str *info,
657 struct ahd_transinfo *tinfo); 531 struct ahd_transinfo *tinfo);
658 532
659/******************************** Locking *************************************/ 533/******************************** Locking *************************************/
660/* Lock protecting internal data structures */
661static __inline void ahd_lockinit(struct ahd_softc *);
662static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
663static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);
664
665/* Lock acquisition and release of the above lock in midlayer entry points. */
666static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
667 unsigned long *flags);
668static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
669 unsigned long *flags);
670
671/* Lock held during command completion to the upper layer */
672static __inline void ahd_done_lockinit(struct ahd_softc *);
673static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
674static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);
675
676/* Lock held during ahd_list manipulation and ahd softc frees */
677extern spinlock_t ahd_list_spinlock;
678static __inline void ahd_list_lockinit(void);
679static __inline void ahd_list_lock(unsigned long *flags);
680static __inline void ahd_list_unlock(unsigned long *flags);
681
682static __inline void 534static __inline void
683ahd_lockinit(struct ahd_softc *ahd) 535ahd_lockinit(struct ahd_softc *ahd)
684{ 536{
@@ -697,75 +549,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
697 spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags); 549 spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
698} 550}
699 551
700static __inline void
701ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
702{
703 /*
704 * In 2.5.X and some 2.4.X versions, the midlayer takes our
705 * lock just before calling us, so we avoid locking again.
706 * For other kernel versions, the io_request_lock is taken
707 * just before our entry point is called. In this case, we
708 * trade the io_request_lock for our per-softc lock.
709 */
710#if AHD_SCSI_HAS_HOST_LOCK == 0
711 spin_unlock(&io_request_lock);
712 spin_lock(&ahd->platform_data->spin_lock);
713#endif
714}
715
716static __inline void
717ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
718{
719#if AHD_SCSI_HAS_HOST_LOCK == 0
720 spin_unlock(&ahd->platform_data->spin_lock);
721 spin_lock(&io_request_lock);
722#endif
723}
724
725static __inline void
726ahd_done_lockinit(struct ahd_softc *ahd)
727{
728 /*
729 * In 2.5.X, our own lock is held during completions.
730 * In previous versions, the io_request_lock is used.
731 * In either case, we can't initialize this lock again.
732 */
733}
734
735static __inline void
736ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
737{
738#if AHD_SCSI_HAS_HOST_LOCK == 0
739 spin_lock(&io_request_lock);
740#endif
741}
742
743static __inline void
744ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
745{
746#if AHD_SCSI_HAS_HOST_LOCK == 0
747 spin_unlock(&io_request_lock);
748#endif
749}
750
751static __inline void
752ahd_list_lockinit(void)
753{
754 spin_lock_init(&ahd_list_spinlock);
755}
756
757static __inline void
758ahd_list_lock(unsigned long *flags)
759{
760 spin_lock_irqsave(&ahd_list_spinlock, *flags);
761}
762
763static __inline void
764ahd_list_unlock(unsigned long *flags)
765{
766 spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
767}
768
769/******************************* PCI Definitions ******************************/ 552/******************************* PCI Definitions ******************************/
770/* 553/*
771 * PCIM_xxx: mask to locate subfield in register 554 * PCIM_xxx: mask to locate subfield in register
@@ -925,27 +708,17 @@ ahd_flush_device_writes(struct ahd_softc *ahd)
925} 708}
926 709
927/**************************** Proc FS Support *********************************/ 710/**************************** Proc FS Support *********************************/
928#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
929int ahd_linux_proc_info(char *, char **, off_t, int, int, int);
930#else
931int ahd_linux_proc_info(struct Scsi_Host *, char *, char **, 711int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
932 off_t, int, int); 712 off_t, int, int);
933#endif
934
935/*************************** Domain Validation ********************************/
936#define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
937#define AHD_DV_SIMQ_FROZEN(ahd) \
938 ((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0) \
939 && (ahd)->platform_data->qfrozen == 1)
940 713
941/*********************** Transaction Access Wrappers **************************/ 714/*********************** Transaction Access Wrappers **************************/
942static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t); 715static __inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
943static __inline void ahd_set_transaction_status(struct scb *, uint32_t); 716static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
944static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t); 717static __inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
945static __inline void ahd_set_scsi_status(struct scb *, uint32_t); 718static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
946static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd); 719static __inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
947static __inline uint32_t ahd_get_transaction_status(struct scb *); 720static __inline uint32_t ahd_get_transaction_status(struct scb *);
948static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd); 721static __inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
949static __inline uint32_t ahd_get_scsi_status(struct scb *); 722static __inline uint32_t ahd_get_scsi_status(struct scb *);
950static __inline void ahd_set_transaction_tag(struct scb *, int, u_int); 723static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
951static __inline u_long ahd_get_transfer_length(struct scb *); 724static __inline u_long ahd_get_transfer_length(struct scb *);
@@ -964,7 +737,7 @@ static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
964static __inline void ahd_freeze_scb(struct scb *scb); 737static __inline void ahd_freeze_scb(struct scb *scb);
965 738
966static __inline 739static __inline
967void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status) 740void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
968{ 741{
969 cmd->result &= ~(CAM_STATUS_MASK << 16); 742 cmd->result &= ~(CAM_STATUS_MASK << 16);
970 cmd->result |= status << 16; 743 cmd->result |= status << 16;
@@ -977,7 +750,7 @@ void ahd_set_transaction_status(struct scb *scb, uint32_t status)
977} 750}
978 751
979static __inline 752static __inline
980void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status) 753void ahd_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
981{ 754{
982 cmd->result &= ~0xFFFF; 755 cmd->result &= ~0xFFFF;
983 cmd->result |= status; 756 cmd->result |= status;
@@ -990,7 +763,7 @@ void ahd_set_scsi_status(struct scb *scb, uint32_t status)
990} 763}
991 764
992static __inline 765static __inline
993uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd) 766uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd)
994{ 767{
995 return ((cmd->result >> 16) & CAM_STATUS_MASK); 768 return ((cmd->result >> 16) & CAM_STATUS_MASK);
996} 769}
@@ -1002,7 +775,7 @@ uint32_t ahd_get_transaction_status(struct scb *scb)
1002} 775}
1003 776
1004static __inline 777static __inline
1005uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd) 778uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd)
1006{ 779{
1007 return (cmd->result & 0xFFFF); 780 return (cmd->result & 0xFFFF);
1008} 781}
@@ -1117,7 +890,6 @@ void ahd_done(struct ahd_softc*, struct scb*);
1117void ahd_send_async(struct ahd_softc *, char channel, 890void ahd_send_async(struct ahd_softc *, char channel,
1118 u_int target, u_int lun, ac_code, void *); 891 u_int target, u_int lun, ac_code, void *);
1119void ahd_print_path(struct ahd_softc *, struct scb *); 892void ahd_print_path(struct ahd_softc *, struct scb *);
1120void ahd_platform_dump_card_state(struct ahd_softc *ahd);
1121 893
1122#ifdef CONFIG_PCI 894#ifdef CONFIG_PCI
1123#define AHD_PCI_CONFIG 1 895#define AHD_PCI_CONFIG 1
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 91daf0c7fb10..390b53852d4b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -92,27 +92,31 @@ struct pci_driver aic79xx_pci_driver = {
92static void 92static void
93ahd_linux_pci_dev_remove(struct pci_dev *pdev) 93ahd_linux_pci_dev_remove(struct pci_dev *pdev)
94{ 94{
95 struct ahd_softc *ahd; 95 struct ahd_softc *ahd = pci_get_drvdata(pdev);
96 u_long l; 96 u_long s;
97 97
98 /* 98 ahd_lock(ahd, &s);
99 * We should be able to just perform 99 ahd_intr_enable(ahd, FALSE);
100 * the free directly, but check our 100 ahd_unlock(ahd, &s);
101 * list for extra sanity. 101 ahd_free(ahd);
102 */ 102}
103 ahd_list_lock(&l); 103
104 ahd = ahd_find_softc((struct ahd_softc *)pci_get_drvdata(pdev)); 104static void
105 if (ahd != NULL) { 105ahd_linux_pci_inherit_flags(struct ahd_softc *ahd)
106 u_long s; 106{
107 107 struct pci_dev *pdev = ahd->dev_softc, *master_pdev;
108 TAILQ_REMOVE(&ahd_tailq, ahd, links); 108 unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
109 ahd_list_unlock(&l); 109
110 ahd_lock(ahd, &s); 110 master_pdev = pci_get_slot(pdev->bus, master_devfn);
111 ahd_intr_enable(ahd, FALSE); 111 if (master_pdev) {
112 ahd_unlock(ahd, &s); 112 struct ahd_softc *master = pci_get_drvdata(master_pdev);
113 ahd_free(ahd); 113 if (master) {
114 } else 114 ahd->flags &= ~AHD_BIOS_ENABLED;
115 ahd_list_unlock(&l); 115 ahd->flags |= master->flags & AHD_BIOS_ENABLED;
116 } else
117 printk(KERN_ERR "aic79xx: no multichannel peer found!\n");
118 pci_dev_put(master_pdev);
119 }
116} 120}
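
ahd_linux_pci_inherit_flags() above finds the multifunction peer by rebuilding the devfn of function 0 in the same slot with PCI_DEVFN(PCI_SLOT(pdev->devfn), 0). A quick stand-alone check of that arithmetic; the DEMO_ macros simply mirror how Linux encodes devfn as (slot << 3) | function and are only here for illustration:

#include <assert.h>

#define DEMO_PCI_SLOT(devfn)      (((devfn) >> 3) & 0x1f)
#define DEMO_PCI_DEVFN(slot, fn)  ((((slot) & 0x1f) << 3) | ((fn) & 0x07))

int main(void)
{
	unsigned int devfn = 0x41;	/* slot 8, function 1 */

	assert(DEMO_PCI_SLOT(devfn) == 8);
	/* function 0 of the same slot, i.e. master_devfn above */
	assert(DEMO_PCI_DEVFN(DEMO_PCI_SLOT(devfn), 0) == 0x40);
	return 0;
}
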
117 121
118static int 122static int
@@ -125,22 +129,6 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
125 char *name; 129 char *name;
126 int error; 130 int error;
127 131
128 /*
129 * Some BIOSen report the same device multiple times.
130 */
131 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
132 struct pci_dev *probed_pdev;
133
134 probed_pdev = ahd->dev_softc;
135 if (probed_pdev->bus->number == pdev->bus->number
136 && probed_pdev->devfn == pdev->devfn)
137 break;
138 }
139 if (ahd != NULL) {
140 /* Skip duplicate. */
141 return (-ENODEV);
142 }
143
144 pci = pdev; 132 pci = pdev;
145 entry = ahd_find_pci_device(pci); 133 entry = ahd_find_pci_device(pci);
146 if (entry == NULL) 134 if (entry == NULL)
@@ -177,15 +165,12 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
177 if (memsize >= 0x8000000000ULL 165 if (memsize >= 0x8000000000ULL
178 && pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) { 166 && pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
179 ahd->flags |= AHD_64BIT_ADDRESSING; 167 ahd->flags |= AHD_64BIT_ADDRESSING;
180 ahd->platform_data->hw_dma_mask = DMA_64BIT_MASK;
181 } else if (memsize > 0x80000000 168 } else if (memsize > 0x80000000
182 && pci_set_dma_mask(pdev, mask_39bit) == 0) { 169 && pci_set_dma_mask(pdev, mask_39bit) == 0) {
183 ahd->flags |= AHD_39BIT_ADDRESSING; 170 ahd->flags |= AHD_39BIT_ADDRESSING;
184 ahd->platform_data->hw_dma_mask = mask_39bit;
185 } 171 }
186 } else { 172 } else {
187 pci_set_dma_mask(pdev, DMA_32BIT_MASK); 173 pci_set_dma_mask(pdev, DMA_32BIT_MASK);
188 ahd->platform_data->hw_dma_mask = DMA_32BIT_MASK;
189 } 174 }
190 ahd->dev_softc = pci; 175 ahd->dev_softc = pci;
191 error = ahd_pci_config(ahd, entry); 176 error = ahd_pci_config(ahd, entry);
@@ -193,16 +178,17 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
193 ahd_free(ahd); 178 ahd_free(ahd);
194 return (-error); 179 return (-error);
195 } 180 }
181
182 /*
183 * Second Function PCI devices need to inherit some
184 * settings from function 0.
185 */
186 if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
187 ahd_linux_pci_inherit_flags(ahd);
188
196 pci_set_drvdata(pdev, ahd); 189 pci_set_drvdata(pdev, ahd);
197 if (aic79xx_detect_complete) { 190
198#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 191 ahd_linux_register_host(ahd, &aic79xx_driver_template);
199 ahd_linux_register_host(ahd, &aic79xx_driver_template);
200#else
201 printf("aic79xx: ignoring PCI device found after "
202 "initialization\n");
203 return (-ENODEV);
204#endif
205 }
206 return (0); 192 return (0);
207} 193}
208 194
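Both of the new inherit_flags helpers (this one and its aic7xxx twin further down) locate the function-0 peer purely by devfn arithmetic: same slot, function bits cleared. A standalone illustration; the macro bodies below are the usual <linux/pci.h> definitions reproduced here and should be treated as part of the sketch rather than quoted from this patch.

#include <stdio.h>

/* devfn packs a 5-bit slot number and a 3-bit function number. */
#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)         ((devfn) & 0x07)

int main(void)
{
        unsigned int devfn = PCI_DEVFN(4, 1);                      /* slot 4, function 1 */
        unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(devfn), 0); /* same slot, function 0 */

        printf("devfn=%#x slot=%u func=%u master_devfn=%#x\n",
               devfn, PCI_SLOT(devfn), PCI_FUNC(devfn), master_devfn);
        /* Prints: devfn=0x21 slot=4 func=1 master_devfn=0x20 */
        return 0;
}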
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 703f6e44889d..2131db60018a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -283,7 +283,6 @@ int
283ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) 283ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
284{ 284{
285 struct scb_data *shared_scb_data; 285 struct scb_data *shared_scb_data;
286 u_long l;
287 u_int command; 286 u_int command;
288 uint32_t devconfig; 287 uint32_t devconfig;
289 uint16_t subvendor; 288 uint16_t subvendor;
@@ -373,16 +372,9 @@ ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
373 * Allow interrupts now that we are completely setup. 372 * Allow interrupts now that we are completely setup.
374 */ 373 */
375 error = ahd_pci_map_int(ahd); 374 error = ahd_pci_map_int(ahd);
376 if (error != 0) 375 if (!error)
377 return (error); 376 ahd->init_level++;
378 377 return error;
379 ahd_list_lock(&l);
380 /*
381 * Link this softc in with all other ahd instances.
382 */
383 ahd_softc_insert(ahd);
384 ahd_list_unlock(&l);
385 return (0);
386} 378}
387 379
388/* 380/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index e01cd6175e34..39a27840fce6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -49,10 +49,53 @@ static void ahd_dump_target_state(struct ahd_softc *ahd,
49 u_int our_id, char channel, 49 u_int our_id, char channel,
50 u_int target_id, u_int target_offset); 50 u_int target_id, u_int target_offset);
51static void ahd_dump_device_state(struct info_str *info, 51static void ahd_dump_device_state(struct info_str *info,
52 struct ahd_linux_device *dev); 52 struct scsi_device *sdev);
53static int ahd_proc_write_seeprom(struct ahd_softc *ahd, 53static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
54 char *buffer, int length); 54 char *buffer, int length);
55 55
56/*
57 * Table of syncrates that don't follow the "divisible by 4"
58 * rule. This table will be expanded in future SCSI specs.
59 */
60static struct {
61 u_int period_factor;
62 u_int period; /* in 100ths of ns */
63} scsi_syncrates[] = {
64 { 0x08, 625 }, /* FAST-160 */
65 { 0x09, 1250 }, /* FAST-80 */
66 { 0x0a, 2500 }, /* FAST-40 40MHz */
67 { 0x0b, 3030 }, /* FAST-40 33MHz */
68 { 0x0c, 5000 } /* FAST-20 */
69};
70
71/*
72 * Return the frequency in kHz corresponding to the given
73 * sync period factor.
74 */
75static u_int
76ahd_calc_syncsrate(u_int period_factor)
77{
78 int i;
79 int num_syncrates;
80
81 num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
82 /* See if the period is in the "exception" table */
83 for (i = 0; i < num_syncrates; i++) {
84
85 if (period_factor == scsi_syncrates[i].period_factor) {
86 /* Period in kHz */
87 return (100000000 / scsi_syncrates[i].period);
88 }
89 }
90
91 /*
92 * Wasn't in the table, so use the standard
93 * 4 times conversion.
94 */
95 return (10000000 / (period_factor * 4 * 10));
96}
97
98
56static void 99static void
57copy_mem_info(struct info_str *info, char *data, int len) 100copy_mem_info(struct info_str *info, char *data, int len)
58{ 101{
@@ -109,7 +152,7 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
109 speed = 3300; 152 speed = 3300;
110 freq = 0; 153 freq = 0;
111 if (tinfo->offset != 0) { 154 if (tinfo->offset != 0) {
112 freq = aic_calc_syncsrate(tinfo->period); 155 freq = ahd_calc_syncsrate(tinfo->period);
113 speed = freq; 156 speed = freq;
114 } 157 }
115 speed *= (0x01 << tinfo->width); 158 speed *= (0x01 << tinfo->width);
@@ -167,6 +210,7 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
167 u_int target_offset) 210 u_int target_offset)
168{ 211{
169 struct ahd_linux_target *targ; 212 struct ahd_linux_target *targ;
213 struct scsi_target *starget;
170 struct ahd_initiator_tinfo *tinfo; 214 struct ahd_initiator_tinfo *tinfo;
171 struct ahd_tmode_tstate *tstate; 215 struct ahd_tmode_tstate *tstate;
172 int lun; 216 int lun;
@@ -176,20 +220,20 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
176 copy_info(info, "Target %d Negotiation Settings\n", target_id); 220 copy_info(info, "Target %d Negotiation Settings\n", target_id);
177 copy_info(info, "\tUser: "); 221 copy_info(info, "\tUser: ");
178 ahd_format_transinfo(info, &tinfo->user); 222 ahd_format_transinfo(info, &tinfo->user);
179 targ = ahd->platform_data->targets[target_offset]; 223 starget = ahd->platform_data->starget[target_offset];
180 if (targ == NULL) 224 if (starget == NULL)
181 return; 225 return;
226 targ = scsi_transport_target_data(starget);
182 227
183 copy_info(info, "\tGoal: "); 228 copy_info(info, "\tGoal: ");
184 ahd_format_transinfo(info, &tinfo->goal); 229 ahd_format_transinfo(info, &tinfo->goal);
185 copy_info(info, "\tCurr: "); 230 copy_info(info, "\tCurr: ");
186 ahd_format_transinfo(info, &tinfo->curr); 231 ahd_format_transinfo(info, &tinfo->curr);
187 copy_info(info, "\tTransmission Errors %ld\n", targ->errors_detected);
188 232
189 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 233 for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
190 struct ahd_linux_device *dev; 234 struct scsi_device *dev;
191 235
192 dev = targ->devices[lun]; 236 dev = targ->sdev[lun];
193 237
194 if (dev == NULL) 238 if (dev == NULL)
195 continue; 239 continue;
@@ -199,10 +243,13 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
199} 243}
200 244
201static void 245static void
202ahd_dump_device_state(struct info_str *info, struct ahd_linux_device *dev) 246ahd_dump_device_state(struct info_str *info, struct scsi_device *sdev)
203{ 247{
248 struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
249
204 copy_info(info, "\tChannel %c Target %d Lun %d Settings\n", 250 copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
205 dev->target->channel + 'A', dev->target->target, dev->lun); 251 sdev->sdev_target->channel + 'A',
252 sdev->sdev_target->id, sdev->lun);
206 253
207 copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued); 254 copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
208 copy_info(info, "\t\tCommands Active %d\n", dev->active); 255 copy_info(info, "\t\tCommands Active %d\n", dev->active);
@@ -278,36 +325,16 @@ done:
278 * Return information to handle /proc support for the driver. 325 * Return information to handle /proc support for the driver.
279 */ 326 */
280int 327int
281#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
282ahd_linux_proc_info(char *buffer, char **start, off_t offset,
283 int length, int hostno, int inout)
284#else
285ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start, 328ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
286 off_t offset, int length, int inout) 329 off_t offset, int length, int inout)
287#endif
288{ 330{
289 struct ahd_softc *ahd; 331 struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
290 struct info_str info; 332 struct info_str info;
291 char ahd_info[256]; 333 char ahd_info[256];
292 u_long l;
293 u_int max_targ; 334 u_int max_targ;
294 u_int i; 335 u_int i;
295 int retval; 336 int retval;
296 337
297 retval = -EINVAL;
298 ahd_list_lock(&l);
299#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
300 TAILQ_FOREACH(ahd, &ahd_tailq, links) {
301 if (ahd->platform_data->host->host_no == hostno)
302 break;
303 }
304#else
305 ahd = ahd_find_softc(*(struct ahd_softc **)shost->hostdata);
306#endif
307
308 if (ahd == NULL)
309 goto done;
310
311 /* Has data been written to the file? */ 338 /* Has data been written to the file? */
312 if (inout == TRUE) { 339 if (inout == TRUE) {
313 retval = ahd_proc_write_seeprom(ahd, buffer, length); 340 retval = ahd_proc_write_seeprom(ahd, buffer, length);
@@ -357,6 +384,5 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
357 } 384 }
358 retval = info.pos > info.offset ? info.pos - info.offset : 0; 385 retval = info.pos > info.offset ? info.pos - info.offset : 0;
359done: 386done:
360 ahd_list_unlock(&l);
361 return (retval); 387 return (retval);
362} 388}
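The ahd_calc_syncsrate() helper added in this file converts a SCSI sync period factor to kHz: exception table first, then the standard factor-times-4-ns rule. A self-contained check of that arithmetic, with the table and formula copied from the hunk above and a main() added purely for illustration:

#include <stdio.h>

/* Exception table from the hunk; period is in 100ths of a nanosecond. */
static const struct {
        unsigned int factor;
        unsigned int period;
} rates[] = {
        { 0x08,  625 },         /* FAST-160 */
        { 0x09, 1250 },         /* FAST-80  */
        { 0x0a, 2500 },         /* FAST-40, 40MHz */
        { 0x0b, 3030 },         /* FAST-40, 33MHz */
        { 0x0c, 5000 },         /* FAST-20  */
};

static unsigned int calc_syncsrate_khz(unsigned int factor)
{
        unsigned int i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
                if (factor == rates[i].factor)
                        return 100000000 / rates[i].period;
        /* Not in the table: period is factor * 4 ns. */
        return 10000000 / (factor * 4 * 10);
}

int main(void)
{
        printf("0x08 -> %u kHz\n", calc_syncsrate_khz(0x08)); /* 160000 (FAST-160) */
        printf("0x0c -> %u kHz\n", calc_syncsrate_khz(0x0c)); /*  20000 (FAST-20)  */
        printf("0x19 -> %u kHz\n", calc_syncsrate_khz(0x19)); /*  10000 (10 MHz)   */
        return 0;
}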
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 088cbc23743d..91d294c6334e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -37,7 +37,7 @@
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES. 38 * POSSIBILITY OF SUCH DAMAGES.
39 * 39 *
40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#79 $ 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $
41 * 41 *
42 * $FreeBSD$ 42 * $FreeBSD$
43 */ 43 */
@@ -243,7 +243,7 @@ typedef enum {
243 */ 243 */
244 AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA, 244 AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA,
245 AHC_AIC7860_FE = AHC_AIC7850_FE, 245 AHC_AIC7860_FE = AHC_AIC7850_FE,
246 AHC_AIC7870_FE = AHC_TARGETMODE, 246 AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE,
247 AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA, 247 AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA,
248 /* 248 /*
249 * Although we have space for both the initiator and 249 * Although we have space for both the initiator and
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index 810ec700d9fc..e196d83b93c7 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -39,7 +39,7 @@
39 * 39 *
40 * $FreeBSD$ 40 * $FreeBSD$
41 */ 41 */
42VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $" 42VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
43 43
44/* 44/*
45 * This file is processed by the aic7xxx_asm utility for use in assembling 45 * This file is processed by the aic7xxx_asm utility for use in assembling
@@ -1306,7 +1306,6 @@ scratch_ram {
1306 */ 1306 */
1307 MWI_RESIDUAL { 1307 MWI_RESIDUAL {
1308 size 1 1308 size 1
1309 alias TARG_IMMEDIATE_SCB
1310 } 1309 }
1311 /* 1310 /*
1312 * SCBID of the next SCB to be started by the controller. 1311 * SCBID of the next SCB to be started by the controller.
@@ -1461,6 +1460,7 @@ scratch_ram {
1461 */ 1460 */
1462 LAST_MSG { 1461 LAST_MSG {
1463 size 1 1462 size 1
1463 alias TARG_IMMEDIATE_SCB
1464 } 1464 }
1465 1465
1466 /* 1466 /*
diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq
index d84b741fbab5..15196390e28d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx/aic7xxx.seq
@@ -40,7 +40,7 @@
40 * $FreeBSD$ 40 * $FreeBSD$
41 */ 41 */
42 42
43VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $" 43VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $"
44PATCH_ARG_LIST = "struct ahc_softc *ahc" 44PATCH_ARG_LIST = "struct ahc_softc *ahc"
45PREFIX = "ahc_" 45PREFIX = "ahc_"
46 46
@@ -679,6 +679,7 @@ await_busfree:
 679 clr SCSIBUSL; /* Prevent bit leakage during SELTO */ 679 clr SCSIBUSL; /* Prevent bit leakage during SELTO */
680 } 680 }
681 and SXFRCTL0, ~SPIOEN; 681 and SXFRCTL0, ~SPIOEN;
682 mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
682 test SSTAT1,REQINIT|BUSFREE jz .; 683 test SSTAT1,REQINIT|BUSFREE jz .;
683 test SSTAT1, BUSFREE jnz poll_for_work; 684 test SSTAT1, BUSFREE jnz poll_for_work;
684 mvi MISSED_BUSFREE call set_seqint; 685 mvi MISSED_BUSFREE call set_seqint;
@@ -1097,7 +1098,7 @@ ultra2_dmahalt:
1097 test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; 1098 test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg;
1098 if ((ahc->flags & AHC_TARGETROLE) != 0) { 1099 if ((ahc->flags & AHC_TARGETROLE) != 0) {
1099 test SSTAT0, TARGET jz dma_last_sg; 1100 test SSTAT0, TARGET jz dma_last_sg;
1100 if ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0) { 1101 if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) {
1101 test DMAPARAMS, DIRECTION jz dma_mid_sg; 1102 test DMAPARAMS, DIRECTION jz dma_mid_sg;
1102 } 1103 }
1103 } 1104 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 468d612a44f6..3cb07e114e89 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -28,9 +28,7 @@
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE. 29 * SUCH DAMAGE.
30 * 30 *
31 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#17 $ 31 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $
32 *
33 * $FreeBSD$
34 */ 32 */
35 33
36/* 34/*
@@ -64,7 +62,6 @@
64 * is preceded by an initial zero (leading 0, followed by 16-bits, MSB 62 * is preceded by an initial zero (leading 0, followed by 16-bits, MSB
65 * first). The clock cycling from low to high initiates the next data 63 * first). The clock cycling from low to high initiates the next data
66 * bit to be sent from the chip. 64 * bit to be sent from the chip.
67 *
68 */ 65 */
69 66
70#ifdef __linux__ 67#ifdef __linux__
@@ -81,14 +78,22 @@
81 * Right now, we only have to read the SEEPROM. But we make it easier to 78 * Right now, we only have to read the SEEPROM. But we make it easier to
82 * add other 93Cx6 functions. 79 * add other 93Cx6 functions.
83 */ 80 */
84static struct seeprom_cmd { 81struct seeprom_cmd {
85 uint8_t len; 82 uint8_t len;
86 uint8_t bits[9]; 83 uint8_t bits[11];
87} seeprom_read = {3, {1, 1, 0}}; 84};
88 85
86/* Short opcodes for the c46 */
89static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; 87static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
90static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; 88static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
89
90/* Long opcodes for the C56/C66 */
91static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
92static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
93
94/* Common opcodes */
91static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; 95static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
96static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
92 97
93/* 98/*
94 * Wait for the SEERDY to go high; about 800 ns. 99 * Wait for the SEERDY to go high; about 800 ns.
@@ -222,12 +227,25 @@ int
222ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, 227ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
223 u_int start_addr, u_int count) 228 u_int start_addr, u_int count)
224{ 229{
230 struct seeprom_cmd *ewen, *ewds;
225 uint16_t v; 231 uint16_t v;
226 uint8_t temp; 232 uint8_t temp;
227 int i, k; 233 int i, k;
228 234
229 /* Place the chip into write-enable mode */ 235 /* Place the chip into write-enable mode */
230 send_seeprom_cmd(sd, &seeprom_ewen); 236 if (sd->sd_chip == C46) {
237 ewen = &seeprom_ewen;
238 ewds = &seeprom_ewds;
239 } else if (sd->sd_chip == C56_66) {
240 ewen = &seeprom_long_ewen;
241 ewds = &seeprom_long_ewds;
242 } else {
243 printf("ahc_write_seeprom: unsupported seeprom type %d\n",
244 sd->sd_chip);
245 return (0);
246 }
247
248 send_seeprom_cmd(sd, ewen);
231 reset_seeprom(sd); 249 reset_seeprom(sd);
232 250
233 /* Write all requested data out to the seeprom. */ 251 /* Write all requested data out to the seeprom. */
@@ -277,7 +295,7 @@ ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
277 } 295 }
278 296
279 /* Put the chip back into write-protect mode */ 297 /* Put the chip back into write-protect mode */
280 send_seeprom_cmd(sd, &seeprom_ewds); 298 send_seeprom_cmd(sd, ewds);
281 reset_seeprom(sd); 299 reset_seeprom(sd);
282 300
283 return (1); 301 return (1);
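The write path above now chooses between the 9-bit C46 opcodes and the 11-bit C56/66 ones at run time; the command bits are the same, the longer parts simply carry a wider (don't-care) address field after the EWEN/EWDS opcode. A small, hypothetical helper, not part of the driver, that packs a seeprom_cmd MSB-first into an integer so the two encodings are easy to compare (struct layout and bit patterns taken from the hunk; trailing unspecified bits default to zero):

#include <stdint.h>
#include <stdio.h>

struct seeprom_cmd {
        uint8_t len;
        uint8_t bits[11];
};

static const struct seeprom_cmd ewen      = { 9,  {1, 0, 0, 1, 1, 0, 0, 0, 0} };
static const struct seeprom_cmd long_ewen = { 11, {1, 0, 0, 1, 1, 0, 0, 0, 0} };

/* Shift the MSB-first bit list into one value for inspection. */
static unsigned int pack_cmd(const struct seeprom_cmd *cmd)
{
        unsigned int v = 0;
        int i;

        for (i = 0; i < cmd->len; i++)
                v = (v << 1) | cmd->bits[i];
        return v;
}

int main(void)
{
        printf("C46    EWEN: %d bits -> %#x\n", ewen.len, pack_cmd(&ewen));
        printf("C56/66 EWEN: %d bits -> %#x\n", long_ewen.len, pack_cmd(&long_ewen));
        return 0;
}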
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 7bc01e41bcce..58ac46103eb6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -37,9 +37,7 @@
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES. 38 * POSSIBILITY OF SUCH DAMAGES.
39 * 39 *
40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $ 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
41 *
42 * $FreeBSD$
43 */ 41 */
44 42
45#ifdef __linux__ 43#ifdef __linux__
@@ -287,10 +285,19 @@ ahc_restart(struct ahc_softc *ahc)
287 ahc_outb(ahc, SEQ_FLAGS2, 285 ahc_outb(ahc, SEQ_FLAGS2,
288 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); 286 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
289 } 287 }
288
289 /*
290 * Clear any pending sequencer interrupt. It is no
291 * longer relevant since we're resetting the Program
292 * Counter.
293 */
294 ahc_outb(ahc, CLRINT, CLRSEQINT);
295
290 ahc_outb(ahc, MWI_RESIDUAL, 0); 296 ahc_outb(ahc, MWI_RESIDUAL, 0);
291 ahc_outb(ahc, SEQCTL, ahc->seqctl); 297 ahc_outb(ahc, SEQCTL, ahc->seqctl);
292 ahc_outb(ahc, SEQADDR0, 0); 298 ahc_outb(ahc, SEQADDR0, 0);
293 ahc_outb(ahc, SEQADDR1, 0); 299 ahc_outb(ahc, SEQADDR1, 0);
300
294 ahc_unpause(ahc); 301 ahc_unpause(ahc);
295} 302}
296 303
@@ -1174,19 +1181,20 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
1174 scb_index); 1181 scb_index);
1175 } 1182 }
1176#endif 1183#endif
1177 /*
1178 * Force a renegotiation with this target just in
1179 * case the cable was pulled and will later be
1180 * re-attached. The target may forget its negotiation
1181 * settings with us should it attempt to reselect
1182 * during the interruption. The target will not issue
1183 * a unit attention in this case, so we must always
1184 * renegotiate.
1185 */
1186 ahc_scb_devinfo(ahc, &devinfo, scb); 1184 ahc_scb_devinfo(ahc, &devinfo, scb);
1187 ahc_force_renegotiation(ahc, &devinfo);
1188 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1185 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1189 ahc_freeze_devq(ahc, scb); 1186 ahc_freeze_devq(ahc, scb);
1187
1188 /*
1189 * Cancel any pending transactions on the device
1190 * now that it seems to be missing. This will
1191 * also revert us to async/narrow transfers until
1192 * we can renegotiate with the device.
1193 */
1194 ahc_handle_devreset(ahc, &devinfo,
1195 CAM_SEL_TIMEOUT,
1196 "Selection Timeout",
1197 /*verbose_level*/1);
1190 } 1198 }
1191 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1199 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1192 ahc_restart(ahc); 1200 ahc_restart(ahc);
@@ -3763,8 +3771,9 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3763 /*period*/0, /*offset*/0, /*ppr_options*/0, 3771 /*period*/0, /*offset*/0, /*ppr_options*/0,
3764 AHC_TRANS_CUR, /*paused*/TRUE); 3772 AHC_TRANS_CUR, /*paused*/TRUE);
3765 3773
3766 ahc_send_async(ahc, devinfo->channel, devinfo->target, 3774 if (status != CAM_SEL_TIMEOUT)
3767 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); 3775 ahc_send_async(ahc, devinfo->channel, devinfo->target,
3776 CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
3768 3777
3769 if (message != NULL 3778 if (message != NULL
3770 && (verbose_level <= bootverbose)) 3779 && (verbose_level <= bootverbose))
@@ -4003,14 +4012,6 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
4003 * to disturb the integrity of the bus. 4012 * to disturb the integrity of the bus.
4004 */ 4013 */
4005 ahc_pause(ahc); 4014 ahc_pause(ahc);
4006 if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) {
4007 /*
4008 * The chip has not been initialized since
4009 * PCI/EISA/VLB bus reset. Don't trust
4010 * "left over BIOS data".
4011 */
4012 ahc->flags |= AHC_NO_BIOS_INIT;
4013 }
4014 sxfrctl1_b = 0; 4015 sxfrctl1_b = 0;
4015 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 4016 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
4016 u_int sblkctl; 4017 u_int sblkctl;
@@ -5036,14 +5037,23 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
5036 ahc->flags |= AHC_ALL_INTERRUPTS; 5037 ahc->flags |= AHC_ALL_INTERRUPTS;
5037 paused = FALSE; 5038 paused = FALSE;
5038 do { 5039 do {
5039 if (paused) 5040 if (paused) {
5040 ahc_unpause(ahc); 5041 ahc_unpause(ahc);
5042 /*
5043 * Give the sequencer some time to service
5044 * any active selections.
5045 */
5046 ahc_delay(500);
5047 }
5041 ahc_intr(ahc); 5048 ahc_intr(ahc);
5042 ahc_pause(ahc); 5049 ahc_pause(ahc);
5043 paused = TRUE; 5050 paused = TRUE;
5044 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); 5051 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
5045 ahc_clear_critical_section(ahc);
5046 intstat = ahc_inb(ahc, INTSTAT); 5052 intstat = ahc_inb(ahc, INTSTAT);
5053 if ((intstat & INT_PEND) == 0) {
5054 ahc_clear_critical_section(ahc);
5055 intstat = ahc_inb(ahc, INTSTAT);
5056 }
5047 } while (--maxloops 5057 } while (--maxloops
5048 && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) 5058 && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
5049 && ((intstat & INT_PEND) != 0 5059 && ((intstat & INT_PEND) != 0
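The reworked ahc_pause_and_flushwork() loop above adds a 500 microsecond breather after each unpause so active selections can complete, and it only steps the sequencer past critical sections once INTSTAT shows nothing pending. Below is a schematic sketch of that loop shape only; every identifier is a stub standing in for the real controller accessors, and the extra termination conditions of the real loop are omitted.

#include <stdio.h>

#define INT_PEND 0x0f                   /* stand-in for the pending-interrupt bits */

/* Stubs; the real driver talks to the controller here. */
static void chip_unpause(void)                  { }
static void chip_pause(void)                    { }
static void service_interrupts(void)            { }
static void step_past_critical_section(void)    { }
static void delay_us(unsigned int us)           { (void)us; }
static unsigned int read_intstat(void)          { return 0; }   /* pretend idle */

static void pause_and_flushwork(void)
{
        int maxloops = 1000;
        int paused = 0;
        unsigned int intstat;

        do {
                if (paused) {
                        chip_unpause();
                        delay_us(500);  /* let active selections finish */
                }
                service_interrupts();
                chip_pause();
                paused = 1;
                intstat = read_intstat();
                if ((intstat & INT_PEND) == 0) {
                        /* Only advance past critical sections when quiet. */
                        step_past_critical_section();
                        intstat = read_intstat();
                }
        } while (--maxloops && (intstat & INT_PEND));
}

int main(void)
{
        pause_and_flushwork();
        puts("flushed");
        return 0;
}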
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 687f19e9cf03..c932b3b94490 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -125,12 +125,6 @@
125 125
126static struct scsi_transport_template *ahc_linux_transport_template = NULL; 126static struct scsi_transport_template *ahc_linux_transport_template = NULL;
127 127
128/*
129 * Include aiclib.c as part of our
130 * "module dependencies are hard" work around.
131 */
132#include "aiclib.c"
133
134#include <linux/init.h> /* __setup */ 128#include <linux/init.h> /* __setup */
135#include <linux/mm.h> /* For fetching system memory size */ 129#include <linux/mm.h> /* For fetching system memory size */
136#include <linux/blkdev.h> /* For block_size() */ 130#include <linux/blkdev.h> /* For block_size() */
@@ -391,7 +385,6 @@ static int ahc_linux_run_command(struct ahc_softc*,
391 struct ahc_linux_device *, 385 struct ahc_linux_device *,
392 struct scsi_cmnd *); 386 struct scsi_cmnd *);
393static void ahc_linux_setup_tag_info_global(char *p); 387static void ahc_linux_setup_tag_info_global(char *p);
394static aic_option_callback_t ahc_linux_setup_tag_info;
395static int aic7xxx_setup(char *s); 388static int aic7xxx_setup(char *s);
396 389
397static int ahc_linux_unit; 390static int ahc_linux_unit;
@@ -635,6 +628,8 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
635 628
636 targ->sdev[sdev->lun] = sdev; 629 targ->sdev[sdev->lun] = sdev;
637 630
631 spi_period(starget) = 0;
632
638 return 0; 633 return 0;
639} 634}
640 635
@@ -918,6 +913,86 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
918 } 913 }
919} 914}
920 915
916static char *
917ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
918 void (*callback)(u_long, int, int, int32_t),
919 u_long callback_arg)
920{
921 char *tok_end;
922 char *tok_end2;
923 int i;
924 int instance;
925 int targ;
926 int done;
927 char tok_list[] = {'.', ',', '{', '}', '\0'};
928
929 /* All options use a ':' name/arg separator */
930 if (*opt_arg != ':')
931 return (opt_arg);
932 opt_arg++;
933 instance = -1;
934 targ = -1;
935 done = FALSE;
936 /*
937 * Restore separator that may be in
938 * the middle of our option argument.
939 */
940 tok_end = strchr(opt_arg, '\0');
941 if (tok_end < end)
942 *tok_end = ',';
943 while (!done) {
944 switch (*opt_arg) {
945 case '{':
946 if (instance == -1) {
947 instance = 0;
948 } else {
949 if (depth > 1) {
950 if (targ == -1)
951 targ = 0;
952 } else {
953 printf("Malformed Option %s\n",
954 opt_name);
955 done = TRUE;
956 }
957 }
958 opt_arg++;
959 break;
960 case '}':
961 if (targ != -1)
962 targ = -1;
963 else if (instance != -1)
964 instance = -1;
965 opt_arg++;
966 break;
967 case ',':
968 case '.':
969 if (instance == -1)
970 done = TRUE;
971 else if (targ >= 0)
972 targ++;
973 else if (instance >= 0)
974 instance++;
975 opt_arg++;
976 break;
977 case '\0':
978 done = TRUE;
979 break;
980 default:
981 tok_end = end;
982 for (i = 0; tok_list[i]; i++) {
983 tok_end2 = strchr(opt_arg, tok_list[i]);
984 if ((tok_end2) && (tok_end2 < tok_end))
985 tok_end = tok_end2;
986 }
987 callback(callback_arg, instance, targ,
988 simple_strtol(opt_arg, NULL, 0));
989 opt_arg = tok_end;
990 break;
991 }
992 }
993 return (opt_arg);
994}
995
921/* 996/*
922 * Handle Linux boot parameters. This routine allows for assigning a value 997 * Handle Linux boot parameters. This routine allows for assigning a value
923 * to a parameter with a ':' between the parameter and the value. 998 * to a parameter with a ':' between the parameter and the value.
@@ -972,7 +1047,7 @@ aic7xxx_setup(char *s)
972 if (strncmp(p, "global_tag_depth", n) == 0) { 1047 if (strncmp(p, "global_tag_depth", n) == 0) {
973 ahc_linux_setup_tag_info_global(p + n); 1048 ahc_linux_setup_tag_info_global(p + n);
974 } else if (strncmp(p, "tag_info", n) == 0) { 1049 } else if (strncmp(p, "tag_info", n) == 0) {
975 s = aic_parse_brace_option("tag_info", p + n, end, 1050 s = ahc_parse_brace_option("tag_info", p + n, end,
976 2, ahc_linux_setup_tag_info, 0); 1051 2, ahc_linux_setup_tag_info, 0);
977 } else if (p[n] == ':') { 1052 } else if (p[n] == ':') {
978 *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); 1053 *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
@@ -1612,9 +1687,9 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
1612 if (channel == 'B') 1687 if (channel == 'B')
1613 target_offset += 8; 1688 target_offset += 8;
1614 starget = ahc->platform_data->starget[target_offset]; 1689 starget = ahc->platform_data->starget[target_offset];
1615 targ = scsi_transport_target_data(starget); 1690 if (starget == NULL)
1616 if (targ == NULL)
1617 break; 1691 break;
1692 targ = scsi_transport_target_data(starget);
1618 1693
1619 target_ppr_options = 1694 target_ppr_options =
1620 (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) 1695 (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
@@ -2329,8 +2404,6 @@ ahc_platform_dump_card_state(struct ahc_softc *ahc)
2329{ 2404{
2330} 2405}
2331 2406
2332static void ahc_linux_exit(void);
2333
2334static void ahc_linux_set_width(struct scsi_target *starget, int width) 2407static void ahc_linux_set_width(struct scsi_target *starget, int width)
2335{ 2408{
2336 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2409 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
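The new ahc_parse_brace_option() above walks nested brace groups, advancing the controller instance at the outer level and the target at the inner level, and feeds each number it finds to the supplied callback. The program below does not re-run the parser; it replays a hand-traced expansion of one example argument through a callback with the same shape as ahc_linux_setup_tag_info(), so the string used and the traced tuples are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* Same callback shape as ahc_linux_setup_tag_info(). */
static void show_tag_info(unsigned long arg, int instance, int targ, int32_t value)
{
        (void)arg;
        printf("controller %d, target %d -> tag depth %d\n", instance, targ, value);
}

int main(void)
{
        /*
         * Hand trace of
         *      aic7xxx=tag_info:{{8,12},{,,4}}
         * through the parser above: the outer braces select the controller
         * instance, the inner braces the target, and the empty slots in the
         * second group (",,") skip targets 0 and 1 without a callback.
         */
        show_tag_info(0, 0, 0, 8);      /* controller 0, target 0 */
        show_tag_info(0, 0, 1, 12);     /* controller 0, target 1 */
        show_tag_info(0, 1, 2, 4);      /* controller 1, target 2 */
        return 0;
}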
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 0e47ac217549..c52996269240 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -265,7 +265,7 @@ ahc_scb_timer_reset(struct scb *scb, u_int usec)
265/***************************** SMP support ************************************/ 265/***************************** SMP support ************************************/
266#include <linux/spinlock.h> 266#include <linux/spinlock.h>
267 267
268#define AIC7XXX_DRIVER_VERSION "6.2.36" 268#define AIC7XXX_DRIVER_VERSION "7.0"
269 269
270/*************************** Device Data Structures ***************************/ 270/*************************** Device Data Structures ***************************/
271/* 271/*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 9d318ce2c993..0d44a6907dd2 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -149,6 +149,27 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
149 ahc_free(ahc); 149 ahc_free(ahc);
150} 150}
151 151
152static void
153ahc_linux_pci_inherit_flags(struct ahc_softc *ahc)
154{
155 struct pci_dev *pdev = ahc->dev_softc, *master_pdev;
156 unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
157
158 master_pdev = pci_get_slot(pdev->bus, master_devfn);
159 if (master_pdev) {
160 struct ahc_softc *master = pci_get_drvdata(master_pdev);
161 if (master) {
162 ahc->flags &= ~AHC_BIOS_ENABLED;
163 ahc->flags |= master->flags & AHC_BIOS_ENABLED;
164
165 ahc->flags &= ~AHC_PRIMARY_CHANNEL;
166 ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL;
167 } else
168 printk(KERN_ERR "aic7xxx: no multichannel peer found!\n");
169 pci_dev_put(master_pdev);
170 }
171}
172
152static int 173static int
153ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 174ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
154{ 175{
@@ -203,6 +224,14 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
203 ahc_free(ahc); 224 ahc_free(ahc);
204 return (-error); 225 return (-error);
205 } 226 }
227
228 /*
229 * Second Function PCI devices need to inherit some
230 * settings from function 0.
231 */
232 if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
233 ahc_linux_pci_inherit_flags(ahc);
234
206 pci_set_drvdata(pdev, ahc); 235 pci_set_drvdata(pdev, ahc);
207 ahc_linux_register_host(ahc, &aic7xxx_driver_template); 236 ahc_linux_register_host(ahc, &aic7xxx_driver_template);
208 return (0); 237 return (0);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 3802c91f0b07..04a3506cf340 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -54,6 +54,49 @@ static void ahc_dump_device_state(struct info_str *info,
54static int ahc_proc_write_seeprom(struct ahc_softc *ahc, 54static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
55 char *buffer, int length); 55 char *buffer, int length);
56 56
57/*
58 * Table of syncrates that don't follow the "divisible by 4"
59 * rule. This table will be expanded in future SCSI specs.
60 */
61static struct {
62 u_int period_factor;
63 u_int period; /* in 100ths of ns */
64} scsi_syncrates[] = {
65 { 0x08, 625 }, /* FAST-160 */
66 { 0x09, 1250 }, /* FAST-80 */
67 { 0x0a, 2500 }, /* FAST-40 40MHz */
68 { 0x0b, 3030 }, /* FAST-40 33MHz */
69 { 0x0c, 5000 } /* FAST-20 */
70};
71
72/*
73 * Return the frequency in kHz corresponding to the given
74 * sync period factor.
75 */
76static u_int
77ahc_calc_syncsrate(u_int period_factor)
78{
79 int i;
80 int num_syncrates;
81
82 num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
83 /* See if the period is in the "exception" table */
84 for (i = 0; i < num_syncrates; i++) {
85
86 if (period_factor == scsi_syncrates[i].period_factor) {
87 /* Period in kHz */
88 return (100000000 / scsi_syncrates[i].period);
89 }
90 }
91
92 /*
93 * Wasn't in the table, so use the standard
94 * 4 times conversion.
95 */
96 return (10000000 / (period_factor * 4 * 10));
97}
98
99
57static void 100static void
58copy_mem_info(struct info_str *info, char *data, int len) 101copy_mem_info(struct info_str *info, char *data, int len)
59{ 102{
@@ -106,7 +149,7 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
106 speed = 3300; 149 speed = 3300;
107 freq = 0; 150 freq = 0;
108 if (tinfo->offset != 0) { 151 if (tinfo->offset != 0) {
109 freq = aic_calc_syncsrate(tinfo->period); 152 freq = ahc_calc_syncsrate(tinfo->period);
110 speed = freq; 153 speed = freq;
111 } 154 }
112 speed *= (0x01 << tinfo->width); 155 speed *= (0x01 << tinfo->width);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index 7c1390ed1179..2ce1febca207 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -2,8 +2,8 @@
2 * DO NOT EDIT - This file is automatically generated 2 * DO NOT EDIT - This file is automatically generated
3 * from the following source files: 3 * from the following source files:
4 * 4 *
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8typedef int (ahc_reg_print_t)(u_int, u_int *, u_int); 8typedef int (ahc_reg_print_t)(u_int, u_int *, u_int);
9typedef struct ahc_reg_parse_entry { 9typedef struct ahc_reg_parse_entry {
@@ -1298,7 +1298,6 @@ ahc_reg_print_t ahc_sg_cache_pre_print;
1298#define CMDSIZE_TABLE_TAIL 0x34 1298#define CMDSIZE_TABLE_TAIL 0x34
1299 1299
1300#define MWI_RESIDUAL 0x38 1300#define MWI_RESIDUAL 0x38
1301#define TARG_IMMEDIATE_SCB 0x38
1302 1301
1303#define NEXT_QUEUED_SCB 0x39 1302#define NEXT_QUEUED_SCB 0x39
1304 1303
@@ -1380,6 +1379,7 @@ ahc_reg_print_t ahc_sg_cache_pre_print;
1380#define RETURN_2 0x52 1379#define RETURN_2 0x52
1381 1380
1382#define LAST_MSG 0x53 1381#define LAST_MSG 0x53
1382#define TARG_IMMEDIATE_SCB 0x53
1383 1383
1384#define SCSISEQ_TEMPLATE 0x54 1384#define SCSISEQ_TEMPLATE 0x54
1385#define ENSELO 0x40 1385#define ENSELO 0x40
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 9c713775d44a..88bfd767c51c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -2,8 +2,8 @@
2 * DO NOT EDIT - This file is automatically generated 2 * DO NOT EDIT - This file is automatically generated
3 * from the following source files: 3 * from the following source files:
4 * 4 *
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8 8
9#include "aic7xxx_osm.h" 9#include "aic7xxx_osm.h"
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
index cf411368a871..4cee08521e75 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -2,13 +2,13 @@
2 * DO NOT EDIT - This file is automatically generated 2 * DO NOT EDIT - This file is automatically generated
3 * from the following source files: 3 * from the following source files:
4 * 4 *
5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ 5 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ 6 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
7 */ 7 */
8static uint8_t seqprog[] = { 8static uint8_t seqprog[] = {
9 0xb2, 0x00, 0x00, 0x08, 9 0xb2, 0x00, 0x00, 0x08,
10 0xf7, 0x11, 0x22, 0x08, 10 0xf7, 0x11, 0x22, 0x08,
11 0x00, 0x65, 0xec, 0x59, 11 0x00, 0x65, 0xee, 0x59,
12 0xf7, 0x01, 0x02, 0x08, 12 0xf7, 0x01, 0x02, 0x08,
13 0xff, 0x6a, 0x24, 0x08, 13 0xff, 0x6a, 0x24, 0x08,
14 0x40, 0x00, 0x40, 0x68, 14 0x40, 0x00, 0x40, 0x68,
@@ -21,15 +21,15 @@ static uint8_t seqprog[] = {
21 0x01, 0x4d, 0xc8, 0x30, 21 0x01, 0x4d, 0xc8, 0x30,
22 0x00, 0x4c, 0x12, 0x70, 22 0x00, 0x4c, 0x12, 0x70,
23 0x01, 0x39, 0xa2, 0x30, 23 0x01, 0x39, 0xa2, 0x30,
24 0x00, 0x6a, 0xc0, 0x5e, 24 0x00, 0x6a, 0xc2, 0x5e,
25 0x01, 0x51, 0x20, 0x31, 25 0x01, 0x51, 0x20, 0x31,
26 0x01, 0x57, 0xae, 0x00, 26 0x01, 0x57, 0xae, 0x00,
27 0x0d, 0x6a, 0x76, 0x00, 27 0x0d, 0x6a, 0x76, 0x00,
28 0x00, 0x51, 0x12, 0x5e, 28 0x00, 0x51, 0x14, 0x5e,
29 0x01, 0x51, 0xc8, 0x30, 29 0x01, 0x51, 0xc8, 0x30,
30 0x00, 0x39, 0xc8, 0x60, 30 0x00, 0x39, 0xc8, 0x60,
31 0x00, 0xbb, 0x30, 0x70, 31 0x00, 0xbb, 0x30, 0x70,
32 0xc1, 0x6a, 0xd8, 0x5e, 32 0xc1, 0x6a, 0xda, 0x5e,
33 0x01, 0xbf, 0x72, 0x30, 33 0x01, 0xbf, 0x72, 0x30,
34 0x01, 0x40, 0x7e, 0x31, 34 0x01, 0x40, 0x7e, 0x31,
35 0x01, 0x90, 0x80, 0x30, 35 0x01, 0x90, 0x80, 0x30,
@@ -49,10 +49,10 @@ static uint8_t seqprog[] = {
49 0x08, 0x6a, 0x78, 0x00, 49 0x08, 0x6a, 0x78, 0x00,
50 0x01, 0x50, 0xc8, 0x30, 50 0x01, 0x50, 0xc8, 0x30,
51 0xe0, 0x6a, 0xcc, 0x00, 51 0xe0, 0x6a, 0xcc, 0x00,
52 0x48, 0x6a, 0xfc, 0x5d, 52 0x48, 0x6a, 0xfe, 0x5d,
53 0x01, 0x6a, 0xdc, 0x01, 53 0x01, 0x6a, 0xdc, 0x01,
54 0x88, 0x6a, 0xcc, 0x00, 54 0x88, 0x6a, 0xcc, 0x00,
55 0x48, 0x6a, 0xfc, 0x5d, 55 0x48, 0x6a, 0xfe, 0x5d,
56 0x01, 0x6a, 0x26, 0x01, 56 0x01, 0x6a, 0x26, 0x01,
57 0xf0, 0x19, 0x7a, 0x08, 57 0xf0, 0x19, 0x7a, 0x08,
58 0x0f, 0x18, 0xc8, 0x08, 58 0x0f, 0x18, 0xc8, 0x08,
@@ -93,7 +93,7 @@ static uint8_t seqprog[] = {
93 0x00, 0x65, 0x20, 0x41, 93 0x00, 0x65, 0x20, 0x41,
94 0x02, 0x57, 0xae, 0x00, 94 0x02, 0x57, 0xae, 0x00,
95 0x00, 0x65, 0x9e, 0x40, 95 0x00, 0x65, 0x9e, 0x40,
96 0x61, 0x6a, 0xd8, 0x5e, 96 0x61, 0x6a, 0xda, 0x5e,
97 0x08, 0x51, 0x20, 0x71, 97 0x08, 0x51, 0x20, 0x71,
98 0x02, 0x0b, 0xb2, 0x78, 98 0x02, 0x0b, 0xb2, 0x78,
99 0x00, 0x65, 0xae, 0x40, 99 0x00, 0x65, 0xae, 0x40,
@@ -106,7 +106,7 @@ static uint8_t seqprog[] = {
106 0x80, 0x3d, 0x7a, 0x00, 106 0x80, 0x3d, 0x7a, 0x00,
107 0x20, 0x6a, 0x16, 0x00, 107 0x20, 0x6a, 0x16, 0x00,
108 0x00, 0x65, 0xcc, 0x41, 108 0x00, 0x65, 0xcc, 0x41,
109 0x00, 0x65, 0xb2, 0x5e, 109 0x00, 0x65, 0xb4, 0x5e,
110 0x00, 0x65, 0x12, 0x40, 110 0x00, 0x65, 0x12, 0x40,
111 0x20, 0x11, 0xd2, 0x68, 111 0x20, 0x11, 0xd2, 0x68,
112 0x20, 0x6a, 0x18, 0x00, 112 0x20, 0x6a, 0x18, 0x00,
@@ -140,27 +140,27 @@ static uint8_t seqprog[] = {
140 0x80, 0x0b, 0xc4, 0x79, 140 0x80, 0x0b, 0xc4, 0x79,
141 0x12, 0x01, 0x02, 0x00, 141 0x12, 0x01, 0x02, 0x00,
142 0x01, 0xab, 0xac, 0x30, 142 0x01, 0xab, 0xac, 0x30,
143 0xe4, 0x6a, 0x6e, 0x5d, 143 0xe4, 0x6a, 0x70, 0x5d,
144 0x40, 0x6a, 0x16, 0x00, 144 0x40, 0x6a, 0x16, 0x00,
145 0x80, 0x3e, 0x84, 0x5d, 145 0x80, 0x3e, 0x86, 0x5d,
146 0x20, 0xb8, 0x18, 0x79, 146 0x20, 0xb8, 0x18, 0x79,
147 0x20, 0x6a, 0x84, 0x5d, 147 0x20, 0x6a, 0x86, 0x5d,
148 0x00, 0xab, 0x84, 0x5d, 148 0x00, 0xab, 0x86, 0x5d,
149 0x01, 0xa9, 0x78, 0x30, 149 0x01, 0xa9, 0x78, 0x30,
150 0x10, 0xb8, 0x20, 0x79, 150 0x10, 0xb8, 0x20, 0x79,
151 0xe4, 0x6a, 0x6e, 0x5d, 151 0xe4, 0x6a, 0x70, 0x5d,
152 0x00, 0x65, 0xae, 0x40, 152 0x00, 0x65, 0xae, 0x40,
153 0x10, 0x03, 0x3c, 0x69, 153 0x10, 0x03, 0x3c, 0x69,
154 0x08, 0x3c, 0x5a, 0x69, 154 0x08, 0x3c, 0x5a, 0x69,
155 0x04, 0x3c, 0x92, 0x69, 155 0x04, 0x3c, 0x92, 0x69,
156 0x02, 0x3c, 0x98, 0x69, 156 0x02, 0x3c, 0x98, 0x69,
157 0x01, 0x3c, 0x44, 0x79, 157 0x01, 0x3c, 0x44, 0x79,
158 0xff, 0x6a, 0x70, 0x00, 158 0xff, 0x6a, 0xa6, 0x00,
159 0x00, 0x65, 0xa4, 0x59, 159 0x00, 0x65, 0xa4, 0x59,
160 0x00, 0x6a, 0xc0, 0x5e, 160 0x00, 0x6a, 0xc2, 0x5e,
161 0xff, 0x38, 0x30, 0x71, 161 0xff, 0x53, 0x30, 0x71,
162 0x0d, 0x6a, 0x76, 0x00, 162 0x0d, 0x6a, 0x76, 0x00,
163 0x00, 0x38, 0x12, 0x5e, 163 0x00, 0x53, 0x14, 0x5e,
164 0x00, 0x65, 0xea, 0x58, 164 0x00, 0x65, 0xea, 0x58,
165 0x12, 0x01, 0x02, 0x00, 165 0x12, 0x01, 0x02, 0x00,
166 0x00, 0x65, 0x18, 0x41, 166 0x00, 0x65, 0x18, 0x41,
@@ -168,10 +168,10 @@ static uint8_t seqprog[] = {
168 0x00, 0x65, 0xf2, 0x58, 168 0x00, 0x65, 0xf2, 0x58,
169 0xfd, 0x57, 0xae, 0x08, 169 0xfd, 0x57, 0xae, 0x08,
170 0x00, 0x65, 0xae, 0x40, 170 0x00, 0x65, 0xae, 0x40,
171 0xe4, 0x6a, 0x6e, 0x5d, 171 0xe4, 0x6a, 0x70, 0x5d,
172 0x20, 0x3c, 0x4a, 0x79, 172 0x20, 0x3c, 0x4a, 0x79,
173 0x02, 0x6a, 0x84, 0x5d, 173 0x02, 0x6a, 0x86, 0x5d,
174 0x04, 0x6a, 0x84, 0x5d, 174 0x04, 0x6a, 0x86, 0x5d,
175 0x01, 0x03, 0x4c, 0x69, 175 0x01, 0x03, 0x4c, 0x69,
176 0xf7, 0x11, 0x22, 0x08, 176 0xf7, 0x11, 0x22, 0x08,
177 0xff, 0x6a, 0x24, 0x08, 177 0xff, 0x6a, 0x24, 0x08,
@@ -182,13 +182,13 @@ static uint8_t seqprog[] = {
182 0x80, 0x86, 0xc8, 0x08, 182 0x80, 0x86, 0xc8, 0x08,
183 0x01, 0x4f, 0xc8, 0x30, 183 0x01, 0x4f, 0xc8, 0x30,
184 0x00, 0x50, 0x6c, 0x61, 184 0x00, 0x50, 0x6c, 0x61,
185 0xc4, 0x6a, 0x6e, 0x5d, 185 0xc4, 0x6a, 0x70, 0x5d,
186 0x40, 0x3c, 0x68, 0x79, 186 0x40, 0x3c, 0x68, 0x79,
187 0x28, 0x6a, 0x84, 0x5d, 187 0x28, 0x6a, 0x86, 0x5d,
188 0x00, 0x65, 0x4c, 0x41, 188 0x00, 0x65, 0x4c, 0x41,
189 0x08, 0x6a, 0x84, 0x5d, 189 0x08, 0x6a, 0x86, 0x5d,
190 0x00, 0x65, 0x4c, 0x41, 190 0x00, 0x65, 0x4c, 0x41,
191 0x84, 0x6a, 0x6e, 0x5d, 191 0x84, 0x6a, 0x70, 0x5d,
192 0x00, 0x65, 0xf2, 0x58, 192 0x00, 0x65, 0xf2, 0x58,
193 0x01, 0x66, 0xc8, 0x30, 193 0x01, 0x66, 0xc8, 0x30,
194 0x01, 0x64, 0xd8, 0x31, 194 0x01, 0x64, 0xd8, 0x31,
@@ -208,16 +208,16 @@ static uint8_t seqprog[] = {
208 0xf7, 0x3c, 0x78, 0x08, 208 0xf7, 0x3c, 0x78, 0x08,
209 0x00, 0x65, 0x20, 0x41, 209 0x00, 0x65, 0x20, 0x41,
210 0x40, 0xaa, 0x7e, 0x10, 210 0x40, 0xaa, 0x7e, 0x10,
211 0x04, 0xaa, 0x6e, 0x5d, 211 0x04, 0xaa, 0x70, 0x5d,
212 0x00, 0x65, 0x56, 0x42, 212 0x00, 0x65, 0x58, 0x42,
213 0xc4, 0x6a, 0x6e, 0x5d, 213 0xc4, 0x6a, 0x70, 0x5d,
214 0xc0, 0x6a, 0x7e, 0x00, 214 0xc0, 0x6a, 0x7e, 0x00,
215 0x00, 0xa8, 0x84, 0x5d, 215 0x00, 0xa8, 0x86, 0x5d,
216 0xe4, 0x6a, 0x06, 0x00, 216 0xe4, 0x6a, 0x06, 0x00,
217 0x00, 0x6a, 0x84, 0x5d, 217 0x00, 0x6a, 0x86, 0x5d,
218 0x00, 0x65, 0x4c, 0x41, 218 0x00, 0x65, 0x4c, 0x41,
219 0x10, 0x3c, 0xa8, 0x69, 219 0x10, 0x3c, 0xa8, 0x69,
220 0x00, 0xbb, 0x8a, 0x44, 220 0x00, 0xbb, 0x8c, 0x44,
221 0x18, 0x6a, 0xda, 0x01, 221 0x18, 0x6a, 0xda, 0x01,
222 0x01, 0x69, 0xd8, 0x31, 222 0x01, 0x69, 0xd8, 0x31,
223 0x1c, 0x6a, 0xd0, 0x01, 223 0x1c, 0x6a, 0xd0, 0x01,
@@ -227,31 +227,32 @@ static uint8_t seqprog[] = {
227 0x01, 0x93, 0x26, 0x01, 227 0x01, 0x93, 0x26, 0x01,
228 0x03, 0x6a, 0x2a, 0x01, 228 0x03, 0x6a, 0x2a, 0x01,
229 0x01, 0x69, 0x32, 0x31, 229 0x01, 0x69, 0x32, 0x31,
230 0x1c, 0x6a, 0xe0, 0x5d, 230 0x1c, 0x6a, 0xe2, 0x5d,
231 0x0a, 0x93, 0x26, 0x01, 231 0x0a, 0x93, 0x26, 0x01,
232 0x00, 0x65, 0xa8, 0x5e, 232 0x00, 0x65, 0xaa, 0x5e,
233 0x01, 0x50, 0xa0, 0x18, 233 0x01, 0x50, 0xa0, 0x18,
234 0x02, 0x6a, 0x22, 0x05, 234 0x02, 0x6a, 0x22, 0x05,
235 0x1a, 0x01, 0x02, 0x00, 235 0x1a, 0x01, 0x02, 0x00,
236 0x80, 0x6a, 0x74, 0x00, 236 0x80, 0x6a, 0x74, 0x00,
237 0x40, 0x6a, 0x78, 0x00, 237 0x40, 0x6a, 0x78, 0x00,
238 0x40, 0x6a, 0x16, 0x00, 238 0x40, 0x6a, 0x16, 0x00,
239 0x00, 0x65, 0xd8, 0x5d, 239 0x00, 0x65, 0xda, 0x5d,
240 0x01, 0x3f, 0xc8, 0x30, 240 0x01, 0x3f, 0xc8, 0x30,
241 0xbf, 0x64, 0x56, 0x7a, 241 0xbf, 0x64, 0x58, 0x7a,
242 0x80, 0x64, 0x9e, 0x73, 242 0x80, 0x64, 0xa0, 0x73,
243 0xa0, 0x64, 0x00, 0x74, 243 0xa0, 0x64, 0x02, 0x74,
244 0xc0, 0x64, 0xf4, 0x73, 244 0xc0, 0x64, 0xf6, 0x73,
245 0xe0, 0x64, 0x30, 0x74, 245 0xe0, 0x64, 0x32, 0x74,
246 0x01, 0x6a, 0xd8, 0x5e, 246 0x01, 0x6a, 0xda, 0x5e,
247 0x00, 0x65, 0xcc, 0x41, 247 0x00, 0x65, 0xcc, 0x41,
248 0xf7, 0x11, 0x22, 0x08, 248 0xf7, 0x11, 0x22, 0x08,
249 0x01, 0x06, 0xd4, 0x30, 249 0x01, 0x06, 0xd4, 0x30,
250 0xff, 0x6a, 0x24, 0x08, 250 0xff, 0x6a, 0x24, 0x08,
251 0xf7, 0x01, 0x02, 0x08, 251 0xf7, 0x01, 0x02, 0x08,
252 0x09, 0x0c, 0xe6, 0x79, 252 0xc0, 0x6a, 0x78, 0x00,
253 0x09, 0x0c, 0xe8, 0x79,
253 0x08, 0x0c, 0x04, 0x68, 254 0x08, 0x0c, 0x04, 0x68,
254 0xb1, 0x6a, 0xd8, 0x5e, 255 0xb1, 0x6a, 0xda, 0x5e,
255 0xff, 0x6a, 0x26, 0x09, 256 0xff, 0x6a, 0x26, 0x09,
256 0x12, 0x01, 0x02, 0x00, 257 0x12, 0x01, 0x02, 0x00,
257 0x02, 0x6a, 0x08, 0x30, 258 0x02, 0x6a, 0x08, 0x30,
@@ -264,29 +265,29 @@ static uint8_t seqprog[] = {
264 0x00, 0xa5, 0x4a, 0x21, 265 0x00, 0xa5, 0x4a, 0x21,
265 0x00, 0xa6, 0x4c, 0x21, 266 0x00, 0xa6, 0x4c, 0x21,
266 0x00, 0xa7, 0x4e, 0x25, 267 0x00, 0xa7, 0x4e, 0x25,
267 0x08, 0xeb, 0xdc, 0x7e, 268 0x08, 0xeb, 0xde, 0x7e,
268 0x80, 0xeb, 0x06, 0x7a, 269 0x80, 0xeb, 0x08, 0x7a,
269 0xff, 0x6a, 0xd6, 0x09, 270 0xff, 0x6a, 0xd6, 0x09,
270 0x08, 0xeb, 0x0a, 0x6a, 271 0x08, 0xeb, 0x0c, 0x6a,
271 0xff, 0x6a, 0xd4, 0x0c, 272 0xff, 0x6a, 0xd4, 0x0c,
272 0x80, 0xa3, 0xdc, 0x6e, 273 0x80, 0xa3, 0xde, 0x6e,
273 0x88, 0xeb, 0x20, 0x72, 274 0x88, 0xeb, 0x22, 0x72,
274 0x08, 0xeb, 0xdc, 0x6e, 275 0x08, 0xeb, 0xde, 0x6e,
275 0x04, 0xea, 0x24, 0xe2, 276 0x04, 0xea, 0x26, 0xe2,
276 0x08, 0xee, 0xdc, 0x6e, 277 0x08, 0xee, 0xde, 0x6e,
277 0x04, 0x6a, 0xd0, 0x81, 278 0x04, 0x6a, 0xd0, 0x81,
278 0x05, 0xa4, 0xc0, 0x89, 279 0x05, 0xa4, 0xc0, 0x89,
279 0x03, 0xa5, 0xc2, 0x31, 280 0x03, 0xa5, 0xc2, 0x31,
280 0x09, 0x6a, 0xd6, 0x05, 281 0x09, 0x6a, 0xd6, 0x05,
281 0x00, 0x65, 0x08, 0x5a, 282 0x00, 0x65, 0x0a, 0x5a,
282 0x06, 0xa4, 0xd4, 0x89, 283 0x06, 0xa4, 0xd4, 0x89,
283 0x80, 0x94, 0xdc, 0x7e, 284 0x80, 0x94, 0xde, 0x7e,
284 0x07, 0xe9, 0x10, 0x31, 285 0x07, 0xe9, 0x10, 0x31,
285 0x01, 0xe9, 0x46, 0x31, 286 0x01, 0xe9, 0x46, 0x31,
286 0x00, 0xa3, 0xba, 0x5e, 287 0x00, 0xa3, 0xbc, 0x5e,
287 0x00, 0x65, 0xfa, 0x59, 288 0x00, 0x65, 0xfc, 0x59,
288 0x01, 0xa4, 0xca, 0x30, 289 0x01, 0xa4, 0xca, 0x30,
289 0x80, 0xa3, 0x34, 0x7a, 290 0x80, 0xa3, 0x36, 0x7a,
290 0x02, 0x65, 0xca, 0x00, 291 0x02, 0x65, 0xca, 0x00,
291 0x01, 0x65, 0xf8, 0x31, 292 0x01, 0x65, 0xf8, 0x31,
292 0x80, 0x93, 0x26, 0x01, 293 0x80, 0x93, 0x26, 0x01,
@@ -294,162 +295,162 @@ static uint8_t seqprog[] = {
294 0x01, 0x8c, 0xc8, 0x30, 295 0x01, 0x8c, 0xc8, 0x30,
295 0x00, 0x88, 0xc8, 0x18, 296 0x00, 0x88, 0xc8, 0x18,
296 0x02, 0x64, 0xc8, 0x88, 297 0x02, 0x64, 0xc8, 0x88,
297 0xff, 0x64, 0xdc, 0x7e, 298 0xff, 0x64, 0xde, 0x7e,
298 0xff, 0x8d, 0x4a, 0x6a, 299 0xff, 0x8d, 0x4c, 0x6a,
299 0xff, 0x8e, 0x4a, 0x6a, 300 0xff, 0x8e, 0x4c, 0x6a,
300 0x03, 0x8c, 0xd4, 0x98, 301 0x03, 0x8c, 0xd4, 0x98,
301 0x00, 0x65, 0xdc, 0x56, 302 0x00, 0x65, 0xde, 0x56,
302 0x01, 0x64, 0x70, 0x30, 303 0x01, 0x64, 0x70, 0x30,
303 0xff, 0x64, 0xc8, 0x10, 304 0xff, 0x64, 0xc8, 0x10,
304 0x01, 0x64, 0xc8, 0x18, 305 0x01, 0x64, 0xc8, 0x18,
305 0x00, 0x8c, 0x18, 0x19, 306 0x00, 0x8c, 0x18, 0x19,
306 0xff, 0x8d, 0x1a, 0x21, 307 0xff, 0x8d, 0x1a, 0x21,
307 0xff, 0x8e, 0x1c, 0x25, 308 0xff, 0x8e, 0x1c, 0x25,
308 0xc0, 0x3c, 0x5a, 0x7a, 309 0xc0, 0x3c, 0x5c, 0x7a,
309 0x21, 0x6a, 0xd8, 0x5e, 310 0x21, 0x6a, 0xda, 0x5e,
310 0xa8, 0x6a, 0x76, 0x00, 311 0xa8, 0x6a, 0x76, 0x00,
311 0x79, 0x6a, 0x76, 0x00, 312 0x79, 0x6a, 0x76, 0x00,
312 0x40, 0x3f, 0x62, 0x6a, 313 0x40, 0x3f, 0x64, 0x6a,
313 0x04, 0x3b, 0x76, 0x00, 314 0x04, 0x3b, 0x76, 0x00,
314 0x04, 0x6a, 0xd4, 0x81, 315 0x04, 0x6a, 0xd4, 0x81,
315 0x20, 0x3c, 0x6a, 0x7a, 316 0x20, 0x3c, 0x6c, 0x7a,
316 0x51, 0x6a, 0xd8, 0x5e, 317 0x51, 0x6a, 0xda, 0x5e,
317 0x00, 0x65, 0x82, 0x42, 318 0x00, 0x65, 0x84, 0x42,
318 0x20, 0x3c, 0x78, 0x00, 319 0x20, 0x3c, 0x78, 0x00,
319 0x00, 0xb3, 0xba, 0x5e, 320 0x00, 0xb3, 0xbc, 0x5e,
320 0x07, 0xac, 0x10, 0x31, 321 0x07, 0xac, 0x10, 0x31,
321 0x05, 0xb3, 0x46, 0x31, 322 0x05, 0xb3, 0x46, 0x31,
322 0x88, 0x6a, 0xcc, 0x00, 323 0x88, 0x6a, 0xcc, 0x00,
323 0xac, 0x6a, 0xee, 0x5d, 324 0xac, 0x6a, 0xf0, 0x5d,
324 0xa3, 0x6a, 0xcc, 0x00, 325 0xa3, 0x6a, 0xcc, 0x00,
325 0xb3, 0x6a, 0xf2, 0x5d, 326 0xb3, 0x6a, 0xf4, 0x5d,
326 0x00, 0x65, 0x3a, 0x5a, 327 0x00, 0x65, 0x3c, 0x5a,
327 0xfd, 0xa4, 0x48, 0x09, 328 0xfd, 0xa4, 0x48, 0x09,
328 0x03, 0x8c, 0x10, 0x30, 329 0x03, 0x8c, 0x10, 0x30,
329 0x00, 0x65, 0xe6, 0x5d, 330 0x00, 0x65, 0xe8, 0x5d,
330 0x01, 0xa4, 0x94, 0x7a, 331 0x01, 0xa4, 0x96, 0x7a,
331 0x04, 0x3b, 0x76, 0x08, 332 0x04, 0x3b, 0x76, 0x08,
332 0x01, 0x3b, 0x26, 0x31, 333 0x01, 0x3b, 0x26, 0x31,
333 0x80, 0x02, 0x04, 0x00, 334 0x80, 0x02, 0x04, 0x00,
334 0x10, 0x0c, 0x8a, 0x7a, 335 0x10, 0x0c, 0x8c, 0x7a,
335 0x03, 0x9e, 0x8c, 0x6a, 336 0x03, 0x9e, 0x8e, 0x6a,
336 0x7f, 0x02, 0x04, 0x08, 337 0x7f, 0x02, 0x04, 0x08,
337 0x91, 0x6a, 0xd8, 0x5e, 338 0x91, 0x6a, 0xda, 0x5e,
338 0x00, 0x65, 0xcc, 0x41, 339 0x00, 0x65, 0xcc, 0x41,
339 0x01, 0xa4, 0xca, 0x30, 340 0x01, 0xa4, 0xca, 0x30,
340 0x80, 0xa3, 0x9a, 0x7a, 341 0x80, 0xa3, 0x9c, 0x7a,
341 0x02, 0x65, 0xca, 0x00, 342 0x02, 0x65, 0xca, 0x00,
342 0x01, 0x65, 0xf8, 0x31, 343 0x01, 0x65, 0xf8, 0x31,
343 0x01, 0x3b, 0x26, 0x31, 344 0x01, 0x3b, 0x26, 0x31,
344 0x00, 0x65, 0x0e, 0x5a, 345 0x00, 0x65, 0x10, 0x5a,
345 0x01, 0xfc, 0xa8, 0x6a, 346 0x01, 0xfc, 0xaa, 0x6a,
346 0x80, 0x0b, 0x9e, 0x6a, 347 0x80, 0x0b, 0xa0, 0x6a,
347 0x10, 0x0c, 0x9e, 0x7a, 348 0x10, 0x0c, 0xa0, 0x7a,
348 0x20, 0x93, 0x9e, 0x6a, 349 0x20, 0x93, 0xa0, 0x6a,
349 0x02, 0x93, 0x26, 0x01, 350 0x02, 0x93, 0x26, 0x01,
350 0x02, 0xfc, 0xb2, 0x7a, 351 0x02, 0xfc, 0xb4, 0x7a,
351 0x40, 0x0d, 0xc6, 0x6a, 352 0x40, 0x0d, 0xc8, 0x6a,
352 0x01, 0xa4, 0x48, 0x01, 353 0x01, 0xa4, 0x48, 0x01,
353 0x00, 0x65, 0xc6, 0x42, 354 0x00, 0x65, 0xc8, 0x42,
354 0x40, 0x0d, 0xb8, 0x6a, 355 0x40, 0x0d, 0xba, 0x6a,
355 0x00, 0x65, 0x0e, 0x5a, 356 0x00, 0x65, 0x10, 0x5a,
356 0x00, 0x65, 0xaa, 0x42, 357 0x00, 0x65, 0xac, 0x42,
357 0x80, 0xfc, 0xc2, 0x7a, 358 0x80, 0xfc, 0xc4, 0x7a,
358 0x80, 0xa4, 0xc2, 0x6a, 359 0x80, 0xa4, 0xc4, 0x6a,
359 0xff, 0xa5, 0x4a, 0x19, 360 0xff, 0xa5, 0x4a, 0x19,
360 0xff, 0xa6, 0x4c, 0x21, 361 0xff, 0xa6, 0x4c, 0x21,
361 0xff, 0xa7, 0x4e, 0x21, 362 0xff, 0xa7, 0x4e, 0x21,
362 0xf8, 0xfc, 0x48, 0x09, 363 0xf8, 0xfc, 0x48, 0x09,
363 0x7f, 0xa3, 0x46, 0x09, 364 0x7f, 0xa3, 0x46, 0x09,
364 0x04, 0x3b, 0xe2, 0x6a, 365 0x04, 0x3b, 0xe4, 0x6a,
365 0x02, 0x93, 0x26, 0x01, 366 0x02, 0x93, 0x26, 0x01,
366 0x01, 0x94, 0xc8, 0x7a, 367 0x01, 0x94, 0xca, 0x7a,
367 0x01, 0x94, 0xc8, 0x7a, 368 0x01, 0x94, 0xca, 0x7a,
368 0x01, 0x94, 0xc8, 0x7a, 369 0x01, 0x94, 0xca, 0x7a,
369 0x01, 0x94, 0xc8, 0x7a, 370 0x01, 0x94, 0xca, 0x7a,
370 0x01, 0x94, 0xc8, 0x7a, 371 0x01, 0x94, 0xca, 0x7a,
371 0x01, 0xa4, 0xe0, 0x7a, 372 0x01, 0xa4, 0xe2, 0x7a,
372 0x01, 0xfc, 0xd6, 0x7a, 373 0x01, 0xfc, 0xd8, 0x7a,
373 0x01, 0x94, 0xe2, 0x6a, 374 0x01, 0x94, 0xe4, 0x6a,
374 0x01, 0x94, 0xe2, 0x6a, 375 0x01, 0x94, 0xe4, 0x6a,
375 0x01, 0x94, 0xe2, 0x6a, 376 0x01, 0x94, 0xe4, 0x6a,
376 0x00, 0x65, 0x82, 0x42, 377 0x00, 0x65, 0x84, 0x42,
377 0x01, 0x94, 0xe0, 0x7a, 378 0x01, 0x94, 0xe2, 0x7a,
378 0x10, 0x94, 0xe2, 0x6a, 379 0x10, 0x94, 0xe4, 0x6a,
379 0xd7, 0x93, 0x26, 0x09, 380 0xd7, 0x93, 0x26, 0x09,
380 0x28, 0x93, 0xe6, 0x6a, 381 0x28, 0x93, 0xe8, 0x6a,
381 0x01, 0x85, 0x0a, 0x01, 382 0x01, 0x85, 0x0a, 0x01,
382 0x02, 0xfc, 0xee, 0x6a, 383 0x02, 0xfc, 0xf0, 0x6a,
383 0x01, 0x14, 0x46, 0x31, 384 0x01, 0x14, 0x46, 0x31,
384 0xff, 0x6a, 0x10, 0x09, 385 0xff, 0x6a, 0x10, 0x09,
385 0xfe, 0x85, 0x0a, 0x09, 386 0xfe, 0x85, 0x0a, 0x09,
386 0xff, 0x38, 0xfc, 0x6a, 387 0xff, 0x38, 0xfe, 0x6a,
387 0x80, 0xa3, 0xfc, 0x7a, 388 0x80, 0xa3, 0xfe, 0x7a,
388 0x80, 0x0b, 0xfa, 0x7a, 389 0x80, 0x0b, 0xfc, 0x7a,
389 0x04, 0x3b, 0xfc, 0x7a, 390 0x04, 0x3b, 0xfe, 0x7a,
390 0xbf, 0x3b, 0x76, 0x08, 391 0xbf, 0x3b, 0x76, 0x08,
391 0x01, 0x3b, 0x26, 0x31, 392 0x01, 0x3b, 0x26, 0x31,
392 0x00, 0x65, 0x0e, 0x5a, 393 0x00, 0x65, 0x10, 0x5a,
393 0x01, 0x0b, 0x0a, 0x6b, 394 0x01, 0x0b, 0x0c, 0x6b,
394 0x10, 0x0c, 0xfe, 0x7a, 395 0x10, 0x0c, 0x00, 0x7b,
395 0x04, 0x93, 0x08, 0x6b, 396 0x04, 0x93, 0x0a, 0x6b,
396 0x01, 0x94, 0x06, 0x7b, 397 0x01, 0x94, 0x08, 0x7b,
397 0x10, 0x94, 0x08, 0x6b, 398 0x10, 0x94, 0x0a, 0x6b,
398 0xc7, 0x93, 0x26, 0x09, 399 0xc7, 0x93, 0x26, 0x09,
399 0x01, 0x99, 0xd4, 0x30, 400 0x01, 0x99, 0xd4, 0x30,
400 0x38, 0x93, 0x0c, 0x6b, 401 0x38, 0x93, 0x0e, 0x6b,
401 0xff, 0x08, 0x5a, 0x6b, 402 0xff, 0x08, 0x5c, 0x6b,
402 0xff, 0x09, 0x5a, 0x6b, 403 0xff, 0x09, 0x5c, 0x6b,
403 0xff, 0x0a, 0x5a, 0x6b, 404 0xff, 0x0a, 0x5c, 0x6b,
404 0xff, 0x38, 0x28, 0x7b, 405 0xff, 0x38, 0x2a, 0x7b,
405 0x04, 0x14, 0x10, 0x31, 406 0x04, 0x14, 0x10, 0x31,
406 0x01, 0x38, 0x18, 0x31, 407 0x01, 0x38, 0x18, 0x31,
407 0x02, 0x6a, 0x1a, 0x31, 408 0x02, 0x6a, 0x1a, 0x31,
408 0x88, 0x6a, 0xcc, 0x00, 409 0x88, 0x6a, 0xcc, 0x00,
409 0x14, 0x6a, 0xf4, 0x5d, 410 0x14, 0x6a, 0xf6, 0x5d,
410 0x00, 0x38, 0xe0, 0x5d, 411 0x00, 0x38, 0xe2, 0x5d,
411 0xff, 0x6a, 0x70, 0x08, 412 0xff, 0x6a, 0x70, 0x08,
412 0x00, 0x65, 0x54, 0x43, 413 0x00, 0x65, 0x56, 0x43,
413 0x80, 0xa3, 0x2e, 0x7b, 414 0x80, 0xa3, 0x30, 0x7b,
414 0x01, 0xa4, 0x48, 0x01, 415 0x01, 0xa4, 0x48, 0x01,
415 0x00, 0x65, 0x5a, 0x43, 416 0x00, 0x65, 0x5c, 0x43,
416 0x08, 0xeb, 0x34, 0x7b, 417 0x08, 0xeb, 0x36, 0x7b,
417 0x00, 0x65, 0x0e, 0x5a, 418 0x00, 0x65, 0x10, 0x5a,
418 0x08, 0xeb, 0x30, 0x6b, 419 0x08, 0xeb, 0x32, 0x6b,
419 0x07, 0xe9, 0x10, 0x31, 420 0x07, 0xe9, 0x10, 0x31,
420 0x01, 0xe9, 0xca, 0x30, 421 0x01, 0xe9, 0xca, 0x30,
421 0x01, 0x65, 0x46, 0x31, 422 0x01, 0x65, 0x46, 0x31,
422 0x00, 0x6a, 0xba, 0x5e, 423 0x00, 0x6a, 0xbc, 0x5e,
423 0x88, 0x6a, 0xcc, 0x00, 424 0x88, 0x6a, 0xcc, 0x00,
424 0xa4, 0x6a, 0xf4, 0x5d, 425 0xa4, 0x6a, 0xf6, 0x5d,
425 0x08, 0x6a, 0xe0, 0x5d, 426 0x08, 0x6a, 0xe2, 0x5d,
426 0x0d, 0x93, 0x26, 0x01, 427 0x0d, 0x93, 0x26, 0x01,
427 0x00, 0x65, 0xa8, 0x5e, 428 0x00, 0x65, 0xaa, 0x5e,
428 0x88, 0x6a, 0xcc, 0x00, 429 0x88, 0x6a, 0xcc, 0x00,
429 0x00, 0x65, 0x8a, 0x5e, 430 0x00, 0x65, 0x8c, 0x5e,
430 0x01, 0x99, 0x46, 0x31, 431 0x01, 0x99, 0x46, 0x31,
431 0x00, 0xa3, 0xba, 0x5e, 432 0x00, 0xa3, 0xbc, 0x5e,
432 0x01, 0x88, 0x10, 0x31, 433 0x01, 0x88, 0x10, 0x31,
433 0x00, 0x65, 0x3a, 0x5a, 434 0x00, 0x65, 0x3c, 0x5a,
434 0x00, 0x65, 0xfa, 0x59, 435 0x00, 0x65, 0xfc, 0x59,
435 0x03, 0x8c, 0x10, 0x30, 436 0x03, 0x8c, 0x10, 0x30,
436 0x00, 0x65, 0xe6, 0x5d, 437 0x00, 0x65, 0xe8, 0x5d,
437 0x80, 0x0b, 0x82, 0x6a, 438 0x80, 0x0b, 0x84, 0x6a,
438 0x80, 0x0b, 0x62, 0x6b, 439 0x80, 0x0b, 0x64, 0x6b,
439 0x01, 0x0c, 0x5c, 0x7b, 440 0x01, 0x0c, 0x5e, 0x7b,
440 0x10, 0x0c, 0x82, 0x7a, 441 0x10, 0x0c, 0x84, 0x7a,
441 0x03, 0x9e, 0x82, 0x6a, 442 0x03, 0x9e, 0x84, 0x6a,
442 0x00, 0x65, 0x04, 0x5a, 443 0x00, 0x65, 0x06, 0x5a,
443 0x00, 0x6a, 0xba, 0x5e, 444 0x00, 0x6a, 0xbc, 0x5e,
444 0x01, 0xa4, 0x82, 0x6b, 445 0x01, 0xa4, 0x84, 0x6b,
445 0xff, 0x38, 0x78, 0x7b, 446 0xff, 0x38, 0x7a, 0x7b,
446 0x01, 0x38, 0xc8, 0x30, 447 0x01, 0x38, 0xc8, 0x30,
447 0x00, 0x08, 0x40, 0x19, 448 0x00, 0x08, 0x40, 0x19,
448 0xff, 0x6a, 0xc8, 0x08, 449 0xff, 0x6a, 0xc8, 0x08,
449 0x00, 0x09, 0x42, 0x21, 450 0x00, 0x09, 0x42, 0x21,
450 0x00, 0x0a, 0x44, 0x21, 451 0x00, 0x0a, 0x44, 0x21,
451 0xff, 0x6a, 0x70, 0x08, 452 0xff, 0x6a, 0x70, 0x08,
452 0x00, 0x65, 0x7a, 0x43, 453 0x00, 0x65, 0x7c, 0x43,
453 0x03, 0x08, 0x40, 0x31, 454 0x03, 0x08, 0x40, 0x31,
454 0x03, 0x08, 0x40, 0x31, 455 0x03, 0x08, 0x40, 0x31,
455 0x01, 0x08, 0x40, 0x31, 456 0x01, 0x08, 0x40, 0x31,
@@ -461,16 +462,16 @@ static uint8_t seqprog[] = {
461 0x04, 0x3c, 0xcc, 0x79, 462 0x04, 0x3c, 0xcc, 0x79,
462 0xfb, 0x3c, 0x78, 0x08, 463 0xfb, 0x3c, 0x78, 0x08,
463 0x04, 0x93, 0x20, 0x79, 464 0x04, 0x93, 0x20, 0x79,
464 0x01, 0x0c, 0x8e, 0x6b, 465 0x01, 0x0c, 0x90, 0x6b,
465 0x80, 0xba, 0x20, 0x79, 466 0x80, 0xba, 0x20, 0x79,
466 0x80, 0x04, 0x20, 0x79, 467 0x80, 0x04, 0x20, 0x79,
467 0xe4, 0x6a, 0x6e, 0x5d, 468 0xe4, 0x6a, 0x70, 0x5d,
468 0x23, 0x6a, 0x84, 0x5d, 469 0x23, 0x6a, 0x86, 0x5d,
469 0x01, 0x6a, 0x84, 0x5d, 470 0x01, 0x6a, 0x86, 0x5d,
470 0x00, 0x65, 0x20, 0x41, 471 0x00, 0x65, 0x20, 0x41,
471 0x00, 0x65, 0xcc, 0x41, 472 0x00, 0x65, 0xcc, 0x41,
472 0x80, 0x3c, 0xa2, 0x7b, 473 0x80, 0x3c, 0xa4, 0x7b,
473 0x21, 0x6a, 0xd8, 0x5e, 474 0x21, 0x6a, 0xda, 0x5e,
474 0x01, 0xbc, 0x18, 0x31, 475 0x01, 0xbc, 0x18, 0x31,
475 0x02, 0x6a, 0x1a, 0x31, 476 0x02, 0x6a, 0x1a, 0x31,
476 0x02, 0x6a, 0xf8, 0x01, 477 0x02, 0x6a, 0xf8, 0x01,
@@ -480,16 +481,16 @@ static uint8_t seqprog[] = {
480 0xff, 0x6a, 0x12, 0x08, 481 0xff, 0x6a, 0x12, 0x08,
481 0xff, 0x6a, 0x14, 0x08, 482 0xff, 0x6a, 0x14, 0x08,
482 0xf3, 0xbc, 0xd4, 0x18, 483 0xf3, 0xbc, 0xd4, 0x18,
483 0xa0, 0x6a, 0xc8, 0x53, 484 0xa0, 0x6a, 0xca, 0x53,
484 0x04, 0xa0, 0x10, 0x31, 485 0x04, 0xa0, 0x10, 0x31,
485 0xac, 0x6a, 0x26, 0x01, 486 0xac, 0x6a, 0x26, 0x01,
486 0x04, 0xa0, 0x10, 0x31, 487 0x04, 0xa0, 0x10, 0x31,
487 0x03, 0x08, 0x18, 0x31, 488 0x03, 0x08, 0x18, 0x31,
488 0x88, 0x6a, 0xcc, 0x00, 489 0x88, 0x6a, 0xcc, 0x00,
489 0xa0, 0x6a, 0xf4, 0x5d, 490 0xa0, 0x6a, 0xf6, 0x5d,
490 0x00, 0xbc, 0xe0, 0x5d, 491 0x00, 0xbc, 0xe2, 0x5d,
491 0x3d, 0x6a, 0x26, 0x01, 492 0x3d, 0x6a, 0x26, 0x01,
492 0x00, 0x65, 0xe0, 0x43, 493 0x00, 0x65, 0xe2, 0x43,
493 0xff, 0x6a, 0x10, 0x09, 494 0xff, 0x6a, 0x10, 0x09,
494 0xa4, 0x6a, 0x26, 0x01, 495 0xa4, 0x6a, 0x26, 0x01,
495 0x0c, 0xa0, 0x32, 0x31, 496 0x0c, 0xa0, 0x32, 0x31,
@@ -499,128 +500,128 @@ static uint8_t seqprog[] = {
499 0x36, 0x6a, 0x26, 0x01, 500 0x36, 0x6a, 0x26, 0x01,
500 0x02, 0x93, 0x26, 0x01, 501 0x02, 0x93, 0x26, 0x01,
501 0x35, 0x6a, 0x26, 0x01, 502 0x35, 0x6a, 0x26, 0x01,
502 0x00, 0x65, 0x9c, 0x5e, 503 0x00, 0x65, 0x9e, 0x5e,
503 0x00, 0x65, 0x9c, 0x5e, 504 0x00, 0x65, 0x9e, 0x5e,
504 0x02, 0x93, 0x26, 0x01, 505 0x02, 0x93, 0x26, 0x01,
505 0xbf, 0x3c, 0x78, 0x08, 506 0xbf, 0x3c, 0x78, 0x08,
506 0x04, 0x0b, 0xe6, 0x6b, 507 0x04, 0x0b, 0xe8, 0x6b,
507 0x10, 0x0c, 0xe2, 0x7b, 508 0x10, 0x0c, 0xe4, 0x7b,
508 0x01, 0x03, 0xe6, 0x6b, 509 0x01, 0x03, 0xe8, 0x6b,
509 0x20, 0x93, 0xe8, 0x6b, 510 0x20, 0x93, 0xea, 0x6b,
510 0x04, 0x0b, 0xee, 0x6b, 511 0x04, 0x0b, 0xf0, 0x6b,
511 0x40, 0x3c, 0x78, 0x00, 512 0x40, 0x3c, 0x78, 0x00,
512 0xc7, 0x93, 0x26, 0x09, 513 0xc7, 0x93, 0x26, 0x09,
513 0x38, 0x93, 0xf0, 0x6b, 514 0x38, 0x93, 0xf2, 0x6b,
514 0x00, 0x65, 0xcc, 0x41, 515 0x00, 0x65, 0xcc, 0x41,
515 0x80, 0x3c, 0x56, 0x6c, 516 0x80, 0x3c, 0x58, 0x6c,
516 0x01, 0x06, 0x50, 0x31, 517 0x01, 0x06, 0x50, 0x31,
517 0x80, 0xb8, 0x70, 0x01, 518 0x80, 0xb8, 0x70, 0x01,
518 0x00, 0x65, 0xcc, 0x41, 519 0x00, 0x65, 0xcc, 0x41,
519 0x10, 0x3f, 0x06, 0x00, 520 0x10, 0x3f, 0x06, 0x00,
520 0x10, 0x6a, 0x06, 0x00, 521 0x10, 0x6a, 0x06, 0x00,
521 0x01, 0x3a, 0xca, 0x30, 522 0x01, 0x3a, 0xca, 0x30,
522 0x80, 0x65, 0x1c, 0x64, 523 0x80, 0x65, 0x1e, 0x64,
523 0x10, 0xb8, 0x40, 0x6c, 524 0x10, 0xb8, 0x42, 0x6c,
524 0xc0, 0x3e, 0xca, 0x00, 525 0xc0, 0x3e, 0xca, 0x00,
525 0x40, 0xb8, 0x0c, 0x6c, 526 0x40, 0xb8, 0x0e, 0x6c,
526 0xbf, 0x65, 0xca, 0x08, 527 0xbf, 0x65, 0xca, 0x08,
527 0x20, 0xb8, 0x20, 0x7c, 528 0x20, 0xb8, 0x22, 0x7c,
528 0x01, 0x65, 0x0c, 0x30, 529 0x01, 0x65, 0x0c, 0x30,
529 0x00, 0x65, 0xd8, 0x5d, 530 0x00, 0x65, 0xda, 0x5d,
530 0xa0, 0x3f, 0x28, 0x64, 531 0xa0, 0x3f, 0x2a, 0x64,
531 0x23, 0xb8, 0x0c, 0x08, 532 0x23, 0xb8, 0x0c, 0x08,
532 0x00, 0x65, 0xd8, 0x5d, 533 0x00, 0x65, 0xda, 0x5d,
533 0xa0, 0x3f, 0x28, 0x64, 534 0xa0, 0x3f, 0x2a, 0x64,
534 0x00, 0xbb, 0x20, 0x44, 535 0x00, 0xbb, 0x22, 0x44,
535 0xff, 0x65, 0x20, 0x64, 536 0xff, 0x65, 0x22, 0x64,
536 0x00, 0x65, 0x40, 0x44, 537 0x00, 0x65, 0x42, 0x44,
537 0x40, 0x6a, 0x18, 0x00, 538 0x40, 0x6a, 0x18, 0x00,
538 0x01, 0x65, 0x0c, 0x30, 539 0x01, 0x65, 0x0c, 0x30,
539 0x00, 0x65, 0xd8, 0x5d, 540 0x00, 0x65, 0xda, 0x5d,
540 0xa0, 0x3f, 0xfc, 0x73, 541 0xa0, 0x3f, 0xfe, 0x73,
541 0x40, 0x6a, 0x18, 0x00, 542 0x40, 0x6a, 0x18, 0x00,
542 0x01, 0x3a, 0xa6, 0x30, 543 0x01, 0x3a, 0xa6, 0x30,
543 0x08, 0x6a, 0x74, 0x00, 544 0x08, 0x6a, 0x74, 0x00,
544 0x00, 0x65, 0xcc, 0x41, 545 0x00, 0x65, 0xcc, 0x41,
545 0x64, 0x6a, 0x68, 0x5d, 546 0x64, 0x6a, 0x6a, 0x5d,
546 0x80, 0x64, 0xd8, 0x6c, 547 0x80, 0x64, 0xda, 0x6c,
547 0x04, 0x64, 0x9a, 0x74, 548 0x04, 0x64, 0x9c, 0x74,
548 0x02, 0x64, 0xaa, 0x74, 549 0x02, 0x64, 0xac, 0x74,
549 0x00, 0x6a, 0x60, 0x74, 550 0x00, 0x6a, 0x62, 0x74,
550 0x03, 0x64, 0xc8, 0x74, 551 0x03, 0x64, 0xca, 0x74,
551 0x23, 0x64, 0x48, 0x74, 552 0x23, 0x64, 0x4a, 0x74,
552 0x08, 0x64, 0x5c, 0x74, 553 0x08, 0x64, 0x5e, 0x74,
553 0x61, 0x6a, 0xd8, 0x5e, 554 0x61, 0x6a, 0xda, 0x5e,
554 0x00, 0x65, 0xd8, 0x5d, 555 0x00, 0x65, 0xda, 0x5d,
555 0x08, 0x51, 0xce, 0x71, 556 0x08, 0x51, 0xce, 0x71,
556 0x00, 0x65, 0x40, 0x44, 557 0x00, 0x65, 0x42, 0x44,
557 0x80, 0x04, 0x5a, 0x7c, 558 0x80, 0x04, 0x5c, 0x7c,
558 0x51, 0x6a, 0x5e, 0x5d, 559 0x51, 0x6a, 0x60, 0x5d,
559 0x01, 0x51, 0x5a, 0x64, 560 0x01, 0x51, 0x5c, 0x64,
560 0x01, 0xa4, 0x52, 0x7c, 561 0x01, 0xa4, 0x54, 0x7c,
561 0x80, 0xba, 0x5c, 0x6c, 562 0x80, 0xba, 0x5e, 0x6c,
562 0x41, 0x6a, 0xd8, 0x5e, 563 0x41, 0x6a, 0xda, 0x5e,
563 0x00, 0x65, 0x5c, 0x44, 564 0x00, 0x65, 0x5e, 0x44,
564 0x21, 0x6a, 0xd8, 0x5e, 565 0x21, 0x6a, 0xda, 0x5e,
565 0x00, 0x65, 0x5c, 0x44, 566 0x00, 0x65, 0x5e, 0x44,
566 0x07, 0x6a, 0x54, 0x5d, 567 0x07, 0x6a, 0x56, 0x5d,
567 0x01, 0x06, 0xd4, 0x30, 568 0x01, 0x06, 0xd4, 0x30,
568 0x00, 0x65, 0xcc, 0x41, 569 0x00, 0x65, 0xcc, 0x41,
569 0x80, 0xb8, 0x56, 0x7c, 570 0x80, 0xb8, 0x58, 0x7c,
570 0xc0, 0x3c, 0x6a, 0x7c, 571 0xc0, 0x3c, 0x6c, 0x7c,
571 0x80, 0x3c, 0x56, 0x6c, 572 0x80, 0x3c, 0x58, 0x6c,
572 0xff, 0xa8, 0x6a, 0x6c, 573 0xff, 0xa8, 0x6c, 0x6c,
573 0x40, 0x3c, 0x56, 0x6c, 574 0x40, 0x3c, 0x58, 0x6c,
574 0x10, 0xb8, 0x6e, 0x7c, 575 0x10, 0xb8, 0x70, 0x7c,
575 0xa1, 0x6a, 0xd8, 0x5e, 576 0xa1, 0x6a, 0xda, 0x5e,
576 0x01, 0xb4, 0x74, 0x6c, 577 0x01, 0xb4, 0x76, 0x6c,
577 0x02, 0xb4, 0x76, 0x6c, 578 0x02, 0xb4, 0x78, 0x6c,
578 0x01, 0xa4, 0x76, 0x7c, 579 0x01, 0xa4, 0x78, 0x7c,
579 0xff, 0xa8, 0x86, 0x7c, 580 0xff, 0xa8, 0x88, 0x7c,
580 0x04, 0xb4, 0x68, 0x01, 581 0x04, 0xb4, 0x68, 0x01,
581 0x01, 0x6a, 0x76, 0x00, 582 0x01, 0x6a, 0x76, 0x00,
582 0x00, 0xbb, 0x12, 0x5e, 583 0x00, 0xbb, 0x14, 0x5e,
583 0xff, 0xa8, 0x86, 0x7c, 584 0xff, 0xa8, 0x88, 0x7c,
584 0x71, 0x6a, 0xd8, 0x5e, 585 0x71, 0x6a, 0xda, 0x5e,
585 0x40, 0x51, 0x86, 0x64, 586 0x40, 0x51, 0x88, 0x64,
586 0x00, 0x65, 0xb2, 0x5e, 587 0x00, 0x65, 0xb4, 0x5e,
587 0x00, 0x65, 0xde, 0x41, 588 0x00, 0x65, 0xde, 0x41,
588 0x00, 0xbb, 0x8a, 0x5c, 589 0x00, 0xbb, 0x8c, 0x5c,
589 0x00, 0x65, 0xde, 0x41, 590 0x00, 0x65, 0xde, 0x41,
590 0x00, 0x65, 0xb2, 0x5e, 591 0x00, 0x65, 0xb4, 0x5e,
591 0x01, 0x65, 0xa2, 0x30, 592 0x01, 0x65, 0xa2, 0x30,
592 0x01, 0xf8, 0xc8, 0x30, 593 0x01, 0xf8, 0xc8, 0x30,
593 0x01, 0x4e, 0xc8, 0x30, 594 0x01, 0x4e, 0xc8, 0x30,
594 0x00, 0x6a, 0xb6, 0xdd, 595 0x00, 0x6a, 0xb8, 0xdd,
595 0x00, 0x51, 0xc8, 0x5d, 596 0x00, 0x51, 0xca, 0x5d,
596 0x01, 0x4e, 0x9c, 0x18, 597 0x01, 0x4e, 0x9c, 0x18,
597 0x02, 0x6a, 0x22, 0x05, 598 0x02, 0x6a, 0x22, 0x05,
598 0xc0, 0x3c, 0x56, 0x6c, 599 0xc0, 0x3c, 0x58, 0x6c,
599 0x04, 0xb8, 0x70, 0x01, 600 0x04, 0xb8, 0x70, 0x01,
600 0x00, 0x65, 0xd4, 0x5e, 601 0x00, 0x65, 0xd6, 0x5e,
601 0x20, 0xb8, 0xde, 0x69, 602 0x20, 0xb8, 0xde, 0x69,
602 0x01, 0xbb, 0xa2, 0x30, 603 0x01, 0xbb, 0xa2, 0x30,
603 0x3f, 0xba, 0x7c, 0x08, 604 0x3f, 0xba, 0x7c, 0x08,
604 0x00, 0xb9, 0xce, 0x5c, 605 0x00, 0xb9, 0xd0, 0x5c,
605 0x00, 0x65, 0xde, 0x41, 606 0x00, 0x65, 0xde, 0x41,
606 0x01, 0x06, 0xd4, 0x30, 607 0x01, 0x06, 0xd4, 0x30,
607 0x20, 0x3c, 0xcc, 0x79, 608 0x20, 0x3c, 0xcc, 0x79,
608 0x20, 0x3c, 0x5c, 0x7c, 609 0x20, 0x3c, 0x5e, 0x7c,
609 0x01, 0xa4, 0xb8, 0x7c, 610 0x01, 0xa4, 0xba, 0x7c,
610 0x01, 0xb4, 0x68, 0x01, 611 0x01, 0xb4, 0x68, 0x01,
611 0x00, 0x65, 0xcc, 0x41, 612 0x00, 0x65, 0xcc, 0x41,
612 0x00, 0x65, 0x5c, 0x44, 613 0x00, 0x65, 0x5e, 0x44,
613 0x04, 0x14, 0x58, 0x31, 614 0x04, 0x14, 0x58, 0x31,
614 0x01, 0x06, 0xd4, 0x30, 615 0x01, 0x06, 0xd4, 0x30,
615 0x08, 0xa0, 0x60, 0x31, 616 0x08, 0xa0, 0x60, 0x31,
616 0xac, 0x6a, 0xcc, 0x00, 617 0xac, 0x6a, 0xcc, 0x00,
617 0x14, 0x6a, 0xf4, 0x5d, 618 0x14, 0x6a, 0xf6, 0x5d,
618 0x01, 0x06, 0xd4, 0x30, 619 0x01, 0x06, 0xd4, 0x30,
619 0xa0, 0x6a, 0xec, 0x5d, 620 0xa0, 0x6a, 0xee, 0x5d,
620 0x00, 0x65, 0xcc, 0x41, 621 0x00, 0x65, 0xcc, 0x41,
621 0xdf, 0x3c, 0x78, 0x08, 622 0xdf, 0x3c, 0x78, 0x08,
622 0x12, 0x01, 0x02, 0x00, 623 0x12, 0x01, 0x02, 0x00,
623 0x00, 0x65, 0x5c, 0x44, 624 0x00, 0x65, 0x5e, 0x44,
624 0x4c, 0x65, 0xcc, 0x28, 625 0x4c, 0x65, 0xcc, 0x28,
625 0x01, 0x3e, 0x20, 0x31, 626 0x01, 0x3e, 0x20, 0x31,
626 0xd0, 0x66, 0xcc, 0x18, 627 0xd0, 0x66, 0xcc, 0x18,
@@ -631,102 +632,102 @@ static uint8_t seqprog[] = {
631 0xd0, 0x65, 0xca, 0x18, 632 0xd0, 0x65, 0xca, 0x18,
632 0x01, 0x3e, 0x20, 0x31, 633 0x01, 0x3e, 0x20, 0x31,
633 0x30, 0x65, 0xd4, 0x18, 634 0x30, 0x65, 0xd4, 0x18,
634 0x00, 0x65, 0xe6, 0x4c, 635 0x00, 0x65, 0xe8, 0x4c,
635 0xe1, 0x6a, 0x22, 0x01, 636 0xe1, 0x6a, 0x22, 0x01,
636 0xff, 0x6a, 0xd4, 0x08, 637 0xff, 0x6a, 0xd4, 0x08,
637 0x20, 0x65, 0xd4, 0x18, 638 0x20, 0x65, 0xd4, 0x18,
638 0x00, 0x65, 0xee, 0x54, 639 0x00, 0x65, 0xf0, 0x54,
639 0xe1, 0x6a, 0x22, 0x01, 640 0xe1, 0x6a, 0x22, 0x01,
640 0xff, 0x6a, 0xd4, 0x08, 641 0xff, 0x6a, 0xd4, 0x08,
641 0x20, 0x65, 0xca, 0x18, 642 0x20, 0x65, 0xca, 0x18,
642 0xe0, 0x65, 0xd4, 0x18, 643 0xe0, 0x65, 0xd4, 0x18,
643 0x00, 0x65, 0xf8, 0x4c, 644 0x00, 0x65, 0xfa, 0x4c,
644 0xe1, 0x6a, 0x22, 0x01, 645 0xe1, 0x6a, 0x22, 0x01,
645 0xff, 0x6a, 0xd4, 0x08, 646 0xff, 0x6a, 0xd4, 0x08,
646 0xd0, 0x65, 0xd4, 0x18, 647 0xd0, 0x65, 0xd4, 0x18,
647 0x00, 0x65, 0x00, 0x55, 648 0x00, 0x65, 0x02, 0x55,
648 0xe1, 0x6a, 0x22, 0x01, 649 0xe1, 0x6a, 0x22, 0x01,
649 0xff, 0x6a, 0xd4, 0x08, 650 0xff, 0x6a, 0xd4, 0x08,
650 0x01, 0x6c, 0xa2, 0x30, 651 0x01, 0x6c, 0xa2, 0x30,
651 0xff, 0x51, 0x12, 0x75, 652 0xff, 0x51, 0x14, 0x75,
652 0x00, 0x51, 0x8e, 0x5d, 653 0x00, 0x51, 0x90, 0x5d,
653 0x01, 0x51, 0x20, 0x31, 654 0x01, 0x51, 0x20, 0x31,
654 0x00, 0x65, 0x34, 0x45, 655 0x00, 0x65, 0x36, 0x45,
655 0x3f, 0xba, 0xc8, 0x08, 656 0x3f, 0xba, 0xc8, 0x08,
656 0x00, 0x3e, 0x34, 0x75, 657 0x00, 0x3e, 0x36, 0x75,
657 0x00, 0x65, 0xb0, 0x5e, 658 0x00, 0x65, 0xb2, 0x5e,
658 0x80, 0x3c, 0x78, 0x00, 659 0x80, 0x3c, 0x78, 0x00,
659 0x01, 0x06, 0xd4, 0x30, 660 0x01, 0x06, 0xd4, 0x30,
660 0x00, 0x65, 0xd8, 0x5d, 661 0x00, 0x65, 0xda, 0x5d,
661 0x01, 0x3c, 0x78, 0x00, 662 0x01, 0x3c, 0x78, 0x00,
662 0xe0, 0x3f, 0x50, 0x65, 663 0xe0, 0x3f, 0x52, 0x65,
663 0x02, 0x3c, 0x78, 0x00, 664 0x02, 0x3c, 0x78, 0x00,
664 0x20, 0x12, 0x50, 0x65, 665 0x20, 0x12, 0x52, 0x65,
665 0x51, 0x6a, 0x5e, 0x5d, 666 0x51, 0x6a, 0x60, 0x5d,
666 0x00, 0x51, 0x8e, 0x5d, 667 0x00, 0x51, 0x90, 0x5d,
667 0x51, 0x6a, 0x5e, 0x5d, 668 0x51, 0x6a, 0x60, 0x5d,
668 0x01, 0x51, 0x20, 0x31, 669 0x01, 0x51, 0x20, 0x31,
669 0x04, 0x3c, 0x78, 0x00, 670 0x04, 0x3c, 0x78, 0x00,
670 0x01, 0xb9, 0xc8, 0x30, 671 0x01, 0xb9, 0xc8, 0x30,
671 0x00, 0x3d, 0x4e, 0x65, 672 0x00, 0x3d, 0x50, 0x65,
672 0x08, 0x3c, 0x78, 0x00, 673 0x08, 0x3c, 0x78, 0x00,
673 0x3f, 0xba, 0xc8, 0x08, 674 0x3f, 0xba, 0xc8, 0x08,
674 0x00, 0x3e, 0x4e, 0x65, 675 0x00, 0x3e, 0x50, 0x65,
675 0x10, 0x3c, 0x78, 0x00, 676 0x10, 0x3c, 0x78, 0x00,
676 0x04, 0xb8, 0x4e, 0x7d, 677 0x04, 0xb8, 0x50, 0x7d,
677 0xfb, 0xb8, 0x70, 0x09, 678 0xfb, 0xb8, 0x70, 0x09,
678 0x20, 0xb8, 0x44, 0x6d, 679 0x20, 0xb8, 0x46, 0x6d,
679 0x01, 0x90, 0xc8, 0x30, 680 0x01, 0x90, 0xc8, 0x30,
680 0xff, 0x6a, 0xa2, 0x00, 681 0xff, 0x6a, 0xa2, 0x00,
681 0x00, 0x3d, 0xce, 0x5c, 682 0x00, 0x3d, 0xd0, 0x5c,
682 0x01, 0x64, 0x20, 0x31, 683 0x01, 0x64, 0x20, 0x31,
683 0xff, 0x6a, 0x78, 0x08, 684 0xff, 0x6a, 0x78, 0x08,
684 0x00, 0x65, 0xea, 0x58, 685 0x00, 0x65, 0xea, 0x58,
685 0x10, 0xb8, 0x5c, 0x7c, 686 0x10, 0xb8, 0x5e, 0x7c,
686 0xff, 0x6a, 0x54, 0x5d, 687 0xff, 0x6a, 0x56, 0x5d,
687 0x00, 0x65, 0x5c, 0x44, 688 0x00, 0x65, 0x5e, 0x44,
688 0x00, 0x65, 0xb0, 0x5e, 689 0x00, 0x65, 0xb2, 0x5e,
689 0x31, 0x6a, 0xd8, 0x5e, 690 0x31, 0x6a, 0xda, 0x5e,
690 0x00, 0x65, 0x5c, 0x44, 691 0x00, 0x65, 0x5e, 0x44,
691 0x10, 0x3f, 0x06, 0x00, 692 0x10, 0x3f, 0x06, 0x00,
692 0x10, 0x6a, 0x06, 0x00, 693 0x10, 0x6a, 0x06, 0x00,
693 0x01, 0x65, 0x74, 0x34, 694 0x01, 0x65, 0x74, 0x34,
694 0x81, 0x6a, 0xd8, 0x5e, 695 0x81, 0x6a, 0xda, 0x5e,
695 0x00, 0x65, 0x60, 0x45, 696 0x00, 0x65, 0x62, 0x45,
696 0x01, 0x06, 0xd4, 0x30, 697 0x01, 0x06, 0xd4, 0x30,
697 0x01, 0x0c, 0x60, 0x7d, 698 0x01, 0x0c, 0x62, 0x7d,
698 0x04, 0x0c, 0x5a, 0x6d, 699 0x04, 0x0c, 0x5c, 0x6d,
699 0xe0, 0x03, 0x7e, 0x08, 700 0xe0, 0x03, 0x7e, 0x08,
700 0xe0, 0x3f, 0xcc, 0x61, 701 0xe0, 0x3f, 0xcc, 0x61,
701 0x01, 0x65, 0xcc, 0x30, 702 0x01, 0x65, 0xcc, 0x30,
702 0x01, 0x12, 0xda, 0x34, 703 0x01, 0x12, 0xda, 0x34,
703 0x01, 0x06, 0xd4, 0x34, 704 0x01, 0x06, 0xd4, 0x34,
704 0x01, 0x03, 0x6e, 0x6d, 705 0x01, 0x03, 0x70, 0x6d,
705 0x40, 0x03, 0xcc, 0x08, 706 0x40, 0x03, 0xcc, 0x08,
706 0x01, 0x65, 0x06, 0x30, 707 0x01, 0x65, 0x06, 0x30,
707 0x40, 0x65, 0xc8, 0x08, 708 0x40, 0x65, 0xc8, 0x08,
708 0x00, 0x66, 0x7c, 0x75, 709 0x00, 0x66, 0x7e, 0x75,
709 0x40, 0x65, 0x7c, 0x7d, 710 0x40, 0x65, 0x7e, 0x7d,
710 0x00, 0x65, 0x7c, 0x5d, 711 0x00, 0x65, 0x7e, 0x5d,
711 0xff, 0x6a, 0xd4, 0x08, 712 0xff, 0x6a, 0xd4, 0x08,
712 0xff, 0x6a, 0xd4, 0x08, 713 0xff, 0x6a, 0xd4, 0x08,
713 0xff, 0x6a, 0xd4, 0x08, 714 0xff, 0x6a, 0xd4, 0x08,
714 0xff, 0x6a, 0xd4, 0x0c, 715 0xff, 0x6a, 0xd4, 0x0c,
715 0x08, 0x01, 0x02, 0x00, 716 0x08, 0x01, 0x02, 0x00,
716 0x02, 0x0b, 0x86, 0x7d, 717 0x02, 0x0b, 0x88, 0x7d,
717 0x01, 0x65, 0x0c, 0x30, 718 0x01, 0x65, 0x0c, 0x30,
718 0x02, 0x0b, 0x8a, 0x7d, 719 0x02, 0x0b, 0x8c, 0x7d,
719 0xf7, 0x01, 0x02, 0x0c, 720 0xf7, 0x01, 0x02, 0x0c,
720 0x01, 0x65, 0xc8, 0x30, 721 0x01, 0x65, 0xc8, 0x30,
721 0xff, 0x41, 0xae, 0x75, 722 0xff, 0x41, 0xb0, 0x75,
722 0x01, 0x41, 0x20, 0x31, 723 0x01, 0x41, 0x20, 0x31,
723 0xff, 0x6a, 0xa4, 0x00, 724 0xff, 0x6a, 0xa4, 0x00,
724 0x00, 0x65, 0x9e, 0x45, 725 0x00, 0x65, 0xa0, 0x45,
725 0xff, 0xbf, 0xae, 0x75, 726 0xff, 0xbf, 0xb0, 0x75,
726 0x01, 0x90, 0xa4, 0x30, 727 0x01, 0x90, 0xa4, 0x30,
727 0x01, 0xbf, 0x20, 0x31, 728 0x01, 0xbf, 0x20, 0x31,
728 0x00, 0xbb, 0x98, 0x65, 729 0x00, 0xbb, 0x9a, 0x65,
729 0xff, 0x52, 0xac, 0x75, 730 0xff, 0x52, 0xae, 0x75,
730 0x01, 0xbf, 0xcc, 0x30, 731 0x01, 0xbf, 0xcc, 0x30,
731 0x01, 0x90, 0xca, 0x30, 732 0x01, 0x90, 0xca, 0x30,
732 0x01, 0x52, 0x20, 0x31, 733 0x01, 0x52, 0x20, 0x31,
@@ -734,28 +735,28 @@ static uint8_t seqprog[] = {
734 0x01, 0x65, 0x20, 0x35, 735 0x01, 0x65, 0x20, 0x35,
735 0x01, 0xbf, 0x82, 0x34, 736 0x01, 0xbf, 0x82, 0x34,
736 0x01, 0x64, 0xa2, 0x30, 737 0x01, 0x64, 0xa2, 0x30,
737 0x00, 0x6a, 0xc0, 0x5e, 738 0x00, 0x6a, 0xc2, 0x5e,
738 0x0d, 0x6a, 0x76, 0x00, 739 0x0d, 0x6a, 0x76, 0x00,
739 0x00, 0x51, 0x12, 0x46, 740 0x00, 0x51, 0x14, 0x46,
740 0x01, 0x65, 0xa4, 0x30, 741 0x01, 0x65, 0xa4, 0x30,
741 0xe0, 0x6a, 0xcc, 0x00, 742 0xe0, 0x6a, 0xcc, 0x00,
742 0x48, 0x6a, 0x06, 0x5e, 743 0x48, 0x6a, 0x08, 0x5e,
743 0x01, 0x6a, 0xd0, 0x01, 744 0x01, 0x6a, 0xd0, 0x01,
744 0x01, 0x6a, 0xdc, 0x05, 745 0x01, 0x6a, 0xdc, 0x05,
745 0x88, 0x6a, 0xcc, 0x00, 746 0x88, 0x6a, 0xcc, 0x00,
746 0x48, 0x6a, 0x06, 0x5e, 747 0x48, 0x6a, 0x08, 0x5e,
747 0x01, 0x6a, 0xe0, 0x5d, 748 0x01, 0x6a, 0xe2, 0x5d,
748 0x01, 0x6a, 0x26, 0x05, 749 0x01, 0x6a, 0x26, 0x05,
749 0x01, 0x65, 0xd8, 0x31, 750 0x01, 0x65, 0xd8, 0x31,
750 0x09, 0xee, 0xdc, 0x01, 751 0x09, 0xee, 0xdc, 0x01,
751 0x80, 0xee, 0xcc, 0x7d, 752 0x80, 0xee, 0xce, 0x7d,
752 0xff, 0x6a, 0xdc, 0x0d, 753 0xff, 0x6a, 0xdc, 0x0d,
753 0x01, 0x65, 0x32, 0x31, 754 0x01, 0x65, 0x32, 0x31,
754 0x0a, 0x93, 0x26, 0x01, 755 0x0a, 0x93, 0x26, 0x01,
755 0x00, 0x65, 0xa8, 0x46, 756 0x00, 0x65, 0xaa, 0x46,
756 0x81, 0x6a, 0xd8, 0x5e, 757 0x81, 0x6a, 0xda, 0x5e,
757 0x01, 0x0c, 0xd8, 0x7d, 758 0x01, 0x0c, 0xda, 0x7d,
758 0x04, 0x0c, 0xd6, 0x6d, 759 0x04, 0x0c, 0xd8, 0x6d,
759 0xe0, 0x03, 0x06, 0x08, 760 0xe0, 0x03, 0x06, 0x08,
760 0xe0, 0x03, 0x7e, 0x0c, 761 0xe0, 0x03, 0x7e, 0x0c,
761 0x01, 0x65, 0x18, 0x31, 762 0x01, 0x65, 0x18, 0x31,
@@ -774,7 +775,7 @@ static uint8_t seqprog[] = {
774 0x01, 0x6c, 0xda, 0x34, 775 0x01, 0x6c, 0xda, 0x34,
775 0x3d, 0x64, 0xa4, 0x28, 776 0x3d, 0x64, 0xa4, 0x28,
776 0x55, 0x64, 0xc8, 0x28, 777 0x55, 0x64, 0xc8, 0x28,
777 0x00, 0x65, 0x06, 0x46, 778 0x00, 0x65, 0x08, 0x46,
778 0x2e, 0x64, 0xa4, 0x28, 779 0x2e, 0x64, 0xa4, 0x28,
779 0x66, 0x64, 0xc8, 0x28, 780 0x66, 0x64, 0xc8, 0x28,
780 0x00, 0x6c, 0xda, 0x18, 781 0x00, 0x6c, 0xda, 0x18,
@@ -785,63 +786,63 @@ static uint8_t seqprog[] = {
785 0x00, 0x6c, 0xda, 0x24, 786 0x00, 0x6c, 0xda, 0x24,
786 0x01, 0x65, 0xc8, 0x30, 787 0x01, 0x65, 0xc8, 0x30,
787 0xe0, 0x6a, 0xcc, 0x00, 788 0xe0, 0x6a, 0xcc, 0x00,
788 0x44, 0x6a, 0x02, 0x5e, 789 0x44, 0x6a, 0x04, 0x5e,
789 0x01, 0x90, 0xe2, 0x31, 790 0x01, 0x90, 0xe2, 0x31,
790 0x04, 0x3b, 0x26, 0x7e, 791 0x04, 0x3b, 0x28, 0x7e,
791 0x30, 0x6a, 0xd0, 0x01, 792 0x30, 0x6a, 0xd0, 0x01,
792 0x20, 0x6a, 0xd0, 0x01, 793 0x20, 0x6a, 0xd0, 0x01,
793 0x1d, 0x6a, 0xdc, 0x01, 794 0x1d, 0x6a, 0xdc, 0x01,
794 0xdc, 0xee, 0x22, 0x66, 795 0xdc, 0xee, 0x24, 0x66,
795 0x00, 0x65, 0x3e, 0x46, 796 0x00, 0x65, 0x40, 0x46,
796 0x20, 0x6a, 0xd0, 0x01, 797 0x20, 0x6a, 0xd0, 0x01,
797 0x01, 0x6a, 0xdc, 0x01, 798 0x01, 0x6a, 0xdc, 0x01,
798 0x20, 0xa0, 0xd8, 0x31, 799 0x20, 0xa0, 0xd8, 0x31,
799 0x09, 0xee, 0xdc, 0x01, 800 0x09, 0xee, 0xdc, 0x01,
800 0x80, 0xee, 0x2e, 0x7e, 801 0x80, 0xee, 0x30, 0x7e,
801 0x11, 0x6a, 0xdc, 0x01, 802 0x11, 0x6a, 0xdc, 0x01,
802 0x50, 0xee, 0x32, 0x66, 803 0x50, 0xee, 0x34, 0x66,
803 0x20, 0x6a, 0xd0, 0x01, 804 0x20, 0x6a, 0xd0, 0x01,
804 0x09, 0x6a, 0xdc, 0x01, 805 0x09, 0x6a, 0xdc, 0x01,
805 0x88, 0xee, 0x38, 0x66, 806 0x88, 0xee, 0x3a, 0x66,
806 0x19, 0x6a, 0xdc, 0x01, 807 0x19, 0x6a, 0xdc, 0x01,
807 0xd8, 0xee, 0x3c, 0x66, 808 0xd8, 0xee, 0x3e, 0x66,
808 0xff, 0x6a, 0xdc, 0x09, 809 0xff, 0x6a, 0xdc, 0x09,
809 0x18, 0xee, 0x40, 0x6e, 810 0x18, 0xee, 0x42, 0x6e,
810 0xff, 0x6a, 0xd4, 0x0c, 811 0xff, 0x6a, 0xd4, 0x0c,
811 0x88, 0x6a, 0xcc, 0x00, 812 0x88, 0x6a, 0xcc, 0x00,
812 0x44, 0x6a, 0x02, 0x5e, 813 0x44, 0x6a, 0x04, 0x5e,
813 0x20, 0x6a, 0xe0, 0x5d, 814 0x20, 0x6a, 0xe2, 0x5d,
814 0x01, 0x3b, 0x26, 0x31, 815 0x01, 0x3b, 0x26, 0x31,
815 0x04, 0x3b, 0x5a, 0x6e, 816 0x04, 0x3b, 0x5c, 0x6e,
816 0xa0, 0x6a, 0xca, 0x00, 817 0xa0, 0x6a, 0xca, 0x00,
817 0x20, 0x65, 0xc8, 0x18, 818 0x20, 0x65, 0xc8, 0x18,
818 0x00, 0x65, 0x98, 0x5e, 819 0x00, 0x65, 0x9a, 0x5e,
819 0x00, 0x65, 0x52, 0x66, 820 0x00, 0x65, 0x54, 0x66,
820 0x0a, 0x93, 0x26, 0x01, 821 0x0a, 0x93, 0x26, 0x01,
821 0x00, 0x65, 0xa8, 0x46, 822 0x00, 0x65, 0xaa, 0x46,
822 0xa0, 0x6a, 0xcc, 0x00, 823 0xa0, 0x6a, 0xcc, 0x00,
823 0xff, 0x6a, 0xc8, 0x08, 824 0xff, 0x6a, 0xc8, 0x08,
824 0x20, 0x94, 0x5e, 0x6e, 825 0x20, 0x94, 0x60, 0x6e,
825 0x10, 0x94, 0x60, 0x6e, 826 0x10, 0x94, 0x62, 0x6e,
826 0x08, 0x94, 0x7a, 0x6e, 827 0x08, 0x94, 0x7c, 0x6e,
827 0x08, 0x94, 0x7a, 0x6e, 828 0x08, 0x94, 0x7c, 0x6e,
828 0x08, 0x94, 0x7a, 0x6e, 829 0x08, 0x94, 0x7c, 0x6e,
829 0xff, 0x8c, 0xc8, 0x10, 830 0xff, 0x8c, 0xc8, 0x10,
830 0xc1, 0x64, 0xc8, 0x18, 831 0xc1, 0x64, 0xc8, 0x18,
831 0xf8, 0x64, 0xc8, 0x08, 832 0xf8, 0x64, 0xc8, 0x08,
832 0x01, 0x99, 0xda, 0x30, 833 0x01, 0x99, 0xda, 0x30,
833 0x00, 0x66, 0x6e, 0x66, 834 0x00, 0x66, 0x70, 0x66,
834 0xc0, 0x66, 0xaa, 0x76, 835 0xc0, 0x66, 0xac, 0x76,
835 0x60, 0x66, 0xc8, 0x18, 836 0x60, 0x66, 0xc8, 0x18,
836 0x3d, 0x64, 0xc8, 0x28, 837 0x3d, 0x64, 0xc8, 0x28,
837 0x00, 0x65, 0x5e, 0x46, 838 0x00, 0x65, 0x60, 0x46,
838 0xf7, 0x93, 0x26, 0x09, 839 0xf7, 0x93, 0x26, 0x09,
839 0x08, 0x93, 0x7c, 0x6e, 840 0x08, 0x93, 0x7e, 0x6e,
840 0x00, 0x62, 0xc4, 0x18, 841 0x00, 0x62, 0xc4, 0x18,
841 0x00, 0x65, 0xa8, 0x5e, 842 0x00, 0x65, 0xaa, 0x5e,
842 0x00, 0x65, 0x88, 0x5e, 843 0x00, 0x65, 0x8a, 0x5e,
843 0x00, 0x65, 0x88, 0x5e, 844 0x00, 0x65, 0x8a, 0x5e,
844 0x00, 0x65, 0x88, 0x5e, 845 0x00, 0x65, 0x8a, 0x5e,
845 0x01, 0x99, 0xda, 0x30, 846 0x01, 0x99, 0xda, 0x30,
846 0x01, 0x99, 0xda, 0x30, 847 0x01, 0x99, 0xda, 0x30,
847 0x01, 0x99, 0xda, 0x30, 848 0x01, 0x99, 0xda, 0x30,
@@ -858,11 +859,11 @@ static uint8_t seqprog[] = {
858 0x01, 0x6c, 0x32, 0x31, 859 0x01, 0x6c, 0x32, 0x31,
859 0x01, 0x6c, 0x32, 0x31, 860 0x01, 0x6c, 0x32, 0x31,
860 0x01, 0x6c, 0x32, 0x35, 861 0x01, 0x6c, 0x32, 0x35,
861 0x08, 0x94, 0xa8, 0x7e, 862 0x08, 0x94, 0xaa, 0x7e,
862 0xf7, 0x93, 0x26, 0x09, 863 0xf7, 0x93, 0x26, 0x09,
863 0x08, 0x93, 0xac, 0x6e, 864 0x08, 0x93, 0xae, 0x6e,
864 0xff, 0x6a, 0xd4, 0x0c, 865 0xff, 0x6a, 0xd4, 0x0c,
865 0x04, 0xb8, 0xd4, 0x6e, 866 0x04, 0xb8, 0xd6, 0x6e,
866 0x01, 0x42, 0x7e, 0x31, 867 0x01, 0x42, 0x7e, 0x31,
867 0xff, 0x6a, 0x76, 0x01, 868 0xff, 0x6a, 0x76, 0x01,
868 0x01, 0x90, 0x84, 0x34, 869 0x01, 0x90, 0x84, 0x34,
@@ -870,14 +871,14 @@ static uint8_t seqprog[] = {
870 0x01, 0x85, 0x0a, 0x01, 871 0x01, 0x85, 0x0a, 0x01,
871 0x7f, 0x65, 0x10, 0x09, 872 0x7f, 0x65, 0x10, 0x09,
872 0xfe, 0x85, 0x0a, 0x0d, 873 0xfe, 0x85, 0x0a, 0x0d,
873 0xff, 0x42, 0xd0, 0x66, 874 0xff, 0x42, 0xd2, 0x66,
874 0xff, 0x41, 0xc8, 0x66, 875 0xff, 0x41, 0xca, 0x66,
875 0xd1, 0x6a, 0xd8, 0x5e, 876 0xd1, 0x6a, 0xda, 0x5e,
876 0xff, 0x6a, 0xca, 0x04, 877 0xff, 0x6a, 0xca, 0x04,
877 0x01, 0x41, 0x20, 0x31, 878 0x01, 0x41, 0x20, 0x31,
878 0x01, 0xbf, 0x82, 0x30, 879 0x01, 0xbf, 0x82, 0x30,
879 0x01, 0x6a, 0x76, 0x00, 880 0x01, 0x6a, 0x76, 0x00,
880 0x00, 0xbb, 0x12, 0x46, 881 0x00, 0xbb, 0x14, 0x46,
881 0x01, 0x42, 0x20, 0x31, 882 0x01, 0x42, 0x20, 0x31,
882 0x01, 0xbf, 0x84, 0x34, 883 0x01, 0xbf, 0x84, 0x34,
883 0x01, 0x41, 0x7e, 0x31, 884 0x01, 0x41, 0x7e, 0x31,
@@ -941,7 +942,7 @@ static ahc_patch_func_t ahc_patch17_func;
941static int 942static int
942ahc_patch17_func(struct ahc_softc *ahc) 943ahc_patch17_func(struct ahc_softc *ahc)
943{ 944{
944 return ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0); 945 return ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0);
945} 946}
946 947
947static ahc_patch_func_t ahc_patch16_func; 948static ahc_patch_func_t ahc_patch16_func;
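The ahc_patchN_func predicates above, together with the patch table in the following hunk, decide which conditionally assembled sequencer instructions are actually downloaded: each table entry names a predicate, the first instruction of its block, how many instructions to drop if the predicate fails, and how many subsequent patch entries become irrelevant in that case. The code below is only a minimal sketch of walking such a table; apply_patches() and instr_keep[] are invented names for illustration, not the driver's real loader.

/*
 * Minimal sketch only: how a table of { predicate, begin, skip_instr,
 * skip_patch } entries like the one below could be used to decide which
 * sequencer instructions to download.  apply_patches() and instr_keep[]
 * are invented for illustration; this is not the driver's loader.
 */
struct ahc_softc;			/* opaque here */
typedef int ahc_patch_func_t(struct ahc_softc *ahc);

struct patch {
	ahc_patch_func_t *patch_func;	/* keep the block if nonzero */
	unsigned int begin;		/* first instruction of the block */
	unsigned int skip_instr;	/* instructions dropped on failure */
	unsigned int skip_patch;	/* patch entries skipped on failure */
};

static void
apply_patches(struct ahc_softc *ahc, const struct patch *patches,
	      unsigned int num_patches, unsigned char *instr_keep,
	      unsigned int num_instr)
{
	unsigned int cur, i;

	for (i = 0; i < num_instr; i++)
		instr_keep[i] = 1;

	cur = 0;
	while (cur < num_patches) {
		const struct patch *p = &patches[cur];

		if (p->patch_func(ahc) != 0) {
			cur++;			/* block kept, next entry */
			continue;
		}
		for (i = 0; i < p->skip_instr && p->begin + i < num_instr; i++)
			instr_keep[p->begin + i] = 0;
		cur += p->skip_patch;		/* skip dependent entries */
	}
}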
@@ -1142,152 +1143,152 @@ static struct patch {
1142 { ahc_patch0_func, 196, 1, 1 }, 1143 { ahc_patch0_func, 196, 1, 1 },
1143 { ahc_patch9_func, 212, 6, 2 }, 1144 { ahc_patch9_func, 212, 6, 2 },
1144 { ahc_patch0_func, 218, 6, 1 }, 1145 { ahc_patch0_func, 218, 6, 1 },
1145 { ahc_patch8_func, 226, 20, 2 }, 1146 { ahc_patch8_func, 226, 21, 2 },
1146 { ahc_patch1_func, 241, 1, 1 }, 1147 { ahc_patch1_func, 241, 1, 1 },
1147 { ahc_patch1_func, 248, 1, 2 }, 1148 { ahc_patch1_func, 249, 1, 2 },
1148 { ahc_patch0_func, 249, 2, 2 }, 1149 { ahc_patch0_func, 250, 2, 2 },
1149 { ahc_patch11_func, 250, 1, 1 }, 1150 { ahc_patch11_func, 251, 1, 1 },
1150 { ahc_patch9_func, 258, 27, 3 }, 1151 { ahc_patch9_func, 259, 27, 3 },
1151 { ahc_patch1_func, 274, 10, 2 }, 1152 { ahc_patch1_func, 275, 10, 2 },
1152 { ahc_patch13_func, 277, 1, 1 }, 1153 { ahc_patch13_func, 278, 1, 1 },
1153 { ahc_patch14_func, 285, 14, 1 }, 1154 { ahc_patch14_func, 286, 14, 1 },
1154 { ahc_patch1_func, 301, 1, 2 }, 1155 { ahc_patch1_func, 302, 1, 2 },
1155 { ahc_patch0_func, 302, 1, 1 }, 1156 { ahc_patch0_func, 303, 1, 1 },
1156 { ahc_patch9_func, 305, 1, 1 }, 1157 { ahc_patch9_func, 306, 1, 1 },
1157 { ahc_patch13_func, 310, 1, 1 }, 1158 { ahc_patch13_func, 311, 1, 1 },
1158 { ahc_patch9_func, 311, 2, 2 }, 1159 { ahc_patch9_func, 312, 2, 2 },
1159 { ahc_patch0_func, 313, 4, 1 }, 1160 { ahc_patch0_func, 314, 4, 1 },
1160 { ahc_patch14_func, 317, 1, 1 }, 1161 { ahc_patch14_func, 318, 1, 1 },
1161 { ahc_patch15_func, 319, 2, 3 }, 1162 { ahc_patch15_func, 320, 2, 3 },
1162 { ahc_patch9_func, 319, 1, 2 }, 1163 { ahc_patch9_func, 320, 1, 2 },
1163 { ahc_patch0_func, 320, 1, 1 }, 1164 { ahc_patch0_func, 321, 1, 1 },
1164 { ahc_patch6_func, 325, 1, 2 }, 1165 { ahc_patch6_func, 326, 1, 2 },
1165 { ahc_patch0_func, 326, 1, 1 }, 1166 { ahc_patch0_func, 327, 1, 1 },
1166 { ahc_patch1_func, 330, 47, 11 }, 1167 { ahc_patch1_func, 331, 47, 11 },
1167 { ahc_patch6_func, 337, 2, 4 }, 1168 { ahc_patch6_func, 338, 2, 4 },
1168 { ahc_patch7_func, 337, 1, 1 }, 1169 { ahc_patch7_func, 338, 1, 1 },
1169 { ahc_patch8_func, 338, 1, 1 }, 1170 { ahc_patch8_func, 339, 1, 1 },
1170 { ahc_patch0_func, 339, 1, 1 }, 1171 { ahc_patch0_func, 340, 1, 1 },
1171 { ahc_patch16_func, 340, 1, 1 }, 1172 { ahc_patch16_func, 341, 1, 1 },
1172 { ahc_patch6_func, 356, 6, 3 }, 1173 { ahc_patch6_func, 357, 6, 3 },
1173 { ahc_patch16_func, 356, 5, 1 }, 1174 { ahc_patch16_func, 357, 5, 1 },
1174 { ahc_patch0_func, 362, 7, 1 }, 1175 { ahc_patch0_func, 363, 7, 1 },
1175 { ahc_patch13_func, 372, 5, 1 }, 1176 { ahc_patch13_func, 373, 5, 1 },
1176 { ahc_patch0_func, 377, 52, 17 }, 1177 { ahc_patch0_func, 378, 52, 17 },
1177 { ahc_patch14_func, 377, 1, 1 }, 1178 { ahc_patch14_func, 378, 1, 1 },
1178 { ahc_patch7_func, 379, 2, 2 }, 1179 { ahc_patch7_func, 380, 2, 2 },
1179 { ahc_patch17_func, 380, 1, 1 }, 1180 { ahc_patch17_func, 381, 1, 1 },
1180 { ahc_patch9_func, 383, 1, 1 }, 1181 { ahc_patch9_func, 384, 1, 1 },
1181 { ahc_patch18_func, 390, 1, 1 }, 1182 { ahc_patch18_func, 391, 1, 1 },
1182 { ahc_patch14_func, 395, 9, 3 }, 1183 { ahc_patch14_func, 396, 9, 3 },
1183 { ahc_patch9_func, 396, 3, 2 }, 1184 { ahc_patch9_func, 397, 3, 2 },
1184 { ahc_patch0_func, 399, 3, 1 }, 1185 { ahc_patch0_func, 400, 3, 1 },
1185 { ahc_patch9_func, 407, 6, 2 }, 1186 { ahc_patch9_func, 408, 6, 2 },
1186 { ahc_patch0_func, 413, 9, 2 }, 1187 { ahc_patch0_func, 414, 9, 2 },
1187 { ahc_patch13_func, 413, 1, 1 }, 1188 { ahc_patch13_func, 414, 1, 1 },
1188 { ahc_patch13_func, 422, 2, 1 }, 1189 { ahc_patch13_func, 423, 2, 1 },
1189 { ahc_patch14_func, 424, 1, 1 }, 1190 { ahc_patch14_func, 425, 1, 1 },
1190 { ahc_patch9_func, 426, 1, 2 }, 1191 { ahc_patch9_func, 427, 1, 2 },
1191 { ahc_patch0_func, 427, 1, 1 }, 1192 { ahc_patch0_func, 428, 1, 1 },
1192 { ahc_patch7_func, 428, 1, 1 },
1193 { ahc_patch7_func, 429, 1, 1 }, 1193 { ahc_patch7_func, 429, 1, 1 },
1194 { ahc_patch8_func, 430, 3, 3 }, 1194 { ahc_patch7_func, 430, 1, 1 },
1195 { ahc_patch6_func, 431, 1, 2 }, 1195 { ahc_patch8_func, 431, 3, 3 },
1196 { ahc_patch0_func, 432, 1, 1 }, 1196 { ahc_patch6_func, 432, 1, 2 },
1197 { ahc_patch9_func, 433, 1, 1 }, 1197 { ahc_patch0_func, 433, 1, 1 },
1198 { ahc_patch15_func, 434, 1, 2 }, 1198 { ahc_patch9_func, 434, 1, 1 },
1199 { ahc_patch13_func, 434, 1, 1 }, 1199 { ahc_patch15_func, 435, 1, 2 },
1200 { ahc_patch14_func, 436, 9, 4 }, 1200 { ahc_patch13_func, 435, 1, 1 },
1201 { ahc_patch9_func, 436, 1, 1 }, 1201 { ahc_patch14_func, 437, 9, 4 },
1202 { ahc_patch9_func, 443, 2, 1 }, 1202 { ahc_patch9_func, 437, 1, 1 },
1203 { ahc_patch0_func, 445, 4, 3 }, 1203 { ahc_patch9_func, 444, 2, 1 },
1204 { ahc_patch9_func, 445, 1, 2 }, 1204 { ahc_patch0_func, 446, 4, 3 },
1205 { ahc_patch0_func, 446, 3, 1 }, 1205 { ahc_patch9_func, 446, 1, 2 },
1206 { ahc_patch1_func, 450, 2, 1 }, 1206 { ahc_patch0_func, 447, 3, 1 },
1207 { ahc_patch7_func, 452, 10, 2 }, 1207 { ahc_patch1_func, 451, 2, 1 },
1208 { ahc_patch0_func, 462, 1, 1 }, 1208 { ahc_patch7_func, 453, 10, 2 },
1209 { ahc_patch8_func, 463, 118, 22 }, 1209 { ahc_patch0_func, 463, 1, 1 },
1210 { ahc_patch1_func, 465, 3, 2 }, 1210 { ahc_patch8_func, 464, 118, 22 },
1211 { ahc_patch0_func, 468, 5, 3 }, 1211 { ahc_patch1_func, 466, 3, 2 },
1212 { ahc_patch9_func, 468, 2, 2 }, 1212 { ahc_patch0_func, 469, 5, 3 },
1213 { ahc_patch0_func, 470, 3, 1 }, 1213 { ahc_patch9_func, 469, 2, 2 },
1214 { ahc_patch1_func, 475, 2, 2 }, 1214 { ahc_patch0_func, 471, 3, 1 },
1215 { ahc_patch0_func, 477, 6, 3 }, 1215 { ahc_patch1_func, 476, 2, 2 },
1216 { ahc_patch9_func, 477, 2, 2 }, 1216 { ahc_patch0_func, 478, 6, 3 },
1217 { ahc_patch0_func, 479, 3, 1 }, 1217 { ahc_patch9_func, 478, 2, 2 },
1218 { ahc_patch1_func, 485, 2, 2 }, 1218 { ahc_patch0_func, 480, 3, 1 },
1219 { ahc_patch0_func, 487, 9, 7 }, 1219 { ahc_patch1_func, 486, 2, 2 },
1220 { ahc_patch9_func, 487, 5, 6 }, 1220 { ahc_patch0_func, 488, 9, 7 },
1221 { ahc_patch19_func, 487, 1, 2 }, 1221 { ahc_patch9_func, 488, 5, 6 },
1222 { ahc_patch0_func, 488, 1, 1 }, 1222 { ahc_patch19_func, 488, 1, 2 },
1223 { ahc_patch19_func, 490, 1, 2 }, 1223 { ahc_patch0_func, 489, 1, 1 },
1224 { ahc_patch0_func, 491, 1, 1 }, 1224 { ahc_patch19_func, 491, 1, 2 },
1225 { ahc_patch0_func, 492, 4, 1 }, 1225 { ahc_patch0_func, 492, 1, 1 },
1226 { ahc_patch6_func, 497, 3, 2 }, 1226 { ahc_patch0_func, 493, 4, 1 },
1227 { ahc_patch0_func, 500, 1, 1 }, 1227 { ahc_patch6_func, 498, 3, 2 },
1228 { ahc_patch6_func, 510, 1, 2 }, 1228 { ahc_patch0_func, 501, 1, 1 },
1229 { ahc_patch0_func, 511, 1, 1 }, 1229 { ahc_patch6_func, 511, 1, 2 },
1230 { ahc_patch20_func, 548, 7, 1 }, 1230 { ahc_patch0_func, 512, 1, 1 },
1231 { ahc_patch3_func, 583, 1, 2 }, 1231 { ahc_patch20_func, 549, 7, 1 },
1232 { ahc_patch0_func, 584, 1, 1 }, 1232 { ahc_patch3_func, 584, 1, 2 },
1233 { ahc_patch21_func, 587, 1, 1 }, 1233 { ahc_patch0_func, 585, 1, 1 },
1234 { ahc_patch8_func, 589, 106, 33 }, 1234 { ahc_patch21_func, 588, 1, 1 },
1235 { ahc_patch4_func, 591, 1, 1 }, 1235 { ahc_patch8_func, 590, 106, 33 },
1236 { ahc_patch1_func, 597, 2, 2 }, 1236 { ahc_patch4_func, 592, 1, 1 },
1237 { ahc_patch0_func, 599, 1, 1 }, 1237 { ahc_patch1_func, 598, 2, 2 },
1238 { ahc_patch1_func, 602, 1, 2 }, 1238 { ahc_patch0_func, 600, 1, 1 },
1239 { ahc_patch0_func, 603, 1, 1 }, 1239 { ahc_patch1_func, 603, 1, 2 },
1240 { ahc_patch9_func, 604, 3, 3 }, 1240 { ahc_patch0_func, 604, 1, 1 },
1241 { ahc_patch15_func, 605, 1, 1 }, 1241 { ahc_patch9_func, 605, 3, 3 },
1242 { ahc_patch0_func, 607, 4, 1 }, 1242 { ahc_patch15_func, 606, 1, 1 },
1243 { ahc_patch19_func, 616, 2, 2 }, 1243 { ahc_patch0_func, 608, 4, 1 },
1244 { ahc_patch0_func, 618, 1, 1 }, 1244 { ahc_patch19_func, 617, 2, 2 },
1245 { ahc_patch19_func, 622, 10, 3 }, 1245 { ahc_patch0_func, 619, 1, 1 },
1246 { ahc_patch5_func, 624, 8, 1 }, 1246 { ahc_patch19_func, 623, 10, 3 },
1247 { ahc_patch0_func, 632, 9, 2 }, 1247 { ahc_patch5_func, 625, 8, 1 },
1248 { ahc_patch5_func, 633, 8, 1 }, 1248 { ahc_patch0_func, 633, 9, 2 },
1249 { ahc_patch4_func, 643, 1, 2 }, 1249 { ahc_patch5_func, 634, 8, 1 },
1250 { ahc_patch0_func, 644, 1, 1 }, 1250 { ahc_patch4_func, 644, 1, 2 },
1251 { ahc_patch19_func, 645, 1, 2 }, 1251 { ahc_patch0_func, 645, 1, 1 },
1252 { ahc_patch0_func, 646, 3, 2 }, 1252 { ahc_patch19_func, 646, 1, 2 },
1253 { ahc_patch4_func, 648, 1, 1 }, 1253 { ahc_patch0_func, 647, 3, 2 },
1254 { ahc_patch5_func, 649, 1, 1 }, 1254 { ahc_patch4_func, 649, 1, 1 },
1255 { ahc_patch5_func, 652, 1, 1 }, 1255 { ahc_patch5_func, 650, 1, 1 },
1256 { ahc_patch5_func, 654, 1, 1 }, 1256 { ahc_patch5_func, 653, 1, 1 },
1257 { ahc_patch4_func, 656, 2, 2 }, 1257 { ahc_patch5_func, 655, 1, 1 },
1258 { ahc_patch0_func, 658, 2, 1 }, 1258 { ahc_patch4_func, 657, 2, 2 },
1259 { ahc_patch5_func, 660, 1, 1 }, 1259 { ahc_patch0_func, 659, 2, 1 },
1260 { ahc_patch5_func, 663, 1, 1 }, 1260 { ahc_patch5_func, 661, 1, 1 },
1261 { ahc_patch5_func, 666, 1, 1 }, 1261 { ahc_patch5_func, 664, 1, 1 },
1262 { ahc_patch19_func, 670, 1, 1 }, 1262 { ahc_patch5_func, 667, 1, 1 },
1263 { ahc_patch19_func, 673, 1, 1 }, 1263 { ahc_patch19_func, 671, 1, 1 },
1264 { ahc_patch4_func, 679, 1, 1 }, 1264 { ahc_patch19_func, 674, 1, 1 },
1265 { ahc_patch6_func, 682, 1, 2 }, 1265 { ahc_patch4_func, 680, 1, 1 },
1266 { ahc_patch0_func, 683, 1, 1 }, 1266 { ahc_patch6_func, 683, 1, 2 },
1267 { ahc_patch7_func, 695, 16, 1 }, 1267 { ahc_patch0_func, 684, 1, 1 },
1268 { ahc_patch4_func, 711, 20, 1 }, 1268 { ahc_patch7_func, 696, 16, 1 },
1269 { ahc_patch9_func, 732, 4, 2 }, 1269 { ahc_patch4_func, 712, 20, 1 },
1270 { ahc_patch0_func, 736, 4, 1 }, 1270 { ahc_patch9_func, 733, 4, 2 },
1271 { ahc_patch9_func, 740, 4, 2 }, 1271 { ahc_patch0_func, 737, 4, 1 },
1272 { ahc_patch0_func, 744, 3, 1 }, 1272 { ahc_patch9_func, 741, 4, 2 },
1273 { ahc_patch6_func, 750, 1, 1 }, 1273 { ahc_patch0_func, 745, 3, 1 },
1274 { ahc_patch22_func, 752, 14, 1 }, 1274 { ahc_patch6_func, 751, 1, 1 },
1275 { ahc_patch7_func, 766, 3, 1 }, 1275 { ahc_patch22_func, 753, 14, 1 },
1276 { ahc_patch9_func, 778, 24, 8 }, 1276 { ahc_patch7_func, 767, 3, 1 },
1277 { ahc_patch19_func, 782, 1, 2 }, 1277 { ahc_patch9_func, 779, 24, 8 },
1278 { ahc_patch0_func, 783, 1, 1 }, 1278 { ahc_patch19_func, 783, 1, 2 },
1279 { ahc_patch15_func, 788, 4, 2 }, 1279 { ahc_patch0_func, 784, 1, 1 },
1280 { ahc_patch0_func, 792, 7, 3 }, 1280 { ahc_patch15_func, 789, 4, 2 },
1281 { ahc_patch23_func, 792, 5, 2 }, 1281 { ahc_patch0_func, 793, 7, 3 },
1282 { ahc_patch0_func, 797, 2, 1 }, 1282 { ahc_patch23_func, 793, 5, 2 },
1283 { ahc_patch0_func, 802, 42, 3 }, 1283 { ahc_patch0_func, 798, 2, 1 },
1284 { ahc_patch18_func, 814, 18, 2 }, 1284 { ahc_patch0_func, 803, 42, 3 },
1285 { ahc_patch0_func, 832, 1, 1 }, 1285 { ahc_patch18_func, 815, 18, 2 },
1286 { ahc_patch4_func, 856, 1, 1 }, 1286 { ahc_patch0_func, 833, 1, 1 },
1287 { ahc_patch4_func, 857, 3, 2 }, 1287 { ahc_patch4_func, 857, 1, 1 },
1288 { ahc_patch0_func, 860, 1, 1 }, 1288 { ahc_patch4_func, 858, 3, 2 },
1289 { ahc_patch13_func, 861, 3, 1 }, 1289 { ahc_patch0_func, 861, 1, 1 },
1290 { ahc_patch4_func, 864, 12, 1 } 1290 { ahc_patch13_func, 862, 3, 1 },
1291 { ahc_patch4_func, 865, 12, 1 }
1291}; 1292};
1292 1293
1293static struct cs { 1294static struct cs {
@@ -1296,11 +1297,11 @@ static struct cs {
1296} critical_sections[] = { 1297} critical_sections[] = {
1297 { 11, 18 }, 1298 { 11, 18 },
1298 { 21, 30 }, 1299 { 21, 30 },
1299 { 711, 727 }, 1300 { 712, 728 },
1300 { 857, 860 }, 1301 { 858, 861 },
1301 { 864, 870 }, 1302 { 865, 871 },
1302 { 872, 874 }, 1303 { 873, 875 },
1303 { 874, 876 } 1304 { 875, 877 }
1304}; 1305};
1305 1306
1306static const int num_critical_sections = sizeof(critical_sections) 1307static const int num_critical_sections = sizeof(critical_sections)
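The critical_sections table above records { begin, end } ranges of sequencer addresses within which the sequencer should not be left paused. As a hedged illustration of how such a table might be consulted (the function name and the exclusive-end convention are assumptions, not the driver's code):

/*
 * Sketch under stated assumptions: report whether the current sequencer
 * program counter lies inside one of the { begin, end } ranges above.
 * in_critical_section() is an invented name; 'end' is treated as exclusive.
 */
struct cs {
	unsigned int begin;
	unsigned int end;
};

static int
in_critical_section(const struct cs *sections, int num_sections,
		    unsigned int seqaddr)
{
	int i;

	for (i = 0; i < num_sections; i++) {
		if (seqaddr >= sections[i].begin && seqaddr < sections[i].end)
			return 1;	/* step the sequencer out before pausing */
	}
	return 0;
}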
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
index 7c5a6db0e672..828ae3d9a510 100644
--- a/drivers/scsi/aic7xxx/aiclib.c
+++ b/drivers/scsi/aic7xxx/aiclib.c
@@ -30,1382 +30,5 @@
30 * $Id$ 30 * $Id$
31 */ 31 */
32 32
33#include <linux/blkdev.h>
34#include <linux/delay.h>
35#include <linux/version.h>
36
37/* Core SCSI definitions */
38#include <scsi/scsi_host.h>
39#include "aiclib.h" 33#include "aiclib.h"
40#include "cam.h"
41
42#ifndef FALSE
43#define FALSE 0
44#endif /* FALSE */
45#ifndef TRUE
46#define TRUE 1
47#endif /* TRUE */
48#ifndef ERESTART
49#define ERESTART -1 /* restart syscall */
50#endif
51#ifndef EJUSTRETURN
52#define EJUSTRETURN -2 /* don't modify regs, just return */
53#endif
54
55static int ascentrycomp(const void *key, const void *member);
56static int senseentrycomp(const void *key, const void *member);
57static void fetchtableentries(int sense_key, int asc, int ascq,
58 struct scsi_inquiry_data *,
59 const struct sense_key_table_entry **,
60 const struct asc_table_entry **);
61static void * scsibsearch(const void *key, const void *base, size_t nmemb,
62 size_t size,
63 int (*compar)(const void *, const void *));
64typedef int (cam_quirkmatch_t)(caddr_t, caddr_t);
65static int cam_strmatch(const u_int8_t *str, const u_int8_t *pattern,
66 int str_len);
67static caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table,
68 int num_entries, int entry_size,
69 cam_quirkmatch_t *comp_func);
70
71#define SCSI_NO_SENSE_STRINGS 1
72#if !defined(SCSI_NO_SENSE_STRINGS)
73#define SST(asc, ascq, action, desc) \
74 asc, ascq, action, desc
75#else
76static const char empty_string[] = "";
77
78#define SST(asc, ascq, action, desc) \
79 asc, ascq, action, empty_string
80#endif
81
82static const struct sense_key_table_entry sense_key_table[] =
83{
84 { SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" },
85 { SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" },
86 {
87 SSD_KEY_NOT_READY, SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
88 "NOT READY"
89 },
90 { SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" },
91 { SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" },
92 { SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" },
93 { SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" },
94 { SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" },
95 { SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" },
96 { SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" },
97 { SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" },
98 { SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" },
99 { SSD_KEY_EQUAL, SS_NOP, "EQUAL" },
100 { SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" },
101 { SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" },
102 { SSD_KEY_RESERVED, SS_FATAL|EIO, "RESERVED" }
103};
104
105static const int sense_key_table_size =
106 sizeof(sense_key_table)/sizeof(sense_key_table[0]);
107
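The removed sense_key_table maps each SCSI sense key to a recovery action and a printable name, and the senseentrycomp()/scsibsearch() declarations above indicate it was searched by key. Below is a self-contained sketch of that kind of lookup using the standard C bsearch(); the type and function names are assumptions for the example, not the deleted aiclib definitions.

/*
 * Illustrative lookup over a table sorted by sense key, in the spirit of
 * the senseentrycomp()/scsibsearch() pair declared above.  The struct and
 * function names here are assumptions for the example only.
 */
#include <stdlib.h>

struct sense_key_entry {
	int sense_key;		/* e.g. 0x02 for NOT READY */
	unsigned int action;	/* recovery action flags */
	const char *desc;	/* printable description */
};

static int
sense_key_cmp(const void *key, const void *member)
{
	const int *k = key;
	const struct sense_key_entry *e = member;

	return *k - e->sense_key;
}

static const struct sense_key_entry *
lookup_sense_key(const struct sense_key_entry *table, size_t nmemb, int key)
{
	return bsearch(&key, table, nmemb, sizeof(*table), sense_key_cmp);
}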
108static struct asc_table_entry quantum_fireball_entries[] = {
109 {SST(0x04, 0x0b, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
110 "Logical unit not ready, initializing cmd. required")}
111};
112
113static struct asc_table_entry sony_mo_entries[] = {
114 {SST(0x04, 0x00, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
115 "Logical unit not ready, cause not reportable")}
116};
117
118static struct scsi_sense_quirk_entry sense_quirk_table[] = {
119 {
120 /*
121 * The Quantum Fireball ST and SE like to return 0x04 0x0b when
122 * they really should return 0x04 0x02. 0x04,0x0b isn't
123 * defined in any SCSI spec, and it isn't mentioned in the
124 * hardware manual for these drives.
125 */
126 {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"},
127 /*num_sense_keys*/0,
128 sizeof(quantum_fireball_entries)/sizeof(struct asc_table_entry),
129 /*sense key entries*/NULL,
130 quantum_fireball_entries
131 },
132 {
133 /*
134 * This Sony MO drive likes to return 0x04, 0x00 when it
135 * isn't spun up.
136 */
137 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"},
138 /*num_sense_keys*/0,
139 sizeof(sony_mo_entries)/sizeof(struct asc_table_entry),
140 /*sense key entries*/NULL,
141 sony_mo_entries
142 }
143};
144
145static const int sense_quirk_table_size =
146 sizeof(sense_quirk_table)/sizeof(sense_quirk_table[0]);
147
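The sense_quirk_table above pairs inquiry vendor/product patterns such as "QUANTUM" / "FIREBALL S*" with per-device override tables, and the cam_strmatch()/cam_quirkmatch() declarations show the match was done with simple wildcards. A rough, assumed sketch of such a matcher follows; it uses an invented name and ignores the fixed-width space padding the real routine would have to handle.

/*
 * Hypothetical wildcard matcher in the spirit of the cam_strmatch()
 * declaration above: '*' matches the rest of the field, '?' matches any
 * single character.  Returns 0 on match, nonzero otherwise.  Trailing
 * padding in 'str' is not checked here.
 */
static int
wildcard_match(const unsigned char *str, const unsigned char *pattern,
	       int str_len)
{
	while (*pattern != '\0' && str_len > 0) {
		if (*pattern == '*')
			return 0;	/* rest of the field matches */
		if (*pattern != '?' && *pattern != *str)
			return 1;	/* literal mismatch */
		pattern++;
		str++;
		str_len--;
	}
	return (*pattern == '\0' || *pattern == '*') ? 0 : 1;
}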
148static struct asc_table_entry asc_table[] = {
149/*
150 * From File: ASC-NUM.TXT
151 * SCSI ASC/ASCQ Assignments
152 * Numeric Sorted Listing
153 * as of 5/12/97
154 *
155 * D - DIRECT ACCESS DEVICE (SBC) device column key
156 * .T - SEQUENTIAL ACCESS DEVICE (SSC) -------------------
157 * . L - PRINTER DEVICE (SSC) blank = reserved
158 * . P - PROCESSOR DEVICE (SPC) not blank = allowed
159 * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC)
160 * . . R - CD DEVICE (MMC)
161 * . . S - SCANNER DEVICE (SGC)
162 * . . .O - OPTICAL MEMORY DEVICE (SBC)
163 * . . . M - MEDIA CHANGER DEVICE (SMC)
164 * . . . C - COMMUNICATION DEVICE (SSC)
165 * . . . .A - STORAGE ARRAY DEVICE (SCC)
166 * . . . . E - ENCLOSURE SERVICES DEVICE (SES)
167 * DTLPWRSOMCAE ASC ASCQ Action Description
168 * ------------ ---- ---- ------ -----------------------------------*/
169/* DTLPWRSOMCAE */{SST(0x00, 0x00, SS_NOP,
170 "No additional sense information") },
171/* T S */{SST(0x00, 0x01, SS_RDEF,
172 "Filemark detected") },
173/* T S */{SST(0x00, 0x02, SS_RDEF,
174 "End-of-partition/medium detected") },
175/* T */{SST(0x00, 0x03, SS_RDEF,
176 "Setmark detected") },
177/* T S */{SST(0x00, 0x04, SS_RDEF,
178 "Beginning-of-partition/medium detected") },
179/* T S */{SST(0x00, 0x05, SS_RDEF,
180 "End-of-data detected") },
181/* DTLPWRSOMCAE */{SST(0x00, 0x06, SS_RDEF,
182 "I/O process terminated") },
183/* R */{SST(0x00, 0x11, SS_FATAL|EBUSY,
184 "Audio play operation in progress") },
185/* R */{SST(0x00, 0x12, SS_NOP,
186 "Audio play operation paused") },
187/* R */{SST(0x00, 0x13, SS_NOP,
188 "Audio play operation successfully completed") },
189/* R */{SST(0x00, 0x14, SS_RDEF,
190 "Audio play operation stopped due to error") },
191/* R */{SST(0x00, 0x15, SS_NOP,
192 "No current audio status to return") },
193/* DTLPWRSOMCAE */{SST(0x00, 0x16, SS_FATAL|EBUSY,
194 "Operation in progress") },
195/* DTL WRSOM AE */{SST(0x00, 0x17, SS_RDEF,
196 "Cleaning requested") },
197/* D W O */{SST(0x01, 0x00, SS_RDEF,
198 "No index/sector signal") },
199/* D WR OM */{SST(0x02, 0x00, SS_RDEF,
200 "No seek complete") },
201/* DTL W SO */{SST(0x03, 0x00, SS_RDEF,
202 "Peripheral device write fault") },
203/* T */{SST(0x03, 0x01, SS_RDEF,
204 "No write current") },
205/* T */{SST(0x03, 0x02, SS_RDEF,
206 "Excessive write errors") },
207/* DTLPWRSOMCAE */{SST(0x04, 0x00,
208 SS_TUR|SSQ_DELAY|SSQ_MANY|SSQ_DECREMENT_COUNT|EIO,
209 "Logical unit not ready, cause not reportable") },
210/* DTLPWRSOMCAE */{SST(0x04, 0x01,
211 SS_TUR|SSQ_DELAY|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
212 "Logical unit is in process of becoming ready") },
213/* DTLPWRSOMCAE */{SST(0x04, 0x02, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
214 "Logical unit not ready, initializing cmd. required") },
215/* DTLPWRSOMCAE */{SST(0x04, 0x03, SS_FATAL|ENXIO,
216 "Logical unit not ready, manual intervention required")},
217/* DTL O */{SST(0x04, 0x04, SS_FATAL|EBUSY,
218 "Logical unit not ready, format in progress") },
219/* DT W OMCA */{SST(0x04, 0x05, SS_FATAL|EBUSY,
220 "Logical unit not ready, rebuild in progress") },
221/* DT W OMCA */{SST(0x04, 0x06, SS_FATAL|EBUSY,
222 "Logical unit not ready, recalculation in progress") },
223/* DTLPWRSOMCAE */{SST(0x04, 0x07, SS_FATAL|EBUSY,
224 "Logical unit not ready, operation in progress") },
225/* R */{SST(0x04, 0x08, SS_FATAL|EBUSY,
226 "Logical unit not ready, long write in progress") },
227/* DTL WRSOMCAE */{SST(0x05, 0x00, SS_RDEF,
228 "Logical unit does not respond to selection") },
229/* D WR OM */{SST(0x06, 0x00, SS_RDEF,
230 "No reference position found") },
231/* DTL WRSOM */{SST(0x07, 0x00, SS_RDEF,
232 "Multiple peripheral devices selected") },
233/* DTL WRSOMCAE */{SST(0x08, 0x00, SS_RDEF,
234 "Logical unit communication failure") },
235/* DTL WRSOMCAE */{SST(0x08, 0x01, SS_RDEF,
236 "Logical unit communication time-out") },
237/* DTL WRSOMCAE */{SST(0x08, 0x02, SS_RDEF,
238 "Logical unit communication parity error") },
239/* DT R OM */{SST(0x08, 0x03, SS_RDEF,
240 "Logical unit communication crc error (ultra-dma/32)")},
241/* DT WR O */{SST(0x09, 0x00, SS_RDEF,
242 "Track following error") },
243/* WR O */{SST(0x09, 0x01, SS_RDEF,
244 "Tracking servo failure") },
245/* WR O */{SST(0x09, 0x02, SS_RDEF,
246 "Focus servo failure") },
247/* WR O */{SST(0x09, 0x03, SS_RDEF,
248 "Spindle servo failure") },
249/* DT WR O */{SST(0x09, 0x04, SS_RDEF,
250 "Head select fault") },
251/* DTLPWRSOMCAE */{SST(0x0A, 0x00, SS_FATAL|ENOSPC,
252 "Error log overflow") },
253/* DTLPWRSOMCAE */{SST(0x0B, 0x00, SS_RDEF,
254 "Warning") },
255/* DTLPWRSOMCAE */{SST(0x0B, 0x01, SS_RDEF,
256 "Specified temperature exceeded") },
257/* DTLPWRSOMCAE */{SST(0x0B, 0x02, SS_RDEF,
258 "Enclosure degraded") },
259/* T RS */{SST(0x0C, 0x00, SS_RDEF,
260 "Write error") },
261/* D W O */{SST(0x0C, 0x01, SS_NOP|SSQ_PRINT_SENSE,
262 "Write error - recovered with auto reallocation") },
263/* D W O */{SST(0x0C, 0x02, SS_RDEF,
264 "Write error - auto reallocation failed") },
265/* D W O */{SST(0x0C, 0x03, SS_RDEF,
266 "Write error - recommend reassignment") },
267/* DT W O */{SST(0x0C, 0x04, SS_RDEF,
268 "Compression check miscompare error") },
269/* DT W O */{SST(0x0C, 0x05, SS_RDEF,
270 "Data expansion occurred during compression") },
271/* DT W O */{SST(0x0C, 0x06, SS_RDEF,
272 "Block not compressible") },
273/* R */{SST(0x0C, 0x07, SS_RDEF,
274 "Write error - recovery needed") },
275/* R */{SST(0x0C, 0x08, SS_RDEF,
276 "Write error - recovery failed") },
277/* R */{SST(0x0C, 0x09, SS_RDEF,
278 "Write error - loss of streaming") },
279/* R */{SST(0x0C, 0x0A, SS_RDEF,
280 "Write error - padding blocks added") },
281/* D W O */{SST(0x10, 0x00, SS_RDEF,
282 "ID CRC or ECC error") },
283/* DT WRSO */{SST(0x11, 0x00, SS_RDEF,
284 "Unrecovered read error") },
285/* DT W SO */{SST(0x11, 0x01, SS_RDEF,
286 "Read retries exhausted") },
287/* DT W SO */{SST(0x11, 0x02, SS_RDEF,
288 "Error too long to correct") },
289/* DT W SO */{SST(0x11, 0x03, SS_RDEF,
290 "Multiple read errors") },
291/* D W O */{SST(0x11, 0x04, SS_RDEF,
292 "Unrecovered read error - auto reallocate failed") },
293/* WR O */{SST(0x11, 0x05, SS_RDEF,
294 "L-EC uncorrectable error") },
295/* WR O */{SST(0x11, 0x06, SS_RDEF,
296 "CIRC unrecovered error") },
297/* W O */{SST(0x11, 0x07, SS_RDEF,
298 "Data re-synchronization error") },
299/* T */{SST(0x11, 0x08, SS_RDEF,
300 "Incomplete block read") },
301/* T */{SST(0x11, 0x09, SS_RDEF,
302 "No gap found") },
303/* DT O */{SST(0x11, 0x0A, SS_RDEF,
304 "Miscorrected error") },
305/* D W O */{SST(0x11, 0x0B, SS_RDEF,
306 "Unrecovered read error - recommend reassignment") },
307/* D W O */{SST(0x11, 0x0C, SS_RDEF,
308 "Unrecovered read error - recommend rewrite the data")},
309/* DT WR O */{SST(0x11, 0x0D, SS_RDEF,
310 "De-compression CRC error") },
311/* DT WR O */{SST(0x11, 0x0E, SS_RDEF,
312 "Cannot decompress using declared algorithm") },
313/* R */{SST(0x11, 0x0F, SS_RDEF,
314 "Error reading UPC/EAN number") },
315/* R */{SST(0x11, 0x10, SS_RDEF,
316 "Error reading ISRC number") },
317/* R */{SST(0x11, 0x11, SS_RDEF,
318 "Read error - loss of streaming") },
319/* D W O */{SST(0x12, 0x00, SS_RDEF,
320 "Address mark not found for id field") },
321/* D W O */{SST(0x13, 0x00, SS_RDEF,
322 "Address mark not found for data field") },
323/* DTL WRSO */{SST(0x14, 0x00, SS_RDEF,
324 "Recorded entity not found") },
325/* DT WR O */{SST(0x14, 0x01, SS_RDEF,
326 "Record not found") },
327/* T */{SST(0x14, 0x02, SS_RDEF,
328 "Filemark or setmark not found") },
329/* T */{SST(0x14, 0x03, SS_RDEF,
330 "End-of-data not found") },
331/* T */{SST(0x14, 0x04, SS_RDEF,
332 "Block sequence error") },
333/* DT W O */{SST(0x14, 0x05, SS_RDEF,
334 "Record not found - recommend reassignment") },
335/* DT W O */{SST(0x14, 0x06, SS_RDEF,
336 "Record not found - data auto-reallocated") },
337/* DTL WRSOM */{SST(0x15, 0x00, SS_RDEF,
338 "Random positioning error") },
339/* DTL WRSOM */{SST(0x15, 0x01, SS_RDEF,
340 "Mechanical positioning error") },
341/* DT WR O */{SST(0x15, 0x02, SS_RDEF,
342 "Positioning error detected by read of medium") },
343/* D W O */{SST(0x16, 0x00, SS_RDEF,
344 "Data synchronization mark error") },
345/* D W O */{SST(0x16, 0x01, SS_RDEF,
346 "Data sync error - data rewritten") },
347/* D W O */{SST(0x16, 0x02, SS_RDEF,
348 "Data sync error - recommend rewrite") },
349/* D W O */{SST(0x16, 0x03, SS_NOP|SSQ_PRINT_SENSE,
350 "Data sync error - data auto-reallocated") },
351/* D W O */{SST(0x16, 0x04, SS_RDEF,
352 "Data sync error - recommend reassignment") },
353/* DT WRSO */{SST(0x17, 0x00, SS_NOP|SSQ_PRINT_SENSE,
354 "Recovered data with no error correction applied") },
355/* DT WRSO */{SST(0x17, 0x01, SS_NOP|SSQ_PRINT_SENSE,
356 "Recovered data with retries") },
357/* DT WR O */{SST(0x17, 0x02, SS_NOP|SSQ_PRINT_SENSE,
358 "Recovered data with positive head offset") },
359/* DT WR O */{SST(0x17, 0x03, SS_NOP|SSQ_PRINT_SENSE,
360 "Recovered data with negative head offset") },
361/* WR O */{SST(0x17, 0x04, SS_NOP|SSQ_PRINT_SENSE,
362 "Recovered data with retries and/or CIRC applied") },
363/* D WR O */{SST(0x17, 0x05, SS_NOP|SSQ_PRINT_SENSE,
364 "Recovered data using previous sector id") },
365/* D W O */{SST(0x17, 0x06, SS_NOP|SSQ_PRINT_SENSE,
366 "Recovered data without ECC - data auto-reallocated") },
367/* D W O */{SST(0x17, 0x07, SS_NOP|SSQ_PRINT_SENSE,
368 "Recovered data without ECC - recommend reassignment")},
369/* D W O */{SST(0x17, 0x08, SS_NOP|SSQ_PRINT_SENSE,
370 "Recovered data without ECC - recommend rewrite") },
371/* D W O */{SST(0x17, 0x09, SS_NOP|SSQ_PRINT_SENSE,
372 "Recovered data without ECC - data rewritten") },
373/* D W O */{SST(0x18, 0x00, SS_NOP|SSQ_PRINT_SENSE,
374 "Recovered data with error correction applied") },
375/* D WR O */{SST(0x18, 0x01, SS_NOP|SSQ_PRINT_SENSE,
376 "Recovered data with error corr. & retries applied") },
377/* D WR O */{SST(0x18, 0x02, SS_NOP|SSQ_PRINT_SENSE,
378 "Recovered data - data auto-reallocated") },
379/* R */{SST(0x18, 0x03, SS_NOP|SSQ_PRINT_SENSE,
380 "Recovered data with CIRC") },
381/* R */{SST(0x18, 0x04, SS_NOP|SSQ_PRINT_SENSE,
382 "Recovered data with L-EC") },
383/* D WR O */{SST(0x18, 0x05, SS_NOP|SSQ_PRINT_SENSE,
384 "Recovered data - recommend reassignment") },
385/* D WR O */{SST(0x18, 0x06, SS_NOP|SSQ_PRINT_SENSE,
386 "Recovered data - recommend rewrite") },
387/* D W O */{SST(0x18, 0x07, SS_NOP|SSQ_PRINT_SENSE,
388 "Recovered data with ECC - data rewritten") },
389/* D O */{SST(0x19, 0x00, SS_RDEF,
390 "Defect list error") },
391/* D O */{SST(0x19, 0x01, SS_RDEF,
392 "Defect list not available") },
393/* D O */{SST(0x19, 0x02, SS_RDEF,
394 "Defect list error in primary list") },
395/* D O */{SST(0x19, 0x03, SS_RDEF,
396 "Defect list error in grown list") },
397/* DTLPWRSOMCAE */{SST(0x1A, 0x00, SS_RDEF,
398 "Parameter list length error") },
399/* DTLPWRSOMCAE */{SST(0x1B, 0x00, SS_RDEF,
400 "Synchronous data transfer error") },
401/* D O */{SST(0x1C, 0x00, SS_RDEF,
402 "Defect list not found") },
403/* D O */{SST(0x1C, 0x01, SS_RDEF,
404 "Primary defect list not found") },
405/* D O */{SST(0x1C, 0x02, SS_RDEF,
406 "Grown defect list not found") },
407/* D W O */{SST(0x1D, 0x00, SS_FATAL,
408 "Miscompare during verify operation" )},
409/* D W O */{SST(0x1E, 0x00, SS_NOP|SSQ_PRINT_SENSE,
410 "Recovered id with ecc correction") },
411/* D O */{SST(0x1F, 0x00, SS_RDEF,
412 "Partial defect list transfer") },
413/* DTLPWRSOMCAE */{SST(0x20, 0x00, SS_FATAL|EINVAL,
414 "Invalid command operation code") },
415/* DT WR OM */{SST(0x21, 0x00, SS_FATAL|EINVAL,
416 "Logical block address out of range" )},
417/* DT WR OM */{SST(0x21, 0x01, SS_FATAL|EINVAL,
418 "Invalid element address") },
419/* D */{SST(0x22, 0x00, SS_FATAL|EINVAL,
420 "Illegal function") }, /* Deprecated. Use 20 00, 24 00, or 26 00 instead */
421/* DTLPWRSOMCAE */{SST(0x24, 0x00, SS_FATAL|EINVAL,
422 "Invalid field in CDB") },
423/* DTLPWRSOMCAE */{SST(0x25, 0x00, SS_FATAL|ENXIO,
424 "Logical unit not supported") },
425/* DTLPWRSOMCAE */{SST(0x26, 0x00, SS_FATAL|EINVAL,
426 "Invalid field in parameter list") },
427/* DTLPWRSOMCAE */{SST(0x26, 0x01, SS_FATAL|EINVAL,
428 "Parameter not supported") },
429/* DTLPWRSOMCAE */{SST(0x26, 0x02, SS_FATAL|EINVAL,
430 "Parameter value invalid") },
431/* DTLPWRSOMCAE */{SST(0x26, 0x03, SS_FATAL|EINVAL,
432 "Threshold parameters not supported") },
433/* DTLPWRSOMCAE */{SST(0x26, 0x04, SS_FATAL|EINVAL,
434 "Invalid release of active persistent reservation") },
435/* DT W O */{SST(0x27, 0x00, SS_FATAL|EACCES,
436 "Write protected") },
437/* DT W O */{SST(0x27, 0x01, SS_FATAL|EACCES,
438 "Hardware write protected") },
439/* DT W O */{SST(0x27, 0x02, SS_FATAL|EACCES,
440 "Logical unit software write protected") },
441/* T */{SST(0x27, 0x03, SS_FATAL|EACCES,
442 "Associated write protect") },
443/* T */{SST(0x27, 0x04, SS_FATAL|EACCES,
444 "Persistent write protect") },
445/* T */{SST(0x27, 0x05, SS_FATAL|EACCES,
446 "Permanent write protect") },
447/* DTLPWRSOMCAE */{SST(0x28, 0x00, SS_RDEF,
448 "Not ready to ready change, medium may have changed") },
449/* DTLPWRSOMCAE */{SST(0x28, 0x01, SS_FATAL|ENXIO,
450 "Import or export element accessed") },
451/*
452 * XXX JGibbs - All of these should use the same errno, but I don't think
453 * ENXIO is the correct choice. Should we borrow from the networking
454 * errnos? ECONNRESET anyone?
455 */
456/* DTLPWRSOMCAE */{SST(0x29, 0x00, SS_RDEF,
457 "Power on, reset, or bus device reset occurred") },
458/* DTLPWRSOMCAE */{SST(0x29, 0x01, SS_RDEF,
459 "Power on occurred") },
460/* DTLPWRSOMCAE */{SST(0x29, 0x02, SS_RDEF,
461 "Scsi bus reset occurred") },
462/* DTLPWRSOMCAE */{SST(0x29, 0x03, SS_RDEF,
463 "Bus device reset function occurred") },
464/* DTLPWRSOMCAE */{SST(0x29, 0x04, SS_RDEF,
465 "Device internal reset") },
466/* DTLPWRSOMCAE */{SST(0x29, 0x05, SS_RDEF,
467 "Transceiver mode changed to single-ended") },
468/* DTLPWRSOMCAE */{SST(0x29, 0x06, SS_RDEF,
469 "Transceiver mode changed to LVD") },
470/* DTL WRSOMCAE */{SST(0x2A, 0x00, SS_RDEF,
471 "Parameters changed") },
472/* DTL WRSOMCAE */{SST(0x2A, 0x01, SS_RDEF,
473 "Mode parameters changed") },
474/* DTL WRSOMCAE */{SST(0x2A, 0x02, SS_RDEF,
475 "Log parameters changed") },
476/* DTLPWRSOMCAE */{SST(0x2A, 0x03, SS_RDEF,
477 "Reservations preempted") },
478/* DTLPWRSO C */{SST(0x2B, 0x00, SS_RDEF,
479 "Copy cannot execute since host cannot disconnect") },
480/* DTLPWRSOMCAE */{SST(0x2C, 0x00, SS_RDEF,
481 "Command sequence error") },
482/* S */{SST(0x2C, 0x01, SS_RDEF,
483 "Too many windows specified") },
484/* S */{SST(0x2C, 0x02, SS_RDEF,
485 "Invalid combination of windows specified") },
486/* R */{SST(0x2C, 0x03, SS_RDEF,
487 "Current program area is not empty") },
488/* R */{SST(0x2C, 0x04, SS_RDEF,
489 "Current program area is empty") },
490/* T */{SST(0x2D, 0x00, SS_RDEF,
491 "Overwrite error on update in place") },
492/* DTLPWRSOMCAE */{SST(0x2F, 0x00, SS_RDEF,
493 "Commands cleared by another initiator") },
494/* DT WR OM */{SST(0x30, 0x00, SS_RDEF,
495 "Incompatible medium installed") },
496/* DT WR O */{SST(0x30, 0x01, SS_RDEF,
497 "Cannot read medium - unknown format") },
498/* DT WR O */{SST(0x30, 0x02, SS_RDEF,
499 "Cannot read medium - incompatible format") },
500/* DT */{SST(0x30, 0x03, SS_RDEF,
501 "Cleaning cartridge installed") },
502/* DT WR O */{SST(0x30, 0x04, SS_RDEF,
503 "Cannot write medium - unknown format") },
504/* DT WR O */{SST(0x30, 0x05, SS_RDEF,
505 "Cannot write medium - incompatible format") },
506/* DT W O */{SST(0x30, 0x06, SS_RDEF,
507 "Cannot format medium - incompatible medium") },
508/* DTL WRSOM AE */{SST(0x30, 0x07, SS_RDEF,
509 "Cleaning failure") },
510/* R */{SST(0x30, 0x08, SS_RDEF,
511 "Cannot write - application code mismatch") },
512/* R */{SST(0x30, 0x09, SS_RDEF,
513 "Current session not fixated for append") },
514/* DT WR O */{SST(0x31, 0x00, SS_RDEF,
515 "Medium format corrupted") },
516/* D L R O */{SST(0x31, 0x01, SS_RDEF,
517 "Format command failed") },
518/* D W O */{SST(0x32, 0x00, SS_RDEF,
519 "No defect spare location available") },
520/* D W O */{SST(0x32, 0x01, SS_RDEF,
521 "Defect list update failure") },
522/* T */{SST(0x33, 0x00, SS_RDEF,
523 "Tape length error") },
524/* DTLPWRSOMCAE */{SST(0x34, 0x00, SS_RDEF,
525 "Enclosure failure") },
526/* DTLPWRSOMCAE */{SST(0x35, 0x00, SS_RDEF,
527 "Enclosure services failure") },
528/* DTLPWRSOMCAE */{SST(0x35, 0x01, SS_RDEF,
529 "Unsupported enclosure function") },
530/* DTLPWRSOMCAE */{SST(0x35, 0x02, SS_RDEF,
531 "Enclosure services unavailable") },
532/* DTLPWRSOMCAE */{SST(0x35, 0x03, SS_RDEF,
533 "Enclosure services transfer failure") },
534/* DTLPWRSOMCAE */{SST(0x35, 0x04, SS_RDEF,
535 "Enclosure services transfer refused") },
536/* L */{SST(0x36, 0x00, SS_RDEF,
537 "Ribbon, ink, or toner failure") },
538/* DTL WRSOMCAE */{SST(0x37, 0x00, SS_RDEF,
539 "Rounded parameter") },
540/* DTL WRSOMCAE */{SST(0x39, 0x00, SS_RDEF,
541 "Saving parameters not supported") },
542/* DTL WRSOM */{SST(0x3A, 0x00, SS_NOP,
543 "Medium not present") },
544/* DT WR OM */{SST(0x3A, 0x01, SS_NOP,
545 "Medium not present - tray closed") },
546/* DT WR OM */{SST(0x3A, 0x01, SS_NOP,
547 "Medium not present - tray open") },
548/* DT WR OM */{SST(0x3A, 0x03, SS_NOP,
549 "Medium not present - Loadable") },
550/* DT WR OM */{SST(0x3A, 0x04, SS_NOP,
551 "Medium not present - medium auxiliary "
552 "memory accessible") },
553/* DT WR OM */{SST(0x3A, 0xFF, SS_NOP, NULL) },/* Range 0x05->0xFF */
554/* TL */{SST(0x3B, 0x00, SS_RDEF,
555 "Sequential positioning error") },
556/* T */{SST(0x3B, 0x01, SS_RDEF,
557 "Tape position error at beginning-of-medium") },
558/* T */{SST(0x3B, 0x02, SS_RDEF,
559 "Tape position error at end-of-medium") },
560/* L */{SST(0x3B, 0x03, SS_RDEF,
561 "Tape or electronic vertical forms unit not ready") },
562/* L */{SST(0x3B, 0x04, SS_RDEF,
563 "Slew failure") },
564/* L */{SST(0x3B, 0x05, SS_RDEF,
565 "Paper jam") },
566/* L */{SST(0x3B, 0x06, SS_RDEF,
567 "Failed to sense top-of-form") },
568/* L */{SST(0x3B, 0x07, SS_RDEF,
569 "Failed to sense bottom-of-form") },
570/* T */{SST(0x3B, 0x08, SS_RDEF,
571 "Reposition error") },
572/* S */{SST(0x3B, 0x09, SS_RDEF,
573 "Read past end of medium") },
574/* S */{SST(0x3B, 0x0A, SS_RDEF,
575 "Read past beginning of medium") },
576/* S */{SST(0x3B, 0x0B, SS_RDEF,
577 "Position past end of medium") },
578/* T S */{SST(0x3B, 0x0C, SS_RDEF,
579 "Position past beginning of medium") },
580/* DT WR OM */{SST(0x3B, 0x0D, SS_FATAL|ENOSPC,
581 "Medium destination element full") },
582/* DT WR OM */{SST(0x3B, 0x0E, SS_RDEF,
583 "Medium source element empty") },
584/* R */{SST(0x3B, 0x0F, SS_RDEF,
585 "End of medium reached") },
586/* DT WR OM */{SST(0x3B, 0x11, SS_RDEF,
587 "Medium magazine not accessible") },
588/* DT WR OM */{SST(0x3B, 0x12, SS_RDEF,
589 "Medium magazine removed") },
590/* DT WR OM */{SST(0x3B, 0x13, SS_RDEF,
591 "Medium magazine inserted") },
592/* DT WR OM */{SST(0x3B, 0x14, SS_RDEF,
593 "Medium magazine locked") },
594/* DT WR OM */{SST(0x3B, 0x15, SS_RDEF,
595 "Medium magazine unlocked") },
596/* DTLPWRSOMCAE */{SST(0x3D, 0x00, SS_RDEF,
597 "Invalid bits in identify message") },
598/* DTLPWRSOMCAE */{SST(0x3E, 0x00, SS_RDEF,
599 "Logical unit has not self-configured yet") },
600/* DTLPWRSOMCAE */{SST(0x3E, 0x01, SS_RDEF,
601 "Logical unit failure") },
602/* DTLPWRSOMCAE */{SST(0x3E, 0x02, SS_RDEF,
603 "Timeout on logical unit") },
604/* DTLPWRSOMCAE */{SST(0x3F, 0x00, SS_RDEF,
605 "Target operating conditions have changed") },
606/* DTLPWRSOMCAE */{SST(0x3F, 0x01, SS_RDEF,
607 "Microcode has been changed") },
608/* DTLPWRSOMC */{SST(0x3F, 0x02, SS_RDEF,
609 "Changed operating definition") },
610/* DTLPWRSOMCAE */{SST(0x3F, 0x03, SS_INQ_REFRESH|SSQ_DECREMENT_COUNT,
611 "Inquiry data has changed") },
612/* DT WR OMCAE */{SST(0x3F, 0x04, SS_RDEF,
613 "Component device attached") },
614/* DT WR OMCAE */{SST(0x3F, 0x05, SS_RDEF,
615 "Device identifier changed") },
616/* DT WR OMCAE */{SST(0x3F, 0x06, SS_RDEF,
617 "Redundancy group created or modified") },
618/* DT WR OMCAE */{SST(0x3F, 0x07, SS_RDEF,
619 "Redundancy group deleted") },
620/* DT WR OMCAE */{SST(0x3F, 0x08, SS_RDEF,
621 "Spare created or modified") },
622/* DT WR OMCAE */{SST(0x3F, 0x09, SS_RDEF,
623 "Spare deleted") },
624/* DT WR OMCAE */{SST(0x3F, 0x0A, SS_RDEF,
625 "Volume set created or modified") },
626/* DT WR OMCAE */{SST(0x3F, 0x0B, SS_RDEF,
627 "Volume set deleted") },
628/* DT WR OMCAE */{SST(0x3F, 0x0C, SS_RDEF,
629 "Volume set deassigned") },
630/* DT WR OMCAE */{SST(0x3F, 0x0D, SS_RDEF,
631 "Volume set reassigned") },
632/* DTLPWRSOMCAE */{SST(0x3F, 0x0E, SS_RDEF,
633 "Reported luns data has changed") },
634/* DTLPWRSOMCAE */{SST(0x3F, 0x0F, SS_RETRY|SSQ_DECREMENT_COUNT
635 | SSQ_DELAY_RANDOM|EBUSY,
636 "Echo buffer overwritten") },
637/* DT WR OM B*/{SST(0x3F, 0x0F, SS_RDEF, "Medium Loadable") },
638/* DT WR OM B*/{SST(0x3F, 0x0F, SS_RDEF,
639 "Medium auxiliary memory accessible") },
640/* D */{SST(0x40, 0x00, SS_RDEF,
641 "Ram failure") }, /* deprecated - use 40 NN instead */
642/* DTLPWRSOMCAE */{SST(0x40, 0x80, SS_RDEF,
643 "Diagnostic failure: ASCQ = Component ID") },
644/* DTLPWRSOMCAE */{SST(0x40, 0xFF, SS_RDEF|SSQ_RANGE,
645 NULL) },/* Range 0x80->0xFF */
646/* D */{SST(0x41, 0x00, SS_RDEF,
647 "Data path failure") }, /* deprecated - use 40 NN instead */
648/* D */{SST(0x42, 0x00, SS_RDEF,
649 "Power-on or self-test failure") }, /* deprecated - use 40 NN instead */
650/* DTLPWRSOMCAE */{SST(0x43, 0x00, SS_RDEF,
651 "Message error") },
652/* DTLPWRSOMCAE */{SST(0x44, 0x00, SS_RDEF,
653 "Internal target failure") },
654/* DTLPWRSOMCAE */{SST(0x45, 0x00, SS_RDEF,
655 "Select or reselect failure") },
656/* DTLPWRSOMC */{SST(0x46, 0x00, SS_RDEF,
657 "Unsuccessful soft reset") },
658/* DTLPWRSOMCAE */{SST(0x47, 0x00, SS_RDEF|SSQ_FALLBACK,
659 "SCSI parity error") },
660/* DTLPWRSOMCAE */{SST(0x47, 0x01, SS_RDEF|SSQ_FALLBACK,
661 "Data Phase CRC error detected") },
662/* DTLPWRSOMCAE */{SST(0x47, 0x02, SS_RDEF|SSQ_FALLBACK,
663 "SCSI parity error detected during ST data phase") },
664/* DTLPWRSOMCAE */{SST(0x47, 0x03, SS_RDEF|SSQ_FALLBACK,
665 "Information Unit iuCRC error") },
666/* DTLPWRSOMCAE */{SST(0x47, 0x04, SS_RDEF|SSQ_FALLBACK,
667 "Asynchronous information protection error detected") },
668/* DTLPWRSOMCAE */{SST(0x47, 0x05, SS_RDEF|SSQ_FALLBACK,
669 "Protocol server CRC error") },
670/* DTLPWRSOMCAE */{SST(0x48, 0x00, SS_RDEF|SSQ_FALLBACK,
671 "Initiator detected error message received") },
672/* DTLPWRSOMCAE */{SST(0x49, 0x00, SS_RDEF,
673 "Invalid message error") },
674/* DTLPWRSOMCAE */{SST(0x4A, 0x00, SS_RDEF,
675 "Command phase error") },
676/* DTLPWRSOMCAE */{SST(0x4B, 0x00, SS_RDEF,
677 "Data phase error") },
678/* DTLPWRSOMCAE */{SST(0x4C, 0x00, SS_RDEF,
679 "Logical unit failed self-configuration") },
680/* DTLPWRSOMCAE */{SST(0x4D, 0x00, SS_RDEF,
681 "Tagged overlapped commands: ASCQ = Queue tag ID") },
682/* DTLPWRSOMCAE */{SST(0x4D, 0xFF, SS_RDEF|SSQ_RANGE,
683 NULL)}, /* Range 0x00->0xFF */
684/* DTLPWRSOMCAE */{SST(0x4E, 0x00, SS_RDEF,
685 "Overlapped commands attempted") },
686/* T */{SST(0x50, 0x00, SS_RDEF,
687 "Write append error") },
688/* T */{SST(0x50, 0x01, SS_RDEF,
689 "Write append position error") },
690/* T */{SST(0x50, 0x02, SS_RDEF,
691 "Position error related to timing") },
692/* T O */{SST(0x51, 0x00, SS_RDEF,
693 "Erase failure") },
694/* T */{SST(0x52, 0x00, SS_RDEF,
695 "Cartridge fault") },
696/* DTL WRSOM */{SST(0x53, 0x00, SS_RDEF,
697 "Media load or eject failed") },
698/* T */{SST(0x53, 0x01, SS_RDEF,
699 "Unload tape failure") },
700/* DT WR OM */{SST(0x53, 0x02, SS_RDEF,
701 "Medium removal prevented") },
702/* P */{SST(0x54, 0x00, SS_RDEF,
703 "Scsi to host system interface failure") },
704/* P */{SST(0x55, 0x00, SS_RDEF,
705 "System resource failure") },
706/* D O */{SST(0x55, 0x01, SS_FATAL|ENOSPC,
707 "System buffer full") },
708/* R */{SST(0x57, 0x00, SS_RDEF,
709 "Unable to recover table-of-contents") },
710/* O */{SST(0x58, 0x00, SS_RDEF,
711 "Generation does not exist") },
712/* O */{SST(0x59, 0x00, SS_RDEF,
713 "Updated block read") },
714/* DTLPWRSOM */{SST(0x5A, 0x00, SS_RDEF,
715 "Operator request or state change input") },
716/* DT WR OM */{SST(0x5A, 0x01, SS_RDEF,
717 "Operator medium removal request") },
718/* DT W O */{SST(0x5A, 0x02, SS_RDEF,
719 "Operator selected write protect") },
720/* DT W O */{SST(0x5A, 0x03, SS_RDEF,
721 "Operator selected write permit") },
722/* DTLPWRSOM */{SST(0x5B, 0x00, SS_RDEF,
723 "Log exception") },
724/* DTLPWRSOM */{SST(0x5B, 0x01, SS_RDEF,
725 "Threshold condition met") },
726/* DTLPWRSOM */{SST(0x5B, 0x02, SS_RDEF,
727 "Log counter at maximum") },
728/* DTLPWRSOM */{SST(0x5B, 0x03, SS_RDEF,
729 "Log list codes exhausted") },
730/* D O */{SST(0x5C, 0x00, SS_RDEF,
731 "RPL status change") },
732/* D O */{SST(0x5C, 0x01, SS_NOP|SSQ_PRINT_SENSE,
733 "Spindles synchronized") },
734/* D O */{SST(0x5C, 0x02, SS_RDEF,
735 "Spindles not synchronized") },
736/* DTLPWRSOMCAE */{SST(0x5D, 0x00, SS_RDEF,
737 "Failure prediction threshold exceeded") },
738/* DTLPWRSOMCAE */{SST(0x5D, 0xFF, SS_RDEF,
739 "Failure prediction threshold exceeded (false)") },
740/* DTLPWRSO CA */{SST(0x5E, 0x00, SS_RDEF,
741 "Low power condition on") },
742/* DTLPWRSO CA */{SST(0x5E, 0x01, SS_RDEF,
743 "Idle condition activated by timer") },
744/* DTLPWRSO CA */{SST(0x5E, 0x02, SS_RDEF,
745 "Standby condition activated by timer") },
746/* DTLPWRSO CA */{SST(0x5E, 0x03, SS_RDEF,
747 "Idle condition activated by command") },
748/* DTLPWRSO CA */{SST(0x5E, 0x04, SS_RDEF,
749 "Standby condition activated by command") },
750/* S */{SST(0x60, 0x00, SS_RDEF,
751 "Lamp failure") },
752/* S */{SST(0x61, 0x00, SS_RDEF,
753 "Video acquisition error") },
754/* S */{SST(0x61, 0x01, SS_RDEF,
755 "Unable to acquire video") },
756/* S */{SST(0x61, 0x02, SS_RDEF,
757 "Out of focus") },
758/* S */{SST(0x62, 0x00, SS_RDEF,
759 "Scan head positioning error") },
760/* R */{SST(0x63, 0x00, SS_RDEF,
761 "End of user area encountered on this track") },
762/* R */{SST(0x63, 0x01, SS_FATAL|ENOSPC,
763 "Packet does not fit in available space") },
764/* R */{SST(0x64, 0x00, SS_RDEF,
765 "Illegal mode for this track") },
766/* R */{SST(0x64, 0x01, SS_RDEF,
767 "Invalid packet size") },
768/* DTLPWRSOMCAE */{SST(0x65, 0x00, SS_RDEF,
769 "Voltage fault") },
770/* S */{SST(0x66, 0x00, SS_RDEF,
771 "Automatic document feeder cover up") },
772/* S */{SST(0x66, 0x01, SS_RDEF,
773 "Automatic document feeder lift up") },
774/* S */{SST(0x66, 0x02, SS_RDEF,
775 "Document jam in automatic document feeder") },
776/* S */{SST(0x66, 0x03, SS_RDEF,
777 "Document miss feed automatic in document feeder") },
778/* A */{SST(0x67, 0x00, SS_RDEF,
779 "Configuration failure") },
780/* A */{SST(0x67, 0x01, SS_RDEF,
781 "Configuration of incapable logical units failed") },
782/* A */{SST(0x67, 0x02, SS_RDEF,
783 "Add logical unit failed") },
784/* A */{SST(0x67, 0x03, SS_RDEF,
785 "Modification of logical unit failed") },
786/* A */{SST(0x67, 0x04, SS_RDEF,
787 "Exchange of logical unit failed") },
788/* A */{SST(0x67, 0x05, SS_RDEF,
789 "Remove of logical unit failed") },
790/* A */{SST(0x67, 0x06, SS_RDEF,
791 "Attachment of logical unit failed") },
792/* A */{SST(0x67, 0x07, SS_RDEF,
793 "Creation of logical unit failed") },
794/* A */{SST(0x68, 0x00, SS_RDEF,
795 "Logical unit not configured") },
796/* A */{SST(0x69, 0x00, SS_RDEF,
797 "Data loss on logical unit") },
798/* A */{SST(0x69, 0x01, SS_RDEF,
799 "Multiple logical unit failures") },
800/* A */{SST(0x69, 0x02, SS_RDEF,
801 "Parity/data mismatch") },
802/* A */{SST(0x6A, 0x00, SS_RDEF,
803 "Informational, refer to log") },
804/* A */{SST(0x6B, 0x00, SS_RDEF,
805 "State change has occurred") },
806/* A */{SST(0x6B, 0x01, SS_RDEF,
807 "Redundancy level got better") },
808/* A */{SST(0x6B, 0x02, SS_RDEF,
809 "Redundancy level got worse") },
810/* A */{SST(0x6C, 0x00, SS_RDEF,
811 "Rebuild failure occurred") },
812/* A */{SST(0x6D, 0x00, SS_RDEF,
813 "Recalculate failure occurred") },
814/* A */{SST(0x6E, 0x00, SS_RDEF,
815 "Command to logical unit failed") },
816/* T */{SST(0x70, 0x00, SS_RDEF,
817 "Decompression exception short: ASCQ = Algorithm ID") },
818/* T */{SST(0x70, 0xFF, SS_RDEF|SSQ_RANGE,
819 NULL) }, /* Range 0x00 -> 0xFF */
820/* T */{SST(0x71, 0x00, SS_RDEF,
821 "Decompression exception long: ASCQ = Algorithm ID") },
822/* T */{SST(0x71, 0xFF, SS_RDEF|SSQ_RANGE,
823 NULL) }, /* Range 0x00 -> 0xFF */
824/* R */{SST(0x72, 0x00, SS_RDEF,
825 "Session fixation error") },
826/* R */{SST(0x72, 0x01, SS_RDEF,
827 "Session fixation error writing lead-in") },
828/* R */{SST(0x72, 0x02, SS_RDEF,
829 "Session fixation error writing lead-out") },
830/* R */{SST(0x72, 0x03, SS_RDEF,
831 "Session fixation error - incomplete track in session") },
832/* R */{SST(0x72, 0x04, SS_RDEF,
833 "Empty or partially written reserved track") },
834/* R */{SST(0x73, 0x00, SS_RDEF,
835 "CD control error") },
836/* R */{SST(0x73, 0x01, SS_RDEF,
837 "Power calibration area almost full") },
838/* R */{SST(0x73, 0x02, SS_FATAL|ENOSPC,
839 "Power calibration area is full") },
840/* R */{SST(0x73, 0x03, SS_RDEF,
841 "Power calibration area error") },
842/* R */{SST(0x73, 0x04, SS_RDEF,
843 "Program memory area update failure") },
844/* R */{SST(0x73, 0x05, SS_RDEF,
845 "program memory area is full") }
846};
847
848static const int asc_table_size = sizeof(asc_table)/sizeof(asc_table[0]);
849
850struct asc_key
851{
852 int asc;
853 int ascq;
854};
855
856static int
857ascentrycomp(const void *key, const void *member)
858{
859 int asc;
860 int ascq;
861 const struct asc_table_entry *table_entry;
862
863 asc = ((const struct asc_key *)key)->asc;
864 ascq = ((const struct asc_key *)key)->ascq;
865 table_entry = (const struct asc_table_entry *)member;
866
867 if (asc >= table_entry->asc) {
868
869 if (asc > table_entry->asc)
870 return (1);
871
872 if (ascq <= table_entry->ascq) {
873 /* Check for ranges */
874 if (ascq == table_entry->ascq
875 || ((table_entry->action & SSQ_RANGE) != 0
876 && ascq >= (table_entry - 1)->ascq))
877 return (0);
878 return (-1);
879 }
880 return (1);
881 }
882 return (-1);
883}
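An editorial aside on the range check above (not part of the original source):

/*
 * Worked example: a lookup of ASC/ASCQ 0x40/0x93 first passes the
 * 0x40/0x80 "Diagnostic failure" entry (0x93 > 0x80, so the comparator
 * returns 1 and the search moves right) and then reaches the 0x40/0xFF
 * entry flagged SSQ_RANGE.  There 0x93 <= 0xFF and 0x93 >= the previous
 * entry's 0x80, so the comparator returns 0: every ASCQ in the
 * 0x80..0xFF span resolves to that single range entry.
 */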
884
885static int
886senseentrycomp(const void *key, const void *member)
887{
888 int sense_key;
889 const struct sense_key_table_entry *table_entry;
890
891 sense_key = *((const int *)key);
892 table_entry = (const struct sense_key_table_entry *)member;
893
894 if (sense_key >= table_entry->sense_key) {
895 if (sense_key == table_entry->sense_key)
896 return (0);
897 return (1);
898 }
899 return (-1);
900}
901
902static void
903fetchtableentries(int sense_key, int asc, int ascq,
904 struct scsi_inquiry_data *inq_data,
905 const struct sense_key_table_entry **sense_entry,
906 const struct asc_table_entry **asc_entry)
907{
908 void *match;
909 const struct asc_table_entry *asc_tables[2];
910 const struct sense_key_table_entry *sense_tables[2];
911 struct asc_key asc_ascq;
912 size_t asc_tables_size[2];
913 size_t sense_tables_size[2];
914 int num_asc_tables;
915 int num_sense_tables;
916 int i;
917
918 /* Default to failure */
919 *sense_entry = NULL;
920 *asc_entry = NULL;
921 match = NULL;
922 if (inq_data != NULL)
923 match = cam_quirkmatch((void *)inq_data,
924 (void *)sense_quirk_table,
925 sense_quirk_table_size,
926 sizeof(*sense_quirk_table),
927 aic_inquiry_match);
928
929 if (match != NULL) {
930 struct scsi_sense_quirk_entry *quirk;
931
932 quirk = (struct scsi_sense_quirk_entry *)match;
933 asc_tables[0] = quirk->asc_info;
934 asc_tables_size[0] = quirk->num_ascs;
935 asc_tables[1] = asc_table;
936 asc_tables_size[1] = asc_table_size;
937 num_asc_tables = 2;
938 sense_tables[0] = quirk->sense_key_info;
939 sense_tables_size[0] = quirk->num_sense_keys;
940 sense_tables[1] = sense_key_table;
941 sense_tables_size[1] = sense_key_table_size;
942 num_sense_tables = 2;
943 } else {
944 asc_tables[0] = asc_table;
945 asc_tables_size[0] = asc_table_size;
946 num_asc_tables = 1;
947 sense_tables[0] = sense_key_table;
948 sense_tables_size[0] = sense_key_table_size;
949 num_sense_tables = 1;
950 }
951
952 asc_ascq.asc = asc;
953 asc_ascq.ascq = ascq;
954 for (i = 0; i < num_asc_tables; i++) {
955 void *found_entry;
956
957 found_entry = scsibsearch(&asc_ascq, asc_tables[i],
958 asc_tables_size[i],
959 sizeof(**asc_tables),
960 ascentrycomp);
961
962 if (found_entry) {
963 *asc_entry = (struct asc_table_entry *)found_entry;
964 break;
965 }
966 }
967
968 for (i = 0; i < num_sense_tables; i++) {
969 void *found_entry;
970
971 found_entry = scsibsearch(&sense_key, sense_tables[i],
972 sense_tables_size[i],
973 sizeof(**sense_tables),
974 senseentrycomp);
975
976 if (found_entry) {
977 *sense_entry =
978 (struct sense_key_table_entry *)found_entry;
979 break;
980 }
981 }
982}
983
984static void *
985scsibsearch(const void *key, const void *base, size_t nmemb, size_t size,
986 int (*compar)(const void *, const void *))
987{
988 const void *entry;
989 u_int l;
990 u_int u;
991 u_int m;
992
993 l = -1;
994 u = nmemb;
995 while (l + 1 != u) {
996 m = (l + u) / 2;
997 entry = base + m * size;
998 if (compar(key, entry) > 0)
999 l = m;
1000 else
1001 u = m;
1002 }
1003
1004 entry = base + u * size;
1005 if (u == nmemb
1006 || compar(key, entry) != 0)
1007 return (NULL);
1008
1009 return ((void *)entry);
1010}
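The routine above is a lower-bound binary search; the sketch below is an
editorial illustration in plain user-space C (not part of the driver) showing
the same loop on an int array and why the unsigned index can legitimately
start at -1.

/* Hypothetical stand-alone demo of the scsibsearch() loop shape. */
#include <stdio.h>

static const int demo[] = { 2, 4, 4, 7, 9 };

int
main(void)
{
	int key = 4;
	unsigned int l = -1;		/* "one before index 0", i.e. UINT_MAX */
	unsigned int u = sizeof(demo) / sizeof(demo[0]);

	while (l + 1 != u) {
		unsigned int m = (l + u) / 2;	/* wraps harmlessly while l == -1 */

		if (key > demo[m])
			l = m;
		else
			u = m;
	}
	/* u is now the first index whose element is >= key (1 here). */
	printf("lower bound of %d is index %u\n", key, u);
	return (0);
}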
1011
1012/*
1013 * Compare string with pattern, returning 0 on match.
1014 * Short pattern matches trailing blanks in name,
1015 * wildcard '*' in pattern matches rest of name,
1016 * wildcard '?' matches a single non-space character.
1017 */
1018static int
1019cam_strmatch(const uint8_t *str, const uint8_t *pattern, int str_len)
1020{
1021
1022 while (*pattern != '\0'&& str_len > 0) {
1023
1024 if (*pattern == '*') {
1025 return (0);
1026 }
1027 if ((*pattern != *str)
1028 && (*pattern != '?' || *str == ' ')) {
1029 return (1);
1030 }
1031 pattern++;
1032 str++;
1033 str_len--;
1034 }
1035 while (str_len > 0 && *str++ == ' ')
1036 str_len--;
1037
1038 return (str_len);
1039}
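Illustrative calls (editorial examples; the strings are hypothetical INQUIRY
field contents and str_len is the fixed width of the field being compared):

/*
 *   cam_strmatch("SEAGATE ", "SEAGATE", 8) == 0   short pattern, trailing blank
 *   cam_strmatch("ST318452", "ST3*",    8) == 0   '*' matches the rest
 *   cam_strmatch("DISK 1  ", "DISK ?",  8) == 0   '?' matches any non-space
 *   cam_strmatch("DISK    ", "DISK ?",  8) != 0   ...but never a blank
 *   cam_strmatch("IBM     ", "SEAGATE", 8) != 0   plain mismatch
 */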
1040
1041static caddr_t
1042cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries,
1043 int entry_size, cam_quirkmatch_t *comp_func)
1044{
1045 for (; num_entries > 0; num_entries--, quirk_table += entry_size) {
1046 if ((*comp_func)(target, quirk_table) == 0)
1047 return (quirk_table);
1048 }
1049 return (NULL);
1050}
1051
1052void
1053aic_sense_desc(int sense_key, int asc, int ascq,
1054 struct scsi_inquiry_data *inq_data,
1055 const char **sense_key_desc, const char **asc_desc)
1056{
1057 const struct asc_table_entry *asc_entry;
1058 const struct sense_key_table_entry *sense_entry;
1059
1060 fetchtableentries(sense_key, asc, ascq,
1061 inq_data,
1062 &sense_entry,
1063 &asc_entry);
1064
1065 *sense_key_desc = sense_entry->desc;
1066
1067 if (asc_entry != NULL)
1068 *asc_desc = asc_entry->desc;
1069 else if (asc >= 0x80 && asc <= 0xff)
1070 *asc_desc = "Vendor Specific ASC";
1071 else if (ascq >= 0x80 && ascq <= 0xff)
1072 *asc_desc = "Vendor Specific ASCQ";
1073 else
1074 *asc_desc = "Reserved ASC/ASCQ pair";
1075}
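A sketch of how a caller might use the helper above (editorial illustration;
the surrounding variables are assumed to exist in the caller):

/*
 *	const char *key_txt, *asc_txt;
 *
 *	scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
 *	aic_sense_desc(sense_key, asc, ascq, inq_data, &key_txt, &asc_txt);
 *	printk("%s: ASC/ASCQ %#x/%#x (%s)\n", key_txt, asc, ascq, asc_txt);
 *
 * For ASC/ASCQ 0x3E/0x02 the table above resolves asc_txt to "Timeout on
 * logical unit"; codes with no table entry fall back to the "Vendor
 * Specific" or "Reserved ASC/ASCQ pair" strings chosen at the end of the
 * routine.
 */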
1076
1077/*
1078 * Given sense and device type information, return the appropriate action.
1079 * If we do not understand the specific error as identified by the ASC/ASCQ
1080 * pair, fall back on the more generic actions derived from the sense key.
1081 */
1082aic_sense_action
1083aic_sense_error_action(struct scsi_sense_data *sense_data,
1084 struct scsi_inquiry_data *inq_data, uint32_t sense_flags)
1085{
1086 const struct asc_table_entry *asc_entry;
1087 const struct sense_key_table_entry *sense_entry;
1088 int error_code, sense_key, asc, ascq;
1089 aic_sense_action action;
1090
1091 scsi_extract_sense(sense_data, &error_code, &sense_key, &asc, &ascq);
1092
1093 if (error_code == SSD_DEFERRED_ERROR) {
1094 /*
1095 * XXX dufault@FreeBSD.org
1096 * This error doesn't relate to the command associated
1097 * with this request sense. A deferred error is an error
1098 * for a command that has already returned GOOD status
1099 * (see SCSI2 8.2.14.2).
1100 *
1101 * By my reading of that section, it looks like the current
1102 * command has been cancelled, we should now clean things up
1103 * (hopefully recovering any lost data) and then retry the
1104 * current command. There are two easy choices, both wrong:
1105 *
1106 * 1. Drop through (like we had been doing), thus treating
1107 * this as if the error were for the current command and
1108 * return and stop the current command.
1109 *
1110 * 2. Issue a retry (like I made it do) thus hopefully
1111 * recovering the current transfer, and ignoring the
1112 * fact that we've dropped a command.
1113 *
 1114 * These should probably be handled in a device-specific
 1115 * sense handler or punted back up to a user-mode daemon.
1116 */
1117 action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE;
1118 } else {
1119 fetchtableentries(sense_key, asc, ascq,
1120 inq_data,
1121 &sense_entry,
1122 &asc_entry);
1123
1124 /*
1125 * Override the 'No additional Sense' entry (0,0)
1126 * with the error action of the sense key.
1127 */
1128 if (asc_entry != NULL
1129 && (asc != 0 || ascq != 0))
1130 action = asc_entry->action;
1131 else
1132 action = sense_entry->action;
1133
1134 if (sense_key == SSD_KEY_RECOVERED_ERROR) {
1135 /*
1136 * The action succeeded but the device wants
1137 * the user to know that some recovery action
1138 * was required.
1139 */
1140 action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK);
1141 action |= SS_NOP|SSQ_PRINT_SENSE;
1142 } else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) {
1143 if ((sense_flags & SF_QUIET_IR) != 0)
1144 action &= ~SSQ_PRINT_SENSE;
1145 } else if (sense_key == SSD_KEY_UNIT_ATTENTION) {
1146 if ((sense_flags & SF_RETRY_UA) != 0
1147 && (action & SS_MASK) == SS_FAIL) {
1148 action &= ~(SS_MASK|SSQ_MASK);
1149 action |= SS_RETRY|SSQ_DECREMENT_COUNT|
1150 SSQ_PRINT_SENSE;
1151 }
1152 }
1153 }
1154
1155 if ((sense_flags & SF_PRINT_ALWAYS) != 0)
1156 action |= SSQ_PRINT_SENSE;
1157 else if ((sense_flags & SF_NO_PRINT) != 0)
1158 action &= ~SSQ_PRINT_SENSE;
1159
1160 return (action);
1161}
1162
1163/*
 1164 * Try to make as good a match as possible with
 1165 * the available sub-drivers.
1166 */
1167int
1168aic_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
1169{
1170 struct scsi_inquiry_pattern *entry;
1171 struct scsi_inquiry_data *inq;
1172
1173 entry = (struct scsi_inquiry_pattern *)table_entry;
1174 inq = (struct scsi_inquiry_data *)inqbuffer;
1175
1176 if (((SID_TYPE(inq) == entry->type)
1177 || (entry->type == T_ANY))
1178 && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
1179 : entry->media_type & SIP_MEDIA_FIXED)
1180 && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
1181 && (cam_strmatch(inq->product, entry->product,
1182 sizeof(inq->product)) == 0)
1183 && (cam_strmatch(inq->revision, entry->revision,
1184 sizeof(inq->revision)) == 0)) {
1185 return (0);
1186 }
1187 return (-1);
1188}
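An illustrative quirk pattern (hypothetical entry, not taken from the shipped
table) showing how the wildcard fields combine with the checks above:

/*
 *	static struct scsi_inquiry_pattern example_pat =
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "ST3*", "*" };
 *
 * aic_inquiry_match() accepts any fixed direct-access device whose INQUIRY
 * vendor field is "SEAGATE" (blank padded to 8 bytes), whose product field
 * begins with "ST3", and whose revision is anything at all, since
 * cam_strmatch() treats '*' as "match the rest of the field".
 */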
1189
1190/*
1191 * Table of syncrates that don't follow the "divisible by 4"
1192 * rule. This table will be expanded in future SCSI specs.
1193 */
1194static struct {
1195 u_int period_factor;
1196 u_int period; /* in 100ths of ns */
1197} scsi_syncrates[] = {
1198 { 0x08, 625 }, /* FAST-160 */
1199 { 0x09, 1250 }, /* FAST-80 */
1200 { 0x0a, 2500 }, /* FAST-40 40MHz */
1201 { 0x0b, 3030 }, /* FAST-40 33MHz */
1202 { 0x0c, 5000 } /* FAST-20 */
1203};
1204
1205/*
1206 * Return the frequency in kHz corresponding to the given
1207 * sync period factor.
1208 */
1209u_int
1210aic_calc_syncsrate(u_int period_factor)
1211{
1212 int i;
1213 int num_syncrates;
1214
1215 num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
1216 /* See if the period is in the "exception" table */
1217 for (i = 0; i < num_syncrates; i++) {
1218
1219 if (period_factor == scsi_syncrates[i].period_factor) {
1220 /* Period in kHz */
1221 return (100000000 / scsi_syncrates[i].period);
1222 }
1223 }
1224
1225 /*
1226 * Wasn't in the table, so use the standard
1227 * 4 times conversion.
1228 */
1229 return (10000000 / (period_factor * 4 * 10));
1230}
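Two worked values for the conversion above (editorial arithmetic):

/*
 * Period factor 0x0c is in the exception table with a period of 5000
 * (50.00 ns), so the routine returns 100000000 / 5000 = 20000 kHz, i.e.
 * FAST-20.  A factor such as 0x19 (25) is not in the table and falls
 * through to the generic rule: 10000000 / (25 * 4 * 10) = 10000 kHz,
 * matching the conventional "factor * 4 ns" period of 100 ns.
 */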
1231
1232/*
1233 * Return speed in KB/s.
1234 */
1235u_int
1236aic_calc_speed(u_int width, u_int period, u_int offset, u_int min_rate)
1237{
1238 u_int freq;
1239
1240 if (offset != 0 && period < min_rate)
1241 freq = aic_calc_syncsrate(period);
1242 else
1243 /* Roughly 3.3MB/s for async */
1244 freq = 3300;
1245 freq <<= width;
1246 return (freq);
1247}
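A worked example for the speed calculation (editorial arithmetic):

/*
 * A wide target (width == 1) that negotiated period factor 0x08 with a
 * non-zero offset (and a period below min_rate) gets
 * aic_calc_syncsrate(0x08) == 160000 kHz, shifted left by the width to
 * 320000 KB/s -- the familiar Ultra320 figure.  With offset == 0 the
 * transfer is treated as asynchronous: 3300 KB/s, or 6600 KB/s wide.
 */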
1248
1249uint32_t
1250aic_error_action(struct scsi_cmnd *cmd, struct scsi_inquiry_data *inq_data,
1251 cam_status status, u_int scsi_status)
1252{
1253 aic_sense_action err_action;
1254 int sense;
1255
1256 sense = (cmd->result >> 24) == DRIVER_SENSE;
1257
1258 switch (status) {
1259 case CAM_REQ_CMP:
1260 err_action = SS_NOP;
1261 break;
1262 case CAM_AUTOSENSE_FAIL:
1263 case CAM_SCSI_STATUS_ERROR:
1264
1265 switch (scsi_status) {
1266 case SCSI_STATUS_OK:
1267 case SCSI_STATUS_COND_MET:
1268 case SCSI_STATUS_INTERMED:
1269 case SCSI_STATUS_INTERMED_COND_MET:
1270 err_action = SS_NOP;
1271 break;
1272 case SCSI_STATUS_CMD_TERMINATED:
1273 case SCSI_STATUS_CHECK_COND:
1274 if (sense != 0) {
1275 struct scsi_sense_data *sense;
1276
1277 sense = (struct scsi_sense_data *)
1278 &cmd->sense_buffer;
1279 err_action =
1280 aic_sense_error_action(sense, inq_data, 0);
1281
1282 } else {
1283 err_action = SS_RETRY|SSQ_FALLBACK
1284 | SSQ_DECREMENT_COUNT|EIO;
1285 }
1286 break;
1287 case SCSI_STATUS_QUEUE_FULL:
1288 case SCSI_STATUS_BUSY:
1289 err_action = SS_RETRY|SSQ_DELAY|SSQ_MANY
1290 | SSQ_DECREMENT_COUNT|EBUSY;
1291 break;
1292 case SCSI_STATUS_RESERV_CONFLICT:
1293 default:
1294 err_action = SS_FAIL|EBUSY;
1295 break;
1296 }
1297 break;
1298 case CAM_CMD_TIMEOUT:
1299 case CAM_REQ_CMP_ERR:
1300 case CAM_UNEXP_BUSFREE:
1301 case CAM_UNCOR_PARITY:
1302 case CAM_DATA_RUN_ERR:
1303 err_action = SS_RETRY|SSQ_FALLBACK|EIO;
1304 break;
1305 case CAM_UA_ABORT:
1306 case CAM_UA_TERMIO:
1307 case CAM_MSG_REJECT_REC:
1308 case CAM_SEL_TIMEOUT:
1309 err_action = SS_FAIL|EIO;
1310 break;
1311 case CAM_REQ_INVALID:
1312 case CAM_PATH_INVALID:
1313 case CAM_DEV_NOT_THERE:
1314 case CAM_NO_HBA:
1315 case CAM_PROVIDE_FAIL:
1316 case CAM_REQ_TOO_BIG:
1317 case CAM_RESRC_UNAVAIL:
1318 case CAM_BUSY:
1319 default:
1320 /* panic?? These should never occur in our application. */
1321 err_action = SS_FAIL|EIO;
1322 break;
1323 case CAM_SCSI_BUS_RESET:
1324 case CAM_BDR_SENT:
1325 case CAM_REQUEUE_REQ:
1326 /* Unconditional requeue */
1327 err_action = SS_RETRY;
1328 break;
1329 }
1330
1331 return (err_action);
1332}
1333
1334char *
1335aic_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
1336 aic_option_callback_t *callback, u_long callback_arg)
1337{
1338 char *tok_end;
1339 char *tok_end2;
1340 int i;
1341 int instance;
1342 int targ;
1343 int done;
1344 char tok_list[] = {'.', ',', '{', '}', '\0'};
 1345
1346 /* All options use a ':' name/arg separator */
1347 if (*opt_arg != ':')
1348 return (opt_arg);
1349 opt_arg++;
1350 instance = -1;
1351 targ = -1;
1352 done = FALSE;
1353 /*
1354 * Restore separator that may be in
1355 * the middle of our option argument.
1356 */
1357 tok_end = strchr(opt_arg, '\0');
1358 if (tok_end < end)
1359 *tok_end = ',';
1360 while (!done) {
1361 switch (*opt_arg) {
1362 case '{':
1363 if (instance == -1) {
1364 instance = 0;
1365 } else {
1366 if (depth > 1) {
1367 if (targ == -1)
1368 targ = 0;
1369 } else {
1370 printf("Malformed Option %s\n",
1371 opt_name);
1372 done = TRUE;
1373 }
1374 }
1375 opt_arg++;
1376 break;
1377 case '}':
1378 if (targ != -1)
1379 targ = -1;
1380 else if (instance != -1)
1381 instance = -1;
1382 opt_arg++;
1383 break;
1384 case ',':
1385 case '.':
1386 if (instance == -1)
1387 done = TRUE;
1388 else if (targ >= 0)
1389 targ++;
1390 else if (instance >= 0)
1391 instance++;
1392 opt_arg++;
1393 break;
1394 case '\0':
1395 done = TRUE;
1396 break;
1397 default:
1398 tok_end = end;
1399 for (i = 0; tok_list[i]; i++) {
1400 tok_end2 = strchr(opt_arg, tok_list[i]);
1401 if ((tok_end2) && (tok_end2 < tok_end))
1402 tok_end = tok_end2;
1403 }
1404 callback(callback_arg, instance, targ,
1405 simple_strtol(opt_arg, NULL, 0));
1406 opt_arg = tok_end;
1407 break;
1408 }
1409 }
1410 return (opt_arg);
1411}
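A walk-through of the brace syntax parsed above (the option string is a
hypothetical example, not taken from the driver documentation):

/*
 * Given an argument such as
 *
 *	tag_info:{{16,16,64},{8}}
 *
 * the caller hands this routine a pointer to the ':' that follows the
 * option name.  The outer '{' selects controller instance 0, each inner
 * '{' starts a per-target list, and each ',' advances either the target
 * index (inside the inner braces) or the instance (between them), so the
 * callback fires with (instance 0, targ 0..2) for 16, 16 and 64, and with
 * (instance 1, targ 0) for 8.
 */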
diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h
index bfe6f954d3c4..3bfbf0fe1ec2 100644
--- a/drivers/scsi/aic7xxx/aiclib.h
+++ b/drivers/scsi/aic7xxx/aiclib.h
@@ -57,121 +57,6 @@
57#ifndef _AICLIB_H 57#ifndef _AICLIB_H
58#define _AICLIB_H 58#define _AICLIB_H
59 59
60/*
61 * Linux Interrupt Support.
62 */
63#ifndef IRQ_RETVAL
64typedef void irqreturn_t;
65#define IRQ_RETVAL(x)
66#endif
67
68/*
69 * SCSI command format
70 */
71
72/*
 73 * Define some bits that are in ALL (or a lot of) SCSI commands
74 */
75#define SCSI_CTL_LINK 0x01
76#define SCSI_CTL_FLAG 0x02
77#define SCSI_CTL_VENDOR 0xC0
78#define SCSI_CMD_LUN 0xA0 /* these two should not be needed */
79#define SCSI_CMD_LUN_SHIFT 5 /* LUN in the cmd is no longer SCSI */
80
81#define SCSI_MAX_CDBLEN 16 /*
82 * 16 byte commands are in the
83 * SCSI-3 spec
84 */
 85/* 6-byte CDBs special-case a length of 0 to mean 256 */
86#define SCSI_CDB6_LEN(len) ((len) == 0 ? 256 : len)
87
88/*
89 * This type defines actions to be taken when a particular sense code is
90 * received. Right now, these flags are only defined to take up 16 bits,
91 * but can be expanded in the future if necessary.
92 */
93typedef enum {
94 SS_NOP = 0x000000, /* Do nothing */
95 SS_RETRY = 0x010000, /* Retry the command */
96 SS_FAIL = 0x020000, /* Bail out */
97 SS_START = 0x030000, /* Send a Start Unit command to the device,
98 * then retry the original command.
99 */
100 SS_TUR = 0x040000, /* Send a Test Unit Ready command to the
101 * device, then retry the original command.
102 */
103 SS_REQSENSE = 0x050000, /* Send a RequestSense command to the
104 * device, then retry the original command.
105 */
106 SS_INQ_REFRESH = 0x060000,
107 SS_MASK = 0xff0000
108} aic_sense_action;
109
110typedef enum {
111 SSQ_NONE = 0x0000,
112 SSQ_DECREMENT_COUNT = 0x0100, /* Decrement the retry count */
113 SSQ_MANY = 0x0200, /* send lots of recovery commands */
114 SSQ_RANGE = 0x0400, /*
115 * This table entry represents the
116 * end of a range of ASCQs that
117 * have identical error actions
118 * and text.
119 */
120 SSQ_PRINT_SENSE = 0x0800,
121 SSQ_DELAY = 0x1000, /* Delay before retry. */
122 SSQ_DELAY_RANDOM = 0x2000, /* Randomized delay before retry. */
123 SSQ_FALLBACK = 0x4000, /* Do a speed fallback to recover */
124 SSQ_MASK = 0xff00
125} aic_sense_action_qualifier;
126
127/* Mask for error status values */
128#define SS_ERRMASK 0xff
129
130/* The default, retryable error action */
131#define SS_RDEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO
132
133/* The retryable error action, with a table-specified error code */
134#define SS_RET SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
135
136/* Fatal error action, with table specified error code */
137#define SS_FATAL SS_FAIL|SSQ_PRINT_SENSE
138
139struct scsi_generic
140{
141 uint8_t opcode;
142 uint8_t bytes[11];
143};
144
145struct scsi_request_sense
146{
147 uint8_t opcode;
148 uint8_t byte2;
149 uint8_t unused[2];
150 uint8_t length;
151 uint8_t control;
152};
153
154struct scsi_test_unit_ready
155{
156 uint8_t opcode;
157 uint8_t byte2;
158 uint8_t unused[3];
159 uint8_t control;
160};
161
162struct scsi_send_diag
163{
164 uint8_t opcode;
165 uint8_t byte2;
166#define SSD_UOL 0x01
167#define SSD_DOL 0x02
168#define SSD_SELFTEST 0x04
169#define SSD_PF 0x10
170 uint8_t unused[1];
171 uint8_t paramlen[2];
172 uint8_t control;
173};
174
175struct scsi_sense 60struct scsi_sense
176{ 61{
177 uint8_t opcode; 62 uint8_t opcode;
@@ -181,537 +66,12 @@ struct scsi_sense
181 uint8_t control; 66 uint8_t control;
182}; 67};
183 68
184struct scsi_inquiry
185{
186 uint8_t opcode;
187 uint8_t byte2;
188#define SI_EVPD 0x01
189 uint8_t page_code;
190 uint8_t reserved;
191 uint8_t length;
192 uint8_t control;
193};
194
195struct scsi_mode_sense_6
196{
197 uint8_t opcode;
198 uint8_t byte2;
199#define SMS_DBD 0x08
200 uint8_t page;
201#define SMS_PAGE_CODE 0x3F
202#define SMS_VENDOR_SPECIFIC_PAGE 0x00
203#define SMS_DISCONNECT_RECONNECT_PAGE 0x02
204#define SMS_PERIPHERAL_DEVICE_PAGE 0x09
205#define SMS_CONTROL_MODE_PAGE 0x0A
206#define SMS_ALL_PAGES_PAGE 0x3F
207#define SMS_PAGE_CTRL_MASK 0xC0
208#define SMS_PAGE_CTRL_CURRENT 0x00
209#define SMS_PAGE_CTRL_CHANGEABLE 0x40
210#define SMS_PAGE_CTRL_DEFAULT 0x80
211#define SMS_PAGE_CTRL_SAVED 0xC0
212 uint8_t unused;
213 uint8_t length;
214 uint8_t control;
215};
216
217struct scsi_mode_sense_10
218{
219 uint8_t opcode;
220 uint8_t byte2; /* same bits as small version */
221 uint8_t page; /* same bits as small version */
222 uint8_t unused[4];
223 uint8_t length[2];
224 uint8_t control;
225};
226
227struct scsi_mode_select_6
228{
229 uint8_t opcode;
230 uint8_t byte2;
231#define SMS_SP 0x01
232#define SMS_PF 0x10
233 uint8_t unused[2];
234 uint8_t length;
235 uint8_t control;
236};
237
238struct scsi_mode_select_10
239{
240 uint8_t opcode;
241 uint8_t byte2; /* same bits as small version */
242 uint8_t unused[5];
243 uint8_t length[2];
244 uint8_t control;
245};
246
247/*
248 * When sending a mode select to a tape drive, the medium type must be 0.
249 */
250struct scsi_mode_hdr_6
251{
252 uint8_t datalen;
253 uint8_t medium_type;
254 uint8_t dev_specific;
255 uint8_t block_descr_len;
256};
257
258struct scsi_mode_hdr_10
259{
260 uint8_t datalen[2];
261 uint8_t medium_type;
262 uint8_t dev_specific;
263 uint8_t reserved[2];
264 uint8_t block_descr_len[2];
265};
266
267struct scsi_mode_block_descr
268{
269 uint8_t density_code;
270 uint8_t num_blocks[3];
271 uint8_t reserved;
272 uint8_t block_len[3];
273};
274
275struct scsi_log_sense
276{
277 uint8_t opcode;
278 uint8_t byte2;
279#define SLS_SP 0x01
280#define SLS_PPC 0x02
281 uint8_t page;
282#define SLS_PAGE_CODE 0x3F
283#define SLS_ALL_PAGES_PAGE 0x00
284#define SLS_OVERRUN_PAGE 0x01
285#define SLS_ERROR_WRITE_PAGE 0x02
286#define SLS_ERROR_READ_PAGE 0x03
287#define SLS_ERROR_READREVERSE_PAGE 0x04
288#define SLS_ERROR_VERIFY_PAGE 0x05
289#define SLS_ERROR_NONMEDIUM_PAGE 0x06
290#define SLS_ERROR_LASTN_PAGE 0x07
291#define SLS_PAGE_CTRL_MASK 0xC0
292#define SLS_PAGE_CTRL_THRESHOLD 0x00
293#define SLS_PAGE_CTRL_CUMULATIVE 0x40
294#define SLS_PAGE_CTRL_THRESH_DEFAULT 0x80
295#define SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0
296 uint8_t reserved[2];
297 uint8_t paramptr[2];
298 uint8_t length[2];
299 uint8_t control;
300};
301
302struct scsi_log_select
303{
304 uint8_t opcode;
305 uint8_t byte2;
306/* SLS_SP 0x01 */
307#define SLS_PCR 0x02
308 uint8_t page;
309/* SLS_PAGE_CTRL_MASK 0xC0 */
310/* SLS_PAGE_CTRL_THRESHOLD 0x00 */
311/* SLS_PAGE_CTRL_CUMULATIVE 0x40 */
312/* SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 */
313/* SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 */
314 uint8_t reserved[4];
315 uint8_t length[2];
316 uint8_t control;
317};
318
319struct scsi_log_header
320{
321 uint8_t page;
322 uint8_t reserved;
323 uint8_t datalen[2];
324};
325
326struct scsi_log_param_header {
327 uint8_t param_code[2];
328 uint8_t param_control;
329#define SLP_LP 0x01
330#define SLP_LBIN 0x02
331#define SLP_TMC_MASK 0x0C
332#define SLP_TMC_ALWAYS 0x00
333#define SLP_TMC_EQUAL 0x04
334#define SLP_TMC_NOTEQUAL 0x08
335#define SLP_TMC_GREATER 0x0C
336#define SLP_ETC 0x10
337#define SLP_TSD 0x20
338#define SLP_DS 0x40
339#define SLP_DU 0x80
340 uint8_t param_len;
341};
342
343struct scsi_control_page {
344 uint8_t page_code;
345 uint8_t page_length;
346 uint8_t rlec;
347#define SCB_RLEC 0x01 /*Report Log Exception Cond*/
348 uint8_t queue_flags;
349#define SCP_QUEUE_ALG_MASK 0xF0
350#define SCP_QUEUE_ALG_RESTRICTED 0x00
351#define SCP_QUEUE_ALG_UNRESTRICTED 0x10
352#define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/
353#define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/
354 uint8_t eca_and_aen;
355#define SCP_EECA 0x80 /*Enable Extended CA*/
356#define SCP_RAENP 0x04 /*Ready AEN Permission*/
357#define SCP_UAAENP 0x02 /*UA AEN Permission*/
358#define SCP_EAENP 0x01 /*Error AEN Permission*/
359 uint8_t reserved;
360 uint8_t aen_holdoff_period[2];
361};
362
363struct scsi_reserve
364{
365 uint8_t opcode;
366 uint8_t byte2;
367 uint8_t unused[2];
368 uint8_t length;
369 uint8_t control;
370};
371
372struct scsi_release
373{
374 uint8_t opcode;
375 uint8_t byte2;
376 uint8_t unused[2];
377 uint8_t length;
378 uint8_t control;
379};
380
381struct scsi_prevent
382{
383 uint8_t opcode;
384 uint8_t byte2;
385 uint8_t unused[2];
386 uint8_t how;
387 uint8_t control;
388};
389#define PR_PREVENT 0x01
390#define PR_ALLOW 0x00
391
392struct scsi_sync_cache
393{
394 uint8_t opcode;
395 uint8_t byte2;
396 uint8_t begin_lba[4];
397 uint8_t reserved;
398 uint8_t lb_count[2];
399 uint8_t control;
400};
401
402
403struct scsi_changedef
404{
405 uint8_t opcode;
406 uint8_t byte2;
407 uint8_t unused1;
408 uint8_t how;
409 uint8_t unused[4];
410 uint8_t datalen;
411 uint8_t control;
412};
413
414struct scsi_read_buffer
415{
416 uint8_t opcode;
417 uint8_t byte2;
418#define RWB_MODE 0x07
419#define RWB_MODE_HDR_DATA 0x00
420#define RWB_MODE_DATA 0x02
421#define RWB_MODE_DOWNLOAD 0x04
422#define RWB_MODE_DOWNLOAD_SAVE 0x05
423 uint8_t buffer_id;
424 uint8_t offset[3];
425 uint8_t length[3];
426 uint8_t control;
427};
428
429struct scsi_write_buffer
430{
431 uint8_t opcode;
432 uint8_t byte2;
433 uint8_t buffer_id;
434 uint8_t offset[3];
435 uint8_t length[3];
436 uint8_t control;
437};
438
439struct scsi_rw_6
440{
441 uint8_t opcode;
442 uint8_t addr[3];
443/* only 5 bits are valid in the MSB address byte */
444#define SRW_TOPADDR 0x1F
445 uint8_t length;
446 uint8_t control;
447};
448
449struct scsi_rw_10
450{
451 uint8_t opcode;
452#define SRW10_RELADDR 0x01
453#define SRW10_FUA 0x08
454#define SRW10_DPO 0x10
455 uint8_t byte2;
456 uint8_t addr[4];
457 uint8_t reserved;
458 uint8_t length[2];
459 uint8_t control;
460};
461
462struct scsi_rw_12
463{
464 uint8_t opcode;
465#define SRW12_RELADDR 0x01
466#define SRW12_FUA 0x08
467#define SRW12_DPO 0x10
468 uint8_t byte2;
469 uint8_t addr[4];
470 uint8_t length[4];
471 uint8_t reserved;
472 uint8_t control;
473};
474
475struct scsi_start_stop_unit
476{
477 uint8_t opcode;
478 uint8_t byte2;
479#define SSS_IMMED 0x01
480 uint8_t reserved[2];
481 uint8_t how;
482#define SSS_START 0x01
483#define SSS_LOEJ 0x02
484 uint8_t control;
485};
486
487#define SC_SCSI_1 0x01
488#define SC_SCSI_2 0x03
489
490/*
491 * Opcodes
492 */
493
494#define TEST_UNIT_READY 0x00
495#define REQUEST_SENSE 0x03
496#define READ_6 0x08
497#define WRITE_6 0x0a
498#define INQUIRY 0x12
499#define MODE_SELECT_6 0x15
500#define MODE_SENSE_6 0x1a
501#define START_STOP_UNIT 0x1b
502#define START_STOP 0x1b
503#define RESERVE 0x16
504#define RELEASE 0x17
505#define RECEIVE_DIAGNOSTIC 0x1c
506#define SEND_DIAGNOSTIC 0x1d
507#define PREVENT_ALLOW 0x1e
508#define READ_CAPACITY 0x25
509#define READ_10 0x28
510#define WRITE_10 0x2a
511#define POSITION_TO_ELEMENT 0x2b
512#define SYNCHRONIZE_CACHE 0x35
513#define WRITE_BUFFER 0x3b
514#define READ_BUFFER 0x3c
515#define CHANGE_DEFINITION 0x40
516#define LOG_SELECT 0x4c
517#define LOG_SENSE 0x4d
518#ifdef XXXCAM
519#define MODE_SENSE_10 0x5A
520#endif
521#define MODE_SELECT_10 0x55
522#define MOVE_MEDIUM 0xa5
523#define READ_12 0xa8
524#define WRITE_12 0xaa
525#define READ_ELEMENT_STATUS 0xb8
526
527
528/*
529 * Device Types
530 */
531#define T_DIRECT 0x00
532#define T_SEQUENTIAL 0x01
533#define T_PRINTER 0x02
534#define T_PROCESSOR 0x03
535#define T_WORM 0x04
536#define T_CDROM 0x05
537#define T_SCANNER 0x06
538#define T_OPTICAL 0x07
539#define T_CHANGER 0x08
540#define T_COMM 0x09
541#define T_ASC0 0x0a
542#define T_ASC1 0x0b
543#define T_STORARRAY 0x0c
544#define T_ENCLOSURE 0x0d
545#define T_RBC 0x0e
546#define T_OCRW 0x0f
547#define T_NODEVICE 0x1F
548#define T_ANY 0xFF /* Used in Quirk table matches */
549
550#define T_REMOV 1
551#define T_FIXED 0
552
553/*
554 * This length is the initial inquiry length used by the probe code, as
 555 * well as the length necessary for aic_print_inquiry() to function
556 * correctly. If either use requires a different length in the future,
557 * the two values should be de-coupled.
558 */
559#define SHORT_INQUIRY_LENGTH 36
560
561struct scsi_inquiry_data
562{
563 uint8_t device;
564#define SID_TYPE(inq_data) ((inq_data)->device & 0x1f)
565#define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5)
566#define SID_QUAL_LU_CONNECTED 0x00 /*
567 * The specified peripheral device
568 * type is currently connected to
569 * logical unit. If the target cannot
570 * determine whether or not a physical
571 * device is currently connected, it
572 * shall also use this peripheral
573 * qualifier when returning the INQUIRY
574 * data. This peripheral qualifier
575 * does not mean that the device is
576 * ready for access by the initiator.
577 */
578#define SID_QUAL_LU_OFFLINE 0x01 /*
579 * The target is capable of supporting
580 * the specified peripheral device type
581 * on this logical unit; however, the
582 * physical device is not currently
583 * connected to this logical unit.
584 */
585#define SID_QUAL_RSVD 0x02
586#define SID_QUAL_BAD_LU 0x03 /*
587 * The target is not capable of
588 * supporting a physical device on
589 * this logical unit. For this
590 * peripheral qualifier the peripheral
591 * device type shall be set to 1Fh to
592 * provide compatibility with previous
593 * versions of SCSI. All other
594 * peripheral device type values are
595 * reserved for this peripheral
596 * qualifier.
597 */
598#define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
599 uint8_t dev_qual2;
600#define SID_QUAL2 0x7F
601#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
602 uint8_t version;
603#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
604#define SCSI_REV_0 0 69#define SCSI_REV_0 0
605#define SCSI_REV_CCS 1 70#define SCSI_REV_CCS 1
606#define SCSI_REV_2 2 71#define SCSI_REV_2 2
607#define SCSI_REV_SPC 3 72#define SCSI_REV_SPC 3
608#define SCSI_REV_SPC2 4 73#define SCSI_REV_SPC2 4
609 74
610#define SID_ECMA 0x38
611#define SID_ISO 0xC0
612 uint8_t response_format;
613#define SID_AENC 0x80
614#define SID_TrmIOP 0x40
615 uint8_t additional_length;
616 uint8_t reserved[2];
617 uint8_t flags;
618#define SID_SftRe 0x01
619#define SID_CmdQue 0x02
620#define SID_Linked 0x08
621#define SID_Sync 0x10
622#define SID_WBus16 0x20
623#define SID_WBus32 0x40
624#define SID_RelAdr 0x80
625#define SID_VENDOR_SIZE 8
626 char vendor[SID_VENDOR_SIZE];
627#define SID_PRODUCT_SIZE 16
628 char product[SID_PRODUCT_SIZE];
629#define SID_REVISION_SIZE 4
630 char revision[SID_REVISION_SIZE];
631 /*
632 * The following fields were taken from SCSI Primary Commands - 2
633 * (SPC-2) Revision 14, Dated 11 November 1999
634 */
635#define SID_VENDOR_SPECIFIC_0_SIZE 20
636 uint8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE];
637 /*
638 * An extension of SCSI Parallel Specific Values
639 */
640#define SID_SPI_IUS 0x01
641#define SID_SPI_QAS 0x02
642#define SID_SPI_CLOCK_ST 0x00
643#define SID_SPI_CLOCK_DT 0x04
644#define SID_SPI_CLOCK_DT_ST 0x0C
645#define SID_SPI_MASK 0x0F
646 uint8_t spi3data;
647 uint8_t reserved2;
648 /*
649 * Version Descriptors, stored 2 byte values.
650 */
651 uint8_t version1[2];
652 uint8_t version2[2];
653 uint8_t version3[2];
654 uint8_t version4[2];
655 uint8_t version5[2];
656 uint8_t version6[2];
657 uint8_t version7[2];
658 uint8_t version8[2];
659
660 uint8_t reserved3[22];
661
662#define SID_VENDOR_SPECIFIC_1_SIZE 160
663 uint8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE];
664};
665
666struct scsi_vpd_unit_serial_number
667{
668 uint8_t device;
669 uint8_t page_code;
670#define SVPD_UNIT_SERIAL_NUMBER 0x80
671 uint8_t reserved;
672 uint8_t length; /* serial number length */
673#define SVPD_SERIAL_NUM_SIZE 251
674 uint8_t serial_num[SVPD_SERIAL_NUM_SIZE];
675};
676
677struct scsi_read_capacity
678{
679 uint8_t opcode;
680 uint8_t byte2;
681 uint8_t addr[4];
682 uint8_t unused[3];
683 uint8_t control;
684};
685
686struct scsi_read_capacity_data
687{
688 uint8_t addr[4];
689 uint8_t length[4];
690};
691
692struct scsi_report_luns
693{
694 uint8_t opcode;
695 uint8_t byte2;
696 uint8_t unused[3];
697 uint8_t addr[4];
698 uint8_t control;
699};
700
701struct scsi_report_luns_data {
702 uint8_t length[4]; /* length of LUN inventory, in bytes */
703 uint8_t reserved[4]; /* unused */
704 /*
705 * LUN inventory- we only support the type zero form for now.
706 */
707 struct {
708 uint8_t lundata[8];
709 } luns[1];
710};
711#define RPL_LUNDATA_ATYP_MASK 0xc0 /* MBZ for type 0 lun */
712#define RPL_LUNDATA_T0LUN 1 /* @ lundata[1] */
713
714
715struct scsi_sense_data 75struct scsi_sense_data
716{ 76{
717 uint8_t error_code; 77 uint8_t error_code;
@@ -757,41 +117,6 @@ struct scsi_sense_data
757#define SSD_FULL_SIZE sizeof(struct scsi_sense_data) 117#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
758}; 118};
759 119
760struct scsi_mode_header_6
761{
762 uint8_t data_length; /* Sense data length */
763 uint8_t medium_type;
764 uint8_t dev_spec;
765 uint8_t blk_desc_len;
766};
767
768struct scsi_mode_header_10
769{
770 uint8_t data_length[2];/* Sense data length */
771 uint8_t medium_type;
772 uint8_t dev_spec;
773 uint8_t unused[2];
774 uint8_t blk_desc_len[2];
775};
776
777struct scsi_mode_page_header
778{
779 uint8_t page_code;
780 uint8_t page_length;
781};
782
783struct scsi_mode_blk_desc
784{
785 uint8_t density;
786 uint8_t nblocks[3];
787 uint8_t reserved;
788 uint8_t blklen[3];
789};
790
791#define SCSI_DEFAULT_DENSITY 0x00 /* use 'default' density */
792#define SCSI_SAME_DENSITY 0x7f /* use 'same' density- >= SCSI-2 only */
793
794
795/* 120/*
796 * Status Byte 121 * Status Byte
797 */ 122 */
@@ -807,76 +132,7 @@ struct scsi_mode_blk_desc
807#define SCSI_STATUS_ACA_ACTIVE 0x30 132#define SCSI_STATUS_ACA_ACTIVE 0x30
808#define SCSI_STATUS_TASK_ABORTED 0x40 133#define SCSI_STATUS_TASK_ABORTED 0x40
809 134
810struct scsi_inquiry_pattern {
811 uint8_t type;
812 uint8_t media_type;
813#define SIP_MEDIA_REMOVABLE 0x01
814#define SIP_MEDIA_FIXED 0x02
815 const char *vendor;
816 const char *product;
817 const char *revision;
818};
819
820struct scsi_static_inquiry_pattern {
821 uint8_t type;
822 uint8_t media_type;
823 char vendor[SID_VENDOR_SIZE+1];
824 char product[SID_PRODUCT_SIZE+1];
825 char revision[SID_REVISION_SIZE+1];
826};
827
828struct scsi_sense_quirk_entry {
829 struct scsi_inquiry_pattern inq_pat;
830 int num_sense_keys;
831 int num_ascs;
832 struct sense_key_table_entry *sense_key_info;
833 struct asc_table_entry *asc_info;
834};
835
836struct sense_key_table_entry {
837 uint8_t sense_key;
838 uint32_t action;
839 const char *desc;
840};
841
842struct asc_table_entry {
843 uint8_t asc;
844 uint8_t ascq;
845 uint32_t action;
846 const char *desc;
847};
848
849struct op_table_entry {
850 uint8_t opcode;
851 uint16_t opmask;
852 const char *desc;
853};
854
855struct scsi_op_quirk_entry {
856 struct scsi_inquiry_pattern inq_pat;
857 int num_ops;
858 struct op_table_entry *op_table;
859};
860
861typedef enum {
862 SSS_FLAG_NONE = 0x00,
863 SSS_FLAG_PRINT_COMMAND = 0x01
864} scsi_sense_string_flags;
865
866extern const char *scsi_sense_key_text[];
867
868/************************* Large Disk Handling ********************************/ 135/************************* Large Disk Handling ********************************/
869#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
870static __inline int aic_sector_div(u_long capacity, int heads, int sectors);
871
872static __inline int
873aic_sector_div(u_long capacity, int heads, int sectors)
874{
875 return (capacity / (heads * sectors));
876}
877#else
878static __inline int aic_sector_div(sector_t capacity, int heads, int sectors);
879
880static __inline int 136static __inline int
881aic_sector_div(sector_t capacity, int heads, int sectors) 137aic_sector_div(sector_t capacity, int heads, int sectors)
882{ 138{
@@ -884,152 +140,6 @@ aic_sector_div(sector_t capacity, int heads, int sectors)
884 sector_div(capacity, (heads * sectors)); 140 sector_div(capacity, (heads * sectors));
885 return (int)capacity; 141 return (int)capacity;
886} 142}
887#endif
888
889/**************************** Module Library Hack *****************************/
890/*
891 * What we'd like to do is have a single "scsi library" module that both the
892 * aic7xxx and aic79xx drivers could load and depend on. A cursory examination
893 * of implementing module dependencies in Linux (handling the install and
894 * initrd cases) does not look promising. For now, we just duplicate this
895 * code in both drivers using a simple symbol renaming scheme that hides this
896 * hack from the drivers.
897 */
898#define AIC_LIB_ENTRY_CONCAT(x, prefix) prefix ## x
899#define AIC_LIB_ENTRY_EXPAND(x, prefix) AIC_LIB_ENTRY_CONCAT(x, prefix)
900#define AIC_LIB_ENTRY(x) AIC_LIB_ENTRY_EXPAND(x, AIC_LIB_PREFIX)
901
902#define aic_sense_desc AIC_LIB_ENTRY(_sense_desc)
903#define aic_sense_error_action AIC_LIB_ENTRY(_sense_error_action)
904#define aic_error_action AIC_LIB_ENTRY(_error_action)
905#define aic_op_desc AIC_LIB_ENTRY(_op_desc)
906#define aic_cdb_string AIC_LIB_ENTRY(_cdb_string)
907#define aic_print_inquiry AIC_LIB_ENTRY(_print_inquiry)
908#define aic_calc_syncsrate AIC_LIB_ENTRY(_calc_syncrate)
909#define aic_calc_syncparam AIC_LIB_ENTRY(_calc_syncparam)
910#define aic_calc_speed AIC_LIB_ENTRY(_calc_speed)
911#define aic_inquiry_match AIC_LIB_ENTRY(_inquiry_match)
912#define aic_static_inquiry_match AIC_LIB_ENTRY(_static_inquiry_match)
913#define aic_parse_brace_option AIC_LIB_ENTRY(_parse_brace_option)
914
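An editorial note on the renaming scheme above (the prefix value is
hypothetical):

/*
 * If a driver defines, before including this header,
 *
 *	#define AIC_LIB_PREFIX	ahd
 *
 * then AIC_LIB_ENTRY(_sense_desc) expands through AIC_LIB_ENTRY_EXPAND and
 * AIC_LIB_ENTRY_CONCAT to ahd_sense_desc, so each driver carries its own
 * private copy of these library symbols and the aic7xxx and aic79xx modules
 * can be loaded side by side without symbol clashes.
 */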
915/******************************************************************************/
916
917void aic_sense_desc(int /*sense_key*/, int /*asc*/,
918 int /*ascq*/, struct scsi_inquiry_data*,
919 const char** /*sense_key_desc*/,
920 const char** /*asc_desc*/);
921aic_sense_action aic_sense_error_action(struct scsi_sense_data*,
922 struct scsi_inquiry_data*,
923 uint32_t /*sense_flags*/);
924uint32_t aic_error_action(struct scsi_cmnd *,
925 struct scsi_inquiry_data *,
926 cam_status, u_int);
927
928#define SF_RETRY_UA 0x01
929#define SF_NO_PRINT 0x02
930#define SF_QUIET_IR 0x04 /* Be quiet about Illegal Request responses */
931#define SF_PRINT_ALWAYS 0x08
932
933
934const char * aic_op_desc(uint16_t /*opcode*/, struct scsi_inquiry_data*);
935char * aic_cdb_string(uint8_t* /*cdb_ptr*/, char* /*cdb_string*/,
936 size_t /*len*/);
937void aic_print_inquiry(struct scsi_inquiry_data*);
938
939u_int aic_calc_syncsrate(u_int /*period_factor*/);
940u_int aic_calc_syncparam(u_int /*period*/);
941u_int aic_calc_speed(u_int width, u_int period, u_int offset,
942 u_int min_rate);
943
944int aic_inquiry_match(caddr_t /*inqbuffer*/,
945 caddr_t /*table_entry*/);
946int aic_static_inquiry_match(caddr_t /*inqbuffer*/,
947 caddr_t /*table_entry*/);
948
949typedef void aic_option_callback_t(u_long, int, int, int32_t);
950char * aic_parse_brace_option(char *opt_name, char *opt_arg,
951 char *end, int depth,
952 aic_option_callback_t *, u_long);
953
954static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
955 int *error_code, int *sense_key,
956 int *asc, int *ascq);
957static __inline void scsi_ulto2b(uint32_t val, uint8_t *bytes);
958static __inline void scsi_ulto3b(uint32_t val, uint8_t *bytes);
959static __inline void scsi_ulto4b(uint32_t val, uint8_t *bytes);
960static __inline uint32_t scsi_2btoul(uint8_t *bytes);
961static __inline uint32_t scsi_3btoul(uint8_t *bytes);
962static __inline int32_t scsi_3btol(uint8_t *bytes);
963static __inline uint32_t scsi_4btoul(uint8_t *bytes);
964
965static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
966 int *error_code, int *sense_key,
967 int *asc, int *ascq)
968{
969 *error_code = sense->error_code & SSD_ERRCODE;
970 *sense_key = sense->flags & SSD_KEY;
971 *asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
972 *ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
973}
974
975static __inline void
976scsi_ulto2b(uint32_t val, uint8_t *bytes)
977{
978
979 bytes[0] = (val >> 8) & 0xff;
980 bytes[1] = val & 0xff;
981}
982
983static __inline void
984scsi_ulto3b(uint32_t val, uint8_t *bytes)
985{
986
987 bytes[0] = (val >> 16) & 0xff;
988 bytes[1] = (val >> 8) & 0xff;
989 bytes[2] = val & 0xff;
990}
991
992static __inline void
993scsi_ulto4b(uint32_t val, uint8_t *bytes)
994{
995
996 bytes[0] = (val >> 24) & 0xff;
997 bytes[1] = (val >> 16) & 0xff;
998 bytes[2] = (val >> 8) & 0xff;
999 bytes[3] = val & 0xff;
1000}
1001
1002static __inline uint32_t
1003scsi_2btoul(uint8_t *bytes)
1004{
1005 uint32_t rv;
1006
1007 rv = (bytes[0] << 8) |
1008 bytes[1];
1009 return (rv);
1010}
1011
1012static __inline uint32_t
1013scsi_3btoul(uint8_t *bytes)
1014{
1015 uint32_t rv;
1016
1017 rv = (bytes[0] << 16) |
1018 (bytes[1] << 8) |
1019 bytes[2];
1020 return (rv);
1021}
1022
1023static __inline int32_t
1024scsi_3btol(uint8_t *bytes)
1025{
1026 uint32_t rc = scsi_3btoul(bytes);
1027
1028 if (rc & 0x00800000)
1029 rc |= 0xff000000;
1030
1031 return (int32_t) rc;
1032}
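An illustrative round trip through the byte-order helpers above (editorial
example; the values are arbitrary):

/*
 *	uint8_t lba[4];
 *
 *	scsi_ulto4b(0x12345678, lba);	lba[] = { 0x12, 0x34, 0x56, 0x78 }
 *	scsi_4btoul(lba);		returns 0x12345678
 *
 * CDB and mode-page fields are big-endian, which is what these helpers
 * encode and decode; scsi_3btol() additionally sign-extends a 24-bit
 * two's-complement value, so 0xFFFFFF comes back as the int32_t -1.
 */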
1033 143
1034static __inline uint32_t 144static __inline uint32_t
1035scsi_4btoul(uint8_t *bytes) 145scsi_4btoul(uint8_t *bytes)
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fb28c1261848..deec0cef88d9 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -583,8 +583,7 @@ static void pci_enable_intx(struct pci_dev *pdev)
583#define AHCI_ENABLE (1 << 31) 583#define AHCI_ENABLE (1 << 31)
584static int piix_disable_ahci(struct pci_dev *pdev) 584static int piix_disable_ahci(struct pci_dev *pdev)
585{ 585{
586 void *mmio; 586 void __iomem *mmio;
587 unsigned long addr;
588 u32 tmp; 587 u32 tmp;
589 int rc = 0; 588 int rc = 0;
590 589
@@ -592,11 +591,11 @@ static int piix_disable_ahci(struct pci_dev *pdev)
592 * works because this device is usually set up by BIOS. 591 * works because this device is usually set up by BIOS.
593 */ 592 */
594 593
595 addr = pci_resource_start(pdev, AHCI_PCI_BAR); 594 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
596 if (!addr || !pci_resource_len(pdev, AHCI_PCI_BAR)) 595 !pci_resource_len(pdev, AHCI_PCI_BAR))
597 return 0; 596 return 0;
598 597
599 mmio = ioremap(addr, 64); 598 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
600 if (!mmio) 599 if (!mmio)
601 return -ENOMEM; 600 return -ENOMEM;
602 601
@@ -610,7 +609,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
610 rc = -EIO; 609 rc = -EIO;
611 } 610 }
612 611
613 iounmap(mmio); 612 pci_iounmap(pdev, mmio);
614 return rc; 613 return rc;
615} 614}
616 615
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 3900e28ac7d6..bd0e1b6be1ea 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -20,7 +20,6 @@
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/completion.h> 22#include <linux/completion.h>
23#include <linux/devfs_fs_kernel.h>
24#include <linux/ioctl32.h> 23#include <linux/ioctl32.h>
25#include <linux/compat.h> 24#include <linux/compat.h>
26#include <linux/chio.h> /* here are all the ioctls */ 25#include <linux/chio.h> /* here are all the ioctls */
@@ -31,7 +30,7 @@
31#include <scsi/scsi_ioctl.h> 30#include <scsi/scsi_ioctl.h>
32#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
33#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
34#include <scsi/scsi_request.h> 33#include <scsi/scsi_eh.h>
35#include <scsi/scsi_dbg.h> 34#include <scsi/scsi_dbg.h>
36 35
37#define CH_DT_MAX 16 36#define CH_DT_MAX 16
@@ -181,17 +180,17 @@ static struct {
181 180
182/* ------------------------------------------------------------------- */ 181/* ------------------------------------------------------------------- */
183 182
184static int ch_find_errno(unsigned char *sense_buffer) 183static int ch_find_errno(struct scsi_sense_hdr *sshdr)
185{ 184{
186 int i,errno = 0; 185 int i,errno = 0;
187 186
188 /* Check to see if additional sense information is available */ 187 /* Check to see if additional sense information is available */
189 if (sense_buffer[7] > 5 && 188 if (scsi_sense_valid(sshdr) &&
190 sense_buffer[12] != 0) { 189 sshdr->asc != 0) {
191 for (i = 0; err[i].errno != 0; i++) { 190 for (i = 0; err[i].errno != 0; i++) {
192 if (err[i].sense == sense_buffer[ 2] && 191 if (err[i].sense == sshdr->sense_key &&
193 err[i].asc == sense_buffer[12] && 192 err[i].asc == sshdr->asc &&
194 err[i].ascq == sense_buffer[13]) { 193 err[i].ascq == sshdr->ascq) {
195 errno = -err[i].errno; 194 errno = -err[i].errno;
196 break; 195 break;
197 } 196 }
@@ -207,13 +206,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
207 void *buffer, unsigned buflength, 206 void *buffer, unsigned buflength,
208 enum dma_data_direction direction) 207 enum dma_data_direction direction)
209{ 208{
210 int errno, retries = 0, timeout; 209 int errno, retries = 0, timeout, result;
211 struct scsi_request *sr; 210 struct scsi_sense_hdr sshdr;
212 211
213 sr = scsi_allocate_request(ch->device, GFP_KERNEL);
214 if (NULL == sr)
215 return -ENOMEM;
216
217 timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS) 212 timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
218 ? timeout_init : timeout_move; 213 ? timeout_init : timeout_move;
219 214
@@ -224,16 +219,17 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
224 __scsi_print_command(cmd); 219 __scsi_print_command(cmd);
225 } 220 }
226 221
227 scsi_wait_req(sr, cmd, buffer, buflength, 222 result = scsi_execute_req(ch->device, cmd, direction, buffer,
228 timeout * HZ, MAX_RETRIES); 223 buflength, &sshdr, timeout * HZ,
224 MAX_RETRIES);
229 225
230 dprintk("result: 0x%x\n",sr->sr_result); 226 dprintk("result: 0x%x\n",result);
231 if (driver_byte(sr->sr_result) & DRIVER_SENSE) { 227 if (driver_byte(result) & DRIVER_SENSE) {
232 if (debug) 228 if (debug)
233 scsi_print_req_sense(ch->name, sr); 229 scsi_print_sense_hdr(ch->name, &sshdr);
234 errno = ch_find_errno(sr->sr_sense_buffer); 230 errno = ch_find_errno(&sshdr);
235 231
236 switch(sr->sr_sense_buffer[2] & 0xf) { 232 switch(sshdr.sense_key) {
237 case UNIT_ATTENTION: 233 case UNIT_ATTENTION:
238 ch->unit_attention = 1; 234 ch->unit_attention = 1;
239 if (retries++ < 3) 235 if (retries++ < 3)
@@ -241,7 +237,6 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
241 break; 237 break;
242 } 238 }
243 } 239 }
244 scsi_release_request(sr);
245 return errno; 240 return errno;
246} 241}
247 242
@@ -940,8 +935,6 @@ static int ch_probe(struct device *dev)
940 if (init) 935 if (init)
941 ch_init_elem(ch); 936 ch_init_elem(ch);
942 937
943 devfs_mk_cdev(MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
944 S_IFCHR | S_IRUGO | S_IWUGO, ch->name);
945 class_device_create(ch_sysfs_class, 938 class_device_create(ch_sysfs_class,
946 MKDEV(SCSI_CHANGER_MAJOR,ch->minor), 939 MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
947 dev, "s%s", ch->name); 940 dev, "s%s", ch->name);
@@ -974,7 +967,6 @@ static int ch_remove(struct device *dev)
974 967
975 class_device_destroy(ch_sysfs_class, 968 class_device_destroy(ch_sysfs_class,
976 MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); 969 MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
977 devfs_remove(ch->name);
978 kfree(ch->dt); 970 kfree(ch->dt);
979 kfree(ch); 971 kfree(ch);
980 ch_devcount--; 972 ch_devcount--;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index ec161733a82b..f6be2c1c3942 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -17,6 +17,7 @@
17#include <scsi/scsi_host.h> 17#include <scsi/scsi_host.h>
18#include <scsi/scsi_request.h> 18#include <scsi/scsi_request.h>
19#include <scsi/scsi_eh.h> 19#include <scsi/scsi_eh.h>
20#include <scsi/scsi_dbg.h>
20 21
21 22
22 23
@@ -1155,6 +1156,31 @@ scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
1155 } 1156 }
1156} 1157}
1157 1158
1159void
1160scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
1161{
1162 const char *sense_txt;
1163 /* An example of deferred is when an earlier write to disk cache
1164 * succeeded, but now the disk discovers that it cannot write the
1165 * data to the magnetic media.
1166 */
1167 const char *error = scsi_sense_is_deferred(sshdr) ?
1168 "<<DEFERRED>>" : "Current";
1169 printk(KERN_INFO "%s: %s", name, error);
1170 if (sshdr->response_code >= 0x72)
1171 printk(" [descriptor]");
1172
1173 sense_txt = scsi_sense_key_string(sshdr->sense_key);
1174 if (sense_txt)
1175 printk(": sense key: %s\n", sense_txt);
1176 else
1177 printk(": sense key=0x%x\n", sshdr->sense_key);
1178 printk(KERN_INFO " ");
1179 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1180 printk("\n");
1181}
1182EXPORT_SYMBOL(scsi_print_sense_hdr);
1183
1158/* Print sense information */ 1184/* Print sense information */
1159void 1185void
1160__scsi_print_sense(const char *name, const unsigned char *sense_buffer, 1186__scsi_print_sense(const char *name, const unsigned char *sense_buffer,
@@ -1162,8 +1188,6 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1162{ 1188{
1163 int k, num, res; 1189 int k, num, res;
1164 unsigned int info; 1190 unsigned int info;
1165 const char *error;
1166 const char *sense_txt;
1167 struct scsi_sense_hdr ssh; 1191 struct scsi_sense_hdr ssh;
1168 1192
1169 res = scsi_normalize_sense(sense_buffer, sense_len, &ssh); 1193 res = scsi_normalize_sense(sense_buffer, sense_len, &ssh);
@@ -1181,26 +1205,7 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1181 printk("\n"); 1205 printk("\n");
1182 return; 1206 return;
1183 } 1207 }
1184 1208 scsi_print_sense_hdr(name, &ssh);
1185 /* An example of deferred is when an earlier write to disk cache
1186 * succeeded, but now the disk discovers that it cannot write the
1187 * data to the magnetic media.
1188 */
1189 error = scsi_sense_is_deferred(&ssh) ?
1190 "<<DEFERRED>>" : "Current";
1191 printk(KERN_INFO "%s: %s", name, error);
1192 if (ssh.response_code >= 0x72)
1193 printk(" [descriptor]");
1194
1195 sense_txt = scsi_sense_key_string(ssh.sense_key);
1196 if (sense_txt)
1197 printk(": sense key: %s\n", sense_txt);
1198 else
1199 printk(": sense key=0x%x\n", ssh.sense_key);
1200 printk(KERN_INFO " ");
1201 scsi_show_extd_sense(ssh.asc, ssh.ascq);
1202 printk("\n");
1203
1204 if (ssh.response_code < 0x72) { 1209 if (ssh.response_code < 0x72) {
1205 /* only decode extras for "fixed" format now */ 1210 /* only decode extras for "fixed" format now */
1206 char buff[80]; 1211 char buff[80];
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5feb886c3392..85503fad789a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/blkdev.h> 25#include <linux/blkdev.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/kthread.h>
27#include <linux/string.h> 28#include <linux/string.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
29#include <linux/init.h> 30#include <linux/init.h>
@@ -52,21 +53,80 @@ static struct class shost_class = {
52}; 53};
53 54
54/** 55/**
55 * scsi_host_cancel - cancel outstanding IO to this host 56 * scsi_host_set_state - Take the given host through the host
56 * @shost: pointer to struct Scsi_Host 57 * state model.
57 * recovery: recovery requested to run. 58 * @shost: scsi host to change the state of.
59 * @state: state to change to.
60 *
 61 * Returns zero if successful or an error if the requested
62 * transition is illegal.
58 **/ 63 **/
59static void scsi_host_cancel(struct Scsi_Host *shost, int recovery) 64int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
60{ 65{
61 struct scsi_device *sdev; 66 enum scsi_host_state oldstate = shost->shost_state;
67
68 if (state == oldstate)
69 return 0;
70
71 switch (state) {
72 case SHOST_CREATED:
73 /* There are no legal states that come back to
74 * created. This is the manually initialised start
75 * state */
76 goto illegal;
77
78 case SHOST_RUNNING:
79 switch (oldstate) {
80 case SHOST_CREATED:
81 case SHOST_RECOVERY:
82 break;
83 default:
84 goto illegal;
85 }
86 break;
87
88 case SHOST_RECOVERY:
89 switch (oldstate) {
90 case SHOST_RUNNING:
91 break;
92 default:
93 goto illegal;
94 }
95 break;
96
97 case SHOST_CANCEL:
98 switch (oldstate) {
99 case SHOST_CREATED:
100 case SHOST_RUNNING:
101 break;
102 default:
103 goto illegal;
104 }
105 break;
106
107 case SHOST_DEL:
108 switch (oldstate) {
109 case SHOST_CANCEL:
110 break;
111 default:
112 goto illegal;
113 }
114 break;
62 115
63 set_bit(SHOST_CANCEL, &shost->shost_state);
64 shost_for_each_device(sdev, shost) {
65 scsi_device_cancel(sdev, recovery);
66 } 116 }
67 wait_event(shost->host_wait, (!test_bit(SHOST_RECOVERY, 117 shost->shost_state = state;
68 &shost->shost_state))); 118 return 0;
119
120 illegal:
121 SCSI_LOG_ERROR_RECOVERY(1,
122 dev_printk(KERN_ERR, &shost->shost_gendev,
 123						"Illegal host state transition "
124 "%s->%s\n",
125 scsi_host_state_name(oldstate),
126 scsi_host_state_name(state)));
127 return -EINVAL;
69} 128}
129EXPORT_SYMBOL(scsi_host_set_state);
70 130
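A short usage sketch for the host state model added above; the caller is hypothetical, and only the transitions allowed by the switch table will succeed:

	/* enter recovery only if the host is currently SHOST_RUNNING */
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		return;				/* -EINVAL: illegal transition */
	/* ... run error recovery ... */
	scsi_host_set_state(shost, SHOST_RUNNING);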
71/** 131/**
72 * scsi_remove_host - remove a scsi host 132 * scsi_remove_host - remove a scsi host
@@ -74,11 +134,13 @@ static void scsi_host_cancel(struct Scsi_Host *shost, int recovery)
74 **/ 134 **/
75void scsi_remove_host(struct Scsi_Host *shost) 135void scsi_remove_host(struct Scsi_Host *shost)
76{ 136{
137 down(&shost->scan_mutex);
138 scsi_host_set_state(shost, SHOST_CANCEL);
139 up(&shost->scan_mutex);
77 scsi_forget_host(shost); 140 scsi_forget_host(shost);
78 scsi_host_cancel(shost, 0);
79 scsi_proc_host_rm(shost); 141 scsi_proc_host_rm(shost);
80 142
81 set_bit(SHOST_DEL, &shost->shost_state); 143 scsi_host_set_state(shost, SHOST_DEL);
82 144
83 transport_unregister_device(&shost->shost_gendev); 145 transport_unregister_device(&shost->shost_gendev);
84 class_device_unregister(&shost->shost_classdev); 146 class_device_unregister(&shost->shost_classdev);
@@ -115,7 +177,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
115 if (error) 177 if (error)
116 goto out; 178 goto out;
117 179
118 set_bit(SHOST_ADD, &shost->shost_state); 180 scsi_host_set_state(shost, SHOST_RUNNING);
119 get_device(shost->shost_gendev.parent); 181 get_device(shost->shost_gendev.parent);
120 182
121 error = class_device_add(&shost->shost_classdev); 183 error = class_device_add(&shost->shost_classdev);
@@ -164,15 +226,8 @@ static void scsi_host_dev_release(struct device *dev)
164 struct Scsi_Host *shost = dev_to_shost(dev); 226 struct Scsi_Host *shost = dev_to_shost(dev);
165 struct device *parent = dev->parent; 227 struct device *parent = dev->parent;
166 228
167 if (shost->ehandler) { 229 if (shost->ehandler)
168 DECLARE_COMPLETION(sem); 230 kthread_stop(shost->ehandler);
169 shost->eh_notify = &sem;
170 shost->eh_kill = 1;
171 up(shost->eh_wait);
172 wait_for_completion(&sem);
173 shost->eh_notify = NULL;
174 }
175
176 if (shost->work_q) 231 if (shost->work_q)
177 destroy_workqueue(shost->work_q); 232 destroy_workqueue(shost->work_q);
178 233
@@ -202,7 +257,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
202{ 257{
203 struct Scsi_Host *shost; 258 struct Scsi_Host *shost;
204 int gfp_mask = GFP_KERNEL, rval; 259 int gfp_mask = GFP_KERNEL, rval;
205 DECLARE_COMPLETION(complete);
206 260
207 if (sht->unchecked_isa_dma && privsize) 261 if (sht->unchecked_isa_dma && privsize)
208 gfp_mask |= __GFP_DMA; 262 gfp_mask |= __GFP_DMA;
@@ -226,6 +280,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
226 280
227 spin_lock_init(&shost->default_lock); 281 spin_lock_init(&shost->default_lock);
228 scsi_assign_lock(shost, &shost->default_lock); 282 scsi_assign_lock(shost, &shost->default_lock);
283 shost->shost_state = SHOST_CREATED;
229 INIT_LIST_HEAD(&shost->__devices); 284 INIT_LIST_HEAD(&shost->__devices);
230 INIT_LIST_HEAD(&shost->__targets); 285 INIT_LIST_HEAD(&shost->__targets);
231 INIT_LIST_HEAD(&shost->eh_cmd_q); 286 INIT_LIST_HEAD(&shost->eh_cmd_q);
@@ -307,12 +362,12 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
307 snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d", 362 snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d",
308 shost->host_no); 363 shost->host_no);
309 364
310 shost->eh_notify = &complete; 365 shost->ehandler = kthread_run(scsi_error_handler, shost,
311 rval = kernel_thread(scsi_error_handler, shost, 0); 366 "scsi_eh_%d", shost->host_no);
312 if (rval < 0) 367 if (IS_ERR(shost->ehandler)) {
368 rval = PTR_ERR(shost->ehandler);
313 goto fail_destroy_freelist; 369 goto fail_destroy_freelist;
314 wait_for_completion(&complete); 370 }
315 shost->eh_notify = NULL;
316 371
317 scsi_proc_hostdir_add(shost->hostt); 372 scsi_proc_hostdir_add(shost->hostt);
318 return shost; 373 return shost;
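The kernel_thread() plus completion handshake is replaced by the kthread API throughout this hunk. A self-contained sketch of that pattern (my_eh_thread is a placeholder, not the real scsi_error_handler):

	static int my_eh_thread(void *data)
	{
		struct Scsi_Host *shost = data;

		while (!kthread_should_stop()) {
			/* sleep until woken, then service errors for shost */
		}
		return 0;
	}

	/* creation: kthread_run() returns the task_struct, no handshake needed */
	shost->ehandler = kthread_run(my_eh_thread, shost,
				      "scsi_eh_%d", shost->host_no);
	if (IS_ERR(shost->ehandler))
		return PTR_ERR(shost->ehandler);

	/* teardown: kthread_stop() wakes the thread and waits for it to exit */
	kthread_stop(shost->ehandler);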
@@ -382,7 +437,7 @@ EXPORT_SYMBOL(scsi_host_lookup);
382 **/ 437 **/
383struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) 438struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
384{ 439{
385 if (test_bit(SHOST_DEL, &shost->shost_state) || 440 if ((shost->shost_state == SHOST_DEL) ||
386 !get_device(&shost->shost_gendev)) 441 !get_device(&shost->shost_gendev))
387 return NULL; 442 return NULL;
388 return shost; 443 return shost;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index fe09d145542a..5b14934ba861 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -87,7 +87,7 @@ static int max_channel = 3;
87static int init_timeout = 5; 87static int init_timeout = 5;
88static int max_requests = 50; 88static int max_requests = 50;
89 89
90#define IBMVSCSI_VERSION "1.5.6" 90#define IBMVSCSI_VERSION "1.5.7"
91 91
92MODULE_DESCRIPTION("IBM Virtual SCSI"); 92MODULE_DESCRIPTION("IBM Virtual SCSI");
93MODULE_AUTHOR("Dave Boutcher"); 93MODULE_AUTHOR("Dave Boutcher");
@@ -145,6 +145,8 @@ static int initialize_event_pool(struct event_pool *pool,
145 sizeof(*evt->xfer_iu) * i; 145 sizeof(*evt->xfer_iu) * i;
146 evt->xfer_iu = pool->iu_storage + i; 146 evt->xfer_iu = pool->iu_storage + i;
147 evt->hostdata = hostdata; 147 evt->hostdata = hostdata;
148 evt->ext_list = NULL;
149 evt->ext_list_token = 0;
148 } 150 }
149 151
150 return 0; 152 return 0;
@@ -161,9 +163,16 @@ static void release_event_pool(struct event_pool *pool,
161 struct ibmvscsi_host_data *hostdata) 163 struct ibmvscsi_host_data *hostdata)
162{ 164{
163 int i, in_use = 0; 165 int i, in_use = 0;
164 for (i = 0; i < pool->size; ++i) 166 for (i = 0; i < pool->size; ++i) {
165 if (atomic_read(&pool->events[i].free) != 1) 167 if (atomic_read(&pool->events[i].free) != 1)
166 ++in_use; 168 ++in_use;
169 if (pool->events[i].ext_list) {
170 dma_free_coherent(hostdata->dev,
171 SG_ALL * sizeof(struct memory_descriptor),
172 pool->events[i].ext_list,
173 pool->events[i].ext_list_token);
174 }
175 }
167 if (in_use) 176 if (in_use)
168 printk(KERN_WARNING 177 printk(KERN_WARNING
169 "ibmvscsi: releasing event pool with %d " 178 "ibmvscsi: releasing event pool with %d "
@@ -286,24 +295,41 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
286 } else { 295 } else {
287 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 296 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
288 srp_cmd->data_out_format = SRP_INDIRECT_BUFFER; 297 srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
289 srp_cmd->data_out_count = numbuf; 298 srp_cmd->data_out_count =
299 numbuf < MAX_INDIRECT_BUFS ?
300 numbuf: MAX_INDIRECT_BUFS;
290 } else { 301 } else {
291 srp_cmd->data_in_format = SRP_INDIRECT_BUFFER; 302 srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
292 srp_cmd->data_in_count = numbuf; 303 srp_cmd->data_in_count =
304 numbuf < MAX_INDIRECT_BUFS ?
305 numbuf: MAX_INDIRECT_BUFS;
293 } 306 }
294 } 307 }
295} 308}
296 309
310static void unmap_sg_list(int num_entries,
311 struct device *dev,
312 struct memory_descriptor *md)
313{
314 int i;
315
316 for (i = 0; i < num_entries; ++i) {
317 dma_unmap_single(dev,
318 md[i].virtual_address,
319 md[i].length, DMA_BIDIRECTIONAL);
320 }
321}
322
297/** 323/**
 298 * unmap_cmd_data: - Unmap data pointed to in srp_cmd based on the format 324 * unmap_cmd_data: - Unmap data pointed to in srp_cmd based on the format
299 * @cmd: srp_cmd whose additional_data member will be unmapped 325 * @cmd: srp_cmd whose additional_data member will be unmapped
300 * @dev: device for which the memory is mapped 326 * @dev: device for which the memory is mapped
301 * 327 *
302*/ 328*/
303static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev) 329static void unmap_cmd_data(struct srp_cmd *cmd,
330 struct srp_event_struct *evt_struct,
331 struct device *dev)
304{ 332{
305 int i;
306
307 if ((cmd->data_out_format == SRP_NO_BUFFER) && 333 if ((cmd->data_out_format == SRP_NO_BUFFER) &&
308 (cmd->data_in_format == SRP_NO_BUFFER)) 334 (cmd->data_in_format == SRP_NO_BUFFER))
309 return; 335 return;
@@ -318,15 +344,34 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
318 (struct indirect_descriptor *)cmd->additional_data; 344 (struct indirect_descriptor *)cmd->additional_data;
319 int num_mapped = indirect->head.length / 345 int num_mapped = indirect->head.length /
320 sizeof(indirect->list[0]); 346 sizeof(indirect->list[0]);
321 for (i = 0; i < num_mapped; ++i) { 347
322 struct memory_descriptor *data = &indirect->list[i]; 348 if (num_mapped <= MAX_INDIRECT_BUFS) {
323 dma_unmap_single(dev, 349 unmap_sg_list(num_mapped, dev, &indirect->list[0]);
324 data->virtual_address, 350 return;
325 data->length, DMA_BIDIRECTIONAL);
326 } 351 }
352
353 unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
327 } 354 }
328} 355}
329 356
357static int map_sg_list(int num_entries,
358 struct scatterlist *sg,
359 struct memory_descriptor *md)
360{
361 int i;
362 u64 total_length = 0;
363
364 for (i = 0; i < num_entries; ++i) {
365 struct memory_descriptor *descr = md + i;
366 struct scatterlist *sg_entry = &sg[i];
367 descr->virtual_address = sg_dma_address(sg_entry);
368 descr->length = sg_dma_len(sg_entry);
369 descr->memory_handle = 0;
370 total_length += sg_dma_len(sg_entry);
371 }
372 return total_length;
373}
374
330/** 375/**
 331 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields 376 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
332 * @cmd: Scsi_Cmnd with the scatterlist 377 * @cmd: Scsi_Cmnd with the scatterlist
@@ -337,10 +382,11 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
337 * Returns 1 on success. 382 * Returns 1 on success.
338*/ 383*/
339static int map_sg_data(struct scsi_cmnd *cmd, 384static int map_sg_data(struct scsi_cmnd *cmd,
385 struct srp_event_struct *evt_struct,
340 struct srp_cmd *srp_cmd, struct device *dev) 386 struct srp_cmd *srp_cmd, struct device *dev)
341{ 387{
342 388
343 int i, sg_mapped; 389 int sg_mapped;
344 u64 total_length = 0; 390 u64 total_length = 0;
345 struct scatterlist *sg = cmd->request_buffer; 391 struct scatterlist *sg = cmd->request_buffer;
346 struct memory_descriptor *data = 392 struct memory_descriptor *data =
@@ -363,27 +409,46 @@ static int map_sg_data(struct scsi_cmnd *cmd,
363 return 1; 409 return 1;
364 } 410 }
365 411
366 if (sg_mapped > MAX_INDIRECT_BUFS) { 412 if (sg_mapped > SG_ALL) {
367 printk(KERN_ERR 413 printk(KERN_ERR
368 "ibmvscsi: More than %d mapped sg entries, got %d\n", 414 "ibmvscsi: More than %d mapped sg entries, got %d\n",
369 MAX_INDIRECT_BUFS, sg_mapped); 415 SG_ALL, sg_mapped);
370 return 0; 416 return 0;
371 } 417 }
372 418
373 indirect->head.virtual_address = 0; 419 indirect->head.virtual_address = 0;
374 indirect->head.length = sg_mapped * sizeof(indirect->list[0]); 420 indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
375 indirect->head.memory_handle = 0; 421 indirect->head.memory_handle = 0;
376 for (i = 0; i < sg_mapped; ++i) { 422
377 struct memory_descriptor *descr = &indirect->list[i]; 423 if (sg_mapped <= MAX_INDIRECT_BUFS) {
378 struct scatterlist *sg_entry = &sg[i]; 424 total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
379 descr->virtual_address = sg_dma_address(sg_entry); 425 indirect->total_length = total_length;
380 descr->length = sg_dma_len(sg_entry); 426 return 1;
381 descr->memory_handle = 0;
382 total_length += sg_dma_len(sg_entry);
383 } 427 }
384 indirect->total_length = total_length;
385 428
386 return 1; 429 /* get indirect table */
430 if (!evt_struct->ext_list) {
 431 evt_struct->ext_list = (struct memory_descriptor *)
432 dma_alloc_coherent(dev,
433 SG_ALL * sizeof(struct memory_descriptor),
434 &evt_struct->ext_list_token, 0);
435 if (!evt_struct->ext_list) {
436 printk(KERN_ERR
437 "ibmvscsi: Can't allocate memory for indirect table\n");
438 return 0;
439
440 }
441 }
442
443 total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
444
445 indirect->total_length = total_length;
446 indirect->head.virtual_address = evt_struct->ext_list_token;
447 indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
448 memcpy(indirect->list, evt_struct->ext_list,
449 MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
450
451 return 1;
387} 452}
388 453
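A condensed view of the two mapping paths map_sg_data() now takes (identifiers come from the hunk above; the one-time dma_alloc_coherent() of ext_list is omitted for brevity):

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		/* descriptors fit directly behind the SRP command IU */
		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
	} else {
		/* large requests use the per-event external table; the
		 * adapter is pointed at it through its DMA token */
		total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
		indirect->head.virtual_address = evt_struct->ext_list_token;
		indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
	}
	indirect->total_length = total_length;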
389/** 454/**
@@ -428,6 +493,7 @@ static int map_single_data(struct scsi_cmnd *cmd,
428 * Returns 1 on success. 493 * Returns 1 on success.
429*/ 494*/
430static int map_data_for_srp_cmd(struct scsi_cmnd *cmd, 495static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
496 struct srp_event_struct *evt_struct,
431 struct srp_cmd *srp_cmd, struct device *dev) 497 struct srp_cmd *srp_cmd, struct device *dev)
432{ 498{
433 switch (cmd->sc_data_direction) { 499 switch (cmd->sc_data_direction) {
@@ -450,7 +516,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
450 if (!cmd->request_buffer) 516 if (!cmd->request_buffer)
451 return 1; 517 return 1;
452 if (cmd->use_sg) 518 if (cmd->use_sg)
453 return map_sg_data(cmd, srp_cmd, dev); 519 return map_sg_data(cmd, evt_struct, srp_cmd, dev);
454 return map_single_data(cmd, srp_cmd, dev); 520 return map_single_data(cmd, srp_cmd, dev);
455} 521}
456 522
@@ -486,6 +552,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
486 printk(KERN_WARNING 552 printk(KERN_WARNING
487 "ibmvscsi: Warning, request_limit exceeded\n"); 553 "ibmvscsi: Warning, request_limit exceeded\n");
488 unmap_cmd_data(&evt_struct->iu.srp.cmd, 554 unmap_cmd_data(&evt_struct->iu.srp.cmd,
555 evt_struct,
489 hostdata->dev); 556 hostdata->dev);
490 free_event_struct(&hostdata->pool, evt_struct); 557 free_event_struct(&hostdata->pool, evt_struct);
491 return SCSI_MLQUEUE_HOST_BUSY; 558 return SCSI_MLQUEUE_HOST_BUSY;
@@ -513,7 +580,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
513 return 0; 580 return 0;
514 581
515 send_error: 582 send_error:
516 unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev); 583 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
517 584
518 if ((cmnd = evt_struct->cmnd) != NULL) { 585 if ((cmnd = evt_struct->cmnd) != NULL) {
519 cmnd->result = DID_ERROR << 16; 586 cmnd->result = DID_ERROR << 16;
@@ -551,6 +618,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
551 rsp->sense_and_response_data, 618 rsp->sense_and_response_data,
552 rsp->sense_data_list_length); 619 rsp->sense_data_list_length);
553 unmap_cmd_data(&evt_struct->iu.srp.cmd, 620 unmap_cmd_data(&evt_struct->iu.srp.cmd,
621 evt_struct,
554 evt_struct->hostdata->dev); 622 evt_struct->hostdata->dev);
555 623
556 if (rsp->doover) 624 if (rsp->doover)
@@ -583,6 +651,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
583{ 651{
584 struct srp_cmd *srp_cmd; 652 struct srp_cmd *srp_cmd;
585 struct srp_event_struct *evt_struct; 653 struct srp_event_struct *evt_struct;
654 struct indirect_descriptor *indirect;
586 struct ibmvscsi_host_data *hostdata = 655 struct ibmvscsi_host_data *hostdata =
587 (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata; 656 (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
588 u16 lun = lun_from_dev(cmnd->device); 657 u16 lun = lun_from_dev(cmnd->device);
@@ -591,14 +660,6 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
591 if (!evt_struct) 660 if (!evt_struct)
592 return SCSI_MLQUEUE_HOST_BUSY; 661 return SCSI_MLQUEUE_HOST_BUSY;
593 662
594 init_event_struct(evt_struct,
595 handle_cmd_rsp,
596 VIOSRP_SRP_FORMAT,
597 cmnd->timeout);
598
599 evt_struct->cmnd = cmnd;
600 evt_struct->cmnd_done = done;
601
602 /* Set up the actual SRP IU */ 663 /* Set up the actual SRP IU */
603 srp_cmd = &evt_struct->iu.srp.cmd; 664 srp_cmd = &evt_struct->iu.srp.cmd;
604 memset(srp_cmd, 0x00, sizeof(*srp_cmd)); 665 memset(srp_cmd, 0x00, sizeof(*srp_cmd));
@@ -606,17 +667,25 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
606 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); 667 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
607 srp_cmd->lun = ((u64) lun) << 48; 668 srp_cmd->lun = ((u64) lun) << 48;
608 669
609 if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) { 670 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
610 printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n"); 671 printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
611 free_event_struct(&hostdata->pool, evt_struct); 672 free_event_struct(&hostdata->pool, evt_struct);
612 return SCSI_MLQUEUE_HOST_BUSY; 673 return SCSI_MLQUEUE_HOST_BUSY;
613 } 674 }
614 675
676 init_event_struct(evt_struct,
677 handle_cmd_rsp,
678 VIOSRP_SRP_FORMAT,
679 cmnd->timeout_per_command/HZ);
680
681 evt_struct->cmnd = cmnd;
682 evt_struct->cmnd_done = done;
683
615 /* Fix up dma address of the buffer itself */ 684 /* Fix up dma address of the buffer itself */
616 if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) || 685 indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
617 (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) { 686 if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
618 struct indirect_descriptor *indirect = 687 (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
619 (struct indirect_descriptor *)srp_cmd->additional_data; 688 (indirect->head.virtual_address == 0)) {
620 indirect->head.virtual_address = evt_struct->crq.IU_data_ptr + 689 indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
621 offsetof(struct srp_cmd, additional_data) + 690 offsetof(struct srp_cmd, additional_data) +
622 offsetof(struct indirect_descriptor, list); 691 offsetof(struct indirect_descriptor, list);
@@ -826,11 +895,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
826 struct srp_event_struct *tmp_evt, *found_evt; 895 struct srp_event_struct *tmp_evt, *found_evt;
827 union viosrp_iu srp_rsp; 896 union viosrp_iu srp_rsp;
828 int rsp_rc; 897 int rsp_rc;
898 unsigned long flags;
829 u16 lun = lun_from_dev(cmd->device); 899 u16 lun = lun_from_dev(cmd->device);
830 900
831 /* First, find this command in our sent list so we can figure 901 /* First, find this command in our sent list so we can figure
832 * out the correct tag 902 * out the correct tag
833 */ 903 */
904 spin_lock_irqsave(hostdata->host->host_lock, flags);
834 found_evt = NULL; 905 found_evt = NULL;
835 list_for_each_entry(tmp_evt, &hostdata->sent, list) { 906 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
836 if (tmp_evt->cmnd == cmd) { 907 if (tmp_evt->cmnd == cmd) {
@@ -839,11 +910,14 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
839 } 910 }
840 } 911 }
841 912
842 if (!found_evt) 913 if (!found_evt) {
914 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
843 return FAILED; 915 return FAILED;
916 }
844 917
845 evt = get_event_struct(&hostdata->pool); 918 evt = get_event_struct(&hostdata->pool);
846 if (evt == NULL) { 919 if (evt == NULL) {
920 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
847 printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n"); 921 printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
848 return FAILED; 922 return FAILED;
849 } 923 }
@@ -867,7 +941,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
867 941
868 evt->sync_srp = &srp_rsp; 942 evt->sync_srp = &srp_rsp;
869 init_completion(&evt->comp); 943 init_completion(&evt->comp);
870 if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { 944 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
945 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
946 if (rsp_rc != 0) {
871 printk(KERN_ERR "ibmvscsi: failed to send abort() event\n"); 947 printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
872 return FAILED; 948 return FAILED;
873 } 949 }
@@ -901,6 +977,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
901 * The event is no longer in our list. Make sure it didn't 977 * The event is no longer in our list. Make sure it didn't
902 * complete while we were aborting 978 * complete while we were aborting
903 */ 979 */
980 spin_lock_irqsave(hostdata->host->host_lock, flags);
904 found_evt = NULL; 981 found_evt = NULL;
905 list_for_each_entry(tmp_evt, &hostdata->sent, list) { 982 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
906 if (tmp_evt->cmnd == cmd) { 983 if (tmp_evt->cmnd == cmd) {
@@ -910,6 +987,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
910 } 987 }
911 988
912 if (found_evt == NULL) { 989 if (found_evt == NULL) {
990 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
913 printk(KERN_INFO 991 printk(KERN_INFO
914 "ibmvscsi: aborted task tag 0x%lx completed\n", 992 "ibmvscsi: aborted task tag 0x%lx completed\n",
915 tsk_mgmt->managed_task_tag); 993 tsk_mgmt->managed_task_tag);
@@ -922,8 +1000,10 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
922 1000
923 cmd->result = (DID_ABORT << 16); 1001 cmd->result = (DID_ABORT << 16);
924 list_del(&found_evt->list); 1002 list_del(&found_evt->list);
925 unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev); 1003 unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
1004 found_evt->hostdata->dev);
926 free_event_struct(&found_evt->hostdata->pool, found_evt); 1005 free_event_struct(&found_evt->hostdata->pool, found_evt);
1006 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
927 atomic_inc(&hostdata->request_limit); 1007 atomic_inc(&hostdata->request_limit);
928 return SUCCESS; 1008 return SUCCESS;
929} 1009}
@@ -943,10 +1023,13 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
943 struct srp_event_struct *tmp_evt, *pos; 1023 struct srp_event_struct *tmp_evt, *pos;
944 union viosrp_iu srp_rsp; 1024 union viosrp_iu srp_rsp;
945 int rsp_rc; 1025 int rsp_rc;
1026 unsigned long flags;
946 u16 lun = lun_from_dev(cmd->device); 1027 u16 lun = lun_from_dev(cmd->device);
947 1028
1029 spin_lock_irqsave(hostdata->host->host_lock, flags);
948 evt = get_event_struct(&hostdata->pool); 1030 evt = get_event_struct(&hostdata->pool);
949 if (evt == NULL) { 1031 if (evt == NULL) {
1032 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
950 printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n"); 1033 printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
951 return FAILED; 1034 return FAILED;
952 } 1035 }
@@ -969,7 +1052,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
969 1052
970 evt->sync_srp = &srp_rsp; 1053 evt->sync_srp = &srp_rsp;
971 init_completion(&evt->comp); 1054 init_completion(&evt->comp);
972 if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { 1055 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
1056 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1057 if (rsp_rc != 0) {
973 printk(KERN_ERR "ibmvscsi: failed to send reset event\n"); 1058 printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
974 return FAILED; 1059 return FAILED;
975 } 1060 }
@@ -1002,12 +1087,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1002 /* We need to find all commands for this LUN that have not yet been 1087 /* We need to find all commands for this LUN that have not yet been
1003 * responded to, and fail them with DID_RESET 1088 * responded to, and fail them with DID_RESET
1004 */ 1089 */
1090 spin_lock_irqsave(hostdata->host->host_lock, flags);
1005 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { 1091 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
1006 if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) { 1092 if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
1007 if (tmp_evt->cmnd) 1093 if (tmp_evt->cmnd)
1008 tmp_evt->cmnd->result = (DID_RESET << 16); 1094 tmp_evt->cmnd->result = (DID_RESET << 16);
1009 list_del(&tmp_evt->list); 1095 list_del(&tmp_evt->list);
1010 unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev); 1096 unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
1097 tmp_evt->hostdata->dev);
1011 free_event_struct(&tmp_evt->hostdata->pool, 1098 free_event_struct(&tmp_evt->hostdata->pool,
1012 tmp_evt); 1099 tmp_evt);
1013 atomic_inc(&hostdata->request_limit); 1100 atomic_inc(&hostdata->request_limit);
@@ -1017,6 +1104,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1017 tmp_evt->done(tmp_evt); 1104 tmp_evt->done(tmp_evt);
1018 } 1105 }
1019 } 1106 }
1107 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1020 return SUCCESS; 1108 return SUCCESS;
1021} 1109}
1022 1110
@@ -1035,6 +1123,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
1035 if (tmp_evt->cmnd) { 1123 if (tmp_evt->cmnd) {
1036 tmp_evt->cmnd->result = (DID_ERROR << 16); 1124 tmp_evt->cmnd->result = (DID_ERROR << 16);
1037 unmap_cmd_data(&tmp_evt->iu.srp.cmd, 1125 unmap_cmd_data(&tmp_evt->iu.srp.cmd,
1126 tmp_evt,
1038 tmp_evt->hostdata->dev); 1127 tmp_evt->hostdata->dev);
1039 if (tmp_evt->cmnd_done) 1128 if (tmp_evt->cmnd_done)
1040 tmp_evt->cmnd_done(tmp_evt->cmnd); 1129 tmp_evt->cmnd_done(tmp_evt->cmnd);
@@ -1339,7 +1428,7 @@ static struct scsi_host_template driver_template = {
1339 .cmd_per_lun = 16, 1428 .cmd_per_lun = 16,
1340 .can_queue = 1, /* Updated after SRP_LOGIN */ 1429 .can_queue = 1, /* Updated after SRP_LOGIN */
1341 .this_id = -1, 1430 .this_id = -1,
1342 .sg_tablesize = MAX_INDIRECT_BUFS, 1431 .sg_tablesize = SG_ALL,
1343 .use_clustering = ENABLE_CLUSTERING, 1432 .use_clustering = ENABLE_CLUSTERING,
1344 .shost_attrs = ibmvscsi_attrs, 1433 .shost_attrs = ibmvscsi_attrs,
1345}; 1434};
@@ -1442,7 +1531,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
1442 */ 1531 */
1443static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { 1532static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1444 {"vscsi", "IBM,v-scsi"}, 1533 {"vscsi", "IBM,v-scsi"},
1445 {0,} 1534 { "", "" }
1446}; 1535};
1447 1536
1448MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); 1537MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 1030b703c30e..8bec0438dc8a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -68,6 +68,8 @@ struct srp_event_struct {
68 void (*cmnd_done) (struct scsi_cmnd *); 68 void (*cmnd_done) (struct scsi_cmnd *);
69 struct completion comp; 69 struct completion comp;
70 union viosrp_iu *sync_srp; 70 union viosrp_iu *sync_srp;
71 struct memory_descriptor *ext_list;
72 dma_addr_t ext_list_token;
71}; 73};
72 74
73/* a pool of event structs for use */ 75/* a pool of event structs for use */
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 035f615817d7..8bf5652f1060 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30#include <asm/vio.h> 30#include <asm/vio.h>
31#include <asm/prom.h>
31#include <asm/iommu.h> 32#include <asm/iommu.h>
32#include <asm/hvcall.h> 33#include <asm/hvcall.h>
33#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index dee4b12b0342..5cc53cd9323e 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -75,6 +75,10 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc);
75static unsigned int ata_unique_id = 1; 75static unsigned int ata_unique_id = 1;
76static struct workqueue_struct *ata_wq; 76static struct workqueue_struct *ata_wq;
77 77
78int atapi_enabled = 0;
79module_param(atapi_enabled, int, 0444);
80MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
81
78MODULE_AUTHOR("Jeff Garzik"); 82MODULE_AUTHOR("Jeff Garzik");
79MODULE_DESCRIPTION("Library module for ATA devices"); 83MODULE_DESCRIPTION("Library module for ATA devices");
80MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
@@ -2527,7 +2531,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
2527 * @ap: port to read/write 2531 * @ap: port to read/write
2528 * @buf: data buffer 2532 * @buf: data buffer
2529 * @buflen: buffer length 2533 * @buflen: buffer length
2530 * @do_write: read/write 2534 * @write_data: read/write
2531 * 2535 *
2532 * Transfer data from/to the device data register by MMIO. 2536 * Transfer data from/to the device data register by MMIO.
2533 * 2537 *
@@ -2573,7 +2577,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2573 * @ap: port to read/write 2577 * @ap: port to read/write
2574 * @buf: data buffer 2578 * @buf: data buffer
2575 * @buflen: buffer length 2579 * @buflen: buffer length
2576 * @do_write: read/write 2580 * @write_data: read/write
2577 * 2581 *
2578 * Transfer data from/to the device data register by PIO. 2582 * Transfer data from/to the device data register by PIO.
2579 * 2583 *
@@ -4200,6 +4204,15 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
4200 4204
4201 4205
4202 4206
4207#ifdef CONFIG_PCI
4208
4209void ata_pci_host_stop (struct ata_host_set *host_set)
4210{
4211 struct pci_dev *pdev = to_pci_dev(host_set->dev);
4212
4213 pci_iounmap(pdev, host_set->mmio_base);
4214}
4215
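Low-level drivers pick up the new helper through their port operations; an illustrative sketch (my_pci_ops is hypothetical, the hook names follow this tree's struct ata_port_operations):

	static struct ata_port_operations my_pci_ops = {
		/* ... the driver's usual taskfile/DMA hooks ... */
		.port_start	= ata_port_start,
		.port_stop	= ata_port_stop,
		.host_stop	= ata_pci_host_stop,	/* pci_iounmap() of mmio_base */
	};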
4203/** 4216/**
4204 * ata_pci_init_native_mode - Initialize native-mode driver 4217 * ata_pci_init_native_mode - Initialize native-mode driver
4205 * @pdev: pci device to be initialized 4218 * @pdev: pci device to be initialized
@@ -4212,7 +4225,6 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
4212 * ata_probe_ent structure should then be freed with kfree(). 4225 * ata_probe_ent structure should then be freed with kfree().
4213 */ 4226 */
4214 4227
4215#ifdef CONFIG_PCI
4216struct ata_probe_ent * 4228struct ata_probe_ent *
4217ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) 4229ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
4218{ 4230{
@@ -4595,6 +4607,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4595 4607
4596#ifdef CONFIG_PCI 4608#ifdef CONFIG_PCI
4597EXPORT_SYMBOL_GPL(pci_test_config_bits); 4609EXPORT_SYMBOL_GPL(pci_test_config_bits);
4610EXPORT_SYMBOL_GPL(ata_pci_host_stop);
4598EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 4611EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4599EXPORT_SYMBOL_GPL(ata_pci_init_one); 4612EXPORT_SYMBOL_GPL(ata_pci_init_one);
4600EXPORT_SYMBOL_GPL(ata_pci_remove_one); 4613EXPORT_SYMBOL_GPL(ata_pci_remove_one);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 346eb36b1e31..104fd9a63e73 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1470,10 +1470,10 @@ ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev)
1470 if (unlikely(!ata_dev_present(dev))) 1470 if (unlikely(!ata_dev_present(dev)))
1471 return NULL; 1471 return NULL;
1472 1472
1473#ifndef ATA_ENABLE_ATAPI 1473 if (!atapi_enabled) {
1474 if (unlikely(dev->class == ATA_DEV_ATAPI)) 1474 if (unlikely(dev->class == ATA_DEV_ATAPI))
1475 return NULL; 1475 return NULL;
1476#endif 1476 }
1477 1477
1478 return dev; 1478 return dev;
1479} 1479}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 809c634afbcd..d608b3a0f6fe 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -38,6 +38,7 @@ struct ata_scsi_args {
38}; 38};
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled;
41extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
42 struct ata_device *dev); 43 struct ata_device *dev);
43extern void ata_qc_free(struct ata_queued_cmd *qc); 44extern void ata_qc_free(struct ata_queued_cmd *qc);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3bb82aae432e..adb95674823f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -342,9 +342,6 @@ struct lpfc_hba {
342#define VPD_MASK 0xf /* mask for any vpd data */ 342#define VPD_MASK 0xf /* mask for any vpd data */
343 343
344 struct timer_list els_tmofunc; 344 struct timer_list els_tmofunc;
345
346 void *link_stats;
347
348 /* 345 /*
349 * stat counters 346 * stat counters
350 */ 347 */
@@ -370,6 +367,8 @@ struct lpfc_hba {
370 struct list_head freebufList; 367 struct list_head freebufList;
371 struct list_head ctrspbuflist; 368 struct list_head ctrspbuflist;
372 struct list_head rnidrspbuflist; 369 struct list_head rnidrspbuflist;
370
371 struct fc_host_statistics link_stats;
373}; 372};
374 373
375 374
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3cea92883019..0e089a42c03a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25 25
26#include <scsi/scsi.h>
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
27#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
28#include <scsi/scsi_tcq.h> 29#include <scsi/scsi_tcq.h>
@@ -988,8 +989,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
988{ 989{
989 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0]; 990 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
990 struct lpfc_sli *psli = &phba->sli; 991 struct lpfc_sli *psli = &phba->sli;
991 struct fc_host_statistics *hs = 992 struct fc_host_statistics *hs = &phba->link_stats;
992 (struct fc_host_statistics *)phba->link_stats;
993 LPFC_MBOXQ_t *pmboxq; 993 LPFC_MBOXQ_t *pmboxq;
994 MAILBOX_t *pmb; 994 MAILBOX_t *pmb;
995 int rc=0; 995 int rc=0;
@@ -1020,6 +1020,8 @@ lpfc_get_stats(struct Scsi_Host *shost)
1020 return NULL; 1020 return NULL;
1021 } 1021 }
1022 1022
1023 memset(hs, 0, sizeof (struct fc_host_statistics));
1024
1023 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; 1025 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
1024 hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256); 1026 hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
1025 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; 1027 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 78adee4699af..1280f0e54636 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -27,8 +27,10 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/utsname.h> 28#include <linux/utsname.h>
29 29
30#include <scsi/scsi.h>
30#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
31#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h>
32 34
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2b1c9572dae7..63caf7fe9725 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25 25
26#include <scsi/scsi.h>
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
27#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
28#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 233901e9dfde..0a8269d6b130 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -24,6 +24,7 @@
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26 26
27#include <scsi/scsi.h>
27#include <scsi/scsi_device.h> 28#include <scsi/scsi_device.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
@@ -1135,6 +1136,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1135 switch(list) { 1136 switch(list) {
1136 case NLP_NO_LIST: /* No list, just remove it */ 1137 case NLP_NO_LIST: /* No list, just remove it */
1137 lpfc_nlp_remove(phba, nlp); 1138 lpfc_nlp_remove(phba, nlp);
1139 /* as node removed - stop further transport calls */
1140 rport_del = none;
1138 break; 1141 break;
1139 case NLP_UNUSED_LIST: 1142 case NLP_UNUSED_LIST:
1140 spin_lock_irq(phba->host->host_lock); 1143 spin_lock_irq(phba->host->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 34d416d2b007..6f3cb59bf9e0 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -28,6 +28,7 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30 30
31#include <scsi/scsi.h>
31#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
@@ -1339,14 +1340,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1339 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1340 if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1340 goto out_disable_device; 1341 goto out_disable_device;
1341 1342
1342 host = scsi_host_alloc(&lpfc_template, 1343 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
1343 sizeof (struct lpfc_hba) + sizeof (unsigned long));
1344 if (!host) 1344 if (!host)
1345 goto out_release_regions; 1345 goto out_release_regions;
1346 1346
1347 phba = (struct lpfc_hba*)host->hostdata; 1347 phba = (struct lpfc_hba*)host->hostdata;
1348 memset(phba, 0, sizeof (struct lpfc_hba)); 1348 memset(phba, 0, sizeof (struct lpfc_hba));
1349 phba->link_stats = (void *)&phba[1];
1350 phba->host = host; 1349 phba->host = host;
1351 1350
1352 phba->fc_flag |= FC_LOADING; 1351 phba->fc_flag |= FC_LOADING;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c27cf94795db..73eb89f91593 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -23,6 +23,11 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25 25
26#include <scsi/scsi_device.h>
27#include <scsi/scsi_transport_fc.h>
28
29#include <scsi/scsi.h>
30
26#include "lpfc_hw.h" 31#include "lpfc_hw.h"
27#include "lpfc_sli.h" 32#include "lpfc_sli.h"
28#include "lpfc_disc.h" 33#include "lpfc_disc.h"
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index a5cfb6421fa9..0aba13ceaacf 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -23,6 +23,11 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25 25
26#include <scsi/scsi_device.h>
27#include <scsi/scsi_transport_fc.h>
28
29#include <scsi/scsi.h>
30
26#include "lpfc_hw.h" 31#include "lpfc_hw.h"
27#include "lpfc_sli.h" 32#include "lpfc_sli.h"
28#include "lpfc_disc.h" 33#include "lpfc_disc.h"
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 45dc0210fc49..9b35eaac781d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25 25
26#include <scsi/scsi.h>
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
27#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
28#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 17e4974d4445..b5ad1871d34b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -40,11 +40,6 @@
40#define LPFC_RESET_WAIT 2 40#define LPFC_RESET_WAIT 2
41#define LPFC_ABORT_WAIT 2 41#define LPFC_ABORT_WAIT 2
42 42
43static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
44{
45 fcmd->fcpLunLsl = 0;
46 fcmd->fcpLunMsl = swab16((uint16_t)lun);
47}
48 43
49/* 44/*
50 * This routine allocates a scsi buffer, which contains all the necessary 45 * This routine allocates a scsi buffer, which contains all the necessary
@@ -238,6 +233,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
238 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; 233 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
239 if (datadir == DMA_TO_DEVICE) 234 if (datadir == DMA_TO_DEVICE)
240 bpl->tus.f.bdeFlags = 0; 235 bpl->tus.f.bdeFlags = 0;
236 else
237 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
241 bpl->tus.w = le32_to_cpu(bpl->tus.w); 238 bpl->tus.w = le32_to_cpu(bpl->tus.w);
242 num_bde = 1; 239 num_bde = 1;
243 bpl++; 240 bpl++;
@@ -245,8 +242,11 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
245 242
246 /* 243 /*
247 * Finish initializing those IOCB fields that are dependent on the 244 * Finish initializing those IOCB fields that are dependent on the
248 * scsi_cmnd request_buffer 245 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
246 * reinitialized since all iocb memory resources are used many times
247 * for transmit, receive, and continuation bpl's.
249 */ 248 */
249 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
250 iocb_cmd->un.fcpi64.bdl.bdeSize += 250 iocb_cmd->un.fcpi64.bdl.bdeSize +=
251 (num_bde * sizeof (struct ulp_bde64)); 251 (num_bde * sizeof (struct ulp_bde64));
252 iocb_cmd->ulpBdeCount = 1; 252 iocb_cmd->ulpBdeCount = 1;
@@ -445,8 +445,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
445 int datadir = scsi_cmnd->sc_data_direction; 445 int datadir = scsi_cmnd->sc_data_direction;
446 446
447 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 447 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
448 /* clear task management bits */
449 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
448 450
449 lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun); 451 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
452 &lpfc_cmd->fcp_cmnd->fcp_lun);
450 453
451 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16); 454 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
452 455
@@ -545,7 +548,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
545 piocb = &piocbq->iocb; 548 piocb = &piocbq->iocb;
546 549
547 fcp_cmnd = lpfc_cmd->fcp_cmnd; 550 fcp_cmnd = lpfc_cmd->fcp_cmnd;
548 lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun); 551 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
552 &lpfc_cmd->fcp_cmnd->fcp_lun);
549 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 553 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
550 554
551 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 555 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
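Both call sites above now rely on the midlayer's int_to_scsilun() instead of the removed lpfc_put_lun(); a minimal sketch of the helper's use:

	struct scsi_lun fcp_lun;

	/* encode the numeric LUN into the 8-byte SAM format used by FCP */
	int_to_scsilun(scsi_cmnd->device->lun, &fcp_lun);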
@@ -746,6 +750,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
746 cmnd->result = ScsiResult(DID_NO_CONNECT, 0); 750 cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
747 goto out_fail_command; 751 goto out_fail_command;
748 } 752 }
753 else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
754 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
755 goto out_fail_command;
756 }
749 /* 757 /*
750 * The device is most likely recovered and the driver 758 * The device is most likely recovered and the driver
751 * needs a bit more time to finish. Ask the midlayer 759 * needs a bit more time to finish. Ask the midlayer
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0fd9ba14e1b5..acd64c49e849 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -78,18 +78,7 @@ struct fcp_rsp {
78}; 78};
79 79
80struct fcp_cmnd { 80struct fcp_cmnd {
81 uint32_t fcpLunMsl; /* most significant lun word (32 bits) */ 81 struct scsi_lun fcp_lun;
82 uint32_t fcpLunLsl; /* least significant lun word (32 bits) */
83 /* # of bits to shift lun id to end up in right
84 * payload word, little endian = 8, big = 16.
85 */
86#ifdef __BIG_ENDIAN
87#define FC_LUN_SHIFT 16
88#define FC_ADDR_MODE_SHIFT 24
89#else /* __LITTLE_ENDIAN */
90#define FC_LUN_SHIFT 8
91#define FC_ADDR_MODE_SHIFT 0
92#endif
93 82
94 uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ 83 uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
95 uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ 84 uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 1775508ed276..e74e224fd77c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -24,9 +24,11 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26 26
27#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h> 28#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h>
30 32
31#include "lpfc_hw.h" 33#include "lpfc_hw.h"
32#include "lpfc_sli.h" 34#include "lpfc_sli.h"
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 47dea48ee0ec..7e6747b06f90 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.0.29" 21#define LPFC_DRIVER_VERSION "8.0.30"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index ff1933298da6..a4857db4f9b8 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1766,7 +1766,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t state)
1766 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); 1766 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1767 unsigned long flags; 1767 unsigned long flags;
1768 1768
1769 if (state == mdev->ofdev.dev.power.power_state || state < 2) 1769 if (state.event == mdev->ofdev.dev.power.power_state.event || state.event < 2)
1770 return 0; 1770 return 0;
1771 1771
1772 scsi_block_requests(ms->host); 1772 scsi_block_requests(ms->host);
@@ -1791,7 +1791,7 @@ static int mesh_resume(struct macio_dev *mdev)
1791 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); 1791 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1792 unsigned long flags; 1792 unsigned long flags;
1793 1793
1794 if (mdev->ofdev.dev.power.power_state == 0) 1794 if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
1795 return 0; 1795 return 0;
1796 1796
1797 set_mesh_power(ms, 1); 1797 set_mesh_power(ms, 1);
@@ -1802,7 +1802,7 @@ static int mesh_resume(struct macio_dev *mdev)
1802 enable_irq(ms->meshintr); 1802 enable_irq(ms->meshintr);
1803 scsi_unblock_requests(ms->host); 1803 scsi_unblock_requests(ms->host);
1804 1804
1805 mdev->ofdev.dev.power.power_state = 0; 1805 mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
1806 1806
1807 return 0; 1807 return 0;
1808} 1808}
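power_state is now a pm_message_t rather than a plain integer, so comparisons go through its .event field; a sketch of the idiom (my_suspend is a placeholder):

	static int my_suspend(struct macio_dev *mdev, pm_message_t state)
	{
		/* nothing to do if the device is already in the requested state */
		if (state.event == mdev->ofdev.dev.power.power_state.event)
			return 0;
		/* ... quiesce the hardware ... */
		mdev->ofdev.dev.power.power_state = state;
		return 0;
	}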
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b993652bfa25..637fb6565d28 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -996,7 +996,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
996 break; 996 break;
997 997
998 case ABORT_DEVICE: 998 case ABORT_DEVICE:
999 ha->flags.in_reset = 1;
1000 if (qla1280_verbose) 999 if (qla1280_verbose)
1001 printk(KERN_INFO 1000 printk(KERN_INFO
1002 "scsi(%ld:%d:%d:%d): Queueing abort device " 1001 "scsi(%ld:%d:%d:%d): Queueing abort device "
@@ -1010,7 +1009,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
1010 printk(KERN_INFO 1009 printk(KERN_INFO
1011 "scsi(%ld:%d:%d:%d): Queueing device reset " 1010 "scsi(%ld:%d:%d:%d): Queueing device reset "
1012 "command.\n", ha->host_no, bus, target, lun); 1011 "command.\n", ha->host_no, bus, target, lun);
1013 ha->flags.in_reset = 1;
1014 if (qla1280_device_reset(ha, bus, target) == 0) 1012 if (qla1280_device_reset(ha, bus, target) == 0)
1015 result = SUCCESS; 1013 result = SUCCESS;
1016 break; 1014 break;
@@ -1019,7 +1017,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
1019 if (qla1280_verbose) 1017 if (qla1280_verbose)
1020 printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS " 1018 printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS "
1021 "DEVICE RESET\n", ha->host_no, bus); 1019 "DEVICE RESET\n", ha->host_no, bus);
1022 ha->flags.in_reset = 1;
1023 if (qla1280_bus_reset(ha, bus == 0)) 1020 if (qla1280_bus_reset(ha, bus == 0))
1024 result = SUCCESS; 1021 result = SUCCESS;
1025 1022
@@ -1047,7 +1044,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
1047 1044
1048 if (!list_empty(&ha->done_q)) 1045 if (!list_empty(&ha->done_q))
1049 qla1280_done(ha); 1046 qla1280_done(ha);
1050 ha->flags.in_reset = 0;
1051 1047
1052 /* If we didn't manage to issue the action, or we have no 1048 /* If we didn't manage to issue the action, or we have no
1053 * command to wait for, exit here */ 1049 * command to wait for, exit here */
@@ -1269,6 +1265,22 @@ qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
1269 return qla1280_biosparam(disk->device, NULL, disk->capacity, geom); 1265 return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
1270} 1266}
1271#endif 1267#endif
1268
1269/* disable risc and host interrupts */
1270static inline void
1271qla1280_disable_intrs(struct scsi_qla_host *ha)
1272{
1273 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1274 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1275}
1276
1277/* enable risc and host interrupts */
1278static inline void
1279qla1280_enable_intrs(struct scsi_qla_host *ha)
1280{
1281 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1282 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1283}
1272 1284
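Both helpers end with a read-back of ictrl, the usual way to flush a posted PCI write before relying on it; a generic sketch of the idiom (ioaddr and REG_ICTRL are placeholders, not qla1280 names):

	writew(0, ioaddr + REG_ICTRL);		/* disable interrupts (write may be posted) */
	(void)readw(ioaddr + REG_ICTRL);	/* read back forces the write to reach the chip */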
1273/************************************************************************** 1285/**************************************************************************
1274 * qla1280_intr_handler 1286 * qla1280_intr_handler
@@ -1290,7 +1302,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1290 ha->isr_count++; 1302 ha->isr_count++;
1291 reg = ha->iobase; 1303 reg = ha->iobase;
1292 1304
1293 WRT_REG_WORD(&reg->ictrl, 0); /* disable our interrupt. */ 1305 qla1280_disable_intrs(ha);
1294 1306
1295 data = qla1280_debounce_register(&reg->istatus); 1307 data = qla1280_debounce_register(&reg->istatus);
1296 /* Check for pending interrupts. */ 1308 /* Check for pending interrupts. */
@@ -1303,8 +1315,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
1303 1315
1304 spin_unlock(HOST_LOCK); 1316 spin_unlock(HOST_LOCK);
1305 1317
1306 /* enable our interrupt. */ 1318 qla1280_enable_intrs(ha);
1307 WRT_REG_WORD(&reg->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1308 1319
1309 LEAVE_INTR("qla1280_intr_handler"); 1320 LEAVE_INTR("qla1280_intr_handler");
1310 return IRQ_RETVAL(handled); 1321 return IRQ_RETVAL(handled);
@@ -1317,7 +1328,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1317 uint8_t mr; 1328 uint8_t mr;
1318 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1329 uint16_t mb[MAILBOX_REGISTER_COUNT];
1319 struct nvram *nv; 1330 struct nvram *nv;
1320 int status; 1331 int status, lun;
1321 1332
1322 nv = &ha->nvram; 1333 nv = &ha->nvram;
1323 1334
@@ -1325,24 +1336,38 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1325 1336
1326 /* Set Target Parameters. */ 1337 /* Set Target Parameters. */
1327 mb[0] = MBC_SET_TARGET_PARAMETERS; 1338 mb[0] = MBC_SET_TARGET_PARAMETERS;
1328 mb[1] = (uint16_t) (bus ? target | BIT_7 : target); 1339 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1329 mb[1] <<= 8; 1340 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1330 1341 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1331 mb[2] = (nv->bus[bus].target[target].parameter.c << 8); 1342 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1343 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1344 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1345 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1346 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1347 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1332 1348
1333 if (IS_ISP1x160(ha)) { 1349 if (IS_ISP1x160(ha)) {
1334 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5; 1350 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1335 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8) | 1351 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1336 nv->bus[bus].target[target].sync_period;
1337 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) | 1352 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1338 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width; 1353 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1339 mr |= BIT_6; 1354 mr |= BIT_6;
1340 } else { 1355 } else {
1341 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8) | 1356 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1342 nv->bus[bus].target[target].sync_period;
1343 } 1357 }
1358 mb[3] |= nv->bus[bus].target[target].sync_period;
1344 1359
1345 status = qla1280_mailbox_command(ha, mr, &mb[0]); 1360 status = qla1280_mailbox_command(ha, mr, mb);
1361
1362 /* Set Device Queue Parameters. */
1363 for (lun = 0; lun < MAX_LUNS; lun++) {
1364 mb[0] = MBC_SET_DEVICE_QUEUE;
1365 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1366 mb[1] |= lun;
1367 mb[2] = nv->bus[bus].max_queue_depth;
1368 mb[3] = nv->bus[bus].target[target].execution_throttle;
1369 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1370 }
1346 1371
1347 if (status) 1372 if (status)
1348 printk(KERN_WARNING "scsi(%ld:%i:%i): " 1373 printk(KERN_WARNING "scsi(%ld:%i:%i): "
@@ -1389,19 +1414,19 @@ qla1280_slave_configure(struct scsi_device *device)
1389 } 1414 }
1390 1415
1391#if LINUX_VERSION_CODE > 0x020500 1416#if LINUX_VERSION_CODE > 0x020500
1392 nv->bus[bus].target[target].parameter.f.enable_sync = device->sdtr; 1417 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1393 nv->bus[bus].target[target].parameter.f.enable_wide = device->wdtr; 1418 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1394 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr; 1419 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1395#endif 1420#endif
1396 1421
1397 if (driver_setup.no_sync || 1422 if (driver_setup.no_sync ||
1398 (driver_setup.sync_mask && 1423 (driver_setup.sync_mask &&
1399 (~driver_setup.sync_mask & (1 << target)))) 1424 (~driver_setup.sync_mask & (1 << target))))
1400 nv->bus[bus].target[target].parameter.f.enable_sync = 0; 1425 nv->bus[bus].target[target].parameter.enable_sync = 0;
1401 if (driver_setup.no_wide || 1426 if (driver_setup.no_wide ||
1402 (driver_setup.wide_mask && 1427 (driver_setup.wide_mask &&
1403 (~driver_setup.wide_mask & (1 << target)))) 1428 (~driver_setup.wide_mask & (1 << target))))
1404 nv->bus[bus].target[target].parameter.f.enable_wide = 0; 1429 nv->bus[bus].target[target].parameter.enable_wide = 0;
1405 if (IS_ISP1x160(ha)) { 1430 if (IS_ISP1x160(ha)) {
1406 if (driver_setup.no_ppr || 1431 if (driver_setup.no_ppr ||
1407 (driver_setup.ppr_mask && 1432 (driver_setup.ppr_mask &&
@@ -1410,7 +1435,7 @@ qla1280_slave_configure(struct scsi_device *device)
1410 } 1435 }
1411 1436
1412 spin_lock_irqsave(HOST_LOCK, flags); 1437 spin_lock_irqsave(HOST_LOCK, flags);
1413 if (nv->bus[bus].target[target].parameter.f.enable_sync) 1438 if (nv->bus[bus].target[target].parameter.enable_sync)
1414 status = qla1280_set_target_parameters(ha, bus, target); 1439 status = qla1280_set_target_parameters(ha, bus, target);
1415 qla1280_get_target_parameters(ha, device); 1440 qla1280_get_target_parameters(ha, device);
1416 spin_unlock_irqrestore(HOST_LOCK, flags); 1441 spin_unlock_irqrestore(HOST_LOCK, flags);
@@ -1448,7 +1473,6 @@ qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q)
1448 * 1473 *
1449 * Input: 1474 * Input:
1450 * ha = adapter block pointer. 1475 * ha = adapter block pointer.
1451 * done_q = done queue.
1452 */ 1476 */
1453static void 1477static void
1454qla1280_done(struct scsi_qla_host *ha) 1478qla1280_done(struct scsi_qla_host *ha)
@@ -1522,7 +1546,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1522 int host_status = DID_ERROR; 1546 int host_status = DID_ERROR;
1523 uint16_t comp_status = le16_to_cpu(sts->comp_status); 1547 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1524 uint16_t state_flags = le16_to_cpu(sts->state_flags); 1548 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1525 uint16_t residual_length = le16_to_cpu(sts->residual_length); 1549 uint16_t residual_length = le32_to_cpu(sts->residual_length);
1526 uint16_t scsi_status = le16_to_cpu(sts->scsi_status); 1550 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1527#if DEBUG_QLA1280_INTR 1551#if DEBUG_QLA1280_INTR
1528 static char *reason[] = { 1552 static char *reason[] = {
@@ -1582,7 +1606,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1582 1606
1583 case CS_DATA_OVERRUN: 1607 case CS_DATA_OVERRUN:
1584 dprintk(2, "Data overrun 0x%x\n", residual_length); 1608 dprintk(2, "Data overrun 0x%x\n", residual_length);
1585 dprintk(2, "qla1280_isr: response packet data\n"); 1609 dprintk(2, "qla1280_return_status: response packet data\n");
1586 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE); 1610 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1587 host_status = DID_ERROR; 1611 host_status = DID_ERROR;
1588 break; 1612 break;
@@ -1617,40 +1641,6 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1617/* QLogic ISP1280 Hardware Support Functions. */ 1641/* QLogic ISP1280 Hardware Support Functions. */
1618/****************************************************************************/ 1642/****************************************************************************/
1619 1643
1620 /*
1621 * qla2100_enable_intrs
1622 * qla2100_disable_intrs
1623 *
1624 * Input:
1625 * ha = adapter block pointer.
1626 *
1627 * Returns:
1628 * None
1629 */
1630static inline void
1631qla1280_enable_intrs(struct scsi_qla_host *ha)
1632{
1633 struct device_reg __iomem *reg;
1634
1635 reg = ha->iobase;
1636 /* enable risc and host interrupts */
1637 WRT_REG_WORD(&reg->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1638 RD_REG_WORD(&reg->ictrl); /* PCI Posted Write flush */
1639 ha->flags.ints_enabled = 1;
1640}
1641
1642static inline void
1643qla1280_disable_intrs(struct scsi_qla_host *ha)
1644{
1645 struct device_reg __iomem *reg;
1646
1647 reg = ha->iobase;
1648 /* disable risc and host interrupts */
1649 WRT_REG_WORD(&reg->ictrl, 0);
1650 RD_REG_WORD(&reg->ictrl); /* PCI Posted Write flush */
1651 ha->flags.ints_enabled = 0;
1652}
1653
1654/* 1644/*
1655 * qla1280_initialize_adapter 1645 * qla1280_initialize_adapter
1656 * Initialize board. 1646 * Initialize board.
@@ -1679,7 +1669,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
1679 ha->flags.reset_active = 0; 1669 ha->flags.reset_active = 0;
1680 ha->flags.abort_isp_active = 0; 1670 ha->flags.abort_isp_active = 0;
1681 1671
1682 ha->flags.ints_enabled = 0;
1683#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1672#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1684 if (ia64_platform_is("sn2")) { 1673 if (ia64_platform_is("sn2")) {
1685 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " 1674 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
@@ -1758,69 +1747,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
1758 return status; 1747 return status;
1759} 1748}
1760 1749
1761
1762/*
1763 * ISP Firmware Test
1764 * Checks if present version of RISC firmware is older than
1765 * driver firmware.
1766 *
1767 * Input:
1768 * ha = adapter block pointer.
1769 *
1770 * Returns:
1771 * 0 = firmware does not need to be loaded.
1772 */
1773static int
1774qla1280_isp_firmware(struct scsi_qla_host *ha)
1775{
1776 struct nvram *nv = (struct nvram *) ha->response_ring;
1777 int status = 0; /* dg 2/27 always loads RISC */
1778 uint16_t mb[MAILBOX_REGISTER_COUNT];
1779
1780 ENTER("qla1280_isp_firmware");
1781
1782 dprintk(1, "scsi(%li): Determining if RISC is loaded\n", ha->host_no);
1783
1784 /* Bad NVRAM data, load RISC code. */
1785 if (!ha->nvram_valid) {
1786 ha->flags.disable_risc_code_load = 0;
1787 } else
1788 ha->flags.disable_risc_code_load =
1789 nv->cntr_flags_1.disable_loading_risc_code;
1790
1791 if (ha->flags.disable_risc_code_load) {
1792 dprintk(3, "qla1280_isp_firmware: Telling RISC to verify "
1793 "checksum of loaded BIOS code.\n");
1794
1795 /* Verify checksum of loaded RISC code. */
1796 mb[0] = MBC_VERIFY_CHECKSUM;
1797 /* mb[1] = ql12_risc_code_addr01; */
1798 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
1799
1800 if (!(status =
1801 qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]))) {
1802 /* Start firmware execution. */
1803 dprintk(3, "qla1280_isp_firmware: Startng F/W "
1804 "execution.\n");
1805
1806 mb[0] = MBC_EXECUTE_FIRMWARE;
1807 /* mb[1] = ql12_risc_code_addr01; */
1808 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
1809 qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1810 } else
1811 printk(KERN_INFO "qla1280: RISC checksum failed.\n");
1812 } else {
1813 dprintk(1, "qla1280: NVRAM configured to load RISC load.\n");
1814 status = 1;
1815 }
1816
1817 if (status)
1818 dprintk(2, "qla1280_isp_firmware: **** Load RISC code ****\n");
1819
1820 LEAVE("qla1280_isp_firmware");
1821 return status;
1822}
1823
1824/* 1750/*
1825 * Chip diagnostics 1751 * Chip diagnostics
1826 * Test chip for proper operation. 1752 * Test chip for proper operation.
@@ -2006,7 +1932,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
2006 "%d,%d(0x%x)\n", 1932 "%d,%d(0x%x)\n",
2007 risc_code_address, cnt, num, risc_address); 1933 risc_code_address, cnt, num, risc_address);
2008 for(i = 0; i < cnt; i++) 1934 for(i = 0; i < cnt; i++)
2009 ((uint16_t *)ha->request_ring)[i] = 1935 ((__le16 *)ha->request_ring)[i] =
2010 cpu_to_le16(risc_code_address[i]); 1936 cpu_to_le16(risc_code_address[i]);
2011 1937
2012 mb[0] = MBC_LOAD_RAM; 1938 mb[0] = MBC_LOAD_RAM;
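The cast change above ((uint16_t *) to (__le16 *)) is part of a wider conversion of the request and response structures to sparse-checked endian types, visible in the qla1280.h hunks later in this patch. A small, self-contained illustration of the pattern; the structure and field names here are invented for the example:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Fields destined for the ISP are declared in their on-wire
     * (little-endian) type and filled with cpu_to_le*(); sparse can
     * then flag any place where a host-order value is stored without
     * conversion. */
    struct example_entry {
            __le16 timeout;
            __le32 handle;
    };

    static void fill_example(struct example_entry *e, u16 timeout, u32 handle)
    {
            e->timeout = cpu_to_le16(timeout);
            e->handle  = cpu_to_le32(handle);
    }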
@@ -2085,7 +2011,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
2085 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 2011 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
2086 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2012 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2087 if (err) { 2013 if (err) {
2088 printk(KERN_ERR "scsi(%li): Failed checksum\n", ha->host_no); 2014 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
2089 return err; 2015 return err;
2090 } 2016 }
2091 2017
@@ -2105,14 +2031,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
2105static int 2031static int
2106qla1280_load_firmware(struct scsi_qla_host *ha) 2032qla1280_load_firmware(struct scsi_qla_host *ha)
2107{ 2033{
2108 int err = -ENODEV; 2034 int err;
2109
2110 /* If firmware needs to be loaded */
2111 if (!qla1280_isp_firmware(ha)) {
2112 printk(KERN_ERR "scsi(%li): isp_firmware() failed!\n",
2113 ha->host_no);
2114 goto out;
2115 }
2116 2035
2117 err = qla1280_chip_diag(ha); 2036 err = qla1280_chip_diag(ha);
2118 if (err) 2037 if (err)
@@ -2246,17 +2165,17 @@ qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2246{ 2165{
2247 struct nvram *nv = &ha->nvram; 2166 struct nvram *nv = &ha->nvram;
2248 2167
2249 nv->bus[bus].target[target].parameter.f.renegotiate_on_error = 1; 2168 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2250 nv->bus[bus].target[target].parameter.f.auto_request_sense = 1; 2169 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2251 nv->bus[bus].target[target].parameter.f.tag_queuing = 1; 2170 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2252 nv->bus[bus].target[target].parameter.f.enable_sync = 1; 2171 nv->bus[bus].target[target].parameter.enable_sync = 1;
2253#if 1 /* Some SCSI Processors do not seem to like this */ 2172#if 1 /* Some SCSI Processors do not seem to like this */
2254 nv->bus[bus].target[target].parameter.f.enable_wide = 1; 2173 nv->bus[bus].target[target].parameter.enable_wide = 1;
2255#endif 2174#endif
2256 nv->bus[bus].target[target].parameter.f.parity_checking = 1;
2257 nv->bus[bus].target[target].parameter.f.disconnect_allowed = 1;
2258 nv->bus[bus].target[target].execution_throttle = 2175 nv->bus[bus].target[target].execution_throttle =
2259 nv->bus[bus].max_queue_depth - 1; 2176 nv->bus[bus].max_queue_depth - 1;
2177 nv->bus[bus].target[target].parameter.parity_checking = 1;
2178 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2260 2179
2261 if (IS_ISP1x160(ha)) { 2180 if (IS_ISP1x160(ha)) {
2262 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; 2181 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
@@ -2284,9 +2203,9 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
2284 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */ 2203 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2285 nv->firmware_feature.f.enable_fast_posting = 1; 2204 nv->firmware_feature.f.enable_fast_posting = 1;
2286 nv->firmware_feature.f.disable_synchronous_backoff = 1; 2205 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2287 nv->termination.f.scsi_bus_0_control = 3; 2206 nv->termination.scsi_bus_0_control = 3;
2288 nv->termination.f.scsi_bus_1_control = 3; 2207 nv->termination.scsi_bus_1_control = 3;
2289 nv->termination.f.auto_term_support = 1; 2208 nv->termination.auto_term_support = 1;
2290 2209
2291 /* 2210 /*
2292 * Set default FIFO magic - What appropriate values would be here 2211 * Set default FIFO magic - What appropriate values would be here
@@ -2296,7 +2215,12 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
2296 * header file provided by QLogic seems to be bogus or incomplete 2215 * header file provided by QLogic seems to be bogus or incomplete
2297 * at best. 2216 * at best.
2298 */ 2217 */
2299 nv->isp_config.c = ISP_CFG1_BENAB|ISP_CFG1_F128; 2218 nv->isp_config.burst_enable = 1;
2219 if (IS_ISP1040(ha))
2220 nv->isp_config.fifo_threshold |= 3;
2221 else
2222 nv->isp_config.fifo_threshold |= 4;
2223
2300 if (IS_ISP1x160(ha)) 2224 if (IS_ISP1x160(ha))
2301 nv->isp_parameter = 0x01; /* fast memory enable */ 2225 nv->isp_parameter = 0x01; /* fast memory enable */
2302 2226
@@ -2327,66 +2251,53 @@ qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2327 struct nvram *nv = &ha->nvram; 2251 struct nvram *nv = &ha->nvram;
2328 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2252 uint16_t mb[MAILBOX_REGISTER_COUNT];
2329 int status, lun; 2253 int status, lun;
2254 uint16_t flag;
2330 2255
2331 /* Set Target Parameters. */ 2256 /* Set Target Parameters. */
2332 mb[0] = MBC_SET_TARGET_PARAMETERS; 2257 mb[0] = MBC_SET_TARGET_PARAMETERS;
2333 mb[1] = (uint16_t) (bus ? target | BIT_7 : target); 2258 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2334 mb[1] <<= 8;
2335
2336 /*
2337 * Do not enable wide, sync, and ppr for the initial
2338 * INQUIRY run. We enable this later if we determine
2339 * the target actually supports it.
2340 */
2341 nv->bus[bus].target[target].parameter.f.
2342 auto_request_sense = 1;
2343 nv->bus[bus].target[target].parameter.f.
2344 stop_queue_on_check = 0;
2345
2346 if (IS_ISP1x160(ha))
2347 nv->bus[bus].target[target].ppr_1x160.
2348 flags.enable_ppr = 0;
2349 2259
2350 /* 2260 /*
2351 * No sync, wide, etc. while probing 2261 * Do not enable sync and ppr for the initial INQUIRY run. We
2262 * enable this later if we determine the target actually
2263 * supports it.
2352 */ 2264 */
2353 mb[2] = (nv->bus[bus].target[target].parameter.c << 8) & 2265 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2354 ~(TP_SYNC /*| TP_WIDE | TP_PPR*/); 2266 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2355 2267
2356 if (IS_ISP1x160(ha)) 2268 if (IS_ISP1x160(ha))
2357 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; 2269 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2358 else 2270 else
2359 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; 2271 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2360 mb[3] |= nv->bus[bus].target[target].sync_period; 2272 mb[3] |= nv->bus[bus].target[target].sync_period;
2361 2273 status = qla1280_mailbox_command(ha, 0x0f, mb);
2362 status = qla1280_mailbox_command(ha, BIT_3 | BIT_2 | BIT_1 | BIT_0, &mb[0]);
2363 2274
2364 /* Save Tag queuing enable flag. */ 2275 /* Save Tag queuing enable flag. */
2365 mb[0] = BIT_0 << target; 2276 flag = (BIT_0 << target) & mb[0];
2366 if (nv->bus[bus].target[target].parameter.f.tag_queuing) 2277 if (nv->bus[bus].target[target].parameter.tag_queuing)
2367 ha->bus_settings[bus].qtag_enables |= mb[0]; 2278 ha->bus_settings[bus].qtag_enables |= flag;
2368 2279
2369 /* Save Device enable flag. */ 2280 /* Save Device enable flag. */
2370 if (IS_ISP1x160(ha)) { 2281 if (IS_ISP1x160(ha)) {
2371 if (nv->bus[bus].target[target].flags.flags1x160.device_enable) 2282 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2372 ha->bus_settings[bus].device_enables |= mb[0]; 2283 ha->bus_settings[bus].device_enables |= flag;
2373 ha->bus_settings[bus].lun_disables |= 0; 2284 ha->bus_settings[bus].lun_disables |= 0;
2374 } else { 2285 } else {
2375 if (nv->bus[bus].target[target].flags.flags1x80.device_enable) 2286 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2376 ha->bus_settings[bus].device_enables |= mb[0]; 2287 ha->bus_settings[bus].device_enables |= flag;
2377 /* Save LUN disable flag. */ 2288 /* Save LUN disable flag. */
2378 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) 2289 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2379 ha->bus_settings[bus].lun_disables |= mb[0]; 2290 ha->bus_settings[bus].lun_disables |= flag;
2380 } 2291 }
2381 2292
2382 /* Set Device Queue Parameters. */ 2293 /* Set Device Queue Parameters. */
2383 for (lun = 0; lun < MAX_LUNS; lun++) { 2294 for (lun = 0; lun < MAX_LUNS; lun++) {
2384 mb[0] = MBC_SET_DEVICE_QUEUE; 2295 mb[0] = MBC_SET_DEVICE_QUEUE;
2385 mb[1] = (uint16_t)(bus ? target | BIT_7 : target); 2296 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2386 mb[1] = mb[1] << 8 | lun; 2297 mb[1] |= lun;
2387 mb[2] = nv->bus[bus].max_queue_depth; 2298 mb[2] = nv->bus[bus].max_queue_depth;
2388 mb[3] = nv->bus[bus].target[target].execution_throttle; 2299 mb[3] = nv->bus[bus].target[target].execution_throttle;
2389 status |= qla1280_mailbox_command(ha, 0x0f, &mb[0]); 2300 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2390 } 2301 }
2391 2302
2392 return status; 2303 return status;
@@ -2431,7 +2342,6 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
2431 struct nvram *nv = &ha->nvram; 2342 struct nvram *nv = &ha->nvram;
2432 int bus, target, status = 0; 2343 int bus, target, status = 0;
2433 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2344 uint16_t mb[MAILBOX_REGISTER_COUNT];
2434 uint16_t mask;
2435 2345
2436 ENTER("qla1280_nvram_config"); 2346 ENTER("qla1280_nvram_config");
2437 2347
@@ -2439,7 +2349,7 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
2439 /* Always force AUTO sense for LINUX SCSI */ 2349 /* Always force AUTO sense for LINUX SCSI */
2440 for (bus = 0; bus < MAX_BUSES; bus++) 2350 for (bus = 0; bus < MAX_BUSES; bus++)
2441 for (target = 0; target < MAX_TARGETS; target++) { 2351 for (target = 0; target < MAX_TARGETS; target++) {
2442 nv->bus[bus].target[target].parameter.f. 2352 nv->bus[bus].target[target].parameter.
2443 auto_request_sense = 1; 2353 auto_request_sense = 1;
2444 } 2354 }
2445 } else { 2355 } else {
@@ -2457,31 +2367,40 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
2457 2367
2458 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK; 2368 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2459 2369
2460 cfg1 = RD_REG_WORD(&reg->cfg_1); 2370 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2461 cdma_conf = RD_REG_WORD(&reg->cdma_cfg); 2371 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2462 ddma_conf = RD_REG_WORD(&reg->ddma_cfg); 2372 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2463 2373
2464 /* Busted fifo, says mjacob. */ 2374 /* Busted fifo, says mjacob. */
2465 if (hwrev == ISP_CFG0_1040A) 2375 if (hwrev != ISP_CFG0_1040A)
2466 WRT_REG_WORD(&reg->cfg_1, cfg1 | ISP_CFG1_F64); 2376 cfg1 |= nv->isp_config.fifo_threshold << 4;
2467 else 2377
2468 WRT_REG_WORD(&reg->cfg_1, cfg1 | ISP_CFG1_F64 | ISP_CFG1_BENAB); 2378 cfg1 |= nv->isp_config.burst_enable << 2;
2379 WRT_REG_WORD(&reg->cfg_1, cfg1);
2469 2380
2470 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB); 2381 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2471 WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB); 2382 WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
2472 } else { 2383 } else {
2384 uint16_t cfg1, term;
2385
2473 /* Set ISP hardware DMA burst */ 2386 /* Set ISP hardware DMA burst */
2474 mb[0] = nv->isp_config.c; 2387 cfg1 = nv->isp_config.fifo_threshold << 4;
2388 cfg1 |= nv->isp_config.burst_enable << 2;
2475 /* Enable DMA arbitration on dual channel controllers */ 2389 /* Enable DMA arbitration on dual channel controllers */
2476 if (ha->ports > 1) 2390 if (ha->ports > 1)
2477 mb[0] |= BIT_13; 2391 cfg1 |= BIT_13;
2478 WRT_REG_WORD(&reg->cfg_1, mb[0]); 2392 WRT_REG_WORD(&reg->cfg_1, cfg1);
2479 2393
2480 /* Set SCSI termination. */ 2394 /* Set SCSI termination. */
2481 WRT_REG_WORD(&reg->gpio_enable, (BIT_3 + BIT_2 + BIT_1 + BIT_0)); 2395 WRT_REG_WORD(&reg->gpio_enable,
2482 mb[0] = nv->termination.c & (BIT_3 + BIT_2 + BIT_1 + BIT_0); 2396 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2483 WRT_REG_WORD(&reg->gpio_data, mb[0]); 2397 term = nv->termination.scsi_bus_1_control;
2398 term |= nv->termination.scsi_bus_0_control << 2;
2399 term |= nv->termination.auto_term_support << 7;
2400 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2401 WRT_REG_WORD(&reg->gpio_data, term);
2484 } 2402 }
2403 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2485 2404
2486 /* ISP parameter word. */ 2405 /* ISP parameter word. */
2487 mb[0] = MBC_SET_SYSTEM_PARAMETER; 2406 mb[0] = MBC_SET_SYSTEM_PARAMETER;
@@ -2497,16 +2416,17 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
2497 2416
2498 /* Firmware feature word. */ 2417 /* Firmware feature word. */
2499 mb[0] = MBC_SET_FIRMWARE_FEATURES; 2418 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2500 mask = BIT_5 | BIT_1 | BIT_0; 2419 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2501 mb[1] = le16_to_cpu(nv->firmware_feature.w) & (mask); 2420 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2421 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2502#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2) 2422#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2503 if (ia64_platform_is("sn2")) { 2423 if (ia64_platform_is("sn2")) {
2504 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " 2424 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2505 "workaround\n", ha->host_no); 2425 "workaround\n", ha->host_no);
2506 mb[1] |= BIT_9; 2426 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2507 } 2427 }
2508#endif 2428#endif
2509 status |= qla1280_mailbox_command(ha, mask, &mb[0]); 2429 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2510 2430
2511 /* Retry count and delay. */ 2431 /* Retry count and delay. */
2512 mb[0] = MBC_SET_RETRY_COUNT; 2432 mb[0] = MBC_SET_RETRY_COUNT;
@@ -2535,27 +2455,27 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
2535 mb[2] |= BIT_5; 2455 mb[2] |= BIT_5;
2536 if (nv->bus[1].config_2.data_line_active_negation) 2456 if (nv->bus[1].config_2.data_line_active_negation)
2537 mb[2] |= BIT_4; 2457 mb[2] |= BIT_4;
2538 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2458 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2539 2459
2540 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; 2460 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2541 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ 2461 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2542 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2462 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2543 2463
2544 /* thingy */ 2464 /* thingy */
2545 mb[0] = MBC_SET_PCI_CONTROL; 2465 mb[0] = MBC_SET_PCI_CONTROL;
2546 mb[1] = 2; /* Data DMA Channel Burst Enable */ 2466 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2547 mb[2] = 2; /* Command DMA Channel Burst Enable */ 2467 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2548 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2468 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2549 2469
2550 mb[0] = MBC_SET_TAG_AGE_LIMIT; 2470 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2551 mb[1] = 8; 2471 mb[1] = 8;
2552 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2472 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2553 2473
2554 /* Selection timeout. */ 2474 /* Selection timeout. */
2555 mb[0] = MBC_SET_SELECTION_TIMEOUT; 2475 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2556 mb[1] = nv->bus[0].selection_timeout; 2476 mb[1] = nv->bus[0].selection_timeout;
2557 mb[2] = nv->bus[1].selection_timeout; 2477 mb[2] = nv->bus[1].selection_timeout;
2558 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2478 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2559 2479
2560 for (bus = 0; bus < ha->ports; bus++) 2480 for (bus = 0; bus < ha->ports; bus++)
2561 status |= qla1280_config_bus(ha, bus); 2481 status |= qla1280_config_bus(ha, bus);
@@ -3066,7 +2986,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3066 struct scsi_cmnd *cmd = sp->cmd; 2986 struct scsi_cmnd *cmd = sp->cmd;
3067 cmd_a64_entry_t *pkt; 2987 cmd_a64_entry_t *pkt;
3068 struct scatterlist *sg = NULL; 2988 struct scatterlist *sg = NULL;
3069 u32 *dword_ptr; 2989 __le32 *dword_ptr;
3070 dma_addr_t dma_handle; 2990 dma_addr_t dma_handle;
3071 int status = 0; 2991 int status = 0;
3072 int cnt; 2992 int cnt;
@@ -3104,10 +3024,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3104 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3024 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3105 } 3025 }
3106 3026
3027 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3028 ha->req_q_cnt, seg_cnt);
3029
3107 /* If room for request in request ring. */ 3030 /* If room for request in request ring. */
3108 if ((req_cnt + 2) >= ha->req_q_cnt) { 3031 if ((req_cnt + 2) >= ha->req_q_cnt) {
3109 status = 1; 3032 status = 1;
3110 dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x req_q_cnt=" 3033 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
3111 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 3034 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
3112 req_cnt); 3035 req_cnt);
3113 goto out; 3036 goto out;
@@ -3119,7 +3042,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3119 3042
3120 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3043 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3121 status = 1; 3044 status = 1;
3122 dprintk(2, "qla1280_64bit_start_scsi: NO ROOM IN " 3045 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
3123 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 3046 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
3124 goto out; 3047 goto out;
3125 } 3048 }
@@ -3128,7 +3051,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3128 ha->req_q_cnt -= req_cnt; 3051 ha->req_q_cnt -= req_cnt;
3129 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); 3052 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
3130 3053
3131 dprintk(2, "64bit_start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, 3054 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
3132 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); 3055 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
3133 dprintk(2, " bus %i, target %i, lun %i\n", 3056 dprintk(2, " bus %i, target %i, lun %i\n",
3134 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3057 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
@@ -3350,7 +3273,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3350 struct scsi_cmnd *cmd = sp->cmd; 3273 struct scsi_cmnd *cmd = sp->cmd;
3351 struct cmd_entry *pkt; 3274 struct cmd_entry *pkt;
3352 struct scatterlist *sg = NULL; 3275 struct scatterlist *sg = NULL;
3353 uint32_t *dword_ptr; 3276 __le32 *dword_ptr;
3354 int status = 0; 3277 int status = 0;
3355 int cnt; 3278 int cnt;
3356 int req_cnt; 3279 int req_cnt;
@@ -3993,21 +3916,21 @@ qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha)
3993 result = cmd->request_buffer; 3916 result = cmd->request_buffer;
3994 n = &ha->nvram; 3917 n = &ha->nvram;
3995 3918
3996 n->bus[bus].target[target].parameter.f.enable_wide = 0; 3919 n->bus[bus].target[target].parameter.enable_wide = 0;
3997 n->bus[bus].target[target].parameter.f.enable_sync = 0; 3920 n->bus[bus].target[target].parameter.enable_sync = 0;
3998 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; 3921 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
3999 3922
4000 if (result[7] & 0x60) 3923 if (result[7] & 0x60)
4001 n->bus[bus].target[target].parameter.f.enable_wide = 1; 3924 n->bus[bus].target[target].parameter.enable_wide = 1;
4002 if (result[7] & 0x10) 3925 if (result[7] & 0x10)
4003 n->bus[bus].target[target].parameter.f.enable_sync = 1; 3926 n->bus[bus].target[target].parameter.enable_sync = 1;
4004 if ((result[2] >= 3) && (result[4] + 5 > 56) && 3927 if ((result[2] >= 3) && (result[4] + 5 > 56) &&
4005 (result[56] & 0x4)) 3928 (result[56] & 0x4))
4006 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 3929 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
4007 3930
4008 dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n", 3931 dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n",
4009 n->bus[bus].target[target].parameter.f.enable_wide, 3932 n->bus[bus].target[target].parameter.enable_wide,
4010 n->bus[bus].target[target].parameter.f.enable_sync, 3933 n->bus[bus].target[target].parameter.enable_sync,
4011 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr); 3934 n->bus[bus].target[target].ppr_1x160.flags.enable_ppr);
4012} 3935}
4013#endif 3936#endif
@@ -4071,7 +3994,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
4071 /* Save ISP completion status */ 3994 /* Save ISP completion status */
4072 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd); 3995 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
4073 3996
4074 if (scsi_status & SS_CHECK_CONDITION) { 3997 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
4075 if (comp_status != CS_ARS_FAILED) { 3998 if (comp_status != CS_ARS_FAILED) {
4076 uint16_t req_sense_length = 3999 uint16_t req_sense_length =
4077 le16_to_cpu(pkt->req_sense_length); 4000 le16_to_cpu(pkt->req_sense_length);
@@ -4650,7 +4573,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4650 if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) { 4573 if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
4651 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { 4574 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
4652 printk(KERN_WARNING "scsi(%li): Unable to set a " 4575 printk(KERN_WARNING "scsi(%li): Unable to set a "
4653 " suitable DMA mask - aboring\n", ha->host_no); 4576 "suitable DMA mask - aborting\n", ha->host_no);
4654 error = -ENODEV; 4577 error = -ENODEV;
4655 goto error_free_irq; 4578 goto error_free_irq;
4656 } 4579 }
@@ -4660,14 +4583,14 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4660#else 4583#else
4661 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { 4584 if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
4662 printk(KERN_WARNING "scsi(%li): Unable to set a " 4585 printk(KERN_WARNING "scsi(%li): Unable to set a "
4663 " suitable DMA mask - aboring\n", ha->host_no); 4586 "suitable DMA mask - aborting\n", ha->host_no);
4664 error = -ENODEV; 4587 error = -ENODEV;
4665 goto error_free_irq; 4588 goto error_free_irq;
4666 } 4589 }
4667#endif 4590#endif
4668 4591
4669 ha->request_ring = pci_alloc_consistent(ha->pdev, 4592 ha->request_ring = pci_alloc_consistent(ha->pdev,
4670 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))), 4593 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4671 &ha->request_dma); 4594 &ha->request_dma);
4672 if (!ha->request_ring) { 4595 if (!ha->request_ring) {
4673 printk(KERN_INFO "qla1280: Failed to get request memory\n"); 4596 printk(KERN_INFO "qla1280: Failed to get request memory\n");
@@ -4675,7 +4598,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4675 } 4598 }
4676 4599
4677 ha->response_ring = pci_alloc_consistent(ha->pdev, 4600 ha->response_ring = pci_alloc_consistent(ha->pdev,
4678 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))), 4601 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4679 &ha->response_dma); 4602 &ha->response_dma);
4680 if (!ha->response_ring) { 4603 if (!ha->response_ring) {
4681 printk(KERN_INFO "qla1280: Failed to get response memory\n"); 4604 printk(KERN_INFO "qla1280: Failed to get response memory\n");
@@ -4758,7 +4681,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4758 4681
4759#if LINUX_VERSION_CODE >= 0x020600 4682#if LINUX_VERSION_CODE >= 0x020600
4760 error_disable_adapter: 4683 error_disable_adapter:
4761 WRT_REG_WORD(&ha->iobase->ictrl, 0); 4684 qla1280_disable_intrs(ha);
4762#endif 4685#endif
4763 error_free_irq: 4686 error_free_irq:
4764 free_irq(pdev->irq, ha); 4687 free_irq(pdev->irq, ha);
@@ -4770,11 +4693,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4770#endif 4693#endif
4771 error_free_response_ring: 4694 error_free_response_ring:
4772 pci_free_consistent(ha->pdev, 4695 pci_free_consistent(ha->pdev,
4773 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))), 4696 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4774 ha->response_ring, ha->response_dma); 4697 ha->response_ring, ha->response_dma);
4775 error_free_request_ring: 4698 error_free_request_ring:
4776 pci_free_consistent(ha->pdev, 4699 pci_free_consistent(ha->pdev,
4777 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))), 4700 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4778 ha->request_ring, ha->request_dma); 4701 ha->request_ring, ha->request_dma);
4779 error_put_host: 4702 error_put_host:
4780 scsi_host_put(host); 4703 scsi_host_put(host);
@@ -4795,7 +4718,7 @@ qla1280_remove_one(struct pci_dev *pdev)
4795 scsi_remove_host(host); 4718 scsi_remove_host(host);
4796#endif 4719#endif
4797 4720
4798 WRT_REG_WORD(&ha->iobase->ictrl, 0); 4721 qla1280_disable_intrs(ha);
4799 4722
4800 free_irq(pdev->irq, ha); 4723 free_irq(pdev->irq, ha);
4801 4724
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d245ae07518e..59915fb70301 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -94,9 +94,6 @@
94#define REQUEST_ENTRY_CNT 256 /* Number of request entries. */ 94#define REQUEST_ENTRY_CNT 256 /* Number of request entries. */
95#define RESPONSE_ENTRY_CNT 16 /* Number of response entries. */ 95#define RESPONSE_ENTRY_CNT 16 /* Number of response entries. */
96 96
97/* Number of segments 1 - 65535 */
98#define SG_SEGMENTS 32 /* Cmd entry + 6 continuations */
99
100/* 97/*
101 * SCSI Request Block structure (sp) that is placed 98 * SCSI Request Block structure (sp) that is placed
102 * on cmd->SCp location of every I/O 99 * on cmd->SCp location of every I/O
@@ -378,29 +375,23 @@ struct nvram {
378 uint16_t unused_12; /* 12, 13 */ 375 uint16_t unused_12; /* 12, 13 */
379 uint16_t unused_14; /* 14, 15 */ 376 uint16_t unused_14; /* 14, 15 */
380 377
381 union { 378 struct {
382 uint8_t c; 379 uint8_t reserved:2;
383 struct { 380 uint8_t burst_enable:1;
384 uint8_t reserved:2; 381 uint8_t reserved_1:1;
385 uint8_t burst_enable:1; 382 uint8_t fifo_threshold:4;
386 uint8_t reserved_1:1;
387 uint8_t fifo_threshold:4;
388 } f;
389 } isp_config; /* 16 */ 383 } isp_config; /* 16 */
390 384
391 /* Termination 385 /* Termination
392 * 0 = Disable, 1 = high only, 3 = Auto term 386 * 0 = Disable, 1 = high only, 3 = Auto term
393 */ 387 */
394 union { 388 struct {
395 uint8_t c; 389 uint8_t scsi_bus_1_control:2;
396 struct { 390 uint8_t scsi_bus_0_control:2;
397 uint8_t scsi_bus_1_control:2; 391 uint8_t unused_0:1;
398 uint8_t scsi_bus_0_control:2; 392 uint8_t unused_1:1;
399 uint8_t unused_0:1; 393 uint8_t unused_2:1;
400 uint8_t unused_1:1; 394 uint8_t auto_term_support:1;
401 uint8_t unused_2:1;
402 uint8_t auto_term_support:1;
403 } f;
404 } termination; /* 17 */ 395 } termination; /* 17 */
405 396
406 uint16_t isp_parameter; /* 18, 19 */ 397 uint16_t isp_parameter; /* 18, 19 */
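With the union wrappers removed, callers shift the named termination fields into the GPIO data word themselves (see the qla1280_nvram_config() hunk earlier in this patch). A standalone sketch of that packing, with an invented struct standing in for nv->termination; the shift positions follow the hunk:

    #include <stdint.h>

    /* Hypothetical stand-in for the NVRAM termination byte (offset 17). */
    struct term_bits {
            unsigned int scsi_bus_1_control:2;  /* 0 = disable, 1 = high only, 3 = auto */
            unsigned int scsi_bus_0_control:2;
            unsigned int unused:3;
            unsigned int auto_term_support:1;
    };

    static uint16_t term_to_gpio(const struct term_bits *t)
    {
            uint16_t word = 0;

            word |= t->scsi_bus_1_control;       /* bits 0-1 */
            word |= t->scsi_bus_0_control << 2;  /* bits 2-3 */
            word |= t->auto_term_support << 7;   /* bit 7 */

            return word;
    }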
@@ -460,18 +451,15 @@ struct nvram {
460 uint16_t unused_38; /* 38, 39 */ 451 uint16_t unused_38; /* 38, 39 */
461 452
462 struct { 453 struct {
463 union { 454 struct {
464 uint8_t c; 455 uint8_t renegotiate_on_error:1;
465 struct { 456 uint8_t stop_queue_on_check:1;
466 uint8_t renegotiate_on_error:1; 457 uint8_t auto_request_sense:1;
467 uint8_t stop_queue_on_check:1; 458 uint8_t tag_queuing:1;
468 uint8_t auto_request_sense:1; 459 uint8_t enable_sync:1;
469 uint8_t tag_queuing:1; 460 uint8_t enable_wide:1;
470 uint8_t enable_sync:1; 461 uint8_t parity_checking:1;
471 uint8_t enable_wide:1; 462 uint8_t disconnect_allowed:1;
472 uint8_t parity_checking:1;
473 uint8_t disconnect_allowed:1;
474 } f;
475 } parameter; /* 40 */ 463 } parameter; /* 40 */
476 464
477 uint8_t execution_throttle; /* 41 */ 465 uint8_t execution_throttle; /* 41 */
@@ -528,23 +516,23 @@ struct cmd_entry {
528 uint8_t entry_count; /* Entry count. */ 516 uint8_t entry_count; /* Entry count. */
529 uint8_t sys_define; /* System defined. */ 517 uint8_t sys_define; /* System defined. */
530 uint8_t entry_status; /* Entry Status. */ 518 uint8_t entry_status; /* Entry Status. */
531 uint32_t handle; /* System handle. */ 519 __le32 handle; /* System handle. */
532 uint8_t lun; /* SCSI LUN */ 520 uint8_t lun; /* SCSI LUN */
533 uint8_t target; /* SCSI ID */ 521 uint8_t target; /* SCSI ID */
534 uint16_t cdb_len; /* SCSI command length. */ 522 __le16 cdb_len; /* SCSI command length. */
535 uint16_t control_flags; /* Control flags. */ 523 __le16 control_flags; /* Control flags. */
536 uint16_t reserved; 524 __le16 reserved;
537 uint16_t timeout; /* Command timeout. */ 525 __le16 timeout; /* Command timeout. */
538 uint16_t dseg_count; /* Data segment count. */ 526 __le16 dseg_count; /* Data segment count. */
539 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ 527 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
540 uint32_t dseg_0_address; /* Data segment 0 address. */ 528 __le32 dseg_0_address; /* Data segment 0 address. */
541 uint32_t dseg_0_length; /* Data segment 0 length. */ 529 __le32 dseg_0_length; /* Data segment 0 length. */
542 uint32_t dseg_1_address; /* Data segment 1 address. */ 530 __le32 dseg_1_address; /* Data segment 1 address. */
543 uint32_t dseg_1_length; /* Data segment 1 length. */ 531 __le32 dseg_1_length; /* Data segment 1 length. */
544 uint32_t dseg_2_address; /* Data segment 2 address. */ 532 __le32 dseg_2_address; /* Data segment 2 address. */
545 uint32_t dseg_2_length; /* Data segment 2 length. */ 533 __le32 dseg_2_length; /* Data segment 2 length. */
546 uint32_t dseg_3_address; /* Data segment 3 address. */ 534 __le32 dseg_3_address; /* Data segment 3 address. */
547 uint32_t dseg_3_length; /* Data segment 3 length. */ 535 __le32 dseg_3_length; /* Data segment 3 length. */
548}; 536};
549 537
550/* 538/*
@@ -556,21 +544,21 @@ struct cont_entry {
556 uint8_t entry_count; /* Entry count. */ 544 uint8_t entry_count; /* Entry count. */
557 uint8_t sys_define; /* System defined. */ 545 uint8_t sys_define; /* System defined. */
558 uint8_t entry_status; /* Entry Status. */ 546 uint8_t entry_status; /* Entry Status. */
559 uint32_t reserved; /* Reserved */ 547 __le32 reserved; /* Reserved */
560 uint32_t dseg_0_address; /* Data segment 0 address. */ 548 __le32 dseg_0_address; /* Data segment 0 address. */
561 uint32_t dseg_0_length; /* Data segment 0 length. */ 549 __le32 dseg_0_length; /* Data segment 0 length. */
562 uint32_t dseg_1_address; /* Data segment 1 address. */ 550 __le32 dseg_1_address; /* Data segment 1 address. */
563 uint32_t dseg_1_length; /* Data segment 1 length. */ 551 __le32 dseg_1_length; /* Data segment 1 length. */
564 uint32_t dseg_2_address; /* Data segment 2 address. */ 552 __le32 dseg_2_address; /* Data segment 2 address. */
565 uint32_t dseg_2_length; /* Data segment 2 length. */ 553 __le32 dseg_2_length; /* Data segment 2 length. */
566 uint32_t dseg_3_address; /* Data segment 3 address. */ 554 __le32 dseg_3_address; /* Data segment 3 address. */
567 uint32_t dseg_3_length; /* Data segment 3 length. */ 555 __le32 dseg_3_length; /* Data segment 3 length. */
568 uint32_t dseg_4_address; /* Data segment 4 address. */ 556 __le32 dseg_4_address; /* Data segment 4 address. */
569 uint32_t dseg_4_length; /* Data segment 4 length. */ 557 __le32 dseg_4_length; /* Data segment 4 length. */
570 uint32_t dseg_5_address; /* Data segment 5 address. */ 558 __le32 dseg_5_address; /* Data segment 5 address. */
571 uint32_t dseg_5_length; /* Data segment 5 length. */ 559 __le32 dseg_5_length; /* Data segment 5 length. */
572 uint32_t dseg_6_address; /* Data segment 6 address. */ 560 __le32 dseg_6_address; /* Data segment 6 address. */
573 uint32_t dseg_6_length; /* Data segment 6 length. */ 561 __le32 dseg_6_length; /* Data segment 6 length. */
574}; 562};
575 563
576/* 564/*
@@ -586,22 +574,22 @@ struct response {
586#define RF_FULL BIT_1 /* Full */ 574#define RF_FULL BIT_1 /* Full */
587#define RF_BAD_HEADER BIT_2 /* Bad header. */ 575#define RF_BAD_HEADER BIT_2 /* Bad header. */
588#define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */ 576#define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */
589 uint32_t handle; /* System handle. */ 577 __le32 handle; /* System handle. */
590 uint16_t scsi_status; /* SCSI status. */ 578 __le16 scsi_status; /* SCSI status. */
591 uint16_t comp_status; /* Completion status. */ 579 __le16 comp_status; /* Completion status. */
592 uint16_t state_flags; /* State flags. */ 580 __le16 state_flags; /* State flags. */
593#define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. */ 581#define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. */
594#define SF_GOT_SENSE BIT_13 /* Got Sense */ 582#define SF_GOT_SENSE BIT_13 /* Got Sense */
595#define SF_GOT_STATUS BIT_12 /* Got Status */ 583#define SF_GOT_STATUS BIT_12 /* Got Status */
596#define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */ 584#define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */
597#define SF_SENT_CDB BIT_10 /* Send CDB */ 585#define SF_SENT_CDB BIT_10 /* Send CDB */
598#define SF_GOT_TARGET BIT_9 /* */ 586#define SF_GOT_TARGET BIT_9 /* */
599#define SF_GOT_BUS BIT_8 /* */ 587#define SF_GOT_BUS BIT_8 /* */
600 uint16_t status_flags; /* Status flags. */ 588 __le16 status_flags; /* Status flags. */
601 uint16_t time; /* Time. */ 589 __le16 time; /* Time. */
602 uint16_t req_sense_length; /* Request sense data length. */ 590 __le16 req_sense_length;/* Request sense data length. */
603 uint32_t residual_length; /* Residual transfer length. */ 591 __le32 residual_length; /* Residual transfer length. */
604 uint16_t reserved[4]; 592 __le16 reserved[4];
605 uint8_t req_sense_data[32]; /* Request sense data. */ 593 uint8_t req_sense_data[32]; /* Request sense data. */
606}; 594};
607 595
@@ -614,7 +602,7 @@ struct mrk_entry {
614 uint8_t entry_count; /* Entry count. */ 602 uint8_t entry_count; /* Entry count. */
615 uint8_t sys_define; /* System defined. */ 603 uint8_t sys_define; /* System defined. */
616 uint8_t entry_status; /* Entry Status. */ 604 uint8_t entry_status; /* Entry Status. */
617 uint32_t reserved; 605 __le32 reserved;
618 uint8_t lun; /* SCSI LUN */ 606 uint8_t lun; /* SCSI LUN */
619 uint8_t target; /* SCSI ID */ 607 uint8_t target; /* SCSI ID */
620 uint8_t modifier; /* Modifier (7-0). */ 608 uint8_t modifier; /* Modifier (7-0). */
@@ -638,11 +626,11 @@ struct ecmd_entry {
638 uint32_t handle; /* System handle. */ 626 uint32_t handle; /* System handle. */
639 uint8_t lun; /* SCSI LUN */ 627 uint8_t lun; /* SCSI LUN */
640 uint8_t target; /* SCSI ID */ 628 uint8_t target; /* SCSI ID */
641 uint16_t cdb_len; /* SCSI command length. */ 629 __le16 cdb_len; /* SCSI command length. */
642 uint16_t control_flags; /* Control flags. */ 630 __le16 control_flags; /* Control flags. */
643 uint16_t reserved; 631 __le16 reserved;
644 uint16_t timeout; /* Command timeout. */ 632 __le16 timeout; /* Command timeout. */
645 uint16_t dseg_count; /* Data segment count. */ 633 __le16 dseg_count; /* Data segment count. */
646 uint8_t scsi_cdb[88]; /* SCSI command words. */ 634 uint8_t scsi_cdb[88]; /* SCSI command words. */
647}; 635};
648 636
@@ -655,20 +643,20 @@ typedef struct {
655 uint8_t entry_count; /* Entry count. */ 643 uint8_t entry_count; /* Entry count. */
656 uint8_t sys_define; /* System defined. */ 644 uint8_t sys_define; /* System defined. */
657 uint8_t entry_status; /* Entry Status. */ 645 uint8_t entry_status; /* Entry Status. */
658 uint32_t handle; /* System handle. */ 646 __le32 handle; /* System handle. */
659 uint8_t lun; /* SCSI LUN */ 647 uint8_t lun; /* SCSI LUN */
660 uint8_t target; /* SCSI ID */ 648 uint8_t target; /* SCSI ID */
661 uint16_t cdb_len; /* SCSI command length. */ 649 __le16 cdb_len; /* SCSI command length. */
662 uint16_t control_flags; /* Control flags. */ 650 __le16 control_flags; /* Control flags. */
663 uint16_t reserved; 651 __le16 reserved;
664 uint16_t timeout; /* Command timeout. */ 652 __le16 timeout; /* Command timeout. */
665 uint16_t dseg_count; /* Data segment count. */ 653 __le16 dseg_count; /* Data segment count. */
666 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ 654 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
667 uint32_t reserved_1[2]; /* unused */ 655 __le32 reserved_1[2]; /* unused */
668 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 656 __le32 dseg_0_address[2]; /* Data segment 0 address. */
669 uint32_t dseg_0_length; /* Data segment 0 length. */ 657 __le32 dseg_0_length; /* Data segment 0 length. */
670 uint32_t dseg_1_address[2]; /* Data segment 1 address. */ 658 __le32 dseg_1_address[2]; /* Data segment 1 address. */
671 uint32_t dseg_1_length; /* Data segment 1 length. */ 659 __le32 dseg_1_length; /* Data segment 1 length. */
672} cmd_a64_entry_t, request_t; 660} cmd_a64_entry_t, request_t;
673 661
674/* 662/*
@@ -680,16 +668,16 @@ struct cont_a64_entry {
680 uint8_t entry_count; /* Entry count. */ 668 uint8_t entry_count; /* Entry count. */
681 uint8_t sys_define; /* System defined. */ 669 uint8_t sys_define; /* System defined. */
682 uint8_t entry_status; /* Entry Status. */ 670 uint8_t entry_status; /* Entry Status. */
683 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 671 __le32 dseg_0_address[2]; /* Data segment 0 address. */
684 uint32_t dseg_0_length; /* Data segment 0 length. */ 672 __le32 dseg_0_length; /* Data segment 0 length. */
685 uint32_t dseg_1_address[2]; /* Data segment 1 address. */ 673 __le32 dseg_1_address[2]; /* Data segment 1 address. */
686 uint32_t dseg_1_length; /* Data segment 1 length. */ 674 __le32 dseg_1_length; /* Data segment 1 length. */
687 uint32_t dseg_2_address[2]; /* Data segment 2 address. */ 675 __le32 dseg_2_address[2]; /* Data segment 2 address. */
688 uint32_t dseg_2_length; /* Data segment 2 length. */ 676 __le32 dseg_2_length; /* Data segment 2 length. */
689 uint32_t dseg_3_address[2]; /* Data segment 3 address. */ 677 __le32 dseg_3_address[2]; /* Data segment 3 address. */
690 uint32_t dseg_3_length; /* Data segment 3 length. */ 678 __le32 dseg_3_length; /* Data segment 3 length. */
691 uint32_t dseg_4_address[2]; /* Data segment 4 address. */ 679 __le32 dseg_4_address[2]; /* Data segment 4 address. */
692 uint32_t dseg_4_length; /* Data segment 4 length. */ 680 __le32 dseg_4_length; /* Data segment 4 length. */
693}; 681};
694 682
695/* 683/*
@@ -701,10 +689,10 @@ struct elun_entry {
701 uint8_t entry_count; /* Entry count. */ 689 uint8_t entry_count; /* Entry count. */
702 uint8_t reserved_1; 690 uint8_t reserved_1;
703 uint8_t entry_status; /* Entry Status not used. */ 691 uint8_t entry_status; /* Entry Status not used. */
704 uint32_t reserved_2; 692 __le32 reserved_2;
705 uint16_t lun; /* Bit 15 is bus number. */ 693 __le16 lun; /* Bit 15 is bus number. */
706 uint16_t reserved_4; 694 __le16 reserved_4;
707 uint32_t option_flags; 695 __le32 option_flags;
708 uint8_t status; 696 uint8_t status;
709 uint8_t reserved_5; 697 uint8_t reserved_5;
710 uint8_t command_count; /* Number of ATIOs allocated. */ 698 uint8_t command_count; /* Number of ATIOs allocated. */
@@ -714,8 +702,8 @@ struct elun_entry {
714 /* commands (2-26). */ 702 /* commands (2-26). */
715 uint8_t group_7_length; /* SCSI CDB length for group 7 */ 703 uint8_t group_7_length; /* SCSI CDB length for group 7 */
716 /* commands (2-26). */ 704 /* commands (2-26). */
717 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 705 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
718 uint16_t reserved_6[20]; 706 __le16 reserved_6[20];
719}; 707};
720 708
721/* 709/*
@@ -729,20 +717,20 @@ struct modify_lun_entry {
729 uint8_t entry_count; /* Entry count. */ 717 uint8_t entry_count; /* Entry count. */
730 uint8_t reserved_1; 718 uint8_t reserved_1;
731 uint8_t entry_status; /* Entry Status. */ 719 uint8_t entry_status; /* Entry Status. */
732 uint32_t reserved_2; 720 __le32 reserved_2;
733 uint8_t lun; /* SCSI LUN */ 721 uint8_t lun; /* SCSI LUN */
734 uint8_t reserved_3; 722 uint8_t reserved_3;
735 uint8_t operators; 723 uint8_t operators;
736 uint8_t reserved_4; 724 uint8_t reserved_4;
737 uint32_t option_flags; 725 __le32 option_flags;
738 uint8_t status; 726 uint8_t status;
739 uint8_t reserved_5; 727 uint8_t reserved_5;
740 uint8_t command_count; /* Number of ATIOs allocated. */ 728 uint8_t command_count; /* Number of ATIOs allocated. */
741 uint8_t immed_notify_count; /* Number of Immediate Notify */ 729 uint8_t immed_notify_count; /* Number of Immediate Notify */
742 /* entries allocated. */ 730 /* entries allocated. */
743 uint16_t reserved_6; 731 __le16 reserved_6;
744 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 732 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
745 uint16_t reserved_7[20]; 733 __le16 reserved_7[20];
746}; 734};
747 735
748/* 736/*
@@ -754,20 +742,20 @@ struct notify_entry {
754 uint8_t entry_count; /* Entry count. */ 742 uint8_t entry_count; /* Entry count. */
755 uint8_t reserved_1; 743 uint8_t reserved_1;
756 uint8_t entry_status; /* Entry Status. */ 744 uint8_t entry_status; /* Entry Status. */
757 uint32_t reserved_2; 745 __le32 reserved_2;
758 uint8_t lun; 746 uint8_t lun;
759 uint8_t initiator_id; 747 uint8_t initiator_id;
760 uint8_t reserved_3; 748 uint8_t reserved_3;
761 uint8_t target_id; 749 uint8_t target_id;
762 uint32_t option_flags; 750 __le32 option_flags;
763 uint8_t status; 751 uint8_t status;
764 uint8_t reserved_4; 752 uint8_t reserved_4;
765 uint8_t tag_value; /* Received queue tag message value */ 753 uint8_t tag_value; /* Received queue tag message value */
766 uint8_t tag_type; /* Received queue tag message type */ 754 uint8_t tag_type; /* Received queue tag message type */
767 /* entries allocated. */ 755 /* entries allocated. */
768 uint16_t seq_id; 756 __le16 seq_id;
769 uint8_t scsi_msg[8]; /* SCSI message not handled by ISP */ 757 uint8_t scsi_msg[8]; /* SCSI message not handled by ISP */
770 uint16_t reserved_5[8]; 758 __le16 reserved_5[8];
771 uint8_t sense_data[18]; 759 uint8_t sense_data[18];
772}; 760};
773 761
@@ -780,16 +768,16 @@ struct nack_entry {
780 uint8_t entry_count; /* Entry count. */ 768 uint8_t entry_count; /* Entry count. */
781 uint8_t reserved_1; 769 uint8_t reserved_1;
782 uint8_t entry_status; /* Entry Status. */ 770 uint8_t entry_status; /* Entry Status. */
783 uint32_t reserved_2; 771 __le32 reserved_2;
784 uint8_t lun; 772 uint8_t lun;
785 uint8_t initiator_id; 773 uint8_t initiator_id;
786 uint8_t reserved_3; 774 uint8_t reserved_3;
787 uint8_t target_id; 775 uint8_t target_id;
788 uint32_t option_flags; 776 __le32 option_flags;
789 uint8_t status; 777 uint8_t status;
790 uint8_t event; 778 uint8_t event;
791 uint16_t seq_id; 779 __le16 seq_id;
792 uint16_t reserved_4[22]; 780 __le16 reserved_4[22];
793}; 781};
794 782
795/* 783/*
@@ -801,12 +789,12 @@ struct atio_entry {
801 uint8_t entry_count; /* Entry count. */ 789 uint8_t entry_count; /* Entry count. */
802 uint8_t reserved_1; 790 uint8_t reserved_1;
803 uint8_t entry_status; /* Entry Status. */ 791 uint8_t entry_status; /* Entry Status. */
804 uint32_t reserved_2; 792 __le32 reserved_2;
805 uint8_t lun; 793 uint8_t lun;
806 uint8_t initiator_id; 794 uint8_t initiator_id;
807 uint8_t cdb_len; 795 uint8_t cdb_len;
808 uint8_t target_id; 796 uint8_t target_id;
809 uint32_t option_flags; 797 __le32 option_flags;
810 uint8_t status; 798 uint8_t status;
811 uint8_t scsi_status; 799 uint8_t scsi_status;
812 uint8_t tag_value; /* Received queue tag message value */ 800 uint8_t tag_value; /* Received queue tag message value */
@@ -824,28 +812,28 @@ struct ctio_entry {
824 uint8_t entry_count; /* Entry count. */ 812 uint8_t entry_count; /* Entry count. */
825 uint8_t reserved_1; 813 uint8_t reserved_1;
826 uint8_t entry_status; /* Entry Status. */ 814 uint8_t entry_status; /* Entry Status. */
827 uint32_t reserved_2; 815 __le32 reserved_2;
828 uint8_t lun; /* SCSI LUN */ 816 uint8_t lun; /* SCSI LUN */
829 uint8_t initiator_id; 817 uint8_t initiator_id;
830 uint8_t reserved_3; 818 uint8_t reserved_3;
831 uint8_t target_id; 819 uint8_t target_id;
832 uint32_t option_flags; 820 __le32 option_flags;
833 uint8_t status; 821 uint8_t status;
834 uint8_t scsi_status; 822 uint8_t scsi_status;
835 uint8_t tag_value; /* Received queue tag message value */ 823 uint8_t tag_value; /* Received queue tag message value */
836 uint8_t tag_type; /* Received queue tag message type */ 824 uint8_t tag_type; /* Received queue tag message type */
837 uint32_t transfer_length; 825 __le32 transfer_length;
838 uint32_t residual; 826 __le32 residual;
839 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 827 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
840 uint16_t dseg_count; /* Data segment count. */ 828 __le16 dseg_count; /* Data segment count. */
841 uint32_t dseg_0_address; /* Data segment 0 address. */ 829 __le32 dseg_0_address; /* Data segment 0 address. */
842 uint32_t dseg_0_length; /* Data segment 0 length. */ 830 __le32 dseg_0_length; /* Data segment 0 length. */
843 uint32_t dseg_1_address; /* Data segment 1 address. */ 831 __le32 dseg_1_address; /* Data segment 1 address. */
844 uint32_t dseg_1_length; /* Data segment 1 length. */ 832 __le32 dseg_1_length; /* Data segment 1 length. */
845 uint32_t dseg_2_address; /* Data segment 2 address. */ 833 __le32 dseg_2_address; /* Data segment 2 address. */
846 uint32_t dseg_2_length; /* Data segment 2 length. */ 834 __le32 dseg_2_length; /* Data segment 2 length. */
847 uint32_t dseg_3_address; /* Data segment 3 address. */ 835 __le32 dseg_3_address; /* Data segment 3 address. */
848 uint32_t dseg_3_length; /* Data segment 3 length. */ 836 __le32 dseg_3_length; /* Data segment 3 length. */
849}; 837};
850 838
851/* 839/*
@@ -857,24 +845,24 @@ struct ctio_ret_entry {
857 uint8_t entry_count; /* Entry count. */ 845 uint8_t entry_count; /* Entry count. */
858 uint8_t reserved_1; 846 uint8_t reserved_1;
859 uint8_t entry_status; /* Entry Status. */ 847 uint8_t entry_status; /* Entry Status. */
860 uint32_t reserved_2; 848 __le32 reserved_2;
861 uint8_t lun; /* SCSI LUN */ 849 uint8_t lun; /* SCSI LUN */
862 uint8_t initiator_id; 850 uint8_t initiator_id;
863 uint8_t reserved_3; 851 uint8_t reserved_3;
864 uint8_t target_id; 852 uint8_t target_id;
865 uint32_t option_flags; 853 __le32 option_flags;
866 uint8_t status; 854 uint8_t status;
867 uint8_t scsi_status; 855 uint8_t scsi_status;
868 uint8_t tag_value; /* Received queue tag message value */ 856 uint8_t tag_value; /* Received queue tag message value */
869 uint8_t tag_type; /* Received queue tag message type */ 857 uint8_t tag_type; /* Received queue tag message type */
870 uint32_t transfer_length; 858 __le32 transfer_length;
871 uint32_t residual; 859 __le32 residual;
872 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 860 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
873 uint16_t dseg_count; /* Data segment count. */ 861 __le16 dseg_count; /* Data segment count. */
874 uint32_t dseg_0_address; /* Data segment 0 address. */ 862 __le32 dseg_0_address; /* Data segment 0 address. */
875 uint32_t dseg_0_length; /* Data segment 0 length. */ 863 __le32 dseg_0_length; /* Data segment 0 length. */
876 uint32_t dseg_1_address; /* Data segment 1 address. */ 864 __le32 dseg_1_address; /* Data segment 1 address. */
877 uint16_t dseg_1_length; /* Data segment 1 length. */ 865 __le16 dseg_1_length; /* Data segment 1 length. */
878 uint8_t sense_data[18]; 866 uint8_t sense_data[18];
879}; 867};
880 868
@@ -887,25 +875,25 @@ struct ctio_a64_entry {
887 uint8_t entry_count; /* Entry count. */ 875 uint8_t entry_count; /* Entry count. */
888 uint8_t reserved_1; 876 uint8_t reserved_1;
889 uint8_t entry_status; /* Entry Status. */ 877 uint8_t entry_status; /* Entry Status. */
890 uint32_t reserved_2; 878 __le32 reserved_2;
891 uint8_t lun; /* SCSI LUN */ 879 uint8_t lun; /* SCSI LUN */
892 uint8_t initiator_id; 880 uint8_t initiator_id;
893 uint8_t reserved_3; 881 uint8_t reserved_3;
894 uint8_t target_id; 882 uint8_t target_id;
895 uint32_t option_flags; 883 __le32 option_flags;
896 uint8_t status; 884 uint8_t status;
897 uint8_t scsi_status; 885 uint8_t scsi_status;
898 uint8_t tag_value; /* Received queue tag message value */ 886 uint8_t tag_value; /* Received queue tag message value */
899 uint8_t tag_type; /* Received queue tag message type */ 887 uint8_t tag_type; /* Received queue tag message type */
900 uint32_t transfer_length; 888 __le32 transfer_length;
901 uint32_t residual; 889 __le32 residual;
902 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 890 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
903 uint16_t dseg_count; /* Data segment count. */ 891 __le16 dseg_count; /* Data segment count. */
904 uint32_t reserved_4[2]; 892 __le32 reserved_4[2];
905 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 893 __le32 dseg_0_address[2];/* Data segment 0 address. */
906 uint32_t dseg_0_length; /* Data segment 0 length. */ 894 __le32 dseg_0_length; /* Data segment 0 length. */
907 uint32_t dseg_1_address[2]; /* Data segment 1 address. */ 895 __le32 dseg_1_address[2];/* Data segment 1 address. */
908 uint32_t dseg_1_length; /* Data segment 1 length. */ 896 __le32 dseg_1_length; /* Data segment 1 length. */
909}; 897};
910 898
911/* 899/*
@@ -917,21 +905,21 @@ struct ctio_a64_ret_entry {
917 uint8_t entry_count; /* Entry count. */ 905 uint8_t entry_count; /* Entry count. */
918 uint8_t reserved_1; 906 uint8_t reserved_1;
919 uint8_t entry_status; /* Entry Status. */ 907 uint8_t entry_status; /* Entry Status. */
920 uint32_t reserved_2; 908 __le32 reserved_2;
921 uint8_t lun; /* SCSI LUN */ 909 uint8_t lun; /* SCSI LUN */
922 uint8_t initiator_id; 910 uint8_t initiator_id;
923 uint8_t reserved_3; 911 uint8_t reserved_3;
924 uint8_t target_id; 912 uint8_t target_id;
925 uint32_t option_flags; 913 __le32 option_flags;
926 uint8_t status; 914 uint8_t status;
927 uint8_t scsi_status; 915 uint8_t scsi_status;
928 uint8_t tag_value; /* Received queue tag message value */ 916 uint8_t tag_value; /* Received queue tag message value */
929 uint8_t tag_type; /* Received queue tag message type */ 917 uint8_t tag_type; /* Received queue tag message type */
930 uint32_t transfer_length; 918 __le32 transfer_length;
931 uint32_t residual; 919 __le32 residual;
932 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ 920 __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
933 uint16_t dseg_count; /* Data segment count. */ 921 __le16 dseg_count; /* Data segment count. */
934 uint16_t reserved_4[7]; 922 __le16 reserved_4[7];
935 uint8_t sense_data[18]; 923 uint8_t sense_data[18];
936}; 924};
937 925
@@ -979,14 +967,6 @@ struct ctio_a64_ret_entry {
979#define CS_RETRY 0x82 /* Driver defined */ 967#define CS_RETRY 0x82 /* Driver defined */
980 968
981/* 969/*
982 * ISP status entry - SCSI status byte bit definitions.
983 */
984#define SS_CHECK_CONDITION BIT_1
985#define SS_CONDITION_MET BIT_2
986#define SS_BUSY_CONDITION BIT_3
987#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3)
988
989/*
990 * ISP target entries - Option flags bit definitions. 970 * ISP target entries - Option flags bit definitions.
991 */ 971 */
992#define OF_ENABLE_TAG BIT_1 /* Tagged queue action enable */ 972#define OF_ENABLE_TAG BIT_1 /* Tagged queue action enable */
@@ -1082,10 +1062,6 @@ struct scsi_qla_host {
1082 uint32_t reset_active:1; /* 3 */ 1062 uint32_t reset_active:1; /* 3 */
1083 uint32_t abort_isp_active:1; /* 4 */ 1063 uint32_t abort_isp_active:1; /* 4 */
1084 uint32_t disable_risc_code_load:1; /* 5 */ 1064 uint32_t disable_risc_code_load:1; /* 5 */
1085 uint32_t enable_64bit_addressing:1; /* 6 */
1086 uint32_t in_reset:1; /* 7 */
1087 uint32_t ints_enabled:1;
1088 uint32_t ignore_nvram:1;
1089#ifdef __ia64__ 1065#ifdef __ia64__
1090 uint32_t use_pci_vchannel:1; 1066 uint32_t use_pci_vchannel:1;
1091#endif 1067#endif
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 659a5d63467d..fe0fce71adc7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -211,6 +211,138 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
211 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 211 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
212} 212}
213 213
214/* Scsi_Host attributes. */
215
216static ssize_t
217qla2x00_drvr_version_show(struct class_device *cdev, char *buf)
218{
219 return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
220}
221
222static ssize_t
223qla2x00_fw_version_show(struct class_device *cdev, char *buf)
224{
225 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
226 char fw_str[30];
227
228 return snprintf(buf, PAGE_SIZE, "%s\n",
229 ha->isp_ops.fw_version_str(ha, fw_str));
230}
231
232static ssize_t
233qla2x00_serial_num_show(struct class_device *cdev, char *buf)
234{
235 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
236 uint32_t sn;
237
238 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
239 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
240 sn % 100000);
241}
242
243static ssize_t
244qla2x00_isp_name_show(struct class_device *cdev, char *buf)
245{
246 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
247 return snprintf(buf, PAGE_SIZE, "%s\n", ha->brd_info->isp_name);
248}
249
250static ssize_t
251qla2x00_isp_id_show(struct class_device *cdev, char *buf)
252{
253 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
254 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
255 ha->product_id[0], ha->product_id[1], ha->product_id[2],
256 ha->product_id[3]);
257}
258
259static ssize_t
260qla2x00_model_name_show(struct class_device *cdev, char *buf)
261{
262 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
263 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
264}
265
266static ssize_t
267qla2x00_model_desc_show(struct class_device *cdev, char *buf)
268{
269 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
270 return snprintf(buf, PAGE_SIZE, "%s\n",
271 ha->model_desc ? ha->model_desc: "");
272}
273
274static ssize_t
275qla2x00_pci_info_show(struct class_device *cdev, char *buf)
276{
277 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
278 char pci_info[30];
279
280 return snprintf(buf, PAGE_SIZE, "%s\n",
281 ha->isp_ops.pci_info_str(ha, pci_info));
282}
283
284static ssize_t
285qla2x00_state_show(struct class_device *cdev, char *buf)
286{
287 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
288 int len = 0;
289
290 if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
291 atomic_read(&ha->loop_state) == LOOP_DEAD)
292 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
293 else if (atomic_read(&ha->loop_state) != LOOP_READY ||
294 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
295 test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
296 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
297 else {
298 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
299
300 switch (ha->current_topology) {
301 case ISP_CFG_NL:
302 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
303 break;
304 case ISP_CFG_FL:
305 len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
306 break;
307 case ISP_CFG_N:
308 len += snprintf(buf + len, PAGE_SIZE-len,
309 "N_Port to N_Port\n");
310 break;
311 case ISP_CFG_F:
312 len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
313 break;
314 default:
315 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
316 break;
317 }
318 }
319 return len;
320}
321
322static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
323 NULL);
324static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
325static CLASS_DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
326static CLASS_DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
327static CLASS_DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
328static CLASS_DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
329static CLASS_DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
330static CLASS_DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
331static CLASS_DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
332
333struct class_device_attribute *qla2x00_host_attrs[] = {
334 &class_device_attr_driver_version,
335 &class_device_attr_fw_version,
336 &class_device_attr_serial_num,
337 &class_device_attr_isp_name,
338 &class_device_attr_isp_id,
339 &class_device_attr_model_name,
340 &class_device_attr_model_desc,
341 &class_device_attr_pci_info,
342 &class_device_attr_state,
343 NULL,
344};
345
214/* Host attributes. */ 346/* Host attributes. */
215 347
216static void 348static void
@@ -304,10 +436,13 @@ struct fc_function_template qla2xxx_transport_functions = {
304 436
305 .show_host_node_name = 1, 437 .show_host_node_name = 1,
306 .show_host_port_name = 1, 438 .show_host_port_name = 1,
439 .show_host_supported_classes = 1,
440
307 .get_host_port_id = qla2x00_get_host_port_id, 441 .get_host_port_id = qla2x00_get_host_port_id,
308 .show_host_port_id = 1, 442 .show_host_port_id = 1,
309 443
310 .dd_fcrport_size = sizeof(struct fc_port *), 444 .dd_fcrport_size = sizeof(struct fc_port *),
445 .show_rport_supported_classes = 1,
311 446
312 .get_starget_node_name = qla2x00_get_starget_node_name, 447 .get_starget_node_name = qla2x00_get_starget_node_name,
313 .show_starget_node_name = 1, 448 .show_starget_node_name = 1,
@@ -329,4 +464,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
329 be64_to_cpu(*(uint64_t *)ha->init_cb->node_name); 464 be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
330 fc_host_port_name(ha->host) = 465 fc_host_port_name(ha->host) =
331 be64_to_cpu(*(uint64_t *)ha->init_cb->port_name); 466 be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
467 fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
332} 468}
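
The sysfs attributes added above follow the 2.6-era class_device attribute pattern: a show() routine formats at most PAGE_SIZE bytes into the supplied buffer, CLASS_DEVICE_ATTR() generates the class_device_attr_* object, and the NULL-terminated qla2x00_host_attrs[] table is handed to the SCSI midlayer through the host template's .shost_attrs field (see the qla_os.c hunk further below). A minimal stand-alone sketch of that pattern with invented names (example_info, example_info_show); it is illustrative only, not code from this patch:

#include <linux/device.h>
#include <scsi/scsi_host.h>

/* Invented example attribute: reports the SCSI host number. */
static ssize_t
example_info_show(struct class_device *cdev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);

	/* show() routines may write at most PAGE_SIZE bytes into buf. */
	return snprintf(buf, PAGE_SIZE, "host%u\n",
	    (unsigned int)shost->host_no);
}

/* Read-only (S_IRUGO) attribute; no store() routine. */
static CLASS_DEVICE_ATTR(example_info, S_IRUGO, example_info_show, NULL);

/* NULL-terminated table wired up via scsi_host_template.shost_attrs. */
static struct class_device_attribute *example_host_attrs[] = {
	&class_device_attr_example_info,
	NULL,
};
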
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index b8d90e97e017..9684e7a91fa9 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -81,6 +81,7 @@
81#define DEBUG2_3_11(x) do {x;} while (0); 81#define DEBUG2_3_11(x) do {x;} while (0);
82#define DEBUG2_9_10(x) do {x;} while (0); 82#define DEBUG2_9_10(x) do {x;} while (0);
83#define DEBUG2_11(x) do {x;} while (0); 83#define DEBUG2_11(x) do {x;} while (0);
84#define DEBUG2_13(x) do {x;} while (0);
84#else 85#else
85#define DEBUG2(x) do {} while (0); 86#define DEBUG2(x) do {} while (0);
86#endif 87#endif
@@ -169,8 +170,14 @@
169 170
170#if defined(QL_DEBUG_LEVEL_13) 171#if defined(QL_DEBUG_LEVEL_13)
171#define DEBUG13(x) do {x;} while (0) 172#define DEBUG13(x) do {x;} while (0)
173#if !defined(DEBUG2_13)
174#define DEBUG2_13(x) do {x;} while(0)
175#endif
172#else 176#else
173#define DEBUG13(x) do {} while (0) 177#define DEBUG13(x) do {} while (0)
178#if !defined(QL_DEBUG_LEVEL_2)
179#define DEBUG2_13(x) do {} while(0)
180#endif
174#endif 181#endif
175 182
176#if defined(QL_DEBUG_LEVEL_14) 183#if defined(QL_DEBUG_LEVEL_14)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1c6d366f4fad..b455c31405e4 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -214,6 +214,7 @@
214 * valid range of an N-PORT id is 0 through 0x7ef. 214 * valid range of an N-PORT id is 0 through 0x7ef.
215 */ 215 */
216#define NPH_LAST_HANDLE 0x7ef 216#define NPH_LAST_HANDLE 0x7ef
217#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
217#define NPH_SNS 0x7fc /* FFFFFC */ 218#define NPH_SNS 0x7fc /* FFFFFC */
218#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */ 219#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
219#define NPH_F_PORT 0x7fe /* FFFFFE */ 220#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -630,6 +631,7 @@ typedef struct {
630#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */ 631#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */
631#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */ 632#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */
632#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */ 633#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */
634#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware. */
633#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */ 635#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */
634#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */ 636#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */
635#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */ 637#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */
@@ -913,7 +915,7 @@ typedef struct {
913 * MSB BIT 1 = 915 * MSB BIT 1 =
914 * MSB BIT 2 = 916 * MSB BIT 2 =
915 * MSB BIT 3 = 917 * MSB BIT 3 =
916 * MSB BIT 4 = 918 * MSB BIT 4 = LED mode
917 * MSB BIT 5 = enable 50 ohm termination 919 * MSB BIT 5 = enable 50 ohm termination
918 * MSB BIT 6 = Data Rate (2300 only) 920 * MSB BIT 6 = Data Rate (2300 only)
919 * MSB BIT 7 = Data Rate (2300 only) 921 * MSB BIT 7 = Data Rate (2300 only)
@@ -1035,7 +1037,7 @@ typedef struct {
1035 * MSB BIT 1 = 1037 * MSB BIT 1 =
1036 * MSB BIT 2 = 1038 * MSB BIT 2 =
1037 * MSB BIT 3 = 1039 * MSB BIT 3 =
1038 * MSB BIT 4 = 1040 * MSB BIT 4 = LED mode
1039 * MSB BIT 5 = enable 50 ohm termination 1041 * MSB BIT 5 = enable 50 ohm termination
1040 * MSB BIT 6 = Data Rate (2300 only) 1042 * MSB BIT 6 = Data Rate (2300 only)
1041 * MSB BIT 7 = Data Rate (2300 only) 1043 * MSB BIT 7 = Data Rate (2300 only)
@@ -1131,10 +1133,7 @@ typedef struct {
1131 1133
1132 uint8_t link_down_timeout; 1134 uint8_t link_down_timeout;
1133 1135
1134 uint8_t adapter_id_0[4]; 1136 uint8_t adapter_id[16];
1135 uint8_t adapter_id_1[4];
1136 uint8_t adapter_id_2[4];
1137 uint8_t adapter_id_3[4];
1138 1137
1139 uint8_t alt1_boot_node_name[WWN_SIZE]; 1138 uint8_t alt1_boot_node_name[WWN_SIZE];
1140 uint16_t alt1_boot_lun_number; 1139 uint16_t alt1_boot_lun_number;
@@ -1673,6 +1672,7 @@ typedef struct fc_port {
1673 uint8_t cur_path; /* current path id */ 1672 uint8_t cur_path; /* current path id */
1674 1673
1675 struct fc_rport *rport; 1674 struct fc_rport *rport;
1675 u32 supported_classes;
1676} fc_port_t; 1676} fc_port_t;
1677 1677
1678/* 1678/*
@@ -1727,6 +1727,8 @@ typedef struct fc_port {
1727 1727
1728#define CT_REJECT_RESPONSE 0x8001 1728#define CT_REJECT_RESPONSE 0x8001
1729#define CT_ACCEPT_RESPONSE 0x8002 1729#define CT_ACCEPT_RESPONSE 0x8002
1730#define CT_REASON_CANNOT_PERFORM 0x09
1731#define CT_EXPL_ALREADY_REGISTERED 0x10
1730 1732
1731#define NS_N_PORT_TYPE 0x01 1733#define NS_N_PORT_TYPE 0x01
1732#define NS_NL_PORT_TYPE 0x02 1734#define NS_NL_PORT_TYPE 0x02
@@ -1768,6 +1770,100 @@ typedef struct fc_port {
1768#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255) 1770#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
1769#define RSNN_NN_RSP_SIZE 16 1771#define RSNN_NN_RSP_SIZE 16
1770 1772
1773/*
1774 * HBA attribute types.
1775 */
1776#define FDMI_HBA_ATTR_COUNT 9
1777#define FDMI_HBA_NODE_NAME 1
1778#define FDMI_HBA_MANUFACTURER 2
1779#define FDMI_HBA_SERIAL_NUMBER 3
1780#define FDMI_HBA_MODEL 4
1781#define FDMI_HBA_MODEL_DESCRIPTION 5
1782#define FDMI_HBA_HARDWARE_VERSION 6
1783#define FDMI_HBA_DRIVER_VERSION 7
1784#define FDMI_HBA_OPTION_ROM_VERSION 8
1785#define FDMI_HBA_FIRMWARE_VERSION 9
1786#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
1787#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
1788
1789struct ct_fdmi_hba_attr {
1790 uint16_t type;
1791 uint16_t len;
1792 union {
1793 uint8_t node_name[WWN_SIZE];
1794 uint8_t manufacturer[32];
1795 uint8_t serial_num[8];
1796 uint8_t model[16];
1797 uint8_t model_desc[80];
1798 uint8_t hw_version[16];
1799 uint8_t driver_version[32];
1800 uint8_t orom_version[16];
1801 uint8_t fw_version[16];
1802 uint8_t os_version[128];
1803 uint8_t max_ct_len[4];
1804 } a;
1805};
1806
1807struct ct_fdmi_hba_attributes {
1808 uint32_t count;
1809 struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
1810};
1811
1812/*
1813 * Port attribute types.
1814 */
1815#define FDMI_PORT_ATTR_COUNT 5
1816#define FDMI_PORT_FC4_TYPES 1
1817#define FDMI_PORT_SUPPORT_SPEED 2
1818#define FDMI_PORT_CURRENT_SPEED 3
1819#define FDMI_PORT_MAX_FRAME_SIZE 4
1820#define FDMI_PORT_OS_DEVICE_NAME 5
1821#define FDMI_PORT_HOST_NAME 6
1822
1823struct ct_fdmi_port_attr {
1824 uint16_t type;
1825 uint16_t len;
1826 union {
1827 uint8_t fc4_types[32];
1828 uint32_t sup_speed;
1829 uint32_t cur_speed;
1830 uint32_t max_frame_size;
1831 uint8_t os_dev_name[32];
1832 uint8_t host_name[32];
1833 } a;
1834};
1835
1836/*
1837 * Port Attribute Block.
1838 */
1839struct ct_fdmi_port_attributes {
1840 uint32_t count;
1841 struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
1842};
1843
1844/* FDMI definitions. */
1845#define GRHL_CMD 0x100
1846#define GHAT_CMD 0x101
1847#define GRPL_CMD 0x102
1848#define GPAT_CMD 0x110
1849
1850#define RHBA_CMD 0x200
1851#define RHBA_RSP_SIZE 16
1852
1853#define RHAT_CMD 0x201
1854#define RPRT_CMD 0x210
1855
1856#define RPA_CMD 0x211
1857#define RPA_RSP_SIZE 16
1858
1859#define DHBA_CMD 0x300
1860#define DHBA_REQ_SIZE (16 + 8)
1861#define DHBA_RSP_SIZE 16
1862
1863#define DHAT_CMD 0x301
1864#define DPRT_CMD 0x310
1865#define DPA_CMD 0x311
1866
1771/* CT command header -- request/response common fields */ 1867/* CT command header -- request/response common fields */
1772struct ct_cmd_hdr { 1868struct ct_cmd_hdr {
1773 uint8_t revision; 1869 uint8_t revision;
@@ -1825,6 +1921,43 @@ struct ct_sns_req {
1825 uint8_t name_len; 1921 uint8_t name_len;
1826 uint8_t sym_node_name[255]; 1922 uint8_t sym_node_name[255];
1827 } rsnn_nn; 1923 } rsnn_nn;
1924
1925 struct {
1926 uint8_t hba_indentifier[8];
1927 } ghat;
1928
1929 struct {
1930 uint8_t hba_identifier[8];
1931 uint32_t entry_count;
1932 uint8_t port_name[8];
1933 struct ct_fdmi_hba_attributes attrs;
1934 } rhba;
1935
1936 struct {
1937 uint8_t hba_identifier[8];
1938 struct ct_fdmi_hba_attributes attrs;
1939 } rhat;
1940
1941 struct {
1942 uint8_t port_name[8];
1943 struct ct_fdmi_port_attributes attrs;
1944 } rpa;
1945
1946 struct {
1947 uint8_t port_name[8];
1948 } dhba;
1949
1950 struct {
1951 uint8_t port_name[8];
1952 } dhat;
1953
1954 struct {
1955 uint8_t port_name[8];
1956 } dprt;
1957
1958 struct {
1959 uint8_t port_name[8];
1960 } dpa;
1828 } req; 1961 } req;
1829}; 1962};
1830 1963
@@ -1882,6 +2015,12 @@ struct ct_sns_rsp {
1882 struct { 2015 struct {
1883 uint8_t fc4_types[32]; 2016 uint8_t fc4_types[32];
1884 } gft_id; 2017 } gft_id;
2018
2019 struct {
2020 uint32_t entry_count;
2021 uint8_t port_name[8];
2022 struct ct_fdmi_hba_attributes attrs;
2023 } ghat;
1885 } rsp; 2024 } rsp;
1886}; 2025};
1887 2026
@@ -2032,6 +2171,8 @@ struct isp_operations {
2032 uint16_t (*calc_req_entries) (uint16_t); 2171 uint16_t (*calc_req_entries) (uint16_t);
2033 void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); 2172 void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
2034 void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); 2173 void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
2174 void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
2175 uint32_t);
2035 2176
2036 uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *, 2177 uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
2037 uint32_t, uint32_t); 2178 uint32_t, uint32_t);
@@ -2111,6 +2252,7 @@ typedef struct scsi_qla_host {
2111#define IOCTL_ERROR_RECOVERY 23 2252#define IOCTL_ERROR_RECOVERY 23
2112#define LOOP_RESET_NEEDED 24 2253#define LOOP_RESET_NEEDED 24
2113#define BEACON_BLINK_NEEDED 25 2254#define BEACON_BLINK_NEEDED 25
2255#define REGISTER_FDMI_NEEDED 26
2114 2256
2115 uint32_t device_flags; 2257 uint32_t device_flags;
2116#define DFLG_LOCAL_DEVICES BIT_0 2258#define DFLG_LOCAL_DEVICES BIT_0
@@ -2204,6 +2346,7 @@ typedef struct scsi_qla_host {
2204 int port_down_retry_count; 2346 int port_down_retry_count;
2205 uint8_t mbx_count; 2347 uint8_t mbx_count;
2206 uint16_t last_loop_id; 2348 uint16_t last_loop_id;
2349 uint16_t mgmt_svr_loop_id;
2207 2350
2208 uint32_t login_retry_count; 2351 uint32_t login_retry_count;
2209 2352
@@ -2318,6 +2461,7 @@ typedef struct scsi_qla_host {
2318 uint8_t model_number[16+1]; 2461 uint8_t model_number[16+1];
2319#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" 2462#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
2320 char *model_desc; 2463 char *model_desc;
2464 uint8_t adapter_id[16+1];
2321 2465
2322 uint8_t *node_name; 2466 uint8_t *node_name;
2323 uint8_t *port_name; 2467 uint8_t *port_name;
@@ -2377,6 +2521,7 @@ typedef struct scsi_qla_host {
2377#define QLA_SUSPENDED 0x106 2521#define QLA_SUSPENDED 0x106
2378#define QLA_BUSY 0x107 2522#define QLA_BUSY 0x107
2379#define QLA_RSCNS_HANDLED 0x108 2523#define QLA_RSCNS_HANDLED 0x108
2524#define QLA_ALREADY_REGISTERED 0x109
2380 2525
2381/* 2526/*
2382* Stat info for all adapters 2527* Stat info for all adapters
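
The ct_fdmi_hba_attr and ct_fdmi_port_attr structures added above describe big-endian TLV entries on the wire: a 16-bit attribute type, a 16-bit length that covers the 4-byte header plus the payload padded to a multiple of four, then the value itself. The union is only a build-time convenience; the real entry size is whatever ends up in the len field. A small user-space sketch of walking such entries under those assumptions (dump_fdmi_attr() and the hand-built buffer are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs(): the fields are big-endian */

struct fdmi_attr_hdr {
	uint16_t type;		/* e.g. FDMI_HBA_MANUFACTURER == 2 */
	uint16_t len;		/* 4-byte header + payload padded to 4 bytes */
};

/* Print one attribute and return a pointer to the next one. */
static const uint8_t *dump_fdmi_attr(const uint8_t *p)
{
	struct fdmi_attr_hdr hdr;

	memcpy(&hdr, p, sizeof(hdr));
	printf("type=%u len=%u\n", ntohs(hdr.type), ntohs(hdr.len));

	return p + ntohs(hdr.len);	/* len includes the header itself */
}

int main(void)
{
	/* One hand-built attribute: type 2, len 24, manufacturer string. */
	uint8_t buf[24] = { 0x00, 0x02, 0x00, 0x18 };

	memcpy(buf + 4, "QLogic Corporation", 18);
	dump_fdmi_attr(buf);		/* prints: type=2 len=24 */
	return 0;
}
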
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 665c203e0675..1ed32e7b5472 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -79,6 +79,7 @@ extern int ql2xplogiabsentdevice;
79extern int ql2xenablezio; 79extern int ql2xenablezio;
80extern int ql2xintrdelaytimer; 80extern int ql2xintrdelaytimer;
81extern int ql2xloginretrycount; 81extern int ql2xloginretrycount;
82extern int ql2xfdmienable;
82 83
83extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 84extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
84 85
@@ -147,9 +148,6 @@ qla2x00_abort_target(fc_port_t *);
147#endif 148#endif
148 149
149extern int 150extern int
150qla2x00_target_reset(scsi_qla_host_t *, struct fc_port *);
151
152extern int
153qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 151qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
154 uint8_t *, uint16_t *); 152 uint8_t *, uint16_t *);
155 153
@@ -215,6 +213,9 @@ qla2x00_get_serdes_params(scsi_qla_host_t *, uint16_t *, uint16_t *,
215extern int 213extern int
216qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); 214qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
217 215
216extern int
217qla2x00_stop_firmware(scsi_qla_host_t *);
218
218/* 219/*
219 * Global Function Prototypes in qla_isr.c source file. 220 * Global Function Prototypes in qla_isr.c source file.
220 */ 221 */
@@ -269,6 +270,9 @@ extern int qla2x00_rft_id(scsi_qla_host_t *);
269extern int qla2x00_rff_id(scsi_qla_host_t *); 270extern int qla2x00_rff_id(scsi_qla_host_t *);
270extern int qla2x00_rnn_id(scsi_qla_host_t *); 271extern int qla2x00_rnn_id(scsi_qla_host_t *);
271extern int qla2x00_rsnn_nn(scsi_qla_host_t *); 272extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
273extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
274extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
275extern int qla2x00_fdmi_register(scsi_qla_host_t *);
272 276
273/* 277/*
274 * Global Function Prototypes in qla_rscn.c source file. 278 * Global Function Prototypes in qla_rscn.c source file.
@@ -289,6 +293,8 @@ extern void qla2x00_cancel_io_descriptors(scsi_qla_host_t *);
289/* 293/*
290 * Global Function Prototypes in qla_attr.c source file. 294 * Global Function Prototypes in qla_attr.c source file.
291 */ 295 */
296struct class_device_attribute;
297extern struct class_device_attribute *qla2x00_host_attrs[];
292struct fc_function_template; 298struct fc_function_template;
293extern struct fc_function_template qla2xxx_transport_functions; 299extern struct fc_function_template qla2xxx_transport_functions;
294extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); 300extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 31ce4f62da13..e7b138c2e339 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1099,3 +1099,567 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
1099 1099
1100 return (rval); 1100 return (rval);
1101} 1101}
1102
1103/**
1104 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1105 * @ha: HA context
1106 *
1107 * Returns 0 on success.
1108 */
1109static int
1110qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
1111{
1112 int ret;
1113 uint16_t mb[MAILBOX_REGISTER_COUNT];
1114
1115 ret = QLA_SUCCESS;
1116 if (ha->flags.management_server_logged_in)
1117 return ret;
1118
1119 ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1120 mb, BIT_1);
1121 if (mb[0] != MBS_COMMAND_COMPLETE) {
1122 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1123 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
1124 __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
1125 mb[2], mb[6], mb[7]));
1126 ret = QLA_FUNCTION_FAILED;
1127 } else
1128 ha->flags.management_server_logged_in = 1;
1129
1130 return ret;
1131}
1132
1133/**
1134 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1135 * @ha: HA context
1136 * @req_size: request size in bytes
1137 * @rsp_size: response size in bytes
1138 *
1139 * Returns a pointer to the @ha's ms_iocb.
1140 */
1141void *
1142qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1143 uint32_t rsp_size)
1144{
1145 ms_iocb_entry_t *ms_pkt;
1146
1147 ms_pkt = ha->ms_iocb;
1148 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1149
1150 ms_pkt->entry_type = MS_IOCB_TYPE;
1151 ms_pkt->entry_count = 1;
1152 SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
1153 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
1154 ms_pkt->timeout = __constant_cpu_to_le16(59);
1155 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1156 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
1157 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1158 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1159
1160 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1161 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1162 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1163
1164 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1165 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1166 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
1167
1168 return ms_pkt;
1169}
1170
1171/**
1172 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1173 * @ha: HA context
1174 * @req_size: request size in bytes
1175 * @rsp_size: response size in bytes
1176 *
1177 * Returns a pointer to the @ha's ms_iocb.
1178 */
1179void *
1180qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1181 uint32_t rsp_size)
1182{
1183 struct ct_entry_24xx *ct_pkt;
1184
1185 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1186 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1187
1188 ct_pkt->entry_type = CT_IOCB_TYPE;
1189 ct_pkt->entry_count = 1;
1190 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
1191 ct_pkt->timeout = __constant_cpu_to_le16(59);
1192 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1193 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
1194 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1195 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1196
1197 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1198 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1199 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1200
1201 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1202 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1203 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1204
1205 return ct_pkt;
1206}
1207
1208static inline ms_iocb_entry_t *
1209qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
1210{
1211 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1212 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1213
1214 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1215 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1216 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1217 } else {
1218 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1219 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1220 }
1221
1222 return ms_pkt;
1223}
1224
1225/**
1226 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1227 * @ct_req: CT request buffer
1228 * @cmd: GS command
1229 * @rsp_size: response size in bytes
1230 *
1231 * Returns a pointer to the initialized @ct_req.
1232 */
1233static inline struct ct_sns_req *
1234qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
1235 uint16_t rsp_size)
1236{
1237 memset(ct_req, 0, sizeof(struct ct_sns_pkt));
1238
1239 ct_req->header.revision = 0x01;
1240 ct_req->header.gs_type = 0xFA;
1241 ct_req->header.gs_subtype = 0x10;
1242 ct_req->command = cpu_to_be16(cmd);
1243 ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1244
1245 return ct_req;
1246}
1247
1248/**
1249 * qla2x00_fdmi_rhba() - Register HBA attributes with the fabric management server.
1250 * @ha: HA context
1251 *
1252 * Returns 0 on success.
1253 */
1254static int
1255qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1256{
1257 int rval, alen;
1258 uint32_t size, sn;
1259
1260 ms_iocb_entry_t *ms_pkt;
1261 struct ct_sns_req *ct_req;
1262 struct ct_sns_rsp *ct_rsp;
1263 uint8_t *entries;
1264 struct ct_fdmi_hba_attr *eiter;
1265
1266 /* Issue RHBA */
1267 /* Prepare common MS IOCB */
1268 /* Request size adjusted after CT preparation */
1269 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
1270
1271 /* Prepare CT request */
1272 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
1273 RHBA_RSP_SIZE);
1274 ct_rsp = &ha->ct_sns->p.rsp;
1275
1276 /* Prepare FDMI command arguments -- attribute block, attributes. */
1277 memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
1278 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
1279 memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
1280 size = 2 * WWN_SIZE + 4 + 4;
1281
1282 /* Attributes */
1283 ct_req->req.rhba.attrs.count =
1284 __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1285 entries = ct_req->req.rhba.hba_identifier;
1286
1287 /* Nodename. */
1288 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1289 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
1290 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
1291 memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
1292 size += 4 + WWN_SIZE;
1293
1294 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
1295 __func__, ha->host_no,
1296 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
1297 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
1298 eiter->a.node_name[6], eiter->a.node_name[7]));
1299
1300 /* Manufacturer. */
1301 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1302 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
1303 strcpy(eiter->a.manufacturer, "QLogic Corporation");
1304 alen = strlen(eiter->a.manufacturer);
1305 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1306 eiter->len = cpu_to_be16(4 + alen);
1307 size += 4 + alen;
1308
1309 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
1310 eiter->a.manufacturer));
1311
1312 /* Serial number. */
1313 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1314 eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1315 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1316 sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
1317 alen = strlen(eiter->a.serial_num);
1318 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1319 eiter->len = cpu_to_be16(4 + alen);
1320 size += 4 + alen;
1321
1322 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
1323 eiter->a.serial_num));
1324
1325 /* Model name. */
1326 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1327 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
1328 strcpy(eiter->a.model, ha->model_number);
1329 alen = strlen(eiter->a.model);
1330 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1331 eiter->len = cpu_to_be16(4 + alen);
1332 size += 4 + alen;
1333
1334 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
1335 eiter->a.model));
1336
1337 /* Model description. */
1338 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1339 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1340 if (ha->model_desc)
1341 strncpy(eiter->a.model_desc, ha->model_desc, 80);
1342 alen = strlen(eiter->a.model_desc);
1343 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1344 eiter->len = cpu_to_be16(4 + alen);
1345 size += 4 + alen;
1346
1347 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
1348 eiter->a.model_desc));
1349
1350 /* Hardware version. */
1351 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1352 eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1353 strcpy(eiter->a.hw_version, ha->adapter_id);
1354 alen = strlen(eiter->a.hw_version);
1355 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1356 eiter->len = cpu_to_be16(4 + alen);
1357 size += 4 + alen;
1358
1359 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
1360 eiter->a.hw_version));
1361
1362 /* Driver version. */
1363 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1364 eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1365 strcpy(eiter->a.driver_version, qla2x00_version_str);
1366 alen = strlen(eiter->a.driver_version);
1367 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1368 eiter->len = cpu_to_be16(4 + alen);
1369 size += 4 + alen;
1370
1371 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
1372 eiter->a.driver_version));
1373
1374 /* Option ROM version. */
1375 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1376 eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1377 strcpy(eiter->a.orom_version, "0.00");
1378 alen = strlen(eiter->a.orom_version);
1379 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1380 eiter->len = cpu_to_be16(4 + alen);
1381 size += 4 + alen;
1382
1383 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
1384 eiter->a.orom_version));
1385
1386 /* Firmware version */
1387 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1388 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1389 ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
1390 alen = strlen(eiter->a.fw_version);
1391 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1392 eiter->len = cpu_to_be16(4 + alen);
1393 size += 4 + alen;
1394
1395 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
1396 eiter->a.fw_version));
1397
1398 /* Update MS request size. */
1399 qla2x00_update_ms_fdmi_iocb(ha, size + 16);
1400
1401 DEBUG13(printk("%s(%ld): RHBA identifier="
1402 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1403 ha->host_no, ct_req->req.rhba.hba_identifier[0],
1404 ct_req->req.rhba.hba_identifier[1],
1405 ct_req->req.rhba.hba_identifier[2],
1406 ct_req->req.rhba.hba_identifier[3],
1407 ct_req->req.rhba.hba_identifier[4],
1408 ct_req->req.rhba.hba_identifier[5],
1409 ct_req->req.rhba.hba_identifier[6],
1410 ct_req->req.rhba.hba_identifier[7], size));
1411 DEBUG13(qla2x00_dump_buffer(entries, size));
1412
1413 /* Execute MS IOCB */
1414 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1415 sizeof(ms_iocb_entry_t));
1416 if (rval != QLA_SUCCESS) {
1417 /*EMPTY*/
1418 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
1419 ha->host_no, rval));
1420 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
1421 QLA_SUCCESS) {
1422 rval = QLA_FUNCTION_FAILED;
1423 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1424 ct_rsp->header.explanation_code ==
1425 CT_EXPL_ALREADY_REGISTERED) {
1426 DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
1427 __func__, ha->host_no));
1428 rval = QLA_ALREADY_REGISTERED;
1429 }
1430 } else {
1431 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
1432 ha->host_no));
1433 }
1434
1435 return rval;
1436}
1437
1438/**
1439 * qla2x00_fdmi_dhba() - Deregister HBA attributes from the fabric management server.
1440 * @ha: HA context
1441 *
1442 * Returns 0 on success.
1443 */
1444static int
1445qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1446{
1447 int rval;
1448
1449 ms_iocb_entry_t *ms_pkt;
1450 struct ct_sns_req *ct_req;
1451 struct ct_sns_rsp *ct_rsp;
1452
1453 /* Issue DHBA */
1454 /* Prepare common MS IOCB */
1455 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
1456 DHBA_RSP_SIZE);
1457
1458 /* Prepare CT request */
1459 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, DHBA_CMD,
1460 DHBA_RSP_SIZE);
1461 ct_rsp = &ha->ct_sns->p.rsp;
1462
1463 /* Prepare FDMI command arguments -- portname. */
1464 memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
1465
1466 DEBUG13(printk("%s(%ld): DHBA portname="
1467 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
1468 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1469 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1470 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1471 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
1472
1473 /* Execute MS IOCB */
1474 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1475 sizeof(ms_iocb_entry_t));
1476 if (rval != QLA_SUCCESS) {
1477 /*EMPTY*/
1478 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
1479 ha->host_no, rval));
1480 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
1481 QLA_SUCCESS) {
1482 rval = QLA_FUNCTION_FAILED;
1483 } else {
1484 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
1485 ha->host_no));
1486 }
1487
1488 return rval;
1489}
1490
1491/**
1492 * qla2x00_fdmi_rpa() - Register port attributes with the fabric management server.
1493 * @ha: HA context
1494 *
1495 * Returns 0 on success.
1496 */
1497static int
1498qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1499{
1500 int rval, alen;
1501 uint32_t size, max_frame_size;
1502
1503 ms_iocb_entry_t *ms_pkt;
1504 struct ct_sns_req *ct_req;
1505 struct ct_sns_rsp *ct_rsp;
1506 uint8_t *entries;
1507 struct ct_fdmi_port_attr *eiter;
1508 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1509
1510 /* Issue RPA */
1511 /* Prepare common MS IOCB */
1512 /* Request size adjusted after CT preparation */
1513 ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
1514
1515 /* Prepare CT request */
1516 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
1517 RPA_RSP_SIZE);
1518 ct_rsp = &ha->ct_sns->p.rsp;
1519
1520 /* Prepare FDMI command arguments -- attribute block, attributes. */
1521 memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
1522 size = WWN_SIZE + 4;
1523
1524 /* Attributes */
1525 ct_req->req.rpa.attrs.count =
1526 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1527 entries = ct_req->req.rpa.port_name;
1528
1529 /* FC4 types. */
1530 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1531 eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
1532 eiter->len = __constant_cpu_to_be16(4 + 32);
1533 eiter->a.fc4_types[2] = 0x01;
1534 size += 4 + 32;
1535
1536 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
1537 eiter->a.fc4_types[2], eiter->a.fc4_types[1]));
1538
1539 /* Supported speed. */
1540 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1541 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1542 eiter->len = __constant_cpu_to_be16(4 + 4);
1543 if (IS_QLA25XX(ha))
1544 eiter->a.sup_speed = __constant_cpu_to_be32(4);
1545 else if (IS_QLA24XX(ha))
1546 eiter->a.sup_speed = __constant_cpu_to_be32(8);
1547 else if (IS_QLA23XX(ha))
1548 eiter->a.sup_speed = __constant_cpu_to_be32(2);
1549 else
1550 eiter->a.sup_speed = __constant_cpu_to_be32(1);
1551 size += 4 + 4;
1552
1553 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
1554 eiter->a.sup_speed));
1555
1556 /* Current speed. */
1557 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1558 eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1559 eiter->len = __constant_cpu_to_be16(4 + 4);
1560 switch (ha->link_data_rate) {
1561 case 0:
1562 eiter->a.cur_speed = __constant_cpu_to_be32(1);
1563 break;
1564 case 1:
1565 eiter->a.cur_speed = __constant_cpu_to_be32(2);
1566 break;
1567 case 3:
1568 eiter->a.cur_speed = __constant_cpu_to_be32(8);
1569 break;
1570 case 4:
1571 eiter->a.cur_speed = __constant_cpu_to_be32(4);
1572 break;
1573 }
1574 size += 4 + 4;
1575
1576 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
1577 eiter->a.cur_speed));
1578
1579 /* Max frame size. */
1580 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1581 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1582 eiter->len = __constant_cpu_to_be16(4 + 4);
1583 max_frame_size = IS_QLA24XX(ha) || IS_QLA25XX(ha) ?
1584 (uint32_t) icb24->frame_payload_size:
1585 (uint32_t) ha->init_cb->frame_payload_size;
1586 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1587 size += 4 + 4;
1588
1589 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
1590 eiter->a.max_frame_size));
1591
1592 /* OS device name. */
1593 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1594 eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1595 sprintf(eiter->a.os_dev_name, "/proc/scsi/qla2xxx/%ld", ha->host_no);
1596 alen = strlen(eiter->a.os_dev_name);
1597 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1598 eiter->len = cpu_to_be16(4 + alen);
1599 size += 4 + alen;
1600
1601 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
1602 eiter->a.os_dev_name));
1603
1604 /* Update MS request size. */
1605 qla2x00_update_ms_fdmi_iocb(ha, size + 16);
1606
1607 DEBUG13(printk("%s(%ld): RPA portname="
1608 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1609 ha->host_no, ct_req->req.rpa.port_name[0],
1610 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
1611 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
1612 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
1613 ct_req->req.rpa.port_name[7], size));
1614 DEBUG13(qla2x00_dump_buffer(entries, size));
1615
1616 /* Execute MS IOCB */
1617 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
1618 sizeof(ms_iocb_entry_t));
1619 if (rval != QLA_SUCCESS) {
1620 /*EMPTY*/
1621 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
1622 ha->host_no, rval));
1623 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
1624 QLA_SUCCESS) {
1625 rval = QLA_FUNCTION_FAILED;
1626 } else {
1627 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
1628 ha->host_no));
1629 }
1630
1631 return rval;
1632}
1633
1634/**
1635 * qla2x00_fdmi_register() - Perform FDMI registration with the fabric management server.
1636 * @ha: HA context
1637 *
1638 * Returns 0 on success.
1639 */
1640int
1641qla2x00_fdmi_register(scsi_qla_host_t *ha)
1642{
1643 int rval;
1644
1645 rval = qla2x00_mgmt_svr_login(ha);
1646 if (rval)
1647 return rval;
1648
1649 rval = qla2x00_fdmi_rhba(ha);
1650 if (rval) {
1651 if (rval != QLA_ALREADY_REGISTERED)
1652 return rval;
1653
1654 rval = qla2x00_fdmi_dhba(ha);
1655 if (rval)
1656 return rval;
1657
1658 rval = qla2x00_fdmi_rhba(ha);
1659 if (rval)
1660 return rval;
1661 }
1662 rval = qla2x00_fdmi_rpa(ha);
1663
1664 return rval;
1665}
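
A recurring detail in the RHBA and RPA builders above is the length arithmetic alen += (alen & 3) ? (4 - (alen & 3)) : 4; string payloads are rounded up to the next 4-byte boundary, and a string whose length is already a multiple of four still gains a full extra word before the 4-byte type/length header is added, so "QLogic Corporation" (18 bytes) pads to 20 and is recorded with len 24. qla2x00_fdmi_register() then chains the pieces: management-server login, RHBA, and on an already-registered CT reject a DHBA followed by a second RHBA, before finishing with RPA. A tiny stand-alone check of the padding rule (fdmi_attr_len() is an invented helper, not driver code):

#include <assert.h>

/* Invented helper reproducing the padding rule used by the RHBA/RPA code. */
static unsigned int fdmi_attr_len(unsigned int alen)
{
	alen += (alen & 3) ? (4 - (alen & 3)) : 4;	/* pad the payload */
	return 4 + alen;				/* add type/length header */
}

int main(void)
{
	assert(fdmi_attr_len(18) == 24);	/* "QLogic Corporation" */
	assert(fdmi_attr_len(16) == 24);	/* already aligned: still +4 */
	assert(fdmi_attr_len(5)  == 12);	/* 5 -> 8 payload -> 12 total */
	return 0;
}
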
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a6d2559217cd..c619583e646b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -88,6 +88,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
88 ha->mbx_flags = 0; 88 ha->mbx_flags = 0;
89 ha->isp_abort_cnt = 0; 89 ha->isp_abort_cnt = 0;
90 ha->beacon_blink_led = 0; 90 ha->beacon_blink_led = 0;
91 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
91 92
92 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 93 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
93 rval = ha->isp_ops.pci_config(ha); 94 rval = ha->isp_ops.pci_config(ha);
@@ -1563,7 +1564,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1563 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0); 1564 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
1564 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); 1565 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
1565 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); 1566 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
1566 ha->flags.enable_led_scheme = ((nv->efi_parameters & BIT_3) ? 1 : 0); 1567 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
1567 1568
1568 ha->operating_mode = 1569 ha->operating_mode =
1569 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; 1570 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -1697,6 +1698,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
1697 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX; 1698 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
1698 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1699 atomic_set(&fcport->state, FCS_UNCONFIGURED);
1699 fcport->flags = FCF_RLC_SUPPORT; 1700 fcport->flags = FCF_RLC_SUPPORT;
1701 fcport->supported_classes = FC_COS_UNSPECIFIED;
1700 1702
1701 return (fcport); 1703 return (fcport);
1702} 1704}
@@ -1898,7 +1900,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
1898 continue; 1900 continue;
1899 1901
1900 /* Bypass if not same domain and area of adapter. */ 1902 /* Bypass if not same domain and area of adapter. */
1901 if (area != ha->d_id.b.area || domain != ha->d_id.b.domain) 1903 if (area && domain &&
1904 (area != ha->d_id.b.area || domain != ha->d_id.b.domain))
1902 continue; 1905 continue;
1903 1906
1904 /* Bypass invalid local loop ID. */ 1907 /* Bypass invalid local loop ID. */
@@ -2075,6 +2078,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2075 return; 2078 return;
2076 } 2079 }
2077 rport->dd_data = fcport; 2080 rport->dd_data = fcport;
2081 rport->supported_classes = fcport->supported_classes;
2078 2082
2079 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2083 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2080 if (fcport->port_type == FCT_INITIATOR) 2084 if (fcport->port_type == FCT_INITIATOR)
@@ -2130,6 +2134,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2130 return (QLA_SUCCESS); 2134 return (QLA_SUCCESS);
2131 } 2135 }
2132 do { 2136 do {
2137 /* FDMI support. */
2138 if (ql2xfdmienable &&
2139 test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags))
2140 qla2x00_fdmi_register(ha);
2141
2133 /* Ensure we are logged into the SNS. */ 2142 /* Ensure we are logged into the SNS. */
2134 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) 2143 if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
2135 loop_id = NPH_SNS; 2144 loop_id = NPH_SNS;
@@ -2392,6 +2401,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2392 if (new_fcport->d_id.b24 == ha->d_id.b24) 2401 if (new_fcport->d_id.b24 == ha->d_id.b24)
2393 continue; 2402 continue;
2394 2403
2404 /* Bypass if same domain and area of adapter. */
2405 if (((new_fcport->d_id.b24 & 0xffff00) ==
2406 (ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2407 ISP_CFG_FL)
2408 continue;
2409
2395 /* Bypass reserved domain fields. */ 2410 /* Bypass reserved domain fields. */
2396 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0) 2411 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
2397 continue; 2412 continue;
@@ -2794,6 +2809,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
2794 } 2809 }
2795 } 2810 }
2796 2811
2812 if (mb[10] & BIT_0)
2813 fcport->supported_classes |= FC_COS_CLASS2;
2814 if (mb[10] & BIT_1)
2815 fcport->supported_classes |= FC_COS_CLASS3;
2816
2797 rval = QLA_SUCCESS; 2817 rval = QLA_SUCCESS;
2798 break; 2818 break;
2799 } else if (mb[0] == MBS_LOOP_ID_USED) { 2819 } else if (mb[0] == MBS_LOOP_ID_USED) {
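
The class-of-service plumbing added above (and in the qla_mbx.c hunk further below) treats mb[10] as a small bit mask returned from fabric login: bit 0 means the remote port accepted Class 2 service, bit 1 Class 3; the result lands in fcport->supported_classes and is copied to rport->supported_classes for the FC transport. A minimal sketch of that translation, assuming the FC_COS_* flags from scsi_transport_fc.h; cos_from_mb10() is an invented name, and the bit positions mirror the BIT_0/BIT_1 usage shown in the hunks:

#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>

/* Invented helper: translate login mb[10] bits into FC transport
 * class-of-service flags, as done inline in qla2x00_fabric_login(). */
static u32 cos_from_mb10(u16 mb10)
{
	u32 classes = FC_COS_UNSPECIFIED;

	if (mb10 & (1 << 0))		/* BIT_0: Class 2 supported */
		classes |= FC_COS_CLASS2;
	if (mb10 & (1 << 1))		/* BIT_1: Class 3 supported */
		classes |= FC_COS_CLASS3;

	return classes;
}
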
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ebdc3c54d155..37f82e2cd7fb 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -810,12 +810,8 @@ qla24xx_start_scsi(srb_t *sp)
810 ha->req_q_cnt = ha->request_q_length - 810 ha->req_q_cnt = ha->request_q_length -
811 (ha->req_ring_index - cnt); 811 (ha->req_ring_index - cnt);
812 } 812 }
813 if (ha->req_q_cnt < (req_cnt + 2)) { 813 if (ha->req_q_cnt < (req_cnt + 2))
814 if (cmd->use_sg)
815 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
816 cmd->sc_data_direction);
817 goto queuing_error; 814 goto queuing_error;
818 }
819 815
820 /* Build command packet. */ 816 /* Build command packet. */
821 ha->current_outstanding_cmd = handle; 817 ha->current_outstanding_cmd = handle;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f910de6dd437..c255bb0268a9 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -451,6 +451,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
451 451
452 ha->flags.management_server_logged_in = 0; 452 ha->flags.management_server_logged_in = 0;
453 ha->link_data_rate = 0; 453 ha->link_data_rate = 0;
454 if (ql2xfdmienable)
455 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
454 456
455 /* Update AEN queue. */ 457 /* Update AEN queue. */
456 qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL); 458 qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 409ea0ac4032..13e1c9047079 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -19,6 +19,7 @@
19#include "qla_def.h" 19#include "qla_def.h"
20 20
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <scsi/scsi_transport_fc.h>
22 23
23static void 24static void
24qla2x00_mbx_sem_timeout(unsigned long data) 25qla2x00_mbx_sem_timeout(unsigned long data)
@@ -251,7 +252,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
251 mb0 = RD_REG_WORD(&reg->isp24.mailbox0); 252 mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
252 ictrl = RD_REG_DWORD(&reg->isp24.ictrl); 253 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
253 } else { 254 } else {
254 mb0 = RD_MAILBOX_REG(ha, reg->isp, 0); 255 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
255 ictrl = RD_REG_WORD(&reg->isp.ictrl); 256 ictrl = RD_REG_WORD(&reg->isp.ictrl);
256 } 257 }
257 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 258 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
@@ -983,58 +984,6 @@ qla2x00_abort_target(fc_port_t *fcport)
983#endif 984#endif
984 985
985/* 986/*
986 * qla2x00_target_reset
987 * Issue target reset mailbox command.
988 *
989 * Input:
990 * ha = adapter block pointer.
991 * TARGET_QUEUE_LOCK must be released.
992 * ADAPTER_STATE_LOCK must be released.
993 *
994 * Returns:
995 * qla2x00 local function return status code.
996 *
997 * Context:
998 * Kernel context.
999 */
1000int
1001qla2x00_target_reset(scsi_qla_host_t *ha, struct fc_port *fcport)
1002{
1003 int rval;
1004 mbx_cmd_t mc;
1005 mbx_cmd_t *mcp = &mc;
1006
1007 DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);)
1008
1009 if (atomic_read(&fcport->state) != FCS_ONLINE)
1010 return 0;
1011
1012 mcp->mb[0] = MBC_TARGET_RESET;
1013 if (HAS_EXTENDED_IDS(ha))
1014 mcp->mb[1] = fcport->loop_id;
1015 else
1016 mcp->mb[1] = fcport->loop_id << 8;
1017 mcp->mb[2] = ha->loop_reset_delay;
1018 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1019 mcp->in_mb = MBX_0;
1020 mcp->tov = 30;
1021 mcp->flags = 0;
1022 rval = qla2x00_mailbox_command(ha, mcp);
1023
1024 if (rval != QLA_SUCCESS) {
1025 /*EMPTY*/
1026 DEBUG2_3_11(printk("qla2x00_target_reset(%ld): failed=%x.\n",
1027 ha->host_no, rval);)
1028 } else {
1029 /*EMPTY*/
1030 DEBUG11(printk("qla2x00_target_reset(%ld): done.\n",
1031 ha->host_no);)
1032 }
1033
1034 return rval;
1035}
1036
1037/*
1038 * qla2x00_get_adapter_id 987 * qla2x00_get_adapter_id
1039 * Get adapter ID and topology. 988 * Get adapter ID and topology.
1040 * 989 *
@@ -1326,6 +1275,10 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1326 fcport->port_type = FCT_INITIATOR; 1275 fcport->port_type = FCT_INITIATOR;
1327 else 1276 else
1328 fcport->port_type = FCT_TARGET; 1277 fcport->port_type = FCT_TARGET;
1278
1279 /* Passback COS information. */
1280 fcport->supported_classes = (pd->options & BIT_4) ?
1281 FC_COS_CLASS2: FC_COS_CLASS3;
1329 } 1282 }
1330 1283
1331gpd_error_out: 1284gpd_error_out:
@@ -1661,6 +1614,13 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1661 mb[1] |= BIT_1; 1614 mb[1] |= BIT_1;
1662 } else 1615 } else
1663 mb[1] = BIT_0; 1616 mb[1] = BIT_0;
1617
1618 /* Passback COS information. */
1619 mb[10] = 0;
1620 if (lg->io_parameter[7] || lg->io_parameter[8])
1621 mb[10] |= BIT_0; /* Class 2. */
1622 if (lg->io_parameter[9] || lg->io_parameter[10])
1623 mb[10] |= BIT_1; /* Class 3. */
1664 } 1624 }
1665 1625
1666 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1626 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1723,6 +1683,8 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1723 mb[2] = mcp->mb[2]; 1683 mb[2] = mcp->mb[2];
1724 mb[6] = mcp->mb[6]; 1684 mb[6] = mcp->mb[6];
1725 mb[7] = mcp->mb[7]; 1685 mb[7] = mcp->mb[7];
1686 /* COS retrieved from Get-Port-Database mailbox command. */
1687 mb[10] = 0;
1726 } 1688 }
1727 1689
1728 if (rval != QLA_SUCCESS) { 1690 if (rval != QLA_SUCCESS) {
@@ -2465,3 +2427,32 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
2465 2427
2466 return rval; 2428 return rval;
2467} 2429}
2430
2431int
2432qla2x00_stop_firmware(scsi_qla_host_t *ha)
2433{
2434 int rval;
2435 mbx_cmd_t mc;
2436 mbx_cmd_t *mcp = &mc;
2437
2438 if (!IS_QLA24XX(ha) && !IS_QLA25XX(ha))
2439 return QLA_FUNCTION_FAILED;
2440
2441 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2442
2443 mcp->mb[0] = MBC_STOP_FIRMWARE;
2444 mcp->out_mb = MBX_0;
2445 mcp->in_mb = MBX_0;
2446 mcp->tov = 5;
2447 mcp->flags = 0;
2448 rval = qla2x00_mailbox_command(ha, mcp);
2449
2450 if (rval != QLA_SUCCESS) {
2451 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2452 ha->host_no, rval));
2453 } else {
2454 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2455 }
2456
2457 return rval;
2458}
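
The new qla2x00_stop_firmware() above also shows the driver's mailbox calling convention: mb[0] carries the opcode, out_mb and in_mb are bit masks naming which mailbox registers are loaded before and read back after the command, and tov is the timeout in seconds passed to qla2x00_mailbox_command(). A minimal sketch of that pattern with an invented wrapper; the in_mb mask and the use of mb[1] for the major version are assumptions for illustration, not taken from this patch:

/* Invented wrapper showing the mbx_cmd_t calling convention used above. */
static int
example_get_fw_version(scsi_qla_host_t *ha, uint16_t *major)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;	/* opcode in mailbox 0 */
	mcp->out_mb = MBX_0;			/* only mailbox 0 is loaded */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;		/* registers read back afterwards */
	mcp->tov = 30;				/* timeout, in seconds */
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(ha, mcp);
	if (rval == QLA_SUCCESS)
		*major = mcp->mb[1];		/* assumed: firmware major version */

	return rval;
}
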
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9000659bfbcf..8982978c42fd 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -79,7 +79,7 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
79MODULE_PARM_DESC(ql2xloginretrycount, 79MODULE_PARM_DESC(ql2xloginretrycount,
80 "Specify an alternate value for the NVRAM login retry count."); 80 "Specify an alternate value for the NVRAM login retry count.");
81 81
82int ql2xfwloadbin; 82int ql2xfwloadbin=1;
83module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR); 83module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
84MODULE_PARM_DESC(ql2xfwloadbin, 84MODULE_PARM_DESC(ql2xfwloadbin,
85 "Load ISP2xxx firmware image via hotplug."); 85 "Load ISP2xxx firmware image via hotplug.");
@@ -88,6 +88,12 @@ static void qla2x00_free_device(scsi_qla_host_t *);
88 88
89static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 89static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
90 90
91int ql2xfdmienable;
92module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
93MODULE_PARM_DESC(ql2xfdmienable,
 94 "Enables FDMI registrations. "
 95 "Default is 0 - no FDMI. 1 - perform FDMI.");
96
91/* 97/*
92 * SCSI host template entry points 98 * SCSI host template entry points
93 */ 99 */
@@ -105,6 +111,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
105static int qla2x00_loop_reset(scsi_qla_host_t *ha); 111static int qla2x00_loop_reset(scsi_qla_host_t *ha);
106static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *); 112static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
107 113
114static int qla2x00_change_queue_depth(struct scsi_device *, int);
115static int qla2x00_change_queue_type(struct scsi_device *, int);
116
108static struct scsi_host_template qla2x00_driver_template = { 117static struct scsi_host_template qla2x00_driver_template = {
109 .module = THIS_MODULE, 118 .module = THIS_MODULE,
110 .name = "qla2xxx", 119 .name = "qla2xxx",
@@ -119,6 +128,8 @@ static struct scsi_host_template qla2x00_driver_template = {
119 128
120 .slave_alloc = qla2xxx_slave_alloc, 129 .slave_alloc = qla2xxx_slave_alloc,
121 .slave_destroy = qla2xxx_slave_destroy, 130 .slave_destroy = qla2xxx_slave_destroy,
131 .change_queue_depth = qla2x00_change_queue_depth,
132 .change_queue_type = qla2x00_change_queue_type,
122 .this_id = -1, 133 .this_id = -1,
123 .cmd_per_lun = 3, 134 .cmd_per_lun = 3,
124 .use_clustering = ENABLE_CLUSTERING, 135 .use_clustering = ENABLE_CLUSTERING,
@@ -129,6 +140,7 @@ static struct scsi_host_template qla2x00_driver_template = {
129 * which equates to 0x800000 sectors. 140 * which equates to 0x800000 sectors.
130 */ 141 */
131 .max_sectors = 0xFFFF, 142 .max_sectors = 0xFFFF,
143 .shost_attrs = qla2x00_host_attrs,
132}; 144};
133 145
134static struct scsi_host_template qla24xx_driver_template = { 146static struct scsi_host_template qla24xx_driver_template = {
@@ -145,12 +157,15 @@ static struct scsi_host_template qla24xx_driver_template = {
145 157
146 .slave_alloc = qla2xxx_slave_alloc, 158 .slave_alloc = qla2xxx_slave_alloc,
147 .slave_destroy = qla2xxx_slave_destroy, 159 .slave_destroy = qla2xxx_slave_destroy,
160 .change_queue_depth = qla2x00_change_queue_depth,
161 .change_queue_type = qla2x00_change_queue_type,
148 .this_id = -1, 162 .this_id = -1,
149 .cmd_per_lun = 3, 163 .cmd_per_lun = 3,
150 .use_clustering = ENABLE_CLUSTERING, 164 .use_clustering = ENABLE_CLUSTERING,
151 .sg_tablesize = SG_ALL, 165 .sg_tablesize = SG_ALL,
152 166
153 .max_sectors = 0xFFFF, 167 .max_sectors = 0xFFFF,
168 .shost_attrs = qla2x00_host_attrs,
154}; 169};
155 170
156static struct scsi_transport_template *qla2xxx_transport_template = NULL; 171static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -487,14 +502,13 @@ qc24_fail_command:
487static int 502static int
488qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 503qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
489{ 504{
490#define ABORT_POLLING_PERIOD HZ 505#define ABORT_POLLING_PERIOD 1000
491#define ABORT_WAIT_ITER ((10 * HZ) / (ABORT_POLLING_PERIOD)) 506#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
492 unsigned long wait_iter = ABORT_WAIT_ITER; 507 unsigned long wait_iter = ABORT_WAIT_ITER;
493 int ret = QLA_SUCCESS; 508 int ret = QLA_SUCCESS;
494 509
495 while (CMD_SP(cmd)) { 510 while (CMD_SP(cmd)) {
496 set_current_state(TASK_UNINTERRUPTIBLE); 511 msleep(ABORT_POLLING_PERIOD);
497 schedule_timeout(ABORT_POLLING_PERIOD);
498 512
499 if (--wait_iter) 513 if (--wait_iter)
500 break; 514 break;
@@ -1016,7 +1030,7 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1016 if (fcport->port_type != FCT_TARGET) 1030 if (fcport->port_type != FCT_TARGET)
1017 continue; 1031 continue;
1018 1032
1019 status = qla2x00_target_reset(ha, fcport); 1033 status = qla2x00_device_reset(ha, fcport);
1020 if (status != QLA_SUCCESS) 1034 if (status != QLA_SUCCESS)
1021 break; 1035 break;
1022 } 1036 }
@@ -1103,6 +1117,28 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1103 sdev->hostdata = NULL; 1117 sdev->hostdata = NULL;
1104} 1118}
1105 1119
1120static int
1121qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
1122{
1123 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1124 return sdev->queue_depth;
1125}
1126
1127static int
1128qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1129{
1130 if (sdev->tagged_supported) {
1131 scsi_set_tag_type(sdev, tag_type);
1132 if (tag_type)
1133 scsi_activate_tcq(sdev, sdev->queue_depth);
1134 else
1135 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1136 } else
1137 tag_type = 0;
1138
1139 return tag_type;
1140}
1141
1106/** 1142/**
1107 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1143 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1108 * @ha: HA context 1144 * @ha: HA context
@@ -1113,36 +1149,23 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1113static void 1149static void
1114qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1150qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1115{ 1151{
1116 /* Assume 32bit DMA address */ 1152 /* Assume a 32bit DMA mask. */
1117 ha->flags.enable_64bit_addressing = 0; 1153 ha->flags.enable_64bit_addressing = 0;
1118 1154
1119 /* 1155 if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
1120 * Given the two variants pci_set_dma_mask(), allow the compiler to 1156 /* Any upper-dword bits set? */
1121 * assist in setting the proper dma mask. 1157 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1122 */ 1158 !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
1123 if (sizeof(dma_addr_t) > 4) { 1159 /* Ok, a 64bit DMA mask is applicable. */
1124 if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
1125 ha->flags.enable_64bit_addressing = 1; 1160 ha->flags.enable_64bit_addressing = 1;
1126 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64; 1161 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
1127 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64; 1162 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
1128 1163 return;
1129 if (pci_set_consistent_dma_mask(ha->pdev,
1130 DMA_64BIT_MASK)) {
1131 qla_printk(KERN_DEBUG, ha,
1132 "Failed to set 64 bit PCI consistent mask; "
1133 "using 32 bit.\n");
1134 pci_set_consistent_dma_mask(ha->pdev,
1135 DMA_32BIT_MASK);
1136 }
1137 } else {
1138 qla_printk(KERN_DEBUG, ha,
1139 "Failed to set 64 bit PCI DMA mask, falling back "
1140 "to 32 bit MASK.\n");
1141 pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
1142 } 1164 }
1143 } else {
1144 pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
1145 } 1165 }
1166
1167 dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK);
1168 pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
1146} 1169}
1147 1170
1148static int 1171static int
@@ -1316,6 +1339,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1316 ha->prev_topology = 0; 1339 ha->prev_topology = 0;
1317 ha->ports = MAX_BUSES; 1340 ha->ports = MAX_BUSES;
1318 ha->init_cb_size = sizeof(init_cb_t); 1341 ha->init_cb_size = sizeof(init_cb_t);
1342 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
1319 1343
1320 /* Assign ISP specific operations. */ 1344 /* Assign ISP specific operations. */
1321 ha->isp_ops.pci_config = qla2100_pci_config; 1345 ha->isp_ops.pci_config = qla2100_pci_config;
@@ -1338,6 +1362,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1338 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32; 1362 ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
1339 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32; 1363 ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
1340 ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb; 1364 ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
1365 ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
1341 ha->isp_ops.read_nvram = qla2x00_read_nvram_data; 1366 ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
1342 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1367 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1343 ha->isp_ops.fw_dump = qla2100_fw_dump; 1368 ha->isp_ops.fw_dump = qla2100_fw_dump;
@@ -1375,6 +1400,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1375 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1400 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1376 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1401 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1377 ha->init_cb_size = sizeof(struct init_cb_24xx); 1402 ha->init_cb_size = sizeof(struct init_cb_24xx);
1403 ha->mgmt_svr_loop_id = 10;
1378 ha->isp_ops.pci_config = qla24xx_pci_config; 1404 ha->isp_ops.pci_config = qla24xx_pci_config;
1379 ha->isp_ops.reset_chip = qla24xx_reset_chip; 1405 ha->isp_ops.reset_chip = qla24xx_reset_chip;
1380 ha->isp_ops.chip_diag = qla24xx_chip_diag; 1406 ha->isp_ops.chip_diag = qla24xx_chip_diag;
@@ -1395,6 +1421,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1395 ha->isp_ops.fabric_login = qla24xx_login_fabric; 1421 ha->isp_ops.fabric_login = qla24xx_login_fabric;
1396 ha->isp_ops.fabric_logout = qla24xx_fabric_logout; 1422 ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
1397 ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb; 1423 ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
1424 ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
1398 ha->isp_ops.read_nvram = qla24xx_read_nvram_data; 1425 ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
1399 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1426 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1400 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1427 ha->isp_ops.fw_dump = qla24xx_fw_dump;
@@ -1558,8 +1585,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1558 return 0; 1585 return 0;
1559 1586
1560probe_failed: 1587probe_failed:
1561 fc_remove_host(ha->host);
1562
1563 qla2x00_free_device(ha); 1588 qla2x00_free_device(ha);
1564 1589
1565 scsi_host_put(host); 1590 scsi_host_put(host);
@@ -1601,10 +1626,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1601 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 1626 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
1602 qla2x00_cancel_io_descriptors(ha); 1627 qla2x00_cancel_io_descriptors(ha);
1603 1628
1604 /* turn-off interrupts on the card */
1605 if (ha->interrupts_on)
1606 ha->isp_ops.disable_intrs(ha);
1607
1608 /* Disable timer */ 1629 /* Disable timer */
1609 if (ha->timer_active) 1630 if (ha->timer_active)
1610 qla2x00_stop_timer(ha); 1631 qla2x00_stop_timer(ha);
@@ -1624,8 +1645,14 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1624 } 1645 }
1625 } 1646 }
1626 1647
1627 qla2x00_mem_free(ha); 1648 /* Stop currently executing firmware. */
1649 qla2x00_stop_firmware(ha);
1650
1651 /* turn-off interrupts on the card */
1652 if (ha->interrupts_on)
1653 ha->isp_ops.disable_intrs(ha);
1628 1654
1655 qla2x00_mem_free(ha);
1629 1656
1630 ha->flags.online = 0; 1657 ha->flags.online = 0;
1631 1658
@@ -1934,7 +1961,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
1934{ 1961{
1935 struct list_head *fcpl, *fcptemp; 1962 struct list_head *fcpl, *fcptemp;
1936 fc_port_t *fcport; 1963 fc_port_t *fcport;
1937 unsigned long wtime;/* max wait time if mbx cmd is busy. */ 1964 unsigned int wtime;/* max wait time if mbx cmd is busy. */
1938 1965
1939 if (ha == NULL) { 1966 if (ha == NULL) {
1940 /* error */ 1967 /* error */
@@ -1943,11 +1970,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
1943 } 1970 }
1944 1971
1945 /* Make sure all other threads are stopped. */ 1972 /* Make sure all other threads are stopped. */
1946 wtime = 60 * HZ; 1973 wtime = 60 * 1000;
1947 while (ha->dpc_wait && wtime) { 1974 while (ha->dpc_wait && wtime)
1948 set_current_state(TASK_INTERRUPTIBLE); 1975 wtime = msleep_interruptible(wtime);
1949 wtime = schedule_timeout(wtime);
1950 }
1951 1976
1952 /* free ioctl memory */ 1977 /* free ioctl memory */
1953 qla2x00_free_ioctl_mem(ha); 1978 qla2x00_free_ioctl_mem(ha);
@@ -2478,15 +2503,15 @@ qla2x00_timer(scsi_qla_host_t *ha)
2478int 2503int
2479qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout) 2504qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
2480{ 2505{
2481 const unsigned int step = HZ/10; 2506 const unsigned int step = 100; /* msecs */
2507 unsigned int iterations = jiffies_to_msecs(timeout)/100;
2482 2508
2483 do { 2509 do {
2484 if (!down_trylock(sema)) 2510 if (!down_trylock(sema))
2485 return 0; 2511 return 0;
2486 set_current_state(TASK_INTERRUPTIBLE); 2512 if (msleep_interruptible(step))
2487 if (schedule_timeout(step))
2488 break; 2513 break;
 2489 } while ((timeout -= step) > 0); 2514 } while (iterations-- > 0);
2490 2515
2491 return -ETIMEDOUT; 2516 return -ETIMEDOUT;
2492} 2517}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index d7f5c608009c..c14abf743b7c 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -468,21 +468,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
468 uint32_t dwords) 468 uint32_t dwords)
469{ 469{
470 uint32_t i; 470 uint32_t i;
471 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
472
473 /* Pause RISC. */
474 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
475 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
476 471
477 /* Dword reads to flash. */ 472 /* Dword reads to flash. */
478 for (i = 0; i < dwords; i++, faddr++) 473 for (i = 0; i < dwords; i++, faddr++)
479 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 474 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
480 flash_data_to_access_addr(faddr))); 475 flash_data_to_access_addr(faddr)));
481 476
482 /* Release RISC pause. */
483 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
484 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
485
486 return dwptr; 477 return dwptr;
487} 478}
488 479
@@ -532,10 +523,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
532 523
533 ret = QLA_SUCCESS; 524 ret = QLA_SUCCESS;
534 525
535 /* Pause RISC. */
536 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
537 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
538
539 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); 526 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
540 DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__, 527 DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__,
541 ha->host_no, man_id, flash_id)); 528 ha->host_no, man_id, flash_id));
@@ -599,10 +586,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
599 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); 586 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
600 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 587 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
601 588
602 /* Release RISC pause. */
603 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
604 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
605
606 return ret; 589 return ret;
607} 590}
608 591
@@ -630,11 +613,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
630{ 613{
631 uint32_t i; 614 uint32_t i;
632 uint32_t *dwptr; 615 uint32_t *dwptr;
633 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
634
635 /* Pause RISC. */
636 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
637 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
638 616
639 /* Dword reads to flash. */ 617 /* Dword reads to flash. */
640 dwptr = (uint32_t *)buf; 618 dwptr = (uint32_t *)buf;
@@ -642,10 +620,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
642 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 620 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
643 nvram_data_to_access_addr(naddr))); 621 nvram_data_to_access_addr(naddr)));
644 622
645 /* Release RISC pause. */
646 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
647 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
648
649 return buf; 623 return buf;
650} 624}
651 625
@@ -690,10 +664,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
690 664
691 ret = QLA_SUCCESS; 665 ret = QLA_SUCCESS;
692 666
693 /* Pause RISC. */
694 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
695 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
696
697 /* Enable flash write. */ 667 /* Enable flash write. */
698 WRT_REG_DWORD(&reg->ctrl_status, 668 WRT_REG_DWORD(&reg->ctrl_status,
699 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); 669 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -728,9 +698,5 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
728 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); 698 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
729 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 699 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
730 700
731 /* Release RISC pause. */
732 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
733 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
734
735 return ret; 701 return ret;
736} 702}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e3cd3618bc54..eae7d6edd531 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -19,9 +19,9 @@
19/* 19/*
20 * Driver version 20 * Driver version
21 */ 21 */
22#define QLA2XXX_VERSION "8.01.00b5-k" 22#define QLA2XXX_VERSION "8.01.00-k"
23 23
24#define QLA_DRIVER_MAJOR_VER 8 24#define QLA_DRIVER_MAJOR_VER 8
25#define QLA_DRIVER_MINOR_VER 1 25#define QLA_DRIVER_MINOR_VER 1
26#define QLA_DRIVER_PATCH_VER 0 26#define QLA_DRIVER_PATCH_VER 0
27#define QLA_DRIVER_BETA_VER 5 27#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
new file mode 100644
index 000000000000..f1ea5027865f
--- /dev/null
+++ b/drivers/scsi/raid_class.c
@@ -0,0 +1,250 @@
1/*
2 * RAID Attributes
3 */
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/list.h>
7#include <linux/raid_class.h>
8#include <scsi/scsi_device.h>
9#include <scsi/scsi_host.h>
10
11#define RAID_NUM_ATTRS 3
12
13struct raid_internal {
14 struct raid_template r;
15 struct raid_function_template *f;
16 /* The actual attributes */
17 struct class_device_attribute private_attrs[RAID_NUM_ATTRS];
18 /* The array of null terminated pointers to attributes
19 * needed by scsi_sysfs.c */
20 struct class_device_attribute *attrs[RAID_NUM_ATTRS + 1];
21};
22
23struct raid_component {
24 struct list_head node;
25 struct device *dev;
26 int num;
27};
28
29#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r)
30
31#define tc_to_raid_internal(tcont) ({ \
32 struct raid_template *r = \
33 container_of(tcont, struct raid_template, raid_attrs); \
34 to_raid_internal(r); \
35})
36
37#define ac_to_raid_internal(acont) ({ \
38 struct transport_container *tc = \
39 container_of(acont, struct transport_container, ac); \
40 tc_to_raid_internal(tc); \
41})
42
43#define class_device_to_raid_internal(cdev) ({ \
44 struct attribute_container *ac = \
45 attribute_container_classdev_to_container(cdev); \
46 ac_to_raid_internal(ac); \
47})
48
49
50static int raid_match(struct attribute_container *cont, struct device *dev)
51{
52 /* We have to look for every subsystem that could house
53 * emulated RAID devices, so start with SCSI */
54 struct raid_internal *i = ac_to_raid_internal(cont);
55
56 if (scsi_is_sdev_device(dev)) {
57 struct scsi_device *sdev = to_scsi_device(dev);
58
59 if (i->f->cookie != sdev->host->hostt)
60 return 0;
61
62 return i->f->is_raid(dev);
63 }
64 /* FIXME: look at other subsystems too */
65 return 0;
66}
67
68static int raid_setup(struct transport_container *tc, struct device *dev,
69 struct class_device *cdev)
70{
71 struct raid_data *rd;
72
73 BUG_ON(class_get_devdata(cdev));
74
75 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
76 if (!rd)
77 return -ENOMEM;
78
79 memset(rd, 0, sizeof(*rd));
80 INIT_LIST_HEAD(&rd->component_list);
81 class_set_devdata(cdev, rd);
82
83 return 0;
84}
85
86static int raid_remove(struct transport_container *tc, struct device *dev,
87 struct class_device *cdev)
88{
89 struct raid_data *rd = class_get_devdata(cdev);
90 struct raid_component *rc, *next;
91 class_set_devdata(cdev, NULL);
92 list_for_each_entry_safe(rc, next, &rd->component_list, node) {
93 char buf[40];
94 snprintf(buf, sizeof(buf), "component-%d", rc->num);
95 list_del(&rc->node);
96 sysfs_remove_link(&cdev->kobj, buf);
97 kfree(rc);
98 }
 99 	kfree(rd);
100 return 0;
101}
102
103static DECLARE_TRANSPORT_CLASS(raid_class,
104 "raid_devices",
105 raid_setup,
106 raid_remove,
107 NULL);
108
109static struct {
110 enum raid_state value;
111 char *name;
112} raid_states[] = {
113 { RAID_ACTIVE, "active" },
114 { RAID_DEGRADED, "degraded" },
115 { RAID_RESYNCING, "resyncing" },
116 { RAID_OFFLINE, "offline" },
117};
118
119static const char *raid_state_name(enum raid_state state)
120{
121 int i;
122 char *name = NULL;
123
124 for (i = 0; i < sizeof(raid_states)/sizeof(raid_states[0]); i++) {
125 if (raid_states[i].value == state) {
126 name = raid_states[i].name;
127 break;
128 }
129 }
130 return name;
131}
132
133
134#define raid_attr_show_internal(attr, fmt, var, code) \
135static ssize_t raid_show_##attr(struct class_device *cdev, char *buf) \
136{ \
137 struct raid_data *rd = class_get_devdata(cdev); \
138 code \
139 return snprintf(buf, 20, #fmt "\n", var); \
140}
141
142#define raid_attr_ro_states(attr, states, code) \
143raid_attr_show_internal(attr, %s, name, \
144 const char *name; \
145 code \
146 name = raid_##states##_name(rd->attr); \
147) \
148static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
149
150
151#define raid_attr_ro_internal(attr, code) \
152raid_attr_show_internal(attr, %d, rd->attr, code) \
153static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
154
155#define ATTR_CODE(attr) \
156 struct raid_internal *i = class_device_to_raid_internal(cdev); \
157 if (i->f->get_##attr) \
158 i->f->get_##attr(cdev->dev);
159
160#define raid_attr_ro(attr) raid_attr_ro_internal(attr, )
161#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr))
162#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr))
163
164raid_attr_ro(level);
165raid_attr_ro_fn(resync);
166raid_attr_ro_state(state);
167
168void raid_component_add(struct raid_template *r,struct device *raid_dev,
169 struct device *component_dev)
170{
171 struct class_device *cdev =
172 attribute_container_find_class_device(&r->raid_attrs.ac,
173 raid_dev);
174 struct raid_component *rc;
175 struct raid_data *rd = class_get_devdata(cdev);
176 char buf[40];
177
178 rc = kmalloc(sizeof(*rc), GFP_KERNEL);
179 if (!rc)
180 return;
181
182 INIT_LIST_HEAD(&rc->node);
183 rc->dev = component_dev;
184 rc->num = rd->component_count++;
185
186 snprintf(buf, sizeof(buf), "component-%d", rc->num);
187 list_add_tail(&rc->node, &rd->component_list);
188 sysfs_create_link(&cdev->kobj, &component_dev->kobj, buf);
189}
190EXPORT_SYMBOL(raid_component_add);
191
192struct raid_template *
193raid_class_attach(struct raid_function_template *ft)
194{
195 struct raid_internal *i = kmalloc(sizeof(struct raid_internal),
196 GFP_KERNEL);
197 int count = 0;
198
199 if (unlikely(!i))
200 return NULL;
201
202 memset(i, 0, sizeof(*i));
203
204 i->f = ft;
205
206 i->r.raid_attrs.ac.class = &raid_class.class;
207 i->r.raid_attrs.ac.match = raid_match;
208 i->r.raid_attrs.ac.attrs = &i->attrs[0];
209
210 attribute_container_register(&i->r.raid_attrs.ac);
211
212 i->attrs[count++] = &class_device_attr_level;
213 i->attrs[count++] = &class_device_attr_resync;
214 i->attrs[count++] = &class_device_attr_state;
215
216 i->attrs[count] = NULL;
217 BUG_ON(count > RAID_NUM_ATTRS);
218
219 return &i->r;
220}
221EXPORT_SYMBOL(raid_class_attach);
222
223void
224raid_class_release(struct raid_template *r)
225{
226 struct raid_internal *i = to_raid_internal(r);
227
228 attribute_container_unregister(&i->r.raid_attrs.ac);
229
230 kfree(i);
231}
232EXPORT_SYMBOL(raid_class_release);
233
234static __init int raid_init(void)
235{
236 return transport_class_register(&raid_class);
237}
238
239static __exit void raid_exit(void)
240{
241 transport_class_unregister(&raid_class);
242}
243
244MODULE_AUTHOR("James Bottomley");
245MODULE_DESCRIPTION("RAID device class");
246MODULE_LICENSE("GPL");
247
248module_init(raid_init);
249module_exit(raid_exit);
250
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
new file mode 100644
index 000000000000..f97e3afa97d9
--- /dev/null
+++ b/drivers/scsi/sata_mv.c
@@ -0,0 +1,843 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 *
6 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/pci.h>
26#include <linux/init.h>
27#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/sched.h>
31#include <linux/dma-mapping.h>
32#include "scsi.h"
33#include <scsi/scsi_host.h>
34#include <linux/libata.h>
35#include <asm/io.h>
36
37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12"
39
40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */
42 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
43 MV_IO_BAR = 2, /* offset 0x18: IO space */
44 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
45
46 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
47 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
48
49 MV_PCI_REG_BASE = 0,
50 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
51 MV_SATAHC0_REG_BASE = 0x20000,
52
53 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
54 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57
58 MV_Q_CT = 32,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61
62 MV_DMA_BOUNDARY = 0xffffffffU,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)),
64
65 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */
69 MV_PORT_MASK = 3,
70
71 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */
75
76 chip_504x = 0,
77 chip_508x = 1,
78 chip_604x = 2,
79 chip_608x = 3,
80
81 /* PCI interface registers */
82
83 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3),
86 GLOB_SFT_RST = (1 << 4),
87
88 PCI_IRQ_CAUSE_OFS = 0x1d58,
89 PCI_IRQ_MASK_OFS = 0x1d5c,
90 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
91
92 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
93 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
94 PORT0_ERR = (1 << 0), /* shift by port # */
95 PORT0_DONE = (1 << 1), /* shift by port # */
96 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
97 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
98 PCI_ERR = (1 << 18),
99 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
100 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
101 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
102 GPIO_INT = (1 << 22),
103 SELF_INT = (1 << 23),
104 TWSI_INT = (1 << 24),
105 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
106 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
107 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
108 HC_MAIN_RSVD),
109
110 /* SATAHC registers */
111 HC_CFG_OFS = 0,
112
113 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */
117
118 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104,
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128
129 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
131 SATA_ACTIVE_OFS = 0x350,
132
133 /* Port registers */
134 EDMA_CFG_OFS = 0,
135
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc,
138 EDMA_ERR_D_PAR = (1 << 0),
139 EDMA_ERR_PRD_PAR = (1 << 1),
140 EDMA_ERR_DEV = (1 << 2),
141 EDMA_ERR_DEV_DCON = (1 << 3),
142 EDMA_ERR_DEV_CON = (1 << 4),
143 EDMA_ERR_SERR = (1 << 5),
144 EDMA_ERR_SELF_DIS = (1 << 7),
145 EDMA_ERR_BIST_ASYNC = (1 << 8),
146 EDMA_ERR_CRBQ_PAR = (1 << 9),
147 EDMA_ERR_CRPB_PAR = (1 << 10),
148 EDMA_ERR_INTRL_PAR = (1 << 11),
149 EDMA_ERR_IORDY = (1 << 12),
150 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
151 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
152 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
153 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
154 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
155 EDMA_ERR_TRANS_PROTO = (1 << 31),
156 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
157 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
158 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
159 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
160 EDMA_ERR_LNK_DATA_RX |
161 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO),
163
164 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2),
168
169 /* BDMA is 6xxx part only */
170 BDMA_CMD_OFS = 0x224,
171 BDMA_START = (1 << 0),
172
173 MV_UNDEF = 0,
174};
175
176struct mv_port_priv {
177
178};
179
180struct mv_host_priv {
181
182};
183
184static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
187static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192
193static Scsi_Host_Template mv_sht = {
194 .module = THIS_MODULE,
195 .name = DRV_NAME,
196 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE,
200 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF,
202 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF,
206 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config,
209 .bios_param = ata_std_bios_param,
210 .ordered_flush = 1,
211};
212
213static struct ata_port_operations mv_ops = {
214 .port_disable = ata_port_disable,
215
216 .tf_load = ata_tf_load,
217 .tf_read = ata_tf_read,
218 .check_status = ata_check_status,
219 .exec_command = ata_exec_command,
220 .dev_select = ata_std_dev_select,
221
222 .phy_reset = mv_phy_reset,
223
224 .qc_prep = ata_qc_prep,
225 .qc_issue = ata_qc_issue_prot,
226
227 .eng_timeout = ata_eng_timeout,
228
229 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear,
231
232 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write,
234
235 .port_start = ata_port_start,
236 .port_stop = ata_port_stop,
237 .host_stop = ata_host_stop,
238};
239
240static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */
242 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
245 .pio_mask = 0x1f, /* pio4-0 */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops,
248 },
249 { /* chip_508x */
250 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
253 MV_FLAG_DUAL_HC),
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops,
257 },
258 { /* chip_604x */
259 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA),
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops,
266 },
267 { /* chip_608x */
268 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC |
272 MV_FLAG_BDMA),
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops,
276 },
277};
278
279static struct pci_device_id mv_pci_tbl[] = {
280 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
281 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
282 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
283 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
284
285 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
286 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
287 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
288 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
289 {} /* terminate list */
290};
291
292static struct pci_driver mv_pci_driver = {
293 .name = DRV_NAME,
294 .id_table = mv_pci_tbl,
295 .probe = mv_init_one,
296 .remove = ata_pci_remove_one,
297};
298
299/*
300 * Functions
301 */
302
303static inline void writelfl(unsigned long data, void __iomem *addr)
304{
305 writel(data, addr);
306 (void) readl(addr); /* flush to avoid PCI posted write */
307}
308
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
318}
319
320static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
321{
322 return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
323 MV_SATAHC_ARBTR_REG_SZ +
324 ((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
325}
326
327static inline void __iomem *mv_ap_base(struct ata_port *ap)
328{
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330}
331
332static inline int mv_get_hc_count(unsigned long flags)
333{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335}
336
337static inline int mv_is_edma_active(struct ata_port *ap)
338{
339 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
341}
342
343static inline int mv_port_bdma_capable(struct ata_port *ap)
344{
345 return (ap->flags & MV_FLAG_BDMA);
346}
347
348static void mv_irq_clear(struct ata_port *ap)
349{
350}
351
352static unsigned int mv_scr_offset(unsigned int sc_reg_in)
353{
354 unsigned int ofs;
355
356 switch (sc_reg_in) {
357 case SCR_STATUS:
358 case SCR_CONTROL:
359 case SCR_ERROR:
360 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
361 break;
362 case SCR_ACTIVE:
363 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
364 break;
365 default:
366 ofs = 0xffffffffU;
367 break;
368 }
369 return ofs;
370}
371
372static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
373{
374 unsigned int ofs = mv_scr_offset(sc_reg_in);
375
376 if (0xffffffffU != ofs) {
377 return readl(mv_ap_base(ap) + ofs);
378 } else {
379 return (u32) ofs;
380 }
381}
382
383static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
384{
385 unsigned int ofs = mv_scr_offset(sc_reg_in);
386
387 if (0xffffffffU != ofs) {
388 writelfl(val, mv_ap_base(ap) + ofs);
389 }
390}
391
392static int mv_master_reset(void __iomem *mmio_base)
393{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0;
396 u32 t;
397
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status
401 * register" table.
402 */
403 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg);
405
406 for (i = 0; i < 100; i++) {
407 msleep(10);
408 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) {
410 break;
411 }
412 }
413 if (!(PCI_MASTER_EMPTY & t)) {
 414 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
415 rc = 1; /* broken HW? */
416 goto done;
417 }
418
419 /* set reset */
420 i = 5;
421 do {
422 writel(t | GLOB_SFT_RST, reg);
423 t = readl(reg);
424 udelay(1);
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426
427 if (!(GLOB_SFT_RST & t)) {
 428 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
429 rc = 1; /* broken HW? */
430 goto done;
431 }
432
433 /* clear reset */
434 i = 5;
435 do {
436 writel(t & ~GLOB_SFT_RST, reg);
437 t = readl(reg);
438 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440
441 if (GLOB_SFT_RST & t) {
 442 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
443 rc = 1; /* broken HW? */
444 }
445
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc;
449}
450
451static void mv_err_intr(struct ata_port *ap)
452{
453 void __iomem *port_mmio;
454 u32 edma_err_cause, serr = 0;
455
456 /* bug here b/c we got an err int on a port we don't know about,
457 * so there's no way to clear it
458 */
459 BUG_ON(NULL == ap);
460 port_mmio = mv_ap_base(ap);
461
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463
464 if (EDMA_ERR_SERR & edma_err_cause) {
465 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr);
467 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n",
469 ap->port_no, edma_err_cause, serr);
470
471 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
473
474 /* check for fatal here and recover if needed */
475 if (EDMA_ERR_FATAL & edma_err_cause) {
476 mv_phy_reset(ap);
477 }
478}
479
480/* Handle any outstanding interrupts in a single SATAHC
481 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc)
484{
485 void __iomem *mmio = host_set->mmio_base;
486 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
487 struct ata_port *ap;
488 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause;
490 int shift, port, port0, hard_port;
491 u8 ata_status;
492
493 if (hc == 0) {
494 port0 = 0;
495 } else {
496 port0 = MV_PORTS_PER_HC;
497 }
498
499 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
503 }
504
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
506 hc,relevant,hc_irq_cause);
507
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU;
512
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap);
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */
516 /* This is needed to clear the ATA INTRQ.
517 * FIXME: don't read the status reg in EDMA mode!
518 */
519 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr);
521 }
522
523 shift = port * 2;
524 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 }
527 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */
530 ata_status = readb((void __iomem *)
531 ap->ioaddr.status_addr) | ATA_ERR;
532 }
533
534 if (ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status);
542 }
543 }
544 }
545 VPRINTK("EXIT\n");
546}
547
548static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs)
550{
551 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio;
554 u32 irq_stat;
555
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559
560 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault
562 */
563 if (!irq_stat || (0xffffffffU == irq_stat)) {
564 return IRQ_NONE;
565 }
566
567 spin_lock(&host_set->lock);
568
569 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) {
572 mv_host_intr(host_set, relevant, hc);
573 handled = 1;
574 }
575 }
576 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need
578 * to recover from them properly.
579 */
580 }
581
582 spin_unlock(&host_set->lock);
583
584 return IRQ_RETVAL(handled);
585}
586
587static void mv_phy_reset(struct ata_port *ap)
588{
589 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma;
593
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595
596 edma = readl(port_mmio + EDMA_CMD_OFS);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
607
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */
610
611 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however.
613 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);
615
616 VPRINTK("Done. Now calling __sata_phy_reset()\n");
617
618 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap);
620
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
622 VPRINTK("Port disabled pre-sig. Exiting.\n");
623 return;
624 }
625
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
628 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
629 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
630
631 dev->class = ata_dev_classify(&tf);
632 if (!ata_dev_present(dev)) {
633 VPRINTK("Port disabled post-sig: No device present.\n");
634 ata_port_disable(ap);
635 }
636 VPRINTK("EXIT\n");
637}
638
639static void mv_port_init(struct ata_ioports *port, unsigned long base)
640{
641 /* PIO related setup */
642 port->data_addr = base + SHD_PIO_DATA_OFS;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
644 port->nsect_addr = base + SHD_SECT_CNT_OFS;
645 port->lbal_addr = base + SHD_LBA_L_OFS;
646 port->lbam_addr = base + SHD_LBA_M_OFS;
647 port->lbah_addr = base + SHD_LBA_H_OFS;
648 port->device_addr = base + SHD_DEV_HD_OFS;
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;
651 /* unused */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653
654 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);
656
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
661}
662
663static int mv_host_init(struct ata_probe_ent *probe_ent)
664{
665 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio;
668
669 if (mv_master_reset(probe_ent->mmio_base)) {
670 rc = 1;
671 goto done;
672 }
673
674 n_hc = mv_get_hc_count(probe_ent->host_flags);
675 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
676
677 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
680 }
681
682 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
686 }
687
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
690
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n",
693 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS));
697
698 done:
699 return rc;
700}
701
702/* move to PCI layer, integrate w/ MSI stuff */
703static void pci_intx(struct pci_dev *pdev, int enable)
704{
705 u16 pci_command, new;
706
707 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
708
709 if (enable)
710 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
711 else
712 new = pci_command | PCI_COMMAND_INTX_DISABLE;
713
714 if (new != pci_command)
 715 		pci_write_config_word(pdev, PCI_COMMAND, new);
716}
717
718static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
719{
720 static int printed_version = 0;
721 struct ata_probe_ent *probe_ent = NULL;
722 struct mv_host_priv *hpriv;
723 unsigned int board_idx = (unsigned int)ent->driver_data;
724 void __iomem *mmio_base;
725 int pci_dev_busy = 0;
726 int rc;
727
728 if (!printed_version++) {
729 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
730 }
731
732 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
733 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
734
735 rc = pci_enable_device(pdev);
736 if (rc) {
737 return rc;
738 }
739
740 rc = pci_request_regions(pdev, DRV_NAME);
741 if (rc) {
742 pci_dev_busy = 1;
743 goto err_out;
744 }
745
746 pci_intx(pdev, 1);
747
748 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
749 if (probe_ent == NULL) {
750 rc = -ENOMEM;
751 goto err_out_regions;
752 }
753
754 memset(probe_ent, 0, sizeof(*probe_ent));
755 probe_ent->dev = pci_dev_to_dev(pdev);
756 INIT_LIST_HEAD(&probe_ent->node);
757
758 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
759 pci_resource_len(pdev, MV_PRIMARY_BAR));
760 if (mmio_base == NULL) {
761 rc = -ENOMEM;
762 goto err_out_free_ent;
763 }
764
765 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
766 if (!hpriv) {
767 rc = -ENOMEM;
768 goto err_out_iounmap;
769 }
770 memset(hpriv, 0, sizeof(*hpriv));
771
772 probe_ent->sht = mv_port_info[board_idx].sht;
773 probe_ent->host_flags = mv_port_info[board_idx].host_flags;
774 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
775 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
776 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
777
778 probe_ent->irq = pdev->irq;
779 probe_ent->irq_flags = SA_SHIRQ;
780 probe_ent->mmio_base = mmio_base;
781 probe_ent->private_data = hpriv;
782
783 /* initialize adapter */
784 rc = mv_host_init(probe_ent);
785 if (rc) {
786 goto err_out_hpriv;
787 }
788/* mv_print_info(probe_ent); */
789
790 {
791 int b, w;
792 u32 dw[4]; /* hold a line of 16b */
793 VPRINTK("PCI config space:\n");
794 for (b = 0; b < 0x40; ) {
795 for (w = 0; w < 4; w++) {
796 (void) pci_read_config_dword(pdev,b,&dw[w]);
797 b += sizeof(*dw);
798 }
799 VPRINTK("%08x %08x %08x %08x\n",
800 dw[0],dw[1],dw[2],dw[3]);
801 }
802 }
803
804 /* FIXME: check ata_device_add return value */
805 ata_device_add(probe_ent);
806 kfree(probe_ent);
807
808 return 0;
809
810 err_out_hpriv:
811 kfree(hpriv);
812 err_out_iounmap:
813 iounmap(mmio_base);
814 err_out_free_ent:
815 kfree(probe_ent);
816 err_out_regions:
817 pci_release_regions(pdev);
818 err_out:
819 if (!pci_dev_busy) {
820 pci_disable_device(pdev);
821 }
822
823 return rc;
824}
825
826static int __init mv_init(void)
827{
828 return pci_module_init(&mv_pci_driver);
829}
830
831static void __exit mv_exit(void)
832{
833 pci_unregister_driver(&mv_pci_driver);
834}
835
836MODULE_AUTHOR("Brett Russ");
837MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
838MODULE_LICENSE("GPL");
839MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
840MODULE_VERSION(DRV_VERSION);
841
842module_init(mv_init);
843module_exit(mv_exit);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 03d9bc6e69df..a1d62dee3be6 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -351,6 +351,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
351static void nv_host_stop (struct ata_host_set *host_set) 351static void nv_host_stop (struct ata_host_set *host_set)
352{ 352{
353 struct nv_host *host = host_set->private_data; 353 struct nv_host *host = host_set->private_data;
354 struct pci_dev *pdev = to_pci_dev(host_set->dev);
354 355
355 // Disable hotplug event interrupts. 356 // Disable hotplug event interrupts.
356 if (host->host_desc->disable_hotplug) 357 if (host->host_desc->disable_hotplug)
@@ -358,7 +359,8 @@ static void nv_host_stop (struct ata_host_set *host_set)
358 359
359 kfree(host); 360 kfree(host);
360 361
361 ata_host_stop(host_set); 362 if (host_set->mmio_base)
363 pci_iounmap(pdev, host_set->mmio_base);
362} 364}
363 365
364static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 366static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -420,8 +422,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
420 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) { 422 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
421 unsigned long base; 423 unsigned long base;
422 424
423 probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5), 425 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
424 pci_resource_len(pdev, 5));
425 if (probe_ent->mmio_base == NULL) { 426 if (probe_ent->mmio_base == NULL) {
426 rc = -EIO; 427 rc = -EIO;
427 goto err_out_free_host; 428 goto err_out_free_host;
@@ -457,7 +458,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
457 458
458err_out_iounmap: 459err_out_iounmap:
459 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) 460 if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
460 iounmap(probe_ent->mmio_base); 461 pci_iounmap(pdev, probe_ent->mmio_base);
461err_out_free_host: 462err_out_free_host:
462 kfree(host); 463 kfree(host);
463err_out_free_ent: 464err_out_free_ent:
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 4d8201422a12..538ad727bd2e 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -84,13 +84,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
84static void pdc_eng_timeout(struct ata_port *ap); 84static void pdc_eng_timeout(struct ata_port *ap);
85static int pdc_port_start(struct ata_port *ap); 85static int pdc_port_start(struct ata_port *ap);
86static void pdc_port_stop(struct ata_port *ap); 86static void pdc_port_stop(struct ata_port *ap);
87static void pdc_phy_reset(struct ata_port *ap); 87static void pdc_pata_phy_reset(struct ata_port *ap);
88static void pdc_sata_phy_reset(struct ata_port *ap);
88static void pdc_qc_prep(struct ata_queued_cmd *qc); 89static void pdc_qc_prep(struct ata_queued_cmd *qc);
89static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); 90static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
90static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); 91static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
91static void pdc_irq_clear(struct ata_port *ap); 92static void pdc_irq_clear(struct ata_port *ap);
92static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 93static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
93 94
95
94static Scsi_Host_Template pdc_ata_sht = { 96static Scsi_Host_Template pdc_ata_sht = {
95 .module = THIS_MODULE, 97 .module = THIS_MODULE,
96 .name = DRV_NAME, 98 .name = DRV_NAME,
@@ -111,24 +113,48 @@ static Scsi_Host_Template pdc_ata_sht = {
111 .ordered_flush = 1, 113 .ordered_flush = 1,
112}; 114};
113 115
114static struct ata_port_operations pdc_ata_ops = { 116static struct ata_port_operations pdc_sata_ops = {
115 .port_disable = ata_port_disable, 117 .port_disable = ata_port_disable,
116 .tf_load = pdc_tf_load_mmio, 118 .tf_load = pdc_tf_load_mmio,
117 .tf_read = ata_tf_read, 119 .tf_read = ata_tf_read,
118 .check_status = ata_check_status, 120 .check_status = ata_check_status,
119 .exec_command = pdc_exec_command_mmio, 121 .exec_command = pdc_exec_command_mmio,
120 .dev_select = ata_std_dev_select, 122 .dev_select = ata_std_dev_select,
121 .phy_reset = pdc_phy_reset, 123
124 .phy_reset = pdc_sata_phy_reset,
125
122 .qc_prep = pdc_qc_prep, 126 .qc_prep = pdc_qc_prep,
123 .qc_issue = pdc_qc_issue_prot, 127 .qc_issue = pdc_qc_issue_prot,
124 .eng_timeout = pdc_eng_timeout, 128 .eng_timeout = pdc_eng_timeout,
125 .irq_handler = pdc_interrupt, 129 .irq_handler = pdc_interrupt,
126 .irq_clear = pdc_irq_clear, 130 .irq_clear = pdc_irq_clear,
131
127 .scr_read = pdc_sata_scr_read, 132 .scr_read = pdc_sata_scr_read,
128 .scr_write = pdc_sata_scr_write, 133 .scr_write = pdc_sata_scr_write,
129 .port_start = pdc_port_start, 134 .port_start = pdc_port_start,
130 .port_stop = pdc_port_stop, 135 .port_stop = pdc_port_stop,
131 .host_stop = ata_host_stop, 136 .host_stop = ata_pci_host_stop,
137};
138
139static struct ata_port_operations pdc_pata_ops = {
140 .port_disable = ata_port_disable,
141 .tf_load = pdc_tf_load_mmio,
142 .tf_read = ata_tf_read,
143 .check_status = ata_check_status,
144 .exec_command = pdc_exec_command_mmio,
145 .dev_select = ata_std_dev_select,
146
147 .phy_reset = pdc_pata_phy_reset,
148
149 .qc_prep = pdc_qc_prep,
150 .qc_issue = pdc_qc_issue_prot,
151 .eng_timeout = pdc_eng_timeout,
152 .irq_handler = pdc_interrupt,
153 .irq_clear = pdc_irq_clear,
154
155 .port_start = pdc_port_start,
156 .port_stop = pdc_port_stop,
157 .host_stop = ata_pci_host_stop,
132}; 158};
133 159
134static struct ata_port_info pdc_port_info[] = { 160static struct ata_port_info pdc_port_info[] = {
@@ -140,7 +166,7 @@ static struct ata_port_info pdc_port_info[] = {
140 .pio_mask = 0x1f, /* pio0-4 */ 166 .pio_mask = 0x1f, /* pio0-4 */
141 .mwdma_mask = 0x07, /* mwdma0-2 */ 167 .mwdma_mask = 0x07, /* mwdma0-2 */
142 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 168 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
143 .port_ops = &pdc_ata_ops, 169 .port_ops = &pdc_sata_ops,
144 }, 170 },
145 171
146 /* board_20319 */ 172 /* board_20319 */
@@ -151,7 +177,7 @@ static struct ata_port_info pdc_port_info[] = {
151 .pio_mask = 0x1f, /* pio0-4 */ 177 .pio_mask = 0x1f, /* pio0-4 */
152 .mwdma_mask = 0x07, /* mwdma0-2 */ 178 .mwdma_mask = 0x07, /* mwdma0-2 */
153 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 179 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
154 .port_ops = &pdc_ata_ops, 180 .port_ops = &pdc_sata_ops,
155 }, 181 },
156 182
157 /* board_20619 */ 183 /* board_20619 */
@@ -162,7 +188,7 @@ static struct ata_port_info pdc_port_info[] = {
162 .pio_mask = 0x1f, /* pio0-4 */ 188 .pio_mask = 0x1f, /* pio0-4 */
163 .mwdma_mask = 0x07, /* mwdma0-2 */ 189 .mwdma_mask = 0x07, /* mwdma0-2 */
164 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 190 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
165 .port_ops = &pdc_ata_ops, 191 .port_ops = &pdc_pata_ops,
166 }, 192 },
167}; 193};
168 194
@@ -257,7 +283,7 @@ static void pdc_port_stop(struct ata_port *ap)
257 283
258static void pdc_reset_port(struct ata_port *ap) 284static void pdc_reset_port(struct ata_port *ap)
259{ 285{
260 void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 286 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
261 unsigned int i; 287 unsigned int i;
262 u32 tmp; 288 u32 tmp;
263 289
@@ -277,12 +303,23 @@ static void pdc_reset_port(struct ata_port *ap)
277 readl(mmio); /* flush */ 303 readl(mmio); /* flush */
278} 304}
279 305
280static void pdc_phy_reset(struct ata_port *ap) 306static void pdc_sata_phy_reset(struct ata_port *ap)
281{ 307{
282 pdc_reset_port(ap); 308 pdc_reset_port(ap);
283 sata_phy_reset(ap); 309 sata_phy_reset(ap);
284} 310}
285 311
312static void pdc_pata_phy_reset(struct ata_port *ap)
313{
314 /* FIXME: add cable detect. Don't assume 40-pin cable */
315 ap->cbl = ATA_CBL_PATA40;
316 ap->udma_mask &= ATA_UDMA_MASK_40C;
317
318 pdc_reset_port(ap);
319 ata_port_probe(ap);
320 ata_bus_reset(ap);
321}
322
286static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 323static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
287{ 324{
288 if (sc_reg > SCR_CONTROL) 325 if (sc_reg > SCR_CONTROL)
@@ -382,7 +419,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
382 u8 status; 419 u8 status;
383 unsigned int handled = 0, have_err = 0; 420 unsigned int handled = 0, have_err = 0;
384 u32 tmp; 421 u32 tmp;
385 void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; 422 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
386 423
387 tmp = readl(mmio); 424 tmp = readl(mmio);
388 if (tmp & PDC_ERR_MASK) { 425 if (tmp & PDC_ERR_MASK) {
@@ -411,7 +448,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
411static void pdc_irq_clear(struct ata_port *ap) 448static void pdc_irq_clear(struct ata_port *ap)
412{ 449{
413 struct ata_host_set *host_set = ap->host_set; 450 struct ata_host_set *host_set = ap->host_set;
414 void *mmio = host_set->mmio_base; 451 void __iomem *mmio = host_set->mmio_base;
415 452
416 readl(mmio + PDC_INT_SEQMASK); 453 readl(mmio + PDC_INT_SEQMASK);
417} 454}
@@ -423,7 +460,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
423 u32 mask = 0; 460 u32 mask = 0;
424 unsigned int i, tmp; 461 unsigned int i, tmp;
425 unsigned int handled = 0; 462 unsigned int handled = 0;
426 void *mmio_base; 463 void __iomem *mmio_base;
427 464
428 VPRINTK("ENTER\n"); 465 VPRINTK("ENTER\n");
429 466
@@ -545,7 +582,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
545 582
546static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 583static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
547{ 584{
548 void *mmio = pe->mmio_base; 585 void __iomem *mmio = pe->mmio_base;
549 u32 tmp; 586 u32 tmp;
550 587
551 /* 588 /*
@@ -588,7 +625,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
588 static int printed_version; 625 static int printed_version;
589 struct ata_probe_ent *probe_ent = NULL; 626 struct ata_probe_ent *probe_ent = NULL;
590 unsigned long base; 627 unsigned long base;
591 void *mmio_base; 628 void __iomem *mmio_base;
592 unsigned int board_idx = (unsigned int) ent->driver_data; 629 unsigned int board_idx = (unsigned int) ent->driver_data;
593 int pci_dev_busy = 0; 630 int pci_dev_busy = 0;
594 int rc; 631 int rc;
@@ -627,8 +664,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
627 probe_ent->dev = pci_dev_to_dev(pdev); 664 probe_ent->dev = pci_dev_to_dev(pdev);
628 INIT_LIST_HEAD(&probe_ent->node); 665 INIT_LIST_HEAD(&probe_ent->node);
629 666
630 mmio_base = ioremap(pci_resource_start(pdev, 3), 667 mmio_base = pci_iomap(pdev, 3, 0);
631 pci_resource_len(pdev, 3));
632 if (mmio_base == NULL) { 668 if (mmio_base == NULL) {
633 rc = -ENOMEM; 669 rc = -ENOMEM;
634 goto err_out_free_ent; 670 goto err_out_free_ent;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 9c99ab433bd3..ffcdeb68641c 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -494,7 +494,7 @@ static int qs_port_start(struct ata_port *ap)
494 if (rc) 494 if (rc)
495 return rc; 495 return rc;
496 qs_enter_reg_mode(ap); 496 qs_enter_reg_mode(ap);
497 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); 497 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
498 if (!pp) { 498 if (!pp) {
499 rc = -ENOMEM; 499 rc = -ENOMEM;
500 goto err_out; 500 goto err_out;
@@ -538,11 +538,12 @@ static void qs_port_stop(struct ata_port *ap)
538static void qs_host_stop(struct ata_host_set *host_set) 538static void qs_host_stop(struct ata_host_set *host_set)
539{ 539{
540 void __iomem *mmio_base = host_set->mmio_base; 540 void __iomem *mmio_base = host_set->mmio_base;
541 struct pci_dev *pdev = to_pci_dev(host_set->dev);
541 542
542 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 543 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
543 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ 544 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
544 545
545 ata_host_stop(host_set); 546 pci_iounmap(pdev, mmio_base);
546} 547}
547 548
548static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 549static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
@@ -646,8 +647,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
646 goto err_out_regions; 647 goto err_out_regions;
647 } 648 }
648 649
649 mmio_base = ioremap(pci_resource_start(pdev, 4), 650 mmio_base = pci_iomap(pdev, 4, 0);
650 pci_resource_len(pdev, 4));
651 if (mmio_base == NULL) { 651 if (mmio_base == NULL) {
652 rc = -ENOMEM; 652 rc = -ENOMEM;
653 goto err_out_regions; 653 goto err_out_regions;
@@ -697,7 +697,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
697 return 0; 697 return 0;
698 698
699err_out_iounmap: 699err_out_iounmap:
700 iounmap(mmio_base); 700 pci_iounmap(pdev, mmio_base);
701err_out_regions: 701err_out_regions:
702 pci_release_regions(pdev); 702 pci_release_regions(pdev);
703err_out: 703err_out:
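Two things change in sata_qstor: the single zeroed allocation moves from kcalloc(1, size, flags) to the more direct kzalloc(size, flags), and qs_host_stop() now unmaps its own MMIO with pci_iounmap() instead of deferring to ata_host_stop(). A minimal sketch of the allocation idiom, with struct my_port_priv standing in for the driver's private data:

	#include <linux/slab.h>

	struct my_port_priv {
		void *pkt;
		unsigned long pkt_len;
	};

	static struct my_port_priv *my_port_priv_alloc(void)
	{
		/* one zeroed object: kzalloc(), not kcalloc(1, ...) */
		return kzalloc(sizeof(struct my_port_priv), GFP_KERNEL);
	}
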
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 71d49548f0a3..ba98a175ee3a 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -86,6 +86,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 86static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static void sil_post_set_mode (struct ata_port *ap); 87static void sil_post_set_mode (struct ata_port *ap);
88 88
89
89static struct pci_device_id sil_pci_tbl[] = { 90static struct pci_device_id sil_pci_tbl[] = {
90 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 91 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
91 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 92 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
@@ -172,7 +173,7 @@ static struct ata_port_operations sil_ops = {
172 .scr_write = sil_scr_write, 173 .scr_write = sil_scr_write,
173 .port_start = ata_port_start, 174 .port_start = ata_port_start,
174 .port_stop = ata_port_stop, 175 .port_stop = ata_port_stop,
175 .host_stop = ata_host_stop, 176 .host_stop = ata_pci_host_stop,
176}; 177};
177 178
178static struct ata_port_info sil_port_info[] = { 179static struct ata_port_info sil_port_info[] = {
@@ -231,6 +232,7 @@ MODULE_LICENSE("GPL");
231MODULE_DEVICE_TABLE(pci, sil_pci_tbl); 232MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
232MODULE_VERSION(DRV_VERSION); 233MODULE_VERSION(DRV_VERSION);
233 234
235
234static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) 236static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
235{ 237{
236 u8 cache_line = 0; 238 u8 cache_line = 0;
@@ -242,7 +244,8 @@ static void sil_post_set_mode (struct ata_port *ap)
242{ 244{
243 struct ata_host_set *host_set = ap->host_set; 245 struct ata_host_set *host_set = ap->host_set;
244 struct ata_device *dev; 246 struct ata_device *dev;
245 void *addr = host_set->mmio_base + sil_port[ap->port_no].xfer_mode; 247 void __iomem *addr =
248 host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
246 u32 tmp, dev_mode[2]; 249 u32 tmp, dev_mode[2];
247 unsigned int i; 250 unsigned int i;
248 251
@@ -375,7 +378,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
375 static int printed_version; 378 static int printed_version;
376 struct ata_probe_ent *probe_ent = NULL; 379 struct ata_probe_ent *probe_ent = NULL;
377 unsigned long base; 380 unsigned long base;
378 void *mmio_base; 381 void __iomem *mmio_base;
379 int rc; 382 int rc;
380 unsigned int i; 383 unsigned int i;
381 int pci_dev_busy = 0; 384 int pci_dev_busy = 0;
@@ -425,8 +428,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
425 probe_ent->irq_flags = SA_SHIRQ; 428 probe_ent->irq_flags = SA_SHIRQ;
426 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; 429 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
427 430
428 mmio_base = ioremap(pci_resource_start(pdev, 5), 431 mmio_base = pci_iomap(pdev, 5, 0);
429 pci_resource_len(pdev, 5));
430 if (mmio_base == NULL) { 432 if (mmio_base == NULL) {
431 rc = -ENOMEM; 433 rc = -ENOMEM;
432 goto err_out_free_ent; 434 goto err_out_free_ent;
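sil_post_set_mode() shows that pointer arithmetic is still fine on a void __iomem * as long as the result is only ever accessed through readl()/writel(), never dereferenced directly. A small illustration under that assumption; my_read_xfer_mode() and its offset parameter are invented names:

	#include <asm/io.h>

	static u32 my_read_xfer_mode(void __iomem *mmio_base, unsigned long xfer_mode_off)
	{
		void __iomem *addr = mmio_base + xfer_mode_off;

		return readl(addr);	/* never a plain *addr on __iomem memory */
	}
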
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 19d3bb3b0fb6..d89d968bedac 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -318,7 +318,7 @@ static struct ata_port_operations k2_sata_ops = {
318 .scr_write = k2_sata_scr_write, 318 .scr_write = k2_sata_scr_write,
319 .port_start = ata_port_start, 319 .port_start = ata_port_start,
320 .port_stop = ata_port_stop, 320 .port_stop = ata_port_stop,
321 .host_stop = ata_host_stop, 321 .host_stop = ata_pci_host_stop,
322}; 322};
323 323
324static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) 324static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -346,7 +346,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
346 static int printed_version; 346 static int printed_version;
347 struct ata_probe_ent *probe_ent = NULL; 347 struct ata_probe_ent *probe_ent = NULL;
348 unsigned long base; 348 unsigned long base;
349 void *mmio_base; 349 void __iomem *mmio_base;
350 int pci_dev_busy = 0; 350 int pci_dev_busy = 0;
351 int rc; 351 int rc;
352 int i; 352 int i;
@@ -392,8 +392,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
392 probe_ent->dev = pci_dev_to_dev(pdev); 392 probe_ent->dev = pci_dev_to_dev(pdev);
393 INIT_LIST_HEAD(&probe_ent->node); 393 INIT_LIST_HEAD(&probe_ent->node);
394 394
395 mmio_base = ioremap(pci_resource_start(pdev, 5), 395 mmio_base = pci_iomap(pdev, 5, 0);
396 pci_resource_len(pdev, 5));
397 if (mmio_base == NULL) { 396 if (mmio_base == NULL) {
398 rc = -ENOMEM; 397 rc = -ENOMEM;
399 goto err_out_free_ent; 398 goto err_out_free_ent;
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index c72fcc46f0fa..540a85191172 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -245,13 +245,14 @@ static struct pci_driver pdc_sata_pci_driver = {
245 245
246static void pdc20621_host_stop(struct ata_host_set *host_set) 246static void pdc20621_host_stop(struct ata_host_set *host_set)
247{ 247{
248 struct pci_dev *pdev = to_pci_dev(host_set->dev);
248 struct pdc_host_priv *hpriv = host_set->private_data; 249 struct pdc_host_priv *hpriv = host_set->private_data;
249 void *dimm_mmio = hpriv->dimm_mmio; 250 void *dimm_mmio = hpriv->dimm_mmio;
250 251
251 iounmap(dimm_mmio); 252 pci_iounmap(pdev, dimm_mmio);
252 kfree(hpriv); 253 kfree(hpriv);
253 254
254 ata_host_stop(host_set); 255 pci_iounmap(pdev, host_set->mmio_base);
255} 256}
256 257
257static int pdc_port_start(struct ata_port *ap) 258static int pdc_port_start(struct ata_port *ap)
@@ -451,9 +452,9 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
451 struct scatterlist *sg = qc->sg; 452 struct scatterlist *sg = qc->sg;
452 struct ata_port *ap = qc->ap; 453 struct ata_port *ap = qc->ap;
453 struct pdc_port_priv *pp = ap->private_data; 454 struct pdc_port_priv *pp = ap->private_data;
454 void *mmio = ap->host_set->mmio_base; 455 void __iomem *mmio = ap->host_set->mmio_base;
455 struct pdc_host_priv *hpriv = ap->host_set->private_data; 456 struct pdc_host_priv *hpriv = ap->host_set->private_data;
456 void *dimm_mmio = hpriv->dimm_mmio; 457 void __iomem *dimm_mmio = hpriv->dimm_mmio;
457 unsigned int portno = ap->port_no; 458 unsigned int portno = ap->port_no;
458 unsigned int i, last, idx, total_len = 0, sgt_len; 459 unsigned int i, last, idx, total_len = 0, sgt_len;
459 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 460 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
@@ -513,9 +514,9 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
513{ 514{
514 struct ata_port *ap = qc->ap; 515 struct ata_port *ap = qc->ap;
515 struct pdc_port_priv *pp = ap->private_data; 516 struct pdc_port_priv *pp = ap->private_data;
516 void *mmio = ap->host_set->mmio_base; 517 void __iomem *mmio = ap->host_set->mmio_base;
517 struct pdc_host_priv *hpriv = ap->host_set->private_data; 518 struct pdc_host_priv *hpriv = ap->host_set->private_data;
518 void *dimm_mmio = hpriv->dimm_mmio; 519 void __iomem *dimm_mmio = hpriv->dimm_mmio;
519 unsigned int portno = ap->port_no; 520 unsigned int portno = ap->port_no;
520 unsigned int i; 521 unsigned int i;
521 522
@@ -565,7 +566,7 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
565{ 566{
566 struct ata_port *ap = qc->ap; 567 struct ata_port *ap = qc->ap;
567 struct ata_host_set *host_set = ap->host_set; 568 struct ata_host_set *host_set = ap->host_set;
568 void *mmio = host_set->mmio_base; 569 void __iomem *mmio = host_set->mmio_base;
569 570
570 /* hard-code chip #0 */ 571 /* hard-code chip #0 */
571 mmio += PDC_CHIP0_OFS; 572 mmio += PDC_CHIP0_OFS;
@@ -639,7 +640,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639 struct ata_port *ap = qc->ap; 640 struct ata_port *ap = qc->ap;
640 struct ata_host_set *host_set = ap->host_set; 641 struct ata_host_set *host_set = ap->host_set;
641 unsigned int port_no = ap->port_no; 642 unsigned int port_no = ap->port_no;
642 void *mmio = host_set->mmio_base; 643 void __iomem *mmio = host_set->mmio_base;
643 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
644 u8 seq = (u8) (port_no + 1); 645 u8 seq = (u8) (port_no + 1);
645 unsigned int port_ofs; 646 unsigned int port_ofs;
@@ -699,7 +700,7 @@ static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
699static inline unsigned int pdc20621_host_intr( struct ata_port *ap, 700static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
700 struct ata_queued_cmd *qc, 701 struct ata_queued_cmd *qc,
701 unsigned int doing_hdma, 702 unsigned int doing_hdma,
702 void *mmio) 703 void __iomem *mmio)
703{ 704{
704 unsigned int port_no = ap->port_no; 705 unsigned int port_no = ap->port_no;
705 unsigned int port_ofs = 706 unsigned int port_ofs =
@@ -778,7 +779,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
778static void pdc20621_irq_clear(struct ata_port *ap) 779static void pdc20621_irq_clear(struct ata_port *ap)
779{ 780{
780 struct ata_host_set *host_set = ap->host_set; 781 struct ata_host_set *host_set = ap->host_set;
781 void *mmio = host_set->mmio_base; 782 void __iomem *mmio = host_set->mmio_base;
782 783
783 mmio += PDC_CHIP0_OFS; 784 mmio += PDC_CHIP0_OFS;
784 785
@@ -792,7 +793,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
792 u32 mask = 0; 793 u32 mask = 0;
793 unsigned int i, tmp, port_no; 794 unsigned int i, tmp, port_no;
794 unsigned int handled = 0; 795 unsigned int handled = 0;
795 void *mmio_base; 796 void __iomem *mmio_base;
796 797
797 VPRINTK("ENTER\n"); 798 VPRINTK("ENTER\n");
798 799
@@ -940,9 +941,9 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
940 u16 idx; 941 u16 idx;
941 u8 page_mask; 942 u8 page_mask;
942 long dist; 943 long dist;
943 void *mmio = pe->mmio_base; 944 void __iomem *mmio = pe->mmio_base;
944 struct pdc_host_priv *hpriv = pe->private_data; 945 struct pdc_host_priv *hpriv = pe->private_data;
945 void *dimm_mmio = hpriv->dimm_mmio; 946 void __iomem *dimm_mmio = hpriv->dimm_mmio;
946 947
947 /* hard-code chip #0 */ 948 /* hard-code chip #0 */
948 mmio += PDC_CHIP0_OFS; 949 mmio += PDC_CHIP0_OFS;
@@ -996,9 +997,9 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
996 u16 idx; 997 u16 idx;
997 u8 page_mask; 998 u8 page_mask;
998 long dist; 999 long dist;
999 void *mmio = pe->mmio_base; 1000 void __iomem *mmio = pe->mmio_base;
1000 struct pdc_host_priv *hpriv = pe->private_data; 1001 struct pdc_host_priv *hpriv = pe->private_data;
1001 void *dimm_mmio = hpriv->dimm_mmio; 1002 void __iomem *dimm_mmio = hpriv->dimm_mmio;
1002 1003
1003 /* hard-code chip #0 */ 1004 /* hard-code chip #0 */
1004 mmio += PDC_CHIP0_OFS; 1005 mmio += PDC_CHIP0_OFS;
@@ -1044,7 +1045,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1044static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, 1045static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1045 u32 subaddr, u32 *pdata) 1046 u32 subaddr, u32 *pdata)
1046{ 1047{
1047 void *mmio = pe->mmio_base; 1048 void __iomem *mmio = pe->mmio_base;
1048 u32 i2creg = 0; 1049 u32 i2creg = 0;
1049 u32 status; 1050 u32 status;
1050 u32 count =0; 1051 u32 count =0;
@@ -1103,7 +1104,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1103 u32 data = 0; 1104 u32 data = 0;
1104 int size, i; 1105 int size, i;
1105 u8 bdimmsize; 1106 u8 bdimmsize;
1106 void *mmio = pe->mmio_base; 1107 void __iomem *mmio = pe->mmio_base;
1107 static const struct { 1108 static const struct {
1108 unsigned int reg; 1109 unsigned int reg;
1109 unsigned int ofs; 1110 unsigned int ofs;
@@ -1166,7 +1167,7 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1166{ 1167{
1167 u32 data, spd0; 1168 u32 data, spd0;
1168 int error, i; 1169 int error, i;
1169 void *mmio = pe->mmio_base; 1170 void __iomem *mmio = pe->mmio_base;
1170 1171
1171 /* hard-code chip #0 */ 1172 /* hard-code chip #0 */
1172 mmio += PDC_CHIP0_OFS; 1173 mmio += PDC_CHIP0_OFS;
@@ -1220,7 +1221,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1220 u32 ticks=0; 1221 u32 ticks=0;
1221 u32 clock=0; 1222 u32 clock=0;
1222 u32 fparam=0; 1223 u32 fparam=0;
1223 void *mmio = pe->mmio_base; 1224 void __iomem *mmio = pe->mmio_base;
1224 1225
1225 /* hard-code chip #0 */ 1226 /* hard-code chip #0 */
1226 mmio += PDC_CHIP0_OFS; 1227 mmio += PDC_CHIP0_OFS;
@@ -1344,7 +1345,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1344static void pdc_20621_init(struct ata_probe_ent *pe) 1345static void pdc_20621_init(struct ata_probe_ent *pe)
1345{ 1346{
1346 u32 tmp; 1347 u32 tmp;
1347 void *mmio = pe->mmio_base; 1348 void __iomem *mmio = pe->mmio_base;
1348 1349
1349 /* hard-code chip #0 */ 1350 /* hard-code chip #0 */
1350 mmio += PDC_CHIP0_OFS; 1351 mmio += PDC_CHIP0_OFS;
@@ -1377,7 +1378,8 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1377 static int printed_version; 1378 static int printed_version;
1378 struct ata_probe_ent *probe_ent = NULL; 1379 struct ata_probe_ent *probe_ent = NULL;
1379 unsigned long base; 1380 unsigned long base;
1380 void *mmio_base, *dimm_mmio = NULL; 1381 void __iomem *mmio_base;
1382 void __iomem *dimm_mmio = NULL;
1381 struct pdc_host_priv *hpriv = NULL; 1383 struct pdc_host_priv *hpriv = NULL;
1382 unsigned int board_idx = (unsigned int) ent->driver_data; 1384 unsigned int board_idx = (unsigned int) ent->driver_data;
1383 int pci_dev_busy = 0; 1385 int pci_dev_busy = 0;
@@ -1417,8 +1419,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1417 probe_ent->dev = pci_dev_to_dev(pdev); 1419 probe_ent->dev = pci_dev_to_dev(pdev);
1418 INIT_LIST_HEAD(&probe_ent->node); 1420 INIT_LIST_HEAD(&probe_ent->node);
1419 1421
1420 mmio_base = ioremap(pci_resource_start(pdev, 3), 1422 mmio_base = pci_iomap(pdev, 3, 0);
1421 pci_resource_len(pdev, 3));
1422 if (mmio_base == NULL) { 1423 if (mmio_base == NULL) {
1423 rc = -ENOMEM; 1424 rc = -ENOMEM;
1424 goto err_out_free_ent; 1425 goto err_out_free_ent;
@@ -1432,8 +1433,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1432 } 1433 }
1433 memset(hpriv, 0, sizeof(*hpriv)); 1434 memset(hpriv, 0, sizeof(*hpriv));
1434 1435
1435 dimm_mmio = ioremap(pci_resource_start(pdev, 4), 1436 dimm_mmio = pci_iomap(pdev, 4, 0);
1436 pci_resource_len(pdev, 4));
1437 if (!dimm_mmio) { 1437 if (!dimm_mmio) {
1438 kfree(hpriv); 1438 kfree(hpriv);
1439 rc = -ENOMEM; 1439 rc = -ENOMEM;
@@ -1480,9 +1480,9 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1480 1480
1481err_out_iounmap_dimm: /* only get to this label if 20621 */ 1481err_out_iounmap_dimm: /* only get to this label if 20621 */
1482 kfree(hpriv); 1482 kfree(hpriv);
1483 iounmap(dimm_mmio); 1483 pci_iounmap(pdev, dimm_mmio);
1484err_out_iounmap: 1484err_out_iounmap:
1485 iounmap(mmio_base); 1485 pci_iounmap(pdev, mmio_base);
1486err_out_free_ent: 1486err_out_free_ent:
1487 kfree(probe_ent); 1487 kfree(probe_ent);
1488err_out_regions: 1488err_out_regions:
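sata_sx4 maps two BARs (the chip registers at BAR 3 and the DIMM window at BAR 4), so both pdc20621_host_stop() and the probe error path have to unmap each one through pci_iounmap() with the owning pci_dev. A minimal sketch of that unwind ordering, reusing the same BAR numbers purely for illustration:

	#include <linux/pci.h>

	static int my_probe_two_bars(struct pci_dev *pdev)
	{
		void __iomem *mmio, *dimm_mmio;

		mmio = pci_iomap(pdev, 3, 0);
		if (!mmio)
			return -ENOMEM;

		dimm_mmio = pci_iomap(pdev, 4, 0);
		if (!dimm_mmio) {
			pci_iounmap(pdev, mmio);	/* undo in reverse order */
			return -ENOMEM;
		}

		/* ... device setup would go here ... */

		pci_iounmap(pdev, dimm_mmio);
		pci_iounmap(pdev, mmio);
		return 0;
	}
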
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 1566886815fb..42e13ed8eb5b 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -125,8 +125,8 @@ static struct ata_port_info uli_port_info = {
125 .sht = &uli_sht, 125 .sht = &uli_sht,
126 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | 126 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
127 ATA_FLAG_NO_LEGACY, 127 ATA_FLAG_NO_LEGACY,
128 .pio_mask = 0x03, //support pio mode 4 (FIXME) 128 .pio_mask = 0x1f, /* pio0-4 */
129 .udma_mask = 0x7f, //support udma mode 6 129 .udma_mask = 0x7f, /* udma0-6 */
130 .port_ops = &uli_ops, 130 .port_ops = &uli_ops,
131}; 131};
132 132
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 3985f344da4d..cf94e0158a8d 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -252,7 +252,7 @@ static struct ata_port_operations vsc_sata_ops = {
252 .scr_write = vsc_sata_scr_write, 252 .scr_write = vsc_sata_scr_write,
253 .port_start = ata_port_start, 253 .port_start = ata_port_start,
254 .port_stop = ata_port_stop, 254 .port_stop = ata_port_stop,
255 .host_stop = ata_host_stop, 255 .host_stop = ata_pci_host_stop,
256}; 256};
257 257
258static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) 258static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -326,8 +326,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
326 probe_ent->dev = pci_dev_to_dev(pdev); 326 probe_ent->dev = pci_dev_to_dev(pdev);
327 INIT_LIST_HEAD(&probe_ent->node); 327 INIT_LIST_HEAD(&probe_ent->node);
328 328
329 mmio_base = ioremap(pci_resource_start(pdev, 0), 329 mmio_base = pci_iomap(pdev, 0, 0);
330 pci_resource_len(pdev, 0));
331 if (mmio_base == NULL) { 330 if (mmio_base == NULL) {
332 rc = -ENOMEM; 331 rc = -ENOMEM;
333 goto err_out_free_ent; 332 goto err_out_free_ent;
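sata_sil, sata_svw and sata_vsc all switch .host_stop from ata_host_stop to ata_pci_host_stop: once the MMIO base comes from pci_iomap(), teardown must go back through pci_iounmap() with the owning pci_dev, which is what the PCI-aware helper does on the driver's behalf. A sketch of what such a helper amounts to, using only the host_set fields already seen in these hunks (this is not the libata source):

	#include <linux/pci.h>
	#include <linux/libata.h>

	static void my_pci_host_stop(struct ata_host_set *host_set)
	{
		struct pci_dev *pdev = to_pci_dev(host_set->dev);

		pci_iounmap(pdev, host_set->mmio_base);
	}
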
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d14523d7e449..a780546eda9c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -268,6 +268,7 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
268 } else 268 } else
269 put_device(&dev->sdev_gendev); 269 put_device(&dev->sdev_gendev);
270 270
271 cmd->jiffies_at_alloc = jiffies;
271 return cmd; 272 return cmd;
272} 273}
273EXPORT_SYMBOL(scsi_get_command); 274EXPORT_SYMBOL(scsi_get_command);
@@ -627,7 +628,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
627 spin_lock_irqsave(host->host_lock, flags); 628 spin_lock_irqsave(host->host_lock, flags);
628 scsi_cmd_get_serial(host, cmd); 629 scsi_cmd_get_serial(host, cmd);
629 630
630 if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) { 631 if (unlikely(host->shost_state == SHOST_DEL)) {
631 cmd->result = (DID_NO_CONNECT << 16); 632 cmd->result = (DID_NO_CONNECT << 16);
632 scsi_done(cmd); 633 scsi_done(cmd);
633 } else { 634 } else {
@@ -798,9 +799,23 @@ static void scsi_softirq(struct softirq_action *h)
798 while (!list_empty(&local_q)) { 799 while (!list_empty(&local_q)) {
799 struct scsi_cmnd *cmd = list_entry(local_q.next, 800 struct scsi_cmnd *cmd = list_entry(local_q.next,
800 struct scsi_cmnd, eh_entry); 801 struct scsi_cmnd, eh_entry);
802 /* The longest time any command should be outstanding is the
803 * per command timeout multiplied by the number of retries.
804 *
805 * For a typical command, this is 2.5 minutes */
806 unsigned long wait_for
807 = cmd->allowed * cmd->timeout_per_command;
801 list_del_init(&cmd->eh_entry); 808 list_del_init(&cmd->eh_entry);
802 809
803 disposition = scsi_decide_disposition(cmd); 810 disposition = scsi_decide_disposition(cmd);
811 if (disposition != SUCCESS &&
812 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
813 dev_printk(KERN_ERR, &cmd->device->sdev_gendev,
814 "timing out command, waited %lus\n",
815 wait_for/HZ);
816 disposition = SUCCESS;
817 }
818
804 scsi_log_completion(cmd, disposition); 819 scsi_log_completion(cmd, disposition);
805 switch (disposition) { 820 switch (disposition) {
806 case SUCCESS: 821 case SUCCESS:
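The scsi.c change stamps each command with jiffies_at_alloc at allocation time and then, in the completion softirq, gives it an overall deadline of timeout_per_command multiplied by the number of allowed retries; once that deadline has passed, the disposition is forced to SUCCESS so the command cannot circulate forever. A minimal sketch of the comparison, with my_cmd_expired() as an invented helper:

	#include <linux/jiffies.h>

	/* true once a command has outlived timeout * retries */
	static int my_cmd_expired(unsigned long jiffies_at_alloc,
				  int allowed, int timeout_per_command)
	{
		unsigned long wait_for = allowed * timeout_per_command;

		/* time_before() copes with jiffies wrap-around */
		return time_before(jiffies_at_alloc + wait_for, jiffies);
	}
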
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 6121dc1bfada..07b554affcf2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -114,6 +114,7 @@ static struct {
114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ 114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ 115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
116 {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */ 116 {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */
117 {"", "Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */
117 118
118 /* 119 /*
119 * Other types of devices that have special flags. 120 * Other types of devices that have special flags.
@@ -135,7 +136,7 @@ static struct {
135 {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, 136 {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
136 {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 137 {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
137 {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN}, 138 {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN},
138 {"DEC", "HSG80", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, 139 {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
139 {"DELL", "PV660F", NULL, BLIST_SPARSELUN}, 140 {"DELL", "PV660F", NULL, BLIST_SPARSELUN},
140 {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN}, 141 {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN},
141 {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ 142 {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
@@ -191,6 +192,7 @@ static struct {
191 {"SGI", "RAID5", "*", BLIST_SPARSELUN}, 192 {"SGI", "RAID5", "*", BLIST_SPARSELUN},
192 {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, 193 {"SGI", "TP9100", "*", BLIST_REPORTLUN2},
193 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 194 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
195 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
194 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 196 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
195 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 197 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
196 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 198 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0fc8b48f052b..895c9452be4c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/kthread.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/blkdev.h> 25#include <linux/blkdev.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
@@ -75,7 +76,7 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
75 76
76 scmd->eh_eflags |= eh_flag; 77 scmd->eh_eflags |= eh_flag;
77 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 78 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
78 set_bit(SHOST_RECOVERY, &shost->shost_state); 79 scsi_host_set_state(shost, SHOST_RECOVERY);
79 shost->host_failed++; 80 shost->host_failed++;
80 scsi_eh_wakeup(shost); 81 scsi_eh_wakeup(shost);
81 spin_unlock_irqrestore(shost->host_lock, flags); 82 spin_unlock_irqrestore(shost->host_lock, flags);
@@ -115,7 +116,6 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
115 116
116 add_timer(&scmd->eh_timeout); 117 add_timer(&scmd->eh_timeout);
117} 118}
118EXPORT_SYMBOL(scsi_add_timer);
119 119
120/** 120/**
121 * scsi_delete_timer - Delete/cancel timer for a given function. 121 * scsi_delete_timer - Delete/cancel timer for a given function.
@@ -143,7 +143,6 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
143 143
144 return rtn; 144 return rtn;
145} 145}
146EXPORT_SYMBOL(scsi_delete_timer);
147 146
148/** 147/**
149 * scsi_times_out - Timeout function for normal scsi commands. 148 * scsi_times_out - Timeout function for normal scsi commands.
@@ -197,7 +196,8 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
197{ 196{
198 int online; 197 int online;
199 198
200 wait_event(sdev->host->host_wait, (!test_bit(SHOST_RECOVERY, &sdev->host->shost_state))); 199 wait_event(sdev->host->host_wait, (sdev->host->shost_state !=
200 SHOST_RECOVERY));
201 201
202 online = scsi_device_online(sdev); 202 online = scsi_device_online(sdev);
203 203
@@ -775,9 +775,11 @@ retry_tur:
775 __FUNCTION__, scmd, rtn)); 775 __FUNCTION__, scmd, rtn));
776 if (rtn == SUCCESS) 776 if (rtn == SUCCESS)
777 return 0; 777 return 0;
778 else if (rtn == NEEDS_RETRY) 778 else if (rtn == NEEDS_RETRY) {
779 if (retry_cnt--) 779 if (retry_cnt--)
780 goto retry_tur; 780 goto retry_tur;
781 return 0;
782 }
781 return 1; 783 return 1;
782} 784}
783 785
@@ -1458,7 +1460,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1458 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1460 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1459 __FUNCTION__)); 1461 __FUNCTION__));
1460 1462
1461 clear_bit(SHOST_RECOVERY, &shost->shost_state); 1463 scsi_host_set_state(shost, SHOST_RUNNING);
1462 1464
1463 wake_up(&shost->host_wait); 1465 wake_up(&shost->host_wait);
1464 1466
@@ -1582,16 +1584,8 @@ int scsi_error_handler(void *data)
1582 int rtn; 1584 int rtn;
1583 DECLARE_MUTEX_LOCKED(sem); 1585 DECLARE_MUTEX_LOCKED(sem);
1584 1586
1585 /*
1586 * Flush resources
1587 */
1588
1589 daemonize("scsi_eh_%d", shost->host_no);
1590
1591 current->flags |= PF_NOFREEZE; 1587 current->flags |= PF_NOFREEZE;
1592
1593 shost->eh_wait = &sem; 1588 shost->eh_wait = &sem;
1594 shost->ehandler = current;
1595 1589
1596 /* 1590 /*
1597 * Wake up the thread that created us. 1591 * Wake up the thread that created us.
@@ -1599,8 +1593,6 @@ int scsi_error_handler(void *data)
1599 SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of" 1593 SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
1600 " scsi_eh_%d\n",shost->host_no)); 1594 " scsi_eh_%d\n",shost->host_no));
1601 1595
1602 complete(shost->eh_notify);
1603
1604 while (1) { 1596 while (1) {
1605 /* 1597 /*
1606 * If we get a signal, it means we are supposed to go 1598 * If we get a signal, it means we are supposed to go
@@ -1621,7 +1613,7 @@ int scsi_error_handler(void *data)
1621 * semaphores isn't unreasonable. 1613 * semaphores isn't unreasonable.
1622 */ 1614 */
1623 down_interruptible(&sem); 1615 down_interruptible(&sem);
1624 if (shost->eh_kill) 1616 if (kthread_should_stop())
1625 break; 1617 break;
1626 1618
1627 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler" 1619 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
@@ -1660,22 +1652,6 @@ int scsi_error_handler(void *data)
1660 * Make sure that nobody tries to wake us up again. 1652 * Make sure that nobody tries to wake us up again.
1661 */ 1653 */
1662 shost->eh_wait = NULL; 1654 shost->eh_wait = NULL;
1663
1664 /*
1665 * Knock this down too. From this point on, the host is flying
1666 * without a pilot. If this is because the module is being unloaded,
1667 * that's fine. If the user sent a signal to this thing, we are
1668 * potentially in real danger.
1669 */
1670 shost->eh_active = 0;
1671 shost->ehandler = NULL;
1672
1673 /*
1674 * If anyone is waiting for us to exit (i.e. someone trying to unload
1675 * a driver), then wake up that process to let them know we are on
1676 * the way out the door.
1677 */
1678 complete_and_exit(shost->eh_notify, 0);
1679 return 0; 1655 return 0;
1680} 1656}
1681 1657
@@ -1846,12 +1822,16 @@ EXPORT_SYMBOL(scsi_reset_provider);
1846int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, 1822int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1847 struct scsi_sense_hdr *sshdr) 1823 struct scsi_sense_hdr *sshdr)
1848{ 1824{
1849 if (!sense_buffer || !sb_len || (sense_buffer[0] & 0x70) != 0x70) 1825 if (!sense_buffer || !sb_len)
1850 return 0; 1826 return 0;
1851 1827
1852 memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); 1828 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1853 1829
1854 sshdr->response_code = (sense_buffer[0] & 0x7f); 1830 sshdr->response_code = (sense_buffer[0] & 0x7f);
1831
1832 if (!scsi_sense_valid(sshdr))
1833 return 0;
1834
1855 if (sshdr->response_code >= 0x72) { 1835 if (sshdr->response_code >= 0x72) {
1856 /* 1836 /*
1857 * descriptor format 1837 * descriptor format
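Two threads of change run through scsi_error.c: the host's recovery status stops being a bit in a mask (test_bit/set_bit on SHOST_RECOVERY) and becomes a single enumerated shost_state, set via scsi_host_set_state() and compared directly, and the error-handler thread starts moving to the kthread API, polling kthread_should_stop() instead of a private eh_kill flag. A minimal sketch of such a loop, with my_eh_thread() as an invented thread function (the real handler still sleeps on the host's eh semaphore rather than polling):

	#include <linux/kthread.h>
	#include <linux/delay.h>

	static int my_eh_thread(void *data)
	{
		while (!kthread_should_stop()) {
			/* ... wait for failed commands, run one recovery pass ... */
			msleep_interruptible(100);
		}
		return 0;
	}
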
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 7a6b530115ac..b7fddac81347 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -30,20 +30,20 @@
30 30
31#define MAX_BUF PAGE_SIZE 31#define MAX_BUF PAGE_SIZE
32 32
33/* 33/**
34 * If we are told to probe a host, we will return 0 if the host is not 34 * ioctl_probe -- return host identification
35 * present, 1 if the host is present, and will return an identifying 35 * @host: host to identify
36 * string at *arg, if arg is non null, filling to the length stored at 36 * @buffer: userspace buffer for identification
37 * (int *) arg 37 *
38 * Return an identifying string at @buffer, if @buffer is non-NULL, filling
39 * to the length stored at * (int *) @buffer.
38 */ 40 */
39
40static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) 41static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
41{ 42{
42 unsigned int len, slen; 43 unsigned int len, slen;
43 const char *string; 44 const char *string;
44 int temp = host->hostt->present;
45 45
46 if (temp && buffer) { 46 if (buffer) {
47 if (get_user(len, (unsigned int __user *) buffer)) 47 if (get_user(len, (unsigned int __user *) buffer))
48 return -EFAULT; 48 return -EFAULT;
49 49
@@ -59,7 +59,7 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
59 return -EFAULT; 59 return -EFAULT;
60 } 60 }
61 } 61 }
62 return temp; 62 return 1;
63} 63}
64 64
65/* 65/*
@@ -88,25 +88,18 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
88static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, 88static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
89 int timeout, int retries) 89 int timeout, int retries)
90{ 90{
91 struct scsi_request *sreq;
92 int result; 91 int result;
93 struct scsi_sense_hdr sshdr; 92 struct scsi_sense_hdr sshdr;
94 93
95 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd)); 94 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
96 95
97 sreq = scsi_allocate_request(sdev, GFP_KERNEL); 96 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
98 if (!sreq) { 97 &sshdr, timeout, retries);
99 printk(KERN_WARNING "SCSI internal ioctl failed, no memory\n");
100 return -ENOMEM;
101 }
102
103 sreq->sr_data_direction = DMA_NONE;
104 scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
105 98
106 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", sreq->sr_result)); 99 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
107 100
108 if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && 101 if ((driver_byte(result) & DRIVER_SENSE) &&
109 (scsi_request_normalize_sense(sreq, &sshdr))) { 102 (scsi_sense_valid(&sshdr))) {
110 switch (sshdr.sense_key) { 103 switch (sshdr.sense_key) {
111 case ILLEGAL_REQUEST: 104 case ILLEGAL_REQUEST:
112 if (cmd[0] == ALLOW_MEDIUM_REMOVAL) 105 if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
@@ -125,7 +118,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
125 case UNIT_ATTENTION: 118 case UNIT_ATTENTION:
126 if (sdev->removable) { 119 if (sdev->removable) {
127 sdev->changed = 1; 120 sdev->changed = 1;
128 sreq->sr_result = 0; /* This is no longer considered an error */ 121 result = 0; /* This is no longer considered an error */
129 break; 122 break;
130 } 123 }
131 default: /* Fall through for non-removable media */ 124 default: /* Fall through for non-removable media */
@@ -135,15 +128,13 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
135 sdev->channel, 128 sdev->channel,
136 sdev->id, 129 sdev->id,
137 sdev->lun, 130 sdev->lun,
138 sreq->sr_result); 131 result);
139 scsi_print_req_sense(" ", sreq); 132 scsi_print_sense_hdr(" ", &sshdr);
140 break; 133 break;
141 } 134 }
142 } 135 }
143 136
144 result = sreq->sr_result;
145 SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n")); 137 SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n"));
146 scsi_release_request(sreq);
147 return result; 138 return result;
148} 139}
149 140
@@ -208,8 +199,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
208{ 199{
209 char *buf; 200 char *buf;
210 unsigned char cmd[MAX_COMMAND_SIZE]; 201 unsigned char cmd[MAX_COMMAND_SIZE];
202 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
211 char __user *cmd_in; 203 char __user *cmd_in;
212 struct scsi_request *sreq;
213 unsigned char opcode; 204 unsigned char opcode;
214 unsigned int inlen, outlen, cmdlen; 205 unsigned int inlen, outlen, cmdlen;
215 unsigned int needed, buf_needed; 206 unsigned int needed, buf_needed;
@@ -321,31 +312,23 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
321 break; 312 break;
322 } 313 }
323 314
324 sreq = scsi_allocate_request(sdev, GFP_KERNEL); 315 result = scsi_execute(sdev, cmd, data_direction, buf, needed,
325 if (!sreq) { 316 sense, timeout, retries, 0);
326 result = -EINTR;
327 goto error;
328 }
329
330 sreq->sr_data_direction = data_direction;
331 scsi_wait_req(sreq, cmd, buf, needed, timeout, retries);
332 317
333 /* 318 /*
334 * If there was an error condition, pass the info back to the user. 319 * If there was an error condition, pass the info back to the user.
335 */ 320 */
336 result = sreq->sr_result;
337 if (result) { 321 if (result) {
338 int sb_len = sizeof(sreq->sr_sense_buffer); 322 int sb_len = sizeof(*sense);
339 323
340 sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len; 324 sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len;
341 if (copy_to_user(cmd_in, sreq->sr_sense_buffer, sb_len)) 325 if (copy_to_user(cmd_in, sense, sb_len))
342 result = -EFAULT; 326 result = -EFAULT;
343 } else { 327 } else {
344 if (copy_to_user(cmd_in, buf, outlen)) 328 if (copy_to_user(cmd_in, buf, outlen))
345 result = -EFAULT; 329 result = -EFAULT;
346 } 330 }
347 331
348 scsi_release_request(sreq);
349error: 332error:
350 kfree(buf); 333 kfree(buf);
351 return result; 334 return result;
@@ -475,8 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
475 * error processing, as long as the device was opened 458 * error processing, as long as the device was opened
476 * non-blocking */ 459 * non-blocking */
477 if (filp && filp->f_flags & O_NONBLOCK) { 460 if (filp && filp->f_flags & O_NONBLOCK) {
478 if (test_bit(SHOST_RECOVERY, 461 if (sdev->host->shost_state == SHOST_RECOVERY)
479 &sdev->host->shost_state))
480 return -ENODEV; 462 return -ENODEV;
481 } else if (!scsi_block_when_processing_errors(sdev)) 463 } else if (!scsi_block_when_processing_errors(sdev))
482 return -ENODEV; 464 return -ENODEV;
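Every scsi_ioctl.c helper that used to allocate a struct scsi_request, call scsi_wait_req() and pick sr_result and sr_sense_buffer apart now makes a single scsi_execute_req() call and gets decoded sense back in a struct scsi_sense_hdr. A caller-side sketch of the new shape; my_test_unit_ready() is an invented wrapper and the exact header set is assumed, not taken from this patch:

	#include <linux/dma-mapping.h>
	#include <linux/jiffies.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_eh.h>

	static int my_test_unit_ready(struct scsi_device *sdev)
	{
		unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
		struct scsi_sense_hdr sshdr;
		int result;

		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
					  &sshdr, 30 * HZ, 3);

		if ((driver_byte(result) & DRIVER_SENSE) && scsi_sense_valid(&sshdr))
			return sshdr.sense_key;	/* asc/ascq are decoded too */

		return result;
	}
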
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7a91ca3d32a6..77f2d444f7e0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -232,23 +232,6 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
232} 232}
233EXPORT_SYMBOL(scsi_do_req); 233EXPORT_SYMBOL(scsi_do_req);
234 234
235static void scsi_wait_done(struct scsi_cmnd *cmd)
236{
237 struct request *req = cmd->request;
238 struct request_queue *q = cmd->device->request_queue;
239 unsigned long flags;
240
241 req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
242
243 spin_lock_irqsave(q->queue_lock, flags);
244 if (blk_rq_tagged(req))
245 blk_queue_end_tag(q, req);
246 spin_unlock_irqrestore(q->queue_lock, flags);
247
248 if (req->waiting)
249 complete(req->waiting);
250}
251
252/* This is the end routine we get to if a command was never attached 235/* This is the end routine we get to if a command was never attached
253 * to the request. Simply complete the request without changing 236 * to the request. Simply complete the request without changing
254 * rq_status; this will cause a DRIVER_ERROR. */ 237 * rq_status; this will cause a DRIVER_ERROR. */
@@ -263,21 +246,114 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
263 unsigned bufflen, int timeout, int retries) 246 unsigned bufflen, int timeout, int retries)
264{ 247{
265 DECLARE_COMPLETION(wait); 248 DECLARE_COMPLETION(wait);
266 249 int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
267 sreq->sr_request->waiting = &wait; 250 struct request *req;
268 sreq->sr_request->rq_status = RQ_SCSI_BUSY; 251
269 sreq->sr_request->end_io = scsi_wait_req_end_io; 252 req = blk_get_request(sreq->sr_device->request_queue, write,
270 scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done, 253 __GFP_WAIT);
271 timeout, retries); 254 if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
255 buffer, bufflen, __GFP_WAIT)) {
256 sreq->sr_result = DRIVER_ERROR << 24;
257 blk_put_request(req);
258 return;
259 }
260
261 req->flags |= REQ_NOMERGE;
262 req->waiting = &wait;
263 req->end_io = scsi_wait_req_end_io;
264 req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
265 req->sense = sreq->sr_sense_buffer;
266 req->sense_len = 0;
267 memcpy(req->cmd, cmnd, req->cmd_len);
268 req->timeout = timeout;
269 req->flags |= REQ_BLOCK_PC;
270 req->rq_disk = NULL;
271 blk_insert_request(sreq->sr_device->request_queue, req,
272 sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
272 wait_for_completion(&wait); 273 wait_for_completion(&wait);
273 sreq->sr_request->waiting = NULL; 274 sreq->sr_request->waiting = NULL;
274 if (sreq->sr_request->rq_status != RQ_SCSI_DONE) 275 sreq->sr_result = req->errors;
276 if (req->errors)
275 sreq->sr_result |= (DRIVER_ERROR << 24); 277 sreq->sr_result |= (DRIVER_ERROR << 24);
276 278
277 __scsi_release_request(sreq); 279 blk_put_request(req);
278} 280}
281
279EXPORT_SYMBOL(scsi_wait_req); 282EXPORT_SYMBOL(scsi_wait_req);
280 283
284/**
285 * scsi_execute - insert request and wait for the result
286 * @sdev: scsi device
287 * @cmd: scsi command
288 * @data_direction: data direction
289 * @buffer: data buffer
290 * @bufflen: len of buffer
291 * @sense: optional sense buffer
292 * @timeout: request timeout in seconds
293 * @retries: number of times to retry request
294 * @flags: or into request flags;
295 *
296 * returns the req->errors value which is the scsi_cmnd result
297 * field.
298 **/
299int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
300 int data_direction, void *buffer, unsigned bufflen,
301 unsigned char *sense, int timeout, int retries, int flags)
302{
303 struct request *req;
304 int write = (data_direction == DMA_TO_DEVICE);
305 int ret = DRIVER_ERROR << 24;
306
307 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
308
309 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
310 buffer, bufflen, __GFP_WAIT))
311 goto out;
312
313 req->cmd_len = COMMAND_SIZE(cmd[0]);
314 memcpy(req->cmd, cmd, req->cmd_len);
315 req->sense = sense;
316 req->sense_len = 0;
317 req->timeout = timeout;
318 req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
319
320 /*
321 * head injection *required* here otherwise quiesce won't work
322 */
323 blk_execute_rq(req->q, NULL, req, 1);
324
325 ret = req->errors;
326 out:
327 blk_put_request(req);
328
329 return ret;
330}
331EXPORT_SYMBOL(scsi_execute);
332
333
334int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
335 int data_direction, void *buffer, unsigned bufflen,
336 struct scsi_sense_hdr *sshdr, int timeout, int retries)
337{
338 char *sense = NULL;
339 int result;
340
341 if (sshdr) {
342 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
343 if (!sense)
344 return DRIVER_ERROR << 24;
345 memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
346 }
347 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
348 sense, timeout, retries, 0);
349 if (sshdr)
350 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
351
352 kfree(sense);
353 return result;
354}
355EXPORT_SYMBOL(scsi_execute_req);
356
281/* 357/*
282 * Function: scsi_init_cmd_errh() 358 * Function: scsi_init_cmd_errh()
283 * 359 *
@@ -348,7 +424,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
348 424
349 spin_lock_irqsave(shost->host_lock, flags); 425 spin_lock_irqsave(shost->host_lock, flags);
350 shost->host_busy--; 426 shost->host_busy--;
351 if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) && 427 if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
352 shost->host_failed)) 428 shost->host_failed))
353 scsi_eh_wakeup(shost); 429 scsi_eh_wakeup(shost);
354 spin_unlock(shost->host_lock); 430 spin_unlock(shost->host_lock);
@@ -851,17 +927,20 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
851 scsi_requeue_command(q, cmd); 927 scsi_requeue_command(q, cmd);
852 return; 928 return;
853 } 929 }
854 printk(KERN_INFO "Device %s not ready.\n", 930 if (!(req->flags & REQ_QUIET))
855 req->rq_disk ? req->rq_disk->disk_name : ""); 931 dev_printk(KERN_INFO,
932 &cmd->device->sdev_gendev,
933 "Device not ready.\n");
856 cmd = scsi_end_request(cmd, 0, this_count, 1); 934 cmd = scsi_end_request(cmd, 0, this_count, 1);
857 return; 935 return;
858 case VOLUME_OVERFLOW: 936 case VOLUME_OVERFLOW:
859 printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ", 937 if (!(req->flags & REQ_QUIET)) {
860 cmd->device->host->host_no, 938 dev_printk(KERN_INFO,
861 (int)cmd->device->channel, 939 &cmd->device->sdev_gendev,
862 (int)cmd->device->id, (int)cmd->device->lun); 940 "Volume overflow, CDB: ");
863 __scsi_print_command(cmd->data_cmnd); 941 __scsi_print_command(cmd->data_cmnd);
864 scsi_print_sense("", cmd); 942 scsi_print_sense("", cmd);
943 }
865 cmd = scsi_end_request(cmd, 0, block_bytes, 1); 944 cmd = scsi_end_request(cmd, 0, block_bytes, 1);
866 return; 945 return;
867 default: 946 default:
@@ -878,14 +957,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
878 return; 957 return;
879 } 958 }
880 if (result) { 959 if (result) {
881 printk(KERN_INFO "SCSI error : <%d %d %d %d> return code " 960 if (!(req->flags & REQ_QUIET)) {
882 "= 0x%x\n", cmd->device->host->host_no, 961 dev_printk(KERN_INFO, &cmd->device->sdev_gendev,
883 cmd->device->channel, 962 "SCSI error: return code = 0x%x\n", result);
884 cmd->device->id, 963
885 cmd->device->lun, result); 964 if (driver_byte(result) & DRIVER_SENSE)
886 965 scsi_print_sense("", cmd);
887 if (driver_byte(result) & DRIVER_SENSE) 966 }
888 scsi_print_sense("", cmd);
889 /* 967 /*
890 * Mark a single buffer as not uptodate. Queue the remainder. 968 * Mark a single buffer as not uptodate. Queue the remainder.
891 * We sometimes get this cruft in the event that a medium error 969 * We sometimes get this cruft in the event that a medium error
@@ -1020,6 +1098,12 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1020 return -EOPNOTSUPP; 1098 return -EOPNOTSUPP;
1021} 1099}
1022 1100
1101static void scsi_generic_done(struct scsi_cmnd *cmd)
1102{
1103 BUG_ON(!blk_pc_request(cmd->request));
1104 scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
1105}
1106
1023static int scsi_prep_fn(struct request_queue *q, struct request *req) 1107static int scsi_prep_fn(struct request_queue *q, struct request *req)
1024{ 1108{
1025 struct scsi_device *sdev = q->queuedata; 1109 struct scsi_device *sdev = q->queuedata;
@@ -1061,7 +1145,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1061 * these two cases differently. We differentiate by looking 1145 * these two cases differently. We differentiate by looking
1062 * at request->cmd, as this tells us the real story. 1146 * at request->cmd, as this tells us the real story.
1063 */ 1147 */
1064 if (req->flags & REQ_SPECIAL) { 1148 if (req->flags & REQ_SPECIAL && req->special) {
1065 struct scsi_request *sreq = req->special; 1149 struct scsi_request *sreq = req->special;
1066 1150
1067 if (sreq->sr_magic == SCSI_REQ_MAGIC) { 1151 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
@@ -1073,7 +1157,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1073 cmd = req->special; 1157 cmd = req->special;
1074 } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { 1158 } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1075 1159
1076 if(unlikely(specials_only)) { 1160 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1077 if(specials_only == SDEV_QUIESCE || 1161 if(specials_only == SDEV_QUIESCE ||
1078 specials_only == SDEV_BLOCK) 1162 specials_only == SDEV_BLOCK)
1079 return BLKPREP_DEFER; 1163 return BLKPREP_DEFER;
@@ -1142,11 +1226,26 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1142 /* 1226 /*
1143 * Initialize the actual SCSI command for this request. 1227 * Initialize the actual SCSI command for this request.
1144 */ 1228 */
1145 drv = *(struct scsi_driver **)req->rq_disk->private_data; 1229 if (req->rq_disk) {
1146 if (unlikely(!drv->init_command(cmd))) { 1230 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1147 scsi_release_buffers(cmd); 1231 if (unlikely(!drv->init_command(cmd))) {
1148 scsi_put_command(cmd); 1232 scsi_release_buffers(cmd);
1149 return BLKPREP_KILL; 1233 scsi_put_command(cmd);
1234 return BLKPREP_KILL;
1235 }
1236 } else {
1237 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1238 if (rq_data_dir(req) == WRITE)
1239 cmd->sc_data_direction = DMA_TO_DEVICE;
1240 else if (req->data_len)
1241 cmd->sc_data_direction = DMA_FROM_DEVICE;
1242 else
1243 cmd->sc_data_direction = DMA_NONE;
1244
1245 cmd->transfersize = req->data_len;
1246 cmd->allowed = 3;
1247 cmd->timeout_per_command = req->timeout;
1248 cmd->done = scsi_generic_done;
1150 } 1249 }
1151 } 1250 }
1152 1251
@@ -1207,7 +1306,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1207 struct Scsi_Host *shost, 1306 struct Scsi_Host *shost,
1208 struct scsi_device *sdev) 1307 struct scsi_device *sdev)
1209{ 1308{
1210 if (test_bit(SHOST_RECOVERY, &shost->shost_state)) 1309 if (shost->shost_state == SHOST_RECOVERY)
1211 return 0; 1310 return 0;
1212 if (shost->host_busy == 0 && shost->host_blocked) { 1311 if (shost->host_busy == 0 && shost->host_blocked) {
1213 /* 1312 /*
@@ -1539,9 +1638,9 @@ void scsi_exit_queue(void)
1539 } 1638 }
1540} 1639}
1541/** 1640/**
1542 * __scsi_mode_sense - issue a mode sense, falling back from 10 to 1641 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1543 * six bytes if necessary. 1642 * six bytes if necessary.
1544 * @sreq: SCSI request to fill in with the MODE_SENSE 1643 * @sdev: SCSI device to be queried
1545 * @dbd: set if mode sense will allow block descriptors to be returned 1644 * @dbd: set if mode sense will allow block descriptors to be returned
1546 * @modepage: mode page being requested 1645 * @modepage: mode page being requested
1547 * @buffer: request buffer (may not be smaller than eight bytes) 1646 * @buffer: request buffer (may not be smaller than eight bytes)
@@ -1549,26 +1648,34 @@ void scsi_exit_queue(void)
1549 * @timeout: command timeout 1648 * @timeout: command timeout
1550 * @retries: number of retries before failing 1649 * @retries: number of retries before failing
1551 * @data: returns a structure abstracting the mode header data 1650 * @data: returns a structure abstracting the mode header data
1651 * @sense: place to put sense data (or NULL if no sense to be collected).
1652 * must be SCSI_SENSE_BUFFERSIZE big.
1552 * 1653 *
1553 * Returns zero if unsuccessful, or the header offset (either 4 1654 * Returns zero if unsuccessful, or the header offset (either 4
1554 * or 8 depending on whether a six or ten byte command was 1655 * or 8 depending on whether a six or ten byte command was
1555 * issued) if successful. 1656 * issued) if successful.
1556 **/ 1657 **/
1557int 1658int
1558__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage, 1659scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1559 unsigned char *buffer, int len, int timeout, int retries, 1660 unsigned char *buffer, int len, int timeout, int retries,
1560 struct scsi_mode_data *data) { 1661 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1561 unsigned char cmd[12]; 1662 unsigned char cmd[12];
1562 int use_10_for_ms; 1663 int use_10_for_ms;
1563 int header_length; 1664 int header_length;
1665 int result;
1666 struct scsi_sense_hdr my_sshdr;
1564 1667
1565 memset(data, 0, sizeof(*data)); 1668 memset(data, 0, sizeof(*data));
1566 memset(&cmd[0], 0, 12); 1669 memset(&cmd[0], 0, 12);
1567 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1670 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1568 cmd[2] = modepage; 1671 cmd[2] = modepage;
1569 1672
1673 /* caller might not be interested in sense, but we need it */
1674 if (!sshdr)
1675 sshdr = &my_sshdr;
1676
1570 retry: 1677 retry:
1571 use_10_for_ms = sreq->sr_device->use_10_for_ms; 1678 use_10_for_ms = sdev->use_10_for_ms;
1572 1679
1573 if (use_10_for_ms) { 1680 if (use_10_for_ms) {
1574 if (len < 8) 1681 if (len < 8)
@@ -1586,36 +1693,31 @@ __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1586 header_length = 4; 1693 header_length = 4;
1587 } 1694 }
1588 1695
1589 sreq->sr_cmd_len = 0;
1590 memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
1591 sreq->sr_data_direction = DMA_FROM_DEVICE;
1592
1593 memset(buffer, 0, len); 1696 memset(buffer, 0, len);
1594 1697
1595 scsi_wait_req(sreq, cmd, buffer, len, timeout, retries); 1698 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1699 sshdr, timeout, retries);
1596 1700
1597 /* This code looks awful: what it's doing is making sure an 1701 /* This code looks awful: what it's doing is making sure an
1598 * ILLEGAL REQUEST sense return identifies the actual command 1702 * ILLEGAL REQUEST sense return identifies the actual command
1599 * byte as the problem. MODE_SENSE commands can return 1703 * byte as the problem. MODE_SENSE commands can return
1600 * ILLEGAL REQUEST if the code page isn't supported */ 1704 * ILLEGAL REQUEST if the code page isn't supported */
1601 1705
1602 if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) && 1706 if (use_10_for_ms && !scsi_status_is_good(result) &&
1603 (driver_byte(sreq->sr_result) & DRIVER_SENSE)) { 1707 (driver_byte(result) & DRIVER_SENSE)) {
1604 struct scsi_sense_hdr sshdr; 1708 if (scsi_sense_valid(sshdr)) {
1605 1709 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1606 if (scsi_request_normalize_sense(sreq, &sshdr)) { 1710 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1607 if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1608 (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1609 /* 1711 /*
1610 * Invalid command operation code 1712 * Invalid command operation code
1611 */ 1713 */
1612 sreq->sr_device->use_10_for_ms = 0; 1714 sdev->use_10_for_ms = 0;
1613 goto retry; 1715 goto retry;
1614 } 1716 }
1615 } 1717 }
1616 } 1718 }
1617 1719
1618 if(scsi_status_is_good(sreq->sr_result)) { 1720 if(scsi_status_is_good(result)) {
1619 data->header_length = header_length; 1721 data->header_length = header_length;
1620 if(use_10_for_ms) { 1722 if(use_10_for_ms) {
1621 data->length = buffer[0]*256 + buffer[1] + 2; 1723 data->length = buffer[0]*256 + buffer[1] + 2;
@@ -1632,73 +1734,31 @@ __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1632 } 1734 }
1633 } 1735 }
1634 1736
1635 return sreq->sr_result; 1737 return result;
1636}
1637EXPORT_SYMBOL(__scsi_mode_sense);
1638
1639/**
1640 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1641 * six bytes if necessary.
1642 * @sdev: scsi device to send command to.
1643 * @dbd: set if mode sense will disable block descriptors in the return
1644 * @modepage: mode page being requested
1645 * @buffer: request buffer (may not be smaller than eight bytes)
1646 * @len: length of request buffer.
1647 * @timeout: command timeout
1648 * @retries: number of retries before failing
1649 *
1650 * Returns zero if unsuccessful, or the header offset (either 4
1651 * or 8 depending on whether a six or ten byte command was
1652 * issued) if successful.
1653 **/
1654int
1655scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1656 unsigned char *buffer, int len, int timeout, int retries,
1657 struct scsi_mode_data *data)
1658{
1659 struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1660 int ret;
1661
1662 if (!sreq)
1663 return -1;
1664
1665 ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1666 timeout, retries, data);
1667
1668 scsi_release_request(sreq);
1669
1670 return ret;
1671} 1738}
1672EXPORT_SYMBOL(scsi_mode_sense); 1739EXPORT_SYMBOL(scsi_mode_sense);
1673 1740
1674int 1741int
1675scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) 1742scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1676{ 1743{
1677 struct scsi_request *sreq;
1678 char cmd[] = { 1744 char cmd[] = {
1679 TEST_UNIT_READY, 0, 0, 0, 0, 0, 1745 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1680 }; 1746 };
1747 struct scsi_sense_hdr sshdr;
1681 int result; 1748 int result;
1682 1749
1683 sreq = scsi_allocate_request(sdev, GFP_KERNEL); 1750 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1684 if (!sreq) 1751 timeout, retries);
1685 return -ENOMEM;
1686
1687 sreq->sr_data_direction = DMA_NONE;
1688 scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
1689 1752
1690 if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) { 1753 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1691 struct scsi_sense_hdr sshdr;
1692 1754
1693 if ((scsi_request_normalize_sense(sreq, &sshdr)) && 1755 if ((scsi_sense_valid(&sshdr)) &&
1694 ((sshdr.sense_key == UNIT_ATTENTION) || 1756 ((sshdr.sense_key == UNIT_ATTENTION) ||
1695 (sshdr.sense_key == NOT_READY))) { 1757 (sshdr.sense_key == NOT_READY))) {
1696 sdev->changed = 1; 1758 sdev->changed = 1;
1697 sreq->sr_result = 0; 1759 result = 0;
1698 } 1760 }
1699 } 1761 }
1700 result = sreq->sr_result;
1701 scsi_release_request(sreq);
1702 return result; 1762 return result;
1703} 1763}
1704EXPORT_SYMBOL(scsi_test_unit_ready); 1764EXPORT_SYMBOL(scsi_test_unit_ready);
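At the bottom of the scsi_lib.c changes, scsi_prep_fn() learns to build a command for block-PC requests that arrive without an rq_disk (and therefore without an upper-level driver init_command): the CDB is copied straight from req->cmd and the DMA direction is derived from the request itself. A minimal sketch of that direction choice, mirroring the new else branch; my_pick_direction() is an invented helper:

	#include <linux/dma-mapping.h>

	static enum dma_data_direction my_pick_direction(int is_write,
							 unsigned int data_len)
	{
		if (is_write)
			return DMA_TO_DEVICE;	/* rq_data_dir(req) == WRITE */
		if (data_len)
			return DMA_FROM_DEVICE;	/* data expected back */
		return DMA_NONE;		/* no payload at all */
	}
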
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d30d7f4e63ec..ee6de1768e53 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -63,6 +63,9 @@ extern int __init scsi_init_devinfo(void);
63extern void scsi_exit_devinfo(void); 63extern void scsi_exit_devinfo(void);
64 64
65/* scsi_error.c */ 65/* scsi_error.c */
66extern void scsi_add_timer(struct scsi_cmnd *, int,
67 void (*)(struct scsi_cmnd *));
68extern int scsi_delete_timer(struct scsi_cmnd *);
66extern void scsi_times_out(struct scsi_cmnd *cmd); 69extern void scsi_times_out(struct scsi_cmnd *cmd);
67extern int scsi_error_handler(void *host); 70extern int scsi_error_handler(void *host);
68extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 71extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 48edd67982a5..19c9a232a754 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -111,15 +111,14 @@ MODULE_PARM_DESC(inq_timeout,
111 111
112/** 112/**
113 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 113 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
114 * @sreq: used to send the command 114 * @sdev: scsi device to send command to
115 * @result: area to store the result of the MODE SENSE 115 * @result: area to store the result of the MODE SENSE
116 * 116 *
117 * Description: 117 * Description:
118 * Send a vendor specific MODE SENSE (not a MODE SELECT) command using 118 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
119 * @sreq to unlock a device, storing the (unused) results into result.
120 * Called for BLIST_KEY devices. 119 * Called for BLIST_KEY devices.
121 **/ 120 **/
122static void scsi_unlock_floptical(struct scsi_request *sreq, 121static void scsi_unlock_floptical(struct scsi_device *sdev,
123 unsigned char *result) 122 unsigned char *result)
124{ 123{
125 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 124 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -129,11 +128,10 @@ static void scsi_unlock_floptical(struct scsi_request *sreq,
129 scsi_cmd[1] = 0; 128 scsi_cmd[1] = 0;
130 scsi_cmd[2] = 0x2e; 129 scsi_cmd[2] = 0x2e;
131 scsi_cmd[3] = 0; 130 scsi_cmd[3] = 0;
132 scsi_cmd[4] = 0x2a; /* size */ 131 scsi_cmd[4] = 0x2a; /* size */
133 scsi_cmd[5] = 0; 132 scsi_cmd[5] = 0;
134 sreq->sr_cmd_len = 0; 133 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
135 sreq->sr_data_direction = DMA_FROM_DEVICE; 134 SCSI_TIMEOUT, 3);
136 scsi_wait_req(sreq, scsi_cmd, result, 0x2a /* size */, SCSI_TIMEOUT, 3);
137} 135}
138 136
139/** 137/**
@@ -433,26 +431,25 @@ void scsi_target_reap(struct scsi_target *starget)
433 431
434/** 432/**
435 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 433 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
436 * @sreq: used to send the INQUIRY 434 * @sdev: scsi_device to probe
437 * @inq_result: area to store the INQUIRY result 435 * @inq_result: area to store the INQUIRY result
436 * @result_len: len of inq_result
438 * @bflags: store any bflags found here 437 * @bflags: store any bflags found here
439 * 438 *
440 * Description: 439 * Description:
441 * Probe the lun associated with @sreq using a standard SCSI INQUIRY; 440 * Probe the lun associated with @sdev using a standard SCSI INQUIRY;
442 * 441 *
443 * If the INQUIRY is successful, sreq->sr_result is zero and: the 442 * If the INQUIRY is successful, zero is returned and the
444 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 443 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
445 * are copied to the Scsi_Device at @sreq->sr_device (sdev); 444 * are copied to the Scsi_Device; any flags value is stored in *@bflags.
446 * any flags value is stored in *@bflags.
447 **/ 445 **/
448static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result, 446static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
449 int *bflags) 447 int result_len, int *bflags)
450{ 448{
451 struct scsi_device *sdev = sreq->sr_device; /* a bit ugly */
452 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 449 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
453 int first_inquiry_len, try_inquiry_len, next_inquiry_len; 450 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
454 int response_len = 0; 451 int response_len = 0;
455 int pass, count; 452 int pass, count, result;
456 struct scsi_sense_hdr sshdr; 453 struct scsi_sense_hdr sshdr;
457 454
458 *bflags = 0; 455 *bflags = 0;
@@ -475,28 +472,26 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
475 memset(scsi_cmd, 0, 6); 472 memset(scsi_cmd, 0, 6);
476 scsi_cmd[0] = INQUIRY; 473 scsi_cmd[0] = INQUIRY;
477 scsi_cmd[4] = (unsigned char) try_inquiry_len; 474 scsi_cmd[4] = (unsigned char) try_inquiry_len;
478 sreq->sr_cmd_len = 0;
479 sreq->sr_data_direction = DMA_FROM_DEVICE;
480 475
481 memset(inq_result, 0, try_inquiry_len); 476 memset(inq_result, 0, try_inquiry_len);
482 scsi_wait_req(sreq, (void *) scsi_cmd, (void *) inq_result, 477
483 try_inquiry_len, 478 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
484 HZ/2 + HZ*scsi_inq_timeout, 3); 479 inq_result, try_inquiry_len, &sshdr,
480 HZ / 2 + HZ * scsi_inq_timeout, 3);
485 481
486 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " 482 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
487 "with code 0x%x\n", 483 "with code 0x%x\n",
488 sreq->sr_result ? "failed" : "successful", 484 result ? "failed" : "successful", result));
489 sreq->sr_result));
490 485
491 if (sreq->sr_result) { 486 if (result) {
492 /* 487 /*
493 * not-ready to ready transition [asc/ascq=0x28/0x0] 488 * not-ready to ready transition [asc/ascq=0x28/0x0]
494 * or power-on, reset [asc/ascq=0x29/0x0], continue. 489 * or power-on, reset [asc/ascq=0x29/0x0], continue.
495 * INQUIRY should not yield UNIT_ATTENTION 490 * INQUIRY should not yield UNIT_ATTENTION
496 * but many buggy devices do so anyway. 491 * but many buggy devices do so anyway.
497 */ 492 */
498 if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && 493 if ((driver_byte(result) & DRIVER_SENSE) &&
499 scsi_request_normalize_sense(sreq, &sshdr)) { 494 scsi_sense_valid(&sshdr)) {
500 if ((sshdr.sense_key == UNIT_ATTENTION) && 495 if ((sshdr.sense_key == UNIT_ATTENTION) &&
501 ((sshdr.asc == 0x28) || 496 ((sshdr.asc == 0x28) ||
502 (sshdr.asc == 0x29)) && 497 (sshdr.asc == 0x29)) &&
@@ -507,7 +502,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
507 break; 502 break;
508 } 503 }
509 504
510 if (sreq->sr_result == 0) { 505 if (result == 0) {
511 response_len = (unsigned char) inq_result[4] + 5; 506 response_len = (unsigned char) inq_result[4] + 5;
512 if (response_len > 255) 507 if (response_len > 255)
513 response_len = first_inquiry_len; /* sanity */ 508 response_len = first_inquiry_len; /* sanity */
@@ -556,8 +551,8 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
556 551
557 /* If the last transfer attempt got an error, assume the 552 /* If the last transfer attempt got an error, assume the
558 * peripheral doesn't exist or is dead. */ 553 * peripheral doesn't exist or is dead. */
559 if (sreq->sr_result) 554 if (result)
560 return; 555 return -EIO;
561 556
562 /* Don't report any more data than the device says is valid */ 557 /* Don't report any more data than the device says is valid */
563 sdev->inquiry_len = min(try_inquiry_len, response_len); 558 sdev->inquiry_len = min(try_inquiry_len, response_len);
@@ -593,7 +588,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
593 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 588 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
594 sdev->scsi_level++; 589 sdev->scsi_level++;
595 590
596 return; 591 return 0;
597} 592}
598 593
599/** 594/**
@@ -800,9 +795,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
800 void *hostdata) 795 void *hostdata)
801{ 796{
802 struct scsi_device *sdev; 797 struct scsi_device *sdev;
803 struct scsi_request *sreq;
804 unsigned char *result; 798 unsigned char *result;
805 int bflags, res = SCSI_SCAN_NO_RESPONSE; 799 int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
806 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 800 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
807 801
808 /* 802 /*
@@ -831,16 +825,13 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
831 sdev = scsi_alloc_sdev(starget, lun, hostdata); 825 sdev = scsi_alloc_sdev(starget, lun, hostdata);
832 if (!sdev) 826 if (!sdev)
833 goto out; 827 goto out;
834 sreq = scsi_allocate_request(sdev, GFP_ATOMIC); 828
835 if (!sreq) 829 result = kmalloc(result_len, GFP_ATOMIC |
836 goto out_free_sdev;
837 result = kmalloc(256, GFP_ATOMIC |
838 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0)); 830 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
839 if (!result) 831 if (!result)
840 goto out_free_sreq; 832 goto out_free_sdev;
841 833
842 scsi_probe_lun(sreq, result, &bflags); 834 if (scsi_probe_lun(sdev, result, result_len, &bflags))
843 if (sreq->sr_result)
844 goto out_free_result; 835 goto out_free_result;
845 836
846 /* 837 /*
@@ -868,7 +859,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
868 if (res == SCSI_SCAN_LUN_PRESENT) { 859 if (res == SCSI_SCAN_LUN_PRESENT) {
869 if (bflags & BLIST_KEY) { 860 if (bflags & BLIST_KEY) {
870 sdev->lockable = 0; 861 sdev->lockable = 0;
871 scsi_unlock_floptical(sreq, result); 862 scsi_unlock_floptical(sdev, result);
872 } 863 }
873 if (bflagsp) 864 if (bflagsp)
874 *bflagsp = bflags; 865 *bflagsp = bflags;
@@ -876,8 +867,6 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
876 867
877 out_free_result: 868 out_free_result:
878 kfree(result); 869 kfree(result);
879 out_free_sreq:
880 scsi_release_request(sreq);
881 out_free_sdev: 870 out_free_sdev:
882 if (res == SCSI_SCAN_LUN_PRESENT) { 871 if (res == SCSI_SCAN_LUN_PRESENT) {
883 if (sdevp) { 872 if (sdevp) {
@@ -1070,8 +1059,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1070 unsigned int lun; 1059 unsigned int lun;
1071 unsigned int num_luns; 1060 unsigned int num_luns;
1072 unsigned int retries; 1061 unsigned int retries;
1062 int result;
1073 struct scsi_lun *lunp, *lun_data; 1063 struct scsi_lun *lunp, *lun_data;
1074 struct scsi_request *sreq;
1075 u8 *data; 1064 u8 *data;
1076 struct scsi_sense_hdr sshdr; 1065 struct scsi_sense_hdr sshdr;
1077 struct scsi_target *starget = scsi_target(sdev); 1066 struct scsi_target *starget = scsi_target(sdev);
@@ -1089,10 +1078,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1089 if (bflags & BLIST_NOLUN) 1078 if (bflags & BLIST_NOLUN)
1090 return 0; 1079 return 0;
1091 1080
1092 sreq = scsi_allocate_request(sdev, GFP_ATOMIC);
1093 if (!sreq)
1094 goto out;
1095
1096 sprintf(devname, "host %d channel %d id %d", 1081 sprintf(devname, "host %d channel %d id %d",
1097 sdev->host->host_no, sdev->channel, sdev->id); 1082 sdev->host->host_no, sdev->channel, sdev->id);
1098 1083
@@ -1110,7 +1095,7 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1110 lun_data = kmalloc(length, GFP_ATOMIC | 1095 lun_data = kmalloc(length, GFP_ATOMIC |
1111 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1096 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1112 if (!lun_data) 1097 if (!lun_data)
1113 goto out_release_request; 1098 goto out;
1114 1099
1115 scsi_cmd[0] = REPORT_LUNS; 1100 scsi_cmd[0] = REPORT_LUNS;
1116 1101
@@ -1129,8 +1114,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1129 1114
1130 scsi_cmd[10] = 0; /* reserved */ 1115 scsi_cmd[10] = 0; /* reserved */
1131 scsi_cmd[11] = 0; /* control */ 1116 scsi_cmd[11] = 0; /* control */
1132 sreq->sr_cmd_len = 0;
1133 sreq->sr_data_direction = DMA_FROM_DEVICE;
1134 1117
1135 /* 1118 /*
1136 * We can get a UNIT ATTENTION, for example a power on/reset, so 1119 * We can get a UNIT ATTENTION, for example a power on/reset, so
@@ -1146,29 +1129,29 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1146 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending" 1129 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending"
1147 " REPORT LUNS to %s (try %d)\n", devname, 1130 " REPORT LUNS to %s (try %d)\n", devname,
1148 retries)); 1131 retries));
1149 scsi_wait_req(sreq, scsi_cmd, lun_data, length, 1132
1150 SCSI_TIMEOUT + 4*HZ, 3); 1133 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1134 lun_data, length, &sshdr,
1135 SCSI_TIMEOUT + 4 * HZ, 3);
1136
1151 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" 1137 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
1152 " %s (try %d) result 0x%x\n", sreq->sr_result 1138 " %s (try %d) result 0x%x\n", result
1153 ? "failed" : "successful", retries, 1139 ? "failed" : "successful", retries, result));
1154 sreq->sr_result)); 1140 if (result == 0)
1155 if (sreq->sr_result == 0)
1156 break; 1141 break;
1157 else if (scsi_request_normalize_sense(sreq, &sshdr)) { 1142 else if (scsi_sense_valid(&sshdr)) {
1158 if (sshdr.sense_key != UNIT_ATTENTION) 1143 if (sshdr.sense_key != UNIT_ATTENTION)
1159 break; 1144 break;
1160 } 1145 }
1161 } 1146 }
1162 1147
1163 if (sreq->sr_result) { 1148 if (result) {
1164 /* 1149 /*
1165 * The device probably does not support a REPORT LUN command 1150 * The device probably does not support a REPORT LUN command
1166 */ 1151 */
1167 kfree(lun_data); 1152 kfree(lun_data);
1168 scsi_release_request(sreq);
1169 return 1; 1153 return 1;
1170 } 1154 }
1171 scsi_release_request(sreq);
1172 1155
1173 /* 1156 /*
1174 * Get the length from the first four bytes of lun_data. 1157 * Get the length from the first four bytes of lun_data.
@@ -1242,8 +1225,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1242 kfree(lun_data); 1225 kfree(lun_data);
1243 return 0; 1226 return 0;
1244 1227
1245 out_release_request:
1246 scsi_release_request(sreq);
1247 out: 1228 out:
1248 /* 1229 /*
1249 * We are out of memory; don't try scanning any further. 1230 * We are out of memory; don't try scanning any further.
@@ -1265,9 +1246,12 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1265 1246
1266 get_device(&starget->dev); 1247 get_device(&starget->dev);
1267 down(&shost->scan_mutex); 1248 down(&shost->scan_mutex);
1268 res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1249 if (scsi_host_scan_allowed(shost)) {
1269 if (res != SCSI_SCAN_LUN_PRESENT) 1250 res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1,
1270 sdev = ERR_PTR(-ENODEV); 1251 hostdata);
1252 if (res != SCSI_SCAN_LUN_PRESENT)
1253 sdev = ERR_PTR(-ENODEV);
1254 }
1271 up(&shost->scan_mutex); 1255 up(&shost->scan_mutex);
1272 scsi_target_reap(starget); 1256 scsi_target_reap(starget);
1273 put_device(&starget->dev); 1257 put_device(&starget->dev);
@@ -1417,11 +1401,15 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1417 return -EINVAL; 1401 return -EINVAL;
1418 1402
1419 down(&shost->scan_mutex); 1403 down(&shost->scan_mutex);
1420 if (channel == SCAN_WILD_CARD) 1404 if (scsi_host_scan_allowed(shost)) {
1421 for (channel = 0; channel <= shost->max_channel; channel++) 1405 if (channel == SCAN_WILD_CARD)
1406 for (channel = 0; channel <= shost->max_channel;
1407 channel++)
1408 scsi_scan_channel(shost, channel, id, lun,
1409 rescan);
1410 else
1422 scsi_scan_channel(shost, channel, id, lun, rescan); 1411 scsi_scan_channel(shost, channel, id, lun, rescan);
1423 else 1412 }
1424 scsi_scan_channel(shost, channel, id, lun, rescan);
1425 up(&shost->scan_mutex); 1413 up(&shost->scan_mutex);
1426 1414
1427 return 0; 1415 return 0;
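Both scan entry points above (__scsi_add_device() and scsi_scan_host_selected()) now take the same shape: grab scan_mutex, check scsi_host_scan_allowed(), and only then walk channels or probe the LUN. A condensed sketch of that pattern as it would sit inside scsi_scan.c; the wrapper name is illustrative:

static int my_scan_if_allowed(struct Scsi_Host *shost)
{
	int allowed;

	down(&shost->scan_mutex);
	allowed = scsi_host_scan_allowed(shost);
	if (allowed) {
		/* scsi_scan_channel() / scsi_probe_and_add_lun() go here */
	}
	up(&shost->scan_mutex);

	return allowed ? 0 : -ENODEV;
}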
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index beed7fbe1cbe..dae59d1da07a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -48,6 +48,30 @@ const char *scsi_device_state_name(enum scsi_device_state state)
48 return name; 48 return name;
49} 49}
50 50
51static struct {
52 enum scsi_host_state value;
53 char *name;
54} shost_states[] = {
55 { SHOST_CREATED, "created" },
56 { SHOST_RUNNING, "running" },
57 { SHOST_CANCEL, "cancel" },
58 { SHOST_DEL, "deleted" },
59 { SHOST_RECOVERY, "recovery" },
60};
61const char *scsi_host_state_name(enum scsi_host_state state)
62{
63 int i;
64 char *name = NULL;
65
66 for (i = 0; i < sizeof(shost_states)/sizeof(shost_states[0]); i++) {
67 if (shost_states[i].value == state) {
68 name = shost_states[i].name;
69 break;
70 }
71 }
72 return name;
73}
74
51static int check_set(unsigned int *val, char *src) 75static int check_set(unsigned int *val, char *src)
52{ 76{
53 char *last; 77 char *last;
@@ -124,6 +148,43 @@ static ssize_t store_scan(struct class_device *class_dev, const char *buf,
124}; 148};
125static CLASS_DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); 149static CLASS_DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
126 150
151static ssize_t
152store_shost_state(struct class_device *class_dev, const char *buf, size_t count)
153{
154 int i;
155 struct Scsi_Host *shost = class_to_shost(class_dev);
156 enum scsi_host_state state = 0;
157
158 for (i = 0; i < sizeof(shost_states)/sizeof(shost_states[0]); i++) {
159 const int len = strlen(shost_states[i].name);
160 if (strncmp(shost_states[i].name, buf, len) == 0 &&
161 buf[len] == '\n') {
162 state = shost_states[i].value;
163 break;
164 }
165 }
166 if (!state)
167 return -EINVAL;
168
169 if (scsi_host_set_state(shost, state))
170 return -EINVAL;
171 return count;
172}
173
174static ssize_t
175show_shost_state(struct class_device *class_dev, char *buf)
176{
177 struct Scsi_Host *shost = class_to_shost(class_dev);
178 const char *name = scsi_host_state_name(shost->shost_state);
179
180 if (!name)
181 return -EINVAL;
182
183 return snprintf(buf, 20, "%s\n", name);
184}
185
186static CLASS_DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
187
127shost_rd_attr(unique_id, "%u\n"); 188shost_rd_attr(unique_id, "%u\n");
128shost_rd_attr(host_busy, "%hu\n"); 189shost_rd_attr(host_busy, "%hu\n");
129shost_rd_attr(cmd_per_lun, "%hd\n"); 190shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -139,6 +200,7 @@ static struct class_device_attribute *scsi_sysfs_shost_attrs[] = {
139 &class_device_attr_unchecked_isa_dma, 200 &class_device_attr_unchecked_isa_dma,
140 &class_device_attr_proc_name, 201 &class_device_attr_proc_name,
141 &class_device_attr_scan, 202 &class_device_attr_scan,
203 &class_device_attr_state,
142 NULL 204 NULL
143}; 205};
144 206
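The new "state" class attribute simply maps the strings in shost_states[] onto scsi_host_set_state(). Writing one of those names (for example "running") to the host's state file, presumably /sys/class/scsi_host/hostN/state, lands in store_shost_state() above; from kernel code the same transition check is a single call, sketched here with an illustrative wrapper name:

static int my_set_host_running(struct Scsi_Host *shost)
{
	/* scsi_host_set_state() refuses illegal transitions; that is
	 * why the sysfs write above can return -EINVAL */
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		return -EINVAL;
	return 0;
}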
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index e6412fce423c..2cab556b6e82 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -252,7 +252,8 @@ struct fc_internal {
252 252
253#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) 253#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
254 254
255static int fc_target_setup(struct device *dev) 255static int fc_target_setup(struct transport_container *tc, struct device *dev,
256 struct class_device *cdev)
256{ 257{
257 struct scsi_target *starget = to_scsi_target(dev); 258 struct scsi_target *starget = to_scsi_target(dev);
258 struct fc_rport *rport = starget_to_rport(starget); 259 struct fc_rport *rport = starget_to_rport(starget);
@@ -281,7 +282,8 @@ static DECLARE_TRANSPORT_CLASS(fc_transport_class,
281 NULL, 282 NULL,
282 NULL); 283 NULL);
283 284
284static int fc_host_setup(struct device *dev) 285static int fc_host_setup(struct transport_container *tc, struct device *dev,
286 struct class_device *cdev)
285{ 287{
286 struct Scsi_Host *shost = dev_to_shost(dev); 288 struct Scsi_Host *shost = dev_to_shost(dev);
287 289
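The two FC hunks above track the transport-class callback signature change: setup callbacks now receive the transport_container and the class_device in addition to the struct device. A skeleton of a new-style callback, with the function name purely illustrative:

static int my_target_setup(struct transport_container *tc,
			   struct device *dev,
			   struct class_device *cdev)
{
	struct scsi_target *starget = to_scsi_target(dev);

	/* per-target transport state would be initialised here */
	(void)starget;
	return 0;
}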
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 7670919a087a..ef577c8c2182 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -28,14 +28,14 @@
28#include "scsi_priv.h" 28#include "scsi_priv.h"
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_request.h> 31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_eh.h> 32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport.h> 33#include <scsi/scsi_transport.h>
34#include <scsi/scsi_transport_spi.h> 34#include <scsi/scsi_transport_spi.h>
35 35
36#define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a) 36#define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a)
37 37
38#define SPI_NUM_ATTRS 13 /* increase this if you add attributes */ 38#define SPI_NUM_ATTRS 14 /* increase this if you add attributes */
39#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always 39#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always
40 * on" attributes */ 40 * on" attributes */
41#define SPI_HOST_ATTRS 1 41#define SPI_HOST_ATTRS 1
@@ -106,27 +106,31 @@ static int sprint_frac(char *dest, int value, int denom)
106 return result; 106 return result;
107} 107}
108 108
109/* Modification of scsi_wait_req that will clear UNIT ATTENTION conditions 109static int spi_execute(struct scsi_device *sdev, const void *cmd,
110 * resulting from (likely) bus and device resets */ 110 enum dma_data_direction dir,
111static void spi_wait_req(struct scsi_request *sreq, const void *cmd, 111 void *buffer, unsigned bufflen,
112 void *buffer, unsigned bufflen) 112 struct scsi_sense_hdr *sshdr)
113{ 113{
114 int i; 114 int i, result;
115 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
115 116
116 for(i = 0; i < DV_RETRIES; i++) { 117 for(i = 0; i < DV_RETRIES; i++) {
117 sreq->sr_request->flags |= REQ_FAILFAST; 118 result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
118 119 sense, DV_TIMEOUT, /* retries */ 1,
119 scsi_wait_req(sreq, cmd, buffer, bufflen, 120 REQ_FAILFAST);
120 DV_TIMEOUT, /* retries */ 1); 121 if (result & DRIVER_SENSE) {
121 if (sreq->sr_result & DRIVER_SENSE) { 122 struct scsi_sense_hdr sshdr_tmp;
122 struct scsi_sense_hdr sshdr; 123 if (!sshdr)
123 124 sshdr = &sshdr_tmp;
124 if (scsi_request_normalize_sense(sreq, &sshdr) 125
125 && sshdr.sense_key == UNIT_ATTENTION) 126 if (scsi_normalize_sense(sense, sizeof(*sense),
127 sshdr)
128 && sshdr->sense_key == UNIT_ATTENTION)
126 continue; 129 continue;
127 } 130 }
128 break; 131 break;
129 } 132 }
133 return result;
130} 134}
131 135
132static struct { 136static struct {
@@ -162,7 +166,8 @@ static inline enum spi_signal_type spi_signal_to_value(const char *name)
162 return SPI_SIGNAL_UNKNOWN; 166 return SPI_SIGNAL_UNKNOWN;
163} 167}
164 168
165static int spi_host_setup(struct device *dev) 169static int spi_host_setup(struct transport_container *tc, struct device *dev,
170 struct class_device *cdev)
166{ 171{
167 struct Scsi_Host *shost = dev_to_shost(dev); 172 struct Scsi_Host *shost = dev_to_shost(dev);
168 173
@@ -196,7 +201,9 @@ static int spi_host_match(struct attribute_container *cont,
196 return &i->t.host_attrs.ac == cont; 201 return &i->t.host_attrs.ac == cont;
197} 202}
198 203
199static int spi_device_configure(struct device *dev) 204static int spi_device_configure(struct transport_container *tc,
205 struct device *dev,
206 struct class_device *cdev)
200{ 207{
201 struct scsi_device *sdev = to_scsi_device(dev); 208 struct scsi_device *sdev = to_scsi_device(dev);
202 struct scsi_target *starget = sdev->sdev_target; 209 struct scsi_target *starget = sdev->sdev_target;
@@ -214,7 +221,9 @@ static int spi_device_configure(struct device *dev)
214 return 0; 221 return 0;
215} 222}
216 223
217static int spi_setup_transport_attrs(struct device *dev) 224static int spi_setup_transport_attrs(struct transport_container *tc,
225 struct device *dev,
226 struct class_device *cdev)
218{ 227{
219 struct scsi_target *starget = to_scsi_target(dev); 228 struct scsi_target *starget = to_scsi_target(dev);
220 229
@@ -231,6 +240,7 @@ static int spi_setup_transport_attrs(struct device *dev)
231 spi_rd_strm(starget) = 0; 240 spi_rd_strm(starget) = 0;
232 spi_rti(starget) = 0; 241 spi_rti(starget) = 0;
233 spi_pcomp_en(starget) = 0; 242 spi_pcomp_en(starget) = 0;
243 spi_hold_mcs(starget) = 0;
234 spi_dv_pending(starget) = 0; 244 spi_dv_pending(starget) = 0;
235 spi_initial_dv(starget) = 0; 245 spi_initial_dv(starget) = 0;
236 init_MUTEX(&spi_dv_sem(starget)); 246 init_MUTEX(&spi_dv_sem(starget));
@@ -347,6 +357,7 @@ spi_transport_rd_attr(wr_flow, "%d\n");
347spi_transport_rd_attr(rd_strm, "%d\n"); 357spi_transport_rd_attr(rd_strm, "%d\n");
348spi_transport_rd_attr(rti, "%d\n"); 358spi_transport_rd_attr(rti, "%d\n");
349spi_transport_rd_attr(pcomp_en, "%d\n"); 359spi_transport_rd_attr(pcomp_en, "%d\n");
360spi_transport_rd_attr(hold_mcs, "%d\n");
350 361
351/* we only care about the first child device so we return 1 */ 362/* we only care about the first child device so we return 1 */
352static int child_iter(struct device *dev, void *data) 363static int child_iter(struct device *dev, void *data)
@@ -539,13 +550,13 @@ enum spi_compare_returns {
539/* This is for read/write Domain Validation: If the device supports 550/* This is for read/write Domain Validation: If the device supports
540 * an echo buffer, we do read/write tests to it */ 551 * an echo buffer, we do read/write tests to it */
541static enum spi_compare_returns 552static enum spi_compare_returns
542spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer, 553spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
543 u8 *ptr, const int retries) 554 u8 *ptr, const int retries)
544{ 555{
545 struct scsi_device *sdev = sreq->sr_device;
546 int len = ptr - buffer; 556 int len = ptr - buffer;
547 int j, k, r; 557 int j, k, r, result;
548 unsigned int pattern = 0x0000ffff; 558 unsigned int pattern = 0x0000ffff;
559 struct scsi_sense_hdr sshdr;
549 560
550 const char spi_write_buffer[] = { 561 const char spi_write_buffer[] = {
551 WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 562 WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
@@ -590,14 +601,12 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
590 } 601 }
591 602
592 for (r = 0; r < retries; r++) { 603 for (r = 0; r < retries; r++) {
593 sreq->sr_cmd_len = 0; /* wait_req to fill in */ 604 result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
594 sreq->sr_data_direction = DMA_TO_DEVICE; 605 buffer, len, &sshdr);
595 spi_wait_req(sreq, spi_write_buffer, buffer, len); 606 if(result || !scsi_device_online(sdev)) {
596 if(sreq->sr_result || !scsi_device_online(sdev)) {
597 struct scsi_sense_hdr sshdr;
598 607
599 scsi_device_set_state(sdev, SDEV_QUIESCE); 608 scsi_device_set_state(sdev, SDEV_QUIESCE);
600 if (scsi_request_normalize_sense(sreq, &sshdr) 609 if (scsi_sense_valid(&sshdr)
601 && sshdr.sense_key == ILLEGAL_REQUEST 610 && sshdr.sense_key == ILLEGAL_REQUEST
602 /* INVALID FIELD IN CDB */ 611 /* INVALID FIELD IN CDB */
603 && sshdr.asc == 0x24 && sshdr.ascq == 0x00) 612 && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
@@ -609,14 +618,13 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
609 return SPI_COMPARE_SKIP_TEST; 618 return SPI_COMPARE_SKIP_TEST;
610 619
611 620
612 SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result); 621 SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", result);
613 return SPI_COMPARE_FAILURE; 622 return SPI_COMPARE_FAILURE;
614 } 623 }
615 624
616 memset(ptr, 0, len); 625 memset(ptr, 0, len);
617 sreq->sr_cmd_len = 0; /* wait_req to fill in */ 626 spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
618 sreq->sr_data_direction = DMA_FROM_DEVICE; 627 ptr, len, NULL);
619 spi_wait_req(sreq, spi_read_buffer, ptr, len);
620 scsi_device_set_state(sdev, SDEV_QUIESCE); 628 scsi_device_set_state(sdev, SDEV_QUIESCE);
621 629
622 if (memcmp(buffer, ptr, len) != 0) 630 if (memcmp(buffer, ptr, len) != 0)
@@ -628,25 +636,22 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
628/* This is for the simplest form of Domain Validation: a read test 636/* This is for the simplest form of Domain Validation: a read test
629 * on the inquiry data from the device */ 637 * on the inquiry data from the device */
630static enum spi_compare_returns 638static enum spi_compare_returns
631spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer, 639spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
632 u8 *ptr, const int retries) 640 u8 *ptr, const int retries)
633{ 641{
634 int r; 642 int r, result;
635 const int len = sreq->sr_device->inquiry_len; 643 const int len = sdev->inquiry_len;
636 struct scsi_device *sdev = sreq->sr_device;
637 const char spi_inquiry[] = { 644 const char spi_inquiry[] = {
638 INQUIRY, 0, 0, 0, len, 0 645 INQUIRY, 0, 0, 0, len, 0
639 }; 646 };
640 647
641 for (r = 0; r < retries; r++) { 648 for (r = 0; r < retries; r++) {
642 sreq->sr_cmd_len = 0; /* wait_req to fill in */
643 sreq->sr_data_direction = DMA_FROM_DEVICE;
644
645 memset(ptr, 0, len); 649 memset(ptr, 0, len);
646 650
647 spi_wait_req(sreq, spi_inquiry, ptr, len); 651 result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
652 ptr, len, NULL);
648 653
649 if(sreq->sr_result || !scsi_device_online(sdev)) { 654 if(result || !scsi_device_online(sdev)) {
650 scsi_device_set_state(sdev, SDEV_QUIESCE); 655 scsi_device_set_state(sdev, SDEV_QUIESCE);
651 return SPI_COMPARE_FAILURE; 656 return SPI_COMPARE_FAILURE;
652 } 657 }
@@ -667,12 +672,11 @@ spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer,
667} 672}
668 673
669static enum spi_compare_returns 674static enum spi_compare_returns
670spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr, 675spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
671 enum spi_compare_returns 676 enum spi_compare_returns
672 (*compare_fn)(struct scsi_request *, u8 *, u8 *, int)) 677 (*compare_fn)(struct scsi_device *, u8 *, u8 *, int))
673{ 678{
674 struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); 679 struct spi_internal *i = to_spi_internal(sdev->host->transportt);
675 struct scsi_device *sdev = sreq->sr_device;
676 struct scsi_target *starget = sdev->sdev_target; 680 struct scsi_target *starget = sdev->sdev_target;
677 int period = 0, prevperiod = 0; 681 int period = 0, prevperiod = 0;
678 enum spi_compare_returns retval; 682 enum spi_compare_returns retval;
@@ -680,7 +684,7 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
680 684
681 for (;;) { 685 for (;;) {
682 int newperiod; 686 int newperiod;
683 retval = compare_fn(sreq, buffer, ptr, DV_LOOPS); 687 retval = compare_fn(sdev, buffer, ptr, DV_LOOPS);
684 688
685 if (retval == SPI_COMPARE_SUCCESS 689 if (retval == SPI_COMPARE_SUCCESS
686 || retval == SPI_COMPARE_SKIP_TEST) 690 || retval == SPI_COMPARE_SKIP_TEST)
@@ -726,9 +730,9 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
726} 730}
727 731
728static int 732static int
729spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer) 733spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
730{ 734{
731 int l; 735 int l, result;
732 736
733 /* first off do a test unit ready. This can error out 737 /* first off do a test unit ready. This can error out
734 * because of reservations or some other reason. If it 738 * because of reservations or some other reason. If it
@@ -744,18 +748,16 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
744 }; 748 };
745 749
746 750
747 sreq->sr_cmd_len = 0;
748 sreq->sr_data_direction = DMA_NONE;
749
750 /* We send a set of three TURs to clear any outstanding 751 /* We send a set of three TURs to clear any outstanding
751 * unit attention conditions if they exist (Otherwise the 752 * unit attention conditions if they exist (Otherwise the
752 * buffer tests won't be happy). If the TUR still fails 753 * buffer tests won't be happy). If the TUR still fails
753 * (reservation conflict, device not ready, etc) just 754 * (reservation conflict, device not ready, etc) just
754 * skip the write tests */ 755 * skip the write tests */
755 for (l = 0; ; l++) { 756 for (l = 0; ; l++) {
756 spi_wait_req(sreq, spi_test_unit_ready, NULL, 0); 757 result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
758 NULL, 0, NULL);
757 759
758 if(sreq->sr_result) { 760 if(result) {
759 if(l >= 3) 761 if(l >= 3)
760 return 0; 762 return 0;
761 } else { 763 } else {
@@ -764,12 +766,10 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
764 } 766 }
765 } 767 }
766 768
767 sreq->sr_cmd_len = 0; 769 result = spi_execute(sdev, spi_read_buffer_descriptor,
768 sreq->sr_data_direction = DMA_FROM_DEVICE; 770 DMA_FROM_DEVICE, buffer, 4, NULL);
769
770 spi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4);
771 771
772 if (sreq->sr_result) 772 if (result)
773 /* Device has no echo buffer */ 773 /* Device has no echo buffer */
774 return 0; 774 return 0;
775 775
@@ -777,17 +777,16 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
777} 777}
778 778
779static void 779static void
780spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer) 780spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
781{ 781{
782 struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); 782 struct spi_internal *i = to_spi_internal(sdev->host->transportt);
783 struct scsi_device *sdev = sreq->sr_device;
784 struct scsi_target *starget = sdev->sdev_target; 783 struct scsi_target *starget = sdev->sdev_target;
785 int len = sdev->inquiry_len; 784 int len = sdev->inquiry_len;
786 /* first set us up for narrow async */ 785 /* first set us up for narrow async */
787 DV_SET(offset, 0); 786 DV_SET(offset, 0);
788 DV_SET(width, 0); 787 DV_SET(width, 0);
789 788
790 if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS) 789 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
791 != SPI_COMPARE_SUCCESS) { 790 != SPI_COMPARE_SUCCESS) {
792 SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n"); 791 SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
793 /* FIXME: should probably offline the device here? */ 792 /* FIXME: should probably offline the device here? */
@@ -799,7 +798,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
799 scsi_device_wide(sdev)) { 798 scsi_device_wide(sdev)) {
800 i->f->set_width(starget, 1); 799 i->f->set_width(starget, 1);
801 800
802 if (spi_dv_device_compare_inquiry(sreq, buffer, 801 if (spi_dv_device_compare_inquiry(sdev, buffer,
803 buffer + len, 802 buffer + len,
804 DV_LOOPS) 803 DV_LOOPS)
805 != SPI_COMPARE_SUCCESS) { 804 != SPI_COMPARE_SUCCESS) {
@@ -820,7 +819,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
820 819
821 len = 0; 820 len = 0;
822 if (scsi_device_dt(sdev)) 821 if (scsi_device_dt(sdev))
823 len = spi_dv_device_get_echo_buffer(sreq, buffer); 822 len = spi_dv_device_get_echo_buffer(sdev, buffer);
824 823
825 retry: 824 retry:
826 825
@@ -846,7 +845,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
846 845
847 if (len == 0) { 846 if (len == 0) {
848 SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n"); 847 SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n");
849 spi_dv_retrain(sreq, buffer, buffer + len, 848 spi_dv_retrain(sdev, buffer, buffer + len,
850 spi_dv_device_compare_inquiry); 849 spi_dv_device_compare_inquiry);
851 return; 850 return;
852 } 851 }
@@ -856,7 +855,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
856 len = SPI_MAX_ECHO_BUFFER_SIZE; 855 len = SPI_MAX_ECHO_BUFFER_SIZE;
857 } 856 }
858 857
859 if (spi_dv_retrain(sreq, buffer, buffer + len, 858 if (spi_dv_retrain(sdev, buffer, buffer + len,
860 spi_dv_device_echo_buffer) 859 spi_dv_device_echo_buffer)
861 == SPI_COMPARE_SKIP_TEST) { 860 == SPI_COMPARE_SKIP_TEST) {
862 /* OK, the stupid drive can't do a write echo buffer 861 /* OK, the stupid drive can't do a write echo buffer
@@ -879,16 +878,12 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
879void 878void
880spi_dv_device(struct scsi_device *sdev) 879spi_dv_device(struct scsi_device *sdev)
881{ 880{
882 struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
883 struct scsi_target *starget = sdev->sdev_target; 881 struct scsi_target *starget = sdev->sdev_target;
884 u8 *buffer; 882 u8 *buffer;
885 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; 883 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
886 884
887 if (unlikely(!sreq))
888 return;
889
890 if (unlikely(scsi_device_get(sdev))) 885 if (unlikely(scsi_device_get(sdev)))
891 goto out_free_req; 886 return;
892 887
893 buffer = kmalloc(len, GFP_KERNEL); 888 buffer = kmalloc(len, GFP_KERNEL);
894 889
@@ -909,7 +904,7 @@ spi_dv_device(struct scsi_device *sdev)
909 904
910 SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n"); 905 SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n");
911 906
912 spi_dv_device_internal(sreq, buffer); 907 spi_dv_device_internal(sdev, buffer);
913 908
914 SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n"); 909 SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n");
915 910
@@ -924,8 +919,6 @@ spi_dv_device(struct scsi_device *sdev)
924 kfree(buffer); 919 kfree(buffer);
925 out_put: 920 out_put:
926 scsi_device_put(sdev); 921 scsi_device_put(sdev);
927 out_free_req:
928 scsi_release_request(sreq);
929} 922}
930EXPORT_SYMBOL(spi_dv_device); 923EXPORT_SYMBOL(spi_dv_device);
931 924
@@ -1028,10 +1021,17 @@ void spi_display_xfer_agreement(struct scsi_target *starget)
1028 sprint_frac(tmp, picosec, 1000); 1021 sprint_frac(tmp, picosec, 1000);
1029 1022
1030 dev_info(&starget->dev, 1023 dev_info(&starget->dev,
1031 "%s %sSCSI %d.%d MB/s %s%s%s (%s ns, offset %d)\n", 1024 "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n",
1032 scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, 1025 scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10,
1033 tp->dt ? "DT" : "ST", tp->iu ? " IU" : "", 1026 tp->dt ? "DT" : "ST",
1034 tp->qas ? " QAS" : "", tmp, tp->offset); 1027 tp->iu ? " IU" : "",
1028 tp->qas ? " QAS" : "",
1029 tp->rd_strm ? " RDSTRM" : "",
1030 tp->rti ? " RTI" : "",
1031 tp->wr_flow ? " WRFLOW" : "",
1032 tp->pcomp_en ? " PCOMP" : "",
1033 tp->hold_mcs ? " HMCS" : "",
1034 tmp, tp->offset);
1035 } else { 1035 } else {
1036 dev_info(&starget->dev, "%sasynchronous.\n", 1036 dev_info(&starget->dev, "%sasynchronous.\n",
1037 tp->width ? "wide " : ""); 1037 tp->width ? "wide " : "");
@@ -1073,6 +1073,7 @@ static int spi_device_match(struct attribute_container *cont,
1073{ 1073{
1074 struct scsi_device *sdev; 1074 struct scsi_device *sdev;
1075 struct Scsi_Host *shost; 1075 struct Scsi_Host *shost;
1076 struct spi_internal *i;
1076 1077
1077 if (!scsi_is_sdev_device(dev)) 1078 if (!scsi_is_sdev_device(dev))
1078 return 0; 1079 return 0;
@@ -1085,6 +1086,9 @@ static int spi_device_match(struct attribute_container *cont,
1085 /* Note: this class has no device attributes, so it has 1086 /* Note: this class has no device attributes, so it has
1086 * no per-HBA allocation and thus we don't need to distinguish 1087 * no per-HBA allocation and thus we don't need to distinguish
1087 * the attribute containers for the device */ 1088 * the attribute containers for the device */
1089 i = to_spi_internal(shost->transportt);
1090 if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target))
1091 return 0;
1088 return 1; 1092 return 1;
1089} 1093}
1090 1094
@@ -1092,6 +1096,7 @@ static int spi_target_match(struct attribute_container *cont,
1092 struct device *dev) 1096 struct device *dev)
1093{ 1097{
1094 struct Scsi_Host *shost; 1098 struct Scsi_Host *shost;
1099 struct scsi_target *starget;
1095 struct spi_internal *i; 1100 struct spi_internal *i;
1096 1101
1097 if (!scsi_is_target_device(dev)) 1102 if (!scsi_is_target_device(dev))
@@ -1103,7 +1108,11 @@ static int spi_target_match(struct attribute_container *cont,
1103 return 0; 1108 return 0;
1104 1109
1105 i = to_spi_internal(shost->transportt); 1110 i = to_spi_internal(shost->transportt);
1106 1111 starget = to_scsi_target(dev);
1112
1113 if (i->f->deny_binding && i->f->deny_binding(starget))
1114 return 0;
1115
1107 return &i->t.target_attrs.ac == cont; 1116 return &i->t.target_attrs.ac == cont;
1108} 1117}
1109 1118
@@ -1154,6 +1163,7 @@ spi_attach_transport(struct spi_function_template *ft)
1154 SETUP_ATTRIBUTE(rd_strm); 1163 SETUP_ATTRIBUTE(rd_strm);
1155 SETUP_ATTRIBUTE(rti); 1164 SETUP_ATTRIBUTE(rti);
1156 SETUP_ATTRIBUTE(pcomp_en); 1165 SETUP_ATTRIBUTE(pcomp_en);
1166 SETUP_ATTRIBUTE(hold_mcs);
1157 1167
1158 /* if you add an attribute but forget to increase SPI_NUM_ATTRS 1168 /* if you add an attribute but forget to increase SPI_NUM_ATTRS
1159 * this bug will trigger */ 1169 * this bug will trigger */
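Within scsi_transport_spi.c, all domain-validation I/O now funnels through the spi_execute() helper added above, which passes REQ_FAILFAST and retries up to DV_RETRIES times on UNIT ATTENTION. A sketch of a caller, assuming it sits in the same file so the static helper is in scope; the wrapper name is illustrative:

static int my_dv_inquiry(struct scsi_device *sdev, u8 *buf)
{
	const int len = sdev->inquiry_len;
	const unsigned char inq[6] = { INQUIRY, 0, 0, 0, len, 0 };

	/* pass NULL when the caller does not care about sense data */
	return spi_execute(sdev, inq, DMA_FROM_DEVICE, buf, len, NULL);
}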
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0410e1bf109a..de564b386052 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -59,7 +59,6 @@
59#include <scsi/scsi_eh.h> 59#include <scsi/scsi_eh.h>
60#include <scsi/scsi_host.h> 60#include <scsi/scsi_host.h>
61#include <scsi/scsi_ioctl.h> 61#include <scsi/scsi_ioctl.h>
62#include <scsi/scsi_request.h>
63#include <scsi/scsicam.h> 62#include <scsi/scsicam.h>
64 63
65#include "scsi_logging.h" 64#include "scsi_logging.h"
@@ -125,7 +124,7 @@ static int sd_issue_flush(struct device *, sector_t *);
125static void sd_end_flush(request_queue_t *, struct request *); 124static void sd_end_flush(request_queue_t *, struct request *);
126static int sd_prepare_flush(request_queue_t *, struct request *); 125static int sd_prepare_flush(request_queue_t *, struct request *);
127static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 126static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
128 struct scsi_request *SRpnt, unsigned char *buffer); 127 unsigned char *buffer);
129 128
130static struct scsi_driver sd_template = { 129static struct scsi_driver sd_template = {
131 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
@@ -682,19 +681,13 @@ not_present:
682 681
683static int sd_sync_cache(struct scsi_device *sdp) 682static int sd_sync_cache(struct scsi_device *sdp)
684{ 683{
685 struct scsi_request *sreq;
686 int retries, res; 684 int retries, res;
685 struct scsi_sense_hdr sshdr;
687 686
688 if (!scsi_device_online(sdp)) 687 if (!scsi_device_online(sdp))
689 return -ENODEV; 688 return -ENODEV;
690 689
691 sreq = scsi_allocate_request(sdp, GFP_KERNEL);
692 if (!sreq) {
693 printk("FAILED\n No memory for request\n");
694 return -ENOMEM;
695 }
696 690
697 sreq->sr_data_direction = DMA_NONE;
698 for (retries = 3; retries > 0; --retries) { 691 for (retries = 3; retries > 0; --retries) {
699 unsigned char cmd[10] = { 0 }; 692 unsigned char cmd[10] = { 0 };
700 693
@@ -703,22 +696,20 @@ static int sd_sync_cache(struct scsi_device *sdp)
703 * Leave the rest of the command zero to indicate 696 * Leave the rest of the command zero to indicate
704 * flush everything. 697 * flush everything.
705 */ 698 */
706 scsi_wait_req(sreq, cmd, NULL, 0, SD_TIMEOUT, SD_MAX_RETRIES); 699 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
707 if (sreq->sr_result == 0) 700 SD_TIMEOUT, SD_MAX_RETRIES);
701 if (res == 0)
708 break; 702 break;
709 } 703 }
710 704
711 res = sreq->sr_result; 705 if (res) { printk(KERN_WARNING "FAILED\n status = %x, message = %02x, "
712 if (res) {
713 printk(KERN_WARNING "FAILED\n status = %x, message = %02x, "
714 "host = %d, driver = %02x\n ", 706 "host = %d, driver = %02x\n ",
715 status_byte(res), msg_byte(res), 707 status_byte(res), msg_byte(res),
716 host_byte(res), driver_byte(res)); 708 host_byte(res), driver_byte(res));
717 if (driver_byte(res) & DRIVER_SENSE) 709 if (driver_byte(res) & DRIVER_SENSE)
718 scsi_print_req_sense("sd", sreq); 710 scsi_print_sense_hdr("sd", &sshdr);
719 } 711 }
720 712
721 scsi_release_request(sreq);
722 return res; 713 return res;
723} 714}
724 715
@@ -957,22 +948,19 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
957 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9); 948 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9);
958} 949}
959 950
960static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp) 951static int media_not_present(struct scsi_disk *sdkp,
952 struct scsi_sense_hdr *sshdr)
961{ 953{
962 struct scsi_sense_hdr sshdr;
963 954
964 if (!srp->sr_result) 955 if (!scsi_sense_valid(sshdr))
965 return 0;
966 if (!(driver_byte(srp->sr_result) & DRIVER_SENSE))
967 return 0; 956 return 0;
968 /* not invoked for commands that could return deferred errors */ 957 /* not invoked for commands that could return deferred errors */
969 if (scsi_request_normalize_sense(srp, &sshdr)) { 958 if (sshdr->sense_key != NOT_READY &&
970 if (sshdr.sense_key != NOT_READY && 959 sshdr->sense_key != UNIT_ATTENTION)
971 sshdr.sense_key != UNIT_ATTENTION) 960 return 0;
972 return 0; 961 if (sshdr->asc != 0x3A) /* medium not present */
973 if (sshdr.asc != 0x3A) /* medium not present */ 962 return 0;
974 return 0; 963
975 }
976 set_media_not_present(sdkp); 964 set_media_not_present(sdkp);
977 return 1; 965 return 1;
978} 966}
@@ -981,10 +969,10 @@ static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp)
981 * spinup disk - called only in sd_revalidate_disk() 969 * spinup disk - called only in sd_revalidate_disk()
982 */ 970 */
983static void 971static void
984sd_spinup_disk(struct scsi_disk *sdkp, char *diskname, 972sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
985 struct scsi_request *SRpnt, unsigned char *buffer) { 973{
986 unsigned char cmd[10]; 974 unsigned char cmd[10];
987 unsigned long spintime_value = 0; 975 unsigned long spintime_expire = 0;
988 int retries, spintime; 976 int retries, spintime;
989 unsigned int the_result; 977 unsigned int the_result;
990 struct scsi_sense_hdr sshdr; 978 struct scsi_sense_hdr sshdr;
@@ -1001,18 +989,13 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1001 cmd[0] = TEST_UNIT_READY; 989 cmd[0] = TEST_UNIT_READY;
1002 memset((void *) &cmd[1], 0, 9); 990 memset((void *) &cmd[1], 0, 9);
1003 991
1004 SRpnt->sr_cmd_len = 0; 992 the_result = scsi_execute_req(sdkp->device, cmd,
1005 memset(SRpnt->sr_sense_buffer, 0, 993 DMA_NONE, NULL, 0,
1006 SCSI_SENSE_BUFFERSIZE); 994 &sshdr, SD_TIMEOUT,
1007 SRpnt->sr_data_direction = DMA_NONE; 995 SD_MAX_RETRIES);
1008 996
1009 scsi_wait_req (SRpnt, (void *) cmd, (void *) buffer,
1010 0/*512*/, SD_TIMEOUT, SD_MAX_RETRIES);
1011
1012 the_result = SRpnt->sr_result;
1013 if (the_result) 997 if (the_result)
1014 sense_valid = scsi_request_normalize_sense( 998 sense_valid = scsi_sense_valid(&sshdr);
1015 SRpnt, &sshdr);
1016 retries++; 999 retries++;
1017 } while (retries < 3 && 1000 } while (retries < 3 &&
1018 (!scsi_status_is_good(the_result) || 1001 (!scsi_status_is_good(the_result) ||
@@ -1024,7 +1007,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1024 * any media in it, don't bother with any of the rest of 1007 * any media in it, don't bother with any of the rest of
1025 * this crap. 1008 * this crap.
1026 */ 1009 */
1027 if (media_not_present(sdkp, SRpnt)) 1010 if (media_not_present(sdkp, &sshdr))
1028 return; 1011 return;
1029 1012
1030 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { 1013 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
@@ -1063,33 +1046,42 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1063 cmd[1] = 1; /* Return immediately */ 1046 cmd[1] = 1; /* Return immediately */
1064 memset((void *) &cmd[2], 0, 8); 1047 memset((void *) &cmd[2], 0, 8);
1065 cmd[4] = 1; /* Start spin cycle */ 1048 cmd[4] = 1; /* Start spin cycle */
1066 SRpnt->sr_cmd_len = 0; 1049 scsi_execute_req(sdkp->device, cmd, DMA_NONE,
1067 memset(SRpnt->sr_sense_buffer, 0, 1050 NULL, 0, &sshdr,
1068 SCSI_SENSE_BUFFERSIZE); 1051 SD_TIMEOUT, SD_MAX_RETRIES);
1069 1052 spintime_expire = jiffies + 100 * HZ;
1070 SRpnt->sr_data_direction = DMA_NONE; 1053 spintime = 1;
1071 scsi_wait_req(SRpnt, (void *)cmd,
1072 (void *) buffer, 0/*512*/,
1073 SD_TIMEOUT, SD_MAX_RETRIES);
1074 spintime_value = jiffies;
1075 } 1054 }
1076 spintime = 1;
1077 /* Wait 1 second for next try */ 1055 /* Wait 1 second for next try */
1078 msleep(1000); 1056 msleep(1000);
1079 printk("."); 1057 printk(".");
1058
1059 /*
1060 * Wait for USB flash devices with slow firmware.
1061 * Yes, this sense key/ASC combination shouldn't
1062 * occur here. It's characteristic of these devices.
1063 */
1064 } else if (sense_valid &&
1065 sshdr.sense_key == UNIT_ATTENTION &&
1066 sshdr.asc == 0x28) {
1067 if (!spintime) {
1068 spintime_expire = jiffies + 5 * HZ;
1069 spintime = 1;
1070 }
1071 /* Wait 1 second for next try */
1072 msleep(1000);
1080 } else { 1073 } else {
1081 /* we don't understand the sense code, so it's 1074 /* we don't understand the sense code, so it's
1082 * probably pointless to loop */ 1075 * probably pointless to loop */
1083 if(!spintime) { 1076 if(!spintime) {
1084 printk(KERN_NOTICE "%s: Unit Not Ready, " 1077 printk(KERN_NOTICE "%s: Unit Not Ready, "
1085 "sense:\n", diskname); 1078 "sense:\n", diskname);
1086 scsi_print_req_sense("", SRpnt); 1079 scsi_print_sense_hdr("", &sshdr);
1087 } 1080 }
1088 break; 1081 break;
1089 } 1082 }
1090 1083
1091 } while (spintime && 1084 } while (spintime && time_before_eq(jiffies, spintime_expire));
1092 time_after(spintime_value + 100 * HZ, jiffies));
1093 1085
1094 if (spintime) { 1086 if (spintime) {
1095 if (scsi_status_is_good(the_result)) 1087 if (scsi_status_is_good(the_result))
@@ -1104,14 +1096,15 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
1104 */ 1096 */
1105static void 1097static void
1106sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 1098sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
1107 struct scsi_request *SRpnt, unsigned char *buffer) { 1099 unsigned char *buffer)
1100{
1108 unsigned char cmd[16]; 1101 unsigned char cmd[16];
1109 struct scsi_device *sdp = sdkp->device;
1110 int the_result, retries; 1102 int the_result, retries;
1111 int sector_size = 0; 1103 int sector_size = 0;
1112 int longrc = 0; 1104 int longrc = 0;
1113 struct scsi_sense_hdr sshdr; 1105 struct scsi_sense_hdr sshdr;
1114 int sense_valid = 0; 1106 int sense_valid = 0;
1107 struct scsi_device *sdp = sdkp->device;
1115 1108
1116repeat: 1109repeat:
1117 retries = 3; 1110 retries = 3;
@@ -1128,20 +1121,15 @@ repeat:
1128 memset((void *) buffer, 0, 8); 1121 memset((void *) buffer, 0, 8);
1129 } 1122 }
1130 1123
1131 SRpnt->sr_cmd_len = 0; 1124 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1132 memset(SRpnt->sr_sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1125 buffer, longrc ? 12 : 8, &sshdr,
1133 SRpnt->sr_data_direction = DMA_FROM_DEVICE; 1126 SD_TIMEOUT, SD_MAX_RETRIES);
1134
1135 scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
1136 longrc ? 12 : 8, SD_TIMEOUT, SD_MAX_RETRIES);
1137 1127
1138 if (media_not_present(sdkp, SRpnt)) 1128 if (media_not_present(sdkp, &sshdr))
1139 return; 1129 return;
1140 1130
1141 the_result = SRpnt->sr_result;
1142 if (the_result) 1131 if (the_result)
1143 sense_valid = scsi_request_normalize_sense(SRpnt, 1132 sense_valid = scsi_sense_valid(&sshdr);
1144 &sshdr);
1145 retries--; 1133 retries--;
1146 1134
1147 } while (the_result && retries); 1135 } while (the_result && retries);
@@ -1156,7 +1144,7 @@ repeat:
1156 driver_byte(the_result)); 1144 driver_byte(the_result));
1157 1145
1158 if (driver_byte(the_result) & DRIVER_SENSE) 1146 if (driver_byte(the_result) & DRIVER_SENSE)
1159 scsi_print_req_sense("sd", SRpnt); 1147 scsi_print_sense_hdr("sd", &sshdr);
1160 else 1148 else
1161 printk("%s : sense not available. \n", diskname); 1149 printk("%s : sense not available. \n", diskname);
1162 1150
@@ -1296,11 +1284,13 @@ got_data:
1296 1284
1297/* called with buffer of length 512 */ 1285/* called with buffer of length 512 */
1298static inline int 1286static inline int
1299sd_do_mode_sense(struct scsi_request *SRpnt, int dbd, int modepage, 1287sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
1300 unsigned char *buffer, int len, struct scsi_mode_data *data) 1288 unsigned char *buffer, int len, struct scsi_mode_data *data,
1289 struct scsi_sense_hdr *sshdr)
1301{ 1290{
1302 return __scsi_mode_sense(SRpnt, dbd, modepage, buffer, len, 1291 return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
1303 SD_TIMEOUT, SD_MAX_RETRIES, data); 1292 SD_TIMEOUT, SD_MAX_RETRIES, data,
1293 sshdr);
1304} 1294}
1305 1295
1306/* 1296/*
@@ -1309,25 +1299,27 @@ sd_do_mode_sense(struct scsi_request *SRpnt, int dbd, int modepage,
1309 */ 1299 */
1310static void 1300static void
1311sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname, 1301sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1312 struct scsi_request *SRpnt, unsigned char *buffer) { 1302 unsigned char *buffer)
1303{
1313 int res; 1304 int res;
1305 struct scsi_device *sdp = sdkp->device;
1314 struct scsi_mode_data data; 1306 struct scsi_mode_data data;
1315 1307
1316 set_disk_ro(sdkp->disk, 0); 1308 set_disk_ro(sdkp->disk, 0);
1317 if (sdkp->device->skip_ms_page_3f) { 1309 if (sdp->skip_ms_page_3f) {
1318 printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname); 1310 printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname);
1319 return; 1311 return;
1320 } 1312 }
1321 1313
1322 if (sdkp->device->use_192_bytes_for_3f) { 1314 if (sdp->use_192_bytes_for_3f) {
1323 res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 192, &data); 1315 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
1324 } else { 1316 } else {
1325 /* 1317 /*
1326 * First attempt: ask for all pages (0x3F), but only 4 bytes. 1318 * First attempt: ask for all pages (0x3F), but only 4 bytes.
1327 * We have to start carefully: some devices hang if we ask 1319 * We have to start carefully: some devices hang if we ask
1328 * for more than is available. 1320 * for more than is available.
1329 */ 1321 */
1330 res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 4, &data); 1322 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
1331 1323
1332 /* 1324 /*
1333 * Second attempt: ask for page 0 When only page 0 is 1325 * Second attempt: ask for page 0 When only page 0 is
@@ -1336,14 +1328,14 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1336 * CDB. 1328 * CDB.
1337 */ 1329 */
1338 if (!scsi_status_is_good(res)) 1330 if (!scsi_status_is_good(res))
1339 res = sd_do_mode_sense(SRpnt, 0, 0, buffer, 4, &data); 1331 res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
1340 1332
1341 /* 1333 /*
1342 * Third attempt: ask 255 bytes, as we did earlier. 1334 * Third attempt: ask 255 bytes, as we did earlier.
1343 */ 1335 */
1344 if (!scsi_status_is_good(res)) 1336 if (!scsi_status_is_good(res))
1345 res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 255, 1337 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
1346 &data); 1338 &data, NULL);
1347 } 1339 }
1348 1340
1349 if (!scsi_status_is_good(res)) { 1341 if (!scsi_status_is_good(res)) {
@@ -1365,19 +1357,20 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1365 */ 1357 */
1366static void 1358static void
1367sd_read_cache_type(struct scsi_disk *sdkp, char *diskname, 1359sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1368 struct scsi_request *SRpnt, unsigned char *buffer) 1360 unsigned char *buffer)
1369{ 1361{
1370 int len = 0, res; 1362 int len = 0, res;
1363 struct scsi_device *sdp = sdkp->device;
1371 1364
1372 int dbd; 1365 int dbd;
1373 int modepage; 1366 int modepage;
1374 struct scsi_mode_data data; 1367 struct scsi_mode_data data;
1375 struct scsi_sense_hdr sshdr; 1368 struct scsi_sense_hdr sshdr;
1376 1369
1377 if (sdkp->device->skip_ms_page_8) 1370 if (sdp->skip_ms_page_8)
1378 goto defaults; 1371 goto defaults;
1379 1372
1380 if (sdkp->device->type == TYPE_RBC) { 1373 if (sdp->type == TYPE_RBC) {
1381 modepage = 6; 1374 modepage = 6;
1382 dbd = 8; 1375 dbd = 8;
1383 } else { 1376 } else {
@@ -1386,7 +1379,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1386 } 1379 }
1387 1380
1388 /* cautiously ask */ 1381 /* cautiously ask */
1389 res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, 4, &data); 1382 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
1390 1383
1391 if (!scsi_status_is_good(res)) 1384 if (!scsi_status_is_good(res))
1392 goto bad_sense; 1385 goto bad_sense;
@@ -1407,7 +1400,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1407 len += data.header_length + data.block_descriptor_length; 1400 len += data.header_length + data.block_descriptor_length;
1408 1401
1409 /* Get the data */ 1402 /* Get the data */
1410 res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, len, &data); 1403 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
1411 1404
1412 if (scsi_status_is_good(res)) { 1405 if (scsi_status_is_good(res)) {
1413 const char *types[] = { 1406 const char *types[] = {
@@ -1439,7 +1432,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1439 } 1432 }
1440 1433
1441bad_sense: 1434bad_sense:
1442 if (scsi_request_normalize_sense(SRpnt, &sshdr) && 1435 if (scsi_sense_valid(&sshdr) &&
1443 sshdr.sense_key == ILLEGAL_REQUEST && 1436 sshdr.sense_key == ILLEGAL_REQUEST &&
1444 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 1437 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
1445 printk(KERN_NOTICE "%s: cache data unavailable\n", 1438 printk(KERN_NOTICE "%s: cache data unavailable\n",
@@ -1464,7 +1457,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
1464{ 1457{
1465 struct scsi_disk *sdkp = scsi_disk(disk); 1458 struct scsi_disk *sdkp = scsi_disk(disk);
1466 struct scsi_device *sdp = sdkp->device; 1459 struct scsi_device *sdp = sdkp->device;
1467 struct scsi_request *sreq;
1468 unsigned char *buffer; 1460 unsigned char *buffer;
1469 1461
1470 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); 1462 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
@@ -1476,18 +1468,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
1476 if (!scsi_device_online(sdp)) 1468 if (!scsi_device_online(sdp))
1477 goto out; 1469 goto out;
1478 1470
1479 sreq = scsi_allocate_request(sdp, GFP_KERNEL);
1480 if (!sreq) {
1481 printk(KERN_WARNING "(sd_revalidate_disk:) Request allocation "
1482 "failure.\n");
1483 goto out;
1484 }
1485
1486 buffer = kmalloc(512, GFP_KERNEL | __GFP_DMA); 1471 buffer = kmalloc(512, GFP_KERNEL | __GFP_DMA);
1487 if (!buffer) { 1472 if (!buffer) {
1488 printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation " 1473 printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation "
1489 "failure.\n"); 1474 "failure.\n");
1490 goto out_release_request; 1475 goto out;
1491 } 1476 }
1492 1477
1493 /* defaults, until the device tells us otherwise */ 1478 /* defaults, until the device tells us otherwise */
@@ -1498,25 +1483,23 @@ static int sd_revalidate_disk(struct gendisk *disk)
1498 sdkp->WCE = 0; 1483 sdkp->WCE = 0;
1499 sdkp->RCD = 0; 1484 sdkp->RCD = 0;
1500 1485
1501 sd_spinup_disk(sdkp, disk->disk_name, sreq, buffer); 1486 sd_spinup_disk(sdkp, disk->disk_name);
1502 1487
1503 /* 1488 /*
1504 * Without media there is no reason to ask; moreover, some devices 1489 * Without media there is no reason to ask; moreover, some devices
1505 * react badly if we do. 1490 * react badly if we do.
1506 */ 1491 */
1507 if (sdkp->media_present) { 1492 if (sdkp->media_present) {
1508 sd_read_capacity(sdkp, disk->disk_name, sreq, buffer); 1493 sd_read_capacity(sdkp, disk->disk_name, buffer);
1509 if (sdp->removable) 1494 if (sdp->removable)
1510 sd_read_write_protect_flag(sdkp, disk->disk_name, 1495 sd_read_write_protect_flag(sdkp, disk->disk_name,
1511 sreq, buffer); 1496 buffer);
1512 sd_read_cache_type(sdkp, disk->disk_name, sreq, buffer); 1497 sd_read_cache_type(sdkp, disk->disk_name, buffer);
1513 } 1498 }
1514 1499
1515 set_capacity(disk, sdkp->capacity); 1500 set_capacity(disk, sdkp->capacity);
1516 kfree(buffer); 1501 kfree(buffer);
1517 1502
1518 out_release_request:
1519 scsi_release_request(sreq);
1520 out: 1503 out:
1521 return 0; 1504 return 0;
1522} 1505}
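The sd.c hunks above finish converting the disk probe path from struct scsi_request to calls that take the struct scsi_device directly and report sense data through a struct scsi_sense_hdr. A minimal, hedged sketch of the resulting call pattern follows; it uses scsi_mode_sense() with the nine-argument form visible in the sr.c hunk further down (sd_do_mode_sense() itself is a static sd.c helper whose body is not shown here), and probe_cache_page(), the timeout/retries parameters and mode page 0x08 are illustrative choices, not the driver's exact code.

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

/* Sketch: probe the caching mode page without a scsi_request.
 * Assumes the post-patch scsi_mode_sense() prototype shown in the
 * sr.c hunk below; timeout/retries are left to the caller. */
static int probe_cache_page(struct scsi_device *sdp, unsigned char *buffer,
			    int timeout, int retries)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_mode_sense(sdp, 0, 0x08, buffer, 4,
			      timeout, retries, &data, &sshdr);
	if (!scsi_status_is_good(res) && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST)
		return -EINVAL;	/* page or command not supported */
	return res;
}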
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e822ca0e97cf..b1b69d738d08 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -61,7 +61,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
61 61
62#ifdef CONFIG_SCSI_PROC_FS 62#ifdef CONFIG_SCSI_PROC_FS
63#include <linux/proc_fs.h> 63#include <linux/proc_fs.h>
64static char *sg_version_date = "20050328"; 64static char *sg_version_date = "20050901";
65 65
66static int sg_proc_init(void); 66static int sg_proc_init(void);
67static void sg_proc_cleanup(void); 67static void sg_proc_cleanup(void);
@@ -1027,8 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1027 if (sdp->detached) 1027 if (sdp->detached)
1028 return -ENODEV; 1028 return -ENODEV;
1029 if (filp->f_flags & O_NONBLOCK) { 1029 if (filp->f_flags & O_NONBLOCK) {
1030 if (test_bit(SHOST_RECOVERY, 1030 if (sdp->device->host->shost_state == SHOST_RECOVERY)
1031 &sdp->device->host->shost_state))
1032 return -EBUSY; 1031 return -EBUSY;
1033 } else if (!scsi_block_when_processing_errors(sdp->device)) 1032 } else if (!scsi_block_when_processing_errors(sdp->device))
1034 return -EBUSY; 1033 return -EBUSY;
@@ -1795,12 +1794,12 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1795 unsigned long uaddr, size_t count, int rw, 1794 unsigned long uaddr, size_t count, int rw,
1796 unsigned long max_pfn) 1795 unsigned long max_pfn)
1797{ 1796{
1797 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1798 unsigned long start = uaddr >> PAGE_SHIFT;
1799 const int nr_pages = end - start;
1798 int res, i, j; 1800 int res, i, j;
1799 unsigned int nr_pages;
1800 struct page **pages; 1801 struct page **pages;
1801 1802
1802 nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
1803
1804 /* User attempted Overflow! */ 1803 /* User attempted Overflow! */
1805 if ((uaddr + count) < uaddr) 1804 if ((uaddr + count) < uaddr)
1806 return -EINVAL; 1805 return -EINVAL;
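The sg.c hunk above swaps the open-coded mask arithmetic for a page-frame subtraction when sizing the user mapping; both expressions count the pages covered by [uaddr, uaddr + count). A small stand-alone C illustration of the equivalence (ordinary user-space code, PAGE_SHIFT fixed at 12 only for the example):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Old form: offset within the first page plus count, rounded up.
 * (~PAGE_MASK in the removed line equals PAGE_SIZE - 1.) */
static unsigned long pages_old(unsigned long uaddr, unsigned long count)
{
	return ((uaddr & ~PAGE_MASK) + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* New form from the hunk: last page frame minus first page frame. */
static unsigned long pages_new(unsigned long uaddr, unsigned long count)
{
	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	return end - start;
}

int main(void)
{
	assert(pages_old(0x1ff8, 16) == 2 && pages_new(0x1ff8, 16) == 2);
	assert(pages_old(0x2000, 4096) == 1 && pages_new(0x2000, 4096) == 1);
	printf("both page-count formulas agree\n");
	return 0;
}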
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 2f259f249522..ce63fc8312dc 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -50,10 +50,10 @@
50#include <scsi/scsi_dbg.h> 50#include <scsi/scsi_dbg.h>
51#include <scsi/scsi_device.h> 51#include <scsi/scsi_device.h>
52#include <scsi/scsi_driver.h> 52#include <scsi/scsi_driver.h>
53#include <scsi/scsi_cmnd.h>
53#include <scsi/scsi_eh.h> 54#include <scsi/scsi_eh.h>
54#include <scsi/scsi_host.h> 55#include <scsi/scsi_host.h>
55#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */ 56#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */
56#include <scsi/scsi_request.h>
57 57
58#include "scsi_logging.h" 58#include "scsi_logging.h"
59#include "sr.h" 59#include "sr.h"
@@ -199,15 +199,7 @@ int sr_media_change(struct cdrom_device_info *cdi, int slot)
199 /* check multisession offset etc */ 199 /* check multisession offset etc */
200 sr_cd_check(cdi); 200 sr_cd_check(cdi);
201 201
202 /* 202 get_sectorsize(cd);
203 * If the disk changed, the capacity will now be different,
204 * so we force a re-read of this information
205 * Force 2048 for the sector size so that filesystems won't
206 * be trying to use something that is too small if the disc
207 * has changed.
208 */
209 cd->needs_sector_size = 1;
210 cd->device->sector_size = 2048;
211 } 203 }
212 return retval; 204 return retval;
213} 205}
@@ -538,13 +530,6 @@ static int sr_open(struct cdrom_device_info *cdi, int purpose)
538 if (!scsi_block_when_processing_errors(sdev)) 530 if (!scsi_block_when_processing_errors(sdev))
539 goto error_out; 531 goto error_out;
540 532
541 /*
542 * If this device did not have media in the drive at boot time, then
543 * we would have been unable to get the sector size. Check to see if
544 * this is the case, and try again.
545 */
546 if (cd->needs_sector_size)
547 get_sectorsize(cd);
548 return 0; 533 return 0;
549 534
550error_out: 535error_out:
@@ -604,7 +589,6 @@ static int sr_probe(struct device *dev)
604 cd->driver = &sr_template; 589 cd->driver = &sr_template;
605 cd->disk = disk; 590 cd->disk = disk;
606 cd->capacity = 0x1fffff; 591 cd->capacity = 0x1fffff;
607 cd->needs_sector_size = 1;
608 cd->device->changed = 1; /* force recheck CD type */ 592 cd->device->changed = 1; /* force recheck CD type */
609 cd->use = 1; 593 cd->use = 1;
610 cd->readcd_known = 0; 594 cd->readcd_known = 0;
@@ -658,43 +642,30 @@ static void get_sectorsize(struct scsi_cd *cd)
658 unsigned char *buffer; 642 unsigned char *buffer;
659 int the_result, retries = 3; 643 int the_result, retries = 3;
660 int sector_size; 644 int sector_size;
661 struct scsi_request *SRpnt = NULL;
662 request_queue_t *queue; 645 request_queue_t *queue;
663 646
664 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 647 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
665 if (!buffer) 648 if (!buffer)
666 goto Enomem; 649 goto Enomem;
667 SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
668 if (!SRpnt)
669 goto Enomem;
670 650
671 do { 651 do {
672 cmd[0] = READ_CAPACITY; 652 cmd[0] = READ_CAPACITY;
673 memset((void *) &cmd[1], 0, 9); 653 memset((void *) &cmd[1], 0, 9);
674 /* Mark as really busy */
675 SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
676 SRpnt->sr_cmd_len = 0;
677
678 memset(buffer, 0, 8); 654 memset(buffer, 0, 8);
679 655
680 /* Do the command and wait.. */ 656 /* Do the command and wait.. */
681 SRpnt->sr_data_direction = DMA_FROM_DEVICE; 657 the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
682 scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer, 658 buffer, 8, NULL, SR_TIMEOUT,
683 8, SR_TIMEOUT, MAX_RETRIES); 659 MAX_RETRIES);
684 660
685 the_result = SRpnt->sr_result;
686 retries--; 661 retries--;
687 662
688 } while (the_result && retries); 663 } while (the_result && retries);
689 664
690 665
691 scsi_release_request(SRpnt);
692 SRpnt = NULL;
693
694 if (the_result) { 666 if (the_result) {
695 cd->capacity = 0x1fffff; 667 cd->capacity = 0x1fffff;
696 sector_size = 2048; /* A guess, just in case */ 668 sector_size = 2048; /* A guess, just in case */
697 cd->needs_sector_size = 1;
698 } else { 669 } else {
699#if 0 670#if 0
700 if (cdrom_get_last_written(&cd->cdi, 671 if (cdrom_get_last_written(&cd->cdi,
@@ -727,7 +698,6 @@ static void get_sectorsize(struct scsi_cd *cd)
727 printk("%s: unsupported sector size %d.\n", 698 printk("%s: unsupported sector size %d.\n",
728 cd->cdi.name, sector_size); 699 cd->cdi.name, sector_size);
729 cd->capacity = 0; 700 cd->capacity = 0;
730 cd->needs_sector_size = 1;
731 } 701 }
732 702
733 cd->device->sector_size = sector_size; 703 cd->device->sector_size = sector_size;
@@ -736,7 +706,6 @@ static void get_sectorsize(struct scsi_cd *cd)
736 * Add this so that we have the ability to correctly gauge 706 * Add this so that we have the ability to correctly gauge
737 * what the device is capable of. 707 * what the device is capable of.
738 */ 708 */
739 cd->needs_sector_size = 0;
740 set_capacity(cd->disk, cd->capacity); 709 set_capacity(cd->disk, cd->capacity);
741 } 710 }
742 711
@@ -748,10 +717,7 @@ out:
748 717
749Enomem: 718Enomem:
750 cd->capacity = 0x1fffff; 719 cd->capacity = 0x1fffff;
751 sector_size = 2048; /* A guess, just in case */ 720 cd->device->sector_size = 2048; /* A guess, just in case */
752 cd->needs_sector_size = 1;
753 if (SRpnt)
754 scsi_release_request(SRpnt);
755 goto out; 721 goto out;
756} 722}
757 723
@@ -759,8 +725,8 @@ static void get_capabilities(struct scsi_cd *cd)
759{ 725{
760 unsigned char *buffer; 726 unsigned char *buffer;
761 struct scsi_mode_data data; 727 struct scsi_mode_data data;
762 struct scsi_request *SRpnt;
763 unsigned char cmd[MAX_COMMAND_SIZE]; 728 unsigned char cmd[MAX_COMMAND_SIZE];
729 struct scsi_sense_hdr sshdr;
764 unsigned int the_result; 730 unsigned int the_result;
765 int retries, rc, n; 731 int retries, rc, n;
766 732
@@ -776,19 +742,11 @@ static void get_capabilities(struct scsi_cd *cd)
776 "" 742 ""
777 }; 743 };
778 744
779 /* allocate a request for the TEST_UNIT_READY */
780 SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
781 if (!SRpnt) {
782 printk(KERN_WARNING "(get_capabilities:) Request allocation "
783 "failure.\n");
784 return;
785 }
786 745
787 /* allocate transfer buffer */ 746 /* allocate transfer buffer */
788 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 747 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
789 if (!buffer) { 748 if (!buffer) {
790 printk(KERN_ERR "sr: out of memory.\n"); 749 printk(KERN_ERR "sr: out of memory.\n");
791 scsi_release_request(SRpnt);
792 return; 750 return;
793 } 751 }
794 752
@@ -800,24 +758,19 @@ static void get_capabilities(struct scsi_cd *cd)
800 memset((void *)cmd, 0, MAX_COMMAND_SIZE); 758 memset((void *)cmd, 0, MAX_COMMAND_SIZE);
801 cmd[0] = TEST_UNIT_READY; 759 cmd[0] = TEST_UNIT_READY;
802 760
803 SRpnt->sr_cmd_len = 0; 761 the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL,
804 SRpnt->sr_sense_buffer[0] = 0; 762 0, &sshdr, SR_TIMEOUT,
805 SRpnt->sr_sense_buffer[2] = 0; 763 MAX_RETRIES);
806 SRpnt->sr_data_direction = DMA_NONE;
807
808 scsi_wait_req (SRpnt, (void *) cmd, buffer,
809 0, SR_TIMEOUT, MAX_RETRIES);
810 764
811 the_result = SRpnt->sr_result;
812 retries++; 765 retries++;
813 } while (retries < 5 && 766 } while (retries < 5 &&
814 (!scsi_status_is_good(the_result) || 767 (!scsi_status_is_good(the_result) ||
815 ((driver_byte(the_result) & DRIVER_SENSE) && 768 (scsi_sense_valid(&sshdr) &&
816 SRpnt->sr_sense_buffer[2] == UNIT_ATTENTION))); 769 sshdr.sense_key == UNIT_ATTENTION)));
817 770
818 /* ask for mode page 0x2a */ 771 /* ask for mode page 0x2a */
819 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 772 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
820 SR_TIMEOUT, 3, &data); 773 SR_TIMEOUT, 3, &data, NULL);
821 774
822 if (!scsi_status_is_good(rc)) { 775 if (!scsi_status_is_good(rc)) {
823 /* failed, drive doesn't have capabilities mode page */ 776 /* failed, drive doesn't have capabilities mode page */
@@ -825,7 +778,6 @@ static void get_capabilities(struct scsi_cd *cd)
825 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | 778 cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
826 CDC_DVD | CDC_DVD_RAM | 779 CDC_DVD | CDC_DVD_RAM |
827 CDC_SELECT_DISC | CDC_SELECT_SPEED); 780 CDC_SELECT_DISC | CDC_SELECT_SPEED);
828 scsi_release_request(SRpnt);
829 kfree(buffer); 781 kfree(buffer);
830 printk("%s: scsi-1 drive\n", cd->cdi.name); 782 printk("%s: scsi-1 drive\n", cd->cdi.name);
831 return; 783 return;
@@ -885,7 +837,6 @@ static void get_capabilities(struct scsi_cd *cd)
885 cd->device->writeable = 1; 837 cd->device->writeable = 1;
886 } 838 }
887 839
888 scsi_release_request(SRpnt);
889 kfree(buffer); 840 kfree(buffer);
890} 841}
891 842
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 0b3178007203..d2bcd99c272f 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -33,7 +33,6 @@ typedef struct scsi_cd {
33 struct scsi_device *device; 33 struct scsi_device *device;
34 unsigned int vendor; /* vendor code, see sr_vendor.c */ 34 unsigned int vendor; /* vendor code, see sr_vendor.c */
35 unsigned long ms_offset; /* for reading multisession-CD's */ 35 unsigned long ms_offset; /* for reading multisession-CD's */
36 unsigned needs_sector_size:1; /* needs to get sector size */
37 unsigned use:1; /* is this device still supportable */ 36 unsigned use:1; /* is this device still supportable */
38 unsigned xa_flag:1; /* CD has XA sectors ? */ 37 unsigned xa_flag:1; /* CD has XA sectors ? */
39 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ 38 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 82d68fdb1548..6e45ac3c43c5 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -17,7 +17,7 @@
17#include <scsi/scsi_eh.h> 17#include <scsi/scsi_eh.h>
18#include <scsi/scsi_host.h> 18#include <scsi/scsi_host.h>
19#include <scsi/scsi_ioctl.h> 19#include <scsi/scsi_ioctl.h>
20#include <scsi/scsi_request.h> 20#include <scsi/scsi_cmnd.h>
21 21
22#include "sr.h" 22#include "sr.h"
23 23
@@ -84,41 +84,37 @@ static int sr_fake_playtrkind(struct cdrom_device_info *cdi, struct cdrom_ti *ti
84 84
85int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) 85int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
86{ 86{
87 struct scsi_request *SRpnt;
88 struct scsi_device *SDev; 87 struct scsi_device *SDev;
89 struct request *req; 88 struct scsi_sense_hdr sshdr;
90 int result, err = 0, retries = 0; 89 int result, err = 0, retries = 0;
90 struct request_sense *sense = cgc->sense;
91 91
92 SDev = cd->device; 92 SDev = cd->device;
93 SRpnt = scsi_allocate_request(SDev, GFP_KERNEL); 93
94 if (!SRpnt) { 94 if (!sense) {
95 printk(KERN_ERR "Unable to allocate SCSI request in sr_do_ioctl"); 95 sense = kmalloc(sizeof(*sense), GFP_KERNEL);
96 err = -ENOMEM; 96 if (!sense) {
97 goto out; 97 err = -ENOMEM;
98 } 98 goto out;
99 SRpnt->sr_data_direction = cgc->data_direction; 99 }
100 }
100 101
101 retry: 102 retry:
102 if (!scsi_block_when_processing_errors(SDev)) { 103 if (!scsi_block_when_processing_errors(SDev)) {
103 err = -ENODEV; 104 err = -ENODEV;
104 goto out_free; 105 goto out;
105 } 106 }
106 107
107 scsi_wait_req(SRpnt, cgc->cmd, cgc->buffer, cgc->buflen, 108 memset(sense, 0, sizeof(*sense));
108 cgc->timeout, IOCTL_RETRIES); 109 result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
109 110 cgc->buffer, cgc->buflen, (char *)sense,
110 req = SRpnt->sr_request; 111 cgc->timeout, IOCTL_RETRIES, 0);
111 if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
112 memcpy(req->buffer, SRpnt->sr_buffer, SRpnt->sr_bufflen);
113 kfree(SRpnt->sr_buffer);
114 SRpnt->sr_buffer = req->buffer;
115 }
116 112
117 result = SRpnt->sr_result; 113 scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
118 114
119 /* Minimal error checking. Ignore cases we know about, and report the rest. */ 115 /* Minimal error checking. Ignore cases we know about, and report the rest. */
120 if (driver_byte(result) != 0) { 116 if (driver_byte(result) != 0) {
121 switch (SRpnt->sr_sense_buffer[2] & 0xf) { 117 switch (sshdr.sense_key) {
122 case UNIT_ATTENTION: 118 case UNIT_ATTENTION:
123 SDev->changed = 1; 119 SDev->changed = 1;
124 if (!cgc->quiet) 120 if (!cgc->quiet)
@@ -128,8 +124,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
128 err = -ENOMEDIUM; 124 err = -ENOMEDIUM;
129 break; 125 break;
130 case NOT_READY: /* This happens if there is no disc in drive */ 126 case NOT_READY: /* This happens if there is no disc in drive */
131 if (SRpnt->sr_sense_buffer[12] == 0x04 && 127 if (sshdr.asc == 0x04 &&
132 SRpnt->sr_sense_buffer[13] == 0x01) { 128 sshdr.ascq == 0x01) {
133 /* sense: Logical unit is in process of becoming ready */ 129 /* sense: Logical unit is in process of becoming ready */
134 if (!cgc->quiet) 130 if (!cgc->quiet)
135 printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name); 131 printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name);
@@ -146,37 +142,33 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
146 if (!cgc->quiet) 142 if (!cgc->quiet)
147 printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name); 143 printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name);
148#ifdef DEBUG 144#ifdef DEBUG
149 scsi_print_req_sense("sr", SRpnt); 145 scsi_print_sense_hdr("sr", &sshdr);
150#endif 146#endif
151 err = -ENOMEDIUM; 147 err = -ENOMEDIUM;
152 break; 148 break;
153 case ILLEGAL_REQUEST: 149 case ILLEGAL_REQUEST:
154 err = -EIO; 150 err = -EIO;
155 if (SRpnt->sr_sense_buffer[12] == 0x20 && 151 if (sshdr.asc == 0x20 &&
156 SRpnt->sr_sense_buffer[13] == 0x00) 152 sshdr.ascq == 0x00)
157 /* sense: Invalid command operation code */ 153 /* sense: Invalid command operation code */
158 err = -EDRIVE_CANT_DO_THIS; 154 err = -EDRIVE_CANT_DO_THIS;
159#ifdef DEBUG 155#ifdef DEBUG
160 __scsi_print_command(cgc->cmd); 156 __scsi_print_command(cgc->cmd);
161 scsi_print_req_sense("sr", SRpnt); 157 scsi_print_sense_hdr("sr", &sshdr);
162#endif 158#endif
163 break; 159 break;
164 default: 160 default:
165 printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name); 161 printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name);
166 __scsi_print_command(cgc->cmd); 162 __scsi_print_command(cgc->cmd);
167 scsi_print_req_sense("sr", SRpnt); 163 scsi_print_sense_hdr("sr", &sshdr);
168 err = -EIO; 164 err = -EIO;
169 } 165 }
170 } 166 }
171 167
172 if (cgc->sense)
173 memcpy(cgc->sense, SRpnt->sr_sense_buffer, sizeof(*cgc->sense));
174
175 /* Wake up a process waiting for device */ 168 /* Wake up a process waiting for device */
176 out_free:
177 scsi_release_request(SRpnt);
178 SRpnt = NULL;
179 out: 169 out:
170 if (!cgc->sense)
171 kfree(sense);
180 cgc->stat = err; 172 cgc->stat = err;
181 return err; 173 return err;
182} 174}
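sr_do_ioctl() above now runs the packet command through scsi_execute() with a raw sense buffer (allocated locally when the caller did not supply one) and decodes it with scsi_normalize_sense() into a struct scsi_sense_hdr. A stripped-down, hedged sketch of that decode step; run_packet_cmd() is a hypothetical wrapper, and only the UNIT_ATTENTION branch from the hunk is kept.

#include <linux/string.h>
#include <linux/cdrom.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

/* Sketch: execute a packet command, then normalize the raw sense
 * bytes so the sense key / asc / ascq can be checked symbolically. */
static int run_packet_cmd(struct scsi_device *sdev, unsigned char *cmd,
			  void *buf, unsigned int buflen, int dir,
			  int timeout, int retries)
{
	struct request_sense sense;	/* raw sense, as cgc->sense above */
	struct scsi_sense_hdr sshdr;
	int result;

	memset(&sense, 0, sizeof(sense));
	result = scsi_execute(sdev, cmd, dir, buf, buflen,
			      (char *)&sense, timeout, retries, 0);

	scsi_normalize_sense((char *)&sense, sizeof(sense), &sshdr);
	if (driver_byte(result) != 0 && sshdr.sense_key == UNIT_ATTENTION)
		sdev->changed = 1;	/* media change, as in the hunk */

	return result;
}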
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 0a7839db5752..a93308ae9736 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static char *verstr = "20050501"; 20static char *verstr = "20050830";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -219,6 +219,12 @@ static int switch_partition(struct scsi_tape *);
219 219
220static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long); 220static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long);
221 221
222static void scsi_tape_release(struct kref *);
223
224#define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
225
226static DECLARE_MUTEX(st_ref_sem);
227
222 228
223#include "osst_detect.h" 229#include "osst_detect.h"
224#ifndef SIGS_FROM_OSST 230#ifndef SIGS_FROM_OSST
@@ -230,6 +236,46 @@ static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long);
230 {"OnStream", "FW-", "", "osst"} 236 {"OnStream", "FW-", "", "osst"}
231#endif 237#endif
232 238
239static struct scsi_tape *scsi_tape_get(int dev)
240{
241 struct scsi_tape *STp = NULL;
242
243 down(&st_ref_sem);
244 write_lock(&st_dev_arr_lock);
245
246 if (dev < st_dev_max && scsi_tapes != NULL)
247 STp = scsi_tapes[dev];
248 if (!STp) goto out;
249
250 kref_get(&STp->kref);
251
252 if (!STp->device)
253 goto out_put;
254
255 if (scsi_device_get(STp->device))
256 goto out_put;
257
258 goto out;
259
260out_put:
261 kref_put(&STp->kref, scsi_tape_release);
262 STp = NULL;
263out:
264 write_unlock(&st_dev_arr_lock);
265 up(&st_ref_sem);
266 return STp;
267}
268
269static void scsi_tape_put(struct scsi_tape *STp)
270{
271 struct scsi_device *sdev = STp->device;
272
273 down(&st_ref_sem);
274 kref_put(&STp->kref, scsi_tape_release);
275 scsi_device_put(sdev);
276 up(&st_ref_sem);
277}
278
233struct st_reject_data { 279struct st_reject_data {
234 char *vendor; 280 char *vendor;
235 char *model; 281 char *model;
@@ -311,7 +357,7 @@ static int st_chk_result(struct scsi_tape *STp, struct scsi_request * SRpnt)
311 return 0; 357 return 0;
312 358
313 cmdstatp = &STp->buffer->cmdstat; 359 cmdstatp = &STp->buffer->cmdstat;
314 st_analyze_sense(STp->buffer->last_SRpnt, cmdstatp); 360 st_analyze_sense(SRpnt, cmdstatp);
315 361
316 if (cmdstatp->have_sense) 362 if (cmdstatp->have_sense)
317 scode = STp->buffer->cmdstat.sense_hdr.sense_key; 363 scode = STp->buffer->cmdstat.sense_hdr.sense_key;
@@ -399,10 +445,10 @@ static void st_sleep_done(struct scsi_cmnd * SCpnt)
399 445
400 (STp->buffer)->cmdstat.midlevel_result = SCpnt->result; 446 (STp->buffer)->cmdstat.midlevel_result = SCpnt->result;
401 SCpnt->request->rq_status = RQ_SCSI_DONE; 447 SCpnt->request->rq_status = RQ_SCSI_DONE;
402 (STp->buffer)->last_SRpnt = SCpnt->sc_request;
403 DEB( STp->write_pending = 0; ) 448 DEB( STp->write_pending = 0; )
404 449
405 complete(SCpnt->request->waiting); 450 if (SCpnt->request->waiting)
451 complete(SCpnt->request->waiting);
406} 452}
407 453
408/* Do the scsi command. Waits until command performed if do_wait is true. 454/* Do the scsi command. Waits until command performed if do_wait is true.
@@ -412,8 +458,20 @@ static struct scsi_request *
412st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd, 458st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd,
413 int bytes, int direction, int timeout, int retries, int do_wait) 459 int bytes, int direction, int timeout, int retries, int do_wait)
414{ 460{
461 struct completion *waiting;
415 unsigned char *bp; 462 unsigned char *bp;
416 463
464 /* if async, make sure there's no command outstanding */
465 if (!do_wait && ((STp->buffer)->last_SRpnt)) {
466 printk(KERN_ERR "%s: Async command already active.\n",
467 tape_name(STp));
468 if (signal_pending(current))
469 (STp->buffer)->syscall_result = (-EINTR);
470 else
471 (STp->buffer)->syscall_result = (-EBUSY);
472 return NULL;
473 }
474
417 if (SRpnt == NULL) { 475 if (SRpnt == NULL) {
418 SRpnt = scsi_allocate_request(STp->device, GFP_ATOMIC); 476 SRpnt = scsi_allocate_request(STp->device, GFP_ATOMIC);
419 if (SRpnt == NULL) { 477 if (SRpnt == NULL) {
@@ -427,7 +485,13 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
427 } 485 }
428 } 486 }
429 487
430 init_completion(&STp->wait); 488 /* If async IO, set last_SRpnt. This ptr tells write_behind_check
489 which IO is outstanding. It's nulled out when the IO completes. */
490 if (!do_wait)
491 (STp->buffer)->last_SRpnt = SRpnt;
492
493 waiting = &STp->wait;
494 init_completion(waiting);
431 SRpnt->sr_use_sg = STp->buffer->do_dio || (bytes > (STp->buffer)->frp[0].length); 495 SRpnt->sr_use_sg = STp->buffer->do_dio || (bytes > (STp->buffer)->frp[0].length);
432 if (SRpnt->sr_use_sg) { 496 if (SRpnt->sr_use_sg) {
433 if (!STp->buffer->do_dio) 497 if (!STp->buffer->do_dio)
@@ -438,17 +502,20 @@ st_do_scsi(struct scsi_request * SRpnt, struct scsi_tape * STp, unsigned char *c
438 bp = (STp->buffer)->b_data; 502 bp = (STp->buffer)->b_data;
439 SRpnt->sr_data_direction = direction; 503 SRpnt->sr_data_direction = direction;
440 SRpnt->sr_cmd_len = 0; 504 SRpnt->sr_cmd_len = 0;
441 SRpnt->sr_request->waiting = &(STp->wait); 505 SRpnt->sr_request->waiting = waiting;
442 SRpnt->sr_request->rq_status = RQ_SCSI_BUSY; 506 SRpnt->sr_request->rq_status = RQ_SCSI_BUSY;
443 SRpnt->sr_request->rq_disk = STp->disk; 507 SRpnt->sr_request->rq_disk = STp->disk;
508 SRpnt->sr_request->end_io = blk_end_sync_rq;
444 STp->buffer->cmdstat.have_sense = 0; 509 STp->buffer->cmdstat.have_sense = 0;
445 510
446 scsi_do_req(SRpnt, (void *) cmd, bp, bytes, 511 scsi_do_req(SRpnt, (void *) cmd, bp, bytes,
447 st_sleep_done, timeout, retries); 512 st_sleep_done, timeout, retries);
448 513
449 if (do_wait) { 514 if (do_wait) {
450 wait_for_completion(SRpnt->sr_request->waiting); 515 wait_for_completion(waiting);
451 SRpnt->sr_request->waiting = NULL; 516 SRpnt->sr_request->waiting = NULL;
517 if (SRpnt->sr_request->rq_status != RQ_SCSI_DONE)
518 SRpnt->sr_result |= (DRIVER_ERROR << 24);
452 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); 519 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
453 } 520 }
454 return SRpnt; 521 return SRpnt;
@@ -465,6 +532,7 @@ static int write_behind_check(struct scsi_tape * STp)
465 struct st_buffer *STbuffer; 532 struct st_buffer *STbuffer;
466 struct st_partstat *STps; 533 struct st_partstat *STps;
467 struct st_cmdstatus *cmdstatp; 534 struct st_cmdstatus *cmdstatp;
535 struct scsi_request *SRpnt;
468 536
469 STbuffer = STp->buffer; 537 STbuffer = STp->buffer;
470 if (!STbuffer->writing) 538 if (!STbuffer->writing)
@@ -478,10 +546,14 @@ static int write_behind_check(struct scsi_tape * STp)
478 ) /* end DEB */ 546 ) /* end DEB */
479 547
480 wait_for_completion(&(STp->wait)); 548 wait_for_completion(&(STp->wait));
481 (STp->buffer)->last_SRpnt->sr_request->waiting = NULL; 549 SRpnt = STbuffer->last_SRpnt;
550 STbuffer->last_SRpnt = NULL;
551 SRpnt->sr_request->waiting = NULL;
552 if (SRpnt->sr_request->rq_status != RQ_SCSI_DONE)
553 SRpnt->sr_result |= (DRIVER_ERROR << 24);
482 554
483 (STp->buffer)->syscall_result = st_chk_result(STp, (STp->buffer)->last_SRpnt); 555 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
484 scsi_release_request((STp->buffer)->last_SRpnt); 556 scsi_release_request(SRpnt);
485 557
486 STbuffer->buffer_bytes -= STbuffer->writing; 558 STbuffer->buffer_bytes -= STbuffer->writing;
487 STps = &(STp->ps[STp->partition]); 559 STps = &(STp->ps[STp->partition]);
@@ -1055,25 +1127,20 @@ static int st_open(struct inode *inode, struct file *filp)
1055 */ 1127 */
1056 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); 1128 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
1057 1129
1130 if (!(STp = scsi_tape_get(dev)))
1131 return -ENXIO;
1132
1058 write_lock(&st_dev_arr_lock); 1133 write_lock(&st_dev_arr_lock);
1059 if (dev >= st_dev_max || scsi_tapes == NULL ||
1060 ((STp = scsi_tapes[dev]) == NULL)) {
1061 write_unlock(&st_dev_arr_lock);
1062 return (-ENXIO);
1063 }
1064 filp->private_data = STp; 1134 filp->private_data = STp;
1065 name = tape_name(STp); 1135 name = tape_name(STp);
1066 1136
1067 if (STp->in_use) { 1137 if (STp->in_use) {
1068 write_unlock(&st_dev_arr_lock); 1138 write_unlock(&st_dev_arr_lock);
1139 scsi_tape_put(STp);
1069 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); ) 1140 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
1070 return (-EBUSY); 1141 return (-EBUSY);
1071 } 1142 }
1072 1143
1073 if(scsi_device_get(STp->device)) {
1074 write_unlock(&st_dev_arr_lock);
1075 return (-ENXIO);
1076 }
1077 STp->in_use = 1; 1144 STp->in_use = 1;
1078 write_unlock(&st_dev_arr_lock); 1145 write_unlock(&st_dev_arr_lock);
1079 STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0; 1146 STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
@@ -1118,7 +1185,7 @@ static int st_open(struct inode *inode, struct file *filp)
1118 err_out: 1185 err_out:
1119 normalize_buffer(STp->buffer); 1186 normalize_buffer(STp->buffer);
1120 STp->in_use = 0; 1187 STp->in_use = 0;
1121 scsi_device_put(STp->device); 1188 scsi_tape_put(STp);
1122 return retval; 1189 return retval;
1123 1190
1124} 1191}
@@ -1250,7 +1317,7 @@ static int st_release(struct inode *inode, struct file *filp)
1250 write_lock(&st_dev_arr_lock); 1317 write_lock(&st_dev_arr_lock);
1251 STp->in_use = 0; 1318 STp->in_use = 0;
1252 write_unlock(&st_dev_arr_lock); 1319 write_unlock(&st_dev_arr_lock);
1253 scsi_device_put(STp->device); 1320 scsi_tape_put(STp);
1254 1321
1255 return result; 1322 return result;
1256} 1323}
@@ -3887,6 +3954,7 @@ static int st_probe(struct device *dev)
3887 goto out_put_disk; 3954 goto out_put_disk;
3888 } 3955 }
3889 memset(tpnt, 0, sizeof(struct scsi_tape)); 3956 memset(tpnt, 0, sizeof(struct scsi_tape));
3957 kref_init(&tpnt->kref);
3890 tpnt->disk = disk; 3958 tpnt->disk = disk;
3891 sprintf(disk->disk_name, "st%d", i); 3959 sprintf(disk->disk_name, "st%d", i);
3892 disk->private_data = &tpnt->driver; 3960 disk->private_data = &tpnt->driver;
@@ -3902,6 +3970,7 @@ static int st_probe(struct device *dev)
3902 tpnt->tape_type = MT_ISSCSI2; 3970 tpnt->tape_type = MT_ISSCSI2;
3903 3971
3904 tpnt->buffer = buffer; 3972 tpnt->buffer = buffer;
3973 tpnt->buffer->last_SRpnt = NULL;
3905 3974
3906 tpnt->inited = 0; 3975 tpnt->inited = 0;
3907 tpnt->dirty = 0; 3976 tpnt->dirty = 0;
@@ -4076,15 +4145,10 @@ static int st_remove(struct device *dev)
4076 tpnt->modes[mode].cdevs[j] = NULL; 4145 tpnt->modes[mode].cdevs[j] = NULL;
4077 } 4146 }
4078 } 4147 }
4079 tpnt->device = NULL;
4080 4148
4081 if (tpnt->buffer) { 4149 down(&st_ref_sem);
4082 tpnt->buffer->orig_frp_segs = 0; 4150 kref_put(&tpnt->kref, scsi_tape_release);
4083 normalize_buffer(tpnt->buffer); 4151 up(&st_ref_sem);
4084 kfree(tpnt->buffer);
4085 }
4086 put_disk(tpnt->disk);
4087 kfree(tpnt);
4088 return 0; 4152 return 0;
4089 } 4153 }
4090 } 4154 }
@@ -4093,6 +4157,34 @@ static int st_remove(struct device *dev)
4093 return 0; 4157 return 0;
4094} 4158}
4095 4159
4160/**
4161 * scsi_tape_release - Called to free the Scsi_Tape structure
4162 * @kref: pointer to embedded kref
4163 *
4164 * st_ref_sem must be held entering this routine. Because it is
4165 * called on last put, you should always use the scsi_tape_get()
4166 * scsi_tape_put() helpers which manipulate the semaphore directly
4167 * and never do a direct kref_put().
4168 **/
4169static void scsi_tape_release(struct kref *kref)
4170{
4171 struct scsi_tape *tpnt = to_scsi_tape(kref);
4172 struct gendisk *disk = tpnt->disk;
4173
4174 tpnt->device = NULL;
4175
4176 if (tpnt->buffer) {
4177 tpnt->buffer->orig_frp_segs = 0;
4178 normalize_buffer(tpnt->buffer);
4179 kfree(tpnt->buffer);
4180 }
4181
4182 disk->private_data = NULL;
4183 put_disk(disk);
4184 kfree(tpnt);
4185 return;
4186}
4187
4096static void st_intr(struct scsi_cmnd *SCpnt) 4188static void st_intr(struct scsi_cmnd *SCpnt)
4097{ 4189{
4098 scsi_io_completion(SCpnt, (SCpnt->result ? 0: SCpnt->bufflen), 1); 4190 scsi_io_completion(SCpnt, (SCpnt->result ? 0: SCpnt->bufflen), 1);
@@ -4348,12 +4440,12 @@ static int st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pag
4348static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 4440static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
4349 unsigned long uaddr, size_t count, int rw) 4441 unsigned long uaddr, size_t count, int rw)
4350{ 4442{
4443 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
4444 unsigned long start = uaddr >> PAGE_SHIFT;
4445 const int nr_pages = end - start;
4351 int res, i, j; 4446 int res, i, j;
4352 unsigned int nr_pages;
4353 struct page **pages; 4447 struct page **pages;
4354 4448
4355 nr_pages = ((uaddr & ~PAGE_MASK) + count + ~PAGE_MASK) >> PAGE_SHIFT;
4356
4357 /* User attempted Overflow! */ 4449 /* User attempted Overflow! */
4358 if ((uaddr + count) < uaddr) 4450 if ((uaddr + count) < uaddr)
4359 return -EINVAL; 4451 return -EINVAL;
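The st.c changes above move tape lifetime handling onto a struct kref protected by a semaphore: scsi_tape_get()/scsi_tape_put() bracket every user, st_probe() does kref_init(), and scsi_tape_release() frees the structure only on the last put, so st_remove() no longer tears it down while an open file still references it. A minimal sketch of that pattern, with a hypothetical struct tape standing in for struct scsi_tape:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <asm/semaphore.h>

/* Hypothetical cut-down version of the get/put/release trio. */
struct tape {
	struct kref kref;
	/* ... device state ... */
};

static DECLARE_MUTEX(tape_ref_sem);

static void tape_release(struct kref *kref)
{
	struct tape *t = container_of(kref, struct tape, kref);

	kfree(t);			/* runs only on the final put */
}

static struct tape *tape_get(struct tape *t)
{
	down(&tape_ref_sem);
	if (t)
		kref_get(&t->kref);
	up(&tape_ref_sem);
	return t;
}

static void tape_put(struct tape *t)
{
	down(&tape_ref_sem);
	kref_put(&t->kref, tape_release);	/* may call tape_release() */
	up(&tape_ref_sem);
}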
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 061da111398e..790acac160bc 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -3,7 +3,7 @@
3#define _ST_H 3#define _ST_H
4 4
5#include <linux/completion.h> 5#include <linux/completion.h>
6 6#include <linux/kref.h>
7 7
8/* Descriptor for analyzed sense data */ 8/* Descriptor for analyzed sense data */
9struct st_cmdstatus { 9struct st_cmdstatus {
@@ -156,6 +156,7 @@ struct scsi_tape {
156 unsigned char last_sense[16]; 156 unsigned char last_sense[16];
157#endif 157#endif
158 struct gendisk *disk; 158 struct gendisk *disk;
159 struct kref kref;
159}; 160};
160 161
161/* Bit masks for use_pf */ 162/* Bit masks for use_pf */
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 0b10169961eb..aec39fb261ca 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -58,8 +58,7 @@ static const char serial21285_name[] = "Footbridge UART";
58 * int((BAUD_BASE - (baud >> 1)) / baud) 58 * int((BAUD_BASE - (baud >> 1)) / baud)
59 */ 59 */
60 60
61static void 61static void serial21285_stop_tx(struct uart_port *port)
62serial21285_stop_tx(struct uart_port *port, unsigned int tty_stop)
63{ 62{
64 if (tx_enabled(port)) { 63 if (tx_enabled(port)) {
65 disable_irq(IRQ_CONTX); 64 disable_irq(IRQ_CONTX);
@@ -67,8 +66,7 @@ serial21285_stop_tx(struct uart_port *port, unsigned int tty_stop)
67 } 66 }
68} 67}
69 68
70static void 69static void serial21285_start_tx(struct uart_port *port)
71serial21285_start_tx(struct uart_port *port, unsigned int tty_start)
72{ 70{
73 if (!tx_enabled(port)) { 71 if (!tx_enabled(port)) {
74 enable_irq(IRQ_CONTX); 72 enable_irq(IRQ_CONTX);
@@ -148,7 +146,7 @@ static irqreturn_t serial21285_tx_chars(int irq, void *dev_id, struct pt_regs *r
148 goto out; 146 goto out;
149 } 147 }
150 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 148 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
151 serial21285_stop_tx(port, 0); 149 serial21285_stop_tx(port);
152 goto out; 150 goto out;
153 } 151 }
154 152
@@ -164,7 +162,7 @@ static irqreturn_t serial21285_tx_chars(int irq, void *dev_id, struct pt_regs *r
164 uart_write_wakeup(port); 162 uart_write_wakeup(port);
165 163
166 if (uart_circ_empty(xmit)) 164 if (uart_circ_empty(xmit))
167 serial21285_stop_tx(port, 0); 165 serial21285_stop_tx(port);
168 166
169 out: 167 out:
170 return IRQ_HANDLED; 168 return IRQ_HANDLED;
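The 21285.c hunks above, and most of the serial driver hunks that follow, apply one interface change: the uart_ops stop_tx()/start_tx() methods lose their tty_stop/tty_start arguments, and every internal caller drops the trailing 0. A hedged sketch of what the conversion looks like from a driver's point of view; the foo_* names are hypothetical, and real methods of course touch their hardware's TX interrupt enable.

#include <linux/serial_core.h>

/* Before:
 *   static void foo_stop_tx(struct uart_port *port, unsigned int tty_stop);
 *   static void foo_start_tx(struct uart_port *port, unsigned int tty_start);
 * After, matching the hunks in this series: */
static void foo_stop_tx(struct uart_port *port)
{
	/* mask the TX interrupt / halt the transmitter for this port */
}

static void foo_start_tx(struct uart_port *port)
{
	/* unmask the TX interrupt / kick transmission for this port */
}

static struct uart_ops foo_ops = {
	.stop_tx	= foo_stop_tx,
	.start_tx	= foo_start_tx,
	/* ... remaining uart_ops members omitted ... */
};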
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 9097f2f7b12a..2efb317153ce 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -40,7 +40,6 @@
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/irq.h> 41#include <asm/irq.h>
42#include <asm/system.h> 42#include <asm/system.h>
43#include <asm/segment.h>
44#include <asm/delay.h> 43#include <asm/delay.h>
45#include <asm/uaccess.h> 44#include <asm/uaccess.h>
46 45
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index b116122e569a..170c9d2a749c 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -2474,8 +2474,7 @@ static struct tty_operations rs_360_ops = {
2474 .tiocmset = rs_360_tiocmset, 2474 .tiocmset = rs_360_tiocmset,
2475}; 2475};
2476 2476
2477/* int __init rs_360_init(void) */ 2477static int __init rs_360_init(void)
2478int rs_360_init(void)
2479{ 2478{
2480 struct serial_state * state; 2479 struct serial_state * state;
2481 ser_info_t *info; 2480 ser_info_t *info;
@@ -2827,10 +2826,7 @@ int rs_360_init(void)
2827 2826
2828 return 0; 2827 return 0;
2829} 2828}
2830 2829module_init(rs_360_init);
2831
2832
2833
2834 2830
2835/* This must always be called before the rs_360_init() function, otherwise 2831/* This must always be called before the rs_360_init() function, otherwise
2836 * it blows away the port control information. 2832 * it blows away the port control information.
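The 68360serial.c change above makes rs_360_init() static __init and registers it with module_init() instead of leaving it to be called by hand. A tiny, hedged sketch of that registration idiom (example_init is a made-up name):

#include <linux/init.h>
#include <linux/module.h>

/* Registered entry point: the kernel or module loader calls this at
 * the appropriate time; __init lets its text be discarded afterwards
 * when built in. */
static int __init example_init(void)
{
	/* probe hardware, register ports, return 0 on success */
	return 0;
}
module_init(example_init);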
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 7e8fc7c1d4cc..30a0a3d10145 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1001,7 +1001,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
1001 } 1001 }
1002} 1002}
1003 1003
1004static void serial8250_stop_tx(struct uart_port *port, unsigned int tty_stop) 1004static void serial8250_stop_tx(struct uart_port *port)
1005{ 1005{
1006 struct uart_8250_port *up = (struct uart_8250_port *)port; 1006 struct uart_8250_port *up = (struct uart_8250_port *)port;
1007 1007
@@ -1018,7 +1018,7 @@ static void serial8250_stop_tx(struct uart_port *port, unsigned int tty_stop)
1018 1018
1019static void transmit_chars(struct uart_8250_port *up); 1019static void transmit_chars(struct uart_8250_port *up);
1020 1020
1021static void serial8250_start_tx(struct uart_port *port, unsigned int tty_start) 1021static void serial8250_start_tx(struct uart_port *port)
1022{ 1022{
1023 struct uart_8250_port *up = (struct uart_8250_port *)port; 1023 struct uart_8250_port *up = (struct uart_8250_port *)port;
1024 1024
@@ -1158,7 +1158,11 @@ static _INLINE_ void transmit_chars(struct uart_8250_port *up)
1158 up->port.x_char = 0; 1158 up->port.x_char = 0;
1159 return; 1159 return;
1160 } 1160 }
1161 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 1161 if (uart_tx_stopped(&up->port)) {
1162 serial8250_stop_tx(&up->port);
1163 return;
1164 }
1165 if (uart_circ_empty(xmit)) {
1162 __stop_tx(up); 1166 __stop_tx(up);
1163 return; 1167 return;
1164 } 1168 }
@@ -2586,82 +2590,3 @@ module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
2586MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); 2590MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
2587#endif 2591#endif
2588MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); 2592MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
2589
2590/**
2591 * register_serial - configure a 16x50 serial port at runtime
2592 * @req: request structure
2593 *
2594 * Configure the serial port specified by the request. If the
2595 * port exists and is in use an error is returned. If the port
2596 * is not currently in the table it is added.
2597 *
2598 * The port is then probed and if necessary the IRQ is autodetected
2599 * If this fails an error is returned.
2600 *
2601 * On success the port is ready to use and the line number is returned.
2602 *
2603 * Note: this function is deprecated - use serial8250_register_port
2604 * instead.
2605 */
2606int register_serial(struct serial_struct *req)
2607{
2608 struct uart_port port;
2609
2610 port.iobase = req->port;
2611 port.membase = req->iomem_base;
2612 port.irq = req->irq;
2613 port.uartclk = req->baud_base * 16;
2614 port.fifosize = req->xmit_fifo_size;
2615 port.regshift = req->iomem_reg_shift;
2616 port.iotype = req->io_type;
2617 port.flags = req->flags | UPF_BOOT_AUTOCONF;
2618 port.mapbase = req->iomap_base;
2619 port.dev = NULL;
2620
2621 if (share_irqs)
2622 port.flags |= UPF_SHARE_IRQ;
2623
2624 if (HIGH_BITS_OFFSET)
2625 port.iobase |= (long) req->port_high << HIGH_BITS_OFFSET;
2626
2627 /*
2628 * If a clock rate wasn't specified by the low level driver, then
2629 * default to the standard clock rate. This should be 115200 (*16)
2630 * and should not depend on the architecture's BASE_BAUD definition.
2631 * However, since this API will be deprecated, it's probably a
2632 * better idea to convert the drivers to use the new API
2633 * (serial8250_register_port and serial8250_unregister_port).
2634 */
2635 if (port.uartclk == 0) {
2636 printk(KERN_WARNING
2637 "Serial: registering port at [%08x,%08lx,%p] irq %d with zero baud_base\n",
2638 port.iobase, port.mapbase, port.membase, port.irq);
2639 printk(KERN_WARNING "Serial: see %s:%d for more information\n",
2640 __FILE__, __LINE__);
2641 dump_stack();
2642
2643 /*
2644 * Fix it up for now, but this is only a temporary measure.
2645 */
2646 port.uartclk = BASE_BAUD * 16;
2647 }
2648
2649 return serial8250_register_port(&port);
2650}
2651EXPORT_SYMBOL(register_serial);
2652
2653/**
2654 * unregister_serial - remove a 16x50 serial port at runtime
2655 * @line: serial line number
2656 *
2657 * Remove one serial port. This may not be called from interrupt
2658 * context. We hand the port back to our local PM control.
2659 *
2660 * Note: this function is deprecated - use serial8250_unregister_port
2661 * instead.
2662 */
2663void unregister_serial(int line)
2664{
2665 serial8250_unregister_port(line);
2666}
2667EXPORT_SYMBOL(unregister_serial);
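The 8250.c hunk above deletes the deprecated register_serial()/unregister_serial() wrappers; callers are expected to fill in a struct uart_port and use serial8250_register_port() directly, which is exactly the mapping the removed wrapper performed. A hedged sketch of that direct registration; the I/O base, IRQ and clock are placeholders, not real hardware values.

#include <linux/string.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

/* Sketch: register a 16x50 port without the old register_serial()
 * wrapper, mirroring the field mapping visible in the deleted code. */
static int register_example_port(void)
{
	struct uart_port port;

	memset(&port, 0, sizeof(port));
	port.iobase  = 0x3f8;			/* placeholder */
	port.irq     = 4;			/* placeholder */
	port.uartclk = 1843200;			/* 115200 * 16 */
	port.iotype  = UPIO_PORT;
	port.flags   = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;

	return serial8250_register_port(&port);	/* line number or -errno */
}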
diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h
index 9225c82faeb8..b1b459efda52 100644
--- a/drivers/serial/8250.h
+++ b/drivers/serial/8250.h
@@ -16,11 +16,7 @@
16 */ 16 */
17 17
18#include <linux/config.h> 18#include <linux/config.h>
19 19#include <linux/serial_8250.h>
20int serial8250_register_port(struct uart_port *);
21void serial8250_unregister_port(int line);
22void serial8250_suspend_port(int line);
23void serial8250_resume_port(int line);
24 20
25struct old_serial_port { 21struct old_serial_port {
26 unsigned int uart; 22 unsigned int uart;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index d5797618a3b9..e39818a34a07 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -308,7 +308,7 @@ config SERIAL_S3C2410_CONSOLE
308 308
309config SERIAL_DZ 309config SERIAL_DZ
310 bool "DECstation DZ serial driver" 310 bool "DECstation DZ serial driver"
311 depends on MACH_DECSTATION && MIPS32 311 depends on MACH_DECSTATION && 32BIT
312 select SERIAL_CORE 312 select SERIAL_CORE
313 help 313 help
314 DZ11-family serial controllers for VAXstations, including the 314 DZ11-family serial controllers for VAXstations, including the
@@ -830,7 +830,7 @@ config SERIAL_M32R_PLDSIO
830 830
831config SERIAL_TXX9 831config SERIAL_TXX9
832 bool "TMPTX39XX/49XX SIO support" 832 bool "TMPTX39XX/49XX SIO support"
833 depends HAS_TXX9_SERIAL 833 depends HAS_TXX9_SERIAL && BROKEN
834 select SERIAL_CORE 834 select SERIAL_CORE
835 default y 835 default y
836 836
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 2884b310e54d..978e12437e61 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -105,7 +105,7 @@ struct uart_amba_port {
105 unsigned int old_status; 105 unsigned int old_status;
106}; 106};
107 107
108static void pl010_stop_tx(struct uart_port *port, unsigned int tty_stop) 108static void pl010_stop_tx(struct uart_port *port)
109{ 109{
110 unsigned int cr; 110 unsigned int cr;
111 111
@@ -114,7 +114,7 @@ static void pl010_stop_tx(struct uart_port *port, unsigned int tty_stop)
114 UART_PUT_CR(port, cr); 114 UART_PUT_CR(port, cr);
115} 115}
116 116
117static void pl010_start_tx(struct uart_port *port, unsigned int tty_start) 117static void pl010_start_tx(struct uart_port *port)
118{ 118{
119 unsigned int cr; 119 unsigned int cr;
120 120
@@ -219,7 +219,7 @@ static void pl010_tx_chars(struct uart_port *port)
219 return; 219 return;
220 } 220 }
221 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 221 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
222 pl010_stop_tx(port, 0); 222 pl010_stop_tx(port);
223 return; 223 return;
224 } 224 }
225 225
@@ -236,7 +236,7 @@ static void pl010_tx_chars(struct uart_port *port)
236 uart_write_wakeup(port); 236 uart_write_wakeup(port);
237 237
238 if (uart_circ_empty(xmit)) 238 if (uart_circ_empty(xmit))
239 pl010_stop_tx(port, 0); 239 pl010_stop_tx(port);
240} 240}
241 241
242static void pl010_modem_status(struct uart_port *port) 242static void pl010_modem_status(struct uart_port *port)
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 7db88ee18f75..56071309744c 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -74,7 +74,7 @@ struct uart_amba_port {
74 unsigned int old_status; 74 unsigned int old_status;
75}; 75};
76 76
77static void pl011_stop_tx(struct uart_port *port, unsigned int tty_stop) 77static void pl011_stop_tx(struct uart_port *port)
78{ 78{
79 struct uart_amba_port *uap = (struct uart_amba_port *)port; 79 struct uart_amba_port *uap = (struct uart_amba_port *)port;
80 80
@@ -82,7 +82,7 @@ static void pl011_stop_tx(struct uart_port *port, unsigned int tty_stop)
82 writew(uap->im, uap->port.membase + UART011_IMSC); 82 writew(uap->im, uap->port.membase + UART011_IMSC);
83} 83}
84 84
85static void pl011_start_tx(struct uart_port *port, unsigned int tty_start) 85static void pl011_start_tx(struct uart_port *port)
86{ 86{
87 struct uart_amba_port *uap = (struct uart_amba_port *)port; 87 struct uart_amba_port *uap = (struct uart_amba_port *)port;
88 88
@@ -184,7 +184,7 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
184 return; 184 return;
185 } 185 }
186 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { 186 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
187 pl011_stop_tx(&uap->port, 0); 187 pl011_stop_tx(&uap->port);
188 return; 188 return;
189 } 189 }
190 190
@@ -201,7 +201,7 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
201 uart_write_wakeup(&uap->port); 201 uart_write_wakeup(&uap->port);
202 202
203 if (uart_circ_empty(xmit)) 203 if (uart_circ_empty(xmit))
204 pl011_stop_tx(&uap->port, 0); 204 pl011_stop_tx(&uap->port);
205} 205}
206 206
207static void pl011_modem_status(struct uart_amba_port *uap) 207static void pl011_modem_status(struct uart_amba_port *uap)
diff --git a/drivers/serial/au1x00_uart.c b/drivers/serial/au1x00_uart.c
index 6104aeef1243..a274ebf256a1 100644
--- a/drivers/serial/au1x00_uart.c
+++ b/drivers/serial/au1x00_uart.c
@@ -200,7 +200,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
200 DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name); 200 DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name);
201} 201}
202 202
203static void serial8250_stop_tx(struct uart_port *port, unsigned int tty_stop) 203static void serial8250_stop_tx(struct uart_port *port)
204{ 204{
205 struct uart_8250_port *up = (struct uart_8250_port *)port; 205 struct uart_8250_port *up = (struct uart_8250_port *)port;
206 206
@@ -210,7 +210,7 @@ static void serial8250_stop_tx(struct uart_port *port, unsigned int tty_stop)
210 } 210 }
211} 211}
212 212
213static void serial8250_start_tx(struct uart_port *port, unsigned int tty_start) 213static void serial8250_start_tx(struct uart_port *port)
214{ 214{
215 struct uart_8250_port *up = (struct uart_8250_port *)port; 215 struct uart_8250_port *up = (struct uart_8250_port *)port;
216 216
@@ -337,7 +337,7 @@ static _INLINE_ void transmit_chars(struct uart_8250_port *up)
337 return; 337 return;
338 } 338 }
339 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 339 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
340 serial8250_stop_tx(&up->port, 0); 340 serial8250_stop_tx(&up->port);
341 return; 341 return;
342 } 342 }
343 343
@@ -356,7 +356,7 @@ static _INLINE_ void transmit_chars(struct uart_8250_port *up)
356 DEBUG_INTR("THRE..."); 356 DEBUG_INTR("THRE...");
357 357
358 if (uart_circ_empty(xmit)) 358 if (uart_circ_empty(xmit))
359 serial8250_stop_tx(&up->port, 0); 359 serial8250_stop_tx(&up->port);
360} 360}
361 361
362static _INLINE_ void check_modem_status(struct uart_8250_port *up) 362static _INLINE_ void check_modem_status(struct uart_8250_port *up)
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index e92522b33c48..d822896b488c 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -69,8 +69,7 @@
69 69
70#define tx_enabled(port) ((port)->unused[0]) 70#define tx_enabled(port) ((port)->unused[0])
71 71
72static void 72static void clps711xuart_stop_tx(struct uart_port *port)
73clps711xuart_stop_tx(struct uart_port *port, unsigned int tty_stop)
74{ 73{
75 if (tx_enabled(port)) { 74 if (tx_enabled(port)) {
76 disable_irq(TX_IRQ(port)); 75 disable_irq(TX_IRQ(port));
@@ -78,8 +77,7 @@ clps711xuart_stop_tx(struct uart_port *port, unsigned int tty_stop)
78 } 77 }
79} 78}
80 79
81static void 80static void clps711xuart_start_tx(struct uart_port *port)
82clps711xuart_start_tx(struct uart_port *port, unsigned int tty_start)
83{ 81{
84 if (!tx_enabled(port)) { 82 if (!tx_enabled(port)) {
85 enable_irq(TX_IRQ(port)); 83 enable_irq(TX_IRQ(port));
@@ -165,7 +163,7 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id, struct pt_regs *re
165 return IRQ_HANDLED; 163 return IRQ_HANDLED;
166 } 164 }
167 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 165 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
168 clps711xuart_stop_tx(port, 0); 166 clps711xuart_stop_tx(port);
169 return IRQ_HANDLED; 167 return IRQ_HANDLED;
170 } 168 }
171 169
@@ -182,7 +180,7 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id, struct pt_regs *re
182 uart_write_wakeup(port); 180 uart_write_wakeup(port);
183 181
184 if (uart_circ_empty(xmit)) 182 if (uart_circ_empty(xmit))
185 clps711xuart_stop_tx(port, 0); 183 clps711xuart_stop_tx(port);
186 184
187 return IRQ_HANDLED; 185 return IRQ_HANDLED;
188} 186}
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index d639ac92a117..25825f2aba22 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -124,7 +124,7 @@ static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
124/* 124/*
125 * Stop transmitter 125 * Stop transmitter
126 */ 126 */
127static void cpm_uart_stop_tx(struct uart_port *port, unsigned int tty_stop) 127static void cpm_uart_stop_tx(struct uart_port *port)
128{ 128{
129 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; 129 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
130 volatile smc_t *smcp = pinfo->smcp; 130 volatile smc_t *smcp = pinfo->smcp;
@@ -141,7 +141,7 @@ static void cpm_uart_stop_tx(struct uart_port *port, unsigned int tty_stop)
141/* 141/*
142 * Start transmitter 142 * Start transmitter
143 */ 143 */
144static void cpm_uart_start_tx(struct uart_port *port, unsigned int tty_start) 144static void cpm_uart_start_tx(struct uart_port *port)
145{ 145{
146 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; 146 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
147 volatile smc_t *smcp = pinfo->smcp; 147 volatile smc_t *smcp = pinfo->smcp;
@@ -403,10 +403,8 @@ static int cpm_uart_startup(struct uart_port *port)
403 403
404inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo) 404inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo)
405{ 405{
406 unsigned long target_jiffies = jiffies + pinfo->wait_closing; 406 set_current_state(TASK_UNINTERRUPTIBLE);
407 407 schedule_timeout(pinfo->wait_closing);
408 while (!time_after(jiffies, target_jiffies))
409 schedule();
410} 408}
411 409
412/* 410/*
@@ -425,9 +423,12 @@ static void cpm_uart_shutdown(struct uart_port *port)
425 /* If the port is not the console, disable Rx and Tx. */ 423 /* If the port is not the console, disable Rx and Tx. */
426 if (!(pinfo->flags & FLAG_CONSOLE)) { 424 if (!(pinfo->flags & FLAG_CONSOLE)) {
427 /* Wait for all the BDs marked sent */ 425 /* Wait for all the BDs marked sent */
428 while(!cpm_uart_tx_empty(port)) 426 while(!cpm_uart_tx_empty(port)) {
427 set_current_state(TASK_UNINTERRUPTIBLE);
429 schedule_timeout(2); 428 schedule_timeout(2);
430 if(pinfo->wait_closing) 429 }
430
431 if (pinfo->wait_closing)
431 cpm_uart_wait_until_send(pinfo); 432 cpm_uart_wait_until_send(pinfo);
432 433
433 /* Stop uarts */ 434 /* Stop uarts */
@@ -623,7 +624,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
623 } 624 }
624 625
625 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 626 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
626 cpm_uart_stop_tx(port, 0); 627 cpm_uart_stop_tx(port);
627 return 0; 628 return 0;
628 } 629 }
629 630
@@ -656,7 +657,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
656 uart_write_wakeup(port); 657 uart_write_wakeup(port);
657 658
658 if (uart_circ_empty(xmit)) { 659 if (uart_circ_empty(xmit)) {
659 cpm_uart_stop_tx(port, 0); 660 cpm_uart_stop_tx(port);
660 return 0; 661 return 0;
661 } 662 }
662 663
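The cpm_uart_core.c shutdown path above stops busy-spinning on jiffies and instead sets the task state and calls schedule_timeout(), so the CPU is yielded while waiting for the transmitter to drain. A minimal sketch of that sleep idiom (process context only; the helper name is made up):

#include <linux/sched.h>

/* Sleep for a jiffy-based delay instead of spinning on jiffies. */
static void wait_a_while(unsigned long timeout_jiffies)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(timeout_jiffies);
}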
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index c4c8f4b44f53..15ad58d94889 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -142,6 +142,14 @@ void scc2_lineif(struct uart_cpm_port *pinfo)
142 * be supported in a sane fashion. 142 * be supported in a sane fashion.
143 */ 143 */
144#ifndef CONFIG_STX_GP3 144#ifndef CONFIG_STX_GP3
145#ifdef CONFIG_MPC8560_ADS
146 volatile iop_cpm2_t *io = &cpm2_immr->im_ioport;
147 io->iop_ppard |= 0x00000018;
148 io->iop_psord &= ~0x00000008; /* Rx */
149 io->iop_psord &= ~0x00000010; /* Tx */
150 io->iop_pdird &= ~0x00000008; /* Rx */
151 io->iop_pdird |= 0x00000010; /* Tx */
152#else
145 volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; 153 volatile iop_cpm2_t *io = &cpm2_immr->im_ioport;
146 io->iop_pparb |= 0x008b0000; 154 io->iop_pparb |= 0x008b0000;
147 io->iop_pdirb |= 0x00880000; 155 io->iop_pdirb |= 0x00880000;
@@ -149,6 +157,7 @@ void scc2_lineif(struct uart_cpm_port *pinfo)
149 io->iop_pdirb &= ~0x00030000; 157 io->iop_pdirb &= ~0x00030000;
150 io->iop_psorb &= ~0x00030000; 158 io->iop_psorb &= ~0x00030000;
151#endif 159#endif
160#endif
152 cpm2_immr->im_cpmux.cmx_scr &= 0xff00ffff; 161 cpm2_immr->im_cpmux.cmx_scr &= 0xff00ffff;
153 cpm2_immr->im_cpmux.cmx_scr |= 0x00090000; 162 cpm2_immr->im_cpmux.cmx_scr |= 0x00090000;
154 pinfo->brg = 2; 163 pinfo->brg = 2;
@@ -257,6 +266,7 @@ int cpm_uart_init_portdesc(void)
257 cpm_uart_ports[UART_SMC1].smcp = (smc_t *) & cpm2_immr->im_smc[0]; 266 cpm_uart_ports[UART_SMC1].smcp = (smc_t *) & cpm2_immr->im_smc[0];
258 cpm_uart_ports[UART_SMC1].smcup = 267 cpm_uart_ports[UART_SMC1].smcup =
259 (smc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SMC1]; 268 (smc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SMC1];
269 *(u16 *)(&cpm2_immr->im_dprambase[PROFF_SMC1_BASE]) = PROFF_SMC1;
260 cpm_uart_ports[UART_SMC1].port.mapbase = 270 cpm_uart_ports[UART_SMC1].port.mapbase =
261 (unsigned long)&cpm2_immr->im_smc[0]; 271 (unsigned long)&cpm2_immr->im_smc[0];
262 cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); 272 cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
@@ -269,6 +279,7 @@ int cpm_uart_init_portdesc(void)
269 cpm_uart_ports[UART_SMC2].smcp = (smc_t *) & cpm2_immr->im_smc[1]; 279 cpm_uart_ports[UART_SMC2].smcp = (smc_t *) & cpm2_immr->im_smc[1];
270 cpm_uart_ports[UART_SMC2].smcup = 280 cpm_uart_ports[UART_SMC2].smcup =
271 (smc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SMC2]; 281 (smc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SMC2];
282 *(u16 *)(&cpm2_immr->im_dprambase[PROFF_SMC2_BASE]) = PROFF_SMC2;
272 cpm_uart_ports[UART_SMC2].port.mapbase = 283 cpm_uart_ports[UART_SMC2].port.mapbase =
273 (unsigned long)&cpm2_immr->im_smc[1]; 284 (unsigned long)&cpm2_immr->im_smc[1];
274 cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); 285 cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 23b8871e74cc..40d3e7139cfe 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -446,7 +446,6 @@ static char *serial_version = "$Revision: 1.25 $";
446#include <asm/io.h> 446#include <asm/io.h>
447#include <asm/irq.h> 447#include <asm/irq.h>
448#include <asm/system.h> 448#include <asm/system.h>
449#include <asm/segment.h>
450#include <asm/bitops.h> 449#include <asm/bitops.h>
451#include <linux/delay.h> 450#include <linux/delay.h>
452 451
@@ -5041,17 +5040,3 @@ rs_init(void)
5041/* this makes sure that rs_init is called during kernel boot */ 5040/* this makes sure that rs_init is called during kernel boot */
5042 5041
5043module_init(rs_init); 5042module_init(rs_init);
5044
5045/*
5046 * register_serial and unregister_serial allows for serial ports to be
5047 * configured at run-time, to support PCMCIA modems.
5048 */
5049int
5050register_serial(struct serial_struct *req)
5051{
5052 return -1;
5053}
5054
5055void unregister_serial(int line)
5056{
5057}
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 97824eeeafae..e63b9dffc8d7 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -112,7 +112,7 @@ static inline void dz_out(struct dz_port *dport, unsigned offset,
112 * ------------------------------------------------------------ 112 * ------------------------------------------------------------
113 */ 113 */
114 114
115static void dz_stop_tx(struct uart_port *uport, unsigned int tty_stop) 115static void dz_stop_tx(struct uart_port *uport)
116{ 116{
117 struct dz_port *dport = (struct dz_port *)uport; 117 struct dz_port *dport = (struct dz_port *)uport;
118 unsigned short tmp, mask = 1 << dport->port.line; 118 unsigned short tmp, mask = 1 << dport->port.line;
@@ -125,7 +125,7 @@ static void dz_stop_tx(struct uart_port *uport, unsigned int tty_stop)
125 spin_unlock_irqrestore(&dport->port.lock, flags); 125 spin_unlock_irqrestore(&dport->port.lock, flags);
126} 126}
127 127
128static void dz_start_tx(struct uart_port *uport, unsigned int tty_start) 128static void dz_start_tx(struct uart_port *uport)
129{ 129{
130 struct dz_port *dport = (struct dz_port *)uport; 130 struct dz_port *dport = (struct dz_port *)uport;
131 unsigned short tmp, mask = 1 << dport->port.line; 131 unsigned short tmp, mask = 1 << dport->port.line;
@@ -290,7 +290,7 @@ static inline void dz_transmit_chars(struct dz_port *dport)
290 } 290 }
291 /* if nothing to do or stopped or hardware stopped */ 291 /* if nothing to do or stopped or hardware stopped */
292 if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) { 292 if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
293 dz_stop_tx(&dport->port, 0); 293 dz_stop_tx(&dport->port);
294 return; 294 return;
295 } 295 }
296 296
@@ -308,7 +308,7 @@ static inline void dz_transmit_chars(struct dz_port *dport)
308 308
309 /* Are we done */ 309 /* Are we done */
310 if (uart_circ_empty(xmit)) 310 if (uart_circ_empty(xmit))
311 dz_stop_tx(&dport->port, 0); 311 dz_stop_tx(&dport->port);
312} 312}
313 313
314/* 314/*
@@ -440,7 +440,7 @@ static int dz_startup(struct uart_port *uport)
440 */ 440 */
441static void dz_shutdown(struct uart_port *uport) 441static void dz_shutdown(struct uart_port *uport)
442{ 442{
443 dz_stop_tx(uport, 0); 443 dz_stop_tx(uport);
444} 444}
445 445
446/* 446/*
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index c112b32764e8..eb31125c6a30 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -56,7 +56,6 @@
56#include <linux/bitops.h> 56#include <linux/bitops.h>
57 57
58#include <asm/system.h> 58#include <asm/system.h>
59#include <asm/segment.h>
60#include <asm/io.h> 59#include <asm/io.h>
61#include <asm/irq.h> 60#include <asm/irq.h>
62#include <asm/uaccess.h> 61#include <asm/uaccess.h>
@@ -989,18 +988,16 @@ static unsigned int icom_get_mctrl(struct uart_port *port)
989 return result; 988 return result;
990} 989}
991 990
992static void icom_stop_tx(struct uart_port *port, unsigned int tty_stop) 991static void icom_stop_tx(struct uart_port *port)
993{ 992{
994 unsigned char cmdReg; 993 unsigned char cmdReg;
995 994
996 if (tty_stop) { 995 trace(ICOM_PORT, "STOP", 0);
997 trace(ICOM_PORT, "STOP", 0); 996 cmdReg = readb(&ICOM_PORT->dram->CmdReg);
998 cmdReg = readb(&ICOM_PORT->dram->CmdReg); 997 writeb(cmdReg | CMD_HOLD_XMIT, &ICOM_PORT->dram->CmdReg);
999 writeb(cmdReg | CMD_HOLD_XMIT, &ICOM_PORT->dram->CmdReg);
1000 }
1001} 998}
1002 999
1003static void icom_start_tx(struct uart_port *port, unsigned int tty_start) 1000static void icom_start_tx(struct uart_port *port)
1004{ 1001{
1005 unsigned char cmdReg; 1002 unsigned char cmdReg;
1006 1003
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 01a8726a3f97..4c985e6b3784 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -124,7 +124,7 @@ static void imx_timeout(unsigned long data)
124/* 124/*
125 * interrupts disabled on entry 125 * interrupts disabled on entry
126 */ 126 */
127static void imx_stop_tx(struct uart_port *port, unsigned int tty_stop) 127static void imx_stop_tx(struct uart_port *port)
128{ 128{
129 struct imx_port *sport = (struct imx_port *)port; 129 struct imx_port *sport = (struct imx_port *)port;
130 UCR1((u32)sport->port.membase) &= ~UCR1_TXMPTYEN; 130 UCR1((u32)sport->port.membase) &= ~UCR1_TXMPTYEN;
@@ -165,13 +165,13 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
165 } while (!(UTS((u32)sport->port.membase) & UTS_TXFULL)); 165 } while (!(UTS((u32)sport->port.membase) & UTS_TXFULL));
166 166
167 if (uart_circ_empty(xmit)) 167 if (uart_circ_empty(xmit))
168 imx_stop_tx(&sport->port, 0); 168 imx_stop_tx(&sport->port);
169} 169}
170 170
171/* 171/*
172 * interrupts disabled on entry 172 * interrupts disabled on entry
173 */ 173 */
174static void imx_start_tx(struct uart_port *port, unsigned int tty_start) 174static void imx_start_tx(struct uart_port *port)
175{ 175{
176 struct imx_port *sport = (struct imx_port *)port; 176 struct imx_port *sport = (struct imx_port *)port;
177 177
@@ -196,7 +196,7 @@ static irqreturn_t imx_txint(int irq, void *dev_id, struct pt_regs *regs)
196 } 196 }
197 197
198 if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { 198 if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
199 imx_stop_tx(&sport->port, 0); 199 imx_stop_tx(&sport->port);
200 goto out; 200 goto out;
201 } 201 }
202 202
@@ -291,13 +291,31 @@ static unsigned int imx_tx_empty(struct uart_port *port)
291 return USR2((u32)sport->port.membase) & USR2_TXDC ? TIOCSER_TEMT : 0; 291 return USR2((u32)sport->port.membase) & USR2_TXDC ? TIOCSER_TEMT : 0;
292} 292}
293 293
294/*
295 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
296 */
294static unsigned int imx_get_mctrl(struct uart_port *port) 297static unsigned int imx_get_mctrl(struct uart_port *port)
295{ 298{
296 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; 299 struct imx_port *sport = (struct imx_port *)port;
300 unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
301
302 if (USR1((u32)sport->port.membase) & USR1_RTSS)
303 tmp |= TIOCM_CTS;
304
305 if (UCR2((u32)sport->port.membase) & UCR2_CTS)
306 tmp |= TIOCM_RTS;
307
308 return tmp;
297} 309}
298 310
299static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) 311static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
300{ 312{
313 struct imx_port *sport = (struct imx_port *)port;
314
315 if (mctrl & TIOCM_RTS)
316 UCR2((u32)sport->port.membase) |= UCR2_CTS;
317 else
318 UCR2((u32)sport->port.membase) &= ~UCR2_CTS;
301} 319}
302 320
303/* 321/*
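The new imx get_mctrl/set_mctrl bodies follow the comment added just above: the port is a modem-side UART, so its RTS status is reported as TIOCM_CTS and TIOCM_RTS is driven through UCR2_CTS. A minimal sketch of how those callbacks are exercised, loosely modelled on what the serial core does for the TIOCM ioctls; the function name is illustrative only:

/* Sketch: read the current modem-control bits, adjust RTS, and push
 * the result back through the driver's set_mctrl callback. */
static void example_set_rts(struct uart_port *port, int assert)
{
	unsigned int mctrl = port->ops->get_mctrl(port);

	if (assert)
		mctrl |= TIOCM_RTS;
	else
		mctrl &= ~TIOCM_RTS;

	port->ops->set_mctrl(port, mctrl);	/* imx maps TIOCM_RTS to UCR2_CTS */
}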
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 793c3a7cbe47..0c5c96a582b3 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2373,10 +2373,9 @@ static unsigned int ic4_tx_empty(struct uart_port *the_port)
2373/** 2373/**
2374 * ic4_stop_tx - stop the transmitter 2374 * ic4_stop_tx - stop the transmitter
2375 * @port: Port to operate on 2375 * @port: Port to operate on
2376 * @tty_stop: Set to 1 if called via uart_stop
2377 * 2376 *
2378 */ 2377 */
2379static void ic4_stop_tx(struct uart_port *the_port, unsigned int tty_stop) 2378static void ic4_stop_tx(struct uart_port *the_port)
2380{ 2379{
2381} 2380}
2382 2381
@@ -2471,10 +2470,9 @@ static unsigned int ic4_get_mctrl(struct uart_port *the_port)
2471/** 2470/**
2472 * ic4_start_tx - Start transmitter, flush any output 2471 * ic4_start_tx - Start transmitter, flush any output
2473 * @port: Port to operate on 2472 * @port: Port to operate on
2474 * @tty_stop: Set to 1 if called via uart_start
2475 * 2473 *
2476 */ 2474 */
2477static void ic4_start_tx(struct uart_port *the_port, unsigned int tty_stop) 2475static void ic4_start_tx(struct uart_port *the_port)
2478{ 2476{
2479 struct ioc4_port *port = get_ioc4_port(the_port); 2477 struct ioc4_port *port = get_ioc4_port(the_port);
2480 unsigned long flags; 2478 unsigned long flags;
diff --git a/drivers/serial/ip22zilog.c b/drivers/serial/ip22zilog.c
index ea5bf4d4daa3..ef132349f310 100644
--- a/drivers/serial/ip22zilog.c
+++ b/drivers/serial/ip22zilog.c
@@ -592,7 +592,7 @@ static void ip22zilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
592} 592}
593 593
594/* The port lock is held and interrupts are disabled. */ 594/* The port lock is held and interrupts are disabled. */
595static void ip22zilog_stop_tx(struct uart_port *port, unsigned int tty_stop) 595static void ip22zilog_stop_tx(struct uart_port *port)
596{ 596{
597 struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port; 597 struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
598 598
@@ -600,7 +600,7 @@ static void ip22zilog_stop_tx(struct uart_port *port, unsigned int tty_stop)
600} 600}
601 601
602/* The port lock is held and interrupts are disabled. */ 602/* The port lock is held and interrupts are disabled. */
603static void ip22zilog_start_tx(struct uart_port *port, unsigned int tty_start) 603static void ip22zilog_start_tx(struct uart_port *port)
604{ 604{
605 struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port; 605 struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
606 struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); 606 struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 98de2258fd06..6fa0d62d6f68 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -113,7 +113,7 @@ static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
113 udelay(10); 113 udelay(10);
114} 114}
115 115
116static void jsm_tty_start_tx(struct uart_port *port, unsigned int tty_start) 116static void jsm_tty_start_tx(struct uart_port *port)
117{ 117{
118 struct jsm_channel *channel = (struct jsm_channel *)port; 118 struct jsm_channel *channel = (struct jsm_channel *)port;
119 119
@@ -125,7 +125,7 @@ static void jsm_tty_start_tx(struct uart_port *port, unsigned int tty_start)
125 jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n"); 125 jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
126} 126}
127 127
128static void jsm_tty_stop_tx(struct uart_port *port, unsigned int tty_stop) 128static void jsm_tty_stop_tx(struct uart_port *port)
129{ 129{
130 struct jsm_channel *channel = (struct jsm_channel *)port; 130 struct jsm_channel *channel = (struct jsm_channel *)port;
131 131
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c
index 9b50560b9d16..b0ecc7537ce5 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/serial/m32r_sio.c
@@ -275,7 +275,7 @@ serial_out(struct uart_sio_port *up, int offset, int value)
275 __sio_out(value, offset); 275 __sio_out(value, offset);
276} 276}
277 277
278static void m32r_sio_stop_tx(struct uart_port *port, unsigned int tty_stop) 278static void m32r_sio_stop_tx(struct uart_port *port)
279{ 279{
280 struct uart_sio_port *up = (struct uart_sio_port *)port; 280 struct uart_sio_port *up = (struct uart_sio_port *)port;
281 281
@@ -285,7 +285,7 @@ static void m32r_sio_stop_tx(struct uart_port *port, unsigned int tty_stop)
285 } 285 }
286} 286}
287 287
288static void m32r_sio_start_tx(struct uart_port *port, unsigned int tty_start) 288static void m32r_sio_start_tx(struct uart_port *port)
289{ 289{
290#ifdef CONFIG_SERIAL_M32R_PLDSIO 290#ifdef CONFIG_SERIAL_M32R_PLDSIO
291 struct uart_sio_port *up = (struct uart_sio_port *)port; 291 struct uart_sio_port *up = (struct uart_sio_port *)port;
@@ -425,7 +425,7 @@ static _INLINE_ void transmit_chars(struct uart_sio_port *up)
425 return; 425 return;
426 } 426 }
427 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 427 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
428 m32r_sio_stop_tx(&up->port, 0); 428 m32r_sio_stop_tx(&up->port);
429 return; 429 return;
430 } 430 }
431 431
@@ -446,7 +446,7 @@ static _INLINE_ void transmit_chars(struct uart_sio_port *up)
446 DEBUG_INTR("THRE..."); 446 DEBUG_INTR("THRE...");
447 447
448 if (uart_circ_empty(xmit)) 448 if (uart_circ_empty(xmit))
449 m32r_sio_stop_tx(&up->port, 0); 449 m32r_sio_stop_tx(&up->port);
450} 450}
451 451
452/* 452/*
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 8c40167778de..43b03c55f453 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -40,7 +40,6 @@
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/irq.h> 41#include <asm/irq.h>
42#include <asm/system.h> 42#include <asm/system.h>
43#include <asm/segment.h>
44#include <asm/semaphore.h> 43#include <asm/semaphore.h>
45#include <asm/delay.h> 44#include <asm/delay.h>
46#include <asm/coldfire.h> 45#include <asm/coldfire.h>
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 2a5cf174ca30..a3cd0ee8486d 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -119,7 +119,7 @@ mpc52xx_uart_get_mctrl(struct uart_port *port)
119} 119}
120 120
121static void 121static void
122mpc52xx_uart_stop_tx(struct uart_port *port, unsigned int tty_stop) 122mpc52xx_uart_stop_tx(struct uart_port *port)
123{ 123{
124 /* port->lock taken by caller */ 124 /* port->lock taken by caller */
125 port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY; 125 port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
@@ -127,7 +127,7 @@ mpc52xx_uart_stop_tx(struct uart_port *port, unsigned int tty_stop)
127} 127}
128 128
129static void 129static void
130mpc52xx_uart_start_tx(struct uart_port *port, unsigned int tty_start) 130mpc52xx_uart_start_tx(struct uart_port *port)
131{ 131{
132 /* port->lock taken by caller */ 132 /* port->lock taken by caller */
133 port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY; 133 port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
@@ -485,7 +485,7 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
485 485
486 /* Nothing to do ? */ 486 /* Nothing to do ? */
487 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 487 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
488 mpc52xx_uart_stop_tx(port,0); 488 mpc52xx_uart_stop_tx(port);
489 return 0; 489 return 0;
490 } 490 }
491 491
@@ -504,7 +504,7 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
504 504
505 /* Maybe we're done after all */ 505 /* Maybe we're done after all */
506 if (uart_circ_empty(xmit)) { 506 if (uart_circ_empty(xmit)) {
507 mpc52xx_uart_stop_tx(port,0); 507 mpc52xx_uart_stop_tx(port);
508 return 0; 508 return 0;
509 } 509 }
510 510
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index e43276c6a954..efe79b1fd431 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -1072,18 +1072,18 @@ mpsc_get_mctrl(struct uart_port *port)
1072} 1072}
1073 1073
1074static void 1074static void
1075mpsc_stop_tx(struct uart_port *port, uint tty_start) 1075mpsc_stop_tx(struct uart_port *port)
1076{ 1076{
1077 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1077 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1078 1078
1079 pr_debug("mpsc_stop_tx[%d]: tty_start: %d\n", port->line, tty_start); 1079 pr_debug("mpsc_stop_tx[%d]\n", port->line);
1080 1080
1081 mpsc_freeze(pi); 1081 mpsc_freeze(pi);
1082 return; 1082 return;
1083} 1083}
1084 1084
1085static void 1085static void
1086mpsc_start_tx(struct uart_port *port, uint tty_start) 1086mpsc_start_tx(struct uart_port *port)
1087{ 1087{
1088 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1088 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1089 1089
@@ -1091,7 +1091,7 @@ mpsc_start_tx(struct uart_port *port, uint tty_start)
1091 mpsc_copy_tx_data(pi); 1091 mpsc_copy_tx_data(pi);
1092 mpsc_sdma_start_tx(pi); 1092 mpsc_sdma_start_tx(pi);
1093 1093
1094 pr_debug("mpsc_start_tx[%d]: tty_start: %d\n", port->line, tty_start); 1094 pr_debug("mpsc_start_tx[%d]\n", port->line);
1095 return; 1095 return;
1096} 1096}
1097 1097
diff --git a/drivers/serial/mux.c b/drivers/serial/mux.c
index dadd7e19714e..189064607709 100644
--- a/drivers/serial/mux.c
+++ b/drivers/serial/mux.c
@@ -111,22 +111,20 @@ static unsigned int mux_get_mctrl(struct uart_port *port)
111/** 111/**
112 * mux_stop_tx - Stop transmitting characters. 112 * mux_stop_tx - Stop transmitting characters.
113 * @port: Ptr to the uart_port. 113 * @port: Ptr to the uart_port.
114 * @tty_stop: tty layer issue this command?
115 * 114 *
116 * The Serial MUX does not support this function. 115 * The Serial MUX does not support this function.
117 */ 116 */
118static void mux_stop_tx(struct uart_port *port, unsigned int tty_stop) 117static void mux_stop_tx(struct uart_port *port)
119{ 118{
120} 119}
121 120
122/** 121/**
123 * mux_start_tx - Start transmitting characters. 122 * mux_start_tx - Start transmitting characters.
124 * @port: Ptr to the uart_port. 123 * @port: Ptr to the uart_port.
125 * @tty_start: tty layer issue this command?
126 * 124 *
127 * The Serial Mux does not support this function. 125 * The Serial Mux does not support this function.
128 */ 126 */
129static void mux_start_tx(struct uart_port *port, unsigned int tty_start) 127static void mux_start_tx(struct uart_port *port)
130{ 128{
131} 129}
132 130
@@ -181,7 +179,7 @@ static void mux_write(struct uart_port *port)
181 } 179 }
182 180
183 if(uart_circ_empty(xmit) || uart_tx_stopped(port)) { 181 if(uart_circ_empty(xmit) || uart_tx_stopped(port)) {
184 mux_stop_tx(port, 0); 182 mux_stop_tx(port);
185 return; 183 return;
186 } 184 }
187 185
@@ -202,7 +200,7 @@ static void mux_write(struct uart_port *port)
202 uart_write_wakeup(port); 200 uart_write_wakeup(port);
203 201
204 if (uart_circ_empty(xmit)) 202 if (uart_circ_empty(xmit))
205 mux_stop_tx(port, 0); 203 mux_stop_tx(port);
206} 204}
207 205
208/** 206/**
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 7db2f37532cf..5ddd8ab1f108 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -630,11 +630,10 @@ static unsigned int pmz_get_mctrl(struct uart_port *port)
630 630
631/* 631/*
632 * Stop TX side. Dealt like sunzilog at next Tx interrupt, 632 * Stop TX side. Dealt like sunzilog at next Tx interrupt,
633 * though for DMA, we will have to do a bit more. What is 633 * though for DMA, we will have to do a bit more.
634 * the meaning of the tty_stop bit ? XXX
635 * The port lock is held and interrupts are disabled. 634 * The port lock is held and interrupts are disabled.
636 */ 635 */
637static void pmz_stop_tx(struct uart_port *port, unsigned int tty_stop) 636static void pmz_stop_tx(struct uart_port *port)
638{ 637{
639 to_pmz(port)->flags |= PMACZILOG_FLAG_TX_STOPPED; 638 to_pmz(port)->flags |= PMACZILOG_FLAG_TX_STOPPED;
640} 639}
@@ -643,7 +642,7 @@ static void pmz_stop_tx(struct uart_port *port, unsigned int tty_stop)
643 * Kick the Tx side. 642 * Kick the Tx side.
644 * The port lock is held and interrupts are disabled. 643 * The port lock is held and interrupts are disabled.
645 */ 644 */
646static void pmz_start_tx(struct uart_port *port, unsigned int tty_start) 645static void pmz_start_tx(struct uart_port *port)
647{ 646{
648 struct uart_pmac_port *uap = to_pmz(port); 647 struct uart_pmac_port *uap = to_pmz(port);
649 unsigned char status; 648 unsigned char status;
@@ -1601,7 +1600,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
1601 return 0; 1600 return 0;
1602 } 1601 }
1603 1602
1604 if (pm_state == mdev->ofdev.dev.power.power_state || pm_state < 2) 1603 if (pm_state.event == mdev->ofdev.dev.power.power_state.event)
1605 return 0; 1604 return 0;
1606 1605
1607 pmz_debug("suspend, switching to state %d\n", pm_state); 1606 pmz_debug("suspend, switching to state %d\n", pm_state);
@@ -1661,7 +1660,7 @@ static int pmz_resume(struct macio_dev *mdev)
1661 if (uap == NULL) 1660 if (uap == NULL)
1662 return 0; 1661 return 0;
1663 1662
1664 if (mdev->ofdev.dev.power.power_state == 0) 1663 if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
1665 return 0; 1664 return 0;
1666 1665
1667 pmz_debug("resume, switching to state 0\n"); 1666 pmz_debug("resume, switching to state 0\n");
@@ -1714,7 +1713,7 @@ static int pmz_resume(struct macio_dev *mdev)
1714 1713
1715 pmz_debug("resume, switching complete\n"); 1714 pmz_debug("resume, switching complete\n");
1716 1715
1717 mdev->ofdev.dev.power.power_state = 0; 1716 mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
1718 1717
1719 return 0; 1718 return 0;
1720} 1719}
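The pmac_zilog hunks above follow the pm_message_t conversion: power_state is no longer a plain integer, so suspend/resume code compares its .event member against PM_EVENT_* constants instead of raw numbers. A minimal sketch of the idiom; the helper names are made up:

/* Sketch: with pm_message_t a struct, state checks go through .event. */
static int example_is_suspended(struct device *dev)
{
	return dev->power.power_state.event != PM_EVENT_ON;
}

static void example_mark_resumed(struct device *dev)
{
	dev->power.power_state.event = PM_EVENT_ON;
}

The same pattern appears again in the USB hub and core changes further down.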
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 461c81c93207..eaa0af835290 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -80,7 +80,7 @@ static void serial_pxa_enable_ms(struct uart_port *port)
80 serial_out(up, UART_IER, up->ier); 80 serial_out(up, UART_IER, up->ier);
81} 81}
82 82
83static void serial_pxa_stop_tx(struct uart_port *port, unsigned int tty_stop) 83static void serial_pxa_stop_tx(struct uart_port *port)
84{ 84{
85 struct uart_pxa_port *up = (struct uart_pxa_port *)port; 85 struct uart_pxa_port *up = (struct uart_pxa_port *)port;
86 86
@@ -185,7 +185,7 @@ static void transmit_chars(struct uart_pxa_port *up)
185 return; 185 return;
186 } 186 }
187 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 187 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
188 serial_pxa_stop_tx(&up->port, 0); 188 serial_pxa_stop_tx(&up->port);
189 return; 189 return;
190 } 190 }
191 191
@@ -203,10 +203,10 @@ static void transmit_chars(struct uart_pxa_port *up)
203 203
204 204
205 if (uart_circ_empty(xmit)) 205 if (uart_circ_empty(xmit))
206 serial_pxa_stop_tx(&up->port, 0); 206 serial_pxa_stop_tx(&up->port);
207} 207}
208 208
209static void serial_pxa_start_tx(struct uart_port *port, unsigned int tty_start) 209static void serial_pxa_start_tx(struct uart_port *port)
210{ 210{
211 struct uart_pxa_port *up = (struct uart_pxa_port *)port; 211 struct uart_pxa_port *up = (struct uart_pxa_port *)port;
212 212
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 7365d4b50b95..c361c6fb0809 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -246,8 +246,7 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
246 spin_unlock_irqrestore(&port->lock, flags); 246 spin_unlock_irqrestore(&port->lock, flags);
247} 247}
248 248
249static void 249static void s3c24xx_serial_stop_tx(struct uart_port *port)
250s3c24xx_serial_stop_tx(struct uart_port *port, unsigned int tty_stop)
251{ 250{
252 if (tx_enabled(port)) { 251 if (tx_enabled(port)) {
253 disable_irq(TX_IRQ(port)); 252 disable_irq(TX_IRQ(port));
@@ -257,8 +256,7 @@ s3c24xx_serial_stop_tx(struct uart_port *port, unsigned int tty_stop)
257 } 256 }
258} 257}
259 258
260static void 259static void s3c24xx_serial_start_tx(struct uart_port *port)
261s3c24xx_serial_start_tx(struct uart_port *port, unsigned int tty_start)
262{ 260{
263 if (!tx_enabled(port)) { 261 if (!tx_enabled(port)) {
264 if (port->flags & UPF_CONS_FLOW) 262 if (port->flags & UPF_CONS_FLOW)
@@ -424,7 +422,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id, struct pt_regs *re
424 */ 422 */
425 423
426 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 424 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
427 s3c24xx_serial_stop_tx(port, 0); 425 s3c24xx_serial_stop_tx(port);
428 goto out; 426 goto out;
429 } 427 }
430 428
@@ -443,7 +441,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id, struct pt_regs *re
443 uart_write_wakeup(port); 441 uart_write_wakeup(port);
444 442
445 if (uart_circ_empty(xmit)) 443 if (uart_circ_empty(xmit))
446 s3c24xx_serial_stop_tx(port, 0); 444 s3c24xx_serial_stop_tx(port);
447 445
448 out: 446 out:
449 return IRQ_HANDLED; 447 return IRQ_HANDLED;
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 98641c3f5ab9..1225b14f6e9d 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -145,7 +145,7 @@ static void sa1100_timeout(unsigned long data)
145/* 145/*
146 * interrupts disabled on entry 146 * interrupts disabled on entry
147 */ 147 */
148static void sa1100_stop_tx(struct uart_port *port, unsigned int tty_stop) 148static void sa1100_stop_tx(struct uart_port *port)
149{ 149{
150 struct sa1100_port *sport = (struct sa1100_port *)port; 150 struct sa1100_port *sport = (struct sa1100_port *)port;
151 u32 utcr3; 151 u32 utcr3;
@@ -158,7 +158,7 @@ static void sa1100_stop_tx(struct uart_port *port, unsigned int tty_stop)
158/* 158/*
159 * interrupts may not be disabled on entry 159 * interrupts may not be disabled on entry
160 */ 160 */
161static void sa1100_start_tx(struct uart_port *port, unsigned int tty_start) 161static void sa1100_start_tx(struct uart_port *port)
162{ 162{
163 struct sa1100_port *sport = (struct sa1100_port *)port; 163 struct sa1100_port *sport = (struct sa1100_port *)port;
164 unsigned long flags; 164 unsigned long flags;
@@ -264,7 +264,7 @@ static void sa1100_tx_chars(struct sa1100_port *sport)
264 sa1100_mctrl_check(sport); 264 sa1100_mctrl_check(sport);
265 265
266 if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { 266 if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
267 sa1100_stop_tx(&sport->port, 0); 267 sa1100_stop_tx(&sport->port);
268 return; 268 return;
269 } 269 }
270 270
@@ -284,7 +284,7 @@ static void sa1100_tx_chars(struct sa1100_port *sport)
284 uart_write_wakeup(&sport->port); 284 uart_write_wakeup(&sport->port);
285 285
286 if (uart_circ_empty(xmit)) 286 if (uart_circ_empty(xmit))
287 sa1100_stop_tx(&sport->port, 0); 287 sa1100_stop_tx(&sport->port);
288} 288}
289 289
290static irqreturn_t sa1100_int(int irq, void *dev_id, struct pt_regs *regs) 290static irqreturn_t sa1100_int(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 54699c3a00ab..2d8622eef701 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -80,7 +80,7 @@ static void uart_stop(struct tty_struct *tty)
80 unsigned long flags; 80 unsigned long flags;
81 81
82 spin_lock_irqsave(&port->lock, flags); 82 spin_lock_irqsave(&port->lock, flags);
83 port->ops->stop_tx(port, 1); 83 port->ops->stop_tx(port);
84 spin_unlock_irqrestore(&port->lock, flags); 84 spin_unlock_irqrestore(&port->lock, flags);
85} 85}
86 86
@@ -91,7 +91,7 @@ static void __uart_start(struct tty_struct *tty)
91 91
92 if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf && 92 if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf &&
93 !tty->stopped && !tty->hw_stopped) 93 !tty->stopped && !tty->hw_stopped)
94 port->ops->start_tx(port, 1); 94 port->ops->start_tx(port);
95} 95}
96 96
97static void uart_start(struct tty_struct *tty) 97static void uart_start(struct tty_struct *tty)
@@ -542,7 +542,7 @@ static void uart_send_xchar(struct tty_struct *tty, char ch)
542 port->x_char = ch; 542 port->x_char = ch;
543 if (ch) { 543 if (ch) {
544 spin_lock_irqsave(&port->lock, flags); 544 spin_lock_irqsave(&port->lock, flags);
545 port->ops->start_tx(port, 0); 545 port->ops->start_tx(port);
546 spin_unlock_irqrestore(&port->lock, flags); 546 spin_unlock_irqrestore(&port->lock, flags);
547 } 547 }
548 } 548 }
@@ -1146,7 +1146,7 @@ static void uart_set_termios(struct tty_struct *tty, struct termios *old_termios
1146 spin_lock_irqsave(&state->port->lock, flags); 1146 spin_lock_irqsave(&state->port->lock, flags);
1147 if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) { 1147 if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) {
1148 tty->hw_stopped = 1; 1148 tty->hw_stopped = 1;
1149 state->port->ops->stop_tx(state->port, 0); 1149 state->port->ops->stop_tx(state->port);
1150 } 1150 }
1151 spin_unlock_irqrestore(&state->port->lock, flags); 1151 spin_unlock_irqrestore(&state->port->lock, flags);
1152 } 1152 }
@@ -1869,7 +1869,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
1869 struct uart_ops *ops = port->ops; 1869 struct uart_ops *ops = port->ops;
1870 1870
1871 spin_lock_irq(&port->lock); 1871 spin_lock_irq(&port->lock);
1872 ops->stop_tx(port, 0); 1872 ops->stop_tx(port);
1873 ops->set_mctrl(port, 0); 1873 ops->set_mctrl(port, 0);
1874 ops->stop_rx(port); 1874 ops->stop_rx(port);
1875 spin_unlock_irq(&port->lock); 1875 spin_unlock_irq(&port->lock);
@@ -1935,7 +1935,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
1935 uart_change_speed(state, NULL); 1935 uart_change_speed(state, NULL);
1936 spin_lock_irq(&port->lock); 1936 spin_lock_irq(&port->lock);
1937 ops->set_mctrl(port, port->mctrl); 1937 ops->set_mctrl(port, port->mctrl);
1938 ops->start_tx(port, 0); 1938 ops->start_tx(port);
1939 spin_unlock_irq(&port->lock); 1939 spin_unlock_irq(&port->lock);
1940 } 1940 }
1941 1941
@@ -1947,21 +1947,29 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
1947static inline void 1947static inline void
1948uart_report_port(struct uart_driver *drv, struct uart_port *port) 1948uart_report_port(struct uart_driver *drv, struct uart_port *port)
1949{ 1949{
1950 printk("%s%d", drv->dev_name, port->line); 1950 char address[64];
1951 printk(" at "); 1951
1952 switch (port->iotype) { 1952 switch (port->iotype) {
1953 case UPIO_PORT: 1953 case UPIO_PORT:
1954 printk("I/O 0x%x", port->iobase); 1954 snprintf(address, sizeof(address),
1955 "I/O 0x%x", port->iobase);
1955 break; 1956 break;
1956 case UPIO_HUB6: 1957 case UPIO_HUB6:
1957 printk("I/O 0x%x offset 0x%x", port->iobase, port->hub6); 1958 snprintf(address, sizeof(address),
1959 "I/O 0x%x offset 0x%x", port->iobase, port->hub6);
1958 break; 1960 break;
1959 case UPIO_MEM: 1961 case UPIO_MEM:
1960 case UPIO_MEM32: 1962 case UPIO_MEM32:
1961 printk("MMIO 0x%lx", port->mapbase); 1963 snprintf(address, sizeof(address),
1964 "MMIO 0x%lx", port->mapbase);
1965 break;
1966 default:
1967 strlcpy(address, "*unknown*", sizeof(address));
1962 break; 1968 break;
1963 } 1969 }
1964 printk(" (irq = %d) is a %s\n", port->irq, uart_type(port)); 1970
1971 printk(KERN_INFO "%s%d at %s (irq = %d) is a %s\n",
1972 drv->dev_name, port->line, address, port->irq, uart_type(port));
1965} 1973}
1966 1974
1967static void 1975static void
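The rewritten uart_report_port() above formats the variable part of the message into a stack buffer with snprintf() and then issues a single printk() with an explicit KERN_INFO level, instead of several unprefixed printk() fragments that carry no log level and can interleave with other console output. The same pattern in isolation; the function and its arguments are illustrative:

/* Sketch: format once, then log the whole line at a single level. */
static void example_report(const char *name, int line, unsigned long mapbase, int irq)
{
	char address[64];

	snprintf(address, sizeof(address), "MMIO 0x%lx", mapbase);
	printk(KERN_INFO "%s%d at %s (irq = %d)\n", name, line, address, irq);
}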
@@ -2289,143 +2297,11 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
2289} 2297}
2290EXPORT_SYMBOL(uart_match_port); 2298EXPORT_SYMBOL(uart_match_port);
2291 2299
2292/*
2293 * Try to find an unused uart_state slot for a port.
2294 */
2295static struct uart_state *
2296uart_find_match_or_unused(struct uart_driver *drv, struct uart_port *port)
2297{
2298 int i;
2299
2300 /*
2301 * First, find a port entry which matches. Note: if we do
2302 * find a matching entry, and it has a non-zero use count,
2303 * then we can't register the port.
2304 */
2305 for (i = 0; i < drv->nr; i++)
2306 if (uart_match_port(drv->state[i].port, port))
2307 return &drv->state[i];
2308
2309 /*
2310 * We didn't find a matching entry, so look for the first
2311 * free entry. We look for one which hasn't been previously
2312 * used (indicated by zero iobase).
2313 */
2314 for (i = 0; i < drv->nr; i++)
2315 if (drv->state[i].port->type == PORT_UNKNOWN &&
2316 drv->state[i].port->iobase == 0 &&
2317 drv->state[i].count == 0)
2318 return &drv->state[i];
2319
2320 /*
2321 * That also failed. Last resort is to find any currently
2322 * entry which doesn't have a real port associated with it.
2323 */
2324 for (i = 0; i < drv->nr; i++)
2325 if (drv->state[i].port->type == PORT_UNKNOWN &&
2326 drv->state[i].count == 0)
2327 return &drv->state[i];
2328
2329 return NULL;
2330}
2331
2332/**
2333 * uart_register_port: register uart settings with a port
2334 * @drv: pointer to the uart low level driver structure for this port
2335 * @port: uart port structure describing the port
2336 *
2337 * Register UART settings with the specified low level driver. Detect
2338 * the type of the port if UPF_BOOT_AUTOCONF is set, and detect the
2339 * IRQ if UPF_AUTO_IRQ is set.
2340 *
2341 * We try to pick the same port for the same IO base address, so that
2342 * when a modem is plugged in, unplugged and plugged back in, it gets
2343 * allocated the same port.
2344 *
2345 * Returns negative error, or positive line number.
2346 */
2347int uart_register_port(struct uart_driver *drv, struct uart_port *port)
2348{
2349 struct uart_state *state;
2350 int ret;
2351
2352 down(&port_sem);
2353
2354 state = uart_find_match_or_unused(drv, port);
2355
2356 if (state) {
2357 /*
2358 * Ok, we've found a line that we can use.
2359 *
2360 * If we find a port that matches this one, and it appears
2361 * to be in-use (even if it doesn't have a type) we shouldn't
2362 * alter it underneath itself - the port may be open and
2363 * trying to do useful work.
2364 */
2365 if (uart_users(state) != 0) {
2366 ret = -EBUSY;
2367 goto out;
2368 }
2369
2370 /*
2371 * If the port is already initialised, don't touch it.
2372 */
2373 if (state->port->type == PORT_UNKNOWN) {
2374 state->port->iobase = port->iobase;
2375 state->port->membase = port->membase;
2376 state->port->irq = port->irq;
2377 state->port->uartclk = port->uartclk;
2378 state->port->fifosize = port->fifosize;
2379 state->port->regshift = port->regshift;
2380 state->port->iotype = port->iotype;
2381 state->port->flags = port->flags;
2382 state->port->line = state - drv->state;
2383 state->port->mapbase = port->mapbase;
2384
2385 uart_configure_port(drv, state, state->port);
2386 }
2387
2388 ret = state->port->line;
2389 } else
2390 ret = -ENOSPC;
2391 out:
2392 up(&port_sem);
2393 return ret;
2394}
2395
2396/**
2397 * uart_unregister_port - de-allocate a port
2398 * @drv: pointer to the uart low level driver structure for this port
2399 * @line: line index previously returned from uart_register_port()
2400 *
2401 * Hang up the specified line associated with the low level driver,
2402 * and mark the port as unused.
2403 */
2404void uart_unregister_port(struct uart_driver *drv, int line)
2405{
2406 struct uart_state *state;
2407
2408 if (line < 0 || line >= drv->nr) {
2409 printk(KERN_ERR "Attempt to unregister ");
2410 printk("%s%d", drv->dev_name, line);
2411 printk("\n");
2412 return;
2413 }
2414
2415 state = drv->state + line;
2416
2417 down(&port_sem);
2418 uart_unconfigure_port(drv, state);
2419 up(&port_sem);
2420}
2421
2422EXPORT_SYMBOL(uart_write_wakeup); 2300EXPORT_SYMBOL(uart_write_wakeup);
2423EXPORT_SYMBOL(uart_register_driver); 2301EXPORT_SYMBOL(uart_register_driver);
2424EXPORT_SYMBOL(uart_unregister_driver); 2302EXPORT_SYMBOL(uart_unregister_driver);
2425EXPORT_SYMBOL(uart_suspend_port); 2303EXPORT_SYMBOL(uart_suspend_port);
2426EXPORT_SYMBOL(uart_resume_port); 2304EXPORT_SYMBOL(uart_resume_port);
2427EXPORT_SYMBOL(uart_register_port);
2428EXPORT_SYMBOL(uart_unregister_port);
2429EXPORT_SYMBOL(uart_add_one_port); 2305EXPORT_SYMBOL(uart_add_one_port);
2430EXPORT_SYMBOL(uart_remove_one_port); 2306EXPORT_SYMBOL(uart_remove_one_port);
2431 2307
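The serial_core.c changes above are the hub of this series: the uart_ops start_tx/stop_tx callbacks drop their tty_start/tty_stop argument, the core now calls them without the flag, and uart_register_port()/uart_unregister_port() are removed in favour of uart_add_one_port()/uart_remove_one_port(), whose exports remain. The shape of the callback change, shown on a stand-in struct rather than the real struct uart_ops:

/* Old callback shape (removed by this series):
 *	void (*stop_tx)(struct uart_port *port, unsigned int tty_stop);
 *	void (*start_tx)(struct uart_port *port, unsigned int tty_start);
 * New shape, as called by uart_stop()/__uart_start() above: */
struct example_tx_ops {
	void (*stop_tx)(struct uart_port *port);
	void (*start_tx)(struct uart_port *port);
};

Drivers that used the flag to decide how hard to stop the transmitter now keep that state themselves, as sunsu.c does below.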
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index 56f269b6bfb1..8302376800c0 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -112,13 +112,12 @@ struct uart_port_lh7a40x {
112 unsigned int statusPrev; /* Most recently read modem status */ 112 unsigned int statusPrev; /* Most recently read modem status */
113}; 113};
114 114
115static void lh7a40xuart_stop_tx (struct uart_port* port, unsigned int tty_stop) 115static void lh7a40xuart_stop_tx (struct uart_port* port)
116{ 116{
117 BIT_CLR (port, UART_R_INTEN, TxInt); 117 BIT_CLR (port, UART_R_INTEN, TxInt);
118} 118}
119 119
120static void lh7a40xuart_start_tx (struct uart_port* port, 120static void lh7a40xuart_start_tx (struct uart_port* port)
121 unsigned int tty_start)
122{ 121{
123 BIT_SET (port, UART_R_INTEN, TxInt); 122 BIT_SET (port, UART_R_INTEN, TxInt);
124 123
@@ -208,7 +207,7 @@ static void lh7a40xuart_tx_chars (struct uart_port* port)
208 return; 207 return;
209 } 208 }
210 if (uart_circ_empty (xmit) || uart_tx_stopped (port)) { 209 if (uart_circ_empty (xmit) || uart_tx_stopped (port)) {
211 lh7a40xuart_stop_tx (port, 0); 210 lh7a40xuart_stop_tx (port);
212 return; 211 return;
213 } 212 }
214 213
@@ -230,7 +229,7 @@ static void lh7a40xuart_tx_chars (struct uart_port* port)
230 uart_write_wakeup (port); 229 uart_write_wakeup (port);
231 230
232 if (uart_circ_empty (xmit)) 231 if (uart_circ_empty (xmit))
233 lh7a40xuart_stop_tx (port, 0); 232 lh7a40xuart_stop_tx (port);
234} 233}
235 234
236static void lh7a40xuart_modem_status (struct uart_port* port) 235static void lh7a40xuart_modem_status (struct uart_port* port)
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index d085030df70b..49afadbe461b 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -253,7 +253,7 @@ sio_quot_set(struct uart_txx9_port *up, int quot)
253 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); 253 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
254} 254}
255 255
256static void serial_txx9_stop_tx(struct uart_port *port, unsigned int tty_stop) 256static void serial_txx9_stop_tx(struct uart_port *port)
257{ 257{
258 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 258 struct uart_txx9_port *up = (struct uart_txx9_port *)port;
259 unsigned long flags; 259 unsigned long flags;
@@ -263,7 +263,7 @@ static void serial_txx9_stop_tx(struct uart_port *port, unsigned int tty_stop)
263 spin_unlock_irqrestore(&up->port.lock, flags); 263 spin_unlock_irqrestore(&up->port.lock, flags);
264} 264}
265 265
266static void serial_txx9_start_tx(struct uart_port *port, unsigned int tty_start) 266static void serial_txx9_start_tx(struct uart_port *port)
267{ 267{
268 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 268 struct uart_txx9_port *up = (struct uart_txx9_port *)port;
269 unsigned long flags; 269 unsigned long flags;
@@ -372,7 +372,7 @@ static inline void transmit_chars(struct uart_txx9_port *up)
372 return; 372 return;
373 } 373 }
374 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 374 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
375 serial_txx9_stop_tx(&up->port, 0); 375 serial_txx9_stop_tx(&up->port);
376 return; 376 return;
377 } 377 }
378 378
@@ -389,7 +389,7 @@ static inline void transmit_chars(struct uart_txx9_port *up)
389 uart_write_wakeup(&up->port); 389 uart_write_wakeup(&up->port);
390 390
391 if (uart_circ_empty(xmit)) 391 if (uart_circ_empty(xmit))
392 serial_txx9_stop_tx(&up->port, 0); 392 serial_txx9_stop_tx(&up->port);
393} 393}
394 394
395static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id, struct pt_regs *regs) 395static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index ad5b776d779b..512266307866 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -79,8 +79,8 @@ static struct sci_port *serial_console_port = 0;
79#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 79#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
80 80
81/* Function prototypes */ 81/* Function prototypes */
82static void sci_stop_tx(struct uart_port *port, unsigned int tty_stop); 82static void sci_stop_tx(struct uart_port *port);
83static void sci_start_tx(struct uart_port *port, unsigned int tty_start); 83static void sci_start_tx(struct uart_port *port);
84static void sci_start_rx(struct uart_port *port, unsigned int tty_start); 84static void sci_start_rx(struct uart_port *port, unsigned int tty_start);
85static void sci_stop_rx(struct uart_port *port); 85static void sci_stop_rx(struct uart_port *port);
86static int sci_request_irq(struct sci_port *port); 86static int sci_request_irq(struct sci_port *port);
@@ -455,7 +455,7 @@ static void sci_transmit_chars(struct uart_port *port)
455 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 455 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
456 uart_write_wakeup(port); 456 uart_write_wakeup(port);
457 if (uart_circ_empty(xmit)) { 457 if (uart_circ_empty(xmit)) {
458 sci_stop_tx(port, 0); 458 sci_stop_tx(port);
459 } else { 459 } else {
460 local_irq_save(flags); 460 local_irq_save(flags);
461 ctrl = sci_in(port, SCSCR); 461 ctrl = sci_in(port, SCSCR);
@@ -900,7 +900,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
900 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 900 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
901} 901}
902 902
903static void sci_start_tx(struct uart_port *port, unsigned int tty_start) 903static void sci_start_tx(struct uart_port *port)
904{ 904{
905 struct sci_port *s = &sci_ports[port->line]; 905 struct sci_port *s = &sci_ports[port->line];
906 906
@@ -909,7 +909,7 @@ static void sci_start_tx(struct uart_port *port, unsigned int tty_start)
909 enable_irq(s->irqs[SCIx_TXI_IRQ]); 909 enable_irq(s->irqs[SCIx_TXI_IRQ]);
910} 910}
911 911
912static void sci_stop_tx(struct uart_port *port, unsigned int tty_stop) 912static void sci_stop_tx(struct uart_port *port)
913{ 913{
914 unsigned long flags; 914 unsigned long flags;
915 unsigned short ctrl; 915 unsigned short ctrl;
@@ -978,7 +978,7 @@ static void sci_shutdown(struct uart_port *port)
978 struct sci_port *s = &sci_ports[port->line]; 978 struct sci_port *s = &sci_ports[port->line];
979 979
980 sci_stop_rx(port); 980 sci_stop_rx(port);
981 sci_stop_tx(port, 1); 981 sci_stop_tx(port);
982 sci_free_irq(s); 982 sci_free_irq(s);
983 983
984#if defined(__H8300S__) 984#if defined(__H8300S__)
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 12d1f14e78ce..313f9df24a2d 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -259,10 +259,9 @@ static unsigned int snp_tx_empty(struct uart_port *port)
259/** 259/**
260 * snp_stop_tx - stop the transmitter - no-op for us 260 * snp_stop_tx - stop the transmitter - no-op for us
261 * @port: Port to operate on - we ignore - no-op function 261 * @port: Port to operate on - we ignore - no-op function
262 * @tty_stop: Set to 1 if called via uart_stop
263 * 262 *
264 */ 263 */
265static void snp_stop_tx(struct uart_port *port, unsigned int tty_stop) 264static void snp_stop_tx(struct uart_port *port)
266{ 265{
267} 266}
268 267
@@ -325,10 +324,9 @@ static void snp_stop_rx(struct uart_port *port)
325/** 324/**
326 * snp_start_tx - Start transmitter 325 * snp_start_tx - Start transmitter
327 * @port: Port to operate on 326 * @port: Port to operate on
328 * @tty_stop: Set to 1 if called via uart_start
329 * 327 *
330 */ 328 */
331static void snp_start_tx(struct uart_port *port, unsigned int tty_stop) 329static void snp_start_tx(struct uart_port *port)
332{ 330{
333 if (sal_console_port.sc_ops->sal_wakeup_transmit) 331 if (sal_console_port.sc_ops->sal_wakeup_transmit)
334 sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port, 332 sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port,
@@ -615,7 +613,7 @@ static void sn_transmit_chars(struct sn_cons_port *port, int raw)
615 uart_write_wakeup(&port->sc_port); 613 uart_write_wakeup(&port->sc_port);
616 614
617 if (uart_circ_empty(xmit)) 615 if (uart_circ_empty(xmit))
618 snp_stop_tx(&port->sc_port, 0); /* no-op for us */ 616 snp_stop_tx(&port->sc_port); /* no-op for us */
619} 617}
620 618
621/** 619/**
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 8d198880756a..e971156daa60 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -245,7 +245,7 @@ receive_chars(struct uart_sunsab_port *up,
245 return tty; 245 return tty;
246} 246}
247 247
248static void sunsab_stop_tx(struct uart_port *, unsigned int); 248static void sunsab_stop_tx(struct uart_port *);
249static void sunsab_tx_idle(struct uart_sunsab_port *); 249static void sunsab_tx_idle(struct uart_sunsab_port *);
250 250
251static void transmit_chars(struct uart_sunsab_port *up, 251static void transmit_chars(struct uart_sunsab_port *up,
@@ -301,7 +301,7 @@ static void transmit_chars(struct uart_sunsab_port *up,
301 uart_write_wakeup(&up->port); 301 uart_write_wakeup(&up->port);
302 302
303 if (uart_circ_empty(xmit)) 303 if (uart_circ_empty(xmit))
304 sunsab_stop_tx(&up->port, 0); 304 sunsab_stop_tx(&up->port);
305} 305}
306 306
307static void check_status(struct uart_sunsab_port *up, 307static void check_status(struct uart_sunsab_port *up,
@@ -448,7 +448,7 @@ static unsigned int sunsab_get_mctrl(struct uart_port *port)
448} 448}
449 449
450/* port->lock held by caller. */ 450/* port->lock held by caller. */
451static void sunsab_stop_tx(struct uart_port *port, unsigned int tty_stop) 451static void sunsab_stop_tx(struct uart_port *port)
452{ 452{
453 struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; 453 struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
454 454
@@ -476,7 +476,7 @@ static void sunsab_tx_idle(struct uart_sunsab_port *up)
476} 476}
477 477
478/* port->lock held by caller. */ 478/* port->lock held by caller. */
479static void sunsab_start_tx(struct uart_port *port, unsigned int tty_start) 479static void sunsab_start_tx(struct uart_port *port)
480{ 480{
481 struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; 481 struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
482 struct circ_buf *xmit = &up->port.info->xmit; 482 struct circ_buf *xmit = &up->port.info->xmit;
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index d57a3553aea3..5959e6755a81 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -255,21 +255,30 @@ static void disable_rsa(struct uart_sunsu_port *up)
255} 255}
256#endif /* CONFIG_SERIAL_8250_RSA */ 256#endif /* CONFIG_SERIAL_8250_RSA */
257 257
258static void sunsu_stop_tx(struct uart_port *port, unsigned int tty_stop) 258static inline void __stop_tx(struct uart_sunsu_port *p)
259{
260 if (p->ier & UART_IER_THRI) {
261 p->ier &= ~UART_IER_THRI;
262 serial_out(p, UART_IER, p->ier);
263 }
264}
265
266static void sunsu_stop_tx(struct uart_port *port)
259{ 267{
260 struct uart_sunsu_port *up = (struct uart_sunsu_port *) port; 268 struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
261 269
262 if (up->ier & UART_IER_THRI) { 270 __stop_tx(up);
263 up->ier &= ~UART_IER_THRI; 271
264 serial_out(up, UART_IER, up->ier); 272 /*
265 } 273 * We really want to stop the transmitter from sending.
266 if (up->port.type == PORT_16C950 && tty_stop) { 274 */
275 if (up->port.type == PORT_16C950) {
267 up->acr |= UART_ACR_TXDIS; 276 up->acr |= UART_ACR_TXDIS;
268 serial_icr_write(up, UART_ACR, up->acr); 277 serial_icr_write(up, UART_ACR, up->acr);
269 } 278 }
270} 279}
271 280
272static void sunsu_start_tx(struct uart_port *port, unsigned int tty_start) 281static void sunsu_start_tx(struct uart_port *port)
273{ 282{
274 struct uart_sunsu_port *up = (struct uart_sunsu_port *) port; 283 struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
275 284
@@ -277,10 +286,11 @@ static void sunsu_start_tx(struct uart_port *port, unsigned int tty_start)
277 up->ier |= UART_IER_THRI; 286 up->ier |= UART_IER_THRI;
278 serial_out(up, UART_IER, up->ier); 287 serial_out(up, UART_IER, up->ier);
279 } 288 }
289
280 /* 290 /*
281 * We only do this from uart_start 291 * Re-enable the transmitter if we disabled it.
282 */ 292 */
283 if (tty_start && up->port.type == PORT_16C950) { 293 if (up->port.type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
284 up->acr &= ~UART_ACR_TXDIS; 294 up->acr &= ~UART_ACR_TXDIS;
285 serial_icr_write(up, UART_ACR, up->acr); 295 serial_icr_write(up, UART_ACR, up->acr);
286 } 296 }
@@ -413,8 +423,12 @@ static _INLINE_ void transmit_chars(struct uart_sunsu_port *up)
413 up->port.x_char = 0; 423 up->port.x_char = 0;
414 return; 424 return;
415 } 425 }
416 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { 426 if (uart_tx_stopped(&up->port)) {
417 sunsu_stop_tx(&up->port, 0); 427 sunsu_stop_tx(&up->port);
428 return;
429 }
430 if (uart_circ_empty(xmit)) {
431 __stop_tx(up);
418 return; 432 return;
419 } 433 }
420 434
@@ -431,7 +445,7 @@ static _INLINE_ void transmit_chars(struct uart_sunsu_port *up)
431 uart_write_wakeup(&up->port); 445 uart_write_wakeup(&up->port);
432 446
433 if (uart_circ_empty(xmit)) 447 if (uart_circ_empty(xmit))
434 sunsu_stop_tx(&up->port, 0); 448 __stop_tx(up);
435} 449}
436 450
437static _INLINE_ void check_modem_status(struct uart_sunsu_port *up) 451static _INLINE_ void check_modem_status(struct uart_sunsu_port *up)
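With the tty_stop flag gone, sunsu.c splits transmitter shutdown in two: __stop_tx() only clears UART_IER_THRI (used when the transmit ring simply drains), while sunsu_stop_tx() additionally sets UART_ACR_TXDIS on 16C950 parts, and sunsu_start_tx() clears that bit again only if it was set. A condensed sketch of the decision made in transmit_chars() above; the wrapper name is made up:

/* Sketch: hard-stop on flow control, soft-stop when the ring is empty. */
static void example_tx_done(struct uart_sunsu_port *up, struct circ_buf *xmit)
{
	if (uart_tx_stopped(&up->port))
		sunsu_stop_tx(&up->port);	/* also disables a 16C950 transmitter */
	else if (uart_circ_empty(xmit))
		__stop_tx(up);			/* just mask the THRI interrupt */
}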
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index bff42a7b89d0..d75445738c88 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -684,7 +684,7 @@ static void sunzilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
684} 684}
685 685
686/* The port lock is held and interrupts are disabled. */ 686/* The port lock is held and interrupts are disabled. */
687static void sunzilog_stop_tx(struct uart_port *port, unsigned int tty_stop) 687static void sunzilog_stop_tx(struct uart_port *port)
688{ 688{
689 struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port; 689 struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
690 690
@@ -692,7 +692,7 @@ static void sunzilog_stop_tx(struct uart_port *port, unsigned int tty_stop)
692} 692}
693 693
694/* The port lock is held and interrupts are disabled. */ 694/* The port lock is held and interrupts are disabled. */
695static void sunzilog_start_tx(struct uart_port *port, unsigned int tty_start) 695static void sunzilog_start_tx(struct uart_port *port)
696{ 696{
697 struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port; 697 struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
698 struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); 698 struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
diff --git a/drivers/serial/uart00.c b/drivers/serial/uart00.c
index 186f1300cead..47b504ff38b2 100644
--- a/drivers/serial/uart00.c
+++ b/drivers/serial/uart00.c
@@ -87,7 +87,7 @@
87#define UART_TX_READY(s) (((s) & UART_TSR_TX_LEVEL_MSK) < 15) 87#define UART_TX_READY(s) (((s) & UART_TSR_TX_LEVEL_MSK) < 15)
88//#define UART_TX_EMPTY(p) ((UART_GET_FR(p) & UART00_UARTFR_TMSK) == 0) 88//#define UART_TX_EMPTY(p) ((UART_GET_FR(p) & UART00_UARTFR_TMSK) == 0)
89 89
90static void uart00_stop_tx(struct uart_port *port, unsigned int tty_stop) 90static void uart00_stop_tx(struct uart_port *port)
91{ 91{
92 UART_PUT_IEC(port, UART_IEC_TIE_MSK); 92 UART_PUT_IEC(port, UART_IEC_TIE_MSK);
93} 93}
@@ -199,7 +199,7 @@ static void uart00_tx_chars(struct uart_port *port)
199 return; 199 return;
200 } 200 }
201 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 201 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
202 uart00_stop_tx(port, 0); 202 uart00_stop_tx(port);
203 return; 203 return;
204 } 204 }
205 205
@@ -218,10 +218,10 @@ static void uart00_tx_chars(struct uart_port *port)
218 uart_write_wakeup(port); 218 uart_write_wakeup(port);
219 219
220 if (uart_circ_empty(xmit)) 220 if (uart_circ_empty(xmit))
221 uart00_stop_tx(port, 0); 221 uart00_stop_tx(port);
222} 222}
223 223
224static void uart00_start_tx(struct uart_port *port, unsigned int tty_start) 224static void uart00_start_tx(struct uart_port *port)
225{ 225{
226 UART_PUT_IES(port, UART_IES_TIE_MSK); 226 UART_PUT_IES(port, UART_IES_TIE_MSK);
227 uart00_tx_chars(port); 227 uart00_tx_chars(port);
diff --git a/drivers/serial/v850e_uart.c b/drivers/serial/v850e_uart.c
index bb482780a41d..9378895a8d56 100644
--- a/drivers/serial/v850e_uart.c
+++ b/drivers/serial/v850e_uart.c
@@ -240,7 +240,7 @@ console_initcall(v850e_uart_console_init);
240 240
241/* TX/RX interrupt handlers. */ 241/* TX/RX interrupt handlers. */
242 242
243static void v850e_uart_stop_tx (struct uart_port *port, unsigned tty_stop); 243static void v850e_uart_stop_tx (struct uart_port *port);
244 244
245void v850e_uart_tx (struct uart_port *port) 245void v850e_uart_tx (struct uart_port *port)
246{ 246{
@@ -339,14 +339,14 @@ static unsigned v850e_uart_get_mctrl (struct uart_port *port)
339 return mctrl; 339 return mctrl;
340} 340}
341 341
342static void v850e_uart_start_tx (struct uart_port *port, unsigned tty_start) 342static void v850e_uart_start_tx (struct uart_port *port)
343{ 343{
344 v850e_intc_disable_irq (V850E_UART_TX_IRQ (port->line)); 344 v850e_intc_disable_irq (V850E_UART_TX_IRQ (port->line));
345 v850e_uart_tx (port); 345 v850e_uart_tx (port);
346 v850e_intc_enable_irq (V850E_UART_TX_IRQ (port->line)); 346 v850e_intc_enable_irq (V850E_UART_TX_IRQ (port->line));
347} 347}
348 348
349static void v850e_uart_stop_tx (struct uart_port *port, unsigned tty_stop) 349static void v850e_uart_stop_tx (struct uart_port *port)
350{ 350{
351 v850e_intc_disable_irq (V850E_UART_TX_IRQ (port->line)); 351 v850e_intc_disable_irq (V850E_UART_TX_IRQ (port->line));
352} 352}
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index 1f985327b0d4..0c5d65a08f6e 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -284,7 +284,7 @@ static unsigned int siu_get_mctrl(struct uart_port *port)
284 return mctrl; 284 return mctrl;
285} 285}
286 286
287static void siu_stop_tx(struct uart_port *port, unsigned int tty_stop) 287static void siu_stop_tx(struct uart_port *port)
288{ 288{
289 unsigned long flags; 289 unsigned long flags;
290 uint8_t ier; 290 uint8_t ier;
@@ -298,7 +298,7 @@ static void siu_stop_tx(struct uart_port *port, unsigned int tty_stop)
298 spin_unlock_irqrestore(&port->lock, flags); 298 spin_unlock_irqrestore(&port->lock, flags);
299} 299}
300 300
301static void siu_start_tx(struct uart_port *port, unsigned int tty_start) 301static void siu_start_tx(struct uart_port *port)
302{ 302{
303 unsigned long flags; 303 unsigned long flags;
304 uint8_t ier; 304 uint8_t ier;
@@ -458,7 +458,7 @@ static inline void transmit_chars(struct uart_port *port)
458 } 458 }
459 459
460 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 460 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
461 siu_stop_tx(port, 0); 461 siu_stop_tx(port);
462 return; 462 return;
463 } 463 }
464 464
@@ -474,7 +474,7 @@ static inline void transmit_chars(struct uart_port *port)
474 uart_write_wakeup(port); 474 uart_write_wakeup(port);
475 475
476 if (uart_circ_empty(xmit)) 476 if (uart_circ_empty(xmit))
477 siu_stop_tx(port, 0); 477 siu_stop_tx(port);
478} 478}
479 479
480static irqreturn_t siu_interrupt(int irq, void *dev_id, struct pt_regs *regs) 480static irqreturn_t siu_interrupt(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index bb1db1959854..c466739428b2 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -960,7 +960,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
960 intf->altsetting->desc.bInterfaceNumber); 960 intf->altsetting->desc.bInterfaceNumber);
961 961
962 /* instance init */ 962 /* instance init */
963 instance = kcalloc(1, sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL); 963 instance = kzalloc(sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL);
964 if (!instance) { 964 if (!instance) {
965 dev_dbg(dev, "%s: no memory for instance data!\n", __func__); 965 dev_dbg(dev, "%s: no memory for instance data!\n", __func__);
966 return -ENOMEM; 966 return -ENOMEM;
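This hunk, and the hcd.c one below, replace kcalloc(1, size, flags) with kzalloc(size, flags): for a single object the two are equivalent, and kzalloc simply drops the redundant element count. An illustrative allocation in the same style as the usbatm instance above; the structure is made up:

/* Sketch: kzalloc() returns one zeroed object (plus a trailing array here). */
struct example_ctx {
	unsigned int num_urbs;
	struct urb *urbs[0];		/* sized at allocation time */
};

static struct example_ctx *example_alloc(unsigned int num_urbs)
{
	struct example_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx) + num_urbs * sizeof(struct urb *), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->num_urbs = num_urbs;
	return ctx;
}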
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 79422a3b07bc..12ecdb03ee5f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -782,7 +782,7 @@ static int usb_register_bus(struct usb_bus *bus)
782 return -E2BIG; 782 return -E2BIG;
783 } 783 }
784 784
785 bus->class_dev = class_device_create(usb_host_class, MKDEV(0,0), bus->controller, "usb%d", busnum); 785 bus->class_dev = class_device_create(usb_host_class, MKDEV(0,0), bus->controller, "usb_host%d", busnum);
786 if (IS_ERR(bus->class_dev)) { 786 if (IS_ERR(bus->class_dev)) {
787 clear_bit(busnum, busmap.busmap); 787 clear_bit(busnum, busmap.busmap);
788 up(&usb_bus_list_lock); 788 up(&usb_bus_list_lock);
@@ -1669,7 +1669,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
1669{ 1669{
1670 struct usb_hcd *hcd; 1670 struct usb_hcd *hcd;
1671 1671
1672 hcd = kcalloc(1, sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL); 1672 hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
1673 if (!hcd) { 1673 if (!hcd) {
1674 dev_dbg (dev, "hcd alloc failed\n"); 1674 dev_dbg (dev, "hcd alloc failed\n");
1675 return NULL; 1675 return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c3e46d24a37e..c9412daff682 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1570,7 +1570,7 @@ static int __usb_suspend_device (struct usb_device *udev, int port1,
1570 struct usb_driver *driver; 1570 struct usb_driver *driver;
1571 1571
1572 intf = udev->actconfig->interface[i]; 1572 intf = udev->actconfig->interface[i];
1573 if (state <= intf->dev.power.power_state) 1573 if (state.event <= intf->dev.power.power_state.event)
1574 continue; 1574 continue;
1575 if (!intf->dev.driver) 1575 if (!intf->dev.driver)
1576 continue; 1576 continue;
@@ -1578,11 +1578,11 @@ static int __usb_suspend_device (struct usb_device *udev, int port1,
1578 1578
1579 if (driver->suspend) { 1579 if (driver->suspend) {
1580 status = driver->suspend(intf, state); 1580 status = driver->suspend(intf, state);
1581 if (intf->dev.power.power_state != state 1581 if (intf->dev.power.power_state.event != state.event
1582 || status) 1582 || status)
1583 dev_err(&intf->dev, 1583 dev_err(&intf->dev,
1584 "suspend %d fail, code %d\n", 1584 "suspend %d fail, code %d\n",
1585 state, status); 1585 state.event, status);
1586 } 1586 }
1587 1587
1588 /* only drivers with suspend() can ever resume(); 1588 /* only drivers with suspend() can ever resume();
@@ -1595,7 +1595,7 @@ static int __usb_suspend_device (struct usb_device *udev, int port1,
1595 * since we know every driver's probe/disconnect works 1595 * since we know every driver's probe/disconnect works
1596 * even for drivers that can't suspend. 1596 * even for drivers that can't suspend.
1597 */ 1597 */
1598 if (!driver->suspend || state > PM_SUSPEND_MEM) { 1598 if (!driver->suspend || state.event > PM_EVENT_FREEZE) {
1599#if 1 1599#if 1
1600 dev_warn(&intf->dev, "resume is unsafe!\n"); 1600 dev_warn(&intf->dev, "resume is unsafe!\n");
1601#else 1601#else
@@ -1616,7 +1616,7 @@ static int __usb_suspend_device (struct usb_device *udev, int port1,
1616 * policies (when HNP doesn't apply) once we have mechanisms to 1616 * policies (when HNP doesn't apply) once we have mechanisms to
1617 * turn power back on! (Likely not before 2.7...) 1617 * turn power back on! (Likely not before 2.7...)
1618 */ 1618 */
1619 if (state > PM_SUSPEND_MEM) { 1619 if (state.event > PM_EVENT_FREEZE) {
1620 dev_warn(&udev->dev, "no poweroff yet, suspending instead\n"); 1620 dev_warn(&udev->dev, "no poweroff yet, suspending instead\n");
1621 } 1621 }
1622 1622
@@ -1733,7 +1733,7 @@ static int finish_port_resume(struct usb_device *udev)
1733 struct usb_driver *driver; 1733 struct usb_driver *driver;
1734 1734
1735 intf = udev->actconfig->interface[i]; 1735 intf = udev->actconfig->interface[i];
1736 if (intf->dev.power.power_state == PMSG_ON) 1736 if (intf->dev.power.power_state.event == PM_EVENT_ON)
1737 continue; 1737 continue;
1738 if (!intf->dev.driver) { 1738 if (!intf->dev.driver) {
1739 /* FIXME maybe force to alt 0 */ 1739 /* FIXME maybe force to alt 0 */
@@ -1747,11 +1747,11 @@ static int finish_port_resume(struct usb_device *udev)
1747 1747
1748 /* can we do better than just logging errors? */ 1748 /* can we do better than just logging errors? */
1749 status = driver->resume(intf); 1749 status = driver->resume(intf);
1750 if (intf->dev.power.power_state != PMSG_ON 1750 if (intf->dev.power.power_state.event != PM_EVENT_ON
1751 || status) 1751 || status)
1752 dev_dbg(&intf->dev, 1752 dev_dbg(&intf->dev,
1753 "resume fail, state %d code %d\n", 1753 "resume fail, state %d code %d\n",
1754 intf->dev.power.power_state, status); 1754 intf->dev.power.power_state.event, status);
1755 } 1755 }
1756 status = 0; 1756 status = 0;
1757 1757
@@ -1934,7 +1934,7 @@ static int hub_resume(struct usb_interface *intf)
1934 unsigned port1; 1934 unsigned port1;
1935 int status; 1935 int status;
1936 1936
1937 if (intf->dev.power.power_state == PM_SUSPEND_ON) 1937 if (intf->dev.power.power_state.event == PM_EVENT_ON)
1938 return 0; 1938 return 0;
1939 1939
1940 for (port1 = 1; port1 <= hdev->maxchild; port1++) { 1940 for (port1 = 1; port1 <= hdev->maxchild; port1++) {
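[Editorial note] The hub.c hunks above, and the usb.c, ehci-dbg.c, ohci-dbg.c, usbtest.c, radeon_pm.c and framebuffer hunks further down, track the change that turned pm_message_t into a structure wrapping an event code, so drivers compare power_state.event against PM_EVENT_* constants instead of comparing pm_message_t values or PM_SUSPEND_* states directly. A minimal sketch of the new-style checks; the suspend/resume bodies are placeholders:

#include <linux/device.h>
#include <linux/pm.h>

/* pm_message_t is now roughly: typedef struct pm_message { int event; } pm_message_t; */

static int example_suspend(struct device *dev, pm_message_t state)
{
	/* already at (or past) the requested state: nothing to do */
	if (dev->power.power_state.event >= state.event)
		return 0;

	/* ...quiesce the hardware... */

	dev->power.power_state = state;	/* record the whole message */
	return 0;
}

static int example_resume(struct device *dev)
{
	if (dev->power.power_state.event == PM_EVENT_ON)
		return 0;	/* never suspended */

	/* ...reinitialize the hardware... */

	dev->power.power_state = PMSG_ON;
	return 0;
}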
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 99c85d2f92da..2cddd8a00437 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1400,7 +1400,7 @@ static int usb_generic_suspend(struct device *dev, pm_message_t message)
1400 driver = to_usb_driver(dev->driver); 1400 driver = to_usb_driver(dev->driver);
1401 1401
1402 /* there's only one USB suspend state */ 1402 /* there's only one USB suspend state */
1403 if (intf->dev.power.power_state) 1403 if (intf->dev.power.power_state.event)
1404 return 0; 1404 return 0;
1405 1405
1406 if (driver->suspend) 1406 if (driver->suspend)
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index b01efb6b36f6..65ac9fef3a7c 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -641,7 +641,7 @@ show_registers (struct class_device *class_dev, char *buf)
641 641
642 spin_lock_irqsave (&ehci->lock, flags); 642 spin_lock_irqsave (&ehci->lock, flags);
643 643
644 if (bus->controller->power.power_state) { 644 if (bus->controller->power.power_state.event) {
645 size = scnprintf (next, size, 645 size = scnprintf (next, size,
646 "bus %s, device %s (driver " DRIVER_VERSION ")\n" 646 "bus %s, device %s (driver " DRIVER_VERSION ")\n"
647 "%s\n" 647 "%s\n"
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b56f25864ed6..4c972b57c7c3 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -638,7 +638,7 @@ iso_stream_alloc (unsigned mem_flags)
638{ 638{
639 struct ehci_iso_stream *stream; 639 struct ehci_iso_stream *stream;
640 640
641 stream = kcalloc(1, sizeof *stream, mem_flags); 641 stream = kzalloc(sizeof *stream, mem_flags);
642 if (likely (stream != NULL)) { 642 if (likely (stream != NULL)) {
643 INIT_LIST_HEAD(&stream->td_list); 643 INIT_LIST_HEAD(&stream->td_list);
644 INIT_LIST_HEAD(&stream->free_list); 644 INIT_LIST_HEAD(&stream->free_list);
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 76cb496c5836..75128c371800 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -717,7 +717,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
717 } 717 }
718 /* avoid all allocations within spinlocks: request or endpoint */ 718 /* avoid all allocations within spinlocks: request or endpoint */
719 if (!hep->hcpriv) { 719 if (!hep->hcpriv) {
720 ep = kcalloc(1, sizeof *ep, mem_flags); 720 ep = kzalloc(sizeof *ep, mem_flags);
721 if (!ep) 721 if (!ep)
722 return -ENOMEM; 722 return -ENOMEM;
723 } 723 }
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index c58408c95c3d..447f488f5d93 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -631,7 +631,7 @@ show_registers (struct class_device *class_dev, char *buf)
631 hcd->product_desc, 631 hcd->product_desc,
632 hcd_name); 632 hcd_name);
633 633
634 if (bus->controller->power.power_state) { 634 if (bus->controller->power.power_state.event) {
635 size -= scnprintf (next, size, 635 size -= scnprintf (next, size,
636 "SUSPENDED (no register access)\n"); 636 "SUSPENDED (no register access)\n");
637 goto done; 637 goto done;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 7a890a65f55d..d2a1fd40dfcb 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -835,7 +835,7 @@ static int sl811h_urb_enqueue(
835 835
836 /* avoid all allocations within spinlocks */ 836 /* avoid all allocations within spinlocks */
837 if (!hep->hcpriv) 837 if (!hep->hcpriv)
838 ep = kcalloc(1, sizeof *ep, mem_flags); 838 ep = kzalloc(sizeof *ep, mem_flags);
839 839
840 spin_lock_irqsave(&sl811->lock, flags); 840 spin_lock_irqsave(&sl811->lock, flags);
841 841
@@ -1781,9 +1781,9 @@ sl811h_suspend(struct device *dev, pm_message_t state, u32 phase)
1781 if (phase != SUSPEND_POWER_DOWN) 1781 if (phase != SUSPEND_POWER_DOWN)
1782 return retval; 1782 return retval;
1783 1783
1784 if (state <= PM_SUSPEND_MEM) 1784 if (state.event == PM_EVENT_FREEZE)
1785 retval = sl811h_hub_suspend(hcd); 1785 retval = sl811h_hub_suspend(hcd);
1786 else 1786 else if (state.event == PM_EVENT_SUSPEND)
1787 port_power(sl811, 0); 1787 port_power(sl811, 0);
1788 if (retval == 0) 1788 if (retval == 0)
1789 dev->power.power_state = state; 1789 dev->power.power_state = state;
@@ -1802,7 +1802,7 @@ sl811h_resume(struct device *dev, u32 phase)
1802 /* with no "check to see if VBUS is still powered" board hook, 1802 /* with no "check to see if VBUS is still powered" board hook,
1803 * let's assume it'd only be powered to enable remote wakeup. 1803 * let's assume it'd only be powered to enable remote wakeup.
1804 */ 1804 */
1805 if (dev->power.power_state > PM_SUSPEND_MEM 1805 if (dev->power.power_state.event == PM_EVENT_SUSPEND
1806 || !hcd->can_wakeup) { 1806 || !hcd->can_wakeup) {
1807 sl811->port1 = 0; 1807 sl811->port1 = 0;
1808 port_power(sl811, 1); 1808 port_power(sl811, 1);
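[Editorial note] In sl811-hcd.c the suspend path now dispatches on the event explicitly instead of comparing against system sleep states: a freeze keeps port power and only suspends the root hub, a full suspend drops port power, and other events are left alone. A hedged sketch of that dispatch, with the two handlers reduced to stubs:

#include <linux/pm.h>

static int example_hub_suspend(void)
{
	return 0;		/* placeholder: real code suspends the root hub */
}

static void example_port_power(int on)
{
	(void)on;		/* placeholder: real code gates VBUS here */
}

static int example_suspend(pm_message_t state)
{
	int retval = 0;

	switch (state.event) {
	case PM_EVENT_FREEZE:		/* swsusp snapshot: keep power, stop traffic */
		retval = example_hub_suspend();
		break;
	case PM_EVENT_SUSPEND:		/* real suspend: drop port power */
		example_port_power(0);
		break;
	default:			/* other events: nothing to do */
		break;
	}
	return retval;
}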
diff --git a/drivers/usb/input/acecad.c b/drivers/usb/input/acecad.c
index 13532f3e3efc..74f8760d7c07 100644
--- a/drivers/usb/input/acecad.c
+++ b/drivers/usb/input/acecad.c
@@ -152,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
152 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 152 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
153 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 153 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
154 154
155 acecad = kcalloc(1, sizeof(struct usb_acecad), GFP_KERNEL); 155 acecad = kzalloc(sizeof(struct usb_acecad), GFP_KERNEL);
156 if (!acecad) 156 if (!acecad)
157 return -ENOMEM; 157 return -ENOMEM;
158 158
diff --git a/drivers/usb/input/itmtouch.c b/drivers/usb/input/itmtouch.c
index 0dc439f10823..becb87efb869 100644
--- a/drivers/usb/input/itmtouch.c
+++ b/drivers/usb/input/itmtouch.c
@@ -166,7 +166,7 @@ static int itmtouch_probe(struct usb_interface *intf, const struct usb_device_id
166 interface = intf->cur_altsetting; 166 interface = intf->cur_altsetting;
167 endpoint = &interface->endpoint[0].desc; 167 endpoint = &interface->endpoint[0].desc;
168 168
169 if (!(itmtouch = kcalloc(1, sizeof(struct itmtouch_dev), GFP_KERNEL))) { 169 if (!(itmtouch = kzalloc(sizeof(struct itmtouch_dev), GFP_KERNEL))) {
170 err("%s - Out of memory.", __FUNCTION__); 170 err("%s - Out of memory.", __FUNCTION__);
171 return -ENOMEM; 171 return -ENOMEM;
172 } 172 }
diff --git a/drivers/usb/input/pid.c b/drivers/usb/input/pid.c
index 256963863478..acc71ec560e9 100644
--- a/drivers/usb/input/pid.c
+++ b/drivers/usb/input/pid.c
@@ -263,7 +263,7 @@ int hid_pid_init(struct hid_device *hid)
263 struct hid_ff_pid *private; 263 struct hid_ff_pid *private;
264 struct hid_input *hidinput = list_entry(&hid->inputs, struct hid_input, list); 264 struct hid_input *hidinput = list_entry(&hid->inputs, struct hid_input, list);
265 265
266 private = hid->ff_private = kcalloc(1, sizeof(struct hid_ff_pid), GFP_KERNEL); 266 private = hid->ff_private = kzalloc(sizeof(struct hid_ff_pid), GFP_KERNEL);
267 if (!private) 267 if (!private)
268 return -ENOMEM; 268 return -ENOMEM;
269 269
diff --git a/drivers/usb/media/w9968cf.c b/drivers/usb/media/w9968cf.c
index ca9f3a30634f..f36c0b6c6e36 100644
--- a/drivers/usb/media/w9968cf.c
+++ b/drivers/usb/media/w9968cf.c
@@ -1523,7 +1523,6 @@ static u32 w9968cf_i2c_func(struct i2c_adapter* adap)
1523static int w9968cf_i2c_attach_inform(struct i2c_client* client) 1523static int w9968cf_i2c_attach_inform(struct i2c_client* client)
1524{ 1524{
1525 struct w9968cf_device* cam = i2c_get_adapdata(client->adapter); 1525 struct w9968cf_device* cam = i2c_get_adapdata(client->adapter);
1526 const char* clientname = i2c_clientname(client);
1527 int id = client->driver->id, err = 0; 1526 int id = client->driver->id, err = 0;
1528 1527
1529 if (id == I2C_DRIVERID_OVCAMCHIP) { 1528 if (id == I2C_DRIVERID_OVCAMCHIP) {
@@ -1535,12 +1534,12 @@ static int w9968cf_i2c_attach_inform(struct i2c_client* client)
1535 } 1534 }
1536 } else { 1535 } else {
1537 DBG(4, "Rejected client [%s] with driver [%s]", 1536 DBG(4, "Rejected client [%s] with driver [%s]",
1538 clientname, client->driver->name) 1537 client->name, client->driver->name)
1539 return -EINVAL; 1538 return -EINVAL;
1540 } 1539 }
1541 1540
1542 DBG(5, "I2C attach client [%s] with driver [%s]", 1541 DBG(5, "I2C attach client [%s] with driver [%s]",
1543 clientname, client->driver->name) 1542 client->name, client->driver->name)
1544 1543
1545 return 0; 1544 return 0;
1546} 1545}
@@ -1549,12 +1548,11 @@ static int w9968cf_i2c_attach_inform(struct i2c_client* client)
1549static int w9968cf_i2c_detach_inform(struct i2c_client* client) 1548static int w9968cf_i2c_detach_inform(struct i2c_client* client)
1550{ 1549{
1551 struct w9968cf_device* cam = i2c_get_adapdata(client->adapter); 1550 struct w9968cf_device* cam = i2c_get_adapdata(client->adapter);
1552 const char* clientname = i2c_clientname(client);
1553 1551
1554 if (cam->sensor_client == client) 1552 if (cam->sensor_client == client)
1555 cam->sensor_client = NULL; 1553 cam->sensor_client = NULL;
1556 1554
1557 DBG(5, "I2C detach client [%s]", clientname) 1555 DBG(5, "I2C detach client [%s]", client->name)
1558 1556
1559 return 0; 1557 return 0;
1560} 1558}
@@ -1573,15 +1571,13 @@ static int w9968cf_i2c_init(struct w9968cf_device* cam)
1573 int err = 0; 1571 int err = 0;
1574 1572
1575 static struct i2c_algorithm algo = { 1573 static struct i2c_algorithm algo = {
1576 .name = "W996[87]CF algorithm",
1577 .id = I2C_ALGO_SMBUS,
1578 .smbus_xfer = w9968cf_i2c_smbus_xfer, 1574 .smbus_xfer = w9968cf_i2c_smbus_xfer,
1579 .algo_control = w9968cf_i2c_control, 1575 .algo_control = w9968cf_i2c_control,
1580 .functionality = w9968cf_i2c_func, 1576 .functionality = w9968cf_i2c_func,
1581 }; 1577 };
1582 1578
1583 static struct i2c_adapter adap = { 1579 static struct i2c_adapter adap = {
1584 .id = I2C_ALGO_SMBUS | I2C_HW_SMBUS_W9968CF, 1580 .id = I2C_HW_SMBUS_W9968CF,
1585 .class = I2C_CLASS_CAM_DIGITAL, 1581 .class = I2C_CLASS_CAM_DIGITAL,
1586 .owner = THIS_MODULE, 1582 .owner = THIS_MODULE,
1587 .client_register = w9968cf_i2c_attach_inform, 1583 .client_register = w9968cf_i2c_attach_inform,
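[Editorial note] The w9968cf.c changes follow two i2c-core cleanups in this merge: the i2c_clientname() wrapper is gone (the name lives directly in client->name), and adapter ids no longer OR an I2C_ALGO_* value into the I2C_HW_* bus id, with struct i2c_algorithm also losing its .name and .id fields. The radeon, nvidia and matrox framebuffer hunks below make the matching id change. A small sketch of the new conventions; example_attach_inform and example_adapter are illustrative names:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>

static int example_attach_inform(struct i2c_client *client)
{
	/* i2c_clientname(client) is gone; use the field directly */
	printk(KERN_DEBUG "attach [%s] driver [%s]\n",
	       client->name, client->driver->name);
	return 0;
}

static struct i2c_adapter example_adapter = {
	.owner		 = THIS_MODULE,
	.id		 = I2C_HW_SMBUS_W9968CF,	/* bare hardware id, no I2C_ALGO_* */
	.class		 = I2C_CLASS_CAM_DIGITAL,
	.client_register = example_attach_inform,
};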
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index cda7249a90b2..fd7fb98e4b20 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1533,7 +1533,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1533 if (down_interruptible (&dev->sem)) 1533 if (down_interruptible (&dev->sem))
1534 return -ERESTARTSYS; 1534 return -ERESTARTSYS;
1535 1535
1536 if (intf->dev.power.power_state != PMSG_ON) { 1536 if (intf->dev.power.power_state.event != PM_EVENT_ON) {
1537 up (&dev->sem); 1537 up (&dev->sem);
1538 return -EHOSTUNREACH; 1538 return -EHOSTUNREACH;
1539 } 1539 }
diff --git a/drivers/usb/net/Makefile b/drivers/usb/net/Makefile
index 16f352195512..fe3fd4115e1e 100644
--- a/drivers/usb/net/Makefile
+++ b/drivers/usb/net/Makefile
@@ -8,5 +8,3 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
8obj-$(CONFIG_USB_RTL8150) += rtl8150.o 8obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_USBNET) += usbnet.o 9obj-$(CONFIG_USB_USBNET) += usbnet.o
10obj-$(CONFIG_USB_ZD1201) += zd1201.o 10obj-$(CONFIG_USB_ZD1201) += zd1201.o
11
12CFLAGS_zd1201.o = -Idrivers/net/wireless/
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 4528a00c45b0..a2f67245f6da 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net)
2903 * completion callbacks. 2.5 should have fixed those bugs... 2903 * completion callbacks. 2.5 should have fixed those bugs...
2904 */ 2904 */
2905 2905
2906static void defer_bh (struct usbnet *dev, struct sk_buff *skb) 2906static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
2907{ 2907{
2908 struct sk_buff_head *list = skb->list;
2909 unsigned long flags; 2908 unsigned long flags;
2910 2909
2911 spin_lock_irqsave (&list->lock, flags); 2910 spin_lock_irqsave(&list->lock, flags);
2912 __skb_unlink (skb, list); 2911 __skb_unlink(skb, list);
2913 spin_unlock (&list->lock); 2912 spin_unlock(&list->lock);
2914 spin_lock (&dev->done.lock); 2913 spin_lock(&dev->done.lock);
2915 __skb_queue_tail (&dev->done, skb); 2914 __skb_queue_tail(&dev->done, skb);
2916 if (dev->done.qlen == 1) 2915 if (dev->done.qlen == 1)
2917 tasklet_schedule (&dev->bh); 2916 tasklet_schedule(&dev->bh);
2918 spin_unlock_irqrestore (&dev->done.lock, flags); 2917 spin_unlock_irqrestore(&dev->done.lock, flags);
2919} 2918}
2920 2919
2921/* some work can't be done in tasklets, so we use keventd 2920/* some work can't be done in tasklets, so we use keventd
@@ -3120,7 +3119,7 @@ block:
3120 break; 3119 break;
3121 } 3120 }
3122 3121
3123 defer_bh (dev, skb); 3122 defer_bh(dev, skb, &dev->rxq);
3124 3123
3125 if (urb) { 3124 if (urb) {
3126 if (netif_running (dev->net) 3125 if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)
3490 3489
3491 urb->dev = NULL; 3490 urb->dev = NULL;
3492 entry->state = tx_done; 3491 entry->state = tx_done;
3493 defer_bh (dev, skb); 3492 defer_bh(dev, skb, &dev->txq);
3494} 3493}
3495 3494
3496/*-------------------------------------------------------------------------*/ 3495/*-------------------------------------------------------------------------*/
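[Editorial note] defer_bh() now takes the queue to unlink the skb from as an explicit argument, because struct sk_buff no longer carries a ->list backpointer; rx_complete() passes &dev->rxq and tx_complete() passes &dev->txq. A condensed sketch of the pattern, with the device struct reduced to the fields the sketch needs:

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct example_dev {
	struct sk_buff_head rxq, txq, done;
	struct tasklet_struct bh;
};

/* Move a completed skb from the queue it was submitted on to the done list. */
static void example_defer_bh(struct example_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);		/* caller names the queue now */
	spin_unlock(&list->lock);

	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
}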
diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c
index e32a80b39182..fc013978837e 100644
--- a/drivers/usb/net/zd1201.c
+++ b/drivers/usb/net/zd1201.c
@@ -21,7 +21,7 @@
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/if_arp.h> 22#include <linux/if_arp.h>
23#include <linux/firmware.h> 23#include <linux/firmware.h>
24#include <ieee802_11.h> 24#include <net/ieee80211.h>
25#include "zd1201.h" 25#include "zd1201.h"
26 26
27static struct usb_device_id zd1201_table[] = { 27static struct usb_device_id zd1201_table[] = {
@@ -338,24 +338,24 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
338 goto resubmit; 338 goto resubmit;
339 } 339 }
340 340
341 if ((seq & IEEE802_11_SCTL_FRAG) || 341 if ((seq & IEEE80211_SCTL_FRAG) ||
342 (fc & IEEE802_11_FCTL_MOREFRAGS)) { 342 (fc & IEEE80211_FCTL_MOREFRAGS)) {
343 struct zd1201_frag *frag = NULL; 343 struct zd1201_frag *frag = NULL;
344 char *ptr; 344 char *ptr;
345 345
346 if (datalen<14) 346 if (datalen<14)
347 goto resubmit; 347 goto resubmit;
348 if ((seq & IEEE802_11_SCTL_FRAG) == 0) { 348 if ((seq & IEEE80211_SCTL_FRAG) == 0) {
349 frag = kmalloc(sizeof(*frag), GFP_ATOMIC); 349 frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
350 if (!frag) 350 if (!frag)
351 goto resubmit; 351 goto resubmit;
352 skb = dev_alloc_skb(IEEE802_11_DATA_LEN +14+2); 352 skb = dev_alloc_skb(IEEE80211_DATA_LEN +14+2);
353 if (!skb) { 353 if (!skb) {
354 kfree(frag); 354 kfree(frag);
355 goto resubmit; 355 goto resubmit;
356 } 356 }
357 frag->skb = skb; 357 frag->skb = skb;
358 frag->seq = seq & IEEE802_11_SCTL_SEQ; 358 frag->seq = seq & IEEE80211_SCTL_SEQ;
359 skb_reserve(skb, 2); 359 skb_reserve(skb, 2);
360 memcpy(skb_put(skb, 12), &data[datalen-14], 12); 360 memcpy(skb_put(skb, 12), &data[datalen-14], 12);
361 memcpy(skb_put(skb, 2), &data[6], 2); 361 memcpy(skb_put(skb, 2), &data[6], 2);
@@ -364,7 +364,7 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
364 goto resubmit; 364 goto resubmit;
365 } 365 }
366 hlist_for_each_entry(frag, node, &zd->fraglist, fnode) 366 hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
367 if(frag->seq == (seq&IEEE802_11_SCTL_SEQ)) 367 if(frag->seq == (seq&IEEE80211_SCTL_SEQ))
368 break; 368 break;
369 if (!frag) 369 if (!frag)
370 goto resubmit; 370 goto resubmit;
@@ -372,7 +372,7 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
372 ptr = skb_put(skb, len); 372 ptr = skb_put(skb, len);
373 if (ptr) 373 if (ptr)
374 memcpy(ptr, data+8, len); 374 memcpy(ptr, data+8, len);
375 if (fc & IEEE802_11_FCTL_MOREFRAGS) 375 if (fc & IEEE80211_FCTL_MOREFRAGS)
376 goto resubmit; 376 goto resubmit;
377 hlist_del_init(&frag->fnode); 377 hlist_del_init(&frag->fnode);
378 kfree(frag); 378 kfree(frag);
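[Editorial note] zd1201.c switches from the private <ieee802_11.h> header, which the Makefile hunk above used to reach with an extra -I flag (now dropped), to the shared <net/ieee80211.h>, renaming the frame-control and sequence-control masks accordingly. A hedged sketch of the fragment test using the renamed constants; example_is_fragment is an illustrative helper, not part of the driver:

#include <linux/types.h>
#include <net/ieee80211.h>

/* Returns nonzero if this 802.11 frame is part of a fragmented MSDU. */
static int example_is_fragment(u16 fc, u16 seq)
{
	return (seq & IEEE80211_SCTL_FRAG) ||		/* fragment number != 0 */
	       (fc & IEEE80211_FCTL_MOREFRAGS);		/* more fragments follow */
}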
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 7bc1d44d8814..b0eba3ac6420 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -2323,17 +2323,16 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2323 * can properly take care of D3 ? Also, with swsusp, we 2323 * can properly take care of D3 ? Also, with swsusp, we
2324 * know we'll be rebooted, ... 2324 * know we'll be rebooted, ...
2325 */ 2325 */
2326#ifdef CONFIG_PPC_PMAC 2326#ifndef CONFIG_PPC_PMAC
2327 /* HACK ALERT ! Once I find a proper way to say to each driver 2327 /* HACK ALERT ! Once I find a proper way to say to each driver
2328 * individually what will happen with it's PCI slot, I'll change 2328 * individually what will happen with it's PCI slot, I'll change
2329 * that. On laptops, the AGP slot is just unclocked, so D2 is 2329 * that. On laptops, the AGP slot is just unclocked, so D2 is
2330 * expected, while on desktops, the card is powered off 2330 * expected, while on desktops, the card is powered off
2331 */ 2331 */
2332 if (state >= 3) 2332 return 0;
2333 state = 2;
2334#endif /* CONFIG_PPC_PMAC */ 2333#endif /* CONFIG_PPC_PMAC */
2335 2334
2336 if (state != 2 || state == pdev->dev.power.power_state) 2335 if (state.event == pdev->dev.power.power_state.event)
2337 return 0; 2336 return 0;
2338 2337
2339 printk(KERN_DEBUG "aty128fb: suspending...\n"); 2338 printk(KERN_DEBUG "aty128fb: suspending...\n");
@@ -2367,7 +2366,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2367 * used dummy fb ops, 2.5 need proper support for this at the 2366 * used dummy fb ops, 2.5 need proper support for this at the
2368 * fbdev level 2367 * fbdev level
2369 */ 2368 */
2370 if (state == 2) 2369 if (state.event != PM_EVENT_ON)
2371 aty128_set_suspend(par, 1); 2370 aty128_set_suspend(par, 1);
2372 2371
2373 release_console_sem(); 2372 release_console_sem();
@@ -2382,12 +2381,11 @@ static int aty128_do_resume(struct pci_dev *pdev)
2382 struct fb_info *info = pci_get_drvdata(pdev); 2381 struct fb_info *info = pci_get_drvdata(pdev);
2383 struct aty128fb_par *par = info->par; 2382 struct aty128fb_par *par = info->par;
2384 2383
2385 if (pdev->dev.power.power_state == 0) 2384 if (pdev->dev.power.power_state.event == PM_EVENT_ON)
2386 return 0; 2385 return 0;
2387 2386
2388 /* Wakeup chip */ 2387 /* Wakeup chip */
2389 if (pdev->dev.power.power_state == 2) 2388 aty128_set_suspend(par, 0);
2390 aty128_set_suspend(par, 0);
2391 par->asleep = 0; 2389 par->asleep = 0;
2392 2390
2393 /* Restore display & engine */ 2391 /* Restore display & engine */
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 8c42538dc8c1..3e10bd837d9e 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2022,17 +2022,16 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2022 struct fb_info *info = pci_get_drvdata(pdev); 2022 struct fb_info *info = pci_get_drvdata(pdev);
2023 struct atyfb_par *par = (struct atyfb_par *) info->par; 2023 struct atyfb_par *par = (struct atyfb_par *) info->par;
2024 2024
2025#ifdef CONFIG_PPC_PMAC 2025#ifndef CONFIG_PPC_PMAC
2026 /* HACK ALERT ! Once I find a proper way to say to each driver 2026 /* HACK ALERT ! Once I find a proper way to say to each driver
2027 * individually what will happen with it's PCI slot, I'll change 2027 * individually what will happen with it's PCI slot, I'll change
2028 * that. On laptops, the AGP slot is just unclocked, so D2 is 2028 * that. On laptops, the AGP slot is just unclocked, so D2 is
2029 * expected, while on desktops, the card is powered off 2029 * expected, while on desktops, the card is powered off
2030 */ 2030 */
2031 if (state >= 3) 2031 return 0;
2032 state = 2;
2033#endif /* CONFIG_PPC_PMAC */ 2032#endif /* CONFIG_PPC_PMAC */
2034 2033
2035 if (state != 2 || state == pdev->dev.power.power_state) 2034 if (state.event == pdev->dev.power.power_state.event)
2036 return 0; 2035 return 0;
2037 2036
2038 acquire_console_sem(); 2037 acquire_console_sem();
@@ -2071,12 +2070,12 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
2071 struct fb_info *info = pci_get_drvdata(pdev); 2070 struct fb_info *info = pci_get_drvdata(pdev);
2072 struct atyfb_par *par = (struct atyfb_par *) info->par; 2071 struct atyfb_par *par = (struct atyfb_par *) info->par;
2073 2072
2074 if (pdev->dev.power.power_state == 0) 2073 if (pdev->dev.power.power_state.event == PM_EVENT_ON)
2075 return 0; 2074 return 0;
2076 2075
2077 acquire_console_sem(); 2076 acquire_console_sem();
2078 2077
2079 if (pdev->dev.power.power_state == 2) 2078 if (pdev->dev.power.power_state.event == 2)
2080 aty_power_mgmt(0, par); 2079 aty_power_mgmt(0, par);
2081 par->asleep = 0; 2080 par->asleep = 0;
2082 2081
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 762244164c81..a9d0414e4655 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -75,7 +75,7 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
75 75
76 strcpy(chan->adapter.name, name); 76 strcpy(chan->adapter.name, name);
77 chan->adapter.owner = THIS_MODULE; 77 chan->adapter.owner = THIS_MODULE;
78 chan->adapter.id = I2C_ALGO_ATI; 78 chan->adapter.id = I2C_HW_B_RADEON;
79 chan->adapter.algo_data = &chan->algo; 79 chan->adapter.algo_data = &chan->algo;
80 chan->adapter.dev.parent = &chan->rinfo->pdev->dev; 80 chan->adapter.dev.parent = &chan->rinfo->pdev->dev;
81 chan->algo.setsda = radeon_gpio_setsda; 81 chan->algo.setsda = radeon_gpio_setsda;
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 98352af39325..59a1b6f85067 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2526,18 +2526,18 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2526 struct radeonfb_info *rinfo = info->par; 2526 struct radeonfb_info *rinfo = info->par;
2527 int i; 2527 int i;
2528 2528
2529 if (state == pdev->dev.power.power_state) 2529 if (state.event == pdev->dev.power.power_state.event)
2530 return 0; 2530 return 0;
2531 2531
2532 printk(KERN_DEBUG "radeonfb (%s): suspending to state: %d...\n", 2532 printk(KERN_DEBUG "radeonfb (%s): suspending to state: %d...\n",
2533 pci_name(pdev), state); 2533 pci_name(pdev), state.event);
2534 2534
2535 /* For suspend-to-disk, we cheat here. We don't suspend anything and 2535 /* For suspend-to-disk, we cheat here. We don't suspend anything and
2536 * let fbcon continue drawing until we are all set. That shouldn't 2536 * let fbcon continue drawing until we are all set. That shouldn't
2537 * really cause any problem at this point, provided that the wakeup 2537 * really cause any problem at this point, provided that the wakeup
2538 * code knows that any state in memory may not match the HW 2538 * code knows that any state in memory may not match the HW
2539 */ 2539 */
2540 if (state != PM_SUSPEND_MEM) 2540 if (state.event == PM_EVENT_FREEZE)
2541 goto done; 2541 goto done;
2542 2542
2543 acquire_console_sem(); 2543 acquire_console_sem();
@@ -2616,7 +2616,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2616 struct radeonfb_info *rinfo = info->par; 2616 struct radeonfb_info *rinfo = info->par;
2617 int rc = 0; 2617 int rc = 0;
2618 2618
2619 if (pdev->dev.power.power_state == 0) 2619 if (pdev->dev.power.power_state.event == PM_EVENT_ON)
2620 return 0; 2620 return 0;
2621 2621
2622 if (rinfo->no_schedule) { 2622 if (rinfo->no_schedule) {
@@ -2626,7 +2626,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2626 acquire_console_sem(); 2626 acquire_console_sem();
2627 2627
2628 printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n", 2628 printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
2629 pci_name(pdev), pdev->dev.power.power_state); 2629 pci_name(pdev), pdev->dev.power.power_state.event);
2630 2630
2631 2631
2632 if (pci_enable_device(pdev)) { 2632 if (pci_enable_device(pdev)) {
@@ -2637,7 +2637,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2637 } 2637 }
2638 pci_set_master(pdev); 2638 pci_set_master(pdev);
2639 2639
2640 if (pdev->dev.power.power_state == PM_SUSPEND_MEM) { 2640 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2641 /* Wakeup chip. Check from config space if we were powered off 2641 /* Wakeup chip. Check from config space if we were powered off
2642 * (todo: additionally, check CLK_PIN_CNTL too) 2642 * (todo: additionally, check CLK_PIN_CNTL too)
2643 */ 2643 */
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 9aae884475be..4af321fae390 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,3 +3,4 @@
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 4obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
5obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o 5obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
6obj-$(CONFIG_SHARP_LOCOMO) += locomolcd.o
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
new file mode 100644
index 000000000000..ada6e75eb048
--- /dev/null
+++ b/drivers/video/backlight/locomolcd.c
@@ -0,0 +1,157 @@
1/*
2 * Backlight control code for Sharp Zaurus SL-5500
3 *
4 * Copyright 2005 John Lenz <lenz@cs.wisc.edu>
5 * Maintainer: Pavel Machek <pavel@suse.cz> (unless John wants to :-)
6 * GPL v2
7 *
8 * This driver assumes single CPU. That's okay, because collie is
9 * slightly old hardware, and noone is going to retrofit second CPU to
10 * old PDA.
11 */
12
13/* LCD power functions */
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/interrupt.h>
20
21#include <asm/hardware/locomo.h>
22#include <asm/irq.h>
23
24#ifdef CONFIG_SA1100_COLLIE
25#include <asm/arch/collie.h>
26#else
27#include <asm/arch/poodle.h>
28#endif
29
30extern void (*sa1100fb_lcd_power)(int on);
31
32static struct locomo_dev *locomolcd_dev;
33
34static void locomolcd_on(int comadj)
35{
36 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_VSHA_ON, 0);
37 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHA_ON, 1);
38 mdelay(2);
39
40 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_VSHD_ON, 0);
41 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHD_ON, 1);
42 mdelay(2);
43
44 locomo_m62332_senddata(locomolcd_dev, comadj, 0);
45 mdelay(5);
46
47 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_VEE_ON, 0);
48 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VEE_ON, 1);
49 mdelay(10);
50
51 /* TFTCRST | CPSOUT=0 | CPSEN */
52 locomo_writel(0x01, locomolcd_dev->mapbase + LOCOMO_TC);
53
54 /* Set CPSD */
55 locomo_writel(6, locomolcd_dev->mapbase + LOCOMO_CPSD);
56
57 /* TFTCRST | CPSOUT=0 | CPSEN */
58 locomo_writel((0x04 | 0x01), locomolcd_dev->mapbase + LOCOMO_TC);
59 mdelay(10);
60
61 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_MOD, 0);
62 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_MOD, 1);
63}
64
65static void locomolcd_off(int comadj)
66{
67 /* TFTCRST=1 | CPSOUT=1 | CPSEN = 0 */
68 locomo_writel(0x06, locomolcd_dev->mapbase + LOCOMO_TC);
69 mdelay(1);
70
71 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHA_ON, 0);
72 mdelay(110);
73
74 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VEE_ON, 0);
75 mdelay(700);
76
77 /* TFTCRST=0 | CPSOUT=0 | CPSEN = 0 */
78 locomo_writel(0, locomolcd_dev->mapbase + LOCOMO_TC);
79 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_MOD, 0);
80 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHD_ON, 0);
81}
82
83void locomolcd_power(int on)
84{
85 int comadj = 118;
86 unsigned long flags;
87
88 local_irq_save(flags);
89
90 if (!locomolcd_dev) {
91 local_irq_restore(flags);
92 return;
93 }
94
95 /* read comadj */
96#ifdef CONFIG_MACH_POODLE
97 comadj = 118;
98#else
99 comadj = 128;
100#endif
101
102 if (on)
103 locomolcd_on(comadj);
104 else
105 locomolcd_off(comadj);
106
107 local_irq_restore(flags);
108}
109EXPORT_SYMBOL(locomolcd_power);
110
111static int poodle_lcd_probe(struct locomo_dev *dev)
112{
113 unsigned long flags;
114
115 local_irq_save(flags);
116 locomolcd_dev = dev;
117
118 /* the poodle_lcd_power function is called for the first time
119 * from fs_initcall, which is before locomo is activated.
120 * We need to recall poodle_lcd_power here*/
121#ifdef CONFIG_MACH_POODLE
122 locomolcd_power(1);
123#endif
124 local_irq_restore(flags);
125 return 0;
126}
127
128static int poodle_lcd_remove(struct locomo_dev *dev)
129{
130 unsigned long flags;
131 local_irq_save(flags);
132 locomolcd_dev = NULL;
133 local_irq_restore(flags);
134 return 0;
135}
136
137static struct locomo_driver poodle_lcd_driver = {
138 .drv = {
139 .name = "locomo-backlight",
140 },
141 .devid = LOCOMO_DEVID_BACKLIGHT,
142 .probe = poodle_lcd_probe,
143 .remove = poodle_lcd_remove,
144};
145
146static int __init poodle_lcd_init(void)
147{
148 int ret = locomo_driver_register(&poodle_lcd_driver);
149 if (ret) return ret;
150
151#ifdef CONFIG_SA1100_COLLIE
152 sa1100fb_lcd_power = locomolcd_power;
153#endif
154 return 0;
155}
156device_initcall(poodle_lcd_init);
157
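[Editorial note] locomolcd.c is a new backlight driver for the Sharp Zaurus SL-5500 (collie) LOCOMO companion chip; the Makefile hunk above wires it up under CONFIG_SHARP_LOCOMO, and on collie it installs locomolcd_power() into the sa1100fb_lcd_power hook. A hypothetical consumer-side sketch of how a framebuffer enable path might drive that hook; the functions and ordering are illustrative, not the actual sa1100fb code:

/* Declared by sa1100fb; locomolcd assigns locomolcd_power to it at init. */
extern void (*sa1100fb_lcd_power)(int on);

/* Illustrative only: how an LCD enable/disable path might use the hook. */
static void example_enable_lcd(void)
{
	/* ...program the SA-1100 LCD controller registers... */

	if (sa1100fb_lcd_power)
		sa1100fb_lcd_power(1);	/* power the panel and backlight up */
}

static void example_disable_lcd(void)
{
	if (sa1100fb_lcd_power)
		sa1100fb_lcd_power(0);	/* power the panel down first */

	/* ...then stop the LCD controller... */
}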
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index e75a965ec760..4131243cfdf8 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -462,9 +462,9 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
462{ 462{
463 struct fb_info *p = pci_get_drvdata(pdev); 463 struct fb_info *p = pci_get_drvdata(pdev);
464 464
465 if (state == pdev->dev.power.power_state) 465 if (state.event == pdev->dev.power.power_state.event)
466 return 0; 466 return 0;
467 if (state != PM_SUSPEND_MEM) 467 if (state.event != PM_SUSPEND_MEM)
468 goto done; 468 goto done;
469 469
470 acquire_console_sem(); 470 acquire_console_sem();
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 7513fb9b19cf..6db183462b92 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1506,12 +1506,12 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t state)
1506 struct i810fb_par *par = (struct i810fb_par *) info->par; 1506 struct i810fb_par *par = (struct i810fb_par *) info->par;
1507 int blank = 0, prev_state = par->cur_state; 1507 int blank = 0, prev_state = par->cur_state;
1508 1508
1509 if (state == prev_state) 1509 if (state.event == prev_state)
1510 return 0; 1510 return 0;
1511 1511
1512 par->cur_state = state; 1512 par->cur_state = state.event;
1513 1513
1514 switch (state) { 1514 switch (state.event) {
1515 case 1: 1515 case 1:
1516 blank = VESA_VSYNC_SUSPEND; 1516 blank = VESA_VSYNC_SUSPEND;
1517 break; 1517 break;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 67f85344f0cc..ad60bbb16cdf 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1271,7 +1271,7 @@ ERROR0:;
1271} 1271}
1272 1272
1273static int maven_attach_adapter(struct i2c_adapter* adapter) { 1273static int maven_attach_adapter(struct i2c_adapter* adapter) {
1274 if (adapter->id == (I2C_ALGO_BIT | I2C_HW_B_G400)) 1274 if (adapter->id == I2C_HW_B_G400)
1275 return i2c_probe(adapter, &addr_data, &maven_detect_client); 1275 return i2c_probe(adapter, &addr_data, &maven_detect_client);
1276 return 0; 1276 return 0;
1277} 1277}
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index 3757c1407c19..1a91bffdda26 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -90,14 +90,13 @@ static int nvidia_gpio_getsda(void *data)
90 return val; 90 return val;
91} 91}
92 92
93#define I2C_ALGO_NVIDIA 0x0e0000
94static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name) 93static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name)
95{ 94{
96 int rc; 95 int rc;
97 96
98 strcpy(chan->adapter.name, name); 97 strcpy(chan->adapter.name, name);
99 chan->adapter.owner = THIS_MODULE; 98 chan->adapter.owner = THIS_MODULE;
100 chan->adapter.id = I2C_ALGO_NVIDIA; 99 chan->adapter.id = I2C_HW_B_NVIDIA;
101 chan->adapter.algo_data = &chan->algo; 100 chan->adapter.algo_data = &chan->algo;
102 chan->adapter.dev.parent = &chan->par->pci_dev->dev; 101 chan->adapter.dev.parent = &chan->par->pci_dev->dev;
103 chan->algo.setsda = nvidia_gpio_setsda; 102 chan->algo.setsda = nvidia_gpio_setsda;
diff --git a/drivers/video/pmag-aa-fb.c b/drivers/video/pmag-aa-fb.c
index 3e00ad7f2e31..28d1fe5fe340 100644
--- a/drivers/video/pmag-aa-fb.c
+++ b/drivers/video/pmag-aa-fb.c
@@ -413,7 +413,7 @@ static struct fb_ops aafb_ops = {
413 413
414static int __init init_one(int slot) 414static int __init init_one(int slot)
415{ 415{
416 unsigned long base_addr = get_tc_base_addr(slot); 416 unsigned long base_addr = CKSEG1ADDR(get_tc_base_addr(slot));
417 struct aafb_info *ip = &my_fb_info[slot]; 417 struct aafb_info *ip = &my_fb_info[slot];
418 418
419 memset(ip, 0, sizeof(struct aafb_info)); 419 memset(ip, 0, sizeof(struct aafb_info));
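[Editorial note] pmag-aa-fb.c now wraps the TURBOchannel slot base in CKSEG1ADDR(), which on 32-bit MIPS maps a physical address into the uncached KSEG1 window so register and frame-buffer accesses bypass the cache. A minimal sketch, with the physical address treated as a placeholder:

#include <asm/addrspace.h>

/* Map a physical bus address into the uncached KSEG1 segment.
 * On 32-bit MIPS this is essentially (0xa0000000 | physaddr).
 */
static volatile void *example_map_uncached(unsigned long physaddr)
{
	return (volatile void *)CKSEG1ADDR(physaddr);
}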
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index f8095588e99d..c98f1c8d7dc2 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -1,57 +1,55 @@
1/* 1/*
2 * linux/drivers/video/pmag-ba-fb.c 2 * linux/drivers/video/pmag-ba-fb.c
3 * 3 *
4 * PMAG-BA TurboChannel framebuffer card support ... derived from: 4 * PMAG-BA TURBOchannel Color Frame Buffer (CFB) card support,
5 * derived from:
5 * "HP300 Topcat framebuffer support (derived from macfb of all things) 6 * "HP300 Topcat framebuffer support (derived from macfb of all things)
6 * Phil Blundell <philb@gnu.org> 1998", the original code can be 7 * Phil Blundell <philb@gnu.org> 1998", the original code can be
7 * found in the file hpfb.c in the same directory. 8 * found in the file hpfb.c in the same directory.
8 * 9 *
9 * Based on digital document: 10 * Based on digital document:
10 * "PMAG-BA TURBOchannel Color Frame Buffer 11 * "PMAG-BA TURBOchannel Color Frame Buffer
11 * Functional Specification", Revision 1.2, August 27, 1990 12 * Functional Specification", Revision 1.2, August 27, 1990
12 * 13 *
13 * DECstation related code Copyright (C) 1999, 2000, 2001 by 14 * DECstation related code Copyright (C) 1999, 2000, 2001 by
14 * Michael Engel <engel@unix-ag.org>, 15 * Michael Engel <engel@unix-ag.org>,
15 * Karsten Merker <merker@linuxtag.org> and 16 * Karsten Merker <merker@linuxtag.org> and
16 * Harald Koerfgen. 17 * Harald Koerfgen.
17 * This file is subject to the terms and conditions of the GNU General 18 * Copyright (c) 2005 Maciej W. Rozycki
18 * Public License. See the file COPYING in the main directory of this
19 * archive for more details.
20 * 19 *
20 * This file is subject to the terms and conditions of the GNU General
21 * Public License. See the file COPYING in the main directory of this
22 * archive for more details.
21 */ 23 */
22#include <linux/module.h> 24
23#include <linux/kernel.h> 25#include <linux/compiler.h>
24#include <linux/sched.h>
25#include <linux/errno.h> 26#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/timer.h>
28#include <linux/mm.h>
29#include <linux/tty.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/fb.h> 27#include <linux/fb.h>
34#include <asm/bootinfo.h> 28#include <linux/init.h>
35#include <asm/dec/machtype.h> 29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/types.h>
32
33#include <asm/bug.h>
34#include <asm/io.h>
35#include <asm/system.h>
36
36#include <asm/dec/tc.h> 37#include <asm/dec/tc.h>
38
37#include <video/pmag-ba-fb.h> 39#include <video/pmag-ba-fb.h>
38 40
39struct pmag_ba_ramdac_regs { 41
40 unsigned char addr_low; 42struct pmagbafb_par {
41 unsigned char pad0[3]; 43 struct fb_info *next;
42 unsigned char addr_hi; 44 volatile void __iomem *mmio;
43 unsigned char pad1[3]; 45 volatile u32 __iomem *dac;
44 unsigned char data; 46 int slot;
45 unsigned char pad2[3];
46 unsigned char cmap;
47}; 47};
48 48
49/*
50 * Max 3 TURBOchannel slots -> max 3 PMAG-BA :)
51 */
52static struct fb_info pmagba_fb_info[3];
53 49
54static struct fb_var_screeninfo pmagbafb_defined = { 50static struct fb_info *root_pmagbafb_dev;
51
52static struct fb_var_screeninfo pmagbafb_defined __initdata = {
55 .xres = 1024, 53 .xres = 1024,
56 .yres = 864, 54 .yres = 864,
57 .xres_virtual = 1024, 55 .xres_virtual = 1024,
@@ -61,58 +59,71 @@ static struct fb_var_screeninfo pmagbafb_defined = {
61 .green.length = 8, 59 .green.length = 8,
62 .blue.length = 8, 60 .blue.length = 8,
63 .activate = FB_ACTIVATE_NOW, 61 .activate = FB_ACTIVATE_NOW,
64 .height = 274, 62 .height = -1,
65 .width = 195, 63 .width = -1,
66 .accel = FB_ACCEL_NONE, 64 .accel_flags = FB_ACCEL_NONE,
65 .pixclock = 14452,
66 .left_margin = 116,
67 .right_margin = 12,
68 .upper_margin = 34,
69 .lower_margin = 12,
70 .hsync_len = 128,
71 .vsync_len = 3,
72 .sync = FB_SYNC_ON_GREEN,
67 .vmode = FB_VMODE_NONINTERLACED, 73 .vmode = FB_VMODE_NONINTERLACED,
68}; 74};
69 75
70static struct fb_fix_screeninfo pmagbafb_fix = { 76static struct fb_fix_screeninfo pmagbafb_fix __initdata = {
71 .id = "PMAG-BA", 77 .id = "PMAG-BA",
72 .smem_len = (1024 * 864), 78 .smem_len = (1024 * 1024),
73 .type = FB_TYPE_PACKED_PIXELS, 79 .type = FB_TYPE_PACKED_PIXELS,
74 .visual = FB_VISUAL_PSEUDOCOLOR, 80 .visual = FB_VISUAL_PSEUDOCOLOR,
75 .line_length = 1024, 81 .line_length = 1024,
82 .mmio_len = PMAG_BA_SIZE - PMAG_BA_BT459,
76}; 83};
77 84
78/* 85
79 * Turn hardware cursor off 86static inline void dac_write(struct pmagbafb_par *par, unsigned int reg, u8 v)
80 */
81void pmagbafb_erase_cursor(struct pmag_ba_ramdac_regs *bt459_regs)
82{ 87{
83 bt459_regs->addr_low = 0; 88 writeb(v, par->dac + reg / 4);
84 bt459_regs->addr_hi = 3;
85 bt459_regs->data = 0;
86} 89}
87 90
91static inline u8 dac_read(struct pmagbafb_par *par, unsigned int reg)
92{
93 return readb(par->dac + reg / 4);
94}
95
96
88/* 97/*
89 * Set the palette. 98 * Set the palette.
90 */ 99 */
91static int pmagbafb_setcolreg(unsigned regno, unsigned red, unsigned green, 100static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
92 unsigned blue, unsigned transp, 101 unsigned int green, unsigned int blue,
93 struct fb_info *info) 102 unsigned int transp, struct fb_info *info)
94{ 103{
95 struct pmag_ba_ramdac_regs *bt459_regs = (struct pmag_ba_ramdac_regs *) info->par; 104 struct pmagbafb_par *par = info->par;
96 105
97 if (regno >= info->cmap.len) 106 BUG_ON(regno >= info->cmap.len);
98 return 1;
99 107
100 red >>= 8; /* The cmap fields are 16 bits */ 108 red >>= 8; /* The cmap fields are 16 bits */
101 green >>= 8; /* wide, but the harware colormap */ 109 green >>= 8; /* wide, but the hardware colormap */
102 blue >>= 8; /* registers are only 8 bits wide */ 110 blue >>= 8; /* registers are only 8 bits wide */
103 111
104 bt459_regs->addr_low = (__u8) regno; 112 mb();
105 bt459_regs->addr_hi = 0; 113 dac_write(par, BT459_ADDR_LO, regno);
106 bt459_regs->cmap = red; 114 dac_write(par, BT459_ADDR_HI, 0x00);
107 bt459_regs->cmap = green; 115 wmb();
108 bt459_regs->cmap = blue; 116 dac_write(par, BT459_CMAP, red);
117 wmb();
118 dac_write(par, BT459_CMAP, green);
119 wmb();
120 dac_write(par, BT459_CMAP, blue);
121
109 return 0; 122 return 0;
110} 123}
111 124
112static struct fb_ops pmagbafb_ops = { 125static struct fb_ops pmagbafb_ops = {
113 .owner = THIS_MODULE, 126 .owner = THIS_MODULE,
114 .fb_get_fix = gen_get_fix,
115 .fb_get_var = gen_get_var,
116 .fb_setcolreg = pmagbafb_setcolreg, 127 .fb_setcolreg = pmagbafb_setcolreg,
117 .fb_fillrect = cfb_fillrect, 128 .fb_fillrect = cfb_fillrect,
118 .fb_copyarea = cfb_copyarea, 129 .fb_copyarea = cfb_copyarea,
@@ -120,63 +131,133 @@ static struct fb_ops pmagbafb_ops = {
120 .fb_cursor = soft_cursor, 131 .fb_cursor = soft_cursor,
121}; 132};
122 133
123int __init pmagbafb_init_one(int slot) 134
135/*
136 * Turn the hardware cursor off.
137 */
138static void __init pmagbafb_erase_cursor(struct fb_info *info)
139{
140 struct pmagbafb_par *par = info->par;
141
142 mb();
143 dac_write(par, BT459_ADDR_LO, 0x00);
144 dac_write(par, BT459_ADDR_HI, 0x03);
145 wmb();
146 dac_write(par, BT459_DATA, 0x00);
147}
148
149
150static int __init pmagbafb_init_one(int slot)
124{ 151{
125 unsigned long base_addr = get_tc_base_addr(slot); 152 struct fb_info *info;
126 struct fb_info *info = &pmagba_fb_info[slot]; 153 struct pmagbafb_par *par;
127 struct display *disp = &pmagba_disp[slot]; 154 unsigned long base_addr;
128 155
129 printk("PMAG-BA framebuffer in slot %d\n", slot); 156 info = framebuffer_alloc(sizeof(struct pmagbafb_par), NULL);
130 /* 157 if (!info)
131 * Framebuffer display memory base address and friends 158 return -ENOMEM;
132 */ 159
133 pmagbafb_fix.smem_start = base_addr + PMAG_BA_ONBOARD_FBMEM_OFFSET; 160 par = info->par;
134 info->par = (base_addr + PMAG_BA_BT459_OFFSET); 161 par->slot = slot;
135 162 claim_tc_card(par->slot);
136 /* 163
137 * Configure the Bt459 RAM DAC 164 base_addr = get_tc_base_addr(par->slot);
138 */ 165
139 pmagbafb_erase_cursor((struct pmag_ba_ramdac_regs *) info->par); 166 par->next = root_pmagbafb_dev;
140 167 root_pmagbafb_dev = info;
141 /* 168
142 * Let there be consoles.. 169 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
143 */ 170 goto err_alloc;
171
144 info->fbops = &pmagbafb_ops; 172 info->fbops = &pmagbafb_ops;
173 info->fix = pmagbafb_fix;
145 info->var = pmagbafb_defined; 174 info->var = pmagbafb_defined;
146 info->fix = pmagbafb_fix;
147 info->screen_base = pmagbafb_fix.smem_start;
148 info->flags = FBINFO_DEFAULT; 175 info->flags = FBINFO_DEFAULT;
149 176
150 fb_alloc_cmap(&fb_info.cmap, 256, 0); 177 /* MMIO mapping setup. */
178 info->fix.mmio_start = base_addr;
179 par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
180 if (!par->mmio)
181 goto err_cmap;
182 par->dac = par->mmio + PMAG_BA_BT459;
183
184 /* Frame buffer mapping setup. */
185 info->fix.smem_start = base_addr + PMAG_BA_FBMEM;
186 info->screen_base = ioremap_nocache(info->fix.smem_start,
187 info->fix.smem_len);
188 if (!info->screen_base)
189 goto err_mmio_map;
190 info->screen_size = info->fix.smem_len;
191
192 pmagbafb_erase_cursor(info);
151 193
152 if (register_framebuffer(info) < 0) 194 if (register_framebuffer(info) < 0)
153 return 1; 195 goto err_smem_map;
196
197 pr_info("fb%d: %s frame buffer device in slot %d\n",
198 info->node, info->fix.id, par->slot);
199
154 return 0; 200 return 0;
201
202
203err_smem_map:
204 iounmap(info->screen_base);
205
206err_mmio_map:
207 iounmap(par->mmio);
208
209err_cmap:
210 fb_dealloc_cmap(&info->cmap);
211
212err_alloc:
213 root_pmagbafb_dev = par->next;
214 release_tc_card(par->slot);
215 framebuffer_release(info);
216 return -ENXIO;
155} 217}
156 218
157/* 219static void __exit pmagbafb_exit_one(void)
158 * Initialise the framebuffer 220{
159 */ 221 struct fb_info *info = root_pmagbafb_dev;
222 struct pmagbafb_par *par = info->par;
160 223
161int __init pmagbafb_init(void) 224 unregister_framebuffer(info);
225 iounmap(info->screen_base);
226 iounmap(par->mmio);
227 fb_dealloc_cmap(&info->cmap);
228 root_pmagbafb_dev = par->next;
229 release_tc_card(par->slot);
230 framebuffer_release(info);
231}
232
233
234/*
235 * Initialise the framebuffer.
236 */
237static int __init pmagbafb_init(void)
162{ 238{
163 int sid; 239 int count = 0;
164 int found = 0; 240 int slot;
165 241
166 if (fb_get_options("pmagbafb", NULL)) 242 if (fb_get_options("pmagbafb", NULL))
167 return -ENODEV; 243 return -ENXIO;
168 244
169 if (TURBOCHANNEL) { 245 while ((slot = search_tc_card("PMAG-BA")) >= 0) {
170 while ((sid = search_tc_card("PMAG-BA")) >= 0) { 246 if (pmagbafb_init_one(slot) < 0)
171 found = 1; 247 break;
172 claim_tc_card(sid); 248 count++;
173 pmagbafb_init_one(sid);
174 }
175 return found ? 0 : -ENODEV;
176 } else {
177 return -ENODEV;
178 } 249 }
250 return (count > 0) ? 0 : -ENXIO;
179} 251}
180 252
253static void __exit pmagbafb_exit(void)
254{
255 while (root_pmagbafb_dev)
256 pmagbafb_exit_one();
257}
258
259
181module_init(pmagbafb_init); 260module_init(pmagbafb_init);
261module_exit(pmagbafb_exit);
262
182MODULE_LICENSE("GPL"); 263MODULE_LICENSE("GPL");
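[Editorial note] The rewritten pmag-ba-fb.c no longer pokes a struct overlaid on the Bt459 RAMDAC registers; it ioremaps the board, keeps the mapping in a per-device pmagbafb_par, and goes through readb()/writeb() accessors with explicit barriers. pmagb-b-fb.c below follows the same pattern. A condensed sketch of the accessor idea; example_par and the helpers are illustrative names:

#include <linux/types.h>
#include <asm/io.h>

struct example_par {
	volatile u32 __iomem *dac;	/* ioremapped Bt459 register window */
};

/* Registers sit on 32-bit boundaries, so byte offset / 4 indexes the window. */
static inline void example_dac_write(struct example_par *par,
				     unsigned int reg, u8 v)
{
	writeb(v, par->dac + reg / 4);
}

static inline u8 example_dac_read(struct example_par *par, unsigned int reg)
{
	return readb(par->dac + reg / 4);
}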
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index d14eaee91cff..a483b13e117b 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -1,114 +1,128 @@
1/* 1/*
2 * linux/drivers/video/pmagb-b-fb.c 2 * linux/drivers/video/pmagb-b-fb.c
3 * 3 *
4 * PMAGB-B TurboChannel framebuffer card support ... derived from: 4 * PMAGB-B TURBOchannel Smart Frame Buffer (SFB) card support,
5 * derived from:
5 * "HP300 Topcat framebuffer support (derived from macfb of all things) 6 * "HP300 Topcat framebuffer support (derived from macfb of all things)
6 * Phil Blundell <philb@gnu.org> 1998", the original code can be 7 * Phil Blundell <philb@gnu.org> 1998", the original code can be
7 * found in the file hpfb.c in the same directory. 8 * found in the file hpfb.c in the same directory.
8 * 9 *
9 * DECstation related code Copyright (C) 1999, 2000, 2001 by 10 * DECstation related code Copyright (C) 1999, 2000, 2001 by
10 * Michael Engel <engel@unix-ag.org>, 11 * Michael Engel <engel@unix-ag.org>,
11 * Karsten Merker <merker@linuxtag.org> and 12 * Karsten Merker <merker@linuxtag.org> and
12 * Harald Koerfgen. 13 * Harald Koerfgen.
13 * This file is subject to the terms and conditions of the GNU General 14 * Copyright (c) 2005 Maciej W. Rozycki
14 * Public License. See the file COPYING in the main directory of this
15 * archive for more details.
16 * 15 *
16 * This file is subject to the terms and conditions of the GNU General
17 * Public License. See the file COPYING in the main directory of this
18 * archive for more details.
17 */ 19 */
18 20
19/* 21#include <linux/compiler.h>
20 * We currently only support the PMAGB-B in high resolution mode
21 * as I know of no way to detect low resolution mode set via jumper.
22 * KM, 2001/01/07
23 */
24
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/timer.h>
31#include <linux/mm.h>
32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/delay.h> 22#include <linux/delay.h>
35#include <linux/init.h> 23#include <linux/errno.h>
36#include <linux/fb.h> 24#include <linux/fb.h>
37#include <asm/bootinfo.h> 25#include <linux/init.h>
38#include <asm/dec/machtype.h> 26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/types.h>
29
30#include <asm/bug.h>
31#include <asm/io.h>
32#include <asm/system.h>
33
39#include <asm/dec/tc.h> 34#include <asm/dec/tc.h>
35
40#include <video/pmagb-b-fb.h> 36#include <video/pmagb-b-fb.h>
41 37
42struct pmagb_b_ramdac_regs { 38
43 unsigned char addr_low; 39struct pmagbbfb_par {
44 unsigned char pad0[3]; 40 struct fb_info *next;
45 unsigned char addr_hi; 41 volatile void __iomem *mmio;
46 unsigned char pad1[3]; 42 volatile void __iomem *smem;
47 unsigned char data; 43 volatile u32 __iomem *sfb;
48 unsigned char pad2[3]; 44 volatile u32 __iomem *dac;
49 unsigned char cmap; 45 unsigned int osc0;
46 unsigned int osc1;
47 int slot;
50}; 48};
51 49
52/*
53 * Max 3 TURBOchannel slots -> max 3 PMAGB-B :)
54 */
55static struct fb_info pmagbb_fb_info[3];
56 50
57static struct fb_var_screeninfo pmagbbfb_defined = { 51static struct fb_info *root_pmagbbfb_dev;
58 .xres = 1280, 52
59 .yres = 1024, 53static struct fb_var_screeninfo pmagbbfb_defined __initdata = {
60 .xres_virtual = 1280,
61 .yres_virtual = 1024,
62 .bits_per_pixel = 8, 54 .bits_per_pixel = 8,
63 .red.length = 8, 55 .red.length = 8,
64 .green.length = 8, 56 .green.length = 8,
65 .blue.length = 8, 57 .blue.length = 8,
66 .activate = FB_ACTIVATE_NOW, 58 .activate = FB_ACTIVATE_NOW,
67 .height = 274, 59 .height = -1,
68 .width = 195, 60 .width = -1,
69 .accel_flags = FB_ACCEL_NONE, 61 .accel_flags = FB_ACCEL_NONE,
62 .sync = FB_SYNC_ON_GREEN,
70 .vmode = FB_VMODE_NONINTERLACED, 63 .vmode = FB_VMODE_NONINTERLACED,
71}; 64};
72 65
73static struct fb_fix_screeninfo pmagbafb_fix = { 66static struct fb_fix_screeninfo pmagbbfb_fix __initdata = {
74 .id = "PMAGB-BA", 67 .id = "PMAGB-BA",
75 .smem_len = (1280 * 1024), 68 .smem_len = (2048 * 1024),
76 .type = FB_TYPE_PACKED_PIXELS, 69 .type = FB_TYPE_PACKED_PIXELS,
77 .visual = FB_VISUAL_PSEUDOCOLOR, 70 .visual = FB_VISUAL_PSEUDOCOLOR,
78 .line_length = 1280, 71 .mmio_len = PMAGB_B_FBMEM,
72};
73
74
75static inline void sfb_write(struct pmagbbfb_par *par, unsigned int reg, u32 v)
76{
77 writel(v, par->sfb + reg / 4);
79} 78}
80 79
81/* 80static inline u32 sfb_read(struct pmagbbfb_par *par, unsigned int reg)
82 * Turn hardware cursor off 81{
83 */ 82 return readl(par->sfb + reg / 4);
84void pmagbbfb_erase_cursor(struct pmagb_b_ramdac_regs *bt459_regs) 83}
84
85static inline void dac_write(struct pmagbbfb_par *par, unsigned int reg, u8 v)
85{ 86{
86 bt459_regs->addr_low = 0; 87 writeb(v, par->dac + reg / 4);
87 bt459_regs->addr_hi = 3;
88 bt459_regs->data = 0;
89} 88}
90 89
90static inline u8 dac_read(struct pmagbbfb_par *par, unsigned int reg)
91{
92 return readb(par->dac + reg / 4);
93}
94
95static inline void gp0_write(struct pmagbbfb_par *par, u32 v)
96{
97 writel(v, par->mmio + PMAGB_B_GP0);
98}
99
100
91/* 101/*
92 * Set the palette. 102 * Set the palette.
93 */ 103 */
94static int pmagbbfb_setcolreg(unsigned regno, unsigned red, unsigned green, 104static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
95 unsigned blue, unsigned transp, 105 unsigned int green, unsigned int blue,
96 struct fb_info *info) 106 unsigned int transp, struct fb_info *info)
97{ 107{
98 struct pmagb_b_ramdac_regs *bt459_regs = (struct pmagb_b_ramdac_regs *) info->par; 108 struct pmagbbfb_par *par = info->par;
99 109
100 if (regno >= info->cmap.len) 110 BUG_ON(regno >= info->cmap.len);
101 return 1;
102 111
103 red >>= 8; /* The cmap fields are 16 bits */ 112 red >>= 8; /* The cmap fields are 16 bits */
104 green >>= 8; /* wide, but the harware colormap */ 113 green >>= 8; /* wide, but the hardware colormap */
105 blue >>= 8; /* registers are only 8 bits wide */ 114 blue >>= 8; /* registers are only 8 bits wide */
106 115
107 bt459_regs->addr_low = (__u8) regno; 116 mb();
108 bt459_regs->addr_hi = 0; 117 dac_write(par, BT459_ADDR_LO, regno);
109 bt459_regs->cmap = red; 118 dac_write(par, BT459_ADDR_HI, 0x00);
110 bt459_regs->cmap = green; 119 wmb();
111 bt459_regs->cmap = blue; 120 dac_write(par, BT459_CMAP, red);
121 wmb();
122 dac_write(par, BT459_CMAP, green);
123 wmb();
124 dac_write(par, BT459_CMAP, blue);
125
112 return 0; 126 return 0;
113} 127}
114 128
@@ -121,62 +135,247 @@ static struct fb_ops pmagbbfb_ops = {
121 .fb_cursor = soft_cursor, 135 .fb_cursor = soft_cursor,
122}; 136};
123 137
124int __init pmagbbfb_init_one(int slot) 138
139/*
140 * Turn the hardware cursor off.
141 */
142static void __init pmagbbfb_erase_cursor(struct fb_info *info)
143{
144 struct pmagbbfb_par *par = info->par;
145
146 mb();
147 dac_write(par, BT459_ADDR_LO, 0x00);
148 dac_write(par, BT459_ADDR_HI, 0x03);
149 wmb();
150 dac_write(par, BT459_DATA, 0x00);
151}
152
153/*
154 * Set up screen parameters.
155 */
156static void __init pmagbbfb_screen_setup(struct fb_info *info)
157{
158 struct pmagbbfb_par *par = info->par;
159
160 info->var.xres = ((sfb_read(par, SFB_REG_VID_HOR) >>
161 SFB_VID_HOR_PIX_SHIFT) & SFB_VID_HOR_PIX_MASK) * 4;
162 info->var.xres_virtual = info->var.xres;
163 info->var.yres = (sfb_read(par, SFB_REG_VID_VER) >>
164 SFB_VID_VER_SL_SHIFT) & SFB_VID_VER_SL_MASK;
165 info->var.yres_virtual = info->var.yres;
166 info->var.left_margin = ((sfb_read(par, SFB_REG_VID_HOR) >>
167 SFB_VID_HOR_BP_SHIFT) &
168 SFB_VID_HOR_BP_MASK) * 4;
169 info->var.right_margin = ((sfb_read(par, SFB_REG_VID_HOR) >>
170 SFB_VID_HOR_FP_SHIFT) &
171 SFB_VID_HOR_FP_MASK) * 4;
172 info->var.upper_margin = (sfb_read(par, SFB_REG_VID_VER) >>
173 SFB_VID_VER_BP_SHIFT) & SFB_VID_VER_BP_MASK;
174 info->var.lower_margin = (sfb_read(par, SFB_REG_VID_VER) >>
175 SFB_VID_VER_FP_SHIFT) & SFB_VID_VER_FP_MASK;
176 info->var.hsync_len = ((sfb_read(par, SFB_REG_VID_HOR) >>
177 SFB_VID_HOR_SYN_SHIFT) &
178 SFB_VID_HOR_SYN_MASK) * 4;
179 info->var.vsync_len = (sfb_read(par, SFB_REG_VID_VER) >>
180 SFB_VID_VER_SYN_SHIFT) & SFB_VID_VER_SYN_MASK;
181
182 info->fix.line_length = info->var.xres;
183};
184
185/*
186 * Determine oscillator configuration.
187 */
188static void __init pmagbbfb_osc_setup(struct fb_info *info)
125{ 189{
126 unsigned long base_addr = get_tc_base_addr(slot); 190 static unsigned int pmagbbfb_freqs[] __initdata = {
127 struct fb_info *info = &pmagbb_fb_info[slot]; 191 130808, 119843, 104000, 92980, 74367, 72800,
128 192 69197, 66000, 65000, 50350, 36000, 32000, 25175
129 printk("PMAGB-BA framebuffer in slot %d\n", slot); 193 };
130 /* 194 struct pmagbbfb_par *par = info->par;
131 * Framebuffer display memory base address and friends 195 u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8;
132 */ 196 u32 freq0, freq1, freqtc = get_tc_speed() / 250;
133 pmagbbfb_fix.smem_start = base_addr + PMAGB_B_ONBOARD_FBMEM_OFFSET; 197 int i, j;
134 info->par = (base_addr + PMAGB_B_BT459_OFFSET); 198
135 199 gp0_write(par, 0); /* select Osc0 */
136 /* 200 for (j = 0; j < 16; j++) {
137 * Configure the Bt459 RAM DAC 201 mb();
138 */ 202 sfb_write(par, SFB_REG_TCCLK_COUNT, 0);
139 pmagbbfb_erase_cursor((struct pmagb_b_ramdac_regs *) info->par); 203 mb();
140 204 for (i = 0; i < 100; i++) { /* nominally max. 20.5us */
141 /* 205 if (sfb_read(par, SFB_REG_TCCLK_COUNT) == 0)
142 * Let there be consoles.. 206 break;
143 */ 207 udelay(1);
208 }
209 count0 += sfb_read(par, SFB_REG_VIDCLK_COUNT);
210 }
211
212 gp0_write(par, 1); /* select Osc1 */
213 for (j = 0; j < 16; j++) {
214 mb();
215 sfb_write(par, SFB_REG_TCCLK_COUNT, 0);
216
217 for (i = 0; i < 100; i++) { /* nominally max. 20.5us */
218 if (sfb_read(par, SFB_REG_TCCLK_COUNT) == 0)
219 break;
220 udelay(1);
221 }
222 count1 += sfb_read(par, SFB_REG_VIDCLK_COUNT);
223 }
224
225 freq0 = (freqtc * count0 + counttc / 2) / counttc;
226 par->osc0 = freq0;
227 if (freq0 >= pmagbbfb_freqs[0] - (pmagbbfb_freqs[0] + 32) / 64 &&
228 freq0 <= pmagbbfb_freqs[0] + (pmagbbfb_freqs[0] + 32) / 64)
229 par->osc0 = pmagbbfb_freqs[0];
230
231 freq1 = (par->osc0 * count1 + count0 / 2) / count0;
232 par->osc1 = freq1;
233 for (i = 0; i < sizeof(pmagbbfb_freqs) / sizeof(*pmagbbfb_freqs); i++)
234 if (freq1 >= pmagbbfb_freqs[i] -
235 (pmagbbfb_freqs[i] + 128) / 256 &&
236 freq1 <= pmagbbfb_freqs[i] +
237 (pmagbbfb_freqs[i] + 128) / 256) {
238 par->osc1 = pmagbbfb_freqs[i];
239 break;
240 }
241
242 if (par->osc0 - par->osc1 <= (par->osc0 + par->osc1 + 256) / 512 ||
243 par->osc1 - par->osc0 <= (par->osc0 + par->osc1 + 256) / 512)
244 par->osc1 = 0;
245
246 gp0_write(par, par->osc1 != 0); /* reselect OscX */
247
248 info->var.pixclock = par->osc1 ?
249 (1000000000 + par->osc1 / 2) / par->osc1 :
250 (1000000000 + par->osc0 / 2) / par->osc0;
251};
252
253
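The closing pixclock assignment above is the usual kHz-to-picoseconds conversion, rounded to nearest: with a 65.000 MHz oscillator selected (65000 in the kHz units of pmagbbfb_freqs[]), (1000000000 + 65000 / 2) / 65000 = 15385, i.e. a 15385 ps dot clock as fbdev expects in var.pixclock. The earlier (freqtc * count0 + counttc / 2) / counttc and (par->osc0 * count1 + count0 / 2) / count0 expressions are the same round-to-nearest idiom applied to the measured clock-tick ratios.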
254static int __init pmagbbfb_init_one(int slot)
255{
256 char freq0[12], freq1[12];
257 struct fb_info *info;
258 struct pmagbbfb_par *par;
259 unsigned long base_addr;
260 u32 vid_base;
261
262 info = framebuffer_alloc(sizeof(struct pmagbbfb_par), NULL);
263 if (!info)
264 return -ENOMEM;
265
266 par = info->par;
267 par->slot = slot;
268 claim_tc_card(par->slot);
269
270 base_addr = get_tc_base_addr(par->slot);
271
272 par->next = root_pmagbbfb_dev;
273 root_pmagbbfb_dev = info;
274
275 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
276 goto err_alloc;
277
144 info->fbops = &pmagbbfb_ops; 278 info->fbops = &pmagbbfb_ops;
145 info->var = pmagbbfb_defined;
146 info->fix = pmagbbfb_fix; 279 info->fix = pmagbbfb_fix;
147 info->screen_base = pmagbbfb_fix.smem_start; 280 info->var = pmagbbfb_defined;
148 info->flags = FBINFO_DEFAULT; 281 info->flags = FBINFO_DEFAULT;
149 282
150 fb_alloc_cmap(&fb_info.cmap, 256, 0); 283 /* MMIO mapping setup. */
284 info->fix.mmio_start = base_addr;
285 par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
286 if (!par->mmio)
287 goto err_cmap;
288 par->sfb = par->mmio + PMAGB_B_SFB;
289 par->dac = par->mmio + PMAGB_B_BT459;
290
291 /* Frame buffer mapping setup. */
292 info->fix.smem_start = base_addr + PMAGB_B_FBMEM;
293 par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
294 if (!par->smem)
295 goto err_mmio_map;
296 vid_base = sfb_read(par, SFB_REG_VID_BASE);
297 info->screen_base = (void __iomem *)par->smem + vid_base * 0x1000;
298 info->screen_size = info->fix.smem_len - 2 * vid_base * 0x1000;
299
300 pmagbbfb_erase_cursor(info);
301 pmagbbfb_screen_setup(info);
302 pmagbbfb_osc_setup(info);
151 303
152 if (register_framebuffer(info) < 0) 304 if (register_framebuffer(info) < 0)
153 return 1; 305 goto err_smem_map;
306
307 snprintf(freq0, sizeof(freq0), "%u.%03uMHz",
308 par->osc0 / 1000, par->osc0 % 1000);
309 snprintf(freq1, sizeof(freq1), "%u.%03uMHz",
310 par->osc1 / 1000, par->osc1 % 1000);
311
312 pr_info("fb%d: %s frame buffer device in slot %d\n",
313 info->node, info->fix.id, par->slot);
314 pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n",
315 info->node, freq0, par->osc1 ? freq1 : "disabled",
316 par->osc1 != 0);
317
154 return 0; 318 return 0;
319
320
321err_smem_map:
322 iounmap(par->smem);
323
324err_mmio_map:
325 iounmap(par->mmio);
326
327err_cmap:
328 fb_dealloc_cmap(&info->cmap);
329
330err_alloc:
331 root_pmagbbfb_dev = par->next;
332 release_tc_card(par->slot);
333 framebuffer_release(info);
334 return -ENXIO;
155} 335}
156 336
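The error path above follows the usual kernel goto-unwind convention: each failure point jumps to a label that releases only what has already been set up, in reverse order of acquisition. A generic, self-contained sketch of the idiom (example resources only, needs <linux/slab.h>; not driver code):

/* Sketch of the goto-unwind idiom, not part of the patch. */
static int unwind_sketch(void)
{
	void *a, *b;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		goto err_out;
	b = kmalloc(64, GFP_KERNEL);
	if (!b)
		goto err_free_a;

	/* ... use a and b; free or hand them off on success ... */
	kfree(b);
	kfree(a);
	return 0;

err_free_a:
	kfree(a);		/* undo completed steps in reverse order */
err_out:
	return -ENOMEM;
}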
157/* 337static void __exit pmagbbfb_exit_one(void)
158 * Initialise the framebuffer 338{
159 */ 339 struct fb_info *info = root_pmagbbfb_dev;
340 struct pmagbbfb_par *par = info->par;
341
342 unregister_framebuffer(info);
343 iounmap(par->smem);
344 iounmap(par->mmio);
345 fb_dealloc_cmap(&info->cmap);
346 root_pmagbbfb_dev = par->next;
347 release_tc_card(par->slot);
348 framebuffer_release(info);
349}
160 350
161int __init pmagbbfb_init(void) 351
352/*
353 * Initialise the framebuffer.
354 */
355static int __init pmagbbfb_init(void)
162{ 356{
163 int sid; 357 int count = 0;
164 int found = 0; 358 int slot;
165 359
166 if (fb_get_options("pmagbbfb", NULL)) 360 if (fb_get_options("pmagbbfb", NULL))
167 return -ENODEV; 361 return -ENXIO;
168 362
169 if (TURBOCHANNEL) { 363 while ((slot = search_tc_card("PMAGB-BA")) >= 0) {
170 while ((sid = search_tc_card("PMAGB-BA")) >= 0) { 364 if (pmagbbfb_init_one(slot) < 0)
171 found = 1; 365 break;
172 claim_tc_card(sid); 366 count++;
173 pmagbbfb_init_one(sid);
174 }
175 return found ? 0 : -ENODEV;
176 } else {
177 return -ENODEV;
178 } 367 }
368 return (count > 0) ? 0 : -ENXIO;
369}
370
371static void __exit pmagbbfb_exit(void)
372{
373 while (root_pmagbbfb_dev)
374 pmagbbfb_exit_one();
179} 375}
180 376
377
181module_init(pmagbbfb_init); 378module_init(pmagbbfb_init);
379module_exit(pmagbbfb_exit);
380
182MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 71b69da0c40d..162012bb9264 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -21,7 +21,6 @@
21 21
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/segment.h>
25#include <asm/system.h> 24#include <asm/system.h>
26#include <asm/q40_master.h> 25#include <asm/q40_master.h>
27#include <linux/fb.h> 26#include <linux/fb.h>
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c
index da1334dfd51d..77151d8e0766 100644
--- a/drivers/video/riva/rivafb-i2c.c
+++ b/drivers/video/riva/rivafb-i2c.c
@@ -92,14 +92,13 @@ static int riva_gpio_getsda(void* data)
92 return val; 92 return val;
93} 93}
94 94
95#define I2C_ALGO_RIVA 0x0e0000
96static int riva_setup_i2c_bus(struct riva_i2c_chan *chan, const char *name) 95static int riva_setup_i2c_bus(struct riva_i2c_chan *chan, const char *name)
97{ 96{
98 int rc; 97 int rc;
99 98
100 strcpy(chan->adapter.name, name); 99 strcpy(chan->adapter.name, name);
101 chan->adapter.owner = THIS_MODULE; 100 chan->adapter.owner = THIS_MODULE;
102 chan->adapter.id = I2C_ALGO_RIVA; 101 chan->adapter.id = I2C_HW_B_RIVA;
103 chan->adapter.algo_data = &chan->algo; 102 chan->adapter.algo_data = &chan->algo;
104 chan->adapter.dev.parent = &chan->par->pdev->dev; 103 chan->adapter.dev.parent = &chan->par->pdev->dev;
105 chan->algo.setsda = riva_gpio_setsda; 104 chan->algo.setsda = riva_gpio_setsda;
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 3848be2b9d2d..fa98d91c42eb 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -655,7 +655,7 @@ bail:
655} 655}
656 656
657#ifdef CONFIG_PM 657#ifdef CONFIG_PM
658static int s1d13xxxfb_suspend(struct device *dev, u32 state, u32 level) 658static int s1d13xxxfb_suspend(struct device *dev, pm_message_t state, u32 level)
659{ 659{
660 struct fb_info *info = dev_get_drvdata(dev); 660 struct fb_info *info = dev_get_drvdata(dev);
661 struct s1d13xxxfb_par *s1dfb = info->par; 661 struct s1d13xxxfb_par *s1dfb = info->par;
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 024a0cecff15..847698b5cfe7 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -137,7 +137,6 @@ static int prosavage_gpio_getsda(void* data)
137 return (0 != (GET_CR_DATA(chan->ioaddr) & PROSAVAGE_I2C_SDA_IN)); 137 return (0 != (GET_CR_DATA(chan->ioaddr) & PROSAVAGE_I2C_SDA_IN));
138} 138}
139 139
140#define I2C_ALGO_SAVAGE 0x0f0000
141static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan, 140static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
142 const char *name) 141 const char *name)
143{ 142{
@@ -147,7 +146,7 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
147 if (add_bus && chan->par) { 146 if (add_bus && chan->par) {
148 strcpy(chan->adapter.name, name); 147 strcpy(chan->adapter.name, name);
149 chan->adapter.owner = THIS_MODULE; 148 chan->adapter.owner = THIS_MODULE;
150 chan->adapter.id = I2C_ALGO_SAVAGE; 149 chan->adapter.id = I2C_HW_B_SAVAGE;
151 chan->adapter.algo_data = &chan->algo; 150 chan->adapter.algo_data = &chan->algo;
152 chan->adapter.dev.parent = &chan->par->pcidev->dev; 151 chan->adapter.dev.parent = &chan->par->pcidev->dev;
153 chan->algo.udelay = 40; 152 chan->algo.udelay = 40;
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index f4633d1891f1..117ad42f120d 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2110,7 +2110,6 @@ static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
2110 struct savagefb_par *par = (struct savagefb_par *)info->par; 2110 struct savagefb_par *par = (struct savagefb_par *)info->par;
2111 2111
2112 DBG("savagefb_suspend"); 2112 DBG("savagefb_suspend");
2113 printk(KERN_DEBUG "state: %u\n", state);
2114 2113
2115 acquire_console_sem(); 2114 acquire_console_sem();
2116 fb_set_suspend(info, pci_choose_state(dev, state)); 2115 fb_set_suspend(info, pci_choose_state(dev, state));
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index adcda697ea60..0030c071da8f 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -5,9 +5,15 @@
5 * 5 *
6 * Copyright (C) 2002, ATI Corp. 6 * Copyright (C) 2002, ATI Corp.
7 * Copyright (C) 2004-2005 Richard Purdie 7 * Copyright (C) 2004-2005 Richard Purdie
8 * Copyright (c) 2005 Ian Molton
8 * 9 *
9 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net> 10 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
10 * 11 *
12 * Generic platform support by Ian Molton <spyro@f2s.com>
13 * and Richard Purdie <rpurdie@rpsys.net>
14 *
15 * w32xx support by Ian Molton
16 *
11 * This program is free software; you can redistribute it and/or modify 17 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 18 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 19 * published by the Free Software Foundation.
@@ -21,7 +27,7 @@
21#include <linux/mm.h> 27#include <linux/mm.h>
22#include <linux/device.h> 28#include <linux/device.h>
23#include <linux/string.h> 29#include <linux/string.h>
24#include <linux/proc_fs.h> 30#include <linux/vmalloc.h>
25#include <asm/io.h> 31#include <asm/io.h>
26#include <asm/uaccess.h> 32#include <asm/uaccess.h>
27#include <video/w100fb.h> 33#include <video/w100fb.h>
@@ -30,114 +36,78 @@
30/* 36/*
31 * Prototypes 37 * Prototypes
32 */ 38 */
33static void w100fb_save_buffer(void);
34static void w100fb_clear_buffer(void);
35static void w100fb_restore_buffer(void);
36static void w100fb_clear_screen(u32 mode, long int offset);
37static void w100_resume(void);
38static void w100_suspend(u32 mode); 39static void w100_suspend(u32 mode);
39static void w100_init_qvga_rotation(u16 deg);
40static void w100_init_vga_rotation(u16 deg);
41static void w100_vsync(void); 40static void w100_vsync(void);
42static void w100_init_sharp_lcd(u32 mode); 41static void w100_hw_init(struct w100fb_par*);
43static void w100_pwm_setup(void); 42static void w100_pwm_setup(struct w100fb_par*);
44static void w100_InitExtMem(u32 mode); 43static void w100_init_clocks(struct w100fb_par*);
45static void w100_hw_init(void); 44static void w100_setup_memory(struct w100fb_par*);
46static u16 w100_set_fastsysclk(u16 Freq); 45static void w100_init_lcd(struct w100fb_par*);
47 46static void w100_set_dispregs(struct w100fb_par*);
48static void lcdtg_hw_init(u32 mode); 47static void w100_update_enable(void);
49static void lcdtg_lcd_change(u32 mode); 48static void w100_update_disable(void);
50static void lcdtg_resume(void); 49static void calc_hsync(struct w100fb_par *par);
51static void lcdtg_suspend(void); 50struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
52
53
54/* Register offsets & lengths */
55#define REMAPPED_FB_LEN 0x15ffff
56
57#define BITS_PER_PIXEL 16
58 51
59/* Pseudo palette size */ 52/* Pseudo palette size */
60#define MAX_PALETTES 16 53#define MAX_PALETTES 16
61 54
62/* for resolution change */
63#define LCD_MODE_INIT (-1)
64#define LCD_MODE_480 0
65#define LCD_MODE_320 1
66#define LCD_MODE_240 2
67#define LCD_MODE_640 3
68
69#define LCD_SHARP_QVGA 0
70#define LCD_SHARP_VGA 1
71
72#define LCD_MODE_PORTRAIT 0
73#define LCD_MODE_LANDSCAPE 1
74
75#define W100_SUSPEND_EXTMEM 0 55#define W100_SUSPEND_EXTMEM 0
76#define W100_SUSPEND_ALL 1 56#define W100_SUSPEND_ALL 1
77 57
78/* General frame buffer data structures */ 58#define BITS_PER_PIXEL 16
79struct w100fb_par {
80 u32 xres;
81 u32 yres;
82 int fastsysclk_mode;
83 int lcdMode;
84 int rotation_flag;
85 int blanking_flag;
86 int comadj;
87 int phadadj;
88};
89
90static struct w100fb_par *current_par;
91 59
92/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */ 60/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
93static void *remapped_base; 61static void *remapped_base;
94static void *remapped_regs; 62static void *remapped_regs;
95static void *remapped_fbuf; 63static void *remapped_fbuf;
96 64
97/* External Function */ 65#define REMAPPED_FB_LEN 0x15ffff
98static void(*w100fb_ssp_send)(u8 adrs, u8 data); 66
67/* This is the offset in the w100's address space we map the current
68 framebuffer memory to. We use the position of external memory as
69 we can remap internal memory to there if external isn't present. */
70#define W100_FB_BASE MEM_EXT_BASE_VALUE
71
99 72
100/* 73/*
101 * Sysfs functions 74 * Sysfs functions
102 */ 75 */
103 76static ssize_t flip_show(struct device *dev, struct device_attribute *attr, char *buf)
104static ssize_t rotation_show(struct device *dev, struct device_attribute *attr, char *buf)
105{ 77{
106 struct fb_info *info = dev_get_drvdata(dev); 78 struct fb_info *info = dev_get_drvdata(dev);
107 struct w100fb_par *par=info->par; 79 struct w100fb_par *par=info->par;
108 80
109 return sprintf(buf, "%d\n",par->rotation_flag); 81 return sprintf(buf, "%d\n",par->flip);
110} 82}
111 83
112static ssize_t rotation_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 84static ssize_t flip_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
113{ 85{
114 unsigned int rotate; 86 unsigned int flip;
115 struct fb_info *info = dev_get_drvdata(dev); 87 struct fb_info *info = dev_get_drvdata(dev);
116 struct w100fb_par *par=info->par; 88 struct w100fb_par *par=info->par;
117 89
118 rotate = simple_strtoul(buf, NULL, 10); 90 flip = simple_strtoul(buf, NULL, 10);
91
92 if (flip > 0)
93 par->flip = 1;
94 else
95 par->flip = 0;
119 96
120 if (rotate > 0) par->rotation_flag = 1; 97 w100_update_disable();
121 else par->rotation_flag = 0; 98 w100_set_dispregs(par);
99 w100_update_enable();
122 100
123 if (par->lcdMode == LCD_MODE_320) 101 calc_hsync(par);
124 w100_init_qvga_rotation(par->rotation_flag ? 270 : 90);
125 else if (par->lcdMode == LCD_MODE_240)
126 w100_init_qvga_rotation(par->rotation_flag ? 180 : 0);
127 else if (par->lcdMode == LCD_MODE_640)
128 w100_init_vga_rotation(par->rotation_flag ? 270 : 90);
129 else if (par->lcdMode == LCD_MODE_480)
130 w100_init_vga_rotation(par->rotation_flag ? 180 : 0);
131 102
132 return count; 103 return count;
133} 104}
134 105
135static DEVICE_ATTR(rotation, 0644, rotation_show, rotation_store); 106static DEVICE_ATTR(flip, 0644, flip_show, flip_store);
136 107
137static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 108static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
138{ 109{
139 unsigned long param; 110 unsigned long regs, param;
140 unsigned long regs;
141 regs = simple_strtoul(buf, NULL, 16); 111 regs = simple_strtoul(buf, NULL, 16);
142 param = readl(remapped_regs + regs); 112 param = readl(remapped_regs + regs);
143 printk("Read Register 0x%08lX: 0x%08lX\n", regs, param); 113 printk("Read Register 0x%08lX: 0x%08lX\n", regs, param);
@@ -148,8 +118,7 @@ static DEVICE_ATTR(reg_read, 0200, NULL, w100fb_reg_read);
148 118
149static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 119static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
150{ 120{
151 unsigned long regs; 121 unsigned long regs, param;
152 unsigned long param;
153 sscanf(buf, "%lx %lx", &regs, &param); 122 sscanf(buf, "%lx %lx", &regs, &param);
154 123
155 if (regs <= 0x2000) { 124 if (regs <= 0x2000) {
@@ -163,54 +132,56 @@ static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *att
163static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write); 132static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write);
164 133
165 134
166static ssize_t fastsysclk_show(struct device *dev, struct device_attribute *attr, char *buf) 135static ssize_t fastpllclk_show(struct device *dev, struct device_attribute *attr, char *buf)
167{ 136{
168 struct fb_info *info = dev_get_drvdata(dev); 137 struct fb_info *info = dev_get_drvdata(dev);
169 struct w100fb_par *par=info->par; 138 struct w100fb_par *par=info->par;
170 139
171 return sprintf(buf, "%d\n",par->fastsysclk_mode); 140 return sprintf(buf, "%d\n",par->fastpll_mode);
172} 141}
173 142
174static ssize_t fastsysclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 143static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
175{ 144{
176 int param;
177 struct fb_info *info = dev_get_drvdata(dev); 145 struct fb_info *info = dev_get_drvdata(dev);
178 struct w100fb_par *par=info->par; 146 struct w100fb_par *par=info->par;
179 147
180 param = simple_strtoul(buf, NULL, 10); 148 if (simple_strtoul(buf, NULL, 10) > 0) {
181 149 par->fastpll_mode=1;
182 if (param == 75) { 150 printk("w100fb: Using fast system clock (if possible)\n");
183 printk("Set fastsysclk %d\n", param); 151 } else {
184 par->fastsysclk_mode = param; 152 par->fastpll_mode=0;
185 w100_set_fastsysclk(par->fastsysclk_mode); 153 printk("w100fb: Using normal system clock\n");
186 } else if (param == 100) {
187 printk("Set fastsysclk %d\n", param);
188 par->fastsysclk_mode = param;
189 w100_set_fastsysclk(par->fastsysclk_mode);
190 } 154 }
155
156 w100_init_clocks(par);
157 calc_hsync(par);
158
191 return count; 159 return count;
192} 160}
193 161
194static DEVICE_ATTR(fastsysclk, 0644, fastsysclk_show, fastsysclk_store); 162static DEVICE_ATTR(fastpllclk, 0644, fastpllclk_show, fastpllclk_store);
195 163
196/* 164/*
197 * The touchscreen on this device needs certain information 165 * Some touchscreens need hsync information from the video driver to
198 * from the video driver to function correctly. We export it here. 166 * function correctly. We export it here.
199 */ 167 */
200int w100fb_get_xres(void) { 168unsigned long w100fb_get_hsynclen(struct device *dev)
201 return current_par->xres; 169{
202} 170 struct fb_info *info = dev_get_drvdata(dev);
171 struct w100fb_par *par=info->par;
203 172
204int w100fb_get_blanking(void) { 173 /* If display is blanked/suspended, hsync isn't active */
205 return current_par->blanking_flag; 174 if (par->blanked)
175 return 0;
176 else
177 return par->hsync_len;
206} 178}
179EXPORT_SYMBOL(w100fb_get_hsynclen);
207 180
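A hedged usage sketch of the new export: a touchscreen driver that holds a pointer to the w100fb device (how it obtains that pointer is platform-specific and not part of this patch) can query the current hsync length, treating 0 as "display blanked, nothing to synchronise against".

/* Illustrative only: fb_dev is assumed to be the w100fb's struct device. */
static void ts_use_hsync_sketch(struct device *fb_dev)
{
	unsigned long hs = w100fb_get_hsynclen(fb_dev);

	if (!hs)
		return;		/* blanked/suspended: hsync not running */

	/* ... use hs to time touch sampling relative to hsync ... */
}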
208int w100fb_get_fastsysclk(void) { 181static void w100fb_clear_screen(struct w100fb_par *par)
209 return current_par->fastsysclk_mode; 182{
183 memset_io(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), 0, (par->xres * par->yres * BITS_PER_PIXEL/8));
210} 184}
211EXPORT_SYMBOL(w100fb_get_xres);
212EXPORT_SYMBOL(w100fb_get_blanking);
213EXPORT_SYMBOL(w100fb_get_fastsysclk);
214 185
215 186
216/* 187/*
@@ -234,7 +205,6 @@ static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
234 * according to the RGB bitfield information. 205 * according to the RGB bitfield information.
235 */ 206 */
236 if (regno < MAX_PALETTES) { 207 if (regno < MAX_PALETTES) {
237
238 u32 *pal = info->pseudo_palette; 208 u32 *pal = info->pseudo_palette;
239 209
240 val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); 210 val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
@@ -250,115 +220,90 @@ static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
250 */ 220 */
251static int w100fb_blank(int blank_mode, struct fb_info *info) 221static int w100fb_blank(int blank_mode, struct fb_info *info)
252{ 222{
253 struct w100fb_par *par; 223 struct w100fb_par *par = info->par;
254 par=info->par; 224 struct w100_tg_info *tg = par->mach->tg;
255 225
256 switch(blank_mode) { 226 switch(blank_mode) {
257 227
258 case FB_BLANK_NORMAL: /* Normal blanking */ 228 case FB_BLANK_NORMAL: /* Normal blanking */
259 case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ 229 case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
260 case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ 230 case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
261 case FB_BLANK_POWERDOWN: /* Poweroff */ 231 case FB_BLANK_POWERDOWN: /* Poweroff */
262 if (par->blanking_flag == 0) { 232 if (par->blanked == 0) {
263 w100fb_save_buffer(); 233 if(tg && tg->suspend)
264 lcdtg_suspend(); 234 tg->suspend(par);
265 par->blanking_flag = 1; 235 par->blanked = 1;
266 } 236 }
267 break; 237 break;
268 238
269 case FB_BLANK_UNBLANK: /* Unblanking */ 239 case FB_BLANK_UNBLANK: /* Unblanking */
270 if (par->blanking_flag != 0) { 240 if (par->blanked != 0) {
271 w100fb_restore_buffer(); 241 if(tg && tg->resume)
272 lcdtg_resume(); 242 tg->resume(par);
273 par->blanking_flag = 0; 243 par->blanked = 0;
274 } 244 }
275 break; 245 break;
276 } 246 }
277 return 0; 247 return 0;
278} 248}
279 249
250
280/* 251/*
281 * Change the resolution by calling the appropriate hardware functions 252 * Change the resolution by calling the appropriate hardware functions
282 */ 253 */
283static void w100fb_changeres(int rotate_mode, u32 mode) 254static void w100fb_activate_var(struct w100fb_par *par)
284{ 255{
285 u16 rotation=0; 256 struct w100_tg_info *tg = par->mach->tg;
286
287 switch(rotate_mode) {
288 case LCD_MODE_LANDSCAPE:
289 rotation=(current_par->rotation_flag ? 270 : 90);
290 break;
291 case LCD_MODE_PORTRAIT:
292 rotation=(current_par->rotation_flag ? 180 : 0);
293 break;
294 }
295 257
296 w100_pwm_setup(); 258 w100_pwm_setup(par);
297 switch(mode) { 259 w100_setup_memory(par);
298 case LCD_SHARP_QVGA: 260 w100_init_clocks(par);
299 w100_vsync(); 261 w100fb_clear_screen(par);
300 w100_suspend(W100_SUSPEND_EXTMEM); 262 w100_vsync();
301 w100_init_sharp_lcd(LCD_SHARP_QVGA); 263
302 w100_init_qvga_rotation(rotation); 264 w100_update_disable();
303 w100_InitExtMem(LCD_SHARP_QVGA); 265 w100_init_lcd(par);
304 w100fb_clear_screen(LCD_SHARP_QVGA, 0); 266 w100_set_dispregs(par);
305 lcdtg_lcd_change(LCD_SHARP_QVGA); 267 w100_update_enable();
306 break; 268
307 case LCD_SHARP_VGA: 269 calc_hsync(par);
308 w100fb_clear_screen(LCD_SHARP_QVGA, 0); 270
309 writel(0xBFFFA000, remapped_regs + mmMC_EXT_MEM_LOCATION); 271 if (!par->blanked && tg && tg->change)
310 w100_InitExtMem(LCD_SHARP_VGA); 272 tg->change(par);
311 w100fb_clear_screen(LCD_SHARP_VGA, 0x200000);
312 w100_vsync();
313 w100_init_sharp_lcd(LCD_SHARP_VGA);
314 if (rotation != 0)
315 w100_init_vga_rotation(rotation);
316 lcdtg_lcd_change(LCD_SHARP_VGA);
317 break;
318 }
319} 273}
320 274
321/* 275
322 * Set up the display for the fb subsystem 276/* Select the smallest mode that allows the desired resolution to be
277 * displayed. If desired, the x and y parameters can be rounded up to
278 * match the selected mode.
323 */ 279 */
324static void w100fb_activate_var(struct fb_info *info) 280static struct w100_mode *w100fb_get_mode(struct w100fb_par *par, unsigned int *x, unsigned int *y, int saveval)
325{ 281{
326 u32 temp32; 282 struct w100_mode *mode = NULL;
327 struct w100fb_par *par=info->par; 283 struct w100_mode *modelist = par->mach->modelist;
328 struct fb_var_screeninfo *var = &info->var; 284 unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
285 unsigned int i;
286
287 for (i = 0 ; i < par->mach->num_modes ; i++) {
288 if (modelist[i].xres >= *x && modelist[i].yres >= *y &&
289 modelist[i].xres < best_x && modelist[i].yres < best_y) {
290 best_x = modelist[i].xres;
291 best_y = modelist[i].yres;
292 mode = &modelist[i];
293 } else if(modelist[i].xres >= *y && modelist[i].yres >= *x &&
294 modelist[i].xres < best_y && modelist[i].yres < best_x) {
295 best_x = modelist[i].yres;
296 best_y = modelist[i].xres;
297 mode = &modelist[i];
298 }
299 }
329 300
330 /* Set the hardware to 565 */ 301 if (mode && saveval) {
331 temp32 = readl(remapped_regs + mmDISP_DEBUG2); 302 *x = best_x;
332 temp32 &= 0xff7fffff; 303 *y = best_y;
333 temp32 |= 0x00800000; 304 }
334 writel(temp32, remapped_regs + mmDISP_DEBUG2);
335 305
336 if (par->lcdMode == LCD_MODE_INIT) { 306 return mode;
337 w100_init_sharp_lcd(LCD_SHARP_VGA);
338 w100_init_vga_rotation(par->rotation_flag ? 270 : 90);
339 par->lcdMode = LCD_MODE_640;
340 lcdtg_hw_init(LCD_SHARP_VGA);
341 } else if (var->xres == 320 && var->yres == 240) {
342 if (par->lcdMode != LCD_MODE_320) {
343 w100fb_changeres(LCD_MODE_LANDSCAPE, LCD_SHARP_QVGA);
344 par->lcdMode = LCD_MODE_320;
345 }
346 } else if (var->xres == 240 && var->yres == 320) {
347 if (par->lcdMode != LCD_MODE_240) {
348 w100fb_changeres(LCD_MODE_PORTRAIT, LCD_SHARP_QVGA);
349 par->lcdMode = LCD_MODE_240;
350 }
351 } else if (var->xres == 640 && var->yres == 480) {
352 if (par->lcdMode != LCD_MODE_640) {
353 w100fb_changeres(LCD_MODE_LANDSCAPE, LCD_SHARP_VGA);
354 par->lcdMode = LCD_MODE_640;
355 }
356 } else if (var->xres == 480 && var->yres == 640) {
357 if (par->lcdMode != LCD_MODE_480) {
358 w100fb_changeres(LCD_MODE_PORTRAIT, LCD_SHARP_VGA);
359 par->lcdMode = LCD_MODE_480;
360 }
361 } else printk(KERN_ERR "W100FB: Resolution error!\n");
362} 307}
363 308
364 309
@@ -366,31 +311,19 @@ static void w100fb_activate_var(struct fb_info *info)
366 * w100fb_check_var(): 311 * w100fb_check_var():
367 * Get the video params out of 'var'. If a value doesn't fit, round it up, 312 * Get the video params out of 'var'. If a value doesn't fit, round it up,
368 * if it's too big, return -EINVAL. 313 * if it's too big, return -EINVAL.
369 *
370 */ 314 */
371static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 315static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
372{ 316{
373 if (var->xres < var->yres) { /* Portrait mode */ 317 struct w100fb_par *par=info->par;
374 if ((var->xres > 480) || (var->yres > 640)) { 318
375 return -EINVAL; 319 if(!w100fb_get_mode(par, &var->xres, &var->yres, 1))
376 } else if ((var->xres > 240) || (var->yres > 320)) { 320 return -EINVAL;
377 var->xres = 480; 321
378 var->yres = 640; 322 if (par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (par->mach->mem->size+1)))
379 } else { 323 return -EINVAL;
380 var->xres = 240; 324
381 var->yres = 320; 325 if (!par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)))
382 } 326 return -EINVAL;
383 } else { /* Landscape mode */
384 if ((var->xres > 640) || (var->yres > 480)) {
385 return -EINVAL;
386 } else if ((var->xres > 320) || (var->yres > 240)) {
387 var->xres = 640;
388 var->yres = 480;
389 } else {
390 var->xres = 320;
391 var->yres = 240;
392 }
393 }
394 327
395 var->xres_virtual = max(var->xres_virtual, var->xres); 328 var->xres_virtual = max(var->xres_virtual, var->xres);
396 var->yres_virtual = max(var->yres_virtual, var->yres); 329 var->yres_virtual = max(var->yres_virtual, var->yres);
@@ -409,13 +342,11 @@ static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
409 var->transp.offset = var->transp.length = 0; 342 var->transp.offset = var->transp.length = 0;
410 343
411 var->nonstd = 0; 344 var->nonstd = 0;
412
413 var->height = -1; 345 var->height = -1;
414 var->width = -1; 346 var->width = -1;
415 var->vmode = FB_VMODE_NONINTERLACED; 347 var->vmode = FB_VMODE_NONINTERLACED;
416
417 var->sync = 0; 348 var->sync = 0;
418 var->pixclock = 0x04; /* 171521; */ 349 var->pixclock = 0x04; /* 171521; */
419 350
420 return 0; 351 return 0;
421} 352}
@@ -430,274 +361,286 @@ static int w100fb_set_par(struct fb_info *info)
430{ 361{
431 struct w100fb_par *par=info->par; 362 struct w100fb_par *par=info->par;
432 363
433 par->xres = info->var.xres; 364 if (par->xres != info->var.xres || par->yres != info->var.yres) {
434 par->yres = info->var.yres; 365 par->xres = info->var.xres;
435 366 par->yres = info->var.yres;
436 info->fix.visual = FB_VISUAL_TRUECOLOR; 367 par->mode = w100fb_get_mode(par, &par->xres, &par->yres, 0);
437
438 info->fix.ypanstep = 0;
439 info->fix.ywrapstep = 0;
440 368
441 if (par->blanking_flag) 369 info->fix.visual = FB_VISUAL_TRUECOLOR;
442 w100fb_clear_buffer(); 370 info->fix.ypanstep = 0;
371 info->fix.ywrapstep = 0;
372 info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
443 373
444 w100fb_activate_var(info); 374 if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
375 par->extmem_active = 1;
376 info->fix.smem_len = par->mach->mem->size+1;
377 } else {
378 par->extmem_active = 0;
379 info->fix.smem_len = MEM_INT_SIZE+1;
380 }
445 381
446 if (par->lcdMode == LCD_MODE_480) { 382 w100fb_activate_var(par);
447 info->fix.line_length = (480 * BITS_PER_PIXEL) / 8;
448 info->fix.smem_len = 0x200000;
449 } else if (par->lcdMode == LCD_MODE_320) {
450 info->fix.line_length = (320 * BITS_PER_PIXEL) / 8;
451 info->fix.smem_len = 0x60000;
452 } else if (par->lcdMode == LCD_MODE_240) {
453 info->fix.line_length = (240 * BITS_PER_PIXEL) / 8;
454 info->fix.smem_len = 0x60000;
455 } else if (par->lcdMode == LCD_MODE_INIT || par->lcdMode == LCD_MODE_640) {
456 info->fix.line_length = (640 * BITS_PER_PIXEL) / 8;
457 info->fix.smem_len = 0x200000;
458 } 383 }
459
460 return 0; 384 return 0;
461} 385}
462 386
463 387
464/* 388/*
465 * Frame buffer operations 389 * Frame buffer operations
466 */ 390 */
467static struct fb_ops w100fb_ops = { 391static struct fb_ops w100fb_ops = {
468 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
469 .fb_check_var = w100fb_check_var, 393 .fb_check_var = w100fb_check_var,
470 .fb_set_par = w100fb_set_par, 394 .fb_set_par = w100fb_set_par,
471 .fb_setcolreg = w100fb_setcolreg, 395 .fb_setcolreg = w100fb_setcolreg,
472 .fb_blank = w100fb_blank, 396 .fb_blank = w100fb_blank,
473 .fb_fillrect = cfb_fillrect, 397 .fb_fillrect = cfb_fillrect,
474 .fb_copyarea = cfb_copyarea, 398 .fb_copyarea = cfb_copyarea,
475 .fb_imageblit = cfb_imageblit, 399 .fb_imageblit = cfb_imageblit,
476 .fb_cursor = soft_cursor, 400 .fb_cursor = soft_cursor,
477}; 401};
478 402
479 403#ifdef CONFIG_PM
480static void w100fb_clear_screen(u32 mode, long int offset) 404static void w100fb_save_vidmem(struct w100fb_par *par)
481{ 405{
482 int i, numPix = 0; 406 int memsize;
483
484 if (mode == LCD_SHARP_VGA)
485 numPix = 640 * 480;
486 else if (mode == LCD_SHARP_QVGA)
487 numPix = 320 * 240;
488 407
489 for (i = 0; i < numPix; i++) 408 if (par->extmem_active) {
490 writew(0xffff, remapped_fbuf + offset + (2*i)); 409 memsize=par->mach->mem->size;
491} 410 par->saved_extmem = vmalloc(memsize);
492 411 if (par->saved_extmem)
493 412 memcpy_fromio(par->saved_extmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
494/* Need to split up the buffers to stay within the limits of kmalloc */
495#define W100_BUF_NUM 6
496static uint32_t *gSaveImagePtr[W100_BUF_NUM] = { NULL };
497
498static void w100fb_save_buffer(void)
499{
500 int i, j, bufsize;
501
502 bufsize=(current_par->xres * current_par->yres * BITS_PER_PIXEL / 8) / W100_BUF_NUM;
503 for (i = 0; i < W100_BUF_NUM; i++) {
504 if (gSaveImagePtr[i] == NULL)
505 gSaveImagePtr[i] = kmalloc(bufsize, GFP_KERNEL);
506 if (gSaveImagePtr[i] == NULL) {
507 w100fb_clear_buffer();
508 printk(KERN_WARNING "can't alloc pre-off image buffer %d\n", i);
509 break;
510 }
511 for (j = 0; j < bufsize/4; j++)
512 *(gSaveImagePtr[i] + j) = readl(remapped_fbuf + (bufsize*i) + j*4);
513 } 413 }
414 memsize=MEM_INT_SIZE;
415 par->saved_intmem = vmalloc(memsize);
416 if (par->saved_intmem && par->extmem_active)
417 memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), memsize);
418 else if (par->saved_intmem)
419 memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
514} 420}
515 421
516 422static void w100fb_restore_vidmem(struct w100fb_par *par)
517static void w100fb_restore_buffer(void)
518{ 423{
519 int i, j, bufsize; 424 int memsize;
520 425
521 bufsize=(current_par->xres * current_par->yres * BITS_PER_PIXEL / 8) / W100_BUF_NUM; 426 if (par->extmem_active && par->saved_extmem) {
522 for (i = 0; i < W100_BUF_NUM; i++) { 427 memsize=par->mach->mem->size;
523 if (gSaveImagePtr[i] == NULL) { 428 memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
524 printk(KERN_WARNING "can't find pre-off image buffer %d\n", i); 429 vfree(par->saved_extmem);
525 w100fb_clear_buffer();
526 break;
527 }
528 for (j = 0; j < (bufsize/4); j++)
529 writel(*(gSaveImagePtr[i] + j),remapped_fbuf + (bufsize*i) + (j*4));
530 kfree(gSaveImagePtr[i]);
531 gSaveImagePtr[i] = NULL;
532 } 430 }
533} 431 if (par->saved_intmem) {
534 432 memsize=MEM_INT_SIZE;
535 433 if (par->extmem_active)
536static void w100fb_clear_buffer(void) 434 memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), par->saved_intmem, memsize);
537{ 435 else
538 int i; 436 memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
539 for (i = 0; i < W100_BUF_NUM; i++) { 437 vfree(par->saved_intmem);
540 kfree(gSaveImagePtr[i]);
541 gSaveImagePtr[i] = NULL;
542 } 438 }
543} 439}
544 440
545 441static int w100fb_suspend(struct device *dev, pm_message_t state, uint32_t level)
546#ifdef CONFIG_PM
547static int w100fb_suspend(struct device *dev, pm_message_t state, u32 level)
548{ 442{
549 if (level == SUSPEND_POWER_DOWN) { 443 if (level == SUSPEND_POWER_DOWN) {
550 struct fb_info *info = dev_get_drvdata(dev); 444 struct fb_info *info = dev_get_drvdata(dev);
551 struct w100fb_par *par=info->par; 445 struct w100fb_par *par=info->par;
446 struct w100_tg_info *tg = par->mach->tg;
552 447
553 w100fb_save_buffer(); 448 w100fb_save_vidmem(par);
554 lcdtg_suspend(); 449 if(tg && tg->suspend)
450 tg->suspend(par);
555 w100_suspend(W100_SUSPEND_ALL); 451 w100_suspend(W100_SUSPEND_ALL);
556 par->blanking_flag = 1; 452 par->blanked = 1;
557 } 453 }
558 return 0; 454 return 0;
559} 455}
560 456
561static int w100fb_resume(struct device *dev, u32 level) 457static int w100fb_resume(struct device *dev, uint32_t level)
562{ 458{
563 if (level == RESUME_POWER_ON) { 459 if (level == RESUME_POWER_ON) {
564 struct fb_info *info = dev_get_drvdata(dev); 460 struct fb_info *info = dev_get_drvdata(dev);
565 struct w100fb_par *par=info->par; 461 struct w100fb_par *par=info->par;
566 462 struct w100_tg_info *tg = par->mach->tg;
567 w100_resume(); 463
568 w100fb_restore_buffer(); 464 w100_hw_init(par);
569 lcdtg_resume(); 465 w100fb_activate_var(par);
570 par->blanking_flag = 0; 466 w100fb_restore_vidmem(par);
467 if(tg && tg->resume)
468 tg->resume(par);
469 par->blanked = 0;
571 } 470 }
572 return 0; 471 return 0;
573} 472}
574#else 473#else
575#define w100fb_suspend NULL 474#define w100fb_suspend NULL
576#define w100fb_resume NULL 475#define w100fb_resume NULL
577#endif 476#endif
578 477
579 478
580int __init w100fb_probe(struct device *dev) 479int __init w100fb_probe(struct device *dev)
581{ 480{
481 int err = -EIO;
582 struct w100fb_mach_info *inf; 482 struct w100fb_mach_info *inf;
583 struct fb_info *info; 483 struct fb_info *info = NULL;
584 struct w100fb_par *par; 484 struct w100fb_par *par;
585 struct platform_device *pdev = to_platform_device(dev); 485 struct platform_device *pdev = to_platform_device(dev);
586 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 486 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
487 unsigned int chip_id;
587 488
588 if (!mem) 489 if (!mem)
589 return -EINVAL; 490 return -EINVAL;
590 491
591 /* remap the areas we're going to use */ 492 /* Remap the chip base address */
592 remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN); 493 remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
593 if (remapped_base == NULL) 494 if (remapped_base == NULL)
594 return -EIO; 495 goto out;
595 496
497 /* Map the register space */
596 remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN); 498 remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
597 if (remapped_regs == NULL) { 499 if (remapped_regs == NULL)
598 iounmap(remapped_base); 500 goto out;
599 return -EIO; 501
502 /* Identify the chip */
503 printk("Found ");
504 chip_id = readl(remapped_regs + mmCHIP_ID);
505 switch(chip_id) {
506 case CHIP_ID_W100: printk("w100"); break;
507 case CHIP_ID_W3200: printk("w3200"); break;
508 case CHIP_ID_W3220: printk("w3220"); break;
509 default:
510 printk("Unknown imageon chip ID\n");
511 err = -ENODEV;
512 goto out;
600 } 513 }
514 printk(" at 0x%08lx.\n", mem->start+W100_CFG_BASE);
601 515
602 remapped_fbuf = ioremap_nocache(mem->start+MEM_EXT_BASE_VALUE, REMAPPED_FB_LEN); 516 /* Remap the framebuffer */
603 if (remapped_fbuf == NULL) { 517 remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
604 iounmap(remapped_base); 518 if (remapped_fbuf == NULL)
605 iounmap(remapped_regs); 519 goto out;
606 return -EIO;
607 }
608 520
609 info=framebuffer_alloc(sizeof(struct w100fb_par), dev); 521 info=framebuffer_alloc(sizeof(struct w100fb_par), dev);
610 if (!info) { 522 if (!info) {
611 iounmap(remapped_base); 523 err = -ENOMEM;
612 iounmap(remapped_regs); 524 goto out;
613 iounmap(remapped_fbuf);
614 return -ENOMEM;
615 } 525 }
616 526
617 info->device=dev;
618 par = info->par; 527 par = info->par;
619 current_par=info->par;
620 dev_set_drvdata(dev, info); 528 dev_set_drvdata(dev, info);
621 529
622 inf = dev->platform_data; 530 inf = dev->platform_data;
623 par->phadadj = inf->phadadj; 531 par->chip_id = chip_id;
624 par->comadj = inf->comadj; 532 par->mach = inf;
625 par->fastsysclk_mode = 75; 533 par->fastpll_mode = 0;
626 par->lcdMode = LCD_MODE_INIT; 534 par->blanked = 0;
627 par->rotation_flag=0; 535
628 par->blanking_flag=0; 536 par->pll_table=w100_get_xtal_table(inf->xtal_freq);
629 w100fb_ssp_send = inf->w100fb_ssp_send; 537 if (!par->pll_table) {
630 538 printk(KERN_ERR "No matching Xtal definition found\n");
631 w100_hw_init(); 539 err = -EINVAL;
632 w100_pwm_setup(); 540 goto out;
541 }
633 542
634 info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL); 543 info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL);
635 if (!info->pseudo_palette) { 544 if (!info->pseudo_palette) {
636 iounmap(remapped_base); 545 err = -ENOMEM;
637 iounmap(remapped_regs); 546 goto out;
638 iounmap(remapped_fbuf);
639 return -ENOMEM;
640 } 547 }
641 548
642 info->fbops = &w100fb_ops; 549 info->fbops = &w100fb_ops;
643 info->flags = FBINFO_DEFAULT; 550 info->flags = FBINFO_DEFAULT;
644 info->node = -1; 551 info->node = -1;
645 info->screen_base = remapped_fbuf; 552 info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
646 info->screen_size = REMAPPED_FB_LEN; 553 info->screen_size = REMAPPED_FB_LEN;
647 554
648 info->var.xres = 640; 555 strcpy(info->fix.id, "w100fb");
556 info->fix.type = FB_TYPE_PACKED_PIXELS;
557 info->fix.type_aux = 0;
558 info->fix.accel = FB_ACCEL_NONE;
559 info->fix.smem_start = mem->start+W100_FB_BASE;
560 info->fix.mmio_start = mem->start+W100_REG_BASE;
561 info->fix.mmio_len = W100_REG_LEN;
562
563 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
564 err = -ENOMEM;
565 goto out;
566 }
567
568 par->mode = &inf->modelist[0];
569 if(inf->init_mode & INIT_MODE_ROTATED) {
570 info->var.xres = par->mode->yres;
571 info->var.yres = par->mode->xres;
572 }
573 else {
574 info->var.xres = par->mode->xres;
575 info->var.yres = par->mode->yres;
576 }
577
578 if(inf->init_mode &= INIT_MODE_FLIPPED)
579 par->flip = 1;
580 else
581 par->flip = 0;
582
649 info->var.xres_virtual = info->var.xres; 583 info->var.xres_virtual = info->var.xres;
650 info->var.yres = 480;
651 info->var.yres_virtual = info->var.yres; 584 info->var.yres_virtual = info->var.yres;
652 info->var.pixclock = 0x04; /* 171521; */ 585 info->var.pixclock = 0x04; /* 171521; */
653 info->var.sync = 0; 586 info->var.sync = 0;
654 info->var.grayscale = 0; 587 info->var.grayscale = 0;
655 info->var.xoffset = info->var.yoffset = 0; 588 info->var.xoffset = info->var.yoffset = 0;
656 info->var.accel_flags = 0; 589 info->var.accel_flags = 0;
657 info->var.activate = FB_ACTIVATE_NOW; 590 info->var.activate = FB_ACTIVATE_NOW;
658 591
659 strcpy(info->fix.id, "w100fb"); 592 w100_hw_init(par);
660 info->fix.type = FB_TYPE_PACKED_PIXELS; 593
661 info->fix.type_aux = 0; 594 if (w100fb_check_var(&info->var, info) < 0) {
662 info->fix.accel = FB_ACCEL_NONE; 595 err = -EINVAL;
663 info->fix.smem_start = mem->start+MEM_EXT_BASE_VALUE; 596 goto out;
664 info->fix.mmio_start = mem->start+W100_REG_BASE; 597 }
665 info->fix.mmio_len = W100_REG_LEN;
666 598
667 w100fb_check_var(&info->var, info);
668 w100fb_set_par(info); 599 w100fb_set_par(info);
669 600
670 if (register_framebuffer(info) < 0) { 601 if (register_framebuffer(info) < 0) {
671 kfree(info->pseudo_palette); 602 err = -EINVAL;
672 iounmap(remapped_base); 603 goto out;
673 iounmap(remapped_regs);
674 iounmap(remapped_fbuf);
675 return -EINVAL;
676 } 604 }
677 605
678 device_create_file(dev, &dev_attr_fastsysclk); 606 device_create_file(dev, &dev_attr_fastpllclk);
679 device_create_file(dev, &dev_attr_reg_read); 607 device_create_file(dev, &dev_attr_reg_read);
680 device_create_file(dev, &dev_attr_reg_write); 608 device_create_file(dev, &dev_attr_reg_write);
681 device_create_file(dev, &dev_attr_rotation); 609 device_create_file(dev, &dev_attr_flip);
682 610
683 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); 611 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
684 return 0; 612 return 0;
613out:
614 fb_dealloc_cmap(&info->cmap);
615 kfree(info->pseudo_palette);
616 if (remapped_fbuf != NULL)
617 iounmap(remapped_fbuf);
618 if (remapped_regs != NULL)
619 iounmap(remapped_regs);
620 if (remapped_base != NULL)
621 iounmap(remapped_base);
622 if (info)
623 framebuffer_release(info);
624 return err;
685} 625}
686 626
687 627
688static int w100fb_remove(struct device *dev) 628static int w100fb_remove(struct device *dev)
689{ 629{
690 struct fb_info *info = dev_get_drvdata(dev); 630 struct fb_info *info = dev_get_drvdata(dev);
631 struct w100fb_par *par=info->par;
691 632
692 device_remove_file(dev, &dev_attr_fastsysclk); 633 device_remove_file(dev, &dev_attr_fastpllclk);
693 device_remove_file(dev, &dev_attr_reg_read); 634 device_remove_file(dev, &dev_attr_reg_read);
694 device_remove_file(dev, &dev_attr_reg_write); 635 device_remove_file(dev, &dev_attr_reg_write);
695 device_remove_file(dev, &dev_attr_rotation); 636 device_remove_file(dev, &dev_attr_flip);
696 637
697 unregister_framebuffer(info); 638 unregister_framebuffer(info);
698 639
699 w100fb_clear_buffer(); 640 vfree(par->saved_intmem);
641 vfree(par->saved_extmem);
700 kfree(info->pseudo_palette); 642 kfree(info->pseudo_palette);
643 fb_dealloc_cmap(&info->cmap);
701 644
702 iounmap(remapped_base); 645 iounmap(remapped_base);
703 iounmap(remapped_regs); 646 iounmap(remapped_regs);
@@ -721,10 +664,54 @@ static void w100_soft_reset(void)
721 udelay(100); 664 udelay(100);
722} 665}
723 666
667static void w100_update_disable(void)
668{
669 union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
670
671 /* Prevent display updates */
672 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
673 disp_db_buf_wr_cntl.f.update_db_buf = 0;
674 disp_db_buf_wr_cntl.f.en_db_buf = 0;
675 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
676}
677
678static void w100_update_enable(void)
679{
680 union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
681
682 /* Enable display updates */
683 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
684 disp_db_buf_wr_cntl.f.update_db_buf = 1;
685 disp_db_buf_wr_cntl.f.en_db_buf = 1;
686 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
687}
688
689unsigned long w100fb_gpio_read(int port)
690{
691 unsigned long value;
692
693 if (port==W100_GPIO_PORT_A)
694 value = readl(remapped_regs + mmGPIO_DATA);
695 else
696 value = readl(remapped_regs + mmGPIO_DATA2);
697
698 return value;
699}
700
701void w100fb_gpio_write(int port, unsigned long value)
702{
703 if (port==W100_GPIO_PORT_A)
704 value = writel(value, remapped_regs + mmGPIO_DATA);
705 else
706 value = writel(value, remapped_regs + mmGPIO_DATA2);
707}
708EXPORT_SYMBOL(w100fb_gpio_read);
709EXPORT_SYMBOL(w100fb_gpio_write);
710
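A hedged usage sketch of the new GPIO accessors: board or timing-generator code can do a read-modify-write on a w100 GPIO port as below (W100_GPIO_PORT_A is the port selector the accessors above already test for; the helper name and the choice of bit are illustrative only).

/* Illustrative only: drive one GPIO line on port A high. */
static void w100_gpio_set_bit_sketch(unsigned long bit)
{
	unsigned long val = w100fb_gpio_read(W100_GPIO_PORT_A);

	w100fb_gpio_write(W100_GPIO_PORT_A, val | bit);
}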
724/* 711/*
725 * Initialization of critical w100 hardware 712 * Initialization of critical w100 hardware
726 */ 713 */
727static void w100_hw_init(void) 714static void w100_hw_init(struct w100fb_par *par)
728{ 715{
729 u32 temp32; 716 u32 temp32;
730 union cif_cntl_u cif_cntl; 717 union cif_cntl_u cif_cntl;
@@ -735,8 +722,8 @@ static void w100_hw_init(void)
735 union cpu_defaults_u cpu_default; 722 union cpu_defaults_u cpu_default;
736 union cif_write_dbg_u cif_write_dbg; 723 union cif_write_dbg_u cif_write_dbg;
737 union wrap_start_dir_u wrap_start_dir; 724 union wrap_start_dir_u wrap_start_dir;
738 union mc_ext_mem_location_u mc_ext_mem_loc;
739 union cif_io_u cif_io; 725 union cif_io_u cif_io;
726 struct w100_gpio_regs *gpio = par->mach->gpio;
740 727
741 w100_soft_reset(); 728 w100_soft_reset();
742 729
@@ -791,19 +778,6 @@ static void w100_hw_init(void)
791 cfgreg_base.f.cfgreg_base = W100_CFG_BASE; 778 cfgreg_base.f.cfgreg_base = W100_CFG_BASE;
792 writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE); 779 writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE);
793 780
794 /* This location is relative to internal w100 addresses */
795 writel(0x15FF1000, remapped_regs + mmMC_FB_LOCATION);
796
797 mc_ext_mem_loc.val = defMC_EXT_MEM_LOCATION;
798 mc_ext_mem_loc.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
799 mc_ext_mem_loc.f.mc_ext_mem_top = MEM_EXT_TOP_VALUE >> 8;
800 writel((u32) (mc_ext_mem_loc.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
801
802 if ((current_par->lcdMode == LCD_MODE_240) || (current_par->lcdMode == LCD_MODE_320))
803 w100_InitExtMem(LCD_SHARP_QVGA);
804 else
805 w100_InitExtMem(LCD_SHARP_VGA);
806
807 wrap_start_dir.val = defWRAP_START_DIR; 781 wrap_start_dir.val = defWRAP_START_DIR;
808 wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1; 782 wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1;
809 writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR); 783 writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR);
@@ -813,21 +787,24 @@ static void w100_hw_init(void)
813 writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR); 787 writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR);
814 788
815 writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL); 789 writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL);
816}
817 790
791 /* Set the hardware to 565 colour */
792 temp32 = readl(remapped_regs + mmDISP_DEBUG2);
793 temp32 &= 0xff7fffff;
794 temp32 |= 0x00800000;
795 writel(temp32, remapped_regs + mmDISP_DEBUG2);
818 796
819/* 797 /* Initialise the GPIO lines */
820 * Types 798 if (gpio) {
821 */ 799 writel(gpio->init_data1, remapped_regs + mmGPIO_DATA);
800 writel(gpio->init_data2, remapped_regs + mmGPIO_DATA2);
801 writel(gpio->gpio_dir1, remapped_regs + mmGPIO_CNTL1);
802 writel(gpio->gpio_oe1, remapped_regs + mmGPIO_CNTL2);
803 writel(gpio->gpio_dir2, remapped_regs + mmGPIO_CNTL3);
804 writel(gpio->gpio_oe2, remapped_regs + mmGPIO_CNTL4);
805 }
806}
822 807
823struct pll_parm {
824 u16 freq; /* desired Fout for PLL */
825 u8 M;
826 u8 N_int;
827 u8 N_fac;
828 u8 tfgoal;
829 u8 lock_time;
830};
831 808
832struct power_state { 809struct power_state {
833 union clk_pin_cntl_u clk_pin_cntl; 810 union clk_pin_cntl_u clk_pin_cntl;
@@ -835,317 +812,275 @@ struct power_state {
835 union pll_cntl_u pll_cntl; 812 union pll_cntl_u pll_cntl;
836 union sclk_cntl_u sclk_cntl; 813 union sclk_cntl_u sclk_cntl;
837 union pclk_cntl_u pclk_cntl; 814 union pclk_cntl_u pclk_cntl;
838 union clk_test_cntl_u clk_test_cntl;
839 union pwrmgt_cntl_u pwrmgt_cntl; 815 union pwrmgt_cntl_u pwrmgt_cntl;
840 u32 freq; /* Fout for PLL calibration */ 816 int auto_mode; /* system clock auto changing? */
841 u8 tf100; /* for pll calibration */
842 u8 tf80; /* for pll calibration */
843 u8 tf20; /* for pll calibration */
844 u8 M; /* for pll calibration */
845 u8 N_int; /* for pll calibration */
846 u8 N_fac; /* for pll calibration */
847 u8 lock_time; /* for pll calibration */
848 u8 tfgoal; /* for pll calibration */
849 u8 auto_mode; /* hardware auto switch? */
850 u8 pwm_mode; /* 0 fast, 1 normal/slow */
851 u16 fast_sclk; /* fast clk freq */
852 u16 norm_sclk; /* slow clk freq */
853}; 817};
854 818
855 819
856/*
857 * Global state variables
858 */
859
860static struct power_state w100_pwr_state; 820static struct power_state w100_pwr_state;
861 821
862/* This table is specific for 12.5MHz ref crystal. */ 822/* The PLL Fout is determined by (XtalFreq/(M+1)) * ((N_int+1) + (N_fac/8)) */
863static struct pll_parm gPLLTable[] = { 823
864 /*freq M N_int N_fac tfgoal lock_time */ 824/* 12.5MHz Crystal PLL Table */
865 { 50, 0, 1, 0, 0xE0, 56}, /* 50.00 MHz */ 825static struct w100_pll_info xtal_12500000[] = {
866 { 75, 0, 5, 0, 0xDE, 37}, /* 75.00 MHz */ 826 /*freq M N_int N_fac tfgoal lock_time */
867 {100, 0, 7, 0, 0xE0, 28}, /* 100.00 MHz */ 827 { 50, 0, 1, 0, 0xe0, 56}, /* 50.00 MHz */
868 {125, 0, 9, 0, 0xE0, 22}, /* 125.00 MHz */ 828 { 75, 0, 5, 0, 0xde, 37}, /* 75.00 MHz */
869 {150, 0, 11, 0, 0xE0, 17}, /* 150.00 MHz */ 829 {100, 0, 7, 0, 0xe0, 28}, /* 100.00 MHz */
870 { 0, 0, 0, 0, 0, 0} /* Terminator */ 830 {125, 0, 9, 0, 0xe0, 22}, /* 125.00 MHz */
831 {150, 0, 11, 0, 0xe0, 17}, /* 150.00 MHz */
832 { 0, 0, 0, 0, 0, 0}, /* Terminator */
871}; 833};
872 834
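Spot-checking the formula above against the 12.5 MHz table: the {100, 0, 7, 0} entry gives Fout = (12.5 MHz / (0 + 1)) * ((7 + 1) + 0/8) = 100 MHz, and {75, 0, 5, 0} gives 12.5 MHz * 6 = 75 MHz, matching the freq column (which is in MHz).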
835/* 14.318MHz Crystal PLL Table */
836static struct w100_pll_info xtal_14318000[] = {
837 /*freq M N_int N_fac tfgoal lock_time */
838 { 40, 4, 13, 0, 0xe0, 80}, /* tfgoal guessed */
839 { 50, 1, 6, 0, 0xe0, 64}, /* 50.05 MHz */
840 { 57, 2, 11, 0, 0xe0, 53}, /* tfgoal guessed */
841 { 75, 0, 4, 3, 0xe0, 43}, /* 75.08 MHz */
842 {100, 0, 6, 0, 0xe0, 32}, /* 100.10 MHz */
843 { 0, 0, 0, 0, 0, 0},
844};
873 845
874static u8 w100_pll_get_testcount(u8 testclk_sel) 846/* 16MHz Crystal PLL Table */
847static struct w100_pll_info xtal_16000000[] = {
848 /*freq M N_int N_fac tfgoal lock_time */
849 { 72, 1, 8, 0, 0xe0, 48}, /* tfgoal guessed */
850 { 95, 1, 10, 7, 0xe0, 38}, /* tfgoal guessed */
851 { 96, 1, 11, 0, 0xe0, 36}, /* tfgoal guessed */
852 { 0, 0, 0, 0, 0, 0},
853};
854
855static struct pll_entries {
856 int xtal_freq;
857 struct w100_pll_info *pll_table;
858} w100_pll_tables[] = {
859 { 12500000, &xtal_12500000[0] },
860 { 14318000, &xtal_14318000[0] },
861 { 16000000, &xtal_16000000[0] },
862 { 0 },
863};
864
865struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
875{ 866{
867 struct pll_entries *pll_entry = w100_pll_tables;
868
869 do {
870 if (freq == pll_entry->xtal_freq)
871 return pll_entry->pll_table;
872 pll_entry++;
873 } while (pll_entry->xtal_freq);
874 return 0;
875}
876
877
878static unsigned int w100_get_testcount(unsigned int testclk_sel)
879{
880 union clk_test_cntl_u clk_test_cntl;
881
876 udelay(5); 882 udelay(5);
877 883
878 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x0; 884 /* Select the test clock source and reset */
879 w100_pwr_state.clk_test_cntl.f.testclk_sel = testclk_sel; 885 clk_test_cntl.f.start_check_freq = 0x0;
880 w100_pwr_state.clk_test_cntl.f.tstcount_rst = 0x1; /*reset test count */ 886 clk_test_cntl.f.testclk_sel = testclk_sel;
881 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); 887 clk_test_cntl.f.tstcount_rst = 0x1; /* set reset */
882 w100_pwr_state.clk_test_cntl.f.tstcount_rst = 0x0; 888 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
883 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
884 889
885 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x1; 890 clk_test_cntl.f.tstcount_rst = 0x0; /* clear reset */
886 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); 891 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
887 892
893 /* Run clock test */
894 clk_test_cntl.f.start_check_freq = 0x1;
895 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
896
897 /* Give the test time to complete */
888 udelay(20); 898 udelay(20);
889 899
890 w100_pwr_state.clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL); 900 /* Return the result */
891 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x0; 901 clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL);
892 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); 902 clk_test_cntl.f.start_check_freq = 0x0;
903 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
893 904
894 return w100_pwr_state.clk_test_cntl.f.test_count; 905 return clk_test_cntl.f.test_count;
895} 906}
896 907
897 908
898static u8 w100_pll_adjust(void) 909static int w100_pll_adjust(struct w100_pll_info *pll)
899{ 910{
911 unsigned int tf80;
912 unsigned int tf20;
913
914 /* Initial Settings */
915 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0; /* power down */
916 w100_pwr_state.pll_cntl.f.pll_reset = 0x0; /* not reset */
917 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1; /* Hi-Z */
918 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; /* VCO gain = 0 */
919 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; /* VCO frequency range control = off */
920 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; /* current offset inside VCO = 0 */
921 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
922
923 /* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V
924 * therefore, commented out the following lines
925 * tf80 meant tf100
926 */
900 do { 927 do {
901 /* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V 928 /* set VCO input = 0.8 * VDD */
902 * therefore, commented out the following lines
903 * tf80 meant tf100
904 * set VCO input = 0.8 * VDD
905 */
906 w100_pwr_state.pll_cntl.f.pll_dactal = 0xd; 929 w100_pwr_state.pll_cntl.f.pll_dactal = 0xd;
907 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 930 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
908 931
909 w100_pwr_state.tf80 = w100_pll_get_testcount(0x1); /* PLLCLK */ 932 tf80 = w100_get_testcount(TESTCLK_SRC_PLL);
910 if (w100_pwr_state.tf80 >= (w100_pwr_state.tfgoal)) { 933 if (tf80 >= (pll->tfgoal)) {
911 /* set VCO input = 0.2 * VDD */ 934 /* set VCO input = 0.2 * VDD */
912 w100_pwr_state.pll_cntl.f.pll_dactal = 0x7; 935 w100_pwr_state.pll_cntl.f.pll_dactal = 0x7;
913 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 936 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
914 937
915 w100_pwr_state.tf20 = w100_pll_get_testcount(0x1); /* PLLCLK */ 938 tf20 = w100_get_testcount(TESTCLK_SRC_PLL);
916 if (w100_pwr_state.tf20 <= (w100_pwr_state.tfgoal)) 939 if (tf20 <= (pll->tfgoal))
917 return 1; // Success 940 return 1; /* Success */
918 941
919 if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) && 942 if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) &&
920 ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) || 943 ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) ||
921 (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) { 944 (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) {
922 /* slow VCO config */ 945 /* slow VCO config */
923 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1; 946 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1;
924 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; 947 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
925 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; 948 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
926 writel((u32) (w100_pwr_state.pll_cntl.val),
927 remapped_regs + mmPLL_CNTL);
928 continue; 949 continue;
929 } 950 }
930 } 951 }
931 if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) { 952 if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) {
932 w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1; 953 w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1;
933 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 954 } else if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
934 continue;
935 }
936 if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
937 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; 955 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
938 w100_pwr_state.pll_cntl.f.pll_pvg += 0x1; 956 w100_pwr_state.pll_cntl.f.pll_pvg += 0x1;
939 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 957 } else {
940 continue; 958 return 0; /* Error */
941 } 959 }
942 return 0; // error
943 } while(1); 960 } while(1);
944} 961}
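The reworked w100_pll_adjust() is essentially a bounded search over the VCO tuning fields: it takes a one-shot detour into the slow-VCO configuration when the measured counts bracket the goal, then steps pll_ioffset from 0 to 3, bumping pll_pvg and restarting ioffset when the inner knob saturates, and gives up once both saturate. A rough standalone model of that ordering follows; locks_at() and bracketed() are stand-ins for the real test-count comparisons against tfgoal, and the values they return here are invented so the example terminates.

#include <stdbool.h>
#include <stdio.h>

/* locks_at(): stands in for "tf80 >= tfgoal and tf20 <= tfgoal" */
static bool locks_at(int vcofr, int pvg, int ioffset)
{
	return vcofr == 1 && pvg == 1 && ioffset == 2;   /* pretend value */
}

/* bracketed(): stands in for "tf80 >= tfgoal but tf20 > tfgoal" */
static bool bracketed(int vcofr, int pvg, int ioffset)
{
	return vcofr == 0;                               /* pretend value */
}

static int pll_adjust(void)
{
	int vcofr = 0, pvg = 0, ioffset = 0;

	for (;;) {
		if (locks_at(vcofr, pvg, ioffset))
			return 1;                        /* success */

		/* one-shot switch to the slow VCO configuration */
		if (bracketed(vcofr, pvg, ioffset) && vcofr == 0 &&
		    (pvg == 7 || ioffset == 0)) {
			vcofr = 1;
			pvg = 0;
			ioffset = 0;
			continue;
		}

		if (ioffset < 3) {                       /* inner knob */
			ioffset++;
		} else if (pvg < 7) {                    /* outer knob */
			ioffset = 0;
			pvg++;
		} else {
			return 0;                        /* exhausted: error */
		}
	}
}

int main(void)
{
	printf("pll_adjust() = %d\n", pll_adjust());
	return 0;
}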
945 962
946 963
947/* 964/*
948 * w100_pll_calibration 965 * w100_pll_calibration
949 * freq = target frequency of the PLL
950 * (note: crystal = 14.3MHz)
951 */ 966 */
952static u8 w100_pll_calibration(u32 freq) 967static int w100_pll_calibration(struct w100_pll_info *pll)
953{ 968{
954 u8 status; 969 int status;
955
956 /* initial setting */
957 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0; /* power down */
958 w100_pwr_state.pll_cntl.f.pll_reset = 0x0; /* not reset */
959 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1; /* Hi-Z */
960 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; /* VCO gain = 0 */
961 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; /* VCO frequency range control = off */
962 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; /* current offset inside VCO = 0 */
963 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
964 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
965 970
966 /* check for (tf80 >= tfgoal) && (tf20 =< tfgoal) */ 971 status = w100_pll_adjust(pll);
967 if ((w100_pwr_state.tf80 < w100_pwr_state.tfgoal) || (w100_pwr_state.tf20 > w100_pwr_state.tfgoal)) {
968 status=w100_pll_adjust();
969 }
970 /* PLL Reset And Lock */
971 972
973 /* PLL Reset And Lock */
972 /* set VCO input = 0.5 * VDD */ 974 /* set VCO input = 0.5 * VDD */
973 w100_pwr_state.pll_cntl.f.pll_dactal = 0xa; 975 w100_pwr_state.pll_cntl.f.pll_dactal = 0xa;
974 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 976 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
975 977
976 /* reset time */ 978 udelay(1); /* reset time */
977 udelay(1);
978 979
979 /* enable charge pump */ 980 /* enable charge pump */
980 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; /* normal */ 981 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; /* normal */
981 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 982 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
982 983
983 /* set VCO input = Hi-Z */ 984 /* set VCO input = Hi-Z, disable DAC */
984 /* disable DAC */
985 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; 985 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;
986 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 986 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
987 987
988 /* lock time */ 988 udelay(400); /* lock time */
989 udelay(400); /* delay 400 us */
990 989
991 /* PLL locked */ 990 /* PLL locked */
992 991
993 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x1; /* PLL clock */
994 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
995
996 w100_pwr_state.tf100 = w100_pll_get_testcount(0x1); /* PLLCLK */
997
998 return status; 992 return status;
999} 993}
1000 994
1001 995
1002static u8 w100_pll_set_clk(void) 996static int w100_pll_set_clk(struct w100_pll_info *pll)
1003{ 997{
1004 u8 status; 998 int status;
1005 999
1006 if (w100_pwr_state.auto_mode == 1) /* auto mode */ 1000 if (w100_pwr_state.auto_mode == 1) /* auto mode */
1007 { 1001 {
1008 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; /* disable fast to normal */ 1002 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; /* disable fast to normal */
1009 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; /* disable normal to fast */ 1003 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; /* disable normal to fast */
1010 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1004 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1011 } 1005 }
1012 1006
1013 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x0; /* crystal clock */ 1007 /* Set system clock source to XTAL whilst adjusting the PLL! */
1008 w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
1014 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); 1009 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1015 1010
1016 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = w100_pwr_state.M; 1011 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = pll->M;
1017 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = w100_pwr_state.N_int; 1012 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = pll->N_int;
1018 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = w100_pwr_state.N_fac; 1013 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = pll->N_fac;
1019 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = w100_pwr_state.lock_time; 1014 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = pll->lock_time;
1020 writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV); 1015 writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);
1021 1016
1022 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0; 1017 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0;
1023 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1018 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1024 1019
1025 status = w100_pll_calibration (w100_pwr_state.freq); 1020 status = w100_pll_calibration(pll);
1026 1021
1027 if (w100_pwr_state.auto_mode == 1) /* auto mode */ 1022 if (w100_pwr_state.auto_mode == 1) /* auto mode */
1028 { 1023 {
1029 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1; /* reenable fast to normal */ 1024 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1; /* reenable fast to normal */
1030 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1; /* reenable normal to fast */ 1025 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1; /* reenable normal to fast */
1031 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1026 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1032 } 1027 }
1033 return status; 1028 return status;
1034} 1029}
1035 1030
1036 1031/* freq = target frequency of the PLL */
1037/* assume reference crystal clk is 12.5MHz, 1032static int w100_set_pll_freq(struct w100fb_par *par, unsigned int freq)
1038 * and that doubling is not enabled.
1039 *
1040 * Freq = 12 == 12.5MHz.
1041 */
1042static u16 w100_set_slowsysclk(u16 freq)
1043{
1044 if (w100_pwr_state.norm_sclk == freq)
1045 return freq;
1046
1047 if (w100_pwr_state.auto_mode == 1) /* auto mode */
1048 return 0;
1049
1050 if (freq == 12) {
1051 w100_pwr_state.norm_sclk = freq;
1052 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */
1053 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x0; /* crystal src */
1054
1055 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1056
1057 w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x1;
1058 writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);
1059
1060 w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x1;
1061 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1;
1062 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1063 w100_pwr_state.pwm_mode = 1; /* normal mode */
1064 return freq;
1065 } else
1066 return 0;
1067}
1068
1069
1070static u16 w100_set_fastsysclk(u16 freq)
1071{ 1033{
1072 u16 pll_freq; 1034 struct w100_pll_info *pll = par->pll_table;
1073 int i;
1074
1075 while(1) {
1076 pll_freq = (u16) (freq * (w100_pwr_state.sclk_cntl.f.sclk_post_div_fast + 1));
1077 i = 0;
1078 do {
1079 if (pll_freq == gPLLTable[i].freq) {
1080 w100_pwr_state.freq = gPLLTable[i].freq * 1000000;
1081 w100_pwr_state.M = gPLLTable[i].M;
1082 w100_pwr_state.N_int = gPLLTable[i].N_int;
1083 w100_pwr_state.N_fac = gPLLTable[i].N_fac;
1084 w100_pwr_state.tfgoal = gPLLTable[i].tfgoal;
1085 w100_pwr_state.lock_time = gPLLTable[i].lock_time;
1086 w100_pwr_state.tf20 = 0xff; /* set highest */
1087 w100_pwr_state.tf80 = 0x00; /* set lowest */
1088
1089 w100_pll_set_clk();
1090 w100_pwr_state.pwm_mode = 0; /* fast mode */
1091 w100_pwr_state.fast_sclk = freq;
1092 return freq;
1093 }
1094 i++;
1095 } while(gPLLTable[i].freq);
1096 1035
1097 if (w100_pwr_state.auto_mode == 1) 1036 do {
1098 break; 1037 if (freq == pll->freq) {
1099 1038 return w100_pll_set_clk(pll);
1100 if (w100_pwr_state.sclk_cntl.f.sclk_post_div_fast == 0) 1039 }
1101 break; 1040 pll++;
1102 1041 } while(pll->freq);
1103 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast -= 1;
1104 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1105 }
1106 return 0; 1042 return 0;
1107} 1043}
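w100_set_pll_freq() replaces the old gPLLTable scan with a walk over a per-machine table whose last entry has freq == 0 as a sentinel. A minimal standalone sketch of that lookup pattern, with a made-up table and only a couple of the fields from struct w100_pll_info:

#include <stdio.h>

/* Shape mirrors struct w100_pll_info; the entries below are invented. */
struct pll_entry {
	unsigned int freq;      /* target frequency, 0 terminates the table */
	unsigned int M;         /* reference divider field                  */
	unsigned int N_int;     /* feedback divider, integer part           */
};

static const struct pll_entry pll_table[] = {
	{ 50, 3, 6 },
	{ 75, 0, 5 },
	{  0, 0, 0 },           /* sentinel: freq == 0 ends the table */
};

/* Walk the table exactly like w100_set_pll_freq(): first match wins. */
static const struct pll_entry *find_pll(unsigned int freq)
{
	const struct pll_entry *pll = pll_table;

	do {
		if (pll->freq == freq)
			return pll;
		pll++;
	} while (pll->freq);

	return NULL;            /* no entry: the driver reports failure (0) */
}

int main(void)
{
	const struct pll_entry *pll = find_pll(75);

	if (pll)
		printf("75: M=%u N_int=%u\n", pll->M, pll->N_int);
	return 0;
}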
1108 1044
1109
1110/* Set up an initial state. Some values/fields set 1045/* Set up an initial state. Some values/fields set
1111 here will be overwritten. */ 1046 here will be overwritten. */
1112static void w100_pwm_setup(void) 1047static void w100_pwm_setup(struct w100fb_par *par)
1113{ 1048{
1114 w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1; 1049 w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1;
1115 w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f; 1050 w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f;
1116 w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0; 1051 w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0;
1117 w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0; 1052 w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0;
1118 w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = 0x0; /* no freq doubling */ 1053 w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = par->mach->xtal_dbl ? 1 : 0;
1119 w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0; 1054 w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0;
1120 writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL); 1055 writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);
1121 1056
1122 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x0; /* Crystal Clk */ 1057 w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
1123 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0; /* Pfast = 1 */ 1058 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0; /* Pfast = 1 */
1124 w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3; 1059 w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3;
1125 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */ 1060 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */
1126 w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0; 1061 w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0;
1127 w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0; /* Dynamic */ 1062 w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0; /* Dynamic */
1128 w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0; /* Dynamic */ 1063 w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0; /* Dynamic */
1129 w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0; /* Dynamic */ 1064 w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0; /* Dynamic */
1130 w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0; /* Dynamic */ 1065 w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0; /* Dynamic */
1131 w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0; /* Dynamic */ 1066 w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0; /* Dynamic */
1132 w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0; /* Dynamic */ 1067 w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0; /* Dynamic */
1133 w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0; /* Dynamic */ 1068 w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0; /* Dynamic */
1134 w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0; /* Dynamic */ 1069 w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0; /* Dynamic */
1135 w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0; /* Dynamic */ 1070 w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0; /* Dynamic */
1136 w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0; 1071 w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0;
1137 w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0; 1072 w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0;
1138 w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0; 1073 w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0;
1139 w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0; 1074 w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0;
1140 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); 1075 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1141 1076
1142 w100_pwr_state.pclk_cntl.f.pclk_src_sel = 0x0; /* Crystal Clk */ 1077 w100_pwr_state.pclk_cntl.f.pclk_src_sel = CLK_SRC_XTAL;
1143 w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1; /* P = 2 */ 1078 w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1; /* P = 2 */
1144 w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0; /* Dynamic */ 1079 w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0; /* Dynamic */
1145 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); 1080 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
1146 1081
1147 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0; /* M = 1 */ 1082 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0; /* M = 1 */
1148 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0; /* N = 1.0 */ 1083 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0; /* N = 1.0 */
1149 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0; 1084 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0;
1150 w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5; 1085 w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5;
1151 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff; 1086 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff;
@@ -1154,7 +1089,7 @@ static void w100_pwm_setup(void)
1154 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1; 1089 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1;
1155 w100_pwr_state.pll_cntl.f.pll_reset = 0x1; 1090 w100_pwr_state.pll_cntl.f.pll_reset = 0x1;
1156 w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0; 1091 w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0;
1157 w100_pwr_state.pll_cntl.f.pll_mode = 0x0; /* uses VCO clock */ 1092 w100_pwr_state.pll_cntl.f.pll_mode = 0x0; /* uses VCO clock */
1158 w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0; 1093 w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0;
1159 w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0; 1094 w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0;
1160 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; 1095 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;
@@ -1164,220 +1099,275 @@ static void w100_pwm_setup(void)
1164 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; 1099 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
1165 w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0; 1100 w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0;
1166 w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0; 1101 w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0;
1167 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; /* Hi-Z */ 1102 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; /* Hi-Z */
1168 w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3; 1103 w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3;
1169 w100_pwr_state.pll_cntl.f.pll_conf = 0x2; 1104 w100_pwr_state.pll_cntl.f.pll_conf = 0x2;
1170 w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2; 1105 w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2;
1171 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0; 1106 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
1172 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 1107 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
1173 1108
1174 w100_pwr_state.clk_test_cntl.f.testclk_sel = 0x1; /* PLLCLK (for testing) */
1175 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x0;
1176 w100_pwr_state.clk_test_cntl.f.tstcount_rst = 0x0;
1177 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
1178
1179 w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0; 1109 w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0;
1180 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1; /* normal mode (0, 1, 3) */ 1110 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1; /* normal mode (0, 1, 3) */
1181 w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0; 1111 w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0;
1182 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; 1112 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;
1183 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; 1113 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;
1184 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1; /* PM4,ENG */ 1114 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1; /* PM4,ENG */
1185 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1; /* PM4,ENG */ 1115 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1; /* PM4,ENG */
1186 w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF; 1116 w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF;
1187 w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF; 1117 w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF;
1188 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1118 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1189 1119
1190 w100_pwr_state.auto_mode = 0; /* manual mode */ 1120 w100_pwr_state.auto_mode = 0; /* manual mode */
1191 w100_pwr_state.pwm_mode = 1; /* normal mode (0, 1, 2) */
1192 w100_pwr_state.freq = 50000000; /* 50 MHz */
1193 w100_pwr_state.M = 3; /* M = 4 */
1194 w100_pwr_state.N_int = 6; /* N = 7.0 */
1195 w100_pwr_state.N_fac = 0;
1196 w100_pwr_state.tfgoal = 0xE0;
1197 w100_pwr_state.lock_time = 56;
1198 w100_pwr_state.tf20 = 0xff; /* set highest */
1199 w100_pwr_state.tf80 = 0x00; /* set lowest */
1200 w100_pwr_state.tf100 = 0x00; /* set lowest */
1201 w100_pwr_state.fast_sclk = 50; /* 50.0 MHz */
1202 w100_pwr_state.norm_sclk = 12; /* 12.5 MHz */
1203} 1121}
1204 1122
1205 1123
1206static void w100_init_sharp_lcd(u32 mode) 1124/*
1125 * Setup the w100 clocks for the specified mode
1126 */
1127static void w100_init_clocks(struct w100fb_par *par)
1207{ 1128{
1208 u32 temp32; 1129 struct w100_mode *mode = par->mode;
1209 union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
1210 1130
1211 /* Prevent display updates */ 1131 if (mode->pixclk_src == CLK_SRC_PLL || mode->sysclk_src == CLK_SRC_PLL)
1212 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e; 1132 w100_set_pll_freq(par, (par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq);
1213 disp_db_buf_wr_cntl.f.update_db_buf = 0;
1214 disp_db_buf_wr_cntl.f.en_db_buf = 0;
1215 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
1216 1133
1217 switch(mode) { 1134 w100_pwr_state.sclk_cntl.f.sclk_src_sel = mode->sysclk_src;
1218 case LCD_SHARP_QVGA: 1135 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = mode->sysclk_divider;
1219 w100_set_slowsysclk(12); /* use crystal -- 12.5MHz */ 1136 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = mode->sysclk_divider;
1220 /* not use PLL */ 1137 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1221 1138}
1222 writel(0x7FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION); 1139
1223 writel(0x85FF8000, remapped_regs + mmMC_FB_LOCATION); 1140static void w100_init_lcd(struct w100fb_par *par)
1224 writel(0x00000003, remapped_regs + mmLCD_FORMAT); 1141{
1225 writel(0x00CF1C06, remapped_regs + mmGRAPHIC_CTRL); 1142 u32 temp32;
1226 writel(0x01410145, remapped_regs + mmCRTC_TOTAL); 1143 struct w100_mode *mode = par->mode;
1227 writel(0x01170027, remapped_regs + mmACTIVE_H_DISP); 1144 struct w100_gen_regs *regs = par->mach->regs;
1228 writel(0x01410001, remapped_regs + mmACTIVE_V_DISP); 1145 union active_h_disp_u active_h_disp;
1229 writel(0x01170027, remapped_regs + mmGRAPHIC_H_DISP); 1146 union active_v_disp_u active_v_disp;
1230 writel(0x01410001, remapped_regs + mmGRAPHIC_V_DISP); 1147 union graphic_h_disp_u graphic_h_disp;
1231 writel(0x81170027, remapped_regs + mmCRTC_SS); 1148 union graphic_v_disp_u graphic_v_disp;
1232 writel(0xA0140000, remapped_regs + mmCRTC_LS); 1149 union crtc_total_u crtc_total;
1233 writel(0x00400008, remapped_regs + mmCRTC_REV); 1150
1234 writel(0xA0000000, remapped_regs + mmCRTC_DCLK); 1151 /* w3200 doesn't like undefined bits being set, so zero register values first */
1235 writel(0xC0140014, remapped_regs + mmCRTC_GS); 1152
1236 writel(0x00010141, remapped_regs + mmCRTC_VPOS_GS); 1153 active_h_disp.val = 0;
1237 writel(0x8015010F, remapped_regs + mmCRTC_GCLK); 1154 active_h_disp.f.active_h_start=mode->left_margin;
1238 writel(0x80100110, remapped_regs + mmCRTC_GOE); 1155 active_h_disp.f.active_h_end=mode->left_margin + mode->xres;
1239 writel(0x00000000, remapped_regs + mmCRTC_FRAME); 1156 writel(active_h_disp.val, remapped_regs + mmACTIVE_H_DISP);
1240 writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS); 1157
1241 writel(0x01CC0000, remapped_regs + mmLCDD_CNTL1); 1158 active_v_disp.val = 0;
1242 writel(0x0003FFFF, remapped_regs + mmLCDD_CNTL2); 1159 active_v_disp.f.active_v_start=mode->upper_margin;
1243 writel(0x00FFFF0D, remapped_regs + mmGENLCD_CNTL1); 1160 active_v_disp.f.active_v_end=mode->upper_margin + mode->yres;
1244 writel(0x003F3003, remapped_regs + mmGENLCD_CNTL2); 1161 writel(active_v_disp.val, remapped_regs + mmACTIVE_V_DISP);
1245 writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT); 1162
1246 writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR); 1163 graphic_h_disp.val = 0;
1247 writel(0x000102aa, remapped_regs + mmGENLCD_CNTL3); 1164 graphic_h_disp.f.graphic_h_start=mode->left_margin;
1248 writel(0x00800000, remapped_regs + mmGRAPHIC_OFFSET); 1165 graphic_h_disp.f.graphic_h_end=mode->left_margin + mode->xres;
1249 writel(0x000001e0, remapped_regs + mmGRAPHIC_PITCH); 1166 writel(graphic_h_disp.val, remapped_regs + mmGRAPHIC_H_DISP);
1250 writel(0x000000bf, remapped_regs + mmGPIO_DATA); 1167
1251 writel(0x03c0feff, remapped_regs + mmGPIO_CNTL2); 1168 graphic_v_disp.val = 0;
1252 writel(0x00000000, remapped_regs + mmGPIO_CNTL1); 1169 graphic_v_disp.f.graphic_v_start=mode->upper_margin;
1253 writel(0x41060010, remapped_regs + mmCRTC_PS1_ACTIVE); 1170 graphic_v_disp.f.graphic_v_end=mode->upper_margin + mode->yres;
1254 break; 1171 writel(graphic_v_disp.val, remapped_regs + mmGRAPHIC_V_DISP);
1255 case LCD_SHARP_VGA: 1172
1256 w100_set_slowsysclk(12); /* use crystal -- 12.5MHz */ 1173 crtc_total.val = 0;
1257 w100_set_fastsysclk(current_par->fastsysclk_mode); /* use PLL -- 75.0MHz */ 1174 crtc_total.f.crtc_h_total=mode->left_margin + mode->xres + mode->right_margin;
1258 w100_pwr_state.pclk_cntl.f.pclk_src_sel = 0x1; 1175 crtc_total.f.crtc_v_total=mode->upper_margin + mode->yres + mode->lower_margin;
1259 w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x2; 1176 writel(crtc_total.val, remapped_regs + mmCRTC_TOTAL);
1260 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); 1177
1261 writel(0x15FF1000, remapped_regs + mmMC_FB_LOCATION); 1178 writel(mode->crtc_ss, remapped_regs + mmCRTC_SS);
1262 writel(0x9FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION); 1179 writel(mode->crtc_ls, remapped_regs + mmCRTC_LS);
1263 writel(0x00000003, remapped_regs + mmLCD_FORMAT); 1180 writel(mode->crtc_gs, remapped_regs + mmCRTC_GS);
1264 writel(0x00DE1D66, remapped_regs + mmGRAPHIC_CTRL); 1181 writel(mode->crtc_vpos_gs, remapped_regs + mmCRTC_VPOS_GS);
1265 1182 writel(mode->crtc_rev, remapped_regs + mmCRTC_REV);
1266 writel(0x0283028B, remapped_regs + mmCRTC_TOTAL); 1183 writel(mode->crtc_dclk, remapped_regs + mmCRTC_DCLK);
1267 writel(0x02360056, remapped_regs + mmACTIVE_H_DISP); 1184 writel(mode->crtc_gclk, remapped_regs + mmCRTC_GCLK);
1268 writel(0x02830003, remapped_regs + mmACTIVE_V_DISP); 1185 writel(mode->crtc_goe, remapped_regs + mmCRTC_GOE);
1269 writel(0x02360056, remapped_regs + mmGRAPHIC_H_DISP); 1186 writel(mode->crtc_ps1_active, remapped_regs + mmCRTC_PS1_ACTIVE);
1270 writel(0x02830003, remapped_regs + mmGRAPHIC_V_DISP); 1187
1271 writel(0x82360056, remapped_regs + mmCRTC_SS); 1188 writel(regs->lcd_format, remapped_regs + mmLCD_FORMAT);
1272 writel(0xA0280000, remapped_regs + mmCRTC_LS); 1189 writel(regs->lcdd_cntl1, remapped_regs + mmLCDD_CNTL1);
1273 writel(0x00400008, remapped_regs + mmCRTC_REV); 1190 writel(regs->lcdd_cntl2, remapped_regs + mmLCDD_CNTL2);
1274 writel(0xA0000000, remapped_regs + mmCRTC_DCLK); 1191 writel(regs->genlcd_cntl1, remapped_regs + mmGENLCD_CNTL1);
1275 writel(0x80280028, remapped_regs + mmCRTC_GS); 1192 writel(regs->genlcd_cntl2, remapped_regs + mmGENLCD_CNTL2);
1276 writel(0x02830002, remapped_regs + mmCRTC_VPOS_GS); 1193 writel(regs->genlcd_cntl3, remapped_regs + mmGENLCD_CNTL3);
1277 writel(0x8015010F, remapped_regs + mmCRTC_GCLK); 1194
1278 writel(0x80100110, remapped_regs + mmCRTC_GOE); 1195 writel(0x00000000, remapped_regs + mmCRTC_FRAME);
1279 writel(0x00000000, remapped_regs + mmCRTC_FRAME); 1196 writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS);
1280 writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS); 1197 writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
1281 writel(0x01CC0000, remapped_regs + mmLCDD_CNTL1); 1198 writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);
1282 writel(0x0003FFFF, remapped_regs + mmLCDD_CNTL2);
1283 writel(0x00FFFF0D, remapped_regs + mmGENLCD_CNTL1);
1284 writel(0x003F3003, remapped_regs + mmGENLCD_CNTL2);
1285 writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
1286 writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);
1287 writel(0x000102aa, remapped_regs + mmGENLCD_CNTL3);
1288 writel(0x00800000, remapped_regs + mmGRAPHIC_OFFSET);
1289 writel(0x000003C0, remapped_regs + mmGRAPHIC_PITCH);
1290 writel(0x000000bf, remapped_regs + mmGPIO_DATA);
1291 writel(0x03c0feff, remapped_regs + mmGPIO_CNTL2);
1292 writel(0x00000000, remapped_regs + mmGPIO_CNTL1);
1293 writel(0x41060010, remapped_regs + mmCRTC_PS1_ACTIVE);
1294 break;
1295 default:
1296 break;
1297 }
1298 1199
1299 /* Hack for overlay in ext memory */ 1200 /* Hack for overlay in ext memory */
1300 temp32 = readl(remapped_regs + mmDISP_DEBUG2); 1201 temp32 = readl(remapped_regs + mmDISP_DEBUG2);
1301 temp32 |= 0xc0000000; 1202 temp32 |= 0xc0000000;
1302 writel(temp32, remapped_regs + mmDISP_DEBUG2); 1203 writel(temp32, remapped_regs + mmDISP_DEBUG2);
1303
1304 /* Re-enable display updates */
1305 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
1306 disp_db_buf_wr_cntl.f.update_db_buf = 1;
1307 disp_db_buf_wr_cntl.f.en_db_buf = 1;
1308 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
1309} 1204}
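w100_init_lcd() now derives the CRTC windows arithmetically from the mode's margins instead of per-panel magic constants: active display runs from margin to margin + resolution, and the CRTC total adds the opposite margin. A small worked example for a hypothetical 480x640 mode; the margin values are invented for illustration.

#include <stdio.h>

struct timing {
	unsigned int xres, yres;
	unsigned int left_margin, right_margin;
	unsigned int upper_margin, lower_margin;
};

int main(void)
{
	struct timing m = { 480, 640, 0x56, 8, 3, 0 };   /* invented margins */

	printf("active_h: %u..%u\n", m.left_margin, m.left_margin + m.xres);
	printf("active_v: %u..%u\n", m.upper_margin, m.upper_margin + m.yres);
	printf("crtc_h_total: %u\n", m.left_margin + m.xres + m.right_margin);
	printf("crtc_v_total: %u\n", m.upper_margin + m.yres + m.lower_margin);
	return 0;
}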
1310 1205
1311 1206
1312static void w100_set_vga_rotation_regs(u16 divider, unsigned long ctrl, unsigned long offset, unsigned long pitch) 1207static void w100_setup_memory(struct w100fb_par *par)
1313{ 1208{
1314 w100_pwr_state.pclk_cntl.f.pclk_src_sel = 0x1; 1209 union mc_ext_mem_location_u extmem_location;
1315 w100_pwr_state.pclk_cntl.f.pclk_post_div = divider; 1210 union mc_fb_location_u intmem_location;
1316 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); 1211 struct w100_mem_info *mem = par->mach->mem;
1212 struct w100_bm_mem_info *bm_mem = par->mach->bm_mem;
1317 1213
1318 writel(ctrl, remapped_regs + mmGRAPHIC_CTRL); 1214 if (!par->extmem_active) {
1319 writel(offset, remapped_regs + mmGRAPHIC_OFFSET); 1215 w100_suspend(W100_SUSPEND_EXTMEM);
1320 writel(pitch, remapped_regs + mmGRAPHIC_PITCH);
1321 1216
1322 /* Re-enable display updates */ 1217 /* Map Internal Memory at FB Base */
1323 writel(0x0000007b, remapped_regs + mmDISP_DB_BUF_CNTL); 1218 intmem_location.f.mc_fb_start = W100_FB_BASE >> 8;
1324} 1219 intmem_location.f.mc_fb_top = (W100_FB_BASE+MEM_INT_SIZE) >> 8;
1220 writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
1325 1221
1222 /* Unmap External Memory - value is *probably* irrelevant but may have meaning
1223 to acceleration libraries */
1224 extmem_location.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
1225 extmem_location.f.mc_ext_mem_top = (MEM_EXT_BASE_VALUE-1) >> 8;
1226 writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
1227 } else {
1228 /* Map Internal Memory to its default location */
1229 intmem_location.f.mc_fb_start = MEM_INT_BASE_VALUE >> 8;
1230 intmem_location.f.mc_fb_top = (MEM_INT_BASE_VALUE+MEM_INT_SIZE) >> 8;
1231 writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
1326 1232
1327static void w100_init_vga_rotation(u16 deg) 1233 /* Map External Memory at FB Base */
1328{ 1234 extmem_location.f.mc_ext_mem_start = W100_FB_BASE >> 8;
1329 switch(deg) { 1235 extmem_location.f.mc_ext_mem_top = (W100_FB_BASE+par->mach->mem->size) >> 8;
1330 case 0: 1236 writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
1331 w100_set_vga_rotation_regs(0x02, 0x00DE1D66, 0x00800000, 0x000003c0); 1237
1332 break; 1238 writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
1333 case 90: 1239 writel(mem->ext_cntl, remapped_regs + mmMEM_EXT_CNTL);
1334 w100_set_vga_rotation_regs(0x06, 0x00DE1D0e, 0x00895b00, 0x00000500); 1240 writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1335 break; 1241 udelay(100);
1336 case 180: 1242 writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1337 w100_set_vga_rotation_regs(0x02, 0x00DE1D7e, 0x00895ffc, 0x000003c0); 1243 udelay(100);
1338 break; 1244 writel(mem->sdram_mode_reg, remapped_regs + mmMEM_SDRAM_MODE_REG);
1339 case 270: 1245 udelay(100);
1340 w100_set_vga_rotation_regs(0x06, 0x00DE1D16, 0x008004fc, 0x00000500); 1246 writel(mem->ext_timing_cntl, remapped_regs + mmMEM_EXT_TIMING_CNTL);
1341 break; 1247 writel(mem->io_cntl, remapped_regs + mmMEM_IO_CNTL);
1342 default: 1248 if (bm_mem) {
1343 /* not-support */ 1249 writel(bm_mem->ext_mem_bw, remapped_regs + mmBM_EXT_MEM_BANDWIDTH);
1344 break; 1250 writel(bm_mem->offset, remapped_regs + mmBM_OFFSET);
1251 writel(bm_mem->ext_timing_ctl, remapped_regs + mmBM_MEM_EXT_TIMING_CNTL);
1252 writel(bm_mem->ext_cntl, remapped_regs + mmBM_MEM_EXT_CNTL);
1253 writel(bm_mem->mode_reg, remapped_regs + mmBM_MEM_MODE_REG);
1254 writel(bm_mem->io_cntl, remapped_regs + mmBM_MEM_IO_CNTL);
1255 writel(bm_mem->config, remapped_regs + mmBM_CONFIG);
1256 }
1345 } 1257 }
1346} 1258}
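The MC_FB_LOCATION / MC_EXT_MEM_LOCATION values programmed in w100_setup_memory() pack a start and a top address, both shifted right by 8, with start in the low half and top in the high half; the old literal 0x15FF1000, for instance, corresponds to 0x100000..0x15ffff. A tiny helper reproducing that packing; the layout is inferred from the values in this diff, so treat it as an assumption rather than the definitive register description.

#include <stdint.h>
#include <stdio.h>

/* Pack a start/top address pair the way the MC location registers expect. */
static uint32_t mc_location(uint32_t start, uint32_t top)
{
	return ((top >> 8) << 16) | ((start >> 8) & 0xffff);
}

int main(void)
{
	/* internal memory at its default location: MEM_INT_BASE_VALUE + MEM_INT_SIZE */
	printf("0x%08x\n", mc_location(0x100000, 0x100000 + 0x05ffff));
	return 0;
}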
1347 1259
1348 1260static void w100_set_dispregs(struct w100fb_par *par)
1349static void w100_set_qvga_rotation_regs(unsigned long ctrl, unsigned long offset, unsigned long pitch)
1350{ 1261{
1351 writel(ctrl, remapped_regs + mmGRAPHIC_CTRL); 1262 unsigned long rot=0, divider, offset=0;
1352 writel(offset, remapped_regs + mmGRAPHIC_OFFSET); 1263 union graphic_ctrl_u graphic_ctrl;
1353 writel(pitch, remapped_regs + mmGRAPHIC_PITCH); 1264
1265 /* See if the mode has been rotated */
1266 if (par->xres == par->mode->xres) {
1267 if (par->flip) {
1268 rot=3; /* 180 degree */
1269 offset=(par->xres * par->yres) - 1;
1270 } /* else 0 degree */
1271 divider = par->mode->pixclk_divider;
1272 } else {
1273 if (par->flip) {
1274 rot=2; /* 270 degree */
1275 offset=par->xres - 1;
1276 } else {
1277 rot=1; /* 90 degree */
1278 offset=par->xres * (par->yres - 1);
1279 }
1280 divider = par->mode->pixclk_divider_rotated;
1281 }
1354 1282
1355 /* Re-enable display updates */ 1283 graphic_ctrl.val = 0; /* w32xx doesn't like undefined bits */
1356 writel(0x0000007b, remapped_regs + mmDISP_DB_BUF_CNTL); 1284 switch (par->chip_id) {
1285 case CHIP_ID_W100:
1286 graphic_ctrl.f_w100.color_depth=6;
1287 graphic_ctrl.f_w100.en_crtc=1;
1288 graphic_ctrl.f_w100.en_graphic_req=1;
1289 graphic_ctrl.f_w100.en_graphic_crtc=1;
1290 graphic_ctrl.f_w100.lcd_pclk_on=1;
1291 graphic_ctrl.f_w100.lcd_sclk_on=1;
1292 graphic_ctrl.f_w100.low_power_on=0;
1293 graphic_ctrl.f_w100.req_freq=0;
1294 graphic_ctrl.f_w100.portrait_mode=rot;
1295
1296 /* Zaurus needs this */
1297 switch(par->xres) {
1298 case 240:
1299 case 320:
1300 default:
1301 graphic_ctrl.f_w100.total_req_graphic=0xa0;
1302 break;
1303 case 480:
1304 case 640:
1305 switch(rot) {
1306 case 0: /* 0 */
1307 case 3: /* 180 */
1308 graphic_ctrl.f_w100.low_power_on=1;
1309 graphic_ctrl.f_w100.req_freq=5;
1310 break;
1311 case 1: /* 90 */
1312 case 2: /* 270 */
1313 graphic_ctrl.f_w100.req_freq=4;
1314 break;
1315 default:
1316 break;
1317 }
1318 graphic_ctrl.f_w100.total_req_graphic=0xf0;
1319 break;
1320 }
1321 break;
1322 case CHIP_ID_W3200:
1323 case CHIP_ID_W3220:
1324 graphic_ctrl.f_w32xx.color_depth=6;
1325 graphic_ctrl.f_w32xx.en_crtc=1;
1326 graphic_ctrl.f_w32xx.en_graphic_req=1;
1327 graphic_ctrl.f_w32xx.en_graphic_crtc=1;
1328 graphic_ctrl.f_w32xx.lcd_pclk_on=1;
1329 graphic_ctrl.f_w32xx.lcd_sclk_on=1;
1330 graphic_ctrl.f_w32xx.low_power_on=0;
1331 graphic_ctrl.f_w32xx.req_freq=0;
1332 graphic_ctrl.f_w32xx.total_req_graphic=par->mode->xres >> 1; /* panel xres, not mode */
1333 graphic_ctrl.f_w32xx.portrait_mode=rot;
1334 break;
1335 }
1336
1337 /* Set the pixel clock source and divider */
1338 w100_pwr_state.pclk_cntl.f.pclk_src_sel = par->mode->pixclk_src;
1339 w100_pwr_state.pclk_cntl.f.pclk_post_div = divider;
1340 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
1341
1342 writel(graphic_ctrl.val, remapped_regs + mmGRAPHIC_CTRL);
1343 writel(W100_FB_BASE + ((offset * BITS_PER_PIXEL/8)&~0x03UL), remapped_regs + mmGRAPHIC_OFFSET);
1344 writel((par->xres*BITS_PER_PIXEL/8), remapped_regs + mmGRAPHIC_PITCH);
1357} 1345}
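The rotation handling in w100_set_dispregs() reduces to picking a portrait_mode code and a start offset in pixels from the requested resolution, the panel resolution and the flip flag; the driver then scales the offset to bytes and adds W100_FB_BASE. A standalone restatement of just that arithmetic:

#include <stdio.h>

struct disp {
	unsigned int rot;       /* 0: 0deg, 1: 90, 2: 270, 3: 180 (driver encoding) */
	unsigned long offset;   /* first pixel of the scanned-out image             */
};

static struct disp rotation(unsigned int xres, unsigned int yres,
			    unsigned int panel_xres, int flip)
{
	struct disp d = { 0, 0 };

	if (xres == panel_xres) {           /* unrotated or 180 degrees */
		if (flip) {
			d.rot = 3;
			d.offset = (unsigned long)xres * yres - 1;
		}
	} else if (flip) {                  /* 270 degrees */
		d.rot = 2;
		d.offset = xres - 1;
	} else {                            /* 90 degrees */
		d.rot = 1;
		d.offset = (unsigned long)xres * (yres - 1);
	}
	return d;
}

int main(void)
{
	struct disp d = rotation(480, 640, 640, 0);  /* 90-degree case */

	printf("rot=%u offset=%lu pixels\n", d.rot, d.offset);
	return 0;
}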
1358 1346
1359 1347
1360static void w100_init_qvga_rotation(u16 deg) 1348/*
1349 * Work out how long the sync pulse lasts
1350 * Value is 1/(time in seconds)
1351 */
1352static void calc_hsync(struct w100fb_par *par)
1361{ 1353{
1362 switch(deg) { 1354 unsigned long hsync;
1363 case 0: 1355 struct w100_mode *mode = par->mode;
1364 w100_set_qvga_rotation_regs(0x00d41c06, 0x00800000, 0x000001e0); 1356 union crtc_ss_u crtc_ss;
1365 break; 1357
1366 case 90: 1358 if (mode->pixclk_src == CLK_SRC_XTAL)
1367 w100_set_qvga_rotation_regs(0x00d41c0E, 0x00825580, 0x00000280); 1359 hsync=par->mach->xtal_freq;
1368 break; 1360 else
1369 case 180: 1361 hsync=((par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq)*100000;
1370 w100_set_qvga_rotation_regs(0x00d41c1e, 0x008257fc, 0x000001e0);
1371 break;
1372 case 270:
1373 w100_set_qvga_rotation_regs(0x00d41c16, 0x0080027c, 0x00000280);
1374 break;
1375 default:
1376 /* not supported */
1377 break;
1378 }
1379}
1380 1362
1363 hsync /= (w100_pwr_state.pclk_cntl.f.pclk_post_div + 1);
1364
1365 crtc_ss.val = readl(remapped_regs + mmCRTC_SS);
1366 if (crtc_ss.val)
1367 par->hsync_len = hsync / (crtc_ss.f.ss_end-crtc_ss.f.ss_start);
1368 else
1369 par->hsync_len = 0;
1370}
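calc_hsync() expresses the sync pulse as a rate: the pixel clock source frequency, divided by the pixel-clock post-divider plus one, divided by the pulse width in pixels (ss_end - ss_start). The sketch below restates that arithmetic with explicit parameters; the example numbers are invented, and the zero-width guard stands in for the driver's check that CRTC_SS is non-zero.

#include <stdio.h>

static unsigned long hsync_rate(unsigned long src_freq,
				unsigned int post_div,
				unsigned int ss_start, unsigned int ss_end)
{
	unsigned long pixclk = src_freq / (post_div + 1);

	if (ss_end == ss_start)
		return 0;               /* no sync pulse programmed */
	return pixclk / (ss_end - ss_start);
}

int main(void)
{
	/* e.g. a 75MHz source, post divider 2 (divide by 3), 16-pixel pulse */
	printf("%lu\n", hsync_rate(75000000UL, 2, 0x56, 0x66));
	return 0;
}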
1381 1371
1382static void w100_suspend(u32 mode) 1372static void w100_suspend(u32 mode)
1383{ 1373{
@@ -1387,30 +1377,28 @@ static void w100_suspend(u32 mode)
1387 writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL); 1377 writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL);
1388 1378
1389 val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL); 1379 val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL);
1390 val &= ~(0x00100000); /* bit20=0 */ 1380 val &= ~(0x00100000); /* bit20=0 */
1391 val |= 0xFF000000; /* bit31:24=0xff */ 1381 val |= 0xFF000000; /* bit31:24=0xff */
1392 writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL); 1382 writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL);
1393 1383
1394 val = readl(remapped_regs + mmMEM_EXT_CNTL); 1384 val = readl(remapped_regs + mmMEM_EXT_CNTL);
1395 val &= ~(0x00040000); /* bit18=0 */ 1385 val &= ~(0x00040000); /* bit18=0 */
1396 val |= 0x00080000; /* bit19=1 */ 1386 val |= 0x00080000; /* bit19=1 */
1397 writel(val, remapped_regs + mmMEM_EXT_CNTL); 1387 writel(val, remapped_regs + mmMEM_EXT_CNTL);
1398 1388
1399 udelay(1); /* wait 1us */ 1389 udelay(1); /* wait 1us */
1400 1390
1401 if (mode == W100_SUSPEND_EXTMEM) { 1391 if (mode == W100_SUSPEND_EXTMEM) {
1402
1403 /* CKE: Tri-State */ 1392 /* CKE: Tri-State */
1404 val = readl(remapped_regs + mmMEM_EXT_CNTL); 1393 val = readl(remapped_regs + mmMEM_EXT_CNTL);
1405 val |= 0x40000000; /* bit30=1 */ 1394 val |= 0x40000000; /* bit30=1 */
1406 writel(val, remapped_regs + mmMEM_EXT_CNTL); 1395 writel(val, remapped_regs + mmMEM_EXT_CNTL);
1407 1396
1408 /* CLK: Stop */ 1397 /* CLK: Stop */
1409 val = readl(remapped_regs + mmMEM_EXT_CNTL); 1398 val = readl(remapped_regs + mmMEM_EXT_CNTL);
1410 val &= ~(0x00000001); /* bit0=0 */ 1399 val &= ~(0x00000001); /* bit0=0 */
1411 writel(val, remapped_regs + mmMEM_EXT_CNTL); 1400 writel(val, remapped_regs + mmMEM_EXT_CNTL);
1412 } else { 1401 } else {
1413
1414 writel(0x00000000, remapped_regs + mmSCLK_CNTL); 1402 writel(0x00000000, remapped_regs + mmSCLK_CNTL);
1415 writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL); 1403 writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL);
1416 writel(0x00000015, remapped_regs + mmPWRMGT_CNTL); 1404 writel(0x00000015, remapped_regs + mmPWRMGT_CNTL);
@@ -1418,43 +1406,16 @@ static void w100_suspend(u32 mode)
1418 udelay(5); 1406 udelay(5);
1419 1407
1420 val = readl(remapped_regs + mmPLL_CNTL); 1408 val = readl(remapped_regs + mmPLL_CNTL);
1421 val |= 0x00000004; /* bit2=1 */ 1409 val |= 0x00000004; /* bit2=1 */
1422 writel(val, remapped_regs + mmPLL_CNTL); 1410 writel(val, remapped_regs + mmPLL_CNTL);
1423 writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL); 1411 writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
1424 } 1412 }
1425} 1413}
1426 1414
1427
1428static void w100_resume(void)
1429{
1430 u32 temp32;
1431
1432 w100_hw_init();
1433 w100_pwm_setup();
1434
1435 temp32 = readl(remapped_regs + mmDISP_DEBUG2);
1436 temp32 &= 0xff7fffff;
1437 temp32 |= 0x00800000;
1438 writel(temp32, remapped_regs + mmDISP_DEBUG2);
1439
1440 if (current_par->lcdMode == LCD_MODE_480 || current_par->lcdMode == LCD_MODE_640) {
1441 w100_init_sharp_lcd(LCD_SHARP_VGA);
1442 if (current_par->lcdMode == LCD_MODE_640) {
1443 w100_init_vga_rotation(current_par->rotation_flag ? 270 : 90);
1444 }
1445 } else {
1446 w100_init_sharp_lcd(LCD_SHARP_QVGA);
1447 if (current_par->lcdMode == LCD_MODE_320) {
1448 w100_init_qvga_rotation(current_par->rotation_flag ? 270 : 90);
1449 }
1450 }
1451}
1452
1453
1454static void w100_vsync(void) 1415static void w100_vsync(void)
1455{ 1416{
1456 u32 tmp; 1417 u32 tmp;
1457 int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */ 1418 int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */
1458 1419
1459 tmp = readl(remapped_regs + mmACTIVE_V_DISP); 1420 tmp = readl(remapped_regs + mmACTIVE_V_DISP);
1460 1421
@@ -1490,363 +1451,6 @@ static void w100_vsync(void)
1490 writel(0x00000002, remapped_regs + mmGEN_INT_STATUS); 1451 writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
1491} 1452}
1492 1453
1493
1494static void w100_InitExtMem(u32 mode)
1495{
1496 switch(mode) {
1497 case LCD_SHARP_QVGA:
1498 /* QVGA doesn't use external memory
1499 nothing to do, really. */
1500 break;
1501 case LCD_SHARP_VGA:
1502 writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
1503 writel(0x00040003, remapped_regs + mmMEM_EXT_CNTL);
1504 writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1505 udelay(100);
1506 writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1507 udelay(100);
1508 writel(0x00650021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1509 udelay(100);
1510 writel(0x10002a4a, remapped_regs + mmMEM_EXT_TIMING_CNTL);
1511 writel(0x7ff87012, remapped_regs + mmMEM_IO_CNTL);
1512 break;
1513 default:
1514 break;
1515 }
1516}
1517
1518
1519#define RESCTL_ADRS 0x00
1520#define PHACTRL_ADRS 0x01
1521#define DUTYCTRL_ADRS 0x02
1522#define POWERREG0_ADRS 0x03
1523#define POWERREG1_ADRS 0x04
1524#define GPOR3_ADRS 0x05
1525#define PICTRL_ADRS 0x06
1526#define POLCTRL_ADRS 0x07
1527
1528#define RESCTL_QVGA 0x01
1529#define RESCTL_VGA 0x00
1530
1531#define POWER1_VW_ON 0x01 /* VW Supply FET ON */
1532#define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */
1533#define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */
1534
1535#define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */
1536#define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */
1537#define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */
1538
1539#define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */
1540#define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */
1541#define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */
1542#define POWER0_COM_ON 0x08 /* COM Power Supply ON */
1543#define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */
1544
1545#define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */
1546#define POWER0_COM_OFF 0x00 /* COM Power Supply OFF */
1547#define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */
1548
1549#define PICTRL_INIT_STATE 0x01
1550#define PICTRL_INIOFF 0x02
1551#define PICTRL_POWER_DOWN 0x04
1552#define PICTRL_COM_SIGNAL_OFF 0x08
1553#define PICTRL_DAC_SIGNAL_OFF 0x10
1554
1555#define PICTRL_POWER_ACTIVE (0)
1556
1557#define POLCTRL_SYNC_POL_FALL 0x01
1558#define POLCTRL_EN_POL_FALL 0x02
1559#define POLCTRL_DATA_POL_FALL 0x04
1560#define POLCTRL_SYNC_ACT_H 0x08
1561#define POLCTRL_EN_ACT_L 0x10
1562
1563#define POLCTRL_SYNC_POL_RISE 0x00
1564#define POLCTRL_EN_POL_RISE 0x00
1565#define POLCTRL_DATA_POL_RISE 0x00
1566#define POLCTRL_SYNC_ACT_L 0x00
1567#define POLCTRL_EN_ACT_H 0x00
1568
1569#define PHACTRL_PHASE_MANUAL 0x01
1570
1571#define PHAD_QVGA_DEFAULT_VAL (9)
1572#define COMADJ_DEFAULT (125)
1573
1574static void lcdtg_ssp_send(u8 adrs, u8 data)
1575{
1576 w100fb_ssp_send(adrs,data);
1577}
1578
1579/*
1580 * This is only a pseudo I2C interface. We can't use the standard kernel
1581 * routines as the interface is write only. We just assume the data is acked...
1582 */
1583static void lcdtg_ssp_i2c_send(u8 data)
1584{
1585 lcdtg_ssp_send(POWERREG0_ADRS, data);
1586 udelay(10);
1587}
1588
1589static void lcdtg_i2c_send_bit(u8 data)
1590{
1591 lcdtg_ssp_i2c_send(data);
1592 lcdtg_ssp_i2c_send(data | POWER0_COM_DCLK);
1593 lcdtg_ssp_i2c_send(data);
1594}
1595
1596static void lcdtg_i2c_send_start(u8 base)
1597{
1598 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
1599 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
1600 lcdtg_ssp_i2c_send(base);
1601}
1602
1603static void lcdtg_i2c_send_stop(u8 base)
1604{
1605 lcdtg_ssp_i2c_send(base);
1606 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
1607 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
1608}
1609
1610static void lcdtg_i2c_send_byte(u8 base, u8 data)
1611{
1612 int i;
1613 for (i = 0; i < 8; i++) {
1614 if (data & 0x80)
1615 lcdtg_i2c_send_bit(base | POWER0_COM_DOUT);
1616 else
1617 lcdtg_i2c_send_bit(base);
1618 data <<= 1;
1619 }
1620}
1621
1622static void lcdtg_i2c_wait_ack(u8 base)
1623{
1624 lcdtg_i2c_send_bit(base);
1625}
1626
1627static void lcdtg_set_common_voltage(u8 base_data, u8 data)
1628{
1629 /* Set Common Voltage to M62332FP via I2C */
1630 lcdtg_i2c_send_start(base_data);
1631 lcdtg_i2c_send_byte(base_data, 0x9c);
1632 lcdtg_i2c_wait_ack(base_data);
1633 lcdtg_i2c_send_byte(base_data, 0x00);
1634 lcdtg_i2c_wait_ack(base_data);
1635 lcdtg_i2c_send_byte(base_data, data);
1636 lcdtg_i2c_wait_ack(base_data);
1637 lcdtg_i2c_send_stop(base_data);
1638}
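The M62332FP interface above is write-only I2C bit-banged through POWERREG0: for each bit, DOUT is held at the bit value while DCLK is pulsed high and back low, bytes go out MSB first, and the ACK slot is clocked without reading anything back. A standalone sketch using the same COM_DCLK/COM_DOUT bit values, with ssp_send() as a stand-in for the real SSP write that simply prints the two control lines:

#include <stdint.h>
#include <stdio.h>

#define COM_DCLK 0x01
#define COM_DOUT 0x02

static void ssp_send(uint8_t level)         /* stand-in for the SSP write */
{
	printf("DCLK=%d DOUT=%d\n", !!(level & COM_DCLK), !!(level & COM_DOUT));
}

static void i2c_send_bit(uint8_t base, int bit)
{
	uint8_t level = base | (bit ? COM_DOUT : 0);

	ssp_send(level);                    /* data stable, clock low */
	ssp_send(level | COM_DCLK);         /* clock high             */
	ssp_send(level);                    /* clock low again        */
}

static void i2c_send_byte(uint8_t base, uint8_t data)
{
	int i;

	for (i = 0; i < 8; i++) {           /* MSB first */
		i2c_send_bit(base, data & 0x80);
		data <<= 1;
	}
	i2c_send_bit(base, 0);              /* clock the (unread) ACK slot */
}

int main(void)
{
	i2c_send_byte(0, 0x9c);             /* DAC address byte used by the driver */
	return 0;
}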
1639
1640static struct lcdtg_register_setting {
1641 u8 adrs;
1642 u8 data;
1643 u32 wait;
1644} lcdtg_power_on_table[] = {
1645
1646 /* Initialize Internal Logic & Port */
1647 { PICTRL_ADRS,
1648 PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE |
1649 PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF,
1650 0 },
1651
1652 { POWERREG0_ADRS,
1653 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF | POWER0_COM_OFF |
1654 POWER0_VCC5_OFF,
1655 0 },
1656
1657 { POWERREG1_ADRS,
1658 POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF,
1659 0 },
1660
1661 /* VDD(+8V),SVSS(-4V) ON */
1662 { POWERREG1_ADRS,
1663 POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON /* VDD ON */,
1664 3000 },
1665
1666 /* DAC ON */
1667 { POWERREG0_ADRS,
1668 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON /* DAC ON */ |
1669 POWER0_COM_OFF | POWER0_VCC5_OFF,
1670 0 },
1671
1672 /* INIB = H, INI = L */
1673 { PICTRL_ADRS,
1674 /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */
1675 PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF,
1676 0 },
1677
1678 /* Set Common Voltage */
1679 { 0xfe, 0, 0 },
1680
1681 /* VCC5 ON */
1682 { POWERREG0_ADRS,
1683 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON /* DAC ON */ |
1684 POWER0_COM_OFF | POWER0_VCC5_ON /* VCC5 ON */,
1685 0 },
1686
1687 /* GVSS(-8V) ON */
1688 { POWERREG1_ADRS,
1689 POWER1_VW_OFF | POWER1_GVSS_ON /* GVSS ON */ |
1690 POWER1_VDD_ON /* VDD ON */,
1691 2000 },
1692
1693 /* COM SIGNAL ON (PICTL[3] = L) */
1694 { PICTRL_ADRS,
1695 PICTRL_INIT_STATE,
1696 0 },
1697
1698 /* COM ON */
1699 { POWERREG0_ADRS,
1700 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON /* DAC ON */ |
1701 POWER0_COM_ON /* COM ON */ | POWER0_VCC5_ON /* VCC5_ON */,
1702 0 },
1703
1704 /* VW ON */
1705 { POWERREG1_ADRS,
1706 POWER1_VW_ON /* VW ON */ | POWER1_GVSS_ON /* GVSS ON */ |
1707 POWER1_VDD_ON /* VDD ON */,
1708 0 /* Wait 100ms */ },
1709
1710 /* Signals output enable */
1711 { PICTRL_ADRS,
1712 0 /* Signals output enable */,
1713 0 },
1714
1715 { PHACTRL_ADRS,
1716 PHACTRL_PHASE_MANUAL,
1717 0 },
1718
1719 /* Initialize for Input Signals from ATI */
1720 { POLCTRL_ADRS,
1721 POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE | POLCTRL_DATA_POL_RISE |
1722 POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H,
1723 1000 /*100000*/ /* Wait 100ms */ },
1724
1725 /* end mark */
1726 { 0xff, 0, 0 }
1727};
1728
1729static void lcdtg_resume(void)
1730{
1731 if (current_par->lcdMode == LCD_MODE_480 || current_par->lcdMode == LCD_MODE_640) {
1732 lcdtg_hw_init(LCD_SHARP_VGA);
1733 } else {
1734 lcdtg_hw_init(LCD_SHARP_QVGA);
1735 }
1736}
1737
1738static void lcdtg_suspend(void)
1739{
1740 int i;
1741
1742 for (i = 0; i < (current_par->xres * current_par->yres); i++) {
1743 writew(0xffff, remapped_fbuf + (2*i));
1744 }
1745
1746 /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */
1747 mdelay(34);
1748
1749 /* (1)VW OFF */
1750 lcdtg_ssp_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
1751
1752 /* (2)COM OFF */
1753 lcdtg_ssp_send(PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF);
1754 lcdtg_ssp_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON);
1755
1756 /* (3)Set Common Voltage Bias 0V */
1757 lcdtg_set_common_voltage(POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0);
1758
1759 /* (4)GVSS OFF */
1760 lcdtg_ssp_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
1761
1762 /* (5)VCC5 OFF */
1763 lcdtg_ssp_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF);
1764
1765 /* (6)Set PDWN, INIOFF, DACOFF */
1766 lcdtg_ssp_send(PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF |
1767 PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF);
1768
1769 /* (7)DAC OFF */
1770 lcdtg_ssp_send(POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF);
1771
1772 /* (8)VDD OFF */
1773 lcdtg_ssp_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
1774
1775}
1776
1777static void lcdtg_set_phadadj(u32 mode)
1778{
1779 int adj;
1780
1781 if (mode == LCD_SHARP_VGA) {
1782 /* Setting for VGA */
1783 adj = current_par->phadadj;
1784 if (adj < 0) {
1785 adj = PHACTRL_PHASE_MANUAL;
1786 } else {
1787 adj = ((adj & 0x0f) << 1) | PHACTRL_PHASE_MANUAL;
1788 }
1789 } else {
1790 /* Setting for QVGA */
1791 adj = (PHAD_QVGA_DEFAULT_VAL << 1) | PHACTRL_PHASE_MANUAL;
1792 }
1793 lcdtg_ssp_send(PHACTRL_ADRS, adj);
1794}
1795
1796static void lcdtg_hw_init(u32 mode)
1797{
1798 int i;
1799 int comadj;
1800
1801 i = 0;
1802 while(lcdtg_power_on_table[i].adrs != 0xff) {
1803 if (lcdtg_power_on_table[i].adrs == 0xfe) {
1804 /* Set Common Voltage */
1805 comadj = current_par->comadj;
1806 if (comadj < 0) {
1807 comadj = COMADJ_DEFAULT;
1808 }
1809 lcdtg_set_common_voltage((POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF), comadj);
1810 } else if (lcdtg_power_on_table[i].adrs == PHACTRL_ADRS) {
1811 /* Set Phase Adjust */
1812 lcdtg_set_phadadj(mode);
1813 } else {
1814 /* Other */
1815 lcdtg_ssp_send(lcdtg_power_on_table[i].adrs, lcdtg_power_on_table[i].data);
1816 }
1817 if (lcdtg_power_on_table[i].wait != 0)
1818 udelay(lcdtg_power_on_table[i].wait);
1819 i++;
1820 }
1821
1822 switch(mode) {
1823 case LCD_SHARP_QVGA:
1824 /* Set Lcd Resolution (QVGA) */
1825 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_QVGA);
1826 break;
1827 case LCD_SHARP_VGA:
1828 /* Set Lcd Resolution (VGA) */
1829 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_VGA);
1830 break;
1831 default:
1832 break;
1833 }
1834}
1835
1836static void lcdtg_lcd_change(u32 mode)
1837{
1838 /* Set Phase Adjust */
1839 lcdtg_set_phadadj(mode);
1840
1841 if (mode == LCD_SHARP_VGA)
1842 /* Set Lcd Resolution (VGA) */
1843 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_VGA);
1844 else if (mode == LCD_SHARP_QVGA)
1845 /* Set Lcd Resolution (QVGA) */
1846 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_QVGA);
1847}
1848
1849
1850static struct device_driver w100fb_driver = { 1454static struct device_driver w100fb_driver = {
1851 .name = "w100fb", 1455 .name = "w100fb",
1852 .bus = &platform_bus_type, 1456 .bus = &platform_bus_type,
@@ -1870,4 +1474,4 @@ module_init(w100fb_init);
1870module_exit(w100fb_cleanup); 1474module_exit(w100fb_cleanup);
1871 1475
1872MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver"); 1476MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
1873MODULE_LICENSE("GPLv2"); 1477MODULE_LICENSE("GPL");
diff --git a/drivers/video/w100fb.h b/drivers/video/w100fb.h
index 41624f961237..7a58a1e3e427 100644
--- a/drivers/video/w100fb.h
+++ b/drivers/video/w100fb.h
@@ -5,9 +5,12 @@
5 * 5 *
6 * Copyright (C) 2002, ATI Corp. 6 * Copyright (C) 2002, ATI Corp.
7 * Copyright (C) 2004-2005 Richard Purdie 7 * Copyright (C) 2004-2005 Richard Purdie
8 * Copyright (c) 2005 Ian Molton <spyro@f2s.com>
8 * 9 *
9 * Modified to work with 2.6 by Richard Purdie <rpurdie@rpsys.net> 10 * Modified to work with 2.6 by Richard Purdie <rpurdie@rpsys.net>
10 * 11 *
12 * w32xx support by Ian Molton
13 *
11 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 15 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
@@ -19,7 +22,7 @@
19 22
20/* Block CIF Start: */ 23/* Block CIF Start: */
21#define mmCHIP_ID 0x0000 24#define mmCHIP_ID 0x0000
22#define mmREVISION_ID 0x0004 25#define mmREVISION_ID 0x0004
23#define mmWRAP_BUF_A 0x0008 26#define mmWRAP_BUF_A 0x0008
24#define mmWRAP_BUF_B 0x000C 27#define mmWRAP_BUF_B 0x000C
25#define mmWRAP_TOP_DIR 0x0010 28#define mmWRAP_TOP_DIR 0x0010
@@ -88,7 +91,7 @@
88#define mmDISP_DEBUG 0x04D4 91#define mmDISP_DEBUG 0x04D4
89#define mmDISP_DB_BUF_CNTL 0x04D8 92#define mmDISP_DB_BUF_CNTL 0x04D8
90#define mmDISP_CRC_SIG 0x04DC 93#define mmDISP_CRC_SIG 0x04DC
91#define mmCRTC_DEFAULT_COUNT 0x04E0 94#define mmCRTC_DEFAULT_COUNT 0x04E0
92#define mmLCD_BACKGROUND_COLOR 0x04E4 95#define mmLCD_BACKGROUND_COLOR 0x04E4
93#define mmCRTC_PS2 0x04E8 96#define mmCRTC_PS2 0x04E8
94#define mmCRTC_PS2_VPOS 0x04EC 97#define mmCRTC_PS2_VPOS 0x04EC
@@ -119,17 +122,17 @@
119/* Block DISPLAY End: */ 122/* Block DISPLAY End: */
120 123
121/* Block GFX Start: */ 124/* Block GFX Start: */
122#define mmBRUSH_OFFSET 0x108C 125#define mmBRUSH_OFFSET 0x108C
123#define mmBRUSH_Y_X 0x1074 126#define mmBRUSH_Y_X 0x1074
124#define mmDEFAULT_PITCH_OFFSET 0x10A0 127#define mmDEFAULT_PITCH_OFFSET 0x10A0
125#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8 128#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8
126#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC 129#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC
127#define mmGLOBAL_ALPHA 0x1210 130#define mmGLOBAL_ALPHA 0x1210
128#define mmFILTER_COEF 0x1214 131#define mmFILTER_COEF 0x1214
129#define mmMVC_CNTL_START 0x11E0 132#define mmMVC_CNTL_START 0x11E0
130#define mmE2_ARITHMETIC_CNTL 0x1220 133#define mmE2_ARITHMETIC_CNTL 0x1220
131#define mmENG_CNTL 0x13E8 134#define mmENG_CNTL 0x13E8
132#define mmENG_PERF_CNT 0x13F0 135#define mmENG_PERF_CNT 0x13F0
133/* Block GFX End: */ 136/* Block GFX End: */
134 137
135/* Block IDCT Start: */ 138/* Block IDCT Start: */
@@ -141,22 +144,38 @@
141/* Block IDCT End: */ 144/* Block IDCT End: */
142 145
143/* Block MC Start: */ 146/* Block MC Start: */
144#define mmMEM_CNTL 0x0180 147#define mmMEM_CNTL 0x0180
145#define mmMEM_ARB 0x0184 148#define mmMEM_ARB 0x0184
146#define mmMC_FB_LOCATION 0x0188 149#define mmMC_FB_LOCATION 0x0188
147#define mmMEM_EXT_CNTL 0x018C 150#define mmMEM_EXT_CNTL 0x018C
148#define mmMC_EXT_MEM_LOCATION 0x0190 151#define mmMC_EXT_MEM_LOCATION 0x0190
149#define mmMEM_EXT_TIMING_CNTL 0x0194 152#define mmMEM_EXT_TIMING_CNTL 0x0194
150#define mmMEM_SDRAM_MODE_REG 0x0198 153#define mmMEM_SDRAM_MODE_REG 0x0198
151#define mmMEM_IO_CNTL 0x019C 154#define mmMEM_IO_CNTL 0x019C
152#define mmMC_DEBUG 0x01A0 155#define mmMC_DEBUG 0x01A0
153#define mmMC_BIST_CTRL 0x01A4 156#define mmMC_BIST_CTRL 0x01A4
154#define mmMC_BIST_COLLAR_READ 0x01A8 157#define mmMC_BIST_COLLAR_READ 0x01A8
155#define mmTC_MISMATCH 0x01AC 158#define mmTC_MISMATCH 0x01AC
156#define mmMC_PERF_MON_CNTL 0x01B0 159#define mmMC_PERF_MON_CNTL 0x01B0
157#define mmMC_PERF_COUNTERS 0x01B4 160#define mmMC_PERF_COUNTERS 0x01B4
158/* Block MC End: */ 161/* Block MC End: */
159 162
163/* Block BM Start: */
164#define mmBM_EXT_MEM_BANDWIDTH 0x0A00
165#define mmBM_OFFSET 0x0A04
166#define mmBM_MEM_EXT_TIMING_CNTL 0x0A08
167#define mmBM_MEM_EXT_CNTL 0x0A0C
168#define mmBM_MEM_MODE_REG 0x0A10
169#define mmBM_MEM_IO_CNTL 0x0A18
170#define mmBM_CONFIG 0x0A1C
171#define mmBM_STATUS 0x0A20
172#define mmBM_DEBUG 0x0A24
173#define mmBM_PERF_MON_CNTL 0x0A28
174#define mmBM_PERF_COUNTERS 0x0A2C
175#define mmBM_PERF2_MON_CNTL 0x0A30
176#define mmBM_PERF2_COUNTERS 0x0A34
177/* Block BM End: */
178
160/* Block RBBM Start: */ 179/* Block RBBM Start: */
161#define mmWAIT_UNTIL 0x1400 180#define mmWAIT_UNTIL 0x1400
162#define mmISYNC_CNTL 0x1404 181#define mmISYNC_CNTL 0x1404
@@ -176,439 +195,575 @@
176/* Block CG End: */ 195/* Block CG End: */
177 196
178/* default value definitions */ 197/* default value definitions */
179#define defWRAP_TOP_DIR 0x00000000 198#define defWRAP_TOP_DIR 0x00000000
180#define defWRAP_START_DIR 0x00000000 199#define defWRAP_START_DIR 0x00000000
181#define defCFGREG_BASE 0x00000000 200#define defCFGREG_BASE 0x00000000
182#define defCIF_IO 0x000C0902 201#define defCIF_IO 0x000C0902
183#define defINTF_CNTL 0x00000011 202#define defINTF_CNTL 0x00000011
184#define defCPU_DEFAULTS 0x00000006 203#define defCPU_DEFAULTS 0x00000006
185#define defHW_INT 0x00000000 204#define defHW_INT 0x00000000
186#define defMC_EXT_MEM_LOCATION 0x07ff0000 205#define defMC_EXT_MEM_LOCATION 0x07ff0000
187#define defTC_MISMATCH 0x00000000 206#define defTC_MISMATCH 0x00000000
188 207
189#define W100_CFG_BASE 0x0 208#define W100_CFG_BASE 0x0
190#define W100_CFG_LEN 0x10 209#define W100_CFG_LEN 0x10
191#define W100_REG_BASE 0x10000 210#define W100_REG_BASE 0x10000
192#define W100_REG_LEN 0x2000 211#define W100_REG_LEN 0x2000
193#define MEM_INT_BASE_VALUE 0x100000 212#define MEM_INT_BASE_VALUE 0x100000
194#define MEM_INT_TOP_VALUE_W100 0x15ffff
195#define MEM_EXT_BASE_VALUE 0x800000 213#define MEM_EXT_BASE_VALUE 0x800000
196#define MEM_EXT_TOP_VALUE 0x9fffff 214#define MEM_INT_SIZE 0x05ffff
215#define MEM_WINDOW_BASE 0x100000
216#define MEM_WINDOW_SIZE 0xf00000
217
197#define WRAP_BUF_BASE_VALUE 0x80000 218#define WRAP_BUF_BASE_VALUE 0x80000
198#define WRAP_BUF_TOP_VALUE 0xbffff 219#define WRAP_BUF_TOP_VALUE 0xbffff
199 220
221#define CHIP_ID_W100 0x57411002
222#define CHIP_ID_W3200 0x56441002
223#define CHIP_ID_W3220 0x57441002
200 224
201/* data structure definitions */ 225/* Register structure definitions */
202 226
203struct wrap_top_dir_t { 227struct wrap_top_dir_t {
204 unsigned long top_addr : 23; 228 unsigned long top_addr : 23;
205 unsigned long : 9; 229 unsigned long : 9;
206} __attribute__((packed)); 230} __attribute__((packed));
207 231
208union wrap_top_dir_u { 232union wrap_top_dir_u {
209 unsigned long val : 32; 233 unsigned long val : 32;
210 struct wrap_top_dir_t f; 234 struct wrap_top_dir_t f;
211} __attribute__((packed)); 235} __attribute__((packed));
212 236
213struct wrap_start_dir_t { 237struct wrap_start_dir_t {
214 unsigned long start_addr : 23; 238 unsigned long start_addr : 23;
215 unsigned long : 9; 239 unsigned long : 9;
216} __attribute__((packed)); 240} __attribute__((packed));
217 241
218union wrap_start_dir_u { 242union wrap_start_dir_u {
219 unsigned long val : 32; 243 unsigned long val : 32;
220 struct wrap_start_dir_t f; 244 struct wrap_start_dir_t f;
221} __attribute__((packed)); 245} __attribute__((packed));
222 246
223struct cif_cntl_t { 247struct cif_cntl_t {
224 unsigned long swap_reg : 2; 248 unsigned long swap_reg : 2;
225 unsigned long swap_fbuf_1 : 2; 249 unsigned long swap_fbuf_1 : 2;
226 unsigned long swap_fbuf_2 : 2; 250 unsigned long swap_fbuf_2 : 2;
227 unsigned long swap_fbuf_3 : 2; 251 unsigned long swap_fbuf_3 : 2;
228 unsigned long pmi_int_disable : 1; 252 unsigned long pmi_int_disable : 1;
229 unsigned long pmi_schmen_disable : 1; 253 unsigned long pmi_schmen_disable : 1;
230 unsigned long intb_oe : 1; 254 unsigned long intb_oe : 1;
231 unsigned long en_wait_to_compensate_dq_prop_dly : 1; 255 unsigned long en_wait_to_compensate_dq_prop_dly : 1;
232 unsigned long compensate_wait_rd_size : 2; 256 unsigned long compensate_wait_rd_size : 2;
233 unsigned long wait_asserted_timeout_val : 2; 257 unsigned long wait_asserted_timeout_val : 2;
234 unsigned long wait_masked_val : 2; 258 unsigned long wait_masked_val : 2;
235 unsigned long en_wait_timeout : 1; 259 unsigned long en_wait_timeout : 1;
236 unsigned long en_one_clk_setup_before_wait : 1; 260 unsigned long en_one_clk_setup_before_wait : 1;
237 unsigned long interrupt_active_high : 1; 261 unsigned long interrupt_active_high : 1;
238 unsigned long en_overwrite_straps : 1; 262 unsigned long en_overwrite_straps : 1;
239 unsigned long strap_wait_active_hi : 1; 263 unsigned long strap_wait_active_hi : 1;
240 unsigned long lat_busy_count : 2; 264 unsigned long lat_busy_count : 2;
241 unsigned long lat_rd_pm4_sclk_busy : 1; 265 unsigned long lat_rd_pm4_sclk_busy : 1;
242 unsigned long dis_system_bits : 1; 266 unsigned long dis_system_bits : 1;
243 unsigned long dis_mr : 1; 267 unsigned long dis_mr : 1;
244 unsigned long cif_spare_1 : 4; 268 unsigned long cif_spare_1 : 4;
245} __attribute__((packed)); 269} __attribute__((packed));
246 270
247union cif_cntl_u { 271union cif_cntl_u {
248 unsigned long val : 32; 272 unsigned long val : 32;
249 struct cif_cntl_t f; 273 struct cif_cntl_t f;
250} __attribute__((packed)); 274} __attribute__((packed));
251 275
252struct cfgreg_base_t { 276struct cfgreg_base_t {
253 unsigned long cfgreg_base : 24; 277 unsigned long cfgreg_base : 24;
254 unsigned long : 8; 278 unsigned long : 8;
255} __attribute__((packed)); 279} __attribute__((packed));
256 280
257union cfgreg_base_u { 281union cfgreg_base_u {
258 unsigned long val : 32; 282 unsigned long val : 32;
259 struct cfgreg_base_t f; 283 struct cfgreg_base_t f;
260} __attribute__((packed)); 284} __attribute__((packed));
261 285
262struct cif_io_t { 286struct cif_io_t {
263 unsigned long dq_srp : 1; 287 unsigned long dq_srp : 1;
264 unsigned long dq_srn : 1; 288 unsigned long dq_srn : 1;
265 unsigned long dq_sp : 4; 289 unsigned long dq_sp : 4;
266 unsigned long dq_sn : 4; 290 unsigned long dq_sn : 4;
267 unsigned long waitb_srp : 1; 291 unsigned long waitb_srp : 1;
268 unsigned long waitb_srn : 1; 292 unsigned long waitb_srn : 1;
269 unsigned long waitb_sp : 4; 293 unsigned long waitb_sp : 4;
270 unsigned long waitb_sn : 4; 294 unsigned long waitb_sn : 4;
271 unsigned long intb_srp : 1; 295 unsigned long intb_srp : 1;
272 unsigned long intb_srn : 1; 296 unsigned long intb_srn : 1;
273 unsigned long intb_sp : 4; 297 unsigned long intb_sp : 4;
274 unsigned long intb_sn : 4; 298 unsigned long intb_sn : 4;
275 unsigned long : 2; 299 unsigned long : 2;
276} __attribute__((packed)); 300} __attribute__((packed));
277 301
278union cif_io_u { 302union cif_io_u {
279 unsigned long val : 32; 303 unsigned long val : 32;
280 struct cif_io_t f; 304 struct cif_io_t f;
281} __attribute__((packed)); 305} __attribute__((packed));
282 306
283struct cif_read_dbg_t { 307struct cif_read_dbg_t {
284 unsigned long unpacker_pre_fetch_trig_gen : 2; 308 unsigned long unpacker_pre_fetch_trig_gen : 2;
285 unsigned long dly_second_rd_fetch_trig : 1; 309 unsigned long dly_second_rd_fetch_trig : 1;
286 unsigned long rst_rd_burst_id : 1; 310 unsigned long rst_rd_burst_id : 1;
287 unsigned long dis_rd_burst_id : 1; 311 unsigned long dis_rd_burst_id : 1;
288 unsigned long en_block_rd_when_packer_is_not_emp : 1; 312 unsigned long en_block_rd_when_packer_is_not_emp : 1;
289 unsigned long dis_pre_fetch_cntl_sm : 1; 313 unsigned long dis_pre_fetch_cntl_sm : 1;
290 unsigned long rbbm_chrncy_dis : 1; 314 unsigned long rbbm_chrncy_dis : 1;
291 unsigned long rbbm_rd_after_wr_lat : 2; 315 unsigned long rbbm_rd_after_wr_lat : 2;
292 unsigned long dis_be_during_rd : 1; 316 unsigned long dis_be_during_rd : 1;
293 unsigned long one_clk_invalidate_pulse : 1; 317 unsigned long one_clk_invalidate_pulse : 1;
294 unsigned long dis_chnl_priority : 1; 318 unsigned long dis_chnl_priority : 1;
295 unsigned long rst_read_path_a_pls : 1; 319 unsigned long rst_read_path_a_pls : 1;
296 unsigned long rst_read_path_b_pls : 1; 320 unsigned long rst_read_path_b_pls : 1;
297 unsigned long dis_reg_rd_fetch_trig : 1; 321 unsigned long dis_reg_rd_fetch_trig : 1;
298 unsigned long dis_rd_fetch_trig_from_ind_addr : 1; 322 unsigned long dis_rd_fetch_trig_from_ind_addr : 1;
299 unsigned long dis_rd_same_byte_to_trig_fetch : 1; 323 unsigned long dis_rd_same_byte_to_trig_fetch : 1;
300 unsigned long dis_dir_wrap : 1; 324 unsigned long dis_dir_wrap : 1;
301 unsigned long dis_ring_buf_to_force_dec : 1; 325 unsigned long dis_ring_buf_to_force_dec : 1;
302 unsigned long dis_addr_comp_in_16bit : 1; 326 unsigned long dis_addr_comp_in_16bit : 1;
303 unsigned long clr_w : 1; 327 unsigned long clr_w : 1;
304 unsigned long err_rd_tag_is_3 : 1; 328 unsigned long err_rd_tag_is_3 : 1;
305 unsigned long err_load_when_ful_a : 1; 329 unsigned long err_load_when_ful_a : 1;
306 unsigned long err_load_when_ful_b : 1; 330 unsigned long err_load_when_ful_b : 1;
307 unsigned long : 7; 331 unsigned long : 7;
308} __attribute__((packed)); 332} __attribute__((packed));
309 333
310union cif_read_dbg_u { 334union cif_read_dbg_u {
311 unsigned long val : 32; 335 unsigned long val : 32;
312 struct cif_read_dbg_t f; 336 struct cif_read_dbg_t f;
313} __attribute__((packed)); 337} __attribute__((packed));
314 338
315struct cif_write_dbg_t { 339struct cif_write_dbg_t {
316 unsigned long packer_timeout_count : 2; 340 unsigned long packer_timeout_count : 2;
317 unsigned long en_upper_load_cond : 1; 341 unsigned long en_upper_load_cond : 1;
318 unsigned long en_chnl_change_cond : 1; 342 unsigned long en_chnl_change_cond : 1;
319 unsigned long dis_addr_comp_cond : 1; 343 unsigned long dis_addr_comp_cond : 1;
320 unsigned long dis_load_same_byte_addr_cond : 1; 344 unsigned long dis_load_same_byte_addr_cond : 1;
321 unsigned long dis_timeout_cond : 1; 345 unsigned long dis_timeout_cond : 1;
322 unsigned long dis_timeout_during_rbbm : 1; 346 unsigned long dis_timeout_during_rbbm : 1;
323 unsigned long dis_packer_ful_during_rbbm_timeout : 1; 347 unsigned long dis_packer_ful_during_rbbm_timeout : 1;
324 unsigned long en_dword_split_to_rbbm : 1; 348 unsigned long en_dword_split_to_rbbm : 1;
325 unsigned long en_dummy_val : 1; 349 unsigned long en_dummy_val : 1;
326 unsigned long dummy_val_sel : 1; 350 unsigned long dummy_val_sel : 1;
327 unsigned long mask_pm4_wrptr_dec : 1; 351 unsigned long mask_pm4_wrptr_dec : 1;
328 unsigned long dis_mc_clean_cond : 1; 352 unsigned long dis_mc_clean_cond : 1;
329 unsigned long err_two_reqi_during_ful : 1; 353 unsigned long err_two_reqi_during_ful : 1;
330 unsigned long err_reqi_during_idle_clk : 1; 354 unsigned long err_reqi_during_idle_clk : 1;
331 unsigned long err_global : 1; 355 unsigned long err_global : 1;
332 unsigned long en_wr_buf_dbg_load : 1; 356 unsigned long en_wr_buf_dbg_load : 1;
333 unsigned long en_wr_buf_dbg_path : 1; 357 unsigned long en_wr_buf_dbg_path : 1;
334 unsigned long sel_wr_buf_byte : 3; 358 unsigned long sel_wr_buf_byte : 3;
335 unsigned long dis_rd_flush_wr : 1; 359 unsigned long dis_rd_flush_wr : 1;
336 unsigned long dis_packer_ful_cond : 1; 360 unsigned long dis_packer_ful_cond : 1;
337 unsigned long dis_invalidate_by_ops_chnl : 1; 361 unsigned long dis_invalidate_by_ops_chnl : 1;
338 unsigned long en_halt_when_reqi_err : 1; 362 unsigned long en_halt_when_reqi_err : 1;
339 unsigned long cif_spare_2 : 5; 363 unsigned long cif_spare_2 : 5;
340 unsigned long : 1; 364 unsigned long : 1;
341} __attribute__((packed)); 365} __attribute__((packed));
342 366
343union cif_write_dbg_u { 367union cif_write_dbg_u {
344 unsigned long val : 32; 368 unsigned long val : 32;
345 struct cif_write_dbg_t f; 369 struct cif_write_dbg_t f;
346} __attribute__((packed)); 370} __attribute__((packed));
347 371
348 372
349struct intf_cntl_t { 373struct intf_cntl_t {
350 unsigned char ad_inc_a : 1; 374 unsigned char ad_inc_a : 1;
351 unsigned char ring_buf_a : 1; 375 unsigned char ring_buf_a : 1;
352 unsigned char rd_fetch_trigger_a : 1; 376 unsigned char rd_fetch_trigger_a : 1;
353 unsigned char rd_data_rdy_a : 1; 377 unsigned char rd_data_rdy_a : 1;
354 unsigned char ad_inc_b : 1; 378 unsigned char ad_inc_b : 1;
355 unsigned char ring_buf_b : 1; 379 unsigned char ring_buf_b : 1;
356 unsigned char rd_fetch_trigger_b : 1; 380 unsigned char rd_fetch_trigger_b : 1;
357 unsigned char rd_data_rdy_b : 1; 381 unsigned char rd_data_rdy_b : 1;
358} __attribute__((packed)); 382} __attribute__((packed));
359 383
360union intf_cntl_u { 384union intf_cntl_u {
361 unsigned char val : 8; 385 unsigned char val : 8;
362 struct intf_cntl_t f; 386 struct intf_cntl_t f;
363} __attribute__((packed)); 387} __attribute__((packed));
364 388
365struct cpu_defaults_t { 389struct cpu_defaults_t {
366 unsigned char unpack_rd_data : 1; 390 unsigned char unpack_rd_data : 1;
367 unsigned char access_ind_addr_a: 1; 391 unsigned char access_ind_addr_a : 1;
368 unsigned char access_ind_addr_b: 1; 392 unsigned char access_ind_addr_b : 1;
369 unsigned char access_scratch_reg : 1; 393 unsigned char access_scratch_reg : 1;
370 unsigned char pack_wr_data : 1; 394 unsigned char pack_wr_data : 1;
371 unsigned char transition_size : 1; 395 unsigned char transition_size : 1;
372 unsigned char en_read_buf_mode : 1; 396 unsigned char en_read_buf_mode : 1;
373 unsigned char rd_fetch_scratch : 1; 397 unsigned char rd_fetch_scratch : 1;
374} __attribute__((packed)); 398} __attribute__((packed));
375 399
376union cpu_defaults_u { 400union cpu_defaults_u {
377 unsigned char val : 8; 401 unsigned char val : 8;
378 struct cpu_defaults_t f; 402 struct cpu_defaults_t f;
403} __attribute__((packed));
404
405struct crtc_total_t {
406 unsigned long crtc_h_total : 10;
407 unsigned long : 6;
408 unsigned long crtc_v_total : 10;
409 unsigned long : 6;
410} __attribute__((packed));
411
412union crtc_total_u {
413 unsigned long val : 32;
414 struct crtc_total_t f;
415} __attribute__((packed));
416
417struct crtc_ss_t {
418 unsigned long ss_start : 10;
419 unsigned long : 6;
420 unsigned long ss_end : 10;
421 unsigned long : 2;
422 unsigned long ss_align : 1;
423 unsigned long ss_pol : 1;
424 unsigned long ss_run_mode : 1;
425 unsigned long ss_en : 1;
426} __attribute__((packed));
427
428union crtc_ss_u {
429 unsigned long val : 32;
430 struct crtc_ss_t f;
431} __attribute__((packed));
432
433struct active_h_disp_t {
434 unsigned long active_h_start : 10;
435 unsigned long : 6;
436 unsigned long active_h_end : 10;
437 unsigned long : 6;
438} __attribute__((packed));
439
440union active_h_disp_u {
441 unsigned long val : 32;
442 struct active_h_disp_t f;
443} __attribute__((packed));
444
445struct active_v_disp_t {
446 unsigned long active_v_start : 10;
447 unsigned long : 6;
448 unsigned long active_v_end : 10;
449 unsigned long : 6;
450} __attribute__((packed));
451
452union active_v_disp_u {
453 unsigned long val : 32;
454 struct active_v_disp_t f;
455} __attribute__((packed));
456
457struct graphic_h_disp_t {
458 unsigned long graphic_h_start : 10;
459 unsigned long : 6;
460 unsigned long graphic_h_end : 10;
461 unsigned long : 6;
462} __attribute__((packed));
463
464union graphic_h_disp_u {
465 unsigned long val : 32;
466 struct graphic_h_disp_t f;
467} __attribute__((packed));
468
469struct graphic_v_disp_t {
470 unsigned long graphic_v_start : 10;
471 unsigned long : 6;
472 unsigned long graphic_v_end : 10;
473 unsigned long : 6;
474} __attribute__((packed));
475
476union graphic_v_disp_u {
477 unsigned long val : 32;
478 struct graphic_v_disp_t f;
479} __attribute__((packed));
480
481struct graphic_ctrl_t_w100 {
482 unsigned long color_depth : 3;
483 unsigned long portrait_mode : 2;
484 unsigned long low_power_on : 1;
485 unsigned long req_freq : 4;
486 unsigned long en_crtc : 1;
487 unsigned long en_graphic_req : 1;
488 unsigned long en_graphic_crtc : 1;
489 unsigned long total_req_graphic : 9;
490 unsigned long lcd_pclk_on : 1;
491 unsigned long lcd_sclk_on : 1;
492 unsigned long pclk_running : 1;
493 unsigned long sclk_running : 1;
494 unsigned long : 6;
495} __attribute__((packed));
496
497struct graphic_ctrl_t_w32xx {
498 unsigned long color_depth : 3;
499 unsigned long portrait_mode : 2;
500 unsigned long low_power_on : 1;
501 unsigned long req_freq : 4;
502 unsigned long en_crtc : 1;
503 unsigned long en_graphic_req : 1;
504 unsigned long en_graphic_crtc : 1;
505 unsigned long total_req_graphic : 10;
506 unsigned long lcd_pclk_on : 1;
507 unsigned long lcd_sclk_on : 1;
508 unsigned long pclk_running : 1;
509 unsigned long sclk_running : 1;
510 unsigned long : 5;
511} __attribute__((packed));
512
513union graphic_ctrl_u {
514 unsigned long val : 32;
515 struct graphic_ctrl_t_w100 f_w100;
516 struct graphic_ctrl_t_w32xx f_w32xx;
379} __attribute__((packed)); 517} __attribute__((packed));
380 518
381struct video_ctrl_t { 519struct video_ctrl_t {
382 unsigned long video_mode : 1; 520 unsigned long video_mode : 1;
383 unsigned long keyer_en : 1; 521 unsigned long keyer_en : 1;
384 unsigned long en_video_req : 1; 522 unsigned long en_video_req : 1;
385 unsigned long en_graphic_req_video : 1; 523 unsigned long en_graphic_req_video : 1;
386 unsigned long en_video_crtc : 1; 524 unsigned long en_video_crtc : 1;
387 unsigned long video_hor_exp : 2; 525 unsigned long video_hor_exp : 2;
388 unsigned long video_ver_exp : 2; 526 unsigned long video_ver_exp : 2;
389 unsigned long uv_combine : 1; 527 unsigned long uv_combine : 1;
390 unsigned long total_req_video : 9; 528 unsigned long total_req_video : 9;
391 unsigned long video_ch_sel : 1; 529 unsigned long video_ch_sel : 1;
392 unsigned long video_portrait : 2; 530 unsigned long video_portrait : 2;
393 unsigned long yuv2rgb_en : 1; 531 unsigned long yuv2rgb_en : 1;
394 unsigned long yuv2rgb_option : 1; 532 unsigned long yuv2rgb_option : 1;
395 unsigned long video_inv_hor : 1; 533 unsigned long video_inv_hor : 1;
396 unsigned long video_inv_ver : 1; 534 unsigned long video_inv_ver : 1;
397 unsigned long gamma_sel : 2; 535 unsigned long gamma_sel : 2;
398 unsigned long dis_limit : 1; 536 unsigned long dis_limit : 1;
399 unsigned long en_uv_hblend : 1; 537 unsigned long en_uv_hblend : 1;
400 unsigned long rgb_gamma_sel : 2; 538 unsigned long rgb_gamma_sel : 2;
401} __attribute__((packed)); 539} __attribute__((packed));
402 540
403union video_ctrl_u { 541union video_ctrl_u {
404 unsigned long val : 32; 542 unsigned long val : 32;
405 struct video_ctrl_t f; 543 struct video_ctrl_t f;
406} __attribute__((packed)); 544} __attribute__((packed));
407 545
408struct disp_db_buf_cntl_rd_t { 546struct disp_db_buf_cntl_rd_t {
409 unsigned long en_db_buf : 1; 547 unsigned long en_db_buf : 1;
410 unsigned long update_db_buf_done : 1; 548 unsigned long update_db_buf_done : 1;
411 unsigned long db_buf_cntl : 6; 549 unsigned long db_buf_cntl : 6;
412 unsigned long : 24; 550 unsigned long : 24;
413} __attribute__((packed)); 551} __attribute__((packed));
414 552
415union disp_db_buf_cntl_rd_u { 553union disp_db_buf_cntl_rd_u {
416 unsigned long val : 32; 554 unsigned long val : 32;
417 struct disp_db_buf_cntl_rd_t f; 555 struct disp_db_buf_cntl_rd_t f;
418} __attribute__((packed)); 556} __attribute__((packed));
419 557
420struct disp_db_buf_cntl_wr_t { 558struct disp_db_buf_cntl_wr_t {
421 unsigned long en_db_buf : 1; 559 unsigned long en_db_buf : 1;
422 unsigned long update_db_buf : 1; 560 unsigned long update_db_buf : 1;
423 unsigned long db_buf_cntl : 6; 561 unsigned long db_buf_cntl : 6;
424 unsigned long : 24; 562 unsigned long : 24;
425} __attribute__((packed)); 563} __attribute__((packed));
426 564
427union disp_db_buf_cntl_wr_u { 565union disp_db_buf_cntl_wr_u {
428 unsigned long val : 32; 566 unsigned long val : 32;
429 struct disp_db_buf_cntl_wr_t f; 567 struct disp_db_buf_cntl_wr_t f;
430} __attribute__((packed)); 568} __attribute__((packed));
431 569
432struct gamma_value1_t { 570struct gamma_value1_t {
433 unsigned long gamma1 : 8; 571 unsigned long gamma1 : 8;
434 unsigned long gamma2 : 8; 572 unsigned long gamma2 : 8;
435 unsigned long gamma3 : 8; 573 unsigned long gamma3 : 8;
436 unsigned long gamma4 : 8; 574 unsigned long gamma4 : 8;
437} __attribute__((packed)); 575} __attribute__((packed));
438 576
439union gamma_value1_u { 577union gamma_value1_u {
440 unsigned long val : 32; 578 unsigned long val : 32;
441 struct gamma_value1_t f; 579 struct gamma_value1_t f;
442} __attribute__((packed)); 580} __attribute__((packed));
443 581
444struct gamma_value2_t { 582struct gamma_value2_t {
445 unsigned long gamma5 : 8; 583 unsigned long gamma5 : 8;
446 unsigned long gamma6 : 8; 584 unsigned long gamma6 : 8;
447 unsigned long gamma7 : 8; 585 unsigned long gamma7 : 8;
448 unsigned long gamma8 : 8; 586 unsigned long gamma8 : 8;
449} __attribute__((packed)); 587} __attribute__((packed));
450 588
451union gamma_value2_u { 589union gamma_value2_u {
452 unsigned long val : 32; 590 unsigned long val : 32;
453 struct gamma_value2_t f; 591 struct gamma_value2_t f;
454} __attribute__((packed)); 592} __attribute__((packed));
455 593
456struct gamma_slope_t { 594struct gamma_slope_t {
457 unsigned long slope1 : 3; 595 unsigned long slope1 : 3;
458 unsigned long slope2 : 3; 596 unsigned long slope2 : 3;
459 unsigned long slope3 : 3; 597 unsigned long slope3 : 3;
460 unsigned long slope4 : 3; 598 unsigned long slope4 : 3;
461 unsigned long slope5 : 3; 599 unsigned long slope5 : 3;
462 unsigned long slope6 : 3; 600 unsigned long slope6 : 3;
463 unsigned long slope7 : 3; 601 unsigned long slope7 : 3;
464 unsigned long slope8 : 3; 602 unsigned long slope8 : 3;
465 unsigned long : 8; 603 unsigned long : 8;
466} __attribute__((packed)); 604} __attribute__((packed));
467 605
468union gamma_slope_u { 606union gamma_slope_u {
469 unsigned long val : 32; 607 unsigned long val : 32;
470 struct gamma_slope_t f; 608 struct gamma_slope_t f;
471} __attribute__((packed)); 609} __attribute__((packed));
472 610
473struct mc_ext_mem_location_t { 611struct mc_ext_mem_location_t {
474 unsigned long mc_ext_mem_start : 16; 612 unsigned long mc_ext_mem_start : 16;
475 unsigned long mc_ext_mem_top : 16; 613 unsigned long mc_ext_mem_top : 16;
476} __attribute__((packed)); 614} __attribute__((packed));
477 615
478union mc_ext_mem_location_u { 616union mc_ext_mem_location_u {
479 unsigned long val : 32; 617 unsigned long val : 32;
480 struct mc_ext_mem_location_t f; 618 struct mc_ext_mem_location_t f;
619} __attribute__((packed));
620
621struct mc_fb_location_t {
622 unsigned long mc_fb_start : 16;
623 unsigned long mc_fb_top : 16;
624} __attribute__((packed));
625
626union mc_fb_location_u {
627 unsigned long val : 32;
628 struct mc_fb_location_t f;
481} __attribute__((packed)); 629} __attribute__((packed));
482 630
483struct clk_pin_cntl_t { 631struct clk_pin_cntl_t {
484 unsigned long osc_en : 1; 632 unsigned long osc_en : 1;
485 unsigned long osc_gain : 5; 633 unsigned long osc_gain : 5;
486 unsigned long dont_use_xtalin : 1; 634 unsigned long dont_use_xtalin : 1;
487 unsigned long xtalin_pm_en : 1; 635 unsigned long xtalin_pm_en : 1;
488 unsigned long xtalin_dbl_en : 1; 636 unsigned long xtalin_dbl_en : 1;
489 unsigned long : 7; 637 unsigned long : 7;
490 unsigned long cg_debug : 16; 638 unsigned long cg_debug : 16;
491} __attribute__((packed)); 639} __attribute__((packed));
492 640
493union clk_pin_cntl_u { 641union clk_pin_cntl_u {
494 unsigned long val : 32; 642 unsigned long val : 32;
495 struct clk_pin_cntl_t f; 643 struct clk_pin_cntl_t f;
496} __attribute__((packed)); 644} __attribute__((packed));
497 645
498struct pll_ref_fb_div_t { 646struct pll_ref_fb_div_t {
499 unsigned long pll_ref_div : 4; 647 unsigned long pll_ref_div : 4;
500 unsigned long : 4; 648 unsigned long : 4;
501 unsigned long pll_fb_div_int : 6; 649 unsigned long pll_fb_div_int : 6;
502 unsigned long : 2; 650 unsigned long : 2;
503 unsigned long pll_fb_div_frac : 3; 651 unsigned long pll_fb_div_frac : 3;
504 unsigned long : 1; 652 unsigned long : 1;
505 unsigned long pll_reset_time : 4; 653 unsigned long pll_reset_time : 4;
506 unsigned long pll_lock_time : 8; 654 unsigned long pll_lock_time : 8;
507} __attribute__((packed)); 655} __attribute__((packed));
508 656
509union pll_ref_fb_div_u { 657union pll_ref_fb_div_u {
510 unsigned long val : 32; 658 unsigned long val : 32;
511 struct pll_ref_fb_div_t f; 659 struct pll_ref_fb_div_t f;
512} __attribute__((packed)); 660} __attribute__((packed));
513 661
514struct pll_cntl_t { 662struct pll_cntl_t {
515 unsigned long pll_pwdn : 1; 663 unsigned long pll_pwdn : 1;
516 unsigned long pll_reset : 1; 664 unsigned long pll_reset : 1;
517 unsigned long pll_pm_en : 1; 665 unsigned long pll_pm_en : 1;
518 unsigned long pll_mode : 1; 666 unsigned long pll_mode : 1;
519 unsigned long pll_refclk_sel : 1; 667 unsigned long pll_refclk_sel : 1;
520 unsigned long pll_fbclk_sel : 1; 668 unsigned long pll_fbclk_sel : 1;
521 unsigned long pll_tcpoff : 1; 669 unsigned long pll_tcpoff : 1;
522 unsigned long pll_pcp : 3; 670 unsigned long pll_pcp : 3;
523 unsigned long pll_pvg : 3; 671 unsigned long pll_pvg : 3;
524 unsigned long pll_vcofr : 1; 672 unsigned long pll_vcofr : 1;
525 unsigned long pll_ioffset : 2; 673 unsigned long pll_ioffset : 2;
526 unsigned long pll_pecc_mode : 2; 674 unsigned long pll_pecc_mode : 2;
527 unsigned long pll_pecc_scon : 2; 675 unsigned long pll_pecc_scon : 2;
528 unsigned long pll_dactal : 4; 676 unsigned long pll_dactal : 4;
529 unsigned long pll_cp_clip : 2; 677 unsigned long pll_cp_clip : 2;
530 unsigned long pll_conf : 3; 678 unsigned long pll_conf : 3;
531 unsigned long pll_mbctrl : 2; 679 unsigned long pll_mbctrl : 2;
532 unsigned long pll_ring_off : 1; 680 unsigned long pll_ring_off : 1;
533} __attribute__((packed)); 681} __attribute__((packed));
534 682
535union pll_cntl_u { 683union pll_cntl_u {
536 unsigned long val : 32; 684 unsigned long val : 32;
537 struct pll_cntl_t f; 685 struct pll_cntl_t f;
538} __attribute__((packed)); 686} __attribute__((packed));
539 687
540struct sclk_cntl_t { 688struct sclk_cntl_t {
541 unsigned long sclk_src_sel : 2; 689 unsigned long sclk_src_sel : 2;
542 unsigned long : 2; 690 unsigned long : 2;
543 unsigned long sclk_post_div_fast : 4; 691 unsigned long sclk_post_div_fast : 4;
544 unsigned long sclk_clkon_hys : 3; 692 unsigned long sclk_clkon_hys : 3;
545 unsigned long sclk_post_div_slow : 4; 693 unsigned long sclk_post_div_slow : 4;
546 unsigned long disp_cg_ok2switch_en : 1; 694 unsigned long disp_cg_ok2switch_en : 1;
547 unsigned long sclk_force_reg : 1; 695 unsigned long sclk_force_reg : 1;
548 unsigned long sclk_force_disp : 1; 696 unsigned long sclk_force_disp : 1;
549 unsigned long sclk_force_mc : 1; 697 unsigned long sclk_force_mc : 1;
550 unsigned long sclk_force_extmc : 1; 698 unsigned long sclk_force_extmc : 1;
551 unsigned long sclk_force_cp : 1; 699 unsigned long sclk_force_cp : 1;
552 unsigned long sclk_force_e2 : 1; 700 unsigned long sclk_force_e2 : 1;
553 unsigned long sclk_force_e3 : 1; 701 unsigned long sclk_force_e3 : 1;
554 unsigned long sclk_force_idct : 1; 702 unsigned long sclk_force_idct : 1;
555 unsigned long sclk_force_bist : 1; 703 unsigned long sclk_force_bist : 1;
556 unsigned long busy_extend_cp : 1; 704 unsigned long busy_extend_cp : 1;
557 unsigned long busy_extend_e2 : 1; 705 unsigned long busy_extend_e2 : 1;
558 unsigned long busy_extend_e3 : 1; 706 unsigned long busy_extend_e3 : 1;
559 unsigned long busy_extend_idct : 1; 707 unsigned long busy_extend_idct : 1;
560 unsigned long : 3; 708 unsigned long : 3;
561} __attribute__((packed)); 709} __attribute__((packed));
562 710
563union sclk_cntl_u { 711union sclk_cntl_u {
564 unsigned long val : 32; 712 unsigned long val : 32;
565 struct sclk_cntl_t f; 713 struct sclk_cntl_t f;
566} __attribute__((packed)); 714} __attribute__((packed));
567 715
568struct pclk_cntl_t { 716struct pclk_cntl_t {
569 unsigned long pclk_src_sel : 2; 717 unsigned long pclk_src_sel : 2;
570 unsigned long : 2; 718 unsigned long : 2;
571 unsigned long pclk_post_div : 4; 719 unsigned long pclk_post_div : 4;
572 unsigned long : 8; 720 unsigned long : 8;
573 unsigned long pclk_force_disp : 1; 721 unsigned long pclk_force_disp : 1;
574 unsigned long : 15; 722 unsigned long : 15;
575} __attribute__((packed)); 723} __attribute__((packed));
576 724
577union pclk_cntl_u { 725union pclk_cntl_u {
578 unsigned long val : 32; 726 unsigned long val : 32;
579 struct pclk_cntl_t f; 727 struct pclk_cntl_t f;
580} __attribute__((packed)); 728} __attribute__((packed));
581 729
730
731#define TESTCLK_SRC_PLL 0x01
732#define TESTCLK_SRC_SCLK 0x02
733#define TESTCLK_SRC_PCLK 0x03
734/* 4 and 5 seem to be XTAL/M */
735#define TESTCLK_SRC_XTAL 0x06
736
582struct clk_test_cntl_t { 737struct clk_test_cntl_t {
583 unsigned long testclk_sel : 4; 738 unsigned long testclk_sel : 4;
584 unsigned long : 3; 739 unsigned long : 3;
585 unsigned long start_check_freq : 1; 740 unsigned long start_check_freq : 1;
586 unsigned long tstcount_rst : 1; 741 unsigned long tstcount_rst : 1;
587 unsigned long : 15; 742 unsigned long : 15;
588 unsigned long test_count : 8; 743 unsigned long test_count : 8;
589} __attribute__((packed)); 744} __attribute__((packed));
590 745
591union clk_test_cntl_u { 746union clk_test_cntl_u {
592 unsigned long val : 32; 747 unsigned long val : 32;
593 struct clk_test_cntl_t f; 748 struct clk_test_cntl_t f;
594} __attribute__((packed)); 749} __attribute__((packed));
595 750
596struct pwrmgt_cntl_t { 751struct pwrmgt_cntl_t {
597 unsigned long pwm_enable : 1; 752 unsigned long pwm_enable : 1;
598 unsigned long : 1; 753 unsigned long : 1;
599 unsigned long pwm_mode_req : 2; 754 unsigned long pwm_mode_req : 2;
600 unsigned long pwm_wakeup_cond : 2; 755 unsigned long pwm_wakeup_cond : 2;
601 unsigned long pwm_fast_noml_hw_en : 1; 756 unsigned long pwm_fast_noml_hw_en : 1;
602 unsigned long pwm_noml_fast_hw_en : 1; 757 unsigned long pwm_noml_fast_hw_en : 1;
603 unsigned long pwm_fast_noml_cond : 4; 758 unsigned long pwm_fast_noml_cond : 4;
604 unsigned long pwm_noml_fast_cond : 4; 759 unsigned long pwm_noml_fast_cond : 4;
605 unsigned long pwm_idle_timer : 8; 760 unsigned long pwm_idle_timer : 8;
606 unsigned long pwm_busy_timer : 8; 761 unsigned long pwm_busy_timer : 8;
607} __attribute__((packed)); 762} __attribute__((packed));
608 763
609union pwrmgt_cntl_u { 764union pwrmgt_cntl_u {
610 unsigned long val : 32; 765 unsigned long val : 32;
611 struct pwrmgt_cntl_t f; 766 struct pwrmgt_cntl_t f;
612} __attribute__((packed)); 767} __attribute__((packed));
613 768
614#endif 769#endif
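The register layouts added in the header above are each declared twice: as a packed struct of named bitfields (the *_t types) and as a union (the *_u types) that overlays those fields on a single 32-bit value, so a driver can read the whole register, change one field, and write it back. A minimal sketch of that read-modify-write pattern, using the mmMC_FB_LOCATION offset and the mc_fb_location_u union from the header; the remapped_regs pointer and the readl()/writel() accessors are assumptions standing in for whatever MMIO mapping the real driver sets up, not code from this patch:

#include <asm/io.h>	/* readl()/writel(); kernel-side sketch only */

/* Illustrative only: program the framebuffer aperture through its bitfield union. */
static void example_set_fb_location(void __iomem *remapped_regs,
				    unsigned int start, unsigned int top)
{
	union mc_fb_location_u fb;

	fb.val = readl(remapped_regs + mmMC_FB_LOCATION);	/* fetch the whole 32-bit register */
	fb.f.mc_fb_start = start;				/* update individual fields */
	fb.f.mc_fb_top = top;
	writel(fb.val, remapped_regs + mmMC_FB_LOCATION);	/* write it back in one access */
}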
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b5a5e04b6d37..498ad505fa5f 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -86,9 +86,9 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
86 86
87 dev->driver = driver; 87 dev->driver = driver;
88 88
89 dev->groups = 23; 89 dev->groups = 1;
90 dev->seq = 1; 90 dev->seq = 1;
91 dev->nls = netlink_kernel_create(NETLINK_W1, NULL); 91 dev->nls = netlink_kernel_create(NETLINK_W1, 1, NULL, THIS_MODULE);
92 if (!dev->nls) { 92 if (!dev->nls) {
93 printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n", 93 printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n",
94 NETLINK_NFLOG, dev->dev.bus_id); 94 NETLINK_NFLOG, dev->dev.bus_id);
@@ -225,3 +225,5 @@ void w1_remove_master_device(struct w1_bus_master *bm)
225 225
226EXPORT_SYMBOL(w1_add_master_device); 226EXPORT_SYMBOL(w1_add_master_device);
227EXPORT_SYMBOL(w1_remove_master_device); 227EXPORT_SYMBOL(w1_remove_master_device);
228
229MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_W1);
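The w1_int.c hunk above switches the master to a single netlink multicast group (dev->groups = 1) and the four-argument netlink_kernel_create(), so w1 events are broadcast on group 1 of the NETLINK_W1 family. A hedged userspace sketch of subscribing to those events; it assumes NETLINK_W1 is exported through <linux/netlink.h> and is illustrative only, not part of the patch:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* Open a socket subscribed to w1 multicast group 1 (matches dev->groups above). */
int w1_open_listener(void)
{
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,
	};
	int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_W1);

	if (s < 0)
		return -1;
	if (bind(s, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(s);
		return -1;
	}
	return s;	/* recv() yields nlmsghdr-framed w1_netlink_msg events */
}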
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 2a82fb055c70..e7b774423dd6 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -51,7 +51,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
51 51
52 memcpy(data, msg, sizeof(struct w1_netlink_msg)); 52 memcpy(data, msg, sizeof(struct w1_netlink_msg));
53 53
54 NETLINK_CB(skb).dst_groups = dev->groups; 54 NETLINK_CB(skb).dst_group = dev->groups;
55 netlink_broadcast(dev->nls, skb, 0, dev->groups, GFP_ATOMIC); 55 netlink_broadcast(dev->nls, skb, 0, dev->groups, GFP_ATOMIC);
56 56
57nlmsg_failure: 57nlmsg_failure: